Add support for the burst capacity API. This API provides the calling
application with the remaining capacity of the current burst, limited by
the maximum hardware batch size.
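
An application can poll this capacity before enqueueing a batch of
operations. The sketch below uses the generic rte_dma_burst_capacity()
library call; the dev_id, vchan, srcs, dsts, len and n parameters are
illustrative only and not part of this patch:

    #include <rte_dmadev.h>

    /* Enqueue up to 'n' copies only if the current burst has room for all. */
    static void
    copy_if_room(int16_t dev_id, uint16_t vchan, rte_iova_t *srcs,
                    rte_iova_t *dsts, uint32_t len, uint16_t n)
    {
            uint16_t i;

            /* remaining capacity of the current burst, capped by max HW batch size */
            if (rte_dma_burst_capacity(dev_id, vchan) < n)
                    return;

            for (i = 0; i < n; i++)
                    rte_dma_copy(dev_id, vchan, srcs[i], dsts[i], len, 0);
            rte_dma_submit(dev_id, vchan);
    }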

Signed-off-by: Kevin Laatz <kevin.la...@intel.com>
Reviewed-by: Conor Walsh <conor.wa...@intel.com>
---
 drivers/dma/idxd/idxd_bus.c      |  1 +
 drivers/dma/idxd/idxd_common.c   | 20 ++++++++++++++++++++
 drivers/dma/idxd/idxd_internal.h |  1 +
 drivers/dma/idxd/idxd_pci.c      |  2 ++
 4 files changed, 24 insertions(+)

diff --git a/drivers/dma/idxd/idxd_bus.c b/drivers/dma/idxd/idxd_bus.c
index e6caa048a9..54129e5083 100644
--- a/drivers/dma/idxd/idxd_bus.c
+++ b/drivers/dma/idxd/idxd_bus.c
@@ -102,6 +102,7 @@ static const struct rte_dma_dev_ops idxd_bus_ops = {
                .stats_get = idxd_stats_get,
                .stats_reset = idxd_stats_reset,
                .vchan_status = idxd_vchan_status,
+               .burst_capacity = idxd_burst_capacity,
 };
 
 static void *
diff --git a/drivers/dma/idxd/idxd_common.c b/drivers/dma/idxd/idxd_common.c
index 87d84c081e..b31611c8a4 100644
--- a/drivers/dma/idxd/idxd_common.c
+++ b/drivers/dma/idxd/idxd_common.c
@@ -469,6 +469,26 @@ idxd_info_get(const struct rte_dma_dev *dev, struct rte_dma_info *info, uint32_t
        return 0;
 }
 
+uint16_t
+idxd_burst_capacity(const struct rte_dma_dev *dev, uint16_t vchan __rte_unused)
+{
+       struct idxd_dmadev *idxd = dev->dev_private;
+       uint16_t write_idx = idxd->batch_start + idxd->batch_size;
+       uint16_t used_space;
+
+       /* Check for space in the batch ring */
+       if ((idxd->batch_idx_read == 0 && idxd->batch_idx_write == idxd->max_batches) ||
+                       idxd->batch_idx_write + 1 == idxd->batch_idx_read)
+               return 0;
+
+       /* For descriptors, check for wrap-around on write but not read */
+       if (idxd->ids_returned > write_idx)
+               write_idx += idxd->desc_ring_mask + 1;
+       used_space = write_idx - idxd->ids_returned;
+
+       return RTE_MIN((idxd->desc_ring_mask - used_space), idxd->max_batch_size);
+}
+
 int
 idxd_configure(struct rte_dma_dev *dev __rte_unused, const struct rte_dma_conf *dev_conf,
                uint32_t conf_sz)
diff --git a/drivers/dma/idxd/idxd_internal.h b/drivers/dma/idxd/idxd_internal.h
index a291ad26d9..3ef2f729a8 100644
--- a/drivers/dma/idxd/idxd_internal.h
+++ b/drivers/dma/idxd/idxd_internal.h
@@ -103,5 +103,6 @@ int idxd_stats_get(const struct rte_dma_dev *dev, uint16_t vchan,
 int idxd_stats_reset(struct rte_dma_dev *dev, uint16_t vchan);
 int idxd_vchan_status(const struct rte_dma_dev *dev, uint16_t vchan,
                enum rte_dma_vchan_status *status);
+uint16_t idxd_burst_capacity(const struct rte_dma_dev *dev, uint16_t vchan);
 
 #endif /* _IDXD_INTERNAL_H_ */
diff --git a/drivers/dma/idxd/idxd_pci.c b/drivers/dma/idxd/idxd_pci.c
index 2464d4a06c..03ddd63f38 100644
--- a/drivers/dma/idxd/idxd_pci.c
+++ b/drivers/dma/idxd/idxd_pci.c
@@ -119,6 +119,7 @@ static const struct rte_dma_dev_ops idxd_pci_ops = {
        .dev_start = idxd_pci_dev_start,
        .dev_stop = idxd_pci_dev_stop,
        .vchan_status = idxd_vchan_status,
+       .burst_capacity = idxd_burst_capacity,
 };
 
 /* each portal uses 4 x 4k pages */
@@ -232,6 +233,7 @@ init_pci_device(struct rte_pci_device *dev, struct idxd_dmadev *idxd,
 
        idxd->u.pci = pci;
        idxd->max_batches = wq_size;
+       idxd->max_batch_size = 1 << lg2_max_batch;
 
        /* enable the device itself */
        err_code = idxd_pci_dev_command(idxd, idxd_enable_dev);
-- 
2.30.2
