Add support for the burst capacity API. This API will provide the calling application with the remaining capacity of the current burst (limited by max HW batch size).
Signed-off-by: Kevin Laatz <kevin.laatz@intel.com> --- drivers/dma/idxd/idxd_bus.c | 1 + drivers/dma/idxd/idxd_common.c | 20 ++++++++++++++++++++ drivers/dma/idxd/idxd_internal.h | 1 + drivers/dma/idxd/idxd_pci.c | 2 ++ 4 files changed, 24 insertions(+) diff --git a/drivers/dma/idxd/idxd_bus.c b/drivers/dma/idxd/idxd_bus.c index 8f0fcad87a..e2bcca1c74 100644 --- a/drivers/dma/idxd/idxd_bus.c +++ b/drivers/dma/idxd/idxd_bus.c @@ -102,6 +102,7 @@ static const struct rte_dmadev_ops idxd_vdev_ops = { .stats_get = idxd_stats_get, .stats_reset = idxd_stats_reset, .vchan_status = idxd_vchan_status, + .burst_capacity = idxd_burst_capacity, }; static void * diff --git a/drivers/dma/idxd/idxd_common.c b/drivers/dma/idxd/idxd_common.c index e20b41ae54..ced9f81772 100644 --- a/drivers/dma/idxd/idxd_common.c +++ b/drivers/dma/idxd/idxd_common.c @@ -470,6 +470,26 @@ idxd_info_get(const struct rte_dmadev *dev, struct rte_dmadev_info *info, uint32 return 0; } +uint16_t +idxd_burst_capacity(const struct rte_dmadev *dev, uint16_t vchan __rte_unused) +{ + struct idxd_dmadev *idxd = dev->dev_private; + uint16_t write_idx = idxd->batch_start + idxd->batch_size; + uint16_t used_space; + + /* Check for space in the batch ring */ + if ((idxd->batch_idx_read == 0 && idxd->batch_idx_write == idxd->max_batches) || + idxd->batch_idx_write + 1 == idxd->batch_idx_read) + return 0; + + /* For descriptors, check for wrap-around on write but not read */ + if (idxd->ids_returned > write_idx) + write_idx += idxd->desc_ring_mask + 1; + used_space = write_idx - idxd->ids_returned; + + return RTE_MIN((idxd->desc_ring_mask - used_space), idxd->max_batch_size); +} + int idxd_configure(struct rte_dmadev *dev __rte_unused, const struct rte_dmadev_conf *dev_conf, uint32_t conf_sz) diff --git a/drivers/dma/idxd/idxd_internal.h b/drivers/dma/idxd/idxd_internal.h index fcc0235a1d..692d27cf72 100644 --- a/drivers/dma/idxd/idxd_internal.h +++ b/drivers/dma/idxd/idxd_internal.h @@ -103,5 +103,6 @@ int
idxd_stats_get(const struct rte_dmadev *dev, uint16_t vchan, int idxd_stats_reset(struct rte_dmadev *dev, uint16_t vchan); int idxd_vchan_status(const struct rte_dmadev *dev, uint16_t vchan, enum rte_dmadev_vchan_status *status); +uint16_t idxd_burst_capacity(const struct rte_dmadev *dev, uint16_t vchan); #endif /* _IDXD_INTERNAL_H_ */ diff --git a/drivers/dma/idxd/idxd_pci.c b/drivers/dma/idxd/idxd_pci.c index f3a5d2a970..5da14eb9a2 100644 --- a/drivers/dma/idxd/idxd_pci.c +++ b/drivers/dma/idxd/idxd_pci.c @@ -119,6 +119,7 @@ static const struct rte_dmadev_ops idxd_pci_ops = { .dev_start = idxd_pci_dev_start, .dev_stop = idxd_pci_dev_stop, .vchan_status = idxd_vchan_status, + .burst_capacity = idxd_burst_capacity, }; /* each portal uses 4 x 4k pages */ @@ -232,6 +233,7 @@ init_pci_device(struct rte_pci_device *dev, struct idxd_dmadev *idxd, idxd->u.pci = pci; idxd->max_batches = wq_size; + idxd->max_batch_size = 1 << lg2_max_batch; /* enable the device itself */ err_code = idxd_pci_dev_command(idxd, idxd_enable_dev); -- 2.30.2