Add support for the burst capacity API. This API provides the calling
application with the remaining capacity of the current burst (limited by
the max HW batch size).
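
A minimal caller-side sketch of how an application might use the new API
(assuming the dmadev is already configured and started; dev_id, vchan and
the srcs/dsts/lens/nb_ops names are placeholders, not part of this patch):

    /* Enqueue no more copies than the current burst can still hold. */
    uint16_t cap = rte_dma_burst_capacity(dev_id, vchan);
    uint16_t n = RTE_MIN(cap, nb_ops);
    uint16_t i;

    for (i = 0; i < n; i++)
            rte_dma_copy(dev_id, vchan, srcs[i], dsts[i], lens[i],
                            (i == n - 1) ? RTE_DMA_OP_FLAG_SUBMIT : 0);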

Signed-off-by: Kevin Laatz <kevin.la...@intel.com>
Reviewed-by: Conor Walsh <conor.wa...@intel.com>
Reviewed-by: Bruce Richardson <bruce.richard...@intel.com>

---
v6: updates for burst capacity API moving to fastpath
---
 drivers/dma/idxd/idxd_common.c   | 21 +++++++++++++++++++++
 drivers/dma/idxd/idxd_internal.h |  1 +
 drivers/dma/idxd/idxd_pci.c      |  1 +
 3 files changed, 23 insertions(+)

diff --git a/drivers/dma/idxd/idxd_common.c b/drivers/dma/idxd/idxd_common.c
index 12c113a93b..a00fadc431 100644
--- a/drivers/dma/idxd/idxd_common.c
+++ b/drivers/dma/idxd/idxd_common.c
@@ -468,6 +468,26 @@ idxd_info_get(const struct rte_dma_dev *dev, struct rte_dma_info *info, uint32_t
        return 0;
 }
 
+uint16_t
+idxd_burst_capacity(const struct rte_dma_dev *dev, uint16_t vchan __rte_unused)
+{
+       struct idxd_dmadev *idxd = dev->dev_private;
+       uint16_t write_idx = idxd->batch_start + idxd->batch_size;
+       uint16_t used_space;
+
+       /* Check for space in the batch ring */
+       if ((idxd->batch_idx_read == 0 && idxd->batch_idx_write == idxd->max_batches) ||
+                       idxd->batch_idx_write + 1 == idxd->batch_idx_read)
+               return 0;
+
+       /* For descriptors, check for wrap-around on write but not read */
+       if (idxd->ids_returned > write_idx)
+               write_idx += idxd->desc_ring_mask + 1;
+       used_space = write_idx - idxd->ids_returned;
+
+       return RTE_MIN((idxd->desc_ring_mask - used_space), idxd->max_batch_size);
+}
+
 int
 idxd_configure(struct rte_dma_dev *dev __rte_unused, const struct rte_dma_conf *dev_conf,
                uint32_t conf_sz)
@@ -553,6 +573,7 @@ idxd_dmadev_create(const char *name, struct rte_device *dev,
        dmadev->submit = idxd_submit;
        dmadev->completed = idxd_completed;
        dmadev->completed_status = idxd_completed_status;
+       dmadev->burst_capacity = idxd_burst_capacity;
 
        idxd = dmadev->dev_private;
        *idxd = *base_idxd; /* copy over the main fields already passed in */
diff --git a/drivers/dma/idxd/idxd_internal.h b/drivers/dma/idxd/idxd_internal.h
index a291ad26d9..3ef2f729a8 100644
--- a/drivers/dma/idxd/idxd_internal.h
+++ b/drivers/dma/idxd/idxd_internal.h
@@ -103,5 +103,6 @@ int idxd_stats_get(const struct rte_dma_dev *dev, uint16_t vchan,
 int idxd_stats_reset(struct rte_dma_dev *dev, uint16_t vchan);
 int idxd_vchan_status(const struct rte_dma_dev *dev, uint16_t vchan,
                enum rte_dma_vchan_status *status);
+uint16_t idxd_burst_capacity(const struct rte_dma_dev *dev, uint16_t vchan);
 
 #endif /* _IDXD_INTERNAL_H_ */
diff --git a/drivers/dma/idxd/idxd_pci.c b/drivers/dma/idxd/idxd_pci.c
index 3152ec1289..f76383710c 100644
--- a/drivers/dma/idxd/idxd_pci.c
+++ b/drivers/dma/idxd/idxd_pci.c
@@ -254,6 +254,7 @@ init_pci_device(struct rte_pci_device *dev, struct idxd_dmadev *idxd,
 
        idxd->u.pci = pci;
        idxd->max_batches = wq_size;
+       idxd->max_batch_size = 1 << lg2_max_batch;
 
        /* enable the device itself */
        err_code = idxd_pci_dev_command(idxd, idxd_enable_dev);
-- 
2.30.2
