Add data path functions for enqueuing and submitting operations to DSA
devices.
Signed-off-by: Bruce Richardson <[email protected]>
Signed-off-by: Kevin Laatz <[email protected]>
<snip>
+static __rte_always_inline int
+__idxd_write_desc(struct rte_dmadev *dev,
+ const uint32_t op_flags,
+ const rte_iova_t src,
+ const rte_iova_t dst,
+ const uint32_t size,
+ const uint32_t flags)
+{
+ struct idxd_dmadev *idxd = dev->dev_private;
+ uint16_t mask = idxd->desc_ring_mask;
+ uint16_t job_id = idxd->batch_start + idxd->batch_size;
+ /* we never wrap batches, so we only mask the start and allow start+size to overflow */
+ uint16_t write_idx = (idxd->batch_start & mask) + idxd->batch_size;
+
+ /* first check batch ring space then desc ring space */
+ if ((idxd->batch_idx_read == 0 && idxd->batch_idx_write == idxd->max_batches) ||
+ idxd->batch_idx_write + 1 == idxd->batch_idx_read)
+ goto failed;
+ if (((write_idx + 1) & mask) == (idxd->ids_returned & mask))
+ goto failed;
+
+ /* write desc. Note: descriptors don't wrap, but the completion address does */
+ const uint64_t op_flags64 = (uint64_t)(op_flags | IDXD_FLAG_COMPLETION_ADDR_VALID) << 32;
+ const uint64_t comp_addr = __desc_idx_to_iova(idxd, write_idx & mask);
+ _mm256_store_si256((void *)&idxd->desc_ring[write_idx],
+ _mm256_set_epi64x(dst, src, comp_addr, op_flags64));
+ _mm256_store_si256((void *)&idxd->desc_ring[write_idx].size,
+ _mm256_set_epi64x(0, 0, 0, size));
+
+ idxd->batch_size++;
+
+ rte_prefetch0_write(&idxd->desc_ring[write_idx + 1]);
+
+ if (flags & RTE_DMA_OP_FLAG_SUBMIT)
+ __submit(idxd);
+
+ return job_id;
+
+failed:
+ return -1;
+}
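As context for the data path additions described above, here is a minimal usage
sketch of how an application would drive this enqueue/submit path through the
generic dmadev API. The rte_dma_copy()/rte_dma_submit() names are the ones the
dmadev library ended up with; the exact spellings at this point in the series
may differ, and dev_id/vchan are assumed to be already configured and started:

        #include <rte_dmadev.h>

        static int
        copy_one_then_batch(int16_t dev_id, uint16_t vchan,
                        rte_iova_t src, rte_iova_t dst, uint32_t len)
        {
                int id;

                /* enqueue a single copy and ring the doorbell in the same call */
                id = rte_dma_copy(dev_id, vchan, src, dst, len, RTE_DMA_OP_FLAG_SUBMIT);
                if (id < 0)
                        return id; /* e.g. descriptor or batch ring full */

                /* or enqueue without the flag and submit the batch explicitly */
                id = rte_dma_copy(dev_id, vchan, src, dst, len, 0);
                if (id < 0)
                        return id;

                return rte_dma_submit(dev_id, vchan);
        }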
On the error handling: if the failed goto just returns -1, it would probably be
better to remove it and just return -1 in the two spots above, e.g.:
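Roughly like this (a sketch against the hunk above, untested):

        if ((idxd->batch_idx_read == 0 && idxd->batch_idx_write == idxd->max_batches) ||
                        idxd->batch_idx_write + 1 == idxd->batch_idx_read)
                return -1;
        if (((write_idx + 1) & mask) == (idxd->ids_returned & mask))
                return -1;

with the failed: label and the trailing return -1; at the end of the function dropped.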
Reviewed-by: Conor Walsh <[email protected]>