On 7/3/24 12:19, Jerin Jacob wrote:
On Wed, Jul 3, 2024 at 3:43 PM Srujana Challa <scha...@marvell.com> wrote:
This patch modifies the code to convert descriptor buffer IOVA
addresses to virtual addresses during the processing of shadow
control queue when IOVA mode is PA. This change enables Virtio-user
to operate with IOVA as the descriptor buffer address.
Signed-off-by: Srujana Challa <scha...@marvell.com>
---
.../net/virtio/virtio_user/virtio_user_dev.c | 33 ++++++++++++-------
1 file changed, 21 insertions(+), 12 deletions(-)
diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.c b/drivers/net/virtio/virtio_user/virtio_user_dev.c
index 1365c8a5c8..7f35f4b06b 100644
--- a/drivers/net/virtio/virtio_user/virtio_user_dev.c
+++ b/drivers/net/virtio/virtio_user/virtio_user_dev.c
@@ -896,6 +896,15 @@ virtio_user_handle_mq(struct virtio_user_dev *dev, uint16_t q_pairs)
#define CVQ_MAX_DATA_DESCS 32
+static inline void *
+virtio_user_iova2virt(rte_iova_t iova)
+{
+ if (rte_eal_iova_mode() == RTE_IOVA_PA)
There is RTE_IOVA_DC as well. So we may put positive logic. i.e
rte_eal_iova_mode() == RTE_IOVA_VA
Indeed, that would be better.
I can fix it while applying.
Thanks,
Maxime
+ return rte_mem_iova2virt(iova);
+ else
+ return (void *)(uintptr_t)iova;
+}
+
static uint32_t
virtio_user_handle_ctrl_msg_split(struct virtio_user_dev *dev, struct vring *vring,
			uint16_t idx_hdr)
@@ -921,17 +930,18 @@ virtio_user_handle_ctrl_msg_split(struct virtio_user_dev *dev, struct vring *vri
idx_status = i;
n_descs++;
- hdr = (void *)(uintptr_t)vring->desc[idx_hdr].addr;
+ hdr = virtio_user_iova2virt(vring->desc[idx_hdr].addr);
if (hdr->class == VIRTIO_NET_CTRL_MQ &&
hdr->cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
- uint16_t queues;
+ uint16_t queues, *addr;
- queues = *(uint16_t *)(uintptr_t)vring->desc[idx_data].addr;
+ addr = virtio_user_iova2virt(vring->desc[idx_data].addr);
+ queues = *addr;
status = virtio_user_handle_mq(dev, queues);
} else if (hdr->class == VIRTIO_NET_CTRL_MQ && hdr->cmd ==
VIRTIO_NET_CTRL_MQ_RSS_CONFIG) {
struct virtio_net_ctrl_rss *rss;
- rss = (struct virtio_net_ctrl_rss *)(uintptr_t)vring->desc[idx_data].addr;
+ rss = virtio_user_iova2virt(vring->desc[idx_data].addr);
status = virtio_user_handle_mq(dev, rss->max_tx_vq);
} else if (hdr->class == VIRTIO_NET_CTRL_RX ||
hdr->class == VIRTIO_NET_CTRL_MAC ||
@@ -944,7 +954,7 @@ virtio_user_handle_ctrl_msg_split(struct virtio_user_dev *dev, struct vring *vri
(struct virtio_pmd_ctrl *)hdr, dlen, nb_dlen);
/* Update status */
- *(virtio_net_ctrl_ack *)(uintptr_t)vring->desc[idx_status].addr = status;
+ *(virtio_net_ctrl_ack *)virtio_user_iova2virt(vring->desc[idx_status].addr) = status;
return n_descs;
}
@@ -986,18 +996,18 @@ virtio_user_handle_ctrl_msg_packed(struct virtio_user_dev *dev,
n_descs++;
}
- hdr = (void *)(uintptr_t)vring->desc[idx_hdr].addr;
+ hdr = virtio_user_iova2virt(vring->desc[idx_hdr].addr);
if (hdr->class == VIRTIO_NET_CTRL_MQ &&
hdr->cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
- uint16_t queues;
+ uint16_t queues, *addr;
- queues = *(uint16_t *)(uintptr_t)
- vring->desc[idx_data].addr;
+ addr = virtio_user_iova2virt(vring->desc[idx_data].addr);
+ queues = *addr;
status = virtio_user_handle_mq(dev, queues);
} else if (hdr->class == VIRTIO_NET_CTRL_MQ && hdr->cmd ==
VIRTIO_NET_CTRL_MQ_RSS_CONFIG) {
struct virtio_net_ctrl_rss *rss;
- rss = (struct virtio_net_ctrl_rss *)(uintptr_t)vring->desc[idx_data].addr;
+ rss = virtio_user_iova2virt(vring->desc[idx_data].addr);
status = virtio_user_handle_mq(dev, rss->max_tx_vq);
} else if (hdr->class == VIRTIO_NET_CTRL_RX ||
hdr->class == VIRTIO_NET_CTRL_MAC ||
@@ -1010,8 +1020,7 @@ virtio_user_handle_ctrl_msg_packed(struct virtio_user_dev *dev,
(struct virtio_pmd_ctrl *)hdr, dlen, nb_dlen);
/* Update status */
- *(virtio_net_ctrl_ack *)(uintptr_t)
- vring->desc[idx_status].addr = status;
+ *(virtio_net_ctrl_ack *)virtio_user_iova2virt(vring->desc[idx_status].addr) = status;
/* Update used descriptor */
vring->desc[idx_hdr].id = vring->desc[idx_status].id;
--
2.25.1