On 7/19/21 5:00 PM, Jiayu Hu wrote:
> This patch reworks the async configuration structure to improve code
> readability. In addition, add preserved padding fields on the structure
> for future usage.
>
> Signed-off-by: Jiayu Hu <jiayu...@intel.com>
> Reviewed-by: Chenbo Xia <chenbo....@intel.com>
> ---
> doc/guides/prog_guide/vhost_lib.rst | 21 ++++++++++--------
> examples/vhost/main.c | 8 +++----
> lib/vhost/rte_vhost_async.h | 44 ++++++++++++++++++-------------------
> lib/vhost/vhost.c | 19 +++++++---------
> lib/vhost/vhost.h | 3 +--
> 5 files changed, 46 insertions(+), 49 deletions(-)
Reviewed-by: Maxime Coquelin <maxime.coque...@redhat.com>
Please see one comment below:
> diff --git a/doc/guides/prog_guide/vhost_lib.rst b/doc/guides/prog_guide/vhost_lib.rst
> index d18fb98..2a61b85 100644
> --- a/doc/guides/prog_guide/vhost_lib.rst
> +++ b/doc/guides/prog_guide/vhost_lib.rst
> @@ -218,26 +218,29 @@ The following is an overview of some key Vhost API functions:
>
> Enable or disable zero copy feature of the vhost crypto backend.
>
> -* ``rte_vhost_async_channel_register(vid, queue_id, features, ops)``
> +* ``rte_vhost_async_channel_register(vid, queue_id, config, ops)``
>
> - Register a vhost queue with async copy device channel after vring
> - is enabled. Following device ``features`` must be specified together
> + Register an async copy device channel for a vhost queue after vring
> + is enabled. Following device ``config`` must be specified together
> with the registration:
>
> - * ``async_inorder``
> + * ``features``
>
> - Async copy device can guarantee the ordering of copy completion
> - sequence. Copies are completed in the same order with that at
> - the submission time.
> + This field is used to specify async copy device features.
>
> - Currently, only ``async_inorder`` capable device is supported by vhost.
> + ``RTE_VHOST_ASYNC_INORDER`` represents the async copy device can
> + guarantee the order of copy completion is the same as the order
> + of copy submission.
> +
> + Currently, only ``RTE_VHOST_ASYNC_INORDER`` capable device is
> + supported by vhost.
>
> * ``async_threshold``
>
> The copy length (in bytes) below which CPU copy will be used even if
> applications call async vhost APIs to enqueue/dequeue data.
>
> - Typical value is 512~1024 depending on the async device capability.
> + Typical value is 256~1024 depending on the async device capability.
>
> Applications must provide following ``ops`` callbacks for vhost lib to
> work with the async copy devices:
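
For anyone following along, application-side usage of the new structure would
look roughly like this. It is only a sketch distilled from the examples/vhost
hunk further down; the helper name, the queue_id parameter and the error
handling are mine, not part of the patch, and channel_ops is assumed to
already hold the application's transfer_data / check_completed_copies
callbacks:

#include <stdint.h>
#include <rte_vhost_async.h>

/* Sketch: register an async copy channel for one vhost queue using the
 * reworked configuration structure. */
static int
register_async_channel(int vid, uint16_t queue_id,
		struct rte_vhost_async_channel_ops *channel_ops)
{
	struct rte_vhost_async_config config = {0};

	/* in-order completion is the only mode vhost accepts today */
	config.features = RTE_VHOST_ASYNC_INORDER;
	/* copies shorter than 256 bytes fall back to CPU copy */
	config.async_threshold = 256;

	/* returns 0 on success, -1 on failure (e.g. INORDER not set) */
	return rte_vhost_async_channel_register(vid, queue_id,
			config, channel_ops);
}
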
> diff --git a/examples/vhost/main.c b/examples/vhost/main.c
> index d2179ea..9cd855a 100644
> --- a/examples/vhost/main.c
> +++ b/examples/vhost/main.c
> @@ -1468,7 +1468,7 @@ new_device(int vid)
> vid, vdev->coreid);
>
> if (async_vhost_driver) {
> - struct rte_vhost_async_features f;
> + struct rte_vhost_async_config config = {0};
> struct rte_vhost_async_channel_ops channel_ops;
>
> if (dma_type != NULL && strncmp(dma_type, "ioat", 4) == 0) {
> @@ -1476,11 +1476,11 @@ new_device(int vid)
> channel_ops.check_completed_copies =
> ioat_check_completed_copies_cb;
>
> - f.async_inorder = 1;
> - f.async_threshold = 256;
> + config.features = RTE_VHOST_ASYNC_INORDER;
> + config.async_threshold = 256;
>
> return rte_vhost_async_channel_register(vid, VIRTIO_RXQ,
> - f.intval, &channel_ops);
> + config, &channel_ops);
> }
> }
>
> diff --git a/lib/vhost/rte_vhost_async.h b/lib/vhost/rte_vhost_async.h
> index 6faa31f..ef8c1a2 100644
> --- a/lib/vhost/rte_vhost_async.h
> +++ b/lib/vhost/rte_vhost_async.h
> @@ -93,49 +93,47 @@ struct async_inflight_info {
> };
>
> /**
> - * dma channel feature bit definition
> + * async channel features
> */
> -struct rte_vhost_async_features {
> - union {
> - uint32_t intval;
> - struct {
> - uint32_t async_inorder:1;
> - uint32_t resvd_0:15;
> - uint32_t async_threshold:12;
> - uint32_t resvd_1:4;
> - };
> - };
> +enum {
> + RTE_VHOST_ASYNC_INORDER = 1U << 0,
I think I understand the point of this flag, but the naming is
misleading. The code later on fails registration if the "INORDER" flag
is not passed, but the code behaves in the opposite way: it does not
support Virtio in-order mode as soon as a threshold is used.

Why do we need the DMA to write the descriptor buffers in order? My
understanding is that it is an optimization to ease the DMA completion
handling. So we are requiring the DMA to be in-order, but on the other
hand we cannot guarantee the Virtio ring to be handled in order, moving
the complexity (and potential performance cost) to the guest.
> };
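
To illustrate the "ease the DMA completion handling" point: with an in-order
channel, completed copies always form a prefix of the submitted ones, so a
single pair of counters is enough and no per-copy bookkeeping is needed. This
is a purely hypothetical sketch of that idea, not the actual vhost datapath:

#include <stdint.h>

/* Hypothetical tracker for an in-order channel: everything between
 * 'completed' and 'submitted' is still in flight, and whatever the
 * device reports as done is necessarily the oldest outstanding work. */
struct inorder_tracker {
	uint32_t submitted;
	uint32_t completed;
};

static void
mark_completed(struct inorder_tracker *t, uint32_t n_done)
{
	/* in-order guarantee: no need to record which copies finished */
	t->completed += n_done;
}

static uint32_t
copies_in_flight(const struct inorder_tracker *t)
{
	return t->submitted - t->completed;
}
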
>
> /**
> - * register an async channel for vhost
> + * async channel configuration
> + */
> +struct rte_vhost_async_config {
> + uint32_t async_threshold;
> + uint32_t features;
> + uint32_t rsvd[2];
> +};
> +
> +/**
> + * Register an async channel for a vhost queue
> *
> * @param vid
> * vhost device id async channel to be attached to
> * @param queue_id
> * vhost queue id async channel to be attached to
> - * @param features
> - * DMA channel feature bit
> - * b0 : DMA supports inorder data transfer
> - * b1 - b15: reserved
> - * b16 - b27: Packet length threshold for DMA transfer
> - * b28 - b31: reserved
> + * @param config
> + * Async channel configuration structure
> * @param ops
> - * DMA operation callbacks
> + * Async channel operation callbacks
> * @return
> * 0 on success, -1 on failures
> */
> __rte_experimental
> int rte_vhost_async_channel_register(int vid, uint16_t queue_id,
> - uint32_t features, struct rte_vhost_async_channel_ops *ops);
> + struct rte_vhost_async_config config,
> + struct rte_vhost_async_channel_ops *ops);
>
> /**
> - * unregister a dma channel for vhost
> + * Unregister an async channel for a vhost queue
> *
> * @param vid
> - * vhost device id DMA channel to be detached
> + * vhost device id async channel to be detached from
> * @param queue_id
> - * vhost queue id DMA channel to be detached
> + * vhost queue id async channel to be detached from
> * @return
> * 0 on success, -1 on failures
> */
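
For completeness, the detach side would be paired with the register call
above, e.g. from the application's device tear-down path. Again only a
sketch; the helper and the logging are mine:

#include <stdint.h>
#include <rte_log.h>
#include <rte_vhost_async.h>

/* Sketch: detach the async channel from a vhost queue, mirroring the
 * register helper shown earlier. */
static void
unregister_async_channel(int vid, uint16_t queue_id)
{
	if (rte_vhost_async_channel_unregister(vid, queue_id) != 0)
		RTE_LOG(ERR, USER1,
			"failed to unregister async channel (vid %d, qid %d)\n",
			vid, queue_id);
}
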
> diff --git a/lib/vhost/vhost.c b/lib/vhost/vhost.c
> index 53a470f..908758e 100644
> --- a/lib/vhost/vhost.c
> +++ b/lib/vhost/vhost.c
> @@ -1619,19 +1619,17 @@ int rte_vhost_extern_callback_register(int vid,
> return 0;
> }
>
> -int rte_vhost_async_channel_register(int vid, uint16_t queue_id,
> - uint32_t features,
> - struct rte_vhost_async_channel_ops *ops)
> +int
> +rte_vhost_async_channel_register(int vid, uint16_t queue_id,
> + struct rte_vhost_async_config config,
> + struct rte_vhost_async_channel_ops *ops)
> {
> struct vhost_virtqueue *vq;
> struct virtio_net *dev = get_device(vid);
> - struct rte_vhost_async_features f;
>
> if (dev == NULL || ops == NULL)
> return -1;
>
> - f.intval = features;
> -
> if (queue_id >= VHOST_MAX_VRING)
> return -1;
>
> @@ -1640,7 +1638,7 @@ int rte_vhost_async_channel_register(int vid, uint16_t queue_id,
> if (unlikely(vq == NULL || !dev->async_copy))
> return -1;
>
> - if (unlikely(!f.async_inorder)) {
> + if (unlikely(!(config.features & RTE_VHOST_ASYNC_INORDER))) {
> VHOST_LOG_CONFIG(ERR,
> "async copy is not supported on non-inorder mode "
> "(vid %d, qid: %d)\n", vid, queue_id);
> @@ -1719,9 +1717,7 @@ int rte_vhost_async_channel_register(int vid, uint16_t queue_id,
>
> vq->async_ops.check_completed_copies = ops->check_completed_copies;
> vq->async_ops.transfer_data = ops->transfer_data;
> -
> - vq->async_inorder = f.async_inorder;
> - vq->async_threshold = f.async_threshold;
> + vq->async_threshold = config.async_threshold;
>
> vq->async_registered = true;
>
> @@ -1731,7 +1727,8 @@ int rte_vhost_async_channel_register(int vid, uint16_t queue_id,
> return 0;
> }
>
> -int rte_vhost_async_channel_unregister(int vid, uint16_t queue_id)
> +int
> +rte_vhost_async_channel_unregister(int vid, uint16_t queue_id)
> {
> struct vhost_virtqueue *vq;
> struct virtio_net *dev = get_device(vid);
> diff --git a/lib/vhost/vhost.h b/lib/vhost/vhost.h
> index 8ffe387..d98ca8a 100644
> --- a/lib/vhost/vhost.h
> +++ b/lib/vhost/vhost.h
> @@ -218,9 +218,8 @@ struct vhost_virtqueue {
> };
>
> /* vq async features */
> - bool async_inorder;
> bool async_registered;
> - uint16_t async_threshold;
> + uint32_t async_threshold;
>
> int notif_enable;
> #define VIRTIO_UNINITIALIZED_NOTIF (-1)
>