Hi Cheng, 

Comments inline.

<snipped>

> +uint16_t rte_vhost_drain_queue_thread_unsafe(int vid, uint16_t queue_id,
> +		struct rte_mbuf **pkts, uint16_t count) {
> +     struct virtio_net *dev = get_device(vid);
> +     struct vhost_virtqueue *vq;
> +     uint16_t n_pkts = count;
> +
> +     if (!dev)
> +             return 0;
> +
> +     VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
> +     if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
> +		VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
> +			dev->vid, __func__, queue_id);
> +             return 0;
> +     }
> +
> +     vq = dev->virtqueue[queue_id];
> +
> +     if (unlikely(!vq->async_registered)) {
> +		VHOST_LOG_DATA(ERR, "(%d) %s: async not registered for queue id %d.\n",
> +			dev->vid, __func__, queue_id);
> +             return 0;
> +     }
> +
> +	while (count)
> +		count -= vhost_poll_enqueue_completed(dev, queue_id,
> +			pkts, count);

I think the drain API here assumes a per-virtqueue assignment of the DMA
device, which need not be true.
If there are multiple DMA devices per virtqueue, the application would need a
mechanism to change the device id on each call to the drain API.
So, it's probably better to just call vhost_poll_enqueue_completed once here
and return to the application, and have the loop in the application instead?
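
Something along these lines is what I have in mind (just a rough sketch of
the application-side loop, assuming the API returns the number of packets
completed per call; select_dma_device() and n_inflight are hypothetical
placeholders, not existing APIs):

	/*
	 * Sketch: application drains completions itself so it can
	 * re-select the DMA device between polls if more than one is
	 * assigned to the virtqueue.
	 */
	uint16_t drained = 0;

	while (drained < n_inflight) {
		/* Hypothetical hook to pick the DMA device to poll next. */
		select_dma_device(vid, queue_id);

		drained += rte_vhost_drain_queue_thread_unsafe(vid, queue_id,
				pkts + drained, n_inflight - drained);
	}
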

> +
> +     return n_pkts;
> +}
> +

<snipped>
