Re: [PATCH 30/31] vdpa: Move vhost_vdpa_get_iova_range to net/vhost-vdpa.c

2022-01-29 Thread Jason Wang


在 2022/1/22 上午4:27, Eugenio Pérez 写道:

Since it's a device property, it can be done in net/. This helps SVQ to
allocate the rings in vdpa device initialization, rather than delay
that.

Signed-off-by: Eugenio Pérez 
---
  hw/virtio/vhost-vdpa.c | 15 ---
  net/vhost-vdpa.c   | 32 



I don't understand here, since we will support device other than net?



  2 files changed, 24 insertions(+), 23 deletions(-)

diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
index 75090d65e8..2491c05d29 100644
--- a/hw/virtio/vhost-vdpa.c
+++ b/hw/virtio/vhost-vdpa.c
@@ -350,19 +350,6 @@ static int vhost_vdpa_add_status(struct vhost_dev *dev, 
uint8_t status)
  return 0;
  }
  
-static void vhost_vdpa_get_iova_range(struct vhost_vdpa *v)

-{
-int ret = vhost_vdpa_call(v->dev, VHOST_VDPA_GET_IOVA_RANGE,
-  >iova_range);
-if (ret != 0) {
-v->iova_range.first = 0;
-v->iova_range.last = UINT64_MAX;
-}
-
-trace_vhost_vdpa_get_iova_range(v->dev, v->iova_range.first,
-v->iova_range.last);
-}



Let's just export this instead?

Thanks



-
  static bool vhost_vdpa_one_time_request(struct vhost_dev *dev)
  {
  struct vhost_vdpa *v = dev->opaque;
@@ -1295,8 +1282,6 @@ static int vhost_vdpa_init(struct vhost_dev *dev, void 
*opaque, Error **errp)
  goto err;
  }
  
-vhost_vdpa_get_iova_range(v);

-
  if (vhost_vdpa_one_time_request(dev)) {
  return 0;
  }
diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
index 4befba5cc7..cc9cecf8d1 100644
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -22,6 +22,7 @@
  #include 
  #include 
  #include "standard-headers/linux/virtio_net.h"
+#include "standard-headers/linux/vhost_types.h"
  #include "monitor/monitor.h"
  #include "hw/virtio/vhost.h"
  
@@ -187,13 +188,25 @@ static NetClientInfo net_vhost_vdpa_info = {

  .check_peer_type = vhost_vdpa_check_peer_type,
  };
  
+static void vhost_vdpa_get_iova_range(int fd,

+  struct vhost_vdpa_iova_range *iova_range)
+{
+int ret = ioctl(fd, VHOST_VDPA_GET_IOVA_RANGE, iova_range);
+
+if (ret < 0) {
+iova_range->first = 0;
+iova_range->last = UINT64_MAX;
+}
+}
+
  static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
-   const char *device,
-   const char *name,
-   int vdpa_device_fd,
-   int queue_pair_index,
-   int nvqs,
-   bool is_datapath)
+   const char *device,
+   const char *name,
+   int vdpa_device_fd,
+   int queue_pair_index,
+   int nvqs,
+   bool is_datapath,
+   struct vhost_vdpa_iova_range iova_range)
  {
  NetClientState *nc = NULL;
  VhostVDPAState *s;
@@ -211,6 +224,7 @@ static NetClientState *net_vhost_vdpa_init(NetClientState 
*peer,
  
  s->vhost_vdpa.device_fd = vdpa_device_fd;

  s->vhost_vdpa.index = queue_pair_index;
+s->vhost_vdpa.iova_range = iova_range;
  ret = vhost_vdpa_add(nc, (void *)>vhost_vdpa, queue_pair_index, nvqs);
  if (ret) {
  qemu_del_net_client(nc);
@@ -267,6 +281,7 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char 
*name,
  g_autofree NetClientState **ncs = NULL;
  NetClientState *nc;
  int queue_pairs, i, has_cvq = 0;
+struct vhost_vdpa_iova_range iova_range;
  
  assert(netdev->type == NET_CLIENT_DRIVER_VHOST_VDPA);

  opts = >u.vhost_vdpa;
@@ -286,19 +301,20 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char 
*name,
  qemu_close(vdpa_device_fd);
  return queue_pairs;
  }
+vhost_vdpa_get_iova_range(vdpa_device_fd, _range);
  
  ncs = g_malloc0(sizeof(*ncs) * queue_pairs);
  
  for (i = 0; i < queue_pairs; i++) {

  ncs[i] = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
- vdpa_device_fd, i, 2, true);
+ vdpa_device_fd, i, 2, true, iova_range);
  if (!ncs[i])
  goto err;
  }
  
  if (has_cvq) {

  nc = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
- vdpa_device_fd, i, 1, false);
+ vdpa_device_fd, i, 1, false, iova_range);
  if (!nc)
  goto err;
  }


___
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization

Re: [PATCH 29/31] vdpa: Make ncs autofree

2022-01-29 Thread Jason Wang


在 2022/1/22 上午4:27, Eugenio Pérez 写道:

Simplifying memory management.

Signed-off-by: Eugenio Pérez 



To reduce the size of this series, this can be sent as a separate patch 
if I'm not wrong.


Thanks



---
  net/vhost-vdpa.c | 5 ++---
  1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
index 4125d13118..4befba5cc7 100644
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -264,7 +264,8 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char 
*name,
  {
  const NetdevVhostVDPAOptions *opts;
  int vdpa_device_fd;
-NetClientState **ncs, *nc;
+g_autofree NetClientState **ncs = NULL;
+NetClientState *nc;
  int queue_pairs, i, has_cvq = 0;
  
  assert(netdev->type == NET_CLIENT_DRIVER_VHOST_VDPA);

@@ -302,7 +303,6 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char 
*name,
  goto err;
  }
  
-g_free(ncs);

  return 0;
  
  err:

@@ -310,7 +310,6 @@ err:
  qemu_del_net_client(ncs[0]);
  }
  qemu_close(vdpa_device_fd);
-g_free(ncs);
  
  return -1;

  }


___
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization

Re: [PATCH 28/31] vdpa: Expose VHOST_F_LOG_ALL on SVQ

2022-01-29 Thread Jason Wang


在 2022/1/22 上午4:27, Eugenio Pérez 写道:

SVQ is able to log the dirty bits by itself, so let's use it to not
block migration.

Also, ignore set and clear of VHOST_F_LOG_ALL on set_features if SVQ is
enabled. Even if the device supports it, the reports would be nonsense
because SVQ memory is in the qemu region.

The log region is still allocated. Future changes might skip that, but
this series is already long enough.

Signed-off-by: Eugenio Pérez 
---
  hw/virtio/vhost-vdpa.c | 20 
  1 file changed, 20 insertions(+)

diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
index fb0a338baa..75090d65e8 100644
--- a/hw/virtio/vhost-vdpa.c
+++ b/hw/virtio/vhost-vdpa.c
@@ -1022,6 +1022,9 @@ static int vhost_vdpa_get_features(struct vhost_dev *dev, 
uint64_t *features)
  if (ret == 0 && v->shadow_vqs_enabled) {
  /* Filter only features that SVQ can offer to guest */
  vhost_svq_valid_guest_features(features);
+
+/* Add SVQ logging capabilities */
+*features |= BIT_ULL(VHOST_F_LOG_ALL);
  }
  
  return ret;

@@ -1039,8 +1042,25 @@ static int vhost_vdpa_set_features(struct vhost_dev *dev,
  
  if (v->shadow_vqs_enabled) {

  uint64_t dev_features, svq_features, acked_features;
+uint8_t status = 0;
  bool ok;
  
+ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, );

+if (unlikely(ret)) {
+return ret;
+}
+
+if (status & VIRTIO_CONFIG_S_DRIVER_OK) {
+/*
+ * vhost is trying to enable or disable _F_LOG, and the device
+ * would report wrong dirty pages. SVQ handles it.
+ */



I fail to understand this comment; I'd think there's no way to disable 
dirty page tracking for SVQ.


Thanks



+return 0;
+}
+
+/* We must not ack _F_LOG if SVQ is enabled */
+features &= ~BIT_ULL(VHOST_F_LOG_ALL);
+
  ret = vhost_vdpa_get_dev_features(dev, _features);
  if (ret != 0) {
  error_report("Can't get vdpa device features, got (%d)", ret);


___
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization

Re: [PATCH 18/31] vhost: Shadow virtqueue buffers forwarding

2022-01-29 Thread Jason Wang


在 2022/1/22 上午4:27, Eugenio Pérez 写道:

@@ -272,6 +590,28 @@ void vhost_svq_set_svq_kick_fd(VhostShadowVirtqueue *svq, 
int svq_kick_fd)
  void vhost_svq_stop(VhostShadowVirtqueue *svq)
  {
  event_notifier_set_handler(>svq_kick, NULL);
+g_autofree VirtQueueElement *next_avail_elem = NULL;
+
+if (!svq->vq) {
+return;
+}
+
+/* Send all pending used descriptors to guest */
+vhost_svq_flush(svq, false);



Do we need to wait for all the pending descriptors to be completed here?

Thanks



+
+for (unsigned i = 0; i < svq->vring.num; ++i) {
+g_autofree VirtQueueElement *elem = NULL;
+elem = g_steal_pointer(>ring_id_maps[i]);
+if (elem) {
+virtqueue_detach_element(svq->vq, elem, elem->len);
+}
+}
+
+next_avail_elem = g_steal_pointer(>next_guest_avail_elem);
+if (next_avail_elem) {
+virtqueue_detach_element(svq->vq, next_avail_elem,
+ next_avail_elem->len);
+}
  }


___
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization

Re: [PATCH 23/31] vdpa: Add custom IOTLB translations to SVQ

2022-01-29 Thread Jason Wang


在 2022/1/22 上午4:27, Eugenio Pérez 写道:

Use translations added in VhostIOVATree in SVQ.

Only introduce usage here, not allocation and deallocation. As with
previous patches, we use the dead code paths of shadow_vqs_enabled to
avoid committing too many changes at once. These are impossible to take
at the moment.

Signed-off-by: Eugenio Pérez 
---
  hw/virtio/vhost-shadow-virtqueue.h |   3 +-
  include/hw/virtio/vhost-vdpa.h |   3 +
  hw/virtio/vhost-shadow-virtqueue.c | 111 
  hw/virtio/vhost-vdpa.c | 161 +
  4 files changed, 238 insertions(+), 40 deletions(-)

diff --git a/hw/virtio/vhost-shadow-virtqueue.h 
b/hw/virtio/vhost-shadow-virtqueue.h
index 19c934af49..c6f67d6f76 100644
--- a/hw/virtio/vhost-shadow-virtqueue.h
+++ b/hw/virtio/vhost-shadow-virtqueue.h
@@ -12,6 +12,7 @@
  
  #include "hw/virtio/vhost.h"

  #include "qemu/event_notifier.h"
+#include "hw/virtio/vhost-iova-tree.h"
  
  typedef struct VhostShadowVirtqueue VhostShadowVirtqueue;
  
@@ -37,7 +38,7 @@ void vhost_svq_start(VhostShadowVirtqueue *svq, VirtIODevice *vdev,

   VirtQueue *vq);
  void vhost_svq_stop(VhostShadowVirtqueue *svq);
  
-VhostShadowVirtqueue *vhost_svq_new(uint16_t qsize);

+VhostShadowVirtqueue *vhost_svq_new(uint16_t qsize, VhostIOVATree *iova_map);
  
  void vhost_svq_free(VhostShadowVirtqueue *vq);
  
diff --git a/include/hw/virtio/vhost-vdpa.h b/include/hw/virtio/vhost-vdpa.h

index 009a9f3b6b..cd2388b3be 100644
--- a/include/hw/virtio/vhost-vdpa.h
+++ b/include/hw/virtio/vhost-vdpa.h
@@ -14,6 +14,7 @@
  
  #include 
  
+#include "hw/virtio/vhost-iova-tree.h"

  #include "hw/virtio/virtio.h"
  #include "standard-headers/linux/vhost_types.h"
  
@@ -30,6 +31,8 @@ typedef struct vhost_vdpa {

  MemoryListener listener;
  struct vhost_vdpa_iova_range iova_range;
  bool shadow_vqs_enabled;
+/* IOVA mapping used by Shadow Virtqueue */
+VhostIOVATree *iova_tree;
  GPtrArray *shadow_vqs;
  struct vhost_dev *dev;
  VhostVDPAHostNotifier notifier[VIRTIO_QUEUE_MAX];
diff --git a/hw/virtio/vhost-shadow-virtqueue.c 
b/hw/virtio/vhost-shadow-virtqueue.c
index a1a404f68f..c7888eb8cf 100644
--- a/hw/virtio/vhost-shadow-virtqueue.c
+++ b/hw/virtio/vhost-shadow-virtqueue.c
@@ -11,6 +11,7 @@
  #include "hw/virtio/vhost-shadow-virtqueue.h"
  #include "hw/virtio/vhost.h"
  #include "hw/virtio/virtio-access.h"
+#include "hw/virtio/vhost-iova-tree.h"
  #include "standard-headers/linux/vhost_types.h"
  
  #include "qemu/error-report.h"

@@ -45,6 +46,9 @@ typedef struct VhostShadowVirtqueue {
  /* Virtio device */
  VirtIODevice *vdev;
  
+/* IOVA mapping */

+VhostIOVATree *iova_tree;
+
  /* Map for returning guest's descriptors */
  VirtQueueElement **ring_id_maps;
  
@@ -97,13 +101,7 @@ bool vhost_svq_valid_device_features(uint64_t *dev_features)

  continue;
  
  case VIRTIO_F_ACCESS_PLATFORM:

-/* SVQ does not know how to translate addresses */
-if (*dev_features & BIT_ULL(b)) {
-clear_bit(b, dev_features);
-r = false;
-}
-break;
-
+/* SVQ trust in host's IOMMU to translate addresses */
  case VIRTIO_F_VERSION_1:
  /* SVQ trust that guest vring is little endian */
  if (!(*dev_features & BIT_ULL(b))) {
@@ -205,7 +203,55 @@ static void 
vhost_svq_set_notification(VhostShadowVirtqueue *svq, bool enable)
  }
  }
  
+/**

+ * Translate addresses between qemu's virtual address and SVQ IOVA
+ *
+ * @svqShadow VirtQueue
+ * @vaddr  Translated IOVA addresses
+ * @iovec  Source qemu's VA addresses
+ * @numLength of iovec and minimum length of vaddr
+ */
+static bool vhost_svq_translate_addr(const VhostShadowVirtqueue *svq,
+ void **addrs, const struct iovec *iovec,
+ size_t num)
+{
+size_t i;
+
+if (num == 0) {
+return true;
+}
+
+for (i = 0; i < num; ++i) {
+DMAMap needle = {
+.translated_addr = (hwaddr)iovec[i].iov_base,
+.size = iovec[i].iov_len,
+};
+size_t off;
+
+const DMAMap *map = vhost_iova_tree_find_iova(svq->iova_tree, );
+/*
+ * Map cannot be NULL since iova map contains all guest space and
+ * qemu already has a physical address mapped
+ */
+if (unlikely(!map)) {
+error_report("Invalid address 0x%"HWADDR_PRIx" given by guest",
+ needle.translated_addr);



This can be triggered by the guest, so we need to use error_report_once() or log_guest_error(), etc.



+return false;
+}
+
+/*
+ * Map->iova chunk size is ignored. What to do if descriptor
+ * (addr, size) does not fit is delegated to the device.
+ */



I think we need to at least check the size and fail if it doesn't 
match here. 

Re: [PATCH 22/31] vhost: Add VhostIOVATree

2022-01-29 Thread Jason Wang


在 2022/1/22 上午4:27, Eugenio Pérez 写道:

This tree is able to look for a translated address from an IOVA address.

At first glance it is similar to util/iova-tree. However, SVQ working on
devices with limited IOVA space needs more capabilities,



So does the IOVA tree (e.g., l2 vtd can only work in the range of GAW and 
without RMRRs).




  like allocating
IOVA chunks or performing reverse translations (qemu addresses to iova).



This looks like a general request as well. So I wonder if we can simply 
extend iova tree instead.


Thanks




The allocation capability, as "assign a free IOVA address to this chunk
of memory in qemu's address space" allows shadow virtqueue to create a
new address space that is not restricted by guest's addressable one, so
we can allocate shadow vqs vrings outside of it.

It duplicates the tree so it can search efficiently both directions,
and it will signal overlap if iova or the translated address is
present in any tree.

Signed-off-by: Eugenio Pérez 
---
  hw/virtio/vhost-iova-tree.h |  27 +++
  hw/virtio/vhost-iova-tree.c | 157 
  hw/virtio/meson.build   |   2 +-
  3 files changed, 185 insertions(+), 1 deletion(-)
  create mode 100644 hw/virtio/vhost-iova-tree.h
  create mode 100644 hw/virtio/vhost-iova-tree.c

diff --git a/hw/virtio/vhost-iova-tree.h b/hw/virtio/vhost-iova-tree.h
new file mode 100644
index 00..610394eaf1
--- /dev/null
+++ b/hw/virtio/vhost-iova-tree.h
@@ -0,0 +1,27 @@
+/*
+ * vhost software live migration ring
+ *
+ * SPDX-FileCopyrightText: Red Hat, Inc. 2021
+ * SPDX-FileContributor: Author: Eugenio Pérez 
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef HW_VIRTIO_VHOST_IOVA_TREE_H
+#define HW_VIRTIO_VHOST_IOVA_TREE_H
+
+#include "qemu/iova-tree.h"
+#include "exec/memory.h"
+
+typedef struct VhostIOVATree VhostIOVATree;
+
+VhostIOVATree *vhost_iova_tree_new(uint64_t iova_first, uint64_t iova_last);
+void vhost_iova_tree_delete(VhostIOVATree *iova_tree);
+G_DEFINE_AUTOPTR_CLEANUP_FUNC(VhostIOVATree, vhost_iova_tree_delete);
+
+const DMAMap *vhost_iova_tree_find_iova(const VhostIOVATree *iova_tree,
+const DMAMap *map);
+int vhost_iova_tree_map_alloc(VhostIOVATree *iova_tree, DMAMap *map);
+void vhost_iova_tree_remove(VhostIOVATree *iova_tree, const DMAMap *map);
+
+#endif
diff --git a/hw/virtio/vhost-iova-tree.c b/hw/virtio/vhost-iova-tree.c
new file mode 100644
index 00..0021dbaf54
--- /dev/null
+++ b/hw/virtio/vhost-iova-tree.c
@@ -0,0 +1,157 @@
+/*
+ * vhost software live migration ring
+ *
+ * SPDX-FileCopyrightText: Red Hat, Inc. 2021
+ * SPDX-FileContributor: Author: Eugenio Pérez 
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/iova-tree.h"
+#include "vhost-iova-tree.h"
+
+#define iova_min_addr qemu_real_host_page_size
+
+/**
+ * VhostIOVATree, able to:
+ * - Translate iova address
+ * - Reverse translate iova address (from translated to iova)
+ * - Allocate IOVA regions for translated range (potentially slow operation)
+ *
+ * Note that it cannot remove nodes.
+ */
+struct VhostIOVATree {
+/* First addresable iova address in the device */
+uint64_t iova_first;
+
+/* Last addressable iova address in the device */
+uint64_t iova_last;
+
+/* IOVA address to qemu memory maps. */
+IOVATree *iova_taddr_map;
+
+/* QEMU virtual memory address to iova maps */
+GTree *taddr_iova_map;
+};
+
+static gint vhost_iova_tree_cmp_taddr(gconstpointer a, gconstpointer b,
+  gpointer data)
+{
+const DMAMap *m1 = a, *m2 = b;
+
+if (m1->translated_addr > m2->translated_addr + m2->size) {
+return 1;
+}
+
+if (m1->translated_addr + m1->size < m2->translated_addr) {
+return -1;
+}
+
+/* Overlapped */
+return 0;
+}
+
+/**
+ * Create a new IOVA tree
+ *
+ * Returns the new IOVA tree
+ */
+VhostIOVATree *vhost_iova_tree_new(hwaddr iova_first, hwaddr iova_last)
+{
+VhostIOVATree *tree = g_new(VhostIOVATree, 1);
+
+/* Some devices does not like 0 addresses */
+tree->iova_first = MAX(iova_first, iova_min_addr);
+tree->iova_last = iova_last;
+
+tree->iova_taddr_map = iova_tree_new();
+tree->taddr_iova_map = g_tree_new_full(vhost_iova_tree_cmp_taddr, NULL,
+   NULL, g_free);
+return tree;
+}
+
+/**
+ * Delete an iova tree
+ */
+void vhost_iova_tree_delete(VhostIOVATree *iova_tree)
+{
+iova_tree_destroy(iova_tree->iova_taddr_map);
+g_tree_unref(iova_tree->taddr_iova_map);
+g_free(iova_tree);
+}
+
+/**
+ * Find the IOVA address stored from a memory address
+ *
+ * @tree The iova tree
+ * @map  The map with the memory address
+ *
+ * Return the stored mapping, or NULL if not found.
+ */
+const DMAMap *vhost_iova_tree_find_iova(const VhostIOVATree *tree,
+const DMAMap *map)
+{
+

Re: [PATCH 21/31] util: Add iova_tree_alloc

2022-01-29 Thread Jason Wang


在 2022/1/24 下午5:20, Eugenio Perez Martin 写道:

On Mon, Jan 24, 2022 at 5:33 AM Peter Xu  wrote:

On Fri, Jan 21, 2022 at 09:27:23PM +0100, Eugenio Pérez wrote:

+int iova_tree_alloc(IOVATree *tree, DMAMap *map, hwaddr iova_begin,

I forgot to s/iova_tree_alloc/iova_tree_alloc_map/ here.


+hwaddr iova_last)
+{
+const DMAMapInternal *last, *i;
+
+assert(iova_begin < iova_last);
+
+/*
+ * Find a valid hole for the mapping
+ *
+ * TODO: Replace all this with g_tree_node_first/next/last when available
+ * (from glib since 2.68). Using a sepparated QTAILQ complicates code.
+ *
+ * Try to allocate first at the end of the list.
+ */
+last = QTAILQ_LAST(>list);
+if (iova_tree_alloc_map_in_hole(last, NULL, iova_begin, iova_last,
+map->size)) {
+goto alloc;
+}
+
+/* Look for inner hole */
+last = NULL;
+for (i = QTAILQ_FIRST(>list); i;
+ last = i, i = QTAILQ_NEXT(i, entry)) {
+if (iova_tree_alloc_map_in_hole(last, i, iova_begin, iova_last,
+map->size)) {
+goto alloc;
+}
+}
+
+return IOVA_ERR_NOMEM;
+
+alloc:
+map->iova = last ? last->map.iova + last->map.size + 1 : iova_begin;
+return iova_tree_insert(tree, map);
+}

Hi, Eugenio,

Have you tried with what Jason suggested previously?

   
https://lore.kernel.org/qemu-devel/cacgkmetzapd9xqtp_r4w296n_qz7vuv1flnb544fevoyo0o...@mail.gmail.com/

That solution still sounds very sensible to me even without the newly
introduced list in previous two patches.

IMHO we could move "DMAMap *previous, *this" into the IOVATreeAllocArgs*
stucture that was passed into the traverse func though, so it'll naturally work
with threading.

Or is there any blocker for it?


Hi Peter,

I can try that solution again, but the main problem was the special
cases of the beginning and ending.

For the function to locate a hole, DMAMap first = {.iova = 0, .size =
0} means that it cannot account 0 for the hole.

In other words, with that algorithm, if the only valid hole is [0, N)
and we try to allocate a block of size N, it would fail.

Same happens with iova_end, although in practice it seems that IOMMU
hardware iova upper limit is never UINT64_MAX.

Maybe we could treat .size = 0 as a special case?



Yes, the pseudo-code I pasted is just to show the idea of using 
g_tree_foreach() instead of introducing new auxiliary data structures. 
That will simplify both the code and the review.


Down the road, we may start from an iova range specified during the 
creation of the iova tree. E.g for vtd, it's the GAW, for vhost-vdpa, 
it's the one that we get from VHOST_VDPA_GET_IOVA_RANGE.


Thanks



I see cleaner either
to build the list (but insert needs to take the list into account) or
to explicitly tell that prev == NULL means to use iova_first.

Another solution that comes to my mind: to add both exceptions outside
of transverse function, and skip the first iteration with something
like:

if (prev == NULL) {
   prev = this;
   return false /* continue */
}

So the transverse callback has way less code paths. Would it work for
you if I send a separate RFC from SVQ only to validate this?

Thanks!


Thanks,
--
Peter Xu



___
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization

Re: [PATCH 18/31] vhost: Shadow virtqueue buffers forwarding

2022-01-29 Thread Jason Wang


在 2022/1/22 上午4:27, Eugenio Pérez 写道:

Initial version of shadow virtqueue that actually forward buffers. There
is no iommu support at the moment, and that will be addressed in future
patches of this series. Since all vhost-vdpa devices use forced IOMMU,
this means that SVQ is not usable at this point of the series on any
device.

For simplicity it only supports modern devices, that expects vring
in little endian, with split ring and no event idx or indirect
descriptors. Support for them will not be added in this series.

It reuses the VirtQueue code for the device part. The driver part is
based on Linux's virtio_ring driver, but with stripped functionality
and optimizations so it's easier to review.

However, forwarding buffers have some particular pieces: One of the most
unexpected ones is that a guest's buffer can expand through more than
one descriptor in SVQ. While this is handled gracefully by qemu's
emulated virtio devices, it may cause unexpected SVQ queue full. This
patch also solves it by checking for this condition at both guest's
kicks and device's calls. The code may be more elegant in the future if
SVQ code runs in its own iocontext.

Signed-off-by: Eugenio Pérez 
---
  hw/virtio/vhost-shadow-virtqueue.h |   2 +
  hw/virtio/vhost-shadow-virtqueue.c | 365 -
  hw/virtio/vhost-vdpa.c | 111 -
  3 files changed, 462 insertions(+), 16 deletions(-)

diff --git a/hw/virtio/vhost-shadow-virtqueue.h 
b/hw/virtio/vhost-shadow-virtqueue.h
index 39aef5ffdf..19c934af49 100644
--- a/hw/virtio/vhost-shadow-virtqueue.h
+++ b/hw/virtio/vhost-shadow-virtqueue.h
@@ -33,6 +33,8 @@ uint16_t vhost_svq_get_num(const VhostShadowVirtqueue *svq);
  size_t vhost_svq_driver_area_size(const VhostShadowVirtqueue *svq);
  size_t vhost_svq_device_area_size(const VhostShadowVirtqueue *svq);
  
+void vhost_svq_start(VhostShadowVirtqueue *svq, VirtIODevice *vdev,

+ VirtQueue *vq);
  void vhost_svq_stop(VhostShadowVirtqueue *svq);
  
  VhostShadowVirtqueue *vhost_svq_new(uint16_t qsize);

diff --git a/hw/virtio/vhost-shadow-virtqueue.c 
b/hw/virtio/vhost-shadow-virtqueue.c
index 7c168075d7..a1a404f68f 100644
--- a/hw/virtio/vhost-shadow-virtqueue.c
+++ b/hw/virtio/vhost-shadow-virtqueue.c
@@ -9,6 +9,8 @@
  
  #include "qemu/osdep.h"

  #include "hw/virtio/vhost-shadow-virtqueue.h"
+#include "hw/virtio/vhost.h"
+#include "hw/virtio/virtio-access.h"
  #include "standard-headers/linux/vhost_types.h"
  
  #include "qemu/error-report.h"

@@ -36,6 +38,33 @@ typedef struct VhostShadowVirtqueue {
  
  /* Guest's call notifier, where SVQ calls guest. */

  EventNotifier svq_call;
+
+/* Virtio queue shadowing */
+VirtQueue *vq;
+
+/* Virtio device */
+VirtIODevice *vdev;
+
+/* Map for returning guest's descriptors */
+VirtQueueElement **ring_id_maps;
+
+/* Next VirtQueue element that guest made available */
+VirtQueueElement *next_guest_avail_elem;
+
+/* Next head to expose to device */
+uint16_t avail_idx_shadow;
+
+/* Next free descriptor */
+uint16_t free_head;
+
+/* Last seen used idx */
+uint16_t shadow_used_idx;
+
+/* Next head to consume from device */
+uint16_t last_used_idx;
+
+/* Cache for the exposed notification flag */
+bool notification;
  } VhostShadowVirtqueue;
  
  #define INVALID_SVQ_KICK_FD -1

@@ -148,30 +177,294 @@ bool vhost_svq_ack_guest_features(uint64_t dev_features,
  return true;
  }
  
-/* Forward guest notifications */

-static void vhost_handle_guest_kick(EventNotifier *n)
+/**
+ * Number of descriptors that SVQ can make available from the guest.
+ *
+ * @svq   The svq
+ */
+static uint16_t vhost_svq_available_slots(const VhostShadowVirtqueue *svq)
  {
-VhostShadowVirtqueue *svq = container_of(n, VhostShadowVirtqueue,
- svq_kick);
+return svq->vring.num - (svq->avail_idx_shadow - svq->shadow_used_idx);
+}
+
+static void vhost_svq_set_notification(VhostShadowVirtqueue *svq, bool enable)
+{
+uint16_t notification_flag;
  
-if (unlikely(!event_notifier_test_and_clear(n))) {

+if (svq->notification == enable) {
+return;
+}
+
+notification_flag = cpu_to_le16(VRING_AVAIL_F_NO_INTERRUPT);
+
+svq->notification = enable;
+if (enable) {
+svq->vring.avail->flags &= ~notification_flag;
+} else {
+svq->vring.avail->flags |= notification_flag;
+}
+}
+
+static void vhost_vring_write_descs(VhostShadowVirtqueue *svq,
+const struct iovec *iovec,
+size_t num, bool more_descs, bool write)
+{
+uint16_t i = svq->free_head, last = svq->free_head;
+unsigned n;
+uint16_t flags = write ? cpu_to_le16(VRING_DESC_F_WRITE) : 0;
+vring_desc_t *descs = svq->vring.desc;
+
+if (num == 0) {
+return;
+}
+
+for (n = 0; n < num; n++) {
+if (more_descs || (n + 1 < num)) 

Re: [PATCH 17/31] vdpa: adapt vhost_ops callbacks to svq

2022-01-29 Thread Jason Wang


在 2022/1/22 上午4:27, Eugenio Pérez 写道:

First half of the buffers forwarding part, preparing vhost-vdpa
callbacks to SVQ to offer it. QEMU cannot enable it at this moment, so
this is effectively dead code at the moment, but it helps to reduce
patch size.

Signed-off-by: Eugenio Pérez 
---
  hw/virtio/vhost-shadow-virtqueue.h |   2 +-
  hw/virtio/vhost-shadow-virtqueue.c |  21 -
  hw/virtio/vhost-vdpa.c | 133 ++---
  3 files changed, 143 insertions(+), 13 deletions(-)

diff --git a/hw/virtio/vhost-shadow-virtqueue.h 
b/hw/virtio/vhost-shadow-virtqueue.h
index 035207a469..39aef5ffdf 100644
--- a/hw/virtio/vhost-shadow-virtqueue.h
+++ b/hw/virtio/vhost-shadow-virtqueue.h
@@ -35,7 +35,7 @@ size_t vhost_svq_device_area_size(const VhostShadowVirtqueue 
*svq);
  
  void vhost_svq_stop(VhostShadowVirtqueue *svq);
  
-VhostShadowVirtqueue *vhost_svq_new(void);

+VhostShadowVirtqueue *vhost_svq_new(uint16_t qsize);
  
  void vhost_svq_free(VhostShadowVirtqueue *vq);
  
diff --git a/hw/virtio/vhost-shadow-virtqueue.c b/hw/virtio/vhost-shadow-virtqueue.c

index f129ec8395..7c168075d7 100644
--- a/hw/virtio/vhost-shadow-virtqueue.c
+++ b/hw/virtio/vhost-shadow-virtqueue.c
@@ -277,9 +277,17 @@ void vhost_svq_stop(VhostShadowVirtqueue *svq)
  /**
   * Creates vhost shadow virtqueue, and instruct vhost device to use the shadow
   * methods and file descriptors.
+ *
+ * @qsize Shadow VirtQueue size
+ *
+ * Returns the new virtqueue or NULL.
+ *
+ * In case of error, reason is reported through error_report.
   */
-VhostShadowVirtqueue *vhost_svq_new(void)
+VhostShadowVirtqueue *vhost_svq_new(uint16_t qsize)
  {
+size_t desc_size = sizeof(vring_desc_t) * qsize;
+size_t device_size, driver_size;
  g_autofree VhostShadowVirtqueue *svq = g_new0(VhostShadowVirtqueue, 1);
  int r;
  
@@ -300,6 +308,15 @@ VhostShadowVirtqueue *vhost_svq_new(void)

  /* Placeholder descriptor, it should be deleted at set_kick_fd */
  event_notifier_init_fd(>svq_kick, INVALID_SVQ_KICK_FD);
  
+svq->vring.num = qsize;



I wonder if this is the best. E.g some hardware can support up to 32K 
queue size. So this will probably end up with:


1) SVQ use 32K queue size
2) hardware queue uses 256

? Or SVQ can stick to 256, but will this cause trouble if we want 
to add event index support?




+driver_size = vhost_svq_driver_area_size(svq);
+device_size = vhost_svq_device_area_size(svq);
+svq->vring.desc = qemu_memalign(qemu_real_host_page_size, driver_size);
+svq->vring.avail = (void *)((char *)svq->vring.desc + desc_size);
+memset(svq->vring.desc, 0, driver_size);
+svq->vring.used = qemu_memalign(qemu_real_host_page_size, device_size);
+memset(svq->vring.used, 0, device_size);
+
  event_notifier_set_handler(>hdev_call, vhost_svq_handle_call);
  return g_steal_pointer();
  
@@ -318,5 +335,7 @@ void vhost_svq_free(VhostShadowVirtqueue *vq)

  event_notifier_cleanup(>hdev_kick);
  event_notifier_set_handler(>hdev_call, NULL);
  event_notifier_cleanup(>hdev_call);
+qemu_vfree(vq->vring.desc);
+qemu_vfree(vq->vring.used);
  g_free(vq);
  }
diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
index 9d801cf907..53e14bafa0 100644
--- a/hw/virtio/vhost-vdpa.c
+++ b/hw/virtio/vhost-vdpa.c
@@ -641,20 +641,52 @@ static int vhost_vdpa_set_vring_addr(struct vhost_dev 
*dev,
  return vhost_vdpa_call(dev, VHOST_SET_VRING_ADDR, addr);
  }
  
-static int vhost_vdpa_set_vring_num(struct vhost_dev *dev,

-  struct vhost_vring_state *ring)
+static int vhost_vdpa_set_dev_vring_num(struct vhost_dev *dev,
+struct vhost_vring_state *ring)
  {
  trace_vhost_vdpa_set_vring_num(dev, ring->index, ring->num);
  return vhost_vdpa_call(dev, VHOST_SET_VRING_NUM, ring);
  }
  
-static int vhost_vdpa_set_vring_base(struct vhost_dev *dev,

-   struct vhost_vring_state *ring)
+static int vhost_vdpa_set_vring_num(struct vhost_dev *dev,
+struct vhost_vring_state *ring)
+{
+struct vhost_vdpa *v = dev->opaque;
+
+if (v->shadow_vqs_enabled) {
+/*
+ * Vring num was set at device start. SVQ num is handled by VirtQueue
+ * code
+ */
+return 0;
+}
+
+return vhost_vdpa_set_dev_vring_num(dev, ring);
+}
+
+static int vhost_vdpa_set_dev_vring_base(struct vhost_dev *dev,
+ struct vhost_vring_state *ring)
  {
  trace_vhost_vdpa_set_vring_base(dev, ring->index, ring->num);
  return vhost_vdpa_call(dev, VHOST_SET_VRING_BASE, ring);
  }
  
+static int vhost_vdpa_set_vring_base(struct vhost_dev *dev,

+ struct vhost_vring_state *ring)
+{
+struct vhost_vdpa *v = dev->opaque;
+
+if (v->shadow_vqs_enabled) {
+/*
+ * Vring base was set at device start. 

Re: [PATCH 16/31] vhost: pass queue index to vhost_vq_get_addr

2022-01-29 Thread Jason Wang


在 2022/1/22 上午4:27, Eugenio Pérez 写道:

Doing that way allows vhost backend to know what address to return.

Signed-off-by: Eugenio Pérez 
---
  hw/virtio/vhost.c | 6 +++---
  1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
index 7b03efccec..64b955ba0c 100644
--- a/hw/virtio/vhost.c
+++ b/hw/virtio/vhost.c
@@ -798,9 +798,10 @@ static int vhost_virtqueue_set_addr(struct vhost_dev *dev,
  struct vhost_virtqueue *vq,
  unsigned idx, bool enable_log)
  {
-struct vhost_vring_addr addr;
+struct vhost_vring_addr addr = {
+.index = idx,
+};
  int r;
-memset(, 0, sizeof(struct vhost_vring_addr));
  
  if (dev->vhost_ops->vhost_vq_get_addr) {

  r = dev->vhost_ops->vhost_vq_get_addr(dev, , vq);
@@ -813,7 +814,6 @@ static int vhost_virtqueue_set_addr(struct vhost_dev *dev,
  addr.avail_user_addr = (uint64_t)(unsigned long)vq->avail;
  addr.used_user_addr = (uint64_t)(unsigned long)vq->used;
  }



I'm a bit lost in the logic above; any reason we need to call 
vhost_vq_get_addr()? :)


Thanks



-addr.index = idx;
  addr.log_guest_addr = vq->used_phys;
  addr.flags = enable_log ? (1 << VHOST_VRING_F_LOG) : 0;
  r = dev->vhost_ops->vhost_set_vring_addr(dev, );


___
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization

Re: [PATCH 15/31] vdpa: Add vhost_svq_get_num

2022-01-29 Thread Jason Wang


在 2022/1/22 上午4:27, Eugenio Pérez 写道:

This reports the guest's visible SVQ effective length, not the device's
one.



I think we need to explain if there could be a case that the SVQ size is 
not equal to the device queue size.


Thanks




Signed-off-by: Eugenio Pérez 
---
  hw/virtio/vhost-shadow-virtqueue.h | 1 +
  hw/virtio/vhost-shadow-virtqueue.c | 5 +
  2 files changed, 6 insertions(+)

diff --git a/hw/virtio/vhost-shadow-virtqueue.h 
b/hw/virtio/vhost-shadow-virtqueue.h
index 3521e8094d..035207a469 100644
--- a/hw/virtio/vhost-shadow-virtqueue.h
+++ b/hw/virtio/vhost-shadow-virtqueue.h
@@ -29,6 +29,7 @@ const EventNotifier *vhost_svq_get_svq_call_notifier(
const VhostShadowVirtqueue 
*svq);
  void vhost_svq_get_vring_addr(const VhostShadowVirtqueue *svq,
struct vhost_vring_addr *addr);
+uint16_t vhost_svq_get_num(const VhostShadowVirtqueue *svq);
  size_t vhost_svq_driver_area_size(const VhostShadowVirtqueue *svq);
  size_t vhost_svq_device_area_size(const VhostShadowVirtqueue *svq);
  
diff --git a/hw/virtio/vhost-shadow-virtqueue.c b/hw/virtio/vhost-shadow-virtqueue.c

index 0f2c2403ff..f129ec8395 100644
--- a/hw/virtio/vhost-shadow-virtqueue.c
+++ b/hw/virtio/vhost-shadow-virtqueue.c
@@ -212,6 +212,11 @@ void vhost_svq_get_vring_addr(const VhostShadowVirtqueue 
*svq,
  addr->used_user_addr = (uint64_t)svq->vring.used;
  }
  
+uint16_t vhost_svq_get_num(const VhostShadowVirtqueue *svq)

+{
+return svq->vring.num;
+}
+
  size_t vhost_svq_driver_area_size(const VhostShadowVirtqueue *svq)
  {
  size_t desc_size = sizeof(vring_desc_t) * svq->vring.num;


___
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization

Re: [PATCH 11/31] vhost: Add vhost_svq_valid_device_features to shadow vq

2022-01-29 Thread Jason Wang


在 2022/1/22 上午4:27, Eugenio Pérez 写道:

This allows SVQ to negotiate features with the device. For the device,
SVQ is a driver. While this function needs to bypass all non-transport
features, it needs to disable the features that SVQ does not support
when forwarding buffers. This includes packed vq layout, indirect
descriptors or event idx.

Signed-off-by: Eugenio Pérez 
---
  hw/virtio/vhost-shadow-virtqueue.h |  2 ++
  hw/virtio/vhost-shadow-virtqueue.c | 44 ++
  hw/virtio/vhost-vdpa.c | 21 ++
  3 files changed, 67 insertions(+)

diff --git a/hw/virtio/vhost-shadow-virtqueue.h 
b/hw/virtio/vhost-shadow-virtqueue.h
index c9ffa11fce..d963867a04 100644
--- a/hw/virtio/vhost-shadow-virtqueue.h
+++ b/hw/virtio/vhost-shadow-virtqueue.h
@@ -15,6 +15,8 @@
  
  typedef struct VhostShadowVirtqueue VhostShadowVirtqueue;
  
+bool vhost_svq_valid_device_features(uint64_t *features);

+
  void vhost_svq_set_svq_kick_fd(VhostShadowVirtqueue *svq, int svq_kick_fd);
  void vhost_svq_set_guest_call_notifier(VhostShadowVirtqueue *svq, int 
call_fd);
  const EventNotifier *vhost_svq_get_dev_kick_notifier(
diff --git a/hw/virtio/vhost-shadow-virtqueue.c 
b/hw/virtio/vhost-shadow-virtqueue.c
index 9619c8082c..51442b3dbf 100644
--- a/hw/virtio/vhost-shadow-virtqueue.c
+++ b/hw/virtio/vhost-shadow-virtqueue.c
@@ -45,6 +45,50 @@ const EventNotifier *vhost_svq_get_dev_kick_notifier(
  return &svq->hdev_kick;
  }
  
+/**

+ * Validate the transport device features that SVQ can use with the device
+ *
+ * @dev_features  The device features. If success, the acknowledged features.
+ *
+ * Returns true if SVQ can go with a subset of these, false otherwise.
+ */
+bool vhost_svq_valid_device_features(uint64_t *dev_features)
+{
+bool r = true;
+
+for (uint64_t b = VIRTIO_TRANSPORT_F_START; b <= VIRTIO_TRANSPORT_F_END;
+ ++b) {
+switch (b) {
+case VIRTIO_F_NOTIFY_ON_EMPTY:
+case VIRTIO_F_ANY_LAYOUT:
+continue;
+
+case VIRTIO_F_ACCESS_PLATFORM:
+/* SVQ does not know how to translate addresses */



I may miss something but any reason that we need to disable 
ACCESS_PLATFORM? I'd expect the vring helper we used for shadow 
virtqueue can deal with vIOMMU perfectly.




+if (*dev_features & BIT_ULL(b)) {
+clear_bit(b, dev_features);
+r = false;
+}
+break;
+
+case VIRTIO_F_VERSION_1:



I had the same question here.

Thanks



+/* SVQ trust that guest vring is little endian */
+if (!(*dev_features & BIT_ULL(b))) {
+set_bit(b, dev_features);
+r = false;
+}
+continue;
+
+default:
+if (*dev_features & BIT_ULL(b)) {
+clear_bit(b, dev_features);
+}
+}
+}
+
+return r;
+}
+
  /* Forward guest notifications */
  static void vhost_handle_guest_kick(EventNotifier *n)
  {
diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
index bdb45c8808..9d801cf907 100644
--- a/hw/virtio/vhost-vdpa.c
+++ b/hw/virtio/vhost-vdpa.c
@@ -855,10 +855,31 @@ static int vhost_vdpa_init_svq(struct vhost_dev *hdev, 
struct vhost_vdpa *v,
  size_t n_svqs = v->shadow_vqs_enabled ? hdev->nvqs : 0;
  g_autoptr(GPtrArray) shadow_vqs = g_ptr_array_new_full(n_svqs,
 vhost_psvq_free);
+uint64_t dev_features;
+uint64_t svq_features;
+int r;
+bool ok;
+
  if (!v->shadow_vqs_enabled) {
  goto out;
  }
  
+r = vhost_vdpa_get_features(hdev, &dev_features);

+if (r != 0) {
+error_setg(errp, "Can't get vdpa device features, got (%d)", r);
+return r;
+}
+
+svq_features = dev_features;
+ok = vhost_svq_valid_device_features(&svq_features);
+if (unlikely(!ok)) {
+error_setg(errp,
+"SVQ Invalid device feature flags, offer: 0x%"PRIx64", ok: 
0x%"PRIx64,
+hdev->features, svq_features);
+return -1;
+}
+
+shadow_vqs = g_ptr_array_new_full(hdev->nvqs, vhost_psvq_free);
  for (unsigned n = 0; n < hdev->nvqs; ++n) {
  VhostShadowVirtqueue *svq = vhost_svq_new();
  


___
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization

Re: [PATCH 09/31] vhost-vdpa: Take into account SVQ in vhost_vdpa_set_vring_call

2022-01-29 Thread Jason Wang


在 2022/1/22 上午4:27, Eugenio Pérez 写道:

Signed-off-by: Eugenio Pérez 
---
  hw/virtio/vhost-vdpa.c | 20 ++--
  1 file changed, 18 insertions(+), 2 deletions(-)

diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
index 18de14f0fb..029f98feee 100644
--- a/hw/virtio/vhost-vdpa.c
+++ b/hw/virtio/vhost-vdpa.c
@@ -687,13 +687,29 @@ static int vhost_vdpa_set_vring_kick(struct vhost_dev 
*dev,
  }
  }
  
-static int vhost_vdpa_set_vring_call(struct vhost_dev *dev,

-   struct vhost_vring_file *file)
+static int vhost_vdpa_set_vring_dev_call(struct vhost_dev *dev,
+ struct vhost_vring_file *file)
  {
  trace_vhost_vdpa_set_vring_call(dev, file->index, file->fd);
  return vhost_vdpa_call(dev, VHOST_SET_VRING_CALL, file);
  }
  
+static int vhost_vdpa_set_vring_call(struct vhost_dev *dev,

+ struct vhost_vring_file *file)
+{
+struct vhost_vdpa *v = dev->opaque;
+
+if (v->shadow_vqs_enabled) {
+int vdpa_idx = vhost_vdpa_get_vq_index(dev, file->index);
+VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, vdpa_idx);
+
+vhost_svq_set_guest_call_notifier(svq, file->fd);



Two questions here (had similar questions for vring kick):

1) Any reason that we setup the eventfd for vhost-vdpa in 
vhost_vdpa_svq_setup() not here?


2) The call could be disabled by using -1 as the fd, I don't see any 
code to deal with that.


Thanks



+return 0;
+} else {
+return vhost_vdpa_set_vring_dev_call(dev, file);
+}
+}
+
  /**
   * Set shadow virtqueue descriptors to the device
   *


___
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization