On 10/26/23 12:30, Zhenzhong Duan wrote:
From: Eric Auger <eric.au...@redhat.com>

dirty_pages_supported field is also moved to the base container

No functional change intended.

Signed-off-by: Eric Auger <eric.au...@redhat.com>
Signed-off-by: Yi Liu <yi.l....@intel.com>
Signed-off-by: Yi Sun <yi.y....@linux.intel.com>
Signed-off-by: Zhenzhong Duan <zhenzhong.d...@intel.com>
Signed-off-by: Cédric Le Goater <c...@redhat.com>
---
  include/hw/vfio/vfio-common.h         |  6 ------
  include/hw/vfio/vfio-container-base.h |  6 ++++++
  hw/vfio/common.c                      | 12 ++++++++----
  hw/vfio/container-base.c              | 23 +++++++++++++++++++++++
  hw/vfio/container.c                   | 21 ++++++++++++++-------
  5 files changed, 51 insertions(+), 17 deletions(-)

diff --git a/include/hw/vfio/vfio-common.h b/include/hw/vfio/vfio-common.h
index 857d2b8076..d053c61872 100644
--- a/include/hw/vfio/vfio-common.h
+++ b/include/hw/vfio/vfio-common.h
@@ -83,7 +83,6 @@ typedef struct VFIOContainer {
      unsigned iommu_type;
      Error *error;
      bool initialized;
-    bool dirty_pages_supported;
      uint64_t dirty_pgsizes;
      uint64_t max_dirty_bitmap_size;
      unsigned long pgsizes;
@@ -190,11 +189,6 @@ VFIOAddressSpace *vfio_get_address_space(AddressSpace *as);
  void vfio_put_address_space(VFIOAddressSpace *space);
  bool vfio_devices_all_running_and_saving(VFIOContainer *container);
-/* container->fd */
-int vfio_set_dirty_page_tracking(VFIOContainer *container, bool start);
-int vfio_query_dirty_bitmap(VFIOContainer *container, VFIOBitmap *vbmap,
-                            hwaddr iova, hwaddr size);
-
  /* SPAPR specific */
  int vfio_container_add_section_window(VFIOContainer *container,
                                        MemoryRegionSection *section,
diff --git a/include/hw/vfio/vfio-container-base.h b/include/hw/vfio/vfio-container-base.h
index a5fef3e6a8..ea8436a064 100644
--- a/include/hw/vfio/vfio-container-base.h
+++ b/include/hw/vfio/vfio-container-base.h
@@ -50,6 +50,7 @@ typedef struct VFIOAddressSpace {
  typedef struct VFIOContainerBase {
      const VFIOIOMMUOps *ops;
      VFIOAddressSpace *space;
+    bool dirty_pages_supported;
      QLIST_HEAD(, VFIOGuestIOMMU) giommu_list;
      QLIST_ENTRY(VFIOContainerBase) next;
  } VFIOContainerBase;
@@ -68,6 +69,11 @@ int vfio_container_dma_map(VFIOContainerBase *bcontainer,
  int vfio_container_dma_unmap(VFIOContainerBase *bcontainer,
                               hwaddr iova, ram_addr_t size,
                               IOMMUTLBEntry *iotlb);
+int vfio_container_set_dirty_page_tracking(VFIOContainerBase *bcontainer,
+                                           bool start);
+int vfio_container_query_dirty_bitmap(VFIOContainerBase *bcontainer,
+                                      VFIOBitmap *vbmap,
+                                      hwaddr iova, hwaddr size);
void vfio_container_init(VFIOContainerBase *bcontainer,
                           VFIOAddressSpace *space,
diff --git a/hw/vfio/common.c b/hw/vfio/common.c
index f87a0dcec3..7d9b87fc67 100644
--- a/hw/vfio/common.c
+++ b/hw/vfio/common.c
@@ -1081,7 +1081,8 @@ static void vfio_listener_log_global_start(MemoryListener *listener)
      if (vfio_devices_all_device_dirty_tracking(container)) {
          ret = vfio_devices_dma_logging_start(container);
      } else {
-        ret = vfio_set_dirty_page_tracking(container, true);
+        ret = vfio_container_set_dirty_page_tracking(&container->bcontainer,
+                                                     true);
      }
if (ret) {
@@ -1099,7 +1100,8 @@ static void vfio_listener_log_global_stop(MemoryListener *listener)
      if (vfio_devices_all_device_dirty_tracking(container)) {
          vfio_devices_dma_logging_stop(container);
      } else {
-        ret = vfio_set_dirty_page_tracking(container, false);
+        ret = vfio_container_set_dirty_page_tracking(&container->bcontainer,
+                                                     false);
      }
if (ret) {
@@ -1167,7 +1169,8 @@ int vfio_get_dirty_bitmap(VFIOContainer *container, uint64_t iova,
      VFIOBitmap vbmap;
      int ret;
- if (!container->dirty_pages_supported && !all_device_dirty_tracking) {
+    if (!container->bcontainer.dirty_pages_supported &&
+        !all_device_dirty_tracking) {
          cpu_physical_memory_set_dirty_range(ram_addr, size,
                                            tcg_enabled() ? DIRTY_CLIENTS_ALL :
                                            DIRTY_CLIENTS_NOCODE);
@@ -1182,7 +1185,8 @@ int vfio_get_dirty_bitmap(VFIOContainer *container, uint64_t iova,
      if (all_device_dirty_tracking) {
          ret = vfio_devices_query_dirty_bitmap(container, &vbmap, iova, size);
      } else {
-        ret = vfio_query_dirty_bitmap(container, &vbmap, iova, size);
+        ret = vfio_container_query_dirty_bitmap(&container->bcontainer, &vbmap,
+                                                iova, size);
      }
if (ret) {
diff --git a/hw/vfio/container-base.c b/hw/vfio/container-base.c
index 99b2957d7b..a7cf517dd2 100644
--- a/hw/vfio/container-base.c
+++ b/hw/vfio/container-base.c
@@ -48,11 +48,34 @@ int vfio_container_dma_unmap(VFIOContainerBase *bcontainer,
      return bcontainer->ops->dma_unmap(bcontainer, iova, size, iotlb);
  }
+int vfio_container_set_dirty_page_tracking(VFIOContainerBase *bcontainer,
+                                           bool start)
+{
+    /* Fallback to all pages dirty if dirty page sync isn't supported */
+    if (!bcontainer->ops->set_dirty_page_tracking) {
+        return 0;
+    }


I would start with an assert and relax the check later on, if needed and
in its own patch.

+    return bcontainer->ops->set_dirty_page_tracking(bcontainer, start);
+}
+
+int vfio_container_query_dirty_bitmap(VFIOContainerBase *bcontainer,
+                                      VFIOBitmap *vbmap,
+                                      hwaddr iova, hwaddr size)
+{
+    if (!bcontainer->ops->query_dirty_bitmap) {
+        return -EINVAL;
+    }

Same comment.

Thanks,

C.

+    return bcontainer->ops->query_dirty_bitmap(bcontainer, vbmap, iova, size);
+}
+
  void vfio_container_init(VFIOContainerBase *bcontainer, VFIOAddressSpace *space,
                           const VFIOIOMMUOps *ops)
  {
      bcontainer->ops = ops;
      bcontainer->space = space;
+    bcontainer->dirty_pages_supported = false;
      QLIST_INIT(&bcontainer->giommu_list);
  }
diff --git a/hw/vfio/container.c b/hw/vfio/container.c
index 761310fa51..6f02ca133e 100644
--- a/hw/vfio/container.c
+++ b/hw/vfio/container.c
@@ -131,7 +131,7 @@ static int vfio_legacy_dma_unmap(VFIOContainerBase *bcontainer, hwaddr iova,
if (iotlb && vfio_devices_all_running_and_mig_active(container)) {
          if (!vfio_devices_all_device_dirty_tracking(container) &&
-            container->dirty_pages_supported) {
+            container->bcontainer.dirty_pages_supported) {
              return vfio_dma_unmap_bitmap(container, iova, size, iotlb);
          }
@@ -205,14 +205,17 @@ static int vfio_legacy_dma_map(VFIOContainerBase *bcontainer, hwaddr iova,
      return -errno;
  }
-int vfio_set_dirty_page_tracking(VFIOContainer *container, bool start)
+static int vfio_legacy_set_dirty_page_tracking(VFIOContainerBase *bcontainer,
+                                               bool start)
  {
+    VFIOContainer *container = container_of(bcontainer, VFIOContainer,
+                                            bcontainer);
      int ret;
      struct vfio_iommu_type1_dirty_bitmap dirty = {
          .argsz = sizeof(dirty),
      };
- if (!container->dirty_pages_supported) {
+    if (!bcontainer->dirty_pages_supported) {
          return 0;
      }
@@ -232,9 +235,12 @@ int vfio_set_dirty_page_tracking(VFIOContainer *container, bool start)
      return ret;
  }
-int vfio_query_dirty_bitmap(VFIOContainer *container, VFIOBitmap *vbmap,
-                            hwaddr iova, hwaddr size)
+static int vfio_legacy_query_dirty_bitmap(VFIOContainerBase *bcontainer,
+                                          VFIOBitmap *vbmap,
+                                          hwaddr iova, hwaddr size)
  {
+    VFIOContainer *container = container_of(bcontainer, VFIOContainer,
+                                            bcontainer);
      struct vfio_iommu_type1_dirty_bitmap *dbitmap;
      struct vfio_iommu_type1_dirty_bitmap_get *range;
      int ret;
@@ -461,7 +467,7 @@ static void vfio_get_iommu_info_migration(VFIOContainer *container,
       * qemu_real_host_page_size to mark those dirty.
       */
      if (cap_mig->pgsize_bitmap & qemu_real_host_page_size()) {
-        container->dirty_pages_supported = true;
+        container->bcontainer.dirty_pages_supported = true;
          container->max_dirty_bitmap_size = cap_mig->max_dirty_bitmap_size;
          container->dirty_pgsizes = cap_mig->pgsize_bitmap;
      }
@@ -553,7 +559,6 @@ static int vfio_connect_container(VFIOGroup *group, AddressSpace *as,
      container = g_malloc0(sizeof(*container));
      container->fd = fd;
      container->error = NULL;
-    container->dirty_pages_supported = false;
      container->dma_max_mappings = 0;
      container->iova_ranges = NULL;
      QLIST_INIT(&container->vrdl_list);
@@ -938,4 +943,6 @@ void vfio_detach_device(VFIODevice *vbasedev)
  const VFIOIOMMUOps vfio_legacy_ops = {
      .dma_map = vfio_legacy_dma_map,
      .dma_unmap = vfio_legacy_dma_unmap,
+    .set_dirty_page_tracking = vfio_legacy_set_dirty_page_tracking,
+    .query_dirty_bitmap = vfio_legacy_query_dirty_bitmap,
  };


Reply via email to