>-----Original Message-----
>From: Eric Auger <eric.au...@redhat.com>
>Subject: [PATCH 3/6] virtio-iommu: Free [host_]resv_ranges on unset_iommu_devices
>
>We are currently missing the deallocation of the [host_]resv_regions
>on hot unplug. Also, to keep things simple, let's rule out the case
>where multiple HostIOMMUDevices would be aliased and attached to the
>same IOMMUDevice. This allows us to remove the handling of conflicting
>host reserved regions, which is not properly supported at the guest
>kernel level anyway. On hot unplug the reserved regions are reset to
>the ones set by the virtio-iommu property.
>
>Signed-off-by: Eric Auger <eric.au...@redhat.com>
>---
> hw/virtio/virtio-iommu.c | 62 ++++++++++++++++++----------------------
> 1 file changed, 28 insertions(+), 34 deletions(-)
>
>diff --git a/hw/virtio/virtio-iommu.c b/hw/virtio/virtio-iommu.c
>index 2c54c0d976..2de41ab412 100644
>--- a/hw/virtio/virtio-iommu.c
>+++ b/hw/virtio/virtio-iommu.c
>@@ -538,8 +538,6 @@ static int virtio_iommu_set_host_iova_ranges(VirtIOIOMMU *s, PCIBus *bus,
> {
>     IOMMUPciBus *sbus = g_hash_table_lookup(s->as_by_busptr, bus);
>     IOMMUDevice *sdev;
>-    GList *current_ranges;
>-    GList *l, *tmp, *new_ranges = NULL;
>     int ret = -EINVAL;
>
>     if (!sbus) {
>@@ -553,33 +551,10 @@ static int virtio_iommu_set_host_iova_ranges(VirtIOIOMMU *s, PCIBus *bus,
>         return ret;
>     }
>
>-    current_ranges = sdev->host_resv_ranges;
>-
>-    /* check that each new resv region is included in an existing one */
>     if (sdev->host_resv_ranges) {
>-        range_inverse_array(iova_ranges,
>-                            &new_ranges,
>-                            0, UINT64_MAX);
>-
>-        for (tmp = new_ranges; tmp; tmp = tmp->next) {
>-            Range *newr = (Range *)tmp->data;
>-            bool included = false;
>-
>-            for (l = current_ranges; l; l = l->next) {
>-                Range * r = (Range *)l->data;
>-
>-                if (range_contains_range(r, newr)) {
>-                    included = true;
>-                    break;
>-                }
>-            }
>-            if (!included) {
>-                goto error;
>-            }
>-        }
>-        /* all new reserved ranges are included in existing ones */
>-        ret = 0;
>-        goto out;
>+        error_setg(errp, "%s virtio-iommu does not support aliased BDF",
>+                   __func__);
>+        return ret;
>     }
>
>     range_inverse_array(iova_ranges,
>@@ -588,14 +563,31 @@ static int virtio_iommu_set_host_iova_ranges(VirtIOIOMMU *s, PCIBus *bus,
>     rebuild_resv_regions(sdev);
>
>     return 0;
>-error:
>-    error_setg(errp, "%s Conflicting host reserved ranges set!",
>-               __func__);
>-out:
>-    g_list_free_full(new_ranges, g_free);
>-    return ret;
> }
>
>+static void virtio_iommu_unset_host_iova_ranges(VirtIOIOMMU *s, PCIBus *bus,
>+                                                int devfn)
>+{
>+    IOMMUPciBus *sbus = g_hash_table_lookup(s->as_by_busptr, bus);
>+    IOMMUDevice *sdev;
>+
>+    if (!sbus) {
>+        return;
>+    }
>+
>+    sdev = sbus->pbdev[devfn];
>+    if (!sdev) {
>+        return;
>+    }
>+
>+    g_list_free_full(g_steal_pointer(&sdev->host_resv_ranges), g_free);
>+    g_list_free_full(sdev->resv_regions, g_free);
>+    sdev->host_resv_ranges = NULL;
>+    sdev->resv_regions = NULL;
>+    add_prop_resv_regions(sdev);

Is this add_prop_resv_regions() call necessary? rebuild_resv_regions() will do that again.
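
If so, a minimal sketch of the helper without that call could look like the
following (untested, and assuming rebuild_resv_regions() re-adds the
property regions the next time the host IOVA ranges are set):

    static void virtio_iommu_unset_host_iova_ranges(VirtIOIOMMU *s, PCIBus *bus,
                                                    int devfn)
    {
        IOMMUPciBus *sbus = g_hash_table_lookup(s->as_by_busptr, bus);
        IOMMUDevice *sdev;

        if (!sbus) {
            return;
        }

        sdev = sbus->pbdev[devfn];
        if (!sdev) {
            return;
        }

        /* Drop the host ranges and the rebuilt region list; the property
         * regions get re-added when rebuild_resv_regions() runs on the
         * next set of host IOVA ranges.
         */
        g_list_free_full(g_steal_pointer(&sdev->host_resv_ranges), g_free);
        g_list_free_full(g_steal_pointer(&sdev->resv_regions), g_free);
    }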

Other than that, for the whole series,

Reviewed-by: Zhenzhong Duan <zhenzhong.d...@intel.com>

Thanks
Zhenzhong

>+}
>+
>+
> static bool check_page_size_mask(VirtIOIOMMU *viommu, uint64_t new_mask,
>                                  Error **errp)
> {
>@@ -704,6 +696,8 @@ virtio_iommu_unset_iommu_device(PCIBus *bus, void *opaque, int devfn)
>     if (!hiod) {
>         return;
>     }
>+    virtio_iommu_unset_host_iova_ranges(viommu, hiod->aliased_bus,
>+                                        hiod->aliased_devfn);
>
>     g_hash_table_remove(viommu->host_iommu_devices, &key);
> }
>--
>2.41.0

