>-----Original Message-----
>From: Eric Auger <eric.au...@redhat.com>
>Subject: Re: [RFC v2 4/7] virtio-iommu: Compute host reserved regions
>
>Hi Zhenzhong,
>
>On 6/11/24 05:25, Duan, Zhenzhong wrote:
>>
>>> -----Original Message-----
>>> From: Eric Auger <eric.au...@redhat.com>
>>> Subject: [RFC v2 4/7] virtio-iommu: Compute host reserved regions
>>>
>>> Compute the host reserved regions in virtio_iommu_set_iommu_device().
>>> The usable IOVA regions are retrieved from the HOSTIOMMUDevice.
>>> The virtio_iommu_set_host_iova_ranges() helper turns usable regions
>>> into complementary reserved regions while checking their inclusion
>>> in the existing ones. virtio_iommu_set_host_iova_ranges() reuses the
>>> implementation of virtio_iommu_set_iova_ranges(), which will be
>>> removed in subsequent patches. rebuild_resv_regions() is just moved.
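
(Side note to restate the inversion for other readers, if I read
range_inverse_array() right: the usable ranges reported by the host are
complemented over [0, UINT64_MAX]. For example, assuming the host reports
the usable IOVA ranges [0x0, 0xfedfffff] and [0xfef00000, UINT64_MAX], the
resulting reserved region is the single range [0xfee00000, 0xfeefffff],
i.e. the x86 MSI window.)
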
>>>
>>> Signed-off-by: Eric Auger <eric.au...@redhat.com>
>>> ---
>>> hw/virtio/virtio-iommu.c | 151 ++++++++++++++++++++++++++++++----------
>>> 1 file changed, 117 insertions(+), 34 deletions(-)
>>>
>>> diff --git a/hw/virtio/virtio-iommu.c b/hw/virtio/virtio-iommu.c
>>> index 0680a357f0..33e9682b83 100644
>>> --- a/hw/virtio/virtio-iommu.c
>>> +++ b/hw/virtio/virtio-iommu.c
>>> @@ -494,12 +494,114 @@ get_host_iommu_device(VirtIOIOMMU *viommu, PCIBus *bus, int devfn) {
>>>     return g_hash_table_lookup(viommu->host_iommu_devices, &key);
>>> }
>>>
>>> +/**
>>> + * rebuild_resv_regions: rebuild resv regions with both the
>>> + * info of host resv ranges and property set resv ranges
>>> + */
>>> +static int rebuild_resv_regions(IOMMUDevice *sdev)
>>> +{
>>> +    GList *l;
>>> +    int i = 0;
>>> +
>>> +    /* free the existing list and rebuild it from scratch */
>>> +    g_list_free_full(sdev->resv_regions, g_free);
>>> +    sdev->resv_regions = NULL;
>>> +
>>> +    /* First add host reserved regions if any, all tagged as RESERVED */
>>> +    for (l = sdev->host_resv_ranges; l; l = l->next) {
>>> +        ReservedRegion *reg = g_new0(ReservedRegion, 1);
>>> +        Range *r = (Range *)l->data;
>>> +
>>> +        reg->type = VIRTIO_IOMMU_RESV_MEM_T_RESERVED;
>>> +        range_set_bounds(&reg->range, range_lob(r), range_upb(r));
>>> +        sdev->resv_regions = resv_region_list_insert(sdev->resv_regions, reg);
>>> +        trace_virtio_iommu_host_resv_regions(sdev->iommu_mr.parent_obj.name, i,
>>> +                                             range_lob(&reg->range),
>>> +                                             range_upb(&reg->range));
>>> +        i++;
>>> +    }
>>> +    /*
>>> +     * then add higher priority reserved regions set by the machine
>>> +     * through properties
>>> +     */
>>> +    add_prop_resv_regions(sdev);
>>> +    return 0;
>>> +}
>>> +
>>> +static int virtio_iommu_set_host_iova_ranges(VirtIOIOMMU *s, PCIBus *bus,
>>> +                                             int devfn, GList *iova_ranges,
>>> +                                             Error **errp)
>>> +{
>>> +    IOMMUPciBus *sbus = g_hash_table_lookup(s->as_by_busptr, bus);
>>> +    IOMMUDevice *sdev;
>>> +    GList *current_ranges;
>>> +    GList *l, *tmp, *new_ranges = NULL;
>>> +    int ret = -EINVAL;
>>> +
>>> +    if (!sbus) {
>>> +        error_report("%s no sbus", __func__);
>>> +    }
>>> +
>>> +    sdev = sbus->pbdev[devfn];
>>> +
>>> +    current_ranges = sdev->host_resv_ranges;
>>> +
>>> +    if (sdev->probe_done) {
>> Will this still happen with the new interface?
>No, this shouldn't happen. It is replaced by a g_assert(!sdev->probe_done)
>to make sure the interface is used properly.
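
Just to check I understand the plan, something like the below (a rough
sketch on my side, not your actual code)?

    /* hypothetical: the new set_iommu_device() interface is expected to
     * provide the host reserved regions before the guest can issue a
     * PROBE request, so the error path becomes an assertion */
    g_assert(!sdev->probe_done);
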
>>
>>> +        error_setg(errp,
>>> +                   "%s: Notified about new host reserved regions after 
>>> probe",
>>> +                   __func__);
>>> +        goto out;
>>> +    }
>>> +
>>> +    /* check that each new resv region is included in an existing one */
>>> +    if (sdev->host_resv_ranges) {
>> Same here.
>To me this one can still happen when several devices belong to the
>same group.

If the same slot is used to plug/unplug vfio devices with different reserved
ranges, the second device plug may fail this check.
It looks like sdev->host_resv_ranges is not freed after unplug.
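
A rough sketch of the kind of cleanup I have in mind on the unplug path
(the callback name and where it is hooked are my assumptions, not taken
from this series):

    /* hypothetical unset counterpart of virtio_iommu_set_iommu_device() */
    static void virtio_iommu_unset_iommu_device(PCIBus *bus, void *opaque,
                                                int devfn)
    {
        VirtIOIOMMU *viommu = opaque;
        IOMMUPciBus *sbus = g_hash_table_lookup(viommu->as_by_busptr, bus);
        IOMMUDevice *sdev;

        if (!sbus || !sbus->pbdev[devfn]) {
            return;
        }
        sdev = sbus->pbdev[devfn];

        /* drop the host reserved ranges of the outgoing device so that a
         * later plug on the same slot starts from a clean state */
        g_list_free_full(g_steal_pointer(&sdev->host_resv_ranges), g_free);
        rebuild_resv_regions(sdev);
    }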

>>
>>> +        range_inverse_array(iova_ranges,
>>> +                            &new_ranges,
>>> +                            0, UINT64_MAX);
>>> +
>>> +        for (tmp = new_ranges; tmp; tmp = tmp->next) {
>>> +            Range *newr = (Range *)tmp->data;
>>> +            bool included = false;
>>> +
>>> +            for (l = current_ranges; l; l = l->next) {
>>> +                Range * r = (Range *)l->data;
>>> +
>>> +                if (range_contains_range(r, newr)) {
>>> +                    included = true;
>>> +                    break;
>>> +                }
>>> +            }
>>> +            if (!included) {
>>> +                goto error;
>>> +            }
>>> +        }
>>> +        /* all new reserved ranges are included in existing ones */
>>> +        ret = 0;
>>> +        goto out;
>>> +    }
>>> +
>>> +    range_inverse_array(iova_ranges,
>>> +                        &sdev->host_resv_ranges,
>>> +                        0, UINT64_MAX);
>>> +    rebuild_resv_regions(sdev);
>>> +
>>> +    return 0;
>>> +error:
>>> +    error_setg(errp, "%s Conflicting host reserved ranges set!",
>>> +               __func__);
>>> +out:
>>> +    g_list_free_full(new_ranges, g_free);
>>> +    return ret;
>>> +}
>>> +
>>> static bool virtio_iommu_set_iommu_device(PCIBus *bus, void *opaque, int devfn,
>>>                                           HostIOMMUDevice *hiod, Error **errp)
>>> {
>>>     VirtIOIOMMU *viommu = opaque;
>>>     VirtioHostIOMMUDevice *vhiod;
>>> +    HostIOMMUDeviceClass *hiodc = HOST_IOMMU_DEVICE_GET_CLASS(hiod);
>>>     struct hiod_key *new_key;
>>> +    GList *host_iova_ranges = NULL;
>> g_autoptr(GList)?
>are you sure this frees all the elements of the list?  

Not quite sure, I just saw some code in QEMU use it that way.
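
FWIW, IIUC glib defines the GList auto-cleanup as plain g_list_free(), so a
g_autoptr(GList) releases the list cells on scope exit but not the Range
elements they point to. Roughly (illustration only, not from the patch):

    {
        g_autoptr(GList) list = NULL;   /* cleanup func is g_list_free() */

        list = g_list_append(list, g_new0(Range, 1));
        /*
         * on scope exit only the list cells are freed; the Range above
         * would leak without an explicit g_list_free_full(list, g_free)
         * (after which the pointer must be NULLed or stolen to avoid a
         * double free)
         */
    }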

> As of now I would
>be tempted to leave the code as is.

Sure, that's fine.

Thanks
Zhenzhong
