Hi Cédric,

>-----Original Message-----
>From: Cédric Le Goater <c...@redhat.com>
>Sent: Friday, October 27, 2023 11:52 PM
>Subject: Re: [PATCH v3 14/37] vfio/container: Move vrdl_list, pgsizes and
>dma_max_mappings to base container
>
>On 10/26/23 12:30, Zhenzhong Duan wrote:
>> From: Eric Auger <eric.au...@redhat.com>
>>
...

>>   void vfio_container_destroy(VFIOContainerBase *bcontainer)
>>   {
>> +    VFIORamDiscardListener *vrdl, *vrdl_tmp;
>>       VFIOGuestIOMMU *giommu, *tmp;
>>
>>       QLIST_REMOVE(bcontainer, next);
>>
>> +    QLIST_FOREACH_SAFE(vrdl, &bcontainer->vrdl_list, next, vrdl_tmp) {
>> +        RamDiscardManager *rdm;
>> +
>> +        rdm = memory_region_get_ram_discard_manager(vrdl->mr);
>> +        ram_discard_manager_unregister_listener(rdm, &vrdl->listener);
>> +        QLIST_REMOVE(vrdl, next);
>> +        g_free(vrdl);
>> +    }
>
>Where was this done previously ?

Good catch! This should be removed.

>Maybe the vrdl list should be handled
>separately from pgsizes and dma_max_mappings.

Good suggestion! Will do.

>
>>       QLIST_FOREACH_SAFE(giommu, &bcontainer->giommu_list, giommu_next,
>tmp) {
>>           memory_region_unregister_iommu_notifier(
>>                   MEMORY_REGION(giommu->iommu_mr), &giommu->n);
>> diff --git a/hw/vfio/container.c b/hw/vfio/container.c
>> index 8d5b408e86..0e265ffa67 100644
>> --- a/hw/vfio/container.c
>> +++ b/hw/vfio/container.c
>> @@ -154,7 +154,7 @@ static int vfio_legacy_dma_unmap(VFIOContainerBase
>*bcontainer, hwaddr iova,
>>           if (errno == EINVAL && unmap.size && !(unmap.iova + unmap.size) &&
>>               container->iommu_type == VFIO_TYPE1v2_IOMMU) {
>>               trace_vfio_legacy_dma_unmap_overflow_workaround();
>> -            unmap.size -= 1ULL << ctz64(container->pgsizes);
>> +            unmap.size -= 1ULL << ctz64(container->bcontainer.pgsizes);
>>               continue;
>>           }
>>           error_report("VFIO_UNMAP_DMA failed: %s", strerror(errno));
>> @@ -559,9 +559,7 @@ static int vfio_connect_container(VFIOGroup *group,
>AddressSpace *as,
>>       container = g_malloc0(sizeof(*container));
>>       container->fd = fd;
>>       container->error = NULL;
>> -    container->dma_max_mappings = 0;
>>       container->iova_ranges = NULL;
>> -    QLIST_INIT(&container->vrdl_list);
>>       bcontainer = &container->bcontainer;
>>       vfio_container_init(bcontainer, space, &vfio_legacy_ops);
>>
>> @@ -589,13 +587,13 @@ static int vfio_connect_container(VFIOGroup *group,
>AddressSpace *as,
>>           }
>>
>>           if (info->flags & VFIO_IOMMU_INFO_PGSIZES) {
>> -            container->pgsizes = info->iova_pgsizes;
>> +            container->bcontainer.pgsizes = info->iova_pgsizes;
>>           } else {
>> -            container->pgsizes = qemu_real_host_page_size();
>> +            container->bcontainer.pgsizes = qemu_real_host_page_size();
>>           }
>>
>> -        if (!vfio_get_info_dma_avail(info, &container->dma_max_mappings)) {
>> -            container->dma_max_mappings = 65535;
>> +        if (!vfio_get_info_dma_avail(info, &bcontainer->dma_max_mappings)) {
>> +            container->bcontainer.dma_max_mappings = 65535;
>>           }
>>
>>           vfio_get_info_iova_range(info, container);
>> diff --git a/hw/vfio/spapr.c b/hw/vfio/spapr.c
>> index 3495737ab2..dbc4c24052 100644
>> --- a/hw/vfio/spapr.c
>> +++ b/hw/vfio/spapr.c
>> @@ -223,13 +223,13 @@ static int vfio_spapr_create_window(VFIOContainer
>*container,
>>       if (pagesize > rampagesize) {
>>           pagesize = rampagesize;
>>       }
>> -    pgmask = container->pgsizes & (pagesize | (pagesize - 1));
>> +    pgmask = container->bcontainer.pgsizes & (pagesize | (pagesize - 1));
>>       pagesize = pgmask ? (1ULL << (63 - clz64(pgmask))) : 0;
>>       if (!pagesize) {
>>           error_report("Host doesn't support page size 0x%"PRIx64
>>                        ", the supported mask is 0x%lx",
>>                        memory_region_iommu_get_min_page_size(iommu_mr),
>> -                     container->pgsizes);
>> +                     container->bcontainer.pgsizes);
>>           return -EINVAL;
>>       }
>>
>> @@ -385,7 +385,7 @@ void
>vfio_container_del_section_window(VFIOContainer *container,
>>
>>   bool vfio_spapr_container_init(VFIOContainer *container, Error **errp)
>>   {
>> -
>> +    VFIOContainerBase *bcontainer = &container->bcontainer;
>>       struct vfio_iommu_spapr_tce_info info;
>>       bool v2 = container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU;
>>       int ret, fd = container->fd;
>> @@ -424,7 +424,7 @@ bool vfio_spapr_container_init(VFIOContainer
>*container, Error **errp)
>>       }
>>
>>       if (v2) {
>> -        container->pgsizes = info.ddw.pgsizes;
>> +        bcontainer->pgsizes = info.ddw.pgsizes;
>>           /*
>>            * There is a default window in just created container.
>>            * To make region_add/del simpler, we better remove this
>> @@ -439,7 +439,7 @@ bool vfio_spapr_container_init(VFIOContainer
>*container, Error **errp)
>>           }
>>       } else {
>>           /* The default table uses 4K pages */
>> -        container->pgsizes = 0x1000;
>> +        bcontainer->pgsizes = 0x1000;
>>           vfio_host_win_add(container, info.dma32_window_start,
>>                             info.dma32_window_start +
>>                             info.dma32_window_size - 1,
>> @@ -455,7 +455,15 @@ listener_unregister_exit:
>>
>>   void vfio_spapr_container_deinit(VFIOContainer *container)
>>   {
>> +    VFIOHostDMAWindow *hostwin, *next;
>> +
>>       if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
>>           memory_listener_unregister(&container->prereg_listener);
>>       }
>> +    QLIST_FOREACH_SAFE(hostwin, &container->hostwin_list, hostwin_next,
>> +                       next) {
>> +        QLIST_REMOVE(hostwin, hostwin_next);
>> +        g_free(hostwin);
>> +    }
>> +
>>   }
>
>I am not sure this change belongs to this patch.

Good catch! Yes, I should move it into the patch below:
"vfio/common: Move vfio_host_win_add/del into spapr.c"

Thanks
Zhenzhong

Reply via email to