Yi,

On 7/4/20 1:26 PM, Liu Yi L wrote:
> This patch exports iommu nesting capability info to user space through
> VFIO. User space is expected to check this info for supported uAPIs (e.g.
> PASID alloc/free, bind page table, and cache invalidation) and the vendor
> specific format information for first level/stage page table that will be
> bound to.
> 
> The nesting info is available only after the nesting iommu type is set
> for a container. Current implementation imposes one limitation - one
> nesting container should include at most one group. The philosophy of
> vfio container is having all groups/devices within the container share
> the same IOMMU context. When vSVA is enabled, one IOMMU context could
> include one 2nd-level address space and multiple 1st-level address spaces.
> While the 2nd-leve address space is reasonably sharable by multiple groups,
s/2nd-leve/2nd-level/
> blindly sharing 1st-level address spaces across all groups within the
> container might instead break the guest expectation. In the future sub/
> super container concept might be introduced to allow partial address space
> sharing within an IOMMU context. But for now let's go with this restriction
> by requiring singleton container for using nesting iommu features. Below
> link has the related discussion about this decision.
> 
> https://lkml.org/lkml/2020/5/15/1028
> 
> Cc: Kevin Tian <kevin.t...@intel.com>
> CC: Jacob Pan <jacob.jun....@linux.intel.com>
> Cc: Alex Williamson <alex.william...@redhat.com>
> Cc: Eric Auger <eric.au...@redhat.com>
> Cc: Jean-Philippe Brucker <jean-phili...@linaro.org>
> Cc: Joerg Roedel <j...@8bytes.org>
> Cc: Lu Baolu <baolu...@linux.intel.com>
> Signed-off-by: Liu Yi L <yi.l....@intel.com>
> ---
> v3 -> v4:
> *) address comments against v3.
> 
> v1 -> v2:
> *) added in v2
> ---
> 
>  drivers/vfio/vfio_iommu_type1.c | 105 +++++++++++++++++++++++++++++++++++-----
>  include/uapi/linux/vfio.h       |  16 ++++++
>  2 files changed, 109 insertions(+), 12 deletions(-)
> 
> diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
> index 7accb59..80623b8 100644
> --- a/drivers/vfio/vfio_iommu_type1.c
> +++ b/drivers/vfio/vfio_iommu_type1.c
> @@ -62,18 +62,20 @@ MODULE_PARM_DESC(dma_entry_limit,
>                "Maximum number of user DMA mappings per container (65535).");
>  
>  struct vfio_iommu {
> -     struct list_head        domain_list;
> -     struct list_head        iova_list;
> -     struct vfio_domain      *external_domain; /* domain for external user */
> -     struct mutex            lock;
> -     struct rb_root          dma_list;
> -     struct blocking_notifier_head notifier;
> -     unsigned int            dma_avail;
> -     uint64_t                pgsize_bitmap;
> -     bool                    v2;
> -     bool                    nesting;
> -     bool                    dirty_page_tracking;
> -     bool                    pinned_page_dirty_scope;
> +     struct list_head                domain_list;
> +     struct list_head                iova_list;
> +     struct vfio_domain              *external_domain; /* domain for
> +                                                          external user */
nit: put the comment before the field?
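i.e. something like:

	/* domain for external user */
	struct vfio_domain		*external_domain;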
> +     struct mutex                    lock;
> +     struct rb_root                  dma_list;
> +     struct blocking_notifier_head   notifier;
> +     unsigned int                    dma_avail;
> +     uint64_t                        pgsize_bitmap;
> +     bool                            v2;
> +     bool                            nesting;
> +     bool                            dirty_page_tracking;
> +     bool                            pinned_page_dirty_scope;
> +     struct iommu_nesting_info       *nesting_info;
>  };
>  
>  struct vfio_domain {
> @@ -130,6 +132,9 @@ struct vfio_regions {
>  #define IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu)      \
>                                       (!list_empty(&iommu->domain_list))
>  
> +#define IS_DOMAIN_IN_CONTAINER(iommu)        ((iommu->external_domain) || \
> +                                      (!list_empty(&iommu->domain_list)))
rename to something like CONTAINER_HAS_DOMAIN()?
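For instance (the name is just a suggestion, and the extra parentheses
around the macro argument are a good habit anyway):

#define CONTAINER_HAS_DOMAIN(iommu)	((iommu)->external_domain || \
					 !list_empty(&(iommu)->domain_list))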
> +
>  #define DIRTY_BITMAP_BYTES(n)        (ALIGN(n, BITS_PER_TYPE(u64)) / BITS_PER_BYTE)
>  
>  /*
> @@ -1929,6 +1934,13 @@ static void vfio_iommu_iova_insert_copy(struct vfio_iommu *iommu,
>  
>       list_splice_tail(iova_copy, iova);
>  }
> +
> +static void vfio_iommu_release_nesting_info(struct vfio_iommu *iommu)
> +{
> +     kfree(iommu->nesting_info);
> +     iommu->nesting_info = NULL;
> +}
> +
>  static int vfio_iommu_type1_attach_group(void *iommu_data,
>                                        struct iommu_group *iommu_group)
>  {
> @@ -1959,6 +1971,12 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
>               }
>       }
>  
> +     /* Nesting type container can include only one group */
> +     if (iommu->nesting && IS_DOMAIN_IN_CONTAINER(iommu)) {
> +             mutex_unlock(&iommu->lock);
> +             return -EINVAL;
> +     }
> +
>       group = kzalloc(sizeof(*group), GFP_KERNEL);
>       domain = kzalloc(sizeof(*domain), GFP_KERNEL);
>       if (!group || !domain) {
> @@ -2029,6 +2047,36 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
>       if (ret)
>               goto out_domain;
>  
> +     /* Nesting cap info is available only after attaching */
> +     if (iommu->nesting) {
> +             struct iommu_nesting_info tmp;
> +             struct iommu_nesting_info *info;
> +
> +             /* First get the size of vendor specific nesting info */
> +             ret = iommu_domain_get_attr(domain->domain,
> +                                         DOMAIN_ATTR_NESTING,
> +                                         &tmp);
> +             if (ret)
> +                     goto out_detach;
> +
> +             info = kzalloc(tmp.size, GFP_KERNEL);
nit: you may use iommu->nesting_info directly
> +             if (!info) {
> +                     ret = -ENOMEM;
> +                     goto out_detach;
> +             }
> +
> +             /* Now get the nesting info */
> +             info->size = tmp.size;
> +             ret = iommu_domain_get_attr(domain->domain,
> +                                         DOMAIN_ATTR_NESTING,
> +                                         info);
> +             if (ret) {
> +                     kfree(info);
... and set it back to NULL here if it fails (see the sketch just below)
> +                     goto out_detach;
> +             }
> +             iommu->nesting_info = info;
> +     }
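Both nits together, roughly (untested sketch):

	/* Nesting cap info is available only after attaching */
	if (iommu->nesting) {
		struct iommu_nesting_info tmp;

		/* First get the size of vendor specific nesting info */
		ret = iommu_domain_get_attr(domain->domain,
					    DOMAIN_ATTR_NESTING, &tmp);
		if (ret)
			goto out_detach;

		iommu->nesting_info = kzalloc(tmp.size, GFP_KERNEL);
		if (!iommu->nesting_info) {
			ret = -ENOMEM;
			goto out_detach;
		}

		/* Now get the nesting info */
		iommu->nesting_info->size = tmp.size;
		ret = iommu_domain_get_attr(domain->domain,
					    DOMAIN_ATTR_NESTING,
					    iommu->nesting_info);
		if (ret) {
			kfree(iommu->nesting_info);
			iommu->nesting_info = NULL;
			goto out_detach;
		}
	}

(with this shape the explicit kfree/NULL on failure could even be dropped,
since out_detach already calls vfio_iommu_release_nesting_info())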
> +
>       /* Get aperture info */
>       iommu_domain_get_attr(domain->domain, DOMAIN_ATTR_GEOMETRY, &geo);
>  
> @@ -2138,6 +2186,7 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
>       return 0;
>  
>  out_detach:
> +     vfio_iommu_release_nesting_info(iommu);
>       vfio_iommu_detach_group(domain, group);
>  out_domain:
>       iommu_domain_free(domain->domain);
> @@ -2338,6 +2387,8 @@ static void vfio_iommu_type1_detach_group(void *iommu_data,
>                                       vfio_iommu_unmap_unpin_all(iommu);
>                               else
>                                       vfio_iommu_unmap_unpin_reaccount(iommu);
> +
> +                             vfio_iommu_release_nesting_info(iommu);
>                       }
>                       iommu_domain_free(domain->domain);
>                       list_del(&domain->next);
> @@ -2546,6 +2597,30 @@ static int vfio_iommu_migration_build_caps(struct vfio_iommu *iommu,
>       return vfio_info_add_capability(caps, &cap_mig.header, sizeof(cap_mig));
>  }
>  
> +static int vfio_iommu_info_add_nesting_cap(struct vfio_iommu *iommu,
> +                                        struct vfio_info_cap *caps)
> +{
> +     struct vfio_info_cap_header *header;
> +     struct vfio_iommu_type1_info_cap_nesting *nesting_cap;
> +     size_t size;
> +
> +     size = sizeof(*nesting_cap) + iommu->nesting_info->size;
> +
> +     header = vfio_info_cap_add(caps, size,
> +                                VFIO_IOMMU_TYPE1_INFO_CAP_NESTING, 1);
> +     if (IS_ERR(header))
> +             return PTR_ERR(header);
> +
> +     nesting_cap = container_of(header,
> +                                struct vfio_iommu_type1_info_cap_nesting,
> +                                header);
> +
> +     memcpy(&nesting_cap->info, iommu->nesting_info,
> +            iommu->nesting_info->size);
> +
> +     return 0;
> +}
> +
>  static int vfio_iommu_type1_get_info(struct vfio_iommu *iommu,
>                                    unsigned long arg)
>  {
> @@ -2586,6 +2661,12 @@ static int vfio_iommu_type1_get_info(struct vfio_iommu *iommu,
>       if (ret)
>               return ret;
>  
> +     if (iommu->nesting_info) {
> +             ret = vfio_iommu_info_add_nesting_cap(iommu, &caps);
> +             if (ret)
> +                     return ret;
> +     }
> +
>       if (caps.size) {
>               info.flags |= VFIO_IOMMU_INFO_CAPS;
>  
> diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h
> index 9204705..3e3de9c 100644
> --- a/include/uapi/linux/vfio.h
> +++ b/include/uapi/linux/vfio.h
> @@ -1039,6 +1039,22 @@ struct vfio_iommu_type1_info_cap_migration {
>       __u64   max_dirty_bitmap_size;          /* in bytes */
>  };
>  
> +#define VFIO_IOMMU_TYPE1_INFO_CAP_NESTING  3

You may improve the documentation by taking the caps above (e.g. the
migration cap) as examples.
> +
> +/*
> + * Reporting nesting info to user space.
> + *
> + * @info:    the nesting info provided by IOMMU driver. Today
> + *           it is expected to be a struct iommu_nesting_info
> + *           data.
Is it expected to change?
> + */
> +struct vfio_iommu_type1_info_cap_nesting {
> +     struct  vfio_info_cap_header header;
> +     __u32   flags;
You may document the flags field too (a possible shape is sketched after
the struct).
> +     __u32   padding;
> +     __u8    info[];
> +};
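Just to illustrate, borrowing the style of the migration cap above (the
@info description assumes struct iommu_nesting_info ends up in
include/uapi/linux/iommu.h as proposed in this series; wording is up to
you):

/*
 * The nesting capability allows user space to discover whether the
 * container supports IOMMU nesting and, if so, the vendor specific
 * format of the first level/stage page table to be bound.
 *
 * @flags:	reserved for future extension; must be 0
 * @padding:	reserved; must be 0
 * @info:	the nesting info provided by the IOMMU driver, a
 *		struct iommu_nesting_info blob
 */
#define VFIO_IOMMU_TYPE1_INFO_CAP_NESTING  3

struct vfio_iommu_type1_info_cap_nesting {
	struct	vfio_info_cap_header header;
	__u32	flags;
	__u32	padding;
	__u8	info[];
};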
> +
>  #define VFIO_IOMMU_GET_INFO _IO(VFIO_TYPE, VFIO_BASE + 12)
>  
>  /**
> 
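Since user space is the consumer here, below is a rough (untested) sketch
of how it might locate the new cap through the standard
VFIO_IOMMU_GET_INFO capability chain; find_nesting_cap() is just an
illustrative helper name:

#include <linux/vfio.h>
#include <stdlib.h>
#include <sys/ioctl.h>

/*
 * Returns a pointer into the info buffer at the nesting cap, or NULL.
 * 'fd' is an open container fd with the nesting iommu type already set;
 * on success the caller frees *out.
 */
static struct vfio_iommu_type1_info_cap_nesting *
find_nesting_cap(int fd, struct vfio_iommu_type1_info **out)
{
	struct vfio_iommu_type1_info tmp = { .argsz = sizeof(tmp) };
	struct vfio_iommu_type1_info *info;
	struct vfio_info_cap_header *hdr;

	/* First call reports the required argsz including capabilities */
	if (ioctl(fd, VFIO_IOMMU_GET_INFO, &tmp))
		return NULL;

	info = calloc(1, tmp.argsz);
	if (!info)
		return NULL;
	info->argsz = tmp.argsz;

	if (ioctl(fd, VFIO_IOMMU_GET_INFO, info) ||
	    !(info->flags & VFIO_IOMMU_INFO_CAPS) || !info->cap_offset) {
		free(info);
		return NULL;
	}

	/* Walk the cap chain; 'next' is an offset from the buffer start */
	for (hdr = (void *)info + info->cap_offset; ;
	     hdr = (void *)info + hdr->next) {
		if (hdr->id == VFIO_IOMMU_TYPE1_INFO_CAP_NESTING) {
			*out = info;
			return (void *)hdr;
		}
		if (!hdr->next)
			break;
	}
	free(info);
	return NULL;
}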
Thanks

Eric
