> From: Jason Gunthorpe <j...@nvidia.com>
> Sent: Thursday, May 5, 2022 3:09 AM
> 
> Once the group enters 'owned' mode it can never be assigned back to the
> default_domain or to a NULL domain. It must always be actively assigned to

It's worth pointing out that a NULL domain does not always translate to blocked
DMA on all platforms. That was a wrong assumption before this patch.
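
For example (purely hypothetical driver, names made up just to illustrate the
point): a detach_dev() implementation is free to put the device back into an
identity/bypass mapping for the platform DMA ops, which leaves DMA working
rather than blocked:

	static void foo_iommu_detach_dev(struct iommu_domain *domain,
					 struct device *dev)
	{
		/* hypothetical: detach reverts to bypass, so the device
		 * can still DMA to all of memory afterwards
		 */
		foo_iommu_set_bypass(dev);
	}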

> a current domain. If the caller hasn't provided a domain then the core
> must provide an explicit DMA blocking domain that has no DMA map.
> 
> Lazily create a group-global blocking DMA domain when
> iommu_group_claim_dma_owner is first called and immediately assign the
> group to it. This ensures that DMA is immediately fully isolated on all
> IOMMU drivers.
> 
> If the user attaches/detaches while owned then detach will set the group
> back to the blocking domain.
> 
> Slightly reorganize the call chains so that
> __iommu_group_attach_core_domain() is the function that removes any caller
> configured domain and sets the domains back a core owned domain with an
> appropriate lifetime.
> 
> __iommu_group_attach_domain() is the worker function that can change the
> domain assigned to a group to any target domain, including NULL.
> 
> Add comments clarifying how the NULL vs detach_dev vs default_domain works
> based on Robin's remarks.
> 
> This fixes an oops with VFIO and SMMUv3 because VFIO will call
> iommu_detach_group() and then immediately iommu_domain_free(), but
> SMMUv3 has no way to know that the domain it is holding a pointer to
> has been freed. Now the iommu_detach_group() will assign the blocking
> domain and SMMUv3 will no longer hold a stale domain reference.

Overall I like what this patch does. Just some nits below.

> 
> Fixes: 1ea2a07a532b ("iommu: Add DMA ownership management interfaces")
> Reported-by: Qian Cai <quic_qian...@quicinc.com>
> Tested-by: Baolu Lu <baolu...@linux.intel.com>
> Tested-by: Nicolin Chen <nicol...@nvidia.com>
> Co-developed-by: Robin Murphy <robin.mur...@arm.com>
> Signed-off-by: Robin Murphy <robin.mur...@arm.com>
> Signed-off-by: Jason Gunthorpe <j...@nvidia.com>
> ---
>  drivers/iommu/iommu.c | 122 ++++++++++++++++++++++++++++++------------
>  1 file changed, 87 insertions(+), 35 deletions(-)
> 
> Joerg, this should address the issue, Nicolin reproduced the original issue
> and verified this fix on ARM SMMUv3.
> 
> v2:
>  - Remove redundant detach_dev ops check in __iommu_detach_device and
>    make the added WARN_ON fail instead
>  - Check for blocking_domain in __iommu_attach_group() so VFIO can
>    actually attach a new group
>  - Update comments and spelling
>  - Fix missed change to new_domain in iommu_group_do_detach_device()
> v1: https://lore.kernel.org/r/0-v1-6e9d2d0a759d+11b-iommu_dma_block_...@nvidia.com
> 
> Thanks,
> Jason
> 
> diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
> index 0c42ece2585406..c1bdec807ea381 100644
> --- a/drivers/iommu/iommu.c
> +++ b/drivers/iommu/iommu.c
> @@ -44,6 +44,7 @@ struct iommu_group {
>       char *name;
>       int id;
>       struct iommu_domain *default_domain;
> +     struct iommu_domain *blocking_domain;
>       struct iommu_domain *domain;
>       struct list_head entry;
>       unsigned int owner_cnt;
> @@ -82,8 +83,7 @@ static int __iommu_attach_device(struct iommu_domain *domain,
>                                struct device *dev);
>  static int __iommu_attach_group(struct iommu_domain *domain,
>                               struct iommu_group *group);
> -static void __iommu_detach_group(struct iommu_domain *domain,
> -                              struct iommu_group *group);
> +static void __iommu_group_attach_core_domain(struct iommu_group *group);
>  static int iommu_create_device_direct_mappings(struct iommu_group *group,
>                                              struct device *dev);
>  static struct iommu_group *iommu_group_get_for_dev(struct device *dev);
> @@ -596,6 +596,8 @@ static void iommu_group_release(struct kobject *kobj)
> 
>       if (group->default_domain)
>               iommu_domain_free(group->default_domain);
> +     if (group->blocking_domain)
> +             iommu_domain_free(group->blocking_domain);
> 
>       kfree(group->name);
>       kfree(group);
> @@ -1963,9 +1965,6 @@ static void __iommu_detach_device(struct iommu_domain *domain,
>       if (iommu_is_attach_deferred(dev))
>               return;
> 
> -     if (unlikely(domain->ops->detach_dev == NULL))
> -             return;
> -
>       domain->ops->detach_dev(domain, dev);
>       trace_detach_device_from_domain(dev);
>  }
> @@ -1979,12 +1978,10 @@ void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
>               return;
> 
>       mutex_lock(&group->mutex);
> -     if (iommu_group_device_count(group) != 1) {
> -             WARN_ON(1);
> +     if (WARN_ON(domain != group->domain) ||
> +         WARN_ON(iommu_group_device_count(group) != 1))
>               goto out_unlock;
> -     }
> -
> -     __iommu_detach_group(domain, group);
> +     __iommu_group_attach_core_domain(group);
> 
>  out_unlock:
>       mutex_unlock(&group->mutex);
> @@ -2040,7 +2037,8 @@ static int __iommu_attach_group(struct iommu_domain *domain,
>  {
>       int ret;
> 
> -     if (group->domain && group->domain != group->default_domain)
> +     if (group->domain && group->domain != group->default_domain &&
> +         group->domain != group->blocking_domain)
>               return -EBUSY;
> 
>       ret = __iommu_group_for_each_dev(group, domain,

I suppose this could also be replaced by __iommu_group_attach_domain()?
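
Something like this, perhaps (just a sketch on top of this patch, assuming
__iommu_group_attach_domain() keeps the signature introduced here):

	static int __iommu_attach_group(struct iommu_domain *domain,
					struct iommu_group *group)
	{
		/* Only a core-owned domain may be replaced by the caller */
		if (group->domain && group->domain != group->default_domain &&
		    group->domain != group->blocking_domain)
			return -EBUSY;

		return __iommu_group_attach_domain(group, domain);
	}

That would keep all the group->domain updates in one place.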

> @@ -2072,38 +2070,68 @@ static int iommu_group_do_detach_device(struct device *dev, void *data)
>       return 0;
>  }
> 
> -static void __iommu_detach_group(struct iommu_domain *domain,
> -                              struct iommu_group *group)
> +static int __iommu_group_attach_domain(struct iommu_group *group,
> +                                    struct iommu_domain *new_domain)
>  {
>       int ret;
> 
> +     if (group->domain == new_domain)
> +             return 0;
> +
>       /*
> -      * If the group has been claimed already, do not re-attach the default
> -      * domain.
> +      * New drivers should support default domains and so the detach_dev() op
> +      * will never be called. Otherwise the NULL domain indicates the
> +      * translation for the group should be set so it will work with the

Should this say the translation should be 'blocked'?

> +      * platform DMA ops.

I didn't get the meaning of the last sentence.

>        */
> -     if (!group->default_domain || group->owner) {
> -             __iommu_group_for_each_dev(group, domain,
> +     if (!new_domain) {
> +             if (WARN_ON(!group->domain->ops->detach_dev))
> +                     return -EINVAL;
> +             __iommu_group_for_each_dev(group, group->domain,
>                                          iommu_group_do_detach_device);
>               group->domain = NULL;
> -             return;
> +             return 0;
>       }
> 
> -     if (group->domain == group->default_domain)
> -             return;
> -
> -     /* Detach by re-attaching to the default domain */
> -     ret = __iommu_group_for_each_dev(group, group->default_domain,
> +     /*
> +      * Changing the domain is done by calling attach on the new domain.
> +      * Drivers should implement this so that DMA is always translated by

What does 'this' refer to here?

> +      * either the new, old, or a blocking domain. DMA should never become

Isn't the blocking domain passed in as the new domain?

> +      * untranslated.
> +      *
> +      * Note that this is called in error unwind paths, attaching to a
> +      * domain that has already been attached cannot fail.

This is called in the normal path. Where does the error unwind happen?

> +      */
> +     ret = __iommu_group_for_each_dev(group, new_domain,
>                                        iommu_group_do_attach_device);
> -     if (ret != 0)
> -             WARN_ON(1);
> +     if (ret)
> +             return ret;
> +     group->domain = new_domain;
> +     return 0;
> +}
> +
> +/*
> + * Put the group's domain back to the appropriate core-owned domain - either the
> + * standard kernel-mode DMA configuration or an all-DMA-blocked domain.
> + */
> +static void __iommu_group_attach_core_domain(struct iommu_group *group)
> +{
> +     struct iommu_domain *new_domain;
> +     int ret;
> +
> +     if (group->owner)
> +             new_domain = group->blocking_domain;
>       else
> -             group->domain = group->default_domain;
> +             new_domain = group->default_domain;
> +
> +     ret = __iommu_group_attach_domain(group, new_domain);
> +     WARN(ret, "iommu driver failed to attach the default/blocking domain");
>  }
> 
>  void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group)
>  {
>       mutex_lock(&group->mutex);
> -     __iommu_detach_group(domain, group);
> +     __iommu_group_attach_core_domain(group);
>       mutex_unlock(&group->mutex);
>  }
>  EXPORT_SYMBOL_GPL(iommu_detach_group);
> @@ -3088,6 +3116,29 @@ void iommu_device_unuse_default_domain(struct device *dev)
>       iommu_group_put(group);
>  }
> 
> +static int __iommu_group_alloc_blocking_domain(struct iommu_group *group)
> +{
> +     struct group_device *dev =
> +             list_first_entry(&group->devices, struct group_device, list);
> +
> +     if (group->blocking_domain)
> +             return 0;
> +
> +     group->blocking_domain =
> +             __iommu_domain_alloc(dev->dev->bus, IOMMU_DOMAIN_BLOCKED);
> +     if (!group->blocking_domain) {
> +             /*
> +              * For drivers that do not yet understand IOMMU_DOMAIN_BLOCKED
> +              * create an empty domain instead.
> +              */
> +             group->blocking_domain = __iommu_domain_alloc(
> +                     dev->dev->bus, IOMMU_DOMAIN_UNMANAGED);
> +             if (!group->blocking_domain)
> +                     return -EINVAL;
> +     }
> +     return 0;
> +}
> +
>  /**
>   * iommu_group_claim_dma_owner() - Set DMA ownership of a group
>   * @group: The group.
> @@ -3111,9 +3162,15 @@ int iommu_group_claim_dma_owner(struct iommu_group *group, void *owner)
>                       goto unlock_out;
>               }
> 
> +             ret = __iommu_group_alloc_blocking_domain(group);
> +             if (ret)
> +                     goto unlock_out;
> +
> +             ret = __iommu_group_attach_domain(group,
> +                                               group->blocking_domain);
> +             if (ret)
> +                     goto unlock_out;
>               group->owner = owner;

This could use __iommu_group_attach_core_domain() instead, if it were called
after setting group->owner.
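
I.e. roughly (a sketch only; with __iommu_group_attach_core_domain() staying
void as in this patch, an attach failure here would only WARN instead of being
returned to the caller):

		ret = __iommu_group_alloc_blocking_domain(group);
		if (ret)
			goto unlock_out;

		group->owner = owner;
		__iommu_group_attach_core_domain(group);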

> -             if (group->domain)
> -                     __iommu_detach_group(group->domain, group);
>       }
> 
>       group->owner_cnt++;
> @@ -3137,13 +3194,8 @@ void iommu_group_release_dma_owner(struct iommu_group *group)
>               goto unlock_out;
> 
>       group->owner_cnt = 0;
> -     /*
> -      * The UNMANAGED domain should be detached before all USER
> -      * owners have been released.
> -      */
> -     if (!WARN_ON(group->domain) && group->default_domain)
> -             __iommu_attach_group(group->default_domain, group);
>       group->owner = NULL;
> +     __iommu_group_attach_core_domain(group);
>  unlock_out:
>       mutex_unlock(&group->mutex);
>  }
> 