On 2022/3/21 15:13, Tian, Kevin wrote:
From: Lu Baolu <baolu...@linux.intel.com>
Sent: Sunday, March 20, 2022 2:40 PM

Attaching an IOMMU domain to a PASID of a device is a generic operation
for modern IOMMU drivers which support PASID-granular DMA address
translation. Currently visible usage scenarios include (but are not limited to):

  - SVA
  - kernel DMA with PASID
  - hardware-assisted mediated devices

This adds a pair of common domain ops for this purpose and implements a
couple of wrapper helpers for in-kernel usage.

Signed-off-by: Lu Baolu <baolu...@linux.intel.com>
---
  include/linux/iommu.h | 22 ++++++++++++++++++++++
  drivers/iommu/iommu.c | 41 +++++++++++++++++++++++++++++++++++++++++
  2 files changed, 63 insertions(+)
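
For context, a minimal usage sketch of the two wrapper helpers from an
in-kernel caller's point of view (the caller, device and PASID value are
hypothetical and not part of this patch; error handling is trimmed):

/* Hypothetical in-kernel user of the new helpers; not part of this patch. */
static int example_do_pasid_dma(struct device *dev,
				struct iommu_domain *domain,
				ioasid_t pasid)
{
	int ret;

	/* Attach @domain to @pasid of @dev for PASID-granular translation. */
	ret = iommu_attach_device_pasid(domain, dev, pasid);
	if (ret)
		return ret;

	/* ... map IOVAs in @domain and issue DMA tagged with @pasid ... */

	iommu_detach_device_pasid(domain, dev, pasid);
	return 0;
}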

diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 3e179b853380..e51845b9a146 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -268,6 +268,8 @@ struct iommu_ops {
   * struct iommu_domain_ops - domain specific operations
   * @attach_dev: attach an iommu domain to a device
   * @detach_dev: detach an iommu domain from a device
+ * @attach_dev_pasid: attach an iommu domain to a pasid of device
+ * @detach_dev_pasid: detach an iommu domain from a pasid of device
   * @map: map a physically contiguous memory region to an iommu domain
   * @map_pages: map a physically contiguous set of pages of the same size to
   *             an iommu domain.
@@ -285,6 +287,10 @@ struct iommu_ops {
  struct iommu_domain_ops {
        int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
        void (*detach_dev)(struct iommu_domain *domain, struct device *dev);
+       int (*attach_dev_pasid)(struct iommu_domain *domain,
+                               struct device *dev, ioasid_t id);
+       void (*detach_dev_pasid)(struct iommu_domain *domain,
+                                struct device *dev, ioasid_t id);

        int (*map)(struct iommu_domain *domain, unsigned long iova,
                   phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
@@ -678,6 +684,11 @@ int iommu_group_claim_dma_owner(struct iommu_group *group, void *owner);
  void iommu_group_release_dma_owner(struct iommu_group *group);
  bool iommu_group_dma_owner_claimed(struct iommu_group *group);

+int iommu_attach_device_pasid(struct iommu_domain *domain,
+                             struct device *dev, ioasid_t pasid);
+void iommu_detach_device_pasid(struct iommu_domain *domain,
+                              struct device *dev, ioasid_t pasid);
+
  #else /* CONFIG_IOMMU_API */

  struct iommu_ops {};
@@ -1046,6 +1057,17 @@ static inline bool iommu_group_dma_owner_claimed(struct iommu_group *group)
  {
        return false;
  }
+
+static inline int iommu_attach_device_pasid(struct iommu_domain *domain,
+                                           struct device *dev, ioasid_t pasid)
+{
+       return -ENODEV;
+}
+
+static inline void iommu_detach_device_pasid(struct iommu_domain *domain,
+                                            struct device *dev, ioasid_t pasid)
+{
+}
  #endif /* CONFIG_IOMMU_API */

  /**
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 0c42ece25854..78c71ee15f36 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -3167,3 +3167,44 @@ bool iommu_group_dma_owner_claimed(struct iommu_group *group)
        return user;
  }
  EXPORT_SYMBOL_GPL(iommu_group_dma_owner_claimed);
+
+int iommu_attach_device_pasid(struct iommu_domain *domain,
+                             struct device *dev, ioasid_t pasid)
+{
+       struct iommu_group *group;
+       int ret = -EINVAL;
+
+       if (!domain->ops->attach_dev_pasid)
+               return -EINVAL;
+
+       group = iommu_group_get(dev);
+       if (!group)
+               return -ENODEV;
+
+       mutex_lock(&group->mutex);
+       if (iommu_group_device_count(group) != 1)
+               goto out_unlock;
We need to move the rationale for the above limitation from
iommu_sva_bind_device() to here:

        /*
         * To keep things simple, SVA currently doesn't support IOMMU groups
         * with more than one device. Existing SVA-capable systems are not
         * affected by the problems that required IOMMU groups (lack of ACS
         * isolation, device ID aliasing and other hardware issues).
         */
        if (iommu_group_device_count(group) != 1)
                goto out_unlock;

Yes. We need a comment around this code. But it applies not only to SVA
but to all PASID attachment features. I need more input to judge whether
this limitation is reasonable.
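
A sketch of how that comment could read once it is generalized beyond SVA
and placed next to the check in iommu_attach_device_pasid() (wording
adapted from iommu_sva_bind_device(); the exact wording is still to be
decided):

	mutex_lock(&group->mutex);
	/*
	 * To keep things simple, PASID attachment currently doesn't support
	 * IOMMU groups with more than one device. Existing SVA-capable
	 * systems are not affected by the problems that required IOMMU
	 * groups (lack of ACS isolation, device ID aliasing and other
	 * hardware issues).
	 */
	if (iommu_group_device_count(group) != 1)
		goto out_unlock;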


By the way, I didn't see any safeguard for the above assumption in the
device hotplug path when a device is added to a group which already has
SVA enabled...


Agreed.
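
For illustration only, one possible direction for such a safeguard
(entirely hypothetical; group_has_pasid_attachment() below does not exist
and is only meant to show the idea of rejecting hot-added devices in a
group that already has a PASID-granular attachment):

	/*
	 * Hypothetical check in the device hotplug path (e.g. when a new
	 * device is added to an existing group): refuse to grow a group
	 * that already has a domain attached to a PASID.
	 * group_has_pasid_attachment() is an invented name for illustration.
	 */
	mutex_lock(&group->mutex);
	if (group_has_pasid_attachment(group)) {
		mutex_unlock(&group->mutex);
		return -EBUSY;
	}
	mutex_unlock(&group->mutex);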

Best regards,
baolu
_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

Reply via email to