On 2024/8/28 00:59, Nicolin Chen wrote:
Now a VIOMMU can wrap a shareable nested parent HWPT. So it can act in
place of that nested parent HWPT when allocating a nested HWPT.

Support that in the IOMMU_HWPT_ALLOC ioctl handler, and update its kdoc.

Also, associate the viommu with the nested HWPT being allocated.

It is still not quite clear to me what the vIOMMU object stands for. Here, it
is a wrapper of the s2 HWPT, IIUC. But per the cover letter, a vIOMMU object
can be instantiated per vIOMMU unit in the VM. Does that mean each vIOMMU of
the VM can only have one s2 HWPT?
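
To double-check my reading of the flow, below is a rough sketch of the
userspace call I would expect after this patch. It is only an illustration:
viommu_id comes from the vIOMMU allocation ioctl earlier in this series, and
the VT-d stage-1 data type is just a stand-in to show the uAPI shape, not
something this patch defines.

#include <sys/ioctl.h>
#include <stdint.h>
#include <err.h>
#include <linux/iommufd.h>

/* Sketch: allocate a user-managed nested HWPT with a vIOMMU as @pt_id */
static uint32_t alloc_nested_from_viommu(int iommufd, uint32_t dev_id,
					 uint32_t viommu_id, uint64_t s1_pgtbl)
{
	struct iommu_hwpt_vtd_s1 s1_data = {
		.pgtbl_addr = s1_pgtbl,	/* guest stage-1 page table (example) */
		.addr_width = 48,
	};
	struct iommu_hwpt_alloc cmd = {
		.size = sizeof(cmd),
		.dev_id = dev_id,
		.pt_id = viommu_id,	/* new: a vIOMMU instead of a parent HWPT */
		.data_type = IOMMU_HWPT_DATA_VTD_S1,	/* stand-in driver type */
		.data_len = sizeof(s1_data),
		.data_uptr = (uintptr_t)&s1_data,
	};

	if (ioctl(iommufd, IOMMU_HWPT_ALLOC, &cmd))
		err(1, "IOMMU_HWPT_ALLOC");
	return cmd.out_hwpt_id;
}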

Signed-off-by: Nicolin Chen <nicol...@nvidia.com>
---
  drivers/iommu/iommufd/hw_pagetable.c    | 24 ++++++++++++++++++++++--
  drivers/iommu/iommufd/iommufd_private.h |  1 +
  include/uapi/linux/iommufd.h            | 12 ++++++------
  3 files changed, 29 insertions(+), 8 deletions(-)

diff --git a/drivers/iommu/iommufd/hw_pagetable.c b/drivers/iommu/iommufd/hw_pagetable.c
index c21bb59c4022..06adbcc304bc 100644
--- a/drivers/iommu/iommufd/hw_pagetable.c
+++ b/drivers/iommu/iommufd/hw_pagetable.c
@@ -57,6 +57,9 @@ void iommufd_hwpt_nested_destroy(struct iommufd_object *obj)
        struct iommufd_hwpt_nested *hwpt_nested =
                container_of(obj, struct iommufd_hwpt_nested, common.obj);

        __iommufd_hwpt_destroy(&hwpt_nested->common);
+
+       if (hwpt_nested->viommu)
+               refcount_dec(&hwpt_nested->viommu->obj.users);
        refcount_dec(&hwpt_nested->parent->common.obj.users);
  }
@@ -213,6 +216,7 @@ iommufd_hwpt_paging_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas,
   */
  static struct iommufd_hwpt_nested *
  iommufd_hwpt_nested_alloc(struct iommufd_ctx *ictx,
+                         struct iommufd_viommu *viommu,
                          struct iommufd_hwpt_paging *parent,
                          struct iommufd_device *idev, u32 flags,
                          const struct iommu_user_data *user_data)
@@ -234,13 +238,16 @@ iommufd_hwpt_nested_alloc(struct iommufd_ctx *ictx,
                return ERR_CAST(hwpt_nested);
        hwpt = &hwpt_nested->common;

+       if (viommu)
+               refcount_inc(&viommu->obj.users);
+       hwpt_nested->viommu = viommu;
        refcount_inc(&parent->common.obj.users);
        hwpt_nested->parent = parent;

        hwpt->domain = ops->domain_alloc_user(idev->dev,
                                              flags & ~IOMMU_HWPT_FAULT_ID_VALID,
                                              parent->common.domain,
-                                             NULL, user_data);
+                                             viommu, user_data);
        if (IS_ERR(hwpt->domain)) {
                rc = PTR_ERR(hwpt->domain);
                hwpt->domain = NULL;
@@ -307,7 +314,7 @@ int iommufd_hwpt_alloc(struct iommufd_ucmd *ucmd)
                struct iommufd_hwpt_nested *hwpt_nested;

                hwpt_nested = iommufd_hwpt_nested_alloc(
-                       ucmd->ictx,
+                       ucmd->ictx, NULL,
                        container_of(pt_obj, struct iommufd_hwpt_paging,
                                     common.obj),
                        idev, cmd->flags, &user_data);
@@ -316,6 +323,19 @@ int iommufd_hwpt_alloc(struct iommufd_ucmd *ucmd)
                        goto out_unlock;
                }
                hwpt = &hwpt_nested->common;
+       } else if (pt_obj->type == IOMMUFD_OBJ_VIOMMU) {
+               struct iommufd_hwpt_nested *hwpt_nested;
+               struct iommufd_viommu *viommu;
+
+               viommu = container_of(pt_obj, struct iommufd_viommu, obj);
+               hwpt_nested = iommufd_hwpt_nested_alloc(
+                       ucmd->ictx, viommu, viommu->hwpt, idev,
+                       cmd->flags, &user_data);
+               if (IS_ERR(hwpt_nested)) {
+                       rc = PTR_ERR(hwpt_nested);
+                       goto out_unlock;
+               }
+               hwpt = &hwpt_nested->common;
        } else {
                rc = -EINVAL;
                goto out_put_pt;
diff --git a/drivers/iommu/iommufd/iommufd_private.h b/drivers/iommu/iommufd/iommufd_private.h
index 154f7ba5f45c..1f2a1c133b9a 100644
--- a/drivers/iommu/iommufd/iommufd_private.h
+++ b/drivers/iommu/iommufd/iommufd_private.h
@@ -313,6 +313,7 @@ struct iommufd_hwpt_paging {
  struct iommufd_hwpt_nested {
        struct iommufd_hw_pagetable common;
        struct iommufd_hwpt_paging *parent;
+       struct iommufd_viommu *viommu;
  };

  static inline bool hwpt_is_paging(struct iommufd_hw_pagetable *hwpt)
diff --git a/include/uapi/linux/iommufd.h b/include/uapi/linux/iommufd.h
index ac77903b5cc4..51ce6a019c34 100644
--- a/include/uapi/linux/iommufd.h
+++ b/include/uapi/linux/iommufd.h
@@ -430,7 +430,7 @@ enum iommu_hwpt_data_type {
   * @size: sizeof(struct iommu_hwpt_alloc)
   * @flags: Combination of enum iommufd_hwpt_alloc_flags
   * @dev_id: The device to allocate this HWPT for
- * @pt_id: The IOAS or HWPT to connect this HWPT to
+ * @pt_id: The IOAS or HWPT or VIOMMU to connect this HWPT to
   * @out_hwpt_id: The ID of the new HWPT
   * @__reserved: Must be 0
   * @data_type: One of enum iommu_hwpt_data_type
@@ -449,11 +449,11 @@ enum iommu_hwpt_data_type {
   * IOMMU_HWPT_DATA_NONE. The HWPT can be allocated as a parent HWPT for a
   * nesting configuration by passing IOMMU_HWPT_ALLOC_NEST_PARENT via @flags.
   *
- * A user-managed nested HWPT will be created from a given parent HWPT via
- * @pt_id, in which the parent HWPT must be allocated previously via the
- * same ioctl from a given IOAS (@pt_id). In this case, the @data_type
- * must be set to a pre-defined type corresponding to an I/O page table
- * type supported by the underlying IOMMU hardware.
+ * A user-managed nested HWPT will be created from a given VIOMMU (wrapping a
+ * parent HWPT) or a parent HWPT via @pt_id, in which the parent HWPT must be
+ * allocated previously via the same ioctl from a given IOAS (@pt_id). In this
+ * case, the @data_type must be set to a pre-defined type corresponding to an
+ * I/O page table type supported by the underlying IOMMU hardware.
   *
   * If the @data_type is set to IOMMU_HWPT_DATA_NONE, @data_len and
   * @data_uptr should be zero. Otherwise, both @data_len and @data_uptr
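
For completeness, my reading of the updated kdoc is that the setup chain
leading to the call I sketched above would be roughly the below. Again only
a sketch: the vIOMMU allocation ioctl and its field names are taken from
earlier in this series and may still change, and error handling is omitted.

/* IOAS -> nesting parent (stage-2) HWPT -> vIOMMU; iommufd/dev_id as above */
struct iommu_ioas_alloc ioas = { .size = sizeof(ioas) };

ioctl(iommufd, IOMMU_IOAS_ALLOC, &ioas);

struct iommu_hwpt_alloc s2 = {
	.size = sizeof(s2),
	.flags = IOMMU_HWPT_ALLOC_NEST_PARENT,	/* shareable nesting parent */
	.dev_id = dev_id,
	.pt_id = ioas.out_ioas_id,
	.data_type = IOMMU_HWPT_DATA_NONE,
};
ioctl(iommufd, IOMMU_HWPT_ALLOC, &s2);

struct iommu_viommu_alloc viommu = {	/* assumed layout, per this series */
	.size = sizeof(viommu),
	.type = IOMMU_VIOMMU_TYPE_DEFAULT,
	.dev_id = dev_id,
	.hwpt_id = s2.out_hwpt_id,	/* the stage-2 parent the vIOMMU wraps */
};
ioctl(iommufd, IOMMU_VIOMMU_ALLOC, &viommu);
/* viommu.out_viommu_id is then passed as @pt_id when allocating the nested HWPT */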

--
Regards,
Yi Liu
