Re: [PATCH RESEND v5 4/5] iommu: Allow max opt DMA len be set for a group via sysfs

2022-04-07 Thread John Garry via iommu

On 07/04/2022 09:21, Leizhen (ThunderTown) wrote:



On 2022/4/4 19:27, John Garry wrote:

Add support to allow the maximum optimised DMA len be set for an IOMMU
group via sysfs.

This is much the same with the method to change the default domain type
for a group.

Signed-off-by: John Garry 
---
  .../ABI/testing/sysfs-kernel-iommu_groups | 16 +
  drivers/iommu/iommu.c | 59 ++-
  include/linux/iommu.h |  6 ++
  3 files changed, 79 insertions(+), 2 deletions(-)

diff --git a/Documentation/ABI/testing/sysfs-kernel-iommu_groups 
b/Documentation/ABI/testing/sysfs-kernel-iommu_groups
index b15af6a5bc08..ed6f72794f6c 100644
--- a/Documentation/ABI/testing/sysfs-kernel-iommu_groups
+++ b/Documentation/ABI/testing/sysfs-kernel-iommu_groups
@@ -63,3 +63,19 @@ Description: /sys/kernel/iommu_groups//type shows 
the type of default
system could lead to catastrophic effects (the users might
need to reboot the machine to get it to normal state). So, it's
expected that the users understand what they're doing.
+
+What:  /sys/kernel/iommu_groups//max_opt_dma_size
+Date:  Feb 2022
+KernelVersion: v5.18
+Contact:   iommu@lists.linux-foundation.org
+Description:   /sys/kernel/iommu_groups//max_opt_dma_size shows the
+   max optimised DMA size for the default IOMMU domain associated
+   with the group.
+   Each IOMMU domain has an IOVA domain. The IOVA domain caches
+   IOVAs up to a certain size as a performance optimisation.
+   This sysfs file allows the range of the IOVA domain caching be
+   set, such that larger than default IOVAs may be cached.
+   A value of 0 means that the default caching range is chosen.
+   A privileged user could request the kernel to change the range
+   by writing to this file. For this to happen, the same rules
+   and procedure apply as in changing the default domain type.
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 10bb10c2a210..7c7258f19bed 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -48,6 +48,7 @@ struct iommu_group {
struct iommu_domain *default_domain;
struct iommu_domain *domain;
struct list_head entry;
+   size_t max_opt_dma_size;
  };
  
  struct group_device {

@@ -89,6 +90,9 @@ static int iommu_create_device_direct_mappings(struct 
iommu_group *group,
  static struct iommu_group *iommu_group_get_for_dev(struct device *dev);
  static ssize_t iommu_group_store_type(struct iommu_group *group,
  const char *buf, size_t count);
+static ssize_t iommu_group_store_max_opt_dma_size(struct iommu_group *group,
+ const char *buf,
+ size_t count);
  
  #define IOMMU_GROUP_ATTR(_name, _mode, _show, _store)		\

  struct iommu_group_attribute iommu_group_attr_##_name =   \
@@ -571,6 +575,12 @@ static ssize_t iommu_group_show_type(struct iommu_group 
*group,
return strlen(type);
  }
  
+static ssize_t iommu_group_show_max_opt_dma_size(struct iommu_group *group,

+char *buf)
+{
+   return sprintf(buf, "%zu\n", group->max_opt_dma_size);
+}
+
  static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL);
  
  static IOMMU_GROUP_ATTR(reserved_regions, 0444,

@@ -579,6 +589,9 @@ static IOMMU_GROUP_ATTR(reserved_regions, 0444,
  static IOMMU_GROUP_ATTR(type, 0644, iommu_group_show_type,
iommu_group_store_type);
  
+static IOMMU_GROUP_ATTR(max_opt_dma_size, 0644, iommu_group_show_max_opt_dma_size,

+   iommu_group_store_max_opt_dma_size);
+
  static void iommu_group_release(struct kobject *kobj)
  {
struct iommu_group *group = to_iommu_group(kobj);
@@ -665,6 +678,10 @@ struct iommu_group *iommu_group_alloc(void)
if (ret)
return ERR_PTR(ret);
  
+	ret = iommu_group_create_file(group, &iommu_group_attr_max_opt_dma_size);

+   if (ret)
+   return ERR_PTR(ret);
+
pr_debug("Allocated group %d\n", group->id);
  
  	return group;

@@ -2087,6 +2104,11 @@ struct iommu_domain *iommu_get_dma_domain(struct device 
*dev)
return dev->iommu_group->default_domain;
  }
  
+size_t iommu_group_get_max_opt_dma_size(struct iommu_group *group)

+{
+   return group->max_opt_dma_size;
+}
+
  /*
   * IOMMU groups are really the natural working unit of the IOMMU, but
   * the IOMMU API works on domains and devices.  Bridge that gap by
@@ -2871,12 +2893,14 @@ EXPORT_SYMBOL_GPL(iommu_sva_get_pasid);
   * @prev_dev: The device in the group (this is used to make sure that the 
device
   * hasn't changed after the caller has called this function)
   * @type: The type of the new default domain that gets 

Re: [PATCH RESEND v5 4/5] iommu: Allow max opt DMA len be set for a group via sysfs

2022-04-07 Thread Leizhen (ThunderTown) via iommu



On 2022/4/4 19:27, John Garry wrote:
> Add support to allow the maximum optimised DMA len be set for an IOMMU
> group via sysfs.
> 
> This is much the same with the method to change the default domain type
> for a group.
> 
> Signed-off-by: John Garry 
> ---
>  .../ABI/testing/sysfs-kernel-iommu_groups | 16 +
>  drivers/iommu/iommu.c | 59 ++-
>  include/linux/iommu.h |  6 ++
>  3 files changed, 79 insertions(+), 2 deletions(-)
> 
> diff --git a/Documentation/ABI/testing/sysfs-kernel-iommu_groups 
> b/Documentation/ABI/testing/sysfs-kernel-iommu_groups
> index b15af6a5bc08..ed6f72794f6c 100644
> --- a/Documentation/ABI/testing/sysfs-kernel-iommu_groups
> +++ b/Documentation/ABI/testing/sysfs-kernel-iommu_groups
> @@ -63,3 +63,19 @@ Description:   /sys/kernel/iommu_groups//type 
> shows the type of default
>   system could lead to catastrophic effects (the users might
>   need to reboot the machine to get it to normal state). So, it's
>   expected that the users understand what they're doing.
> +
> +What:/sys/kernel/iommu_groups//max_opt_dma_size
> +Date:Feb 2022
> +KernelVersion:   v5.18
> +Contact: iommu@lists.linux-foundation.org
> +Description: /sys/kernel/iommu_groups//max_opt_dma_size shows the
> + max optimised DMA size for the default IOMMU domain associated
> + with the group.
> + Each IOMMU domain has an IOVA domain. The IOVA domain caches
> + IOVAs up to a certain size as a performance optimisation.
> + This sysfs file allows the range of the IOVA domain caching be
> + set, such that larger than default IOVAs may be cached.
> + A value of 0 means that the default caching range is chosen.
> + A privileged user could request the kernel to change the range
> + by writing to this file. For this to happen, the same rules
> + and procedure apply as in changing the default domain type.
> diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
> index 10bb10c2a210..7c7258f19bed 100644
> --- a/drivers/iommu/iommu.c
> +++ b/drivers/iommu/iommu.c
> @@ -48,6 +48,7 @@ struct iommu_group {
>   struct iommu_domain *default_domain;
>   struct iommu_domain *domain;
>   struct list_head entry;
> + size_t max_opt_dma_size;
>  };
>  
>  struct group_device {
> @@ -89,6 +90,9 @@ static int iommu_create_device_direct_mappings(struct 
> iommu_group *group,
>  static struct iommu_group *iommu_group_get_for_dev(struct device *dev);
>  static ssize_t iommu_group_store_type(struct iommu_group *group,
> const char *buf, size_t count);
> +static ssize_t iommu_group_store_max_opt_dma_size(struct iommu_group *group,
> +   const char *buf,
> +   size_t count);
>  
>  #define IOMMU_GROUP_ATTR(_name, _mode, _show, _store)\
>  struct iommu_group_attribute iommu_group_attr_##_name =  \
> @@ -571,6 +575,12 @@ static ssize_t iommu_group_show_type(struct iommu_group 
> *group,
>   return strlen(type);
>  }
>  
> +static ssize_t iommu_group_show_max_opt_dma_size(struct iommu_group *group,
> +  char *buf)
> +{
> + return sprintf(buf, "%zu\n", group->max_opt_dma_size);
> +}
> +
>  static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL);
>  
>  static IOMMU_GROUP_ATTR(reserved_regions, 0444,
> @@ -579,6 +589,9 @@ static IOMMU_GROUP_ATTR(reserved_regions, 0444,
>  static IOMMU_GROUP_ATTR(type, 0644, iommu_group_show_type,
>   iommu_group_store_type);
>  
> +static IOMMU_GROUP_ATTR(max_opt_dma_size, 0644, 
> iommu_group_show_max_opt_dma_size,
> + iommu_group_store_max_opt_dma_size);
> +
>  static void iommu_group_release(struct kobject *kobj)
>  {
>   struct iommu_group *group = to_iommu_group(kobj);
> @@ -665,6 +678,10 @@ struct iommu_group *iommu_group_alloc(void)
>   if (ret)
>   return ERR_PTR(ret);
>  
> + ret = iommu_group_create_file(group, &iommu_group_attr_max_opt_dma_size);
> + if (ret)
> + return ERR_PTR(ret);
> +
>   pr_debug("Allocated group %d\n", group->id);
>  
>   return group;
> @@ -2087,6 +2104,11 @@ struct iommu_domain *iommu_get_dma_domain(struct 
> device *dev)
>   return dev->iommu_group->default_domain;
>  }
>  
> +size_t iommu_group_get_max_opt_dma_size(struct iommu_group *group)
> +{
> + return group->max_opt_dma_size;
> +}
> +
>  /*
>   * IOMMU groups are really the natural working unit of the IOMMU, but
>   * the IOMMU API works on domains and devices.  Bridge that gap by
> @@ -2871,12 +2893,14 @@ EXPORT_SYMBOL_GPL(iommu_sva_get_pasid);
>   * @prev_dev: The device in the group (this is used to make sure that the 
> device
>   *

[PATCH RESEND v5 4/5] iommu: Allow max opt DMA len be set for a group via sysfs

2022-04-04 Thread John Garry via iommu
Add support to allow the maximum optimised DMA len be set for an IOMMU
group via sysfs.

This is much the same with the method to change the default domain type
for a group.

Signed-off-by: John Garry 
---
 .../ABI/testing/sysfs-kernel-iommu_groups | 16 +
 drivers/iommu/iommu.c | 59 ++-
 include/linux/iommu.h |  6 ++
 3 files changed, 79 insertions(+), 2 deletions(-)

diff --git a/Documentation/ABI/testing/sysfs-kernel-iommu_groups 
b/Documentation/ABI/testing/sysfs-kernel-iommu_groups
index b15af6a5bc08..ed6f72794f6c 100644
--- a/Documentation/ABI/testing/sysfs-kernel-iommu_groups
+++ b/Documentation/ABI/testing/sysfs-kernel-iommu_groups
@@ -63,3 +63,19 @@ Description: /sys/kernel/iommu_groups//type shows 
the type of default
system could lead to catastrophic effects (the users might
need to reboot the machine to get it to normal state). So, it's
expected that the users understand what they're doing.
+
+What:  /sys/kernel/iommu_groups//max_opt_dma_size
+Date:  Feb 2022
+KernelVersion: v5.18
+Contact:   iommu@lists.linux-foundation.org
+Description:   /sys/kernel/iommu_groups//max_opt_dma_size shows the
+   max optimised DMA size for the default IOMMU domain associated
+   with the group.
+   Each IOMMU domain has an IOVA domain. The IOVA domain caches
+   IOVAs up to a certain size as a performance optimisation.
+   This sysfs file allows the range of the IOVA domain caching be
+   set, such that larger than default IOVAs may be cached.
+   A value of 0 means that the default caching range is chosen.
+   A privileged user could request the kernel to change the range
+   by writing to this file. For this to happen, the same rules
+   and procedure apply as in changing the default domain type.
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 10bb10c2a210..7c7258f19bed 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -48,6 +48,7 @@ struct iommu_group {
struct iommu_domain *default_domain;
struct iommu_domain *domain;
struct list_head entry;
+   size_t max_opt_dma_size;
 };
 
 struct group_device {
@@ -89,6 +90,9 @@ static int iommu_create_device_direct_mappings(struct 
iommu_group *group,
 static struct iommu_group *iommu_group_get_for_dev(struct device *dev);
 static ssize_t iommu_group_store_type(struct iommu_group *group,
  const char *buf, size_t count);
+static ssize_t iommu_group_store_max_opt_dma_size(struct iommu_group *group,
+ const char *buf,
+ size_t count);
 
 #define IOMMU_GROUP_ATTR(_name, _mode, _show, _store)  \
 struct iommu_group_attribute iommu_group_attr_##_name =\
@@ -571,6 +575,12 @@ static ssize_t iommu_group_show_type(struct iommu_group 
*group,
return strlen(type);
 }
 
+static ssize_t iommu_group_show_max_opt_dma_size(struct iommu_group *group,
+char *buf)
+{
+   return sprintf(buf, "%zu\n", group->max_opt_dma_size);
+}
+
 static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL);
 
 static IOMMU_GROUP_ATTR(reserved_regions, 0444,
@@ -579,6 +589,9 @@ static IOMMU_GROUP_ATTR(reserved_regions, 0444,
 static IOMMU_GROUP_ATTR(type, 0644, iommu_group_show_type,
iommu_group_store_type);
 
+static IOMMU_GROUP_ATTR(max_opt_dma_size, 0644, 
iommu_group_show_max_opt_dma_size,
+   iommu_group_store_max_opt_dma_size);
+
 static void iommu_group_release(struct kobject *kobj)
 {
struct iommu_group *group = to_iommu_group(kobj);
@@ -665,6 +678,10 @@ struct iommu_group *iommu_group_alloc(void)
if (ret)
return ERR_PTR(ret);
 
+   ret = iommu_group_create_file(group, &iommu_group_attr_max_opt_dma_size);
+   if (ret)
+   return ERR_PTR(ret);
+
pr_debug("Allocated group %d\n", group->id);
 
return group;
@@ -2087,6 +2104,11 @@ struct iommu_domain *iommu_get_dma_domain(struct device 
*dev)
return dev->iommu_group->default_domain;
 }
 
+size_t iommu_group_get_max_opt_dma_size(struct iommu_group *group)
+{
+   return group->max_opt_dma_size;
+}
+
 /*
  * IOMMU groups are really the natural working unit of the IOMMU, but
  * the IOMMU API works on domains and devices.  Bridge that gap by
@@ -2871,12 +2893,14 @@ EXPORT_SYMBOL_GPL(iommu_sva_get_pasid);
  * @prev_dev: The device in the group (this is used to make sure that the 
device
  *  hasn't changed after the caller has called this function)
  * @type: The type of the new default domain that gets associated with the 
group
+ * @max_opt_dma_size: Set the IOMMU group max_opt_dma_size if non-zero
  *
  *