> From: Jan Beulich <jbeul...@suse.com>
> Sent: Tuesday, August 24, 2021 10:19 PM
> 
> Or really, in the case of ->map_page(), accommodate it in the existing
> "flags" parameter. All call sites will pass 0 for now.
> 
> Signed-off-by: Jan Beulich <jbeul...@suse.com>

Reviewed-by: Kevin Tian <kevin.t...@intel.com>
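
One remark on the ->map_page() side, mainly to spell out the intended
use (purely illustrative - nothing in this patch passes a non-zero
order yet): once superpage mappings are actually wired up, a caller
would presumably build its flags along the lines of

    /* hypothetical future caller; order 9 (2M on x86) chosen for illustration */
    unsigned int flags = IOMMUF_order(9) | IOMMUF_readable | IOMMUF_writable;

with the ASSERT(!IOMMUF_order(flags)) added to iommu_map() below
guarding against such callers until the implementations can actually
handle them.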

> 
> --- a/xen/drivers/passthrough/amd/iommu.h
> +++ b/xen/drivers/passthrough/amd/iommu.h
> @@ -225,6 +225,7 @@ int __must_check amd_iommu_map_page(stru
>                                      mfn_t mfn, unsigned int flags,
>                                      unsigned int *flush_flags);
>  int __must_check amd_iommu_unmap_page(struct domain *d, dfn_t dfn,
> +                                      unsigned int order,
>                                        unsigned int *flush_flags);
>  int __must_check amd_iommu_alloc_root(struct domain *d);
>  int amd_iommu_reserve_domain_unity_map(struct domain *domain,
> --- a/xen/drivers/passthrough/amd/iommu_map.c
> +++ b/xen/drivers/passthrough/amd/iommu_map.c
> @@ -328,7 +328,7 @@ int amd_iommu_map_page(struct domain *d,
>      return 0;
>  }
> 
> -int amd_iommu_unmap_page(struct domain *d, dfn_t dfn,
> +int amd_iommu_unmap_page(struct domain *d, dfn_t dfn, unsigned int order,
>                           unsigned int *flush_flags)
>  {
>      unsigned long pt_mfn = 0;
> --- a/xen/drivers/passthrough/arm/iommu_helpers.c
> +++ b/xen/drivers/passthrough/arm/iommu_helpers.c
> @@ -57,11 +57,13 @@ int __must_check arm_iommu_map_page(stru
>       * The function guest_physmap_add_entry replaces the current mapping
>       * if there is already one...
>       */
> -    return guest_physmap_add_entry(d, _gfn(dfn_x(dfn)), _mfn(dfn_x(dfn)), 0, t);
> +    return guest_physmap_add_entry(d, _gfn(dfn_x(dfn)), _mfn(dfn_x(dfn)),
> +                                   IOMMUF_order(flags), t);
>  }
> 
>  /* Should only be used if P2M Table is shared between the CPU and the IOMMU. */
>  int __must_check arm_iommu_unmap_page(struct domain *d, dfn_t dfn,
> +                                      unsigned int order,
>                                        unsigned int *flush_flags)
>  {
>      /*
> @@ -71,7 +73,8 @@ int __must_check arm_iommu_unmap_page(st
>      if ( !is_domain_direct_mapped(d) )
>          return -EINVAL;
> 
> -    return guest_physmap_remove_page(d, _gfn(dfn_x(dfn)), _mfn(dfn_x(dfn)), 0);
> +    return guest_physmap_remove_page(d, _gfn(dfn_x(dfn)), _mfn(dfn_x(dfn)),
> +                                     order);
>  }
> 
>  /*
> --- a/xen/drivers/passthrough/iommu.c
> +++ b/xen/drivers/passthrough/iommu.c
> @@ -271,6 +271,8 @@ int iommu_map(struct domain *d, dfn_t df
>      if ( !is_iommu_enabled(d) )
>          return 0;
> 
> +    ASSERT(!IOMMUF_order(flags));
> +
>      for ( i = 0; i < page_count; i++ )
>      {
>          rc = iommu_call(hd->platform_ops, map_page, d, dfn_add(dfn, i),
> @@ -288,7 +290,7 @@ int iommu_map(struct domain *d, dfn_t df
>          while ( i-- )
>              /* if statement to satisfy __must_check */
>              if ( iommu_call(hd->platform_ops, unmap_page, d, dfn_add(dfn, i),
> -                            flush_flags) )
> +                            0, flush_flags) )
>                  continue;
> 
>          if ( !is_hardware_domain(d) )
> @@ -333,7 +335,7 @@ int iommu_unmap(struct domain *d, dfn_t
>      for ( i = 0; i < page_count; i++ )
>      {
>          int err = iommu_call(hd->platform_ops, unmap_page, d, dfn_add(dfn, i),
> -                             flush_flags);
> +                             0, flush_flags);
> 
>          if ( likely(!err) )
>              continue;
> --- a/xen/drivers/passthrough/vtd/iommu.c
> +++ b/xen/drivers/passthrough/vtd/iommu.c
> @@ -1932,6 +1932,7 @@ static int __must_check intel_iommu_map_
>  }
> 
>  static int __must_check intel_iommu_unmap_page(struct domain *d, dfn_t dfn,
> +                                               unsigned int order,
>                                                 unsigned int *flush_flags)
>  {
>      /* Do nothing if VT-d shares EPT page table */
> --- a/xen/include/asm-arm/iommu.h
> +++ b/xen/include/asm-arm/iommu.h
> @@ -31,6 +31,7 @@ int __must_check arm_iommu_map_page(stru
>                                      unsigned int flags,
>                                      unsigned int *flush_flags);
>  int __must_check arm_iommu_unmap_page(struct domain *d, dfn_t dfn,
> +                                      unsigned int order,
>                                        unsigned int *flush_flags);
> 
>  #endif /* __ARCH_ARM_IOMMU_H__ */
> --- a/xen/include/xen/iommu.h
> +++ b/xen/include/xen/iommu.h
> @@ -127,9 +127,10 @@ void arch_iommu_hwdom_init(struct domain
>   * The following flags are passed to map operations and passed by lookup
>   * operations.
>   */
> -#define _IOMMUF_readable 0
> +#define IOMMUF_order(n)  ((n) & 0x3f)
> +#define _IOMMUF_readable 6
>  #define IOMMUF_readable  (1u<<_IOMMUF_readable)
> -#define _IOMMUF_writable 1
> +#define _IOMMUF_writable 7
>  #define IOMMUF_writable  (1u<<_IOMMUF_writable)
> 
>  /*
> @@ -255,6 +256,7 @@ struct iommu_ops {
>                                   unsigned int flags,
>                                   unsigned int *flush_flags);
>      int __must_check (*unmap_page)(struct domain *d, dfn_t dfn,
> +                                   unsigned int order,
>                                     unsigned int *flush_flags);
>      int __must_check (*lookup_page)(struct domain *d, dfn_t dfn, mfn_t *mfn,
>                                      unsigned int *flags);
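
Also, just to confirm my reading of the IOMMUF_* renumbering in
xen/include/xen/iommu.h: the order now occupies bits 0-5 and the
permission bits move up, i.e.

    IOMMUF_order(n)  == (n) & 0x3f   /* bits 0-5, orders up to 63 */
    IOMMUF_readable  == 1u << 6      /* 0x40 */
    IOMMUF_writable  == 1u << 7      /* 0x80 */

so existing users are unaffected as long as they only ever use the
symbolic IOMMUF_readable/IOMMUF_writable names and leave the low six
bits clear, which the new ASSERT() in iommu_map() enforces for the
generic layer.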
