Re: [PATCH 5/7] iommu/dma: add support for non-strict mode

2018-06-04 Thread Leizhen (ThunderTown)



On 2018/6/2 1:51, kbuild test robot wrote:
> Hi Zhen,
> 
> Thank you for the patch! Perhaps something to improve:
> 
> [auto build test WARNING on linus/master]
> [also build test WARNING on v4.17-rc7 next-20180601]
> [cannot apply to iommu/next]
> [if your patch is applied to the wrong git tree, please drop us a note to 
> help improve the system]
> 
> url:
> https://github.com/0day-ci/linux/commits/Zhen-Lei/add-non-strict-mode-support-for-arm-smmu-v3/20180602-000418
> config: x86_64-randconfig-x008-201821 (attached as .config)
> compiler: gcc-7 (Debian 7.3.0-16) 7.3.0
> reproduce:
> # save the attached .config to linux build tree
> make ARCH=x86_64 
> 
> All warnings (new ones prefixed by >>):
> 
>drivers//iommu/amd_iommu.c: In function 'amd_iommu_capable':
>>> drivers//iommu/amd_iommu.c:3053:2: warning: enumeration value 
>>> 'IOMMU_CAP_NON_STRICT' not handled in switch [-Wswitch]
>  switch (cap) {
>  ^~
> 
> vim +/IOMMU_CAP_NON_STRICT +3053 drivers//iommu/amd_iommu.c
> 
> 645c4c8d arch/x86/kernel/amd_iommu.c Joerg Roedel 2008-12-02  3050  
> ab636481 drivers/iommu/amd_iommu.c   Joerg Roedel 2014-09-05  3051  static 
> bool amd_iommu_capable(enum iommu_cap cap)
> dbb9fd86 arch/x86/kernel/amd_iommu.c Sheng Yang   2009-03-18  3052  {
> 80a506b8 arch/x86/kernel/amd_iommu.c Joerg Roedel 2010-07-27 @3053switch 
> (cap) {
> 80a506b8 arch/x86/kernel/amd_iommu.c Joerg Roedel 2010-07-27  3054case 
> IOMMU_CAP_CACHE_COHERENCY:
> ab636481 drivers/iommu/amd_iommu.c   Joerg Roedel 2014-09-05  3055
> return true;
> bdddadcb drivers/iommu/amd_iommu.c   Joerg Roedel 2012-07-02  3056case 
> IOMMU_CAP_INTR_REMAP:
> ab636481 drivers/iommu/amd_iommu.c   Joerg Roedel 2014-09-05  3057
> return (irq_remapping_enabled == 1);
> cfdeec22 drivers/iommu/amd_iommu.c   Will Deacon  2014-10-27  3058case 
> IOMMU_CAP_NOEXEC:
It seems that it's better to change this to 'default'.

> cfdeec22 drivers/iommu/amd_iommu.c   Will Deacon  2014-10-27  3059
> return false;
> 80a506b8 arch/x86/kernel/amd_iommu.c Joerg Roedel 2010-07-27  3060}
> 80a506b8 arch/x86/kernel/amd_iommu.c Joerg Roedel 2010-07-27  3061  
> ab636481 drivers/iommu/amd_iommu.c   Joerg Roedel 2014-09-05  3062return 
> false;
> dbb9fd86 arch/x86/kernel/amd_iommu.c Sheng Yang   2009-03-18  3063  }
> dbb9fd86 arch/x86/kernel/amd_iommu.c Sheng Yang   2009-03-18  3064  
> 
> :: The code at line 3053 was first introduced by commit
> :: 80a506b8fdcfa868bb53eb740f928217d0966fc1 x86/amd-iommu: Export 
> cache-coherency capability
> 
> :: TO: Joerg Roedel 
> :: CC: Joerg Roedel 
> 
> ---
> 0-DAY kernel test infrastructureOpen Source Technology Center
> https://lists.01.org/pipermail/kbuild-all   Intel Corporation
> 

-- 
Thanks!
Best Regards

___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


Re: [PATCH 5/7] iommu/dma: add support for non-strict mode

2018-06-04 Thread Leizhen (ThunderTown)



On 2018/5/31 21:04, Robin Murphy wrote:
> On 31/05/18 08:42, Zhen Lei wrote:
>> 1. Save the related domain pointer in struct iommu_dma_cookie, make iovad
>> capable call domain->ops->flush_iotlb_all to flush TLB.
>> 2. Define a new iommu capability: IOMMU_CAP_NON_STRICT, which is used to indicate
>> that the iommu domain support non-strict mode.
>> 3. During the iommu domain initialization phase, call capable() to check
>> whether it supports non-strict mode. If so, call init_iova_flush_queue
>> to register iovad->flush_cb callback.
>> 4. All unmap(contains iova-free) APIs will finally invoke __iommu_dma_unmap
>> -->iommu_dma_free_iova. Use iovad->flush_cb to check whether its related
>> iommu support non-strict mode or not, and call IOMMU_DOMAIN_IS_STRICT to
>> make sure the IOMMU_DOMAIN_UNMANAGED domain always follow strict mode.
> 
> Once again, this is a whole load of complexity for a property which could 
> just be statically encoded at allocation, e.g. in the cookie type.
That's right. Passing the domain to the static function iommu_dma_free_iova would be 
better.

> 
>> Signed-off-by: Zhen Lei 
>> ---
>>   drivers/iommu/dma-iommu.c | 29 ++---
>>   include/linux/iommu.h |  3 +++
>>   2 files changed, 29 insertions(+), 3 deletions(-)
>>
>> diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
>> index 4e885f7..2e116d9 100644
>> --- a/drivers/iommu/dma-iommu.c
>> +++ b/drivers/iommu/dma-iommu.c
>> @@ -55,6 +55,7 @@ struct iommu_dma_cookie {
>>   };
>>   struct list_headmsi_page_list;
>>   spinlock_tmsi_lock;
>> +struct iommu_domain*domain;
>>   };
>> static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
>> @@ -64,7 +65,8 @@ static inline size_t cookie_msi_granule(struct 
>> iommu_dma_cookie *cookie)
>>   return PAGE_SIZE;
>>   }
>>   -static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type 
>> type)
>> +static struct iommu_dma_cookie *cookie_alloc(struct iommu_domain *domain,
>> + enum iommu_dma_cookie_type type)
>>   {
>>   struct iommu_dma_cookie *cookie;
>>   @@ -73,6 +75,7 @@ static struct iommu_dma_cookie *cookie_alloc(enum 
>> iommu_dma_cookie_type type)
>>   spin_lock_init(&cookie->msi_lock);
>>   INIT_LIST_HEAD(&cookie->msi_page_list);
>>   cookie->type = type;
>> +cookie->domain = domain;
>>   }
>>   return cookie;
>>   }
>> @@ -94,7 +97,7 @@ int iommu_get_dma_cookie(struct iommu_domain *domain)
>>   if (domain->iova_cookie)
>>   return -EEXIST;
>>   -domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE);
>> +domain->iova_cookie = cookie_alloc(domain, IOMMU_DMA_IOVA_COOKIE);
>>   if (!domain->iova_cookie)
>>   return -ENOMEM;
>>   @@ -124,7 +127,7 @@ int iommu_get_msi_cookie(struct iommu_domain *domain, 
>> dma_addr_t base)
>>   if (domain->iova_cookie)
>>   return -EEXIST;
>>   -cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE);
>> +cookie = cookie_alloc(domain, IOMMU_DMA_MSI_COOKIE);
>>   if (!cookie)
>>   return -ENOMEM;
>>   @@ -261,6 +264,17 @@ static int iova_reserve_iommu_regions(struct device 
>> *dev,
>>   return ret;
>>   }
>>   +static void iova_flush_iotlb_all(struct iova_domain *iovad)
> 
> iommu_dma_flush...
OK

> 
>> +{
>> +struct iommu_dma_cookie *cookie;
>> +struct iommu_domain *domain;
>> +
>> +cookie = container_of(iovad, struct iommu_dma_cookie, iovad);
>> +domain = cookie->domain;
>> +
>> +domain->ops->flush_iotlb_all(domain);
>> +}
>> +
>>   /**
>>* iommu_dma_init_domain - Initialise a DMA mapping domain
>>* @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
>> @@ -276,6 +290,7 @@ static int iova_reserve_iommu_regions(struct device *dev,
>>   int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
>>   u64 size, struct device *dev)
>>   {
>> +const struct iommu_ops *ops = domain->ops;
>>   struct iommu_dma_cookie *cookie = domain->iova_cookie;
>>   struct iova_domain *iovad = &cookie->iovad;
>>   unsigned long order, base_pfn, end_pfn;
>> @@ -313,6 +328,11 @@ int iommu_dma_init_domain(struct iommu_domain *domain, 
>> dma_addr_t base,
>> init_iova_domain(iovad, 1UL << order, base_pfn);
>>   +if (ops->capable && ops->capable(IOMMU_CAP_NON_STRICT)) {
>> +BUG_ON(!ops->flush_iotlb_all);
>> +init_iova_flush_queue(iovad, iova_flush_iotlb_all, NULL);
>> +}
>> +
>>   return iova_reserve_iommu_regions(dev, domain);
>>   }
>>   EXPORT_SYMBOL(iommu_dma_init_domain);
>> @@ -392,6 +412,9 @@ static void iommu_dma_free_iova(struct iommu_dma_cookie 
>> *cookie,
>>   /* The MSI case is only ever cleaning up its most recent allocation */
>>   if (cookie->type == IOMMU_DMA_MSI_COOKIE)
>>   cookie->msi_iova -= size;
>> +else if (!IOMMU_DOMAIN_IS_STRICT(cookie->domain) && iovad->flu

Re: [PATCH 5/7] iommu/dma: add support for non-strict mode

2018-06-01 Thread kbuild test robot
Hi Zhen,

Thank you for the patch! Perhaps something to improve:

[auto build test WARNING on linus/master]
[also build test WARNING on v4.17-rc7 next-20180601]
[cannot apply to iommu/next]
[if your patch is applied to the wrong git tree, please drop us a note to help 
improve the system]

url:
https://github.com/0day-ci/linux/commits/Zhen-Lei/add-non-strict-mode-support-for-arm-smmu-v3/20180602-000418
config: x86_64-randconfig-x008-201821 (attached as .config)
compiler: gcc-7 (Debian 7.3.0-16) 7.3.0
reproduce:
# save the attached .config to linux build tree
make ARCH=x86_64 

All warnings (new ones prefixed by >>):

   drivers//iommu/amd_iommu.c: In function 'amd_iommu_capable':
>> drivers//iommu/amd_iommu.c:3053:2: warning: enumeration value 
>> 'IOMMU_CAP_NON_STRICT' not handled in switch [-Wswitch]
 switch (cap) {
 ^~

vim +/IOMMU_CAP_NON_STRICT +3053 drivers//iommu/amd_iommu.c

645c4c8d arch/x86/kernel/amd_iommu.c Joerg Roedel 2008-12-02  3050  
ab636481 drivers/iommu/amd_iommu.c   Joerg Roedel 2014-09-05  3051  static bool 
amd_iommu_capable(enum iommu_cap cap)
dbb9fd86 arch/x86/kernel/amd_iommu.c Sheng Yang   2009-03-18  3052  {
80a506b8 arch/x86/kernel/amd_iommu.c Joerg Roedel 2010-07-27 @3053  switch 
(cap) {
80a506b8 arch/x86/kernel/amd_iommu.c Joerg Roedel 2010-07-27  3054  case 
IOMMU_CAP_CACHE_COHERENCY:
ab636481 drivers/iommu/amd_iommu.c   Joerg Roedel 2014-09-05  3055  
return true;
bdddadcb drivers/iommu/amd_iommu.c   Joerg Roedel 2012-07-02  3056  case 
IOMMU_CAP_INTR_REMAP:
ab636481 drivers/iommu/amd_iommu.c   Joerg Roedel 2014-09-05  3057  
return (irq_remapping_enabled == 1);
cfdeec22 drivers/iommu/amd_iommu.c   Will Deacon  2014-10-27  3058  case 
IOMMU_CAP_NOEXEC:
cfdeec22 drivers/iommu/amd_iommu.c   Will Deacon  2014-10-27  3059  
return false;
80a506b8 arch/x86/kernel/amd_iommu.c Joerg Roedel 2010-07-27  3060  }
80a506b8 arch/x86/kernel/amd_iommu.c Joerg Roedel 2010-07-27  3061  
ab636481 drivers/iommu/amd_iommu.c   Joerg Roedel 2014-09-05  3062  return 
false;
dbb9fd86 arch/x86/kernel/amd_iommu.c Sheng Yang   2009-03-18  3063  }
dbb9fd86 arch/x86/kernel/amd_iommu.c Sheng Yang   2009-03-18  3064  

:: The code at line 3053 was first introduced by commit
:: 80a506b8fdcfa868bb53eb740f928217d0966fc1 x86/amd-iommu: Export 
cache-coherency capability

:: TO: Joerg Roedel 
:: CC: Joerg Roedel 

---
0-DAY kernel test infrastructureOpen Source Technology Center
https://lists.01.org/pipermail/kbuild-all   Intel Corporation


.config.gz
Description: application/gzip
___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

Re: [PATCH 5/7] iommu/dma: add support for non-strict mode

2018-05-31 Thread Robin Murphy

On 31/05/18 08:42, Zhen Lei wrote:

1. Save the related domain pointer in struct iommu_dma_cookie, make iovad
capable call domain->ops->flush_iotlb_all to flush TLB.
2. Define a new iommu capability: IOMMU_CAP_NON_STRICT, which is used to indicate
that the iommu domain support non-strict mode.
3. During the iommu domain initialization phase, call capable() to check
whether it supports non-strict mode. If so, call init_iova_flush_queue
to register iovad->flush_cb callback.
4. All unmap(contains iova-free) APIs will finally invoke __iommu_dma_unmap
-->iommu_dma_free_iova. Use iovad->flush_cb to check whether its related
iommu support non-strict mode or not, and call IOMMU_DOMAIN_IS_STRICT to
make sure the IOMMU_DOMAIN_UNMANAGED domain always follow strict mode.


Once again, this is a whole load of complexity for a property which 
could just be statically encoded at allocation, e.g. in the cookie type.



Signed-off-by: Zhen Lei 
---
  drivers/iommu/dma-iommu.c | 29 ++---
  include/linux/iommu.h |  3 +++
  2 files changed, 29 insertions(+), 3 deletions(-)

diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 4e885f7..2e116d9 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -55,6 +55,7 @@ struct iommu_dma_cookie {
};
struct list_headmsi_page_list;
spinlock_t  msi_lock;
+   struct iommu_domain *domain;
  };
  
  static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)

@@ -64,7 +65,8 @@ static inline size_t cookie_msi_granule(struct 
iommu_dma_cookie *cookie)
return PAGE_SIZE;
  }
  
-static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)

+static struct iommu_dma_cookie *cookie_alloc(struct iommu_domain *domain,
+enum iommu_dma_cookie_type type)
  {
struct iommu_dma_cookie *cookie;
  
@@ -73,6 +75,7 @@ static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)

spin_lock_init(&cookie->msi_lock);
INIT_LIST_HEAD(&cookie->msi_page_list);
cookie->type = type;
+   cookie->domain = domain;
}
return cookie;
  }
@@ -94,7 +97,7 @@ int iommu_get_dma_cookie(struct iommu_domain *domain)
if (domain->iova_cookie)
return -EEXIST;
  
-	domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE);

+   domain->iova_cookie = cookie_alloc(domain, IOMMU_DMA_IOVA_COOKIE);
if (!domain->iova_cookie)
return -ENOMEM;
  
@@ -124,7 +127,7 @@ int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)

if (domain->iova_cookie)
return -EEXIST;
  
-	cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE);

+   cookie = cookie_alloc(domain, IOMMU_DMA_MSI_COOKIE);
if (!cookie)
return -ENOMEM;
  
@@ -261,6 +264,17 @@ static int iova_reserve_iommu_regions(struct device *dev,

return ret;
  }
  
+static void iova_flush_iotlb_all(struct iova_domain *iovad)


iommu_dma_flush...


+{
+   struct iommu_dma_cookie *cookie;
+   struct iommu_domain *domain;
+
+   cookie = container_of(iovad, struct iommu_dma_cookie, iovad);
+   domain = cookie->domain;
+
+   domain->ops->flush_iotlb_all(domain);
+}
+
  /**
   * iommu_dma_init_domain - Initialise a DMA mapping domain
   * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
@@ -276,6 +290,7 @@ static int iova_reserve_iommu_regions(struct device *dev,
  int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
u64 size, struct device *dev)
  {
+   const struct iommu_ops *ops = domain->ops;
struct iommu_dma_cookie *cookie = domain->iova_cookie;
struct iova_domain *iovad = &cookie->iovad;
unsigned long order, base_pfn, end_pfn;
@@ -313,6 +328,11 @@ int iommu_dma_init_domain(struct iommu_domain *domain, 
dma_addr_t base,
  
  	init_iova_domain(iovad, 1UL << order, base_pfn);
  
+	if (ops->capable && ops->capable(IOMMU_CAP_NON_STRICT)) {

+   BUG_ON(!ops->flush_iotlb_all);
+   init_iova_flush_queue(iovad, iova_flush_iotlb_all, NULL);
+   }
+
return iova_reserve_iommu_regions(dev, domain);
  }
  EXPORT_SYMBOL(iommu_dma_init_domain);
@@ -392,6 +412,9 @@ static void iommu_dma_free_iova(struct iommu_dma_cookie 
*cookie,
/* The MSI case is only ever cleaning up its most recent allocation */
if (cookie->type == IOMMU_DMA_MSI_COOKIE)
cookie->msi_iova -= size;
+   else if (!IOMMU_DOMAIN_IS_STRICT(cookie->domain) && iovad->flush_cb)
+   queue_iova(iovad, iova_pfn(iovad, iova),
+   size >> iova_shift(iovad), 0);
else
free_iova_fast(iovad, iova_pfn(iovad, iova),
size >> iova_shift(iovad));
diff --g

[PATCH 5/7] iommu/dma: add support for non-strict mode

2018-05-31 Thread Zhen Lei
1. Save the related domain pointer in struct iommu_dma_cookie, make iovad
   capable call domain->ops->flush_iotlb_all to flush TLB.
2. Define a new iommu capability: IOMMU_CAP_NON_STRICT, which is used to indicate
   that the iommu domain support non-strict mode.
3. During the iommu domain initialization phase, call capable() to check
   whether it supports non-strict mode. If so, call init_iova_flush_queue
   to register iovad->flush_cb callback.
4. All unmap(contains iova-free) APIs will finally invoke __iommu_dma_unmap
   -->iommu_dma_free_iova. Use iovad->flush_cb to check whether its related
   iommu support non-strict mode or not, and call IOMMU_DOMAIN_IS_STRICT to
   make sure the IOMMU_DOMAIN_UNMANAGED domain always follow strict mode.

Signed-off-by: Zhen Lei 
---
 drivers/iommu/dma-iommu.c | 29 ++---
 include/linux/iommu.h |  3 +++
 2 files changed, 29 insertions(+), 3 deletions(-)

diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 4e885f7..2e116d9 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -55,6 +55,7 @@ struct iommu_dma_cookie {
};
struct list_headmsi_page_list;
spinlock_t  msi_lock;
+   struct iommu_domain *domain;
 };
 
 static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
@@ -64,7 +65,8 @@ static inline size_t cookie_msi_granule(struct 
iommu_dma_cookie *cookie)
return PAGE_SIZE;
 }
 
-static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
+static struct iommu_dma_cookie *cookie_alloc(struct iommu_domain *domain,
+enum iommu_dma_cookie_type type)
 {
struct iommu_dma_cookie *cookie;
 
@@ -73,6 +75,7 @@ static struct iommu_dma_cookie *cookie_alloc(enum 
iommu_dma_cookie_type type)
spin_lock_init(&cookie->msi_lock);
INIT_LIST_HEAD(&cookie->msi_page_list);
cookie->type = type;
+   cookie->domain = domain;
}
return cookie;
 }
@@ -94,7 +97,7 @@ int iommu_get_dma_cookie(struct iommu_domain *domain)
if (domain->iova_cookie)
return -EEXIST;
 
-   domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE);
+   domain->iova_cookie = cookie_alloc(domain, IOMMU_DMA_IOVA_COOKIE);
if (!domain->iova_cookie)
return -ENOMEM;
 
@@ -124,7 +127,7 @@ int iommu_get_msi_cookie(struct iommu_domain *domain, 
dma_addr_t base)
if (domain->iova_cookie)
return -EEXIST;
 
-   cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE);
+   cookie = cookie_alloc(domain, IOMMU_DMA_MSI_COOKIE);
if (!cookie)
return -ENOMEM;
 
@@ -261,6 +264,17 @@ static int iova_reserve_iommu_regions(struct device *dev,
return ret;
 }
 
+static void iova_flush_iotlb_all(struct iova_domain *iovad)
+{
+   struct iommu_dma_cookie *cookie;
+   struct iommu_domain *domain;
+
+   cookie = container_of(iovad, struct iommu_dma_cookie, iovad);
+   domain = cookie->domain;
+
+   domain->ops->flush_iotlb_all(domain);
+}
+
 /**
  * iommu_dma_init_domain - Initialise a DMA mapping domain
  * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
@@ -276,6 +290,7 @@ static int iova_reserve_iommu_regions(struct device *dev,
 int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
u64 size, struct device *dev)
 {
+   const struct iommu_ops *ops = domain->ops;
struct iommu_dma_cookie *cookie = domain->iova_cookie;
struct iova_domain *iovad = &cookie->iovad;
unsigned long order, base_pfn, end_pfn;
@@ -313,6 +328,11 @@ int iommu_dma_init_domain(struct iommu_domain *domain, 
dma_addr_t base,
 
init_iova_domain(iovad, 1UL << order, base_pfn);
 
+   if (ops->capable && ops->capable(IOMMU_CAP_NON_STRICT)) {
+   BUG_ON(!ops->flush_iotlb_all);
+   init_iova_flush_queue(iovad, iova_flush_iotlb_all, NULL);
+   }
+
return iova_reserve_iommu_regions(dev, domain);
 }
 EXPORT_SYMBOL(iommu_dma_init_domain);
@@ -392,6 +412,9 @@ static void iommu_dma_free_iova(struct iommu_dma_cookie 
*cookie,
/* The MSI case is only ever cleaning up its most recent allocation */
if (cookie->type == IOMMU_DMA_MSI_COOKIE)
cookie->msi_iova -= size;
+   else if (!IOMMU_DOMAIN_IS_STRICT(cookie->domain) && iovad->flush_cb)
+   queue_iova(iovad, iova_pfn(iovad, iova),
+   size >> iova_shift(iovad), 0);
else
free_iova_fast(iovad, iova_pfn(iovad, iova),
size >> iova_shift(iovad));
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 39b3150..01ff569 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -87,6 +87,8 @@ struct iommu_domain_geometry {
 __I