Tell me it isn't so...

diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 59f17ac..6c588ee 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -3866,6 +3866,25 @@ static int intel_iommu_unmap(struct iommu_domain *domain,
        struct dmar_domain *dmar_domain = domain->priv;
        size_t size = PAGE_SIZE << gfp_order;
+       int level = 0;
 
+       /* The KVM code is *fucked* in the head. It maps the range
+          one page at a time, using 4KiB pages unless it actually
+          allocated hugepages using hugetlbfs. (So we get to flush
+          the CPU data cache and then the IOTLB for each page in
+          its loop). And on unmap, it unmaps 4KiB at a time (always
+          passing gfp_order==0), regardless of whether it mapped
+          using superpages or not. So on unmap, if we detect a
+          superpage in our page tables we are expected to unmap
+          *more* than we are asked to, and return a value indicating
+          how much we actually unmapped. WTF? */
+       if (dma_pfn_level_pte(dmar_domain, iova >> VTD_PAGE_SHIFT,
+                             1, &level) && level > 1) {
+               /* level_to_offset_bits(level) is (level - 1) * 9, i.e.
+                  the page order of a superpage at that level. */
+               gfp_order = level_to_offset_bits(level);
+               size = PAGE_SIZE << gfp_order;
+       }
+
        dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
                            (iova + size - 1) >> VTD_PAGE_SHIFT);
 
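To spell out the first half of that comment: the caller's map loop is
morally the following. This is only a sketch of the pattern, not the
actual virt/kvm/iommu.c code (example_map_range() is made up, and I'm
pretending the range is physically contiguous to keep it short), but
iommu_map() taking a gfp_order is the real API in this tree:

#include <linux/iommu.h>

/* Map a range the way described above: one 4KiB page per call
   (gfp_order == 0), so the driver flushes the CPU data cache and
   the IOTLB for every single page. */
static int example_map_range(struct iommu_domain *domain,
                             unsigned long iova, phys_addr_t paddr,
                             unsigned long npages, int prot)
{
        unsigned long i;
        int ret;

        for (i = 0; i < npages; i++) {
                ret = iommu_map(domain, iova, paddr, 0, prot);
                if (ret)
                        return ret;
                iova += PAGE_SIZE;
                paddr += PAGE_SIZE;
        }
        return 0;
}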

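The unmap side is the mirror image, and is what the hack above has to
satisfy. Again just a sketch (example_unmap_range() is made up;
iommu_unmap() returning the order it actually cleared is the real
contract the comment describes):

static void example_unmap_range(struct iommu_domain *domain,
                                unsigned long iova, unsigned long npages)
{
        unsigned long end = iova + (npages << PAGE_SHIFT);

        while (iova < end) {
                /* Always asks to unmap a single 4KiB page... */
                int order = iommu_unmap(domain, iova, 0);

                /* ...then advances by however much the driver says it
                   actually unmapped, which may be a whole superpage. */
                iova += PAGE_SIZE << order;
        }
}

So the driver has to spot the superpage, clear the whole thing, and
return the real order, or the caller advances (and unpins) the wrong
number of pages.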
-- 
David Woodhouse                            Open Source Technology Centre
david.woodho...@intel.com                              Intel Corporation
