On Fri, Jan 29, 2010 at 10:05:26AM +0100, Joerg Roedel wrote:
> > Um, that's not a page-size based interface. Page size isn't always 4KiB;
> > this code runs on IA64 too.
> > 
> > We have enough fun with CPU vs. DMA page size on IA64 already :)
> 
> Ah right. So this should be
> 
>       size     = PAGE_SIZE << gfp_order;
> 
> Right? The interface is meant to map the same amount of memory that
> alloc_pages(gfp_order) would allocate. The same goes for the return
> value of the unmap function.

Ok, here is an updated patch (also updated in the iommu/largepage
branch). Does it look ok to you, David?
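
To make the size/order relationship discussed above concrete, here is a
minimal caller-side sketch (not part of the patch; get_order() is just the
usual helper for picking the smallest order that covers a byte count):

        /*
         * Sketch only: the new interface takes an allocation order,
         * mirroring alloc_pages(gfp_order), so the mapped size is
         * always PAGE_SIZE << gfp_order.
         */
        int gfp_order = get_order(size);          /* smallest order covering 'size' bytes */
        size_t mapped = PAGE_SIZE << gfp_order;   /* bytes actually covered by that order */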

From a3ef8393d8027c795709e11b2f57c6013d2474a6 Mon Sep 17 00:00:00 2001
From: Joerg Roedel <joerg.roe...@amd.com>
Date: Wed, 20 Jan 2010 17:17:37 +0100
Subject: [PATCH 04/11] VT-d: Change {un}map_range functions to implement {un}map interface

This patch changes the iommu-api functions for mapping and
unmapping page ranges to use the new page-size based
interface. This allows the range-based functions to be
removed later.

Signed-off-by: Joerg Roedel <joerg.roe...@amd.com>
---
 drivers/pci/intel-iommu.c |   22 ++++++++++++----------
 1 files changed, 12 insertions(+), 10 deletions(-)

diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index a714e3d..371dc56 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -3626,14 +3626,15 @@ static void intel_iommu_detach_device(struct iommu_domain *domain,
        domain_remove_one_dev_info(dmar_domain, pdev);
 }
 
-static int intel_iommu_map_range(struct iommu_domain *domain,
-                                unsigned long iova, phys_addr_t hpa,
-                                size_t size, int iommu_prot)
+static int intel_iommu_map(struct iommu_domain *domain,
+                          unsigned long iova, phys_addr_t hpa,
+                          int gfp_order, int iommu_prot)
 {
        struct dmar_domain *dmar_domain = domain->priv;
        u64 max_addr;
        int addr_width;
        int prot = 0;
+       size_t size;
        int ret;
 
        if (iommu_prot & IOMMU_READ)
@@ -3643,6 +3644,7 @@ static int intel_iommu_map_range(struct iommu_domain *domain,
        if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
                prot |= DMA_PTE_SNP;
 
+       size     = PAGE_SIZE << gfp_order;
        max_addr = iova + size;
        if (dmar_domain->max_addr < max_addr) {
                int min_agaw;
@@ -3669,19 +3671,19 @@ static int intel_iommu_map_range(struct iommu_domain *domain,
        return ret;
 }
 
-static void intel_iommu_unmap_range(struct iommu_domain *domain,
-                                   unsigned long iova, size_t size)
+static int intel_iommu_unmap(struct iommu_domain *domain,
+                            unsigned long iova, int gfp_order)
 {
        struct dmar_domain *dmar_domain = domain->priv;
-
-       if (!size)
-               return;
+       size_t size = PAGE_SIZE << gfp_order;
 
        dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
                            (iova + size - 1) >> VTD_PAGE_SHIFT);
 
        if (dmar_domain->max_addr == iova + size)
                dmar_domain->max_addr = iova;
+
+       return gfp_order;
 }
 
 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
@@ -3714,8 +3716,8 @@ static struct iommu_ops intel_iommu_ops = {
        .domain_destroy = intel_iommu_domain_destroy,
        .attach_dev     = intel_iommu_attach_device,
        .detach_dev     = intel_iommu_detach_device,
-       .map_range      = intel_iommu_map_range,
-       .unmap_range    = intel_iommu_unmap_range,
+       .map            = intel_iommu_map,
+       .unmap          = intel_iommu_unmap,
        .iova_to_phys   = intel_iommu_iova_to_phys,
        .domain_has_cap = intel_iommu_domain_has_cap,
 };
-- 
1.6.6
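
For reference, a rough caller-side sketch of how the order-based map/unmap
ops would be exercised through the generic iommu-api wrappers (those
wrappers are added elsewhere in this series, so the exact names and
signatures below are assumptions):

        /*
         * Sketch only, assuming order-based iommu_map()/iommu_unmap()
         * wrappers as introduced in this series.
         */
        static int example_map_unmap(struct iommu_domain *dom, unsigned long iova,
                                     phys_addr_t paddr, size_t len)
        {
                int order = get_order(len);     /* smallest order covering len bytes */
                int ret;

                ret = iommu_map(dom, iova, paddr, order, IOMMU_READ | IOMMU_WRITE);
                if (ret)
                        return ret;

                /* the unmap path reports the order it actually unmapped */
                return iommu_unmap(dom, iova, order);
        }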

