In some common low-level functions that will also be used by virtual machine domains, use domain_flush_cache() instead of __iommu_flush_cache(). A virtual machine domain may contain devices behind different IOMMUs, so these paths should not flush through a single per-IOMMU handle.
Signed-off-by: Weidong Han <[EMAIL PROTECTED]>
---
 drivers/pci/intel-iommu.c |   40 ++++++++++++++++++++----------------
1 files changed, 24 insertions(+), 16 deletions(-)
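
[Note, not part of the patch] As a rough illustration of where the
domain->iommu_coherency flag tested by domain_flush_cache() might come
from, something along the following lines could recompute it whenever the
set of IOMMUs backing a domain changes. The helper name and the
iommu_bmp / g_iommus[] / g_num_of_iommus identifiers are assumptions made
for this sketch and are not introduced by this patch:

/*
 * Sketch only (assumed identifiers, see note above): a domain is cache
 * coherent only if every IOMMU it is attached to snoops CPU caches
 * (ecap_coherent()); otherwise domain_flush_cache() has to fall back to
 * clflush_cache_range().
 */
static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
	int i;

	domain->iommu_coherency = 1;

	for (i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	     i < g_num_of_iommus;
	     i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i + 1)) {
		if (!ecap_coherent(g_iommus[i]->ecap)) {
			domain->iommu_coherency = 0;
			break;
		}
	}
}

Attach/detach paths would then update the flag once, and the hot mapping
paths below would only test domain->iommu_coherency before deciding
whether to clflush.
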
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 429aff4..b00a8f2 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -200,6 +200,13 @@ static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
return NULL;
}
+static void domain_flush_cache(struct dmar_domain *domain,
+			       void *addr, int size)
+{
+	if (!domain->iommu_coherency)
+		clflush_cache_range(addr, size);
+}
+
static struct intel_iommu *device_find_matched_iommu(u8 bus, u8 devfn)
{
struct dmar_drhd_unit *drhd = NULL;
@@ -316,7 +323,6 @@ static struct dma_pte * addr_to_dma_pte(struct dmar_domain *domain, u64 addr)
int level = agaw_to_level(domain->agaw);
int offset;
unsigned long flags;
- struct intel_iommu *iommu = domain_get_iommu(domain);
BUG_ON(!domain->pgd);
@@ -340,8 +346,7 @@ static struct dma_pte * addr_to_dma_pte(struct dmar_domain *domain, u64 addr)
flags);
return NULL;
}
- __iommu_flush_cache(iommu, tmp_page,
- PAGE_SIZE);
+ domain_flush_cache(domain, tmp_page, PAGE_SIZE);
dma_set_pte_addr(*pte, virt_to_phys(tmp_page));
/*
* high level table always sets r/w, last level page
@@ -349,7 +354,7 @@ static struct dma_pte * addr_to_dma_pte(struct dmar_domain *domain, u64 addr)
*/
dma_set_pte_readable(*pte);
dma_set_pte_writable(*pte);
- __iommu_flush_cache(iommu, pte, sizeof(*pte));
+ domain_flush_cache(domain, pte, sizeof(*pte));
}
parent = phys_to_virt(dma_pte_addr(*pte));
level--;
@@ -386,14 +391,13 @@ static struct dma_pte *dma_addr_level_pte(struct dmar_domain *domain, u64 addr,
static void dma_pte_clear_one(struct dmar_domain *domain, u64 addr)
{
struct dma_pte *pte = NULL;
- struct intel_iommu *iommu = domain_get_iommu(domain);
/* get last level pte */
pte = dma_addr_level_pte(domain, addr, 1);
if (pte) {
dma_clear_pte(*pte);
- __iommu_flush_cache(iommu, pte, sizeof(*pte));
+ domain_flush_cache(domain, pte, sizeof(*pte));
}
}
@@ -422,7 +426,6 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
int addr_width = agaw_to_width(domain->agaw);
struct dma_pte *pte;
int total = agaw_to_level(domain->agaw);
- struct intel_iommu *iommu = domain_get_iommu(domain);
int level;
u64 tmp;
@@ -442,8 +445,7 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
free_pgtable_page(
phys_to_virt(dma_pte_addr(*pte)));
dma_clear_pte(*pte);
- __iommu_flush_cache(iommu,
- pte, sizeof(*pte));
+ domain_flush_cache(domain, pte, sizeof(*pte));
}
tmp += level_size(level);
}
@@ -1158,12 +1160,16 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
u8 bus, u8 devfn)
{
struct context_entry *context;
- struct intel_iommu *iommu = domain_get_iommu(domain);
+ struct intel_iommu *iommu;
unsigned long flags;
pr_debug("Set context mapping for %02x:%02x.%d\n",
bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
BUG_ON(!domain->pgd);
+
+	iommu = device_find_matched_iommu(bus, devfn);
+	if (!iommu)
+		return -ENODEV;
context = device_to_context_entry(iommu, bus, devfn);
if (!context)
return -ENOMEM;
@@ -1225,12 +1231,15 @@ domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev)
tmp->bus->number, tmp->devfn);
}
-static int domain_context_mapped(struct dmar_domain *domain,
- struct pci_dev *pdev)
+static int domain_context_mapped(struct pci_dev *pdev)
{
int ret;
struct pci_dev *tmp, *parent;
- struct intel_iommu *iommu = domain_get_iommu(domain);
+	struct intel_iommu *iommu;
+
+	iommu = device_find_matched_iommu(pdev->bus->number, pdev->devfn);
+	if (!iommu)
+		return -ENODEV;
ret = device_contex