Implement iova_to_phys for the AMD IOMMU v1 page table, so that it can be
used by the IO page table framework.

Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpa...@amd.com>
---
 drivers/iommu/amd/io_pgtable.c | 21 +++++++++++++++++++++
 drivers/iommu/amd/iommu.c      | 16 +---------------
 2 files changed, 22 insertions(+), 15 deletions(-)

diff --git a/drivers/iommu/amd/io_pgtable.c b/drivers/iommu/amd/io_pgtable.c
index f913fc7b1e58..2f36bab23516 100644
--- a/drivers/iommu/amd/io_pgtable.c
+++ b/drivers/iommu/amd/io_pgtable.c
@@ -525,6 +525,26 @@ unsigned long iommu_unmap_page(struct protection_domain *dom,
        return unmapped;
 }
 
+static phys_addr_t iommu_v1_iova_to_phys(struct io_pgtable_ops *ops, unsigned long iova)
+{
+       struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
+       unsigned long offset_mask, pte_pgsize;
+       u64 *pte, __pte;
+
+       if (pgtable->mode == PAGE_MODE_NONE)
+               return iova;
+
+       pte = fetch_pte(pgtable, iova, &pte_pgsize);
+
+       if (!pte || !IOMMU_PTE_PRESENT(*pte))
+               return 0;
+
+       offset_mask = pte_pgsize - 1;
+       __pte       = __sme_clr(*pte & PM_ADDR_MASK);
+
+       return (__pte & ~offset_mask) | (iova & offset_mask);
+}
+
 struct io_pgtable_ops *amd_iommu_setup_io_pgtable_ops(struct iommu_dev_data *dev_data,
                                             struct protection_domain *domain)
 {
@@ -551,6 +571,7 @@ static struct io_pgtable *v1_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
 {
        struct protection_domain *pdom = (struct protection_domain *)cookie;
 
+       pdom->iop.iop.ops.iova_to_phys = iommu_v1_iova_to_phys;
        return &pdom->iop.iop;
 }
 
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 87cea1cde414..9a1a16031e00 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -2079,22 +2079,8 @@ static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
 {
        struct protection_domain *domain = to_pdomain(dom);
        struct io_pgtable_ops *ops = &domain->iop.iop.ops;
-       struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
-       unsigned long offset_mask, pte_pgsize;
-       u64 *pte, __pte;
 
-       if (domain->iop.mode == PAGE_MODE_NONE)
-               return iova;
-
-       pte = fetch_pte(pgtable, iova, &pte_pgsize);
-
-       if (!pte || !IOMMU_PTE_PRESENT(*pte))
-               return 0;
-
-       offset_mask = pte_pgsize - 1;
-       __pte       = __sme_clr(*pte & PM_ADDR_MASK);
-
-       return (__pte & ~offset_mask) | (iova & offset_mask);
+       return ops->iova_to_phys(ops, iova);
 }
 
 static bool amd_iommu_capable(enum iommu_cap cap)
-- 
2.17.1

_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

Reply via email to