The arm and arm64 architectures allow a virtual address to be mapped using
a block descriptor (or huge page, as Linux calls it), and the function
mmu_set_ranges_sect() is made available for a test to do just that. But
virt_to_pte_phys() assumes that all virtual addresses are mapped with
page granularity, which can lead to erroneous addresses being returned
when the address is mapped by a block descriptor.

Fix it by walking the translation tables with mmu_get_pte(), which is
reworked to return NULL for unmapped addresses instead of asserting,
and by computing the physical address with the block mask instead of
the page mask when the walk ends at a block descriptor.
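
To illustrate the difference (a standalone sketch, not part of the
patch; the descriptor output address and the virtual address below are
made up), for a virtual address 0x123456 bytes into a 2M block the old
computation keeps only the low 12 bits of the offset:

  #include <stdint.h>
  #include <stdio.h>

  #define PAGE_SIZE   (1UL << 12)
  #define BLOCK_SIZE  (1UL << 21)   /* 2M block at the PMD level */

  int main(void)
  {
      /* hypothetical 2M block descriptor, attribute bits left out */
      uint64_t desc = 0x80200000;
      /* virtual address 0x123456 bytes into the block mapping */
      uint64_t va = 0x40000000 + 0x123456;

      /* old virt_to_pte_phys(): page granularity assumed */
      uint64_t old_phys = (desc & ~(PAGE_SIZE - 1)) + (va & (PAGE_SIZE - 1));
      /* new virt_to_pte_phys(): block granularity for pmd_huge() entries */
      uint64_t new_phys = (desc & ~(BLOCK_SIZE - 1)) | (va & (BLOCK_SIZE - 1));

      /* prints 0x80200456: VA bits [20:12] are lost */
      printf("old: 0x%llx\n", (unsigned long long)old_phys);
      /* prints 0x80323456: the correct physical address */
      printf("new: 0x%llx\n", (unsigned long long)new_phys);
      return 0;
  }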

Signed-off-by: Alexandru Elisei <alexandru.eli...@arm.com>
---
 lib/arm/mmu.c | 89 +++++++++++++++++++++++++++++++--------------------
 1 file changed, 54 insertions(+), 35 deletions(-)

diff --git a/lib/arm/mmu.c b/lib/arm/mmu.c
index e1a72fe4941f..6022e356ddd4 100644
--- a/lib/arm/mmu.c
+++ b/lib/arm/mmu.c
@@ -111,10 +111,61 @@ pteval_t *install_page(pgd_t *pgtable, phys_addr_t phys, void *virt)
                                 __pgprot(PTE_WBWA | PTE_USER));
 }
 
-phys_addr_t virt_to_pte_phys(pgd_t *pgtable, void *mem)
+/*
+ * NOTE: The Arm architecture might require the use of a
+ * break-before-make sequence before making changes to a PTE when
+ * certain conditions are met (see Arm ARM D5-2669 for AArch64 and
+ * B3-1378 for AArch32 for more details).
+ */
+pteval_t *mmu_get_pte(pgd_t *pgtable, uintptr_t vaddr)
 {
-       return (*get_pte(pgtable, (uintptr_t)mem) & PHYS_MASK & -PAGE_SIZE)
-               + ((ulong)mem & (PAGE_SIZE - 1));
+       pgd_t *pgd;
+       pud_t *pud;
+       pmd_t *pmd;
+       pte_t *pte;
+
+       if (!mmu_enabled())
+               return NULL;
+
+       pgd = pgd_offset(pgtable, vaddr);
+       if (!pgd_valid(*pgd))
+               return NULL;
+
+       pud = pud_offset(pgd, vaddr);
+       if (!pud_valid(*pud))
+               return NULL;
+
+       pmd = pmd_offset(pud, vaddr);
+       if (!pmd_valid(*pmd))
+               return NULL;
+       if (pmd_huge(*pmd))
+               return &pmd_val(*pmd);
+
+       pte = pte_offset(pmd, vaddr);
+       if (!pte_valid(*pte))
+               return NULL;
+
+       return &pte_val(*pte);
+}
+
+phys_addr_t virt_to_pte_phys(pgd_t *pgtable, void *virt)
+{
+       phys_addr_t mask;
+       pteval_t *pteval;
+
+       pteval = mmu_get_pte(pgtable, (uintptr_t)virt);
+       if (!pteval) {
+               install_page(pgtable, (phys_addr_t)(unsigned long)virt, virt);
+               return (phys_addr_t)(unsigned long)virt;
+       }
+
+       if (pmd_huge(__pmd(*pteval)))
+               mask = PMD_MASK;
+       else
+               mask = PAGE_MASK;
+
+       return (*pteval & PHYS_MASK & mask) |
+               ((phys_addr_t)(unsigned long)virt & ~mask);
 }
 
 void mmu_set_range_ptes(pgd_t *pgtable, uintptr_t virt_offset,
@@ -231,38 +282,6 @@ unsigned long __phys_to_virt(phys_addr_t addr)
        return addr;
 }
 
-/*
- * NOTE: The Arm architecture might require the use of a
- * break-before-make sequence before making changes to a PTE and
- * certain conditions are met (see Arm ARM D5-2669 for AArch64 and
- * B3-1378 for AArch32 for more details).
- */
-pteval_t *mmu_get_pte(pgd_t *pgtable, uintptr_t vaddr)
-{
-       pgd_t *pgd;
-       pud_t *pud;
-       pmd_t *pmd;
-       pte_t *pte;
-
-       if (!mmu_enabled())
-               return NULL;
-
-       pgd = pgd_offset(pgtable, vaddr);
-       assert(pgd_valid(*pgd));
-       pud = pud_offset(pgd, vaddr);
-       assert(pud_valid(*pud));
-       pmd = pmd_offset(pud, vaddr);
-       assert(pmd_valid(*pmd));
-
-       if (pmd_huge(*pmd))
-               return &pmd_val(*pmd);
-
-       pte = pte_offset(pmd, vaddr);
-       assert(pte_valid(*pte));
-
-        return &pte_val(*pte);
-}
-
 void mmu_clear_user(pgd_t *pgtable, unsigned long vaddr)
 {
        pteval_t *p_pte = mmu_get_pte(pgtable, vaddr);
-- 
2.37.0
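
With this change mmu_get_pte() returns NULL for an unmapped address
instead of asserting, so a caller can probe a translation before using
it. A minimal sketch of such a caller, assuming the mmu_idmap
translation table and the report_info() helper that kvm-unit-tests
provides (both are outside this patch; header includes omitted):

  /* sketch: report how, or whether, a virtual address is mapped */
  static void describe_mapping(pgd_t *pgtable, unsigned long vaddr)
  {
      pteval_t *ptep = mmu_get_pte(pgtable, vaddr);

      if (!ptep) {
          report_info("0x%lx: not mapped", vaddr);
          return;
      }

      if (pmd_huge(__pmd(*ptep)))
          report_info("0x%lx: block descriptor 0x%lx", vaddr,
                      (unsigned long)*ptep);
      else
          report_info("0x%lx: page descriptor 0x%lx", vaddr,
                      (unsigned long)*ptep);
  }

  /* e.g., called as describe_mapping(mmu_idmap, (unsigned long)ptr) */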

