Re: [PATCH 1/7] ARM: KVM: simplify HYP mapping population
On 04/04/13 00:13, Christoffer Dall wrote: On Tue, Apr 02, 2013 at 02:25:09PM +0100, Marc Zyngier wrote: The way we populate HYP mappings is a bit convoluted, to say the least. Passing a pointer around to keep track of the current PFN is quite odd, and we end-up having two different PTE accessors for no good reason. Simplify the whole thing by unifying the two PTE accessors, passing a pgprot_t around, and moving the various validity checks to the upper layers. Signed-off-by: Marc Zyngier <marc.zyng...@arm.com> --- arch/arm/kvm/mmu.c | 100 ++--- 1 file changed, 41 insertions(+), 59 deletions(-) diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c index 2f12e40..24811d1 100644 --- a/arch/arm/kvm/mmu.c +++ b/arch/arm/kvm/mmu.c @@ -125,54 +125,34 @@ void free_hyp_pmds(void) } static void create_hyp_pte_mappings(pmd_t *pmd, unsigned long start, -unsigned long end) +unsigned long end, unsigned long pfn, +pgprot_t prot) { pte_t *pte; unsigned long addr; -struct page *page; -for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) { -unsigned long hyp_addr = KERN_TO_HYP(addr); - -pte = pte_offset_kernel(pmd, hyp_addr); -BUG_ON(!virt_addr_valid(addr)); -page = virt_to_page(addr); -kvm_set_pte(pte, mk_pte(page, PAGE_HYP)); -} -} - -static void create_hyp_io_pte_mappings(pmd_t *pmd, unsigned long start, - unsigned long end, - unsigned long *pfn_base) -{ -pte_t *pte; -unsigned long addr; - -for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) { -unsigned long hyp_addr = KERN_TO_HYP(addr); - -pte = pte_offset_kernel(pmd, hyp_addr); -BUG_ON(pfn_valid(*pfn_base)); -kvm_set_pte(pte, pfn_pte(*pfn_base, PAGE_HYP_DEVICE)); -(*pfn_base)++; +for (addr = start; addr < end; addr += PAGE_SIZE) { +pte = pte_offset_kernel(pmd, addr); +kvm_set_pte(pte, pfn_pte(pfn, prot)); +pfn++; } } static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start, - unsigned long end, unsigned long *pfn_base) + unsigned long end, unsigned long pfn, + pgprot_t prot) { pmd_t *pmd; pte_t *pte; unsigned 
long addr, next; for (addr = start; addr < end; addr = next) { -unsigned long hyp_addr = KERN_TO_HYP(addr); -pmd = pmd_offset(pud, hyp_addr); +pmd = pmd_offset(pud, addr); BUG_ON(pmd_sect(*pmd)); if (pmd_none(*pmd)) { -pte = pte_alloc_one_kernel(NULL, hyp_addr); +pte = pte_alloc_one_kernel(NULL, addr); if (!pte) { kvm_err("Cannot allocate Hyp pte\n"); return -ENOMEM; @@ -182,25 +162,17 @@ static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start, next = pmd_addr_end(addr, end); -/* - * If pfn_base is NULL, we map kernel pages into HYP with the - * virtual address. Otherwise, this is considered an I/O - * mapping and we map the physical region starting at - * *pfn_base to [start, end[. - */ -if (!pfn_base) -create_hyp_pte_mappings(pmd, addr, next); -else -create_hyp_io_pte_mappings(pmd, addr, next, pfn_base); +create_hyp_pte_mappings(pmd, addr, next, pfn, prot); +pfn += (next - addr) >> PAGE_SHIFT; so this scheme always assumes a physically contiguous memory region, and I wasn't sure if we ever wanted to support mapping vmalloc'ed regions into Hyp mode, which is why I wrote the code the way it was (which goes to your "for no good reason" comment above). So let's look at what we're actually mapping: - kernel code/kmalloc-ed data: this is always physically contiguous - MMIO: While this is mapped in the vmalloc region, this is actually physically contiguous. I'm fine with assuming that this only works for contiguous regions, but I think it deserves a line in the comments on the exported functions (the non-IO one anyway). Sure, I'll add that. } return 0; } -static int __create_hyp_mappings(void *from, void *to, unsigned long *pfn_base) +static int __create_hyp_mappings(pgd_t *pgdp, + unsigned long start, unsigned long end, + unsigned long pfn, pgprot_t prot) { -unsigned long start = (unsigned long)from; -unsigned long end = (unsigned long)to; pgd_t
Re: [PATCH 1/7] ARM: KVM: simplify HYP mapping population
On Tue, Apr 02, 2013 at 02:25:09PM +0100, Marc Zyngier wrote: The way we populate HYP mappings is a bit convoluted, to say the least. Passing a pointer around to keep track of the current PFN is quite odd, and we end-up having two different PTE accessors for no good reason. Simplify the whole thing by unifying the two PTE accessors, passing a pgprot_t around, and moving the various validity checks to the upper layers. Signed-off-by: Marc Zyngier <marc.zyng...@arm.com> --- arch/arm/kvm/mmu.c | 100 ++--- 1 file changed, 41 insertions(+), 59 deletions(-) diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c index 2f12e40..24811d1 100644 --- a/arch/arm/kvm/mmu.c +++ b/arch/arm/kvm/mmu.c @@ -125,54 +125,34 @@ void free_hyp_pmds(void) } static void create_hyp_pte_mappings(pmd_t *pmd, unsigned long start, - unsigned long end) + unsigned long end, unsigned long pfn, + pgprot_t prot) { pte_t *pte; unsigned long addr; - struct page *page; - for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) { - unsigned long hyp_addr = KERN_TO_HYP(addr); - - pte = pte_offset_kernel(pmd, hyp_addr); - BUG_ON(!virt_addr_valid(addr)); - page = virt_to_page(addr); - kvm_set_pte(pte, mk_pte(page, PAGE_HYP)); - } -} - -static void create_hyp_io_pte_mappings(pmd_t *pmd, unsigned long start, -unsigned long end, -unsigned long *pfn_base) -{ - pte_t *pte; - unsigned long addr; - - for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) { - unsigned long hyp_addr = KERN_TO_HYP(addr); - - pte = pte_offset_kernel(pmd, hyp_addr); - BUG_ON(pfn_valid(*pfn_base)); - kvm_set_pte(pte, pfn_pte(*pfn_base, PAGE_HYP_DEVICE)); - (*pfn_base)++; + for (addr = start; addr < end; addr += PAGE_SIZE) { + pte = pte_offset_kernel(pmd, addr); + kvm_set_pte(pte, pfn_pte(pfn, prot)); + pfn++; } } static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start, -unsigned long end, unsigned long *pfn_base) +unsigned long end, unsigned long pfn, +pgprot_t prot) { pmd_t *pmd; pte_t *pte; unsigned long addr, next; for (addr 
= start; addr < end; addr = next) { - unsigned long hyp_addr = KERN_TO_HYP(addr); - pmd = pmd_offset(pud, hyp_addr); + pmd = pmd_offset(pud, addr); BUG_ON(pmd_sect(*pmd)); if (pmd_none(*pmd)) { - pte = pte_alloc_one_kernel(NULL, hyp_addr); + pte = pte_alloc_one_kernel(NULL, addr); if (!pte) { kvm_err("Cannot allocate Hyp pte\n"); return -ENOMEM; @@ -182,25 +162,17 @@ static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start, next = pmd_addr_end(addr, end); - /* - * If pfn_base is NULL, we map kernel pages into HYP with the - * virtual address. Otherwise, this is considered an I/O - * mapping and we map the physical region starting at - * *pfn_base to [start, end[. - */ - if (!pfn_base) - create_hyp_pte_mappings(pmd, addr, next); - else - create_hyp_io_pte_mappings(pmd, addr, next, pfn_base); + create_hyp_pte_mappings(pmd, addr, next, pfn, prot); + pfn += (next - addr) >> PAGE_SHIFT; so this scheme always assumes a physically contiguous memory region, and I wasn't sure if we ever wanted to support mapping vmalloc'ed regions into Hyp mode, which is why I wrote the code the way it was (which goes to your "for no good reason" comment above). I'm fine with assuming that this only works for contiguous regions, but I think it deserves a line in the comments on the exported functions (the non-IO one anyway). } return 0; } -static int __create_hyp_mappings(void *from, void *to, unsigned long *pfn_base) +static int __create_hyp_mappings(pgd_t *pgdp, + unsigned long start, unsigned long end, + unsigned long pfn, pgprot_t prot) { - unsigned long start = (unsigned long)from; - unsigned long end = (unsigned long)to; pgd_t *pgd; pud_t *pud; pmd_t *pmd; @@ -209,21 +181,14 @@ static int __create_hyp_mappings(void *from, void *to, unsigned long *pfn_base) if (start >= end) return -EINVAL; -
[PATCH 1/7] ARM: KVM: simplify HYP mapping population
The way we populate HYP mappings is a bit convoluted, to say the least. Passing a pointer around to keep track of the current PFN is quite odd, and we end-up having two different PTE accessors for no good reason. Simplify the whole thing by unifying the two PTE accessors, passing a pgprot_t around, and moving the various validity checks to the upper layers. Signed-off-by: Marc Zyngier <marc.zyng...@arm.com> --- arch/arm/kvm/mmu.c | 100 ++--- 1 file changed, 41 insertions(+), 59 deletions(-) diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c index 2f12e40..24811d1 100644 --- a/arch/arm/kvm/mmu.c +++ b/arch/arm/kvm/mmu.c @@ -125,54 +125,34 @@ void free_hyp_pmds(void) } static void create_hyp_pte_mappings(pmd_t *pmd, unsigned long start, - unsigned long end) + unsigned long end, unsigned long pfn, + pgprot_t prot) { pte_t *pte; unsigned long addr; - struct page *page; - for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) { - unsigned long hyp_addr = KERN_TO_HYP(addr); - - pte = pte_offset_kernel(pmd, hyp_addr); - BUG_ON(!virt_addr_valid(addr)); - page = virt_to_page(addr); - kvm_set_pte(pte, mk_pte(page, PAGE_HYP)); - } -} - -static void create_hyp_io_pte_mappings(pmd_t *pmd, unsigned long start, - unsigned long end, - unsigned long *pfn_base) -{ - pte_t *pte; - unsigned long addr; - - for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) { - unsigned long hyp_addr = KERN_TO_HYP(addr); - - pte = pte_offset_kernel(pmd, hyp_addr); - BUG_ON(pfn_valid(*pfn_base)); - kvm_set_pte(pte, pfn_pte(*pfn_base, PAGE_HYP_DEVICE)); - (*pfn_base)++; + for (addr = start; addr < end; addr += PAGE_SIZE) { + pte = pte_offset_kernel(pmd, addr); + kvm_set_pte(pte, pfn_pte(pfn, prot)); + pfn++; } } static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start, - unsigned long end, unsigned long *pfn_base) + unsigned long end, unsigned long pfn, + pgprot_t prot) { pmd_t *pmd; pte_t *pte; unsigned long addr, next; for (addr = start; addr < end; addr = next) { - unsigned long 
hyp_addr = KERN_TO_HYP(addr); - pmd = pmd_offset(pud, hyp_addr); + pmd = pmd_offset(pud, addr); BUG_ON(pmd_sect(*pmd)); if (pmd_none(*pmd)) { - pte = pte_alloc_one_kernel(NULL, hyp_addr); + pte = pte_alloc_one_kernel(NULL, addr); if (!pte) { kvm_err("Cannot allocate Hyp pte\n"); return -ENOMEM; @@ -182,25 +162,17 @@ static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start, next = pmd_addr_end(addr, end); - /* -* If pfn_base is NULL, we map kernel pages into HYP with the -* virtual address. Otherwise, this is considered an I/O -* mapping and we map the physical region starting at -* *pfn_base to [start, end[. -*/ - if (!pfn_base) - create_hyp_pte_mappings(pmd, addr, next); - else - create_hyp_io_pte_mappings(pmd, addr, next, pfn_base); + create_hyp_pte_mappings(pmd, addr, next, pfn, prot); + pfn += (next - addr) >> PAGE_SHIFT; } return 0; } -static int __create_hyp_mappings(void *from, void *to, unsigned long *pfn_base) +static int __create_hyp_mappings(pgd_t *pgdp, +unsigned long start, unsigned long end, +unsigned long pfn, pgprot_t prot) { - unsigned long start = (unsigned long)from; - unsigned long end = (unsigned long)to; pgd_t *pgd; pud_t *pud; pmd_t *pmd; @@ -209,21 +181,14 @@ static int __create_hyp_mappings(void *from, void *to, unsigned long *pfn_base) if (start >= end) return -EINVAL; - /* Check for a valid kernel memory mapping */ - if (!pfn_base && (!virt_addr_valid(from) || !virt_addr_valid(to - 1))) - return -EINVAL; - /* Check for a valid kernel IO mapping */ - if (pfn_base && (!is_vmalloc_addr(from) || !is_vmalloc_addr(to - 1))) - return -EINVAL; mutex_lock(&kvm_hyp_pgd_mutex); - for (addr = start; addr < end; addr = next) { - unsigned long hyp_addr = KERN_TO_HYP(addr); -