[5/5] Split hash MMU specific hugepage code into a new file
This patch separates the parts of hugetlbpage.c which are inherently
specific to the hash MMU into a new hugetlbpage-hash64.c file.

Signed-off-by: David Gibson

---
 arch/powerpc/include/asm/hugetlb.h   |    3 
 arch/powerpc/mm/Makefile             |    5 -
 arch/powerpc/mm/hugetlbpage-hash64.c |  167 ++
 arch/powerpc/mm/hugetlbpage.c        |  168 ---
 4 files changed, 176 insertions(+), 167 deletions(-)

Index: working-2.6/arch/powerpc/mm/Makefile
===================================================================
--- working-2.6.orig/arch/powerpc/mm/Makefile	2009-09-28 13:51:57.000000000 +1000
+++ working-2.6/arch/powerpc/mm/Makefile	2009-09-28 13:53:21.000000000 +1000
@@ -28,7 +28,10 @@ obj-$(CONFIG_44x)		+= 44x_mmu.o
 obj-$(CONFIG_FSL_BOOKE)		+= fsl_booke_mmu.o
 obj-$(CONFIG_NEED_MULTIPLE_NODES) += numa.o
 obj-$(CONFIG_PPC_MM_SLICES)	+= slice.o
-obj-$(CONFIG_HUGETLB_PAGE)	+= hugetlbpage.o
+ifeq ($(CONFIG_HUGETLB_PAGE),y)
+obj-y				+= hugetlbpage.o
+obj-$(CONFIG_PPC_STD_MMU_64)	+= hugetlbpage-hash64.o
+endif
 obj-$(CONFIG_PPC_SUBPAGE_PROT)	+= subpage-prot.o
 obj-$(CONFIG_NOT_COHERENT_CACHE) += dma-noncoherent.o
 obj-$(CONFIG_HIGHMEM)		+= highmem.o
Index: working-2.6/arch/powerpc/mm/hugetlbpage-hash64.c
===================================================================
--- /dev/null	1970-01-01 00:00:00.000000000 +0000
+++ working-2.6/arch/powerpc/mm/hugetlbpage-hash64.c	2009-09-28 13:53:21.000000000 +1000
@@ -0,0 +1,167 @@
+/*
+ * PPC64 Huge TLB Page Support for hash based MMUs (POWER4 and later)
+ *
+ * Copyright (C) 2003 David Gibson, IBM Corporation.
+ *
+ * Based on the IA-32 version:
+ * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
+ */
+
+#include <linux/mm.h>
+#include <linux/hugetlb.h>
+#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
+#include <asm/cacheflush.h>
+#include <asm/machdep.h>
+
+/*
+ * Called by asm hashtable.S for doing lazy icache flush
+ */
+static unsigned int hash_huge_page_do_lazy_icache(unsigned long rflags,
+						  pte_t pte, int trap,
+						  unsigned long sz)
+{
+	struct page *page;
+	int i;
+
+	if (!pfn_valid(pte_pfn(pte)))
+		return rflags;
+
+	page = pte_page(pte);
+
+	/* page is dirty */
+	if (!test_bit(PG_arch_1, &page->flags) && !PageReserved(page)) {
+		if (trap == 0x400) {
+			for (i = 0; i < (sz / PAGE_SIZE); i++)
+				__flush_dcache_icache(page_address(page+i));
+			set_bit(PG_arch_1, &page->flags);
+		} else {
+			rflags |= HPTE_R_N;
+		}
+	}
+	return rflags;
+}
+
+int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
+		     pte_t *ptep, unsigned long trap, int local, int ssize,
+		     unsigned int shift, unsigned int mmu_psize)
+{
+	unsigned long old_pte, new_pte;
+	unsigned long va, rflags, pa, sz;
+	long slot;
+	int err = 1;
+
+	BUG_ON(shift != mmu_psize_defs[mmu_psize].shift);
+
+	/* Search the Linux page table for a match with va */
+	va = hpt_va(ea, vsid, ssize);
+
+	/*
+	 * Check the user's access rights to the page.  If access should be
+	 * prevented then send the problem up to do_page_fault.
+	 */
+	if (unlikely(access & ~pte_val(*ptep)))
+		goto out;
+	/*
+	 * At this point, we have a pte (old_pte) which can be used to build
+	 * or update an HPTE. There are 2 cases:
+	 *
+	 * 1. There is a valid (present) pte with no associated HPTE (this is
+	 *	the most common case)
+	 * 2. There is a valid (present) pte with an associated HPTE. The
+	 *	current values of the pp bits in the HPTE prevent access
+	 *	because we are doing software DIRTY bit management and the
+	 *	page is currently not DIRTY.
+	 */
+
+
+	do {
+		old_pte = pte_val(*ptep);
+		if (old_pte & _PAGE_BUSY)
+			goto out;
+		new_pte = old_pte | _PAGE_BUSY | _PAGE_ACCESSED;
+	} while(old_pte != __cmpxchg_u64((unsigned long *)ptep,
+					 old_pte, new_pte));
+
+	rflags = 0x2 | (!(new_pte & _PAGE_RW));
+	/* _PAGE_EXEC -> HW_NO_EXEC since it's inverted */
+	rflags |= ((new_pte & _PAGE_EXEC) ? 0 : HPTE_R_N);
+	sz = ((1UL) << shift);
+	if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
+		/* No CPU has hugepages but lacks no execute, so we
+		 * don't need to worry about that case */
+		rflags = hash_huge_page_do_lazy_icache(rflags, __pte(old_pte),
+						       trap, sz);
+
+	/* Check if pte already has an hpte (case 2) */
+	if (unlikely(old_pte & _PAGE_HASHPTE)) {
+		/* There MIGHT be an HPTE for this pte */
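
A note on the update loop above: _PAGE_BUSY acts as a per-pte lock bit,
claimed with a 64-bit compare-and-swap so that a concurrent hash fault
backs off instead of racing. The following standalone userspace sketch
shows the same claim pattern; the MY_* bit values and the
my_cmpxchg_u64() wrapper are illustrative stand-ins for the kernel's
_PAGE_* definitions and __cmpxchg_u64(), not the real ones.

#include <stdio.h>

#define MY_PAGE_BUSY		0x0800UL	/* assumed software "busy" lock bit */
#define MY_PAGE_ACCESSED	0x0008UL	/* assumed software referenced bit */

/* Stand-in for the kernel's __cmpxchg_u64(): atomically store 'new' if
 * *p still equals 'old', and return the value *p held beforehand. */
static unsigned long my_cmpxchg_u64(unsigned long *p,
				    unsigned long old, unsigned long new)
{
	return __sync_val_compare_and_swap(p, old, new);
}

/* Claim a pte for update; 0 on success, -1 if someone else holds it. */
static int claim_pte(unsigned long *ptep, unsigned long *old_out)
{
	unsigned long old_pte, new_pte;

	do {
		old_pte = *ptep;
		if (old_pte & MY_PAGE_BUSY)	/* already claimed: back off */
			return -1;
		new_pte = old_pte | MY_PAGE_BUSY | MY_PAGE_ACCESSED;
	} while (old_pte != my_cmpxchg_u64(ptep, old_pte, new_pte));

	*old_out = old_pte;
	return 0;
}

int main(void)
{
	unsigned long pte = 0x1UL;	/* some pre-existing pte bits */
	unsigned long old;

	if (claim_pte(&pte, &old) == 0)
		printf("claimed: old=%#lx new=%#lx\n", old, pte);
	return 0;
}

Built with gcc, claim_pte() either returns with the busy and accessed
bits set, or bails out the way the goto out path does in the patch.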
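The rflags derivation is also worth spelling out: PP=0b10 grants user
read/write, OR-ing in 1 demotes that to read-only when the Linux pte
lacks _PAGE_RW, and HPTE_R_N (no-execute) is set when _PAGE_EXEC is
clear, since the Linux and HPTE execute bits have opposite sense. A
minimal sketch under assumed flag values (the MY_* constants are
hypothetical, not the powerpc definitions):

#include <stdio.h>

#define MY_PAGE_RW	0x0004UL	/* assumed Linux pte write bit */
#define MY_PAGE_EXEC	0x0200UL	/* assumed Linux pte execute bit */
#define MY_HPTE_R_N	0x0004UL	/* assumed HPTE no-execute bit */

static unsigned long pte_to_rflags(unsigned long pte)
{
	/* PP=0b10 is user read/write; OR-ing in 1 gives PP=0b11,
	 * user read-only, when the pte lacks the write bit. */
	unsigned long rflags = 0x2 | !(pte & MY_PAGE_RW);

	/* The execute bit is inverted between the two formats: set
	 * no-execute in the HPTE when the pte is not executable. */
	rflags |= (pte & MY_PAGE_EXEC) ? 0 : MY_HPTE_R_N;
	return rflags;
}

int main(void)
{
	printf("rw+exec : %#lx\n", pte_to_rflags(MY_PAGE_RW | MY_PAGE_EXEC));
	printf("ro,noex : %#lx\n", pte_to_rflags(0));
	return 0;
}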