In order to use 512k pages with hardware assistance, the PTEs have to be
spread every 128 bytes in the L2 table.

To that end, the huge page directory for 512k pages is allocated from the
same pgtable cache as a standard L2 page table (PGT_CACHE(PTE_SHIFT)), and
hugepte_offset() indexes it by the base page number instead of by the huge
page shift. A 512k entry is also added to mmu_psize_defs.
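
As an illustration only (not part of the patch), here is a minimal
user-space sketch of that index arithmetic. PAGE_SHIFT, PDSHIFT, PTE_SIZE
and hugepte_idx() below are illustrative assumptions (16k base pages, one
L2 table per 4M region, 4-byte PTEs), chosen so that the numbers match the
128-byte spacing described above:

  #include <stdio.h>

  #define PAGE_SHIFT	14		/* assumption: 16k base pages */
  #define PDSHIFT	22		/* assumption: one L2 table covers 4M */
  #define PTE_SIZE	4		/* assumption: 4-byte PTEs */
  #define SZ_512K	(512 * 1024UL)

  /* same formula as the CONFIG_PPC_8xx case of hugepte_offset() below */
  static unsigned long hugepte_idx(unsigned long addr, unsigned int pdshift)
  {
  	return (addr & ((1UL << pdshift) - 1)) >> PAGE_SHIFT;
  }

  int main(void)
  {
  	unsigned long addr;

  	/* consecutive 512k pages land 32 slots, i.e. 128 bytes, apart */
  	for (addr = 0; addr < 4 * SZ_512K; addr += SZ_512K)
  		printf("addr 0x%08lx -> idx %3lu, byte offset %4lu\n",
  		       addr, hugepte_idx(addr, PDSHIFT),
  		       hugepte_idx(addr, PDSHIFT) * PTE_SIZE);

  	return 0;
  }

Under those assumptions the output is indices 0, 32, 64, 96, i.e. byte
offsets 0, 128, 256, 384.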

Signed-off-by: Christophe Leroy <christophe.le...@c-s.fr>
---
 arch/powerpc/include/asm/hugetlb.h |  4 +++-
 arch/powerpc/mm/hugetlbpage.c      | 13 +++++++++++++
 arch/powerpc/mm/tlb_nohash.c       |  3 +++
 3 files changed, 19 insertions(+), 1 deletion(-)

diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h
index dfb8bf236586..62a0ca02ca7d 100644
--- a/arch/powerpc/include/asm/hugetlb.h
+++ b/arch/powerpc/include/asm/hugetlb.h
@@ -74,7 +74,9 @@ static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
        unsigned long idx = 0;
 
        pte_t *dir = hugepd_page(hpd);
-#ifndef CONFIG_PPC_FSL_BOOK3E
+#ifdef CONFIG_PPC_8xx
+       idx = (addr & ((1UL << pdshift) - 1)) >> PAGE_SHIFT;
+#elif !defined(CONFIG_PPC_FSL_BOOK3E)
        idx = (addr & ((1UL << pdshift) - 1)) >> hugepd_shift(hpd);
 #endif
 
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index bc97874d7c74..d0b92a0a072d 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -66,7 +66,11 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
                cachep = PGT_CACHE(PTE_T_ORDER);
                num_hugepd = 1 << (pshift - pdshift);
        } else {
+#ifdef CONFIG_PPC_8xx
+               cachep = PGT_CACHE(PTE_SHIFT);
+#else
                cachep = PGT_CACHE(pdshift - pshift);
+#endif
                num_hugepd = 1;
        }
 
@@ -332,8 +336,13 @@ static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshif
        if (shift >= pdshift)
                hugepd_free(tlb, hugepte);
        else
+#ifdef CONFIG_PPC_8xx
+               pgtable_free_tlb(tlb, hugepte,
+                                get_hugepd_cache_index(PTE_SHIFT));
+#else
                pgtable_free_tlb(tlb, hugepte,
                                 get_hugepd_cache_index(pdshift - shift));
+#endif
 }
 
 static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
@@ -701,7 +710,11 @@ static int __init hugetlbpage_init(void)
                 * use pgt cache for hugepd.
                 */
                if (pdshift > shift)
+#ifdef CONFIG_PPC_8xx
+                       pgtable_cache_add(PTE_SHIFT);
+#else
                        pgtable_cache_add(pdshift - shift);
+#endif
 #if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx)
                else
                        pgtable_cache_add(PTE_T_ORDER);
diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c
index 8ad7aab150b7..ae5d568e267f 100644
--- a/arch/powerpc/mm/tlb_nohash.c
+++ b/arch/powerpc/mm/tlb_nohash.c
@@ -97,6 +97,9 @@ struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
                .shift  = 14,
        },
 #endif
+       [MMU_PAGE_512K] = {
+               .shift  = 19,
+       },
        [MMU_PAGE_8M] = {
                .shift  = 23,
        },
-- 
2.13.3
