NestMMU requires us to mark the pte invalid and flush the tlb when we do
a RW upgrade of a pte. We fixed a variant of this in the fault path in
commit bd5050e38aec ("powerpc/mm/radix: Change pte relax sequence to
handle nest MMU hang").

Signed-off-by: Aneesh Kumar K.V <aneesh.ku...@linux.ibm.com>
---
 arch/powerpc/include/asm/book3s/64/hugetlb.h |  8 +++++
 arch/powerpc/include/asm/hugetlb.h           |  2 +-
 arch/powerpc/mm/hugetlbpage.c                | 35 ++++++++++++++++++++
 3 files changed, 44 insertions(+), 1 deletion(-)

diff --git a/arch/powerpc/include/asm/book3s/64/hugetlb.h b/arch/powerpc/include/asm/book3s/64/hugetlb.h
index 5b0177733994..a12bde29a5f0 100644
--- a/arch/powerpc/include/asm/book3s/64/hugetlb.h
+++ b/arch/powerpc/include/asm/book3s/64/hugetlb.h
@@ -42,4 +42,12 @@ static inline bool gigantic_page_supported(void)
 /* hugepd entry valid bit */
 #define HUGEPD_VAL_BITS                (0x8000000000000000UL)
 
+#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
+extern pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
+                                        unsigned long addr, pte_t *ptep);
+
+#define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit
+extern void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
+                                        unsigned long addr, pte_t *ptep,
+                                        pte_t old_pte, pte_t new_pte);
 #endif
diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h
index 2d00cc530083..60c1d37e446a 100644
--- a/arch/powerpc/include/asm/hugetlb.h
+++ b/arch/powerpc/include/asm/hugetlb.h
@@ -4,7 +4,6 @@
 
 #ifdef CONFIG_HUGETLB_PAGE
 #include <asm/page.h>
-#include <asm-generic/hugetlb.h>
 
 extern struct kmem_cache *hugepte_cache;
 
@@ -176,6 +175,7 @@ static inline void arch_clear_hugepage_flags(struct page *page)
 {
 }
 
+#include <asm-generic/hugetlb.h>
 #else /* ! CONFIG_HUGETLB_PAGE */
 static inline void flush_hugetlb_page(struct vm_area_struct *vma,
                                      unsigned long vmaddr)
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index a7226ed9cae6..8b098bedaff5 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -913,3 +913,38 @@ int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
 
        return 1;
 }
+
+#ifdef CONFIG_PPC_BOOK3S_64
+pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
+                                 unsigned long addr, pte_t *ptep)
+{
+       unsigned long pte_val;
+       /*
+        * Clear the _PAGE_PRESENT so that no hardware parallel update is
+        * possible. Also keep pte_present true so that we don't take a
+        * wrong fault.
+        */
+       pte_val = pte_update(vma->vm_mm, addr, ptep,
+                            _PAGE_PRESENT, _PAGE_INVALID, 1);
+
+       return __pte(pte_val);
+}
+EXPORT_SYMBOL(huge_ptep_modify_prot_start);
+
+void huge_ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
+                                 pte_t *ptep, pte_t old_pte, pte_t pte)
+{
+       struct mm_struct *mm = vma->vm_mm;
+
+       /*
+        * To avoid NMMU hang while relaxing access, we need to flush the tlb
+        * before we set the new value.
+        */
+       if (is_pte_upgrade(pte_val(old_pte), pte_val(pte)) &&
+           (atomic_read(&mm->context.copros) > 0))
+               flush_hugetlb_page(vma, addr);
+
+       set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
+}
+EXPORT_SYMBOL(huge_ptep_modify_prot_commit);
+#endif
-- 
2.17.1
