Re: [PATCH v24 15/30] x86/mm: Update maybe_mkwrite() for shadow stack

2021-04-09 Thread Kirill A. Shutemov
On Thu, Apr 01, 2021 at 03:10:49PM -0700, Yu-cheng Yu wrote:
> When serving a page fault, maybe_mkwrite() makes a PTE writable if its vma
> has VM_WRITE.
> 
> A shadow stack vma has VM_SHADOW_STACK.  Its PTEs have _PAGE_DIRTY, but not
> _PAGE_RW.  In fork(), _PAGE_DIRTY is cleared to cause copy-on-write, and in
> the page fault handler, _PAGE_DIRTY is restored, making the page a valid
> shadow stack again.
> 
> Introduce an x86 version of maybe_mkwrite(), which sets the proper PTE
> bits according to the vma's flags.
> 
> Apply the same changes to maybe_pmd_mkwrite().
> 
> Signed-off-by: Yu-cheng Yu 
> Cc: Kees Cook 
> Cc: Kirill A. Shutemov 
> ---
> v24:
> - Instead of adding arch_maybe_mkwrite(), override maybe*_mkwrite() with
>   x86 versions.
> - Change VM_SHSTK to VM_SHADOW_STACK.
> 
>  arch/x86/include/asm/pgtable.h |  8 ++++++++
>  arch/x86/mm/pgtable.c          | 20 ++++++++++++++++++++
>  include/linux/mm.h             |  2 ++
>  mm/huge_memory.c               |  2 ++
>  4 files changed, 32 insertions(+)
> 
> diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
> index 46d9394b884f..51cdf14488b7 100644
> --- a/arch/x86/include/asm/pgtable.h
> +++ b/arch/x86/include/asm/pgtable.h
> @@ -1686,6 +1686,14 @@ static inline bool arch_faults_on_old_pte(void)
>   return false;
>  }
>  
> +#define maybe_mkwrite maybe_mkwrite
> +extern pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma);
> +
> +#ifdef CONFIG_TRANSPARENT_HUGEPAGE
> +#define maybe_pmd_mkwrite maybe_pmd_mkwrite
> +extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
> +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

Move it next to the other THP-dependent stuff.
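
I.e., fold the pmd prototype into the existing CONFIG_TRANSPARENT_HUGEPAGE
section of pgtable.h instead of opening a new #ifdef block at the end of the
file.  A rough sketch of the suggested layout -- the placeholder comment
stands in for whatever THP-only declarations already live there, not the
actual file contents:

#define maybe_mkwrite maybe_mkwrite
extern pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* ... existing THP-only declarations ... */

#define maybe_pmd_mkwrite maybe_pmd_mkwrite
extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */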

Otherwise looks good to me:

Reviewed-by: Kirill A. Shutemov 

-- 
 Kirill A. Shutemov


[PATCH v24 15/30] x86/mm: Update maybe_mkwrite() for shadow stack

2021-04-01 Thread Yu-cheng Yu
When serving a page fault, maybe_mkwrite() makes a PTE writable if its vma
has VM_WRITE.

A shadow stack vma has VM_SHADOW_STACK.  Its PTEs have _PAGE_DIRTY, but not
_PAGE_RW.  In fork(), _PAGE_DIRTY is cleared to cause copy-on-write, and in
the page fault handler, _PAGE_DIRTY is restored, making the page a valid
shadow stack again.

Introduce an x86 version of maybe_mkwrite(), which sets the proper PTE
bits according to the vma's flags.

Apply the same changes to maybe_pmd_mkwrite().
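
[For reference: the pte_mkwrite_shstk()/pmd_mkwrite_shstk() helpers used
below are introduced earlier in this series.  What follows is a minimal
sketch of the assumed semantics, not the actual helper: a shadow stack PTE
is Write=0, Dirty=1, so making a page a live shadow stack again only needs
_PAGE_DIRTY set back while _PAGE_RW stays clear.  The real helper may also
manage software bits that tell shadow stack pages apart from ordinary
copy-on-write pages.]

static inline pte_t pte_mkwrite_shstk(pte_t pte)
{
	/*
	 * Sketch only: restore _PAGE_DIRTY after the fork() copy-on-write
	 * dance.  _PAGE_RW is left clear; Write=0 + Dirty=1 is what makes
	 * the CPU treat the page as shadow stack rather than ordinary
	 * writable memory.
	 */
	return pte_set_flags(pte, _PAGE_DIRTY);
}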

Signed-off-by: Yu-cheng Yu 
Cc: Kees Cook 
Cc: Kirill A. Shutemov 
---
v24:
- Instead of adding arch_maybe_mkwrite(), override maybe*_mkwrite() with
  x86 versions.
- Change VM_SHSTK to VM_SHADOW_STACK.

 arch/x86/include/asm/pgtable.h |  8 ++++++++
 arch/x86/mm/pgtable.c          | 20 ++++++++++++++++++++
 include/linux/mm.h             |  2 ++
 mm/huge_memory.c               |  2 ++
 4 files changed, 32 insertions(+)

diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 46d9394b884f..51cdf14488b7 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -1686,6 +1686,14 @@ static inline bool arch_faults_on_old_pte(void)
 	return false;
 }
 
+#define maybe_mkwrite maybe_mkwrite
+extern pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma);
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define maybe_pmd_mkwrite maybe_pmd_mkwrite
+extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* _ASM_X86_PGTABLE_H */
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index f6a9e2e36642..e778dbbef3d8 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -610,6 +610,26 @@ int pmdp_clear_flush_young(struct vm_area_struct *vma,
 }
 #endif
 
+pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
+{
+	if (likely(vma->vm_flags & VM_WRITE))
+		pte = pte_mkwrite(pte);
+	else if (likely(vma->vm_flags & VM_SHADOW_STACK))
+		pte = pte_mkwrite_shstk(pte);
+	return pte;
+}
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
+{
+	if (likely(vma->vm_flags & VM_WRITE))
+		pmd = pmd_mkwrite(pmd);
+	else if (likely(vma->vm_flags & VM_SHADOW_STACK))
+		pmd = pmd_mkwrite_shstk(pmd);
+	return pmd;
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
 /**
  * reserve_top_address - reserves a hole in the top of kernel address space
  * @reserve - size of hole to reserve
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 08282eb2f195..6ac9b3e9a865 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -993,12 +993,14 @@ void free_compound_page(struct page *page);
  * pte_mkwrite.  But get_user_pages can cause write faults for mappings
  * that do not have writing enabled, when used by access_process_vm.
  */
+#ifndef maybe_mkwrite
 static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
 {
 	if (likely(vma->vm_flags & VM_WRITE))
 		pte = pte_mkwrite(pte);
 	return pte;
 }
+#endif
 
 vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page);
 void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr);
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index ae907a9c2050..8203bd6ae4bd 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -478,12 +478,14 @@ static int __init setup_transparent_hugepage(char *str)
 }
 __setup("transparent_hugepage=", setup_transparent_hugepage);
 
+#ifndef maybe_pmd_mkwrite
 pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
 {
 	if (likely(vma->vm_flags & VM_WRITE))
 		pmd = pmd_mkwrite(pmd);
 	return pmd;
 }
+#endif
 
 #ifdef CONFIG_MEMCG
 static inline struct deferred_split *get_deferred_split_queue(struct page *page)
-- 
2.21.0