Re: [PATCH 3.16 226/328] x86/mm: Use WRITE_ONCE() when setting PTEs

2018-12-16 Thread Ben Hutchings
On Sun, 2018-12-09 at 21:57 +, Nadav Amit wrote:
> This patch causes some sparse warnings. If you want to wait, I’ll send a
> patch to fix them. (No functional impact is expected, though.)

Thanks for the note.  I don't think that's enough of a reason to delay
this fix.

Ben.

> 
> > On Dec 9, 2018, at 1:50 PM, Ben Hutchings  wrote:
> > 
> > 3.16.62-rc1 review patch.  If anyone has any objections, please let me know.
> > 
> > --
> > 
> > From: Nadav Amit 
> > 
> > commit 9bc4f28af75a91aea0ae383f50b0a430c4509303 upstream.
> > 
> > When page-table entries are set, the compiler might optimize their
> > assignment by using multiple instructions to set the PTE. This might
> > turn into a security hazard if the user somehow manages to use the
> > interim PTE. L1TF does not make our lives easier, making even an interim
> > non-present PTE a security hazard.
> > 
> > Using WRITE_ONCE() to set PTEs and friends should prevent this potential
> > security hazard.
> > 
> > I skimmed the differences in the binary with and without this patch. The
> > differences are (obviously) greater when CONFIG_PARAVIRT=n as more
> > code optimizations are possible. For better and worse, the impact on the
> > binary with this patch is pretty small. Skimming the code did not cause
> > anything to jump out as a security hazard, but it seems that at least
> > move_soft_dirty_pte() caused set_pte_at() to use multiple writes.
> > 
> > Signed-off-by: Nadav Amit 
> > Signed-off-by: Thomas Gleixner 
> > Acked-by: Peter Zijlstra (Intel) 
> > Cc: Dave Hansen 
> > Cc: Andi Kleen 
> > Cc: Josh Poimboeuf 
> > Cc: Michal Hocko 
> > Cc: Vlastimil Babka 
> > Cc: Sean Christopherson 
> > Cc: Andy Lutomirski 
> > Link: https://lkml.kernel.org/r/20180902181451.80520-1-namit@vmware.com
> > [bwh: Backported to 3.16:
> > - Use ACCESS_ONCE() instead of WRITE_ONCE()
> > - Drop changes in pmdp_establish(), native_set_p4d(), pudp_set_access_flags()]
> > Signed-off-by: Ben Hutchings 
> > ---
> > --- a/arch/x86/include/asm/pgtable_64.h
> > +++ b/arch/x86/include/asm/pgtable_64.h
> > @@ -44,15 +44,15 @@ struct mm_struct;
> > void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte);
> > 
> > 
> > -static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
> > -   pte_t *ptep)
> > +static inline void native_set_pte(pte_t *ptep, pte_t pte)
> > {
> > -   *ptep = native_make_pte(0);
> > +   ACCESS_ONCE(*ptep) = pte;
> > }
> > 
> > -static inline void native_set_pte(pte_t *ptep, pte_t pte)
> > +static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
> > +   pte_t *ptep)
> > {
> > -   *ptep = pte;
> > +   native_set_pte(ptep, native_make_pte(0));
> > }
> > 
> > static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
> > @@ -62,7 +62,7 @@ static inline void native_set_pte_atomic
> > 
> > static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
> > {
> > -   *pmdp = pmd;
> > +   ACCESS_ONCE(*pmdp) = pmd;
> > }
> > 
> > static inline void native_pmd_clear(pmd_t *pmd)
> > @@ -98,7 +98,7 @@ static inline pmd_t native_pmdp_get_and_
> > 
> > static inline void native_set_pud(pud_t *pudp, pud_t pud)
> > {
> > -   *pudp = pud;
> > +   ACCESS_ONCE(*pudp) = pud;
> > }
> > 
> > static inline void native_pud_clear(pud_t *pud)
> > @@ -131,7 +131,7 @@ static inline pgd_t *native_get_shadow_p
> > 
> > static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
> > {
> > -   *pgdp = kaiser_set_shadow_pgd(pgdp, pgd);
> > +   ACCESS_ONCE(*pgdp) = kaiser_set_shadow_pgd(pgdp, pgd);
> > }
> > 
> > static inline void native_pgd_clear(pgd_t *pgd)
> > --- a/arch/x86/mm/pgtable.c
> > +++ b/arch/x86/mm/pgtable.c
> > @@ -242,7 +242,7 @@ static void pgd_mop_up_pmds(struct mm_st
> > if (pgd_val(pgd) != 0) {
> > pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
> > 
> > -   pgdp[i] = native_make_pgd(0);
> > +   pgd_clear(&pgdp[i]);
> > 
> > paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
> > pmd_free(mm, pmd);
> > @@ -352,7 +352,7 @@ int ptep_set_access_flags(struct vm_area
> > int changed = !pte_same(*ptep, entry);
> > 
> > if (changed && dirty) {
> > -   *ptep = entry;
> > +   set_pte(ptep, entry);
> > pte_update_defer(vma->vm_mm, address, ptep);
> > }
> > 
> > @@ -369,7 +369,7 @@ int pmdp_set_access_flags(struct vm_area
> > VM_BUG_ON(address & ~HPAGE_PMD_MASK);
> > 
> > if (changed && dirty) {
> > -   *pmdp = entry;
> > +   set_pmd(pmdp, entry);
> > pmd_update_defer(vma->vm_mm, address, pmdp);
> > 

[PATCH 3.16 226/328] x86/mm: Use WRITE_ONCE() when setting PTEs

2018-12-09 Thread Ben Hutchings
3.16.62-rc1 review patch.  If anyone has any objections, please let me know.

--

From: Nadav Amit 

commit 9bc4f28af75a91aea0ae383f50b0a430c4509303 upstream.

When page-table entries are set, the compiler might optimize their
assignment by using multiple instructions to set the PTE. This might
turn into a security hazard if the user somehow manages to use the
interim PTE. L1TF does not make our lives easier, making even an interim
non-present PTE a security hazard.

Using WRITE_ONCE() to set PTEs and friends should prevent this potential
security hazard.
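
To make the hazard concrete, here is a minimal sketch in plain C (the
names are illustrative, not the kernel's): a plain word-sized
assignment leaves the compiler free to emit multiple stores, while the
volatile cast that WRITE_ONCE() boils down to for word-sized types
forces a single access.

#include <stdint.h>

/* Plain assignment: the compiler may tear this into several stores,
 * briefly exposing a half-written entry to a concurrent observer
 * (here, the hardware page walker). */
static inline void set_entry_plain(uint64_t *ptep, uint64_t pte)
{
	*ptep = pte;
}

/* Volatile store: one access; the compiler may not split or
 * duplicate the store itself. */
static inline void set_entry_once(uint64_t *ptep, uint64_t pte)
{
	*(volatile uint64_t *)ptep = pte;
}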

I skimmed the differences in the binary with and without this patch. The
differences are (obviously) greater when CONFIG_PARAVIRT=n as more
code optimizations are possible. For better and worse, the impact on the
binary with this patch is pretty small. Skimming the code did not cause
anything to jump out as a security hazard, but it seems that at least
move_soft_dirty_pte() caused set_pte_at() to use multiple writes.
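
To picture what that means at the instruction level, here is a
hypothetical x86-64 sequence (illustrative only, not taken from the
binaries described above):

/*
 *   Torn store of a 64-bit PTE (two 32-bit immediate moves):
 *       movl $0x00000067, (%rdi)    ; low half, present bit already set
 *       movl $0x00001234, 4(%rdi)   ; high half lands later
 *
 *   Single store, as forced by WRITE_ONCE():
 *       movq %rsi, (%rdi)
 */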

Signed-off-by: Nadav Amit 
Signed-off-by: Thomas Gleixner 
Acked-by: Peter Zijlstra (Intel) 
Cc: Dave Hansen 
Cc: Andi Kleen 
Cc: Josh Poimboeuf 
Cc: Michal Hocko 
Cc: Vlastimil Babka 
Cc: Sean Christopherson 
Cc: Andy Lutomirski 
Link: https://lkml.kernel.org/r/20180902181451.80520-1-namit@vmware.com
[bwh: Backported to 3.16:
 - Use ACCESS_ONCE() instead of WRITE_ONCE()
 - Drop changes in pmdp_establish(), native_set_p4d(), pudp_set_access_flags()]
Signed-off-by: Ben Hutchings 
---
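For reference, this is a sketch of the classic 3.16-era definition the
backport relies on (see include/linux/compiler.h in the target tree for
the authoritative version). Used as an lvalue, ACCESS_ONCE(*ptep) = pte
compiles to a volatile store, the closest 3.16 equivalent of the
upstream WRITE_ONCE():

/* Treat the object as volatile for this one access. */
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
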
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -44,15 +44,15 @@ struct mm_struct;
 void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte);
 
 
-static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
-				    pte_t *ptep)
+static inline void native_set_pte(pte_t *ptep, pte_t pte)
 {
-	*ptep = native_make_pte(0);
+	ACCESS_ONCE(*ptep) = pte;
 }
 
-static inline void native_set_pte(pte_t *ptep, pte_t pte)
+static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
+				    pte_t *ptep)
 {
-	*ptep = pte;
+	native_set_pte(ptep, native_make_pte(0));
 }
 
 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
@@ -62,7 +62,7 @@ static inline void native_set_pte_atomic
 
 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
 {
-	*pmdp = pmd;
+	ACCESS_ONCE(*pmdp) = pmd;
 }
 
 static inline void native_pmd_clear(pmd_t *pmd)
@@ -98,7 +98,7 @@ static inline pmd_t native_pmdp_get_and_
 
 static inline void native_set_pud(pud_t *pudp, pud_t pud)
 {
-	*pudp = pud;
+	ACCESS_ONCE(*pudp) = pud;
 }
 
 static inline void native_pud_clear(pud_t *pud)
@@ -131,7 +131,7 @@ static inline pgd_t *native_get_shadow_p
 
 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
 {
-	*pgdp = kaiser_set_shadow_pgd(pgdp, pgd);
+	ACCESS_ONCE(*pgdp) = kaiser_set_shadow_pgd(pgdp, pgd);
 }
 
 static inline void native_pgd_clear(pgd_t *pgd)
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -242,7 +242,7 @@ static void pgd_mop_up_pmds(struct mm_st
 		if (pgd_val(pgd) != 0) {
 			pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
 
-			pgdp[i] = native_make_pgd(0);
+			pgd_clear(&pgdp[i]);
 
 			paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
 			pmd_free(mm, pmd);
@@ -352,7 +352,7 @@ int ptep_set_access_flags(struct vm_area
 	int changed = !pte_same(*ptep, entry);
 
 	if (changed && dirty) {
-		*ptep = entry;
+		set_pte(ptep, entry);
 		pte_update_defer(vma->vm_mm, address, ptep);
 	}
 
@@ -369,7 +369,7 @@ int pmdp_set_access_flags(struct vm_area
 	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
 
 	if (changed && dirty) {
-		*pmdp = entry;
+		set_pmd(pmdp, entry);
 		pmd_update_defer(vma->vm_mm, address, pmdp);
 		/*
 		 * We had a write-protection fault here and changed the pmd
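
For completeness, a sketch of why the pgtable.c hunks go through the
accessors rather than open-coding ACCESS_ONCE(): assuming
CONFIG_PARAVIRT=n, the generic helpers map straight onto the native_*
functions patched above, so every path funnels into the same volatile
store.

/*
 * set_pte(ptep, entry)  ->  native_set_pte(ptep, entry)
 *                       ->  ACCESS_ONCE(*ptep) = entry;
 * set_pmd(pmdp, entry)  ->  native_set_pmd(pmdp, entry)
 *                       ->  ACCESS_ONCE(*pmdp) = entry;
 * pgd_clear(&pgdp[i])   ->  native_pgd_clear(&pgdp[i])
 *                       ->  native_set_pgd(&pgdp[i], native_make_pgd(0));
 */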



Re: [PATCH 3.16 226/328] x86/mm: Use WRITE_ONCE() when setting PTEs

2018-12-09 Thread Nadav Amit
This patch causes some sparse warnings. If you want to wait, I’ll send a
patch to fix them. (No functional impact is expected, though.)


> On Dec 9, 2018, at 1:50 PM, Ben Hutchings  wrote:
> 
> 3.16.62-rc1 review patch.  If anyone has any objections, please let me know.
> 
> --
> 
> From: Nadav Amit 
> 
> commit 9bc4f28af75a91aea0ae383f50b0a430c4509303 upstream.
> 
> When page-table entries are set, the compiler might optimize their
> assignment by using multiple instructions to set the PTE. This might
> turn into a security hazard if the user somehow manages to use the
> interim PTE. L1TF does not make our lives easier, making even an interim
> non-present PTE a security hazard.
> 
> Using WRITE_ONCE() to set PTEs and friends should prevent this potential
> security hazard.
> 
> I skimmed the differences in the binary with and without this patch. The
> differences are (obviously) greater when CONFIG_PARAVIRT=n as more
> code optimizations are possible. For better and worse, the impact on the
> binary with this patch is pretty small. Skimming the code did not cause
> anything to jump out as a security hazard, but it seems that at least
> move_soft_dirty_pte() caused set_pte_at() to use multiple writes.
> 
> Signed-off-by: Nadav Amit 
> Signed-off-by: Thomas Gleixner 
> Acked-by: Peter Zijlstra (Intel) 
> Cc: Dave Hansen 
> Cc: Andi Kleen 
> Cc: Josh Poimboeuf 
> Cc: Michal Hocko 
> Cc: Vlastimil Babka 
> Cc: Sean Christopherson 
> Cc: Andy Lutomirski 
> Link: https://lkml.kernel.org/r/20180902181451.80520-1-namit@vmware.com
> [bwh: Backported to 3.16:
> - Use ACCESS_ONCE() instead of WRITE_ONCE()
> - Drop changes in pmdp_establish(), native_set_p4d(), pudp_set_access_flags()]
> Signed-off-by: Ben Hutchings 
> ---
> --- a/arch/x86/include/asm/pgtable_64.h
> +++ b/arch/x86/include/asm/pgtable_64.h
> @@ -44,15 +44,15 @@ struct mm_struct;
> void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte);
> 
> 
> -static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
> - pte_t *ptep)
> +static inline void native_set_pte(pte_t *ptep, pte_t pte)
> {
> - *ptep = native_make_pte(0);
> + ACCESS_ONCE(*ptep) = pte;
> }
> 
> -static inline void native_set_pte(pte_t *ptep, pte_t pte)
> +static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
> + pte_t *ptep)
> {
> - *ptep = pte;
> + native_set_pte(ptep, native_make_pte(0));
> }
> 
> static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
> @@ -62,7 +62,7 @@ static inline void native_set_pte_atomic
> 
> static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
> {
> - *pmdp = pmd;
> + ACCESS_ONCE(*pmdp) = pmd;
> }
> 
> static inline void native_pmd_clear(pmd_t *pmd)
> @@ -98,7 +98,7 @@ static inline pmd_t native_pmdp_get_and_
> 
> static inline void native_set_pud(pud_t *pudp, pud_t pud)
> {
> - *pudp = pud;
> + ACCESS_ONCE(*pudp) = pud;
> }
> 
> static inline void native_pud_clear(pud_t *pud)
> @@ -131,7 +131,7 @@ static inline pgd_t *native_get_shadow_p
> 
> static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
> {
> - *pgdp = kaiser_set_shadow_pgd(pgdp, pgd);
> + ACCESS_ONCE(*pgdp) = kaiser_set_shadow_pgd(pgdp, pgd);
> }
> 
> static inline void native_pgd_clear(pgd_t *pgd)
> --- a/arch/x86/mm/pgtable.c
> +++ b/arch/x86/mm/pgtable.c
> @@ -242,7 +242,7 @@ static void pgd_mop_up_pmds(struct mm_st
>   if (pgd_val(pgd) != 0) {
>   pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
> 
> - pgdp[i] = native_make_pgd(0);
> + pgd_clear(&pgdp[i]);
> 
>   paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
>   pmd_free(mm, pmd);
> @@ -352,7 +352,7 @@ int ptep_set_access_flags(struct vm_area
>   int changed = !pte_same(*ptep, entry);
> 
>   if (changed && dirty) {
> - *ptep = entry;
> + set_pte(ptep, entry);
>   pte_update_defer(vma->vm_mm, address, ptep);
>   }
> 
> @@ -369,7 +369,7 @@ int pmdp_set_access_flags(struct vm_area
>   VM_BUG_ON(address & ~HPAGE_PMD_MASK);
> 
>   if (changed && dirty) {
> - *pmdp = entry;
> + set_pmd(pmdp, entry);
>   pmd_update_defer(vma->vm_mm, address, pmdp);
>   /*
>* We had a write-protection fault here and changed the pmd