Re: [PATCH -next v2] powerpc/64s/pgtable: fix an undefined behaviour

2020-03-05 Thread Qian Cai



> On Mar 5, 2020, at 2:22 PM, Christophe Leroy  wrote:
> 
> 
> 
> On 05/03/2020 at 15:32, Qian Cai wrote:
>> Booting a power9 server with the hash MMU can trigger undefined
>> behaviour because pud_offset(p4d, 0) ends up doing
>> 0 >> (PAGE_SHIFT:16 + PTE_INDEX_SIZE:8 + H_PMD_INDEX_SIZE:10)
>> Fix it by converting pud_offset() and friends to static inline
>> functions.
> 
> I was suggesting to convert pud_index() to static inline, because that's
> where the shift sits. Is it not possible?
> 
> Here you seem to fix the problem for now, but if someone reuses pud_index()
> in another macro one day, the same problem may happen again.
> 

Sounds reasonable. I sent out a v3:

https://lore.kernel.org/lkml/20200306044852.3236-1-...@lca.pw/T/#u
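For reference only (not the actual v3 -- see the link above for that), a
minimal sketch of what moving the shift behind a static inline pud_index()
could look like, assuming the usual (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1)
form of the index macro:

    /* hypothetical sketch, not the real v3 change */
    static inline unsigned long pud_index(unsigned long address)
    {
            /* address is always unsigned long here, so the shift is 64-bit */
            return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
    }

Because the parameter type is fixed, a caller passing a plain int (such as a
literal 0) gets it converted to unsigned long before the shift, so a shift
exponent of 34 is always applied to a 64-bit value.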

> Christophe
> 
>>  UBSAN: shift-out-of-bounds in arch/powerpc/mm/ptdump/ptdump.c:282:15
>>  shift exponent 34 is too large for 32-bit type 'int'
>>  CPU: 6 PID: 1 Comm: swapper/0 Not tainted 5.6.0-rc4-next-20200303+ #13
>>  Call Trace:
>>  dump_stack+0xf4/0x164 (unreliable)
>>  ubsan_epilogue+0x18/0x78
>>  __ubsan_handle_shift_out_of_bounds+0x160/0x21c
>>  walk_pagetables+0x2cc/0x700
>>  walk_pud at arch/powerpc/mm/ptdump/ptdump.c:282
>>  (inlined by) walk_pagetables at arch/powerpc/mm/ptdump/ptdump.c:311
>>  ptdump_check_wx+0x8c/0xf0
>>  mark_rodata_ro+0x48/0x80
>>  kernel_init+0x74/0x194
>>  ret_from_kernel_thread+0x5c/0x74
>> Suggested-by: Christophe Leroy 
>> Signed-off-by: Qian Cai 
>> ---
>>  arch/powerpc/include/asm/book3s/64/pgtable.h | 20 ++--
>>  1 file changed, 14 insertions(+), 6 deletions(-)
>> diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
>> index fa60e8594b9f..4967bc9e25e2 100644
>> --- a/arch/powerpc/include/asm/book3s/64/pgtable.h
>> +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
>> @@ -1016,12 +1016,20 @@ static inline bool p4d_access_permitted(p4d_t p4d, bool write)
>>  #define pgd_offset(mm, address)	((mm)->pgd + pgd_index(address))
>>  -#define pud_offset(p4dp, addr) \
>> -(((pud_t *) p4d_page_vaddr(*(p4dp))) + pud_index(addr))
>> -#define pmd_offset(pudp,addr) \
>> -(((pmd_t *) pud_page_vaddr(*(pudp))) + pmd_index(addr))
>> -#define pte_offset_kernel(dir,addr) \
>> -(((pte_t *) pmd_page_vaddr(*(dir))) + pte_index(addr))
>> +static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
>> +{
>> +return (pud_t *)p4d_page_vaddr(*p4d) + pud_index(address);
>> +}
>> +
>> +static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
>> +{
>> +return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
>> +}
>> +
>> +static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
>> +{
>> +return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
>> +}
>>#define pte_offset_map(dir,addr)  pte_offset_kernel((dir), (addr))
>>  



Re: [PATCH -next v2] powerpc/64s/pgtable: fix an undefined behaviour

2020-03-05 Thread Christophe Leroy




On 05/03/2020 at 15:32, Qian Cai wrote:

Booting a power9 server with the hash MMU can trigger undefined
behaviour because pud_offset(p4d, 0) ends up doing

0 >> (PAGE_SHIFT:16 + PTE_INDEX_SIZE:8 + H_PMD_INDEX_SIZE:10)

Fix it by converting pud_offset() and friends to static inline
functions.


I was suggesting to convert pud_index() to static inline, because that's 
where the shift sits. Is it not possible?


Here you seem to fix the problem for now, but if someone reuses 
pud_index() in another macro one day, the same problem may happen again.


Christophe



  UBSAN: shift-out-of-bounds in arch/powerpc/mm/ptdump/ptdump.c:282:15
  shift exponent 34 is too large for 32-bit type 'int'
  CPU: 6 PID: 1 Comm: swapper/0 Not tainted 5.6.0-rc4-next-20200303+ #13
  Call Trace:
  dump_stack+0xf4/0x164 (unreliable)
  ubsan_epilogue+0x18/0x78
  __ubsan_handle_shift_out_of_bounds+0x160/0x21c
  walk_pagetables+0x2cc/0x700
  walk_pud at arch/powerpc/mm/ptdump/ptdump.c:282
  (inlined by) walk_pagetables at arch/powerpc/mm/ptdump/ptdump.c:311
  ptdump_check_wx+0x8c/0xf0
  mark_rodata_ro+0x48/0x80
  kernel_init+0x74/0x194
  ret_from_kernel_thread+0x5c/0x74

Suggested-by: Christophe Leroy 
Signed-off-by: Qian Cai 
---
  arch/powerpc/include/asm/book3s/64/pgtable.h | 20 ++--
  1 file changed, 14 insertions(+), 6 deletions(-)

diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index fa60e8594b9f..4967bc9e25e2 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -1016,12 +1016,20 @@ static inline bool p4d_access_permitted(p4d_t p4d, bool write)
  
  #define pgd_offset(mm, address)	 ((mm)->pgd + pgd_index(address))
  
-#define pud_offset(p4dp, addr)	\
-   (((pud_t *) p4d_page_vaddr(*(p4dp))) + pud_index(addr))
-#define pmd_offset(pudp,addr) \
-   (((pmd_t *) pud_page_vaddr(*(pudp))) + pmd_index(addr))
-#define pte_offset_kernel(dir,addr) \
-   (((pte_t *) pmd_page_vaddr(*(dir))) + pte_index(addr))
+static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
+{
+   return (pud_t *)p4d_page_vaddr(*p4d) + pud_index(address);
+}
+
+static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
+{
+   return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
+}
+
+static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
+{
+   return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
+}
  
  #define pte_offset_map(dir,addr)	pte_offset_kernel((dir), (addr))
  



[PATCH -next v2] powerpc/64s/pgtable: fix an undefined behaviour

2020-03-05 Thread Qian Cai
Booting a power9 server with the hash MMU can trigger undefined
behaviour because pud_offset(p4d, 0) ends up doing

0 >> (PAGE_SHIFT:16 + PTE_INDEX_SIZE:8 + H_PMD_INDEX_SIZE:10)

Fix it by converting pud_offset() and friends to static inline
functions.
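
(Not part of the patch: a minimal standalone sketch, with illustrative names,
of why the macro form is undefined behaviour while a static inline is not,
assuming PUD_SHIFT works out to 34 on this configuration, i.e. PAGE_SHIFT 16
+ PTE_INDEX_SIZE 8 + H_PMD_INDEX_SIZE 10.)

    #define PUD_SHIFT 34	/* assumed value for this illustration */

    /* Macro: the shift operates on whatever type the caller passes in. */
    #define pud_index_macro(addr)	((addr) >> PUD_SHIFT)

    /* Static inline: the prototype converts the argument to unsigned long. */
    static inline unsigned long pud_index_inline(unsigned long addr)
    {
            return addr >> PUD_SHIFT;	/* 64-bit shift; exponent 34 is in range */
    }

    void example(void)
    {
            (void)pud_index_macro(0UL);	/* fine: 0UL is 64 bits wide */
            (void)pud_index_inline(0);	/* fine: 0 is converted to unsigned long */
            (void)pud_index_macro(0);	/* UB: shifting a 32-bit int by 34 */
    }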

 UBSAN: shift-out-of-bounds in arch/powerpc/mm/ptdump/ptdump.c:282:15
 shift exponent 34 is too large for 32-bit type 'int'
 CPU: 6 PID: 1 Comm: swapper/0 Not tainted 5.6.0-rc4-next-20200303+ #13
 Call Trace:
 dump_stack+0xf4/0x164 (unreliable)
 ubsan_epilogue+0x18/0x78
 __ubsan_handle_shift_out_of_bounds+0x160/0x21c
 walk_pagetables+0x2cc/0x700
 walk_pud at arch/powerpc/mm/ptdump/ptdump.c:282
 (inlined by) walk_pagetables at arch/powerpc/mm/ptdump/ptdump.c:311
 ptdump_check_wx+0x8c/0xf0
 mark_rodata_ro+0x48/0x80
 kernel_init+0x74/0x194
 ret_from_kernel_thread+0x5c/0x74

Suggested-by: Christophe Leroy 
Signed-off-by: Qian Cai 
---
 arch/powerpc/include/asm/book3s/64/pgtable.h | 20 ++--
 1 file changed, 14 insertions(+), 6 deletions(-)

diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index fa60e8594b9f..4967bc9e25e2 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -1016,12 +1016,20 @@ static inline bool p4d_access_permitted(p4d_t p4d, bool write)
 
 #define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
 
-#define pud_offset(p4dp, addr) \
-   (((pud_t *) p4d_page_vaddr(*(p4dp))) + pud_index(addr))
-#define pmd_offset(pudp,addr) \
-   (((pmd_t *) pud_page_vaddr(*(pudp))) + pmd_index(addr))
-#define pte_offset_kernel(dir,addr) \
-   (((pte_t *) pmd_page_vaddr(*(dir))) + pte_index(addr))
+static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
+{
+   return (pud_t *)p4d_page_vaddr(*p4d) + pud_index(address);
+}
+
+static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
+{
+   return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
+}
+
+static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
+{
+   return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
+}
 
 #define pte_offset_map(dir,addr)   pte_offset_kernel((dir), (addr))
 
-- 
1.8.3.1