Re: [PATCH v1 1/2] x86/power/64: Support unaligned addresses for temporary mapping

2016-08-03 Thread Thomas Garnier
On Tue, Aug 2, 2016 at 12:55 PM, Yinghai Lu  wrote:
> On Tue, Aug 2, 2016 at 10:48 AM, Thomas Garnier  wrote:
>> On Tue, Aug 2, 2016 at 10:36 AM, Yinghai Lu  wrote:
>>>
>>> Looks like we need to change the loop from phys address to virtual
>>> address instead, to avoid the overflow.
>
> something like attached.

I tested it and it worked well. I just got this warning on build:

In file included from arch/x86/mm/init_64.c:60:0:
arch/x86/mm/ident_map.c: In function ‘ident_pmd_init’:
arch/x86/mm/ident_map.c:18:29: warning: suggest parentheses around arithmetic in operand of ‘|’ [-Wparentheses]
set_pmd(pmd, __pmd(vaddr - off | info->pmd_flag));
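For reference, a stand-alone sketch of what -Wparentheses is pointing at
(made-up values, not kernel code): '-' binds tighter than '|' in C, so the
suggested parentheses around (vaddr - off) only make the intent explicit;
the computed value is unchanged.

/* precedence-demo.c: illustrates the -Wparentheses suggestion above.
 * All values are made up; this is not kernel code.
 */
#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned long long vaddr = 0xffff880001200000ULL;
	unsigned long long off   = 0xffff880000000000ULL;
	unsigned long long flag  = 0x1e3ULL;	/* stand-in for info->pmd_flag */

	/* subtraction has higher precedence than '|', so both spellings agree */
	assert((vaddr - off | flag) == ((vaddr - off) | flag));
	printf("0x%llx\n", (vaddr - off) | flag);
	return 0;
}

Building this demo with gcc -Wparentheses reproduces the same suggestion on
the unparenthesized form.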

Do you want to resend your version for integration?


Re: [PATCH v1 1/2] x86/power/64: Support unaligned addresses for temporary mapping

2016-08-02 Thread Yinghai Lu
On Tue, Aug 2, 2016 at 10:48 AM, Thomas Garnier  wrote:
> On Tue, Aug 2, 2016 at 10:36 AM, Yinghai Lu  wrote:
>>
>> Looks like we need to change the loop from phys address to virtual
>> address instead, to avoid the overflow.

something like attached.
---
 arch/x86/mm/ident_map.c |   54 
 1 file changed, 32 insertions(+), 22 deletions(-)

Index: linux-2.6/arch/x86/mm/ident_map.c
===================================================================
--- linux-2.6.orig/arch/x86/mm/ident_map.c
+++ linux-2.6/arch/x86/mm/ident_map.c
@@ -3,40 +3,47 @@
  * included by both the compressed kernel and the regular kernel.
  */
 
-static void ident_pmd_init(unsigned long pmd_flag, pmd_t *pmd_page,
+static void ident_pmd_init(struct x86_mapping_info *info, pmd_t *pmd_page,
 			   unsigned long addr, unsigned long end)
 {
-	addr &= PMD_MASK;
-	for (; addr < end; addr += PMD_SIZE) {
-		pmd_t *pmd = pmd_page + pmd_index(addr);
+	unsigned long off = info->kernel_mapping ? __PAGE_OFFSET : 0;
+	unsigned long vaddr = addr + off;
+	unsigned long vend = end + off;
+
+	vaddr &= PMD_MASK;
+	for (; vaddr < vend; vaddr += PMD_SIZE) {
+		pmd_t *pmd = pmd_page + pmd_index(vaddr);
 
 		if (!pmd_present(*pmd))
-			set_pmd(pmd, __pmd(addr | pmd_flag));
+			set_pmd(pmd, __pmd(vaddr - off | info->pmd_flag));
 	}
 }
 
 static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
 			  unsigned long addr, unsigned long end)
 {
-	unsigned long next;
+	unsigned long off = info->kernel_mapping ? __PAGE_OFFSET : 0;
+	unsigned long vaddr = addr + off;
+	unsigned long vend = end + off;
+	unsigned long vnext;
 
-	for (; addr < end; addr = next) {
-		pud_t *pud = pud_page + pud_index(addr);
+	for (; vaddr < vend; vaddr = vnext) {
+		pud_t *pud = pud_page + pud_index(vaddr);
 		pmd_t *pmd;
 
-		next = (addr & PUD_MASK) + PUD_SIZE;
-		if (next > end)
-			next = end;
+		vnext = (vaddr & PUD_MASK) + PUD_SIZE;
+		if (vnext > vend)
+			vnext = vend;
 
 		if (pud_present(*pud)) {
 			pmd = pmd_offset(pud, 0);
-			ident_pmd_init(info->pmd_flag, pmd, addr, next);
+			ident_pmd_init(info, pmd, vaddr - off, vnext - off);
 			continue;
 		}
 		pmd = (pmd_t *)info->alloc_pgt_page(info->context);
 		if (!pmd)
 			return -ENOMEM;
-		ident_pmd_init(info->pmd_flag, pmd, addr, next);
+		ident_pmd_init(info, pmd, vaddr - off, vnext - off);
 		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
 	}
 
@@ -46,21 +53,24 @@ static int ident_pud_init(struct x86_map
 int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
 			  unsigned long addr, unsigned long end)
 {
-	unsigned long next;
 	int result;
-	int off = info->kernel_mapping ? pgd_index(__PAGE_OFFSET) : 0;
+	unsigned long off = info->kernel_mapping ? __PAGE_OFFSET : 0;
+	unsigned long vaddr = addr + off;
+	unsigned long vend = end + off;
+	unsigned long vnext;
 
-	for (; addr < end; addr = next) {
-		pgd_t *pgd = pgd_page + pgd_index(addr) + off;
+	for (; vaddr < vend; vaddr = vnext) {
+		pgd_t *pgd = pgd_page + pgd_index(vaddr);
 		pud_t *pud;
 
-		next = (addr & PGDIR_MASK) + PGDIR_SIZE;
-		if (next > end)
-			next = end;
+		vnext = (vaddr & PGDIR_MASK) + PGDIR_SIZE;
+		if (vnext > vend)
+			vnext = vend;
 
 		if (pgd_present(*pgd)) {
 			pud = pud_offset(pgd, 0);
-			result = ident_pud_init(info, pud, addr, next);
+			result = ident_pud_init(info, pud, vaddr - off,
+		vnext - off);
 			if (result)
 				return result;
 			continue;
@@ -69,7 +79,7 @@ int kernel_ident_mapping_init(struct x86
 		pud = (pud_t *)info->alloc_pgt_page(info->context);
 		if (!pud)
 			return -ENOMEM;
-		result = ident_pud_init(info, pud, addr, next);
+		result = ident_pud_init(info, pud, vaddr - off, vnext - off);
 		if (result)
 			return result;
 		set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE));


Re: [PATCH v1 1/2] x86/power/64: Support unaligned addresses for temporary mapping

2016-08-02 Thread Yinghai Lu
On Mon, Aug 1, 2016 at 5:36 PM, Rafael J. Wysocki  wrote:
> On Monday, August 01, 2016 10:07:59 AM Thomas Garnier wrote:
>> Correctly setup the temporary mapping for hibernation. Previous
>> implementation assumed the address was aligned on the PGD level. With
>> KASLR memory randomization enabled, the address is randomized on the PUD
>> level. This change supports unaligned address up to PMD.
>
> This code is shared with kexec AFAICS, so it likely is better to push it
> through tip rather than through the PM tree.

Only the calling path via arch/x86/power/hibernate_64.c has
   kernel_mapping = true;
the other two paths, arch/x86/boot/compressed/pagetable.c and
arch/x86/kernel/machine_kexec_64.c, have kernel_mapping set to false.

Maybe that path needs a simplified kernel_physical_mapping_init() instead?
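As a stand-alone sketch of that split (stand-in types; the field set is
paraphrased from the v4.7-era x86_mapping_info, and the real pmd_flag
value, __PAGE_KERNEL_LARGE_EXEC, is omitted):

/* mapping-info-demo.c: stand-alone sketch, NOT the kernel headers.
 * Only the hibernation caller asks for the kernel-virtual variant;
 * kexec and the compressed-kernel pagetable code keep the default
 * (false) and get a plain identity mapping.
 */
#include <stdbool.h>
#include <stdio.h>

struct x86_mapping_info {
	void *(*alloc_pgt_page)(void *);	/* allocate a page-table page */
	void *context;				/* context for alloc_pgt_page */
	unsigned long pmd_flag;			/* flags for PMD entries */
	bool kernel_mapping;			/* kernel mapping vs. identity */
};

static void *alloc_pgt_page(void *context) { (void)context; return NULL; }

int main(void)
{
	/* hibernation (arch/x86/power/hibernate_64.c): kernel-virtual variant */
	struct x86_mapping_info hibernate = {
		.alloc_pgt_page	= alloc_pgt_page,
		.kernel_mapping	= true,
	};
	/* kexec and the compressed kernel: plain identity mapping */
	struct x86_mapping_info ident = {
		.alloc_pgt_page	= alloc_pgt_page,
	};

	printf("hibernate.kernel_mapping=%d ident.kernel_mapping=%d\n",
	       hibernate.kernel_mapping, ident.kernel_mapping);
	return 0;
}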

Thanks

Yinghai


Re: [PATCH v1 1/2] x86/power/64: Support unaligned addresses for temporary mapping

2016-08-02 Thread Thomas Garnier
On Tue, Aug 2, 2016 at 10:36 AM, Yinghai Lu  wrote:
> On Mon, Aug 1, 2016 at 10:07 AM, Thomas Garnier  wrote:
>> Correctly setup the temporary mapping for hibernation. Previous
>> implementation assumed the address was aligned on the PGD level. With
>> KASLR memory randomization enabled, the address is randomized on the PUD
>> level. This change supports unaligned address up to PMD.
>>
>> Signed-off-by: Thomas Garnier 
>> ---
>>  arch/x86/mm/ident_map.c | 18 ++
>>  1 file changed, 10 insertions(+), 8 deletions(-)
>>
>> diff --git a/arch/x86/mm/ident_map.c b/arch/x86/mm/ident_map.c
>> index ec21796..ea1ebf1 100644
>> --- a/arch/x86/mm/ident_map.c
>> +++ b/arch/x86/mm/ident_map.c
>> @@ -3,15 +3,16 @@
>>   * included by both the compressed kernel and the regular kernel.
>>   */
>>
>> -static void ident_pmd_init(unsigned long pmd_flag, pmd_t *pmd_page,
>> +static void ident_pmd_init(struct x86_mapping_info *info, pmd_t *pmd_page,
>>unsigned long addr, unsigned long end)
>>  {
>> -   addr &= PMD_MASK;
>> -   for (; addr < end; addr += PMD_SIZE) {
>> -   pmd_t *pmd = pmd_page + pmd_index(addr);
>> +   int off = info->kernel_mapping ? pmd_index(__PAGE_OFFSET) : 0;
>> +
>> +   for (addr &= PMD_MASK; addr < end; addr += PMD_SIZE) {
>> +   pmd_t *pmd = pmd_page + pmd_index(addr) + off;
>>
>> if (!pmd_present(*pmd))
>> -   set_pmd(pmd, __pmd(addr | pmd_flag));
>> +   set_pmd(pmd, __pmd(addr | info->pmd_flag));
>> }
>>  }
>>
>> @@ -19,9 +20,10 @@ static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
>>   unsigned long addr, unsigned long end)
>>  {
>> unsigned long next;
>> +   int off = info->kernel_mapping ? pud_index(__PAGE_OFFSET) : 0;
>>
>> for (; addr < end; addr = next) {
>> -   pud_t *pud = pud_page + pud_index(addr);
>> +   pud_t *pud = pud_page + pud_index(addr) + off;
>> pmd_t *pmd;
>>
>> next = (addr & PUD_MASK) + PUD_SIZE;
>
> Is there any chance that (pud_index(addr) + off) or (pmd_index(addr) + off)
> is bigger than 512?
>
> Looks like we need to change the loop from phys address to virtual
> address instead, to avoid the overflow.
>

That's a good point. I will take a look at it.

> Thanks
>
> Yinghai


Re: [PATCH v1 1/2] x86/power/64: Support unaligned addresses for temporary mapping

2016-08-02 Thread Yinghai Lu
On Mon, Aug 1, 2016 at 10:07 AM, Thomas Garnier  wrote:
> Correctly setup the temporary mapping for hibernation. Previous
> implementation assumed the address was aligned on the PGD level. With
> KASLR memory randomization enabled, the address is randomized on the PUD
> level. This change supports unaligned address up to PMD.
>
> Signed-off-by: Thomas Garnier 
> ---
>  arch/x86/mm/ident_map.c | 18 ++
>  1 file changed, 10 insertions(+), 8 deletions(-)
>
> diff --git a/arch/x86/mm/ident_map.c b/arch/x86/mm/ident_map.c
> index ec21796..ea1ebf1 100644
> --- a/arch/x86/mm/ident_map.c
> +++ b/arch/x86/mm/ident_map.c
> @@ -3,15 +3,16 @@
>   * included by both the compressed kernel and the regular kernel.
>   */
>
> -static void ident_pmd_init(unsigned long pmd_flag, pmd_t *pmd_page,
> +static void ident_pmd_init(struct x86_mapping_info *info, pmd_t *pmd_page,
>unsigned long addr, unsigned long end)
>  {
> -   addr &= PMD_MASK;
> -   for (; addr < end; addr += PMD_SIZE) {
> -   pmd_t *pmd = pmd_page + pmd_index(addr);
> +   int off = info->kernel_mapping ? pmd_index(__PAGE_OFFSET) : 0;
> +
> +   for (addr &= PMD_MASK; addr < end; addr += PMD_SIZE) {
> +   pmd_t *pmd = pmd_page + pmd_index(addr) + off;
>
> if (!pmd_present(*pmd))
> -   set_pmd(pmd, __pmd(addr | pmd_flag));
> +   set_pmd(pmd, __pmd(addr | info->pmd_flag));
> }
>  }
>
> @@ -19,9 +20,10 @@ static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
>   unsigned long addr, unsigned long end)
>  {
> unsigned long next;
> +   int off = info->kernel_mapping ? pud_index(__PAGE_OFFSET) : 0;
>
> for (; addr < end; addr = next) {
> -   pud_t *pud = pud_page + pud_index(addr);
> +   pud_t *pud = pud_page + pud_index(addr) + off;
> pmd_t *pmd;
>
> next = (addr & PUD_MASK) + PUD_SIZE;

Is there any chance that (pud_index(addr) + off) or (pmd_index(addr) + off)
is bigger than 512?

Looks like we need to change the loop from phys address to virtual
address instead, to avoid the overflow.
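FWIW, a stand-alone sketch of the concern (the base and address below are
made up, purely to illustrate; with CONFIG_RANDOMIZE_MEMORY the direct
mapping base is randomized at PUD granularity, so its pud_index can be
anything in 0..511):

/* index-overflow-demo.c: stand-alone sketch, not kernel code.
 * Both pud_index(addr) and pud_index(page_offset) are in 0..511, so the
 * sum used as a table offset can point past the single 512-entry pud page.
 */
#include <stdio.h>

#define PUD_SHIFT	30
#define PTRS_PER_PUD	512ULL

static unsigned long long pud_index(unsigned long long addr)
{
	return (addr >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
}

int main(void)
{
	unsigned long long page_offset = 0xffffa47380000000ULL; /* made-up randomized base */
	unsigned long long addr        = 0x0000004ac0000000ULL; /* made-up physical address */
	unsigned long long off = pud_index(page_offset);
	unsigned long long idx = pud_index(addr) + off;

	printf("pud_index(addr)=%llu off=%llu combined index=%llu (valid slots: 0..511)\n",
	       pud_index(addr), off, idx);
	return 0;
}

Walking the loop by virtual address, as in the patch attached up-thread,
sidesteps this because pud_index()/pmd_index() are applied to the already
offset address and never need an additive table offset.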

Thanks

Yinghai


Re: [PATCH v1 1/2] x86/power/64: Support unaligned addresses for temporary mapping

2016-08-01 Thread Rafael J. Wysocki
On Monday, August 01, 2016 10:07:59 AM Thomas Garnier wrote:
> Correctly setup the temporary mapping for hibernation. Previous
> implementation assumed the address was aligned on the PGD level. With
> KASLR memory randomization enabled, the address is randomized on the PUD
> level. This change supports unaligned address up to PMD.
> 
> Signed-off-by: Thomas Garnier 

Acked-by: Rafael J. Wysocki 

This code is shared with kexec AFAICS, so it likely is better to push it
through tip rather than through the PM tree.

> ---
>  arch/x86/mm/ident_map.c | 18 ++
>  1 file changed, 10 insertions(+), 8 deletions(-)
> 
> diff --git a/arch/x86/mm/ident_map.c b/arch/x86/mm/ident_map.c
> index ec21796..ea1ebf1 100644
> --- a/arch/x86/mm/ident_map.c
> +++ b/arch/x86/mm/ident_map.c
> @@ -3,15 +3,16 @@
>   * included by both the compressed kernel and the regular kernel.
>   */
>  
> -static void ident_pmd_init(unsigned long pmd_flag, pmd_t *pmd_page,
> +static void ident_pmd_init(struct x86_mapping_info *info, pmd_t *pmd_page,
>  unsigned long addr, unsigned long end)
>  {
> - addr &= PMD_MASK;
> - for (; addr < end; addr += PMD_SIZE) {
> - pmd_t *pmd = pmd_page + pmd_index(addr);
> + int off = info->kernel_mapping ? pmd_index(__PAGE_OFFSET) : 0;
> +
> + for (addr &= PMD_MASK; addr < end; addr += PMD_SIZE) {
> + pmd_t *pmd = pmd_page + pmd_index(addr) + off;
>  
>   if (!pmd_present(*pmd))
> - set_pmd(pmd, __pmd(addr | pmd_flag));
> + set_pmd(pmd, __pmd(addr | info->pmd_flag));
>   }
>  }
>  
> @@ -19,9 +20,10 @@ static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
> unsigned long addr, unsigned long end)
>  {
>   unsigned long next;
> + int off = info->kernel_mapping ? pud_index(__PAGE_OFFSET) : 0;
>  
>   for (; addr < end; addr = next) {
> - pud_t *pud = pud_page + pud_index(addr);
> + pud_t *pud = pud_page + pud_index(addr) + off;
>   pmd_t *pmd;
>  
>   next = (addr & PUD_MASK) + PUD_SIZE;
> @@ -30,13 +32,13 @@ static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
>  
>   if (pud_present(*pud)) {
>   pmd = pmd_offset(pud, 0);
> - ident_pmd_init(info->pmd_flag, pmd, addr, next);
> + ident_pmd_init(info, pmd, addr, next);
>   continue;
>   }
>   pmd = (pmd_t *)info->alloc_pgt_page(info->context);
>   if (!pmd)
>   return -ENOMEM;
> - ident_pmd_init(info->pmd_flag, pmd, addr, next);
> + ident_pmd_init(info, pmd, addr, next);
>   set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
>   }
>  
> 



[PATCH v1 1/2] x86/power/64: Support unaligned addresses for temporary mapping

2016-08-01 Thread Thomas Garnier
Correctly setup the temporary mapping for hibernation. Previous
implementation assumed the address was aligned on the PGD level. With
KASLR memory randomization enabled, the address is randomized on the PUD
level. This change supports unaligned address up to PMD.

Signed-off-by: Thomas Garnier 
---
 arch/x86/mm/ident_map.c | 18 ++
 1 file changed, 10 insertions(+), 8 deletions(-)

diff --git a/arch/x86/mm/ident_map.c b/arch/x86/mm/ident_map.c
index ec21796..ea1ebf1 100644
--- a/arch/x86/mm/ident_map.c
+++ b/arch/x86/mm/ident_map.c
@@ -3,15 +3,16 @@
  * included by both the compressed kernel and the regular kernel.
  */
 
-static void ident_pmd_init(unsigned long pmd_flag, pmd_t *pmd_page,
+static void ident_pmd_init(struct x86_mapping_info *info, pmd_t *pmd_page,
   unsigned long addr, unsigned long end)
 {
-   addr &= PMD_MASK;
-   for (; addr < end; addr += PMD_SIZE) {
-   pmd_t *pmd = pmd_page + pmd_index(addr);
+   int off = info->kernel_mapping ? pmd_index(__PAGE_OFFSET) : 0;
+
+   for (addr &= PMD_MASK; addr < end; addr += PMD_SIZE) {
+   pmd_t *pmd = pmd_page + pmd_index(addr) + off;
 
if (!pmd_present(*pmd))
-   set_pmd(pmd, __pmd(addr | pmd_flag));
+   set_pmd(pmd, __pmd(addr | info->pmd_flag));
}
 }
 
@@ -19,9 +20,10 @@ static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
  unsigned long addr, unsigned long end)
 {
unsigned long next;
+   int off = info->kernel_mapping ? pud_index(__PAGE_OFFSET) : 0;
 
for (; addr < end; addr = next) {
-   pud_t *pud = pud_page + pud_index(addr);
+   pud_t *pud = pud_page + pud_index(addr) + off;
pmd_t *pmd;
 
next = (addr & PUD_MASK) + PUD_SIZE;
@@ -30,13 +32,13 @@ static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
 
if (pud_present(*pud)) {
pmd = pmd_offset(pud, 0);
-   ident_pmd_init(info->pmd_flag, pmd, addr, next);
+   ident_pmd_init(info, pmd, addr, next);
continue;
}
pmd = (pmd_t *)info->alloc_pgt_page(info->context);
if (!pmd)
return -ENOMEM;
-   ident_pmd_init(info->pmd_flag, pmd, addr, next);
+   ident_pmd_init(info, pmd, addr, next);
set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
}
 
-- 
2.8.0.rc3.226.g39d4020


