[tip:x86/boot] x86/mm: Add PUD VA support for physical mapping

2016-07-08 Thread tip-bot for Thomas Garnier
Commit-ID:  faa379332f3cb3375db1849e27386f8bc9b97da4
Gitweb: http://git.kernel.org/tip/faa379332f3cb3375db1849e27386f8bc9b97da4
Author: Thomas Garnier 
AuthorDate: Tue, 21 Jun 2016 17:47:00 -0700
Committer:  Ingo Molnar 
CommitDate: Fri, 8 Jul 2016 17:33:46 +0200

x86/mm: Add PUD VA support for physical mapping

Minor change that allows early boot physical mapping of PUD level virtual
addresses. The current implementation expects the virtual address to be
PUD aligned. For KASLR memory randomization, we need to be able to
randomize the offset used on the PUD table.

It has no impact on current usage.

Signed-off-by: Thomas Garnier 
Signed-off-by: Kees Cook 
Cc: Alexander Kuleshov 
Cc: Alexander Popov 
Cc: Andrew Morton 
Cc: Andy Lutomirski 
Cc: Aneesh Kumar K.V 
Cc: Baoquan He 
Cc: Boris Ostrovsky 
Cc: Borislav Petkov 
Cc: Borislav Petkov 
Cc: Brian Gerst 
Cc: Christian Borntraeger 
Cc: Dan Williams 
Cc: Dave Hansen 
Cc: Dave Young 
Cc: Denys Vlasenko 
Cc: Dmitry Vyukov 
Cc: H. Peter Anvin 
Cc: Jan Beulich 
Cc: Joerg Roedel 
Cc: Jonathan Corbet 
Cc: Josh Poimboeuf 
Cc: Juergen Gross 
Cc: Kirill A. Shutemov 
Cc: Linus Torvalds 
Cc: Lv Zheng 
Cc: Mark Salter 
Cc: Martin Schwidefsky 
Cc: Matt Fleming 
Cc: Peter Zijlstra 
Cc: Stephen Smalley 
Cc: Thomas Gleixner 
Cc: Toshi Kani 
Cc: Xiao Guangrong 
Cc: Yinghai Lu 
Cc: kernel-hardening@lists.openwall.com
Cc: linux-doc@vger.kernel.org
Link: http://lkml.kernel.org/r/1466556426-32664-4-git-send-email-keescook@chromium.org
Signed-off-by: Ingo Molnar 
---
 arch/x86/mm/init_64.c | 13 +++++++++----
 1 file changed, 9 insertions(+), 4 deletions(-)

diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 6714712..7bf1ddb 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -465,7 +465,8 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long paddr, unsigned long paddr_end,
 
 /*
  * Create PUD level page table mapping for physical addresses. The virtual
- * and physical address have to be aligned at this level.
+ * and physical address do not have to be aligned at this level. KASLR can
+ * randomize virtual addresses up to this level.
  * It returns the last physical address mapped.
  */
 static unsigned long __meminit
@@ -474,14 +475,18 @@ phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
 {
unsigned long pages = 0, paddr_next;
unsigned long paddr_last = paddr_end;
-   int i = pud_index(paddr);
+   unsigned long vaddr = (unsigned long)__va(paddr);
+   int i = pud_index(vaddr);
 
for (; i < PTRS_PER_PUD; i++, paddr = paddr_next) {
-   pud_t *pud = pud_page + pud_index(paddr);
+   pud_t *pud;
pmd_t *pmd;
pgprot_t prot = PAGE_KERNEL;
 
+   vaddr = (unsigned long)__va(paddr);
+   pud = pud_page + pud_index(vaddr);
paddr_next = (paddr & PUD_MASK) + PUD_SIZE;
+
if (paddr >= paddr_end) {
if (!after_bootmem &&
!e820_any_mapped(paddr & PUD_MASK, paddr_next,
@@ -551,7 +556,7 @@ phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
 
 /*
  * Create page table mapping for the physical memory for specific physical
- * addresses. The virtual and physical addresses have to be aligned on PUD level
+ * addresses. The virtual and physical addresses have to be aligned on PMD level
  * down. It returns the last physical address mapped.
  */
 unsigned long __meminit


[tip:x86/boot] x86/mm: Add PUD VA support for physical mapping

2016-07-08 Thread tip-bot for Thomas Garnier
Commit-ID:  faa379332f3cb3375db1849e27386f8bc9b97da4
Gitweb: http://git.kernel.org/tip/faa379332f3cb3375db1849e27386f8bc9b97da4
Author: Thomas Garnier 
AuthorDate: Tue, 21 Jun 2016 17:47:00 -0700
Committer:  Ingo Molnar 
CommitDate: Fri, 8 Jul 2016 17:33:46 +0200

x86/mm: Add PUD VA support for physical mapping

Minor change that allows early boot physical mapping of PUD level virtual
addresses. The current implementation expects the virtual address to be
PUD aligned. For KASLR memory randomization, we need to be able to
randomize the offset used on the PUD table.

It has no impact on current usage.

Signed-off-by: Thomas Garnier 
Signed-off-by: Kees Cook 
Cc: Alexander Kuleshov 
Cc: Alexander Popov 
Cc: Andrew Morton 
Cc: Andy Lutomirski 
Cc: Aneesh Kumar K.V 
Cc: Baoquan He 
Cc: Boris Ostrovsky 
Cc: Borislav Petkov 
Cc: Borislav Petkov 
Cc: Brian Gerst 
Cc: Christian Borntraeger 
Cc: Dan Williams 
Cc: Dave Hansen 
Cc: Dave Young 
Cc: Denys Vlasenko 
Cc: Dmitry Vyukov 
Cc: H. Peter Anvin 
Cc: Jan Beulich 
Cc: Joerg Roedel 
Cc: Jonathan Corbet 
Cc: Josh Poimboeuf 
Cc: Juergen Gross 
Cc: Kirill A. Shutemov 
Cc: Linus Torvalds 
Cc: Lv Zheng 
Cc: Mark Salter 
Cc: Martin Schwidefsky 
Cc: Matt Fleming 
Cc: Peter Zijlstra 
Cc: Stephen Smalley 
Cc: Thomas Gleixner 
Cc: Toshi Kani 
Cc: Xiao Guangrong 
Cc: Yinghai Lu 
Cc: kernel-hardening@lists.openwall.com
Cc: linux-doc@vger.kernel.org
Link: http://lkml.kernel.org/r/1466556426-32664-4-git-send-email-keescook@chromium.org
Signed-off-by: Ingo Molnar 
---
 arch/x86/mm/init_64.c | 13 +++++++++----
 1 file changed, 9 insertions(+), 4 deletions(-)

diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 6714712..7bf1ddb 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -465,7 +465,8 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long paddr, unsigned long paddr_end,
 
 /*
  * Create PUD level page table mapping for physical addresses. The virtual
- * and physical address have to be aligned at this level.
+ * and physical address do not have to be aligned at this level. KASLR can
+ * randomize virtual addresses up to this level.
  * It returns the last physical address mapped.
  */
 static unsigned long __meminit
@@ -474,14 +475,18 @@ phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
 {
unsigned long pages = 0, paddr_next;
unsigned long paddr_last = paddr_end;
-   int i = pud_index(paddr);
+   unsigned long vaddr = (unsigned long)__va(paddr);
+   int i = pud_index(vaddr);
 
for (; i < PTRS_PER_PUD; i++, paddr = paddr_next) {
-   pud_t *pud = pud_page + pud_index(paddr);
+   pud_t *pud;
pmd_t *pmd;
pgprot_t prot = PAGE_KERNEL;
 
+   vaddr = (unsigned long)__va(paddr);
+   pud = pud_page + pud_index(vaddr);
paddr_next = (paddr & PUD_MASK) + PUD_SIZE;
+
if (paddr >= paddr_end) {
if (!after_bootmem &&
!e820_any_mapped(paddr & PUD_MASK, paddr_next,
@@ -551,7 +556,7 @@ phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
 
 /*
  * Create page table mapping for the physical memory for specific physical
- * addresses. The virtual and physical addresses have to be aligned on PUD level
+ * addresses. The virtual and physical addresses have to be aligned on PMD level
  * down. It returns the last physical address mapped.
  */
 unsigned long __meminit