[PATCH 13/30] x86, kaiser: map dynamically-allocated LDTs

2017-11-10 Thread Dave Hansen

From: Dave Hansen 

Normally, a process has a NULL mm->context.ldt.  But, there is a
syscall for a process to set a new one.  If a process does that,
the new LDT must be mapped into the user page tables, just like the
default copy.

The original KAISER patch missed this case.

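Not part of this patch, just a minimal userspace sketch (hypothetical, for
illustration) of the syscall the changelog refers to: writing one LDT entry
with modify_ldt() is what leaves the kernel with a non-NULL mm->context.ldt,
allocated via alloc_ldt_struct(), i.e. the structure mapped below.

  /* sketch: install one 32-bit data segment into the LDT */
  #include <asm/ldt.h>        /* struct user_desc */
  #include <sys/syscall.h>
  #include <unistd.h>
  #include <string.h>
  #include <stdio.h>

  int main(void)
  {
          struct user_desc desc;

          memset(&desc, 0, sizeof(desc));
          desc.entry_number   = 0;        /* first LDT slot */
          desc.limit          = 0xfffff;
          desc.seg_32bit      = 1;
          desc.limit_in_pages = 1;

          /* func == 1: write an LDT entry; in the kernel this path
           * ends up in alloc_ldt_struct() */
          if (syscall(SYS_modify_ldt, 1, &desc, sizeof(desc)) != 0) {
                  perror("modify_ldt");
                  return 1;
          }
          return 0;
  }
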
Signed-off-by: Dave Hansen 
Cc: Moritz Lipp 
Cc: Daniel Gruss 
Cc: Michael Schwarz 
Cc: Richard Fellner 
Cc: Andy Lutomirski 
Cc: Linus Torvalds 
Cc: Kees Cook 
Cc: Hugh Dickins 
Cc: x...@kernel.org
---

 b/arch/x86/kernel/ldt.c |   25 ++++++++++++++++++++-----
 1 file changed, 20 insertions(+), 5 deletions(-)

diff -puN arch/x86/kernel/ldt.c~kaiser-user-map-new-ldts arch/x86/kernel/ldt.c
--- a/arch/x86/kernel/ldt.c~kaiser-user-map-new-ldts	2017-11-10 11:22:12.127244942 -0800
+++ b/arch/x86/kernel/ldt.c 2017-11-10 11:22:12.131244942 -0800
@@ -10,6 +10,7 @@
 #include 
 #include 
 #include 
+#include <linux/kaiser.h>
 #include 
 #include 
 #include 
@@ -56,11 +57,21 @@ static void flush_ldt(void *__mm)
refresh_ldt_segments();
 }
 
+static void __free_ldt_struct(struct ldt_struct *ldt)
+{
+   if (ldt->nr_entries * LDT_ENTRY_SIZE > PAGE_SIZE)
+   vfree_atomic(ldt->entries);
+   else
+   free_page((unsigned long)ldt->entries);
+   kfree(ldt);
+}
+
 /* The caller must call finalize_ldt_struct on the result. LDT starts zeroed. */
 static struct ldt_struct *alloc_ldt_struct(unsigned int num_entries)
 {
struct ldt_struct *new_ldt;
unsigned int alloc_size;
+   int ret;
 
if (num_entries > LDT_ENTRIES)
return NULL;
@@ -88,6 +99,12 @@ static struct ldt_struct *alloc_ldt_stru
return NULL;
}
 
+   ret = kaiser_add_mapping((unsigned long)new_ldt->entries, alloc_size,
+__PAGE_KERNEL | _PAGE_GLOBAL);
+   if (ret) {
+   __free_ldt_struct(new_ldt);
+   return NULL;
+   }
new_ldt->nr_entries = num_entries;
return new_ldt;
 }
@@ -114,12 +131,10 @@ static void free_ldt_struct(struct ldt_s
if (likely(!ldt))
return;
 
+   kaiser_remove_mapping((unsigned long)ldt->entries,
+ ldt->nr_entries * LDT_ENTRY_SIZE);
paravirt_free_ldt(ldt->entries, ldt->nr_entries);
-   if (ldt->nr_entries * LDT_ENTRY_SIZE > PAGE_SIZE)
-   vfree_atomic(ldt->entries);
-   else
-   free_page((unsigned long)ldt->entries);
-   kfree(ldt);
+   __free_ldt_struct(ldt);
 }
 
 /*
_


[PATCH 13/30] x86, kaiser: map dynamically-allocated LDTs

2017-11-08 Thread Dave Hansen

From: Dave Hansen 

Normally, a process just has a NULL mm->context.ldt.  But, we
have a syscall for a process to set a new one.  If a process does
that, we need to map the new LDT.

The original KAISER patch missed this case.

Signed-off-by: Dave Hansen 
Cc: Moritz Lipp 
Cc: Daniel Gruss 
Cc: Michael Schwarz 
Cc: Richard Fellner 
Cc: Andy Lutomirski 
Cc: Linus Torvalds 
Cc: Kees Cook 
Cc: Hugh Dickins 
Cc: x...@kernel.org
---

 b/arch/x86/kernel/ldt.c |   25 ++++++++++++++++++++-----
 1 file changed, 20 insertions(+), 5 deletions(-)

diff -puN arch/x86/kernel/ldt.c~kaiser-user-map-new-ldts arch/x86/kernel/ldt.c
--- a/arch/x86/kernel/ldt.c~kaiser-user-map-new-ldts	2017-11-08 10:45:32.935681386 -0800
+++ b/arch/x86/kernel/ldt.c 2017-11-08 10:45:32.938681386 -0800
@@ -10,6 +10,7 @@
 #include 
 #include 
 #include 
+#include <linux/kaiser.h>
 #include 
 #include 
 #include 
@@ -56,11 +57,21 @@ static void flush_ldt(void *__mm)
refresh_ldt_segments();
 }
 
+static void __free_ldt_struct(struct ldt_struct *ldt)
+{
+   if (ldt->nr_entries * LDT_ENTRY_SIZE > PAGE_SIZE)
+   vfree_atomic(ldt->entries);
+   else
+   free_page((unsigned long)ldt->entries);
+   kfree(ldt);
+}
+
 /* The caller must call finalize_ldt_struct on the result. LDT starts zeroed. */
 static struct ldt_struct *alloc_ldt_struct(unsigned int num_entries)
 {
struct ldt_struct *new_ldt;
unsigned int alloc_size;
+   int ret;
 
if (num_entries > LDT_ENTRIES)
return NULL;
@@ -88,6 +99,12 @@ static struct ldt_struct *alloc_ldt_stru
return NULL;
}
 
+   ret = kaiser_add_mapping((unsigned long)new_ldt->entries, alloc_size,
+__PAGE_KERNEL | _PAGE_GLOBAL);
+   if (ret) {
+   __free_ldt_struct(new_ldt);
+   return NULL;
+   }
new_ldt->nr_entries = num_entries;
return new_ldt;
 }
@@ -114,12 +131,10 @@ static void free_ldt_struct(struct ldt_s
if (likely(!ldt))
return;
 
+   kaiser_remove_mapping((unsigned long)ldt->entries,
+ ldt->nr_entries * LDT_ENTRY_SIZE);
paravirt_free_ldt(ldt->entries, ldt->nr_entries);
-   if (ldt->nr_entries * LDT_ENTRY_SIZE > PAGE_SIZE)
-   vfree_atomic(ldt->entries);
-   else
-   free_page((unsigned long)ldt->entries);
-   kfree(ldt);
+   __free_ldt_struct(ldt);
 }
 
 /*
_

