From: Nadav Amit <na...@vmware.com>

Set the page as executable after allocation.  This is a preparatory
patch for a subsequent patch that makes module-allocated pages
non-executable.

While at it, do some small cleanup: drop the PAGE_MASK masking of the
address, which is unnecessary since module_alloc() returns page-aligned
memory.

Acked-by: Masami Hiramatsu <mhira...@kernel.org>
Signed-off-by: Nadav Amit <na...@vmware.com>
Signed-off-by: Rick Edgecombe <rick.p.edgeco...@intel.com>
---
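For reviewers, a rough userspace sketch of the ordering this patch
enforces, using mmap()/mprotect() as stand-ins for module_alloc() and
the kernel's set_memory_*() helpers.  The names alloc_code_page() and
free_code_page() are illustrative only and not part of this patch:

	#define _GNU_SOURCE
	#include <stddef.h>
	#include <sys/mman.h>
	#include <unistd.h>

	/*
	 * Userspace analogue of alloc_insn_page(): the page starts out
	 * writable, is flipped to read-only, and only then gains execute
	 * permission, so there is no window in which it is both W and X.
	 */
	static void *alloc_code_page(void)
	{
		long psz = sysconf(_SC_PAGESIZE);
		void *page = mmap(NULL, psz, PROT_READ | PROT_WRITE,
				  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

		if (page == MAP_FAILED)
			return NULL;

		/* First read-only (mirrors set_memory_ro())... */
		if (mprotect(page, psz, PROT_READ) ||
		    /* ...then executable (mirrors set_memory_x()). */
		    mprotect(page, psz, PROT_READ | PROT_EXEC)) {
			munmap(page, psz);
			return NULL;
		}
		return page;
	}

	/* Analogue of free_insn_page(): drop X before restoring W. */
	static void free_code_page(void *page)
	{
		long psz = sysconf(_SC_PAGESIZE);

		mprotect(page, psz, PROT_READ);              /* like set_memory_nx() */
		mprotect(page, psz, PROT_READ | PROT_WRITE); /* like set_memory_rw() */
		munmap(page, psz);
	}

The point in both directions is the same as in the diff below: the page
transitions RW -> RO -> RO+X on allocation and RO+X -> RO -> RW on
free, so it is never simultaneously writable and executable.
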
 arch/x86/kernel/kprobes/core.c | 24 ++++++++++++++++++++----
 1 file changed, 20 insertions(+), 4 deletions(-)

diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index a034cb808e7e..1591852d3ac4 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -431,8 +431,20 @@ void *alloc_insn_page(void)
        void *page;
 
        page = module_alloc(PAGE_SIZE);
-       if (page)
-               set_memory_ro((unsigned long)page & PAGE_MASK, 1);
+       if (!page)
+               return NULL;
+
+       /*
+        * First make the page read-only, and only then make it executable to
+        * prevent it from being W+X in between.
+        */
+       set_memory_ro((unsigned long)page, 1);
+
+       /*
+        * TODO: Once additional kernel code protection mechanisms are set, ensure
+        * that the page was not maliciously altered and it is still zeroed.
+        */
+       set_memory_x((unsigned long)page, 1);
 
        return page;
 }
@@ -440,8 +452,12 @@ void *alloc_insn_page(void)
 /* Recover page to RW mode before releasing it */
 void free_insn_page(void *page)
 {
-       set_memory_nx((unsigned long)page & PAGE_MASK, 1);
-       set_memory_rw((unsigned long)page & PAGE_MASK, 1);
+       /*
+        * First make the page non-executable, and only then make it writable to
+        * prevent it from being W+X in between.
+        */
+       set_memory_nx((unsigned long)page, 1);
+       set_memory_rw((unsigned long)page, 1);
        module_memfree(page);
 }
 
-- 
2.17.1