From: Artem Kuzin <[email protected]>

Co-developed-by: Nikita Panov <[email protected]>
Signed-off-by: Nikita Panov <[email protected]>
Co-developed-by: Alexander Grubnikov <[email protected]>
Signed-off-by: Alexander Grubnikov <[email protected]>
Signed-off-by: Artem Kuzin <[email protected]>
---
 arch/x86/kernel/kprobes/core.c | 2 +-
 arch/x86/mm/init.c             | 8 ++++----
 arch/x86/mm/init_64.c          | 4 ++--
 arch/x86/mm/pti.c              | 2 +-
 4 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index f7f6042eb7e6..0fb29a4855fe 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -422,7 +422,7 @@ void *alloc_insn_page(void)
         * TODO: Once additional kernel code protection mechanisms are set, ensure
         * that the page was not maliciously altered and it is still zeroed.
         */
-       set_memory_rox((unsigned long)page, 1);
+       numa_set_memory_rox((unsigned long)page, 1);
 
        return page;
 }
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 8192452d1d2d..f797e194bfb0 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -927,15 +927,15 @@ void free_init_pages(const char *what, unsigned long begin, unsigned long end)
                 * corresponding pages will be unmapped.
                 */
                kmemleak_free_part((void *)begin, end - begin);
-               set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
+               numa_set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
        } else {
                /*
                 * We just marked the kernel text read only above, now that
                 * we are going to free part of that, we need to make that
                 * writeable and non-executable first.
                 */
-               set_memory_nx(begin, (end - begin) >> PAGE_SHIFT);
-               set_memory_rw(begin, (end - begin) >> PAGE_SHIFT);
+               numa_set_memory_nx(begin, (end - begin) >> PAGE_SHIFT);
+               numa_set_memory_rw(begin, (end - begin) >> PAGE_SHIFT);
 
                free_reserved_area((void *)begin, (void *)end,
                                   POISON_FREE_INITMEM, what);
@@ -971,7 +971,7 @@ void free_kernel_image_pages(const char *what, void *begin, void *end)
         * which can't be treated in this way for obvious reasons.
         */
        if (IS_ENABLED(CONFIG_X86_64) && cpu_feature_enabled(X86_FEATURE_PTI))
-               set_memory_np_noalias(begin_ul, len_pages);
+               numa_set_memory_np_noalias(begin_ul, len_pages);
 }
 
 void __ref free_initmem(void)
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index a190aae8ceaf..98cb7f5f2863 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -1379,7 +1379,7 @@ void mark_rodata_ro(void)
 
        printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
               (end - start) >> 10);
-       set_memory_ro(start, (end - start) >> PAGE_SHIFT);
+       numa_set_memory_ro(start, (end - start) >> PAGE_SHIFT);
 
        kernel_set_to_readonly = 1;
 
@@ -1396,7 +1396,7 @@ void mark_rodata_ro(void)
         * has been zapped already via cleanup_highmem().
         */
        all_end = roundup((unsigned long)_brk_end, PMD_SIZE);
-       set_memory_nx(text_end, (all_end - text_end) >> PAGE_SHIFT);
+       numa_set_memory_nx(text_end, (all_end - text_end) >> PAGE_SHIFT);
 
        set_ftrace_ops_ro();
 
diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c
index 78414c6d1b5e..23f30edf71b3 100644
--- a/arch/x86/mm/pti.c
+++ b/arch/x86/mm/pti.c
@@ -580,7 +580,7 @@ static void pti_clone_kernel_text(void)
         */
 
        /* Set the global bit for normal non-__init kernel text: */
-       set_memory_global(start, (end_global - start) >> PAGE_SHIFT);
+       numa_set_memory_global(start, (end_global - start) >> PAGE_SHIFT);
 }
 
 static void pti_set_kernel_image_nonglobal(void)
-- 
2.34.1


Reply via email to