On Sat, 10 Nov 15:17, Nadav Amit wrote:
> text_poke() can potentially compromise the security as it sets temporary
> PTEs in the fixmap. These PTEs might be used to rewrite the kernel code
> from other cores accidentally or maliciously, if an attacker gains the
> ability to write onto kernel memory.
>
> Moreover, since remote TLBs are not flushed after the temporary PTEs are
> removed, the time-window in which the code is writable is not limited if
> the fixmap PTEs - maliciously or accidentally - are cached in the TLB.
> To address these potential security hazards, we use a temporary mm for
> patching the code.
>
> Finally, text_poke() is also not conservative enough when mapping pages,
> as it always tries to map 2 pages, even when a single one is sufficient.
> So try to be more conservative, and do not map more than needed.
>
> Cc: Andy Lutomirski <l...@kernel.org>
> Cc: Kees Cook <keesc...@chromium.org>
> Cc: Peter Zijlstra <pet...@infradead.org>
> Cc: Dave Hansen <dave.han...@intel.com>
> Cc: Masami Hiramatsu <mhira...@kernel.org>
> Signed-off-by: Nadav Amit <na...@vmware.com>
> ---
>  arch/x86/include/asm/fixmap.h |   2 -
>  arch/x86/kernel/alternative.c | 112 +++++++++++++++++++++++++++-------
>  2 files changed, 89 insertions(+), 25 deletions(-)
>
> diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
> index 50ba74a34a37..9da8cccdf3fb 100644
> --- a/arch/x86/include/asm/fixmap.h
> +++ b/arch/x86/include/asm/fixmap.h
> @@ -103,8 +103,6 @@ enum fixed_addresses {
>  #ifdef CONFIG_PARAVIRT
>  	FIX_PARAVIRT_BOOTMAP,
>  #endif
Hello Nadav,

with the removal of FIX_TEXT_POKE1 and FIX_TEXT_POKE0 I get the following
build error:

/home/damian/kernel/linux/arch/x86/xen/mmu_pv.c:2321:7: error: 'FIX_TEXT_POKE0' undeclared (first use in this function); did you mean 'FIX_TBOOT_BASE'?
  case FIX_TEXT_POKE0:
       ^~~~~~~~~~~~~~
       FIX_TBOOT_BASE
/home/damian/kernel/linux/arch/x86/xen/mmu_pv.c:2321:7: note: each undeclared identifier is reported only once for each function it appears in
/home/damian/kernel/linux/arch/x86/xen/mmu_pv.c:2322:7: error: 'FIX_TEXT_POKE1' undeclared (first use in this function); did you mean 'FIX_TBOOT_BASE'?
  case FIX_TEXT_POKE1:
       ^~~~~~~~~~~~~~
       FIX_TBOOT_BASE

Best regards
Damian

> -	FIX_TEXT_POKE1,	/* reserve 2 pages for text_poke() */
> -	FIX_TEXT_POKE0, /* first page is last, because allocation is backward */
>  #ifdef CONFIG_X86_INTEL_MID
>  	FIX_LNW_VRTC,
>  #endif
> diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
> index d3ae5c26e5a0..96607ef285c3 100644
> --- a/arch/x86/kernel/alternative.c
> +++ b/arch/x86/kernel/alternative.c
> @@ -11,6 +11,7 @@
>  #include <linux/stop_machine.h>
>  #include <linux/slab.h>
>  #include <linux/kdebug.h>
> +#include <linux/mmu_context.h>
>  #include <asm/text-patching.h>
>  #include <asm/alternative.h>
>  #include <asm/sections.h>
> @@ -683,43 +684,108 @@ __ro_after_init unsigned long poking_addr;
>
>  static int __text_poke(void *addr, const void *opcode, size_t len)
>  {
> +	bool cross_page_boundary = offset_in_page(addr) + len > PAGE_SIZE;
> +	temporary_mm_state_t prev;
> +	struct page *pages[2] = {NULL};
>  	unsigned long flags;
> -	char *vaddr;
> -	struct page *pages[2];
> -	int i, r = 0;
> +	pte_t pte, *ptep;
> +	spinlock_t *ptl;
> +	int r = 0;
>
>  	/*
> -	 * While boot memory allocator is runnig we cannot use struct
> -	 * pages as they are not yet initialized.
> +	 * While boot memory allocator is running we cannot use struct pages as
> +	 * they are not yet initialized.
>  	 */
>  	BUG_ON(!after_bootmem);
>
>  	if (!core_kernel_text((unsigned long)addr)) {
>  		pages[0] = vmalloc_to_page(addr);
> -		pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
> +		if (cross_page_boundary)
> +			pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
>  	} else {
>  		pages[0] = virt_to_page(addr);
>  		WARN_ON(!PageReserved(pages[0]));
> -		pages[1] = virt_to_page(addr + PAGE_SIZE);
> +		if (cross_page_boundary)
> +			pages[1] = virt_to_page(addr + PAGE_SIZE);
>  	}
> -	if (!pages[0])
> +
> +	if (!pages[0] || (cross_page_boundary && !pages[1]))
>  		return -EFAULT;
> +
>  	local_irq_save(flags);
> -	set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
> -	if (pages[1])
> -		set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
> -	vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
> -	memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
> -	clear_fixmap(FIX_TEXT_POKE0);
> -	if (pages[1])
> -		clear_fixmap(FIX_TEXT_POKE1);
> -	local_flush_tlb();
> -	sync_core();
> -	/* Could also do a CLFLUSH here to speed up CPU recovery; but
> -	   that causes hangs on some VIA CPUs. */
> -	for (i = 0; i < len; i++)
> -		if (((char *)addr)[i] != ((char *)opcode)[i])
> -			r = -EFAULT;
> +
> +	/*
> +	 * The lock is not really needed, but this allows to avoid open-coding.
> +	 */
> +	ptep = get_locked_pte(poking_mm, poking_addr, &ptl);
> +
> +	/*
> +	 * If we failed to allocate a PTE, fail. This should *never* happen,
> +	 * since we preallocate the PTE.
> +	 */
> +	if (WARN_ON_ONCE(!ptep))
> +		goto out;
> +
> +	pte = mk_pte(pages[0], PAGE_KERNEL);
> +	set_pte_at(poking_mm, poking_addr, ptep, pte);
> +
> +	if (cross_page_boundary) {
> +		pte = mk_pte(pages[1], PAGE_KERNEL);
> +		set_pte_at(poking_mm, poking_addr + PAGE_SIZE, ptep + 1, pte);
> +	}
> +
> +	/*
> +	 * Loading the temporary mm behaves as a compiler barrier, which
> +	 * guarantees that the PTE will be set at the time memcpy() is done.
> +	 */
> +	prev = use_temporary_mm(poking_mm);
> +
> +	kasan_disable_current();
> +	memcpy((u8 *)poking_addr + offset_in_page(addr), opcode, len);
> +	kasan_enable_current();
> +
> +	/*
> +	 * Ensure that the PTE is only cleared after the instructions of memcpy
> +	 * were issued by using a compiler barrier.
> +	 */
> +	barrier();
> +
> +	pte_clear(poking_mm, poking_addr, ptep);
> +
> +	/*
> +	 * __flush_tlb_one_user() performs a redundant TLB flush when PTI is on,
> +	 * as it also flushes the corresponding "user" address spaces, which
> +	 * does not exist.
> +	 *
> +	 * Poking, however, is already very inefficient since it does not try to
> +	 * batch updates, so we ignore this problem for the time being.
> +	 *
> +	 * Since the PTEs do not exist in other kernel address-spaces, we do
> +	 * not use __flush_tlb_one_kernel(), which when PTI is on would cause
> +	 * more unwarranted TLB flushes.
> +	 *
> +	 * There is a slight anomaly here: the PTE is a supervisor-only and
> +	 * (potentially) global and we use __flush_tlb_one_user() but this
> +	 * should be fine.
> +	 */
> +	__flush_tlb_one_user(poking_addr);
> +	if (cross_page_boundary) {
> +		pte_clear(poking_mm, poking_addr + PAGE_SIZE, ptep + 1);
> +		__flush_tlb_one_user(poking_addr + PAGE_SIZE);
> +	}
> +
> +	/*
> +	 * Loading the previous page-table hierarchy requires a serializing
> +	 * instruction that already allows the core to see the updated version.
> +	 * Xen-PV is assumed to serialize execution in a similar manner.
> +	 */
> +	unuse_temporary_mm(prev);
> +
> +	pte_unmap_unlock(ptep, ptl);
> +out:
> +	if (memcmp(addr, opcode, len))
> +		r = -EFAULT;
> +
>  	local_irq_restore(flags);
>  	return r;
>  }
> --
> 2.17.1
>
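P.S. The failing code is the fixmap switch in xen_set_fixmap() in
arch/x86/xen/mmu_pv.c. I assume the two case labels there have to be
dropped together with the fixmap slots. A sketch of what I mean (the
surrounding context lines are from my tree and may differ in yours, so
this is an illustration, not a tested hunk):

--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ ... @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
 	switch (idx) {
 	case FIX_BTMAP_END ... FIX_BTMAP_BEGIN:
-	case FIX_TEXT_POKE0:
-	case FIX_TEXT_POKE1:
 		/* All local page mappings */
 		pte = pfn_pte(phys, prot);
 		break;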