On Sat, Nov 10, 2018 at 03:17:28PM -0800, Nadav Amit wrote:
> @@ -683,43 +684,108 @@ __ro_after_init unsigned long poking_addr;
>
>  static int __text_poke(void *addr, const void *opcode, size_t len)
>  {
> +	bool cross_page_boundary = offset_in_page(addr) + len > PAGE_SIZE;
> +	temporary_mm_state_t prev;
> +	struct page *pages[2] = {NULL};
>  	unsigned long flags;
> +	pte_t pte, *ptep;
> +	spinlock_t *ptl;
> +	int r = 0;
>
>  	/*
> +	 * While boot memory allocator is running we cannot use struct pages as
> +	 * they are not yet initialized.
>  	 */
>  	BUG_ON(!after_bootmem);
>
>  	if (!core_kernel_text((unsigned long)addr)) {
>  		pages[0] = vmalloc_to_page(addr);
> +		if (cross_page_boundary)
> +			pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
>  	} else {
>  		pages[0] = virt_to_page(addr);
>  		WARN_ON(!PageReserved(pages[0]));
> +		if (cross_page_boundary)
> +			pages[1] = virt_to_page(addr + PAGE_SIZE);
>  	}
> +
> +	if (!pages[0] || (cross_page_boundary && !pages[1]))
>  		return -EFAULT;
> +
>  	local_irq_save(flags);
> +
> +	/*
> +	 * The lock is not really needed, but this allows to avoid open-coding.
> +	 */
> +	ptep = get_locked_pte(poking_mm, poking_addr, &ptl);
> +
> +	/*
> +	 * If we failed to allocate a PTE, fail. This should *never* happen,
> +	 * since we preallocate the PTE.
> +	 */
> +	if (WARN_ON_ONCE(!ptep))
> +		goto out;
Since we rely hard on init getting that right, can't we simply get rid of
this?

> +
> +	pte = mk_pte(pages[0], PAGE_KERNEL);
> +	set_pte_at(poking_mm, poking_addr, ptep, pte);
> +
> +	if (cross_page_boundary) {
> +		pte = mk_pte(pages[1], PAGE_KERNEL);
> +		set_pte_at(poking_mm, poking_addr + PAGE_SIZE, ptep + 1, pte);
> +	}
> +
> +	/*
> +	 * Loading the temporary mm behaves as a compiler barrier, which
> +	 * guarantees that the PTE will be set at the time memcpy() is done.
> +	 */
> +	prev = use_temporary_mm(poking_mm);
> +
> +	kasan_disable_current();
> +	memcpy((u8 *)poking_addr + offset_in_page(addr), opcode, len);
> +	kasan_enable_current();
> +
> +	/*
> +	 * Ensure that the PTE is only cleared after the instructions of memcpy
> +	 * were issued by using a compiler barrier.
> +	 */
> +	barrier();
> +
> +	pte_clear(poking_mm, poking_addr, ptep);
> +
> +	/*
> +	 * __flush_tlb_one_user() performs a redundant TLB flush when PTI is on,
> +	 * as it also flushes the corresponding "user" address spaces, which
> +	 * does not exist.
> +	 *
> +	 * Poking, however, is already very inefficient since it does not try to
> +	 * batch updates, so we ignore this problem for the time being.
> +	 *
> +	 * Since the PTEs do not exist in other kernel address-spaces, we do
> +	 * not use __flush_tlb_one_kernel(), which when PTI is on would cause
> +	 * more unwarranted TLB flushes.
> +	 *
> +	 * There is a slight anomaly here: the PTE is a supervisor-only and
> +	 * (potentially) global and we use __flush_tlb_one_user() but this
> +	 * should be fine.
> +	 */
> +	__flush_tlb_one_user(poking_addr);
> +	if (cross_page_boundary) {
> +		pte_clear(poking_mm, poking_addr + PAGE_SIZE, ptep + 1);
> +		__flush_tlb_one_user(poking_addr + PAGE_SIZE);
> +	}
> +
> +	/*
> +	 * Loading the previous page-table hierarchy requires a serializing
> +	 * instruction that already allows the core to see the updated version.
> +	 * Xen-PV is assumed to serialize execution in a similar manner.
> +	 */
> +	unuse_temporary_mm(prev);
> +
> +	pte_unmap_unlock(ptep, ptl);
> +out:
> +	if (memcmp(addr, opcode, len))
> +		r = -EFAULT;

How could this ever fail? And how could we reliably recover if it did? We
can move the BUG_ON() we have in text_poke() down a level, but the
static_key/jump_label code, for example, has no real option for handling a
failure here (see the sketches below).

> +
>  	local_irq_restore(flags);
>  	return r;
>  }

Other than that, this looks really good!
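
For readers following along: the "init getting that right" above refers to
preallocating the PTE for the poking address at boot, so the lookup in
__text_poke() cannot fail later. A minimal sketch of what such a
poking_init()-style setup function could look like; this is illustrative,
not the submitted patch, and it assumes poking_mm and poking_addr have
already been set up earlier during init:

/*
 * Walk -- and thereby allocate -- the page-table levels for the poking
 * address once at boot, so that get_locked_pte() in __text_poke() can
 * never fail at patch time.
 */
void __init poking_init(void)
{
	spinlock_t *ptl;
	pte_t *ptep;

	ptep = get_locked_pte(poking_mm, poking_addr, &ptl);

	/* A boot-time allocation failure here is fatal by design. */
	BUG_ON(!ptep);

	pte_unmap_unlock(ptep, ptl);
}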
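
And on the memcmp() question: "moving the BUG_ON() down a level" could look
roughly like the following at the tail of __text_poke(), with the
verification made fatal there instead of being returned as -EFAULT that no
caller can act on. Again a sketch under the assumptions above (the
WARN_ON_ONCE()/goto dropped, so no out: label remains), not the code under
review:

	pte_unmap_unlock(ptep, ptl);

	/*
	 * Treat a verification mismatch as fatal right here: if the
	 * copy did not take effect, the kernel text is in an unknown
	 * state, and callers such as static_key/jump_label have no way
	 * to back out half a patch anyway.
	 */
	BUG_ON(memcmp(addr, opcode, len));

	local_irq_restore(flags);
	return 0;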