Currently, idt_table is allocated as page-aligned .bss and remapped read-only after init. This splits a 2 MiB large page into 4 KiB page mappings, which defeats some of the effort done at boot to map the kernel image using large pages for improved TLB efficiency.
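The kernel already has an annotation for data that only needs to be writable during boot. For reference, a minimal sketch of what it expands to (roughly as defined in include/linux/cache.h at the time of writing; quoted here purely for illustration):

  #define __ro_after_init __section(".data..ro_after_init")

Objects carrying this attribute are grouped into a dedicated section that ends up inside the kernel's rodata region, which mark_rodata_ro() write-protects wholesale at the end of init, so no per-object set_memory_ro() call (and no large page split) is needed.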
Mark this allocation as __ro_after_init instead, so it will be made
read-only automatically after boot, without breaking up large page
mappings. This also fixes a latent bug on i386, where the size of
idt_table is less than a page, and so remapping it read-only could
potentially affect other read-write variables too, if those are not
page-aligned as well.

Signed-off-by: Ard Biesheuvel <[email protected]>
---
 arch/x86/kernel/idt.c | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

diff --git a/arch/x86/kernel/idt.c b/arch/x86/kernel/idt.c
index f445bec516a0..d6da25d7964f 100644
--- a/arch/x86/kernel/idt.c
+++ b/arch/x86/kernel/idt.c
@@ -170,7 +170,7 @@ static const __initconst struct idt_data apic_idts[] = {
 };
 
 /* Must be page-aligned because the real IDT is used in the cpu entry area */
-static gate_desc idt_table[IDT_ENTRIES] __page_aligned_bss;
+static gate_desc idt_table[IDT_ENTRIES] __aligned(PAGE_SIZE) __ro_after_init;
 
 static struct desc_ptr idt_descr __ro_after_init = {
 	.size		= IDT_TABLE_SIZE - 1,
@@ -308,9 +308,6 @@ void __init idt_setup_apic_and_irq_gates(void)
 	idt_map_in_cea();
 	load_idt(&idt_descr);
 
-	/* Make the IDT table read only */
-	set_memory_ro((unsigned long)&idt_table, 1);
-
 	idt_setup_done = true;
 }
 
-- 
2.47.3
