Currently the vdso data occupies one page. Upcoming patches will add per-cpu data to the vdso, which requires several pages when the CPU count is large. This patch makes the vdso data support multiple pages.
Cc: Andy Lutomirski <l...@amacapital.net>
Cc: H. Peter Anvin <h...@zytor.com>
Cc: Ingo Molnar <mi...@redhat.com>
Signed-off-by: Shaohua Li <s...@fb.com>
---
 arch/x86/include/asm/vvar.h     | 6 +++++-
 arch/x86/kernel/asm-offsets.c   | 5 +++++
 arch/x86/kernel/vmlinux.lds.S   | 4 +---
 arch/x86/vdso/vdso-layout.lds.S | 5 +++--
 arch/x86/vdso/vma.c             | 3 ++-
 5 files changed, 16 insertions(+), 7 deletions(-)

diff --git a/arch/x86/include/asm/vvar.h b/arch/x86/include/asm/vvar.h
index 5d2b9ad..fcbe621 100644
--- a/arch/x86/include/asm/vvar.h
+++ b/arch/x86/include/asm/vvar.h
@@ -47,7 +47,11 @@ extern char __vvar_page;
 DECLARE_VVAR(0, volatile unsigned long, jiffies)
 DECLARE_VVAR(16, int, vgetcpu_mode)
 DECLARE_VVAR(128, struct vsyscall_gtod_data, vsyscall_gtod_data)
-
+/*
+ * you must update VVAR_TOTAL_SIZE to reflect all of the variables we're
+ * stuffing into the vvar area. Don't change any of the above without
+ * also changing this math of VVAR_TOTAL_SIZE
+ */
 #undef DECLARE_VVAR

 #endif
diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
index 9f6b934..0ab31a9 100644
--- a/arch/x86/kernel/asm-offsets.c
+++ b/arch/x86/kernel/asm-offsets.c
@@ -16,6 +16,7 @@
 #include <asm/sigframe.h>
 #include <asm/bootparam.h>
 #include <asm/suspend.h>
+#include <asm/vgtod.h>

 #ifdef CONFIG_XEN
 #include <xen/interface/xen.h>
@@ -71,4 +72,8 @@ void common(void) {

 	BLANK();
 	DEFINE(PTREGS_SIZE, sizeof(struct pt_regs));
+
+	BLANK();
+	DEFINE(VVAR_TOTAL_SIZE,
+	       ALIGN(128 + sizeof(struct vsyscall_gtod_data), PAGE_SIZE));
 }
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 49edf2d..8b11307 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -168,11 +168,9 @@ SECTIONS
 	 * Pad the rest of the page with zeros. Otherwise the loader
 	 * can leave garbage here.
 	 */
-	. = __vvar_beginning_hack + PAGE_SIZE;
+	. = __vvar_beginning_hack + VVAR_TOTAL_SIZE;
 	} :data

-	. = ALIGN(__vvar_page + PAGE_SIZE, PAGE_SIZE);
-
 	/* Init code and data - will be freed after init */
 	. = ALIGN(PAGE_SIZE);
 	.init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
diff --git a/arch/x86/vdso/vdso-layout.lds.S b/arch/x86/vdso/vdso-layout.lds.S
index de2c921..acaf8ce 100644
--- a/arch/x86/vdso/vdso-layout.lds.S
+++ b/arch/x86/vdso/vdso-layout.lds.S
@@ -1,4 +1,5 @@
 #include <asm/vdso.h>
+#include <asm/asm-offsets.h>

 /*
  * Linker script for vDSO. This is an ELF shared object prelinked to
@@ -25,7 +26,7 @@ SECTIONS
 	 * segment.
 	 */

-	vvar_start = . - 2 * PAGE_SIZE;
+	vvar_start = . - (VVAR_TOTAL_SIZE + PAGE_SIZE);
 	vvar_page = vvar_start;

 	/* Place all vvars at the offsets in asm/vvar.h. */
@@ -35,7 +36,7 @@ SECTIONS
 #undef __VVAR_KERNEL_LDS
 #undef EMIT_VVAR

-	hpet_page = vvar_start + PAGE_SIZE;
+	hpet_page = vvar_start + VVAR_TOTAL_SIZE;

 	. = SIZEOF_HEADERS;
diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
index 970463b..fc37067 100644
--- a/arch/x86/vdso/vma.c
+++ b/arch/x86/vdso/vma.c
@@ -16,6 +16,7 @@
 #include <asm/vdso.h>
 #include <asm/page.h>
 #include <asm/hpet.h>
+#include <asm/asm-offsets.h>

 #if defined(CONFIG_X86_64)
 unsigned int __read_mostly vdso64_enabled = 1;
@@ -150,7 +151,7 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)

 	ret = remap_pfn_range(vma,
 			      text_start + image->sym_vvar_page,
 			      __pa_symbol(&__vvar_page) >> PAGE_SHIFT,
-			      PAGE_SIZE,
+			      VVAR_TOTAL_SIZE,
 			      PAGE_READONLY);

 	if (ret)
--
1.8.1

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majord...@vger.kernel.org
More majordomo info at http://vger.kernel.org/majordomo-info.html
Please read the FAQ at http://www.tux.org/lkml/