As discussed in the timens RFC, adding a new conditional branch
`if (inside_time_ns)` to the vdso for all processes is undesirable.

To address this, there are two versions of the vdso .so: one for host
tasks (without any penalty) and one for processes inside a time
namespace, which calls clk_to_ns() to subtract the namespace offsets
from the host's time.

The timens code in the vdso looks like this:

      if (timens_static_branch()) {
              clk_to_ns(clk, ts);
      }
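
For illustration only, clk_to_ns() could be sketched roughly as below;
the timens_offsets array, its clock-indexed layout and the assumption
that nanosecond offsets are normalized to [0, NSEC_PER_SEC) are made
up for this example and are not the actual data layout of the series:

      /* Sketch: subtract the per-namespace offset from the host value. */
      static __always_inline void clk_to_ns(clockid_t clk, struct timespec *ts)
      {
              /* Hypothetical: offsets indexed by clockid in a timens page. */
              const struct timens_offset *off = &timens_offsets[clk];

              ts->tv_sec  -= off->sec;
              ts->tv_nsec -= off->nsec;
              if (ts->tv_nsec < 0) {
                      ts->tv_nsec += NSEC_PER_SEC;
                      ts->tv_sec--;
              }
      }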

The static branch mechanism adds a __jump_table section to the vdso.
The vdso's linker script drops all unwanted sections at link time.
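
For context, each __jump_table entry records the address of the patch
site, the branch target and the key that controls it, roughly along
the lines of the kernel's struct jump_entry (the exact field types are
arch- and config-dependent; the layout below is only illustrative):

      struct jump_entry {
              jump_label_t code;      /* address of the NOP/JMP site     */
              jump_label_t target;    /* branch target once enabled      */
              jump_label_t key;       /* static key controlling the site */
      };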

Preserve the __jump_table section and record it in struct vdso_image,
as it is needed for enabling (patching) the static branches present in
the vdso.
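
A hypothetical sketch of how the patching side could use the new
fields to walk the vdso's jump table; the helper name and the patch
callback are illustrative, not the API added by this series:

      static void vdso_for_each_jump_entry(const struct vdso_image *image,
                                           void (*patch)(struct jump_entry *))
      {
              struct jump_entry *iter = image->data + image->jump_table;
              void *end = image->data + image->jump_table +
                          image->jump_table_len;

              for (; (void *)iter < end; iter++)
                      patch(iter);    /* e.g. rewrite the NOP into a JMP */
      }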

Co-developed-by: Andrei Vagin <ava...@gmail.com>
Signed-off-by: Andrei Vagin <ava...@gmail.com>
Signed-off-by: Dmitry Safonov <d...@arista.com>
---
 arch/x86/entry/vdso/vdso-layout.lds.S | 1 +
 arch/x86/entry/vdso/vdso2c.h          | 9 ++++++++-
 arch/x86/include/asm/vdso.h           | 1 +
 3 files changed, 10 insertions(+), 1 deletion(-)

diff --git a/arch/x86/entry/vdso/vdso-layout.lds.S b/arch/x86/entry/vdso/vdso-layout.lds.S
index ba216527e59f..69dbe4821aa5 100644
--- a/arch/x86/entry/vdso/vdso-layout.lds.S
+++ b/arch/x86/entry/vdso/vdso-layout.lds.S
@@ -45,6 +45,7 @@ SECTIONS
        .gnu.version    : { *(.gnu.version) }
        .gnu.version_d  : { *(.gnu.version_d) }
        .gnu.version_r  : { *(.gnu.version_r) }
+       __jump_table    : { *(__jump_table) }   :text
 
        .dynamic        : { *(.dynamic) }               :text   :dynamic
 
diff --git a/arch/x86/entry/vdso/vdso2c.h b/arch/x86/entry/vdso/vdso2c.h
index 885b988aea19..318b278ca396 100644
--- a/arch/x86/entry/vdso/vdso2c.h
+++ b/arch/x86/entry/vdso/vdso2c.h
@@ -14,7 +14,7 @@ static void BITSFUNC(go)(void *raw_addr, size_t raw_len,
        unsigned long mapping_size;
        ELF(Ehdr) *hdr = (ELF(Ehdr) *)raw_addr;
        unsigned int i, syms_nr;
-       unsigned long j;
+       unsigned long j, jump_table_addr = -1UL, jump_table_size = -1UL;
        ELF(Shdr) *symtab_hdr = NULL, *strtab_hdr, *secstrings_hdr,
                *alt_sec = NULL;
        ELF(Dyn) *dyn = 0, *dyn_end = 0;
@@ -78,6 +78,10 @@ static void BITSFUNC(go)(void *raw_addr, size_t raw_len,
                if (!strcmp(secstrings + GET_LE(&sh->sh_name),
                            ".altinstructions"))
                        alt_sec = sh;
+               if (!strcmp(secstrings + GET_LE(&sh->sh_name), "__jump_table")) {
+                       jump_table_addr = GET_LE(&sh->sh_offset);
+                       jump_table_size = GET_LE(&sh->sh_size);
+               }
        }
 
        if (!symtab_hdr)
@@ -166,6 +170,9 @@ static void BITSFUNC(go)(void *raw_addr, size_t raw_len,
                fprintf(outfile, "\t.alt_len = %lu,\n",
                        (unsigned long)GET_LE(&alt_sec->sh_size));
        }
+       fprintf(outfile, "\t.jump_table = %luUL,\n", jump_table_addr);
+       fprintf(outfile, "\t.jump_table_len = %luUL,\n", jump_table_size);
+
        for (i = 0; i < NSYMS; i++) {
                if (required_syms[i].export && syms[i])
                        fprintf(outfile, "\t.sym_%s = %" PRIi64 ",\n",
diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
index ccf89dedd04f..5e83bd3cda22 100644
--- a/arch/x86/include/asm/vdso.h
+++ b/arch/x86/include/asm/vdso.h
@@ -16,6 +16,7 @@ struct vdso_image {
        unsigned long size;   /* Always a multiple of PAGE_SIZE */
 
        unsigned long alt, alt_len;
+       unsigned long jump_table, jump_table_len;
 
        long sym_vvar_start;  /* Negative offset to the vvar area */
 
-- 
2.22.0
