Use the newly added SYM_FUNC_START_LOCAL to annotate the start of every
function which does not have a ".globl" annotation. This is needed to
balance SYM_FUNC_END for tools that will generate debuginfo.

Note that some of these functions already had an ENDPROC annotation;
switch those to the new SYM_FUNC_END as well.

[v3] annotate more functions

Signed-off-by: Jiri Slaby <jsl...@suse.cz>
Cc: Thomas Gleixner <t...@linutronix.de>
Cc: Ingo Molnar <mi...@redhat.com>
Cc: "H. Peter Anvin" <h...@zytor.com>
Cc: <x...@kernel.org>
---
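Note for reviewers: the conversion pattern is the same in every hunk below.
A bare local label (sometimes preceded by ALIGN) becomes a
SYM_FUNC_START_LOCAL/SYM_FUNC_END pair, and any existing ENDPROC on such a
function is replaced by SYM_FUNC_END. A minimal before/after sketch, with a
made-up label name used purely for illustration:

	# before: bare local label, no symbol type/size for debuginfo tools
	ALIGN
some_local_func:
	...
	ret

	# after: local function with annotated start and end
SYM_FUNC_START_LOCAL(some_local_func)
	...
	ret
SYM_FUNC_END(some_local_func)
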
 arch/x86/entry/entry_32.S        |  5 ++---
 arch/x86/entry/entry_64.S        |  3 ++-
 arch/x86/kernel/acpi/wakeup_64.S |  3 ++-
 arch/x86/kernel/head_32.S        | 10 +++++-----
 arch/x86/kernel/head_64.S        |  7 ++++---
 arch/x86/kernel/verify_cpu.S     |  3 ++-
 arch/x86/lib/getuser.S           |  8 ++++----
 arch/x86/lib/putuser.S           |  4 ++--
 arch/x86/net/bpf_jit.S           | 21 ++++++++++++++-------
 9 files changed, 37 insertions(+), 27 deletions(-)

diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index afeeb389e9aa..73c34507893b 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -299,8 +299,7 @@ ENDPROC(ret_from_fork)
  */
 
        # userspace resumption stub bypassing syscall exit tracing
-       ALIGN
-ret_from_exception:
+SYM_FUNC_START_LOCAL(ret_from_exception)
        preempt_stop(CLBR_ANY)
 ret_from_intr:
 #ifdef CONFIG_VM86
@@ -323,7 +322,7 @@ ENTRY(resume_userspace)
        movl    %esp, %eax
        call    prepare_exit_to_usermode
        jmp     restore_all
-ENDPROC(ret_from_exception)
+SYM_FUNC_END(ret_from_exception)
 
 #ifdef CONFIG_PREEMPT
 ENTRY(resume_kernel)
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 1fe8758102cb..aff5f8051ce2 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -876,7 +876,7 @@ EXPORT_SYMBOL(native_load_gs_index)
        _ASM_EXTABLE(.Lgs_change, bad_gs)
        .section .fixup, "ax"
        /* running with kernelgs */
-bad_gs:
+SYM_FUNC_START_LOCAL(bad_gs)
        SWAPGS                                  /* switch back to user gs */
 .macro ZAP_GS
        /* This can't be a string because the preprocessor needs to see it. */
@@ -887,6 +887,7 @@ bad_gs:
        xorl    %eax, %eax
        movl    %eax, %gs
        jmp     2b
+SYM_FUNC_END(bad_gs)
        .previous
 
 /* Call softirq on interrupt stack. Interrupts are off. */
diff --git a/arch/x86/kernel/acpi/wakeup_64.S b/arch/x86/kernel/acpi/wakeup_64.S
index e53a03648cc7..8fca92dd9144 100644
--- a/arch/x86/kernel/acpi/wakeup_64.S
+++ b/arch/x86/kernel/acpi/wakeup_64.S
@@ -36,8 +36,9 @@ ENTRY(wakeup_long64)
        jmp     *%rax
 ENDPROC(wakeup_long64)
 
-bogus_64_magic:
+SYM_FUNC_START_LOCAL(bogus_64_magic)
        jmp     bogus_64_magic
+SYM_FUNC_END(bogus_64_magic)
 
 ENTRY(do_suspend_lowlevel)
        FRAME_BEGIN
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index b9b6c6ff16a0..00193c67845c 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -446,7 +446,7 @@ ENTRY(early_idt_handler_array)
        .endr
 ENDPROC(early_idt_handler_array)
        
-early_idt_handler_common:
+SYM_FUNC_START_LOCAL(early_idt_handler_common)
        /*
         * The stack is the hardware frame, an error code or zero, and the
         * vector number.
@@ -501,11 +501,10 @@ early_idt_handler_common:
        decl    %ss:early_recursion_flag
        addl    $4, %esp        /* pop pt_regs->orig_ax */
        iret
-ENDPROC(early_idt_handler_common)
+SYM_FUNC_END(early_idt_handler_common)
 
 /* This is the default interrupt "handler" :-) */
-       ALIGN
-ignore_int:
+SYM_FUNC_START_LOCAL(ignore_int)
        cld
 #ifdef CONFIG_PRINTK
        pushl %eax
@@ -540,7 +539,8 @@ ignore_int:
 hlt_loop:
        hlt
        jmp hlt_loop
-ENDPROC(ignore_int)
+SYM_FUNC_END(ignore_int)
+
 __INITDATA
        .align 4
 GLOBAL(early_recursion_flag)
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index dace17c6c5fe..a2230c35fcf9 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -335,8 +335,9 @@ ENDPROC(start_cpu0)
        .quad  init_thread_union + THREAD_SIZE - SIZEOF_PTREGS
        __FINITDATA
 
-bad_address:
+SYM_FUNC_START_LOCAL(bad_address)
        jmp bad_address
+SYM_FUNC_END(bad_address)
 
        __INIT
 ENTRY(early_idt_handler_array)
@@ -356,7 +357,7 @@ ENTRY(early_idt_handler_array)
        .endr
 ENDPROC(early_idt_handler_array)
 
-early_idt_handler_common:
+SYM_FUNC_START_LOCAL(early_idt_handler_common)
        /*
         * The stack is the hardware frame, an error code or zero, and the
         * vector number.
@@ -397,7 +398,7 @@ early_idt_handler_common:
 20:
        decl early_recursion_flag(%rip)
        jmp restore_regs_and_iret
-ENDPROC(early_idt_handler_common)
+SYM_FUNC_END(early_idt_handler_common)
 
        __INITDATA
 
diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
index 014ea59aa153..fd60f1ac5fec 100644
--- a/arch/x86/kernel/verify_cpu.S
+++ b/arch/x86/kernel/verify_cpu.S
@@ -33,7 +33,7 @@
 #include <asm/cpufeatures.h>
 #include <asm/msr-index.h>
 
-verify_cpu:
+SYM_FUNC_START_LOCAL(verify_cpu)
        pushf                           # Save caller passed flags
        push    $0                      # Kill any dangerous flags
        popf
@@ -139,3 +139,4 @@ verify_cpu:
        popf                            # Restore caller passed flags
        xorl %eax, %eax
        ret
+SYM_FUNC_END(verify_cpu)
diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
index 37b62d412148..29f0707a3913 100644
--- a/arch/x86/lib/getuser.S
+++ b/arch/x86/lib/getuser.S
@@ -104,21 +104,21 @@ ENDPROC(__get_user_8)
 EXPORT_SYMBOL(__get_user_8)
 
 
-bad_get_user:
+SYM_FUNC_START_LOCAL(bad_get_user)
        xor %edx,%edx
        mov $(-EFAULT),%_ASM_AX
        ASM_CLAC
        ret
-END(bad_get_user)
+SYM_FUNC_END(bad_get_user)
 
 #ifdef CONFIG_X86_32
-bad_get_user_8:
+SYM_FUNC_START_LOCAL(bad_get_user_8)
        xor %edx,%edx
        xor %ecx,%ecx
        mov $(-EFAULT),%_ASM_AX
        ASM_CLAC
        ret
-END(bad_get_user_8)
+SYM_FUNC_END(bad_get_user_8)
 #endif
 
        _ASM_EXTABLE(1b,bad_get_user)
diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
index cd5d716d2897..d77883f36875 100644
--- a/arch/x86/lib/putuser.S
+++ b/arch/x86/lib/putuser.S
@@ -88,10 +88,10 @@ ENTRY(__put_user_8)
 ENDPROC(__put_user_8)
 EXPORT_SYMBOL(__put_user_8)
 
-bad_put_user:
+SYM_FUNC_START_LOCAL(bad_put_user)
        movl $-EFAULT,%eax
        EXIT
-END(bad_put_user)
+SYM_FUNC_END(bad_put_user)
 
        _ASM_EXTABLE(1b,bad_put_user)
        _ASM_EXTABLE(2b,bad_put_user)
diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
index 762c29fb8832..823edd6f1db7 100644
--- a/arch/x86/net/bpf_jit.S
+++ b/arch/x86/net/bpf_jit.S
@@ -81,26 +81,29 @@ ENDPROC(sk_load_byte_positive_offset)
        FRAME_END
 
 
-bpf_slow_path_word:
+SYM_FUNC_START_LOCAL(bpf_slow_path_word)
        bpf_slow_path_common(4)
        js      bpf_error
        mov     - MAX_BPF_STACK + 32(%rbp),%eax
        bswap   %eax
        ret
+SYM_FUNC_END(bpf_slow_path_word)
 
-bpf_slow_path_half:
+SYM_FUNC_START_LOCAL(bpf_slow_path_half)
        bpf_slow_path_common(2)
        js      bpf_error
        mov     - MAX_BPF_STACK + 32(%rbp),%ax
        rol     $8,%ax
        movzwl  %ax,%eax
        ret
+SYM_FUNC_END(bpf_slow_path_half)
 
-bpf_slow_path_byte:
+SYM_FUNC_START_LOCAL(bpf_slow_path_byte)
        bpf_slow_path_common(1)
        js      bpf_error
        movzbl  - MAX_BPF_STACK + 32(%rbp),%eax
        ret
+SYM_FUNC_END(bpf_slow_path_byte)
 
 #define sk_negative_common(SIZE)                               \
        FRAME_BEGIN;                                            \
@@ -116,9 +119,10 @@ bpf_slow_path_byte:
        FRAME_END;                                              \
        jz      bpf_error
 
-bpf_slow_path_word_neg:
+SYM_FUNC_START_LOCAL(bpf_slow_path_word_neg)
        cmp     SKF_MAX_NEG_OFF, %esi   /* test range */
        jl      bpf_error       /* offset lower -> error  */
+SYM_FUNC_END(bpf_slow_path_word_neg)
 
 ENTRY(sk_load_word_negative_offset)
        sk_negative_common(4)
@@ -127,9 +131,10 @@ ENTRY(sk_load_word_negative_offset)
        ret
 ENDPROC(sk_load_word_negative_offset)
 
-bpf_slow_path_half_neg:
+SYM_FUNC_START_LOCAL(bpf_slow_path_half_neg)
        cmp     SKF_MAX_NEG_OFF, %esi
        jl      bpf_error
+SYM_FUNC_END(bpf_slow_path_half_neg)
 
 ENTRY(sk_load_half_negative_offset)
        sk_negative_common(2)
@@ -139,9 +144,10 @@ ENTRY(sk_load_half_negative_offset)
        ret
 ENDPROC(sk_load_half_negative_offset)
 
-bpf_slow_path_byte_neg:
+SYM_FUNC_START_LOCAL(bpf_slow_path_byte_neg)
        cmp     SKF_MAX_NEG_OFF, %esi
        jl      bpf_error
+SYM_FUNC_END(bpf_slow_path_byte_neg)
 
 ENTRY(sk_load_byte_negative_offset)
        sk_negative_common(1)
@@ -149,7 +155,7 @@ ENTRY(sk_load_byte_negative_offset)
        ret
 ENDPROC(sk_load_byte_negative_offset)
 
-bpf_error:
+SYM_FUNC_START_LOCAL(bpf_error)
 # force a return 0 from jit handler
        xor     %eax,%eax
        mov     - MAX_BPF_STACK(%rbp),%rbx
@@ -158,3 +164,4 @@ bpf_error:
        mov     - MAX_BPF_STACK + 24(%rbp),%r15
        leaveq
        ret
+SYM_FUNC_END(bpf_error)
-- 
2.12.2
