Add the local label prefix to all non-function named labels in head_32.S
and entry_32.S.  In addition to decluttering the symbol table, this
makes stack traces more sensible.  For example, the last reported
function in the idle task stack trace is now startup_32_smp() instead of
is486().
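
For reference, a minimal GNU as sketch (not part of this patch; the
names example_func and .Lexample_retry are made up for illustration,
and the kernel's ENTRY/ENDPROC macros from <linux/linkage.h> are
assumed): the assembler treats any symbol starting with ".L" as a
local label and does not emit it into the object file's symbol table,
so it can no longer show up as a bogus "function" boundary in stack
traces.

	ENTRY(example_func)		# global symbol, kept in the symbol table
	.Lexample_retry:		# ".L" prefix: local label, not emitted
		decl	%ecx
		jnz	.Lexample_retry
		ret
	ENDPROC(example_func)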

Signed-off-by: Josh Poimboeuf <jpoim...@redhat.com>
---
 arch/x86/entry/entry_32.S | 57 ++++++++++++++++++++++++-----------------------
 arch/x86/kernel/head_32.S | 32 +++++++++++++-------------
 2 files changed, 45 insertions(+), 44 deletions(-)

diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index 0b56666..df4e045 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -64,7 +64,7 @@
 # define preempt_stop(clobbers)        DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
 #else
 # define preempt_stop(clobbers)
-# define resume_kernel         restore_all
+# define resume_kernel         .Lrestore_all
 #endif
 
 .macro TRACE_IRQS_IRET
@@ -212,7 +212,7 @@ ENTRY(ret_from_fork)
        /* When we fork, we trace the syscall return in the child, too. */
        movl    %esp, %eax
        call    syscall_return_slowpath
-       jmp     restore_all
+       jmp     .Lrestore_all
 END(ret_from_fork)
 
 ENTRY(ret_from_kernel_thread)
@@ -230,7 +230,7 @@ ENTRY(ret_from_kernel_thread)
         */
        movl    %esp, %eax
        call    syscall_return_slowpath
-       jmp     restore_all
+       jmp     .Lrestore_all
 ENDPROC(ret_from_kernel_thread)
 
 /*
@@ -264,19 +264,19 @@ ENTRY(resume_userspace)
        TRACE_IRQS_OFF
        movl    %esp, %eax
        call    prepare_exit_to_usermode
-       jmp     restore_all
+       jmp     .Lrestore_all
 END(ret_from_exception)
 
 #ifdef CONFIG_PREEMPT
 ENTRY(resume_kernel)
        DISABLE_INTERRUPTS(CLBR_ANY)
-need_resched:
+.Lneed_resched:
        cmpl    $0, PER_CPU_VAR(__preempt_count)
-       jnz     restore_all
+       jnz     .Lrestore_all
        testl   $X86_EFLAGS_IF, PT_EFLAGS(%esp) # interrupts off (exception path) ?
-       jz      restore_all
+       jz      .Lrestore_all
        call    preempt_schedule_irq
-       jmp     need_resched
+       jmp     .Lneed_resched
 END(resume_kernel)
 #endif
 
@@ -297,7 +297,7 @@ GLOBAL(__begin_SYSENTER_singlestep_region)
  */
 ENTRY(xen_sysenter_target)
        addl    $5*4, %esp                      /* remove xen-provided frame */
-       jmp     sysenter_past_esp
+       jmp     .Lsysenter_past_esp
 #endif
 
 /*
@@ -334,7 +334,7 @@ ENTRY(xen_sysenter_target)
  */
 ENTRY(entry_SYSENTER_32)
        movl    TSS_sysenter_sp0(%esp), %esp
-sysenter_past_esp:
+.Lsysenter_past_esp:
        pushl   $__USER_DS              /* pt_regs->ss */
        pushl   %ebp                    /* pt_regs->sp (stashed in bp) */
        pushfl                          /* pt_regs->flags (except IF = 0) */
@@ -465,11 +465,11 @@ ENTRY(entry_INT80_32)
        call    do_int80_syscall_32
 .Lsyscall_32_done:
 
-restore_all:
+.Lrestore_all:
        TRACE_IRQS_IRET
-restore_all_notrace:
+.Lrestore_all_notrace:
 #ifdef CONFIG_X86_ESPFIX32
-       ALTERNATIVE     "jmp restore_nocheck", "", X86_BUG_ESPFIX
+       ALTERNATIVE     "jmp .Lrestore_nocheck", "", X86_BUG_ESPFIX
 
        movl    PT_EFLAGS(%esp), %eax           # mix EFLAGS, SS and CS
        /*
@@ -481,22 +481,23 @@ restore_all_notrace:
        movb    PT_CS(%esp), %al
        andl    $(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
        cmpl    $((SEGMENT_LDT << 8) | USER_RPL), %eax
-       je ldt_ss                               # returning to user-space with LDT SS
+       je .Lldt_ss                             # returning to user-space with LDT SS
 #endif
-restore_nocheck:
+.Lrestore_nocheck:
        RESTORE_REGS 4                          # skip orig_eax/error_code
-irq_return:
+.Lirq_return:
        INTERRUPT_RETURN
+
 .section .fixup, "ax"
 ENTRY(iret_exc )
        pushl   $0                              # no error code
        pushl   $do_iret_error
        jmp     error_code
 .previous
-       _ASM_EXTABLE(irq_return, iret_exc)
+       _ASM_EXTABLE(.Lirq_return, iret_exc)
 
 #ifdef CONFIG_X86_ESPFIX32
-ldt_ss:
+.Lldt_ss:
 /*
  * Setup and switch to ESPFIX stack
  *
@@ -525,7 +526,7 @@ ldt_ss:
         */
        DISABLE_INTERRUPTS(CLBR_EAX)
        lss     (%esp), %esp                    /* switch to espfix segment */
-       jmp     restore_nocheck
+       jmp     .Lrestore_nocheck
 #endif
 ENDPROC(entry_INT80_32)
 
@@ -845,7 +846,7 @@ ftrace_call:
        popl    %edx
        popl    %ecx
        popl    %eax
-ftrace_ret:
+.Lftrace_ret:
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 .globl ftrace_graph_call
 ftrace_graph_call:
@@ -915,7 +916,7 @@ GLOBAL(ftrace_regs_call)
        popl    %gs
        addl    $8, %esp                        /* Skip orig_ax and ip */
        popf                                    /* Pop flags at end (no addl to corrupt flags) */
-       jmp     ftrace_ret
+       jmp     .Lftrace_ret
 
        popf
        jmp     ftrace_stub
@@ -926,7 +927,7 @@ ENTRY(mcount)
        jb      ftrace_stub                     /* Paging not enabled yet? */
 
        cmpl    $ftrace_stub, ftrace_trace_function
-       jnz     trace
+       jnz     .Ltrace
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
        cmpl    $ftrace_stub, ftrace_graph_return
        jnz     ftrace_graph_caller
@@ -939,7 +940,7 @@ ftrace_stub:
        ret
 
        /* taken from glibc */
-trace:
+.Ltrace:
        pushl   %eax
        pushl   %ecx
        pushl   %edx
@@ -1078,7 +1079,7 @@ ENTRY(nmi)
        movl    %ss, %eax
        cmpw    $__ESPFIX_SS, %ax
        popl    %eax
-       je      nmi_espfix_stack
+       je      .Lnmi_espfix_stack
 #endif
 
        pushl   %eax                            # pt_regs->orig_ax
@@ -1094,7 +1095,7 @@ ENTRY(nmi)
 
        /* Not on SYSENTER stack. */
        call    do_nmi
-       jmp     restore_all_notrace
+       jmp     .Lrestore_all_notrace
 
 .Lnmi_from_sysenter_stack:
        /*
@@ -1105,10 +1106,10 @@ ENTRY(nmi)
        movl    PER_CPU_VAR(cpu_current_top_of_stack), %esp
        call    do_nmi
        movl    %ebp, %esp
-       jmp     restore_all_notrace
+       jmp     .Lrestore_all_notrace
 
 #ifdef CONFIG_X86_ESPFIX32
-nmi_espfix_stack:
+.Lnmi_espfix_stack:
        /*
         * create the pointer to lss back
         */
@@ -1126,7 +1127,7 @@ nmi_espfix_stack:
        call    do_nmi
        RESTORE_REGS
        lss     12+4(%esp), %esp                # back to espfix stack
-       jmp     irq_return
+       jmp     .Lirq_return
 #endif
 END(nmi)
 
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index 6fc4f1d..53202d7 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -249,19 +249,19 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
 #ifdef CONFIG_PARAVIRT
        /* This is can only trip for a broken bootloader... */
        cmpw $0x207, pa(boot_params + BP_version)
-       jb default_entry
+       jb .Ldefault_entry
 
        /* Paravirt-compatible boot parameters.  Look to see what architecture
                we're booting under. */
        movl pa(boot_params + BP_hardware_subarch), %eax
        cmpl $num_subarch_entries, %eax
-       jae bad_subarch
+       jae .Lbad_subarch
 
        movl pa(subarch_entries)(,%eax,4), %eax
        subl $__PAGE_OFFSET, %eax
        jmp *%eax
 
-bad_subarch:
+.Lbad_subarch:
 WEAK(lguest_entry)
 WEAK(xen_entry)
        /* Unknown implementation; there's really
@@ -271,14 +271,14 @@ WEAK(xen_entry)
        __INITDATA
 
 subarch_entries:
-       .long default_entry             /* normal x86/PC */
+       .long .Ldefault_entry           /* normal x86/PC */
        .long lguest_entry              /* lguest hypervisor */
        .long xen_entry                 /* Xen hypervisor */
-       .long default_entry             /* Moorestown MID */
+       .long .Ldefault_entry           /* Moorestown MID */
 num_subarch_entries = (. - subarch_entries) / 4
 .previous
 #else
-       jmp default_entry
+       jmp .Ldefault_entry
 #endif /* CONFIG_PARAVIRT */
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -318,7 +318,7 @@ ENTRY(startup_32_smp)
        call load_ucode_ap
 #endif
 
-default_entry:
+.Ldefault_entry:
 #define CR0_STATE      (X86_CR0_PE | X86_CR0_MP | X86_CR0_ET | \
                         X86_CR0_NE | X86_CR0_WP | X86_CR0_AM | \
                         X86_CR0_PG)
@@ -348,7 +348,7 @@ default_entry:
        pushfl
        popl %eax                       # get EFLAGS
        testl $X86_EFLAGS_ID,%eax       # did EFLAGS.ID remained set?
-       jz enable_paging                # hw disallowed setting of ID bit
+       jz .Lenable_paging              # hw disallowed setting of ID bit
                                        # which means no CPUID and no CR4
 
        xorl %eax,%eax
@@ -358,13 +358,13 @@ default_entry:
        movl $1,%eax
        cpuid
        andl $~1,%edx                   # Ignore CPUID.FPU
-       jz enable_paging                # No flags or only CPUID.FPU = no CR4
+       jz .Lenable_paging              # No flags or only CPUID.FPU = no CR4
 
        movl pa(mmu_cr4_features),%eax
        movl %eax,%cr4
 
        testb $X86_CR4_PAE, %al         # check if PAE is enabled
-       jz enable_paging
+       jz .Lenable_paging
 
        /* Check if extended functions are implemented */
        movl $0x80000000, %eax
@@ -372,7 +372,7 @@ default_entry:
        /* Value must be in the range 0x80000001 to 0x8000ffff */
        subl $0x80000001, %eax
        cmpl $(0x8000ffff-0x80000001), %eax
-       ja enable_paging
+       ja .Lenable_paging
 
        /* Clear bogus XD_DISABLE bits */
        call verify_cpu
@@ -381,7 +381,7 @@ default_entry:
        cpuid
        /* Execute Disable bit supported? */
        btl $(X86_FEATURE_NX & 31), %edx
-       jnc enable_paging
+       jnc .Lenable_paging
 
        /* Setup EFER (Extended Feature Enable Register) */
        movl $MSR_EFER, %ecx
@@ -391,7 +391,7 @@ default_entry:
        /* Make changes effective */
        wrmsr
 
-enable_paging:
+.Lenable_paging:
 
 /*
  * Enable paging
@@ -420,7 +420,7 @@ enable_paging:
  */
        movb $4,X86                     # at least 486
        cmpl $-1,X86_CPUID
-       je is486
+       je .Lis486
 
        /* get vendor info */
        xorl %eax,%eax                  # call CPUID with 0 -> return vendor ID
@@ -431,7 +431,7 @@ enable_paging:
        movl %ecx,X86_VENDOR_ID+8       # last 4 chars
 
        orl %eax,%eax                   # do we have processor info as well?
-       je is486
+       je .Lis486
 
        movl $1,%eax            # Use the CPUID instruction to get CPU type
        cpuid
@@ -445,7 +445,7 @@ enable_paging:
        movb %cl,X86_MASK
        movl %edx,X86_CAPABILITY
 
-is486:
+.Lis486:
        movl $0x50022,%ecx      # set AM, WP, NE and MP
        movl %cr0,%eax
        andl $0x80000011,%eax   # Save PG,PE,ET
-- 
2.7.4
