Author: ian
Date: Mon Aug 11 01:29:28 2014
New Revision: 269796
URL: http://svnweb.freebsd.org/changeset/base/269796
Log:
  MFC r269390: Fix unwind info in hand-written asm (avoid nested functions).

Modified:
  stable/10/sys/arm/arm/cpufunc_asm_arm10.S
  stable/10/sys/arm/arm/cpufunc_asm_arm9.S
  stable/10/sys/arm/arm/cpufunc_asm_armv5.S
  stable/10/sys/arm/arm/cpufunc_asm_armv6.S
  stable/10/sys/arm/arm/cpufunc_asm_armv7.S
  stable/10/sys/arm/arm/cpufunc_asm_xscale.S
  stable/10/sys/arm/arm/cpufunc_asm_xscale_c3.S
  stable/10/sys/arm/arm/exception.S
  stable/10/sys/arm/arm/fusu.S
  stable/10/sys/arm/arm/locore.S
  stable/10/sys/arm/arm/setstack.s
  stable/10/sys/arm/arm/support.S
  stable/10/sys/arm/include/asm.h
  stable/10/sys/libkern/arm/divsi3.S
Directory Properties:
  stable/10/   (props changed)

Modified: stable/10/sys/arm/arm/cpufunc_asm_arm10.S
==============================================================================
--- stable/10/sys/arm/arm/cpufunc_asm_arm10.S	Mon Aug 11 01:22:10 2014	(r269795)
+++ stable/10/sys/arm/arm/cpufunc_asm_arm10.S	Mon Aug 11 01:29:28 2014	(r269796)
@@ -209,7 +209,7 @@ ENTRY_NP(arm10_idcache_wbinv_all)
 	mcr	p15, 0, r0, c7, c5, 0	/* Flush I cache */
 	/* Fall through to purge Dcache. */
 
-ENTRY(arm10_dcache_wbinv_all)
+EENTRY(arm10_dcache_wbinv_all)
 .Larm10_dcache_wbinv_all:
 	ldr	ip, .Larm10_cache_data
 	ldmia	ip, {s_max, i_max, s_inc, i_inc}
@@ -223,8 +223,8 @@ ENTRY(arm10_dcache_wbinv_all)
 	bhs	.Lnext_set_inv		/* Next set */
 	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
 	bx	lr
+EEND(arm10_dcache_wbinv_all)
 END(arm10_idcache_wbinv_all)
-END(arm10_dcache_wbinv_all)
 
 .Larm10_cache_data:
 	.word	_C_LABEL(arm10_dcache_sets_max)

Modified: stable/10/sys/arm/arm/cpufunc_asm_arm9.S
==============================================================================
--- stable/10/sys/arm/arm/cpufunc_asm_arm9.S	Mon Aug 11 01:22:10 2014	(r269795)
+++ stable/10/sys/arm/arm/cpufunc_asm_arm9.S	Mon Aug 11 01:29:28 2014	(r269796)
@@ -197,7 +197,7 @@ ENTRY_NP(arm9_idcache_wbinv_all)
 	mcr	p15, 0, r0, c7, c5, 0	/* Flush I cache */
 	/* Fall through */
 
-ENTRY(arm9_dcache_wbinv_all)
+EENTRY(arm9_dcache_wbinv_all)
 .Larm9_dcache_wbinv_all:
 	ldr	ip, .Larm9_cache_data
 	ldmia	ip, {s_max, i_max, s_inc, i_inc}
@@ -210,8 +210,8 @@ ENTRY(arm9_dcache_wbinv_all)
 	subs	s_max, s_max, s_inc
 	bhs	.Lnext_set_inv		/* Next set */
 	mov	pc, lr
+EEND(arm9_dcache_wbinv_all)
 END(arm9_idcache_wbinv_all)
-END(arm9_dcache_wbinv_all)
 
 .Larm9_cache_data:
 	.word	_C_LABEL(arm9_dcache_sets_max)

Modified: stable/10/sys/arm/arm/cpufunc_asm_armv5.S
==============================================================================
--- stable/10/sys/arm/arm/cpufunc_asm_armv5.S	Mon Aug 11 01:22:10 2014	(r269795)
+++ stable/10/sys/arm/arm/cpufunc_asm_armv5.S	Mon Aug 11 01:29:28 2014	(r269796)
@@ -194,6 +194,7 @@ ENTRY(armv5_idcache_wbinv_range)
 END(armv5_idcache_wbinv_range)
 
 ENTRY_NP(armv5_idcache_wbinv_all)
+armv5_idcache_wbinv_all:
 .Larmv5_idcache_wbinv_all:
 	/*
 	 * We assume that the code here can never be out of sync with the
@@ -203,7 +204,7 @@ ENTRY_NP(armv5_idcache_wbinv_all)
 	mcr	p15, 0, r0, c7, c5, 0	/* Flush I cache */
 	/* Fall through to purge Dcache. */
 
-ENTRY(armv5_dcache_wbinv_all)
+EENTRY(armv5_dcache_wbinv_all)
 .Larmv5_dcache_wbinv_all:
 	ldr	ip, .Larmv5_cache_data
 	ldmia	ip, {s_max, i_max, s_inc, i_inc}
@@ -219,8 +220,8 @@ ENTRY(armv5_dcache_wbinv_all)
 	bpl	1b			/* Next set */
 	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
 	RET
+EEND(armv5_dcache_wbinv_all)
 END(armv5_idcache_wbinv_all)
-END(armv5_dcache_wbinv_all)
 
 .Larmv5_cache_data:
 	.word	_C_LABEL(armv5_dcache_sets_max)

Modified: stable/10/sys/arm/arm/cpufunc_asm_armv6.S
==============================================================================
--- stable/10/sys/arm/arm/cpufunc_asm_armv6.S	Mon Aug 11 01:22:10 2014	(r269795)
+++ stable/10/sys/arm/arm/cpufunc_asm_armv6.S	Mon Aug 11 01:29:28 2014	(r269796)
@@ -137,12 +137,12 @@ ENTRY_NP(armv6_idcache_wbinv_all)
 	/* Fall through to purge Dcache. */
 
 /* LINTSTUB: void armv6_dcache_wbinv_all(void); */
-ENTRY(armv6_dcache_wbinv_all)
+EENTRY(armv6_dcache_wbinv_all)
 	mcr	p15, 0, r0, c7, c14, 0	/* clean & invalidate D cache */
 	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
 	RET
+EEND(armv6_dcache_wbinv_all)
 END(armv6_idcache_wbinv_all)
-END(armv6_dcache_wbinv_all)
 
 ENTRY(armv6_idcache_inv_all)
 	mov	r0, #0

Modified: stable/10/sys/arm/arm/cpufunc_asm_armv7.S
==============================================================================
--- stable/10/sys/arm/arm/cpufunc_asm_armv7.S	Mon Aug 11 01:22:10 2014	(r269795)
+++ stable/10/sys/arm/arm/cpufunc_asm_armv7.S	Mon Aug 11 01:29:28 2014	(r269796)
@@ -358,7 +358,7 @@ ENTRY(armv7_idcache_inv_all)
 	mcr	p15, 0, r0, c7, c5, 0	@ invalidate instruction+branch cache
 	isb				@ instruction sync barrier
 	bx	lr			@ return
-END(armv7_l1cache_inv_all)
+END(armv7_idcache_inv_all)
 
 ENTRY_NP(armv7_sleep)
 	dsb

Modified: stable/10/sys/arm/arm/cpufunc_asm_xscale.S
==============================================================================
--- stable/10/sys/arm/arm/cpufunc_asm_xscale.S	Mon Aug 11 01:22:10 2014	(r269795)
+++ stable/10/sys/arm/arm/cpufunc_asm_xscale.S	Mon Aug 11 01:29:28 2014	(r269796)
@@ -306,11 +306,12 @@ _C_LABEL(xscale_minidata_clean_size):
 	XSCALE_CACHE_CLEAN_UNBLOCK
 
 ENTRY_NP(xscale_cache_syncI)
-ENTRY_NP(xscale_cache_purgeID)
+
+EENTRY_NP(xscale_cache_purgeID)
 	mcr	p15, 0, r0, c7, c5, 0	/* flush I cache (D cleaned below) */
-ENTRY_NP(xscale_cache_cleanID)
-ENTRY_NP(xscale_cache_purgeD)
-ENTRY(xscale_cache_cleanD)
+EENTRY_NP(xscale_cache_cleanID)
+EENTRY_NP(xscale_cache_purgeD)
+EENTRY(xscale_cache_cleanD)
 	XSCALE_CACHE_CLEAN_PROLOGUE
 1:	subs	r0, r0, #32
@@ -326,11 +327,11 @@ ENTRY(xscale_cache_cleanD)
 	XSCALE_CACHE_CLEAN_EPILOGUE
 	RET
+EEND(xscale_cache_cleanD)
+EEND(xscale_cache_purgeD)
+EEND(xscale_cache_cleanID)
+EEND(xscale_cache_purgeID)
 END(xscale_cache_syncI)
-END(xscale_cache_purgeID)
-END(xscale_cache_cleanID)
-END(xscale_cache_purgeD)
-END(xscale_cache_cleanD)
 
 /*
  * Clean the mini-data cache.
@@ -374,7 +375,7 @@ END(xscale_cache_purgeD_E)
  */
 
 /* xscale_cache_syncI is identical to xscale_cache_purgeID */
 
-ENTRY(xscale_cache_cleanID_rng)
+EENTRY(xscale_cache_cleanID_rng)
 ENTRY(xscale_cache_cleanD_rng)
 	cmp	r1, #0x4000
 	bcs	_C_LABEL(xscale_cache_cleanID)
@@ -393,7 +394,7 @@ ENTRY(xscale_cache_cleanD_rng)
 	mcr	p15, 0, r0, c7, c10, 4	/* drain write buffer */
 	CPWAIT_AND_RETURN(r0)
 
-END(xscale_cache_cleanID_rng)
+/*END(xscale_cache_cleanID_rng)*/
 END(xscale_cache_cleanD_rng)
 
 ENTRY(xscale_cache_purgeID_rng)

Modified: stable/10/sys/arm/arm/cpufunc_asm_xscale_c3.S
==============================================================================
--- stable/10/sys/arm/arm/cpufunc_asm_xscale_c3.S	Mon Aug 11 01:22:10 2014	(r269795)
+++ stable/10/sys/arm/arm/cpufunc_asm_xscale_c3.S	Mon Aug 11 01:29:28 2014	(r269796)
@@ -143,11 +143,12 @@ __FBSDID("$FreeBSD$");
 
 ENTRY_NP(xscalec3_cache_syncI)
-ENTRY_NP(xscalec3_cache_purgeID)
+xscalec3_cache_purgeID:
+EENTRY_NP(xscalec3_cache_purgeID)
 	mcr	p15, 0, r0, c7, c5, 0	/* flush I cache (D cleaned below) */
-ENTRY_NP(xscalec3_cache_cleanID)
-ENTRY_NP(xscalec3_cache_purgeD)
-ENTRY(xscalec3_cache_cleanD)
+EENTRY_NP(xscalec3_cache_cleanID)
+EENTRY_NP(xscalec3_cache_purgeD)
+EENTRY(xscalec3_cache_cleanD)
 
 	XSCALE_CACHE_CLEAN_BLOCK
 	mov	r0, #0
@@ -168,11 +169,11 @@ ENTRY(xscalec3_cache_cleanD)
 	mcr	p15, 0, r0, c7, c10, 4	/* drain write buffer */
 	RET
+EEND(xscalec3_cache_purgeID)
+EEND(xscalec3_cache_cleanID)
+EEND(xscalec3_cache_purgeD)
+EEND(xscalec3_cache_cleanD)
 END(xscalec3_cache_syncI)
-END(xscalec3_cache_purgeID)
-END(xscalec3_cache_cleanID)
-END(xscalec3_cache_purgeD)
-END(xscalec3_cache_cleanD)
 
 ENTRY(xscalec3_cache_purgeID_rng)
@@ -238,7 +239,7 @@ ENTRY(xscalec3_cache_purgeD_rng)
 END(xscalec3_cache_purgeD_rng)
 
 ENTRY(xscalec3_cache_cleanID_rng)
-ENTRY(xscalec3_cache_cleanD_rng)
+EENTRY(xscalec3_cache_cleanD_rng)
 
 	cmp	r1, #0x4000
 	bcs	_C_LABEL(xscalec3_cache_cleanID)
@@ -257,8 +258,8 @@ ENTRY(xscalec3_cache_cleanD_rng)
 	mcr	p15, 0, r0, c7, c10, 4	/* drain write buffer */
 	CPWAIT_AND_RETURN(r0)
 
+EEND(xscalec3_cache_cleanD_rng)
 END(xscalec3_cache_cleanID_rng)
-END(xscalec3_cache_cleanD_rng)
 
 ENTRY(xscalec3_l2cache_purge)
 	/* Clean-up the L2 cache */

Modified: stable/10/sys/arm/arm/exception.S
==============================================================================
--- stable/10/sys/arm/arm/exception.S	Mon Aug 11 01:22:10 2014	(r269795)
+++ stable/10/sys/arm/arm/exception.S	Mon Aug 11 01:29:28 2014	(r269796)
@@ -280,12 +280,12 @@ ASENTRY_NP(swi_entry)
  * that a newly created thread appears to return from a SWI just like
  * the parent thread that created it.
  */
-ASENTRY_NP(swi_exit)
+ASEENTRY_NP(swi_exit)
 	DO_AST				/* Handle pending signals. */
 	PULLFRAME			/* Deallocate trapframe. */
 	movs	pc, lr			/* Return to userland. */
 	STOP_UNWINDING			/* Don't unwind into user mode. */
-END(swi_exit)
+EEND(swi_exit)
 END(swi_entry)
 
 /*

Modified: stable/10/sys/arm/arm/fusu.S
==============================================================================
--- stable/10/sys/arm/arm/fusu.S	Mon Aug 11 01:22:10 2014	(r269795)
+++ stable/10/sys/arm/arm/fusu.S	Mon Aug 11 01:29:28 2014	(r269796)
@@ -54,8 +54,8 @@ __FBSDID("$FreeBSD$");
  * Fetch an int from the user's address space.
  */
 
-ENTRY_NP(casuword32)
 ENTRY(casuword)
+EENTRY_NP(casuword32)
 	GET_PCB(r3)
 	ldr	r3, [r3]
 
@@ -91,7 +91,7 @@ ENTRY(casuword)
 	mov	r1, #0x00000000
 	str	r1, [r3, #PCB_ONFAULT]
 	RET
-END(casuword32)
+EEND(casuword32)
 END(casuword)
 
 /*
@@ -110,8 +110,8 @@ END(casuword)
  * Fetch an int from the user's address space.
 */
 
-ENTRY_NP(fuword32)
 ENTRY(fuword)
+EENTRY_NP(fuword32)
 	GET_PCB(r2)
 	ldr	r2, [r2]
 
@@ -277,8 +277,8 @@ fusupcbfaulttext:
 * Store an int in the user's address space.
 */
 
-ENTRY_NP(suword32)
 ENTRY(suword)
+EENTRY_NP(suword32)
 	GET_PCB(r2)
 	ldr	r2, [r2]
 
@@ -390,4 +390,3 @@ ENTRY(subyte)
 	str	r0, [r2, #PCB_ONFAULT]
 	RET
 END(subyte)
-

Modified: stable/10/sys/arm/arm/locore.S
==============================================================================
--- stable/10/sys/arm/arm/locore.S	Mon Aug 11 01:22:10 2014	(r269795)
+++ stable/10/sys/arm/arm/locore.S	Mon Aug 11 01:29:28 2014	(r269796)
@@ -75,7 +75,8 @@ __FBSDID("$FreeBSD$");
 * For both types of boot we gather up the args, put them in a struct arm_boot_params
 * structure and pass that to initarm.
 */
-ENTRY_NP(btext)
+	.globl	btext
+btext:
 ASENTRY_NP(_start)
 	STOP_UNWINDING		/* Can't unwind into the bootloader! */
 
@@ -261,7 +262,6 @@ virt_done:
 	adr	r0, .Lmainreturned
 	b	_C_LABEL(panic)
 	/* NOTREACHED */
-END(btext)
 END(_start)
 
 /*
@@ -524,7 +524,7 @@ ENTRY_NP(sigcode)
 
 	/* Branch back to retry SYS_sigreturn */
 	b	. - 16
-
+END(sigcode)
 	.word	SYS_sigreturn
 	.word	SYS_exit
 
@@ -536,5 +536,5 @@ ENTRY_NP(sigcode)
 	.global	szsigcode
 szsigcode:
 	.long	esigcode-sigcode
-END(sigcode)
+
 /* End of locore.S */

Modified: stable/10/sys/arm/arm/setstack.s
==============================================================================
--- stable/10/sys/arm/arm/setstack.s	Mon Aug 11 01:22:10 2014	(r269795)
+++ stable/10/sys/arm/arm/setstack.s	Mon Aug 11 01:29:28 2014	(r269796)
@@ -71,7 +71,7 @@ ENTRY(set_stackptr)
 	msr	cpsr_fsxc, r3		/* Restore the old mode */
 	mov	pc, lr			/* Exit */
 
-
+END(set_stackptr)
 /* To get the stack pointer for a particular mode we must switch
 * to that mode copy the banked r13 and then switch back.
 * This routine provides an easy way of doing this for any mode
@@ -90,5 +90,5 @@ ENTRY(get_stackptr)
 	msr	cpsr_fsxc, r3		/* Restore the old mode */
 	mov	pc, lr			/* Exit */
 
-
+END(get_stackptr)
 /* End of setstack.S */

Modified: stable/10/sys/arm/arm/support.S
==============================================================================
--- stable/10/sys/arm/arm/support.S	Mon Aug 11 01:22:10 2014	(r269795)
+++ stable/10/sys/arm/arm/support.S	Mon Aug 11 01:29:28 2014	(r269796)
@@ -130,7 +130,7 @@ ENTRY(bzero)
 .Lnormal0:
 	mov	r3, #0x00
 	b	do_memset
-
+EEND(bzero)
 /* LINTSTUB: Func: void *memset(void *, int, size_t) */
 ENTRY(memset)
 	and	r3, r1, #0xff		/* We deal with bytes */
@@ -276,7 +276,6 @@ do_memset:
 	strgeb	r3, [ip], #0x01		/* Set another byte */
 	strgtb	r3, [ip]		/* and a third */
 	RET				/* Exit */
-END(bzero)
 END(memset)
 
 ENTRY(bcmp)
@@ -394,7 +393,7 @@ ENTRY(bcopy)
 	eor	r0, r1, r0
 	eor	r1, r0, r1
 	eor	r0, r1, r0
-ENTRY(memmove)
+EENTRY(memmove)
 	/* Do the buffers overlap? */
 	cmp	r0, r1
 	RETeq			/* Bail now if src/dst are the same */
@@ -931,8 +930,8 @@ ENTRY(memmove)
 .Lmemmove_bsrcul1l4:
 	add	r1, r1, #1
 	b	.Lmemmove_bl4
+EEND(memmove)
 END(bcopy)
-END(memmove)
 
 #if !defined(_ARM_ARCH_5E)
 ENTRY(memcpy)
@@ -2945,13 +2944,17 @@ END(memcpy)
 
 ENTRY(user)
 	nop
+END(user)
 ENTRY(btrap)
 	nop
+END(btrap)
 ENTRY(etrap)
 	nop
+END(etrap)
 ENTRY(bintr)
 	nop
+END(bintr)
 ENTRY(eintr)
 	nop
-
+END(eintr)
 #endif

Modified: stable/10/sys/arm/include/asm.h
==============================================================================
--- stable/10/sys/arm/include/asm.h	Mon Aug 11 01:22:10 2014	(r269795)
+++ stable/10/sys/arm/include/asm.h	Mon Aug 11 01:29:28 2014	(r269796)
@@ -74,9 +74,20 @@
 #define	GLOBAL(X) .globl x
 #define	_ENTRY(x) \
 	.text; _ALIGN_TEXT; .globl x; .type x,_ASM_TYPE_FUNCTION; x: _FNSTART
-
 #define	_END(x)	.size x, . - x; _FNEND
 
+/*
+ * EENTRY()/EEND() mark "extra" entry/exit points from a function.
+ * The unwind info cannot handle the concept of a nested function, or a function
+ * with multiple .fnstart directives, but some of our assembler code is written
+ * with multiple labels to allow entry at several points.  The EENTRY() macro
+ * defines such an extra entry point without a new .fnstart, so that it's
+ * basically just a label that you can jump to.  The EEND() macro does nothing
+ * at all, except document the exit point associated with the same-named entry.
+ */
+#define	_EENTRY(x)	.globl x; .type x,_ASM_TYPE_FUNCTION; x:
+#define	_EEND(x)	/* nothing */
+
 #ifdef GPROF
 # define _PROF_PROLOGUE	\
 	mov ip, lr; bl __mcount
@@ -85,11 +96,17 @@
 #endif
 
 #define	ENTRY(y)	_ENTRY(_C_LABEL(y)); _PROF_PROLOGUE
+#define	EENTRY(y)	_EENTRY(_C_LABEL(y)); _PROF_PROLOGUE
 #define	ENTRY_NP(y)	_ENTRY(_C_LABEL(y))
+#define	EENTRY_NP(y)	_EENTRY(_C_LABEL(y))
 #define	END(y)		_END(_C_LABEL(y))
+#define	EEND(y)
 #define	ASENTRY(y)	_ENTRY(_ASM_LABEL(y)); _PROF_PROLOGUE
+#define	ASEENTRY(y)	_EENTRY(_ASM_LABEL(y)); _PROF_PROLOGUE
 #define	ASENTRY_NP(y)	_ENTRY(_ASM_LABEL(y))
+#define	ASEENTRY_NP(y)	_EENTRY(_ASM_LABEL(y))
 #define	ASEND(y)	_END(_ASM_LABEL(y))
+#define	ASEEND(y)
 
 #define	ASMSTR		.asciz

Modified: stable/10/sys/libkern/arm/divsi3.S
==============================================================================
--- stable/10/sys/libkern/arm/divsi3.S	Mon Aug 11 01:22:10 2014	(r269795)
+++ stable/10/sys/libkern/arm/divsi3.S	Mon Aug 11 01:29:28 2014	(r269796)
@@ -51,11 +51,11 @@ ENTRY_NP(__modsi3)
 	RET
 END(__modsi3)
 
+ENTRY_NP(__udivsi3)
 #ifdef __ARM_EABI__
-ENTRY_NP(__aeabi_uidiv)
-ENTRY_NP(__aeabi_uidivmod)
+EENTRY_NP(__aeabi_uidiv)
+EENTRY_NP(__aeabi_uidivmod)
 #endif
-ENTRY_NP(__udivsi3)
 .L_udivide:				/* r0 = r0 / r1; r1 = r0 % r1 */
 	eor	r0, r1, r0
 	eor	r1, r0, r1
@@ -77,16 +77,16 @@ ENTRY_NP(__udivsi3)
 	mov	r1, #0
 	RET
 #ifdef __ARM_EABI__
-END(__aeabi_uidiv)
-END(__aeabi_uidivmod)
+EEND(__aeabi_uidiv)
+EEND(__aeabi_uidivmod)
 #endif
 END(__udivsi3)
 
+ENTRY_NP(__divsi3)
 #ifdef __ARM_EABI__
-ENTRY_NP(__aeabi_idiv)
-ENTRY_NP(__aeabi_idivmod)
+EENTRY_NP(__aeabi_idiv)
+EENTRY_NP(__aeabi_idivmod)
 #endif
-ENTRY_NP(__divsi3)
 .L_divide:				/* r0 = r0 / r1; r1 = r0 % r1 */
 	eor	r0, r1, r0
 	eor	r1, r0, r1
@@ -401,8 +401,8 @@ ENTRY_NP(__divsi3)
 	mov	r0, r3
 	RET
 #ifdef __ARM_EABI__
-END(__aeabi_idiv)
-END(__aeabi_idivmod)
+EEND(__aeabi_idiv)
+EEND(__aeabi_idivmod)
#endif
 END(__divsi3)
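
For readers not familiar with the macro layering in <machine/asm.h>, below is a
minimal sketch of the pattern this change converges on.  The names "outer" and
"extra" are invented for illustration only (the real call sites are the cache,
copy and division routines patched above); the macro behavior described in the
comments is taken from the asm.h hunk in this commit.

#include <machine/asm.h>

/*
 * Sketch only -- "outer" and "extra" are made-up names.
 * ENTRY_NP() emits the section/alignment boilerplate, the global label,
 * and _FNSTART (a .fnstart when unwind tables are enabled).  EENTRY_NP()
 * emits only .globl/.type and the label, so no nested unwind region is
 * opened for the secondary entry point.  EEND() expands to nothing and
 * merely documents the exit; the single END() emits .size plus _FNEND,
 * closing the one unwind region.
 */
ENTRY_NP(outer)
	mcr	p15, 0, r0, c7, c5, 0	/* work reached only via outer */
	/* fall through to the shared tail */
EENTRY_NP(extra)
	mcr	p15, 0, r0, c7, c10, 4	/* shared tail, also callable as extra */
	RET
EEND(extra)
END(outer)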