Author: ian
Date: Sun Jan 24 19:58:58 2016
New Revision: 294681
URL: https://svnweb.freebsd.org/changeset/base/294681

Log:
  MFC r277416, r282023, r282024, r282025, r284264:
  
    Remove the SMP code from locore-v4.S. ARMv4/v5 systems will never use the
    SMP code, as there is no multi-core hardware prior to ARMv6.
  
    Remove the armv6 code from locore-v4.S; it is not needed there.
  
    Fix the style of locore-v4.S and locore-v6.S to make the common code easier
    to spot.
  
    Clean up a little more:
     - Remove whitespace at the end of lines
     - Use a tab after instructions, not spaces
  
    Fix the ARMv6+ checks in sys/arm/arm to use the __ARM_ARCH >= 6 spelling.

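For reference, the __ARM_ARCH >= 6 tests depend on <machine/acle-compat.h>
providing the ACLE-style __ARM_ARCH macro on toolchains that do not predefine
it.  A rough sketch of that fallback follows; the real header covers many more
architecture variants, so the exact list of __ARM_ARCH_*__ macros handled
below is only illustrative:

#ifndef __ARM_ARCH
#if defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__)
#define __ARM_ARCH      7
#elif defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || \
    defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || \
    defined(__ARM_ARCH_6ZK__)
#define __ARM_ARCH      6
#else
#define __ARM_ARCH      5       /* ARMv4/v5-class parts; any value < 6 behaves the same in these tests */
#endif
#endif

With that in place, "#ifdef _ARM_ARCH_6" can become "#if __ARM_ARCH >= 6",
which also matches ARMv7 without listing every CPU type.
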
Modified:
  stable/10/sys/arm/arm/bcopyinout.S
  stable/10/sys/arm/arm/bcopyinout_xscale.S
  stable/10/sys/arm/arm/copystr.S
  stable/10/sys/arm/arm/locore-v4.S
  stable/10/sys/arm/arm/locore-v6.S
  stable/10/sys/arm/arm/machdep.c
  stable/10/sys/arm/arm/trap.c
Directory Properties:
  stable/10/   (props changed)

Modified: stable/10/sys/arm/arm/bcopyinout.S
==============================================================================
--- stable/10/sys/arm/arm/bcopyinout.S	Sun Jan 24 19:41:31 2016	(r294680)
+++ stable/10/sys/arm/arm/bcopyinout.S	Sun Jan 24 19:58:58 2016	(r294681)
@@ -38,6 +38,7 @@
 
 #include "assym.s"
 
+#include <machine/acle-compat.h>
 #include <machine/asm.h>
 #include <sys/errno.h>
 
@@ -54,7 +55,7 @@ __FBSDID("$FreeBSD$");
        .text
        .align  2
 
-#ifdef _ARM_ARCH_6
+#if __ARM_ARCH >= 6
 #define GET_PCB(tmp) \
        mrc p15, 0, tmp, c13, c0, 4; \
        add     tmp, tmp, #(TD_PCB)

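The GET_PCB change above recurs in bcopyinout_xscale.S and copystr.S below.  On
ARMv6 and later, CP15 c13/c0/4 (TPIDRPRW) holds the curthread pointer, and
adding TD_PCB yields the address of td->td_pcb.  A hedged C sketch of what the
macro computes (the minimal struct and the get_pcb_ptr name are illustration
only; the real td_pcb offset is what assym.s exports as TD_PCB):

struct pcb;                             /* opaque in this sketch */
struct thread {
        /* ... members before td_pcb; assym.s exports its offset as TD_PCB ... */
        struct pcb      *td_pcb;
};

static __inline struct pcb **
get_pcb_ptr(void)
{
        struct thread *td;

        /* mrc p15, 0, tmp, c13, c0, 4 -- read TPIDRPRW (curthread) */
        __asm __volatile("mrc p15, 0, %0, c13, c0, 4" : "=r" (td));
        return (&td->td_pcb);           /* add tmp, tmp, #(TD_PCB) */
}
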
Modified: stable/10/sys/arm/arm/bcopyinout_xscale.S
==============================================================================
--- stable/10/sys/arm/arm/bcopyinout_xscale.S	Sun Jan 24 19:41:31 2016	(r294680)
+++ stable/10/sys/arm/arm/bcopyinout_xscale.S	Sun Jan 24 19:58:58 2016	(r294681)
@@ -38,11 +38,13 @@
 #include <machine/asm.h>
 __FBSDID("$FreeBSD$");
 
+#include <machine/acle-compat.h>
+
        .syntax unified
        .text
        .align  2
 
-#ifdef _ARM_ARCH_6
+#if __ARM_ARCH >= 6
 #define GET_PCB(tmp) \
        mrc p15, 0, tmp, c13, c0, 4; \
        add     tmp, tmp, #(TD_PCB)

Modified: stable/10/sys/arm/arm/copystr.S
==============================================================================
--- stable/10/sys/arm/arm/copystr.S	Sun Jan 24 19:41:31 2016	(r294680)
+++ stable/10/sys/arm/arm/copystr.S	Sun Jan 24 19:58:58 2016	(r294681)
@@ -40,6 +40,7 @@
 
        
 #include "assym.s"
+#include <machine/acle-compat.h>
 #include <machine/asm.h>
 #include <machine/armreg.h>
 __FBSDID("$FreeBSD$");
@@ -49,7 +50,7 @@ __FBSDID("$FreeBSD$");
        .text
        .align  2
 
-#ifdef _ARM_ARCH_6
+#if __ARM_ARCH >= 6
 #define GET_PCB(tmp) \
        mrc p15, 0, tmp, c13, c0, 4; \
        add     tmp, tmp, #(TD_PCB)

Modified: stable/10/sys/arm/arm/locore-v4.S
==============================================================================
--- stable/10/sys/arm/arm/locore-v4.S	Sun Jan 24 19:41:31 2016	(r294680)
+++ stable/10/sys/arm/arm/locore-v4.S	Sun Jan 24 19:58:58 2016	(r294681)
@@ -49,8 +49,7 @@ __FBSDID("$FreeBSD$");
  *
  * TODO: Fix the ARMv4/v5 case.
  */
-#if (defined(FLASHADDR) || defined(LOADERRAMADDR) || !defined(_ARM_ARCH_6)) && \
-    !defined(PHYSADDR)
+#ifndef PHYSADDR
 #error PHYSADDR must be defined for this configuration
 #endif
 
@@ -116,7 +115,7 @@ ASENTRY_NP(_start)
         * If we're running with MMU disabled, test against the
         * physical address instead.
         */
-       mrc     p15, 0, r2, c1, c0, 0
+       mrc     p15, 0, r2, c1, c0, 0
        ands    r2, r2, #CPU_CONTROL_MMU_ENABLE
        ldreq   r6, =PHYSADDR
        ldrne   r6, =LOADERRAMADDR
@@ -125,7 +124,7 @@ ASENTRY_NP(_start)
        cmp     r7, pc
        bhi     from_ram
        b       do_copy
-       
+
 flash_lower:
        cmp     r6, pc
        bls     from_ram
@@ -148,12 +147,12 @@ from_ram:
 
 disable_mmu:
        /* Disable MMU for a while */
-       mrc     p15, 0, r2, c1, c0, 0
+       mrc     p15, 0, r2, c1, c0, 0
        bic     r2, r2, #(CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE |\
            CPU_CONTROL_WBUF_ENABLE)
        bic     r2, r2, #(CPU_CONTROL_IC_ENABLE)
        bic     r2, r2, #(CPU_CONTROL_BPRD_ENABLE)
-       mcr     p15, 0, r2, c1, c0, 0
+       mcr     p15, 0, r2, c1, c0, 0
 
        nop
        nop
@@ -169,36 +168,16 @@ Lunmapped:
        adr     r0, Lpagetable
        bl      translate_va_to_pa
 
-#ifndef _ARM_ARCH_6
        /*
         * Some of the older ports (the various XScale, mostly) assume
         * that the memory before the kernel is mapped, and use it for
-        * the various stacks, page tables, etc. For those CPUs, map the 
-        * 64 first MB of RAM, as it used to be. 
+        * the various stacks, page tables, etc. For those CPUs, map the
+        * 64 first MB of RAM, as it used to be.
         */
        /*
         * Map PA == VA
-        */    
-       ldr     r5, =PHYSADDR
-       mov     r1, r5
-       mov     r2, r5
-       /* Map 64MiB, preserved over calls to build_pagetables */
-       mov     r3, #64
-       bl      build_pagetables
-       
-       /* Create the kernel map to jump to */
-       mov     r1, r5
-       ldr     r2, =(KERNBASE)
-       bl      build_pagetables
-       ldr     r5, =(KERNPHYSADDR)
-#else
-       /*
-        * Map PA == VA
-        */    
-       /* Find the start kernels load address */
-       adr     r5, _start
-       ldr     r2, =(L1_S_OFFSET)
-       bic     r5, r2
+        */
+       ldr     r5, =PHYSADDR
        mov     r1, r5
        mov     r2, r5
        /* Map 64MiB, preserved over calls to build_pagetables */
@@ -207,10 +186,10 @@ Lunmapped:
 
        /* Create the kernel map to jump to */
        mov     r1, r5
-       ldr     r2, =(KERNVIRTADDR)
+       ldr     r2, =(KERNBASE)
        bl      build_pagetables
-#endif
-       
+       ldr     r5, =(KERNPHYSADDR)
+
 #if defined(SOCDEV_PA) && defined(SOCDEV_VA)
        /* Create the custom map */
        ldr     r1, =SOCDEV_PA
@@ -218,32 +197,19 @@ Lunmapped:
        bl      build_pagetables
 #endif
 
-#if defined(SMP)
-       orr     r0, r0, #2              /* Set TTB shared memory flag */
-#endif
        mcr     p15, 0, r0, c2, c0, 0   /* Set TTB */
        mcr     p15, 0, r0, c8, c7, 0   /* Flush TLB */
 
-#if defined(CPU_ARM1136) || defined(CPU_ARM1176) || defined(CPU_CORTEXA) || defined(CPU_MV_PJ4B) || defined(CPU_KRAIT)
-       mov     r0, #0
-       mcr     p15, 0, r0, c13, c0, 1  /* Set ASID to 0 */
-#endif
-
        /* Set the Domain Access register.  Very important! */
-       mov     r0, #((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT)
+       mov     r0, #((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT)
        mcr     p15, 0, r0, c3, c0, 0
-       /* 
+       /*
         * Enable MMU.
         * On armv6 enable extended page tables, and set alignment checking
         * to modulo-4 (CPU_CONTROL_UNAL_ENABLE) for the ldrd/strd
         * instructions emitted by clang.
         */
        mrc     p15, 0, r0, c1, c0, 0
-#ifdef _ARM_ARCH_6
-       orr     r0, r0, #(CPU_CONTROL_V6_EXTPAGE | CPU_CONTROL_UNAL_ENABLE)
-       orr     r0, r0, #(CPU_CONTROL_AFLT_ENABLE)
-       orr     r0, r0, #(CPU_CONTROL_AF_ENABLE)
-#endif
        orr     r0, r0, #(CPU_CONTROL_MMU_ENABLE)
        mcr     p15, 0, r0, c1, c0, 0
        nop
@@ -283,7 +249,7 @@ virt_done:
        /* init arm will return the new stack pointer. */
        mov     sp, r0
 
-       bl      _C_LABEL(mi_startup)            /* call mi_startup()! */
+       bl      _C_LABEL(mi_startup)    /* call mi_startup()! */
 
        adr     r0, .Lmainreturned
        b       _C_LABEL(panic)
@@ -338,9 +304,6 @@ translate_va_to_pa:
 build_pagetables:
        /* Set the required page attributed */
        ldr     r4, =(L1_TYPE_S|L1_S_C|L1_S_AP(AP_KRW))
-#if defined(SMP)
-       orr     r4, #(L1_SHARED)
-#endif
        orr     r1, r4
 
        /* Move the virtual address to the correct bit location */
@@ -394,98 +357,12 @@ pagetable:
 .Lcpufuncs:
        .word   _C_LABEL(cpufuncs)
 
-#if defined(SMP)
-
-.Lmpvirt_done:
-       .word   mpvirt_done
-VA_TO_PA_POINTER(Lstartup_pagetable_secondary, temp_pagetable)
-
-ASENTRY_NP(mpentry)
-
-       /* Make sure interrupts are disabled. */
-       mrs     r7, cpsr
-       orr     r7, r7, #(PSR_I | PSR_F)
-       msr     cpsr_c, r7
-
-       /* Disable MMU.  It should be disabled already, but make sure. */
-       mrc     p15, 0, r2, c1, c0, 0
-       bic     r2, r2, #(CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE |\
-           CPU_CONTROL_WBUF_ENABLE)
-       bic     r2, r2, #(CPU_CONTROL_IC_ENABLE)
-       bic     r2, r2, #(CPU_CONTROL_BPRD_ENABLE)
-       mcr     p15, 0, r2, c1, c0, 0
-       nop
-       nop
-       nop
-       CPWAIT(r0)
-
-#if ARM_MMU_V6
-       bl      armv6_idcache_inv_all   /* Modifies r0 only */
-#elif ARM_MMU_V7
-       bl      armv7_idcache_inv_all   /* Modifies r0-r3, ip */
-#endif
-
-       /* Load the page table physical address */
-       adr     r0, Lstartup_pagetable_secondary
-       bl      translate_va_to_pa
-       /* Load the address the secondary page table */
-       ldr     r0, [r0]
-
-       orr     r0, r0, #2              /* Set TTB shared memory flag */
-       mcr     p15, 0, r0, c2, c0, 0   /* Set TTB */
-       mcr     p15, 0, r0, c8, c7, 0   /* Flush TLB */
-
-       mov     r0, #0
-       mcr     p15, 0, r0, c13, c0, 1  /* Set ASID to 0 */
-
-       /* Set the Domain Access register.  Very important! */
-       mov     r0, #((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT)
-       mcr     p15, 0, r0, c3, c0, 0
-       /* Enable MMU */
-       mrc     p15, 0, r0, c1, c0, 0
-       orr     r0, r0, #CPU_CONTROL_V6_EXTPAGE
-       orr     r0, r0, #CPU_CONTROL_AF_ENABLE
-       orr     r0, r0, #(CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE |\
-           CPU_CONTROL_WBUF_ENABLE)
-       orr     r0, r0, #(CPU_CONTROL_IC_ENABLE)
-       orr     r0, r0, #(CPU_CONTROL_BPRD_ENABLE)
-       mcr     p15, 0, r0, c1, c0, 0
-       nop
-       nop
-       nop
-       CPWAIT(r0)
-
-       adr     r1, .Lstart
-       ldmia   r1, {r1, r2, sp}        /* Set initial stack and */
-       mrc     p15, 0, r0, c0, c0, 5
-       and     r0, r0, #15
-       mov     r1, #2048
-       mul     r2, r1, r0
-       sub     sp, sp, r2
-       str     r1, [sp]
-       ldr     pc, .Lmpvirt_done
-
-mpvirt_done:
-
-       mov     fp, #0                  /* trace back starts here */
-       bl      _C_LABEL(init_secondary)        /* Off we go */
-
-       adr     r0, .Lmpreturned
-       b       _C_LABEL(panic)
-       /* NOTREACHED */
-
-.Lmpreturned:
-       .asciz  "init_secondary() returned"
-       .align  2
-END(mpentry)
-#endif
-
 ENTRY_NP(cpu_halt)
-       mrs     r2, cpsr
+       mrs     r2, cpsr
        bic     r2, r2, #(PSR_MODE)
-       orr     r2, r2, #(PSR_SVC32_MODE)
+       orr     r2, r2, #(PSR_SVC32_MODE)
        orr     r2, r2, #(PSR_I | PSR_F)
-       msr     cpsr_fsxc, r2
+       msr     cpsr_fsxc, r2
 
        ldr     r4, .Lcpu_reset_address
        ldr     r4, [r4]
@@ -511,9 +388,9 @@ ENTRY_NP(cpu_halt)
         * Hurl ourselves into the ROM
         */
        mov     r0, #(CPU_CONTROL_32BP_ENABLE | CPU_CONTROL_32BD_ENABLE)
-       mcr     p15, 0, r0, c1, c0, 0
-       mcrne   p15, 0, r2, c8, c7, 0   /* nail I+D TLB on ARMv4 and greater */
-       mov     pc, r4
+       mcr     p15, 0, r0, c1, c0, 0
+       mcrne   p15, 0, r2, c8, c7, 0   /* nail I+D TLB on ARMv4 and greater */
+       mov     pc, r4
 
        /*
         * _cpu_reset_address contains the address to branch to, to complete
@@ -550,7 +427,7 @@ ENTRY(longjmp)
 END(longjmp)
 
        .data
-       .global _C_LABEL(esym)
+       .global _C_LABEL(esym)
 _C_LABEL(esym):        .word   _C_LABEL(end)
 
 ENTRY_NP(abort)
@@ -563,7 +440,7 @@ ENTRY_NP(sigcode)
 
        /*
         * Call the sigreturn system call.
-        * 
+        *
         * We have to load r7 manually rather than using
         * "ldr r7, =SYS_sigreturn" to ensure the value of szsigcode is
         * correct. Using the alternative places esigcode at the address

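For reference, build_pagetables (present in both locore-v4.S and locore-v6.S)
only fills in 1MiB L1 section entries.  A hedged C sketch of its loop, using
the register roles documented in locore-v6.S (r0 = table base, r1 = physical
address with the section attributes already OR'd in, r2 = virtual address,
r3 = number of 1MiB sections); the constants and the _sketch name are
illustrative stand-ins for the pte.h definitions:

#include <stdint.h>

#define L1_S_SHIFT      20                      /* 1MiB sections */
#define L1_S_SIZE       (1U << L1_S_SHIFT)

static void
build_pagetables_sketch(uint32_t *l1, uint32_t pa, uint32_t va, uint32_t nsec)
{
        uint32_t idx;

        idx = va >> L1_S_SHIFT;         /* lsr r2, #(SHIFT - 2); the array    */
                                        /* indexing supplies the 4-byte scale */
        while (nsec-- > 0) {
                l1[idx++] = pa;         /* str r1, [r0, r2]         */
                pa += L1_S_SIZE;        /* add r1, r1, #(L1_S_SIZE) */
        }
}
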
Modified: stable/10/sys/arm/arm/locore-v6.S
==============================================================================
--- stable/10/sys/arm/arm/locore-v6.S	Sun Jan 24 19:41:31 2016	(r294680)
+++ stable/10/sys/arm/arm/locore-v6.S	Sun Jan 24 19:58:58 2016	(r294681)
@@ -39,7 +39,7 @@
 
 __FBSDID("$FreeBSD$");
 
-#ifndef        ARM_NEW_PMAP
+#ifndef ARM_NEW_PMAP
 #define        PTE1_OFFSET     L1_S_OFFSET
 #define        PTE1_SHIFT      L1_S_SHIFT
 #define        PTE1_SIZE       L1_S_SIZE
@@ -52,13 +52,13 @@ __FBSDID("$FreeBSD$");
        .align  2
 
 /*
- * On entry for        FreeBSD boot ABI:
- *     r0 - metadata pointer or 0 (boothowto on AT91's boot2)
- *     r1 - if (r0 == 0) then metadata pointer
- * On entry for        Linux boot ABI:
+ * On entry for FreeBSD boot ABI:
+ *     r0 - metadata pointer or 0 (boothowto on AT91's boot2)
+ *     r1 - if (r0 == 0) then metadata pointer
+ * On entry for Linux boot ABI:
  *     r0 - 0
  *     r1 - machine type (passed as arg2 to initarm)
- *     r2 - Pointer to a tagged list or dtb image (phys addr) (passed as arg1 initarm)
+ *     r2 - Pointer to a tagged list or dtb image (phys addr) (passed as arg1 initarm)
  *
 * For both types of boot we gather up the args, put them in a struct arm_boot_params
  * structure and pass that to initarm.
@@ -66,17 +66,17 @@ __FBSDID("$FreeBSD$");
        .globl  btext
 btext:
 ASENTRY_NP(_start)
-       STOP_UNWINDING          /* Can't unwind into the bootloader! */
+       STOP_UNWINDING          /* Can't unwind into the bootloader! */
 
-       /* Make sure interrupts are disabled. */
+       /* Make sure interrupts are disabled. */
        cpsid   ifa
 
-       mov     r8, r0          /* 0 or boot mode from boot2 */
-       mov     r9, r1          /* Save Machine type */
-       mov     r10, r2         /* Save meta data */
+       mov     r8, r0          /* 0 or boot mode from boot2 */
+       mov     r9, r1          /* Save Machine type */
+       mov     r10, r2         /* Save meta data */
        mov     r11, r3         /* Future expansion */
 
-       /* 
+       /*
         * Check whether data cache is enabled.  If it is, then we know
         * current tags are valid (not power-on garbage values) and there
         * might be dirty lines that need cleaning.  Disable cache to prevent
@@ -93,7 +93,7 @@ ASENTRY_NP(_start)
         * valid.  Disable all caches and the MMU, and invalidate everything
         * before setting up new page tables and re-enabling the mmu.
         */
-1:     
+1:
        bic     r7, #CPU_CONTROL_DC_ENABLE
        bic     r7, #CPU_CONTROL_MMU_ENABLE
        bic     r7, #CPU_CONTROL_IC_ENABLE
@@ -119,13 +119,13 @@ ASENTRY_NP(_start)
        /*
         * Map PA == VA
         */
-       /* Find the start kernels load address */
+       /* Find the start kernels load address */
        adr     r5, _start
        ldr     r2, =(PTE1_OFFSET)
        bic     r5, r2
        mov     r1, r5
        mov     r2, r5
-       /* Map 64MiB, preserved over calls to build_pagetables */
+       /* Map 64MiB, preserved over calls to build_pagetables */
        mov     r3, #64
        bl      build_pagetables
 
@@ -142,41 +142,41 @@ ASENTRY_NP(_start)
 #endif
        bl      init_mmu
 
-       /* Switch to virtual addresses. */
+       /* Switch to virtual addresses. */
        ldr     pc, =1f
 1:
 
-       /* Setup stack, clear BSS */
+       /* Setup stack, clear BSS */
        ldr     r1, =.Lstart
        ldmia   r1, {r1, r2, sp}        /* Set initial stack and */
        add     sp, sp, #INIT_ARM_STACK_SIZE
-       sub     r2, r2, r1              /* get zero init data */
+       sub     r2, r2, r1              /* get zero init data */
        mov     r3, #0
 2:
        str     r3, [r1], #0x0004       /* get zero init data */
-       subs    r2, r2, #4
+       subs    r2, r2, #4
        bgt     2b
 
-       mov     r1, #28                 /* loader info size is 28 bytes also second arg */
-       subs    sp, sp, r1              /* allocate arm_boot_params struct on stack */
-       mov     r0, sp                  /* loader info pointer is first arg */
-       bic     sp, sp, #7              /* align stack to 8 bytes */
-       str     r1, [r0]                /* Store length of loader info */
+       mov     r1, #28                 /* loader info size is 28 bytes also second arg */
+       subs    sp, sp, r1              /* allocate arm_boot_params struct on stack */
+       mov     r0, sp                  /* loader info pointer is first arg */
+       bic     sp, sp, #7              /* align stack to 8 bytes */
+       str     r1, [r0]                /* Store length of loader info */
        str     r8, [r0, #4]            /* Store r0 from boot loader */
        str     r9, [r0, #8]            /* Store r1 from boot loader */
        str     r10, [r0, #12]          /* store r2 from boot loader */
        str     r11, [r0, #16]          /* store r3 from boot loader */
        str     r5, [r0, #20]           /* store the physical address */
-       adr     r4, Lpagetable          /* load the pagetable address */
+       adr     r4, Lpagetable          /* load the pagetable address */
        ldr     r5, [r4, #4]
        str     r5, [r0, #24]           /* store the pagetable address */
        mov     fp, #0                  /* trace back starts here */
        bl      _C_LABEL(initarm)       /* Off we go */
 
-       /* init arm will return the new stack pointer. */
+       /* init arm will return the new stack pointer. */
        mov     sp, r0
 
-       bl      _C_LABEL(mi_startup)    /* call mi_startup()! */
+       bl      _C_LABEL(mi_startup)    /* call mi_startup()! */
 
        ldr     r0, =.Lmainreturned
        b       _C_LABEL(panic)
@@ -219,8 +219,8 @@ translate_va_to_pa:
        mov     pc, lr
 
 /*
- * Init        MMU
- * r0 -        The table base address
+ * Init MMU
+ * r0 - the table base address
  */
 
 ASENTRY_NP(init_mmu)
@@ -267,11 +267,11 @@ END(init_mmu)
 
 
 /*
- * Init        SMP coherent mode, enable caching and switch to final MMU table.
- * Called with disabled        caches
- * r0 -        The table base address
- * r1 -        clear bits for aux register
- * r2 -        set bits for aux register
+ * Init SMP coherent mode, enable caching and switch to final MMU table.
+ * Called with disabled caches
+ * r0 - The table base address
+ * r1 - clear bits for aux register
+ * r2 - set bits for aux register
  */
 ASENTRY_NP(reinit_mmu)
        push    {r4-r11, lr}
@@ -331,11 +331,11 @@ END(reinit_mmu)
 
 /*
  * Builds the page table
- * r0 -        The table base address
- * r1 -        The physical address (trashed)
- * r2 -        The virtual address (trashed)
- * r3 -        The number of 1MiB sections
- * r4 -        Trashed
+ * r0 - The table base address
+ * r1 - The physical address (trashed)
+ * r2 - The virtual address (trashed)
+ * r3 - The number of 1MiB sections
+ * r4 - Trashed
  *
  * Addresses must be 1MiB aligned
  */
@@ -350,15 +350,15 @@ build_pagetables:
 #endif
        orr     r1, r4
 
-       /* Move the virtual address to the correct bit location */
+       /* Move the virtual address to the correct bit location */
        lsr     r2, #(PTE1_SHIFT - 2)
 
        mov     r4, r3
 1:
        str     r1, [r0, r2]
-       add     r2, r2, #4
-       add     r1, r1, #(PTE1_SIZE)
-       adds    r4, r4, #-1
+       add     r2, r2, #4
+       add     r1, r1, #(PTE1_SIZE)
+       adds    r4, r4, #-1
        bhi     1b
 
        mov     pc, lr
@@ -372,7 +372,7 @@ VA_TO_PA_POINTER(Lpagetable, boot_pt1)
        .word   svcstk                  /* must remain in order together. */
 
 .Lmainreturned:
-       .asciz  "main() returned"
+       .asciz  "main() returned"
        .align  2
 
        .bss
@@ -380,8 +380,8 @@ svcstk:
        .space  INIT_ARM_STACK_SIZE * MAXCPU
 
 /*
- * Memory for the initial pagetable. We        are unable to place this in
- * the bss as this will        be cleared after the table is loaded.
+ * Memory for the initial pagetable. We are unable to place this in
+ * the bss as this will be cleared after the table is loaded.
  */
        .section ".init_pagetable"
        .align  14 /* 16KiB aligned */
@@ -398,7 +398,7 @@ boot_pt1:
 #if defined(SMP)
 
 ASENTRY_NP(mpentry)
-       /* Make sure interrupts are disabled. */
+       /* Make sure interrupts are disabled. */
        cpsid   ifa
 
        /* Setup core, disable all caches. */
@@ -419,10 +419,10 @@ ASENTRY_NP(mpentry)
        mcr     CP15_ICIALLU
        ISB
 
-       /* Find the delta between VA and PA */
+       /* Find the delta between VA and PA */
        adr     r0, Lpagetable
        bl      translate_va_to_pa
-       
+
        bl      init_mmu
 
        adr     r1, .Lstart+8           /* Get initstack pointer from */
@@ -433,7 +433,7 @@ ASENTRY_NP(mpentry)
        mul     r2, r1, r0              /* Point sp to initstack */
        add     sp, sp, r2              /* area for this processor. */
 
-       /* Switch to virtual addresses. */
+       /* Switch to virtual addresses. */
        ldr     pc, =1f
 1:
        mov     fp, #0                  /* trace back starts here */
@@ -459,14 +459,14 @@ ENTRY_NP(cpu_halt)
        ldr     r4, [r4]
        teq     r4, #0
        movne   pc, r4
-1:     
+1:
        WFI
        b       1b
 
        /*
         * _cpu_reset_address contains the address to branch to, to complete
         * the cpu reset after turning the MMU off
-        * This variable is provided by the hardware specific code
+        * This variable is provided by the hardware specific code
         */
 .Lcpu_reset_address:
        .word   _C_LABEL(cpu_reset_address)
@@ -498,38 +498,37 @@ END(abort)
 
 ENTRY_NP(sigcode)
        mov     r0, sp
-       add     r0, r0, #SIGF_UC
+       add     r0, r0, #SIGF_UC
 
        /*
-        * Call the sigreturn system call.
+        * Call the sigreturn system call.
         *
         * We have to load r7 manually rather than using
-        * "ldr r7, =SYS_sigreturn" to ensure the value of szsigcode is
+        * "ldr r7, =SYS_sigreturn" to ensure the value of szsigcode is
         * correct. Using the alternative places esigcode at the address
-        * of the data rather than the address one past the data.
+        * of the data rather than the address one past the data.
         */
 
-       ldr     r7, [pc, #12]   /* Load SYS_sigreturn */
+       ldr     r7, [pc, #12]   /* Load SYS_sigreturn */
        swi     SYS_sigreturn
 
-       /* Well if that failed we better exit quick ! */
+       /* Well if that failed we better exit quick ! */
 
-       ldr     r7, [pc, #8]    /* Load SYS_exit */
+       ldr     r7, [pc, #8]    /* Load SYS_exit */
        swi     SYS_exit
 
-       /* Branch back to retry SYS_sigreturn */
+       /* Branch back to retry SYS_sigreturn */
        b       . - 16
 END(sigcode)
-
        .word   SYS_sigreturn
        .word   SYS_exit
 
        .align  2
-       .global _C_LABEL(esigcode)
+       .global _C_LABEL(esigcode)
                _C_LABEL(esigcode):
 
        .data
-       .global szsigcode
+       .global szsigcode
 szsigcode:
        .long esigcode-sigcode
 

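The 28-byte block that locore-v6.S builds on the stack before calling initarm
corresponds to struct arm_boot_params.  A hedged sketch of a matching layout
(member names are illustrative; the real definition lives in the arm machdep
headers):

#include <stdint.h>

/* 7 words of 4 bytes = 28, matching "mov r1, #28" and the str offsets 0..24. */
struct arm_boot_params_sketch {
        uint32_t        abp_size;       /* [r0, #0]  length of this block   */
        uint32_t        abp_r0;         /* [r0, #4]  r0 from the bootloader */
        uint32_t        abp_r1;         /* [r0, #8]  r1 from the bootloader */
        uint32_t        abp_r2;         /* [r0, #12] r2 from the bootloader */
        uint32_t        abp_r3;         /* [r0, #16] r3 from the bootloader */
        uint32_t        abp_physaddr;   /* [r0, #20] kernel load address    */
        uint32_t        abp_pagetable;  /* [r0, #24] early page table (PA)  */
};
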
Modified: stable/10/sys/arm/arm/machdep.c
==============================================================================
--- stable/10/sys/arm/arm/machdep.c	Sun Jan 24 19:41:31 2016	(r294680)
+++ stable/10/sys/arm/arm/machdep.c	Sun Jan 24 19:58:58 2016	(r294681)
@@ -832,7 +832,7 @@ fake_preload_metadata(struct arm_boot_pa
 void
 pcpu0_init(void)
 {
-#if ARM_ARCH_6 || ARM_ARCH_7A || defined(CPU_MV_PJ4B)
+#if __ARM_ARCH >= 6
        set_curthread(&thread0);
 #endif
        pcpu_init(pcpup, 0, sizeof(struct pcpu));

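The pcpu0_init() change is the producer side of the TPIDRPRW convention used by
the GET_PCB macros above: on ARMv6+ set_curthread() stores the thread pointer
in that register.  Roughly (a sketch; the in-tree version in the arm pcpu
header may differ in detail):

struct thread;

static __inline void
set_curthread_sketch(struct thread *td)
{
        /* mcr p15, 0, td, c13, c0, 4 -- write TPIDRPRW */
        __asm __volatile("mcr p15, 0, %0, c13, c0, 4" : : "r" (td));
}
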
Modified: stable/10/sys/arm/arm/trap.c
==============================================================================
--- stable/10/sys/arm/arm/trap.c	Sun Jan 24 19:41:31 2016	(r294680)
+++ stable/10/sys/arm/arm/trap.c	Sun Jan 24 19:58:58 2016	(r294681)
@@ -95,6 +95,7 @@ __FBSDID("$FreeBSD$");
 #include <vm/vm_map.h>
 #include <vm/vm_extern.h>
 
+#include <machine/acle-compat.h>
 #include <machine/cpu.h>
 #include <machine/frame.h>
 #include <machine/machdep.h>
@@ -322,7 +323,7 @@ abort_handler(struct trapframe *tf, int 
         * location, so we can deal with those quickly.  Otherwise we need to
         * disassemble the faulting instruction to determine if it was a write.
         */
-#if ARM_ARCH_6 || ARM_ARCH_7A
+#if __ARM_ARCH >= 6
        ftype = (fsr & FAULT_WNR) ? VM_PROT_READ | VM_PROT_WRITE : VM_PROT_READ;
 #else
        if (IS_PERMISSION_FAULT(fsr))
