Module Name:    src
Committed By:   nisimura
Date:           Fri Aug 25 21:43:49 UTC 2017

Modified Files:
        src/sys/arch/aarch64/aarch64: locore.S

Log Message:
Put these routines into better shape.


To generate a diff of this commit:
cvs rdiff -u -r1.2 -r1.3 src/sys/arch/aarch64/aarch64/locore.S

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/arch/aarch64/aarch64/locore.S
diff -u src/sys/arch/aarch64/aarch64/locore.S:1.2 src/sys/arch/aarch64/aarch64/locore.S:1.3
--- src/sys/arch/aarch64/aarch64/locore.S:1.2	Wed Aug 16 22:49:05 2017
+++ src/sys/arch/aarch64/aarch64/locore.S	Fri Aug 25 21:43:49 2017
@@ -1,4 +1,4 @@
-/* $NetBSD: locore.S,v 1.2 2017/08/16 22:49:05 nisimura Exp $ */
+/* $NetBSD: locore.S,v 1.3 2017/08/25 21:43:49 nisimura Exp $ */
 
 /*-
  * Copyright (c) 2014 The NetBSD Foundation, Inc.
@@ -34,10 +34,16 @@
 
 #include "opt_ddb.h"
 
-RCSID("$NetBSD: locore.S,v 1.2 2017/08/16 22:49:05 nisimura Exp $")
+RCSID("$NetBSD: locore.S,v 1.3 2017/08/25 21:43:49 nisimura Exp $")
 
 // XXX:AARCH64
 lr	.req	x30
+	.macro DISABLE_INTERRUPT
+	msr	daifset, #DAIF_I|DAIF_F		/* daif'set */
+	.endm
+	.macro ENABLE_INTERRUPT
+	msr	daifclr, #DAIF_I|DAIF_F		/* daif'clr */
+	.endm
 
 /*
  * At IPL_SCHED:
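
Note: the new DISABLE_INTERRUPT/ENABLE_INTERRUPT macros write the
DAIFSet/DAIFClr special registers, whose 4-bit immediate selects which
of the D, A, I and F mask bits of PSTATE to set or clear; setting I
and F masks IRQs and FIQs.  A minimal C sketch of the same operations,
assuming GCC-style inline assembly (the function names are
illustrative, not the kernel's API):

	/* I and F bits of the DAIFSet/DAIFClr imm4: D=8, A=4, I=2, F=1 */
	static inline void
	disable_interrupt_sketch(void)
	{
		/* msr daifset, #(I|F): mask IRQ and FIQ */
		__asm volatile("msr daifset, #3" ::: "memory");
	}

	static inline void
	enable_interrupt_sketch(void)
	{
		/* msr daifclr, #(I|F): unmask IRQ and FIQ */
		__asm volatile("msr daifclr, #3" ::: "memory");
	}
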
@@ -50,9 +56,9 @@ ENTRY_NP(cpu_switchto)
 	cbz	x0, .Lrestore_lwp
 
 	/*
-	 * Store the callee saved register on the stack in a trapframe
+	 * Store the callee-saved registers on the stack.
 	 */
-	sub	sp, sp, #TF_SIZE
+	sub	sp, sp, #TF_SIZE		/* make switchframe */
 	stp	x19, x20, [sp, #TF_X19]
 	stp	x21, x22, [sp, #TF_X21]
 	stp	x23, x24, [sp, #TF_X23]
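
Note: the "switchframe" here is simply the callee-saved slice of
struct trapframe: cpu_switchto() reserves a full TF_SIZE frame but
fills only the x19..x30 slots, since the caller-saved registers are
dead across the call.  A sketch of the slots touched, with
illustrative field names (the real offsets are the TF_* constants
from assym.h):

	#include <stdint.h>

	/* Hypothetical view of the part of the trapframe used above. */
	struct switchframe_sketch {
		uint64_t sf_x19, sf_x20, sf_x21, sf_x22;
		uint64_t sf_x23, sf_x24, sf_x25, sf_x26;
		uint64_t sf_x27, sf_x28;
		uint64_t sf_x29, sf_lr;		/* fp/lr, stored as a pair */
	};
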
@@ -61,20 +67,20 @@ ENTRY_NP(cpu_switchto)
 	stp	x29, x30, [sp, #TF_X29]
 
 	/*
-	 * Get the previous trapframe pointer and the user writeable Thread ID
-	 * register and save them in the trap frame.
+	 * Save the previous trapframe pointer and EL0 thread ID in the
+	 * switchframe.
 	 */
 	ldr	x5, [x0, #L_MD_KTF]
 	mrs	x4, tpidr_el0
 #if TF_TPIDR + 8 == TF_CHAIN
-	str	x4, x5, [sp, #TF_TPIDR]
+	stp	x4, x5, [sp, #TF_TPIDR]
 #else
 	str	x4, [sp, #TF_TPIDR]
 	str	x5, [sp, #TF_CHAIN]
 #endif
 
 	/*
-	 * Get the current stack pointer and the CPACR and save them in
+	 * Save the current stack pointer and the CPACR in the
 	 * old lwp md area.
 	 */
 	mov	x4, sp 
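
Note: L_MD_KTF and L_MD_CPACR (and TF_TPIDR/TF_CHAIN above) are
assym.h offsets; the #if checks detect when the two fields are
adjacent so that a single stp/ldp can transfer both.  A sketch of the
layout this code relies on (field names are illustrative, not the
real struct mdlwp):

	#include <stddef.h>
	#include <stdint.h>

	struct trapframe;

	/* Hypothetical layout inferred from the assym offsets. */
	struct mdlwp_sketch {
		struct trapframe *md_ktf;	/* kernel trapframe (aka SP) */
		uint64_t md_cpacr;		/* saved cpacr_el1 */
	};

	/* Adjacency is what makes the single-ldp/stp path legal. */
	_Static_assert(offsetof(struct mdlwp_sketch, md_ktf) + 8 ==
	    offsetof(struct mdlwp_sketch, md_cpacr), "pairable");
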
@@ -90,48 +96,109 @@ ENTRY_NP(cpu_switchto)
 
 .Lrestore_lwp:
 #if L_MD_KTF + 8 == L_MD_CPACR
-	ldp	x4, x5, [x1, #L_MD_KTF]	// get trapframe ptr and cpacr_el1
+	ldp	x4, x5, [x1, #L_MD_KTF]	/* get trapframe ptr and cpacr_el1 */
 #else
-	ldr	x4, [x0, #L_MD_KTF]	// get trapframe ptr (aka SP)
-	ldr	x5, [x0, #L_MD_CPACR]	// get cpacr_el1
+	ldr	x4, [x1, #L_MD_KTF]	/* get trapframe ptr (aka SP) */
+	ldr	x5, [x1, #L_MD_CPACR]	/* get cpacr_el1 */
 #endif
-	mov	sp, x4			// restore stack pointer
-	msr	cpacr_el1, x5		// restore cpacr_el1
+	mov	sp, x4			/* restore stack pointer */
+	msr	cpacr_el1, x5		/* restore cpacr_el1 */
 
-	ldr	x4, [sp, #TF_TPIDR]	// load user writeable thread ip reg
-	msr	tpidr_el0, x4		// restore it
+	ldr	x4, [sp, #TF_TPIDR]
+	msr	tpidr_el0, x4		/* restore EL0 thread ID */
 
-	mrs	x3, tpidr_el1		// get curcpu
-	str	x1, [x3, #CI_CURLWP]	// show as curlwp
+	mrs	x3, tpidr_el1
+	str	x1, [x3, #CI_CURLWP]	/* switch curlwp to new lwp */
 
 	/*
-	 * Restore callee save registers
+	 * Restore the callee-saved registers.
 	 */
 	ldp	x19, x20, [sp, #TF_X19]
 	ldp	x21, x22, [sp, #TF_X21]
 	ldp	x23, x24, [sp, #TF_X23]
 	ldp	x25, x26, [sp, #TF_X25]
 	ldp	x27, x28, [sp, #TF_X27]
-	ldp	x29, x30, [sp, #TF_X29]
-	add	sp, sp, #TF_SIZE	/* pop trapframe from stack */
+	ldp	x29, lr, [sp, #TF_X29]
+	add	sp, sp, #TF_SIZE	/* unwind switchframe */
 
 	ret
 END(cpu_switchto)
 
 /*
- *	x0 = lwp
- *	x1 = ipl
+ * void
+ * cpu_switchto_softint(struct lwp *softlwp, int ipl)
+ * {
+ *	build a switchframe on kernel stack.
+ *	craft TF_X30 to point at softint_cleanup.
+ *	pinned_lwp = curlwp
+ *	switch to softlwp context.
+ *	call softint_dispatch(pinned_lwp, ipl);
+ *	switch back to pinned_lwp context.
+ *	unwind switchframe made on kernel stack.
+ *	return to the caller this time.
+ * }
  */
 ENTRY_NP(cpu_switchto_softint)
-//
-//XXXAARCH64
-//
+	sub	sp, sp, #TF_SIZE	/* make switchframe */
+	adr	x2, softint_cleanup
+	stp	x19, x20, [sp, #TF_X19]
+	stp	x21, x22, [sp, #TF_X21]
+	stp	x23, x24, [sp, #TF_X23]
+	stp	x25, x26, [sp, #TF_X25]
+	stp	x27, x28, [sp, #TF_X27]
+	stp	x29, x2, [sp, #TF_X29]	/* tf->lr = softint_cleanup; */
+
+	mrs	x3, tpidr_el1
+	ldr	x2, [x3, #CI_CURLWP]	/* x2 := curcpu()->ci_curlwp */
+	mov	x4, sp			/* x4 := sp */
+	DISABLE_INTERRUPT
+	str	x4, [x2, #L_MD_KTF]	/* curlwp->l_md_ktf := sp */
+	str	x0, [x3, #CI_CURLWP]	/* curcpu()->ci_curlwp = softlwp; */
+	ldr	x4, [x0, #L_MD_KTF]	/* switch to softlwp stack */
+	mov	sp, x4			/* new sp := softlwp->l_md_ktf */
+	ENABLE_INTERRUPT
+	mov	x19, x2			/* x19 := pinned_lwp */
+	mov	x20, lr			/* x20 := original lr */
+
+	/* softint_dispatch(pinned_lwp, ipl) */
+	mov	x0, x19
+	bl	_C_LABEL(softint_dispatch)
+
+	mrs	x3, tpidr_el1
+	DISABLE_INTERRUPT
+	str	x19, [x3, #CI_CURLWP]	/* curcpu()->ci_curlwp := x19 */
+	ldr	x4, [x19, #L_MD_KTF]	/* x4 := curlwp->l_md_ktf */
+	mov	sp, x4			/* restore pinned_lwp sp */
+	ENABLE_INTERRUPT
+	mov	lr, x20			/* restore pinned_lwp lr */
+	ldp	x19, x20, [sp, #TF_X19]	/* restore x19 and x20 */
+	add	sp, sp, #TF_SIZE	/* unwind switchframe */
 	ret
 END(cpu_switchto_softint)
 
+/*
+ * void
+ * softint_cleanup(struct lwp *softlwp)
+ * {
+ *	cpu_switchto()'s bottom half arranges to start this when it
+ *	resumes the softlwp; the kernel thread then yields the CPU
+ *	back to the pinned_lwp from cpu_switchto_softint above.
+ *	curcpu()->ci_mtx_count += 1;
+ *	softlwp->l_ctxswtch = 0;
+ *	this returns as if cpu_switchto_softint finished normally.
+ * }
+ */
+ENTRY_NP(softint_cleanup)
+	mrs	x3, tpidr_el1		/* curcpu() */
+	ldr	w2, [x3, #CI_MTX_COUNT]	/* ->ci_mtx_count */
+	add	w2, w2, #1
+	str	w2, [x3, #CI_MTX_COUNT]
+	str	wzr, [x0, #L_CTXSWTCH]	/* softlwp->l_ctxswtch = 0 */
+	add	sp, sp, #TF_SIZE	/* unwind switchframe */
+	ret
+END(softint_cleanup)
 
 /*
- * Called at IPL_SCHED
+ * Called at IPL_SCHED:
  *	x0 = old lwp (from cpu_switchto)
  *	x1 = new lwp (from cpu_switchto)
  *	x27 = func
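
Note: the rewritten cpu_switchto_softint follows the pseudocode in
its comment, masking interrupts only around the operations that must
be atomic: publishing the new curlwp and swapping stacks.  A hedged C
restatement of the control flow, using stub types and illustrative
names (curcpu(), l_md_ktf and the *_sketch helpers stand in for the
real MI/MD interfaces):

	struct lwp;
	struct cpu_info { struct lwp *ci_curlwp; };

	extern struct cpu_info *curcpu(void);
	extern void softint_dispatch(struct lwp *, int);
	extern void disable_interrupt_sketch(void);
	extern void enable_interrupt_sketch(void);

	void
	cpu_switchto_softint_sketch(struct lwp *softlwp, int ipl)
	{
		struct cpu_info *ci = curcpu();
		struct lwp *pinned = ci->ci_curlwp;

		/* make switchframe; craft its saved lr = softint_cleanup */
		disable_interrupt_sketch();
		/* pinned->l_md_ktf = sp;   remember our stack */
		ci->ci_curlwp = softlwp;	/* publish new curlwp */
		/* sp = softlwp->l_md_ktf;  borrow softlwp's stack */
		enable_interrupt_sketch();

		softint_dispatch(pinned, ipl);

		disable_interrupt_sketch();
		ci->ci_curlwp = pinned;		/* back to the pinned lwp */
		/* sp = pinned->l_md_ktf;   restore our stack */
		enable_interrupt_sketch();
		/* unwind switchframe and return to the caller */
	}

softint_cleanup only runs on the blocked-softint path: if the softlwp
sleeps, cpu_switchto() later restores its switchframe and returns
through the crafted lr, which bumps ci_mtx_count, clears l_ctxswtch,
unwinds the switchframe, and returns as if cpu_switchto_softint had
finished normally.
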
@@ -161,6 +228,7 @@ END(lwp_trampoline)
  * a syscall return.
  */
 ENTRY_NP(exception_trap_exit)
+	/* XXX critical section guarded by SR.EXL if this were MIPS XXX */
 	ldp	x0, x1, [sp, #TF_X0]
 	ldp	x2, x3, [sp, #TF_X2]
 	ldp	x4, x5, [sp, #TF_X4]
@@ -169,20 +237,18 @@ ENTRY_NP(exception_trap_exit)
 	ldp	x10, x11, [sp, #TF_X10]
 	ldp	x12, x13, [sp, #TF_X12]
 	ldp	x14, x15, [sp, #TF_X14]
-exception_syscall_exit:
 	ldp	x16, x17, [sp, #TF_X16]
 	ldr	x18, [sp, #TF_X18]
 
-#if TF_SP + 8 == TF_PC
-	ldp	x20, x21, [sp, #TF_SP]
-#else
-	ldr	x20, [sp, #TF_SP]
-	ldr	x21, [sp, #TF_PC]
-#endif
-	ldr	x22, [sp, #TF_SPSR]
-	msr	sp_el0, x20
-	msr	elr_el1, x21
-	msr	spsr_el1, x22
+	ldr	x20, [sp, #TF_PC]
+	ldr	x21, [sp, #TF_SPSR]
+	msr	elr_el1, x20		/* exception pc */
+	msr	spsr_el1, x21		/* exception pstate */
+
+	and	x21, x21, #1
+	cbz	x21, .Lkernelexception
+	ldr	x22, [sp, #TF_SP]
+	msr	sp_el0, x22		/* restore EL0 stack */
 
 	ldp	x19, x20, [sp, #TF_X19]
 	ldp	x21, x22, [sp, #TF_X21]
@@ -190,11 +256,16 @@ exception_syscall_exit:
 	ldp	x25, x26, [sp, #TF_X25]
 	ldp	x27, x28, [sp, #TF_X27]
 	ldp	x29, x30, [sp, #TF_X29]
-
-	/*
-	 * Don't adjust the stack for the trapframe since we would
-	 * just add subtract it again upon exception entry.
-	 */
+	/* EL1 sp stays at l_md_utf */
+	eret
+.Lkernelexception:
+	ldp	x19, x20, [sp, #TF_X19]
+	ldp	x21, x22, [sp, #TF_X21]
+	ldp	x23, x24, [sp, #TF_X23]
+	ldp	x25, x26, [sp, #TF_X25]
+	ldp	x27, x28, [sp, #TF_X27]
+	ldp	x29, x30, [sp, #TF_X29]
+	add	sp, sp, #TF_SIZE	/* unwind trapframe on stack */
 	eret
 END(exception_trap_exit)
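
Note: the single exit path is now split on the low bit of the saved
SPSR: returns to EL0 restore sp_el0 and leave the EL1 stack pointer
parked at the trapframe (l_md_utf) for the next exception entry,
while returns to EL1 pop the trapframe off the current kernel stack.
A minimal sketch of the test, mirroring the assembly (tf_spsr is an
illustrative field name):

	#include <stdint.h>

	struct trapframe_sketch { uint64_t tf_spsr; };

	static int
	returns_to_el0(const struct trapframe_sketch *tf)
	{
		/* mirrors "and x21, x21, #1; cbz x21, .Lkernelexception" */
		return (tf->tf_spsr & 1) != 0;
	}
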
 
@@ -205,6 +276,24 @@ ENTRY(cpu_Debugger)
 END(cpu_Debugger)
 #endif /* DDB */
 
+#ifdef MULTIPROCESSOR
+/*
+ * void
+ * cpu_spinup_trampoline(int cpu_index)
+ * {
+ *      ci := cpu_info[cpu_index], held in the tp register
+ *      ci->ci_curlwp = ci->ci_data.ci_idlelwp;
+ *      sp := ci->ci_curlwp->l_addr + USPACE - sizeof(struct trapframe)
+ *      cpu_hatch(ci);
+ *      jump to idle_loop() to join the cpu pool.
+ * }
+ */
+ENTRY(cpu_spinup_trampoline)
+	bl	_C_LABEL(cpu_hatch)
+	b	_C_LABEL(cpu_idle)
+END(cpu_spinup_trampoline)
+#endif
+
 /*
  * int cpu_set_onfault(struct faultbuf *fb, register_t retval)
  */
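
Note: the new MULTIPROCESSOR trampoline defers everything described
in its comment to C; the assembly merely chains cpu_hatch() into the
idle loop.  A hedged sketch of the equivalent flow (the prototypes
and the ci argument setup are assumed, as in the stub):

	struct cpu_info;

	extern void cpu_hatch(struct cpu_info *);
	extern void cpu_idle(void);

	void
	cpu_spinup_trampoline_sketch(struct cpu_info *ci)
	{
		/* curlwp and the idle lwp's stack are set up before this
		 * point in the real trampoline */
		cpu_hatch(ci);		/* per-CPU hardware/runtime init */
		cpu_idle();		/* join the idle loop; no return */
	}
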
