Module Name:    src
Committed By:   matt
Date:           Fri Feb  5 07:36:51 UTC 2010

Modified Files:
        src/sys/arch/mips/conf [matt-nb5-mips64]: files.mips
        src/sys/arch/mips/include [matt-nb5-mips64]: cpu.h cpuregs.h locore.h
            mips_param.h types.h
        src/sys/arch/mips/mips [matt-nb5-mips64]: genassym.cf locore.S
            vm_machdep.c
Added Files:
        src/sys/arch/mips/mips [matt-nb5-mips64]: mips_softint.c
Removed Files:
        src/sys/arch/mips/mips [matt-nb5-mips64]: softintr.c

Log Message:
Add __HAVE_FAST_SOFTINTS support.
Add a routine to remap an uarea via a direct-mapped address.  This avoids
TLB machinations when switching to/from the softint thread.  This can only
be done for lwps which won't exit.
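
For illustration only (not part of this commit): a minimal user-space sketch of
the direct-mapping idea, assuming the standard MIPS layout where KSEG0 is a
fixed, untranslated window onto the low 512MB of physical memory.  The helper
phys_to_kseg0() and the constants/addresses below are stand-ins; the kernel
code uses MIPS_PHYS_TO_KSEG0() (or MIPS_PHYS_TO_XKPHYS_CACHED() on LP64) from
the headers touched by this commit.

#include <stdint.h>
#include <stdio.h>

#define MIPS_KSEG0_START	0x80000000u	/* base of the unmapped KSEG0 window */
#define MIPS_PHYS_MASK		0x1fffffffu	/* highest PA reachable through KSEG0 */
#define PAGE_SIZE		4096u		/* illustrative page size */
#define UPAGES			2u		/* pages of u-area (see mips_param.h) */
#define USPACE			(UPAGES * PAGE_SIZE)

/* Hypothetical helper mirroring MIPS_PHYS_TO_KSEG0(): PA -> direct-mapped VA. */
static uint32_t
phys_to_kseg0(uint32_t pa)
{
	return MIPS_KSEG0_START | pa;
}

int
main(void)
{
	uint32_t uarea_pa = 0x01234000u;	/* example physical uarea address */

	if (uarea_pa + USPACE - 1 <= MIPS_PHYS_MASK) {
		/*
		 * The whole uarea fits in the KSEG0 window, so the softint
		 * thread's kernel stack can be reached without a TLB entry.
		 */
		printf("uarea VA = %#x\n", (unsigned)phys_to_kseg0(uarea_pa));
	}
	return 0;
}

The cpu_uarea_remap() added below does the real work: it checks that the uarea
is physically contiguous (reallocating it with uvm_pglistalloc() if not) and
then rewrites l->l_addr, the saved stack pointer, and md_utf to point at the
direct-mapped address.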


To generate a diff of this commit:
cvs rdiff -u -r1.58.24.6 -r1.58.24.7 src/sys/arch/mips/conf/files.mips
cvs rdiff -u -r1.90.16.17 -r1.90.16.18 src/sys/arch/mips/include/cpu.h
cvs rdiff -u -r1.74.28.13 -r1.74.28.14 src/sys/arch/mips/include/cpuregs.h
cvs rdiff -u -r1.78.36.1.2.11 -r1.78.36.1.2.12 \
    src/sys/arch/mips/include/locore.h
cvs rdiff -u -r1.23.78.4 -r1.23.78.5 src/sys/arch/mips/include/mips_param.h
cvs rdiff -u -r1.43.36.12 -r1.43.36.13 src/sys/arch/mips/include/types.h
cvs rdiff -u -r1.44.12.13 -r1.44.12.14 src/sys/arch/mips/mips/genassym.cf
cvs rdiff -u -r1.167.38.8 -r1.167.38.9 src/sys/arch/mips/mips/locore.S
cvs rdiff -u -r0 -r1.1.2.1 src/sys/arch/mips/mips/mips_softint.c
cvs rdiff -u -r1.7 -r0 src/sys/arch/mips/mips/softintr.c
cvs rdiff -u -r1.121.6.1.2.9 -r1.121.6.1.2.10 \
    src/sys/arch/mips/mips/vm_machdep.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/arch/mips/conf/files.mips
diff -u src/sys/arch/mips/conf/files.mips:1.58.24.6 src/sys/arch/mips/conf/files.mips:1.58.24.7
--- src/sys/arch/mips/conf/files.mips:1.58.24.6	Fri Jan 29 00:16:58 2010
+++ src/sys/arch/mips/conf/files.mips	Fri Feb  5 07:36:51 2010
@@ -1,4 +1,4 @@
-#	$NetBSD: files.mips,v 1.58.24.6 2010/01/29 00:16:58 matt Exp $
+#	$NetBSD: files.mips,v 1.58.24.7 2010/02/05 07:36:51 matt Exp $
 #
 
 defflag	opt_cputype.h		NOFPU FPEMUL
@@ -39,6 +39,7 @@
 file	arch/mips/mips/trap.c			# trap handlers
 file	arch/mips/mips/syscall.c		# syscall entries
 file	arch/mips/mips/mips_machdep.c
+file	arch/mips/mips/mips_softint.c
 file	arch/mips/mips/sig_machdep.c		# signal delivery
 file	arch/mips/mips/sys_machdep.c
 file	arch/mips/mips/vm_machdep.c

Index: src/sys/arch/mips/include/cpu.h
diff -u src/sys/arch/mips/include/cpu.h:1.90.16.17 src/sys/arch/mips/include/cpu.h:1.90.16.18
--- src/sys/arch/mips/include/cpu.h:1.90.16.17	Mon Feb  1 04:16:18 2010
+++ src/sys/arch/mips/include/cpu.h	Fri Feb  5 07:36:51 2010
@@ -1,4 +1,4 @@
-/*	$NetBSD: cpu.h,v 1.90.16.17 2010/02/01 04:16:18 matt Exp $	*/
+/*	$NetBSD: cpu.h,v 1.90.16.18 2010/02/05 07:36:51 matt Exp $	*/
 
 /*-
  * Copyright (c) 1992, 1993
@@ -138,6 +138,8 @@
 	vaddr_t ci_ebase;		/* VA of exception base */
 	paddr_t ci_ebase_pa;		/* PA of exception base */
 	u_long ci_cctr_freq;		/* cycle counter frequency */
+	struct lwp *ci_softlwps[SOFTINT_COUNT];
+#define	ci_softints	ci_data.cpu_softints
 	/*
 	 * Per-cpu pmap information
 	 */
@@ -563,6 +565,9 @@
 int	badaddr(void *, size_t);
 int	badaddr64(uint64_t, size_t);
 
+/* vm_machdep.c */
+void	cpu_uarea_remap(struct lwp *);
+
 #endif /* ! _LOCORE */
 #endif /* _KERNEL */
 #endif /* _CPU_H_ */

Index: src/sys/arch/mips/include/cpuregs.h
diff -u src/sys/arch/mips/include/cpuregs.h:1.74.28.13 src/sys/arch/mips/include/cpuregs.h:1.74.28.14
--- src/sys/arch/mips/include/cpuregs.h:1.74.28.13	Wed Jan 20 06:58:35 2010
+++ src/sys/arch/mips/include/cpuregs.h	Fri Feb  5 07:36:51 2010
@@ -1,4 +1,4 @@
-/*	$NetBSD: cpuregs.h,v 1.74.28.13 2010/01/20 06:58:35 matt Exp $	*/
+/*	$NetBSD: cpuregs.h,v 1.74.28.14 2010/02/05 07:36:51 matt Exp $	*/
 
 /*
  * Copyright (c) 1992, 1993
@@ -294,6 +294,8 @@
 #define	MIPS_HARD_INT_MASK	0xfc00
 #define	MIPS_SOFT_INT_MASK_1	0x0200
 #define	MIPS_SOFT_INT_MASK_0	0x0100
+#define	MIPS_SOFT_INT_MASK	0x0300
+#define	MIPS_INT_MASK_SHIFT	8
 
 /*
  * mips3 CPUs have on-chip timer at INT_MASK_5.  Each platform can

Index: src/sys/arch/mips/include/locore.h
diff -u src/sys/arch/mips/include/locore.h:1.78.36.1.2.11 src/sys/arch/mips/include/locore.h:1.78.36.1.2.12
--- src/sys/arch/mips/include/locore.h:1.78.36.1.2.11	Mon Feb  1 04:16:19 2010
+++ src/sys/arch/mips/include/locore.h	Fri Feb  5 07:36:51 2010
@@ -1,4 +1,4 @@
-/* $NetBSD: locore.h,v 1.78.36.1.2.11 2010/02/01 04:16:19 matt Exp $ */
+/* $NetBSD: locore.h,v 1.78.36.1.2.12 2010/02/05 07:36:51 matt Exp $ */
 
 /*
  * Copyright 1996 The Board of Trustees of The Leland Stanford
@@ -43,8 +43,10 @@
 int _splset(int);
 int _splget(void); 
 void _splnone(void);
-void _setsoftintr(int);
-void _clrsoftintr(int);
+void _setsoftintr(uint32_t);
+void _clrsoftintr(uint32_t);
+void softint_process(uint32_t);
+void softint_fast_dispatch(struct lwp *, int);
 
 #ifdef MIPS1
 void	mips1_tlb_set_asid(uint32_t);

Index: src/sys/arch/mips/include/mips_param.h
diff -u src/sys/arch/mips/include/mips_param.h:1.23.78.4 src/sys/arch/mips/include/mips_param.h:1.23.78.5
--- src/sys/arch/mips/include/mips_param.h:1.23.78.4	Sat Sep 12 17:17:05 2009
+++ src/sys/arch/mips/include/mips_param.h	Fri Feb  5 07:36:51 2010
@@ -1,4 +1,4 @@
-/*	$NetBSD: mips_param.h,v 1.23.78.4 2009/09/12 17:17:05 matt Exp $	*/
+/*	$NetBSD: mips_param.h,v 1.23.78.5 2010/02/05 07:36:51 matt Exp $	*/
 
 #ifdef _KERNEL
 #include <machine/cpu.h>
@@ -41,6 +41,7 @@
 
 #define	UPAGES		2		/* pages of u-area */
 #define	USPACE		(UPAGES*NBPG)	/* size of u-area in bytes */
+#define	USPACE_ALIGN	USPACE		/* make sure it starts on an even VA */
 
 #ifndef MSGBUFSIZE
 #define MSGBUFSIZE	NBPG		/* default message buffer size */

Index: src/sys/arch/mips/include/types.h
diff -u src/sys/arch/mips/include/types.h:1.43.36.12 src/sys/arch/mips/include/types.h:1.43.36.13
--- src/sys/arch/mips/include/types.h:1.43.36.12	Sat Jan 30 23:49:31 2010
+++ src/sys/arch/mips/include/types.h	Fri Feb  5 07:36:51 2010
@@ -1,4 +1,4 @@
-/*	$NetBSD: types.h,v 1.43.36.12 2010/01/30 23:49:31 matt Exp $	*/
+/*	$NetBSD: types.h,v 1.43.36.13 2010/02/05 07:36:51 matt Exp $	*/
 
 /*-
  * Copyright (c) 1992, 1993
@@ -131,6 +131,7 @@
 
 #define	__SWAP_BROKEN
 
+#define	__HAVE_FAST_SOFTINTS
 #define	__HAVE_AST_PERPROC
 #define	__HAVE_SYSCALL_INTERN
 #define	__HAVE_PROCESS_XFPREGS

Index: src/sys/arch/mips/mips/genassym.cf
diff -u src/sys/arch/mips/mips/genassym.cf:1.44.12.13 src/sys/arch/mips/mips/genassym.cf:1.44.12.14
--- src/sys/arch/mips/mips/genassym.cf:1.44.12.13	Mon Feb  1 04:16:19 2010
+++ src/sys/arch/mips/mips/genassym.cf	Fri Feb  5 07:36:50 2010
@@ -1,4 +1,4 @@
-#	$NetBSD: genassym.cf,v 1.44.12.13 2010/02/01 04:16:19 matt Exp $
+#	$NetBSD: genassym.cf,v 1.44.12.14 2010/02/05 07:36:50 matt Exp $
 #
 # Copyright (c) 1992, 1993
 #	The Regents of the University of California.  All rights reserved.
@@ -100,14 +100,15 @@
 define	MIPS_XKSEG_START	MIPS_XKSEG_START
 
 # Important offsets into the lwp and proc structs & associated constants
+define	L_CPU			offsetof(struct lwp, l_cpu)
 define	L_ADDR			offsetof(struct lwp, l_addr)
 define	L_PRIORITY		offsetof(struct lwp, l_priority)
 define	L_WCHAN			offsetof(struct lwp, l_wchan)
 define	L_STAT			offsetof(struct lwp, l_stat)
 define	L_PROC			offsetof(struct lwp, l_proc)
+define	L_CTXSWITCH		offsetof(struct lwp, l_ctxswtch)
 
 # Process status constants
-define	L_CPU			offsetof(struct lwp, l_cpu)
 define	L_MD_UTF		offsetof(struct lwp, l_md.md_utf)
 define	L_MD_UPTE_0		offsetof(struct lwp, l_md.md_upte[0])
 define	L_MD_UPTE_1		offsetof(struct lwp, l_md.md_upte[1])

Index: src/sys/arch/mips/mips/locore.S
diff -u src/sys/arch/mips/mips/locore.S:1.167.38.8 src/sys/arch/mips/mips/locore.S:1.167.38.9
--- src/sys/arch/mips/mips/locore.S:1.167.38.8	Mon Feb  1 04:16:19 2010
+++ src/sys/arch/mips/mips/locore.S	Fri Feb  5 07:36:50 2010
@@ -1,4 +1,4 @@
-/*	$NetBSD: locore.S,v 1.167.38.8 2010/02/01 04:16:19 matt Exp $	*/
+/*	$NetBSD: locore.S,v 1.167.38.9 2010/02/05 07:36:50 matt Exp $	*/
 
 /*
  * Copyright (c) 1992, 1993
@@ -201,10 +201,14 @@
  * Switch to new context.
  */
 	PTR_L	t2, _C_LABEL(mips_locoresw) + MIPSX_CPU_SWITCH_RESUME
-	move	a0, MIPS_CURLWP
 	jal	ra, t2
-	nop
+	 move	a0, MIPS_CURLWP
+#ifdef MULTIPROCESSOR
+	PTR_L	t2, L_CPU(MIPS_CURLWP)
+	PTR_S	MIPS_CURLWP, CPU_INFO_CURLWP(t2)
+#else
 	PTR_S	MIPS_CURLWP, CPUVAR(CURLWP)
+#endif
 
 	/* Check for restartable atomic sequences (RAS) */
 	PTR_L	t1, L_PROC(MIPS_CURLWP)
@@ -259,6 +263,96 @@
 END(cpu_switchto)
 
 /*
+ * void softint_fast_dispatch(struct lwp *l, int s, void *new_sp);
+ *
+ * called at IPL_HIGH
+ *
+ */
+softint_cleanup:
+	PTR_L	t0, L_CPU(MIPS_CURLWP)
+	INT_L	t1, CPU_INFO_MTX_COUNT(t0)
+	addi	t1, t1, 1
+	INT_S	t1, CPU_INFO_MTX_COUNT(t0)
+	REG_L	ra, CALLFRAME_RA(sp)
+	PTR_S	zero, L_CTXSWITCH(MIPS_CURLWP)
+	j	ra
+	 PTR_ADDU sp, CALLFRAME_SIZ
+/*
+ * Arguments:
+ *	a0	the LWP to switch to
+ *	a1	IPL to execute at
+ *	a2	the new stack.
+ */
+NESTED(softint_fast_dispatch, CALLFRAME_SIZ, ra)
+	PTR_SUBU sp, CALLFRAME_SIZ
+	REG_S	ra, CALLFRAME_RA(sp)		# save return address
+	.mask	0x80000000, -4
+	PTR_L	t0, L_ADDR(MIPS_CURLWP)		# t0 = curlwp->l_addr
+	/*
+	 * Save our state in case softint_dispatch() blocks and this lwp gets
+	 * switched back to later.
+	 */
+	mfc0	t1, MIPS_COP_0_STATUS
+	PTR_LA	t2, softint_cleanup		# if softint blocks, return here
+	REG_PROLOGUE
+	REG_S	s0, U_PCB_CONTEXT+SF_REG_S0(t0)
+	REG_S	s1, U_PCB_CONTEXT+SF_REG_S1(t0)
+	REG_S	s2, U_PCB_CONTEXT+SF_REG_S2(t0)
+	REG_S	s3, U_PCB_CONTEXT+SF_REG_S3(t0)
+	REG_S	s4, U_PCB_CONTEXT+SF_REG_S4(t0)
+	REG_S	s5, U_PCB_CONTEXT+SF_REG_S5(t0)
+	REG_S	s6, U_PCB_CONTEXT+SF_REG_S6(t0)
+	REG_S	s7, U_PCB_CONTEXT+SF_REG_S7(t0)
+	#REG_S	t8, U_PCB_CONTEXT+SF_REG_T8(t0) # MIPS_CURLWP
+	REG_S	sp, U_PCB_CONTEXT+SF_REG_SP(t0)
+	REG_S	s8, U_PCB_CONTEXT+SF_REG_S8(t0)
+	REG_S	t2, U_PCB_CONTEXT+SF_REG_RA(t0)
+	REG_S	t1, U_PCB_CONTEXT+SF_REG_SR(t0)
+#if defined(__mips_n32) || defined(__mips_n64)
+	REG_S	gp, U_PCB_CONTEXT+SF_REG_GP(t0)
+#endif
+#ifdef IPL_ICU_MASK
+	INT_L	t1, _C_LABEL(md_imask)
+	INT_S	t1, U_PCB_PPL(a2)
+#endif
+	REG_EPILOGUE
+	/*
+	 * Switch to a fast softint thread.  We don't care about its existing
+	 * state, and we use a private KSEG0/XKPHYS-mapped stack so we don't
+	 * have to do any TLB manipulation.
+	 */
+	move	s0, MIPS_CURLWP
+	PTR_L	s1, L_CPU(MIPS_CURLWP)			# get curcpu()
+	move	MIPS_CURLWP, a0				# switch to softint lwp
+	PTR_S	MIPS_CURLWP, CPU_INFO_CURLWP(s1)	#    ...
+	move	s2, sp					# remember sp
+	move	s3, t0					# remember curpcb
+
+	PTR_L	t2, L_ADDR(MIPS_CURLWP)
+	move	a0, s0					# wants the pinned lwp
+	jal	_C_LABEL(softint_dispatch)
+	 PTR_ADDU sp, t2, USPACE - TF_SIZ - CALLFRAME_SIZ
+
+	move	sp, s2					# restore stack
+	move	MIPS_CURLWP, s0				# restore curlwp
+	PTR_S	MIPS_CURLWP, CPU_INFO_CURLWP(s1)	#    ....
+
+	REG_PROLOGUE
+	REG_L	s0, U_PCB_CONTEXT+SF_REG_S0(s3)		# restore the saved
+	REG_L	s1, U_PCB_CONTEXT+SF_REG_S1(s3)		#    registers that we
+	REG_L	s2, U_PCB_CONTEXT+SF_REG_S2(s3)		#    used
+	REG_L	s3, U_PCB_CONTEXT+SF_REG_S3(s3)
+	REG_EPILOGUE
+
+	/*
+	 * Almost everything (all except sp) is restored so we can return.
+	 */
+	REG_L	ra, CALLFRAME_RA(sp)
+	j	ra
+	 PTR_ADDU sp, CALLFRAME_SIZ
+END(softint_fast_dispatch)
+
+/*
  * savectx(struct user *up)
  */
 LEAF(savectx)

Index: src/sys/arch/mips/mips/vm_machdep.c
diff -u src/sys/arch/mips/mips/vm_machdep.c:1.121.6.1.2.9 src/sys/arch/mips/mips/vm_machdep.c:1.121.6.1.2.10
--- src/sys/arch/mips/mips/vm_machdep.c:1.121.6.1.2.9	Mon Feb  1 04:16:20 2010
+++ src/sys/arch/mips/mips/vm_machdep.c	Fri Feb  5 07:36:50 2010
@@ -1,4 +1,4 @@
-/*	$NetBSD: vm_machdep.c,v 1.121.6.1.2.9 2010/02/01 04:16:20 matt Exp $	*/
+/*	$NetBSD: vm_machdep.c,v 1.121.6.1.2.10 2010/02/05 07:36:50 matt Exp $	*/
 
 /*
  * Copyright (c) 1992, 1993
@@ -80,7 +80,7 @@
 #include "opt_coredump.h"
 
 #include <sys/cdefs.h>			/* RCS ID & Copyright macro defns */
-__KERNEL_RCSID(0, "$NetBSD: vm_machdep.c,v 1.121.6.1.2.9 2010/02/01 04:16:20 matt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: vm_machdep.c,v 1.121.6.1.2.10 2010/02/05 07:36:50 matt Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -207,7 +207,103 @@
 #ifdef IPL_ICU_MASK
 	pcb->pcb_ppl = 0;	/* machine dependent interrupt mask */
 #endif
-}	
+}
+
+static struct evcnt uarea_remapped = 
+    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "uarea", "remapped");
+static struct evcnt uarea_reallocated = 
+    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "uarea", "reallocated");
+EVCNT_ATTACH_STATIC(uarea_remapped);
+EVCNT_ATTACH_STATIC(uarea_reallocated);
+
+void
+cpu_uarea_remap(struct lwp *l)
+{
+	bool uarea_ok;
+	vaddr_t va;
+	paddr_t pa;
+
+	/*
+	 * Grab the starting physical address of the uarea.
+	 */
+	va = (vaddr_t)l->l_addr;
+	if (!pmap_extract(pmap_kernel(), va, &pa))
+		panic("%s: pmap_extract(%#"PRIxVADDR") failed", __func__, va);
+
+	/*
+	 * Check to see if the existing uarea is physically contiguous.
+	 */
+	uarea_ok = true;
+	for (vaddr_t i = PAGE_SIZE; uarea_ok && i < USPACE; i += PAGE_SIZE) {
+		paddr_t pa0;
+		if (!pmap_extract(pmap_kernel(), va + i, &pa0))
+			panic("%s: pmap_extract(%#"PRIxVADDR") failed",
+			    __func__, va + i);
+		uarea_ok = (pa0 - pa == i);
+	}
+
+#ifndef _LP64
+	/*
+	 * If this is a 32-bit kernel, the uarea needs to be mappable via KSEG0
+	 */
+	uarea_ok = uarea_ok && (pa + USPACE - 1 <= MIPS_PHYS_MASK);
+#endif
+	printf("ctx=%#"PRIxVADDR" utf=%p\n", 
+	    (vaddr_t)l->l_addr->u_pcb.pcb_context.val[_L_SP],
+	    l->l_md.md_utf);
+	KASSERT((vaddr_t)l->l_addr->u_pcb.pcb_context.val[_L_SP] == (vaddr_t)l->l_md.md_utf);
+	vaddr_t sp = l->l_addr->u_pcb.pcb_context.val[_L_SP] - (vaddr_t)l->l_addr;
+
+	if (!uarea_ok) {
+		struct pglist pglist;
+#ifdef _LP64
+		const paddr_t high = mips_avail_end;
+#else
+		const paddr_t high = MIPS_KSEG1_START - MIPS_KSEG0_START;
+#endif
+		int error;
+
+		/*
+		 * Allocate a new physically contiguous uarea which can be
+		 * direct-mapped.
+		 */
+		error = uvm_pglistalloc(USPACE, mips_avail_start, high,
+		    USPACE_ALIGN, 0, &pglist, 1, 1);
+		if (error)
+			panic("%s: uvm_pglistalloc failed: %d", __func__,
+			    error);
+
+		/*
+		 * Get the physical address from the first page.
+		 */
+		pa = VM_PAGE_TO_PHYS(TAILQ_FIRST(&pglist));
+	}
+
+	/*
+	 * Now set the new uarea (if it's different).  If l->l_addr was already
+	 * a direct-mapped address then this routine doesn't really change
+	 * anything, but that's improbable so don't micro-optimize for it.
+	 */
+#ifdef _LP64
+	va = MIPS_PHYS_TO_XKPHYS_CACHED(pa);
+#else
+	va = MIPS_PHYS_TO_KSEG0(pa);
+#endif
+	if (!uarea_ok) {
+		((struct trapframe *)(va + USPACE))[-1] = *l->l_md.md_utf;
+		*(struct pcb *)va = l->l_addr->u_pcb;
+		/*
+		 * Discard the old uarea.
+		 */
+		uvm_uarea_free(USER_TO_UAREA(l->l_addr), curcpu());
+		uarea_reallocated.ev_count++;
+	}
+
+	l->l_addr = (struct user *)va;
+	l->l_addr->u_pcb.pcb_context.val[_L_SP] = sp + va;
+	l->l_md.md_utf = (struct trapframe *)((char *)l->l_addr + USPACE) - 1;
+	uarea_remapped.ev_count++;
+}
 
 /*
  * Finish a swapin operation.

Added files:

Index: src/sys/arch/mips/mips/mips_softint.c
diff -u /dev/null src/sys/arch/mips/mips/mips_softint.c:1.1.2.1
--- /dev/null	Fri Feb  5 07:36:51 2010
+++ src/sys/arch/mips/mips/mips_softint.c	Fri Feb  5 07:36:50 2010
@@ -0,0 +1,142 @@
+/*-
+ * Copyright (c) 2009, 2010 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Matt Thomas <m...@3am-software.com>.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>			/* RCS ID & Copyright macro defns */
+
+__KERNEL_RCSID(0, "$NetBSD: mips_softint.c,v 1.1.2.1 2010/02/05 07:36:50 matt Exp $");
+
+#include <sys/param.h>
+#include <sys/proc.h>
+#include <sys/lwp.h>
+#include <sys/user.h>
+#include <sys/intr.h>
+#include <sys/atomic.h>
+
+#include <uvm/uvm_extern.h>
+
+#include <mips/locore.h>
+
+#define	SOFTINT_BIO_MASK	(1 << SOFTINT_BIO)
+#define	SOFTINT_CLOCK_MASK	(1 << SOFTINT_CLOCK)
+#define	SOFTINT_NET_MASK	(1 << SOFTINT_NET)
+#define	SOFTINT_SERIAL_MASK	(1 << SOFTINT_SERIAL)
+
+/*
+ * This is more complex than usual since we want the fast softint threads
+ * to have stacks that are direct-mapped and avoid the TLB.  This means we
+ * can avoid changing the TLB entry that maps the current lwp's kernel stack.
+ *
+ * This is a very big win so it's worth going through this effort.
+ */
+void
+softint_init_md(lwp_t *l, u_int si_level, uintptr_t *machdep)
+{
+	struct cpu_info * const ci = l->l_cpu;
+
+	cpu_uarea_remap(l);	/* switch to direct mapped stack */
+
+	*machdep = si_level;
+	ci->ci_softlwps[si_level] = l;
+}
+
+void
+softint_trigger(uintptr_t si)
+{
+	/*
+	 * Set the appropriate cause bit.  serial & net are 1 bit higher than
+	 * clock & bio.  This avoids a branch and is fast.
+	 */
+	const uint32_t int_mask = MIPS_SOFT_INT_MASK_0
+	    << (((SOFTINT_NET_MASK | SOFTINT_SERIAL_MASK) >> si) & 1);
+
+	/*
+	 * Use atomic_or since it's faster than splhigh/splx
+	 */
+	atomic_or_uint(&curcpu()->ci_softints, 1 << si);
+
+	/*
+	 * Now update cause.
+	 */
+	_setsoftintr(int_mask);
+}
+
+#define	SOFTINT_MASK_1	(SOFTINT_SERIAL_MASK | SOFTINT_NET_MASK)
+#define	SOFTINT_MASK_0	(SOFTINT_CLOCK_MASK  | SOFTINT_BIO_MASK)
+
+/*
+ * Helper macro.
+ *
+ * Dispatch a softint and then restart the loop so that higher
+ * priority softints are always done first.
+ */
+#define	DOSOFTINT(level) \
+	if (softints & SOFTINT_##level##_MASK) { \
+		ci->ci_softints ^= SOFTINT_##level##_MASK; \
+		softint_fast_dispatch(ci->ci_softlwps[SOFTINT_##level], \
+		    IPL_SOFT##level); \
+		continue; \
+	}
+
+void
+softint_process(uint32_t ipending)
+{
+	struct cpu_info * const ci = curcpu();
+	u_int mask;
+	int s;
+
+	KASSERT((ipending & MIPS_SOFT_INT_MASK) != 0);
+	KASSERT((ipending & ~MIPS_SOFT_INT_MASK) == 0);
+
+	if (ipending & MIPS_SOFT_INT_MASK_0) {
+		/*
+		 * Since we run at splhigh, also process any pending MASK_1 softints.
+		 */
+		mask = SOFTINT_MASK_1 | SOFTINT_MASK_0;
+		ipending |= MIPS_SOFT_INT_MASK_1;
+	} else {
+		KASSERT(ipending & MIPS_SOFT_INT_MASK_1);
+		mask = SOFTINT_MASK_1;
+	}
+
+	s = splhigh();
+
+	for (;;) {
+		u_int softints = ci->ci_softints & mask;
+		if (softints == 0)
+			break;
+
+		DOSOFTINT(SERIAL);
+		DOSOFTINT(NET);
+		DOSOFTINT(BIO);
+		DOSOFTINT(CLOCK);
+	}
+
+	_clrsoftintr(ipending);
+	splx(s);
+}
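
For illustration only (not part of this commit): the cause-bit selection in
softint_trigger() above maps the four softint levels onto the two MIPS soft
interrupt bits without a branch.  The worked example below assumes the usual
NetBSD numbering (SOFTINT_CLOCK=0, SOFTINT_BIO=1, SOFTINT_NET=2,
SOFTINT_SERIAL=3), which this commit does not itself define, so treat the
concrete values as assumptions.

#include <stdint.h>
#include <stdio.h>

#define MIPS_SOFT_INT_MASK_0	0x0100
#define SOFTINT_NET_MASK	(1 << 2)	/* assumes SOFTINT_NET == 2 */
#define SOFTINT_SERIAL_MASK	(1 << 3)	/* assumes SOFTINT_SERIAL == 3 */

int
main(void)
{
	for (unsigned si = 0; si < 4; si++) {
		/* si 0,1 (clock/bio) -> 0x0100; si 2,3 (net/serial) -> 0x0200 */
		uint32_t int_mask = MIPS_SOFT_INT_MASK_0
		    << (((SOFTINT_NET_MASK | SOFTINT_SERIAL_MASK) >> si) & 1);
		printf("si=%u -> cause bit %#06x\n", si, (unsigned)int_mask);
	}
	return 0;
}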
