Module Name:    src
Committed By:   matt
Date:           Thu Jan 19 08:05:24 UTC 2012

Modified Files:
        src/sys/arch/mips/rmi [matt-nb5-mips64]: rmixl_cpu.c rmixl_cpuvar.h
            rmixl_fmn.c rmixl_intr.c

Log Message:
Change struct rmixl_cpu_softc to cpu_softc and remove the casts the old name required.
Fix IPIs: rmixl_send_ipi() now uses the correct RMIXLP_PIC_IPI_CTRL_MAKE / RMIXL_PIC_IPIBASE_MAKE macros and the RMIXL_INTRVEC_IPI vector.
More FMN cleanup: keep per-CPU FMN event counters in struct cpu_softc instead of a percpu(9) array, and hand received messages off to a softint.
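
For context on the cast removal: once the softc carries the machine-independent name struct cpu_softc, the ci_softc pointer in struct cpu_info can be assigned directly instead of being laundered through void *. Below is a minimal, self-contained sketch of that before/after pattern; the struct definitions are simplified stand-ins for illustration only, not the actual declarations in cpu.h or rmixl_cpuvar.h.

#include <stdio.h>

/* Simplified stand-ins for the kernel structures (illustration only). */
struct cpu_softc {
	int sc_unit;
};

struct cpu_info {
	struct cpu_softc *ci_softc;	/* typed pointer, as the MI code expects */
};

int
main(void)
{
	static struct cpu_softc softc = { .sc_unit = 0 };
	struct cpu_info ci = { .ci_softc = &softc };

	/*
	 * Old style, needed while the softc had an MD-only type name:
	 *	struct rmixl_cpu_softc * const sc = (void *)ci->ci_softc;
	 * New style, no cast once the type names agree:
	 */
	struct cpu_softc * const sc = ci.ci_softc;

	printf("cpu%d softc attached\n", sc->sc_unit);
	return 0;
}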


To generate a diff of this commit:
cvs rdiff -u -r1.1.2.23 -r1.1.2.24 src/sys/arch/mips/rmi/rmixl_cpu.c
cvs rdiff -u -r1.1.2.4 -r1.1.2.5 src/sys/arch/mips/rmi/rmixl_cpuvar.h
cvs rdiff -u -r1.1.2.9 -r1.1.2.10 src/sys/arch/mips/rmi/rmixl_fmn.c
cvs rdiff -u -r1.1.2.34 -r1.1.2.35 src/sys/arch/mips/rmi/rmixl_intr.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/arch/mips/rmi/rmixl_cpu.c
diff -u src/sys/arch/mips/rmi/rmixl_cpu.c:1.1.2.23 src/sys/arch/mips/rmi/rmixl_cpu.c:1.1.2.24
--- src/sys/arch/mips/rmi/rmixl_cpu.c:1.1.2.23	Wed Jan  4 16:17:53 2012
+++ src/sys/arch/mips/rmi/rmixl_cpu.c	Thu Jan 19 08:05:24 2012
@@ -71,7 +71,7 @@ __KERNEL_RCSID(0, "rmixl_cpu.c,v 1.1.2.2
 
 static int	cpu_rmixl_match(device_t, cfdata_t, void *);
 static void	cpu_rmixl_attach(device_t, device_t, void *);
-static void	cpu_rmixl_attach_primary(struct rmixl_cpu_softc * const);
+static void	cpu_rmixl_attach_primary(struct cpu_softc * const);
 #ifdef NOTYET
 static int	cpu_fmn_intr(void *, rmixl_fmn_rxmsg_t *);
 #endif
@@ -92,7 +92,7 @@ struct cpu_info *
 		rmixl_cpuinfo_print(u_int);
 #endif	/* DEBUG */
 
-CFATTACH_DECL_NEW(cpu_rmixl, sizeof(struct rmixl_cpu_softc),
+CFATTACH_DECL_NEW(cpu_rmixl, sizeof(struct cpu_softc),
 	cpu_rmixl_match, cpu_rmixl_attach, NULL, NULL); 
 
 #ifdef MULTIPROCESSOR
@@ -177,7 +177,7 @@ cpu_rmixl_match(device_t parent, cfdata_
 static void
 cpu_rmixl_attach(device_t parent, device_t self, void *aux)
 {
-	struct rmixl_cpu_softc * const sc = device_private(self);
+	struct cpu_softc * const sc = device_private(self);
 	struct cpu_info *ci = NULL;
 	static bool once = false;
 	extern void rmixl_spl_init_cpu(void);
@@ -252,7 +252,7 @@ cpu_rmixl_attach(device_t parent, device
  * attach the primary processor
  */
 static void
-cpu_rmixl_attach_primary(struct rmixl_cpu_softc * const sc)
+cpu_rmixl_attach_primary(struct cpu_softc * const sc)
 {
 	struct cpu_info *ci = sc->sc_ci;
 	uint32_t ebase;
@@ -286,8 +286,8 @@ cpu_rmixl_attach_primary(struct rmixl_cp
 void
 cpu_rmixl_run(struct cpu_info *ci)
 {
-	struct rmixl_cpu_softc * const sc = (void *)ci->ci_softc;
-	rmixl_fmn_init_thead();
+	struct cpu_softc * const sc = ci->ci_softc;
+	rmixl_fmn_init_thread();
 	cpucore_rmixl_run(device_parent(sc->sc_dev));
 }
 
@@ -299,7 +299,7 @@ cpu_rmixl_run(struct cpu_info *ci)
 void
 cpu_rmixl_hatch(struct cpu_info *ci)
 {
-	struct rmixl_cpu_softc * const sc = (void *)ci->ci_softc;
+	struct cpu_softc * const sc = ci->ci_softc;
 	extern void rmixl_spl_init_cpu(void);
 
 	rmixl_spl_init_cpu();	/* spl initialization for this CPU */
@@ -395,7 +395,6 @@ cpu_setup_trampoline_callback(struct cpu
 }
 #endif	/* MULTIPROCESSOR */
 
-
 #ifdef DEBUG
 void
 rmixl_cpu_data_print(struct cpu_data *dp)

Index: src/sys/arch/mips/rmi/rmixl_cpuvar.h
diff -u src/sys/arch/mips/rmi/rmixl_cpuvar.h:1.1.2.4 src/sys/arch/mips/rmi/rmixl_cpuvar.h:1.1.2.5
--- src/sys/arch/mips/rmi/rmixl_cpuvar.h:1.1.2.4	Sat Dec 24 01:57:54 2011
+++ src/sys/arch/mips/rmi/rmixl_cpuvar.h	Thu Jan 19 08:05:24 2012
@@ -1,4 +1,4 @@
-/*	$NetBSD: rmixl_cpuvar.h,v 1.1.2.4 2011/12/24 01:57:54 matt Exp $	*/
+/*	$NetBSD: rmixl_cpuvar.h,v 1.1.2.5 2012/01/19 08:05:24 matt Exp $	*/
 /*-
  * Copyright (c) 2010 The NetBSD Foundation, Inc.
  * All rights reserved.
@@ -31,17 +31,24 @@
 #ifndef _ARCH_MIPS_RMI_RMIXL_CPUVAR_H_
 #define _ARCH_MIPS_RMI_RMIXL_CPUVAR_H_
 
+#include <mips/rmi/rmixl_fmnvar.h>
+
 struct rmixl_cpu_trampoline_args {
 	uint64_t	ta_sp;
 	uint64_t	ta_lwp;
 	uint64_t	ta_cpuinfo;
 };
 
-struct rmixl_cpu_softc {
+struct cpu_softc {
 	device_t sc_dev;
 	struct cpu_info *sc_ci;
+
+	void *sc_fmn_si;		/* fast messaging network softint */
+
 	struct evcnt sc_vec_evcnts[64];
 	struct evcnt sc_irt_evcnts[160];
+	struct evcnt sc_fmn_stid_evcnts[RMIXL_FMN_NSTID];
+	struct evcnt sc_fmn_cpu_evcnts[8];
 };
 
 #endif	/* _ARCH_MIPS_RMI_RMIXL_CPUVAR_H_ */

Index: src/sys/arch/mips/rmi/rmixl_fmn.c
diff -u src/sys/arch/mips/rmi/rmixl_fmn.c:1.1.2.9 src/sys/arch/mips/rmi/rmixl_fmn.c:1.1.2.10
--- src/sys/arch/mips/rmi/rmixl_fmn.c:1.1.2.9	Wed Jan  4 16:17:53 2012
+++ src/sys/arch/mips/rmi/rmixl_fmn.c	Thu Jan 19 08:05:24 2012
@@ -1,4 +1,4 @@
-/*	$NetBSD: rmixl_fmn.c,v 1.1.2.9 2012/01/04 16:17:53 matt Exp $	*/
+/*	$NetBSD: rmixl_fmn.c,v 1.1.2.10 2012/01/19 08:05:24 matt Exp $	*/
 /*-
  * Copyright (c) 2010 The NetBSD Foundation, Inc.
  * All rights reserved.
@@ -35,8 +35,8 @@
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/cpu.h>
-#include <sys/percpu.h>
 #include <sys/atomic.h>
+#include <sys/intr.h>
 
 #include <dev/pci/pcidevs.h>
 
@@ -212,9 +212,10 @@ typedef struct fmn_intrhand {
 typedef struct fmn_info {
 	const fmn_queue_id_t *		fmn_qidtab;
 	const fmn_station_info_t *	fmn_stinfo;
-	percpu_t *			fmn_ev_percpu;
+	const char			(*fmn_bucket_names)[12];
 	size_t				fmn_intr_vec;
-	uint32_t			fmn_nstid;
+	uint8_t				fmn_nstid;
+	uint8_t				fmn_nbucket;	// or vc (XLP)
 	volatile uint32_t		fmn_coremask;
 	volatile uint32_t		fmn_nthread;
 
@@ -224,7 +225,6 @@ typedef struct fmn_info {
 	volatile fmn_intrhand_t		fmn_intrhand[RMIXL_FMN_NSTID];
 } fmn_info_t;
 
-
 static fmn_info_t fmn_info = {
 	.fmn_intrhand = {
 		[0 ... RMIXL_FMN_NSTID-1] = {
@@ -234,6 +234,14 @@ static fmn_info_t fmn_info = {
 	},
 };
 
+static char fmn_stid_ev_names[RMIXL_FMN_NSTID][32];
+
+#if (MIPS64_XLR + MIPS64_XLS) > 0
+static const char xlrxls_bucket_names[8][12] = {
+	"bucket 0", "bucket 1", "bucket 2", "bucket 3",
+	"bucket 4", "bucket 5", "bucket 6", "bucket 7",
+};
+#endif /* (MIPS64_XLR + MIPS64_XLS) > 0 */
 
 #ifdef MIPS64_XLP
 static const struct xlp_stid_map {
@@ -293,6 +301,10 @@ static fmn_station_info_t xlp_xxx_stinfo
 	[RMIXLP_FMN_STID_SRIO]    = { "srio" },
 };
 
+static const char xlp_bucket_names[4][12] = {
+	"vc 0", "vc 1", "vc 2", "vc 3",
+};
+
 static void
 fmn_init_xlp_claim_qids(const fmn_station_info_t * const si, size_t stid)
 {
@@ -306,6 +318,8 @@ fmn_init_xlp_claim_qids(const fmn_statio
 static void
 fmn_init_xlp(fmn_info_t *fmn)
 {
+	fmn->fmn_nbucket = 4;	// 4 VCs per thread
+	fmn->fmn_bucket_names = xlp_bucket_names;
 	fmn->fmn_nstid = RMIXLP_FMN_NSTID;
 	fmn->fmn_qidtab = xlp_xxx_qidtab;
 	fmn->fmn_stinfo = xlp_xxx_stinfo;
@@ -516,6 +530,8 @@ fmn_init_xls(fmn_info_t *fmn)
 	for (size_t i = 0; i < __arraycount(xls_stid_map); i++) {
 		const struct xls_stid_map * const map = &xls_stid_map[i];
 		if (map->map_impl == impl) {
+			fmn->fmn_nbucket = 8;	// 4 buckets per core
+			fmn->fmn_bucket_names = xlrxls_bucket_names;
 			fmn->fmn_nstid = RMIXLS_FMN_NSTID;
 			fmn->fmn_qidtab = map->map_qidtab;
 			fmn->fmn_stinfo = map->map_stinfo;
@@ -554,7 +570,7 @@ static const fmn_queue_id_t xlr_xxx_qidt
  * caution:
  * - the XGMII/SPI4 stations si_regbase are 'special'
  * - the RGMII station si_regbase is 'special'
- */ 
+ */
 static const fmn_station_info_t xlr_xxx_stinfo[RMIXLR_FMN_NSTID] = {
 	[RMIXLR_FMN_STID_CORE0]   = { "core0",	  0,   7, 8,  32, 4, 0 },
 	[RMIXLR_FMN_STID_CORE1]   = { "core1",	  8,  15, 8,  32, 4, 0 },
@@ -579,6 +595,8 @@ static const fmn_station_info_t xlr_xxx_
 static void
 fmn_init_xlr(fmn_info_t *fmn)
 {
+	fmn->fmn_nbucket = 8;	// 4 buckets per core
+	fmn->fmn_bucket_names = xlrxls_bucket_names;
 	fmn->fmn_nstid = RMIXLR_FMN_NSTID;
 	fmn->fmn_qidtab = xlr_xxx_qidtab;
 	fmn->fmn_stinfo = xlr_xxx_stinfo;
@@ -614,7 +632,7 @@ fmn_init_noncore_xlrxls(fmn_info_t *fmn)
 /*
  * fmn_init_thread_xlrxls
  *
- *	- configure FMN 
+ *	- configure FMN
  *	- initialize bucket sizes and (minimum) credits for a core
  */
 static void
@@ -702,7 +720,7 @@ fmn_init_thread_xlrxls(fmn_info_t *fmn)
 	FMN_COP2_8SEL_WRITE(RMIXL_COP_2_CREDITS+12,    0);
 	FMN_COP2_8SEL_WRITE(RMIXL_COP_2_CREDITS+13,    0);
 	FMN_COP2_8SEL_WRITE(RMIXL_COP_2_CREDITS+14,    0);
-	FMN_COP2_8SEL_WRITE(RMIXL_COP_2_CREDITS+15,    0); 
+	FMN_COP2_8SEL_WRITE(RMIXL_COP_2_CREDITS+15,    0);
 
 	sts1 = mips_mfc2(RMIXL_COP_2_MSG_STS, 1);
 	KASSERT((sts1 & RMIXL_MSG_STS1_ERRS) == 0);
@@ -711,26 +729,11 @@ fmn_init_thread_xlrxls(fmn_info_t *fmn)
 }
 #endif /* (MIPS64_XLR + MIPS64_XLS) > 0 */
 
-static inline struct evcnt *
-fmn_ev_getref(void)
-{
-	struct evcnt * const ev = percpu_getref(fmn_info.fmn_ev_percpu);
-	KASSERT(ev != NULL);
-
-	return ev;
-}
-
-static inline void
-fmn_ev_putref(struct evcnt *ev)
-{
-	percpu_putref(fmn_info.fmn_ev_percpu);
-}
-
-
 #if (MIPS64_XLP) > 0
 static void	fmn_init_thread_xlp(fmn_info_t *);
 #endif
-static int	fmn_intr_dispatch(void *);
+static int	fmn_intr(void *);
+static void	fmn_softint(void *);
 
 #ifdef FMN_DEBUG
 void	rmixl_fmn_cp2_dump(void);
@@ -743,7 +746,7 @@ void	rmixl_fmn_cc_dump(void);
 void
 rmixl_fmn_init(void)
 {
-	fmn_info_t * const fmn = &fmn_info; 
+	fmn_info_t * const fmn = &fmn_info;
 
 	/*
 	 * do chip-dependent FMN initialization
@@ -768,40 +771,49 @@ rmixl_fmn_init(void)
 		panic("%s: RMI chip type %#x unknown", __func__,
 			cpu_rmixl_chip_type(mips_options.mips_cpu));
 	}
-
-	fmn->fmn_ev_percpu = percpu_alloc(fmn->fmn_nstid*sizeof(struct evcnt));
-	KASSERT(fmn->fmn_ev_percpu != NULL);
 }
 
-/*
- * This must be done in the context of the thread itself.
- */
 void
-rmixl_fmn_init_thread(void)
+rmixl_fmn_cpu_attach(struct cpu_info *ci)
 {
 	fmn_info_t * const fmn = &fmn_info;
-	struct cpu_info * const ci = curcpu();
-	struct rmixl_cpu_softc * const sc = (void *)ci->ci_softc;
+	struct cpu_softc * const sc = ci->ci_softc;
+
+	KASSERT(sc->sc_fmn_si == NULL);
+	sc->sc_fmn_si = softint_establish(SOFTINT_NET, fmn_softint, sc);
 
 	KASSERT(sc->sc_dev != NULL);
 
 	const char * const xname = device_xname(sc->sc_dev);
 	KASSERT(xname != NULL);
 
-	struct evcnt * const ev = fmn_ev_getref();
-	KASSERT(ev != NULL);
+	for (size_t i = 1; i < fmn_info.fmn_nstid; i++) {
+		evcnt_attach_dynamic(&sc->sc_fmn_stid_evcnts[i],
+		    EVCNT_TYPE_MISC, NULL, xname, fmn_stid_ev_names[i]);
+	}
+
+	for (size_t i = 0; i < fmn_info.fmn_nbucket; i++) {
+		evcnt_attach_dynamic(&sc->sc_fmn_cpu_evcnts[i],
+		    EVCNT_TYPE_MISC, NULL, xname, fmn->fmn_bucket_names[i]);
+	}
+}
+
+/*
+ * This must be done in the context of the thread itself.
+ */
+void
+rmixl_fmn_init_thread(void)
+{
+	fmn_info_t * const fmn = &fmn_info;
+	struct cpu_info * const ci = curcpu();
 
 	KASSERT(fmn->fmn_stinfo[0].si_name == NULL);
 	for (size_t i = 1; i < fmn_info.fmn_nstid; i++) {
 		KASSERT(fmn->fmn_stinfo[i].si_name != NULL);
-#if 0
-		evcnt_attach_dynamic(&ev[i], EVCNT_TYPE_INTR, NULL,
-		    xname, fmn->fmn_stinfo[i].si_name);
-#endif
+		snprintf(fmn_stid_ev_names[i], sizeof(fmn_stid_ev_names[i]),
+		    "fmn %s rx msgs", fmn->fmn_stinfo[i].si_name);
 	}
 
-	fmn_ev_putref(ev);
-
 	if (CPU_IS_PRIMARY(ci)) {
 		KASSERT(rmixl_intr_lock != NULL);
 		/*
@@ -810,7 +822,7 @@ rmixl_fmn_init_thread(void)
 		mutex_enter(rmixl_intr_lock);
 		fmn->fmn_intr_vec = rmixl_intr_get_vec(IPL_VM);
 		void * const ih = rmixl_vec_establish(fmn->fmn_intr_vec, NULL,
-		    IPL_VM, fmn_intr_dispatch, NULL, true);
+		    IPL_VM, fmn_intr, NULL, true);
 		if (ih == NULL)
 			panic("%s: rmixl_vec_establish failed", __func__);
 		mutex_exit(rmixl_intr_lock);
@@ -905,33 +917,74 @@ rmixl_fmn_intr_poll(u_int bucket, rmixl_
 	rmixl_cp2_restore(cp0_status);
 }
 
+size_t
+rmixl_fmn_qid_to_stid(size_t qid)
+{
+	return fmn_info.fmn_qidtab[qid];
+}
+
+const char *
+rmixl_fmn_stid_name(size_t stid)
+{
+	KASSERT(stid != 0);
+	KASSERT(stid < fmn_info.fmn_nstid);
+	return fmn_info.fmn_stinfo[stid].si_name;
+}
+
 static int
-fmn_intr_dispatch(void *arg)
+fmn_intr(void *arg)
+{
+	const bool is_xlp_p = cpu_rmixlp(mips_options.mips_cpu);
+	struct cpu_softc * const sc = curcpu()->ci_softc;
+
+	softint_schedule(sc->sc_fmn_si);
+	if (!is_xlp_p) {
+		/*
+		 * On the XLR and XLS, we can only stop interrupts on a per
+		 * core basis but then there are no per-thread resources so
+		 * it doesn't really hurt.
+		 */
+		const uint32_t cp0_status = rmixl_cp2_enable();
+		uint32_t msg_cfg = mips_mfc2(RMIXL_COP_2_MSG_CFG, 0);
+		msg_cfg &= ~(RMIXL_MSG_CFG0_EIE|RMIXL_MSG_CFG0_WIE);
+		mips_mtc2(RMIXL_COP_2_MSG_CFG, 0, msg_cfg);
+		rmixl_cp2_restore(cp0_status);
+	}
+	return 1;
+}
+
+static void
+fmn_softint(void *arg)
 {
 	const bool is_xlp_p = cpu_rmixlp(mips_options.mips_cpu);
 	fmn_info_t * const fmn = &fmn_info;
-	uint32_t mask;
-	int rv = 0;
+	struct cpu_softc * const sc = curcpu()->ci_softc;
+	uint32_t mask = 0;
+	uint32_t processed = 0;
 
 	const uint32_t cp0_status = rmixl_cp2_enable();
 
-	if (is_xlp_p) {
-		uint32_t msg_rx_sts = mips_mfc2(RMIXLP_COP_2_MSG_RX_STS, 0);
-		mask = __SHIFTOUT(~msg_rx_sts, RMIXLP_MSG_RX_STS_RXQVCE);
-	} else {
-		uint32_t msg_sts = mips_mfc2(RMIXL_COP_2_MSG_STS, 0);
-		mask = __SHIFTOUT(~msg_sts, RMIXL_MSG_STS0_RFBE);
-	}
+	for (;;) {
+		if (mask == 0) {
+			if (is_xlp_p) {
+				mask = __SHIFTOUT(
+				    ~mips_mfc2(RMIXLP_COP_2_MSG_RX_STS,0),
+				    RMIXLP_MSG_RX_STS_RXQVCE);
+			} else {
+				mask = __SHIFTOUT(
+				    ~mips_mfc2(RMIXL_COP_2_MSG_STS,0),
+				    RMIXL_MSG_STS0_RFBE);
+			}
+			if (mask == 0)
+				break;
+		}
 
-	if (mask != 0) {
 		DPRINTF(("%s: non-empty q-mask %#x\n", __func__, mask));
-	}
 
-	struct evcnt * const ev = fmn_ev_getref(); // acquire per-cpu counters
+		const u_int rxq = ffs(mask) - 1;
+		processed = (1 << rxq);
 
-	for (u_int rxq=0; mask != 0; rxq++, mask >>= 1) {
-		if ((mask & 1) == 0)
-			continue;
+		sc->sc_fmn_cpu_evcnts[rxq].ev_count++;
 
 		rmixl_fmn_rxmsg_t rxmsg;
 		if (!rmixl_fmn_msg_recv(rxq, &rxmsg))
@@ -939,20 +992,29 @@ fmn_intr_dispatch(void *arg)
 
 		const size_t txstid = fmn->fmn_qidtab[rxmsg.rxsid];
 		volatile fmn_intrhand_t * const ih = &fmn->fmn_intrhand[txstid];
+
 		membar_consumer(); // make sure arg is loaded before func
 		void * const ih_arg = ih->ih_arg;
+
 		membar_consumer(); // make sure arg is loaded before func
 		rmixl_fmn_intr_handler_t ih_func = ih->ih_func;
+
 		if ((*ih_func)(ih_arg, &rxmsg) != 0)
-			ev[txstid].ev_count++;
-		rv = 1;
+			sc->sc_fmn_stid_evcnts[txstid].ev_count++;
 	}
 
-	fmn_ev_putref(ev);			// release per-cpu counters
+	if (is_xlp_p) {
+		/*
+		 * We need to set VC_PEND again so interrupts can get posted
+		 * for the VCs we processed.  This register is global for all
+		 * threads on a core so only ACK the VCs we processed.
+		 */
+		uint32_t msg_sts1 = mips_mfc2(RMIXLP_COP_2_MSG_STS1, 0);
+		msg_sts1 |= __SHIFTOUT(processed, RMIXLP_MSG_STS1_VC_PEND);
+		mips_mtc2(RMIXLP_COP_2_MSG_STS1, 0, msg_sts1);
+	}
 
 	rmixl_cp2_restore(cp0_status);
-
-	return rv;
 }
 
 static bool
@@ -977,7 +1039,7 @@ rmixl_fmn_msg_send(u_int size, u_int cod
 {
 	const bool is_xlp_p = cpu_rmixlp(mips_options.mips_cpu);
 	bool rv = false;	/* assume failure */
-    
+
 	KASSERT(1 <= size && size <= 4);
 	KASSERT(code < 0x100);
 	KASSERT(dest_id < (is_xlp_p ? 0x1000 : 0x80));
@@ -1107,7 +1169,7 @@ fmn_msgld_rmixl(u_int rxq, rmixl_fmn_rxm
 /*
  * rmixl_fmn_msg_recv
  *
- *	- assume cp2 access is already enabled 
+ *	- assume cp2 access is already enabled
  */
 bool
 rmixl_fmn_msg_recv(u_int rxq, rmixl_fmn_rxmsg_t *rxmsg)

Index: src/sys/arch/mips/rmi/rmixl_intr.c
diff -u src/sys/arch/mips/rmi/rmixl_intr.c:1.1.2.34 src/sys/arch/mips/rmi/rmixl_intr.c:1.1.2.35
--- src/sys/arch/mips/rmi/rmixl_intr.c:1.1.2.34	Wed Jan  4 16:17:53 2012
+++ src/sys/arch/mips/rmi/rmixl_intr.c	Thu Jan 19 08:05:24 2012
@@ -1,4 +1,4 @@
-/*	$NetBSD: rmixl_intr.c,v 1.1.2.34 2012/01/04 16:17:53 matt Exp $	*/
+/*	$NetBSD: rmixl_intr.c,v 1.1.2.35 2012/01/19 08:05:24 matt Exp $	*/
 
 /*-
  * Copyright (c) 2007 Ruslan Ermilov and Vsevolod Lobko.
@@ -64,7 +64,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: rmixl_intr.c,v 1.1.2.34 2012/01/04 16:17:53 matt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: rmixl_intr.c,v 1.1.2.35 2012/01/19 08:05:24 matt Exp $");
 
 #include "opt_ddb.h"
 #include "opt_multiprocessor.h"
@@ -724,6 +724,7 @@ rmixl_intrvec_t rmixl_intrvec[NINTRVECS]
 	[0 ... NINTRVECS-1] = {
 		.iv_intrhand = {
 			.ih_func = rmixl_stray_intr,
+			.ih_arg = rmixl_stray_intr,
 		},
 	},
 };
@@ -731,6 +732,7 @@ rmixl_intrvec_t rmixl_intrvec[NINTRVECS]
 rmixl_intrhand_t rmixl_irt_intrhands[RMIXL_NIRTS] = {
 	[0 ... RMIXL_NIRTS-1] = {
 		.ih_func = rmixl_stray_intr,
+		.ih_arg = rmixl_stray_intr,
 	},
 };
 static u_int rmixl_nirts;
@@ -944,7 +946,7 @@ rmixl_intr_init_ipi(void)
 void
 rmixl_intr_init_cpu(struct cpu_info *ci)
 {
-	struct rmixl_cpu_softc * const sc = (void *)ci->ci_softc;
+	struct cpu_softc * const sc = ci->ci_softc;
 	const char * xname = device_xname(sc->sc_dev);
 
 	KASSERT(sc != NULL);
@@ -1304,6 +1306,7 @@ rmixl_vec_disestablish(void *cookie)
 	KASSERT(mutex_owned(rmixl_intr_lock));
 	KASSERT(vec < NINTRVECS);
 	KASSERT(ih->ih_func != rmixl_stray_intr);
+	KASSERT(ih->ih_arg != rmixl_stray_intr);
 	KASSERT(IPL_VM <= iv->iv_ipl && iv->iv_ipl <= IPL_HIGH);
 
 	LIST_REMOVE(ih, ih_link);
@@ -1318,7 +1321,7 @@ rmixl_vec_disestablish(void *cookie)
 
 	ih->ih_vec = 0;
 	ih->ih_mpsafe = false;
-	ih->ih_arg = NULL;
+	ih->ih_arg = rmixl_stray_intr;
 
 	/*
 	 * If this vector isn't servicing any interrupts, then check to
@@ -1370,7 +1373,7 @@ rmixl_intr_disestablish(void *cookie)
 void
 evbmips_iointr(int ipl, vaddr_t pc, uint32_t pending)
 {
-	struct rmixl_cpu_softc * const sc = (void *)curcpu()->ci_softc;
+	struct cpu_softc * const sc = curcpu()->ci_softc;
 	const bool is_xlp_p = cpu_rmixlp(mips_options.mips_cpu);
 
 	DPRINTF(("%s: cpu%u: ipl %d, pc %#"PRIxVADDR", pending %#x\n",
@@ -1462,13 +1465,13 @@ rmixl_send_ipi(struct cpu_info *ci, int 
 	KASSERT(tag >= 0 && tag < NIPIS);
 
 	if (is_xlp_p) {
-		r = RMXLP_PIC_IPI_CTRL_MAKE(0, __BIT(cpuid & 15),
-		   RMIXL_INTERVEC_IPI + tag);
+		r = RMIXLP_PIC_IPI_CTRL_MAKE(0, __BIT(cpuid & 15),
+		   RMIXL_INTRVEC_IPI + tag);
 	} else {
 		const uint32_t core = (uint32_t)(cpuid >> 2);
 		const uint32_t thread = (uint32_t)(cpuid & __BITS(1,0));
-		r = RMXLP_PIC_IPI_CTRL_MAKE(0, core, thread,
-		   RMIXL_INTERVEC_IPI + tag);
+		r = RMIXL_PIC_IPIBASE_MAKE(0, core, thread,
+		   RMIXL_INTRVEC_IPI + tag);
 	}
 
 	mutex_enter(rmixl_ipi_lock);
