Module Name: src Committed By: cliff Date: Thu Jun 10 00:41:43 UTC 2010
Modified Files: src/sys/arch/mips/rmi [matt-nb5-mips64]: rmixl_intr.c Log Message: - remove rmixl_spl_init_cpu calls from rmixl_intr_init_cpu because rmixl_intr_init_cpu only runs on cpu#0. rmixl_spl_init_cpu calls are now done in cpu_rmixl_attach (for cpu#0) or cpu_rmixl_hatch (for other cpus). - in evbmips_iointr, be sure to mask out count/compare interrupt along with softints (these are handled elsewhere) and improve comments - fix bug in rmixl_send_ipi: 'cpus_running' is bit-indexed by ci_index, not ci_cpuid To generate a diff of this commit: cvs rdiff -u -r1.1.2.21 -r1.1.2.22 src/sys/arch/mips/rmi/rmixl_intr.c Please note that diffs are not public domain; they are subject to the copyright notices on the relevant files.
Modified files: Index: src/sys/arch/mips/rmi/rmixl_intr.c diff -u src/sys/arch/mips/rmi/rmixl_intr.c:1.1.2.21 src/sys/arch/mips/rmi/rmixl_intr.c:1.1.2.22 --- src/sys/arch/mips/rmi/rmixl_intr.c:1.1.2.21 Fri May 28 22:14:53 2010 +++ src/sys/arch/mips/rmi/rmixl_intr.c Thu Jun 10 00:41:43 2010 @@ -1,4 +1,4 @@ -/* $NetBSD: rmixl_intr.c,v 1.1.2.21 2010/05/28 22:14:53 cliff Exp $ */ +/* $NetBSD: rmixl_intr.c,v 1.1.2.22 2010/06/10 00:41:43 cliff Exp $ */ /*- * Copyright (c) 2007 Ruslan Ermilov and Vsevolod Lobko. @@ -64,7 +64,7 @@ */ #include <sys/cdefs.h> -__KERNEL_RCSID(0, "$NetBSD: rmixl_intr.c,v 1.1.2.21 2010/05/28 22:14:53 cliff Exp $"); +__KERNEL_RCSID(0, "$NetBSD: rmixl_intr.c,v 1.1.2.22 2010/06/10 00:41:43 cliff Exp $"); #include "opt_ddb.h" #define __INTR_PRIVATE @@ -474,12 +474,9 @@ rmixl_intr_init_cpu(struct cpu_info *ci) { struct rmixl_cpu_softc *sc = (void *)ci->ci_softc; - extern void rmixl_spl_init_cpu(void); KASSERT(sc != NULL); - rmixl_spl_init_cpu(); - for (int vec=0; vec < NINTRVECS; vec++) evcnt_attach_dynamic(&sc->sc_vec_evcnts[vec], EVCNT_TYPE_INTR, NULL, @@ -869,28 +866,39 @@ int vec; asm volatile("dmfc0 %0, $9, 6;" : "=r"(eirr)); + asm volatile("dmfc0 %0, $9, 7;" : "=r"(eimr)); #ifdef IOINTR_DEBUG - asm volatile("dmfc0 %0, $9, 7;" : "=r"(eimr)); printf("%s: eirr %#"PRIx64", eimr %#"PRIx64", mask %#"PRIx64"\n", __func__, eirr, eimr, ipl_eimr_map[ipl-1]); #endif /* IOINTR_DEBUG */ + /* + * reduce eirr to + * - ints that are enabled at or below this ipl + * - exclude count/compare clock and soft ints + * they are handled elsewhere + */ eirr &= ipl_eimr_map[ipl-1]; - eirr &= ~ipl_eimr_map[ipl]; /* mask off higher ints */ - eirr &= ~(MIPS_SOFT_INT_MASK >> 8); /* mask off soft ints */ + eirr &= ~ipl_eimr_map[ipl]; + eirr &= ~((MIPS_INT_MASK_5 | MIPS_SOFT_INT_MASK) >> 8); if (eirr == 0) break; vec = 63 - dclz(eirr); ih = &rmixl_intrhand[vec]; - KASSERT (ih->ih_ipl == ipl); - - asm volatile("dmfc0 %0, $9, 7;" : "=r"(eimr)); - asm volatile("dmtc0 $0, $9, 
7;"); vecbit = 1ULL << vec; + KASSERT (ih->ih_ipl == ipl); KASSERT ((vecbit & eimr) == 0); KASSERT ((vecbit & RMIXL_EIRR_PRESERVE_MASK) == 0); + + /* + * ack in EIRR the irq we are about to handle + * disable all interrupt to prevent a race that would allow + * e.g. softints set from a higher interrupt getting + * clobbered by the EIRR read-modify-write + */ + asm volatile("dmtc0 $0, $9, 7;"); asm volatile("dmfc0 %0, $9, 6;" : "=r"(eirr)); eirr &= RMIXL_EIRR_PRESERVE_MASK; eirr |= vecbit; @@ -929,7 +937,7 @@ uint32_t r; extern volatile u_long cpus_running; - if ((cpus_running & 1 << ci->ci_cpuid) == 0) + if ((cpus_running & 1 << ci->ci_index) == 0) return -1; KASSERT(tag < NIPIS);