Module Name: src Committed By: matt Date: Wed Jun 10 22:31:00 UTC 2015
Modified Files: src/sys/arch/evbmips/cavium: machdep.c src/sys/arch/mips/cavium: octeon_cpunode.c octeonvar.h src/sys/arch/mips/include: cpu.h pmap.h types.h src/sys/arch/mips/mips: cpu_subr.c ipifuncs.c locore_octeon.S pmap.c pmap_tlb.c src/sys/arch/mips/rmi: rmixl_cpu.c rmixl_intr.c Removed Files: src/sys/arch/mips/include: cpuset.h Log Message: Transition from __cpuset_t to kcpuset_t *. This brings the local pmap one step closer to uvm/pmap, its eventual replacement. Tested on ERLITE MP kernel. To generate a diff of this commit: cvs rdiff -u -r1.4 -r1.5 src/sys/arch/evbmips/cavium/machdep.c cvs rdiff -u -r1.4 -r1.5 src/sys/arch/mips/cavium/octeon_cpunode.c cvs rdiff -u -r1.3 -r1.4 src/sys/arch/mips/cavium/octeonvar.h cvs rdiff -u -r1.115 -r1.116 src/sys/arch/mips/include/cpu.h cvs rdiff -u -r1.3 -r0 src/sys/arch/mips/include/cpuset.h cvs rdiff -u -r1.64 -r1.65 src/sys/arch/mips/include/pmap.h cvs rdiff -u -r1.56 -r1.57 src/sys/arch/mips/include/types.h cvs rdiff -u -r1.23 -r1.24 src/sys/arch/mips/mips/cpu_subr.c cvs rdiff -u -r1.9 -r1.10 src/sys/arch/mips/mips/ipifuncs.c cvs rdiff -u -r1.6 -r1.7 src/sys/arch/mips/mips/locore_octeon.S cvs rdiff -u -r1.214 -r1.215 src/sys/arch/mips/mips/pmap.c cvs rdiff -u -r1.8 -r1.9 src/sys/arch/mips/mips/pmap_tlb.c cvs rdiff -u -r1.7 -r1.8 src/sys/arch/mips/rmi/rmixl_cpu.c cvs rdiff -u -r1.8 -r1.9 src/sys/arch/mips/rmi/rmixl_intr.c Please note that diffs are not public domain; they are subject to the copyright notices on the relevant files.
Modified files: Index: src/sys/arch/evbmips/cavium/machdep.c diff -u src/sys/arch/evbmips/cavium/machdep.c:1.4 src/sys/arch/evbmips/cavium/machdep.c:1.5 --- src/sys/arch/evbmips/cavium/machdep.c:1.4 Thu Jun 4 05:21:09 2015 +++ src/sys/arch/evbmips/cavium/machdep.c Wed Jun 10 22:31:00 2015 @@ -1,4 +1,4 @@ -/* $NetBSD: machdep.c,v 1.4 2015/06/04 05:21:09 matt Exp $ */ +/* $NetBSD: machdep.c,v 1.5 2015/06/10 22:31:00 matt Exp $ */ /* * Copyright 2001, 2002 Wasabi Systems, Inc. @@ -111,8 +111,10 @@ * from: Utah Hdr: machdep.c 1.63 91/04/24 */ +#include "opt_multiprocessor.h" + #include <sys/cdefs.h> -__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.4 2015/06/04 05:21:09 matt Exp $"); +__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.5 2015/06/10 22:31:00 matt Exp $"); #include <sys/param.h> #include <sys/systm.h> @@ -172,8 +174,6 @@ int netboot; phys_ram_seg_t mem_clusters[VM_PHYSSEG_MAX]; int mem_cluster_cnt; - -void configure(void); void mach_init(uint64_t, uint64_t, uint64_t, uint64_t); struct octeon_config octeon_configuration; @@ -233,6 +233,7 @@ mach_init(uint64_t arg0, uint64_t arg1, mips_init_lwp0_uarea(); boothowto = RB_AUTOBOOT; + boothowto |= AB_VERBOSE; #if defined(DDB) if (boothowto & RB_KDB) @@ -266,7 +267,7 @@ mach_init_vector(void) { /* Make sure exception base at 0 (MIPS_COP_0_EBASE) */ - asm volatile("mtc0 %0, $15, 1" : : "r"(0x80000000) ); + __asm __volatile("mtc0 %0, $15, 1" : : "r"(0x80000000) ); /* * Set up the exception vectors and CPU-specific function @@ -275,11 +276,7 @@ mach_init_vector(void) * first printf() after that is called). * Also clears the I+D caches. */ -#if MULTIPROCESSOR mips_vector_init(NULL, true); -#else - mips_vector_init(NULL, false); -#endif } void @@ -380,6 +377,11 @@ int waittime = -1; void cpu_startup(void) { +#ifdef MULTIPROCESSOR + // Create a kcpuset so we can see on which CPUs the kernel was started. + kcpuset_create(&cpus_booted, true); +#endif + /* * Do the common startup items. 
*/ Index: src/sys/arch/mips/cavium/octeon_cpunode.c diff -u src/sys/arch/mips/cavium/octeon_cpunode.c:1.4 src/sys/arch/mips/cavium/octeon_cpunode.c:1.5 --- src/sys/arch/mips/cavium/octeon_cpunode.c:1.4 Tue Jun 9 12:10:08 2015 +++ src/sys/arch/mips/cavium/octeon_cpunode.c Wed Jun 10 22:31:00 2015 @@ -40,6 +40,7 @@ __KERNEL_RCSID(0, "$NetBSD"); #include <sys/device.h> #include <sys/lwp.h> #include <sys/cpu.h> +#include <sys/atomic.h> #include <sys/wdog.h> #include <uvm/uvm.h> @@ -47,7 +48,6 @@ __KERNEL_RCSID(0, "$NetBSD"); #include <dev/sysmon/sysmonvar.h> #include <mips/cache.h> -#include <mips/cpuset.h> #include <mips/mips_opcode.h> #include <mips/mips3_clock.h> @@ -78,7 +78,7 @@ CFATTACH_DECL_NEW(cpunode, sizeof(struct CFATTACH_DECL_NEW(cpu_cpunode, 0, cpu_cpunode_match, cpu_cpunode_attach, NULL, NULL); -volatile __cpuset_t cpus_booted = 1; +kcpuset_t *cpus_booted; void octeon_reset_vector(void); @@ -123,7 +123,9 @@ cpunode_mainbus_attach(device_t parent, if (cvmctl & CP0_CVMCTL_REPUN) aprint_normal(", unaligned-access ok"); #ifdef MULTIPROCESSOR - aprint_normal(", booted %#" PRIx64, cpus_booted); + uint32_t booted[1]; + kcpuset_export_u32(cpus_booted, booted, sizeof(booted)); + aprint_normal(", booted %#" PRIx32, booted[0]); #endif aprint_normal("\n"); @@ -290,7 +292,7 @@ cpu_cpunode_attach(device_t parent, devi } #ifdef MULTIPROCESSOR KASSERTMSG(cpunum == 1, "cpunum %d", cpunum); - if (!CPUSET_HAS_P(cpus_booted, cpunum)) { + if (!kcpuset_isset(cpus_booted, cpunum)) { aprint_naive(" disabled\n"); aprint_normal(" disabled (unresponsive)\n"); return; @@ -303,10 +305,10 @@ cpu_cpunode_attach(device_t parent, devi cpu_cpunode_attach_common(self, ci); KASSERT(ci->ci_data.cpu_idlelwp != NULL); - for (int i = 0; i < 100 && !CPUSET_HAS_P(cpus_hatched, cpunum); i++) { + for (int i = 0; i < 100 && !kcpuset_isset(cpus_hatched, cpunum); i++) { delay(10000); } - if (!CPUSET_HAS_P(cpus_hatched, cpunum)) { + if (!kcpuset_isset(cpus_hatched, cpunum)) { #ifdef DDB 
aprint_verbose_dev(self, "hatch failed ci=%p flags=%#"PRIx64"\n", ci, ci->ci_flags); cpu_Debugger(); Index: src/sys/arch/mips/cavium/octeonvar.h diff -u src/sys/arch/mips/cavium/octeonvar.h:1.3 src/sys/arch/mips/cavium/octeonvar.h:1.4 --- src/sys/arch/mips/cavium/octeonvar.h:1.3 Sat Jun 6 20:52:16 2015 +++ src/sys/arch/mips/cavium/octeonvar.h Wed Jun 10 22:31:00 2015 @@ -1,4 +1,4 @@ -/* $NetBSD: octeonvar.h,v 1.3 2015/06/06 20:52:16 matt Exp $ */ +/* $NetBSD: octeonvar.h,v 1.4 2015/06/10 22:31:00 matt Exp $ */ /*- * Copyright (c) 2001 The NetBSD Foundation, Inc. @@ -34,6 +34,7 @@ #include <sys/bus.h> #include <sys/evcnt.h> +#include <sys/kcpuset.h> #include <mips/locore.h> #include <dev/pci/pcivar.h> @@ -224,6 +225,7 @@ struct octeon_fau_map { #ifdef _KERNEL extern struct octeon_config octeon_configuration; #ifdef MULTIPROCESSOR +extern kcpuset_t *cpus_booted; extern struct cpu_softc octeon_cpu1_softc; #endif Index: src/sys/arch/mips/include/cpu.h diff -u src/sys/arch/mips/include/cpu.h:1.115 src/sys/arch/mips/include/cpu.h:1.116 --- src/sys/arch/mips/include/cpu.h:1.115 Sun Jun 7 06:07:49 2015 +++ src/sys/arch/mips/include/cpu.h Wed Jun 10 22:31:00 2015 @@ -1,4 +1,4 @@ -/* $NetBSD: cpu.h,v 1.115 2015/06/07 06:07:49 matt Exp $ */ +/* $NetBSD: cpu.h,v 1.116 2015/06/10 22:31:00 matt Exp $ */ /*- * Copyright (c) 1992, 1993 @@ -55,6 +55,7 @@ #include <sys/cpu_data.h> #include <sys/device_if.h> #include <sys/evcnt.h> +#include <sys/kcpuset.h> typedef struct cpu_watchpoint { register_t cw_addr; @@ -460,9 +461,9 @@ extern struct mips_options mips_options; void cpu_broadcast_ipi(int); /* - * Send an inter-processor interupt to CPUs in cpuset (excludes curcpu()) + * Send an inter-processor interupt to CPUs in kcpuset (excludes curcpu()) */ -void cpu_multicast_ipi(__cpuset_t, int); +void cpu_multicast_ipi(const kcpuset_t *, int); /* * Send an inter-processor interupt to another CPU. 
@@ -560,16 +561,16 @@ void cpu_halt(void); void cpu_halt_others(void); void cpu_pause(struct reg *); void cpu_pause_others(void); -void cpu_resume(int); +void cpu_resume(cpuid_t); void cpu_resume_others(void); -int cpu_is_paused(int); +bool cpu_is_paused(cpuid_t); void cpu_debug_dump(void); -extern volatile __cpuset_t cpus_running; -extern volatile __cpuset_t cpus_hatched; -extern volatile __cpuset_t cpus_paused; -extern volatile __cpuset_t cpus_resumed; -extern volatile __cpuset_t cpus_halted; +extern kcpuset_t *cpus_running; +extern kcpuset_t *cpus_hatched; +extern kcpuset_t *cpus_paused; +extern kcpuset_t *cpus_resumed; +extern kcpuset_t *cpus_halted; #endif /* copy.S */ Index: src/sys/arch/mips/include/pmap.h diff -u src/sys/arch/mips/include/pmap.h:1.64 src/sys/arch/mips/include/pmap.h:1.65 --- src/sys/arch/mips/include/pmap.h:1.64 Sun Jun 7 06:07:49 2015 +++ src/sys/arch/mips/include/pmap.h Wed Jun 10 22:31:00 2015 @@ -1,4 +1,4 @@ -/* $NetBSD: pmap.h,v 1.64 2015/06/07 06:07:49 matt Exp $ */ +/* $NetBSD: pmap.h,v 1.65 2015/06/10 22:31:00 matt Exp $ */ /* * Copyright (c) 1992, 1993 @@ -79,9 +79,9 @@ #endif #include <sys/evcnt.h> +#include <sys/kcpuset.h> #include <mips/cpuregs.h> /* for KSEG0 below */ -//#include <mips/pte.h> /* * The user address space is 2Gb (0x0 - 0x80000000). @@ -174,8 +174,8 @@ struct pmap_asid_info { */ struct pmap { #ifdef MULTIPROCESSOR - volatile uint32_t pm_active; /* pmap was active on ... */ - volatile uint32_t pm_onproc; /* pmap is active on ... */ + kcpuset_t *pm_active; /* pmap was active on ... */ + kcpuset_t *pm_onproc; /* pmap is active on ... 
*/ volatile u_int pm_shootdown_pending; #endif union segtab *pm_segtab; /* pointers to pages of PTEs */ @@ -207,7 +207,7 @@ struct pmap_tlb_info { #ifdef MULTIPROCESSOR pmap_t ti_victim; uint32_t ti_synci_page_bitmap; /* page indices needing a syncicache */ - uint32_t ti_cpu_mask; /* bitmask of CPUs sharing this TLB */ + kcpuset_t *ti_kcpuset; /* bitmask of CPUs sharing this TLB */ enum tlb_invalidate_op ti_tlbinvop; u_int ti_index; #define tlbinfo_index(ti) ((ti)->ti_index) @@ -264,7 +264,7 @@ bool pmap_tlb_shootdown_bystanders(pmap_ void pmap_tlb_info_attach(struct pmap_tlb_info *, struct cpu_info *); void pmap_tlb_syncicache_ast(struct cpu_info *); void pmap_tlb_syncicache_wanted(struct cpu_info *); -void pmap_tlb_syncicache(vaddr_t, uint32_t); +void pmap_tlb_syncicache(vaddr_t, const kcpuset_t *); #endif void pmap_tlb_info_init(struct pmap_tlb_info *); void pmap_tlb_info_evcnt_attach(struct pmap_tlb_info *); Index: src/sys/arch/mips/include/types.h diff -u src/sys/arch/mips/include/types.h:1.56 src/sys/arch/mips/include/types.h:1.57 --- src/sys/arch/mips/include/types.h:1.56 Sat Jun 6 17:45:49 2015 +++ src/sys/arch/mips/include/types.h Wed Jun 10 22:31:00 2015 @@ -1,4 +1,4 @@ -/* $NetBSD: types.h,v 1.56 2015/06/06 17:45:49 macallan Exp $ */ +/* $NetBSD: types.h,v 1.57 2015/06/10 22:31:00 matt Exp $ */ /*- * Copyright (c) 1992, 1993 @@ -131,15 +131,6 @@ typedef struct label_t { #define PCU_UNIT_COUNT 2 #endif -#if defined(__mips_o32) -typedef __uint32_t __cpuset_t; -#define __CPUSET_MAXNUMCPU 32 -#define PRIxCPUSET PRIx32 -#else -typedef __uint64_t __cpuset_t; -#define __CPUSET_MAXNUMCPU 64 -#define PRIxCPUSET PRIx64 -#endif typedef volatile unsigned int __cpu_simple_lock_t; #define __SIMPLELOCK_LOCKED 1 Index: src/sys/arch/mips/mips/cpu_subr.c diff -u src/sys/arch/mips/mips/cpu_subr.c:1.23 src/sys/arch/mips/mips/cpu_subr.c:1.24 --- src/sys/arch/mips/mips/cpu_subr.c:1.23 Sat Jun 6 21:03:45 2015 +++ src/sys/arch/mips/mips/cpu_subr.c Wed Jun 10 22:31:00 2015 @@ 
-1,4 +1,4 @@ -/* $NetBSD: cpu_subr.c,v 1.23 2015/06/06 21:03:45 matt Exp $ */ +/* $NetBSD: cpu_subr.c,v 1.24 2015/06/10 22:31:00 matt Exp $ */ /*- * Copyright (c) 2010 The NetBSD Foundation, Inc. @@ -30,7 +30,7 @@ */ #include <sys/cdefs.h> -__KERNEL_RCSID(0, "$NetBSD: cpu_subr.c,v 1.23 2015/06/06 21:03:45 matt Exp $"); +__KERNEL_RCSID(0, "$NetBSD: cpu_subr.c,v 1.24 2015/06/10 22:31:00 matt Exp $"); #include "opt_cputype.h" #include "opt_ddb.h" @@ -47,6 +47,7 @@ __KERNEL_RCSID(0, "$NetBSD: cpu_subr.c,v #include <sys/bitops.h> #include <sys/idle.h> #include <sys/xcall.h> +#include <sys/kernel.h> #include <sys/ipi.h> #include <uvm/uvm.h> @@ -58,7 +59,6 @@ __KERNEL_RCSID(0, "$NetBSD: cpu_subr.c,v #include <mips/frame.h> #include <mips/userret.h> #include <mips/pte.h> -#include <mips/cpuset.h> #if defined(DDB) || defined(KGDB) #ifdef DDB @@ -106,14 +106,13 @@ struct cpu_info * cpuid_infos[MAXCPUS] = [0] = &cpu_info_store, }; -volatile __cpuset_t cpus_running = 1; -volatile __cpuset_t cpus_hatched = 1; -volatile __cpuset_t cpus_paused = 0; -volatile __cpuset_t cpus_resumed = 0; -volatile __cpuset_t cpus_halted = 0; +kcpuset_t *cpus_halted; +kcpuset_t *cpus_hatched; +kcpuset_t *cpus_paused; +kcpuset_t *cpus_resumed; +kcpuset_t *cpus_running; -static int cpu_ipi_wait(volatile __cpuset_t *, u_long); -static void cpu_ipi_error(const char *, __cpuset_t, __cpuset_t); +static void cpu_ipi_wait(const char *, const kcpuset_t *, const kcpuset_t *); struct cpu_info * cpu_info_alloc(struct pmap_tlb_info *ti, cpuid_t cpu_id, cpuid_t cpu_package_id, @@ -307,6 +306,21 @@ cpu_startup_common(void) pmap_tlb_info_evcnt_attach(&pmap_tlb0_info); +#ifdef MULTIPROCESSOR + kcpuset_create(&cpus_halted, true); + KASSERT(cpus_halted != NULL); + kcpuset_create(&cpus_hatched, true); + KASSERT(cpus_hatched != NULL); + kcpuset_create(&cpus_paused, true); + KASSERT(cpus_paused != NULL); + kcpuset_create(&cpus_resumed, true); + KASSERT(cpus_resumed != NULL); + kcpuset_create(&cpus_running, true); + 
KASSERT(cpus_running != NULL); + kcpuset_set(cpus_hatched, cpu_number()); + kcpuset_set(cpus_running, cpu_number()); +#endif + cpu_hwrena_setup(); /* @@ -637,26 +651,26 @@ cpu_intr_p(void) void cpu_broadcast_ipi(int tag) { - (void)cpu_multicast_ipi( - CPUSET_EXCEPT(cpus_running, cpu_index(curcpu())), tag); + // No reason to remove ourselves since multicast_ipi will do that for us + cpu_multicast_ipi(cpus_running, tag); } void -cpu_multicast_ipi(__cpuset_t cpuset, int tag) +cpu_multicast_ipi(const kcpuset_t *kcp, int tag) { - CPU_INFO_ITERATOR cii; - struct cpu_info *ci; + struct cpu_info * const ci = curcpu(); + kcpuset_t *kcp2; - CPUSET_DEL(cpuset, cpu_index(curcpu())); - if (CPUSET_EMPTY_P(cpuset)) + if (kcpuset_match(cpus_running, ci->ci_data.cpu_kcpuset)) return; - for (CPU_INFO_FOREACH(cii, ci)) { - if (CPUSET_HAS_P(cpuset, cpu_index(ci))) { - CPUSET_DEL(cpuset, cpu_index(ci)); - (void)cpu_send_ipi(ci, tag); - } + kcpuset_clone(&kcp2, kcp); + kcpuset_remove(kcp2, ci->ci_data.cpu_kcpuset); + for (cpuid_t cii; (cii = kcpuset_ffs(kcp2)) != 0; ) { + kcpuset_clear(kcp2, --cii); + (void)cpu_send_ipi(cpu_lookup(cii), tag); } + kcpuset_destroy(kcp2); } int @@ -667,30 +681,35 @@ cpu_send_ipi(struct cpu_info *ci, int ta } static void -cpu_ipi_error(const char *s, __cpuset_t succeeded, __cpuset_t expected) -{ - CPUSET_SUB(expected, succeeded); - if (!CPUSET_EMPTY_P(expected)) { - printf("Failed to %s:", s); - do { - int index = CPUSET_NEXT(expected); - CPUSET_DEL(expected, index); - printf(" cpu%d", index); - } while (!CPUSET_EMPTY_P(expected)); - printf("\n"); - } -} - -static int -cpu_ipi_wait(volatile __cpuset_t *watchset, u_long mask) +cpu_ipi_wait(const char *s, const kcpuset_t *watchset, const kcpuset_t *wanted) { - u_long limit = curcpu()->ci_cpu_freq/10;/* some finite amount of time */ + bool done = false; + kcpuset_t *kcp; + kcpuset_create(&kcp, false); + + /* some finite amount of time */ - while (limit--) - if (*watchset == mask) - return 0; /* success */ + 
for (u_long limit = curcpu()->ci_cpu_freq/10; !done && limit--; ) { + kcpuset_copy(kcp, watchset); + kcpuset_intersect(kcp, wanted); + done = kcpuset_match(kcp, wanted); + } + + if (!done) { + cpuid_t cii; + kcpuset_copy(kcp, wanted); + kcpuset_remove(kcp, watchset); + if ((cii = kcpuset_ffs(kcp)) != 0) { + printf("Failed to %s:", s); + do { + kcpuset_clear(kcp, --cii); + printf(" cpu%lu", cii); + } while ((cii = kcpuset_ffs(kcp)) != 0); + printf("\n"); + } + } - return 1; /* timed out */ + kcpuset_destroy(kcp); } /* @@ -699,10 +718,10 @@ cpu_ipi_wait(volatile __cpuset_t *watchs void cpu_halt(void) { - int index = cpu_index(curcpu()); + cpuid_t cii = cpu_index(curcpu()); - printf("cpu%d: shutting down\n", index); - CPUSET_ADD(cpus_halted, index); + printf("cpu%lu: shutting down\n", cii); + kcpuset_atomic_set(cpus_halted, cii); spl0(); /* allow interrupts e.g. further ipi ? */ for (;;) ; /* spin */ @@ -715,24 +734,29 @@ cpu_halt(void) void cpu_halt_others(void) { - __cpuset_t cpumask, cpuset; + kcpuset_t *kcp; - CPUSET_ASSIGN(cpuset, cpus_running); - CPUSET_DEL(cpuset, cpu_index(curcpu())); - CPUSET_ASSIGN(cpumask, cpuset); - CPUSET_SUB(cpuset, cpus_halted); - - if (CPUSET_EMPTY_P(cpuset)) + // If we are the only CPU running, there's nothing to do. 
+ if (kcpuset_match(cpus_running, curcpu()->ci_data.cpu_kcpuset)) return; - cpu_multicast_ipi(cpuset, IPI_HALT); - if (cpu_ipi_wait(&cpus_halted, cpumask)) - cpu_ipi_error("halt", cpumask, cpus_halted); + // Get all running CPUs + kcpuset_clone(&kcp, cpus_running); + // Remove ourself + kcpuset_remove(kcp, curcpu()->ci_data.cpu_kcpuset); + // Remove any halted CPUs + kcpuset_remove(kcp, cpus_halted); + // If there are CPUs left, send the IPIs + if (!kcpuset_iszero(kcp)) { + cpu_multicast_ipi(kcp, IPI_HALT); + cpu_ipi_wait("halt", cpus_halted, kcp); + } + kcpuset_destroy(kcp); /* * TBD * Depending on available firmware methods, other cpus will - * either shut down themselfs, or spin and wait for us to + * either shut down themselves, or spin and wait for us to * stop them. */ } @@ -744,23 +768,24 @@ void cpu_pause(struct reg *regsp) { int s = splhigh(); - int index = cpu_index(curcpu()); + cpuid_t cii = cpu_index(curcpu()); - for (;;) { - CPUSET_ADD(cpus_paused, index); + if (__predict_false(cold)) + return; + + do { + kcpuset_atomic_set(cpus_paused, cii); do { ; - } while (CPUSET_HAS_P(cpus_paused, index)); - CPUSET_ADD(cpus_resumed, index); - + } while (kcpuset_isset(cpus_paused, cii)); + kcpuset_atomic_set(cpus_resumed, cii); #if defined(DDB) if (ddb_running_on_this_cpu_p()) cpu_Debugger(); if (ddb_running_on_any_cpu_p()) continue; #endif - break; - } + } while (false); splx(s); } @@ -771,30 +796,41 @@ cpu_pause(struct reg *regsp) void cpu_pause_others(void) { - __cpuset_t cpuset; - - CPUSET_ASSIGN(cpuset, cpus_running); - CPUSET_DEL(cpuset, cpu_index(curcpu())); + struct cpu_info * const ci = curcpu(); + kcpuset_t *kcp; - if (CPUSET_EMPTY_P(cpuset)) + if (cold || kcpuset_match(cpus_running, ci->ci_data.cpu_kcpuset)) return; - cpu_multicast_ipi(cpuset, IPI_SUSPEND); - if (cpu_ipi_wait(&cpus_paused, cpuset)) - cpu_ipi_error("pause", cpus_paused, cpuset); + kcpuset_clone(&kcp, cpus_running); + kcpuset_remove(kcp, ci->ci_data.cpu_kcpuset); + kcpuset_remove(kcp, 
cpus_paused); + + cpu_broadcast_ipi(IPI_SUSPEND); + cpu_ipi_wait("pause", cpus_paused, kcp); + + kcpuset_destroy(kcp); } /* * Resume a single cpu */ void -cpu_resume(int index) +cpu_resume(cpuid_t cii) { - CPUSET_CLEAR(cpus_resumed); - CPUSET_DEL(cpus_paused, index); + kcpuset_t *kcp; + + if (__predict_false(cold)) + return; + + kcpuset_create(&kcp, true); + kcpuset_set(kcp, cii); + kcpuset_atomicly_remove(cpus_resumed, cpus_resumed); + kcpuset_atomic_clear(cpus_paused, cii); + + cpu_ipi_wait("resume", cpus_resumed, kcp); - if (cpu_ipi_wait(&cpus_resumed, CPUSET_SINGLE(index))) - cpu_ipi_error("resume", cpus_resumed, CPUSET_SINGLE(index)); + kcpuset_destroy(kcp); } /* @@ -803,22 +839,26 @@ cpu_resume(int index) void cpu_resume_others(void) { - __cpuset_t cpuset; + kcpuset_t *kcp; + + if (__predict_false(cold)) + return; - CPUSET_CLEAR(cpus_resumed); - CPUSET_ASSIGN(cpuset, cpus_paused); - CPUSET_CLEAR(cpus_paused); + kcpuset_atomicly_remove(cpus_resumed, cpus_resumed); + kcpuset_clone(&kcp, cpus_paused); + kcpuset_atomicly_remove(cpus_paused, cpus_paused); /* CPUs awake on cpus_paused clear */ - if (cpu_ipi_wait(&cpus_resumed, cpuset)) - cpu_ipi_error("resume", cpus_resumed, cpuset); + cpu_ipi_wait("resume", cpus_resumed, kcp); + + kcpuset_destroy(kcp); } -int -cpu_is_paused(int index) +bool +cpu_is_paused(cpuid_t cii) { - return CPUSET_HAS_P(cpus_paused, index); + return !cold && kcpuset_isset(cpus_paused, cii); } #ifdef DDB @@ -831,11 +871,11 @@ cpu_debug_dump(void) db_printf("CPU CPUID STATE CPUINFO CPL INT MTX IPIS\n"); for (CPU_INFO_FOREACH(cii, ci)) { - hatched = (CPUSET_HAS_P(cpus_hatched, cpu_index(ci)) ? 'H' : '-'); - running = (CPUSET_HAS_P(cpus_running, cpu_index(ci)) ? 'R' : '-'); - paused = (CPUSET_HAS_P(cpus_paused, cpu_index(ci)) ? 'P' : '-'); - resumed = (CPUSET_HAS_P(cpus_resumed, cpu_index(ci)) ? 'r' : '-'); - halted = (CPUSET_HAS_P(cpus_halted, cpu_index(ci)) ? 'h' : '-'); + hatched = (kcpuset_isset(cpus_hatched, cpu_index(ci)) ? 
'H' : '-'); + running = (kcpuset_isset(cpus_running, cpu_index(ci)) ? 'R' : '-'); + paused = (kcpuset_isset(cpus_paused, cpu_index(ci)) ? 'P' : '-'); + resumed = (kcpuset_isset(cpus_resumed, cpu_index(ci)) ? 'r' : '-'); + halted = (kcpuset_isset(cpus_halted, cpu_index(ci)) ? 'h' : '-'); db_printf("%3d 0x%03lx %c%c%c%c%c %p " "%3d %3d %3d " "0x%02" PRIx64 "/0x%02" PRIx64 "\n", @@ -893,12 +933,12 @@ cpu_hatch(struct cpu_info *ci) /* * Announce we are hatched */ - CPUSET_ADD(cpus_hatched, cpu_index(ci)); + kcpuset_atomic_set(cpus_hatched, cpu_index(ci)); /* * Now wait to be set free! */ - while (! CPUSET_HAS_P(cpus_running, cpu_index(ci))) { + while (! kcpuset_isset(cpus_running, cpu_index(ci))) { /* spin, spin, spin */ } @@ -924,6 +964,9 @@ cpu_hatch(struct cpu_info *ci) KASSERTMSG(ci->ci_cpl == IPL_NONE, "cpl %d", ci->ci_cpl); KASSERT(mips_cp0_status_read() & MIPS_SR_INT_IE); + kcpuset_atomic_set(pmap_kernel()->pm_onproc, cpu_index(ci)); + kcpuset_atomic_set(pmap_kernel()->pm_active, cpu_index(ci)); + /* * And do a tail call to idle_loop */ @@ -943,15 +986,15 @@ cpu_boot_secondary_processors(void) /* * Skip this CPU if it didn't sucessfully hatch. */ - if (! 
CPUSET_HAS_P(cpus_hatched, cpu_index(ci))) + if (!kcpuset_isset(cpus_hatched, cpu_index(ci))) continue; ci->ci_data.cpu_cc_skew = mips3_cp0_count_read(); atomic_or_ulong(&ci->ci_flags, CPUF_RUNNING); - CPUSET_ADD(cpus_running, cpu_index(ci)); + kcpuset_set(cpus_running, cpu_index(ci)); // Spin until the cpu calls idle_loop for (u_int i = 0; i < 100; i++) { - if (kcpuset_isset(kcpuset_running, cpu_index(ci))) + if (kcpuset_isset(cpus_running, cpu_index(ci))) break; delay(1000); } Index: src/sys/arch/mips/mips/ipifuncs.c diff -u src/sys/arch/mips/mips/ipifuncs.c:1.9 src/sys/arch/mips/mips/ipifuncs.c:1.10 --- src/sys/arch/mips/mips/ipifuncs.c:1.9 Sat Jun 6 04:35:14 2015 +++ src/sys/arch/mips/mips/ipifuncs.c Wed Jun 10 22:31:00 2015 @@ -1,4 +1,4 @@ -/* $NetBSD: ipifuncs.c,v 1.9 2015/06/06 04:35:14 matt Exp $ */ +/* $NetBSD: ipifuncs.c,v 1.10 2015/06/10 22:31:00 matt Exp $ */ /*- * Copyright (c) 2010 The NetBSD Foundation, Inc. @@ -32,7 +32,7 @@ #include "opt_ddb.h" #include <sys/cdefs.h> -__KERNEL_RCSID(0, "$NetBSD: ipifuncs.c,v 1.9 2015/06/06 04:35:14 matt Exp $"); +__KERNEL_RCSID(0, "$NetBSD: ipifuncs.c,v 1.10 2015/06/10 22:31:00 matt Exp $"); #include <sys/param.h> #include <sys/cpu.h> @@ -44,7 +44,6 @@ __KERNEL_RCSID(0, "$NetBSD: ipifuncs.c,v #include <uvm/uvm_extern.h> #include <mips/cache.h> -#include <mips/cpuset.h> #ifdef DDB #include <mips/db_machdep.h> #endif @@ -102,7 +101,7 @@ ipi_halt(void) { const u_int my_cpu = cpu_number(); printf("cpu%u: shutting down\n", my_cpu); - CPUSET_ADD(cpus_halted, my_cpu); + kcpuset_set(cpus_halted, my_cpu); splhigh(); for (;;) ; Index: src/sys/arch/mips/mips/locore_octeon.S diff -u src/sys/arch/mips/mips/locore_octeon.S:1.6 src/sys/arch/mips/mips/locore_octeon.S:1.7 --- src/sys/arch/mips/mips/locore_octeon.S:1.6 Mon Jun 8 14:24:20 2015 +++ src/sys/arch/mips/mips/locore_octeon.S Wed Jun 10 22:31:00 2015 @@ -1,4 +1,4 @@ -/* $NetBSD: locore_octeon.S,v 1.6 2015/06/08 14:24:20 matt Exp $ */ +/* $NetBSD: locore_octeon.S,v 1.7 
2015/06/10 22:31:00 matt Exp $ */ /* * Copyright (c) 2007 Internet Initiative Japan, Inc. @@ -27,7 +27,7 @@ */ #include <mips/asm.h> -RCSID("$NetBSD: locore_octeon.S,v 1.6 2015/06/08 14:24:20 matt Exp $") +RCSID("$NetBSD: locore_octeon.S,v 1.7 2015/06/10 22:31:00 matt Exp $") #include "cpunode.h" /* for NWDOG */ #include "opt_cputype.h" @@ -136,19 +136,22 @@ NESTED_NOPROFILE(octeon_cpu_spinup, 0, r b 1b nop - // Indicate we've gotten this far -2: PTR_LA a0, _C_LABEL(cpus_booted) - li a1, 1 - jal _C_LABEL(atomic_or_64) - sllv a1, a1, s0 + // Indicate this CPU was started by u-boot +2: PTR_LA t0, _C_LABEL(cpus_booted) # get addr for kcpuset +3: sync + PTR_L a0, (t0) # get kcpuset + beqz a0, 3b # loop until not NULL + nop + jal _C_LABEL(kcpuset_atomic_set) + move a1, s1 # pass it our cpu number // Wait until cpuid_infos[cpunum] is not NULL. PTR_LA a1, _C_LABEL(cpuid_infos) dsll v0, s0, PTR_SCALESHIFT # cpunum -> array index PTR_ADD t0, a1, v0 # add to array start -3: sync +4: sync PTR_L a1, (t0) # get cpu_info pointer - beqz a1, 3b # loop until non-NULL + beqz a1, 4b # loop until non-NULL nop j _C_LABEL(cpu_trampoline) Index: src/sys/arch/mips/mips/pmap.c diff -u src/sys/arch/mips/mips/pmap.c:1.214 src/sys/arch/mips/mips/pmap.c:1.215 --- src/sys/arch/mips/mips/pmap.c:1.214 Sun May 11 07:53:28 2014 +++ src/sys/arch/mips/mips/pmap.c Wed Jun 10 22:31:00 2015 @@ -1,4 +1,4 @@ -/* $NetBSD: pmap.c,v 1.214 2014/05/11 07:53:28 skrll Exp $ */ +/* $NetBSD: pmap.c,v 1.215 2015/06/10 22:31:00 matt Exp $ */ /*- * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc. @@ -67,7 +67,7 @@ #include <sys/cdefs.h> -__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.214 2014/05/11 07:53:28 skrll Exp $"); +__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.215 2015/06/10 22:31:00 matt Exp $"); /* * Manages physical address maps. 
@@ -116,6 +116,8 @@ __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.2 #include "opt_multiprocessor.h" #include "opt_mips_cache.h" +#define __MUTEX_PRIVATE + #ifdef MULTIPROCESSOR #define PMAP_NO_PV_UNCACHED #endif @@ -265,10 +267,6 @@ struct pmap_kernel kernel_pmap_store = { .kernel_pmap = { .pm_count = 1, .pm_segtab = (void *)(MIPS_KSEG2_START + 0x1eadbeef), -#ifdef MULTIPROCESSOR - .pm_active = 1, - .pm_onproc = 1, -#endif }, }; struct pmap * const kernel_pmap_ptr = &kernel_pmap_store.kernel_pmap; @@ -375,12 +373,14 @@ pmap_page_syncicache(struct vm_page *pg) struct vm_page_md * const md = VM_PAGE_TO_MD(pg); #ifdef MULTIPROCESSOR pv_entry_t pv = &md->pvh_first; - uint32_t onproc = 0; + kcpuset_t *onproc; + kcpuset_create(&onproc, true); + KASSERT(onproc != NULL); (void)PG_MD_PVLIST_LOCK(md, false); if (pv->pv_pmap != NULL) { for (; pv != NULL; pv = pv->pv_next) { - onproc |= pv->pv_pmap->pm_onproc; - if (onproc == cpus_running) + kcpuset_merge(onproc, pv->pv_pmap->pm_onproc); + if (kcpuset_match(onproc, cpus_running)) break; } } @@ -388,6 +388,7 @@ pmap_page_syncicache(struct vm_page *pg) kpreempt_disable(); pmap_tlb_syncicache(trunc_page(md->pvh_first.pv_va), onproc); kpreempt_enable(); + kcpuset_destroy(onproc); #else if (MIPS_HAS_R4K_MMU) { if (PG_MD_CACHED_P(md)) { @@ -485,6 +486,15 @@ pmap_bootstrap(void) pmap_page_colormask = (uvmexp.ncolors -1) << PAGE_SHIFT; +#ifdef MULTIPROCESSOR + pmap_t pm = pmap_kernel(); + kcpuset_create(&pm->pm_onproc, true); + kcpuset_create(&pm->pm_active, true); + KASSERT(pm->pm_onproc != NULL); + KASSERT(pm->pm_active != NULL); + kcpuset_set(pm->pm_onproc, cpu_number()); + kcpuset_set(pm->pm_active, cpu_number()); +#endif pmap_tlb_info_init(&pmap_tlb0_info); /* init the lock */ /* @@ -812,6 +822,12 @@ pmap_create(void) memset(pmap, 0, PMAP_SIZE); pmap->pm_count = 1; +#ifdef MULTIPROCESSOR + kcpuset_create(&pmap->pm_onproc, true); + kcpuset_create(&pmap->pm_active, true); + KASSERT(pmap->pm_onproc != NULL); + KASSERT(pmap->pm_active != 
NULL); +#endif pmap_segtab_init(pmap); @@ -841,6 +857,13 @@ pmap_destroy(pmap_t pmap) pmap_tlb_asid_release_all(pmap); pmap_segtab_destroy(pmap); +#ifdef MULTIPROCESSOR + kcpuset_destroy(pmap->pm_onproc); + kcpuset_destroy(pmap->pm_active); + pmap->pm_onproc = NULL; + pmap->pm_active = NULL; +#endif + pool_put(&pmap_pmap_pool, pmap); kpreempt_enable(); } @@ -1716,9 +1739,9 @@ pmap_remove_all(struct pmap *pmap) * tlb_invalidate_addrs(). */ #ifdef MULTIPROCESSOR - const uint32_t cpu_mask = 1 << cpu_index(curcpu()); - KASSERT((pmap->pm_onproc & ~cpu_mask) == 0); - if (pmap->pm_onproc & cpu_mask) + // This should be the last CPU with this pmap onproc + KASSERT(!kcpuset_isotherset(pmap->pm_onproc, cpu_index(curcpu()))); + if (kcpuset_isset(pmap->pm_onproc, cpu_index(curcpu()))) pmap_tlb_asid_deactivate(pmap); #endif pmap_tlb_asid_release_all(pmap); @@ -2408,6 +2431,7 @@ pmap_pvlist_lock_init(void) if (sizeof(kmutex_t) > cache_line_size) { cache_line_size = roundup2(sizeof(kmutex_t), cache_line_size); } + memset((void *)lock_page, 0, PAGE_SIZE); const size_t nlocks = PAGE_SIZE / cache_line_size; KASSERT((nlocks & (nlocks - 1)) == 0); /* @@ -2415,7 +2439,7 @@ pmap_pvlist_lock_init(void) */ for (size_t i = 0; i < nlocks; lock_va += cache_line_size, i++) { kmutex_t * const lock = (kmutex_t *)lock_va; - mutex_init(lock, MUTEX_DEFAULT, IPL_VM); + mutex_init(lock, MUTEX_DEFAULT, IPL_HIGH); pli->pli_locks[i] = lock; } pli->pli_lock_mask = nlocks - 1; @@ -2447,9 +2471,16 @@ pmap_pvlist_lock(struct vm_page_md *md, } } + KASSERTMSG(lock >= pli->pli_locks[0], + "lock %p < start %p", lock, pli->pli_locks); + KASSERTMSG(lock <= pli->pli_locks[pli->pli_lock_mask], + "lock %p > end %p", lock, &pli->pli_locks[pli->pli_lock_mask]); + /* * Now finally lock the pvlists. 
*/ + KASSERTMSG(lock->mtx_ipl._spl == IPL_HIGH, + "%p ipl %d", lock, lock->mtx_ipl._spl); mutex_spin_enter(lock); /* @@ -2469,7 +2500,7 @@ pmap_pvlist_lock(struct vm_page_md *md, static void pmap_pvlist_lock_init(void) { - mutex_init(&pmap_pvlist_mutex, MUTEX_DEFAULT, IPL_VM); + mutex_init(&pmap_pvlist_mutex, MUTEX_DEFAULT, IPL_HIGH); } #endif /* MULTIPROCESSOR */ Index: src/sys/arch/mips/mips/pmap_tlb.c diff -u src/sys/arch/mips/mips/pmap_tlb.c:1.8 src/sys/arch/mips/mips/pmap_tlb.c:1.9 --- src/sys/arch/mips/mips/pmap_tlb.c:1.8 Tue Sep 27 01:02:34 2011 +++ src/sys/arch/mips/mips/pmap_tlb.c Wed Jun 10 22:31:00 2015 @@ -1,4 +1,4 @@ -/* $NetBSD: pmap_tlb.c,v 1.8 2011/09/27 01:02:34 jym Exp $ */ +/* $NetBSD: pmap_tlb.c,v 1.9 2015/06/10 22:31:00 matt Exp $ */ /*- * Copyright (c) 2010 The NetBSD Foundation, Inc. @@ -31,7 +31,7 @@ #include <sys/cdefs.h> -__KERNEL_RCSID(0, "$NetBSD: pmap_tlb.c,v 1.8 2011/09/27 01:02:34 jym Exp $"); +__KERNEL_RCSID(0, "$NetBSD: pmap_tlb.c,v 1.9 2015/06/10 22:31:00 matt Exp $"); /* * Manages address spaces in a TLB. @@ -156,7 +156,6 @@ struct pmap_tlb_info pmap_tlb0_info = { .ti_lock = &pmap_tlb0_mutex, .ti_pais = LIST_HEAD_INITIALIZER(pmap_tlb_info.ti_pais), #ifdef MULTIPROCESSOR - .ti_cpu_mask = 1, .ti_tlbinvop = TLBINV_NOBODY, #endif }; @@ -191,7 +190,7 @@ pmap_pai_reset(struct pmap_tlb_info *ti, */ KASSERT(pai->pai_asid); #ifdef MULTIPROCESSOR - KASSERT((pm->pm_onproc & ti->ti_cpu_mask) == 0); + KASSERT(!kcpuset_intersecting_p(pm->pm_onproc, ti->ti_kcpuset)); #endif LIST_REMOVE(pai, pai_link); #ifdef DIAGNOSTIC @@ -212,7 +211,7 @@ pmap_pai_reset(struct pmap_tlb_info *ti, * The bits in pm_active belonging to this TLB can only be changed * while this TLB's lock is held. 
*/ - atomic_and_32(&pm->pm_active, ~ti->ti_cpu_mask); + kcpuset_atomicly_remove(pm->pm_active, ti->ti_kcpuset); #endif /* MULTIPROCESSOR */ } @@ -265,6 +264,9 @@ pmap_tlb_info_init(struct pmap_tlb_info } } #ifdef MULTIPROCESSOR + kcpuset_create(&ti->ti_kcpuset, true); + KASSERT(ti->ti_kcpuset != NULL); + kcpuset_set(ti->ti_kcpuset, cpu_number()); const u_int icache_way_pages = mips_cache_info.mci_picache_way_size >> PGSHIFT; KASSERT(icache_way_pages <= 8*sizeof(pmap_tlb_synci_page_mask)); @@ -288,7 +290,8 @@ pmap_tlb_info_init(struct pmap_tlb_info ti->ti_asids_free = ti->ti_asid_max; ti->ti_tlbinvop = TLBINV_NOBODY, ti->ti_victim = NULL; - ti->ti_cpu_mask = 0; + kcpuset_create(&ti->ti_kcpuset, true); + KASSERT(ti->ti_kcpuset != NULL); ti->ti_index = pmap_ntlbs++; snprintf(ti->ti_name, sizeof(ti->ti_name), "tlb%u", ti->ti_index); @@ -313,8 +316,7 @@ pmap_tlb_info_attach(struct pmap_tlb_inf KASSERT(cold); TLBINFO_LOCK(ti); - uint32_t cpu_mask = 1 << cpu_index(ci); - ti->ti_cpu_mask |= cpu_mask; + kcpuset_set(ti->ti_kcpuset, cpu_index(ci)); ci->ci_tlb_info = ti; ci->ci_ksp_tlb_slot = ti->ti_wired++; /* @@ -326,8 +328,8 @@ pmap_tlb_info_attach(struct pmap_tlb_inf * Mark the kernel as active and "onproc" for this cpu. We assume * we are the only CPU running so atomic ops are not needed. 
*/ - pmap_kernel()->pm_active |= cpu_mask; - pmap_kernel()->pm_onproc |= cpu_mask; + kcpuset_atomic_set(pmap_kernel()->pm_active, cpu_index(ci)); + kcpuset_atomic_set(pmap_kernel()->pm_onproc, cpu_index(ci)); TLBINFO_UNLOCK(ti); } #endif /* MULTIPROCESSOR */ @@ -406,7 +408,7 @@ pmap_tlb_asid_reinitialize(struct pmap_t next = LIST_NEXT(pai, pai_link); KASSERT(pai->pai_asid != 0); #ifdef MULTIPROCESSOR - if (pm->pm_onproc & ti->ti_cpu_mask) { + if (kcpuset_intersecting_p(pm->pm_onproc, ti->ti_kcpuset)) { if (!TLBINFO_ASID_INUSE_P(ti, pai->pai_asid)) { TLBINFO_ASID_MARK_USED(ti, pai->pai_asid); ti->ti_asids_free--; @@ -451,7 +453,7 @@ pmap_tlb_shootdown_process(void) */ struct pmap_asid_info * const pai = PMAP_PAI(ti->ti_victim, ti); KASSERT(ti->ti_victim != pmap_kernel()); - if (ti->ti_victim->pm_onproc & ti->ti_cpu_mask) { + if (kcpuset_intersecting_p(ti->ti_victim->pm_onproc, ti->ti_kcpuset)) { /* * The victim is an active pmap so we will just * invalidate its TLB entries. @@ -467,7 +469,7 @@ pmap_tlb_shootdown_process(void) * next called for this pmap, it will allocate a new * ASID. */ - KASSERT((curpmap->pm_onproc & ti->ti_cpu_mask) == 0); + KASSERT(kcpuset_intersecting_p(curpmap->pm_onproc, ti->ti_kcpuset) == 0); pmap_pai_reset(ti, pai, PAI_PMAP(pai, ti)); } break; @@ -534,29 +536,37 @@ pmap_tlb_shootdown_bystanders(pmap_t pm) /* * We don't need to deal our own TLB. */ - uint32_t pm_active = pm->pm_active & ~curcpu()->ci_tlb_info->ti_cpu_mask; const bool kernel_p = (pm == pmap_kernel()); bool ipi_sent = false; + kcpuset_t *pm_active; + + if (pmap_ntlbs == 1) + return false; + + KASSERT(pm->pm_active != NULL); + kcpuset_clone(&pm_active, pm->pm_active); + KASSERT(pm_active != NULL); + kcpuset_remove(pm_active, curcpu()->ci_tlb_info->ti_kcpuset); /* * If pm_active gets more bits set, then it's after all our changes * have been made so they will already be cognizant of them. 
*/ - for (size_t i = 0; pm_active != 0; i++) { + for (size_t i = 0; !kcpuset_iszero(pm_active); i++) { KASSERT(i < pmap_ntlbs); struct pmap_tlb_info * const ti = pmap_tlbs[i]; KASSERT(tlbinfo_index(ti) == i); /* * Skip this TLB if there are no active mappings for it. */ - if ((pm_active & ti->ti_cpu_mask) == 0) + if (!kcpuset_intersecting_p(pm_active, ti->ti_kcpuset)) continue; struct pmap_asid_info * const pai = PMAP_PAI(pm, ti); - pm_active &= ~ti->ti_cpu_mask; + kcpuset_remove(pm_active, ti->ti_kcpuset); TLBINFO_LOCK(ti); - const uint32_t onproc = (pm->pm_onproc & ti->ti_cpu_mask); - if (onproc != 0) { + cpuid_t j = kcpuset_ffs_intersecting(pm->pm_onproc, ti->ti_kcpuset); + if (j != 0) { if (kernel_p) { ti->ti_tlbinvop = TLBINV_KERNEL_MAP(ti->ti_tlbinvop); @@ -588,13 +598,11 @@ pmap_tlb_shootdown_bystanders(pmap_t pm) * change now that we have released the lock but we * can tolerate spurious shootdowns. */ - KASSERT(onproc != 0); - u_int j = ffs(onproc) - 1; - cpu_send_ipi(cpu_lookup(j), IPI_SHOOTDOWN); + cpu_send_ipi(cpu_lookup(j-1), IPI_SHOOTDOWN); ipi_sent = true; continue; } - if (pm->pm_active & ti->ti_cpu_mask) { + if (kcpuset_intersecting_p(pm->pm_active, ti->ti_kcpuset)) { /* * If this pmap has an ASID assigned but it's not * currently running, nuke its ASID. 
Next time the @@ -607,6 +615,7 @@ pmap_tlb_shootdown_bystanders(pmap_t pm) } TLBINFO_UNLOCK(ti); } + kcpuset_destroy(pm_active); return ipi_sent; } @@ -668,8 +677,8 @@ pmap_tlb_asid_alloc(struct pmap_tlb_info KASSERT(pai->pai_asid == 0); KASSERT(pai->pai_link.le_prev == NULL); #ifdef MULTIPROCESSOR - KASSERT((pm->pm_onproc & ti->ti_cpu_mask) == 0); - KASSERT((pm->pm_active & ti->ti_cpu_mask) == 0); + KASSERT(!kcpuset_intersecting_p(pm->pm_onproc, ti->ti_kcpuset)); + KASSERT(!kcpuset_intersecting_p(pm->pm_active, ti->ti_kcpuset)); #endif KASSERT(ti->ti_asids_free > 0); KASSERT(ti->ti_asid_hint <= ti->ti_asid_max); @@ -724,7 +733,7 @@ pmap_tlb_asid_alloc(struct pmap_tlb_info * The bits in pm_active belonging to this TLB can only be changed * while this TLBs lock is held. */ - atomic_or_32(&pm->pm_active, ti->ti_cpu_mask); + kcpuset_atomicly_merge(pm->pm_active, ti->ti_kcpuset); #endif } @@ -769,7 +778,7 @@ pmap_tlb_asid_acquire(pmap_t pm, struct * The bits in pm_onproc belonging to this TLB can only * be changed while this TLBs lock is held. */ - atomic_or_32(&pm->pm_onproc, 1 << cpu_index(ci)); + kcpuset_atomic_set(pm->pm_onproc, cpu_index(ci)); /* * If this CPU has had exec pages changes that haven't been * icache synched, make sure to do that before returning to @@ -782,6 +791,7 @@ pmap_tlb_asid_acquire(pmap_t pm, struct atomic_or_ulong(&ci->ci_flags, CPUF_USERPMAP); #endif /* MULTIPROCESSOR */ ci->ci_pmap_asid_cur = pai->pai_asid; + KASSERT(!cold); tlb_set_asid(pai->pai_asid); pmap_tlb_asid_check(); } else { @@ -810,19 +820,18 @@ pmap_tlb_asid_deactivate(pmap_t pm) * deactivated the pmap and thusly onproc will be 0 so there's nothing * to do. 
*/ - if (pm != pmap_kernel() && pm->pm_onproc != 0) { + if (pm != pmap_kernel() && !kcpuset_iszero(pm->pm_onproc)) { struct cpu_info * const ci = curcpu(); - const uint32_t cpu_mask = 1 << cpu_index(ci); KASSERT(!cpu_intr_p()); - KASSERTMSG(pm->pm_onproc & cpu_mask, - "%s: pmap %p onproc %#x doesn't include cpu %d (%p)", + KASSERTMSG(kcpuset_isset(pm->pm_onproc, cpu_index(ci)), + "%s: pmap %p onproc %p doesn't include cpu %d (%p)", __func__, pm, pm->pm_onproc, cpu_index(ci), ci); /* * The bits in pm_onproc that belong to this TLB can * be changed while this TLBs lock is not held as long * as we use atomic ops. */ - atomic_and_32(&pm->pm_onproc, ~cpu_mask); + kcpuset_atomic_clear(pm->pm_onproc, cpu_index(ci)); atomic_and_ulong(&ci->ci_flags, ~CPUF_USERPMAP); } #elif defined(DEBUG) @@ -838,11 +847,11 @@ pmap_tlb_asid_release_all(struct pmap *p KASSERT(pm != pmap_kernel()); KASSERT(kpreempt_disabled()); #ifdef MULTIPROCESSOR - KASSERT(pm->pm_onproc == 0); - for (u_int i = 0; pm->pm_active != 0; i++) { + KASSERT(kcpuset_iszero(pm->pm_onproc)); + for (u_int i = 0; !kcpuset_iszero(pm->pm_active); i++) { KASSERT(i < pmap_ntlbs); struct pmap_tlb_info * const ti = pmap_tlbs[i]; - if (pm->pm_active & ti->ti_cpu_mask) { + if (kcpuset_intersecting_p(pm->pm_active, ti->ti_kcpuset)) { struct pmap_asid_info * const pai = PMAP_PAI(pm, ti); TLBINFO_LOCK(ti); KASSERT(ti->ti_victim != pm); @@ -907,7 +916,7 @@ pmap_tlb_syncicache_ast(struct cpu_info } void -pmap_tlb_syncicache(vaddr_t va, uint32_t page_onproc) +pmap_tlb_syncicache(vaddr_t va, const kcpuset_t *page_onproc) { KASSERT(kpreempt_disabled()); /* @@ -929,10 +938,12 @@ pmap_tlb_syncicache(vaddr_t va, uint32_t * then become equal but that's a one in 4 billion cache and will * just cause an extra sync of the icache. 
*/ - const uint32_t cpu_mask = 1L << cpu_index(curcpu()); + struct cpu_info * const ci = curcpu(); const uint32_t page_mask = 1L << ((va >> PGSHIFT) & pmap_tlb_synci_page_mask); - uint32_t onproc = 0; + kcpuset_t *onproc; + kcpuset_create(&onproc, true); + KASSERT(onproc != NULL); for (size_t i = 0; i < pmap_ntlbs; i++) { struct pmap_tlb_info * const ti = pmap_tlbs[i]; TLBINFO_LOCK(ti); @@ -949,7 +960,7 @@ pmap_tlb_syncicache(vaddr_t va, uint32_t if (orig_page_bitmap == old_page_bitmap) { if (old_page_bitmap == 0) { - onproc |= ti->ti_cpu_mask; + kcpuset_merge(onproc, ti->ti_kcpuset); } else { ti->ti_evcnt_synci_deferred.ev_count++; } @@ -960,24 +971,24 @@ pmap_tlb_syncicache(vaddr_t va, uint32_t #if 0 printf("%s: %s: %x to %x on cpus %#x\n", __func__, ti->ti_name, page_mask, ti->ti_synci_page_bitmap, - onproc & page_onproc & ti->ti_cpu_mask); + onproc & page_onproc & ti->ti_kcpuset); #endif TLBINFO_UNLOCK(ti); } - onproc &= page_onproc; - if (__predict_false(onproc != 0)) { + kcpuset_intersect(onproc, page_onproc); + if (__predict_false(!kcpuset_iszero(onproc))) { /* * If the cpu need to sync this page, tell the current lwp * to sync the icache before it returns to userspace. */ - if (onproc & cpu_mask) { - if (curcpu()->ci_flags & CPUF_USERPMAP) { + if (kcpuset_isset(onproc, cpu_index(ci))) { + if (ci->ci_flags & CPUF_USERPMAP) { curlwp->l_md.md_astpending = 1; /* force call to ast() */ - curcpu()->ci_evcnt_synci_onproc_rqst.ev_count++; + ci->ci_evcnt_synci_onproc_rqst.ev_count++; } else { - curcpu()->ci_evcnt_synci_deferred_rqst.ev_count++; + ci->ci_evcnt_synci_deferred_rqst.ev_count++; } - onproc ^= cpu_mask; + kcpuset_clear(onproc, cpu_index(ci)); } /* @@ -986,12 +997,15 @@ pmap_tlb_syncicache(vaddr_t va, uint32_t * We might cause some spurious icache syncs but that's not * going to break anything. 
*/ - for (u_int n = ffs(onproc); - onproc != 0; - onproc >>= n, onproc <<= n, n = ffs(onproc)) { - cpu_send_ipi(cpu_lookup(n-1), IPI_SYNCICACHE); + for (cpuid_t n = kcpuset_ffs(onproc); + n-- != 0; + n = kcpuset_ffs(onproc)) { + kcpuset_clear(onproc, n); + cpu_send_ipi(cpu_lookup(n), IPI_SYNCICACHE); } } + + kcpuset_destroy(onproc); } void Index: src/sys/arch/mips/rmi/rmixl_cpu.c diff -u src/sys/arch/mips/rmi/rmixl_cpu.c:1.7 src/sys/arch/mips/rmi/rmixl_cpu.c:1.8 --- src/sys/arch/mips/rmi/rmixl_cpu.c:1.7 Mon Jun 1 22:55:13 2015 +++ src/sys/arch/mips/rmi/rmixl_cpu.c Wed Jun 10 22:31:00 2015 @@ -1,4 +1,4 @@ -/* $NetBSD: rmixl_cpu.c,v 1.7 2015/06/01 22:55:13 matt Exp $ */ +/* $NetBSD: rmixl_cpu.c,v 1.8 2015/06/10 22:31:00 matt Exp $ */ /* * Copyright 2002 Wasabi Systems, Inc. @@ -38,7 +38,7 @@ #include "locators.h" #include <sys/cdefs.h> -__KERNEL_RCSID(0, "$NetBSD: rmixl_cpu.c,v 1.7 2015/06/01 22:55:13 matt Exp $"); +__KERNEL_RCSID(0, "$NetBSD: rmixl_cpu.c,v 1.8 2015/06/10 22:31:00 matt Exp $"); #include "opt_multiprocessor.h" #include "opt_ddb.h" @@ -218,13 +218,12 @@ cpu_rmixl_attach(device_t parent, device return; } - const u_long cpu_mask = 1L << cpu_index(ci); for (size_t i=0; i < 10000; i++) { - if ((cpus_hatched & cpu_mask) != 0) + if (kcpuset_isset(cpus_hatched, cpu_index(ci))) break; DELAY(100); } - if ((cpus_hatched & cpu_mask) == 0) { + if (!kcpuset_isset(cpus_hatched, cpu_index(ci))) { aprint_error(": failed to hatch\n"); return; } Index: src/sys/arch/mips/rmi/rmixl_intr.c diff -u src/sys/arch/mips/rmi/rmixl_intr.c:1.8 src/sys/arch/mips/rmi/rmixl_intr.c:1.9 --- src/sys/arch/mips/rmi/rmixl_intr.c:1.8 Tue Sep 27 01:02:34 2011 +++ src/sys/arch/mips/rmi/rmixl_intr.c Wed Jun 10 22:31:00 2015 @@ -1,4 +1,4 @@ -/* $NetBSD: rmixl_intr.c,v 1.8 2011/09/27 01:02:34 jym Exp $ */ +/* $NetBSD: rmixl_intr.c,v 1.9 2015/06/10 22:31:00 matt Exp $ */ /*- * Copyright (c) 2007 Ruslan Ermilov and Vsevolod Lobko. 
@@ -64,7 +64,7 @@ */ #include <sys/cdefs.h> -__KERNEL_RCSID(0, "$NetBSD: rmixl_intr.c,v 1.8 2011/09/27 01:02:34 jym Exp $"); +__KERNEL_RCSID(0, "$NetBSD: rmixl_intr.c,v 1.9 2015/06/10 22:31:00 matt Exp $"); #include "opt_ddb.h" #include "opt_multiprocessor.h" @@ -81,7 +81,6 @@ __KERNEL_RCSID(0, "$NetBSD: rmixl_intr.c #include <sys/mutex.h> #include <sys/systm.h> -#include <mips/cpuset.h> #include <mips/locore.h> #include <mips/rmi/rmixlreg.h> @@ -958,7 +957,7 @@ rmixl_send_ipi(struct cpu_info *ci, int uint64_t req = 1 << tag; uint32_t r; - if (! CPUSET_HAS_P(cpus_running, cpu_index(ci))) + if (!kcpuset_isset(cpus_running, cpu_index(ci))) return -1; KASSERT((tag >= 0) && (tag < NIPIS));