Module Name:	src
Committed By:	rmind
Date:		Mon May 31 01:12:15 UTC 2010
Modified Files:
	src/sys/arch/x86/include [rmind-uvmplock]: cpuvar.h pmap.h
	src/sys/arch/x86/x86 [rmind-uvmplock]: pmap.c pmap_tlb.c
	src/sys/arch/xen/conf [rmind-uvmplock]: files.xen
	src/sys/arch/xen/x86 [rmind-uvmplock]: cpu.c xen_pmap.c

Log Message:
- Split off Xen versions of pmap_map_ptes/pmap_unmap_ptes into Xen pmap,
  also move pmap_apte_flush() with pmap_unmap_apdp() there.
- Make Xen buildable.


To generate a diff of this commit:
cvs rdiff -u -r1.31.4.1 -r1.31.4.2 src/sys/arch/x86/include/cpuvar.h
cvs rdiff -u -r1.29.2.5 -r1.29.2.6 src/sys/arch/x86/include/pmap.h
cvs rdiff -u -r1.105.2.8 -r1.105.2.9 src/sys/arch/x86/x86/pmap.c
cvs rdiff -u -r1.1.2.1 -r1.1.2.2 src/sys/arch/x86/x86/pmap_tlb.c
cvs rdiff -u -r1.106.4.2 -r1.106.4.3 src/sys/arch/xen/conf/files.xen
cvs rdiff -u -r1.42.2.1 -r1.42.2.2 src/sys/arch/xen/x86/cpu.c
cvs rdiff -u -r1.1.2.2 -r1.1.2.3 src/sys/arch/xen/x86/xen_pmap.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.
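The split does not change the calling convention: native kernels get
pmap_map_ptes()/pmap_unmap_ptes() from pmap.c, Xen kernels from
xen_pmap.c, and callers use the pair the same way on both.  A minimal
caller sketch (illustrative only; the variable names are not from this
commit, and both implementations KASSERT that preemption is disabled):

	struct pmap *pmap2;
	pd_entry_t *ptes;
	pd_entry_t * const *pdes;

	kpreempt_disable();
	pmap_map_ptes(pmap, &pmap2, &ptes, &pdes);	/* locks pmap */
	/* ... read or modify the pmap's page tables via ptes/pdes ... */
	pmap_unmap_ptes(pmap, pmap2);	/* must be undone before returning */
	kpreempt_enable();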
Modified files:

Index: src/sys/arch/x86/include/cpuvar.h
diff -u src/sys/arch/x86/include/cpuvar.h:1.31.4.1 src/sys/arch/x86/include/cpuvar.h:1.31.4.2
--- src/sys/arch/x86/include/cpuvar.h:1.31.4.1	Sun May 30 05:17:12 2010
+++ src/sys/arch/x86/include/cpuvar.h	Mon May 31 01:12:13 2010
@@ -1,4 +1,4 @@
-/*	$NetBSD: cpuvar.h,v 1.31.4.1 2010/05/30 05:17:12 rmind Exp $	*/
+/*	$NetBSD: cpuvar.h,v 1.31.4.2 2010/05/31 01:12:13 rmind Exp $	*/
 
 /*-
  * Copyright (c) 2000, 2007 The NetBSD Foundation, Inc.
@@ -96,9 +96,7 @@
 #endif
 #endif /* defined(_KERNEL_OPT) */
 
-#ifdef MULTIPROCESSOR
 extern uint32_t cpus_running;
-#endif
 
 int x86_ipi(int, int, int);
 void x86_self_ipi(int);

Index: src/sys/arch/x86/include/pmap.h
diff -u src/sys/arch/x86/include/pmap.h:1.29.2.5 src/sys/arch/x86/include/pmap.h:1.29.2.6
--- src/sys/arch/x86/include/pmap.h:1.29.2.5	Sun May 30 05:17:12 2010
+++ src/sys/arch/x86/include/pmap.h	Mon May 31 01:12:13 2010
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.h,v 1.29.2.5 2010/05/30 05:17:12 rmind Exp $	*/
+/*	$NetBSD: pmap.h,v 1.29.2.6 2010/05/31 01:12:13 rmind Exp $	*/
 
 /*
  *
@@ -436,6 +436,9 @@
 paddr_t vtomach(vaddr_t);
 #define vtomfn(va) (vtomach(va) >> PAGE_SHIFT)
 
+void pmap_apte_flush(struct pmap *);
+void pmap_unmap_apdp(void);
+
 #endif /* XEN */
 
 /* pmap functions with machine addresses */

Index: src/sys/arch/x86/x86/pmap.c
diff -u src/sys/arch/x86/x86/pmap.c:1.105.2.8 src/sys/arch/x86/x86/pmap.c:1.105.2.9
--- src/sys/arch/x86/x86/pmap.c:1.105.2.8	Sun May 30 05:17:13 2010
+++ src/sys/arch/x86/x86/pmap.c	Mon May 31 01:12:14 2010
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.c,v 1.105.2.8 2010/05/30 05:17:13 rmind Exp $	*/
+/*	$NetBSD: pmap.c,v 1.105.2.9 2010/05/31 01:12:14 rmind Exp $	*/
 
 /*-
  * Copyright (c) 2008, 2010 The NetBSD Foundation, Inc.
@@ -177,7 +177,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.105.2.8 2010/05/30 05:17:13 rmind Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.105.2.9 2010/05/31 01:12:14 rmind Exp $");
 
 #include "opt_user_ldt.h"
 #include "opt_lockdebug.h"
@@ -328,7 +328,6 @@
 const long nkptpmax[] = NKPTPMAX_INITIALIZER;
 const long nbpd[] = NBPD_INITIALIZER;
 pd_entry_t * const normal_pdes[] = PDES_INITIALIZER;
-pd_entry_t * const alternate_pdes[] = APDES_INITIALIZER;
 
 long nkptp[] = NKPTP_INITIALIZER;
 
@@ -336,8 +335,6 @@
 
 static vaddr_t pmap_maxkvaddr;
 
-#define COUNT(x) /* nothing */
-
 /*
  * XXX kludge: dummy locking to make KASSERTs in uvm_page.c comfortable.
  * actual locking is done by pm_lock.
@@ -571,9 +568,6 @@
 	    vaddr_t, vaddr_t, vaddr_t,
 	    struct pv_entry **);
 
-#ifdef XEN
-static void pmap_unmap_apdp(void);
-#endif
 static bool pmap_get_physpage(vaddr_t, int, paddr_t *);
 static void pmap_alloc_level(pd_entry_t * const *, vaddr_t, int,
     long *);
@@ -706,43 +700,6 @@
 	    (kernel && (pmap->pm_kernel_cpus & ci->ci_cpumask) != 0));
 }
 
-#ifdef XEN
-static void
-pmap_apte_flush(struct pmap *pmap)
-{
-
-	KASSERT(kpreempt_disabled());
-
-	/*
-	 * Flush the APTE mapping from all other CPUs that
-	 * are using the pmap we are using (who's APTE space
-	 * is the one we've just modified).
-	 *
-	 * XXXthorpej -- find a way to defer the IPI.
-	 */
-	pmap_tlb_shootdown(pmap, (vaddr_t)-1LL, 0, TLBSHOOT_APTE);
-	pmap_tlb_shootnow();
-}
-
-/*
- * Unmap the content of APDP PDEs
- */
-static void
-pmap_unmap_apdp(void)
-{
-	int i;
-
-	for (i = 0; i < PDP_SIZE; i++) {
-		pmap_pte_set(APDP_PDE+i, 0);
-#if defined (PAE)
-		/* clear shadow entries too */
-		pmap_pte_set(APDP_PDE_SHADOW+i, 0);
-#endif
-	}
-}
-
-#endif /* XEN */
-
 /*
  * Add a reference to the specified pmap.
  */
@@ -754,6 +711,8 @@
 	atomic_inc_uint(&pmap->pm_obj[0].uo_refs);
 }
 
+#ifndef XEN
+
 /*
  * pmap_map_ptes: map a pmap's PTEs into KVM and lock them in
  *
@@ -765,116 +724,6 @@
 pmap_map_ptes(struct pmap *pmap, struct pmap **pmap2,
 	      pd_entry_t **ptepp, pd_entry_t * const **pdeppp)
 {
-#ifdef XEN
-	pd_entry_t opde, npde;
-	struct pmap *ourpmap;
-	struct cpu_info *ci;
-	struct lwp *l;
-	bool iscurrent;
-	uint64_t ncsw;
-	int s;
-
-	/* the kernel's pmap is always accessible */
-	if (pmap == pmap_kernel()) {
-		*pmap2 = NULL;
-		*ptepp = PTE_BASE;
-		*pdeppp = normal_pdes;
-		return;
-	}
-	KASSERT(kpreempt_disabled());
-
- retry:
-	l = curlwp;
-	ncsw = l->l_ncsw;
-	ourpmap = NULL;
-	ci = curcpu();
-#if defined(__x86_64__)
-	/*
-	 * curmap can only be pmap_kernel so at this point
-	 * pmap_is_curpmap is always false
-	 */
-	iscurrent = 0;
-	ourpmap = pmap_kernel();
-#else /* __x86_64__*/
-	if (ci->ci_want_pmapload &&
-	    vm_map_pmap(&l->l_proc->p_vmspace->vm_map) == pmap) {
-		pmap_load();
-		if (l->l_ncsw != ncsw)
-			goto retry;
-	}
-	iscurrent = pmap_is_curpmap(pmap);
-	/* if curpmap then we are always mapped */
-	if (iscurrent) {
-		mutex_enter(pmap->pm_lock);
-		*pmap2 = NULL;
-		*ptepp = PTE_BASE;
-		*pdeppp = normal_pdes;
-		goto out;
-	}
-	ourpmap = ci->ci_pmap;
-#endif /* __x86_64__ */
-
-	/* need to lock both curpmap and pmap: use ordered locking */
-	pmap_reference(ourpmap);
-	if ((uintptr_t) pmap < (uintptr_t) ourpmap) {
-		mutex_enter(pmap->pm_lock);
-		mutex_enter(ourpmap->pm_lock);
-	} else {
-		mutex_enter(ourpmap->pm_lock);
-		mutex_enter(pmap->pm_lock);
-	}
-
-	if (l->l_ncsw != ncsw)
-		goto unlock_and_retry;
-
-	/* need to load a new alternate pt space into curpmap? */
-	COUNT(apdp_pde_map);
-	opde = *APDP_PDE;
-	if (!pmap_valid_entry(opde) ||
-	    pmap_pte2pa(opde) != pmap_pdirpa(pmap, 0)) {
-		int i;
-		s = splvm();
-		/* Make recursive entry usable in user PGD */
-		for (i = 0; i < PDP_SIZE; i++) {
-			npde = pmap_pa2pte(
-			    pmap_pdirpa(pmap, i * NPDPG)) | PG_k | PG_V;
-			xpq_queue_pte_update(
-			    xpmap_ptom(pmap_pdirpa(pmap, PDIR_SLOT_PTE + i)),
-			    npde);
-			xpq_queue_pte_update(xpmap_ptetomach(&APDP_PDE[i]),
-			    npde);
-#ifdef PAE
-			/* update shadow entry too */
-			xpq_queue_pte_update(
-			    xpmap_ptetomach(&APDP_PDE_SHADOW[i]), npde);
-#endif /* PAE */
-			xpq_queue_invlpg(
-			    (vaddr_t)&pmap->pm_pdir[PDIR_SLOT_PTE + i]);
-		}
-		if (pmap_valid_entry(opde))
-			pmap_apte_flush(ourpmap);
-		splx(s);
-	}
-	*pmap2 = ourpmap;
-	*ptepp = APTE_BASE;
-	*pdeppp = alternate_pdes;
-	KASSERT(l->l_ncsw == ncsw);
-#if !defined(__x86_64__)
- out:
-#endif
-	/*
-	 * might have blocked, need to retry?
-	 */
-	if (l->l_ncsw != ncsw) {
- unlock_and_retry:
-		if (ourpmap != NULL) {
-			mutex_exit(ourpmap->pm_lock);
-			pmap_destroy(ourpmap);
-		}
-		mutex_exit(pmap->pm_lock);
-		goto retry;
-	}
-#else /* XEN */
 	struct pmap *curpmap;
 	struct cpu_info *ci;
 	uint32_t cpumask;
@@ -931,7 +780,6 @@
 	*pmap2 = curpmap;
 	*ptepp = PTE_BASE;
 	*pdeppp = normal_pdes;
-#endif /* XEN */
 }
 
 /*
@@ -941,31 +789,6 @@
 void
 pmap_unmap_ptes(struct pmap *pmap, struct pmap *pmap2)
 {
-#ifdef XEN
-
-	if (pmap == pmap_kernel()) {
-		return;
-	}
-	KASSERT(kpreempt_disabled());
-	if (pmap2 == NULL) {
-		mutex_exit(pmap->pm_lock);
-	} else {
-#if defined(__x86_64__)
-		KASSERT(pmap2 == pmap_kernel());
-#else
-		KASSERT(curcpu()->ci_pmap == pmap2);
-#endif
-#if defined(MULTIPROCESSOR)
-		pmap_unmap_apdp();
-		pmap_pte_flush();
-		pmap_apte_flush(pmap2);
-#endif /* MULTIPROCESSOR */
-		COUNT(apdp_pde_unmap);
-		mutex_exit(pmap->pm_lock);
-		mutex_exit(pmap2->pm_lock);
-		pmap_destroy(pmap2);
-	}
-#else /* XEN */
 	struct cpu_info *ci;
 	struct pmap *mypmap;
 
@@ -1005,9 +828,10 @@
 	 */
 	pmap_reference(pmap);
 	pmap_destroy(pmap2);
-#endif /* XEN */
 }
 
+#endif
+
 inline static void
 pmap_exec_account(struct pmap *pm, vaddr_t va, pt_entry_t opte, pt_entry_t npte)
 {
@@ -2059,7 +1883,7 @@
 	int npde;
 #endif
 #ifdef XEN
-	int s, i;
+	int s;
 #endif
 
 	/*

Index: src/sys/arch/x86/x86/pmap_tlb.c
diff -u src/sys/arch/x86/x86/pmap_tlb.c:1.1.2.1 src/sys/arch/x86/x86/pmap_tlb.c:1.1.2.2
--- src/sys/arch/x86/x86/pmap_tlb.c:1.1.2.1	Wed May 26 04:55:24 2010
+++ src/sys/arch/x86/x86/pmap_tlb.c	Mon May 31 01:12:14 2010
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap_tlb.c,v 1.1.2.1 2010/05/26 04:55:24 rmind Exp $	*/
+/*	$NetBSD: pmap_tlb.c,v 1.1.2.2 2010/05/31 01:12:14 rmind Exp $	*/
 
 /*-
  * Copyright (c) 2008, 2010 The NetBSD Foundation, Inc.
@@ -40,7 +40,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap_tlb.c,v 1.1.2.1 2010/05/26 04:55:24 rmind Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap_tlb.c,v 1.1.2.2 2010/05/31 01:12:14 rmind Exp $");
 
 #include <sys/param.h>
 #include <sys/kernel.h>
@@ -204,11 +204,10 @@
 {
 	struct pmap_tlb_packet *tp;
 	struct pmap_tlb_mailbox *tm;
-	struct cpu_info *ci, *lci;
-	CPU_INFO_ITERATOR cii;
+	struct cpu_info *ci;
 	uint32_t remote;
 	uintptr_t gen;
-	int s, err, i, count;
+	int s, i, count;
 
 	KASSERT(kpreempt_disabled());
 
@@ -222,7 +221,12 @@
 	gen = 0; /* XXXgcc */
 	tm = &pmap_tlb_mailbox;
 	remote = tp->tp_cpumask & ~ci->ci_cpumask;
+
+#ifdef MULTIPROCESSOR
 	if (remote != 0) {
+		CPU_INFO_ITERATOR cii;
+		struct cpu_info *lci;
+		int err;
 		/*
 		 * Gain ownership of the shootdown mailbox.  We must stay
 		 * at IPL_VM once we own it or could deadlock against an
@@ -275,7 +279,7 @@
 			panic("pmap_tlb_shootdown: IPI failed");
 		}
 	}
-
+#endif
 	/*
 	 * Shootdowns on remote CPUs are now in flight.  In the meantime,
 	 * perform local shootdowns and do not forget to update emap gen.
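With the remote-IPI path above now compiled only under MULTIPROCESSOR
(and cpus_running declared unconditionally in cpuvar.h), pmap_tlb.c can
be built into uniprocessor Xen kernels as well, which is what allows
files.xen below to list it unconditionally.  Schematically, the guarded
shape of pmap_tlb_shootdown() is (simplified from the diff above, not
verbatim from the file):

	remote = tp->tp_cpumask & ~ci->ci_cpumask;
#ifdef MULTIPROCESSOR
	if (remote != 0) {
		/* own the mailbox, copy the packet, IPI the remote CPUs */
	}
#endif
	/* local shootdowns are performed in all cases */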
Index: src/sys/arch/xen/conf/files.xen
diff -u src/sys/arch/xen/conf/files.xen:1.106.4.2 src/sys/arch/xen/conf/files.xen:1.106.4.3
--- src/sys/arch/xen/conf/files.xen:1.106.4.2	Sun May 30 05:17:13 2010
+++ src/sys/arch/xen/conf/files.xen	Mon May 31 01:12:14 2010
@@ -1,4 +1,4 @@
-#	$NetBSD: files.xen,v 1.106.4.2 2010/05/30 05:17:13 rmind Exp $
+#	$NetBSD: files.xen,v 1.106.4.3 2010/05/31 01:12:14 rmind Exp $
 #	NetBSD: files.x86,v 1.10 2003/10/08 17:30:00 bouyer Exp
 #	NetBSD: files.i386,v 1.254 2004/03/25 23:32:10 jmc Exp
 
@@ -144,6 +144,7 @@
 file	arch/xen/x86/intr.c
 file	arch/x86/x86/ipi.c
 file	arch/x86/x86/pmap.c
+file	arch/x86/x86/pmap_tlb.c
 file	arch/x86/x86/sys_machdep.c
 file	arch/x86/x86/tsc.c
 file	arch/x86/x86/vm_machdep.c

Index: src/sys/arch/xen/x86/cpu.c
diff -u src/sys/arch/xen/x86/cpu.c:1.42.2.1 src/sys/arch/xen/x86/cpu.c:1.42.2.2
--- src/sys/arch/xen/x86/cpu.c:1.42.2.1	Sun May 30 05:17:13 2010
+++ src/sys/arch/xen/x86/cpu.c	Mon May 31 01:12:14 2010
@@ -1,4 +1,4 @@
-/*	$NetBSD: cpu.c,v 1.42.2.1 2010/05/30 05:17:13 rmind Exp $	*/
+/*	$NetBSD: cpu.c,v 1.42.2.2 2010/05/31 01:12:14 rmind Exp $	*/
 /* NetBSD: cpu.c,v 1.18 2004/02/20 17:35:01 yamt Exp  */
 
 /*-
@@ -66,7 +66,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.42.2.1 2010/05/30 05:17:13 rmind Exp $");
+__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.42.2.2 2010/05/31 01:12:14 rmind Exp $");
 
 #include "opt_ddb.h"
 #include "opt_multiprocessor.h"
@@ -449,7 +449,6 @@
 	cpu_get_tsc_freq(ci);
 	cpu_init(ci);
 	cpu_set_tss_gates(ci);
-	pmap_cpu_init_late(ci);
 #if NLAPIC > 0
 	if (caa->cpu_role != CPU_ROLE_SP) {
 		/* Enable lapic. */
@@ -494,8 +493,6 @@
 	cpu_intr_init(ci);
 	gdt_alloc_cpu(ci);
 	cpu_set_tss_gates(ci);
-	pmap_cpu_init_early(ci);
-	pmap_cpu_init_late(ci);
 	cpu_start_secondary(ci);
 	if (ci->ci_flags & CPUF_PRESENT) {
 		struct cpu_info *tmp;

Index: src/sys/arch/xen/x86/xen_pmap.c
diff -u src/sys/arch/xen/x86/xen_pmap.c:1.1.2.2 src/sys/arch/xen/x86/xen_pmap.c:1.1.2.3
--- src/sys/arch/xen/x86/xen_pmap.c:1.1.2.2	Sun May 30 05:17:14 2010
+++ src/sys/arch/xen/x86/xen_pmap.c	Mon May 31 01:12:14 2010
@@ -1,3 +1,5 @@
+/*	$NetBSD: xen_pmap.c,v 1.1.2.3 2010/05/31 01:12:14 rmind Exp $	*/
+
 /*
  * Copyright (c) 2007 Manuel Bouyer.
  *
@@ -107,7 +109,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: xen_pmap.c,v 1.1.2.2 2010/05/30 05:17:14 rmind Exp $");
+__KERNEL_RCSID(0, "$NetBSD: xen_pmap.c,v 1.1.2.3 2010/05/31 01:12:14 rmind Exp $");
 
 #include "opt_user_ldt.h"
 #include "opt_lockdebug.h"
@@ -154,9 +156,201 @@
 #define PG_k 0
 #endif
 
+#define COUNT(x) /* nothing */
+
+static pd_entry_t * const alternate_pdes[] = APDES_INITIALIZER;
+extern pd_entry_t * const normal_pdes[];
+
 extern paddr_t pmap_pa_start; /* PA of first physical page for this domain */
 extern paddr_t pmap_pa_end;   /* PA of last physical page for this domain */
 
+void
+pmap_apte_flush(struct pmap *pmap)
+{
+
+	KASSERT(kpreempt_disabled());
+
+	/*
+	 * Flush the APTE mapping from all other CPUs that
+	 * are using the pmap we are using (who's APTE space
+	 * is the one we've just modified).
+	 *
+	 * XXXthorpej -- find a way to defer the IPI.
+	 */
+	pmap_tlb_shootdown(pmap, (vaddr_t)-1LL, 0, TLBSHOOT_APTE);
+	pmap_tlb_shootnow();
+}
+
+/*
+ * Unmap the content of APDP PDEs
+ */
+void
+pmap_unmap_apdp(void)
+{
+	int i;
+
+	for (i = 0; i < PDP_SIZE; i++) {
+		pmap_pte_set(APDP_PDE+i, 0);
+#if defined (PAE)
+		/* clear shadow entries too */
+		pmap_pte_set(APDP_PDE_SHADOW+i, 0);
+#endif
+	}
+}
+
+/*
+ * pmap_map_ptes: map a pmap's PTEs into KVM and lock them in
+ *
+ * => we lock enough pmaps to keep things locked in
+ * => must be undone with pmap_unmap_ptes before returning
+ */
+
+void
+pmap_map_ptes(struct pmap *pmap, struct pmap **pmap2,
+	      pd_entry_t **ptepp, pd_entry_t * const **pdeppp)
+{
+	pd_entry_t opde, npde;
+	struct pmap *ourpmap;
+	struct cpu_info *ci;
+	struct lwp *l;
+	bool iscurrent;
+	uint64_t ncsw;
+	int s;
+
+	/* the kernel's pmap is always accessible */
+	if (pmap == pmap_kernel()) {
+		*pmap2 = NULL;
+		*ptepp = PTE_BASE;
+		*pdeppp = normal_pdes;
+		return;
+	}
+	KASSERT(kpreempt_disabled());
+
+ retry:
+	l = curlwp;
+	ncsw = l->l_ncsw;
+	ourpmap = NULL;
+	ci = curcpu();
+#if defined(__x86_64__)
+	/*
+	 * curmap can only be pmap_kernel so at this point
+	 * pmap_is_curpmap is always false
+	 */
+	iscurrent = 0;
+	ourpmap = pmap_kernel();
+#else /* __x86_64__*/
+	if (ci->ci_want_pmapload &&
+	    vm_map_pmap(&l->l_proc->p_vmspace->vm_map) == pmap) {
+		pmap_load();
+		if (l->l_ncsw != ncsw)
+			goto retry;
+	}
+	iscurrent = pmap_is_curpmap(pmap);
+	/* if curpmap then we are always mapped */
+	if (iscurrent) {
+		mutex_enter(pmap->pm_lock);
+		*pmap2 = NULL;
+		*ptepp = PTE_BASE;
+		*pdeppp = normal_pdes;
+		goto out;
+	}
+	ourpmap = ci->ci_pmap;
+#endif /* __x86_64__ */
+
+	/* need to lock both curpmap and pmap: use ordered locking */
+	pmap_reference(ourpmap);
+	if ((uintptr_t) pmap < (uintptr_t) ourpmap) {
+		mutex_enter(pmap->pm_lock);
+		mutex_enter(ourpmap->pm_lock);
+	} else {
+		mutex_enter(ourpmap->pm_lock);
+		mutex_enter(pmap->pm_lock);
+	}
+
+	if (l->l_ncsw != ncsw)
+		goto unlock_and_retry;
+
+	/* need to load a new alternate pt space into curpmap? */
+	COUNT(apdp_pde_map);
+	opde = *APDP_PDE;
+	if (!pmap_valid_entry(opde) ||
+	    pmap_pte2pa(opde) != pmap_pdirpa(pmap, 0)) {
+		int i;
+		s = splvm();
+		/* Make recursive entry usable in user PGD */
+		for (i = 0; i < PDP_SIZE; i++) {
+			npde = pmap_pa2pte(
+			    pmap_pdirpa(pmap, i * NPDPG)) | PG_k | PG_V;
+			xpq_queue_pte_update(
+			    xpmap_ptom(pmap_pdirpa(pmap, PDIR_SLOT_PTE + i)),
+			    npde);
+			xpq_queue_pte_update(xpmap_ptetomach(&APDP_PDE[i]),
+			    npde);
+#ifdef PAE
+			/* update shadow entry too */
+			xpq_queue_pte_update(
+			    xpmap_ptetomach(&APDP_PDE_SHADOW[i]), npde);
+#endif /* PAE */
+			xpq_queue_invlpg(
+			    (vaddr_t)&pmap->pm_pdir[PDIR_SLOT_PTE + i]);
+		}
+		if (pmap_valid_entry(opde))
+			pmap_apte_flush(ourpmap);
+		splx(s);
+	}
+	*pmap2 = ourpmap;
+	*ptepp = APTE_BASE;
+	*pdeppp = alternate_pdes;
+	KASSERT(l->l_ncsw == ncsw);
+#if !defined(__x86_64__)
+ out:
+#endif
+	/*
+	 * might have blocked, need to retry?
+	 */
+	if (l->l_ncsw != ncsw) {
+ unlock_and_retry:
+		if (ourpmap != NULL) {
+			mutex_exit(ourpmap->pm_lock);
+			pmap_destroy(ourpmap);
+		}
+		mutex_exit(pmap->pm_lock);
+		goto retry;
+	}
+}
+
+/*
+ * pmap_unmap_ptes: unlock the PTE mapping of "pmap"
+ */
+
+void
+pmap_unmap_ptes(struct pmap *pmap, struct pmap *pmap2)
+{
+
+	if (pmap == pmap_kernel()) {
+		return;
+	}
+	KASSERT(kpreempt_disabled());
+	if (pmap2 == NULL) {
+		mutex_exit(pmap->pm_lock);
+	} else {
+#if defined(__x86_64__)
+		KASSERT(pmap2 == pmap_kernel());
+#else
+		KASSERT(curcpu()->ci_pmap == pmap2);
+#endif
+#if defined(MULTIPROCESSOR)
+		pmap_unmap_apdp();
+		pmap_pte_flush();
+		pmap_apte_flush(pmap2);
+#endif /* MULTIPROCESSOR */
+		COUNT(apdp_pde_unmap);
+		mutex_exit(pmap->pm_lock);
+		mutex_exit(pmap2->pm_lock);
+		pmap_destroy(pmap2);
+	}
+}
+
 int
 pmap_enter(struct pmap *pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
 {