Author: ian
Date: Thu Mar 26 21:13:53 2015
New Revision: 280712

URL: https://svnweb.freebsd.org/changeset/base/280712
Log:
  New pmap code for armv6. Disabled by default, option ARM_NEW_PMAP enables
  it. This is pretty much a complete rewrite based on the existing i386 code.
  The patches have been circulating for a couple years and have been looked
  at by plenty of people, but I'm not putting anybody on the hook as having
  reviewed this in any formal sense except myself.

  After this has gotten wider testing from the user community, ARM_NEW_PMAP
  will become the default and various dregs of the old pmap code will be
  removed.

  Submitted by:	Svatopluk Kraus <onw...@gmail.com>,
  		Michal Meloun <mel...@miracle.cz>
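  (For anyone who wants to try the new code before it becomes the default:
  ARM_NEW_PMAP is an ordinary kernel option, hooked up in sys/conf/options.arm
  by this commit, so enabling it is just a config change. A minimal sketch,
  assuming a hypothetical custom config named MYBOARD derived from the
  existing IMX6 config; both names are illustrative, not part of the commit:

      # sys/arm/conf/MYBOARD -- hypothetical test config
      include "IMX6"          # any armv6 base config works the same way
      ident   MYBOARD
      options ARM_NEW_PMAP    # opt in to the rewritten pmap

  then build with the usual "make buildkernel KERNCONF=MYBOARD".)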
Added:
  head/sys/arm/arm/pmap-v6-new.c   (contents, props changed)
  head/sys/arm/include/pmap-v6.h   (contents, props changed)
  head/sys/arm/include/pmap_var.h   (contents, props changed)
  head/sys/arm/include/pte-v6.h   (contents, props changed)
Modified:
  head/sys/arm/arm/genassym.c
  head/sys/arm/arm/machdep.c
  head/sys/arm/arm/mem.c
  head/sys/arm/arm/minidump_machdep.c
  head/sys/arm/arm/mp_machdep.c
  head/sys/arm/arm/swtch.S
  head/sys/arm/include/machdep.h
  head/sys/arm/include/pcb.h
  head/sys/arm/include/pmap.h
  head/sys/arm/include/pte.h
  head/sys/arm/include/sf_buf.h
  head/sys/arm/include/smp.h
  head/sys/arm/include/vm.h
  head/sys/arm/include/vmparam.h
  head/sys/conf/files.arm
  head/sys/conf/options.arm

Modified: head/sys/arm/arm/genassym.c
==============================================================================
--- head/sys/arm/arm/genassym.c	Thu Mar 26 21:10:42 2015	(r280711)
+++ head/sys/arm/arm/genassym.c	Thu Mar 26 21:13:53 2015	(r280712)
@@ -43,6 +43,7 @@ __FBSDID("$FreeBSD$");
 #include <machine/frame.h>
 #include <machine/pcb.h>
 #include <machine/cpu.h>
+#include <machine/cpu-v6.h>
 #include <machine/proc.h>
 #include <machine/cpufunc.h>
 #include <machine/cpuinfo.h>
@@ -58,12 +59,19 @@ __FBSDID("$FreeBSD$");
 ASSYM(KERNBASE, KERNBASE);
 ASSYM(PCB_NOALIGNFLT, PCB_NOALIGNFLT);
+#ifdef ARM_NEW_PMAP
+ASSYM(CPU_ASID_KERNEL,CPU_ASID_KERNEL);
+#endif
 ASSYM(PCB_ONFAULT, offsetof(struct pcb, pcb_onfault));
+#ifndef ARM_NEW_PMAP
 ASSYM(PCB_DACR, offsetof(struct pcb, pcb_dacr));
+#endif
 ASSYM(PCB_FLAGS, offsetof(struct pcb, pcb_flags));
 ASSYM(PCB_PAGEDIR, offsetof(struct pcb, pcb_pagedir));
+#ifndef ARM_NEW_PMAP
 ASSYM(PCB_L1VEC, offsetof(struct pcb, pcb_l1vec));
 ASSYM(PCB_PL1VEC, offsetof(struct pcb, pcb_pl1vec));
+#endif
 ASSYM(PCB_R4, offsetof(struct pcb, pcb_regs.sf_r4));
 ASSYM(PCB_R5, offsetof(struct pcb, pcb_regs.sf_r5));
 ASSYM(PCB_R6, offsetof(struct pcb, pcb_regs.sf_r6));
@@ -131,7 +139,6 @@ ASSYM(PC_CURPMAP, offsetof(struct pcpu,
 #endif
 ASSYM(PAGE_SIZE, PAGE_SIZE);
-ASSYM(PDESIZE, PDESIZE);
 ASSYM(PMAP_DOMAIN_KERNEL, PMAP_DOMAIN_KERNEL);
 #ifdef PMAP_INCLUDE_PTE_SYNC
 ASSYM(PMAP_INCLUDE_PTE_SYNC, 1);
@@ -145,8 +152,13 @@ ASSYM(TRAPFRAMESIZE, sizeof(struct trapf
 ASSYM(MAXCOMLEN, MAXCOMLEN);
 ASSYM(MAXCPU, MAXCPU);
+ASSYM(_NCPUWORDS, _NCPUWORDS);
 ASSYM(NIRQ, NIRQ);
 ASSYM(PCPU_SIZE, sizeof(struct pcpu));
+ASSYM(P_VMSPACE, offsetof(struct proc, p_vmspace));
+ASSYM(VM_PMAP, offsetof(struct vmspace, vm_pmap));
+ASSYM(PM_ACTIVE, offsetof(struct pmap, pm_active));
+ASSYM(PC_CPUID, offsetof(struct pcpu, pc_cpuid));
 ASSYM(DCACHE_LINE_SIZE, offsetof(struct cpuinfo, dcache_line_size));
 ASSYM(DCACHE_LINE_MASK, offsetof(struct cpuinfo, dcache_line_mask));

Modified: head/sys/arm/arm/machdep.c
==============================================================================
--- head/sys/arm/arm/machdep.c	Thu Mar 26 21:10:42 2015	(r280711)
+++ head/sys/arm/arm/machdep.c	Thu Mar 26 21:13:53 2015	(r280712)
@@ -138,6 +138,14 @@ int _min_bzero_size = 0;
 extern int *end;
 
 #ifdef FDT
+vm_paddr_t pmap_pa;
+
+#ifdef ARM_NEW_PMAP
+vm_offset_t systempage;
+vm_offset_t irqstack;
+vm_offset_t undstack;
+vm_offset_t abtstack;
+#else
 /*
  * This is the number of L2 page tables required for covering max
  * (hypothetical) memsize of 4GB and all kernel mappings (vectors, msgbuf,
@@ -147,15 +155,13 @@ extern int *end;
 
 static struct pv_addr kernel_pt_table[KERNEL_PT_MAX];
 
-vm_paddr_t pmap_pa;
-
 struct pv_addr systempage;
 static struct pv_addr msgbufpv;
 struct pv_addr irqstack;
 struct pv_addr undstack;
 struct pv_addr abtstack;
 static struct pv_addr kernelstack;
-
+#endif
 #endif
 
 #if defined(LINUX_BOOT_ABI)
@@ -381,9 +387,11 @@ cpu_startup(void *dummy)
 	vm_pager_bufferinit();
 	pcb->pcb_regs.sf_sp = (u_int)thread0.td_kstack +
 	    USPACE_SVC_STACK_TOP;
-	vector_page_setprot(VM_PROT_READ);
 	pmap_set_pcb_pagedir(pmap_kernel(), pcb);
+#ifndef ARM_NEW_PMAP
+	vector_page_setprot(VM_PROT_READ);
 	pmap_postinit();
+#endif
 #ifdef ARM_TP_ADDRESS
 #ifdef ARM_CACHE_LOCK_ENABLE
 	pmap_kenter_user(ARM_TP_ADDRESS, ARM_TP_ADDRESS);
@@ -1003,6 +1011,19 @@ init_proc0(vm_offset_t kstack)
 	pcpup->pc_curpcb = thread0.td_pcb;
 }
 
+#ifdef ARM_NEW_PMAP
+void
+set_stackptrs(int cpu)
+{
+
+	set_stackptr(PSR_IRQ32_MODE,
+	    irqstack + ((IRQ_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
+	set_stackptr(PSR_ABT32_MODE,
+	    abtstack + ((ABT_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
+	set_stackptr(PSR_UND32_MODE,
+	    undstack + ((UND_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
+}
+#else
 void
 set_stackptrs(int cpu)
 {
@@ -1014,6 +1035,7 @@ set_stackptrs(int cpu)
 	set_stackptr(PSR_UND32_MODE,
 	    undstack.pv_va + ((UND_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
 }
+#endif
 
 #ifdef FDT
 static char *
@@ -1048,6 +1070,7 @@ print_kenv(void)
 		debugf(" %x %s\n", (uint32_t)cp, cp);
 }
 
+#ifndef ARM_NEW_PMAP
 void *
 initarm(struct arm_boot_params *abp)
 {
@@ -1316,4 +1339,181 @@ initarm(struct arm_boot_params *abp)
 	return ((void *)(kernelstack.pv_va + USPACE_SVC_STACK_TOP -
 	    sizeof(struct pcb)));
 }
+#else /* !ARM_NEW_PMAP */
+void *
+initarm(struct arm_boot_params *abp)
+{
+	struct mem_region mem_regions[FDT_MEM_REGIONS];
+	vm_paddr_t lastaddr;
+	vm_offset_t dtbp, kernelstack, dpcpu;
+	uint32_t memsize;
+	char *env;
+	void *kmdp;
+	int err_devmap, mem_regions_sz;
+
+	/* get last allocated physical address */
+	arm_physmem_kernaddr = abp->abp_physaddr;
+	lastaddr = parse_boot_param(abp) - KERNVIRTADDR + arm_physmem_kernaddr;
+
+	memsize = 0;
+	set_cpufuncs();
+	cpuinfo_init();
+
+	/*
+	 * Find the dtb passed in by the boot loader.
+	 */
+	kmdp = preload_search_by_type("elf kernel");
+	if (kmdp != NULL)
+		dtbp = MD_FETCH(kmdp, MODINFOMD_DTBP, vm_offset_t);
+	else
+		dtbp = (vm_offset_t)NULL;
+#if defined(FDT_DTB_STATIC)
+	/*
+	 * In case the device tree blob was not retrieved (from metadata) try
+	 * to use the statically embedded one.
+	 */
+	if (dtbp == (vm_offset_t)NULL)
+		dtbp = (vm_offset_t)&fdt_static_dtb;
 #endif
+
+	if (OF_install(OFW_FDT, 0) == FALSE)
+		panic("Cannot install FDT");
+
+	if (OF_init((void *)dtbp) != 0)
+		panic("OF_init failed with the found device tree");
+
+	/* Grab physical memory regions information from device tree. */
+	if (fdt_get_mem_regions(mem_regions, &mem_regions_sz, &memsize) != 0)
+		panic("Cannot get physical memory regions");
+	arm_physmem_hardware_regions(mem_regions, mem_regions_sz);
+
+	/* Grab reserved memory regions information from device tree. */
+	if (fdt_get_reserved_regions(mem_regions, &mem_regions_sz) == 0)
+		arm_physmem_exclude_regions(mem_regions, mem_regions_sz,
+		    EXFLAG_NODUMP | EXFLAG_NOALLOC);
+
+	/*
+	 * Set TEX remapping registers.
+	 * Setup kernel page tables and switch to kernel L1 page table.
+	 */
+	pmap_set_tex();
+	pmap_bootstrap_prepare(lastaddr);
+
+	/*
+	 * Now that proper page tables are installed, call cpu_setup() to
+	 * enable instruction and data caches and other chip-specific features.
+	 */
+	cpu_setup("");
+
+	/* Platform-specific initialisation */
+	platform_probe_and_attach();
+	pcpu0_init();
+
+	/* Do basic tuning, hz etc. */
+	init_param1();
+
+	/*
+	 * Allocate a page for the system page mapped to 0xffff0000.
+	 * This page will just contain the system vectors and can be
+	 * shared by all processes.
+	 */
+	systempage = pmap_preboot_get_pages(1);
+
+	/* Map the vector page. */
+	pmap_preboot_map_pages(systempage, ARM_VECTORS_HIGH, 1);
+	if (virtual_end >= ARM_VECTORS_HIGH)
+		virtual_end = ARM_VECTORS_HIGH - 1;
+
+	/* Allocate dynamic per-cpu area. */
+	dpcpu = pmap_preboot_get_vpages(DPCPU_SIZE / PAGE_SIZE);
+	dpcpu_init((void *)dpcpu, 0);
+
+	/* Allocate stacks for all modes */
+	irqstack = pmap_preboot_get_vpages(IRQ_STACK_SIZE * MAXCPU);
+	abtstack = pmap_preboot_get_vpages(ABT_STACK_SIZE * MAXCPU);
+	undstack = pmap_preboot_get_vpages(UND_STACK_SIZE * MAXCPU);
+	kernelstack = pmap_preboot_get_vpages(KSTACK_PAGES * MAXCPU);
+
+	/* Allocate message buffer. */
+	msgbufp = (void *)pmap_preboot_get_vpages(
+	    round_page(msgbufsize) / PAGE_SIZE);
+
+	/*
+	 * Pages were allocated during the secondary bootstrap for the
+	 * stacks for different CPU modes.
+	 * We must now set the r13 registers in the different CPU modes to
+	 * point to these stacks.
+	 * Since the ARM stacks use STMFD etc. we must set r13 to the top end
+	 * of the stack memory.
+	 */
+	set_stackptrs(0);
+	mutex_init();
+
+	/* Establish static device mappings. */
+	err_devmap = platform_devmap_init();
+	arm_devmap_bootstrap(0, NULL);
+	vm_max_kernel_address = platform_lastaddr();
+
+	/*
+	 * Only after the SOC registers block is mapped can we perform device
+	 * tree fixups, as they may attempt to read parameters from hardware.
+	 */
+	OF_interpret("perform-fixup", 0);
+	platform_gpio_init();
+	cninit();
+
+	debugf("initarm: console initialized\n");
+	debugf(" arg1 kmdp = 0x%08x\n", (uint32_t)kmdp);
+	debugf(" boothowto = 0x%08x\n", boothowto);
+	debugf(" dtbp = 0x%08x\n", (uint32_t)dtbp);
+	debugf(" lastaddr1: 0x%08x\n", lastaddr);
+	print_kenv();
+
+	env = kern_getenv("kernelname");
+	if (env != NULL)
+		strlcpy(kernelname, env, sizeof(kernelname));
+
+	if (err_devmap != 0)
+		printf("WARNING: could not fully configure devmap, error=%d\n",
+		    err_devmap);
+
+	platform_late_init();
+
+	/*
+	 * We must now clean the cache again....
+	 * Cleaning may be done by reading new data to displace any
+	 * dirty data in the cache. This will have happened in setttb()
+	 * but since we are boot strapping the addresses used for the read
+	 * may have just been remapped and thus the cache could be out
+	 * of sync. A re-clean after the switch will cure this.
+	 * After booting there are no gross relocations of the kernel thus
+	 * this problem will not occur after initarm().
+	 */
+	/* Set stack for exception handlers */
+	undefined_init();
+	init_proc0(kernelstack);
+	arm_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL);
+	enable_interrupts(PSR_A);
+	pmap_bootstrap(0);
+
+	/* Exclude the kernel (and all the things we allocated which
+	 * immediately follow the kernel) from the VM allocation pool but not
+	 * from crash dumps. virtual_avail is a global variable which tracks
+	 * the kva we've "allocated" while setting up pmaps.
+	 *
+	 * Prepare the list of physical memory available to the vm subsystem.
+	 */
+	arm_physmem_exclude_region(abp->abp_physaddr,
+	    pmap_preboot_get_pages(0) - abp->abp_physaddr, EXFLAG_NOALLOC);
+	arm_physmem_init_kernel_globals();
+
+	init_param2(physmem);
+	/* Init message buffer. */
+	msgbufinit(msgbufp, msgbufsize);
+	kdb_init();
+	return ((void *)STACKALIGN(thread0.td_pcb));
+
+}
+
+#endif /* !ARM_NEW_PMAP */
+#endif /* FDT */

Modified: head/sys/arm/arm/mem.c
==============================================================================
--- head/sys/arm/arm/mem.c	Thu Mar 26 21:10:42 2015	(r280711)
+++ head/sys/arm/arm/mem.c	Thu Mar 26 21:13:53 2015	(r280712)
@@ -113,6 +113,9 @@ memrw(struct cdev *dev, struct uio *uio,
 			return (EINVAL);
 		sx_xlock(&tmppt_lock);
 		pmap_kenter((vm_offset_t)_tmppt, v);
+#ifdef ARM_NEW_PMAP
+		pmap_tlb_flush(kernel_pmap, (vm_offset_t)_tmppt);
+#endif
 		o = (int)uio->uio_offset & PAGE_MASK;
 		c = (u_int)(PAGE_SIZE - ((int)iov->iov_base & PAGE_MASK));
 		c = min(c, (u_int)(PAGE_SIZE - o));

Modified: head/sys/arm/arm/minidump_machdep.c
==============================================================================
--- head/sys/arm/arm/minidump_machdep.c	Thu Mar 26 21:10:42 2015	(r280711)
+++ head/sys/arm/arm/minidump_machdep.c	Thu Mar 26 21:13:53 2015	(r280712)
@@ -61,7 +61,10 @@ CTASSERT(sizeof(struct kerneldumpheader)
 uint32_t *vm_page_dump;
 int vm_page_dump_size;
 
+#ifndef ARM_NEW_PMAP
+
 static struct kerneldumpheader kdh;
+
 static off_t dumplo;
 
 /* Handle chunked writes. */
@@ -473,8 +476,20 @@ fail:
 	else
 		printf("\n** DUMP FAILED (ERROR %d) **\n", error);
 	return (error);
+	return (0);
 }
 
+#else /* ARM_NEW_PMAP */
+
+int
+minidumpsys(struct dumperinfo *di)
+{
+
+	return (0);
+}
+
+#endif
+
 void
 dump_add_page(vm_paddr_t pa)
 {

Modified: head/sys/arm/arm/mp_machdep.c
==============================================================================
--- head/sys/arm/arm/mp_machdep.c	Thu Mar 26 21:10:42 2015	(r280711)
+++ head/sys/arm/arm/mp_machdep.c	Thu Mar 26 21:13:53 2015	(r280712)
@@ -48,6 +48,7 @@ __FBSDID("$FreeBSD$");
 #include <machine/cpufunc.h>
 #include <machine/smp.h>
 #include <machine/pcb.h>
+#include <machine/pmap.h>
 #include <machine/pte.h>
 #include <machine/physmem.h>
 #include <machine/intr.h>
@@ -151,10 +152,20 @@ init_secondary(int cpu)
 	uint32_t loop_counter;
 	int start = 0, end = 0;
 
+#ifdef ARM_NEW_PMAP
+	pmap_set_tex();
+	reinit_mmu(pmap_kern_ttb, (1 << 6) | (1 << 0), (1 << 6) | (1 << 0));
+	cpu_setup("");
+
+	/* Provide stack pointers for other processor modes. */
+	set_stackptrs(cpu);
+
+	enable_interrupts(PSR_A);
+#else /* ARM_NEW_PMAP */
 	cpu_setup(NULL);
 	setttb(pmap_pa);
 	cpu_tlb_flushID();
-
+#endif /* ARM_NEW_PMAP */
 	pc = &__pcpu[cpu];
 
 	/*
@@ -166,10 +177,10 @@ init_secondary(int cpu)
 	pcpu_init(pc, cpu, sizeof(struct pcpu));
 	dpcpu_init(dpcpu[cpu - 1], cpu);
-
+#ifndef ARM_NEW_PMAP
 	/* Provide stack pointers for other processor modes. */
 	set_stackptrs(cpu);
-
+#endif
 	/* Signal our startup to BSP */
 	atomic_add_rel_32(&mp_naps, 1);
@@ -298,6 +309,12 @@ ipi_handler(void *arg)
 		CTR1(KTR_SMP, "%s: IPI_TLB", __func__);
 		cpufuncs.cf_tlb_flushID();
 		break;
+#ifdef ARM_NEW_PMAP
+	case IPI_LAZYPMAP:
+		CTR1(KTR_SMP, "%s: IPI_LAZYPMAP", __func__);
+		pmap_lazyfix_action();
+		break;
+#endif
 	default:
 		panic("Unknown IPI 0x%0x on cpu %d", ipi, curcpu);
 	}

Added: head/sys/arm/arm/pmap-v6-new.c
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ head/sys/arm/arm/pmap-v6-new.c	Thu Mar 26 21:13:53 2015	(r280712)
@@ -0,0 +1,6723 @@
+/*-
+ * Copyright (c) 1991 Regents of the University of California.
+ * Copyright (c) 1994 John S. Dyson
+ * Copyright (c) 1994 David Greenman
+ * Copyright (c) 2005-2010 Alan L. Cox <a...@cs.rice.edu>
+ * Copyright (c) 2014 Svatopluk Kraus <onw...@gmail.com>
+ * Copyright (c) 2014 Michal Meloun <mel...@miracle.cz>
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * the Systems Programming Group of the University of Utah Computer
+ * Science Department and William Jolitz of UUNET Technologies Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)pmap.c 7.7 (Berkeley) 5/12/91
+ */
+/*-
+ * Copyright (c) 2003 Networks Associates Technology, Inc.
+ * All rights reserved.
+ *
+ * This software was developed for the FreeBSD Project by Jake Burkholder,
+ * Safeport Network Services, and Network Associates Laboratories, the
+ * Security Research Division of Network Associates, Inc. under
+ * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA
+ * CHATS research program.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * Manages physical address maps.
+ *
+ * Since the information managed by this module is
+ * also stored by the logical address mapping module,
+ * this module may throw away valid virtual-to-physical
+ * mappings at almost any time. However, invalidations
+ * of virtual-to-physical mappings must be done as
+ * requested.
+ *
+ * In order to cope with hardware architectures which
+ * make virtual-to-physical map invalidates expensive,
+ * this module may delay invalidate or reduced protection
+ * operations until such time as they are actually
+ * necessary. This module is given full information as
+ * to which processors are currently using which maps,
+ * and to when physical maps must be made correct.
+ */
+
+#include "opt_vm.h"
+#include "opt_pmap.h"
+#include "opt_ddb.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/ktr.h>
+#include <sys/lock.h>
+#include <sys/proc.h>
+#include <sys/rwlock.h>
+#include <sys/malloc.h>
+#include <sys/vmmeter.h>
+#include <sys/malloc.h>
+#include <sys/mman.h>
+#include <sys/sf_buf.h>
+#include <sys/smp.h>
+#include <sys/sched.h>
+#include <sys/sysctl.h>
+#ifdef SMP
+#include <sys/smp.h>
+#else
+#include <sys/cpuset.h>
+#endif
+
+#ifdef DDB
+#include <ddb/ddb.h>
+#endif
+
+#include <machine/physmem.h>
+#include <machine/vmparam.h>
+
+#include <vm/vm.h>
+#include <vm/uma.h>
+#include <vm/pmap.h>
+#include <vm/vm_param.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_object.h>
+#include <vm/vm_map.h>
+#include <vm/vm_page.h>
+#include <vm/vm_pageout.h>
+#include <vm/vm_phys.h>
+#include <vm/vm_extern.h>
+#include <vm/vm_reserv.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+
+#include <machine/md_var.h>
+#include <machine/pmap_var.h>
+#include <machine/cpu.h>
+#include <machine/cpu-v6.h>
+#include <machine/pcb.h>
+#include <machine/sf_buf.h>
+#ifdef SMP
+#include <machine/smp.h>
+#endif
+
+#ifndef PMAP_SHPGPERPROC
+#define PMAP_SHPGPERPROC 200
+#endif
+
+#ifndef DIAGNOSTIC
+#define PMAP_INLINE	__inline
+#else
+#define PMAP_INLINE
+#endif
+
+#ifdef PMAP_DEBUG
+static void pmap_zero_page_check(vm_page_t m);
+void pmap_debug(int level);
+int pmap_pid_dump(int pid);
+void pmap_pvdump(vm_paddr_t pa);
+
+#define PDEBUG(_lev_,_stat_) \
+	if (pmap_debug_level >= (_lev_)) \
+		((_stat_))
+#define dprintf printf
+int pmap_debug_level = 1;
+#else /* PMAP_DEBUG */
+#define PDEBUG(_lev_,_stat_)	/* Nothing */
+#define dprintf(x, arg...)
+#endif /* PMAP_DEBUG */
+
+/*
+ * Level 2 page tables map definition ('max' is excluded).
+ */
+
+#define PT2V_MIN_ADDRESS	((vm_offset_t)PT2MAP)
+#define PT2V_MAX_ADDRESS	((vm_offset_t)PT2MAP + PT2MAP_SIZE)
+
+#define UPT2V_MIN_ADDRESS	((vm_offset_t)PT2MAP)
+#define UPT2V_MAX_ADDRESS \
+    ((vm_offset_t)(PT2MAP + (KERNBASE >> PT2MAP_SHIFT)))
+
+/*
+ * Promotion to a 1MB (PTE1) page mapping requires that the corresponding
+ * 4KB (PTE2) page mappings have identical settings for the following fields:
+ */
+#define PTE2_PROMOTE	(PTE2_V | PTE2_A | PTE2_NM | PTE2_S | PTE2_NG | \
+			 PTE2_NX | PTE2_RO | PTE2_U | PTE2_W | \
+			 PTE2_ATTR_MASK)
+
+#define PTE1_PROMOTE	(PTE1_V | PTE1_A | PTE1_NM | PTE1_S | PTE1_NG | \
+			 PTE1_NX | PTE1_RO | PTE1_U | PTE1_W | \
+			 PTE1_ATTR_MASK)
+
+#define ATTR_TO_L1(l2_attr)	((((l2_attr) & L2_TEX0) ? L1_S_TEX0 : 0) | \
+				(((l2_attr) & L2_C) ? L1_S_C : 0) | \
+				(((l2_attr) & L2_B) ? L1_S_B : 0) | \
+				(((l2_attr) & PTE2_A) ? PTE1_A : 0) | \
+				(((l2_attr) & PTE2_NM) ? PTE1_NM : 0) | \
+				(((l2_attr) & PTE2_S) ? PTE1_S : 0) | \
+				(((l2_attr) & PTE2_NG) ? PTE1_NG : 0) | \
+				(((l2_attr) & PTE2_NX) ? PTE1_NX : 0) | \
+				(((l2_attr) & PTE2_RO) ? PTE1_RO : 0) | \
+				(((l2_attr) & PTE2_U) ? PTE1_U : 0) | \
+				(((l2_attr) & PTE2_W) ? PTE1_W : 0))
+
+#define ATTR_TO_L2(l1_attr)	((((l1_attr) & L1_S_TEX0) ? L2_TEX0 : 0) | \
+				(((l1_attr) & L1_S_C) ? L2_C : 0) | \
+				(((l1_attr) & L1_S_B) ? L2_B : 0) | \
+				(((l1_attr) & PTE1_A) ? PTE2_A : 0) | \
+				(((l1_attr) & PTE1_NM) ? PTE2_NM : 0) | \
+				(((l1_attr) & PTE1_S) ? PTE2_S : 0) | \
+				(((l1_attr) & PTE1_NG) ? PTE2_NG : 0) | \
+				(((l1_attr) & PTE1_NX) ? PTE2_NX : 0) | \
+				(((l1_attr) & PTE1_RO) ? PTE2_RO : 0) | \
+				(((l1_attr) & PTE1_U) ? PTE2_U : 0) | \
+				(((l1_attr) & PTE1_W) ? PTE2_W : 0))
+
+/*
+ * PTE2 descriptors creation macros.
+ */
+#define PTE2_KPT(pa)	PTE2_KERN(pa, PTE2_AP_KRW, pt_memattr)
+#define PTE2_KPT_NG(pa)	PTE2_KERN_NG(pa, PTE2_AP_KRW, pt_memattr)
+
+#define PTE2_KRW(pa)	PTE2_KERN(pa, PTE2_AP_KRW, PTE2_ATTR_NORMAL)
+#define PTE2_KRO(pa)	PTE2_KERN(pa, PTE2_AP_KR, PTE2_ATTR_NORMAL)
+
+#define PV_STATS
+#ifdef PV_STATS
+#define PV_STAT(x)	do { x ; } while (0)
+#else
+#define PV_STAT(x)	do { } while (0)
+#endif
+
+/*
+ * The boot_pt1 is used temporarily in the very early boot stage as the L1
+ * page table. We can init many things with no memory allocation thanks to
+ * its static allocation and this brings two main advantages:
+ * (1) other cores can be started very simply,
+ * (2) various boot loaders can be supported as its arguments can be processed
+ *     in virtual address space and can be moved to a safe location before
+ *     the first allocation happens.
+ * The only disadvantage is that boot_pt1 is used only in the very early boot
+ * stage. However, the table is uninitialized and so lies in bss. Therefore
+ * the kernel image size is not influenced.
+ *
+ * QQQ: In the future, maybe, boot_pt1 can be used for soft reset and
+ *      CPU suspend/resume game.
+ */
+extern pt1_entry_t boot_pt1[];
+
+vm_paddr_t base_pt1;
+pt1_entry_t *kern_pt1;
+pt2_entry_t *kern_pt2tab;
+pt2_entry_t *PT2MAP;
+
+static uint32_t ttb_flags;
+static vm_memattr_t pt_memattr;
+ttb_entry_t pmap_kern_ttb;
+
+/* XXX: use conversion function */
+#define PTE2_ATTR_NORMAL	VM_MEMATTR_DEFAULT
+#define PTE1_ATTR_NORMAL	ATTR_TO_L1(PTE2_ATTR_NORMAL)
+
+struct pmap kernel_pmap_store;
+LIST_HEAD(pmaplist, pmap);
+static struct pmaplist allpmaps;
+static struct mtx allpmaps_lock;
+
+vm_offset_t virtual_avail;	/* VA of first avail page (after kernel bss) */
+vm_offset_t virtual_end;	/* VA of last avail page (end of kernel AS) */
+
+static vm_offset_t kernel_vm_end_new;
+vm_offset_t kernel_vm_end = KERNBASE + NKPT2PG * NPT2_IN_PG * PTE1_SIZE;
+vm_offset_t vm_max_kernel_address;
+vm_paddr_t kernel_l1pa;
+
+static struct rwlock __aligned(CACHE_LINE_SIZE) pvh_global_lock;
+
+/*
+ * Data for the pv entry allocation mechanism
+ */
+static TAILQ_HEAD(pch, pv_chunk) pv_chunks = TAILQ_HEAD_INITIALIZER(pv_chunks);
+static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;
+static struct md_page *pv_table; /* XXX: is only the list in md_page used? */
+static int shpgperproc = PMAP_SHPGPERPROC;
+
+struct pv_chunk *pv_chunkbase;		/* KVA block for pv_chunks */
+int pv_maxchunks;			/* How many chunks we have KVA for */
+vm_offset_t pv_vafree;			/* freelist stored in the PTE */
+
+vm_paddr_t first_managed_pa;
+#define	pa_to_pvh(pa)	(&pv_table[pte1_index(pa - first_managed_pa)])
+
+/*
+ * All those kernel PT submaps that BSD is so fond of
+ */
+struct sysmaps {
+	struct mtx	lock;
+	pt2_entry_t	*CMAP1;
+	pt2_entry_t	*CMAP2;
+	pt2_entry_t	*CMAP3;
+	caddr_t		CADDR1;
+	caddr_t		CADDR2;
+	caddr_t		CADDR3;
+};
+static struct sysmaps sysmaps_pcpu[MAXCPU];
+static pt2_entry_t *CMAP3;
+static caddr_t CADDR3;
+caddr_t _tmppt = 0;
+
+struct msgbuf *msgbufp = 0; /* XXX: move it to machdep.c */
+
+/*
+ * Crashdump maps.
+ */
+static caddr_t crashdumpmap;
+
+static pt2_entry_t *PMAP1 = 0, *PMAP2;
+static pt2_entry_t *PADDR1 = 0, *PADDR2;
+#ifdef DDB
+static pt2_entry_t *PMAP3;
+static pt2_entry_t *PADDR3;
+static int PMAP3cpu __unused; /* for SMP only */
+#endif
+#ifdef SMP
+static int PMAP1cpu;
+static int PMAP1changedcpu;
+SYSCTL_INT(_debug, OID_AUTO, PMAP1changedcpu, CTLFLAG_RD,
+    &PMAP1changedcpu, 0,
+    "Number of times pmap_pte2_quick changed CPU with same PMAP1");
+#endif
+static int PMAP1changed;
+SYSCTL_INT(_debug, OID_AUTO, PMAP1changed, CTLFLAG_RD,
+    &PMAP1changed, 0,
+    "Number of times pmap_pte2_quick changed PMAP1");
+static int PMAP1unchanged;
+SYSCTL_INT(_debug, OID_AUTO, PMAP1unchanged, CTLFLAG_RD,
+    &PMAP1unchanged, 0,
+    "Number of times pmap_pte2_quick didn't change PMAP1");
+static struct mtx PMAP2mutex;
+
+static __inline void pt2_wirecount_init(vm_page_t m);
+static boolean_t pmap_demote_pte1(pmap_t pmap, pt1_entry_t *pte1p,
+    vm_offset_t va);
+void cache_icache_sync_fresh(vm_offset_t va, vm_paddr_t pa, vm_size_t size);
+
+/*
+ * Function to set the debug level of the pmap code.
+ */
+#ifdef PMAP_DEBUG
+void
+pmap_debug(int level)
+{
+
+	pmap_debug_level = level;
+	dprintf("pmap_debug: level=%d\n", pmap_debug_level);
+}
+#endif /* PMAP_DEBUG */
+
+/*
+ * This table must correspond with the memory attribute configuration in vm.h.
+ * First entry is used for normal system mapping.
+ *
+ * Device memory is always marked as shared.
+ * Normal memory is shared only in SMP.
+ * Not-outer-shareable bits are not used yet.
+ * Class 6 cannot be used on ARM11.
+ */
+#define TEXDEF_TYPE_SHIFT	0
+#define TEXDEF_TYPE_MASK	0x3
+#define TEXDEF_INNER_SHIFT	2
+#define TEXDEF_INNER_MASK	0x3
+#define TEXDEF_OUTER_SHIFT	4
+#define TEXDEF_OUTER_MASK	0x3
+#define TEXDEF_NOS_SHIFT	6
+#define TEXDEF_NOS_MASK		0x1
+
+#define TEX(t, i, o, s)		(((t) << TEXDEF_TYPE_SHIFT) |	\
+				((i) << TEXDEF_INNER_SHIFT) |	\
+				((o) << TEXDEF_OUTER_SHIFT) |	\
+				((s) << TEXDEF_NOS_SHIFT))
+
+static uint32_t tex_class[8] = {
+/*	    type      inner cache   outer cache */
+	TEX(PRRR_MEM, NMRR_WB_WA,   NMRR_WB_WA, 0),  /* 0 - ATTR_WB_WA   */
+	TEX(PRRR_MEM, NMRR_NC,	    NMRR_NC,    0),  /* 1 - ATTR_NOCACHE */
+	TEX(PRRR_DEV, NMRR_NC,	    NMRR_NC,    0),  /* 2 - ATTR_DEVICE  */
+	TEX(PRRR_SO,  NMRR_NC,	    NMRR_NC,    0),  /* 3 - ATTR_SO      */
+	TEX(PRRR_MEM, NMRR_NC,	    NMRR_NC,    0),  /* 4 - NOT USED YET */
+	TEX(PRRR_MEM, NMRR_NC,	    NMRR_NC,    0),  /* 5 - NOT USED YET */
+	TEX(PRRR_MEM, NMRR_NC,	    NMRR_NC,    0),  /* 6 - NOT USED YET */
+	TEX(PRRR_MEM, NMRR_NC,	    NMRR_NC,    0),  /* 7 - NOT USED YET */
+};
+#undef TEX
+
+/*
+ * Convert TEX definition entry to TTB flags.
+ */
+static uint32_t
+encode_ttb_flags(int idx)
+{
+	uint32_t inner, outer, nos, reg;
+
+	inner = (tex_class[idx] >> TEXDEF_INNER_SHIFT) &
+	    TEXDEF_INNER_MASK;
+	outer = (tex_class[idx] >> TEXDEF_OUTER_SHIFT) &
+	    TEXDEF_OUTER_MASK;
+	nos = (tex_class[idx] >> TEXDEF_NOS_SHIFT) &
+	    TEXDEF_NOS_MASK;
+
+	reg = nos << 5;
+	reg |= outer << 3;
+	if (cpuinfo.coherent_walk)
+		reg |= (inner & 0x1) << 6;
+	reg |= (inner & 0x2) >> 1;
+#ifdef SMP
+	reg |= 1 << 1;
+#endif
+	return (reg);
+}
+
+/*
+ * Set TEX remapping registers in current CPU.
+ */
+void
+pmap_set_tex(void)
+{
+	uint32_t prrr, nmrr;
+	uint32_t type, inner, outer, nos;
+	int i;
+
+#ifdef PMAP_PTE_NOCACHE
+	/* XXX fixme */
+	if (cpuinfo.coherent_walk) {
+		pt_memattr = VM_MEMATTR_WB_WA;
+		ttb_flags = encode_ttb_flags(0);
+	} else {
+		pt_memattr = VM_MEMATTR_NOCACHE;
+		ttb_flags = encode_ttb_flags(1);
+	}
+#else
+	pt_memattr = VM_MEMATTR_WB_WA;
+	ttb_flags = encode_ttb_flags(0);
+#endif
+
+	prrr = 0;
+	nmrr = 0;
+
+	/* Build remapping register from TEX classes. */
+	for (i = 0; i < 8; i++) {
+		type = (tex_class[i] >> TEXDEF_TYPE_SHIFT) &
+		    TEXDEF_TYPE_MASK;
+		inner = (tex_class[i] >> TEXDEF_INNER_SHIFT) &
+		    TEXDEF_INNER_MASK;
+		outer = (tex_class[i] >> TEXDEF_OUTER_SHIFT) &
+		    TEXDEF_OUTER_MASK;
+		nos = (tex_class[i] >> TEXDEF_NOS_SHIFT) &
+		    TEXDEF_NOS_MASK;
+
+		prrr |= type << (i * 2);
+		prrr |= nos << (i + 24);
+		nmrr |= inner << (i * 2);
+		nmrr |= outer << (i * 2 + 16);
+	}
+	/* Add shareable bits for device memory. */
+	prrr |= PRRR_DS0 | PRRR_DS1;
+
+	/* Add shareable bits for normal memory in SMP case. */
+#ifdef SMP
+	prrr |= PRRR_NS1;
+#endif
+	cp15_prrr_set(prrr);
+	cp15_nmrr_set(nmrr);
+
+	/* Caches are disabled, so full TLB flush should be enough. */
+	tlb_flush_all_local();
+}
+
+/*
+ * KERNBASE must be a multiple of NPT2_IN_PG * PTE1_SIZE. In other words,
+ * KERNBASE is mapped by the first L2 page table in an L2 page table page.
+ * It meets the same constraint due to PT2MAP being placed just under
+ * KERNBASE.
+ */
+CTASSERT((KERNBASE & (NPT2_IN_PG * PTE1_SIZE - 1)) == 0);
+CTASSERT((KERNBASE - VM_MAXUSER_ADDRESS) >= PT2MAP_SIZE);
+
+/*
+ * In crazy dreams, PAGE_SIZE could be a multiple of PTE2_SIZE in general.
+ * For now, anyhow, the following check must be fulfilled.
+ */
+CTASSERT(PAGE_SIZE == PTE2_SIZE);
+/*
+ * We don't want to mess up MI code with all MMU and PMAP definitions,
+ * so some things, which depend on other ones, are defined independently.
+ * Now, it is time to check that we don't screw up something.
+ */
+CTASSERT(PDRSHIFT == PTE1_SHIFT);
+/*
+ * Check L1 and L2 page table entries definitions consistency.
+ */
+CTASSERT(NB_IN_PT1 == (sizeof(pt1_entry_t) * NPTE1_IN_PT1));
+CTASSERT(NB_IN_PT2 == (sizeof(pt2_entry_t) * NPTE2_IN_PT2));
+/*
+ * Check L2 page tables page consistency.
+ */
+CTASSERT(PAGE_SIZE == (NPT2_IN_PG * NB_IN_PT2));
+CTASSERT((1 << PT2PG_SHIFT) == NPT2_IN_PG);
+/*
+ * Check PT2TAB consistency.
+ * PT2TAB_ENTRIES is defined as a division of NPTE1_IN_PT1 by NPT2_IN_PG.
+ * This should be done without remainder.
+ */
+CTASSERT(NPTE1_IN_PT1 == (PT2TAB_ENTRIES * NPT2_IN_PG));
+
+/*
+ * A PT2MAP magic.
+ *
+ * All level 2 page tables (PT2s) are mapped continuously and accordingly
+ * into PT2MAP address space. As PT2 size is less than PAGE_SIZE, this can
+ * be done only if PAGE_SIZE is a multiple of PT2 size. All PT2s in one page
+ * must be used together, but not necessarily at once. The first PT2 in a
+ * page must map things on a correctly aligned address and the others must
+ * follow in the right order.
+ */
+#define NB_IN_PT2TAB	(PT2TAB_ENTRIES * sizeof(pt2_entry_t))
+#define NPT2_IN_PT2TAB	(NB_IN_PT2TAB / NB_IN_PT2)
+#define NPG_IN_PT2TAB	(NB_IN_PT2TAB / PAGE_SIZE)
+
+/*
+ * Check PT2TAB consistency.
+ * NPT2_IN_PT2TAB is defined as a division of NB_IN_PT2TAB by NB_IN_PT2.
+ * NPG_IN_PT2TAB is defined as a division of NB_IN_PT2TAB by PAGE_SIZE.
+ * Both should be done without remainder.
+ */
+CTASSERT(NB_IN_PT2TAB == (NPT2_IN_PT2TAB * NB_IN_PT2));
+CTASSERT(NB_IN_PT2TAB == (NPG_IN_PT2TAB * PAGE_SIZE));
+/*
+ * The implementation was made general, however, with the assumption
+ * below in mind. In case of another value of NPG_IN_PT2TAB,
+ * the code should be once more rechecked.
+ */
+CTASSERT(NPG_IN_PT2TAB == 1);
+
+/*
+ * Get offset of PT2 in a page
+ * associated with given PT1 index.
+ */
+static __inline u_int
+page_pt2off(u_int pt1_idx)
+{
+
+	return ((pt1_idx & PT2PG_MASK) * NB_IN_PT2);
+}
+
+/*
+ * Get physical address of PT2
+ * associated with given PT2s page and PT1 index.
+ */
+static __inline vm_paddr_t
+page_pt2pa(vm_paddr_t pgpa, u_int pt1_idx)
+{
+
+	return (pgpa + page_pt2off(pt1_idx));
+}
+
+/*
+ * Get first entry of PT2
+ * associated with given PT2s page and PT1 index.

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***
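The diff is truncated above; a few standalone sketches of the mechanisms it
introduces follow. These are editorial illustrations written against the code
shown, not part of the commit.

First, the per-CPU exception-stack arithmetic used by the new set_stackptrs()
in machdep.c: the stacks for all CPUs in a given mode are carved out of one
contiguous block, and since ARM stacks grow down, CPU n's stack pointer is set
to the top of its slice, i.e. base + size * (n + 1). A minimal sketch; the
constants and base address are illustrative values, not the kernel's:

#include <stdio.h>

#define PAGE_SIZE	4096u
#define IRQ_STACK_SIZE	1u	/* pages per CPU; illustrative value */

int
main(void)
{
	unsigned irqstack = 0x100000u;	/* pretend base VA of the block */
	int cpu;

	/* CPU n pushes down from the end of its slice, hence (cpu + 1). */
	for (cpu = 0; cpu < 4; cpu++)
		printf("cpu %d: IRQ-mode sp top = 0x%x\n", cpu,
		    irqstack + (IRQ_STACK_SIZE * PAGE_SIZE) * (cpu + 1));
	return (0);
}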
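Second, the PTE2_PROMOTE mask encodes the rule stated in the comment above its
definition: a 1MB PTE1 mapping may replace the 256 4KB PTE2 mappings under it
only when those mappings are physically contiguous and agree in every field
the mask covers. A conceptual sketch of that check; the type, the mask value,
and the NPTE2_PER_PTE1 name are stand-ins, not the kernel's definitions:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t pt2_entry_t;

#define NPTE2_PER_PTE1	256u		/* 4 KB PTE2s per 1 MB PTE1 */
#define PTE2_FRAME	0xfffff000u	/* page frame bits (assumed) */
#define PTE2_PROMOTE	0x00000fffu	/* stand-in for the real mask */

/*
 * Promotion is legal when all 256 PTE2s map consecutive 4 KB frames and
 * their PTE2_PROMOTE fields (valid, accessed, shared, ...) are identical.
 */
bool
can_promote(const pt2_entry_t pt2[NPTE2_PER_PTE1])
{
	uint32_t attrs = pt2[0] & PTE2_PROMOTE;
	uint32_t pa = pt2[0] & PTE2_FRAME;
	unsigned i;

	for (i = 1; i < NPTE2_PER_PTE1; i++) {
		if ((pt2[i] & PTE2_PROMOTE) != attrs)
			return (false);
		if ((pt2[i] & PTE2_FRAME) != pa + i * 4096u)
			return (false);
	}
	return (true);
}

int
main(void)
{
	pt2_entry_t pt2[NPTE2_PER_PTE1];
	unsigned i;

	/* Build 256 contiguous mappings starting at 1 MB, equal attrs. */
	for (i = 0; i < NPTE2_PER_PTE1; i++)
		pt2[i] = (0x00100000u + i * 4096u) | 0x002u;
	printf("promotable: %d\n", can_promote(pt2));
	return (0);
}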
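Third, pmap_set_tex() packs the eight tex_class[] entries into PRRR (memory
type, two bits per class, with the NOS bits at positions 24..31) and NMRR
(inner cacheability in the low half, outer in the high half). A standalone
sketch of the packing loop; the PRRR/NMRR field encodings below follow my
reading of the ARM architecture manual and are assumptions here, not values
taken from cpu-v6.h:

#include <stdint.h>
#include <stdio.h>

/* Assumed architectural encodings. */
#define PRRR_SO		0u	/* strongly ordered */
#define PRRR_DEV	1u	/* device */
#define PRRR_MEM	2u	/* normal memory */
#define NMRR_NC		0u	/* non-cacheable */
#define NMRR_WB_WA	1u	/* write-back, write-allocate */

struct texdef {
	uint32_t type, inner, outer, nos;
};

int
main(void)
{
	/* Mirrors the first four tex_class[] entries; the unused classes
	 * are left zeroed here for brevity. */
	struct texdef tc[8] = {
		{ PRRR_MEM, NMRR_WB_WA, NMRR_WB_WA, 0 },	/* WB_WA */
		{ PRRR_MEM, NMRR_NC, NMRR_NC, 0 },		/* NOCACHE */
		{ PRRR_DEV, NMRR_NC, NMRR_NC, 0 },		/* DEVICE */
		{ PRRR_SO, NMRR_NC, NMRR_NC, 0 },		/* SO */
	};
	uint32_t prrr = 0, nmrr = 0;
	int i;

	for (i = 0; i < 8; i++) {
		prrr |= tc[i].type << (i * 2);	/* 2 type bits per class */
		prrr |= tc[i].nos << (i + 24);	/* NOS bits at 24..31 */
		nmrr |= tc[i].inner << (i * 2);		/* bits 0..15 */
		nmrr |= tc[i].outer << (i * 2 + 16);	/* bits 16..31 */
	}
	printf("PRRR = 0x%08x, NMRR = 0x%08x\n", prrr, nmrr);
	return (0);
}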
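Finally, the PT2-in-page arithmetic behind page_pt2off()/page_pt2pa(), where
the diff cuts off: with 1 KB L2 tables packed four to a 4 KB page, four
consecutive L1 indices share one PT2s page at offsets 0, 1K, 2K and 3K. The
constants here are the usual ARMv6 short-descriptor values, stated as
assumptions rather than read from pte-v6.h:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE	4096u	/* 4 KB VM page */
#define NB_IN_PT2	1024u	/* L2 table: 256 entries * 4 bytes */
#define NPT2_IN_PG	(PAGE_SIZE / NB_IN_PT2)		/* 4 PT2s per page */
#define PT2PG_MASK	(NPT2_IN_PG - 1)		/* 0x3 */

/* Offset of the PT2 for a given L1 index within its PT2s page. */
static uint32_t
page_pt2off(uint32_t pt1_idx)
{

	return ((pt1_idx & PT2PG_MASK) * NB_IN_PT2);
}

int
main(void)
{
	uint32_t idx;

	/* L1 indices 0..3 share one PT2s page; index 4 wraps to offset 0. */
	for (idx = 0; idx < 6; idx++)
		printf("pt1_idx %u -> offset %u\n", (unsigned)idx,
		    (unsigned)page_pt2off(idx));
	return (0);
}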