Module Name:    src
Committed By:   matt
Date:           Mon Feb 13 18:20:26 UTC 2012

Modified Files:
        src/sys/arch/mips/include [matt-nb5-mips64]: cpu.h locore.h
        src/sys/arch/mips/mips [matt-nb5-mips64]: bus_dma.c mem.c
            mips_machdep.c pmap.c pmap_segtab.c vm_machdep.c

Log Message:
Add mm_md_direct_mapped_virt (inverse of mm_md_direct_mapped_phys).  Add a
third argument, vsize_t *, which, if not NULL, returns the amount of virtual
space left in that direct mapped segment.
Get rid of most of the individual direct_mapped asserts and use the above
routines instead.
Improve kernel core dump code.


To generate a diff of this commit:
cvs rdiff -u -r1.90.16.42 -r1.90.16.43 src/sys/arch/mips/include/cpu.h
cvs rdiff -u -r1.78.36.1.2.33 -r1.78.36.1.2.34 \
    src/sys/arch/mips/include/locore.h
cvs rdiff -u -r1.22.16.21 -r1.22.16.22 src/sys/arch/mips/mips/bus_dma.c
cvs rdiff -u -r1.35.38.10 -r1.35.38.11 src/sys/arch/mips/mips/mem.c
cvs rdiff -u -r1.205.4.1.2.1.2.64 -r1.205.4.1.2.1.2.65 \
    src/sys/arch/mips/mips/mips_machdep.c
cvs rdiff -u -r1.179.16.40 -r1.179.16.41 src/sys/arch/mips/mips/pmap.c
cvs rdiff -u -r1.1.2.13 -r1.1.2.14 src/sys/arch/mips/mips/pmap_segtab.c
cvs rdiff -u -r1.121.6.1.2.26 -r1.121.6.1.2.27 \
    src/sys/arch/mips/mips/vm_machdep.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/arch/mips/include/cpu.h
diff -u src/sys/arch/mips/include/cpu.h:1.90.16.42 src/sys/arch/mips/include/cpu.h:1.90.16.43
--- src/sys/arch/mips/include/cpu.h:1.90.16.42	Mon Feb 13 08:13:41 2012
+++ src/sys/arch/mips/include/cpu.h	Mon Feb 13 18:20:25 2012
@@ -637,7 +637,8 @@ void	fpusave_cpu(struct cpu_info *);
 void	dumpsys(void);
 int	savectx(struct pcb *);
 void	cpu_identify(device_t, const char *);
-bool	mm_md_direct_mapped_phys(paddr_t, vaddr_t *);
+bool	mm_md_direct_mapped_phys(paddr_t, vaddr_t *, vsize_t *);
+bool	mm_md_direct_mapped_virt(vaddr_t, paddr_t *, vsize_t *);
 
 /* locore*.S */
 int	badaddr(void *, size_t);

Index: src/sys/arch/mips/include/locore.h
diff -u src/sys/arch/mips/include/locore.h:1.78.36.1.2.33 src/sys/arch/mips/include/locore.h:1.78.36.1.2.34
--- src/sys/arch/mips/include/locore.h:1.78.36.1.2.33	Mon Feb 13 08:13:41 2012
+++ src/sys/arch/mips/include/locore.h	Mon Feb 13 18:20:25 2012
@@ -32,6 +32,8 @@
 #include "opt_cputype.h"
 #endif
 
+#include <sys/kcore.h>
+
 #include <mips/mutex.h>
 #include <mips/cpuregs.h>
 #include <mips/reg.h>
@@ -417,6 +419,19 @@ void	mips_page_physload(vaddr_t, vaddr_t
 	    const struct phys_ram_seg *, size_t,
 	    const struct mips_vmfreelist *, size_t);
 
+paddr_t	kvtophys(vaddr_t);
+
+extern struct phys_ram_seg mem_clusters[];
+extern u_int mem_cluster_cnt;
+
+#ifndef _LP64
+/*
+ * Helper routines for kernel coredumps.
+ */
+bool	mips_kcore_window_vtophys(vaddr_t, paddr_t *);
+vaddr_t	mips_kcore_window_map(paddr_t, vsize_t *);
+#endif
+
 
 /*
  * CPU identification, from PRID register.

Index: src/sys/arch/mips/mips/bus_dma.c
diff -u src/sys/arch/mips/mips/bus_dma.c:1.22.16.21 src/sys/arch/mips/mips/bus_dma.c:1.22.16.22
--- src/sys/arch/mips/mips/bus_dma.c:1.22.16.21	Tue Dec  6 17:37:55 2011
+++ src/sys/arch/mips/mips/bus_dma.c	Mon Feb 13 18:20:25 2012
@@ -1009,18 +1009,22 @@ _bus_dmamem_map(bus_dma_tag_t t, bus_dma
 		return 0;
 	}
 #else
-	if ((nsegs == 1) && (segs[0].ds_addr < MIPS_PHYS_MASK)) {
-		if (((mips_options.mips_cpu_flags & CPU_MIPS_D_CACHE_COHERENT) == 0)
-		&&  (flags & BUS_DMA_COHERENT))
+	if (nsegs == 1) {
+		vsize_t vs;
+		if (!(mips_options.mips_cpu_flags & CPU_MIPS_D_CACHE_COHERENT)
+		    && segs[0].ds_addr < MIPS_PHYS_MASK
+		    && segs[0].ds_addr + segs[0].ds_len <= MIPS_PHYS_MASK + 1
+		    && (flags & BUS_DMA_COHERENT)) {
 			*kvap = (void *)MIPS_PHYS_TO_KSEG1(segs[0].ds_addr);
-#ifdef ENABLE_MIPS_KSEGX
-		else if (mips_ksegx_start < segs[0].ds_addr
-		    && segs[0].ds_addr < mips_ksegx_start + VM_KSEGX_SIZE)
-			*kvap = (void *)(vaddr_t)(VM_KSEGX_ADDRESS + segs[0].ds_addr);
-#endif
-		else
-			*kvap = (void *)MIPS_PHYS_TO_KSEG0(segs[0].ds_addr);
-		return (0);
+			return (0);
+		}
+		if (((mips_options.mips_cpu_flags & CPU_MIPS_D_CACHE_COHERENT)
+		     || (flags & BUS_DMA_COHERENT) == 0)
+		    && mm_md_direct_mapped_phys(segs[0].ds_addr, &va, &vs)
+		    && segs[0].ds_len <= vs) {
+			*kvap = (void *)va;
+			return (0);
+		}
 	}
 #endif	/* _LP64 */
 
@@ -1068,17 +1072,9 @@ _bus_dmamem_unmap(bus_dma_tag_t t, void 
 	 * Nothing to do if we mapped it with KSEG0 or KSEG1 (i.e.
 	 * not in KSEG2 or XKSEG).
 	 */
-	if (MIPS_KSEG0_P(kva) || MIPS_KSEG1_P(kva))
-		return;
-#ifdef ENABLE_MIPS_KSEGX
-	if (VM_KSEGX_ADDRESS <= (vaddr_t)kva
-	    && (vaddr_t)kva < VM_KSEGX_ADDRESS + VM_KSEGX_SIZE)
+	if (mm_md_direct_mapped_virt((vaddr_t)kva, NULL, NULL)
+	    || MIPS_KSEG1_P(kva))
 		return;
-#endif
-#ifdef _LP64
-	if (MIPS_XKPHYS_P((vaddr_t)kva))
-		return;
-#endif
 
 	size = round_page(size);
 	pmap_remove(pmap_kernel(), (vaddr_t)kva, (vaddr_t)kva + size);

Index: src/sys/arch/mips/mips/mem.c
diff -u src/sys/arch/mips/mips/mem.c:1.35.38.10 src/sys/arch/mips/mips/mem.c:1.35.38.11
--- src/sys/arch/mips/mips/mem.c:1.35.38.10	Tue Jan 10 18:36:58 2012
+++ src/sys/arch/mips/mips/mem.c	Mon Feb 13 18:20:25 2012
@@ -1,4 +1,4 @@
-/*	$NetBSD: mem.c,v 1.35.38.10 2012/01/10 18:36:58 matt Exp $	*/
+/*	mem.c,v 1.35.38.10 2012/01/10 18:36:58 matt Exp	*/
 
 /*
  * Copyright (c) 1988 University of Utah.
@@ -44,7 +44,7 @@
 #include "opt_mips_cache.h"
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: mem.c,v 1.35.38.10 2012/01/10 18:36:58 matt Exp $");
+__KERNEL_RCSID(0, "mem.c,v 1.35.38.10 2012/01/10 18:36:58 matt Exp");
 
 #include <sys/param.h>
 #include <sys/conf.h>
@@ -85,8 +85,10 @@ const struct cdevsw mem_ultrix_cdevsw = 
 int
 mmrw(dev_t dev, struct uio *uio, int flags)
 {
-	vaddr_t v;
-	int c;
+	paddr_t pa;
+	vsize_t vs;
+	vaddr_t va;
+	size_t c;
 	struct iovec *iov;
 	int error = 0;
 
@@ -102,79 +104,70 @@ mmrw(dev_t dev, struct uio *uio, int fla
 		switch (minor(dev)) {
 
 		case DEV_MEM:
-			v = uio->uio_offset;
+			pa = uio->uio_offset;
 			c = iov->iov_len;
 			/*
 			 * XXX Broken; assumes contiguous physical memory.
 			 */
-#ifdef _LP64
-			if (v + c > ctob(physmem))
+			if (!mm_md_direct_mapped_phys(pa, &va, &vs))
+				return EFAULT;
+			if (vs < c)
+				c = vs;
+			if (pa + c > ctob(physmem))
 				return (EFAULT);
-			v = MIPS_PHYS_TO_XKPHYS_CACHED(v);
-#else
-			if (MIPS_KSEG0_P(v + c - 1)) {
-				v = MIPS_PHYS_TO_KSEG0(v);
-#ifdef ENABLE_MIPS_KSEGX
-			} else if (mips_ksegx_start <= v
-			    && v + c <= mips_ksegx_start + VM_KSEGX_SIZE) {
-				v += VM_KSEGX_ADDRESS - mips_ksegx_start;
-#endif
-			} else
-				return (EFAULT);
-#endif
-			error = uiomove((void *)v, c, uio);
+
+			error = uiomove((void *)va, c, uio);
 #if defined(MIPS3_PLUS)
 			if (MIPS_CACHE_VIRTUAL_ALIAS)
-				mips_dcache_wbinv_range(v, c);
+				mips_dcache_wbinv_range(va, c);
 #endif
 			continue;
 
 		case DEV_KMEM:
-			v = uio->uio_offset;
+			va = uio->uio_offset;
 			c = min(iov->iov_len, MAXPHYS);
 #ifdef _LP64
-			if (v < MIPS_XKPHYS_START) {
-				return (EFAULT);
-			} else if (MIPS_XKPHYS_P(v)
-			    && v > MIPS_PHYS_TO_XKPHYS_CACHED(mips_avail_end +
-					mips_round_page(MSGBUFSIZE) - c)) {
+			if (va < MIPS_XKPHYS_START) {
 				return (EFAULT);
-			} else if (MIPS_XKSEG_P(v)
+			} else if (mm_md_direct_mapped_virt(va, &pa, &vs)) {
+				if (c > vs)
+					c = vs;
+				if (pa > mips_avail_end
+				    + mips_round_page(MSGBUFSIZE) - c)) {
+					return (EFAULT);
+			} else if (MIPS_XKSEG_P(va)
 			    && v < MIPS_KSEG0_START
-			    && !uvm_kernacc((void *)v, c,
+			    && !uvm_kernacc((void *)va, c,
 			    uio->uio_rw == UIO_READ ? B_READ : B_WRITE)) {
 				return (EFAULT);
-			} else if (MIPS_KSEG1_P(v) || MIPS_KSEG2_P(v)) {
+			} else if (MIPS_KSEG1_P(va) || MIPS_KSEG2_P(va)) {
 				return (EFAULT);
 			}
 #else
-			if (v < MIPS_KSEG0_START)
+			if (va < MIPS_KSEG0_START)
 				return (EFAULT);
-			if (MIPS_KSEG0_P(v + c - 1)) {
+			if (mm_md_direct_mapped_virt(va, &pa, &vs)) {
+				if (c > vs)
+					c = vs;
 				/*
 				 * If all of memory is in KSEG0, make sure we
 				 * don't go beyond its limit.  (mips_avail_end
 				 * may be beyond the end of KSEG0).
 				 */
-				if (MIPS_KSEG0_TO_PHYS(v) >= mips_avail_end
+				if (pa >= mips_avail_end
 				    + mips_round_page(MSGBUFSIZE) - c)
 					return (EFAULT);
-#ifdef ENABLE_MIPS_KSEGX
-			} else if (VM_KSEGX_ADDRESS <= v
-			    && v + c <= VM_KSEGX_ADDRESS + VM_KSEGX_SIZE) {
-				/* nothing */
-#endif
-			} else if (v < MIPS_KSEG2_START
-				   || !uvm_kernacc((void *)v, c,
+			} else if (va < MIPS_KSEG2_START
+				   || !uvm_kernacc((void *)va, c,
 					    uio->uio_rw == UIO_READ
 						? B_READ
 						: B_WRITE))
 				return (EFAULT);
 #endif
-			error = uiomove((void *)v, c, uio);
+			error = uiomove((void *)va, c, uio);
 #if defined(MIPS3_PLUS)
 			if (MIPS_CACHE_VIRTUAL_ALIAS)
-				mips_dcache_wbinv_range(v, c);
+				mips_dcache_wbinv_range(va, c);
 #endif
 			continue;
 

Index: src/sys/arch/mips/mips/mips_machdep.c
diff -u src/sys/arch/mips/mips/mips_machdep.c:1.205.4.1.2.1.2.64 src/sys/arch/mips/mips/mips_machdep.c:1.205.4.1.2.1.2.65
--- src/sys/arch/mips/mips/mips_machdep.c:1.205.4.1.2.1.2.64	Mon Feb 13 08:13:42 2012
+++ src/sys/arch/mips/mips/mips_machdep.c	Mon Feb 13 18:20:25 2012
@@ -259,6 +259,8 @@ extern const mips_locore_jumpvec_t mips6
 void std_splsw_test(void);
 #endif
 
+static void mips_kcore_window_unmap(void);
+
 CTASSERT(CPU_ARCH_MIPS64R2 / CPU_ARCH_MIPS64 == CPU_ARCH_MIPS32R2 / CPU_ARCH_MIPS32);
 
 mips_locore_jumpvec_t mips_locore_jumpvec;
@@ -1766,22 +1768,13 @@ SYSCTL_SETUP(sysctl_machdep_setup, "sysc
 }
 
 /*
- * These are imported from platform-specific code.
- * XXX Should be declared in a header file.
- */
-extern phys_ram_seg_t mem_clusters[];
-extern int mem_cluster_cnt;
-
-/*
  * These variables are needed by /sbin/savecore.
  */
-u_int32_t dumpmag = 0x8fca0101;	/* magic number */
+uint32_t dumpmag = 0x8fca0101;	/* magic number */
 int	dumpsize = 0;		/* pages */
-long	dumplo = 0;		/* blocks */
+long	dumplo = -1;		/* blocks */
 
-#if 0
 struct pcb dumppcb;
-#endif
 
 /*
  * cpu_dumpsize: calculate size of machine-dependent kernel core dump headers.
@@ -1789,11 +1782,13 @@ struct pcb dumppcb;
 int
 cpu_dumpsize(void)
 {
-	int size;
+	size_t size = 0;
+
+	size += ALIGN(sizeof(kcore_seg_t));
+	size += ALIGN(sizeof(cpu_kcore_hdr_t));
+	size += ALIGN(mem_cluster_cnt * sizeof(phys_ram_seg_t));
 
-	size = ALIGN(sizeof(kcore_seg_t)) + ALIGN(sizeof(cpu_kcore_hdr_t)) +
-	    ALIGN(mem_cluster_cnt * sizeof(phys_ram_seg_t));
-	if (roundup(size, dbtob(1)) != dbtob(1))
+	if (size > dbtob(1))
 		return (-1);
 
 	return (1);
@@ -1805,11 +1800,16 @@ cpu_dumpsize(void)
 u_long
 cpu_dump_mempagecnt(void)
 {
-	u_long i, n;
+	u_long n;
 
 	n = 0;
-	for (i = 0; i < mem_cluster_cnt; i++)
+	for (u_int i = 0; i < mem_cluster_cnt; i++) {
 		n += atop(mem_clusters[i].size);
+		if (n >= atop(INT32_MAX)) {
+			n = atop(INT32_MAX);
+			break;
+		}
+	}
 	return (n);
 }
 
@@ -1820,11 +1820,14 @@ int
 cpu_dump(void)
 {
 	int (*dump)(dev_t, daddr_t, void *, size_t);
-	char buf[dbtob(1)];
+	uint64_t buf64[dbtob(1)/sizeof(uint64_t)];
+	uint8_t * const buf = (uint8_t *)buf64;
+	uint8_t *bp = buf;
 	kcore_seg_t *segp;
 	cpu_kcore_hdr_t *cpuhdrp;
 	phys_ram_seg_t *memsegp;
 	const struct bdevsw *bdev;
+	const size_t blocksize = dbtob(1);
 	int i;
 
 	bdev = bdevsw_lookup(dumpdev);
@@ -1834,16 +1837,15 @@ cpu_dump(void)
 	dump = bdev->d_dump;
 
 	memset(buf, 0, sizeof buf);
-	segp = (kcore_seg_t *)buf;
-	cpuhdrp = (cpu_kcore_hdr_t *)&buf[ALIGN(sizeof(*segp))];
-	memsegp = (phys_ram_seg_t *)&buf[ ALIGN(sizeof(*segp)) +
-	    ALIGN(sizeof(*cpuhdrp))];
+	segp = (kcore_seg_t *)bp;		bp += ALIGN(sizeof(*segp));
+	cpuhdrp = (cpu_kcore_hdr_t *)bp;	bp += ALIGN(sizeof(*cpuhdrp));
+	memsegp = (phys_ram_seg_t  *)bp;
 
 	/*
 	 * Generate a segment header.
 	 */
 	CORE_SETMAGIC(*segp, KCORE_MAGIC, MID_MACHINE, CORE_CPU);
-	segp->c_size = dbtob(1) - ALIGN(sizeof(*segp));
+	segp->c_size = blocksize - ALIGN(sizeof(*segp));
 
 	/*
 	 * Add the machine-dependent header info.
@@ -1879,7 +1881,7 @@ cpu_dump(void)
 		memsegp[i].size = mem_clusters[i].size;
 	}
 
-	return (dump(dumpdev, dumplo, (void *)buf, dbtob(1)));
+	return dump(dumpdev, dumplo, buf, blocksize);
 }
 
 /*
@@ -1905,7 +1907,7 @@ cpu_dumpconf(void)
 	if (bdev->d_psize == NULL)
 		goto bad;
 	nblks = (*bdev->d_psize)(dumpdev);
-	if (nblks <= ctod(1))
+	if (nblks < 0)
 		goto bad;
 
 	dumpblks = cpu_dumpsize();
@@ -1914,7 +1916,7 @@ cpu_dumpconf(void)
 	dumpblks += ctod(cpu_dump_mempagecnt());
 
 	/* If dump won't fit (incl. room for possible label), punt. */
-	if (dumpblks > (nblks - ctod(1)))
+	if (nblks <= dumpblks)
 		goto bad;
 
 	/* Put dump at end of partition */
@@ -1931,23 +1933,19 @@ cpu_dumpconf(void)
 /*
  * Dump the kernel's image to the swap partition.
  */
-#define	BYTES_PER_DUMP	PAGE_SIZE
+#define	BYTES_PER_DUMP	MAXPHYS
 
 void
 dumpsys(void)
 {
-	u_long totalbytesleft, bytes, i, n, memcl;
-	u_long maddr;
-	int psize;
+	psize_t totalbytesleft;
 	daddr_t blkno;
 	const struct bdevsw *bdev;
 	int (*dump)(dev_t, daddr_t, void *, size_t);
 	int error;
 
-#if 0
 	/* Save registers. */
 	savectx(&dumppcb);
-#endif
 
 	if (dumpdev == NODEV)
 		return;
@@ -1969,9 +1967,9 @@ dumpsys(void)
 	printf("\ndumping to dev %u,%u offset %ld\n", major(dumpdev),
 	    minor(dumpdev), dumplo);
 
-	psize = (*bdev->d_psize)(dumpdev);
+	int nblks = (*bdev->d_psize)(dumpdev);
 	printf("dump ");
-	if (psize == -1) {
+	if (nblks == -1) {
 		printf("area unavailable\n");
 		return;
 	}
@@ -1986,29 +1984,29 @@ dumpsys(void)
 	dump = bdev->d_dump;
 	error = 0;
 
-	for (memcl = 0; memcl < mem_cluster_cnt; memcl++) {
-		maddr = mem_clusters[memcl].start;
-		bytes = mem_clusters[memcl].size;
-
-		for (i = 0; i < bytes; i += n, totalbytesleft -= n) {
-			void *maddr_va;
+	int32_t meg = 1024*1024;
+	for (size_t memcl = 0; memcl < mem_cluster_cnt; memcl++) {
+		paddr_t maddr = mem_clusters[memcl].start;
+		psize_t msize = mem_clusters[memcl].size;
+		vsize_t n = 0;
+
+		for (psize_t i = 0;
+		     i < msize;
+		     i += n, totalbytesleft -= n, meg -= n) {
+			vaddr_t maddr_va;
 
 			/* Print out how many MBs we have left to go. */
-			if ((totalbytesleft % (1024*1024)) == 0)
-				printf_nolog("%ld ",
-				    totalbytesleft / (1024 * 1024));
+			if (meg <= 0) {
+				printf_nolog("%u ",
+				    (u_int)(totalbytesleft >> 20));
+				meg += 1024*1024;
+			}
 
 			/* Limit size for next transfer. */
-			n = bytes - i;
-			if (n > BYTES_PER_DUMP)
-				n = BYTES_PER_DUMP;
+			n = MIN(BYTES_PER_DUMP, msize - i);
 
-#ifdef _LP64
-			maddr_va = (void *)MIPS_PHYS_TO_XKPHYS_CACHED(maddr);
-#else
-			maddr_va = (void *)MIPS_PHYS_TO_KSEG0(maddr);
-#endif
-			error = (*dump)(dumpdev, blkno, maddr_va, n);
+			maddr_va = mips_kcore_window_map(maddr, &n);
+			error = (*dump)(dumpdev, blkno, (void*)maddr_va, n);
 			if (error)
 				goto err;
 			maddr += n;
@@ -2017,6 +2015,7 @@ dumpsys(void)
 			/* XXX should look for keystrokes, to cancel. */
 		}
 	}
+	mips_kcore_window_unmap();
 
  err:
 	switch (error) {
@@ -2053,6 +2052,98 @@ dumpsys(void)
 	delay(5000000);		/* 5 seconds */
 }
 
+#ifdef MIPS3_PLUS
+static struct mips_kcore_window_info {
+	struct tlbmask mkwi_tlb;
+	int mkwi_tlb_slot;
+} mips_kcore_window_info = {
+	.mkwi_tlb = {
+		.tlb_hi = 1024*1024*1024,
+		.tlb_mask = MIPS3_PG_SIZE_256M,
+	},
+	.mkwi_tlb_slot = -1,
+};
+#endif
+
+bool
+mips_kcore_window_vtophys(vaddr_t va, paddr_t *pap)
+{
+#ifdef MIPS3_PLUS
+	struct mips_kcore_window_info * const mkwi = &mips_kcore_window_info;
+	const vaddr_t tlb_va = mkwi->mkwi_tlb.tlb_hi & -PAGE_SIZE;
+	psize_t tlb_size = MIPS3_PG_SIZE_MASK_TO_SIZE(mkwi->mkwi_tlb.tlb_mask);
+	if (mips_pg_v(mkwi->mkwi_tlb.tlb_lo0)
+	    && tlb_va <= va && va < tlb_va + 2 * tlb_size) {
+		*pap = va - tlb_va
+		    + mips_tlbpfn_to_paddr(mkwi->mkwi_tlb.tlb_lo0);
+		return true;
+	}
+#endif
+	return false;
+}
+
+vaddr_t
+mips_kcore_window_map(paddr_t pa, vsize_t *vsp)
+{
+	vaddr_t va;
+	vsize_t vs;
+	if (mm_md_direct_mapped_phys(pa, &va, &vs)) {
+#ifndef _LP64
+		/*
+		 * Make sure the length we want to mapped doesn't cross the
+		 * direct-mapped boundary.
+		 */
+		if (*vsp > vs)
+			*vsp = vs;
+#endif
+		return va;
+	}
+#if defined(MIPS3_PLUS) && !defined(_LP64)
+	KASSERT(MIPS_HAS_R4K_MMU);
+	struct mips_kcore_window_info * const mkwi = &mips_kcore_window_info;
+	paddr_t tlb_pa = mips_tlbpfn_to_paddr(mkwi->mkwi_tlb.tlb_lo0);
+	psize_t tlb_size = MIPS3_PG_SIZE_MASK_TO_SIZE(mkwi->mkwi_tlb.tlb_mask);
+	if (!mips_pg_v(mkwi->mkwi_tlb.tlb_lo0)
+	    || pa < tlb_pa
+	    || tlb_pa + 2 * tlb_size <= pa) {
+		tlb_pa = pa & (2 * tlb_size - 1);
+		mkwi->mkwi_tlb.tlb_lo0 = mips_paddr_to_tlbpfn(tlb_pa)
+		    | MIPS3_PG_CACHED | MIPS3_PG_V | MIPS3_PG_G;
+		mkwi->mkwi_tlb.tlb_lo0 = mkwi->mkwi_tlb.tlb_lo1
+		    + mips_paddr_to_tlbpfn(tlb_size);
+
+		if (mkwi->mkwi_tlb_slot < 0) {
+			mkwi->mkwi_tlb_slot = pmap_tlb0_info.ti_wired++;
+			mips3_cp0_wired_write(pmap_tlb0_info.ti_wired);
+		}
+		tlb_write_indexed(mkwi->mkwi_tlb_slot, &mkwi->mkwi_tlb);
+	}
+	if (*vsp + pa - tlb_pa > 2 * tlb_size)
+		*vsp = tlb_pa + 2 * tlb_size - pa;
+	return (mkwi->mkwi_tlb.tlb_hi & -PAGE_SIZE) + pa - tlb_pa;
+#else
+	panic("%s: failed to map non-KSEG0 memory", __func__);
+#endif /* MIPS3_PLUS && !_LP64 */
+}
+
+void
+mips_kcore_window_unmap(void)
+{
+#if defined(MIPS3_PLUS) && !defined(_LP64)
+	struct mips_kcore_window_info * const mkwi = &mips_kcore_window_info;
+	if (mkwi->mkwi_tlb_slot >= 0) {
+		struct tlbmask tlb;
+		mkwi->mkwi_tlb.tlb_lo0 = 0;
+		mkwi->mkwi_tlb.tlb_lo1 = 0;
+		tlb.tlb_hi = mkwi->mkwi_tlb_slot << (PAGE_SHIFT | 1);
+		tlb.tlb_lo0 = 0;
+		tlb.tlb_lo1 = 0;
+		tlb.tlb_mask = 0;
+		tlb_write_indexed(mkwi->mkwi_tlb_slot, &mkwi->mkwi_tlb);
+	}
+#endif /* MIPS3_PLUS && !_LP64 */
+}
+
 void
 mips_init_msgbuf(void)
 {
@@ -2456,23 +2547,64 @@ mips_watchpoint_init(void)
 #endif
 
 bool
-mm_md_direct_mapped_phys(paddr_t pa, vaddr_t *vap)
+mm_md_direct_mapped_phys(paddr_t pa, vaddr_t *vap, vsize_t *vsp)
 {
 #ifdef _LP64
-	*vap = MIPS_PHYS_TO_XKPHYS_CACHED(pa);
+	if (vap != NULL)
+		*vap = MIPS_PHYS_TO_XKPHYS_CACHED(pa);
+	if (vsp != NULL)
+		*vsp = MIPS_XKPHYS_TO_PHYS(MIPS_XKPHYS_MASK) - pa + 1;
 	return true;
 #else
 #ifdef ENABLE_MIPS_KSEGX
 	if (mips_ksegx_start <= pa && pa < mips_ksegx_start + VM_KSEGX_SIZE) {
-		*vap = VM_KSEGX_ADDRESS + pa - mips_ksegx_start;
+		if (vap != NULL)
+			*vap = VM_KSEGX_ADDRESS + pa - mips_ksegx_start;
+		if (vsp != NULL)
+			*vsp = mips_ksegx_start + VM_KSEGX_SIZE - pa;
 		return true;
 	}
 #endif
 	if (pa <= MIPS_PHYS_MASK) {
-		*vap = MIPS_PHYS_TO_KSEG0(pa);
+		if (vap != NULL)
+			*vap = MIPS_PHYS_TO_KSEG0(pa);
+		if (vsp != NULL)
+			*vsp = MIPS_PHYS_MASK - pa + 1;
 		return true;
 	}
 	return false;
 #endif
 }
 
+bool
+mm_md_direct_mapped_virt(vaddr_t va, paddr_t *pap, vsize_t *vsp)
+{
+#ifdef _LP64
+	if (MIPS_XKPHYS_P(va)) {
+		const paddr_t pa = MIPS_XKPHYS_TO_PHYS(va);
+		if (pap != NULL)
+			*pap = pa;
+		if (vsp != NULL)
+			*vsp = MIPS_XKPHYS_TO_PHYS(MIPS_XKPHYS_MASK) - pa + 1;
+		return true;
+	}
+#endif
+#ifdef ENABLE_MIPS_KSEGX
+	if (VM_KSEGX_ADDRESS <= va && va < VM_KSEGX_ADDRESS + VM_KSEGX_SIZE) {
+		const paddr_t pa = mips_ksegx_start + va - VM_KSEGX_ADDRESS;
+		if (pap != NULL)
+			*pap = pa;
+		if (vsp != NULL)
+			*vsp = VM_KSEGX_ADDRESS + VM_KSEGX_SIZE - va;
+		return true;
+	}
+#endif
+	if (MIPS_KSEG0_P(va)) {
+		if (pap != NULL)
+			*pap = MIPS_KSEG0_TO_PHYS(va);
+		if (vsp != NULL)
+			*vsp = MIPS_KSEG1_START - va;
+		return true;
+	}
+	return false;
+}

Index: src/sys/arch/mips/mips/pmap.c
diff -u src/sys/arch/mips/mips/pmap.c:1.179.16.40 src/sys/arch/mips/mips/pmap.c:1.179.16.41
--- src/sys/arch/mips/mips/pmap.c:1.179.16.40	Sun Feb 12 07:48:37 2012
+++ src/sys/arch/mips/mips/pmap.c	Mon Feb 13 18:20:26 2012
@@ -364,18 +364,9 @@ pmap_map_ephemeral_page(struct vm_page *
 	const paddr_t pa = VM_PAGE_TO_PHYS(pg);
 	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
 	pv_entry_t pv = &md->pvh_first;
-
-#ifdef _LP64
-	vaddr_t va = MIPS_PHYS_TO_XKPHYS_CACHED(pa);
-#else
 	vaddr_t va;
-	if (pa <= MIPS_PHYS_MASK) {
-		va = MIPS_PHYS_TO_KSEG0(pa);
-#ifdef ENABLE_MIPS_KSEGX
-	} else if (mips_ksegx_start <= pa && pa < mips_ksegx_start + VM_KSEGX_SIZE) {
-		va = VM_KSEGX_ADDRESS + pa - mips_ksegx_start;
-#endif
-	} else {
+
+	if (!mm_md_direct_mapped_phys(pa, &va, NULL)) {
 		KASSERT(pmap_initialized);
 		/*
 		 * Make sure to use a congruent mapping to the last mapped
@@ -389,7 +380,6 @@ pmap_map_ephemeral_page(struct vm_page *
 		*old_pt_entry_p = *kvtopte(va);
 		pmap_kenter_pa(va, pa, prot);
 	}
-#endif /* _LP64 */
 	if (MIPS_CACHE_VIRTUAL_ALIAS) {
 		/*
 		 * If we are forced to use an incompatible alias, flush the
@@ -1342,11 +1332,7 @@ pmap_page_cache(struct vm_page *pg, bool
 		uint32_t pt_entry;
 
 		KASSERT(pmap != NULL);
-		KASSERT(!MIPS_KSEG0_P(va));
-		KASSERT(!MIPS_KSEG1_P(va));
-#ifdef _LP64
-		KASSERT(!MIPS_XKPHYS_P(va));
-#endif
+		KASSERT(!mm_md_direct_mapped_virt(va, NULL, NULL));
 		if (pmap == pmap_kernel()) {
 			/*
 			 * Change entries in kernel pmap.
@@ -1829,22 +1815,8 @@ pmap_extract(pmap_t pmap, vaddr_t va, pa
 		printf("pmap_extract(%p, %#"PRIxVADDR") -> ", pmap, va);
 #endif
 	if (pmap == pmap_kernel()) {
-		if (MIPS_KSEG0_P(va)) {
-			pa = MIPS_KSEG0_TO_PHYS(va);
+		if (mm_md_direct_mapped_virt(va, &pa, NULL))
 			goto done;
-		}
-#ifdef _LP64
-		if (MIPS_XKPHYS_P(va)) {
-			pa = MIPS_XKPHYS_TO_PHYS(va);
-			goto done;
-		}
-#elif defined(ENABLE_MIPS_KSEGX)
-		if (VM_KSEGX_ADDRESS <= va
-		    && va < VM_KSEGX_ADDRESS + VM_KSEGX_SIZE) {
-			pa = mips_ksegx_start + va - VM_KSEGX_ADDRESS;
-			goto done;
-		}
-#endif
 #ifdef DIAGNOSTIC
 		if (MIPS_KSEG1_P(va))
 			panic("pmap_extract: kseg1 address %#"PRIxVADDR"", va);
@@ -2125,11 +2097,7 @@ pmap_check_pvlist(struct vm_page_md *md)
 	pt_entry_t pv = &md->pvh_first;
 	if (pv->pv_pmap != NULL) {
 		for (; pv != NULL; pv = pv->pv_next) {
-			KASSERT(!MIPS_KSEG0_P(pv->pv_va));
-			KASSERT(!MIPS_KSEG1_P(pv->pv_va));
-#ifdef _LP64
-			KASSERT(!MIPS_XKPHYS_P(pv->pv_va));
-#endif
+			KASSERT(!mm_md_direct_mapped_virt(pv->pv_va, NULL, NULL));
 		}
 		pv = &md->pvh_first;
 	}
@@ -2148,11 +2116,7 @@ pmap_enter_pv(pmap_t pmap, vaddr_t va, s
 	int16_t gen;
 
         KASSERT(kpreempt_disabled());
-        KASSERT(!MIPS_KSEG0_P(va));
-        KASSERT(!MIPS_KSEG1_P(va));
-#ifdef _LP64
-        KASSERT(!MIPS_XKPHYS_P(va));
-#endif
+	KASSERT(!mm_md_direct_mapped_virt(va, NULL, NULL));
 
 	apv = NULL;
 	pv = &md->pvh_first;
@@ -2534,20 +2498,8 @@ pmap_pv_page_free(struct pool *pp, void 
 	vaddr_t va = (vaddr_t)v;
 	paddr_t pa;
 
-#ifdef _LP64
-	KASSERT(MIPS_XKPHYS_P(va));
-	pa = MIPS_XKPHYS_TO_PHYS(va);
-#else
-#ifdef ENABLE_MIPS_KSEGX
-	if (VM_KSEGX_ADDRESS <= va && va <= VM_KSEGX_ADDRESS + VM_KSEGX_SIZE) { 
-		pa = mips_ksegx_start + va - VM_KSEGX_ADDRESS;
-	} else
-#endif
-	{
-		KASSERT(MIPS_KSEG0_P(va));
-		pa = MIPS_KSEG0_TO_PHYS(va);
-	}
-#endif
+	bool ok = mm_md_direct_mapped_virt(va, &pa, NULL);
+	KASSERT(ok);
 #ifdef MIPS3_PLUS
 	if (MIPS_CACHE_VIRTUAL_ALIAS)
 		mips_dcache_inv_range(va, PAGE_SIZE);
@@ -2636,19 +2588,10 @@ mips_pmap_map_poolpage(paddr_t pa)
 
 #ifdef _LP64
 	KASSERT(mips_options.mips3_xkphys_cached);
-	va = MIPS_PHYS_TO_XKPHYS_CACHED(pa);
-#else
-#ifdef ENABLE_MIPS_KSEGX
-	if (pa >= mips_ksegx_start && pa < mips_ksegx_start + VM_KSEGX_SIZE) {
-		va = VM_KSEGX_ADDRESS + pa - mips_ksegx_start;
-	} else
-#endif
-	if (pa > MIPS_PHYS_MASK)
-		panic("mips_pmap_map_poolpage: "
-		    "pa #%"PRIxPADDR" can not be mapped into KSEG0", pa);
-	else
-		va = MIPS_PHYS_TO_KSEG0(pa);
 #endif
+	if (!mm_md_direct_mapped_phys(pa, &va, NULL))
+		panic("%s: pa #%"PRIxPADDR" can not be direct mapped",
+		    __func__, pa);
 #if !defined(_LP64)
 	if (MIPS_CACHE_VIRTUAL_ALIAS) {
 		/*
@@ -2672,20 +2615,8 @@ paddr_t
 mips_pmap_unmap_poolpage(vaddr_t va)
 {
 	paddr_t pa;
-#if defined(_LP64)
-	KASSERT(MIPS_XKPHYS_P(va));
-	pa = MIPS_XKPHYS_TO_PHYS(va);
-#else
-#ifdef ENABLE_MIPS_KSEGX
-	if (VM_KSEGX_ADDRESS <= va && va < VM_KSEGX_ADDRESS + VM_KSEGX_SIZE) {
-		pa = mips_ksegx_start + va - VM_KSEGX_ADDRESS;
-	} else
-#endif
-	{
-		KASSERT(MIPS_KSEG0_P(va));
-		pa = MIPS_KSEG0_TO_PHYS(va);
-	}
-#endif
+	bool ok = mm_md_direct_mapped_virt(va, &pa, NULL);
+	KASSERT(ok);
 	struct vm_page *pg = PHYS_TO_VM_PAGE(pa);
 	KASSERT(pg);
 	pmap_clear_mdpage_attributes(VM_PAGE_TO_MD(pg), PG_MD_POOLPAGE);
@@ -2699,9 +2630,3 @@ mips_pmap_unmap_poolpage(vaddr_t va)
 #endif
 	return pa;
 }
-
-
-
-/******************** page table page management ********************/
-
-/* TO BE DONE */

Index: src/sys/arch/mips/mips/pmap_segtab.c
diff -u src/sys/arch/mips/mips/pmap_segtab.c:1.1.2.13 src/sys/arch/mips/mips/pmap_segtab.c:1.1.2.14
--- src/sys/arch/mips/mips/pmap_segtab.c:1.1.2.13	Fri Feb 10 07:14:49 2012
+++ src/sys/arch/mips/mips/pmap_segtab.c	Mon Feb 13 18:20:26 2012
@@ -162,10 +162,8 @@ static inline pt_entry_t * 
 pmap_segmap(struct pmap *pmap, vaddr_t va)
 {
 	union segtab *stp = pmap->pm_segtab;
-	KASSERT(!MIPS_KSEG0_P(va));
-	KASSERT(!MIPS_KSEG1_P(va));
+	KASSERT(!mm_md_direct_mapped_virt(va, NULL, NULL));
 #ifdef _LP64
-	KASSERT(!MIPS_XKPHYS_P(va));
 	stp = stp->seg_seg[(va >> XSEGSHIFT) & (NSEGPG - 1)];
 	if (stp == NULL)
 		return NULL;
@@ -238,9 +236,6 @@ pmap_segtab_release(union segtab *stp, u
 		if (MIPS_CACHE_VIRTUAL_ALIAS)
 			mips_dcache_inv_range((vaddr_t)pte, PAGE_SIZE);
 #endif	/* MIPS3_PLUS */
-#ifdef _LP64
-		KASSERT(MIPS_XKPHYS_P(pte));
-#endif
 		paddr_t pa = mips_pmap_unmap_poolpage((vaddr_t)pte);
 		struct vm_page *pg = PHYS_TO_VM_PAGE(pa);
 		uvm_km_pagefree(pg);

Index: src/sys/arch/mips/mips/vm_machdep.c
diff -u src/sys/arch/mips/mips/vm_machdep.c:1.121.6.1.2.26 src/sys/arch/mips/mips/vm_machdep.c:1.121.6.1.2.27
--- src/sys/arch/mips/mips/vm_machdep.c:1.121.6.1.2.26	Thu Feb  9 03:35:59 2012
+++ src/sys/arch/mips/mips/vm_machdep.c	Mon Feb 13 18:20:26 2012
@@ -164,16 +164,7 @@ cpu_lwp_fork(struct lwp *l1, struct lwp 
 	l2->l_md.md_utf = tf;
 	l2->l_md.md_flags = l1->l_md.md_flags & MDP_FPUSED;
 
-	bool direct_mapped_p = MIPS_KSEG0_P(ua2);
-#ifdef ENABLE_MIPS_KSEGX
-	if (!direct_mapped_p)
-		direct_mapped_p = VM_KSEGX_ADDRESS <= ua2
-		    && ua2 < VM_KSEGX_ADDRESS + VM_KSEGX_SIZE;
-#endif
-#ifdef _LP64
-	direct_mapped_p = direct_mapped_p || MIPS_XKPHYS_P(ua2);
-#endif
-	if (!direct_mapped_p) {
+	if (!mm_md_direct_mapped_virt(ua2, NULL, NULL)) {
 		pt_entry_t * const pte = kvtopte(ua2);
 		const uint32_t x = (MIPS_HAS_R4K_MMU) ?
 		    (MIPS3_PG_G | MIPS3_PG_RO | MIPS3_PG_WIRED) : MIPS1_PG_G;
@@ -243,15 +234,12 @@ cpu_uarea_remap(struct lwp *l)
 	 * Grab the starting physical address of the uarea.
 	 */
 	va = (vaddr_t)l->l_addr;
-	if (MIPS_KSEG0_P(va))
-		return;
-#ifdef _LP64
-	if (MIPS_XKPHYS_P(va))
-		return;
-#elif defined(ENABLE_MIPS_KSEGX)
-	if (VM_KSEGX_ADDRESS <= va && va < VM_KSEGX_ADDRESS + VM_KSEGX_SIZE)
+
+	/*
+	 * If already direct mapped, we're done!
+	 */
+	if (mm_md_direct_mapped_phys(va, NULL, NULL))
 		return;
-#endif
 
 	if (!pmap_extract(pmap_kernel(), va, &pa))
 		panic("%s: pmap_extract(%#"PRIxVADDR") failed", __func__, va);
@@ -347,18 +335,8 @@ cpu_swapin(struct lwp *l)
 	int i, x;
 	vaddr_t kva = (vaddr_t) lwp_getpcb(l);
 
-#ifdef _LP64
-	if (MIPS_XKPHYS_P(kva))
+	if (mm_md_direct_mapped_virt(kva, NULL, NULL))
 		return;
-#else
-	if (MIPS_KSEG0_P(kva))
-		return;
-	
-#ifdef ENABLE_MIPS_KSEGX
-	if (VM_KSEGX_ADDRESS <= kva && kva < VM_KSEGX_ADDRESS + VM_KSEGX_SIZE)
-		return;
-#endif
-#endif
 
 	/*
 	 * Cache the PTEs for the user area in the machine dependent
@@ -510,21 +488,23 @@ vunmapbuf(struct buf *bp, vsize_t len)
 paddr_t
 kvtophys(vaddr_t kva)
 {
-	pt_entry_t *pte;
 	paddr_t phys;
 
-	if (kva >= VM_MIN_KERNEL_ADDRESS) {
-		if (kva >= VM_MAX_KERNEL_ADDRESS)
-			goto overrun;
-
-#ifdef ENABLE_MIPS_KSEGX
-		if (VM_KSEGX_ADDRESS <= kva
-		    && kva < VM_KSEGX_ADDRESS + VM_KSEGX_SIZE) {
-			return mips_ksegx_start + kva - VM_KSEGX_ADDRESS;
-		}
-#endif
+	/*
+	 * When we dumping memory in a crash dump, we try to use a large
+	 * TLB entry to reduce the TLB trashing.
+	 */
+	if (__predict_false(mips_kcore_window_vtophys(kva, &phys)))
+		return phys;
 
-		pte = kvtopte(kva);
+	/*
+	 * If the KVA is direct mapped, we're done!
+	 */
+	if (mm_md_direct_mapped_virt(kva, &phys, NULL))
+		return phys;
+
+	if (VM_MIN_KERNEL_ADDRESS <= kva && kva < VM_MAX_KERNEL_ADDRESS) {
+		pt_entry_t *pte = kvtopte(kva);
 		if ((size_t) (pte - Sysmap) >= Sysmapsize)  {
 			printf("oops: Sysmap overrun, max %d index %zd\n",
 			       Sysmapsize, pte - Sysmap);
@@ -536,20 +516,7 @@ kvtophys(vaddr_t kva)
 		phys = mips_tlbpfn_to_paddr(pte->pt_entry) | (kva & PGOFSET);
 		return phys;
 	}
-	if (MIPS_KSEG1_P(kva))
-		return MIPS_KSEG1_TO_PHYS(kva);
 
-	if (MIPS_KSEG0_P(kva))
-		return MIPS_KSEG0_TO_PHYS(kva);
-#ifdef _LP64
-	if (MIPS_XKPHYS_P(kva))
-		return MIPS_XKPHYS_TO_PHYS(kva);
-#endif
-overrun:
-	printf("Virtual address %#"PRIxVADDR": cannot map to physical\n", kva);
-#ifdef DDB
-	Debugger();
-	return 0;	/* XXX */
-#endif
-	panic("kvtophys");
+	panic("%s: Virtual address %#"PRIxVADDR": cannot map to physical\n",
+	    __func__, kva);
 }

Reply via email to