Module Name:    src
Committed By:   cherry
Date:           Fri Dec 23 07:15:28 UTC 2016

Modified Files:
        src/sys/arch/acorn26/acorn26: pmap.c
        src/sys/arch/alpha/alpha: machdep.c pmap.c
        src/sys/arch/amd64/amd64: machdep.c
        src/sys/arch/arm/arm32: bus_dma.c pmap.c
        src/sys/arch/i386/i386: machdep.c
        src/sys/arch/ia64/ia64: machdep.c pmap.c
        src/sys/arch/m68k/m68k: pmap_motorola.c
        src/sys/arch/mips/include: pmap.h
        src/sys/arch/mips/mips: mips_machdep.c pmap_machdep.c
        src/sys/arch/powerpc/ibm4xx: pmap.c
        src/sys/arch/powerpc/isa: isadma_machdep.c
        src/sys/arch/powerpc/oea: pmap.c
        src/sys/arch/powerpc/powerpc: bus_dma.c
        src/sys/arch/sh3/sh3: pmap.c vm_machdep.c
        src/sys/arch/vax/vax: ka650.c pmap.c
        src/sys/arch/x68k/x68k: machdep.c
        src/sys/uvm: files.uvm uvm_init.c uvm_page.c uvm_page.h uvm_pglist.c
            uvm_physseg.h
        src/sys/uvm/pmap: pmap.c

Log Message:
"Make NetBSD great again!"

Introduce uvm_hotplug(9) to the kernel.

Many thanks, in no particular order to:

TNF, for funding the project.

Chuck Silvers - for multiple API reviews and feedback.
Nick Hudson - for testing on multiple architectures and bugfix patches.
Everyone who helped with boot testing.

KeK (http://www.kek.org.in) for hosting the primary developers.


To generate a diff of this commit:
cvs rdiff -u -r1.36 -r1.37 src/sys/arch/acorn26/acorn26/pmap.c
cvs rdiff -u -r1.348 -r1.349 src/sys/arch/alpha/alpha/machdep.c
cvs rdiff -u -r1.260 -r1.261 src/sys/arch/alpha/alpha/pmap.c
cvs rdiff -u -r1.242 -r1.243 src/sys/arch/amd64/amd64/machdep.c
cvs rdiff -u -r1.96 -r1.97 src/sys/arch/arm/arm32/bus_dma.c
cvs rdiff -u -r1.341 -r1.342 src/sys/arch/arm/arm32/pmap.c
cvs rdiff -u -r1.772 -r1.773 src/sys/arch/i386/i386/machdep.c
cvs rdiff -u -r1.36 -r1.37 src/sys/arch/ia64/ia64/machdep.c
cvs rdiff -u -r1.32 -r1.33 src/sys/arch/ia64/ia64/pmap.c
cvs rdiff -u -r1.68 -r1.69 src/sys/arch/m68k/m68k/pmap_motorola.c
cvs rdiff -u -r1.68 -r1.69 src/sys/arch/mips/include/pmap.h
cvs rdiff -u -r1.275 -r1.276 src/sys/arch/mips/mips/mips_machdep.c
cvs rdiff -u -r1.11 -r1.12 src/sys/arch/mips/mips/pmap_machdep.c
cvs rdiff -u -r1.73 -r1.74 src/sys/arch/powerpc/ibm4xx/pmap.c
cvs rdiff -u -r1.10 -r1.11 src/sys/arch/powerpc/isa/isadma_machdep.c
cvs rdiff -u -r1.93 -r1.94 src/sys/arch/powerpc/oea/pmap.c
cvs rdiff -u -r1.46 -r1.47 src/sys/arch/powerpc/powerpc/bus_dma.c
cvs rdiff -u -r1.78 -r1.79 src/sys/arch/sh3/sh3/pmap.c
cvs rdiff -u -r1.76 -r1.77 src/sys/arch/sh3/sh3/vm_machdep.c
cvs rdiff -u -r1.36 -r1.37 src/sys/arch/vax/vax/ka650.c
cvs rdiff -u -r1.183 -r1.184 src/sys/arch/vax/vax/pmap.c
cvs rdiff -u -r1.194 -r1.195 src/sys/arch/x68k/x68k/machdep.c
cvs rdiff -u -r1.27 -r1.28 src/sys/uvm/files.uvm
cvs rdiff -u -r1.47 -r1.48 src/sys/uvm/uvm_init.c
cvs rdiff -u -r1.189 -r1.190 src/sys/uvm/uvm_page.c
cvs rdiff -u -r1.80 -r1.81 src/sys/uvm/uvm_page.h
cvs rdiff -u -r1.67 -r1.68 src/sys/uvm/uvm_pglist.c
cvs rdiff -u -r1.3 -r1.4 src/sys/uvm/uvm_physseg.h
cvs rdiff -u -r1.25 -r1.26 src/sys/uvm/pmap/pmap.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/arch/acorn26/acorn26/pmap.c
diff -u src/sys/arch/acorn26/acorn26/pmap.c:1.36 src/sys/arch/acorn26/acorn26/pmap.c:1.37
--- src/sys/arch/acorn26/acorn26/pmap.c:1.36	Fri May 11 15:39:17 2012
+++ src/sys/arch/acorn26/acorn26/pmap.c	Fri Dec 23 07:15:27 2016
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.36 2012/05/11 15:39:17 skrll Exp $ */
+/* $NetBSD: pmap.c,v 1.37 2016/12/23 07:15:27 cherry Exp $ */
 /*-
  * Copyright (c) 1997, 1998, 2000 Ben Harris
  * All rights reserved.
@@ -102,7 +102,7 @@
 
 #include <sys/param.h>
 
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.36 2012/05/11 15:39:17 skrll Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.37 2016/12/23 07:15:27 cherry Exp $");
 
 #include <sys/kernel.h> /* for cold */
 #include <sys/kmem.h>
@@ -293,19 +293,26 @@ pmap_bootstrap(int npages, paddr_t zp_ph
 vaddr_t
 pmap_steal_memory(vsize_t size, vaddr_t *vstartp, vaddr_t *vendp)
 {
-	int i;
 	vaddr_t addr;
+	uvm_physseg_t bank;
+
 	UVMHIST_FUNC("pmap_steal_memory");
 
 	UVMHIST_CALLED(pmaphist);
 	addr = 0;
 	size = round_page(size);
-	for (i = 0; i < vm_nphysseg; i++) {
-		if (VM_PHYSMEM_PTR(i)->avail_start < VM_PHYSMEM_PTR(i)->avail_end) {
+	for (bank = uvm_physseg_get_first();
+	     uvm_physseg_valid_p(bank);
+	     bank = uvm_physseg_get_next(bank)) {
+		if (uvm_physseg_get_avail_start(bank) < uvm_physseg_get_avail_end(bank)) {
+			paddr_t avail_start = uvm_physseg_get_avail_start(bank);
+
 			addr = (vaddr_t)
 			    ((char*)MEMC_PHYS_BASE +
-				ptoa(VM_PHYSMEM_PTR(i)->avail_start));
-			VM_PHYSMEM_PTR(i)->avail_start++;
+				ptoa(avail_start));
+			avail_start++;
+			uvm_physseg_set_avail_start(bank, avail_start);
+
 			break;
 		}
 	}

Index: src/sys/arch/alpha/alpha/machdep.c
diff -u src/sys/arch/alpha/alpha/machdep.c:1.348 src/sys/arch/alpha/alpha/machdep.c:1.349
--- src/sys/arch/alpha/alpha/machdep.c:1.348	Thu Dec 22 14:47:54 2016
+++ src/sys/arch/alpha/alpha/machdep.c	Fri Dec 23 07:15:27 2016
@@ -1,4 +1,4 @@
-/* $NetBSD: machdep.c,v 1.348 2016/12/22 14:47:54 cherry Exp $ */
+/* $NetBSD: machdep.c,v 1.349 2016/12/23 07:15:27 cherry Exp $ */
 
 /*-
  * Copyright (c) 1998, 1999, 2000 The NetBSD Foundation, Inc.
@@ -68,7 +68,7 @@
 
 #include <sys/cdefs.h>			/* RCS ID & Copyright macro defns */
 
-__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.348 2016/12/22 14:47:54 cherry Exp $");
+__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.349 2016/12/23 07:15:27 cherry Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -227,7 +227,6 @@ alpha_init(u_long pfn, u_long ptb, u_lon
 	struct mddt *mddtp;
 	struct mddt_cluster *memc;
 	int i, mddtweird;
-	struct vm_physseg *vps;
 	struct pcb *pcb0;
 	vaddr_t kernstart, kernend, v;
 	paddr_t kernstartpfn, kernendpfn, pfn0, pfn1;
@@ -611,23 +610,24 @@ nobootinfo:
 	 * Initialize error message buffer (at end of core).
 	 */
 	{
+		paddr_t end;
 		vsize_t sz = (vsize_t)round_page(MSGBUFSIZE);
 		vsize_t reqsz = sz;
+		uvm_physseg_t bank;
 
-		vps = VM_PHYSMEM_PTR(vm_nphysseg - 1);
+		bank = uvm_physseg_get_last();
 
 		/* shrink so that it'll fit in the last segment */
-		if ((vps->avail_end - vps->avail_start) < atop(sz))
-			sz = ptoa(vps->avail_end - vps->avail_start);
+		if (uvm_physseg_get_avail_end(bank) - uvm_physseg_get_avail_start(bank) < atop(sz))
+			sz = ptoa(uvm_physseg_get_avail_end(bank) - uvm_physseg_get_avail_start(bank));
 
-		vps->end -= atop(sz);
-		vps->avail_end -= atop(sz);
-		msgbufaddr = (void *) ALPHA_PHYS_TO_K0SEG(ptoa(vps->end));
-		initmsgbuf(msgbufaddr, sz);
+		end = uvm_physseg_get_end(bank);
+		end -= atop(sz);
+
+		uvm_physseg_unplug(end, atop(sz));
+		msgbufaddr = (void *) ALPHA_PHYS_TO_K0SEG(ptoa(end));
 
-		/* Remove the last segment if it now has no pages. */
-		if (vps->start == vps->end)
-			vm_nphysseg--;
+		initmsgbuf(msgbufaddr, sz);
 
 		/* warn if the message buffer had to be shrunk */
 		if (sz != reqsz)

Index: src/sys/arch/alpha/alpha/pmap.c
diff -u src/sys/arch/alpha/alpha/pmap.c:1.260 src/sys/arch/alpha/alpha/pmap.c:1.261
--- src/sys/arch/alpha/alpha/pmap.c:1.260	Thu Nov  5 06:26:15 2015
+++ src/sys/arch/alpha/alpha/pmap.c	Fri Dec 23 07:15:27 2016
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.260 2015/11/05 06:26:15 pgoyette Exp $ */
+/* $NetBSD: pmap.c,v 1.261 2016/12/23 07:15:27 cherry Exp $ */
 
 /*-
  * Copyright (c) 1998, 1999, 2000, 2001, 2007, 2008 The NetBSD Foundation, Inc.
@@ -140,7 +140,7 @@
 
 #include <sys/cdefs.h>			/* RCS ID & Copyright macro defns */
 
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.260 2015/11/05 06:26:15 pgoyette Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.261 2016/12/23 07:15:27 cherry Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -784,8 +784,8 @@ pmap_bootstrap(paddr_t ptaddr, u_int max
 	 * the fact that BSEARCH sorts the vm_physmem[] array
 	 * for us.
 	 */
-	avail_start = ptoa(VM_PHYSMEM_PTR(0)->start);
-	avail_end = ptoa(VM_PHYSMEM_PTR(vm_nphysseg - 1)->end);
+	avail_start = ptoa(uvm_physseg_get_avail_start(uvm_physseg_get_first()));
+	avail_end = ptoa(uvm_physseg_get_avail_end(uvm_physseg_get_last()));
 	virtual_end = VM_MIN_KERNEL_ADDRESS + lev3mapsize * PAGE_SIZE;
 
 #if 0
@@ -1007,9 +1007,11 @@ pmap_virtual_space(vaddr_t *vstartp, vad
 vaddr_t
 pmap_steal_memory(vsize_t size, vaddr_t *vstartp, vaddr_t *vendp)
 {
-	int bank, npgs, x;
+	int npgs;
 	vaddr_t va;
-	paddr_t pa;
+	paddr_t pa; 
+
+	uvm_physseg_t bank;
 
 	size = round_page(size);
 	npgs = atop(size);
@@ -1018,50 +1020,36 @@ pmap_steal_memory(vsize_t size, vaddr_t 
 	printf("PSM: size 0x%lx (npgs 0x%x)\n", size, npgs);
 #endif
 
-	for (bank = 0; bank < vm_nphysseg; bank++) {
+	for (bank = uvm_physseg_get_first();
+	     uvm_physseg_valid_p(bank);
+	     bank = uvm_physseg_get_next(bank)) {
 		if (uvm.page_init_done == true)
 			panic("pmap_steal_memory: called _after_ bootstrap");
 
 #if 0
-		printf("     bank %d: avail_start 0x%lx, start 0x%lx, "
-		    "avail_end 0x%lx\n", bank, VM_PHYSMEM_PTR(bank)->avail_start,
+		printf("     bank %d: avail_start 0x%"PRIxPADDR", start 0x%"PRIxPADDR", "
+		    "avail_end 0x%"PRIxPADDR"\n", bank, VM_PHYSMEM_PTR(bank)->avail_start,
 		    VM_PHYSMEM_PTR(bank)->start, VM_PHYSMEM_PTR(bank)->avail_end);
 #endif
 
-		if (VM_PHYSMEM_PTR(bank)->avail_start != VM_PHYSMEM_PTR(bank)->start ||
-		    VM_PHYSMEM_PTR(bank)->avail_start >= VM_PHYSMEM_PTR(bank)->avail_end)
+		if (uvm_physseg_get_avail_start(bank) != uvm_physseg_get_start(bank) ||
+		    uvm_physseg_get_avail_start(bank) >= uvm_physseg_get_avail_end(bank))
 			continue;
 
 #if 0
-		printf("             avail_end - avail_start = 0x%lx\n",
+		printf("             avail_end - avail_start = 0x%"PRIxPADDR"\n",
 		    VM_PHYSMEM_PTR(bank)->avail_end - VM_PHYSMEM_PTR(bank)->avail_start);
 #endif
 
-		if ((VM_PHYSMEM_PTR(bank)->avail_end - VM_PHYSMEM_PTR(bank)->avail_start)
+		if (uvm_physseg_get_avail_end(bank) - uvm_physseg_get_avail_start(bank)
 		    < npgs)
 			continue;
 
 		/*
 		 * There are enough pages here; steal them!
 		 */
-		pa = ptoa(VM_PHYSMEM_PTR(bank)->avail_start);
-		VM_PHYSMEM_PTR(bank)->avail_start += npgs;
-		VM_PHYSMEM_PTR(bank)->start += npgs;
-
-		/*
-		 * Have we used up this segment?
-		 */
-		if (VM_PHYSMEM_PTR(bank)->avail_start == VM_PHYSMEM_PTR(bank)->end) {
-			if (vm_nphysseg == 1)
-				panic("pmap_steal_memory: out of memory!");
-
-			/* Remove this segment from the list. */
-			vm_nphysseg--;
-			for (x = bank; x < vm_nphysseg; x++) {
-				/* structure copy */
-				VM_PHYSMEM_PTR_SWAP(x, x + 1);
-			}
-		}
+		pa = ptoa(uvm_physseg_get_start(bank));
+		uvm_physseg_unplug(atop(pa), npgs);
 
 		va = ALPHA_PHYS_TO_K0SEG(pa);
 		memset((void *)va, 0, size);

Index: src/sys/arch/amd64/amd64/machdep.c
diff -u src/sys/arch/amd64/amd64/machdep.c:1.242 src/sys/arch/amd64/amd64/machdep.c:1.243
--- src/sys/arch/amd64/amd64/machdep.c:1.242	Thu Dec 22 16:29:05 2016
+++ src/sys/arch/amd64/amd64/machdep.c	Fri Dec 23 07:15:27 2016
@@ -1,4 +1,4 @@
-/*	$NetBSD: machdep.c,v 1.242 2016/12/22 16:29:05 bouyer Exp $	*/
+/*	$NetBSD: machdep.c,v 1.243 2016/12/23 07:15:27 cherry Exp $	*/
 
 /*-
  * Copyright (c) 1996, 1997, 1998, 2000, 2006, 2007, 2008, 2011
@@ -111,7 +111,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.242 2016/12/22 16:29:05 bouyer Exp $");
+__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.243 2016/12/23 07:15:27 cherry Exp $");
 
 /* #define XENDEBUG_LOW  */
 
@@ -789,6 +789,7 @@ sparse_dump_mark(void)
 	paddr_t p, pstart, pend;
 	struct vm_page *pg;
 	int i;
+	uvm_physseg_t upm;
 
 	/*
 	 * Mark all memory pages, then unmark pages that are uninteresting.
@@ -805,10 +806,25 @@ sparse_dump_mark(void)
 			setbit(sparse_dump_physmap, p);
 		}
 	}
-	for (i = 0; i < vm_nphysseg; i++) {
-		struct vm_physseg *seg = VM_PHYSMEM_PTR(i);
+        for (upm = uvm_physseg_get_first();
+	     uvm_physseg_valid_p(upm);
+	     upm = uvm_physseg_get_next(upm)) {
+		paddr_t pfn;
 
-		for (pg = seg->pgs; pg < seg->lastpg; pg++) {
+		if (uvm_physseg_valid_p(upm) == false)
+			break;
+
+		const paddr_t startpfn = uvm_physseg_get_start(upm);
+		const paddr_t endpfn = uvm_physseg_get_end(upm);
+
+		KASSERT(startpfn != -1 && endpfn != -1);
+
+		/*
+		 * We assume that seg->start to seg->end are
+		 * uvm_page_physload()ed
+		 */
+		for (pfn = startpfn; pfn <= endpfn; pfn++) {
+			pg = PHYS_TO_VM_PAGE(ptoa(pfn));
 			if (pg->uanon || (pg->pqflags & PQ_FREE) ||
 			    (pg->uobject && pg->uobject->pgops)) {
 				p = VM_PAGE_TO_PHYS(pg) / PAGE_SIZE;
@@ -1452,57 +1468,30 @@ extern vector *IDTVEC(exceptions)[];
 static void
 init_x86_64_msgbuf(void)
 {
-	/* Message buffer is located at end of core. */
-	struct vm_physseg *vps;
-	psize_t sz = round_page(MSGBUFSIZE);
-	psize_t reqsz = sz;
-	int x;
-		
- search_again:
-	vps = NULL;
-
-	for (x = 0; x < vm_nphysseg; x++) {
-		vps = VM_PHYSMEM_PTR(x);
-		if (ctob(vps->avail_end) == avail_end)
-			break;
-	}
-	if (x == vm_nphysseg)
-		panic("init_x86_64: can't find end of memory");
+        /* Message buffer is located at end of core. */
+	psize_t reqsz = round_page(MSGBUFSIZE);
+	psize_t sz = 0;
 
-	/* Shrink so it'll fit in the last segment. */
-	if ((vps->avail_end - vps->avail_start) < atop(sz))
-		sz = ctob(vps->avail_end - vps->avail_start);
-
-	vps->avail_end -= atop(sz);
-	vps->end -= atop(sz);
-            msgbuf_p_seg[msgbuf_p_cnt].sz = sz;
-            msgbuf_p_seg[msgbuf_p_cnt++].paddr = ctob(vps->avail_end);
-
-	/* Remove the last segment if it now has no pages. */
-	if (vps->start == vps->end) {
-		for (vm_nphysseg--; x < vm_nphysseg; x++)
-			VM_PHYSMEM_PTR_SWAP(x, x + 1);
-	}
-
-	/* Now find where the new avail_end is. */
-	for (avail_end = 0, x = 0; x < vm_nphysseg; x++)
-		if (VM_PHYSMEM_PTR(x)->avail_end > avail_end)
-			avail_end = VM_PHYSMEM_PTR(x)->avail_end;
-	avail_end = ctob(avail_end);
+	for (sz = 0; sz < reqsz; sz += PAGE_SIZE) {
+		paddr_t stolenpa;
 
-	if (sz == reqsz)
-		return;
+		if (!uvm_page_physget(&stolenpa))
+			break;
 
-	reqsz -= sz;
-	if (msgbuf_p_cnt == VM_PHYSSEG_MAX) {
-		/* No more segments available, bail out. */
-		printf("WARNING: MSGBUFSIZE (%zu) too large, using %zu.\n",
-		    (size_t)MSGBUFSIZE, (size_t)(MSGBUFSIZE - reqsz));
-		return;
+		if (stolenpa == (msgbuf_p_seg[msgbuf_p_cnt].paddr
+			+ PAGE_SIZE)) {
+			/* contiguous: append it to current buf alloc */
+			msgbuf_p_seg[msgbuf_p_cnt].sz += PAGE_SIZE;
+		} else  {
+			/* non-contiguous: start a new msgbuf seg */
+			msgbuf_p_seg[msgbuf_p_cnt].sz = PAGE_SIZE;
+			msgbuf_p_seg[msgbuf_p_cnt++].paddr = stolenpa;
+		}
 	}
 
-	sz = reqsz;
-	goto search_again;
+	if (sz != reqsz)
+		printf("%s: could only allocate %ld bytes of requested %ld bytes\n",
+		    __func__, sz, reqsz);
 }
 
 static void

Index: src/sys/arch/arm/arm32/bus_dma.c
diff -u src/sys/arch/arm/arm32/bus_dma.c:1.96 src/sys/arch/arm/arm32/bus_dma.c:1.97
--- src/sys/arch/arm/arm32/bus_dma.c:1.96	Sat Nov  5 14:26:23 2016
+++ src/sys/arch/arm/arm32/bus_dma.c	Fri Dec 23 07:15:27 2016
@@ -1,4 +1,4 @@
-/*	$NetBSD: bus_dma.c,v 1.96 2016/11/05 14:26:23 skrll Exp $	*/
+/*	$NetBSD: bus_dma.c,v 1.97 2016/12/23 07:15:27 cherry Exp $	*/
 
 /*-
  * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
@@ -35,7 +35,7 @@
 #include "opt_arm_bus_space.h"
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: bus_dma.c,v 1.96 2016/11/05 14:26:23 skrll Exp $");
+__KERNEL_RCSID(0, "$NetBSD: bus_dma.c,v 1.97 2016/12/23 07:15:27 cherry Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -1365,11 +1365,11 @@ _bus_dmamem_map(bus_dma_tag_t t, bus_dma
 		 * The page can only be direct mapped if was allocated out
 		 * of the arm poolpage vm freelist.
 		 */
-		int lcv = vm_physseg_find(atop(pa), NULL);
-		KASSERT(lcv != -1);
+		uvm_physseg_t upm = uvm_physseg_find(atop(pa), NULL);
+		KASSERT(uvm_physseg_valid_p(upm));
 		if (direct_mapable) {
 			direct_mapable =
-			    (arm_poolpage_vmfreelist == VM_PHYSMEM_PTR(lcv)->free_list);
+			    (arm_poolpage_vmfreelist == uvm_physseg_get_free_list(upm));
 		}
 #endif
 

Index: src/sys/arch/arm/arm32/pmap.c
diff -u src/sys/arch/arm/arm32/pmap.c:1.341 src/sys/arch/arm/arm32/pmap.c:1.342
--- src/sys/arch/arm/arm32/pmap.c:1.341	Sat Dec 17 14:36:29 2016
+++ src/sys/arch/arm/arm32/pmap.c	Fri Dec 23 07:15:27 2016
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.c,v 1.341 2016/12/17 14:36:29 flxd Exp $	*/
+/*	$NetBSD: pmap.c,v 1.342 2016/12/23 07:15:27 cherry Exp $	*/
 
 /*
  * Copyright 2003 Wasabi Systems, Inc.
@@ -217,7 +217,7 @@
 
 #include <arm/locore.h>
 
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.341 2016/12/17 14:36:29 flxd Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.342 2016/12/23 07:15:27 cherry Exp $");
 
 //#define PMAP_DEBUG
 #ifdef PMAP_DEBUG
@@ -6453,8 +6453,8 @@ pmap_init(void)
 	 * One could argue whether this should be the entire memory or just
 	 * the memory that is useable in a user process.
 	 */
-	avail_start = ptoa(VM_PHYSMEM_PTR(0)->start);
-	avail_end = ptoa(VM_PHYSMEM_PTR(vm_nphysseg - 1)->end);
+	avail_start = ptoa(uvm_physseg_get_avail_start(uvm_physseg_get_first()));
+	avail_end = ptoa(uvm_physseg_get_avail_end(uvm_physseg_get_last()));
 
 	/*
 	 * Now we need to free enough pv_entry structures to allow us to get

Index: src/sys/arch/i386/i386/machdep.c
diff -u src/sys/arch/i386/i386/machdep.c:1.772 src/sys/arch/i386/i386/machdep.c:1.773
--- src/sys/arch/i386/i386/machdep.c:1.772	Thu Dec 22 16:29:05 2016
+++ src/sys/arch/i386/i386/machdep.c	Fri Dec 23 07:15:27 2016
@@ -1,4 +1,4 @@
-/*	$NetBSD: machdep.c,v 1.772 2016/12/22 16:29:05 bouyer Exp $	*/
+/*	$NetBSD: machdep.c,v 1.773 2016/12/23 07:15:27 cherry Exp $	*/
 
 /*-
  * Copyright (c) 1996, 1997, 1998, 2000, 2004, 2006, 2008, 2009
@@ -67,7 +67,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.772 2016/12/22 16:29:05 bouyer Exp $");
+__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.773 2016/12/23 07:15:27 cherry Exp $");
 
 #include "opt_beep.h"
 #include "opt_compat_ibcs2.h"
@@ -1037,57 +1037,30 @@ initgdt(union descriptor *tgdt)
 static void
 init386_msgbuf(void)
 {
-	/* Message buffer is located at end of core. */
-	struct vm_physseg *vps;
-	psize_t sz = round_page(MSGBUFSIZE);
-	psize_t reqsz = sz;
-	unsigned int x;
-
- search_again:
-	vps = NULL;
-	for (x = 0; x < vm_nphysseg; ++x) {
-		vps = VM_PHYSMEM_PTR(x);
-		if (ctob(vps->avail_end) == avail_end) {
-			break;
-		}
-	}
-	if (x == vm_nphysseg)
-		panic("init386: can't find end of memory");
+        /* Message buffer is located at end of core. */
+	psize_t reqsz = round_page(MSGBUFSIZE);
+	psize_t sz = 0;
 
-	/* Shrink so it'll fit in the last segment. */
-	if (vps->avail_end - vps->avail_start < atop(sz))
-		sz = ctob(vps->avail_end - vps->avail_start);
-
-	vps->avail_end -= atop(sz);
-	vps->end -= atop(sz);
-	msgbuf_p_seg[msgbuf_p_cnt].sz = sz;
-	msgbuf_p_seg[msgbuf_p_cnt++].paddr = ctob(vps->avail_end);
-
-	/* Remove the last segment if it now has no pages. */
-	if (vps->start == vps->end) {
-		for (--vm_nphysseg; x < vm_nphysseg; x++)
-			VM_PHYSMEM_PTR_SWAP(x, x + 1);
-	}
-
-	/* Now find where the new avail_end is. */
-	for (avail_end = 0, x = 0; x < vm_nphysseg; x++)
-		if (VM_PHYSMEM_PTR(x)->avail_end > avail_end)
-			avail_end = VM_PHYSMEM_PTR(x)->avail_end;
-	avail_end = ctob(avail_end);
+	for (sz = 0; sz < reqsz; sz += PAGE_SIZE) {
+		paddr_t stolenpa;
 
-	if (sz == reqsz)
-		return;
+		if (!uvm_page_physget(&stolenpa))
+			break;
 
-	reqsz -= sz;
-	if (msgbuf_p_cnt == VM_PHYSSEG_MAX) {
-		/* No more segments available, bail out. */
-		printf("WARNING: MSGBUFSIZE (%zu) too large, using %zu.\n",
-		    (size_t)MSGBUFSIZE, (size_t)(MSGBUFSIZE - reqsz));
-		return;
+		if (stolenpa == (msgbuf_p_seg[msgbuf_p_cnt].paddr
+			+ PAGE_SIZE)) {
+			/* contiguous: append it to current buf alloc */
+			msgbuf_p_seg[msgbuf_p_cnt].sz += PAGE_SIZE;
+		} else  {
+			/* non-contiguous: start a new msgbuf seg */
+			msgbuf_p_seg[msgbuf_p_cnt].sz = PAGE_SIZE;
+			msgbuf_p_seg[msgbuf_p_cnt++].paddr = stolenpa;
+		}
 	}
 
-	sz = reqsz;
-	goto search_again;
+	if (sz != reqsz)
+		printf("%s: could only allocate %ld bytes of requested %ld bytes\n",
+		    __func__, sz, reqsz);
 }
 
 #ifndef XEN

Index: src/sys/arch/ia64/ia64/machdep.c
diff -u src/sys/arch/ia64/ia64/machdep.c:1.36 src/sys/arch/ia64/ia64/machdep.c:1.37
--- src/sys/arch/ia64/ia64/machdep.c:1.36	Thu Dec 22 14:47:58 2016
+++ src/sys/arch/ia64/ia64/machdep.c	Fri Dec 23 07:15:27 2016
@@ -1,4 +1,4 @@
-/*	$NetBSD: machdep.c,v 1.36 2016/12/22 14:47:58 cherry Exp $	*/
+/*	$NetBSD: machdep.c,v 1.37 2016/12/23 07:15:27 cherry Exp $	*/
 
 /*-
  * Copyright (c) 2003,2004 Marcel Moolenaar
@@ -175,18 +175,19 @@ cpu_startup(void)
 	 * Display any holes after the first chunk of extended memory.
 	 */
 	if (bootverbose) {
-		int lcv, sizetmp;
+		int sizetmp, vm_nphysseg;
+		uvm_physseg_t upm;
 
 		printf("Physical memory chunk(s):\n");
-		for (lcv = 0;
-		    lcv < vm_nphysseg || VM_PHYSMEM_PTR(lcv)->avail_end != 0;
-		    lcv++) {
-			sizetmp = VM_PHYSMEM_PTR(lcv)->avail_end -
-			    VM_PHYSMEM_PTR(lcv)->avail_start;
+		for (vm_nphysseg = 0, upm = uvm_physseg_get_first();
+		     uvm_physseg_valid_p(upm);
+		     vm_nphysseg++, upm = uvm_physseg_get_next(upm)) {
+			sizetmp = uvm_physseg_get_avail_end(upm) -
+			    uvm_physseg_get_avail_start(upm);
 
 			printf("0x%016lx - 0x%016lx, %ld bytes (%d pages)\n",
-			    ptoa(VM_PHYSMEM_PTR(lcv)->avail_start),
-				ptoa(VM_PHYSMEM_PTR(lcv)->avail_end) - 1,
+			    ptoa(uvm_physseg_get_avail_start(upm)),
+			    ptoa(uvm_physseg_get_avail_end(upm)) - 1,
 				    ptoa(sizetmp), sizetmp);
 		}
 		printf("Total number of segments: vm_nphysseg = %d \n",

Index: src/sys/arch/ia64/ia64/pmap.c
diff -u src/sys/arch/ia64/ia64/pmap.c:1.32 src/sys/arch/ia64/ia64/pmap.c:1.33
--- src/sys/arch/ia64/ia64/pmap.c:1.32	Mon Mar 10 13:47:45 2014
+++ src/sys/arch/ia64/ia64/pmap.c	Fri Dec 23 07:15:27 2016
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.32 2014/03/10 13:47:45 martin Exp $ */
+/* $NetBSD: pmap.c,v 1.33 2016/12/23 07:15:27 cherry Exp $ */
 
 
 /*-
@@ -85,7 +85,7 @@
 
 #include <sys/cdefs.h>
 
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.32 2014/03/10 13:47:45 martin Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.33 2016/12/23 07:15:27 cherry Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -94,6 +94,7 @@ __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.3
 #include <sys/lock.h>
 
 #include <uvm/uvm.h>
+#include <uvm/uvm_physseg.h>
 
 #include <machine/pal.h>
 #include <machine/atomic.h>
@@ -316,47 +317,33 @@ pmap_steal_vhpt_memory(vsize_t);
 vaddr_t
 pmap_steal_memory(vsize_t size, vaddr_t *vstartp, vaddr_t *vendp)
 {
-	int lcv, npgs, x;
+	int npgs;
+	uvm_physseg_t upm;
 	vaddr_t va;
 	paddr_t pa;
 
 	size = round_page(size);
 	npgs = atop(size);
 
-	for (lcv = 0; lcv < vm_nphysseg; lcv++) {
+	for (upm = uvm_physseg_get_first();
+	     uvm_physseg_valid_p(upm);
+	     upm = uvm_physseg_get_next(upm)) {
 		if (uvm.page_init_done == true)
 			panic("pmap_steal_memory: called _after_ bootstrap");
 
-		if (VM_PHYSMEM_PTR(lcv)->avail_start != VM_PHYSMEM_PTR(lcv)->start ||
-		    VM_PHYSMEM_PTR(lcv)->avail_start >= VM_PHYSMEM_PTR(lcv)->avail_end)
+		if (uvm_physseg_get_avail_start(upm) != uvm_physseg_get_start(upm) ||
+		    uvm_physseg_get_avail_start(upm) >= uvm_physseg_get_avail_end(upm))
 			continue;
 
-		if ((VM_PHYSMEM_PTR(lcv)->avail_end - VM_PHYSMEM_PTR(lcv)->avail_start)
+		if ((uvm_physseg_get_avail_end(upm) - uvm_physseg_get_avail_start(upm))
 		    < npgs)
 			continue;
 
 		/*
 		 * There are enough pages here; steal them!
 		 */
-		pa = ptoa(VM_PHYSMEM_PTR(lcv)->avail_start);
-		VM_PHYSMEM_PTR(lcv)->avail_start += npgs;
-		VM_PHYSMEM_PTR(lcv)->start += npgs;
-
-		/*
-		 * Have we used up this segment?
-		 */
-		if (VM_PHYSMEM_PTR(lcv)->avail_start ==
-		    VM_PHYSMEM_PTR(lcv)->end) {
-			if (vm_nphysseg == 1)
-				panic("pmap_steal_memory: out of memory!");
-
-			/* Remove this segment from the list. */
-			vm_nphysseg--;
-			for (x = lcv; x < vm_nphysseg; x++) {
-				/* structure copy */
-				VM_PHYSMEM_PTR_SWAP(x, x + 1);
-			}
-		}
+		pa = ptoa(uvm_physseg_get_start(upm));
+		uvm_physseg_unplug(atop(pa), npgs);
 
 		va = IA64_PHYS_TO_RR7(pa);
 		memset((void *)va, 0, size);
@@ -380,31 +367,34 @@ pmap_steal_memory(vsize_t size, vaddr_t 
 static vaddr_t
 pmap_steal_vhpt_memory(vsize_t size)
 {
-	int lcv, npgs, x;
+	int npgs;
+	uvm_physseg_t upm;
 	vaddr_t va;
-	paddr_t pa;
+	paddr_t tmppa, pa = 0;
 	paddr_t vhpt_start = 0, start1, start2, end1, end2;
 
 	size = round_page(size);
 	npgs = atop(size);
 
-	for (lcv = 0; lcv < vm_nphysseg; lcv++) {
+	for (upm = uvm_physseg_get_first();
+	     uvm_physseg_valid_p(upm);
+	     upm = uvm_physseg_get_next(upm)) {
 		if (uvm.page_init_done == true)
 			panic("pmap_vhpt_steal_memory: called _after_ bootstrap");
 
-		if (VM_PHYSMEM_PTR(lcv)->avail_start != VM_PHYSMEM_PTR(lcv)->start || /* XXX: ??? */
-		    VM_PHYSMEM_PTR(lcv)->avail_start >= VM_PHYSMEM_PTR(lcv)->avail_end)
+		if (uvm_physseg_get_avail_start(upm) != uvm_physseg_get_start(upm) || /* XXX: ??? */
+		    uvm_physseg_get_avail_start(upm) >= uvm_physseg_get_avail_end(upm))
 			continue;
 
 		/* Break off a VHPT sized, aligned chunk off this segment. */
 
-		start1 = VM_PHYSMEM_PTR(lcv)->avail_start;
+		start1 = uvm_physseg_get_avail_start(upm);
 
 		/* Align requested start address on requested size boundary */
 		end1 = vhpt_start = roundup(start1, npgs);
 
 		start2 = vhpt_start + npgs;
-		end2 = VM_PHYSMEM_PTR(lcv)->avail_end;
+		end2 = uvm_physseg_get_avail_end(upm);
 
 		/* Case 1: Doesn't fit. skip this segment */
 
@@ -423,7 +413,7 @@ pmap_steal_vhpt_memory(vsize_t size)
 		 */
 		if (start1 == end1 &&
 		    start2 == end2 &&
-		    vm_nphysseg == 1) {
+		    uvm_physseg_get_first() == uvm_physseg_get_last() /* single segment */) {
 #ifdef DEBUG
 			printf("pmap_vhpt_steal_memory: out of memory!");
 #endif
@@ -431,10 +421,12 @@ pmap_steal_vhpt_memory(vsize_t size)
 		}
 
 		/* Remove this segment from the list. */
-		vm_nphysseg--;
-		for (x = lcv; x < vm_nphysseg; x++)
-			/* structure copy */
-			VM_PHYSMEM_PTR_SWAP(x, x + 1);
+		if (uvm_physseg_unplug(uvm_physseg_get_start(upm),
+			uvm_physseg_get_end(upm) - uvm_physseg_get_start(upm)) == false) {
+			panic("%s: uvm_physseg_unplug(%"PRIxPADDR", %"PRIxPADDR") failed\n",
+			    __func__, uvm_physseg_get_start(upm),
+			    uvm_physseg_get_end(upm) - uvm_physseg_get_start(upm));
+		}
 
 		/* Case 2: Perfect fit - skip segment reload. */
 

Index: src/sys/arch/m68k/m68k/pmap_motorola.c
diff -u src/sys/arch/m68k/m68k/pmap_motorola.c:1.68 src/sys/arch/m68k/m68k/pmap_motorola.c:1.69
--- src/sys/arch/m68k/m68k/pmap_motorola.c:1.68	Thu Dec 22 14:47:58 2016
+++ src/sys/arch/m68k/m68k/pmap_motorola.c	Fri Dec 23 07:15:27 2016
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap_motorola.c,v 1.68 2016/12/22 14:47:58 cherry Exp $        */
+/*	$NetBSD: pmap_motorola.c,v 1.69 2016/12/23 07:15:27 cherry Exp $        */
 
 /*-
  * Copyright (c) 1999 The NetBSD Foundation, Inc.
@@ -119,7 +119,7 @@
 #include "opt_m68k_arch.h"
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap_motorola.c,v 1.68 2016/12/22 14:47:58 cherry Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap_motorola.c,v 1.69 2016/12/23 07:15:27 cherry Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -133,6 +133,7 @@ __KERNEL_RCSID(0, "$NetBSD: pmap_motorol
 #include <machine/pcb.h>
 
 #include <uvm/uvm.h>
+#include <uvm/uvm_physseg.h>
 
 #include <m68k/cacheops.h>
 
@@ -295,10 +296,11 @@ struct pool	pmap_pv_pool;	/* memory pool
 static inline struct pv_header *
 pa_to_pvh(paddr_t pa)
 {
-	int bank, pg = 0;	/* XXX gcc4 -Wuninitialized */
-
-	bank = vm_physseg_find(atop((pa)), &pg);
-	return &VM_PHYSMEM_PTR(bank)->pmseg.pvheader[pg];
+	uvm_physseg_t bank = 0;	/* XXX gcc4 -Wuninitialized */
+	psize_t pg = 0;
+	
+	bank = uvm_physseg_find(atop((pa)), &pg);
+	return &uvm_physseg_get_pmseg(bank)->pvheader[pg];
 }
 
 /*
@@ -412,7 +414,7 @@ pmap_init(void)
 	struct pv_header *pvh;
 	int		rv;
 	int		npages;
-	int		bank;
+	uvm_physseg_t	bank;
 
 	PMAP_DPRINTF(PDB_FOLLOW, ("pmap_init()\n"));
 
@@ -434,8 +436,10 @@ pmap_init(void)
 	 * Allocate memory for random pmap data structures.  Includes the
 	 * initial segment table, pv_head_table and pmap_attributes.
 	 */
-	for (page_cnt = 0, bank = 0; bank < vm_nphysseg; bank++)
-		page_cnt += VM_PHYSMEM_PTR(bank)->end - VM_PHYSMEM_PTR(bank)->start;
+	for (page_cnt = 0, bank = uvm_physseg_get_first();
+	     uvm_physseg_valid_p(bank);
+	     bank = uvm_physseg_get_next(bank))
+		page_cnt += uvm_physseg_get_end(bank) - uvm_physseg_get_start(bank);
 	s = M68K_STSIZE;					/* Segtabzero */
 	s += page_cnt * sizeof(struct pv_header);	/* pv table */
 	s = round_page(s);
@@ -461,9 +465,11 @@ pmap_init(void)
 	 * assign them to the memory segments.
 	 */
 	pvh = pv_table;
-	for (bank = 0; bank < vm_nphysseg; bank++) {
-		npages = VM_PHYSMEM_PTR(bank)->end - VM_PHYSMEM_PTR(bank)->start;
-		VM_PHYSMEM_PTR(bank)->pmseg.pvheader = pvh;
+	for (bank = uvm_physseg_get_first();
+	     uvm_physseg_valid_p(bank);
+	     bank = uvm_physseg_get_next(bank)) {
+		npages = uvm_physseg_get_end(bank) - uvm_physseg_get_start(bank);
+		uvm_physseg_get_pmseg(bank)->pvheader = pvh;
 		pvh += npages;
 	}
 
@@ -1704,7 +1710,8 @@ pmap_collect1(pmap_t pmap, paddr_t start
 static void
 pmap_collect(void)
 {
-	int bank, s;
+	int s;
+	uvm_physseg_t bank;
 
 	/*
 	 * XXX This is very bogus.  We should handle kernel PT
@@ -1712,9 +1719,11 @@ pmap_collect(void)
 	 */
 
 	s = splvm();
-	for (bank = 0; bank < vm_nphysseg; bank++) {
-		pmap_collect1(pmap_kernel(), ptoa(VM_PHYSMEM_PTR(bank)->start),
-		    ptoa(VM_PHYSMEM_PTR(bank)->end));
+	for (bank = uvm_physseg_get_first();
+	     uvm_physseg_valid_p(bank);
+	     bank = uvm_physseg_get_next(bank)) {
+		pmap_collect1(pmap_kernel(), ptoa(uvm_physseg_get_start(bank)),
+		    ptoa(uvm_physseg_get_end(bank)));
 	}
 	splx(s);
 }

Index: src/sys/arch/mips/include/pmap.h
diff -u src/sys/arch/mips/include/pmap.h:1.68 src/sys/arch/mips/include/pmap.h:1.69
--- src/sys/arch/mips/include/pmap.h:1.68	Mon Jul 11 16:15:35 2016
+++ src/sys/arch/mips/include/pmap.h	Fri Dec 23 07:15:27 2016
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.h,v 1.68 2016/07/11 16:15:35 matt Exp $	*/
+/*	$NetBSD: pmap.h,v 1.69 2016/12/23 07:15:27 cherry Exp $	*/
 
 /*
  * Copyright (c) 1992, 1993
@@ -116,7 +116,7 @@ typedef uint32_t pt_entry_t;
 #define PMAP_SEGTAB_ALIGN __aligned(sizeof(void *)*NSEGPG) __section(".data1")
 #endif   
 
-struct vm_physseg;
+#include <uvm/uvm_physseg.h>
 
 void	pmap_md_init(void);
 void	pmap_md_icache_sync_all(void);
@@ -125,7 +125,7 @@ void	pmap_md_page_syncicache(struct vm_p
 bool	pmap_md_vca_add(struct vm_page *, vaddr_t, pt_entry_t *);
 void	pmap_md_vca_clean(struct vm_page *, int);
 void	pmap_md_vca_remove(struct vm_page *, vaddr_t, bool, bool);
-bool	pmap_md_ok_to_steal_p(const struct vm_physseg *, size_t);
+bool	pmap_md_ok_to_steal_p(const uvm_physseg_t, size_t);
 bool	pmap_md_tlb_check_entry(void *, vaddr_t, tlb_asid_t, pt_entry_t);
 
 static inline bool

Index: src/sys/arch/mips/mips/mips_machdep.c
diff -u src/sys/arch/mips/mips/mips_machdep.c:1.275 src/sys/arch/mips/mips/mips_machdep.c:1.276
--- src/sys/arch/mips/mips/mips_machdep.c:1.275	Thu Dec 22 07:56:38 2016
+++ src/sys/arch/mips/mips/mips_machdep.c	Fri Dec 23 07:15:27 2016
@@ -1,4 +1,4 @@
-/*	$NetBSD: mips_machdep.c,v 1.275 2016/12/22 07:56:38 mrg Exp $	*/
+/*	$NetBSD: mips_machdep.c,v 1.276 2016/12/23 07:15:27 cherry Exp $	*/
 
 /*
  * Copyright 2002 Wasabi Systems, Inc.
@@ -111,7 +111,7 @@
  */
 
 #include <sys/cdefs.h>			/* RCS ID & Copyright macro defns */
-__KERNEL_RCSID(0, "$NetBSD: mips_machdep.c,v 1.275 2016/12/22 07:56:38 mrg Exp $");
+__KERNEL_RCSID(0, "$NetBSD: mips_machdep.c,v 1.276 2016/12/23 07:15:27 cherry Exp $");
 
 #define __INTR_PRIVATE
 #include "opt_cputype.h"
@@ -145,6 +145,7 @@ __KERNEL_RCSID(0, "$NetBSD: mips_machdep
 #endif
 
 #include <uvm/uvm.h>
+#include <uvm/uvm_physseg.h>
 
 #include <dev/cons.h>
 #include <dev/mm.h>
@@ -2008,38 +2009,34 @@ mips_init_msgbuf(void)
 {
 	vsize_t sz = (vsize_t)round_page(MSGBUFSIZE);
 	vsize_t reqsz = sz;
-	u_int bank = vm_nphysseg - 1;
-	struct vm_physseg *vps = VM_PHYSMEM_PTR(bank);
+	uvm_physseg_t bank = uvm_physseg_get_last();
 #ifndef _LP64
 	/*
 	 * Fist the physical segment that can be mapped to KSEG0
 	 */
-	for (; vps >= vm_physmem; vps--, bank--) {
-		if (vps->avail_start + atop(sz) <= atop(MIPS_PHYS_MASK))
+	for (; uvm_physseg_valid_p(bank); bank = uvm_physseg_get_prev(bank)) {
+		if (uvm_physseg_get_avail_start(bank) + atop(sz) <= atop(MIPS_PHYS_MASK))
 			break;
 	}
 #endif
 
+	paddr_t start = uvm_physseg_get_start(bank);
+	paddr_t end = uvm_physseg_get_end(bank);
+	
 	/* shrink so that it'll fit in the last segment */
-	if ((vps->avail_end - vps->avail_start) < atop(sz))
-		sz = ptoa(vps->avail_end - vps->avail_start);
+	if ((end - start) < atop(sz))
+		sz = ptoa(end - start);
 
-	vps->end -= atop(sz);
-	vps->avail_end -= atop(sz);
+	end -= atop(sz);
+	uvm_physseg_unplug(end, atop(sz));
+	
 #ifdef _LP64
-	msgbufaddr = (void *) MIPS_PHYS_TO_XKPHYS_CACHED(ptoa(vps->end));
+	msgbufaddr = (void *) MIPS_PHYS_TO_XKPHYS_CACHED(ptoa(end));
 #else
-	msgbufaddr = (void *) MIPS_PHYS_TO_KSEG0(ptoa(vps->end));
+	msgbufaddr = (void *) MIPS_PHYS_TO_KSEG0(ptoa(end));
 #endif
 	initmsgbuf(msgbufaddr, sz);
 
-	/* Remove the [last] segment if it now has no pages. */
-	if (vps->start == vps->end) {
-		for (vm_nphysseg--; bank < vm_nphysseg - 1; bank++) {
-			VM_PHYSMEM_PTR_SWAP(bank, bank + 1);
-		}
-	}
-
 	/* warn if the message buffer had to be shrunk */
 	if (sz != reqsz)
 		printf("WARNING: %"PRIdVSIZE" bytes not available for msgbuf "

Index: src/sys/arch/mips/mips/pmap_machdep.c
diff -u src/sys/arch/mips/mips/pmap_machdep.c:1.11 src/sys/arch/mips/mips/pmap_machdep.c:1.12
--- src/sys/arch/mips/mips/pmap_machdep.c:1.11	Mon Sep  5 06:59:25 2016
+++ src/sys/arch/mips/mips/pmap_machdep.c	Fri Dec 23 07:15:27 2016
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap_machdep.c,v 1.11 2016/09/05 06:59:25 skrll Exp $	*/
+/*	$NetBSD: pmap_machdep.c,v 1.12 2016/12/23 07:15:27 cherry Exp $	*/
 
 /*-
  * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc.
@@ -67,7 +67,7 @@
 
 #include <sys/cdefs.h>
 
-__KERNEL_RCSID(0, "$NetBSD: pmap_machdep.c,v 1.11 2016/09/05 06:59:25 skrll Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap_machdep.c,v 1.12 2016/12/23 07:15:27 cherry Exp $");
 
 /*
  *	Manages physical address maps.
@@ -133,6 +133,7 @@ __KERNEL_RCSID(0, "$NetBSD: pmap_machdep
 #endif
 
 #include <uvm/uvm.h>
+#include <uvm/uvm_physseg.h>
 
 #include <mips/cache.h>
 #include <mips/cpuregs.h>
@@ -306,12 +307,12 @@ pmap_md_vca_page_wbinv(struct vm_page *p
 }
 
 bool
-pmap_md_ok_to_steal_p(const struct vm_physseg *seg, size_t npgs)
+pmap_md_ok_to_steal_p(const uvm_physseg_t bank, size_t npgs)
 {
 #ifndef _LP64
-	if (seg->avail_start + npgs >= atop(MIPS_PHYS_MASK + 1)) {
-		aprint_debug("%s: seg %zu: not enough in KSEG0 for %zu pages\n",
-		    __func__, seg - VM_PHYSMEM_PTR(0), npgs);
+	if (uvm_physseg_get_avail_start(bank) + npgs >= atop(MIPS_PHYS_MASK + 1)) {
+		aprint_debug("%s: seg not enough in KSEG0 for %zu pages\n",
+		    __func__, npgs);
 		return false;
 	}
 #endif
@@ -393,8 +394,8 @@ pmap_bootstrap(void)
 	 * for us.  Must do this before uvm_pageboot_alloc()
 	 * can be called.
 	 */
-	pmap_limits.avail_start = ptoa(VM_PHYSMEM_PTR(0)->start);
-	pmap_limits.avail_end = ptoa(VM_PHYSMEM_PTR(vm_nphysseg - 1)->end);
+	pmap_limits.avail_start = ptoa(uvm_physseg_get_start(uvm_physseg_get_first()));
+	pmap_limits.avail_end = ptoa(uvm_physseg_get_end(uvm_physseg_get_last()));
 	pmap_limits.virtual_end = pmap_limits.virtual_start + (vaddr_t)sysmap_size * NBPG;
 
 #ifndef _LP64

Index: src/sys/arch/powerpc/ibm4xx/pmap.c
diff -u src/sys/arch/powerpc/ibm4xx/pmap.c:1.73 src/sys/arch/powerpc/ibm4xx/pmap.c:1.74
--- src/sys/arch/powerpc/ibm4xx/pmap.c:1.73	Thu Dec 22 14:47:58 2016
+++ src/sys/arch/powerpc/ibm4xx/pmap.c	Fri Dec 23 07:15:27 2016
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.c,v 1.73 2016/12/22 14:47:58 cherry Exp $	*/
+/*	$NetBSD: pmap.c,v 1.74 2016/12/23 07:15:27 cherry Exp $	*/
 
 /*
  * Copyright 2001 Wasabi Systems, Inc.
@@ -67,7 +67,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.73 2016/12/22 14:47:58 cherry Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.74 2016/12/23 07:15:27 cherry Exp $");
 
 #include <sys/param.h>
 #include <sys/cpu.h>
@@ -199,7 +199,7 @@ pa_to_pv(paddr_t pa)
 {
 	int bank, pg;
 
-	bank = vm_physseg_find(atop(pa), &pg);
+	bank = uvm_physseg_find(atop(pa), &pg);
 	if (bank == -1)
 		return NULL;
 	return &VM_PHYSMEM_PTR(bank)->pmseg.pvent[pg];
@@ -210,7 +210,7 @@ pa_to_attr(paddr_t pa)
 {
 	int bank, pg;
 
-	bank = vm_physseg_find(atop(pa), &pg);
+	bank = uvm_physseg_find(atop(pa), &pg);
 	if (bank == -1)
 		return NULL;
 	return &VM_PHYSMEM_PTR(bank)->pmseg.attrs[pg];

Index: src/sys/arch/powerpc/isa/isadma_machdep.c
diff -u src/sys/arch/powerpc/isa/isadma_machdep.c:1.10 src/sys/arch/powerpc/isa/isadma_machdep.c:1.11
--- src/sys/arch/powerpc/isa/isadma_machdep.c:1.10	Fri Feb 26 18:17:39 2016
+++ src/sys/arch/powerpc/isa/isadma_machdep.c	Fri Dec 23 07:15:27 2016
@@ -1,4 +1,4 @@
-/*	$NetBSD: isadma_machdep.c,v 1.10 2016/02/26 18:17:39 christos Exp $	*/
+/*	$NetBSD: isadma_machdep.c,v 1.11 2016/12/23 07:15:27 cherry Exp $	*/
 
 /*-
  * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
@@ -31,7 +31,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: isadma_machdep.c,v 1.10 2016/02/26 18:17:39 christos Exp $");
+__KERNEL_RCSID(0, "$NetBSD: isadma_machdep.c,v 1.11 2016/12/23 07:15:27 cherry Exp $");
 
 #define ISA_DMA_STATS
 
@@ -168,9 +168,11 @@ _isa_bus_dmamap_create(bus_dma_tag_t t, 
 	size_t cookiesize;
 	paddr_t avail_end = 0;
 
-	for (bank = 0; bank < vm_nphysseg; bank++) {
-		if (avail_end < VM_PHYSMEM_PTR(bank)->avail_end << PGSHIFT)
-			avail_end = VM_PHYSMEM_PTR(bank)->avail_end << PGSHIFT;
+	for (bank = uvm_physseg_get_first();
+	     uvm_physseg_valid_p(bank);
+	     bank = uvm_physseg_get_next(bank)) {
+		if (avail_end < uvm_physseg_get_avail_end(bank) << PGSHIFT)
+			avail_end = uvm_physseg_get_avail_end(bank) << PGSHIFT;
 	}
 
 	/* Call common function to create the basic map. */
@@ -598,9 +600,11 @@ _isa_bus_dmamem_alloc(bus_dma_tag_t t, b
 	paddr_t high, avail_end = 0;
 	int bank;
 
-	for (bank = 0; bank < vm_nphysseg; bank++) {
-		if (avail_end < VM_PHYSMEM_PTR(bank)->avail_end << PGSHIFT)
-			avail_end = VM_PHYSMEM_PTR(bank)->avail_end << PGSHIFT;
+	for (bank = uvm_physseg_get_first();
+	     uvm_physseg_valid_p(bank);
+	     bank = uvm_physseg_get_next(bank)) {
+		if (avail_end < uvm_physseg_get_avail_end(bank) << PGSHIFT)
+			avail_end = uvm_physseg_get_avail_end(bank) << PGSHIFT;
 	}
 
 	if (avail_end > ISA_DMA_BOUNCE_THRESHOLD)

Index: src/sys/arch/powerpc/oea/pmap.c
diff -u src/sys/arch/powerpc/oea/pmap.c:1.93 src/sys/arch/powerpc/oea/pmap.c:1.94
--- src/sys/arch/powerpc/oea/pmap.c:1.93	Sun Feb 14 18:07:49 2016
+++ src/sys/arch/powerpc/oea/pmap.c	Fri Dec 23 07:15:28 2016
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.c,v 1.93 2016/02/14 18:07:49 dholland Exp $	*/
+/*	$NetBSD: pmap.c,v 1.94 2016/12/23 07:15:28 cherry Exp $	*/
 /*-
  * Copyright (c) 2001 The NetBSD Foundation, Inc.
  * All rights reserved.
@@ -63,7 +63,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.93 2016/02/14 18:07:49 dholland Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.94 2016/12/23 07:15:28 cherry Exp $");
 
 #define	PMAP_NOOPNAMES
 
@@ -81,6 +81,7 @@ __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.9
 #include <sys/atomic.h>
 
 #include <uvm/uvm.h>
+#include <uvm/uvm_physseg.h>
 
 #include <machine/powerpc.h>
 #include <powerpc/bat.h>
@@ -2909,9 +2910,9 @@ pmap_steal_memory(vsize_t vsize, vaddr_t
 {
 	vsize_t size;
 	vaddr_t va;
-	paddr_t pa = 0;
-	int npgs, bank;
-	struct vm_physseg *ps;
+	paddr_t start, end, pa = 0;
+	int npgs, freelist;
+	uvm_physseg_t bank;
 
 	if (uvm.page_init_done == true)
 		panic("pmap_steal_memory: called _after_ bootstrap");
@@ -2926,11 +2927,18 @@ pmap_steal_memory(vsize_t vsize, vaddr_t
 	 * PA 0 will never be among those given to UVM so we can use it
 	 * to indicate we couldn't steal any memory.
 	 */
-	for (bank = 0; bank < vm_nphysseg; bank++) {
-		ps = VM_PHYSMEM_PTR(bank);
-		if (ps->free_list == VM_FREELIST_FIRST256 && 
-		    ps->avail_end - ps->avail_start >= npgs) {
-			pa = ptoa(ps->avail_start);
+
+	for (bank = uvm_physseg_get_first();
+	     uvm_physseg_valid_p(bank);
+	     bank = uvm_physseg_get_next(bank)) {
+
+		freelist = uvm_physseg_get_free_list(bank);
+		start = uvm_physseg_get_start(bank);
+		end = uvm_physseg_get_end(bank);
+		
+		if (freelist == VM_FREELIST_FIRST256 &&
+		    (end - start) >= npgs) {
+			pa = ptoa(start);
 			break;
 		}
 	}
@@ -2938,25 +2946,7 @@ pmap_steal_memory(vsize_t vsize, vaddr_t
 	if (pa == 0)
 		panic("pmap_steal_memory: no approriate memory to steal!");
 
-	ps->avail_start += npgs;
-	ps->start += npgs;
-
-	/*
-	 * If we've used up all the pages in the segment, remove it and
-	 * compact the list.
-	 */
-	if (ps->avail_start == ps->end) {
-		/*
-		 * If this was the last one, then a very bad thing has occurred
-		 */
-		if (--vm_nphysseg == 0)
-			panic("pmap_steal_memory: out of memory!");
-
-		printf("pmap_steal_memory: consumed bank %d\n", bank);
-		for (; bank < vm_nphysseg; bank++, ps++) {
-			ps[0] = ps[1];
-		}
-	}
+	uvm_physseg_unplug(start, npgs);
 
 	va = (vaddr_t) pa;
 	memset((void *) va, 0, size);
@@ -2964,9 +2954,10 @@ pmap_steal_memory(vsize_t vsize, vaddr_t
 #ifdef DEBUG
 	if (pmapdebug && npgs > 1) {
 		u_int cnt = 0;
-		for (bank = 0; bank < vm_nphysseg; bank++) {
-			ps = VM_PHYSMEM_PTR(bank);
-			cnt += ps->avail_end - ps->avail_start;
+		for (bank = uvm_physseg_get_first();
+		     uvm_physseg_valid_p(bank);
+		     bank = uvm_physseg_get_next(bank)) {
+			cnt += uvm_physseg_get_avail_end(bank) - uvm_physseg_get_avail_start(bank);
 		}
 		printf("pmap_steal_memory: stole %u (total %u) pages (%u left)\n",
 		    npgs, pmap_pages_stolen, cnt);
@@ -3446,15 +3437,18 @@ pmap_bootstrap(paddr_t kernelstart, padd
 #ifdef DEBUG
 	if (pmapdebug & PMAPDEBUG_BOOT) {
 		u_int cnt;
-		int bank;
+		uvm_physseg_t bank;
 		char pbuf[9];
-		for (cnt = 0, bank = 0; bank < vm_nphysseg; bank++) {
-			cnt += VM_PHYSMEM_PTR(bank)->avail_end - VM_PHYSMEM_PTR(bank)->avail_start;
+		for (cnt = 0, bank = uvm_physseg_get_first();
+		     uvm_physseg_valid_p(bank);
+		     bank = uvm_physseg_get_next(bank)) {
+			cnt += uvm_physseg_get_avail_end(bank) -
+			    uvm_physseg_get_avail_start(bank);
 			printf("pmap_bootstrap: vm_physmem[%d]=%#" _PRIxpa "-%#" _PRIxpa "/%#" _PRIxpa "\n",
 			    bank,
-			    ptoa(VM_PHYSMEM_PTR(bank)->avail_start),
-			    ptoa(VM_PHYSMEM_PTR(bank)->avail_end),
-			    ptoa(VM_PHYSMEM_PTR(bank)->avail_end - VM_PHYSMEM_PTR(bank)->avail_start));
+			    ptoa(uvm_physseg_get_avail_start(bank)),
+			    ptoa(uvm_physseg_get_avail_end(bank)),
+			    ptoa(uvm_physseg_get_avail_end(bank) - uvm_physseg_get_avail_start(bank)));
 		}
 		format_bytes(pbuf, sizeof(pbuf), ptoa((u_int64_t) cnt));
 		printf("pmap_bootstrap: UVM memory = %s (%u pages)\n",

Index: src/sys/arch/powerpc/powerpc/bus_dma.c
diff -u src/sys/arch/powerpc/powerpc/bus_dma.c:1.46 src/sys/arch/powerpc/powerpc/bus_dma.c:1.47
--- src/sys/arch/powerpc/powerpc/bus_dma.c:1.46	Wed Feb  1 09:54:03 2012
+++ src/sys/arch/powerpc/powerpc/bus_dma.c	Fri Dec 23 07:15:28 2016
@@ -1,4 +1,4 @@
-/*	$NetBSD: bus_dma.c,v 1.46 2012/02/01 09:54:03 matt Exp $	*/
+/*	$NetBSD: bus_dma.c,v 1.47 2016/12/23 07:15:28 cherry Exp $	*/
 
 /*-
  * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
@@ -32,7 +32,7 @@
 
 #define _POWERPC_BUS_DMA_PRIVATE
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: bus_dma.c,v 1.46 2012/02/01 09:54:03 matt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: bus_dma.c,v 1.47 2016/12/23 07:15:28 cherry Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -45,6 +45,7 @@ __KERNEL_RCSID(0, "$NetBSD: bus_dma.c,v 
 #include <sys/intr.h>
 
 #include <uvm/uvm.h>
+#include <uvm/uvm_physseg.h>
 
 #ifdef PPC_BOOKE
 #define	EIEIO	__asm volatile("mbar\t0")
@@ -544,13 +545,15 @@ int
 _bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment, bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs, int flags)
 {
 	paddr_t start = 0xffffffff, end = 0;
-	int bank;
+	uvm_physseg_t bank;
 
-	for (bank = 0; bank < vm_nphysseg; bank++) {
-		if (start > ptoa(VM_PHYSMEM_PTR(bank)->avail_start))
-			start = ptoa(VM_PHYSMEM_PTR(bank)->avail_start);
-		if (end < ptoa(VM_PHYSMEM_PTR(bank)->avail_end))
-			end = ptoa(VM_PHYSMEM_PTR(bank)->avail_end);
+	for (bank = uvm_physseg_get_first();
+	     uvm_physseg_valid_p(bank);
+	     bank = uvm_physseg_get_next(bank)) {
+		if (start > ptoa(uvm_physseg_get_avail_start(bank)))
+			start = ptoa(uvm_physseg_get_avail_start(bank));
+		if (end < ptoa(uvm_physseg_get_avail_end(bank)))
+			end = ptoa(uvm_physseg_get_avail_end(bank));
 	}
 
 	return _bus_dmamem_alloc_range(t, size, alignment, boundary, segs,

Index: src/sys/arch/sh3/sh3/pmap.c
diff -u src/sys/arch/sh3/sh3/pmap.c:1.78 src/sys/arch/sh3/sh3/pmap.c:1.79
--- src/sys/arch/sh3/sh3/pmap.c:1.78	Sat Sep  3 09:07:54 2016
+++ src/sys/arch/sh3/sh3/pmap.c	Fri Dec 23 07:15:28 2016
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.c,v 1.78 2016/09/03 09:07:54 christos Exp $	*/
+/*	$NetBSD: pmap.c,v 1.79 2016/12/23 07:15:28 cherry Exp $	*/
 
 /*-
  * Copyright (c) 2002 The NetBSD Foundation, Inc.
@@ -30,7 +30,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.78 2016/09/03 09:07:54 christos Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.79 2016/12/23 07:15:28 cherry Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -39,6 +39,7 @@ __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.7
 #include <sys/socketvar.h>	/* XXX: for sock_loan_thresh */
 
 #include <uvm/uvm.h>
+#include <uvm/uvm_physseg.h>
 
 #include <sh3/mmu.h>
 #include <sh3/cache.h>
@@ -107,8 +108,8 @@ pmap_bootstrap(void)
 	/* Steal msgbuf area */
 	initmsgbuf((void *)uvm_pageboot_alloc(MSGBUFSIZE), MSGBUFSIZE);
 
-	avail_start = ptoa(VM_PHYSMEM_PTR(0)->start);
-	avail_end = ptoa(VM_PHYSMEM_PTR(vm_nphysseg - 1)->end);
+	avail_start = ptoa(uvm_physseg_get_start(uvm_physseg_get_first()));
+	avail_end = ptoa(uvm_physseg_get_end(uvm_physseg_get_last()));
 	__pmap_kve = VM_MIN_KERNEL_ADDRESS;
 
 	pmap_kernel()->pm_refcnt = 1;
@@ -126,39 +127,28 @@ pmap_bootstrap(void)
 vaddr_t
 pmap_steal_memory(vsize_t size, vaddr_t *vstart, vaddr_t *vend)
 {
-	struct vm_physseg *bank;
-	int i, j, npage;
+	int npage;
 	paddr_t pa;
 	vaddr_t va;
+	uvm_physseg_t bank;
 
 	KDASSERT(!uvm.page_init_done);
 
 	size = round_page(size);
 	npage = atop(size);
 
-	bank = NULL;
-	for (i = 0; i < vm_nphysseg; i++) {
-		bank = VM_PHYSMEM_PTR(i);
-		if (npage <= bank->avail_end - bank->avail_start)
+	for (bank = uvm_physseg_get_first();
+	     uvm_physseg_valid_p(bank);
+	     bank = uvm_physseg_get_next(bank)) {
+		if (npage <= uvm_physseg_get_end(bank) - uvm_physseg_get_start(bank))
 			break;
 	}
-	KDASSERT(i != vm_nphysseg);
-	KDASSERT(bank != NULL);
 
-	/* Steal pages */
-	pa = ptoa(bank->avail_start);
-	bank->avail_start += npage;
-	bank->start += npage;
-
-	/* GC memory bank */
-	if (bank->avail_start == bank->end) {
-		/* Remove this segment from the list. */
-		vm_nphysseg--;
-		KDASSERT(vm_nphysseg > 0);
-		for (j = i; i < vm_nphysseg; j++)
-			VM_PHYSMEM_PTR_SWAP(j, j + 1);
-	}
+	KDASSERT(uvm_physseg_valid_p(bank));
 
+	/* Steal pages */
+	pa = ptoa(uvm_physseg_get_start(bank));
+	uvm_physseg_unplug(atop(pa), npage);
 	va = SH3_PHYS_TO_P1SEG(pa);
 	memset((void *)va, 0, size);
 

Index: src/sys/arch/sh3/sh3/vm_machdep.c
diff -u src/sys/arch/sh3/sh3/vm_machdep.c:1.76 src/sys/arch/sh3/sh3/vm_machdep.c:1.77
--- src/sys/arch/sh3/sh3/vm_machdep.c:1.76	Thu Nov  7 21:45:04 2013
+++ src/sys/arch/sh3/sh3/vm_machdep.c	Fri Dec 23 07:15:28 2016
@@ -1,4 +1,4 @@
-/*	$NetBSD: vm_machdep.c,v 1.76 2013/11/07 21:45:04 christos Exp $	*/
+/*	$NetBSD: vm_machdep.c,v 1.77 2016/12/23 07:15:28 cherry Exp $	*/
 
 /*-
  * Copyright (c) 2002 The NetBSD Foundation, Inc. All rights reserved.
@@ -81,7 +81,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: vm_machdep.c,v 1.76 2013/11/07 21:45:04 christos Exp $");
+__KERNEL_RCSID(0, "$NetBSD: vm_machdep.c,v 1.77 2016/12/23 07:15:28 cherry Exp $");
 
 #include "opt_kstack_debug.h"
 
@@ -102,6 +102,7 @@ __KERNEL_RCSID(0, "$NetBSD: vm_machdep.c
 
 #include <uvm/uvm_extern.h>
 #include <uvm/uvm_page.h>
+#include <uvm/uvm_physseg.h>
 
 #include <sh3/locore.h>
 #include <sh3/cpu.h>
@@ -387,7 +388,7 @@ int
 mm_md_physacc(paddr_t pa, vm_prot_t prot)
 {
 
-	if (atop(pa) < vm_physmem[0].start || PHYS_TO_VM_PAGE(pa) != NULL) {
+	if (atop(pa) < uvm_physseg_get_start(uvm_physseg_get_first()) || PHYS_TO_VM_PAGE(pa) != NULL) {
 		return 0;
 	}
 	return EFAULT;

Index: src/sys/arch/vax/vax/ka650.c
diff -u src/sys/arch/vax/vax/ka650.c:1.36 src/sys/arch/vax/vax/ka650.c:1.37
--- src/sys/arch/vax/vax/ka650.c:1.36	Tue Dec 14 23:44:49 2010
+++ src/sys/arch/vax/vax/ka650.c	Fri Dec 23 07:15:28 2016
@@ -1,4 +1,4 @@
-/*	$NetBSD: ka650.c,v 1.36 2010/12/14 23:44:49 matt Exp $	*/
+/*	$NetBSD: ka650.c,v 1.37 2016/12/23 07:15:28 cherry Exp $	*/
 /*
  * Copyright (c) 1988 The Regents of the University of California.
  * All rights reserved.
@@ -38,7 +38,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: ka650.c,v 1.36 2010/12/14 23:44:49 matt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: ka650.c,v 1.37 2016/12/23 07:15:28 cherry Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -106,7 +106,7 @@ ka650_conf(void)
 
 	ka650setcache(CACHEON);
 	if (ctob(physmem) > ka650merr_ptr->merr_qbmbr) {
-		printf("physmem(0x%x) > qbmbr(0x%x)\n",
+		printf("physmem(%"PRIxPSIZE") > qbmbr(0x%x)\n",
 		    ctob(physmem), (int)ka650merr_ptr->merr_qbmbr);
 		panic("qbus map unprotected");
 	}

Index: src/sys/arch/vax/vax/pmap.c
diff -u src/sys/arch/vax/vax/pmap.c:1.183 src/sys/arch/vax/vax/pmap.c:1.184
--- src/sys/arch/vax/vax/pmap.c:1.183	Thu Dec 22 14:48:00 2016
+++ src/sys/arch/vax/vax/pmap.c	Fri Dec 23 07:15:28 2016
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.c,v 1.183 2016/12/22 14:48:00 cherry Exp $	   */
+/*	$NetBSD: pmap.c,v 1.184 2016/12/23 07:15:28 cherry Exp $	   */
 /*
  * Copyright (c) 1994, 1998, 1999, 2003 Ludd, University of Lule}, Sweden.
  * All rights reserved.
@@ -30,7 +30,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.183 2016/12/22 14:48:00 cherry Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.184 2016/12/23 07:15:28 cherry Exp $");
 
 #include "opt_ddb.h"
 #include "opt_cputype.h"
@@ -51,6 +51,7 @@ __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.1
 #include <sys/mutex.h>
 
 #include <uvm/uvm.h>
+#include <uvm/uvm_physseg.h>
 
 #ifdef PMAPDEBUG
 #include <dev/cons.h>
@@ -475,6 +476,7 @@ pmap_steal_memory(vsize_t size, vaddr_t 
 {
 	vaddr_t v;
 	int npgs;
+	uvm_physseg_t bank;
 
 	PMDEBUG(("pmap_steal_memory: size 0x%lx start %p end %p\n",
 		    size, vstartp, vendp));
@@ -490,10 +492,10 @@ pmap_steal_memory(vsize_t size, vaddr_t 
 	/*
 	 * A vax only have one segment of memory.
 	 */
+	bank = uvm_physseg_get_first();
 
-	v = (VM_PHYSMEM_PTR(0)->avail_start << PGSHIFT) | KERNBASE;
-	VM_PHYSMEM_PTR(0)->avail_start += npgs;
-	VM_PHYSMEM_PTR(0)->start += npgs;
+	v = (uvm_physseg_get_start(bank) << PGSHIFT) | KERNBASE;
+	uvm_physseg_unplug(uvm_physseg_get_start(bank), npgs);
 	memset((void *)v, 0, size);
 	return v;
 }

Index: src/sys/arch/x68k/x68k/machdep.c
diff -u src/sys/arch/x68k/x68k/machdep.c:1.194 src/sys/arch/x68k/x68k/machdep.c:1.195
--- src/sys/arch/x68k/x68k/machdep.c:1.194	Fri Dec  2 12:43:07 2016
+++ src/sys/arch/x68k/x68k/machdep.c	Fri Dec 23 07:15:28 2016
@@ -1,4 +1,4 @@
-/*	$NetBSD: machdep.c,v 1.194 2016/12/02 12:43:07 tsutsui Exp $	*/
+/*	$NetBSD: machdep.c,v 1.195 2016/12/23 07:15:28 cherry Exp $	*/
 
 /*
  * Copyright (c) 1988 University of Utah.
@@ -39,7 +39,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.194 2016/12/02 12:43:07 tsutsui Exp $");
+__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.195 2016/12/23 07:15:28 cherry Exp $");
 
 #include "opt_ddb.h"
 #include "opt_kgdb.h"
@@ -102,6 +102,7 @@ __KERNEL_RCSID(0, "$NetBSD: machdep.c,v 
 
 #define	MAXMEM	64*1024	/* XXX - from cmap.h */
 #include <uvm/uvm.h>
+#include <uvm/uvm_physseg.h>
 
 #include <machine/bus.h>
 #include <machine/autoconf.h>
@@ -553,10 +554,7 @@ cpu_init_kcore_hdr(void)
 {
 	cpu_kcore_hdr_t *h = &cpu_kcore_hdr;
 	struct m68k_kcore_hdr *m = &h->un._m68k;
-	psize_t size;
-#ifdef EXTENDED_MEMORY
-	int i, seg;
-#endif
+	uvm_physseg_t i;
 
 	memset(&cpu_kcore_hdr, 0, sizeof(cpu_kcore_hdr));
 
@@ -605,20 +603,25 @@ cpu_init_kcore_hdr(void)
 	/*
 	 * X68k has multiple RAM segments on some models.
 	 */
-	size = phys_basemem_seg.end - phys_basemem_seg.start;
-	m->ram_segs[0].start = phys_basemem_seg.start;
-	m->ram_segs[0].size  = size;
-#ifdef EXTENDED_MEMORY
-	seg = 1;
-	for (i = 0; i < EXTMEM_SEGS; i++) {
-		size = phys_extmem_seg[i].end - phys_extmem_seg[i].start;
-		if (size == 0)
-			continue;
-		m->ram_segs[seg].start = phys_extmem_seg[i].start;
-		m->ram_segs[seg].size  = size;
-		seg++;
+	m->ram_segs[0].start = lowram;
+	m->ram_segs[0].size = mem_size - lowram;
+
+	i = uvm_physseg_get_first();
+	
+        for (i = uvm_physseg_get_next(i); uvm_physseg_valid_p(i); i = uvm_physseg_get_next(i)) {
+		if (uvm_physseg_valid_p(i) == false)
+			break;
+
+		const paddr_t startpfn = uvm_physseg_get_start(i);
+		const paddr_t endpfn = uvm_physseg_get_end(i);
+
+		KASSERT(startpfn != -1 && endpfn != -1);
+
+		m->ram_segs[i].start = 
+		    ctob(startpfn);
+		m->ram_segs[i].size  =			
+		    ctob(endpfn - startpfn);
 	}
-#endif
 }
 
 /*
@@ -1249,11 +1252,14 @@ cpu_intr_p(void)
 int
 mm_md_physacc(paddr_t pa, vm_prot_t prot)
 {
-	int i;
+	uvm_physseg_t i;
+
+	for (i = uvm_physseg_get_first(); uvm_physseg_valid_p(i); i = uvm_physseg_get_next(i)) {
+		if (uvm_physseg_valid_p(i) == false)
+			break;
 
-	for (i = 0; i < vm_nphysseg; i++) {
-		if (ctob(vm_physmem[i].start) <= pa &&
-		    pa < ctob(vm_physmem[i].end))
+		if (ctob(uvm_physseg_get_start(i)) <= pa &&
+		    pa < ctob(uvm_physseg_get_end(i)))
 			return 0;
 	}
 	return EFAULT;

Index: src/sys/uvm/files.uvm
diff -u src/sys/uvm/files.uvm:1.27 src/sys/uvm/files.uvm:1.28
--- src/sys/uvm/files.uvm:1.27	Thu Dec  1 02:09:03 2016
+++ src/sys/uvm/files.uvm	Fri Dec 23 07:15:28 2016
@@ -1,4 +1,4 @@
-#	$NetBSD: files.uvm,v 1.27 2016/12/01 02:09:03 mrg Exp $
+#	$NetBSD: files.uvm,v 1.28 2016/12/23 07:15:28 cherry Exp $
 
 #
 # UVM options
@@ -15,6 +15,7 @@ defparam opt_pagermap.h		PAGER_MAP_SIZE
 defflag				PDPOLICY_CLOCKPRO
 defparam			USER_VA0_DISABLE_DEFAULT
 defflag opt_uvm_page_trkown.h	UVM_PAGE_TRKOWN
+defflag opt_uvm_hotplug.h	UVM_HOTPLUG
 
 define	uvm
 defflag	opt_uvm.h			UVM
@@ -42,6 +43,7 @@ file	uvm/uvm_pdaemon.c		uvm
 file	uvm/uvm_pdpolicy_clock.c	!pdpolicy_clockpro
 file	uvm/uvm_pdpolicy_clockpro.c	pdpolicy_clockpro
 file	uvm/uvm_pglist.c		uvm
+file	uvm/uvm_physseg.c		uvm
 file	uvm/uvm_readahead.c		uvm
 file	uvm/uvm_stat.c                	uvm
 file	uvm/uvm_swap.c			vmswap

Index: src/sys/uvm/uvm_init.c
diff -u src/sys/uvm/uvm_init.c:1.47 src/sys/uvm/uvm_init.c:1.48
--- src/sys/uvm/uvm_init.c:1.47	Thu Dec 22 12:55:21 2016
+++ src/sys/uvm/uvm_init.c	Fri Dec 23 07:15:28 2016
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_init.c,v 1.47 2016/12/22 12:55:21 cherry Exp $	*/
+/*	$NetBSD: uvm_init.c,v 1.48 2016/12/23 07:15:28 cherry Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -32,7 +32,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_init.c,v 1.47 2016/12/22 12:55:21 cherry Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_init.c,v 1.48 2016/12/23 07:15:28 cherry Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -46,6 +46,7 @@ __KERNEL_RCSID(0, "$NetBSD: uvm_init.c,v
 
 #include <uvm/uvm.h>
 #include <uvm/uvm_pdpolicy.h>
+#include <uvm/uvm_physseg.h>
 #include <uvm/uvm_readahead.h>
 
 /*
@@ -77,6 +78,7 @@ void
 uvm_md_init(void)
 {
 	uvm_setpagesize(); /* initialize PAGE_SIZE-dependent variables */
+	uvm_physseg_init();
 }
 
 /*

Index: src/sys/uvm/uvm_page.c
diff -u src/sys/uvm/uvm_page.c:1.189 src/sys/uvm/uvm_page.c:1.190
--- src/sys/uvm/uvm_page.c:1.189	Thu Dec 22 16:05:15 2016
+++ src/sys/uvm/uvm_page.c	Fri Dec 23 07:15:28 2016
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_page.c,v 1.189 2016/12/22 16:05:15 cherry Exp $	*/
+/*	$NetBSD: uvm_page.c,v 1.190 2016/12/23 07:15:28 cherry Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -66,7 +66,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.189 2016/12/22 16:05:15 cherry Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.190 2016/12/23 07:15:28 cherry Exp $");
 
 #include "opt_ddb.h"
 #include "opt_uvm.h"
@@ -81,24 +81,13 @@ __KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v
 #include <sys/proc.h>
 #include <sys/atomic.h>
 #include <sys/cpu.h>
+#include <sys/extent.h>
 
 #include <uvm/uvm.h>
 #include <uvm/uvm_ddb.h>
 #include <uvm/uvm_pdpolicy.h>
 
 /*
- * global vars... XXXCDC: move to uvm. structure.
- */
-
-/*
- * physical memory config is stored in vm_physmem.
- */
-
-struct vm_physseg vm_physmem[VM_PHYSSEG_MAX];	/* XXXCDC: uvm.physmem */
-int vm_nphysseg = 0;				/* XXXCDC: uvm.nphysseg */
-#define	vm_nphysmem	vm_nphysseg
-
-/*
  * Some supported CPUs in a given architecture don't support all
  * of the things necessary to do idle page zero'ing efficiently.
  * We therefore provide a way to enable it from machdep code here.
@@ -146,6 +135,18 @@ vaddr_t uvm_zerocheckkva;
 #endif /* DEBUG */
 
 /*
+ * These functions are reserved for uvm(9) internal use and are not
+ * exported in the header file uvm_physseg.h
+ *
+ * Thus they are redefined here.
+ */
+void uvm_physseg_init_seg(uvm_physseg_t, struct vm_page *);
+void uvm_physseg_seg_chomp_slab(uvm_physseg_t, struct vm_page *, size_t);
+
+/* returns a pgs array */
+struct vm_page *uvm_physseg_seg_alloc_from_slab(uvm_physseg_t, size_t);
+
+/*
  * local prototypes
  */
 
@@ -337,11 +338,9 @@ uvm_page_init(vaddr_t *kvm_startp, vaddr
 	static struct uvm_cpu boot_cpu;
 	psize_t freepages, pagecount, bucketcount, n;
 	struct pgflbucket *bucketarray, *cpuarray;
-	struct vm_physseg *seg;
 	struct vm_page *pagearray;
+	uvm_physseg_t bank;
 	int lcv;
-	u_int i;
-	paddr_t paddr;
 
 	KASSERT(ncpu <= 1);
 	CTASSERT(sizeof(pagearray->offset) >= sizeof(struct uvm_cpu *));
@@ -369,7 +368,7 @@ uvm_page_init(vaddr_t *kvm_startp, vaddr
 	 * now is to allocate vm_page structures for this memory.
 	 */
 
-	if (vm_nphysmem == 0)
+	if (uvm_physseg_get_last() == UVM_PHYSSEG_TYPE_INVALID)
 		panic("uvm_page_bootstrap: no memory pre-allocated");
 
 	/*
@@ -381,9 +380,11 @@ uvm_page_init(vaddr_t *kvm_startp, vaddr
 	 */
 
 	freepages = 0;
-	for (lcv = 0 ; lcv < vm_nphysmem ; lcv++) {
-		seg = VM_PHYSMEM_PTR(lcv);
-		freepages += (seg->end - seg->start);
+
+	for (bank = uvm_physseg_get_first();
+	     uvm_physseg_valid_p(bank) ;
+	     bank = uvm_physseg_get_next(bank)) {
+		freepages += (uvm_physseg_get_end(bank) - uvm_physseg_get_start(bank));
 	}
 
 	/*
@@ -428,31 +429,20 @@ uvm_page_init(vaddr_t *kvm_startp, vaddr
 	/*
 	 * init the vm_page structures and put them in the correct place.
 	 */
+	/* First init the extent */
 
-	for (lcv = 0 ; lcv < vm_nphysmem ; lcv++) {
-		seg = VM_PHYSMEM_PTR(lcv);
-		n = seg->end - seg->start;
+	for (bank = uvm_physseg_get_first(),
+		 uvm_physseg_seg_chomp_slab(bank, pagearray, pagecount);
+	     uvm_physseg_valid_p(bank);
+	     bank = uvm_physseg_get_next(bank)) {
+
+		n = uvm_physseg_get_end(bank) - uvm_physseg_get_start(bank);
+		uvm_physseg_seg_alloc_from_slab(bank, n);
+		uvm_physseg_init_seg(bank, pagearray);
 
 		/* set up page array pointers */
-		seg->pgs = pagearray;
 		pagearray += n;
 		pagecount -= n;
-		seg->lastpg = seg->pgs + n;
-
-		/* init and free vm_pages (we've already zeroed them) */
-		paddr = ctob(seg->start);
-		for (i = 0 ; i < n ; i++, paddr += PAGE_SIZE) {
-			seg->pgs[i].phys_addr = paddr;
-#ifdef __HAVE_VM_PAGE_MD
-			VM_MDPAGE_INIT(&seg->pgs[i]);
-#endif
-			if (atop(paddr) >= seg->avail_start &&
-			    atop(paddr) < seg->avail_end) {
-				uvmexp.npages++;
-				/* add page to free pool */
-				uvm_pagefree(&seg->pgs[i]);
-			}
-		}
 	}
 
 	/*
@@ -625,92 +615,42 @@ static bool uvm_page_physget_freelist(pa
 static bool
 uvm_page_physget_freelist(paddr_t *paddrp, int freelist)
 {
-	struct vm_physseg *seg;
-	int lcv, x;
+	uvm_physseg_t lcv;
 
 	/* pass 1: try allocating from a matching end */
 #if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
-	for (lcv = vm_nphysmem - 1 ; lcv >= 0 ; lcv--)
+	for (lcv = uvm_physseg_get_last() ; uvm_physseg_valid_p(lcv) ; lcv = uvm_physseg_get_prev(lcv))
 #else
-	for (lcv = 0 ; lcv < vm_nphysmem ; lcv++)
+		for (lcv = uvm_physseg_get_first() ; uvm_physseg_valid_p(lcv) ; lcv = uvm_physseg_get_next(lcv))
 #endif
 	{
-		seg = VM_PHYSMEM_PTR(lcv);
-
 		if (uvm.page_init_done == true)
 			panic("uvm_page_physget: called _after_ bootstrap");
 
-		if (seg->free_list != freelist)
-			continue;
+		/* Try to match at front or back on unused segment */
+		if (uvm_page_physunload(lcv, freelist, paddrp) == false) {
+			if (paddrp == NULL) /* freelist fail, try next */
+				continue;
+		} else
+			return true;
 
-		/* try from front */
-		if (seg->avail_start == seg->start &&
-		    seg->avail_start < seg->avail_end) {
-			*paddrp = ctob(seg->avail_start);
-			seg->avail_start++;
-			seg->start++;
-			/* nothing left?   nuke it */
-			if (seg->avail_start == seg->end) {
-				if (vm_nphysmem == 1)
-				    panic("uvm_page_physget: out of memory!");
-				vm_nphysmem--;
-				for (x = lcv ; x < vm_nphysmem ; x++)
-					/* structure copy */
-					VM_PHYSMEM_PTR_SWAP(x, x + 1);
-			}
-			return (true);
-		}
-
-		/* try from rear */
-		if (seg->avail_end == seg->end &&
-		    seg->avail_start < seg->avail_end) {
-			*paddrp = ctob(seg->avail_end - 1);
-			seg->avail_end--;
-			seg->end--;
-			/* nothing left?   nuke it */
-			if (seg->avail_end == seg->start) {
-				if (vm_nphysmem == 1)
-				    panic("uvm_page_physget: out of memory!");
-				vm_nphysmem--;
-				for (x = lcv ; x < vm_nphysmem ; x++)
-					/* structure copy */
-					VM_PHYSMEM_PTR_SWAP(x, x + 1);
-			}
-			return (true);
-		}
-	}
 
 	/* pass2: forget about matching ends, just allocate something */
 #if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
-	for (lcv = vm_nphysmem - 1 ; lcv >= 0 ; lcv--)
+		for (lcv = uvm_physseg_get_last() ; uvm_physseg_valid_p(lcv); lcv = uvm_physseg_get_prev(lcv))
 #else
-	for (lcv = 0 ; lcv < vm_nphysmem ; lcv++)
+		for (lcv = uvm_physseg_get_first() ; uvm_physseg_valid_p(lcv) ; lcv = uvm_physseg_get_next(lcv))
 #endif
 	{
-		seg = VM_PHYSMEM_PTR(lcv);
-
-		/* any room in this bank? */
-		if (seg->avail_start >= seg->avail_end)
-			continue;  /* nope */
-
-		*paddrp = ctob(seg->avail_start);
-		seg->avail_start++;
-		/* truncate! */
-		seg->start = seg->avail_start;
-
-		/* nothing left?   nuke it */
-		if (seg->avail_start == seg->end) {
-			if (vm_nphysmem == 1)
-				panic("uvm_page_physget: out of memory!");
-			vm_nphysmem--;
-			for (x = lcv ; x < vm_nphysmem ; x++)
-				/* structure copy */
-				VM_PHYSMEM_PTR_SWAP(x, x + 1);
-		}
-		return (true);
+		/* Try the front regardless. */
+		if (uvm_page_physunload_force(lcv, freelist, paddrp) == false) {
+			if (paddrp == NULL) /* freelist fail, try next */
+				continue;
+		} else
+			return true;
 	}
-
-	return (false);        /* whoops! */
+	}
+	return false;
 }
 
 bool
@@ -727,230 +667,6 @@ uvm_page_physget(paddr_t *paddrp)
 #endif /* PMAP_STEAL_MEMORY */
 
 /*
- * uvm_page_physload: load physical memory into VM system
- *
- * => all args are PFs
- * => all pages in start/end get vm_page structures
- * => areas marked by avail_start/avail_end get added to the free page pool
- * => we are limited to VM_PHYSSEG_MAX physical memory segments
- */
-
-uvm_physseg_t
-uvm_page_physload(paddr_t start, paddr_t end, paddr_t avail_start,
-    paddr_t avail_end, int free_list)
-{
-	int preload, lcv;
-	psize_t npages;
-	struct vm_page *pgs;
-	struct vm_physseg *ps;
-
-	if (uvmexp.pagesize == 0)
-		panic("uvm_page_physload: page size not set!");
-	if (free_list >= VM_NFREELIST || free_list < VM_FREELIST_DEFAULT)
-		panic("uvm_page_physload: bad free list %d", free_list);
-	if (start >= end)
-		panic("uvm_page_physload: start >= end");
-
-	/*
-	 * do we have room?
-	 */
-
-	if (vm_nphysmem == VM_PHYSSEG_MAX) {
-		printf("uvm_page_physload: unable to load physical memory "
-		    "segment\n");
-		printf("\t%d segments allocated, ignoring 0x%llx -> 0x%llx\n",
-		    VM_PHYSSEG_MAX, (long long)start, (long long)end);
-		printf("\tincrease VM_PHYSSEG_MAX\n");
-		return 0;
-	}
-
-	/*
-	 * check to see if this is a "preload" (i.e. uvm_page_init hasn't been
-	 * called yet, so kmem is not available).
-	 */
-
-	for (lcv = 0 ; lcv < vm_nphysmem ; lcv++) {
-		if (VM_PHYSMEM_PTR(lcv)->pgs)
-			break;
-	}
-	preload = (lcv == vm_nphysmem);
-
-	/*
-	 * if VM is already running, attempt to kmem_alloc vm_page structures
-	 */
-
-	if (!preload) {
-		panic("uvm_page_physload: tried to add RAM after vm_mem_init");
-	} else {
-		pgs = NULL;
-		npages = 0;
-	}
-
-	/*
-	 * now insert us in the proper place in vm_physmem[]
-	 */
-
-#if (VM_PHYSSEG_STRAT == VM_PSTRAT_RANDOM)
-	/* random: put it at the end (easy!) */
-	ps = VM_PHYSMEM_PTR(vm_nphysmem);
-#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
-	{
-		int x;
-		/* sort by address for binary search */
-		for (lcv = 0 ; lcv < vm_nphysmem ; lcv++)
-			if (start < VM_PHYSMEM_PTR(lcv)->start)
-				break;
-		ps = VM_PHYSMEM_PTR(lcv);
-		/* move back other entries, if necessary ... */
-		for (x = vm_nphysmem ; x > lcv ; x--)
-			/* structure copy */
-			VM_PHYSMEM_PTR_SWAP(x, x - 1);
-	}
-#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
-	{
-		int x;
-		/* sort by largest segment first */
-		for (lcv = 0 ; lcv < vm_nphysmem ; lcv++)
-			if ((end - start) >
-			    (VM_PHYSMEM_PTR(lcv)->end - VM_PHYSMEM_PTR(lcv)->start))
-				break;
-		ps = VM_PHYSMEM_PTR(lcv);
-		/* move back other entries, if necessary ... */
-		for (x = vm_nphysmem ; x > lcv ; x--)
-			/* structure copy */
-			VM_PHYSMEM_PTR_SWAP(x, x - 1);
-	}
-#else
-	panic("uvm_page_physload: unknown physseg strategy selected!");
-#endif
-
-	ps->start = start;
-	ps->end = end;
-	ps->avail_start = avail_start;
-	ps->avail_end = avail_end;
-	if (preload) {
-		ps->pgs = NULL;
-	} else {
-		ps->pgs = pgs;
-		ps->lastpg = pgs + npages;
-	}
-	ps->free_list = free_list;
-	vm_nphysmem++;
-
-	if (!preload) {
-		uvmpdpol_reinit();
-	}
-
-	return 0;
-}
-
-/*
- * when VM_PHYSSEG_MAX is 1, we can simplify these functions
- */
-
-#if VM_PHYSSEG_MAX == 1
-static inline int vm_physseg_find_contig(struct vm_physseg *, int, paddr_t, int *);
-#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
-static inline int vm_physseg_find_bsearch(struct vm_physseg *, int, paddr_t, int *);
-#else
-static inline int vm_physseg_find_linear(struct vm_physseg *, int, paddr_t, int *);
-#endif
-
-/*
- * vm_physseg_find: find vm_physseg structure that belongs to a PA
- */
-int
-vm_physseg_find(paddr_t pframe, int *offp)
-{
-
-#if VM_PHYSSEG_MAX == 1
-	return vm_physseg_find_contig(vm_physmem, vm_nphysseg, pframe, offp);
-#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
-	return vm_physseg_find_bsearch(vm_physmem, vm_nphysseg, pframe, offp);
-#else
-	return vm_physseg_find_linear(vm_physmem, vm_nphysseg, pframe, offp);
-#endif
-}
-
-#if VM_PHYSSEG_MAX == 1
-static inline int
-vm_physseg_find_contig(struct vm_physseg *segs, int nsegs, paddr_t pframe, int *offp)
-{
-
-	/* 'contig' case */
-	if (pframe >= segs[0].start && pframe < segs[0].end) {
-		if (offp)
-			*offp = pframe - segs[0].start;
-		return(0);
-	}
-	return(-1);
-}
-
-#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
-
-static inline int
-vm_physseg_find_bsearch(struct vm_physseg *segs, int nsegs, paddr_t pframe, int *offp)
-{
-	/* binary search for it */
-	u_int	start, len, guess;
-
-	/*
-	 * if try is too large (thus target is less than try) we reduce
-	 * the length to trunc(len/2) [i.e. everything smaller than "try"]
-	 *
-	 * if the try is too small (thus target is greater than try) then
-	 * we set the new start to be (try + 1).   this means we need to
-	 * reduce the length to (round(len/2) - 1).
-	 *
-	 * note "adjust" below which takes advantage of the fact that
-	 *  (round(len/2) - 1) == trunc((len - 1) / 2)
-	 * for any value of len we may have
-	 */
-
-	for (start = 0, len = nsegs ; len != 0 ; len = len / 2) {
-		guess = start + (len / 2);	/* try in the middle */
-
-		/* start past our try? */
-		if (pframe >= segs[guess].start) {
-			/* was try correct? */
-			if (pframe < segs[guess].end) {
-				if (offp)
-					*offp = pframe - segs[guess].start;
-				return guess;            /* got it */
-			}
-			start = guess + 1;	/* next time, start here */
-			len--;			/* "adjust" */
-		} else {
-			/*
-			 * pframe before try, just reduce length of
-			 * region, done in "for" loop
-			 */
-		}
-	}
-	return(-1);
-}
-
-#else
-
-static inline int
-vm_physseg_find_linear(struct vm_physseg *segs, int nsegs, paddr_t pframe, int *offp)
-{
-	/* linear search for it */
-	int	lcv;
-
-	for (lcv = 0; lcv < nsegs; lcv++) {
-		if (pframe >= segs[lcv].start &&
-		    pframe < segs[lcv].end) {
-			if (offp)
-				*offp = pframe - segs[lcv].start;
-			return(lcv);		   /* got it */
-		}
-	}
-	return(-1);
-}
-#endif
-
-/*
  * PHYS_TO_VM_PAGE: find vm_page for a PA.   used by MI code to get vm_pages
  * back from an I/O mapping (ugh!).   used in some MD code as well.
  */
@@ -958,12 +674,12 @@ struct vm_page *
 uvm_phys_to_vm_page(paddr_t pa)
 {
 	paddr_t pf = atop(pa);
-	int	off;
-	int	psi;
+	paddr_t	off;
+	uvm_physseg_t	upm;
 
-	psi = vm_physseg_find(pf, &off);
-	if (psi != -1)
-		return(&VM_PHYSMEM_PTR(psi)->pgs[off]);
+	upm = uvm_physseg_find(pf, &off);
+	if (upm != UVM_PHYSSEG_TYPE_INVALID)
+		return uvm_physseg_get_pg(upm, off);
 	return(NULL);
 }
 
@@ -987,7 +703,8 @@ uvm_page_recolor(int newncolors)
 	struct vm_page *pg;
 	vsize_t bucketcount;
 	size_t bucketmemsize, oldbucketmemsize;
-	int lcv, color, i, ocolors;
+	int color, i, ocolors;
+	int lcv;
 	struct uvm_cpu *ucpu;
 
 	KASSERT(((newncolors - 1) & newncolors) == 0);
@@ -1221,7 +938,8 @@ struct vm_page *
 uvm_pagealloc_strat(struct uvm_object *obj, voff_t off, struct vm_anon *anon,
     int flags, int strat, int free_list)
 {
-	int lcv, try1, try2, zeroit = 0, color;
+	int try1, try2, zeroit = 0, color;
+	int lcv;
 	struct uvm_cpu *ucpu;
 	struct vm_page *pg;
 	lwp_t *l;
@@ -2007,7 +1725,7 @@ bool
 uvm_pageismanaged(paddr_t pa)
 {
 
-	return (vm_physseg_find(atop(pa), NULL) != -1);
+	return (uvm_physseg_find(atop(pa), NULL) != UVM_PHYSSEG_TYPE_INVALID);
 }
 
 /*
@@ -2017,11 +1735,11 @@ uvm_pageismanaged(paddr_t pa)
 int
 uvm_page_lookup_freelist(struct vm_page *pg)
 {
-	int lcv;
+	uvm_physseg_t upm;
 
-	lcv = vm_physseg_find(atop(VM_PAGE_TO_PHYS(pg)), NULL);
-	KASSERT(lcv != -1);
-	return (VM_PHYSMEM_PTR(lcv)->free_list);
+	upm = uvm_physseg_find(atop(VM_PAGE_TO_PHYS(pg)), NULL);
+	KASSERT(upm != UVM_PHYSSEG_TYPE_INVALID);
+	return uvm_physseg_get_free_list(upm);
 }
 
 /*
@@ -2137,7 +1855,8 @@ uvm_page_printit(struct vm_page *pg, boo
 void
 uvm_page_printall(void (*pr)(const char *, ...))
 {
-	unsigned i;
+	uvm_physseg_t i;
+	paddr_t pfn;
 	struct vm_page *pg;
 
 	(*pr)("%18s %4s %4s %18s %18s"
@@ -2145,8 +1864,14 @@ uvm_page_printall(void (*pr)(const char 
 	    " OWNER"
 #endif
 	    "\n", "PAGE", "FLAG", "PQ", "UOBJECT", "UANON");
-	for (i = 0; i < vm_nphysmem; i++) {
-		for (pg = VM_PHYSMEM_PTR(i)->pgs; pg < VM_PHYSMEM_PTR(i)->lastpg; pg++) {
+	for (i = uvm_physseg_get_first();
+	     uvm_physseg_valid_p(i);
+	     i = uvm_physseg_get_next(i)) {
+		for (pfn = uvm_physseg_get_start(i);
+		     pfn < uvm_physseg_get_end(i);
+		     pfn++) {
+			pg = PHYS_TO_VM_PAGE(ptoa(pfn));
+
 			(*pr)("%18p %04x %04x %18p %18p",
 			    pg, pg->flags, pg->pqflags, pg->uobject,
 			    pg->uanon);

Index: src/sys/uvm/uvm_page.h
diff -u src/sys/uvm/uvm_page.h:1.80 src/sys/uvm/uvm_page.h:1.81
--- src/sys/uvm/uvm_page.h:1.80	Mon Mar 23 07:59:12 2015
+++ src/sys/uvm/uvm_page.h	Fri Dec 23 07:15:28 2016
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_page.h,v 1.80 2015/03/23 07:59:12 riastradh Exp $	*/
+/*	$NetBSD: uvm_page.h,v 1.81 2016/12/23 07:15:28 cherry Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -294,24 +294,6 @@ struct vm_page {
 #define VM_PSTRAT_BSEARCH	2
 #define VM_PSTRAT_BIGFIRST	3
 
-/*
- * vm_physseg: describes one segment of physical memory
- */
-struct vm_physseg {
-	paddr_t	start;			/* PF# of first page in segment */
-	paddr_t	end;			/* (PF# of last page in segment) + 1 */
-	paddr_t	avail_start;		/* PF# of first free page in segment */
-	paddr_t	avail_end;		/* (PF# of last free page in segment) +1  */
-	struct	vm_page *pgs;		/* vm_page structures (from start) */
-	struct	vm_page *lastpg;	/* vm_page structure for end */
-	int	free_list;		/* which free list they belong on */
-	u_int	start_hint;		/* start looking for free pages here */
-					/* protected by uvm_fpageqlock */
-#ifdef __HAVE_PMAP_PHYSSEG
-	struct	pmap_physseg pmseg;	/* pmap specific (MD) data */
-#endif
-};
-
 #ifdef _KERNEL
 
 /*
@@ -321,21 +303,6 @@ struct vm_physseg {
 extern bool vm_page_zero_enable;
 
 /*
- * physical memory config is stored in vm_physmem.
- */
-
-#define	VM_PHYSMEM_PTR(i)	(&vm_physmem[i])
-#if VM_PHYSSEG_MAX == 1
-#define VM_PHYSMEM_PTR_SWAP(i, j) /* impossible */
-#else
-#define VM_PHYSMEM_PTR_SWAP(i, j) \
-	do { vm_physmem[(i)] = vm_physmem[(j)]; } while (0)
-#endif
-
-extern struct vm_physseg vm_physmem[VM_PHYSSEG_MAX];
-extern int vm_nphysseg;
-
-/*
  * prototypes: the following prototypes define the interface to pages
  */
 
@@ -366,10 +333,13 @@ bool uvm_page_locked_p(struct vm_page *)
 
 int uvm_page_lookup_freelist(struct vm_page *);
 
-int vm_physseg_find(paddr_t, int *);
 struct vm_page *uvm_phys_to_vm_page(paddr_t);
 paddr_t uvm_vm_page_to_phys(const struct vm_page *);
 
+#if !defined(PMAP_STEAL_MEMORY)
+bool uvm_page_physget(paddr_t *);
+#endif
+
 /*
  * macros
  */

Index: src/sys/uvm/uvm_pglist.c
diff -u src/sys/uvm/uvm_pglist.c:1.67 src/sys/uvm/uvm_pglist.c:1.68
--- src/sys/uvm/uvm_pglist.c:1.67	Sun Oct 26 01:42:07 2014
+++ src/sys/uvm/uvm_pglist.c	Fri Dec 23 07:15:28 2016
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_pglist.c,v 1.67 2014/10/26 01:42:07 christos Exp $	*/
+/*	$NetBSD: uvm_pglist.c,v 1.68 2016/12/23 07:15:28 cherry Exp $	*/
 
 /*-
  * Copyright (c) 1997 The NetBSD Foundation, Inc.
@@ -35,7 +35,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_pglist.c,v 1.67 2014/10/26 01:42:07 christos Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_pglist.c,v 1.68 2016/12/23 07:15:28 cherry Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -116,16 +116,15 @@ uvm_pglist_add(struct vm_page *pg, struc
 }
 
 static int
-uvm_pglistalloc_c_ps(struct vm_physseg *ps, int num, paddr_t low, paddr_t high,
+uvm_pglistalloc_c_ps(uvm_physseg_t psi, int num, paddr_t low, paddr_t high,
     paddr_t alignment, paddr_t boundary, struct pglist *rlist)
 {
 	signed int candidate, limit, candidateidx, end, idx, skip;
-	struct vm_page *pgs;
 	int pagemask;
 	bool second_pass;
 #ifdef DEBUG
 	paddr_t idxpa, lastidxpa;
-	int cidx = 0;	/* XXX: GCC */
+	paddr_t cidx = 0;	/* XXX: GCC */
 #endif
 #ifdef PGALLOC_VERBOSE
-	printf("pgalloc: contig %d pgs from psi %zd\n", num, ps - vm_physmem);
+	printf("pgalloc: contig %d pgs from psi %"PRIxPHYSMEM"\n", num, psi);
@@ -140,26 +139,26 @@ uvm_pglistalloc_c_ps(struct vm_physseg *
 	/*
 	 * Make sure that physseg falls within with range to be allocated from.
 	 */
-	if (high <= ps->avail_start || low >= ps->avail_end)
+	if (high <= uvm_physseg_get_avail_start(psi) || low >= uvm_physseg_get_avail_end(psi))
 		return 0;
 
 	/*
 	 * We start our search at the just after where the last allocation
 	 * succeeded.
 	 */
-	candidate = roundup2(max(low, ps->avail_start + ps->start_hint), alignment);
-	limit = min(high, ps->avail_end);
+	candidate = roundup2(max(low, uvm_physseg_get_avail_start(psi) +
+		uvm_physseg_get_start_hint(psi)), alignment);
+	limit = min(high, uvm_physseg_get_avail_end(psi));
 	pagemask = ~((boundary >> PAGE_SHIFT) - 1);
 	skip = 0;
 	second_pass = false;
-	pgs = ps->pgs;
 
 	for (;;) {
 		bool ok = true;
 		signed int cnt;
 
 		if (candidate + num > limit) {
-			if (ps->start_hint == 0 || second_pass) {
+			if (uvm_physseg_get_start_hint(psi) == 0 || second_pass) {
 				/*
 				 * We've run past the allowable range.
 				 */
@@ -171,8 +170,9 @@ uvm_pglistalloc_c_ps(struct vm_physseg *
 			 * is were we started.
 			 */
 			second_pass = true;
-			candidate = roundup2(max(low, ps->avail_start), alignment);
-			limit = min(limit, ps->avail_start + ps->start_hint);
+			candidate = roundup2(max(low, uvm_physseg_get_avail_start(psi)), alignment);
+			limit = min(limit, uvm_physseg_get_avail_start(psi) +
+			    uvm_physseg_get_start_hint(psi));
 			skip = 0;
 			continue;
 		}
@@ -192,16 +192,16 @@ uvm_pglistalloc_c_ps(struct vm_physseg *
 		 * Make sure this is a managed physical page.
 		 */
 
-		if (vm_physseg_find(candidate, &cidx) != ps - vm_physmem)
+		if (uvm_physseg_find(candidate, &cidx) != psi)
 			panic("pgalloc contig: botch1");
-		if (cidx != candidate - ps->start)
+		if (cidx != candidate - uvm_physseg_get_start(psi))
 			panic("pgalloc contig: botch2");
-		if (vm_physseg_find(candidate + num - 1, &cidx) != ps - vm_physmem)
+		if (uvm_physseg_find(candidate + num - 1, &cidx) != psi)
 			panic("pgalloc contig: botch3");
-		if (cidx != candidate - ps->start + num - 1)
+		if (cidx != candidate - uvm_physseg_get_start(psi) + num - 1)
 			panic("pgalloc contig: botch4");
 #endif
-		candidateidx = candidate - ps->start;
+		candidateidx = candidate - uvm_physseg_get_start(psi);
 		end = candidateidx + num;
 
 		/*
@@ -220,15 +220,15 @@ uvm_pglistalloc_c_ps(struct vm_physseg *
 		 * testing most of those pages again in the next pass.
 		 */
 		for (idx = end - 1; idx >= candidateidx + skip; idx--) {
-			if (VM_PAGE_IS_FREE(&pgs[idx]) == 0) {
+			if (VM_PAGE_IS_FREE(uvm_physseg_get_pg(psi, idx)) == 0) {
 				ok = false;
 				break;
 			}
 
 #ifdef DEBUG
 			if (idx > candidateidx) {
-				idxpa = VM_PAGE_TO_PHYS(&pgs[idx]);
-				lastidxpa = VM_PAGE_TO_PHYS(&pgs[idx - 1]);
+				idxpa = VM_PAGE_TO_PHYS(uvm_physseg_get_pg(psi, idx));
+				lastidxpa = VM_PAGE_TO_PHYS(uvm_physseg_get_pg(psi, idx - 1));
 				if ((lastidxpa + PAGE_SIZE) != idxpa) {
 					/*
 					 * Region not contiguous.
@@ -249,7 +249,7 @@ uvm_pglistalloc_c_ps(struct vm_physseg *
 
 		if (ok) {
 			while (skip-- > 0) {
-				KDASSERT(VM_PAGE_IS_FREE(&pgs[candidateidx + skip]));
+				KDASSERT(VM_PAGE_IS_FREE(uvm_physseg_get_pg(psi, candidateidx + skip)));
 			}
 #ifdef PGALLOC_VERBOSE
 			printf(": ok\n");
@@ -280,19 +280,22 @@ uvm_pglistalloc_c_ps(struct vm_physseg *
 	/*
 	 * we have a chunk of memory that conforms to the requested constraints.
 	 */
-	for (idx = candidateidx, pgs += idx; idx < end; idx++, pgs++)
-		uvm_pglist_add(pgs, rlist);
+	for (idx = candidateidx; idx < end; idx++)
+		uvm_pglist_add(uvm_physseg_get_pg(psi, idx), rlist);
 
 	/*
 	 * the next time we need to search this segment, start after this
 	 * chunk of pages we just allocated.
 	 */
-	ps->start_hint = candidate + num - ps->avail_start;
-	KASSERTMSG(ps->start_hint <= ps->avail_end - ps->avail_start,
+	uvm_physseg_set_start_hint(psi, candidate + num -
+	    uvm_physseg_get_avail_start(psi));
+	KASSERTMSG(uvm_physseg_get_start_hint(psi) <=
+	    uvm_physseg_get_avail_end(psi) - uvm_physseg_get_avail_start(psi),
 	    "%x %u (%#x) <= %#"PRIxPADDR" - %#"PRIxPADDR" (%#"PRIxPADDR")",
 	    candidate + num,
-	    ps->start_hint, ps->start_hint, ps->avail_end, ps->avail_start,
-	    ps->avail_end - ps->avail_start);
+	    uvm_physseg_get_start_hint(psi), uvm_physseg_get_start_hint(psi),
+	    uvm_physseg_get_avail_end(psi), uvm_physseg_get_avail_start(psi),
+	    uvm_physseg_get_avail_end(psi) - uvm_physseg_get_avail_start(psi));
 
 #ifdef PGALLOC_VERBOSE
 	printf("got %d pgs\n", num);
@@ -304,10 +307,10 @@ static int
 uvm_pglistalloc_contig(int num, paddr_t low, paddr_t high, paddr_t alignment,
     paddr_t boundary, struct pglist *rlist)
 {
-	int fl, psi;
-	struct vm_physseg *ps;
+	int fl;
 	int error;
 
+	uvm_physseg_t psi;
 	/* Default to "lose". */
 	error = ENOMEM;
 
@@ -322,17 +325,16 @@ uvm_pglistalloc_contig(int num, paddr_t 
 
 	for (fl = 0; fl < VM_NFREELIST; fl++) {
 #if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
-		for (psi = vm_nphysseg - 1 ; psi >= 0 ; psi--)
+		for (psi = uvm_physseg_get_last(); uvm_physseg_valid_p(psi); psi = uvm_physseg_get_prev(psi))
+
 #else
-		for (psi = 0 ; psi < vm_nphysseg ; psi++)
+		for (psi = uvm_physseg_get_first(); uvm_physseg_valid_p(psi); psi = uvm_physseg_get_next(psi))
 #endif
 		{
-			ps = &vm_physmem[psi];
-
-			if (ps->free_list != fl)
+			if (uvm_physseg_get_free_list(psi) != fl)
 				continue;
 
-			num -= uvm_pglistalloc_c_ps(ps, num, low, high,
+			num -= uvm_pglistalloc_c_ps(psi, num, low, high,
 						    alignment, boundary, rlist);
 			if (num == 0) {
 #ifdef PGALLOC_VERBOSE
@@ -358,59 +360,62 @@ out:
 }
 
 static int
-uvm_pglistalloc_s_ps(struct vm_physseg *ps, int num, paddr_t low, paddr_t high,
+uvm_pglistalloc_s_ps(uvm_physseg_t psi, int num, paddr_t low, paddr_t high,
     struct pglist *rlist)
 {
 	int todo, limit, candidate;
 	struct vm_page *pg;
 	bool second_pass;
 #ifdef PGALLOC_VERBOSE
-	printf("pgalloc: simple %d pgs from psi %zd\n", num, ps - vm_physmem);
+	printf("pgalloc: simple %d pgs from psi %"PRIxPHYSMEM"\n", num, psi);
 #endif
 
 	KASSERT(mutex_owned(&uvm_fpageqlock));
-	KASSERT(ps->start <= ps->avail_start);
-	KASSERT(ps->start <= ps->avail_end);
-	KASSERT(ps->avail_start <= ps->end);
-	KASSERT(ps->avail_end <= ps->end);
+	KASSERT(uvm_physseg_get_start(psi) <= uvm_physseg_get_avail_start(psi));
+	KASSERT(uvm_physseg_get_start(psi) <= uvm_physseg_get_avail_end(psi));
+	KASSERT(uvm_physseg_get_avail_start(psi) <= uvm_physseg_get_end(psi));
+	KASSERT(uvm_physseg_get_avail_end(psi) <= uvm_physseg_get_end(psi));
 
 	low = atop(low);
 	high = atop(high);
 	todo = num;
-	candidate = max(low, ps->avail_start + ps->start_hint);
-	limit = min(high, ps->avail_end);
-	pg = &ps->pgs[candidate - ps->start];
+	candidate = max(low, uvm_physseg_get_avail_start(psi) +
+	    uvm_physseg_get_start_hint(psi));
+	limit = min(high, uvm_physseg_get_avail_end(psi));
+	pg = uvm_physseg_get_pg(psi, candidate - uvm_physseg_get_start(psi));
 	second_pass = false;
 
 	/*
 	 * Make sure that physseg falls within with range to be allocated from.
 	 */
-	if (high <= ps->avail_start || low >= ps->avail_end)
+	if (high <= uvm_physseg_get_avail_start(psi) ||
+	    low >= uvm_physseg_get_avail_end(psi))
 		return 0;
 
 again:
 	for (;; candidate++, pg++) {
 		if (candidate >= limit) {
-			if (ps->start_hint == 0 || second_pass) {
+			if (uvm_physseg_get_start_hint(psi) == 0 || second_pass) {
 				candidate = limit - 1;
 				break;
 			}
 			second_pass = true;
-			candidate = max(low, ps->avail_start);
-			limit = min(limit, ps->avail_start + ps->start_hint);
-			pg = &ps->pgs[candidate - ps->start];
+			candidate = max(low, uvm_physseg_get_avail_start(psi));
+			limit = min(limit, uvm_physseg_get_avail_start(psi) +
+			    uvm_physseg_get_start_hint(psi));
+			pg = uvm_physseg_get_pg(psi, candidate - uvm_physseg_get_start(psi));
 			goto again;
 		}
 #if defined(DEBUG)
 		{
-			int cidx = 0;
-			const int bank = vm_physseg_find(candidate, &cidx);
-			KDASSERTMSG(bank == ps - vm_physmem,
-			    "vm_physseg_find(%#x) (%d) != ps %zd",
-			     candidate, bank, ps - vm_physmem);
-			KDASSERTMSG(cidx == candidate - ps->start,
-			    "vm_physseg_find(%#x): %#x != off %"PRIxPADDR,
-			     candidate, cidx, candidate - ps->start);
+			paddr_t cidx = 0;
+			const uvm_physseg_t bank = uvm_physseg_find(candidate, &cidx);
+			KDASSERTMSG(bank == psi,
+			    "uvm_physseg_find(%#x) (%"PRIxPHYSMEM ") != psi %"PRIxPHYSMEM,
+			     candidate, bank, psi);
+			KDASSERTMSG(cidx == candidate - uvm_physseg_get_start(psi),
+			    "uvm_physseg_find(%#x): %#"PRIxPADDR" != off %"PRIxPADDR,
+			     candidate, cidx, candidate - uvm_physseg_get_start(psi));
 		}
 #endif
 		if (VM_PAGE_IS_FREE(pg) == 0)
@@ -426,12 +431,16 @@ again:
 	 * The next time we need to search this segment,
 	 * start just after the pages we just allocated.
 	 */
-	ps->start_hint = candidate + 1 - ps->avail_start;
-	KASSERTMSG(ps->start_hint <= ps->avail_end - ps->avail_start,
+	uvm_physseg_set_start_hint(psi, candidate + 1 - uvm_physseg_get_avail_start(psi));
+	KASSERTMSG(uvm_physseg_get_start_hint(psi) <= uvm_physseg_get_avail_end(psi) -
+	    uvm_physseg_get_avail_start(psi),
 	    "%#x %u (%#x) <= %#"PRIxPADDR" - %#"PRIxPADDR" (%#"PRIxPADDR")",
 	    candidate + 1,
-	    ps->start_hint, ps->start_hint, ps->avail_end, ps->avail_start,
-	    ps->avail_end - ps->avail_start);
+	    uvm_physseg_get_start_hint(psi),
+	    uvm_physseg_get_start_hint(psi),
+	    uvm_physseg_get_avail_end(psi),
+	    uvm_physseg_get_avail_start(psi),
+	    uvm_physseg_get_avail_end(psi) - uvm_physseg_get_avail_start(psi));
 
 #ifdef PGALLOC_VERBOSE
 	printf("got %d pgs\n", num - todo);
@@ -443,8 +452,9 @@ static int
 uvm_pglistalloc_simple(int num, paddr_t low, paddr_t high,
     struct pglist *rlist, int waitok)
 {
-	int fl, psi, error;
-	struct vm_physseg *ps;
+	int fl, error;
+
+	uvm_physseg_t psi;
 
 	/* Default to "lose". */
 	error = ENOMEM;
@@ -461,17 +471,16 @@ again:
 
 	for (fl = 0; fl < VM_NFREELIST; fl++) {
 #if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
-		for (psi = vm_nphysseg - 1 ; psi >= 0 ; psi--)
+		for (psi = uvm_physseg_get_last(); uvm_physseg_valid_p(psi); psi = uvm_physseg_get_prev(psi))
+
 #else
-		for (psi = 0 ; psi < vm_nphysseg ; psi++)
+		for (psi = uvm_physseg_get_first(); uvm_physseg_valid_p(psi); psi = uvm_physseg_get_next(psi))
 #endif
 		{
-			ps = &vm_physmem[psi];
-
-			if (ps->free_list != fl)
+			if (uvm_physseg_get_free_list(psi) != fl)
 				continue;
 
-			num -= uvm_pglistalloc_s_ps(ps, num, low, high, rlist);
+			num -= uvm_pglistalloc_s_ps(psi, num, low, high, rlist);
 			if (num == 0) {
 				error = 0;
 				goto out;

Index: src/sys/uvm/uvm_physseg.h
diff -u src/sys/uvm/uvm_physseg.h:1.3 src/sys/uvm/uvm_physseg.h:1.4
--- src/sys/uvm/uvm_physseg.h:1.3	Thu Dec 22 15:54:35 2016
+++ src/sys/uvm/uvm_physseg.h	Fri Dec 23 07:15:28 2016
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_physseg.h,v 1.3 2016/12/22 15:54:35 cherry Exp $ */
+/* $NetBSD: uvm_physseg.h,v 1.4 2016/12/23 07:15:28 cherry Exp $ */
 
 /*
  * Consolidated API from uvm_page.c and others.
@@ -9,10 +9,8 @@
 #define _UVM_UVM_PHYSSEG_H_
 
 #if defined(_KERNEL_OPT)
-#if notyet
 #include "opt_uvm_hotplug.h"
 #endif
-#endif
 
 #include <sys/cdefs.h>
 #include <sys/param.h>

Index: src/sys/uvm/pmap/pmap.c
diff -u src/sys/uvm/pmap/pmap.c:1.25 src/sys/uvm/pmap/pmap.c:1.26
--- src/sys/uvm/pmap/pmap.c:1.25	Thu Dec  1 02:15:08 2016
+++ src/sys/uvm/pmap/pmap.c	Fri Dec 23 07:15:28 2016
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.c,v 1.25 2016/12/01 02:15:08 mrg Exp $	*/
+/*	$NetBSD: pmap.c,v 1.26 2016/12/23 07:15:28 cherry Exp $	*/
 
 /*-
  * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc.
@@ -67,7 +67,7 @@
 
 #include <sys/cdefs.h>
 
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.25 2016/12/01 02:15:08 mrg Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.26 2016/12/23 07:15:28 cherry Exp $");
 
 /*
  *	Manages physical address maps.
@@ -112,6 +112,7 @@ __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.2
 #include <sys/atomic.h>
 
 #include <uvm/uvm.h>
+#include <uvm/uvm_physseg.h>
 
 #if defined(MULTIPROCESSOR) && defined(PMAP_VIRTUAL_CACHE_ALIASES) \
     && !defined(PMAP_NO_PV_UNCACHED)
@@ -452,37 +453,39 @@ pmap_steal_memory(vsize_t size, vaddr_t 
 	size_t npgs;
 	paddr_t pa;
 	vaddr_t va;
-	struct vm_physseg *maybe_seg = NULL;
-	u_int maybe_bank = vm_nphysseg;
+
+	uvm_physseg_t maybe_bank = UVM_PHYSSEG_TYPE_INVALID;
 
 	size = round_page(size);
 	npgs = atop(size);
 
 	aprint_debug("%s: need %zu pages\n", __func__, npgs);
 
-	for (u_int bank = 0; bank < vm_nphysseg; bank++) {
-		struct vm_physseg * const seg = VM_PHYSMEM_PTR(bank);
+	for (uvm_physseg_t bank = uvm_physseg_get_first();
+	     uvm_physseg_valid_p(bank);
+	     bank = uvm_physseg_get_next(bank)) {
+
 		if (uvm.page_init_done == true)
 			panic("pmap_steal_memory: called _after_ bootstrap");
 
-		aprint_debug("%s: seg %u: %#"PRIxPADDR" %#"PRIxPADDR" %#"PRIxPADDR" %#"PRIxPADDR"\n",
+		aprint_debug("%s: seg %"PRIxPHYSMEM": %#"PRIxPADDR" %#"PRIxPADDR" %#"PRIxPADDR" %#"PRIxPADDR"\n",
 		    __func__, bank,
-		    seg->avail_start, seg->start,
-		    seg->avail_end, seg->end);
+		    uvm_physseg_get_avail_start(bank), uvm_physseg_get_start(bank),
+		    uvm_physseg_get_avail_end(bank), uvm_physseg_get_end(bank));
 
-		if (seg->avail_start != seg->start
-		    || seg->avail_start >= seg->avail_end) {
-			aprint_debug("%s: seg %u: bad start\n", __func__, bank);
+		if (uvm_physseg_get_avail_start(bank) != uvm_physseg_get_start(bank)
+		    || uvm_physseg_get_avail_start(bank) >= uvm_physseg_get_avail_end(bank)) {
+			aprint_debug("%s: seg %"PRIxPHYSMEM": bad start\n", __func__, bank);
 			continue;
 		}
 
-		if (seg->avail_end - seg->avail_start < npgs) {
-			aprint_debug("%s: seg %u: too small for %zu pages\n",
+		if (uvm_physseg_get_avail_end(bank) - uvm_physseg_get_avail_start(bank) < npgs) {
+			aprint_debug("%s: seg %"PRIxPHYSMEM": too small for %zu pages\n",
 			    __func__, bank, npgs);
 			continue;
 		}
 
-		if (!pmap_md_ok_to_steal_p(seg, npgs)) {
+		if (!pmap_md_ok_to_steal_p(bank, npgs)) {
 			continue;
 		}
 
@@ -490,44 +493,24 @@ pmap_steal_memory(vsize_t size, vaddr_t 
 		 * Always try to allocate from the segment with the least
 		 * amount of space left.
 		 */
-#define VM_PHYSMEM_SPACE(s)	((s)->avail_end - (s)->avail_start)
-		if (maybe_seg == NULL
-		    || VM_PHYSMEM_SPACE(seg) < VM_PHYSMEM_SPACE(maybe_seg)) {
-			maybe_seg = seg;
+#define VM_PHYSMEM_SPACE(b)	((uvm_physseg_get_avail_end(b)) - (uvm_physseg_get_avail_start(b)))
+		if (uvm_physseg_valid_p(maybe_bank) == false
+		    || VM_PHYSMEM_SPACE(bank) < VM_PHYSMEM_SPACE(maybe_bank)) {
 			maybe_bank = bank;
 		}
 	}
 
-	if (maybe_seg) {
-		struct vm_physseg * const seg = maybe_seg;
-		u_int bank = maybe_bank;
+	if (uvm_physseg_valid_p(maybe_bank)) {
+		const uvm_physseg_t bank = maybe_bank;
 
 		/*
 		 * There are enough pages here; steal them!
 		 */
-		pa = ptoa(seg->avail_start);
-		seg->avail_start += npgs;
-		seg->start += npgs;
-
-		/*
-		 * Have we used up this segment?
-		 */
-		if (seg->avail_start == seg->end) {
-			if (vm_nphysseg == 1)
-				panic("pmap_steal_memory: out of memory!");
+		pa = ptoa(uvm_physseg_get_start(bank));
+		uvm_physseg_unplug(atop(pa), npgs);
 
-			aprint_debug("%s: seg %u: %zu pages stolen (removed)\n",
-			    __func__, bank, npgs);
-			/* Remove this segment from the list. */
-			vm_nphysseg--;
-			for (u_int x = bank; x < vm_nphysseg; x++) {
-				/* structure copy */
-				VM_PHYSMEM_PTR_SWAP(x, x + 1);
-			}
-		} else {
-			aprint_debug("%s: seg %u: %zu pages stolen (%#"PRIxPADDR" left)\n",
-			    __func__, bank, npgs, VM_PHYSMEM_SPACE(seg));
-		}
+		aprint_debug("%s: seg %"PRIxPHYSMEM": %zu pages stolen (%#"PRIxPADDR" left)\n",
+		    __func__, bank, npgs, VM_PHYSMEM_SPACE(bank));
 
 		va = pmap_md_map_poolpage(pa, size);
 		memset((void *)va, 0, size);

Reply via email to