Module Name:    src
Committed By:   ryo
Date:           Tue Mar 19 16:05:49 UTC 2019

Modified Files:
        src/sys/arch/aarch64/aarch64: pmap.c
        src/sys/arch/aarch64/include: asan.h pmap.h

Log Message:
- free L1-L3 pages that have been emptied by pmap_remove().
- if no memory is available, pmap_enter() now correctly returns ENOMEM when
PMAP_CANFAIL is set, or waits until memory becomes available when !PMAP_CANFAIL.

These changes improve stability when using huge virtual memory spaces
with mmap.
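
As an illustration only (not part of this commit), the following sketch shows
the two behaviours described above from a caller's point of view, using the
standard pmap_enter(9) entry point. The wrapper name example_enter(), its
includes and its error handling are assumptions, not code from this change:

#include <sys/param.h>
#include <sys/errno.h>
#include <uvm/uvm_extern.h>	/* assumed to provide pmap_enter() and PMAP_CANFAIL */

/* hypothetical wrapper, only to illustrate the new failure behaviour */
static int
example_enter(struct pmap *pm, vaddr_t va, paddr_t pa, vm_prot_t prot,
    bool canfail)
{
	u_int flags = prot | (canfail ? PMAP_CANFAIL : 0);
	int error;

	/*
	 * With PMAP_CANFAIL, a failed page table page allocation now makes
	 * pmap_enter() return ENOMEM instead of panicking; without it,
	 * pmap_enter() sleeps in uvm_wait() until memory becomes available.
	 */
	error = pmap_enter(pm, va, pa, prot, flags);

	/* ENOMEM is only possible when canfail (PMAP_CANFAIL) was set */
	return error;
}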


To generate a diff of this commit:
cvs rdiff -u -r1.35 -r1.36 src/sys/arch/aarch64/aarch64/pmap.c
cvs rdiff -u -r1.4 -r1.5 src/sys/arch/aarch64/include/asan.h
cvs rdiff -u -r1.21 -r1.22 src/sys/arch/aarch64/include/pmap.h

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/arch/aarch64/aarch64/pmap.c
diff -u src/sys/arch/aarch64/aarch64/pmap.c:1.35 src/sys/arch/aarch64/aarch64/pmap.c:1.36
--- src/sys/arch/aarch64/aarch64/pmap.c:1.35	Wed Feb  6 05:33:41 2019
+++ src/sys/arch/aarch64/aarch64/pmap.c	Tue Mar 19 16:05:49 2019
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.c,v 1.35 2019/02/06 05:33:41 ryo Exp $	*/
+/*	$NetBSD: pmap.c,v 1.36 2019/03/19 16:05:49 ryo Exp $	*/
 
 /*
  * Copyright (c) 2017 Ryo Shimizu <r...@nerv.org>
@@ -27,7 +27,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.35 2019/02/06 05:33:41 ryo Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.36 2019/03/19 16:05:49 ryo Exp $");
 
 #include "opt_arm_debug.h"
 #include "opt_ddb.h"
@@ -191,11 +191,15 @@ struct pv_entry {
 
 #define L3INDEXMASK	(L3_SIZE * Ln_ENTRIES - 1)
 
+void atomic_add_16(volatile uint16_t *, int16_t);
+uint16_t atomic_add_16_nv(volatile uint16_t *, int16_t);
+
 static pt_entry_t *_pmap_pte_lookup_l3(struct pmap *, vaddr_t);
 static pt_entry_t *_pmap_pte_lookup_bs(struct pmap *, vaddr_t, vsize_t *);
 static pt_entry_t _pmap_pte_adjust_prot(pt_entry_t, vm_prot_t, vm_prot_t, bool);
 static pt_entry_t _pmap_pte_adjust_cacheflags(pt_entry_t, u_int);
-static void _pmap_remove(struct pmap *, vaddr_t, vaddr_t, bool, struct pv_entry **);
+static void _pmap_remove(struct pmap *, vaddr_t, vaddr_t, bool,
+    struct pv_entry **);
 static int _pmap_enter(struct pmap *, vaddr_t, paddr_t, vm_prot_t, u_int, bool);
 
 static struct pmap kernel_pmap;
@@ -434,8 +438,19 @@ pmap_bootstrap(vaddr_t vstart, vaddr_t v
 	kpm->pm_l0table = l0;
 	kpm->pm_l0table_pa = l0pa;
 	kpm->pm_activated = true;
-	SLIST_INIT(&kpm->pm_vmlist);
+	TAILQ_INIT(&kpm->pm_vmlist);
 	mutex_init(&kpm->pm_lock, MUTEX_DEFAULT, IPL_VM);
+
+	CTASSERT(sizeof(kpm->pm_stats.wired_count) == sizeof(long));
+	CTASSERT(sizeof(kpm->pm_stats.resident_count) == sizeof(long));
+#define PMSTAT_INC_WIRED_COUNT(pm)	\
+	atomic_inc_ulong(&(pm)->pm_stats.wired_count)
+#define PMSTAT_DEC_WIRED_COUNT(pm)	\
+	atomic_dec_ulong(&(pm)->pm_stats.wired_count)
+#define PMSTAT_INC_RESIDENT_COUNT(pm)	\
+	atomic_inc_ulong(&(pm)->pm_stats.resident_count)
+#define PMSTAT_DEC_RESIDENT_COUNT(pm)	\
+	atomic_dec_ulong(&(pm)->pm_stats.resident_count)
 }
 
 inline static int
@@ -544,39 +559,61 @@ pmap_reference(struct pmap *pm)
 	atomic_inc_uint(&pm->pm_refcnt);
 }
 
-pd_entry_t *
-pmap_alloc_pdp(struct pmap *pm, paddr_t *pap)
+paddr_t
+pmap_alloc_pdp(struct pmap *pm, struct vm_page **pgp, bool waitok)
 {
 	paddr_t pa;
+	struct vm_page *pg;
 
 	UVMHIST_FUNC(__func__);
 	UVMHIST_CALLED(pmaphist);
 
 	if (uvm.page_init_done) {
-		struct vm_page *pg;
-
+ retry:
 		pg = uvm_pagealloc(NULL, 0, NULL,
 		    UVM_PGA_USERESERVE | UVM_PGA_ZERO);
-		if (pg == NULL)
-			panic("%s: cannot allocate L3 table", __func__);
-		pa = VM_PAGE_TO_PHYS(pg);
+		if (pg == NULL) {
+			if (waitok) {
+				uvm_wait("pmap_alloc_pdp");
+				goto retry;
+			}
+			return POOL_PADDR_INVALID;
+		}
 
-		SLIST_INSERT_HEAD(&pm->pm_vmlist, pg, mdpage.mdpg_vmlist);
+		TAILQ_INSERT_HEAD(&pm->pm_vmlist, pg, mdpage.mdpg_vmlist);
+		pg->flags &= ~PG_BUSY;	/* never busy */
+		pg->wire_count = 1;	/* max = 1 + Ln_ENTRIES = 513 */
+		pa = VM_PAGE_TO_PHYS(pg);
 		PMAP_COUNT(pdp_alloc);
 
+		VM_PAGE_TO_MD(pg)->mdpg_ptep_parent = NULL;
+
 	} else {
 		/* uvm_pageboot_alloc() returns AARCH64 KSEG address */
+		pg = NULL;
 		pa = AARCH64_KVA_TO_PA(
 		    uvm_pageboot_alloc(Ln_TABLE_SIZE));
 		PMAP_COUNT(pdp_alloc_boot);
 	}
-	if (pap != NULL)
-		*pap = pa;
+	if (pgp != NULL)
+		*pgp = pg;
 
-	UVMHIST_LOG(pmaphist, "pa=%llx, va=%llx",
-	    pa, AARCH64_PA_TO_KVA(pa), 0, 0);
+	UVMHIST_LOG(pmaphist, "pa=%llx, pg=%llx",
+	    pa, pg, 0, 0);
 
-	return (void *)AARCH64_PA_TO_KVA(pa);
+	return pa;
+}
+
+static void
+pmap_free_pdp(struct pmap *pm, struct vm_page *pg)
+{
+	TAILQ_REMOVE(&pm->pm_vmlist, pg, mdpage.mdpg_vmlist);
+	pg->flags |= PG_BUSY;
+	pg->wire_count = 0;
+	VM_MDPAGE_INIT(pg);
+
+	uvm_pagefree(pg);
+	PMAP_COUNT(pdp_free);
 }
 
 static void
@@ -584,9 +621,8 @@ _pmap_free_pdp_all(struct pmap *pm)
 {
 	struct vm_page *pg, *tmp;
 
-	SLIST_FOREACH_SAFE(pg, &pm->pm_vmlist, mdpage.mdpg_vmlist, tmp) {
-		uvm_pagefree(pg);
-		PMAP_COUNT(pdp_free);
+	TAILQ_FOREACH_SAFE(pg, &pm->pm_vmlist, mdpage.mdpg_vmlist, tmp) {
+		pmap_free_pdp(pm, pg);
 	}
 }
 
@@ -617,7 +653,7 @@ pmap_extract_coherency(struct pmap *pm, 
 bool
 pmap_extract(struct pmap *pm, vaddr_t va, paddr_t *pap)
 {
-	static pt_entry_t *ptep, pte;
+	pt_entry_t *ptep, pte;
 	paddr_t pa;
 	vsize_t blocksize = 0;
 	extern char __kernel_text[];
@@ -660,6 +696,9 @@ vtophys(vaddr_t va)
 	return pa;
 }
 
+/*
+ * return a pointer to the pte, regardless of whether the entry is valid or not.
+ */
 static pt_entry_t *
 _pmap_pte_lookup_bs(struct pmap *pm, vaddr_t va, vsize_t *bs)
 {
@@ -1238,9 +1277,11 @@ pmap_create(void)
 	memset(pm, 0, sizeof(*pm));
 	pm->pm_refcnt = 1;
 	pm->pm_asid = -1;
-	SLIST_INIT(&pm->pm_vmlist);
+	TAILQ_INIT(&pm->pm_vmlist);
 	mutex_init(&pm->pm_lock, MUTEX_DEFAULT, IPL_VM);
-	pm->pm_l0table = pmap_alloc_pdp(pm, &pm->pm_l0table_pa);
+	pm->pm_l0table_pa = pmap_alloc_pdp(pm, NULL, true);
+	KASSERT(pm->pm_l0table_pa != POOL_PADDR_INVALID);
+	pm->pm_l0table = (pd_entry_t *)AARCH64_PA_TO_KVA(pm->pm_l0table_pa);
 	KASSERT(((vaddr_t)pm->pm_l0table & (PAGE_SIZE - 1)) == 0);
 
 	UVMHIST_LOG(pmaphist, "pm=%p, pm_l0table=%016lx, pm_l0table_pa=%016lx",
@@ -1282,11 +1323,115 @@ pmap_destroy(struct pmap *pm)
 	PMAP_COUNT(destroy);
 }
 
+static inline void
+_pmap_pdp_setparent(struct pmap *pm, struct vm_page *pg, pt_entry_t *ptep)
+{
+	if ((pm != pmap_kernel()) && (pg != NULL))
+		VM_PAGE_TO_MD(pg)->mdpg_ptep_parent = ptep;
+}
+
+/*
+ * increment reference counter of the page descriptor page.
+ * the reference counter should be equal to
+ *  1 + num of valid entries the page has.
+ */
+static inline void
+_pmap_pdp_addref(struct pmap *pm, paddr_t pdppa, struct vm_page *pdppg_hint)
+{
+	struct vm_page *pg;
+
+	/* kernel L0-L3 pages are never freed */
+	if (pm == pmap_kernel())
+		return;
+	/* no need for L0 page */
+	if (pm->pm_l0table_pa == pdppa)
+		return;
+
+	pg = pdppg_hint;
+	if (pg == NULL)
+		pg = PHYS_TO_VM_PAGE(pdppa);
+	KASSERT(pg != NULL);
+
+	CTASSERT(sizeof(pg->wire_count) == sizeof(uint16_t));
+	atomic_add_16(&pg->wire_count, 1);
+
+	KASSERTMSG(pg->wire_count <= (Ln_ENTRIES + 1),
+	    "pg=%p, wire_count=%d", pg, pg->wire_count);
+}
+
+/*
+ * decrement the reference counter of the page descriptor page.
+ * if the reference counter becomes 1 (i.e. the page is empty), the page
+ * is freed and true is returned; otherwise false is returned.
+ * kernel pages and the L0 page descriptor page are never freed.
+ */
+static bool
+_pmap_pdp_delref(struct pmap *pm, paddr_t pdppa, bool do_free_pdp)
+{
+	struct vm_page *pg;
+	bool removed;
+	uint16_t wirecount;
+
+	/* kernel L0-L3 pages are never freed */
+	if (pm == pmap_kernel())
+		return false;
+	/* no need for L0 page */
+	if (pm->pm_l0table_pa == pdppa)
+		return false;
+
+	pg = PHYS_TO_VM_PAGE(pdppa);
+	KASSERT(pg != NULL);
+
+	wirecount = atomic_add_16_nv(&pg->wire_count, -1);
+
+	if (!do_free_pdp)
+		return false;
+
+	/* if no reference, free pdp */
+	removed = false;
+	while (wirecount == 1) {
+		pd_entry_t *ptep_in_parent, opte;
+
+		ptep_in_parent = VM_PAGE_TO_MD(pg)->mdpg_ptep_parent;
+		if (ptep_in_parent == NULL) {
+			/* no parent */
+			pmap_free_pdp(pm, pg);
+			removed = true;
+			break;
+		}
+
+		/* unlink from parent */
+		opte = atomic_swap_64(ptep_in_parent, 0);
+		KASSERT(lxpde_valid(opte));
+		wirecount = atomic_add_16_nv(&pg->wire_count, -1); /* 1 -> 0 */
+		KASSERT(wirecount == 0);
+		pmap_free_pdp(pm, pg);
+		removed = true;
+
+		/* L3->L2->L1. no need for L0 */
+		pdppa = AARCH64_KVA_TO_PA(trunc_page((vaddr_t)ptep_in_parent));
+		if (pdppa == pm->pm_l0table_pa)
+			break;
+
+		pg = PHYS_TO_VM_PAGE(pdppa);
+		KASSERT(pg != NULL);
+		KASSERTMSG(pg->wire_count >= 1,
+		    "wire_count=%d", pg->wire_count);
+		/* decrement wire_count of parent */
+		wirecount = atomic_add_16_nv(&pg->wire_count, -1);
+		KASSERTMSG(pg->wire_count <= (Ln_ENTRIES + 1),
+		    "pm=%p[%d], pg=%p, wire_count=%d",
+		    pm, pm->pm_asid, pg, pg->wire_count);
+	}
+
+	return removed;
+}
+
 static int
 _pmap_enter(struct pmap *pm, vaddr_t va, paddr_t pa, vm_prot_t prot,
     u_int flags, bool kenter)
 {
-	struct vm_page *pg;
+	struct vm_page *pg, *pdppg, *pdppg0;
 	struct pv_entry *spv, *opv = NULL;
 	pd_entry_t pde;
 	pt_entry_t attr, pte, *ptep;
@@ -1294,12 +1439,12 @@ _pmap_enter(struct pmap *pm, vaddr_t va,
 	pt_entry_t opte;
 #endif
 	pd_entry_t *l0, *l1, *l2, *l3;
-	paddr_t pdppa;
+	paddr_t pdppa, pdppa0;
 	uint32_t mdattr;
 	unsigned int idx;
 	int error = 0;
 	const bool user = (pm != pmap_kernel());
-	bool need_sync_icache;
+	bool need_sync_icache, exists;
 	bool l3only = true;
 
 	UVMHIST_FUNC(__func__);
@@ -1362,36 +1507,67 @@ _pmap_enter(struct pmap *pm, vaddr_t va,
 	idx = l0pde_index(va);
 	pde = l0[idx];
 	if (!l0pde_valid(pde)) {
-		pmap_alloc_pdp(pm, &pdppa);
-		KASSERT(pdppa != POOL_PADDR_INVALID);
+		/* no need to increment L0 occupancy. L0 page never freed */
+		pdppa = pmap_alloc_pdp(pm, &pdppg, false);	/* L1 pdp */
+		if (pdppa == POOL_PADDR_INVALID) {
+			if (flags & PMAP_CANFAIL) {
+				error = ENOMEM;
+				goto done;
+			}
+			panic("%s: cannot allocate L1 table", __func__);
+		}
 		atomic_swap_64(&l0[idx], pdppa | L0_TABLE);
+		_pmap_pdp_setparent(pm, pdppg, &l0[idx]);
 		l3only = false;
 	} else {
 		pdppa = l0pde_pa(pde);
+		pdppg = NULL;
 	}
 	l1 = (void *)AARCH64_PA_TO_KVA(pdppa);
 
 	idx = l1pde_index(va);
 	pde = l1[idx];
 	if (!l1pde_valid(pde)) {
-		pmap_alloc_pdp(pm, &pdppa);
-		KASSERT(pdppa != POOL_PADDR_INVALID);
+		pdppa0 = pdppa;
+		pdppg0 = pdppg;
+		pdppa = pmap_alloc_pdp(pm, &pdppg, false);	/* L2 pdp */
+		if (pdppa == POOL_PADDR_INVALID) {
+			if (flags & PMAP_CANFAIL) {
+				error = ENOMEM;
+				goto done;
+			}
+			panic("%s: cannot allocate L2 table", __func__);
+		}
 		atomic_swap_64(&l1[idx], pdppa | L1_TABLE);
+		_pmap_pdp_addref(pm, pdppa0, pdppg0);	/* L1 occupancy++ */
+		_pmap_pdp_setparent(pm, pdppg, &l1[idx]);
 		l3only = false;
 	} else {
 		pdppa = l1pde_pa(pde);
+		pdppg = NULL;
 	}
 	l2 = (void *)AARCH64_PA_TO_KVA(pdppa);
 
 	idx = l2pde_index(va);
 	pde = l2[idx];
 	if (!l2pde_valid(pde)) {
-		pmap_alloc_pdp(pm, &pdppa);
-		KASSERT(pdppa != POOL_PADDR_INVALID);
+		pdppa0 = pdppa;
+		pdppg0 = pdppg;
+		pdppa = pmap_alloc_pdp(pm, &pdppg, false);	/* L3 pdp */
+		if (pdppa == POOL_PADDR_INVALID) {
+			if (flags & PMAP_CANFAIL) {
+				error = ENOMEM;
+				goto done;
+			}
+			panic("%s: cannot allocate L3 table", __func__);
+		}
 		atomic_swap_64(&l2[idx], pdppa | L2_TABLE);
+		_pmap_pdp_addref(pm, pdppa0, pdppg0);	/* L2 occupancy++ */
+		_pmap_pdp_setparent(pm, pdppg, &l2[idx]);
 		l3only = false;
 	} else {
 		pdppa = l2pde_pa(pde);
+		pdppg = NULL;
 	}
 	l3 = (void *)AARCH64_PA_TO_KVA(pdppa);
 
@@ -1441,9 +1617,14 @@ _pmap_enter(struct pmap *pm, vaddr_t va,
 				opv = _pmap_remove_pv(opg, pm, va, pte);
 		}
 
-		if (pte & LX_BLKPAG_OS_WIRED)
-			pm->pm_stats.wired_count--;
-		pm->pm_stats.resident_count--;
+		if (pte & LX_BLKPAG_OS_WIRED) {
+			PMSTAT_DEC_WIRED_COUNT(pm);
+		}
+		PMSTAT_DEC_RESIDENT_COUNT(pm);
+
+		exists = true;	/* already exists */
+	} else {
+		exists = false;
 	}
 
 	/*
@@ -1517,10 +1698,13 @@ _pmap_enter(struct pmap *pm, vaddr_t va,
 		atomic_swap_64(ptep, pte);
 		AARCH64_TLBI_BY_ASID_VA(pm->pm_asid, va, l3only);
 	}
+	if (!exists)
+		_pmap_pdp_addref(pm, pdppa, pdppg);	/* L3 occupancy++ */
 
-	if (pte & LX_BLKPAG_OS_WIRED)
-		pm->pm_stats.wired_count++;
-	pm->pm_stats.resident_count++;
+	if (pte & LX_BLKPAG_OS_WIRED) {
+		PMSTAT_INC_WIRED_COUNT(pm);
+	}
+	PMSTAT_INC_RESIDENT_COUNT(pm);
 
  done:
 	pm_unlock(pm);
@@ -1559,6 +1743,7 @@ _pmap_remove(struct pmap *pm, vaddr_t sv
 	paddr_t pa;
 	vaddr_t va;
 	vsize_t blocksize = 0;
+	bool pdpremoved;
 
 	UVMHIST_FUNC(__func__);
 	UVMHIST_CALLED(pmaphist);
@@ -1591,12 +1776,26 @@ _pmap_remove(struct pmap *pm, vaddr_t sv
 			}
 		}
 
-		atomic_swap_64(ptep, 0);
-		AARCH64_TLBI_BY_ASID_VA(pm->pm_asid, va, true);
+		pte = atomic_swap_64(ptep, 0);
+		if (!lxpde_valid(pte))
+			continue;
+
+		pdpremoved = _pmap_pdp_delref(pm,
+		    AARCH64_KVA_TO_PA(trunc_page((vaddr_t)ptep)), true);
+		AARCH64_TLBI_BY_ASID_VA(pm->pm_asid, va, !pdpremoved);
 
-		if ((pte & LX_BLKPAG_OS_WIRED) != 0)
-			pm->pm_stats.wired_count--;
-		pm->pm_stats.resident_count--;
+		if (pdpremoved) {
+			/*
+			 * this Ln page table page has been removed.
+			 * skip to next Ln table
+			 */
+			blocksize *= Ln_ENTRIES;
+		}
+
+		if ((pte & LX_BLKPAG_OS_WIRED) != 0) {
+			PMSTAT_DEC_WIRED_COUNT(pm);
+		}
+		PMSTAT_DEC_RESIDENT_COUNT(pm);
 	}
 }
 
@@ -1644,13 +1843,18 @@ pmap_page_protect(struct vm_page *pg, vm
 		TAILQ_FOREACH_SAFE(pv, &md->mdpg_pvhead, pv_link, pvtmp) {
 
 			opte = atomic_swap_64(pv->pv_ptep, 0);
-			AARCH64_TLBI_BY_ASID_VA(pv->pv_pmap->pm_asid,
-			    pv->pv_va, true);
-
-			if ((opte & LX_BLKPAG_OS_WIRED) != 0)
-				pv->pv_pmap->pm_stats.wired_count--;
-			pv->pv_pmap->pm_stats.resident_count--;
+			if (lxpde_valid(opte)) {
+				_pmap_pdp_delref(pv->pv_pmap,
+				    AARCH64_KVA_TO_PA(trunc_page(
+				    (vaddr_t)pv->pv_ptep)), false);
+				AARCH64_TLBI_BY_ASID_VA(pv->pv_pmap->pm_asid,
+				    pv->pv_va, true);
 
+				if ((opte & LX_BLKPAG_OS_WIRED) != 0) {
+					PMSTAT_DEC_WIRED_COUNT(pv->pv_pmap);
+				}
+				PMSTAT_DEC_RESIDENT_COUNT(pv->pv_pmap);
+			}
 			TAILQ_REMOVE(&md->mdpg_pvhead, pv, pv_link);
 			PMAP_COUNT(pv_remove);
 
@@ -1702,7 +1906,7 @@ pmap_unwire(struct pmap *pm, vaddr_t va)
 		pte &= ~LX_BLKPAG_OS_WIRED;
 		atomic_swap_64(ptep, pte);
 
-		pm->pm_stats.wired_count--;
+		PMSTAT_DEC_WIRED_COUNT(pm);
 	}
 	pm_unlock(pm);
 }

Index: src/sys/arch/aarch64/include/asan.h
diff -u src/sys/arch/aarch64/include/asan.h:1.4 src/sys/arch/aarch64/include/asan.h:1.5
--- src/sys/arch/aarch64/include/asan.h:1.4	Sat Nov 10 18:30:58 2018
+++ src/sys/arch/aarch64/include/asan.h	Tue Mar 19 16:05:49 2019
@@ -1,4 +1,4 @@
-/*	$NetBSD: asan.h,v 1.4 2018/11/10 18:30:58 ryo Exp $	*/
+/*	$NetBSD: asan.h,v 1.5 2019/03/19 16:05:49 ryo Exp $	*/
 
 /*
  * Copyright (c) 2018 The NetBSD Foundation, Inc.
@@ -70,7 +70,7 @@ __md_palloc(void)
 	if (__predict_false(__md_early))
 		pa = (paddr_t)bootpage_alloc();
 	else
-		pmap_alloc_pdp(pmap_kernel(), &pa);
+		pa = pmap_alloc_pdp(pmap_kernel(), NULL, false);
 
 	return pa;
 }

Index: src/sys/arch/aarch64/include/pmap.h
diff -u src/sys/arch/aarch64/include/pmap.h:1.21 src/sys/arch/aarch64/include/pmap.h:1.22
--- src/sys/arch/aarch64/include/pmap.h:1.21	Wed Feb  6 05:33:41 2019
+++ src/sys/arch/aarch64/include/pmap.h	Tue Mar 19 16:05:49 2019
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.h,v 1.21 2019/02/06 05:33:41 ryo Exp $ */
+/* $NetBSD: pmap.h,v 1.22 2019/03/19 16:05:49 ryo Exp $ */
 
 /*-
  * Copyright (c) 2014 The NetBSD Foundation, Inc.
@@ -72,7 +72,7 @@ struct pmap {
 	pd_entry_t *pm_l0table;			/* L0 table: 512G*512 */
 	paddr_t pm_l0table_pa;
 
-	SLIST_HEAD(, vm_page) pm_vmlist;	/* for L[0123] tables */
+	TAILQ_HEAD(, vm_page) pm_vmlist;	/* for L[0123] tables */
 
 	struct pmap_statistics pm_stats;
 	unsigned int pm_refcnt;
@@ -83,9 +83,11 @@ struct pmap {
 struct pv_entry;
 struct vm_page_md {
 	kmutex_t mdpg_pvlock;
-	SLIST_ENTRY(vm_page) mdpg_vmlist;	/* L[0-3] table vm_page list */
+	TAILQ_ENTRY(vm_page) mdpg_vmlist;	/* L[0123] table vm_page list */
 	TAILQ_HEAD(, pv_entry) mdpg_pvhead;
 
+	pd_entry_t *mdpg_ptep_parent;	/* for page descriptor page only */
+
 	/* VM_PROT_READ means referenced, VM_PROT_WRITE means modified */
 	uint32_t mdpg_flags;
 };
@@ -185,7 +187,7 @@ const struct pmap_devmap *pmap_devmap_fi
 vaddr_t pmap_devmap_phystov(paddr_t);
 paddr_t pmap_devmap_vtophys(paddr_t);
 
-pd_entry_t *pmap_alloc_pdp(struct pmap *, paddr_t *);
+paddr_t pmap_alloc_pdp(struct pmap *, struct vm_page **, bool);
 
 #define L1_TRUNC_BLOCK(x)	((x) & L1_FRAME)
 #define L1_ROUND_BLOCK(x)	L1_TRUNC_BLOCK((x) + L1_SIZE - 1)
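
For reference, a minimal sketch of how the reworked pmap_alloc_pdp() declared
in the prototype above is intended to be called, derived from the pmap.c
changes in this diff; the helper name example_alloc_pdp() and its includes are
illustrative assumptions:

#include <sys/param.h>
#include <sys/errno.h>
#include <sys/pool.h>		/* POOL_PADDR_INVALID */
#include <uvm/uvm_extern.h>
#include <machine/pmap.h>	/* assumed location of the pmap_alloc_pdp() prototype */

/* hypothetical helper: allocate one page descriptor page for "pm" */
static int
example_alloc_pdp(struct pmap *pm, bool waitok)
{
	paddr_t pa;

	/* pgp may be NULL if the caller does not need the vm_page pointer */
	pa = pmap_alloc_pdp(pm, NULL, waitok);
	if (pa == POOL_PADDR_INVALID) {
		/*
		 * only reachable with waitok == false; with waitok == true
		 * the call sleeps in uvm_wait() until a page is available.
		 */
		return ENOMEM;
	}
	return 0;
}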
