Module Name:    src
Committed By:   uebayasi
Date:           Thu Feb 25 03:44:17 UTC 2010

Modified Files:
        src/sys/arch/alpha/alpha [uebayasi-xip]: pmap.c
        src/sys/arch/alpha/include [uebayasi-xip]: pmap.h

Log Message:
Use VM_PAGE_TO_MD().  Only compile tested.


To generate a diff of this commit:
cvs rdiff -u -r1.252 -r1.252.2.1 src/sys/arch/alpha/alpha/pmap.c
cvs rdiff -u -r1.75 -r1.75.2.1 src/sys/arch/alpha/include/pmap.h

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/arch/alpha/alpha/pmap.c
diff -u src/sys/arch/alpha/alpha/pmap.c:1.252 src/sys/arch/alpha/alpha/pmap.c:1.252.2.1
--- src/sys/arch/alpha/alpha/pmap.c:1.252	Thu Nov 26 00:19:11 2009
+++ src/sys/arch/alpha/alpha/pmap.c	Thu Feb 25 03:44:16 2010
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.252 2009/11/26 00:19:11 matt Exp $ */
+/* $NetBSD: pmap.c,v 1.252.2.1 2010/02/25 03:44:16 uebayasi Exp $ */
 
 /*-
  * Copyright (c) 1998, 1999, 2000, 2001, 2007, 2008 The NetBSD Foundation, Inc.
@@ -137,10 +137,12 @@
 #include "opt_lockdebug.h"
 #include "opt_sysv.h"
 #include "opt_multiprocessor.h"
+#include "opt_device_page.h"
+#include "opt_xip.h"
 
 #include <sys/cdefs.h>			/* RCS ID & Copyright macro defns */
 
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.252 2009/11/26 00:19:11 matt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.252.2.1 2010/02/25 03:44:16 uebayasi Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -1401,6 +1403,7 @@
 void
 pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
 {
+	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
 	pmap_t pmap;
 	pv_entry_t pv, nextpv;
 	bool needkisync = false;
@@ -1427,7 +1430,7 @@
 		PMAP_HEAD_TO_MAP_LOCK();
 		lock = pmap_pvh_lock(pg);
 		mutex_enter(lock);
-		for (pv = pg->mdpage.pvh_list; pv != NULL; pv = pv->pv_next) {
+		for (pv = md->pvh_list; pv != NULL; pv = pv->pv_next) {
 			PMAP_LOCK(pv->pv_pmap);
 			if (*pv->pv_pte & (PG_KWE | PG_UWE)) {
 				*pv->pv_pte &= ~(PG_KWE | PG_UWE);
@@ -1452,7 +1455,7 @@
 	PMAP_HEAD_TO_MAP_LOCK();
 	lock = pmap_pvh_lock(pg);
 	mutex_enter(lock);
-	for (pv = pg->mdpage.pvh_list; pv != NULL; pv = nextpv) {
+	for (pv = md->pvh_list; pv != NULL; pv = nextpv) {
 		nextpv = pv->pv_next;
 		pmap = pv->pv_pmap;
 
@@ -1793,6 +1796,7 @@
 	 */
 	npte = ((pa >> PGSHIFT) << PG_SHIFT) | pte_prot(pmap, prot) | PG_V;
 	if (pg != NULL) {
+		struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
 		int attrs;
 
 #ifdef DIAGNOSTIC
@@ -1802,10 +1806,10 @@
 		lock = pmap_pvh_lock(pg);
 		mutex_enter(lock);
 		if (flags & VM_PROT_WRITE)
-			pg->mdpage.pvh_attrs |= (PGA_REFERENCED|PGA_MODIFIED);
+			md->pvh_attrs |= (PGA_REFERENCED|PGA_MODIFIED);
 		else if (flags & VM_PROT_ALL)
-			pg->mdpage.pvh_attrs |= PGA_REFERENCED;
-		attrs = pg->mdpage.pvh_attrs;
+			md->pvh_attrs |= PGA_REFERENCED;
+		attrs = md->pvh_attrs;
 		mutex_exit(lock);
 
 		/*
@@ -2282,6 +2286,7 @@
 bool
 pmap_clear_modify(struct vm_page *pg)
 {
+	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
 	bool rv = false;
 	long cpu_id = cpu_number();
 	kmutex_t *lock;
@@ -2295,10 +2300,10 @@
 	lock = pmap_pvh_lock(pg);
 	mutex_enter(lock);
 
-	if (pg->mdpage.pvh_attrs & PGA_MODIFIED) {
+	if (md->pvh_attrs & PGA_MODIFIED) {
 		rv = true;
 		pmap_changebit(pg, PG_FOW, ~0, cpu_id);
-		pg->mdpage.pvh_attrs &= ~PGA_MODIFIED;
+		md->pvh_attrs &= ~PGA_MODIFIED;
 	}
 
 	mutex_exit(lock);
@@ -2315,6 +2320,7 @@
 bool
 pmap_clear_reference(struct vm_page *pg)
 {
+	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
 	bool rv = false;
 	long cpu_id = cpu_number();
 	kmutex_t *lock;
@@ -2328,10 +2334,10 @@
 	lock = pmap_pvh_lock(pg);
 	mutex_enter(lock);
 
-	if (pg->mdpage.pvh_attrs & PGA_REFERENCED) {
+	if (md->pvh_attrs & PGA_REFERENCED) {
 		rv = true;
 		pmap_changebit(pg, PG_FOR | PG_FOW | PG_FOE, ~0, cpu_id);
-		pg->mdpage.pvh_attrs &= ~PGA_REFERENCED;
+		md->pvh_attrs &= ~PGA_REFERENCED;
 	}
 
 	mutex_exit(lock);
@@ -2539,6 +2545,7 @@
 static void
 pmap_changebit(struct vm_page *pg, u_long set, u_long mask, long cpu_id)
 {
+	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
 	pv_entry_t pv;
 	pt_entry_t *pte, npte;
 	vaddr_t va;
@@ -2554,7 +2561,7 @@
 	/*
 	 * Loop over all current mappings setting/clearing as apropos.
 	 */
-	for (pv = pg->mdpage.pvh_list; pv != NULL; pv = pv->pv_next) {
+	for (pv = md->pvh_list; pv != NULL; pv = pv->pv_next) {
 		va = pv->pv_va;
 
 		PMAP_LOCK(pv->pv_pmap);
@@ -2681,16 +2688,17 @@
 	 *	(2) if it was a write fault, mark page as modified.
 	 */
 	pg = PHYS_TO_VM_PAGE(pa);
+	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
 
 	PMAP_HEAD_TO_MAP_LOCK();
 	lock = pmap_pvh_lock(pg);
 	mutex_enter(lock);
 
 	if (type == ALPHA_MMCSR_FOW) {
-		pg->mdpage.pvh_attrs |= (PGA_REFERENCED|PGA_MODIFIED);
+		md->pvh_attrs |= (PGA_REFERENCED|PGA_MODIFIED);
 		faultoff = PG_FOR | PG_FOW;
 	} else {
-		pg->mdpage.pvh_attrs |= PGA_REFERENCED;
+		md->pvh_attrs |= PGA_REFERENCED;
 		faultoff = PG_FOR;
 		if (exec) {
 			faultoff |= PG_FOE;
@@ -2713,16 +2721,18 @@
 pmap_pv_dump(paddr_t pa)
 {
 	struct vm_page *pg;
+	struct vm_page_md *md;
 	pv_entry_t pv;
 	kmutex_t *lock;
 
 	pg = PHYS_TO_VM_PAGE(pa);
+	md = VM_PAGE_TO_MD(pg);
 
 	lock = pmap_pvh_lock(pg);
 	mutex_enter(lock);
 
-	printf("pa 0x%lx (attrs = 0x%x):\n", pa, pg->mdpage.pvh_attrs);
-	for (pv = pg->mdpage.pvh_list; pv != NULL; pv = pv->pv_next)
+	printf("pa 0x%lx (attrs = 0x%x):\n", pa, md->pvh_attrs);
+	for (pv = md->pvh_list; pv != NULL; pv = pv->pv_next)
 		printf("     pmap %p, va 0x%lx\n",
 		    pv->pv_pmap, pv->pv_va);
 	printf("\n");
@@ -2773,6 +2783,7 @@
 pmap_pv_enter(pmap_t pmap, struct vm_page *pg, vaddr_t va, pt_entry_t *pte,
     bool dolock)
 {
+	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
 	pv_entry_t newpv;
 	kmutex_t *lock;
 
@@ -2797,7 +2808,7 @@
 	/*
 	 * Make sure the entry doesn't already exist.
 	 */
-	for (pv = pg->mdpage.pvh_list; pv != NULL; pv = pv->pv_next) {
+	for (pv = md->pvh_list; pv != NULL; pv = pv->pv_next) {
 		if (pmap == pv->pv_pmap && va == pv->pv_va) {
 			printf("pmap = %p, va = 0x%lx\n", pmap, va);
 			panic("pmap_pv_enter: already in pv table");
@@ -2809,8 +2820,8 @@
 	/*
 	 * ...and put it in the list.
 	 */
-	newpv->pv_next = pg->mdpage.pvh_list;
-	pg->mdpage.pvh_list = newpv;
+	newpv->pv_next = md->pvh_list;
+	md->pvh_list = newpv;
 
 	if (dolock) {
 		mutex_exit(lock);
@@ -2827,6 +2838,7 @@
 static void
 pmap_pv_remove(pmap_t pmap, struct vm_page *pg, vaddr_t va, bool dolock)
 {
+	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
 	pv_entry_t pv, *pvp;
 	kmutex_t *lock;
 
@@ -2840,7 +2852,7 @@
 	/*
 	 * Find the entry to remove.
 	 */
-	for (pvp = &pg->mdpage.pvh_list, pv = *pvp;
+	for (pvp = &md->pvh_list, pv = *pvp;
 	     pv != NULL; pvp = &pv->pv_next, pv = *pvp)
 		if (pmap == pv->pv_pmap && va == pv->pv_va)
 			break;
@@ -2910,9 +2922,10 @@
 	if (pg != NULL) {
 		pa = VM_PAGE_TO_PHYS(pg);
 #ifdef DEBUG
-		if (pg->mdpage.pvh_refcnt != 0) {
+		struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
+		if (md->pvh_refcnt != 0) {
 			printf("pmap_physpage_alloc: page 0x%lx has "
-			    "%d references\n", pa, pg->mdpage.pvh_refcnt);
+			    "%d references\n", pa, md->pvh_refcnt);
 			panic("pmap_physpage_alloc");
 		}
 #endif
@@ -2936,7 +2949,8 @@
 		panic("pmap_physpage_free: bogus physical page address");
 
 #ifdef DEBUG
-	if (pg->mdpage.pvh_refcnt != 0)
+	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
+	if (md->pvh_refcnt != 0)
 		panic("pmap_physpage_free: page still has references");
 #endif
 
@@ -2952,14 +2966,16 @@
 pmap_physpage_addref(void *kva)
 {
 	struct vm_page *pg;
+	struct vm_page_md *md;
 	paddr_t pa;
 
 	pa = ALPHA_K0SEG_TO_PHYS(trunc_page((vaddr_t)kva));
 	pg = PHYS_TO_VM_PAGE(pa);
+	md = VM_PAGE_TO_MD(pg);
 
-	KASSERT((int)pg->mdpage.pvh_refcnt >= 0);
+	KASSERT((int)md->pvh_refcnt >= 0);
 
-	return atomic_inc_uint_nv(&pg->mdpage.pvh_refcnt);
+	return atomic_inc_uint_nv(&md->pvh_refcnt);
 }
 
 /*
@@ -2971,14 +2987,16 @@
 pmap_physpage_delref(void *kva)
 {
 	struct vm_page *pg;
+	struct vm_page_md *md;
 	paddr_t pa;
 
 	pa = ALPHA_K0SEG_TO_PHYS(trunc_page((vaddr_t)kva));
 	pg = PHYS_TO_VM_PAGE(pa);
+	md = VM_PAGE_TO_MD(pg);
 
-	KASSERT((int)pg->mdpage.pvh_refcnt > 0);
+	KASSERT((int)md->pvh_refcnt > 0);
 
-	return atomic_dec_uint_nv(&pg->mdpage.pvh_refcnt);
+	return atomic_dec_uint_nv(&md->pvh_refcnt);
 }
 
 /******************** page table page management ********************/

Index: src/sys/arch/alpha/include/pmap.h
diff -u src/sys/arch/alpha/include/pmap.h:1.75 src/sys/arch/alpha/include/pmap.h:1.75.2.1
--- src/sys/arch/alpha/include/pmap.h:1.75	Mon Oct 26 03:51:43 2009
+++ src/sys/arch/alpha/include/pmap.h	Thu Feb 25 03:44:17 2010
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.h,v 1.75 2009/10/26 03:51:43 thorpej Exp $ */
+/* $NetBSD: pmap.h,v 1.75.2.1 2010/02/25 03:44:17 uebayasi Exp $ */
 
 /*-
  * Copyright (c) 1998, 1999, 2000, 2001, 2007 The NetBSD Foundation, Inc.
@@ -223,9 +223,9 @@
 }
 
 #define	pmap_is_referenced(pg)						\
-	(((pg)->mdpage.pvh_attrs & PGA_REFERENCED) != 0)
+	((VM_PAGE_TO_MD(pg)->pvh_attrs & PGA_REFERENCED) != 0)
 #define	pmap_is_modified(pg)						\
-	(((pg)->mdpage.pvh_attrs & PGA_MODIFIED) != 0)
+	((VM_PAGE_TO_MD(pg)->pvh_attrs & PGA_MODIFIED) != 0)
 
 #define	PMAP_STEAL_MEMORY		/* enable pmap_steal_memory() */
 #define	PMAP_GROWKERNEL			/* enable pmap_growkernel() */

Reply via email to the mailing list.