Module Name:    src
Committed By:   uebayasi
Date:           Thu Feb 25 04:46:28 UTC 2010

Modified Files:
        src/sys/arch/mips/mips [uebayasi-xip]: pmap.c

Log Message:
Use VM_PAGE_TO_MD().  Only compile-tested.
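
For context (not part of the committed diff): the change replaces direct
pg->mdpage accesses with the VM_PAGE_TO_MD() accessor.  A minimal sketch of
the before/after pattern, assuming the usual trivial per-port definition of
the macro:

	/* sketch only; assumes VM_PAGE_TO_MD() is the plain accessor */
	#define VM_PAGE_TO_MD(pg)	(&(pg)->mdpage)

	/* before: reach into the page's machine-dependent data directly */
	pv = pg->mdpage.pvh_list;

	/* after: go through the accessor */
	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
	pv = md->pvh_list;

The point of the accessor is presumably to keep the machine-dependent page
data behind a single macro, so the layout of struct vm_page can change on
the uebayasi-xip branch without editing every use site in the pmap again.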


To generate a diff of this commit:
cvs rdiff -u -r1.188 -r1.188.2.1 src/sys/arch/mips/mips/pmap.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/arch/mips/mips/pmap.c
diff -u src/sys/arch/mips/mips/pmap.c:1.188 src/sys/arch/mips/mips/pmap.c:1.188.2.1
--- src/sys/arch/mips/mips/pmap.c:1.188	Mon Dec 14 00:46:07 2009
+++ src/sys/arch/mips/mips/pmap.c	Thu Feb 25 04:46:28 2010
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.c,v 1.188 2009/12/14 00:46:07 matt Exp $	*/
+/*	$NetBSD: pmap.c,v 1.188.2.1 2010/02/25 04:46:28 uebayasi Exp $	*/
 
 /*-
  * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc.
@@ -67,7 +67,7 @@
 
 #include <sys/cdefs.h>
 
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.188 2009/12/14 00:46:07 matt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.188.2.1 2010/02/25 04:46:28 uebayasi Exp $");
 
 /*
  *	Manages physical address maps.
@@ -265,6 +265,7 @@
 mips_flushcache_allpvh(paddr_t pa)
 {
 	struct vm_page *pg;
+	struct vm_page_md *md;
 	struct pv_entry *pv;
 
 	pg = PHYS_TO_VM_PAGE(pa);
@@ -277,7 +278,8 @@
 		return;
 	}
 
-	pv = pg->mdpage.pvh_list;
+	md = VM_PAGE_TO_MD(pg);
+	pv = md->pvh_list;
 
 #if defined(MIPS3_NO_PV_UNCACHED)
 	/* No current mapping.  Cache was flushed by pmap_remove_pv() */
@@ -522,7 +524,7 @@
 	for (bank = 0; bank < vm_nphysseg; bank++) {
 		s = vm_physmem[bank].end - vm_physmem[bank].start;
 		for (i = 0; i < s; i++)
-			vm_physmem[bank].pgs[i].mdpage.pvh_list = pv++;
+			VM_PAGE_TO_MD(&vm_physmem[bank].pgs[i])->pvh_list = pv++;
 	}
 
 	/*
@@ -861,6 +863,7 @@
 void
 pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
 {
+	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
 	pv_entry_t pv;
 	vaddr_t va;
 
@@ -878,7 +881,7 @@
 	/* copy_on_write */
 	case VM_PROT_READ:
 	case VM_PROT_READ|VM_PROT_EXECUTE:
-		pv = pg->mdpage.pvh_list;
+		pv = md->pvh_list;
 		/*
 		 * Loop over all current mappings setting/clearing as appropos.
 		 */
@@ -894,7 +897,7 @@
 
 	/* remove_all */
 	default:
-		pv = pg->mdpage.pvh_list;
+		pv = md->pvh_list;
 		while (pv->pv_pmap != NULL) {
 			pmap_remove(pv->pv_pmap, pv->pv_va,
 			    pv->pv_va + PAGE_SIZE);
@@ -1079,6 +1082,7 @@
 static void
 pmap_page_cache(struct vm_page *pg, int mode)
 {
+	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
 	pv_entry_t pv;
 	pt_entry_t *pte;
 	unsigned entry;
@@ -1090,7 +1094,7 @@
 		printf("pmap_page_uncache(%#"PRIxPADDR")\n", VM_PAGE_TO_PHYS(pg));
 #endif
 	newmode = mode & PV_UNCACHED ? MIPS3_PG_UNCACHED : MIPS3_PG_CACHED;
-	pv = pg->mdpage.pvh_list;
+	pv = md->pvh_list;
 	asid = pv->pv_pmap->pm_asid;
 	needupdate = (pv->pv_pmap->pm_asidgen == pmap_asid_generation);
 
@@ -1189,7 +1193,8 @@
 	pg = PHYS_TO_VM_PAGE(pa);
 
 	if (pg) {
-		int *attrs = &pg->mdpage.pvh_attrs;
+		struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
+		int *attrs = &md->pvh_attrs;
 
 		/* Set page referenced/modified status based on flags */
 		if (flags & VM_PROT_WRITE)
@@ -1643,7 +1648,8 @@
 #if defined(MIPS3_PLUS)	/* XXX mmu XXX */
 	pg = PHYS_TO_VM_PAGE(phys);
 	if (mips_cache_virtual_alias) {
-		pv = pg->mdpage.pvh_list;
+		struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
+		pv = md->pvh_list;
 		if ((pv->pv_flags & PV_UNCACHED) == 0 &&
 		    mips_cache_indexof(pv->pv_va) != mips_cache_indexof(va))
 			mips_dcache_wbinv_range_index(pv->pv_va, PAGE_SIZE);
@@ -1745,6 +1751,7 @@
 bool
 pmap_clear_reference(struct vm_page *pg)
 {
+	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
 	int *attrp;
 	bool rv;
 
@@ -1753,7 +1760,7 @@
 		printf("pmap_clear_reference(%#"PRIxPADDR")\n",
 		    VM_PAGE_TO_PHYS(pg));
 #endif
-	attrp = &pg->mdpage.pvh_attrs;
+	attrp = &md->pvh_attrs;
 	rv = *attrp & PGA_REFERENCED;
 	*attrp &= ~PGA_REFERENCED;
 	return rv;
@@ -1768,8 +1775,9 @@
 bool
 pmap_is_referenced(struct vm_page *pg)
 {
+	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
 
-	return pg->mdpage.pvh_attrs & PGA_REFERENCED;
+	return md->pvh_attrs & PGA_REFERENCED;
 }
 
 /*
@@ -1778,6 +1786,7 @@
 bool
 pmap_clear_modify(struct vm_page *pg)
 {
+	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
 	struct pmap *pmap;
 	struct pv_entry *pv;
 	pt_entry_t *pte;
@@ -1790,13 +1799,13 @@
 	if (pmapdebug & PDB_FOLLOW)
 		printf("pmap_clear_modify(%#"PRIxPADDR")\n", VM_PAGE_TO_PHYS(pg));
 #endif
-	attrp = &pg->mdpage.pvh_attrs;
+	attrp = &md->pvh_attrs;
 	rv = *attrp & PGA_MODIFIED;
 	*attrp &= ~PGA_MODIFIED;
 	if (!rv) {
 		return rv;
 	}
-	pv = pg->mdpage.pvh_list;
+	pv = md->pvh_list;
 	if (pv->pv_pmap == NULL) {
 		return true;
 	}
@@ -1846,8 +1855,9 @@
 bool
 pmap_is_modified(struct vm_page *pg)
 {
+	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
 
-	return pg->mdpage.pvh_attrs & PGA_MODIFIED;
+	return md->pvh_attrs & PGA_MODIFIED;
 }
 
 /*
@@ -1859,9 +1869,11 @@
 pmap_set_modified(paddr_t pa)
 {
 	struct vm_page *pg;
+	struct vm_page_md *md;
 
 	pg = PHYS_TO_VM_PAGE(pa);
-	pg->mdpage.pvh_attrs |= PGA_MODIFIED | PGA_REFERENCED;
+	md = VM_PAGE_TO_MD(pg);
+	md->pvh_attrs |= PGA_MODIFIED | PGA_REFERENCED;
 }
 
 /******************** misc. functions ********************/
@@ -1911,9 +1923,10 @@
 void
 pmap_enter_pv(pmap_t pmap, vaddr_t va, struct vm_page *pg, u_int *npte)
 {
+	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
 	pv_entry_t pv, npv;
 
-	pv = pg->mdpage.pvh_list;
+	pv = md->pvh_list;
 #ifdef DEBUG
 	if (pmapdebug & PDB_ENTER)
 		printf("pmap_enter: pv %p: was %#"PRIxVADDR"/%p/%p\n",
@@ -2063,6 +2076,7 @@
 void
 pmap_remove_pv(pmap_t pmap, vaddr_t va, struct vm_page *pg)
 {
+	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
 	pv_entry_t pv, npv;
 	int last;
 
@@ -2072,7 +2086,7 @@
 		    VM_PAGE_TO_PHYS(pg));
 #endif
 
-	pv = pg->mdpage.pvh_list;
+	pv = md->pvh_list;
 
 	/*
 	 * If it is the first entry on the list, it is actually
@@ -2116,7 +2130,7 @@
 		 * removed.  If it was, then reenable caching.
 		 */
 
-		pv = pg->mdpage.pvh_list;
+		pv = md->pvh_list;
 		for (npv = pv->pv_next; npv; npv = npv->pv_next) {
 			if (mips_cache_indexof(pv->pv_va ^ npv->pv_va))
 				break;
@@ -2139,6 +2153,7 @@
 pmap_pv_page_alloc(struct pool *pp, int flags)
 {
 	struct vm_page *pg;
+	struct vm_page_md *md;
 	paddr_t phys;
 #if defined(MIPS3_PLUS)
 	pv_entry_t pv;
@@ -2149,6 +2164,7 @@
 	if (pg == NULL)
 		return NULL;
 
+	md = VM_PAGE_TO_MD(pg);
 	phys = VM_PAGE_TO_PHYS(pg);
 #ifdef _LP64
 	KASSERT(mips3_xkphys_cached);
@@ -2159,7 +2175,7 @@
 #if defined(MIPS3_PLUS)
 	if (mips_cache_virtual_alias) {
 		pg = PHYS_TO_VM_PAGE(phys);
-		pv = pg->mdpage.pvh_list;
+		pv = md->pvh_list;
 		if ((pv->pv_flags & PV_UNCACHED) == 0 &&
 		    mips_cache_indexof(pv->pv_va) != mips_cache_indexof(va))
 			mips_dcache_wbinv_range_index(pv->pv_va, PAGE_SIZE);
@@ -2248,7 +2264,8 @@
 #if defined(MIPS3_PLUS)
 	if (mips_cache_virtual_alias) {
 		pg = PHYS_TO_VM_PAGE(pa);
-		pv = pg->mdpage.pvh_list;
+		struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
+		pv = md->pvh_list;
 		if ((pv->pv_flags & PV_UNCACHED) == 0 &&
 		    mips_cache_indexof(pv->pv_va) != mips_cache_indexof(va))
 			mips_dcache_wbinv_range_index(pv->pv_va, PAGE_SIZE);
