Module Name:    src
Committed By:   uebayasi
Date:           Tue Feb  9 07:42:27 UTC 2010

Modified Files:
        src/sys/arch/arm/include/arm32 [uebayasi-xip]: vmparam.h
        src/sys/arch/powerpc/powerpc [uebayasi-xip]: rtas.c
        src/sys/uvm [uebayasi-xip]: uvm_page.c uvm_page.h

Log Message:
Kill vm_page::phys_addr.
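
The per-page phys_addr member of struct vm_page goes away; VM_PAGE_TO_PHYS()
now expands to a call to the new uvm_vm_page_to_phys(), which looks up the
page's vm_physseg and derives the physical address from the page's position
within that segment.  A minimal sketch of the calculation (illustrative only,
not part of the commit; "seg" stands for the segment located by
VM_PHYSSEG_FIND_BY_PG(pg), as implemented in the uvm_page.c hunk below):

	/* physical address = (segment start pfn + page's index in segment) * PAGE_SIZE */
	paddr_t pa = (seg->start + (pg - seg->pgs)) * PAGE_SIZE;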


To generate a diff of this commit:
cvs rdiff -u -r1.24 -r1.24.2.1 src/sys/arch/arm/include/arm32/vmparam.h
cvs rdiff -u -r1.8 -r1.8.24.1 src/sys/arch/powerpc/powerpc/rtas.c
cvs rdiff -u -r1.153.2.3 -r1.153.2.4 src/sys/uvm/uvm_page.c
cvs rdiff -u -r1.59.2.2 -r1.59.2.3 src/sys/uvm/uvm_page.h

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/arch/arm/include/arm32/vmparam.h
diff -u src/sys/arch/arm/include/arm32/vmparam.h:1.24 src/sys/arch/arm/include/arm32/vmparam.h:1.24.2.1
--- src/sys/arch/arm/include/arm32/vmparam.h:1.24	Fri Mar  6 20:31:47 2009
+++ src/sys/arch/arm/include/arm32/vmparam.h	Tue Feb  9 07:42:26 2010
@@ -1,4 +1,4 @@
-/*	$NetBSD: vmparam.h,v 1.24 2009/03/06 20:31:47 joerg Exp $	*/
+/*	$NetBSD: vmparam.h,v 1.24.2.1 2010/02/09 07:42:26 uebayasi Exp $	*/
 
 /*
  * Copyright (c) 2001, 2002 Wasabi Systems, Inc.
@@ -121,7 +121,7 @@
  */
 #if ARM_MMU_V6 > 0
 #define	VM_MDPAGE_PVH_ATTRS_INIT(pg) \
-	(pg)->mdpage.pvh_attrs = (pg)->phys_addr & arm_cache_prefer_mask
+	(pg)->mdpage.pvh_attrs = VM_PAGE_TO_PHYS(pg) & arm_cache_prefer_mask
 #else
 #define	VM_MDPAGE_PVH_ATTRS_INIT(pg) \
 	(pg)->mdpage.pvh_attrs = 0

Index: src/sys/arch/powerpc/powerpc/rtas.c
diff -u src/sys/arch/powerpc/powerpc/rtas.c:1.8 src/sys/arch/powerpc/powerpc/rtas.c:1.8.24.1
--- src/sys/arch/powerpc/powerpc/rtas.c:1.8	Tue Apr  8 02:33:03 2008
+++ src/sys/arch/powerpc/powerpc/rtas.c	Tue Feb  9 07:42:26 2010
@@ -1,4 +1,4 @@
-/*	$NetBSD: rtas.c,v 1.8 2008/04/08 02:33:03 garbled Exp $ */
+/*	$NetBSD: rtas.c,v 1.8.24.1 2010/02/09 07:42:26 uebayasi Exp $ */
 
 /*
  * CHRP RTAS support routines
@@ -9,7 +9,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: rtas.c,v 1.8 2008/04/08 02:33:03 garbled Exp $");
+__KERNEL_RCSID(0, "$NetBSD: rtas.c,v 1.8.24.1 2010/02/09 07:42:26 uebayasi Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -123,7 +123,7 @@
 	    &pglist, 1, 0))
 		goto fail;
 
-	sc->ra_base_pa = TAILQ_FIRST(&pglist)->phys_addr;
+	sc->ra_base_pa = VM_PAGE_TO_PHYS(TAILQ_FIRST(&pglist));
 
 	ih = OF_open("/rtas");
 	if (ih == -1)

Index: src/sys/uvm/uvm_page.c
diff -u src/sys/uvm/uvm_page.c:1.153.2.3 src/sys/uvm/uvm_page.c:1.153.2.4
--- src/sys/uvm/uvm_page.c:1.153.2.3	Mon Feb  8 06:14:57 2010
+++ src/sys/uvm/uvm_page.c	Tue Feb  9 07:42:26 2010
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_page.c,v 1.153.2.3 2010/02/08 06:14:57 uebayasi Exp $	*/
+/*	$NetBSD: uvm_page.c,v 1.153.2.4 2010/02/09 07:42:26 uebayasi Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -71,7 +71,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.153.2.3 2010/02/08 06:14:57 uebayasi Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.153.2.4 2010/02/09 07:42:26 uebayasi Exp $");
 
 #include "opt_ddb.h"
 #include "opt_uvmhist.h"
@@ -441,7 +441,6 @@
 		/* init and free vm_pages (we've already zeroed them) */
 		paddr = ptoa(vm_physmem[lcv].start);
 		for (i = 0 ; i < n ; i++, paddr += PAGE_SIZE) {
-			vm_physmem[lcv].pgs[i].phys_addr = paddr;
 #ifdef __HAVE_VM_PAGE_MD
 			VM_MDPAGE_INIT(&vm_physmem[lcv].pgs[i]);
 #endif
@@ -791,11 +790,10 @@
 			printf("\tignoring 0x%lx -> 0x%lx\n", start, end);
 			return;
 		}
-		/* zero data, init phys_addr and free_list, and free pages */
+		/* zero data, init free_list, and free pages */
 		memset(pgs, 0, sizeof(struct vm_page) * npages);
 		for (lcv = 0, paddr = ptoa(start) ;
 				 lcv < npages ; lcv++, paddr += PAGE_SIZE) {
-			pgs[lcv].phys_addr = paddr;
 			pgs[lcv].free_list = free_list;
 			if (atop(paddr) >= avail_start &&
 			    atop(paddr) <= avail_end)
@@ -874,14 +872,18 @@
 
 #if VM_PHYSSEG_MAX == 1
 #define	VM_PHYSSEG_FIND	vm_physseg_find_contig
+#define	VM_PHYSSEG_FIND_BY_PG	vm_physseg_find_by_pg_contig
 #elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
 #define	VM_PHYSSEG_FIND	vm_physseg_find_bsearch
+#define	VM_PHYSSEG_FIND_BY_PG	vm_physseg_find_by_pg_bsearch
 #else
 #define	VM_PHYSSEG_FIND	vm_physseg_find_linear
+#define	VM_PHYSSEG_FIND_BY_PG	vm_physseg_find_by_pg_linear
 #endif
 
 static inline int VM_PHYSSEG_FIND(struct vm_physseg *, int, int,
     paddr_t, struct vm_page *, int *);
+static inline struct vm_physseg *VM_PHYSSEG_FIND_BY_PG(const struct vm_page *);
 static inline bool vm_physseg_within_p(struct vm_physseg *, int, paddr_t,
     struct vm_page *);
 static inline bool vm_physseg_ge_p(struct vm_physseg *, int, paddr_t,
@@ -1034,6 +1036,96 @@
 	return(NULL);
 }
 
+paddr_t
+uvm_vm_page_to_phys(const struct vm_page *pg)
+{
+	struct vm_physseg *seg;
+
+	seg = VM_PHYSSEG_FIND_BY_PG(pg);
+	return (seg->start + pg - seg->pgs) * PAGE_SIZE;
+}
+
+#if VM_PHYSSEG_MAX == 1
+static inline struct vm_physseg *
+vm_physseg_find_by_pg_contig(const struct vm_page *pg)
+{
+	struct vm_physseg *seg;
+
+	/* 'contig' case */
+	if (pg >= vm_physmem[0].pgs && pg < vm_physmem[0].endpg) {
+		seg = &vm_physmem[0];
+		return seg;
+	}
+	panic("invalid pg=%p\n", pg);
+}
+
+#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
+
+static inline struct vm_physseg *
+vm_physseg_find_by_pg_bsearch(const struct vm_page *pg)
+{
+	struct vm_physseg *seg;
+
+	/* binary search for it */
+	u_int	start, len, try;
+
+	/*
+	 * if try is too large (thus target is less than try) we reduce
+	 * the length to trunc(len/2) [i.e. everything smaller than "try"]
+	 *
+	 * if the try is too small (thus target is greater than try) then
+	 * we set the new start to be (try + 1).   this means we need to
+	 * reduce the length to (round(len/2) - 1).
+	 *
+	 * note "adjust" below which takes advantage of the fact that
+	 *  (round(len/2) - 1) == trunc((len - 1) / 2)
+	 * for any value of len we may have
+	 */
+
+	for (start = 0, len = vm_nphysseg ; len != 0 ; len = len / 2) {
+		try = start + (len / 2);	/* try in the middle */
+
+		/* start past our try? */
+		if (pg >= vm_physmem[try].pgs) {
+			/* was try correct? */
+			if (pg < vm_physmem[try].endpg) {
+				seg = &vm_physmem[try];
+				return seg;
+			}
+			start = try + 1;	/* next time, start here */
+			len--;			/* "adjust" */
+		} else {
+			/*
+			 * pg before try, just reduce length of
+			 * region, done in "for" loop
+			 */
+		}
+	}
+	panic("invalid pg=%p\n", pg);
+}
+
+#else
+
+static inline struct vm_physseg *
+vm_physseg_find_by_pg_linear(const struct vm_page *pg)
+{
+	struct vm_physseg *seg;
+
+	/* linear search for it */
+	int	lcv;
+
+	for (lcv = 0; lcv < vm_nphysseg; lcv++) {
+		if (pg >= vm_physmem[lcv].pgs &&
+		    pg < vm_physmem[lcv].endpg) {
+			seg = &vm_physmem[lcv];
+			return seg;
+		}
+	}
+	panic("invalid pg=%p\n", pg);
+}
+
+#endif
+
 /*
  * uvm_page_recolor: Recolor the pages if the new bucket count is
  * larger than the old one.

Index: src/sys/uvm/uvm_page.h
diff -u src/sys/uvm/uvm_page.h:1.59.2.2 src/sys/uvm/uvm_page.h:1.59.2.3
--- src/sys/uvm/uvm_page.h:1.59.2.2	Mon Feb  8 05:53:05 2010
+++ src/sys/uvm/uvm_page.h	Tue Feb  9 07:42:26 2010
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_page.h,v 1.59.2.2 2010/02/08 05:53:05 uebayasi Exp $	*/
+/*	$NetBSD: uvm_page.h,v 1.59.2.3 2010/02/09 07:42:26 uebayasi Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -142,7 +142,6 @@
 						 * to modify: [O _and_ P] */
 	uint16_t		wire_count;	/* wired down map refs [P] */
 	uint16_t		pqflags;	/* page queue flags [P] */
-	paddr_t			phys_addr;	/* physical address of page */
 
 #ifdef __HAVE_VM_PAGE_MD
 	struct vm_page_md	mdpage;		/* pmap-specific data */
@@ -291,6 +290,7 @@
 
 int vm_physseg_find(paddr_t, int *);
 struct vm_page *uvm_phys_to_vm_page(paddr_t);
+paddr_t uvm_vm_page_to_phys(const struct vm_page *);
 
 /*
  * macros
@@ -298,7 +298,7 @@
 
 #define UVM_PAGE_TREE_PENALTY	4	/* XXX: a guess */
 
-#define VM_PAGE_TO_PHYS(entry)	((entry)->phys_addr)
+#define VM_PAGE_TO_PHYS(entry)	uvm_vm_page_to_phys(entry)
 
 /*
  * Compute the page color bucket for a given page.
