Module Name:	src
Committed By:	yamt
Date:		Sun Nov  6 22:05:01 UTC 2011
Modified Files:
	src/sys/nfs [yamt-pagecache]: nfs_subs.c
	src/sys/ufs/lfs [yamt-pagecache]: lfs_vnops.c
	src/sys/uvm [yamt-pagecache]: uvm_aobj.c uvm_loan.c uvm_object.c
	    uvm_object.h uvm_page.c uvm_page.h uvm_pglist.c

Log Message:
remove pg->listq and uobj->memq


To generate a diff of this commit:
cvs rdiff -u -r1.221.2.1 -r1.221.2.2 src/sys/nfs/nfs_subs.c
cvs rdiff -u -r1.238.2.1 -r1.238.2.2 src/sys/ufs/lfs/lfs_vnops.c
cvs rdiff -u -r1.116.2.2 -r1.116.2.3 src/sys/uvm/uvm_aobj.c
cvs rdiff -u -r1.81.2.1 -r1.81.2.2 src/sys/uvm/uvm_loan.c
cvs rdiff -u -r1.11.2.1 -r1.11.2.2 src/sys/uvm/uvm_object.c
cvs rdiff -u -r1.31.2.1 -r1.31.2.2 src/sys/uvm/uvm_object.h
cvs rdiff -u -r1.178.2.1 -r1.178.2.2 src/sys/uvm/uvm_page.c
cvs rdiff -u -r1.73.2.1 -r1.73.2.2 src/sys/uvm/uvm_page.h
cvs rdiff -u -r1.62 -r1.62.2.1 src/sys/uvm/uvm_pglist.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.
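The recurring idiom in the diffs below replaces TAILQ iteration over
uobj->memq with either a single uvm_pagelookup() where one page is
wanted (uvm_loan.c, uvm_page_printit), or an offset-keyed walk of the
object's radix tree through struct uvm_page_array.  A minimal sketch of
that walk, based on the nfs_clearcommit() hunk below (the function name
visit_all_pages is made up for illustration; the per-page work done by
real callers is elided):

	/*
	 * Sketch: visit every resident page of "uobj" in ascending
	 * offset order, using the radix tree instead of the removed
	 * uobj->memq TAILQ.  Caller holds uobj->vmobjlock.
	 */
	static void
	visit_all_pages(struct uvm_object *uobj)
	{
		struct uvm_page_array a;
		struct vm_page *pg;
		voff_t off;

		KASSERT(mutex_owned(uobj->vmobjlock));
		uvm_page_array_init(&a);
		off = 0;
		while ((pg = uvm_page_array_fill_and_peek(&a, uobj, off,
		    false)) != NULL) {
			uvm_page_array_advance(&a);
			/* ... operate on pg ... */
			off = pg->offset + PAGE_SIZE;	/* resume after pg */
		}
		uvm_page_array_fini(&a);
	}

The same shape appears in uao_detach_locked(), uao_put() and
uvm_object_printit() below, with different start offsets and
termination conditions.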
Modified files:

Index: src/sys/nfs/nfs_subs.c
diff -u src/sys/nfs/nfs_subs.c:1.221.2.1 src/sys/nfs/nfs_subs.c:1.221.2.2
--- src/sys/nfs/nfs_subs.c:1.221.2.1	Wed Nov  2 21:53:59 2011
+++ src/sys/nfs/nfs_subs.c	Sun Nov  6 22:05:01 2011
@@ -1,4 +1,4 @@
-/*	$NetBSD: nfs_subs.c,v 1.221.2.1 2011/11/02 21:53:59 yamt Exp $	*/
+/*	$NetBSD: nfs_subs.c,v 1.221.2.2 2011/11/06 22:05:01 yamt Exp $	*/
 
 /*
  * Copyright (c) 1989, 1993
@@ -70,7 +70,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: nfs_subs.c,v 1.221.2.1 2011/11/02 21:53:59 yamt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: nfs_subs.c,v 1.221.2.2 2011/11/06 22:05:01 yamt Exp $");
 
 #ifdef _KERNEL_OPT
 #include "opt_nfs.h"
@@ -100,6 +100,7 @@ __KERNEL_RCSID(0, "$NetBSD: nfs_subs.c,v
 #include <sys/atomic.h>
 
 #include <uvm/uvm.h>
+#include <uvm/uvm_page_array.h>
 
 #include <nfs/rpcv2.h>
 #include <nfs/nfsproto.h>
@@ -1745,6 +1746,9 @@ nfs_clearcommit(struct mount *mp)
 	rw_enter(&nmp->nm_writeverflock, RW_WRITER);
 	mutex_enter(&mntvnode_lock);
 	TAILQ_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
+		struct uvm_page_array a;
+		voff_t off;
+
 		KASSERT(vp->v_mount == mp);
 		if (vp->v_type != VREG)
 			continue;
@@ -1756,9 +1760,15 @@ nfs_clearcommit(struct mount *mp)
 		np = VTONFS(vp);
 		np->n_commitflags &=
 		    ~(NFS_COMMIT_PUSH_VALID | NFS_COMMIT_PUSHED_VALID);
-		TAILQ_FOREACH(pg, &vp->v_uobj.memq, listq.queue) {
+		uvm_page_array_init(&a);
+		off = 0;
+		while ((pg = uvm_page_array_fill_and_peek(&a, &vp->v_uobj, off,
+		    false)) != NULL) {
 			pg->flags &= ~PG_NEEDCOMMIT;
+			uvm_page_array_advance(&a);
+			off = pg->offset + PAGE_SIZE;
 		}
+		uvm_page_array_fini(&a);
 		mutex_exit(vp->v_interlock);
 	}
 	mutex_exit(&mntvnode_lock);

Index: src/sys/ufs/lfs/lfs_vnops.c
diff -u src/sys/ufs/lfs/lfs_vnops.c:1.238.2.1 src/sys/ufs/lfs/lfs_vnops.c:1.238.2.2
--- src/sys/ufs/lfs/lfs_vnops.c:1.238.2.1	Wed Nov  2 21:54:00 2011
+++ src/sys/ufs/lfs/lfs_vnops.c	Sun Nov  6 22:05:01 2011
@@ -1,4 +1,4 @@
-/*	$NetBSD: lfs_vnops.c,v 1.238.2.1 2011/11/02 21:54:00 yamt Exp $	*/
+/*	$NetBSD: lfs_vnops.c,v 1.238.2.2 2011/11/06 22:05:01 yamt Exp $	*/
 
 /*-
  * Copyright (c) 1999, 2000, 2001, 2002, 2003 The NetBSD Foundation, Inc.
@@ -60,7 +60,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: lfs_vnops.c,v 1.238.2.1 2011/11/02 21:54:00 yamt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: lfs_vnops.c,v 1.238.2.2 2011/11/06 22:05:01 yamt Exp $");
 
 #ifdef _KERNEL_OPT
 #include "opt_compat_netbsd.h"
@@ -1836,10 +1836,8 @@ check_dirty(struct lfs *fs, struct vnode
     off_t startoffset, off_t endoffset, off_t blkeof, int flags,
     int checkfirst, struct vm_page **pgp)
 {
-	int by_list;
-	struct vm_page *curpg = NULL; /* XXX: gcc */
 	struct vm_page *pgs[MAXBSIZE / PAGE_SIZE], *pg;
-	off_t soff = 0; /* XXX: gcc */
+	off_t soff;
 	voff_t off;
 	int i;
 	int nonexistent;
@@ -1851,39 +1849,10 @@ check_dirty(struct lfs *fs, struct vnode
 	ASSERT_MAYBE_SEGLOCK(fs);
   top:
-	by_list = (vp->v_uobj.uo_npages <=
-	    ((endoffset - startoffset) >> PAGE_SHIFT) *
-	    UVM_PAGE_TREE_PENALTY);
 	any_dirty = 0;
 
-	if (by_list) {
-		curpg = TAILQ_FIRST(&vp->v_uobj.memq);
-	} else {
-		soff = startoffset;
-	}
-	while (by_list || soff < MIN(blkeof, endoffset)) {
-		if (by_list) {
-			/*
-			 * Find the first page in a block.  Skip
-			 * blocks outside our area of interest or beyond
-			 * the end of file.
-			 */
-			KASSERT(curpg == NULL
-			    || (curpg->flags & PG_MARKER) == 0);
-			if (pages_per_block > 1) {
-				while (curpg &&
-				    ((curpg->offset & fs->lfs_bmask) ||
-				    curpg->offset >= vp->v_size ||
-				    curpg->offset >= endoffset)) {
-					curpg = TAILQ_NEXT(curpg, listq.queue);
-					KASSERT(curpg == NULL ||
-					    (curpg->flags & PG_MARKER) == 0);
-				}
-			}
-			if (curpg == NULL)
-				break;
-			soff = curpg->offset;
-		}
+	soff = startoffset;
+	while (soff < MIN(blkeof, endoffset)) {
 
 		/*
 		 * Mark all pages in extended range busy; find out if any
@@ -1891,15 +1860,11 @@ check_dirty(struct lfs *fs, struct vnode
 		 */
 		nonexistent = dirty = 0;
 		for (i = 0; i == 0 || i < pages_per_block; i++) {
-			if (by_list && pages_per_block <= 1) {
-				pgs[i] = pg = curpg;
-			} else {
-				off = soff + (i << PAGE_SHIFT);
-				pgs[i] = pg = uvm_pagelookup(&vp->v_uobj, off);
-				if (pg == NULL) {
-					++nonexistent;
-					continue;
-				}
+			off = soff + (i << PAGE_SHIFT);
+			pgs[i] = pg = uvm_pagelookup(&vp->v_uobj, off);
+			if (pg == NULL) {
+				++nonexistent;
+				continue;
 			}
 			KASSERT(pg != NULL);
@@ -1936,11 +1901,7 @@ check_dirty(struct lfs *fs, struct vnode
 			dirty += tdirty;
 		}
 		if (pages_per_block > 0 && nonexistent >= pages_per_block) {
-			if (by_list) {
-				curpg = TAILQ_NEXT(curpg, listq.queue);
-			} else {
-				soff += fs->lfs_bsize;
-			}
+			soff += fs->lfs_bsize;
 			continue;
 		}
@@ -1981,11 +1942,7 @@ check_dirty(struct lfs *fs, struct vnode
 		if (checkfirst && any_dirty)
 			break;
 
-		if (by_list) {
-			curpg = TAILQ_NEXT(curpg, listq.queue);
-		} else {
-			soff += MAX(PAGE_SIZE, fs->lfs_bsize);
-		}
+		soff += MAX(PAGE_SIZE, fs->lfs_bsize);
 	}
 
 	return any_dirty;
@@ -2074,8 +2031,7 @@ lfs_putpages(void *v)
 	 * If there are no pages, don't do anything.
 	 */
 	if (vp->v_uobj.uo_npages == 0) {
-		if (TAILQ_EMPTY(&vp->v_uobj.memq) &&
-		    (vp->v_iflag & VI_ONWORKLST) &&
+		if ((vp->v_iflag & VI_ONWORKLST) &&
 		    LIST_FIRST(&vp->v_dirtyblkhd) == NULL) {
 			vp->v_iflag &= ~VI_WRMAPDIRTY;
 			vn_syncer_remove_from_worklist(vp);

Index: src/sys/uvm/uvm_aobj.c
diff -u src/sys/uvm/uvm_aobj.c:1.116.2.2 src/sys/uvm/uvm_aobj.c:1.116.2.3
--- src/sys/uvm/uvm_aobj.c:1.116.2.2	Sun Nov  6 10:15:11 2011
+++ src/sys/uvm/uvm_aobj.c	Sun Nov  6 22:05:00 2011
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_aobj.c,v 1.116.2.2 2011/11/06 10:15:11 yamt Exp $	*/
+/*	$NetBSD: uvm_aobj.c,v 1.116.2.3 2011/11/06 22:05:00 yamt Exp $	*/
 
 /*
  * Copyright (c) 1998 Chuck Silvers, Charles D. Cranor and
@@ -38,7 +38,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_aobj.c,v 1.116.2.2 2011/11/06 10:15:11 yamt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_aobj.c,v 1.116.2.3 2011/11/06 22:05:00 yamt Exp $");
 
 #include "opt_uvmhist.h"
@@ -147,7 +147,7 @@ static struct pool uao_swhash_elt_pool;
  */
 
 struct uvm_aobj {
-	struct uvm_object u_obj; /* has: lock, pgops, memq, #pages, #refs */
+	struct uvm_object u_obj; /* has: lock, pgops, #pages, #refs */
 	pgoff_t u_pages;	 /* number of pages in entire object */
 	int u_flags;		 /* the flags (see uvm_aobj.h) */
 	int *u_swslots;		 /* array of offset->swapslot mappings */
@@ -664,16 +664,8 @@ uao_detach_locked(struct uvm_object *uob
 
 	uvm_page_array_init(&a);
 	mutex_enter(&uvm_pageqlock);
-	while (/*CONSTCOND*/true) {
-		pg = uvm_page_array_peek(&a);
-		if (pg == NULL) {
-			int error = uvm_page_array_fill(&a, uobj, 0, false);
-			if (error != 0) {
-				break;
-			}
-			pg = uvm_page_array_peek(&a);
-			KASSERT(pg != NULL);
-		}
+	while ((pg = uvm_page_array_fill_and_peek(&a, uobj, 0, false))
+	    != NULL) {
 		uvm_page_array_advance(&a);
 		pmap_page_protect(pg, VM_PROT_NONE);
 		if (pg->flags & PG_BUSY) {
@@ -712,30 +704,12 @@ uao_detach_locked(struct uvm_object *uob
  *	or block.
  * => if PGO_ALLPAGE is set, then all pages in the object are valid targets
  *	for flushing.
- * => NOTE: we rely on the fact that the object's memq is a TAILQ and
- *	that new pages are inserted on the tail end of the list.  thus,
- *	we can make a complete pass through the object in one go by starting
- *	at the head and working towards the tail (new pages are put in
- *	front of us).
  * => NOTE: we are allowed to lock the page queues, so the caller
  *	must not be holding the lock on them [e.g. pagedaemon had
  *	better not call us with the queues locked]
  * => we return 0 unless we encountered some sort of I/O error
  *	XXXJRT currently never happens, as we never directly initiate
  *	XXXJRT I/O
- *
- * note on page traversal:
- *	we can traverse the pages in an object either by going down the
- *	linked list in "uobj->memq", or we can go over the address range
- *	by page doing hash table lookups for each address.  depending
- *	on how many pages are in the object it may be cheaper to do one
- *	or the other.  we set "by_list" to true if we are using memq.
- *	if the cost of a hash lookup was equal to the cost of the list
- *	traversal we could compare the number of pages in the start->stop
- *	range to the total number of pages in the object.  however, it
- *	seems that a hash table lookup is more expensive than the linked
- *	list traversal, so we multiply the number of pages in the
- *	start->stop range by a penalty which we define below.
  */
 
 static int
@@ -783,16 +757,8 @@ uao_put(struct uvm_object *uobj, voff_t
 	/* locked: uobj */
 	uvm_page_array_init(&a);
 	curoff = start;
-	while (curoff < stop) {
-		pg = uvm_page_array_peek(&a);
-		if (pg == NULL) {
-			int error = uvm_page_array_fill(&a, uobj, curoff,
-			    false);
-			if (error != 0) {
-				break;
-			}
-			pg = uvm_page_array_peek(&a);
-		}
+	while ((pg = uvm_page_array_fill_and_peek(&a, uobj, curoff, false)) !=
+	    NULL) {
 		if (pg->offset >= stop) {
 			break;
 		}

Index: src/sys/uvm/uvm_loan.c
diff -u src/sys/uvm/uvm_loan.c:1.81.2.1 src/sys/uvm/uvm_loan.c:1.81.2.2
--- src/sys/uvm/uvm_loan.c:1.81.2.1	Wed Nov  2 21:54:01 2011
+++ src/sys/uvm/uvm_loan.c	Sun Nov  6 22:05:00 2011
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_loan.c,v 1.81.2.1 2011/11/02 21:54:01 yamt Exp $	*/
+/*	$NetBSD: uvm_loan.c,v 1.81.2.2 2011/11/06 22:05:00 yamt Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -32,7 +32,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_loan.c,v 1.81.2.1 2011/11/02 21:54:01 yamt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_loan.c,v 1.81.2.2 2011/11/06 22:05:00 yamt Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -845,8 +845,8 @@ again:
 	 * first, get ahold of our single zero page.
 	 */
 
-	if (__predict_false((pg =
-			     TAILQ_FIRST(&uvm_loanzero_object.memq)) == NULL)) {
+	pg = uvm_pagelookup(&uvm_loanzero_object, 0);
+	if (__predict_false(pg == NULL)) {
 		while ((pg = uvm_pagealloc(&uvm_loanzero_object, 0,
 		    NULL, UVM_PGA_ZERO)) == NULL) {
 			mutex_exit(uvm_loanzero_object.vmobjlock);

Index: src/sys/uvm/uvm_object.c
diff -u src/sys/uvm/uvm_object.c:1.11.2.1 src/sys/uvm/uvm_object.c:1.11.2.2
--- src/sys/uvm/uvm_object.c:1.11.2.1	Wed Nov  2 21:54:01 2011
+++ src/sys/uvm/uvm_object.c	Sun Nov  6 22:05:00 2011
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_object.c,v 1.11.2.1 2011/11/02 21:54:01 yamt Exp $	*/
+/*	$NetBSD: uvm_object.c,v 1.11.2.2 2011/11/06 22:05:00 yamt Exp $	*/
 
 /*
  * Copyright (c) 2006, 2010 The NetBSD Foundation, Inc.
@@ -37,7 +37,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_object.c,v 1.11.2.1 2011/11/02 21:54:01 yamt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_object.c,v 1.11.2.2 2011/11/06 22:05:00 yamt Exp $");
 
 #include "opt_ddb.h"
@@ -48,6 +48,7 @@ __KERNEL_RCSID(0, "$NetBSD: uvm_object.c
 
 #include <uvm/uvm.h>
 #include <uvm/uvm_ddb.h>
+#include <uvm/uvm_page_array.h>
 
 /* Page count to fetch per single step. */
 #define	FETCH_PAGECOUNT	16
@@ -68,7 +69,6 @@ uvm_obj_init(struct uvm_object *uo, cons
 		uo->vmobjlock = NULL;
 	}
 	uo->pgops = ops;
-	TAILQ_INIT(&uo->memq);
 	LIST_INIT(&uo->uo_ubc);
 	uo->uo_npages = 0;
 	uo->uo_refs = refs;
@@ -238,8 +238,10 @@ void
 uvm_object_printit(struct uvm_object *uobj, bool full,
     void (*pr)(const char *, ...))
 {
+	struct uvm_page_array a;
 	struct vm_page *pg;
 	int cnt = 0;
+	voff_t off;
 
 	(*pr)("OBJECT %p: locked=%d, pgops=%p, npages=%d, ",
 	    uobj, mutex_owned(uobj->vmobjlock), uobj->pgops, uobj->uo_npages);
@@ -252,16 +254,22 @@ uvm_object_printit(struct uvm_object *uo
 		return;
 	}
 	(*pr)("  PAGES <pg,offset>:\n  ");
-	TAILQ_FOREACH(pg, &uobj->memq, listq.queue) {
+	uvm_page_array_init(&a);
+	off = 0;
+	while ((pg = uvm_page_array_fill_and_peek(&a, uobj, off, false))
+	    != NULL) {
 		cnt++;
 		(*pr)("<%p,0x%llx> ", pg, (long long)pg->offset);
 		if ((cnt % 3) == 0) {
 			(*pr)("\n  ");
 		}
+		off = pg->offset + PAGE_SIZE;
+		uvm_page_array_advance(&a);
 	}
 	if ((cnt % 3) != 0) {
 		(*pr)("\n");
 	}
+	uvm_page_array_fini(&a);
 }
 
 #endif /* DDB || DEBUGPRINT */

Index: src/sys/uvm/uvm_object.h
diff -u src/sys/uvm/uvm_object.h:1.31.2.1 src/sys/uvm/uvm_object.h:1.31.2.2
--- src/sys/uvm/uvm_object.h:1.31.2.1	Wed Nov  2 21:54:01 2011
+++ src/sys/uvm/uvm_object.h	Sun Nov  6 22:05:00 2011
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_object.h,v 1.31.2.1 2011/11/02 21:54:01 yamt Exp $	*/
+/*	$NetBSD: uvm_object.h,v 1.31.2.2 2011/11/06 22:05:00 yamt Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -43,10 +43,9 @@
  */
 
 struct uvm_object {
-	kmutex_t *		vmobjlock;	/* lock on memq */
+	kmutex_t *		vmobjlock;	/* lock on uo_pages */
 	const struct uvm_pagerops *pgops;	/* pager ops */
-	struct pglist		memq;		/* pages in this object */
-	int			uo_npages;	/* # of pages in memq */
+	int			uo_npages;	/* # of pages in uo_pages */
 	unsigned		uo_refs;	/* reference count */
 	struct radix_tree	uo_pages;	/* tree of pages */
 	LIST_HEAD(,ubc_map)	uo_ubc;		/* ubc mappings */

Index: src/sys/uvm/uvm_page.c
diff -u src/sys/uvm/uvm_page.c:1.178.2.1 src/sys/uvm/uvm_page.c:1.178.2.2
--- src/sys/uvm/uvm_page.c:1.178.2.1	Wed Nov  2 21:54:01 2011
+++ src/sys/uvm/uvm_page.c	Sun Nov  6 22:05:00 2011
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_page.c,v 1.178.2.1 2011/11/02 21:54:01 yamt Exp $	*/
+/*	$NetBSD: uvm_page.c,v 1.178.2.2 2011/11/06 22:05:00 yamt Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -66,7 +66,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.178.2.1 2011/11/02 21:54:01 yamt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.178.2.2 2011/11/06 22:05:00 yamt Exp $");
 
 #include "opt_ddb.h"
 #include "opt_uvmhist.h"
@@ -192,11 +192,6 @@ uvm_pageinsert_list(struct uvm_object *u
 	} else if (UVM_OBJ_IS_AOBJ(uobj)) {
 		atomic_inc_uint(&uvmexp.anonpages);
 	}
-
-	if (where)
-		TAILQ_INSERT_AFTER(&uobj->memq, where, pg, listq.queue);
-	else
-		TAILQ_INSERT_TAIL(&uobj->memq, pg, listq.queue);
 	pg->flags |= PG_TABLED;
 	uobj->uo_npages++;
 }
@@ -268,7 +263,6 @@ uvm_pageremove_list(struct uvm_object *u
 
 	/* object should be locked */
 	uobj->uo_npages--;
-	TAILQ_REMOVE(&uobj->memq, pg, listq.queue);
 	pg->flags &= ~PG_TABLED;
 	pg->uobject = NULL;
 }
@@ -1014,13 +1008,13 @@ uvm_page_recolor(int newncolors)
 			    lcv].pgfl_buckets[color].pgfl_queues[i])) != NULL) {
 				LIST_REMOVE(pg, pageq.list); /* global */
-				LIST_REMOVE(pg, listq.list); /* cpu */
+				LIST_REMOVE(pg, u.cpulist); /* cpu */
 				LIST_INSERT_HEAD(&gpgfl.pgfl_buckets[
 				    VM_PGCOLOR_BUCKET(pg)].pgfl_queues[
 				    i], pg, pageq.list);
 				LIST_INSERT_HEAD(&pgfl.pgfl_buckets[
 				    VM_PGCOLOR_BUCKET(pg)].pgfl_queues[
-				    i], pg, listq.list);
+				    i], pg, u.cpulist);
 			}
 		}
 	}
@@ -1133,7 +1127,7 @@ uvm_pagealloc_pgfl(struct uvm_cpu *ucpu,
 
  gotit:
 	LIST_REMOVE(pg, pageq.list);	/* global list */
-	LIST_REMOVE(pg, listq.list);	/* per-cpu list */
+	LIST_REMOVE(pg, u.cpulist);	/* per-cpu list */
 	uvmexp.free--;
 
 	/* update zero'd page count */
@@ -1600,7 +1594,7 @@ uvm_pagefree(struct vm_page *pg)
 	ucpu = curcpu()->ci_data.cpu_uvm;
 	pg->offset = (uintptr_t)ucpu;
 	pgfl = &ucpu->page_free[index].pgfl_buckets[color].pgfl_queues[queue];
-	LIST_INSERT_HEAD(pgfl, pg, listq.list);
+	LIST_INSERT_HEAD(pgfl, pg, u.cpulist);
 	ucpu->pages[queue]++;
 	if (ucpu->pages[PGFL_ZEROS] < ucpu->pages[PGFL_UNKNOWN]) {
 		ucpu->page_idle_zero = vm_page_zero_enable;
@@ -1759,7 +1753,7 @@ uvm_pageidlezero(void)
 				goto quit;
 			}
 			LIST_REMOVE(pg, pageq.list); /* global list */
-			LIST_REMOVE(pg, listq.list); /* per-cpu list */
+			LIST_REMOVE(pg, u.cpulist); /* per-cpu list */
 			ucpu->pages[PGFL_UNKNOWN]--;
 			uvmexp.free--;
 			KASSERT(pg->pqflags == PQ_FREE);
@@ -1782,7 +1776,7 @@ uvm_pageidlezero(void)
 				    PGFL_UNKNOWN], pg, pageq.list);
 				LIST_INSERT_HEAD(&pgfl->pgfl_buckets[
 				    nextbucket].pgfl_queues[
-				    PGFL_UNKNOWN], pg, listq.list);
+				    PGFL_UNKNOWN], pg, u.cpulist);
 				ucpu->pages[PGFL_UNKNOWN]++;
 				uvmexp.free++;
 				uvmexp.zeroaborts++;
@@ -1805,7 +1799,7 @@ uvm_pageidlezero(void)
 			    pg, pageq.list);
 			LIST_INSERT_HEAD(&pgfl->pgfl_buckets[
 			    nextbucket].pgfl_queues[PGFL_ZEROS],
-			    pg, listq.list);
+			    pg, u.cpulist);
 			ucpu->pages[PGFL_ZEROS]++;
 			uvmexp.free++;
 			uvmexp.zeropages++;
@@ -2135,11 +2129,7 @@ uvm_page_printit(struct vm_page *pg, boo
 	uobj = pg->uobject;
 	if (uobj) {
 		(*pr)("  checking object list\n");
-		TAILQ_FOREACH(tpg, &uobj->memq, listq.queue) {
-			if (tpg == pg) {
-				break;
-			}
-		}
+		tpg = uvm_pagelookup(uobj, pg->offset);
 		if (tpg)
 			(*pr)("  page found on object list\n");
 		else

Index: src/sys/uvm/uvm_page.h
diff -u src/sys/uvm/uvm_page.h:1.73.2.1 src/sys/uvm/uvm_page.h:1.73.2.2
--- src/sys/uvm/uvm_page.h:1.73.2.1	Wed Nov  2 21:54:01 2011
+++ src/sys/uvm/uvm_page.h	Sun Nov  6 22:05:00 2011
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_page.h,v 1.73.2.1 2011/11/02 21:54:01 yamt Exp $	*/
+/*	$NetBSD: uvm_page.h,v 1.73.2.2 2011/11/06 22:05:00 yamt Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -116,16 +116,15 @@ struct vm_page {
 	} pageq;				/* queue info for FIFO
 						 * queue or free list (P) */
-	/*
-	 * listq.list is used for per-cpu freelist.
-	 */
 	union {
-		TAILQ_ENTRY(vm_page) queue;
-		LIST_ENTRY(vm_page) list;
-	} listq;				/* pages in same object (O)*/
-
-	struct vm_anon		*uanon;		/* anon (O,P) */
-	struct uvm_object	*uobject;	/* object (O,P) */
+		struct {
+			struct vm_anon *o_anon;	/* anon (O,P) */
+			struct uvm_object *o_object; /* object (O,P) */
+		} owner;
+#define	uanon	u.owner.o_anon
+#define	uobject	u.owner.o_object
+		LIST_ENTRY(vm_page) cpulist;
+	} u;
 	voff_t			offset;		/* offset into object (O,P) */
 	uint16_t		flags;		/* object flags [O] */
 	uint16_t		loan_count;	/* number of active loans

Index: src/sys/uvm/uvm_pglist.c
diff -u src/sys/uvm/uvm_pglist.c:1.62 src/sys/uvm/uvm_pglist.c:1.62.2.1
--- src/sys/uvm/uvm_pglist.c:1.62	Tue Sep 27 01:02:39 2011
+++ src/sys/uvm/uvm_pglist.c	Sun Nov  6 22:05:00 2011
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_pglist.c,v 1.62 2011/09/27 01:02:39 jym Exp $	*/
+/*	$NetBSD: uvm_pglist.c,v 1.62.2.1 2011/11/06 22:05:00 yamt Exp $	*/
 
 /*-
  * Copyright (c) 1997 The NetBSD Foundation, Inc.
@@ -35,7 +35,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_pglist.c,v 1.62 2011/09/27 01:02:39 jym Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_pglist.c,v 1.62.2.1 2011/11/06 22:05:00 yamt Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -102,7 +102,7 @@ uvm_pglist_add(struct vm_page *pg, struc
 		panic("uvm_pglistalloc: page not on freelist");
 #endif
 	LIST_REMOVE(pg, pageq.list);	/* global */
-	LIST_REMOVE(pg, listq.list);	/* cpu */
+	LIST_REMOVE(pg, u.cpulist);	/* cpu */
 	uvmexp.free--;
 	if (pg->flags & PG_ZERO)
 		uvmexp.zeropages--;
@@ -581,7 +581,7 @@ uvm_pglistfree(struct pglist *list)
 		LIST_INSERT_HEAD(&uvm.page_free[index].pgfl_buckets[color].
 		    pgfl_queues[queue], pg, pageq.list);
 		LIST_INSERT_HEAD(&ucpu->page_free[index].pgfl_buckets[color].
-		    pgfl_queues[queue], pg, listq.list);
+		    pgfl_queues[queue], pg, u.cpulist);
 		uvmexp.free++;
 		if (iszero)
 			uvmexp.zeropages++;
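A note on the uvm_page.h hunk above: the owner pointers and the per-cpu
freelist linkage now share one union.  In outline (a sketch of just the
new members; the overlay is presumably safe because a page sitting on a
per-cpu freelist has no owner, and the uvm_page.c hunks show
uvm_pageremove_list() clearing pg->uobject):

	union {
		struct {
			struct vm_anon *o_anon;	     /* owner: anon (O,P) */
			struct uvm_object *o_object; /* owner: object (O,P) */
		} owner;
#define	uanon	u.owner.o_anon
#define	uobject	u.owner.o_object
		LIST_ENTRY(vm_page) cpulist; /* per-cpu freelist linkage */
	} u;

With the two #defines in place, existing pg->uanon and pg->uobject
references compile unchanged, while the free-list code switches from
listq.list to u.cpulist.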