Module Name:	src
Committed By:	yamt
Date:		Fri Feb 17 08:18:57 UTC 2012
Modified Files:
	src/sys/miscfs/genfs [yamt-pagecache]: genfs_io.c
	src/sys/ufs/ufs [yamt-pagecache]: ufs_inode.c
	src/sys/uvm [yamt-pagecache]: uvm_bio.c uvm_page.c uvm_page.h
	    uvm_vnode.c

Log Message:
byebye PG_HOLE as it turned out to be unnecessary.

To generate a diff of this commit:
cvs rdiff -u -r1.53.2.12 -r1.53.2.13 src/sys/miscfs/genfs/genfs_io.c
cvs rdiff -u -r1.88.2.1 -r1.88.2.2 src/sys/ufs/ufs/ufs_inode.c
cvs rdiff -u -r1.79.2.1 -r1.79.2.2 src/sys/uvm/uvm_bio.c
cvs rdiff -u -r1.178.2.12 -r1.178.2.13 src/sys/uvm/uvm_page.c
cvs rdiff -u -r1.73.2.8 -r1.73.2.9 src/sys/uvm/uvm_page.h
cvs rdiff -u -r1.97.2.4 -r1.97.2.5 src/sys/uvm/uvm_vnode.c

Please note that diffs are not public domain; they are subject to
the copyright notices on the relevant files.
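In short: PG_HOLE and PG_RDONLY both meant "do not map this page
writably", so the separate bit bought nothing, and pages over
unallocated file blocks are now simply marked PG_RDONLY.  A minimal
illustrative sketch of the simplified test (stand-in types, not code
from the tree; cf. the uvm_pagereadonly_p() hunk below):

	/*
	 * Illustrative sketch only.  struct vm_page_sketch is a
	 * hypothetical stand-in for the kernel's struct vm_page.
	 */
	#include <stdbool.h>
	#include <stdint.h>

	#define PG_RDONLY	0x0080	/* page must be mapped read-only */

	struct vm_page_sketch {
		uint32_t flags;
	};

	/* before this commit: (pg->flags & (PG_RDONLY|PG_HOLE)) != 0 */
	static bool
	page_mapped_readonly(const struct vm_page_sketch *pg)
	{
		/* after: PG_RDONLY alone covers hole pages as well */
		return (pg->flags & PG_RDONLY) != 0;
	}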
Modified files:

Index: src/sys/miscfs/genfs/genfs_io.c
diff -u src/sys/miscfs/genfs/genfs_io.c:1.53.2.12 src/sys/miscfs/genfs/genfs_io.c:1.53.2.13
--- src/sys/miscfs/genfs/genfs_io.c:1.53.2.12	Sun Feb  5 08:23:41 2012
+++ src/sys/miscfs/genfs/genfs_io.c	Fri Feb 17 08:18:57 2012
@@ -1,4 +1,4 @@
-/*	$NetBSD: genfs_io.c,v 1.53.2.12 2012/02/05 08:23:41 yamt Exp $	*/
+/*	$NetBSD: genfs_io.c,v 1.53.2.13 2012/02/17 08:18:57 yamt Exp $	*/
 
 /*
  * Copyright (c) 1982, 1986, 1989, 1993
@@ -31,7 +31,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: genfs_io.c,v 1.53.2.12 2012/02/05 08:23:41 yamt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: genfs_io.c,v 1.53.2.13 2012/02/17 08:18:57 yamt Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -377,7 +377,7 @@ startover:
 			 * it's caller's responsibility to allocate blocks
 			 * beforehand for the overwrite case.
 			 */
-			pg->flags &= ~(PG_RDONLY|PG_HOLE);
+			pg->flags &= ~PG_RDONLY;
 			/*
 			 * mark the page DIRTY.
 			 * otherwise another thread can do putpages and pull
@@ -399,7 +399,7 @@ startover:
 			struct vm_page *pg = pgs[ridx + i];
 
 			if ((pg->flags & PG_FAKE) ||
-			    (memwrite && (pg->flags & (PG_RDONLY|PG_HOLE)) != 0)) {
+			    (memwrite && (pg->flags & PG_RDONLY) != 0)) {
 				break;
 			}
 		}
@@ -527,7 +527,7 @@ startover:
 			size_t b;
 
 			KASSERT((offset & (PAGE_SIZE - 1)) == 0);
-			if ((pgs[pidx]->flags & PG_HOLE)) {
+			if ((pgs[pidx]->flags & PG_RDONLY)) {
 				sawhole = true;
 			}
 			b = MIN(PAGE_SIZE, bytes);
@@ -581,13 +581,13 @@ startover:
 		/*
 		 * if this block isn't allocated, zero it instead of
 		 * reading it.  unless we are going to allocate blocks,
-		 * mark the pages we zeroed PG_HOLE.
+		 * mark the pages we zeroed PG_RDONLY.
 		 */
 
 		if (blkno == (daddr_t)-1) {
 			int holepages = (round_page(offset + iobytes) -
 			    trunc_page(offset)) >> PAGE_SHIFT;
-			UVMHIST_LOG(ubchist, "lbn 0x%x -> HOLE", lbn,0,0,0);
+			UVMHIST_LOG(ubchist, "lbn 0x%x -> RDONLY", lbn,0,0,0);
 
 			sawhole = true;
 			memset((char *)kva + (offset - startoffset), 0,
@@ -597,7 +597,7 @@ startover:
 			if (!blockalloc) {
 				mutex_enter(uobj->vmobjlock);
 				for (i = 0; i < holepages; i++) {
-					pgs[pidx + i]->flags |= PG_HOLE;
+					pgs[pidx + i]->flags |= PG_RDONLY;
 				}
 				mutex_exit(uobj->vmobjlock);
 			}
@@ -650,7 +650,7 @@ loopdone:
 
 	/*
 	 * if this we encountered a hole then we have to do a little more work.
-	 * if blockalloc is false, we marked the page PG_HOLE so that future
+	 * if blockalloc is false, we marked the page PG_RDONLY so that future
 	 * write accesses to the page will fault again.
 	 * if blockalloc is true, we must make sure that the backing store for
 	 * the page is completely allocated while the pages are locked.
@@ -669,7 +669,7 @@ loopdone:
 			if (pg == NULL) {
 				continue;
 			}
-			pg->flags &= ~PG_HOLE;
+			pg->flags &= ~PG_RDONLY;
 			uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_DIRTY);
 			UVMHIST_LOG(ubchist, "mark dirty pg %p", pg,0,0,0);
@@ -734,7 +734,7 @@ out:
 			KASSERT(uvm_pagegetdirty(pg) == UVM_PAGE_STATUS_CLEAN);
 			pg->flags &= ~PG_FAKE;
 		}
-		KASSERT(!blockalloc || (pg->flags & PG_HOLE) == 0);
+		KASSERT(!blockalloc || (pg->flags & PG_RDONLY) == 0);
 		if (i < ridx || i >= ridx + orignmempages || async) {
 			UVMHIST_LOG(ubchist, "unbusy pg %p offset 0x%x",
 			    pg, pg->offset,0,0);

Index: src/sys/ufs/ufs/ufs_inode.c
diff -u src/sys/ufs/ufs/ufs_inode.c:1.88.2.1 src/sys/ufs/ufs/ufs_inode.c:1.88.2.2
--- src/sys/ufs/ufs/ufs_inode.c:1.88.2.1	Wed Nov  2 21:54:00 2011
+++ src/sys/ufs/ufs/ufs_inode.c	Fri Feb 17 08:18:56 2012
@@ -1,4 +1,4 @@
-/*	$NetBSD: ufs_inode.c,v 1.88.2.1 2011/11/02 21:54:00 yamt Exp $	*/
+/*	$NetBSD: ufs_inode.c,v 1.88.2.2 2012/02/17 08:18:56 yamt Exp $	*/
 
 /*
  * Copyright (c) 1991, 1993
@@ -37,7 +37,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: ufs_inode.c,v 1.88.2.1 2011/11/02 21:54:00 yamt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: ufs_inode.c,v 1.88.2.2 2012/02/17 08:18:56 yamt Exp $");
 
 #if defined(_KERNEL_OPT)
 #include "opt_ffs.h"
@@ -279,7 +279,7 @@ ufs_balloc_range(struct vnode *vp, off_t
 
 	/*
 	 * if the allocation succeeded, mark all the pages dirty
-	 * and clear PG_HOLE on any pages that are now fully backed
+	 * and clear PG_RDONLY on any pages that are now fully backed
	 * by disk blocks.  if the allocation failed, we do not invalidate
	 * the pages since they might have already existed and been dirty,
	 * in which case we need to keep them around.  if we created the pages,
@@ -295,7 +295,7 @@ ufs_balloc_range(struct vnode *vp, off_t
 		if (!error) {
 			if (off <= pagestart + (i << PAGE_SHIFT) &&
 			    pagestart + ((i + 1) << PAGE_SHIFT) <= eob) {
-				pgs[i]->flags &= ~PG_HOLE;
+				pgs[i]->flags &= ~PG_RDONLY;
 			}
 			uvm_pagemarkdirty(pgs[i], UVM_PAGE_STATUS_DIRTY);
 		}

Index: src/sys/uvm/uvm_bio.c
diff -u src/sys/uvm/uvm_bio.c:1.79.2.1 src/sys/uvm/uvm_bio.c:1.79.2.2
--- src/sys/uvm/uvm_bio.c:1.79.2.1	Wed Nov  2 21:54:00 2011
+++ src/sys/uvm/uvm_bio.c	Fri Feb 17 08:18:57 2012
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_bio.c,v 1.79.2.1 2011/11/02 21:54:00 yamt Exp $	*/
+/*	$NetBSD: uvm_bio.c,v 1.79.2.2 2012/02/17 08:18:57 yamt Exp $	*/
 
 /*
  * Copyright (c) 1998 Chuck Silvers.
@@ -34,7 +34,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_bio.c,v 1.79.2.1 2011/11/02 21:54:00 yamt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_bio.c,v 1.79.2.2 2012/02/17 08:18:57 yamt Exp $");
 
 #include "opt_uvmhist.h"
 #include "opt_ubc.h"
@@ -265,13 +265,13 @@ ubc_fault_page(const struct uvm_faultinf
 
	/*
	 * Note that a page whose backing store is partially allocated
-	 * is marked as PG_HOLE.
+	 * is marked as PG_RDONLY.
	 *
	 * it's a responsibility of ubc_alloc's caller to allocate backing
	 * blocks before writing to the window.
	 */
 
-	KASSERT((pg->flags & PG_HOLE) == 0 ||
+	KASSERT((pg->flags & PG_RDONLY) == 0 ||
	    (access_type & VM_PROT_WRITE) == 0 ||
	    pg->offset < umap->writeoff ||
	    pg->offset + PAGE_SIZE > umap->writeoff + umap->writelen);

Index: src/sys/uvm/uvm_page.c
diff -u src/sys/uvm/uvm_page.c:1.178.2.12 src/sys/uvm/uvm_page.c:1.178.2.13
--- src/sys/uvm/uvm_page.c:1.178.2.12	Wed Jan  4 16:31:17 2012
+++ src/sys/uvm/uvm_page.c	Fri Feb 17 08:18:57 2012
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_page.c,v 1.178.2.12 2012/01/04 16:31:17 yamt Exp $	*/
+/*	$NetBSD: uvm_page.c,v 1.178.2.13 2012/02/17 08:18:57 yamt Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -66,7 +66,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.178.2.12 2012/01/04 16:31:17 yamt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.178.2.13 2012/02/17 08:18:57 yamt Exp $");
 
 #include "opt_ddb.h"
 #include "opt_uvmhist.h"
@@ -2110,7 +2110,7 @@ uvm_pagereadonly_p(struct vm_page *pg)
 
	KASSERT(uobj == NULL || mutex_owned(uobj->vmobjlock));
	KASSERT(uobj != NULL || mutex_owned(pg->uanon->an_lock));
-	if ((pg->flags & (PG_RDONLY|PG_HOLE)) != 0) {
+	if ((pg->flags & PG_RDONLY) != 0) {
		return true;
	}
	if (uvm_pagegetdirty(pg) == UVM_PAGE_STATUS_CLEAN) {

Index: src/sys/uvm/uvm_page.h
diff -u src/sys/uvm/uvm_page.h:1.73.2.8 src/sys/uvm/uvm_page.h:1.73.2.9
--- src/sys/uvm/uvm_page.h:1.73.2.8	Wed Nov 30 14:33:47 2011
+++ src/sys/uvm/uvm_page.h	Fri Feb 17 08:18:57 2012
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_page.h,v 1.73.2.8 2011/11/30 14:33:47 yamt Exp $	*/
+/*	$NetBSD: uvm_page.h,v 1.73.2.9 2012/02/17 08:18:57 yamt Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -160,9 +160,8 @@ struct vm_page {
 * is only set when the page is on no queues, and is cleared when the page
 * is placed on the free list.
 *
- * PG_RDONLY and PG_HOLE acts like a "read-only count".  ie. either of
- * them is set, the page should not be mapped writably.  typically
- * they are set by pgo_get to inform the fault handler.
+ * PG_RDONLY is used to indicate that the page should not be mapped writably.
+ * typically they are set by pgo_get to inform the fault handler.
 *
 * if you want to renumber PG_CLEAN and PG_DIRTY, check __CTASSERTs in
 * uvm_page_status.c first.
@@ -178,7 +177,6 @@ struct vm_page {
 #define	PG_RDONLY	0x0080		/* page must be mapped read-only */
 #define	PG_ZERO		0x0100		/* page is pre-zero'd */
 #define	PG_TABLED	0x0200		/* page is in VP table */
-#define	PG_HOLE		0x0400		/* XXX */
 
 #define PG_PAGER1	0x1000		/* pager-specific flag */
 #define PG_PAGER2	0x2000		/* pager-specific flag */
@@ -186,7 +184,7 @@ struct vm_page {
 #define	UVM_PGFLAGBITS \
	"\20\1CLEAN\2DIRTY\3BUSY\4WANTED" \
	"\5PAGEOUT\6RELEASED\7FAKE\10RDONLY" \
-	"\11ZERO\12TABLED\13HOLE" \
+	"\11ZERO\12TABLED" \
	"\15PAGER1\16PAGER2"
 
 #define PQ_FREE		0x0001		/* page is on free list */

Index: src/sys/uvm/uvm_vnode.c
diff -u src/sys/uvm/uvm_vnode.c:1.97.2.4 src/sys/uvm/uvm_vnode.c:1.97.2.5
--- src/sys/uvm/uvm_vnode.c:1.97.2.4	Wed Jan 18 02:09:06 2012
+++ src/sys/uvm/uvm_vnode.c	Fri Feb 17 08:18:57 2012
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_vnode.c,v 1.97.2.4 2012/01/18 02:09:06 yamt Exp $	*/
+/*	$NetBSD: uvm_vnode.c,v 1.97.2.5 2012/02/17 08:18:57 yamt Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -45,7 +45,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.97.2.4 2012/01/18 02:09:06 yamt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.97.2.5 2012/02/17 08:18:57 yamt Exp $");
 
 #include "opt_uvmhist.h"
@@ -318,9 +318,9 @@ uvn_findpage(struct uvm_object *uobj, vo
			continue;
		}
 
-		/* skip PG_RDONLY and PG_HOLE pages if requested */
-		if ((flags & UFP_NORDONLY) &&
-		    (pg->flags & (PG_RDONLY|PG_HOLE))) {
+		/* skip PG_RDONLY pages if requested */
+		if ((flags & UFP_NORDONLY) != 0 &&
+		    (pg->flags & PG_RDONLY) != 0) {
			UVMHIST_LOG(ubchist, "nordonly",0,0,0,0);
			goto skip;
		}
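For reference, a hedged sketch of the hole-page lifecycle after this
change (the helper names here are hypothetical; the real logic lives
in genfs_getpages() and ufs_balloc_range() above):

	#include <stdbool.h>
	#include <stdint.h>
	#include <string.h>

	#define PG_RDONLY	0x0080	/* page must be mapped read-only */

	struct vm_page_sketch {		/* hypothetical stand-in for struct vm_page */
		uint32_t flags;
		bool dirty;
	};

	/*
	 * Read over an unallocated block (blkno == (daddr_t)-1): the page
	 * is zero-filled, and if we are not allocating blocks it is marked
	 * PG_RDONLY so that a later write access faults again.
	 */
	static void
	zero_fill_hole_page(struct vm_page_sketch *pg, char *kva, size_t len,
	    bool blockalloc)
	{
		memset(kva, 0, len);		/* holes read back as zeroes */
		if (!blockalloc)
			pg->flags |= PG_RDONLY; /* force the next write to fault */
	}

	/*
	 * Once backing blocks are allocated for the page, it may be mapped
	 * writably again, and the zeroed contents must reach the disk.
	 */
	static void
	hole_page_backed(struct vm_page_sketch *pg)
	{
		pg->flags &= ~PG_RDONLY;
		pg->dirty = true;
	}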