Module Name:	src
Committed By:	ad
Date:		Sat Mar 14 20:45:24 UTC 2020
Modified Files:
	src/external/cddl/osnet/dist/uts/common/fs/zfs: zfs_vnops.c
	src/sys/kern: vfs_subr.c
	src/sys/miscfs/genfs: genfs_io.c
	src/sys/sys: vnode.h
	src/sys/ufs/lfs: lfs_pages.c
	src/sys/uvm: uvm_object.h uvm_page_status.c uvm_pager.h uvm_vnode.c

Log Message:
Make uvm_pagemarkdirty() responsible for putting vnodes onto the syncer
work list.  Proposed on tech-kern@.

To generate a diff of this commit:
cvs rdiff -u -r1.63 -r1.64 \
    src/external/cddl/osnet/dist/uts/common/fs/zfs/zfs_vnops.c
cvs rdiff -u -r1.483 -r1.484 src/sys/kern/vfs_subr.c
cvs rdiff -u -r1.92 -r1.93 src/sys/miscfs/genfs/genfs_io.c
cvs rdiff -u -r1.292 -r1.293 src/sys/sys/vnode.h
cvs rdiff -u -r1.23 -r1.24 src/sys/ufs/lfs/lfs_pages.c
cvs rdiff -u -r1.37 -r1.38 src/sys/uvm/uvm_object.h
cvs rdiff -u -r1.3 -r1.4 src/sys/uvm/uvm_page_status.c
cvs rdiff -u -r1.45 -r1.46 src/sys/uvm/uvm_pager.h
cvs rdiff -u -r1.109 -r1.110 src/sys/uvm/uvm_vnode.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.
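In outline, the change replaces the old per-caller bookkeeping (the
genfs_markdirty() helper plus the VI_WRMAPDIRTY flag) with a single
notification point: struct uvm_pagerops gains an optional pgo_markdirty
hook, uvm_pagemarkdirty() invokes it when an object gains its first
dirty page, and the vnode pager's uvn_markdirty() puts the vnode on the
syncer work list.  The stand-alone C model below is a sketch of that
mechanism only, not kernel code: a plain counter stands in for the radix
tree's dirty tag, and printf stands in for vn_syncer_add_to_worklist().

	/*
	 * Illustrative model of the new dirtying path; see the real
	 * code in uvm_page_status.c and uvm_vnode.c below.
	 */
	#include <stddef.h>
	#include <stdio.h>

	struct uobject;

	struct pagerops {
		/* optional: called when object gains its first dirty page */
		void	(*pgo_markdirty)(struct uobject *);
	};

	struct uobject {
		const struct pagerops	*pgops;
		unsigned		ndirty;	/* stands in for dirty tag */
	};

	static void
	markdirty(struct uobject *uobj)
	{

		/* clean -> dirty transition: notify the pager exactly once */
		if (uobj->ndirty++ == 0 && uobj->pgops->pgo_markdirty != NULL)
			(*uobj->pgops->pgo_markdirty)(uobj);
	}

	static void
	vnode_markdirty(struct uobject *uobj)
	{

		/* real vnode pager adds the vnode to the syncer work list */
		(void)uobj;
		printf("on syncer work list\n");
	}

	static const struct pagerops vnodeops = {
		.pgo_markdirty = vnode_markdirty,
	};

	int
	main(void)
	{
		struct uobject obj = { .pgops = &vnodeops };

		markdirty(&obj);	/* fires the hook */
		markdirty(&obj);	/* already dirty: no second call */
		return 0;
	}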
Modified files:

Index: src/external/cddl/osnet/dist/uts/common/fs/zfs/zfs_vnops.c
diff -u src/external/cddl/osnet/dist/uts/common/fs/zfs/zfs_vnops.c:1.63 src/external/cddl/osnet/dist/uts/common/fs/zfs/zfs_vnops.c:1.64
--- src/external/cddl/osnet/dist/uts/common/fs/zfs/zfs_vnops.c:1.63	Sun Mar  8 19:59:45 2020
+++ src/external/cddl/osnet/dist/uts/common/fs/zfs/zfs_vnops.c	Sat Mar 14 20:45:23 2020
@@ -6028,19 +6028,9 @@ zfs_netbsd_getpages(void *v)
 		pg->flags &= ~(PG_FAKE);
 	}
 
-	if (memwrite) {
-		if (uvm_pagegetdirty(pg) == UVM_PAGE_STATUS_CLEAN) {
-			/* For write faults, start dirtiness tracking. */
-			uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_UNKNOWN);
-		}
-		mutex_enter(vp->v_interlock);
-		if ((vp->v_iflag & VI_ONWORKLST) == 0) {
-			vn_syncer_add_to_worklist(vp, filedelay);
-		}
-		if ((vp->v_iflag & (VI_WRMAP|VI_WRMAPDIRTY)) == VI_WRMAP) {
-			vp->v_iflag |= VI_WRMAPDIRTY;
-		}
-		mutex_exit(vp->v_interlock);
+	if (memwrite && uvm_pagegetdirty(pg) == UVM_PAGE_STATUS_CLEAN) {
+		/* For write faults, start dirtiness tracking. */
+		uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_UNKNOWN);
 	}
 	rw_exit(rw);
 	ap->a_m[ap->a_centeridx] = pg;

Index: src/sys/kern/vfs_subr.c
diff -u src/sys/kern/vfs_subr.c:1.483 src/sys/kern/vfs_subr.c:1.484
--- src/sys/kern/vfs_subr.c:1.483	Sun Mar  1 21:39:07 2020
+++ src/sys/kern/vfs_subr.c	Sat Mar 14 20:45:23 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: vfs_subr.c,v 1.483 2020/03/01 21:39:07 ad Exp $	*/
+/*	$NetBSD: vfs_subr.c,v 1.484 2020/03/14 20:45:23 ad Exp $	*/
 
 /*-
  * Copyright (c) 1997, 1998, 2004, 2005, 2007, 2008, 2019, 2020
@@ -69,7 +69,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: vfs_subr.c,v 1.483 2020/03/01 21:39:07 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: vfs_subr.c,v 1.484 2020/03/14 20:45:23 ad Exp $");
 
 #ifdef _KERNEL_OPT
 #include "opt_ddb.h"
@@ -422,10 +422,8 @@ brelvp(struct buf *bp)
 		bufremvn(bp);
 
 	if ((vp->v_iflag & (VI_ONWORKLST | VI_PAGES)) == VI_ONWORKLST &&
-	    LIST_FIRST(&vp->v_dirtyblkhd) == NULL) {
-		KASSERT((vp->v_iflag & VI_WRMAPDIRTY) == 0);
+	    LIST_FIRST(&vp->v_dirtyblkhd) == NULL)
 		vn_syncer_remove_from_worklist(vp);
-	}
 
 	bp->b_objlock = &buffer_lock;
 	bp->b_vp = NULL;
@@ -463,10 +461,8 @@ reassignbuf(struct buf *bp, struct vnode
 		listheadp = &vp->v_cleanblkhd;
 		if ((vp->v_iflag & (VI_ONWORKLST | VI_PAGES)) == VI_ONWORKLST &&
-		    LIST_FIRST(&vp->v_dirtyblkhd) == NULL) {
-			KASSERT((vp->v_iflag & VI_WRMAPDIRTY) == 0);
+		    LIST_FIRST(&vp->v_dirtyblkhd) == NULL)
 			vn_syncer_remove_from_worklist(vp);
-		}
 	} else {
 		listheadp = &vp->v_dirtyblkhd;
 		if ((vp->v_iflag & VI_ONWORKLST) == 0) {

Index: src/sys/miscfs/genfs/genfs_io.c
diff -u src/sys/miscfs/genfs/genfs_io.c:1.92 src/sys/miscfs/genfs/genfs_io.c:1.93
--- src/sys/miscfs/genfs/genfs_io.c:1.92	Sat Mar 14 20:23:51 2020
+++ src/sys/miscfs/genfs/genfs_io.c	Sat Mar 14 20:45:23 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: genfs_io.c,v 1.92 2020/03/14 20:23:51 ad Exp $	*/
+/*	$NetBSD: genfs_io.c,v 1.93 2020/03/14 20:45:23 ad Exp $	*/
 
 /*
  * Copyright (c) 1982, 1986, 1989, 1993
@@ -31,7 +31,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: genfs_io.c,v 1.92 2020/03/14 20:23:51 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: genfs_io.c,v 1.93 2020/03/14 20:45:23 ad Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -61,7 +61,6 @@ static int genfs_getpages_read(struct vn
 static int genfs_do_io(struct vnode *, off_t, vaddr_t, size_t, int,
     enum uio_rw, void (*)(struct buf *));
 static void genfs_rel_pages(struct vm_page **, unsigned int);
-static void genfs_markdirty(struct vnode *);
 
 int genfs_maxdio = MAXPHYS;
 
@@ -83,22 +82,6 @@ genfs_rel_pages(struct vm_page **pgs, un
 	uvm_page_unbusy(pgs, npages);
 }
 
-static void
-genfs_markdirty(struct vnode *vp)
-{
-
-	KASSERT(rw_write_held(vp->v_uobj.vmobjlock));
-
-	mutex_enter(vp->v_interlock);
-	if ((vp->v_iflag & VI_ONWORKLST) == 0) {
-		vn_syncer_add_to_worklist(vp, filedelay);
-	}
-	if ((vp->v_iflag & (VI_WRMAP|VI_WRMAPDIRTY)) == VI_WRMAP) {
-		vp->v_iflag |= VI_WRMAPDIRTY;
-	}
-	mutex_exit(vp->v_interlock);
-}
-
 /*
  * generic VM getpages routine.
  * Return PG_BUSY pages for the given range,
@@ -278,7 +261,6 @@ startover:
 				    UVM_PAGE_STATUS_UNKNOWN);
 			}
 		}
-		genfs_markdirty(vp);
 	}
 	goto out_err;
 }
@@ -547,9 +529,6 @@ out:
 			uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_UNKNOWN);
 		}
 	}
-	if (memwrite) {
-		genfs_markdirty(vp);
-	}
 	rw_exit(uobj->vmobjlock);
 	if (ap->a_m != NULL) {
 		memcpy(ap->a_m, &pgs[ridx],
@@ -912,8 +891,6 @@ genfs_do_putpages(struct vnode *vp, off_
 retry:
 	modified = false;
 	flags = origflags;
-	KASSERT((vp->v_iflag & VI_ONWORKLST) != 0 ||
-	    (vp->v_iflag & VI_WRMAPDIRTY) == 0);
 
 	/*
	 * shortcut if we have no pages to process.
@@ -921,10 +898,14 @@ retry:
 
 	nodirty = radix_tree_empty_tagged_tree_p(&uobj->uo_pages,
 	    UVM_PAGE_DIRTY_TAG);
+#ifdef DIAGNOSTIC
+	mutex_enter(vp->v_interlock);
+	KASSERT((vp->v_iflag & VI_ONWORKLST) != 0 || nodirty);
+	mutex_exit(vp->v_interlock);
+#endif
 	if (uobj->uo_npages == 0 || (dirtyonly && nodirty)) {
 		mutex_enter(vp->v_interlock);
 		if (vp->v_iflag & VI_ONWORKLST) {
-			vp->v_iflag &= ~VI_WRMAPDIRTY;
 			if (LIST_FIRST(&vp->v_dirtyblkhd) == NULL)
 				vn_syncer_remove_from_worklist(vp);
 		}
@@ -1150,7 +1131,6 @@ retry:
 		 */
 
 		if (needs_clean) {
-			KDASSERT((vp->v_iflag & VI_ONWORKLST));
 			wasclean = false;
 			memset(pgs, 0, sizeof(pgs));
 			pg->flags |= PG_BUSY;
@@ -1320,7 +1300,7 @@ retry:
 	 */
 
 	mutex_enter(vp->v_interlock);
-	if (modified && (vp->v_iflag & VI_WRMAPDIRTY) != 0 &&
+	if (modified && (vp->v_iflag & VI_WRMAP) != 0 &&
 	    (vp->v_type != VBLK ||
 	    (vp->v_mount->mnt_flag & MNT_NODEVMTIME) == 0)) {
 		GOP_MARKUPDATE(vp, GOP_UPDATE_MODIFIED);
@@ -1334,7 +1314,6 @@ retry:
 	if ((vp->v_iflag & VI_ONWORKLST) != 0 &&
 	    radix_tree_empty_tagged_tree_p(&uobj->uo_pages,
 	    UVM_PAGE_DIRTY_TAG)) {
-		vp->v_iflag &= ~VI_WRMAPDIRTY;
 		if (LIST_FIRST(&vp->v_dirtyblkhd) == NULL)
 			vn_syncer_remove_from_worklist(vp);
 	}
@@ -1635,9 +1614,6 @@ genfs_compat_getpages(void *v)
 		    UFP_NOWAIT|UFP_NOALLOC|
 		    (memwrite ? UFP_NORDONLY : 0));
 		error = ap->a_m[ap->a_centeridx] == NULL ? EBUSY : 0;
-		if (error == 0 && memwrite) {
-			genfs_markdirty(vp);
-		}
 		return error;
 	}
 	if (origoffset + (ap->a_centeridx << PAGE_SHIFT) >= vp->v_size) {
@@ -1691,9 +1667,6 @@ genfs_compat_getpages(void *v)
 	if (error) {
 		uvm_page_unbusy(pgs, npages);
 	}
-	if (error == 0 && memwrite) {
-		genfs_markdirty(vp);
-	}
 	rw_exit(uobj->vmobjlock);
 	return error;
 }

Index: src/sys/sys/vnode.h
diff -u src/sys/sys/vnode.h:1.292 src/sys/sys/vnode.h:1.293
--- src/sys/sys/vnode.h:1.292	Thu Mar  5 15:18:55 2020
+++ src/sys/sys/vnode.h	Sat Mar 14 20:45:23 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: vnode.h,v 1.292 2020/03/05 15:18:55 riastradh Exp $	*/
+/*	$NetBSD: vnode.h,v 1.293 2020/03/14 20:45:23 ad Exp $	*/
 
 /*-
  * Copyright (c) 2008 The NetBSD Foundation, Inc.
@@ -210,9 +210,8 @@ typedef struct vnode vnode_t;
 #define	VI_TEXT		0x00000100	/* vnode is a pure text prototype */
 #define	VI_EXECMAP	0x00000200	/* might have PROT_EXEC mappings */
 #define	VI_WRMAP	0x00000400	/* might have PROT_WRITE u. mappings */
-#define	VI_WRMAPDIRTY	0x00000800	/* might have dirty pages */
+#define	VI_PAGES	0x00000800	/* UVM object has >0 pages */
 #define	VI_ONWORKLST	0x00004000	/* On syncer work-list */
-#define	VI_PAGES	0x00008000	/* UVM object has >0 pages */
 
 /*
  * The third set are locked by the underlying file system.
@@ -221,7 +220,7 @@ typedef struct vnode vnode_t;
 
 #define	VNODE_FLAGBITS \
     "\20\1ROOT\2SYSTEM\3ISTTY\4MAPPED\5MPSAFE\6LOCKSWORK\11TEXT\12EXECMAP" \
-    "\13WRMAP\14WRMAPDIRTY\17ONWORKLST\31DIROP"
+    "\13WRMAP\14PAGES\17ONWORKLST\31DIROP"
 
 #define	VSIZENOTSET	((voff_t)-1)
 

Index: src/sys/ufs/lfs/lfs_pages.c
diff -u src/sys/ufs/lfs/lfs_pages.c:1.23 src/sys/ufs/lfs/lfs_pages.c:1.24
--- src/sys/ufs/lfs/lfs_pages.c:1.23	Sat Mar 14 20:23:51 2020
+++ src/sys/ufs/lfs/lfs_pages.c	Sat Mar 14 20:45:23 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: lfs_pages.c,v 1.23 2020/03/14 20:23:51 ad Exp $	*/
+/*	$NetBSD: lfs_pages.c,v 1.24 2020/03/14 20:45:23 ad Exp $	*/
 
 /*-
  * Copyright (c) 1999, 2000, 2001, 2002, 2003, 2019 The NetBSD Foundation, Inc.
@@ -60,7 +60,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: lfs_pages.c,v 1.23 2020/03/14 20:23:51 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: lfs_pages.c,v 1.24 2020/03/14 20:45:23 ad Exp $");
 
 #ifdef _KERNEL_OPT
 #include "opt_compat_netbsd.h"
@@ -455,11 +455,12 @@ retry:
 	 * If there are no pages, don't do anything.
 	 */
 	if (vp->v_uobj.uo_npages == 0) {
+		mutex_enter(vp->v_interlock);
 		if ((vp->v_iflag & VI_ONWORKLST) &&
 		    LIST_FIRST(&vp->v_dirtyblkhd) == NULL) {
-			vp->v_iflag &= ~VI_WRMAPDIRTY;
 			vn_syncer_remove_from_worklist(vp);
 		}
+		mutex_exit(vp->v_interlock);
 		if (trans_mp)
 			fstrans_done(trans_mp);
 		rw_exit(vp->v_uobj.vmobjlock);

Index: src/sys/uvm/uvm_object.h
diff -u src/sys/uvm/uvm_object.h:1.37 src/sys/uvm/uvm_object.h:1.38
--- src/sys/uvm/uvm_object.h:1.37	Sun Feb 23 15:46:43 2020
+++ src/sys/uvm/uvm_object.h	Sat Mar 14 20:45:23 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_object.h,v 1.37 2020/02/23 15:46:43 ad Exp $	*/
+/*	$NetBSD: uvm_object.h,v 1.38 2020/03/14 20:45:23 ad Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -114,7 +114,7 @@ extern const struct uvm_pagerops aobj_pa
  */
 
 #define	UVM_OBJ_NEEDS_WRITEFAULT(uobj) \
-	(UVM_OBJ_IS_VNODE(uobj) && uvn_needs_writefault_p(uobj))
+	(UVM_OBJ_IS_VNODE(uobj) && uvn_clean_p(uobj))
 
 #define	UVM_OBJ_IS_AOBJ(uobj) \
 	((uobj)->pgops == &aobj_pager)

Index: src/sys/uvm/uvm_page_status.c
diff -u src/sys/uvm/uvm_page_status.c:1.3 src/sys/uvm/uvm_page_status.c:1.4
--- src/sys/uvm/uvm_page_status.c:1.3	Sun Feb 23 15:46:43 2020
+++ src/sys/uvm/uvm_page_status.c	Sat Mar 14 20:45:23 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_page_status.c,v 1.3 2020/02/23 15:46:43 ad Exp $	*/
+/*	$NetBSD: uvm_page_status.c,v 1.4 2020/03/14 20:45:23 ad Exp $	*/
 
 /*-
 * Copyright (c)2011 YAMAMOTO Takashi,
@@ -27,7 +27,7 @@
 */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_page_status.c,v 1.3 2020/02/23 15:46:43 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_page_status.c,v 1.4 2020/03/14 20:45:23 ad Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -109,6 +109,15 @@ uvm_pagemarkdirty(struct vm_page *pg, un
 		radix_tree_clear_tag(&uobj->uo_pages, idx,
 		    UVM_PAGE_DIRTY_TAG);
 	} else {
+		/*
+		 * on first dirty page, mark the object dirty.
+		 * for vnodes this inserts to the syncer worklist.
+		 */
+		if (radix_tree_empty_tagged_tree_p(&uobj->uo_pages,
+		    UVM_PAGE_DIRTY_TAG) &&
+		    uobj->pgops->pgo_markdirty != NULL) {
+			(*uobj->pgops->pgo_markdirty)(uobj);
+		}
 		radix_tree_set_tag(&uobj->uo_pages, idx,
 		    UVM_PAGE_DIRTY_TAG);
 	}

Index: src/sys/uvm/uvm_pager.h
diff -u src/sys/uvm/uvm_pager.h:1.45 src/sys/uvm/uvm_pager.h:1.46
--- src/sys/uvm/uvm_pager.h:1.45	Sun Dec  9 20:33:04 2018
+++ src/sys/uvm/uvm_pager.h	Sat Mar 14 20:45:23 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_pager.h,v 1.45 2018/12/09 20:33:04 jdolecek Exp $	*/
+/*	$NetBSD: uvm_pager.h,v 1.46 2020/03/14 20:45:23 ad Exp $	*/
 
 /*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -135,6 +135,9 @@ struct uvm_pagerops {
 
 	/* put/write pages */
 	int			(*pgo_put)(struct uvm_object *, voff_t, voff_t, int);
+
+	/* mark object dirty */
+	void			(*pgo_markdirty)(struct uvm_object *);
 };
 
 /* pager flags [mostly for flush] */

Index: src/sys/uvm/uvm_vnode.c
diff -u src/sys/uvm/uvm_vnode.c:1.109 src/sys/uvm/uvm_vnode.c:1.110
--- src/sys/uvm/uvm_vnode.c:1.109	Sat Mar 14 20:23:51 2020
+++ src/sys/uvm/uvm_vnode.c	Sat Mar 14 20:45:23 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_vnode.c,v 1.109 2020/03/14 20:23:51 ad Exp $	*/
+/*	$NetBSD: uvm_vnode.c,v 1.110 2020/03/14 20:45:23 ad Exp $	*/
 
 /*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -45,12 +45,13 @@
 */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.109 2020/03/14 20:23:51 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.110 2020/03/14 20:45:23 ad Exp $");
 
 #ifdef _KERNEL_OPT
 #include "opt_uvmhist.h"
 #endif
 
+#include <sys/atomic.h>
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/kernel.h>
@@ -80,6 +81,7 @@ static void uvn_alloc_ractx(struct uvm_o
 static void	uvn_detach(struct uvm_object *);
 static int	uvn_get(struct uvm_object *, voff_t, struct vm_page **,
 			int *, int, vm_prot_t, int, int);
+static void	uvn_markdirty(struct uvm_object *);
 static int	uvn_put(struct uvm_object *, voff_t, voff_t, int);
 static void	uvn_reference(struct uvm_object *);
 
@@ -96,6 +98,7 @@ const struct uvm_pagerops uvm_vnodeops =
 	.pgo_detach = uvn_detach,
 	.pgo_get = uvn_get,
 	.pgo_put = uvn_put,
+	.pgo_markdirty = uvn_markdirty,
 };
 
 /*
@@ -153,7 +156,6 @@ uvn_put(struct uvm_object *uobj, voff_t
 	return error;
 }
 
-
 /*
 * uvn_get: get pages (synchronously) from backing store
 *
@@ -194,6 +196,25 @@ uvn_get(struct uvm_object *uobj, voff_t
 	return error;
 }
 
+/*
+ * uvn_markdirty: called when the object gains first dirty page
+ *
+ * => uobj must be write locked.
+ */
+
+static void
+uvn_markdirty(struct uvm_object *uobj)
+{
+	struct vnode *vp = (struct vnode *)uobj;
+
+	KASSERT(rw_write_held(uobj->vmobjlock));
+
+	mutex_enter(vp->v_interlock);
+	if ((vp->v_iflag & VI_ONWORKLST) == 0) {
+		vn_syncer_add_to_worklist(vp, filedelay);
+	}
+	mutex_exit(vp->v_interlock);
+}
 /*
 * uvn_findpages:
 *
@@ -462,12 +483,14 @@ bool
 uvn_text_p(struct uvm_object *uobj)
 {
 	struct vnode *vp = (struct vnode *)uobj;
+	int iflag;
 
 	/*
	 * v_interlock is not held here, but VI_EXECMAP is only ever changed
	 * with the vmobjlock held too.
	 */
-	return (vp->v_iflag & VI_EXECMAP) != 0;
+	iflag = atomic_load_relaxed(&vp->v_iflag);
+	return (iflag & VI_EXECMAP) != 0;
 }
 
 bool
@@ -478,20 +501,6 @@ uvn_clean_p(struct uvm_object *uobj)
 	    UVM_PAGE_DIRTY_TAG);
 }
 
-bool
-uvn_needs_writefault_p(struct uvm_object *uobj)
-{
-	struct vnode *vp = (struct vnode *)uobj;
-
-	/*
-	 * v_interlock is not held here, but VI_WRMAP and VI_WRMAPDIRTY are
-	 * only ever changed with the vmobjlock held too, or when it's known
-	 * the uvm_object contains no pages (VI_PAGES clear).
-	 */
-	return uvn_clean_p(uobj) ||
-	    (vp->v_iflag & (VI_WRMAP|VI_WRMAPDIRTY)) == VI_WRMAP;
-}
-
 static void
 uvn_alloc_ractx(struct uvm_object *uobj)
 {
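A pager opts into the new notification simply by filling in the added
uvm_pagerops member, as uvm_vnodeops does above with
.pgo_markdirty = uvn_markdirty; pagers that leave it NULL (the aobj
pager, for instance) are skipped by uvm_pagemarkdirty().  A hypothetical
registration is sketched below; the expg_* names are invented for
illustration and are not part of this commit:

	/*
	 * Hypothetical pager; in-kernel code would take the types
	 * from <uvm/uvm_pager.h>.
	 */
	#include <uvm/uvm_pager.h>

	static void
	expg_markdirty(struct uvm_object *uobj)
	{

		/*
		 * Called once, with uobj->vmobjlock write locked, when
		 * the object gains its first dirty page (cf.
		 * uvn_markdirty() above).
		 */
	}

	const struct uvm_pagerops example_pagerops = {
		/* pgo_get, pgo_put, ... as usual */
		.pgo_markdirty = expg_markdirty,  /* optional; may be NULL */
	};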