Module Name: src Committed By: ad Date: Sat Mar 14 19:54:06 UTC 2020
Modified Files: src/sys/rump/librump/rumpkern: vm.c Log Message: rump - page/object dirtiness tracking corrections. To generate a diff of this commit: cvs rdiff -u -r1.184 -r1.185 src/sys/rump/librump/rumpkern/vm.c Please note that diffs are not public domain; they are subject to the copyright notices on the relevant files.
Modified files: Index: src/sys/rump/librump/rumpkern/vm.c diff -u src/sys/rump/librump/rumpkern/vm.c:1.184 src/sys/rump/librump/rumpkern/vm.c:1.185 --- src/sys/rump/librump/rumpkern/vm.c:1.184 Sun Feb 23 15:46:42 2020 +++ src/sys/rump/librump/rumpkern/vm.c Sat Mar 14 19:54:06 2020 @@ -1,4 +1,4 @@ -/* $NetBSD: vm.c,v 1.184 2020/02/23 15:46:42 ad Exp $ */ +/* $NetBSD: vm.c,v 1.185 2020/03/14 19:54:06 ad Exp $ */ /* * Copyright (c) 2007-2011 Antti Kantee. All Rights Reserved. @@ -41,7 +41,7 @@ */ #include <sys/cdefs.h> -__KERNEL_RCSID(0, "$NetBSD: vm.c,v 1.184 2020/02/23 15:46:42 ad Exp $"); +__KERNEL_RCSID(0, "$NetBSD: vm.c,v 1.185 2020/03/14 19:54:06 ad Exp $"); #include <sys/param.h> #include <sys/atomic.h> @@ -172,9 +172,11 @@ uvm_pagealloc_strat(struct uvm_object *u pg->offset = off; pg->uobject = uobj; - pg->flags = PG_CLEAN|PG_BUSY|PG_FAKE; - if (flags & UVM_PGA_ZERO) { - uvm_pagezero(pg); + if (UVM_OBJ_IS_VNODE(uobj) && uobj->uo_npages == 0) { + struct vnode *vp = (struct vnode *)uobj; + mutex_enter(vp->v_interlock); + vp->v_iflag |= VI_PAGES; + mutex_exit(vp->v_interlock); } if (radix_tree_insert_node(&uobj->uo_pages, off >> PAGE_SHIFT, @@ -182,6 +184,12 @@ uvm_pagealloc_strat(struct uvm_object *u pool_cache_put(&pagecache, pg); return NULL; } + uobj->uo_npages++; + + pg->flags = PG_CLEAN|PG_BUSY|PG_FAKE; + if (flags & UVM_PGA_ZERO) { + uvm_pagezero(pg); + } /* * Don't put anons on the LRU page queue. We can't flush them @@ -195,8 +203,6 @@ uvm_pagealloc_strat(struct uvm_object *u mutex_exit(&vmpage_lruqueue_lock); } - uobj->uo_npages++; - return pg; } @@ -227,6 +233,13 @@ uvm_pagefree(struct vm_page *pg) atomic_dec_uint(&vmpage_onqueue); } + if (UVM_OBJ_IS_VNODE(uobj) && uobj->uo_npages == 0) { + struct vnode *vp = (struct vnode *)uobj; + mutex_enter(vp->v_interlock); + vp->v_iflag &= ~VI_PAGES; + mutex_exit(vp->v_interlock); + } + mutex_destroy(&pg->interlock); pool_cache_put(&pagecache, pg); }