Module Name:    src
Committed By:   matt
Date:           Tue Feb 14 01:12:43 UTC 2012

Modified Files:
        src/sys/uvm [matt-nb5-mips64]: uvm_anon.c uvm_km.c uvm_page.c
            uvm_pager.c uvm_pdaemon.c uvm_pglist.c

Log Message:
Add more KASSERTs (more! more! more!).
When returning pages to the free pool, make sure to dequeue them beforehand,
or free page queue corruption will happen.
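
A minimal sketch of the freeing pattern the log message describes, based on
the uvm_anon.c and uvm_km.c hunks below (the lock/dequeue/free sequence is
taken from those changes; the surrounding wrapper function is hypothetical):

	void
	example_free_page(struct vm_page *pg)	/* hypothetical wrapper */
	{
		/*
		 * A page must be taken off the active/inactive paging
		 * queues before it is handed back to the free pool;
		 * freeing a page that is still queued corrupts the
		 * free page queue.
		 */
		mutex_enter(&uvm_pageqlock);
		uvm_pagedequeue(pg);	/* drop from the paging queues first */
		uvm_pagefree(pg);	/* then return the page to the free pool */
		mutex_exit(&uvm_pageqlock);
	}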


To generate a diff of this commit:
cvs rdiff -u -r1.51 -r1.51.28.1 src/sys/uvm/uvm_anon.c
cvs rdiff -u -r1.101.4.2.4.7 -r1.101.4.2.4.8 src/sys/uvm/uvm_km.c
cvs rdiff -u -r1.140.6.3.4.7 -r1.140.6.3.4.8 src/sys/uvm/uvm_page.c
cvs rdiff -u -r1.92.18.4 -r1.92.18.5 src/sys/uvm/uvm_pager.c
cvs rdiff -u -r1.93.4.2.4.4 -r1.93.4.2.4.5 src/sys/uvm/uvm_pdaemon.c
cvs rdiff -u -r1.42.16.11 -r1.42.16.12 src/sys/uvm/uvm_pglist.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/uvm/uvm_anon.c
diff -u src/sys/uvm/uvm_anon.c:1.51 src/sys/uvm/uvm_anon.c:1.51.28.1
--- src/sys/uvm/uvm_anon.c:1.51	Fri Jan 18 10:48:23 2008
+++ src/sys/uvm/uvm_anon.c	Tue Feb 14 01:12:42 2012
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_anon.c,v 1.51 2008/01/18 10:48:23 yamt Exp $	*/
+/*	$NetBSD: uvm_anon.c,v 1.51.28.1 2012/02/14 01:12:42 matt Exp $	*/
 
 /*
  *
@@ -37,7 +37,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_anon.c,v 1.51 2008/01/18 10:48:23 yamt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_anon.c,v 1.51.28.1 2012/02/14 01:12:42 matt Exp $");
 
 #include "opt_uvmhist.h"
 
@@ -192,6 +192,7 @@ uvm_anfree(struct vm_anon *anon)
 				return;
 			}
 			mutex_enter(&uvm_pageqlock);
+			uvm_pagedequeue(pg);
 			uvm_pagefree(pg);
 			mutex_exit(&uvm_pageqlock);
 			mutex_exit(&anon->an_lock);

Index: src/sys/uvm/uvm_km.c
diff -u src/sys/uvm/uvm_km.c:1.101.4.2.4.7 src/sys/uvm/uvm_km.c:1.101.4.2.4.8
--- src/sys/uvm/uvm_km.c:1.101.4.2.4.7	Fri Feb 10 07:14:00 2012
+++ src/sys/uvm/uvm_km.c	Tue Feb 14 01:12:42 2012
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_km.c,v 1.101.4.2.4.7 2012/02/10 07:14:00 matt Exp $	*/
+/*	$NetBSD: uvm_km.c,v 1.101.4.2.4.8 2012/02/14 01:12:42 matt Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -128,7 +128,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_km.c,v 1.101.4.2.4.7 2012/02/10 07:14:00 matt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_km.c,v 1.101.4.2.4.8 2012/02/14 01:12:42 matt Exp $");
 
 #include "opt_uvmhist.h"
 
@@ -437,6 +437,7 @@ uvm_km_pgremove(vaddr_t startva, vaddr_t
 		uao_dropswap(uobj, curoff >> PAGE_SHIFT);
 		if (pg != NULL) {
 			mutex_enter(&uvm_pageqlock);
+			uvm_pagedequeue(pg);
 			uvm_pagefree(pg);
 			mutex_exit(&uvm_pageqlock);
 		}

Index: src/sys/uvm/uvm_page.c
diff -u src/sys/uvm/uvm_page.c:1.140.6.3.4.7 src/sys/uvm/uvm_page.c:1.140.6.3.4.8
--- src/sys/uvm/uvm_page.c:1.140.6.3.4.7	Thu Feb  9 03:05:00 2012
+++ src/sys/uvm/uvm_page.c	Tue Feb 14 01:12:42 2012
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_page.c,v 1.140.6.3.4.7 2012/02/09 03:05:00 matt Exp $	*/
+/*	$NetBSD: uvm_page.c,v 1.140.6.3.4.8 2012/02/14 01:12:42 matt Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -71,7 +71,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.140.6.3.4.7 2012/02/09 03:05:00 matt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.140.6.3.4.8 2012/02/14 01:12:42 matt Exp $");
 
 #include "opt_uvmhist.h"
 #include "opt_readahead.h"
@@ -1109,6 +1109,7 @@ uvm_pagealloc_pgfl(struct uvm_cpu *ucpu,
 		/* cpu, try1 */
 		struct pgflist * const freeq = pgfl->pgfl_queues[free_list];
 		if ((pg = LIST_FIRST(&freeq[try1])) != NULL) {
+			KASSERT(pg->pqflags & PQ_FREE);
 			KASSERT(ucpu == VM_FREE_PAGE_TO_CPU(pg));
 			KASSERT(pgfl == &ucpu->page_free[color]);
 		    	ucpu->page_cpuhit++;
@@ -1118,7 +1119,11 @@ uvm_pagealloc_pgfl(struct uvm_cpu *ucpu,
 		/* global, try1 */
 		struct pgflist * const gfreeq = gpgfl->pgfl_queues[free_list];
 		if ((pg = LIST_FIRST(&gfreeq[try1])) != NULL) {
+			KASSERT(pg->pqflags & PQ_FREE);
 			ucpu = VM_FREE_PAGE_TO_CPU(pg);
+#ifndef MULTIPROCESSOR
+			KASSERT(ucpu == uvm.cpus);
+#endif
 			pgfl = &ucpu->page_free[color];
 		    	ucpu->page_cpumiss++;
 			goto gotit;
@@ -1126,6 +1131,7 @@ uvm_pagealloc_pgfl(struct uvm_cpu *ucpu,
 
 		/* cpu, try2 */
 		if ((pg = LIST_FIRST(&freeq[try2])) != NULL) {
+			KASSERT(pg->pqflags & PQ_FREE);
 			KASSERT(ucpu == VM_FREE_PAGE_TO_CPU(pg));
 			KASSERT(pgfl == &ucpu->page_free[color]);
 		    	ucpu->page_cpuhit++;
@@ -1135,7 +1141,11 @@ uvm_pagealloc_pgfl(struct uvm_cpu *ucpu,
 
 		/* global, try2 */
 		if ((pg = LIST_FIRST(&gfreeq[try2])) != NULL) {
+			KASSERT(pg->pqflags & PQ_FREE);
 			ucpu = VM_FREE_PAGE_TO_CPU(pg);
+#ifndef MULTIPROCESSOR
+			KASSERT(ucpu == uvm.cpus);
+#endif
 			pgfl = &ucpu->page_free[color];
 		    	ucpu->page_cpumiss++;
 			try1 = try2;
@@ -1503,6 +1513,7 @@ uvm_pagefree(struct vm_page *pg)
 	}
 #endif /* DEBUG */
 
+	KASSERT(!uvmpdpol_pageisqueued_p(pg));
 	KASSERT((pg->flags & PG_PAGEOUT) == 0);
 	KASSERT(!(pg->pqflags & PQ_FREE));
 	KASSERT(mutex_owned(&uvm_pageqlock) || !uvmpdpol_pageisqueued_p(pg));

Index: src/sys/uvm/uvm_pager.c
diff -u src/sys/uvm/uvm_pager.c:1.92.18.4 src/sys/uvm/uvm_pager.c:1.92.18.5
--- src/sys/uvm/uvm_pager.c:1.92.18.4	Thu Feb  9 03:05:01 2012
+++ src/sys/uvm/uvm_pager.c	Tue Feb 14 01:12:42 2012
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_pager.c,v 1.92.18.4 2012/02/09 03:05:01 matt Exp $	*/
+/*	$NetBSD: uvm_pager.c,v 1.92.18.5 2012/02/14 01:12:42 matt Exp $	*/
 
 /*
  *
@@ -39,7 +39,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_pager.c,v 1.92.18.4 2012/02/09 03:05:01 matt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_pager.c,v 1.92.18.5 2012/02/14 01:12:42 matt Exp $");
 
 #include "opt_uvmhist.h"
 #include "opt_readahead.h"
@@ -422,7 +422,6 @@ uvm_aio_aiodone_pages(struct vm_page **p
 		 */
 
 		if (pg->flags & PG_PAGEOUT) {
-			pg->flags &= ~PG_PAGEOUT;
 			uvm_pageout_done(pg, true);
 			pg->flags |= PG_RELEASED;
 		}

Index: src/sys/uvm/uvm_pdaemon.c
diff -u src/sys/uvm/uvm_pdaemon.c:1.93.4.2.4.4 src/sys/uvm/uvm_pdaemon.c:1.93.4.2.4.5
--- src/sys/uvm/uvm_pdaemon.c:1.93.4.2.4.4	Mon Feb 13 23:07:31 2012
+++ src/sys/uvm/uvm_pdaemon.c	Tue Feb 14 01:12:42 2012
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_pdaemon.c,v 1.93.4.2.4.4 2012/02/13 23:07:31 matt Exp $	*/
+/*	$NetBSD: uvm_pdaemon.c,v 1.93.4.2.4.5 2012/02/14 01:12:42 matt Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -71,7 +71,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_pdaemon.c,v 1.93.4.2.4.4 2012/02/13 23:07:31 matt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_pdaemon.c,v 1.93.4.2.4.5 2012/02/14 01:12:42 matt Exp $");
 
 #include "opt_uvmhist.h"
 #include "opt_readahead.h"
@@ -528,13 +528,13 @@ uvm_pageout_start(struct uvm_pggroup *gr
 
 	mutex_spin_enter(&uvm_fpageqlock);
 
-	uvmexp.paging += npages;
 	uvmpd_checkgroup(grp);
+	uvmexp.paging += npages;
 	if (grp->pgrp_paging == 0) {
 		TAILQ_INSERT_TAIL(&pdinfo->pd_pagingq, grp, pgrp_paging_link);
-		uvmpd_checkgroup(grp);
 	}
 	grp->pgrp_paging += npages;
+	uvmpd_checkgroup(grp);
 	mutex_spin_exit(&uvm_fpageqlock);
 }
 
@@ -559,6 +559,11 @@ uvm_pageout_done(struct vm_page *pg, boo
 	grp->pgrp_pdfreed += freed;
 
 	/*
+	 * Page is no longer being paged out.
+	 */
+	pg->flags &= ~PG_PAGEOUT;
+
+	/*
 	 * wake up either of pagedaemon or LWPs waiting for it.
 	 */
 	if (grp->pgrp_free * uvmexp.npggroups <= uvmexp.reserve_kernel) {

Index: src/sys/uvm/uvm_pglist.c
diff -u src/sys/uvm/uvm_pglist.c:1.42.16.11 src/sys/uvm/uvm_pglist.c:1.42.16.12
--- src/sys/uvm/uvm_pglist.c:1.42.16.11	Thu Feb  9 03:05:01 2012
+++ src/sys/uvm/uvm_pglist.c	Tue Feb 14 01:12:42 2012
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_pglist.c,v 1.42.16.11 2012/02/09 03:05:01 matt Exp $	*/
+/*	$NetBSD: uvm_pglist.c,v 1.42.16.12 2012/02/14 01:12:42 matt Exp $	*/
 
 /*-
  * Copyright (c) 1997 The NetBSD Foundation, Inc.
@@ -35,7 +35,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_pglist.c,v 1.42.16.11 2012/02/09 03:05:01 matt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_pglist.c,v 1.42.16.12 2012/02/14 01:12:42 matt Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -85,12 +85,16 @@ uvm_pglist_add(struct vm_page *pg, struc
 	int free_list, color, queue;
 
 	KASSERT(mutex_owned(&uvm_fpageqlock));
+	KASSERT(pg->pqflags & PQ_FREE);
 
 #if PGFL_NQUEUES != 2
 #error uvm_pglistalloc needs to be updated
 #endif
 
 	ucpu = VM_FREE_PAGE_TO_CPU(pg);
+#ifndef MULTIPROCESSOR
+	KASSERT(ucpu == uvm.cpus);
+#endif
 	free_list = uvm_page_lookup_freelist(pg);
 	color = VM_PGCOLOR_BUCKET(pg);
 	queue = (pg->flags & PG_ZERO) ? PGFL_ZEROS : PGFL_UNKNOWN;
@@ -599,6 +603,9 @@ uvm_pglistfree(struct pglist *list)
 
 	mutex_spin_enter(&uvm_fpageqlock);
 	struct uvm_cpu * const ucpu = curcpu()->ci_data.cpu_uvm;
+#ifndef MULTIPROCESSOR
+	KASSERT(ucpu == uvm.cpus);
+#endif
 	while ((pg = TAILQ_FIRST(list)) != NULL) {
 		KASSERT(!uvmpdpol_pageisqueued_p(pg));
 		TAILQ_REMOVE(list, pg, pageq.queue);
@@ -615,6 +622,9 @@ uvm_pglistfree(struct pglist *list)
 		const size_t free_list = uvm_page_lookup_freelist(pg);
 		const size_t color = VM_PGCOLOR_BUCKET(pg);
 		const size_t queue = iszero ? PGFL_ZEROS : PGFL_UNKNOWN;
+#ifndef MULTIPROCESSOR
+		KASSERT(ucpu == uvm.cpus);
+#endif
 		pg->offset = (uintptr_t)ucpu;
 		LIST_INSERT_HEAD(&uvm.page_free[color].
 		    pgfl_queues[free_list][queue], pg, pageq.list);
