Module Name:    src
Committed By:   ad
Date:           Wed Dec 18 20:38:14 UTC 2019

Modified Files:
        src/sys/uvm: uvm_loan.c uvm_page.c

Log Message:
PR kern/54783: t_mmap crashes the kernel

- Fix various locking & sequencing errors with breaking loans.

- Don't call uvm_pageremove_tree() while holding pg->interlock as radixtree
  can take further locks when freeing nodes.


To generate a diff of this commit:
cvs rdiff -u -r1.91 -r1.92 src/sys/uvm/uvm_loan.c
cvs rdiff -u -r1.205 -r1.206 src/sys/uvm/uvm_page.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/uvm/uvm_loan.c
diff -u src/sys/uvm/uvm_loan.c:1.91 src/sys/uvm/uvm_loan.c:1.92
--- src/sys/uvm/uvm_loan.c:1.91	Sun Dec 15 21:11:35 2019
+++ src/sys/uvm/uvm_loan.c	Wed Dec 18 20:38:14 2019
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_loan.c,v 1.91 2019/12/15 21:11:35 ad Exp $	*/
+/*	$NetBSD: uvm_loan.c,v 1.92 2019/12/18 20:38:14 ad Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -32,7 +32,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_loan.c,v 1.91 2019/12/15 21:11:35 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_loan.c,v 1.92 2019/12/18 20:38:14 ad Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -1121,7 +1121,7 @@ uvm_loanbreak(struct vm_page *uobjpage)
 	 * force a reload of the old page by clearing it from all
 	 * pmaps.
 	 * transfer dirtiness of the old page to the new page.
-	 * then lock pg->interlock to rename the pages.
+	 * then rename the pages.
 	 */
 
 	uvm_pagecopy(uobjpage, pg);	/* old -> new */
@@ -1143,14 +1143,6 @@ uvm_loanbreak(struct vm_page *uobjpage)
 	UVM_PAGE_OWN(uobjpage, NULL);
 
 	/*
-	 * replace uobjpage with new page.
-	 */
-
-	mutex_enter(&uobjpage->interlock);
-	uvm_pagereplace(uobjpage, pg);
-	mutex_exit(&uobjpage->interlock);
-
-	/*
 	 * if the page is no longer referenced by
 	 * an anon (i.e. we are breaking an O->K
 	 * loan), then remove it from any pageq's.
@@ -1159,6 +1151,12 @@ uvm_loanbreak(struct vm_page *uobjpage)
 		uvm_pagedequeue(uobjpage);
 
 	/*
+	 * replace uobjpage with new page.
+	 */
+
+	uvm_pagereplace(uobjpage, pg);
+
+	/*
 	 * at this point we have absolutely no
 	 * control over uobjpage
 	 */
@@ -1176,59 +1174,58 @@ uvm_loanbreak(struct vm_page *uobjpage)
 int
 uvm_loanbreak_anon(struct vm_anon *anon, struct uvm_object *uobj)
 {
-	struct vm_page *pg, *dequeuepg;
+	struct vm_page *newpg, *oldpg;
 
 	KASSERT(mutex_owned(anon->an_lock));
 	KASSERT(uobj == NULL || mutex_owned(uobj->vmobjlock));
 
 	/* get new un-owned replacement page */
-	pg = uvm_pagealloc(NULL, 0, NULL, 0);
-	if (pg == NULL) {
+	newpg = uvm_pagealloc(NULL, 0, NULL, 0);
+	if (newpg == NULL) {
 		return ENOMEM;
 	}
 
+	oldpg = anon->an_page;
+	if (uobj == NULL) {
+		/*
+		 * we were the lender (A->K); need to remove the page from
+		 * pageq's.
+		 */
+		uvm_pagedequeue(oldpg);
+	}
+
 	/* copy old -> new */
-	uvm_pagecopy(anon->an_page, pg);
+	uvm_pagecopy(oldpg, newpg);
 
 	/* force reload */
-	pmap_page_protect(anon->an_page, VM_PROT_NONE);
-	if (pg < anon->an_page) {
-		mutex_enter(&pg->interlock);
-		mutex_enter(&anon->an_page->interlock);
+	pmap_page_protect(oldpg, VM_PROT_NONE);
+	if (newpg < oldpg) {
+		mutex_enter(&newpg->interlock);
+		mutex_enter(&oldpg->interlock);
 	} else {
-		mutex_enter(&anon->an_page->interlock);
-		mutex_enter(&pg->interlock);
+		mutex_enter(&oldpg->interlock);
+		mutex_enter(&newpg->interlock);
 	}
-	anon->an_page->uanon = NULL;
+	oldpg->uanon = NULL;
 	/* in case we owned */
-	anon->an_page->flags &= ~PG_ANON;
+	oldpg->flags &= ~PG_ANON;
 
 	if (uobj) {
 		/* if we were receiver of loan */
-		anon->an_page->loan_count--;
-		dequeuepg = NULL;
-	} else {
-		/*
-		 * we were the lender (A->K); need to remove the page from
-		 * pageq's.
-		 */
-		dequeuepg = anon->an_page;
+		oldpg->loan_count--;
 	}
 
 	/* install new page in anon */
-	anon->an_page = pg;
-	pg->uanon = anon;
-	pg->flags |= PG_ANON;
-
-	mutex_exit(&pg->interlock);
-	mutex_exit(&anon->an_page->interlock);
-	uvm_pageactivate(pg);
-	if (dequeuepg != NULL) {
-		uvm_pagedequeue(anon->an_page);
-	}
+	anon->an_page = newpg;
+	newpg->uanon = anon;
+	newpg->flags |= PG_ANON;
+
+	mutex_exit(&newpg->interlock);
+	mutex_exit(&oldpg->interlock);
+	uvm_pageactivate(newpg);
 
-	pg->flags &= ~(PG_BUSY|PG_FAKE);
-	UVM_PAGE_OWN(pg, NULL);
+	newpg->flags &= ~(PG_BUSY|PG_FAKE);
+	UVM_PAGE_OWN(newpg, NULL);
 
 	if (uobj) {
 		mutex_exit(uobj->vmobjlock);

Index: src/sys/uvm/uvm_page.c
diff -u src/sys/uvm/uvm_page.c:1.205 src/sys/uvm/uvm_page.c:1.206
--- src/sys/uvm/uvm_page.c:1.205	Mon Dec 16 22:47:55 2019
+++ src/sys/uvm/uvm_page.c	Wed Dec 18 20:38:14 2019
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_page.c,v 1.205 2019/12/16 22:47:55 ad Exp $	*/
+/*	$NetBSD: uvm_page.c,v 1.206 2019/12/18 20:38:14 ad Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -66,7 +66,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.205 2019/12/16 22:47:55 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.206 2019/12/18 20:38:14 ad Exp $");
 
 #include "opt_ddb.h"
 #include "opt_uvm.h"
@@ -148,13 +148,6 @@ void uvm_physseg_seg_chomp_slab(uvm_phys
 struct vm_page *uvm_physseg_seg_alloc_from_slab(uvm_physseg_t, size_t);
 
 /*
- * local prototypes
- */
-
-static int uvm_pageinsert(struct uvm_object *, struct vm_page *);
-static void uvm_pageremove(struct uvm_object *, struct vm_page *);
-
-/*
  * inline functions
  */
 
@@ -205,22 +198,6 @@ uvm_pageinsert_tree(struct uvm_object *u
 	return 0;
 }
 
-static inline int
-uvm_pageinsert(struct uvm_object *uobj, struct vm_page *pg)
-{
-	int error;
-
-	KDASSERT(uobj != NULL);
-	KDASSERT(uobj == pg->uobject);
-	error = uvm_pageinsert_tree(uobj, pg);
-	if (error != 0) {
-		KASSERT(error == ENOMEM);
-		return error;
-	}
-	uvm_pageinsert_object(uobj, pg);
-	return error;
-}
-
 /*
  * uvm_page_remove: remove page from object.
  *
@@ -265,16 +242,6 @@ uvm_pageremove_tree(struct uvm_object *u
 	KASSERT(pg == opg);
 }
 
-static inline void
-uvm_pageremove(struct uvm_object *uobj, struct vm_page *pg)
-{
-
-	KDASSERT(uobj != NULL);
-	KASSERT(uobj == pg->uobject);
-	uvm_pageremove_object(uobj, pg);
-	uvm_pageremove_tree(uobj, pg);
-}
-
 static void
 uvm_page_init_buckets(struct pgfreelist *pgfl)
 {
@@ -1043,9 +1010,10 @@ uvm_pagealloc_strat(struct uvm_object *o
 		pg->flags |= PG_ANON;
 		cpu_count(CPU_COUNT_ANONPAGES, 1);
 	} else if (obj) {
-		error = uvm_pageinsert(obj, pg);
+		uvm_pageinsert_object(obj, pg);
+		error = uvm_pageinsert_tree(obj, pg);
 		if (error != 0) {
-			pg->uobject = NULL;
+			uvm_pageremove_object(obj, pg);
 			uvm_pagefree(pg);
 			return NULL;
 		}
@@ -1077,7 +1045,6 @@ uvm_pagealloc_strat(struct uvm_object *o
  * uvm_pagereplace: replace a page with another
  *
  * => object must be locked
- * => interlock must be held
  */
 
 void
@@ -1091,13 +1058,23 @@ uvm_pagereplace(struct vm_page *oldpg, s
 	KASSERT(newpg->uobject == NULL);
 	KASSERT(mutex_owned(uobj->vmobjlock));
 
-	newpg->uobject = uobj;
 	newpg->offset = oldpg->offset;
-
 	uvm_pageremove_tree(uobj, oldpg);
 	uvm_pageinsert_tree(uobj, newpg);
+
+	/* take page interlocks during rename */
+	if (oldpg < newpg) {
+		mutex_enter(&oldpg->interlock);
+		mutex_enter(&newpg->interlock);
+	} else {
+		mutex_enter(&newpg->interlock);
+		mutex_enter(&oldpg->interlock);
+	}
+	newpg->uobject = uobj;
 	uvm_pageinsert_object(uobj, newpg);
 	uvm_pageremove_object(uobj, oldpg);
+	mutex_exit(&oldpg->interlock);
+	mutex_exit(&newpg->interlock);
 }
 
 /*
@@ -1115,7 +1092,8 @@ uvm_pagerealloc(struct vm_page *pg, stru
 	 */
 
 	if (pg->uobject) {
-		uvm_pageremove(pg->uobject, pg);
+		uvm_pageremove_tree(pg->uobject, pg);
+		uvm_pageremove_object(pg->uobject, pg);
 	}
 
 	/*
@@ -1197,6 +1175,14 @@ uvm_pagefree(struct vm_page *pg)
 		mutex_owned(pg->uanon->an_lock));
 
 	/*
+	 * remove the page from the object's tree before acquiring any page
+	 * interlocks: this can acquire locks to free radixtree nodes.
+	 */
+	if (pg->uobject != NULL) {
+		uvm_pageremove_tree(pg->uobject, pg);
+	}
+
+	/*
 	 * if the page is loaned, resolve the loan instead of freeing.
 	 */
 
@@ -1217,7 +1203,7 @@ uvm_pagefree(struct vm_page *pg)
 		mutex_enter(&pg->interlock);
 		locked = true;
 		if (pg->uobject != NULL) {
-			uvm_pageremove(pg->uobject, pg);
+			uvm_pageremove_object(pg->uobject, pg);
 			pg->flags &= ~PG_CLEAN;
 		} else if (pg->uanon != NULL) {
 			if ((pg->flags & PG_ANON) == 0) {
@@ -1256,7 +1242,7 @@ uvm_pagefree(struct vm_page *pg)
 	 * remove page from its object or anon.
 	 */
 	if (pg->uobject != NULL) {
-		uvm_pageremove(pg->uobject, pg);
+		uvm_pageremove_object(pg->uobject, pg);
 	} else if (pg->uanon != NULL) {
 		pg->uanon->an_page = NULL;
 		pg->uanon = NULL;

Reply via email to