Module Name:    src
Committed By:   tls
Date:           Sun Aug 10 06:57:00 UTC 2014

Modified Files:
        src/sys/uvm [tls-earlyentropy]: uvm_aobj.c uvm_bio.c uvm_extern.h
            uvm_map.c uvm_page.c uvm_pglist.c uvm_swap.c

Log Message:
Rebase.


To generate a diff of this commit:
cvs rdiff -u -r1.120 -r1.120.2.1 src/sys/uvm/uvm_aobj.c
cvs rdiff -u -r1.80 -r1.80.2.1 src/sys/uvm/uvm_bio.c
cvs rdiff -u -r1.189 -r1.189.2.1 src/sys/uvm/uvm_extern.h
cvs rdiff -u -r1.328 -r1.328.2.1 src/sys/uvm/uvm_map.c
cvs rdiff -u -r1.183.2.1 -r1.183.2.2 src/sys/uvm/uvm_page.c
cvs rdiff -u -r1.63 -r1.63.2.1 src/sys/uvm/uvm_pglist.c
cvs rdiff -u -r1.168 -r1.168.2.1 src/sys/uvm/uvm_swap.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/uvm/uvm_aobj.c
diff -u src/sys/uvm/uvm_aobj.c:1.120 src/sys/uvm/uvm_aobj.c:1.120.2.1
--- src/sys/uvm/uvm_aobj.c:1.120	Fri Oct 25 20:22:55 2013
+++ src/sys/uvm/uvm_aobj.c	Sun Aug 10 06:57:00 2014
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_aobj.c,v 1.120 2013/10/25 20:22:55 martin Exp $	*/
+/*	$NetBSD: uvm_aobj.c,v 1.120.2.1 2014/08/10 06:57:00 tls Exp $	*/
 
 /*
  * Copyright (c) 1998 Chuck Silvers, Charles D. Cranor and
@@ -38,7 +38,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_aobj.c,v 1.120 2013/10/25 20:22:55 martin Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_aobj.c,v 1.120.2.1 2014/08/10 06:57:00 tls Exp $");
 
 #include "opt_uvmhist.h"
 
@@ -146,6 +146,7 @@ struct uvm_aobj {
 	struct uao_swhash *u_swhash;
 	u_long u_swhashmask;		/* mask for hashtable */
 	LIST_ENTRY(uvm_aobj) u_list;	/* global list of aobjs */
+	int u_freelist;		  /* freelist to allocate pages from */
 };
 
 static void	uao_free(struct uvm_aobj *);
@@ -161,6 +162,8 @@ static bool uao_pagein(struct uvm_aobj *
 static bool uao_pagein_page(struct uvm_aobj *, int);
 #endif /* defined(VMSWAP) */
 
+static struct vm_page	*uao_pagealloc(struct uvm_object *, voff_t, int);
+
 /*
  * aobj_pager
  *
@@ -436,6 +439,12 @@ uao_create(vsize_t size, int flags)
 	}
 
 	/*
+	 * no freelist by default
+	 */
+
+	aobj->u_freelist = VM_NFREELIST;
+
+	/*
  	 * allocate hash/array if necessary
  	 *
  	 * note: in the KERNSWAP case no need to worry about locking since
@@ -490,6 +499,41 @@ uao_create(vsize_t size, int flags)
 }
 
 /*
+ * uao_set_pgfl: allocate pages only from the specified freelist.
+ *
+ * => must be called before any pages are allocated for the object.
+ * => reset by setting it to VM_NFREELIST, meaning any freelist.
+ */
+
+void
+uao_set_pgfl(struct uvm_object *uobj, int freelist)
+{
+	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
+
+	KASSERTMSG((0 <= freelist), "invalid freelist %d", freelist);
+	KASSERTMSG((freelist <= VM_NFREELIST), "invalid freelist %d",
+	    freelist);
+
+	aobj->u_freelist = freelist;
+}
+
+/*
+ * uao_pagealloc: allocate a page for aobj.
+ */
+
+static inline struct vm_page *
+uao_pagealloc(struct uvm_object *uobj, voff_t offset, int flags)
+{
+	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
+
+	if (__predict_true(aobj->u_freelist == VM_NFREELIST))
+		return uvm_pagealloc(uobj, offset, NULL, flags);
+	else
+		return uvm_pagealloc_strat(uobj, offset, NULL, flags,
+		    UVM_PGA_STRAT_ONLY, aobj->u_freelist);
+}
+
+/*
  * uao_init: set up aobj pager subsystem
  *
  * => called at boot time from uvm_pager_init()
@@ -864,8 +908,8 @@ uao_get(struct uvm_object *uobj, voff_t 
 
 			if (ptmp == NULL && uao_find_swslot(uobj,
 			    current_offset >> PAGE_SHIFT) == 0) {
-				ptmp = uvm_pagealloc(uobj, current_offset,
-				    NULL, UVM_FLAG_COLORMATCH|UVM_PGA_ZERO);
+				ptmp = uao_pagealloc(uobj, current_offset,
+				    UVM_FLAG_COLORMATCH|UVM_PGA_ZERO);
 				if (ptmp) {
 					/* new page */
 					ptmp->flags &= ~(PG_FAKE);
@@ -959,8 +1003,7 @@ gotpage:
 			/* not resident?   allocate one now (if we can) */
 			if (ptmp == NULL) {
 
-				ptmp = uvm_pagealloc(uobj, current_offset,
-				    NULL, 0);
+				ptmp = uao_pagealloc(uobj, current_offset, 0);
 
 				/* out of RAM? */
 				if (ptmp == NULL) {

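For illustration only (not part of this diff): a minimal sketch of how a
caller might use the new uao_set_pgfl() hook to pin an anonymous UVM
object's pages to one physical freelist.  The helper name
example_create_lowmem_aobj is hypothetical, and VM_FREELIST_FIRST256 is
just an example freelist index that some ports define; any index below
VM_NFREELIST works the same way.

	#include <uvm/uvm.h>
	#include <uvm/uvm_extern.h>

	struct uvm_object *
	example_create_lowmem_aobj(vsize_t size)
	{
		struct uvm_object *uobj;

		uobj = uao_create(size, 0);

		/*
		 * Must be called before the first page is allocated for
		 * the object; afterwards uao_pagealloc() will allocate
		 * with uvm_pagealloc_strat(..., UVM_PGA_STRAT_ONLY,
		 * freelist) instead of plain uvm_pagealloc().
		 */
		uao_set_pgfl(uobj, VM_FREELIST_FIRST256);

		return uobj;
	}
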
Index: src/sys/uvm/uvm_bio.c
diff -u src/sys/uvm/uvm_bio.c:1.80 src/sys/uvm/uvm_bio.c:1.80.2.1
--- src/sys/uvm/uvm_bio.c:1.80	Fri Oct 25 20:23:33 2013
+++ src/sys/uvm/uvm_bio.c	Sun Aug 10 06:57:00 2014
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_bio.c,v 1.80 2013/10/25 20:23:33 martin Exp $	*/
+/*	$NetBSD: uvm_bio.c,v 1.80.2.1 2014/08/10 06:57:00 tls Exp $	*/
 
 /*
  * Copyright (c) 1998 Chuck Silvers.
@@ -34,7 +34,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_bio.c,v 1.80 2013/10/25 20:23:33 martin Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_bio.c,v 1.80.2.1 2014/08/10 06:57:00 tls Exp $");
 
 #include "opt_uvmhist.h"
 #include "opt_ubc.h"
@@ -210,6 +210,12 @@ ubc_init(void)
 				UVM_ADV_RANDOM, UVM_FLAG_NOMERGE)) != 0) {
 		panic("ubc_init: failed to map ubc_object");
 	}
+}
+
+void
+ubchist_init(void)
+{
+
 	UVMHIST_INIT(ubchist, 300);
 }
 

Index: src/sys/uvm/uvm_extern.h
diff -u src/sys/uvm/uvm_extern.h:1.189 src/sys/uvm/uvm_extern.h:1.189.2.1
--- src/sys/uvm/uvm_extern.h:1.189	Fri Feb 21 22:08:07 2014
+++ src/sys/uvm/uvm_extern.h	Sun Aug 10 06:57:00 2014
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_extern.h,v 1.189 2014/02/21 22:08:07 skrll Exp $	*/
+/*	$NetBSD: uvm_extern.h,v 1.189.2.1 2014/08/10 06:57:00 tls Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -539,11 +539,13 @@ void		vunmapbuf(struct buf *, vsize_t);
 
 /* uvm_aobj.c */
 struct uvm_object	*uao_create(vsize_t, int);
+void			uao_set_pgfl(struct uvm_object *, int);
 void			uao_detach(struct uvm_object *);
 void			uao_reference(struct uvm_object *);
 
 /* uvm_bio.c */
 void			ubc_init(void);
+void			ubchist_init(void);
 void *			ubc_alloc(struct uvm_object *, voff_t, vsize_t *, int,
 			    int);
 void			ubc_release(void *, int);

Index: src/sys/uvm/uvm_map.c
diff -u src/sys/uvm/uvm_map.c:1.328 src/sys/uvm/uvm_map.c:1.328.2.1
--- src/sys/uvm/uvm_map.c:1.328	Wed Mar  5 05:35:55 2014
+++ src/sys/uvm/uvm_map.c	Sun Aug 10 06:57:00 2014
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_map.c,v 1.328 2014/03/05 05:35:55 matt Exp $	*/
+/*	$NetBSD: uvm_map.c,v 1.328.2.1 2014/08/10 06:57:00 tls Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -66,7 +66,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v 1.328 2014/03/05 05:35:55 matt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v 1.328.2.1 2014/08/10 06:57:00 tls Exp $");
 
 #include "opt_ddb.h"
 #include "opt_uvmhist.h"
@@ -3097,6 +3097,7 @@ uvm_map_inherit(struct vm_map *map, vadd
 	case MAP_INHERIT_NONE:
 	case MAP_INHERIT_COPY:
 	case MAP_INHERIT_SHARE:
+	case MAP_INHERIT_ZERO:
 		break;
 	default:
 		UVMHIST_LOG(maphist,"<- done (INVALID ARG)",0,0,0,0);
@@ -4183,6 +4184,197 @@ uvmspace_free(struct vmspace *vm)
 	pool_cache_put(&uvm_vmspace_cache, vm);
 }
 
+static struct vm_map_entry *
+uvm_mapent_clone(struct vm_map *new_map, struct vm_map_entry *old_entry,
+    int flags)
+{
+	struct vm_map_entry *new_entry;
+
+	new_entry = uvm_mapent_alloc(new_map, 0);
+	/* old_entry -> new_entry */
+	uvm_mapent_copy(old_entry, new_entry);
+
+	/* new pmap has nothing wired in it */
+	new_entry->wired_count = 0;
+
+	/*
+	 * gain reference to object backing the map (can't
+	 * be a submap, already checked this case).
+	 */
+
+	if (new_entry->aref.ar_amap)
+		uvm_map_reference_amap(new_entry, flags);
+
+	if (new_entry->object.uvm_obj &&
+	    new_entry->object.uvm_obj->pgops->pgo_reference)
+		new_entry->object.uvm_obj->pgops->pgo_reference(
+			new_entry->object.uvm_obj);
+
+	/* insert entry at end of new_map's entry list */
+	uvm_map_entry_link(new_map, new_map->header.prev,
+	    new_entry);
+
+	return new_entry;
+}
+
+/*
+ * share the mapping: this means we want the old and
+ * new entries to share amaps and backing objects.
+ */
+static void
+uvm_mapent_forkshared(struct vm_map *new_map, struct vm_map *old_map,
+    struct vm_map_entry *old_entry)
+{
+	/*
+	 * if the old_entry needs a new amap (due to prev fork)
+	 * then we need to allocate it now so that we have
+	 * something we own to share with the new_entry.   [in
+	 * other words, we need to clear needs_copy]
+	 */
+
+	if (UVM_ET_ISNEEDSCOPY(old_entry)) {
+		/* get our own amap, clears needs_copy */
+		amap_copy(old_map, old_entry, AMAP_COPY_NOCHUNK,
+		    0, 0);
+		/* XXXCDC: WAITOK??? */
+	}
+
+	uvm_mapent_clone(new_map, old_entry, AMAP_SHARED);
+}
+
+
+static void
+uvm_mapent_forkcopy(struct vm_map *new_map, struct vm_map *old_map,
+    struct vm_map_entry *old_entry)
+{
+	struct vm_map_entry *new_entry;
+
+	/*
+	 * copy-on-write the mapping (using mmap's
+	 * MAP_PRIVATE semantics)
+	 *
+	 * allocate new_entry, adjust reference counts.
+	 * (note that new references are read-only).
+	 */
+
+	new_entry = uvm_mapent_clone(new_map, old_entry, 0);
+
+	new_entry->etype |=
+	    (UVM_ET_COPYONWRITE|UVM_ET_NEEDSCOPY);
+
+	/*
+	 * the new entry will need an amap.  it will either
+	 * need to be copied from the old entry or created
+	 * from scratch (if the old entry does not have an
+	 * amap).  can we defer this process until later
+	 * (by setting "needs_copy") or do we need to copy
+	 * the amap now?
+	 *
+	 * we must copy the amap now if any of the following
+	 * conditions hold:
+	 * 1. the old entry has an amap and that amap is
+	 *    being shared.  this means that the old (parent)
+	 *    process is sharing the amap with another
+	 *    process.  if we do not clear needs_copy here
+	 *    we will end up in a situation where both the
+	 *    parent and child process are refering to the
+	 *    same amap with "needs_copy" set.  if the
+	 *    parent write-faults, the fault routine will
+	 *    clear "needs_copy" in the parent by allocating
+	 *    a new amap.   this is wrong because the
+	 *    parent is supposed to be sharing the old amap
+	 *    and the new amap will break that.
+	 *
+	 * 2. if the old entry has an amap and a non-zero
+	 *    wire count then we are going to have to call
+	 *    amap_cow_now to avoid page faults in the
+	 *    parent process.   since amap_cow_now requires
+	 *    "needs_copy" to be clear we might as well
+	 *    clear it here as well.
+	 *
+	 */
+
+	if (old_entry->aref.ar_amap != NULL) {
+		if ((amap_flags(old_entry->aref.ar_amap) & AMAP_SHARED) != 0 ||
+		    VM_MAPENT_ISWIRED(old_entry)) {
+
+			amap_copy(new_map, new_entry,
+			    AMAP_COPY_NOCHUNK, 0, 0);
+			/* XXXCDC: M_WAITOK ... ok? */
+		}
+	}
+
+	/*
+	 * if the parent's entry is wired down, then the
+	 * parent process does not want page faults on
+	 * access to that memory.  this means that we
+	 * cannot do copy-on-write because we can't write
+	 * protect the old entry.   in this case we
+	 * resolve all copy-on-write faults now, using
+	 * amap_cow_now.   note that we have already
+	 * allocated any needed amap (above).
+	 */
+
+	if (VM_MAPENT_ISWIRED(old_entry)) {
+
+		/*
+		 * resolve all copy-on-write faults now
+		 * (note that there is nothing to do if
+		 * the old mapping does not have an amap).
+		 */
+		if (old_entry->aref.ar_amap)
+			amap_cow_now(new_map, new_entry);
+
+	} else {
+		/*
+		 * setup mappings to trigger copy-on-write faults
+		 * we must write-protect the parent if it has
+		 * an amap and it is not already "needs_copy"...
+		 * if it is already "needs_copy" then the parent
+		 * has already been write-protected by a previous
+		 * fork operation.
+		 */
+		if (old_entry->aref.ar_amap &&
+		    !UVM_ET_ISNEEDSCOPY(old_entry)) {
+			if (old_entry->max_protection & VM_PROT_WRITE) {
+				pmap_protect(old_map->pmap,
+				    old_entry->start, old_entry->end,
+				    old_entry->protection & ~VM_PROT_WRITE);
+			}
+			old_entry->etype |= UVM_ET_NEEDSCOPY;
+		}
+	}
+}
+
+/*
+ * zero the mapping: the new entry will be zero initialized
+ */
+static void
+uvm_mapent_forkzero(struct vm_map *new_map, struct vm_map *old_map,
+    struct vm_map_entry *old_entry)
+{
+	struct vm_map_entry *new_entry;
+
+	new_entry = uvm_mapent_clone(new_map, old_entry, 0);
+
+	new_entry->etype |=
+	    (UVM_ET_COPYONWRITE|UVM_ET_NEEDSCOPY);
+
+	if (new_entry->aref.ar_amap) {
+		uvm_map_unreference_amap(new_entry, 0);
+		new_entry->aref.ar_pageoff = 0;
+		new_entry->aref.ar_amap = NULL;
+	}
+
+	if (UVM_ET_ISOBJ(new_entry)) {
+		if (new_entry->object.uvm_obj->pgops->pgo_detach)
+			new_entry->object.uvm_obj->pgops->pgo_detach(
+			    new_entry->object.uvm_obj);
+		new_entry->object.uvm_obj = NULL;
+		new_entry->etype &= ~UVM_ET_OBJ;
+	}
+}
+
 /*
  *   F O R K   -   m a i n   e n t r y   p o i n t
  */
@@ -4200,7 +4392,6 @@ uvmspace_fork(struct vmspace *vm1)
 	struct vm_map *old_map = &vm1->vm_map;
 	struct vm_map *new_map;
 	struct vm_map_entry *old_entry;
-	struct vm_map_entry *new_entry;
 	UVMHIST_FUNC("uvmspace_fork"); UVMHIST_CALLED(maphist);
 
 	vm_map_lock(old_map);
@@ -4230,7 +4421,6 @@ uvmspace_fork(struct vmspace *vm1)
 
 		switch (old_entry->inheritance) {
 		case MAP_INHERIT_NONE:
-
 			/*
 			 * drop the mapping, modify size
 			 */
@@ -4238,171 +4428,20 @@ uvmspace_fork(struct vmspace *vm1)
 			break;
 
 		case MAP_INHERIT_SHARE:
-
-			/*
-			 * share the mapping: this means we want the old and
-			 * new entries to share amaps and backing objects.
-			 */
-			/*
-			 * if the old_entry needs a new amap (due to prev fork)
-			 * then we need to allocate it now so that we have
-			 * something we own to share with the new_entry.   [in
-			 * other words, we need to clear needs_copy]
-			 */
-
-			if (UVM_ET_ISNEEDSCOPY(old_entry)) {
-				/* get our own amap, clears needs_copy */
-				amap_copy(old_map, old_entry, AMAP_COPY_NOCHUNK,
-				    0, 0);
-				/* XXXCDC: WAITOK??? */
-			}
-
-			new_entry = uvm_mapent_alloc(new_map, 0);
-			/* old_entry -> new_entry */
-			uvm_mapent_copy(old_entry, new_entry);
-
-			/* new pmap has nothing wired in it */
-			new_entry->wired_count = 0;
-
-			/*
-			 * gain reference to object backing the map (can't
-			 * be a submap, already checked this case).
-			 */
-
-			if (new_entry->aref.ar_amap)
-				uvm_map_reference_amap(new_entry, AMAP_SHARED);
-
-			if (new_entry->object.uvm_obj &&
-			    new_entry->object.uvm_obj->pgops->pgo_reference)
-				new_entry->object.uvm_obj->
-				    pgops->pgo_reference(
-				        new_entry->object.uvm_obj);
-
-			/* insert entry at end of new_map's entry list */
-			uvm_map_entry_link(new_map, new_map->header.prev,
-			    new_entry);
-
+			uvm_mapent_forkshared(new_map, old_map, old_entry);
 			break;
 
 		case MAP_INHERIT_COPY:
+			uvm_mapent_forkcopy(new_map, old_map, old_entry);
+			break;
 
-			/*
-			 * copy-on-write the mapping (using mmap's
-			 * MAP_PRIVATE semantics)
-			 *
-			 * allocate new_entry, adjust reference counts.
-			 * (note that new references are read-only).
-			 */
-
-			new_entry = uvm_mapent_alloc(new_map, 0);
-			/* old_entry -> new_entry */
-			uvm_mapent_copy(old_entry, new_entry);
-
-			if (new_entry->aref.ar_amap)
-				uvm_map_reference_amap(new_entry, 0);
-
-			if (new_entry->object.uvm_obj &&
-			    new_entry->object.uvm_obj->pgops->pgo_reference)
-				new_entry->object.uvm_obj->pgops->pgo_reference
-				    (new_entry->object.uvm_obj);
-
-			/* new pmap has nothing wired in it */
-			new_entry->wired_count = 0;
-
-			new_entry->etype |=
-			    (UVM_ET_COPYONWRITE|UVM_ET_NEEDSCOPY);
-			uvm_map_entry_link(new_map, new_map->header.prev,
-			    new_entry);
-
-			/*
-			 * the new entry will need an amap.  it will either
-			 * need to be copied from the old entry or created
-			 * from scratch (if the old entry does not have an
-			 * amap).  can we defer this process until later
-			 * (by setting "needs_copy") or do we need to copy
-			 * the amap now?
-			 *
-			 * we must copy the amap now if any of the following
-			 * conditions hold:
-			 * 1. the old entry has an amap and that amap is
-			 *    being shared.  this means that the old (parent)
-			 *    process is sharing the amap with another
-			 *    process.  if we do not clear needs_copy here
-			 *    we will end up in a situation where both the
-			 *    parent and child process are refering to the
-			 *    same amap with "needs_copy" set.  if the
-			 *    parent write-faults, the fault routine will
-			 *    clear "needs_copy" in the parent by allocating
-			 *    a new amap.   this is wrong because the
-			 *    parent is supposed to be sharing the old amap
-			 *    and the new amap will break that.
-			 *
-			 * 2. if the old entry has an amap and a non-zero
-			 *    wire count then we are going to have to call
-			 *    amap_cow_now to avoid page faults in the
-			 *    parent process.   since amap_cow_now requires
-			 *    "needs_copy" to be clear we might as well
-			 *    clear it here as well.
-			 *
-			 */
-
-			if (old_entry->aref.ar_amap != NULL) {
-				if ((amap_flags(old_entry->aref.ar_amap) &
-				     AMAP_SHARED) != 0 ||
-				    VM_MAPENT_ISWIRED(old_entry)) {
-
-					amap_copy(new_map, new_entry,
-					    AMAP_COPY_NOCHUNK, 0, 0);
-					/* XXXCDC: M_WAITOK ... ok? */
-				}
-			}
-
-			/*
-			 * if the parent's entry is wired down, then the
-			 * parent process does not want page faults on
-			 * access to that memory.  this means that we
-			 * cannot do copy-on-write because we can't write
-			 * protect the old entry.   in this case we
-			 * resolve all copy-on-write faults now, using
-			 * amap_cow_now.   note that we have already
-			 * allocated any needed amap (above).
-			 */
-
-			if (VM_MAPENT_ISWIRED(old_entry)) {
-
-			  /*
-			   * resolve all copy-on-write faults now
-			   * (note that there is nothing to do if
-			   * the old mapping does not have an amap).
-			   */
-			  if (old_entry->aref.ar_amap)
-			    amap_cow_now(new_map, new_entry);
-
-			} else {
-
-			  /*
-			   * setup mappings to trigger copy-on-write faults
-			   * we must write-protect the parent if it has
-			   * an amap and it is not already "needs_copy"...
-			   * if it is already "needs_copy" then the parent
-			   * has already been write-protected by a previous
-			   * fork operation.
-			   */
-
-			  if (old_entry->aref.ar_amap &&
-			      !UVM_ET_ISNEEDSCOPY(old_entry)) {
-			      if (old_entry->max_protection & VM_PROT_WRITE) {
-				pmap_protect(old_map->pmap,
-					     old_entry->start,
-					     old_entry->end,
-					     old_entry->protection &
-					     ~VM_PROT_WRITE);
-			      }
-			      old_entry->etype |= UVM_ET_NEEDSCOPY;
-			  }
-			}
+		case MAP_INHERIT_ZERO:
+			uvm_mapent_forkzero(new_map, old_map, old_entry);
 			break;
-		}  /* end of switch statement */
+		default:
+			KASSERT(0);
+			break;
+		}
 		old_entry = old_entry->next;
 	}
 

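For illustration only (not part of this diff): a minimal userland sketch
of the new MAP_INHERIT_ZERO inheritance mode, set via minherit(2).  The
parent writes a pattern into an anonymous mapping; after fork() the
child should see zero-filled memory at the same address while the parent
keeps its data.

	#include <sys/mman.h>
	#include <sys/wait.h>
	#include <err.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int
	main(void)
	{
		size_t len = 4096;
		char *p;

		p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		    MAP_ANON | MAP_PRIVATE, -1, 0);
		if (p == MAP_FAILED)
			err(1, "mmap");

		memset(p, 0xa5, len);

		/* Ask that this range be zero-filled in children. */
		if (minherit(p, len, MAP_INHERIT_ZERO) == -1)
			err(1, "minherit");

		switch (fork()) {
		case -1:
			err(1, "fork");
		case 0:
			/* child: expect 0x00 */
			printf("child sees 0x%02x\n", (unsigned char)p[0]);
			_exit(0);
		default:
			wait(NULL);
			/* parent: still 0xa5 */
			printf("parent sees 0x%02x\n", (unsigned char)p[0]);
		}
		return 0;
	}
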
Index: src/sys/uvm/uvm_page.c
diff -u src/sys/uvm/uvm_page.c:1.183.2.1 src/sys/uvm/uvm_page.c:1.183.2.2
--- src/sys/uvm/uvm_page.c:1.183.2.1	Mon Apr  7 03:37:33 2014
+++ src/sys/uvm/uvm_page.c	Sun Aug 10 06:57:00 2014
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_page.c,v 1.183.2.1 2014/04/07 03:37:33 tls Exp $	*/
+/*	$NetBSD: uvm_page.c,v 1.183.2.2 2014/08/10 06:57:00 tls Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -66,7 +66,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.183.2.1 2014/04/07 03:37:33 tls Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.183.2.2 2014/08/10 06:57:00 tls Exp $");
 
 #include "opt_ddb.h"
 #include "opt_uvmhist.h"
@@ -1686,15 +1686,10 @@ uvm_page_unbusy(struct vm_page **pgs, in
 void
 uvm_page_own(struct vm_page *pg, const char *tag)
 {
-	struct uvm_object *uobj;
-	struct vm_anon *anon;
 
 	KASSERT((pg->flags & (PG_PAGEOUT|PG_RELEASED)) == 0);
-
-	uobj = pg->uobject;
-	anon = pg->uanon;
-	KASSERT(uvm_page_locked_p(pg));
 	KASSERT((pg->flags & PG_WANTED) == 0);
+	KASSERT(uvm_page_locked_p(pg));
 
 	/* gain ownership? */
 	if (tag) {
@@ -1705,8 +1700,8 @@ uvm_page_own(struct vm_page *pg, const c
 			    pg->owner, pg->owner_tag);
 			panic("uvm_page_own");
 		}
-		pg->owner = (curproc) ? curproc->p_pid :  (pid_t) -1;
-		pg->lowner = (curlwp) ? curlwp->l_lid :  (lwpid_t) -1;
+		pg->owner = curproc->p_pid;
+		pg->lowner = curlwp->l_lid;
 		pg->owner_tag = tag;
 		return;
 	}

Index: src/sys/uvm/uvm_pglist.c
diff -u src/sys/uvm/uvm_pglist.c:1.63 src/sys/uvm/uvm_pglist.c:1.63.2.1
--- src/sys/uvm/uvm_pglist.c:1.63	Sun Sep 15 15:54:23 2013
+++ src/sys/uvm/uvm_pglist.c	Sun Aug 10 06:57:00 2014
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_pglist.c,v 1.63 2013/09/15 15:54:23 martin Exp $	*/
+/*	$NetBSD: uvm_pglist.c,v 1.63.2.1 2014/08/10 06:57:00 tls Exp $	*/
 
 /*-
  * Copyright (c) 1997 The NetBSD Foundation, Inc.
@@ -35,7 +35,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_pglist.c,v 1.63 2013/09/15 15:54:23 martin Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_pglist.c,v 1.63.2.1 2014/08/10 06:57:00 tls Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>

Index: src/sys/uvm/uvm_swap.c
diff -u src/sys/uvm/uvm_swap.c:1.168 src/sys/uvm/uvm_swap.c:1.168.2.1
--- src/sys/uvm/uvm_swap.c:1.168	Sun Mar 16 05:20:30 2014
+++ src/sys/uvm/uvm_swap.c	Sun Aug 10 06:57:00 2014
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_swap.c,v 1.168 2014/03/16 05:20:30 dholland Exp $	*/
+/*	$NetBSD: uvm_swap.c,v 1.168.2.1 2014/08/10 06:57:00 tls Exp $	*/
 
 /*
  * Copyright (c) 1995, 1996, 1997, 2009 Matthew R. Green
@@ -30,7 +30,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_swap.c,v 1.168 2014/03/16 05:20:30 dholland Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_swap.c,v 1.168.2.1 2014/08/10 06:57:00 tls Exp $");
 
 #include "opt_uvmhist.h"
 #include "opt_compat_netbsd.h"
@@ -492,13 +492,17 @@ sys_swapctl(struct lwp *l, const struct 
 	    || SCARG(uap, cmd) == SWAP_STATS13
 #endif
 	    ) {
-		if ((size_t)misc > (size_t)uvmexp.nswapdev)
-			misc = uvmexp.nswapdev;
-
-		if (misc == 0) {
+		if (misc < 0) {
 			error = EINVAL;
 			goto out;
 		}
+		if (misc == 0 || uvmexp.nswapdev == 0) {
+			error = 0;
+			goto out;
+		}
+		/* Make sure userland cannot exhaust kernel memory */
+		if ((size_t)misc > (size_t)uvmexp.nswapdev)
+			misc = uvmexp.nswapdev;
 		KASSERT(misc > 0);
 #if defined(COMPAT_13)
 		if (SCARG(uap, cmd) == SWAP_STATS13)
@@ -561,7 +565,6 @@ sys_swapctl(struct lwp *l, const struct 
 		if (SCARG(uap, cmd) == SWAP_ON &&
 		    copystr("miniroot", userpath, SWAP_PATH_MAX, &len))
 			panic("swapctl: miniroot copy failed");
-		KASSERT(len > 0);
 	} else {
 		struct pathbuf *pb;
 
@@ -1283,6 +1286,7 @@ const struct bdevsw swap_bdevsw = {
 	.d_ioctl = noioctl,
 	.d_dump = nodump,
 	.d_psize = nosize,
+	.d_discard = nodiscard,
 	.d_flag = D_OTHER
 };
 
@@ -1297,6 +1301,7 @@ const struct cdevsw swap_cdevsw = {
 	.d_poll = nopoll,
 	.d_mmap = nommap,
 	.d_kqfilter = nokqfilter,
+	.d_discard = nodiscard,
 	.d_flag = D_OTHER,
 };
 

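For illustration only (not part of this diff): a minimal userland sketch
of the SWAP_STATS path whose argument handling changes above.  With this
change a negative entry count fails with EINVAL, a zero count (or no
configured swap devices) simply returns 0, and an over-large count is
clamped to uvmexp.nswapdev so userland cannot force a huge temporary
kernel allocation.

	#include <sys/swap.h>
	#include <err.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <unistd.h>

	int
	main(void)
	{
		struct swapent *sep;
		int i, n;

		n = swapctl(SWAP_NSWAP, NULL, 0);
		if (n <= 0)
			errx(1, "no swap devices configured");

		sep = calloc(n, sizeof(*sep));
		if (sep == NULL)
			err(1, "calloc");

		/* Asking for more entries than exist is now clamped. */
		n = swapctl(SWAP_STATS, sep, n);
		if (n == -1)
			err(1, "swapctl(SWAP_STATS)");

		for (i = 0; i < n; i++)
			printf("%s: %d/%d blocks in use\n", sep[i].se_path,
			    sep[i].se_inuse, sep[i].se_nblks);
		free(sep);
		return 0;
	}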