Module Name:    src
Committed By:   chs
Date:           Mon Jun 10 00:35:47 UTC 2019

Modified Files:
        src/sys/kern: sysv_shm.c

Log Message:
shmctl(SHM_LOCK) does not need to mess with mappings of the shm segment;
uvm_obj_wirepages() is sufficient.  This fixes the problem reported in
https://syzkaller.appspot.com/bug?id=71f9271d761f5b6ed517a18030dc04f0135e6179
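
For context, the path changed below is reached through the standard SysV
shmctl(2) locking interface.  A minimal userland sketch (hypothetical segment
size, requires the privilege checked by the kauth call in shmctl1(), error
handling kept short) that exercises the SHM_LOCK/SHM_UNLOCK code:

    #include <sys/ipc.h>
    #include <sys/shm.h>
    #include <err.h>

    int
    main(void)
    {
    	/* Create an anonymous segment; one page is enough to hit the path. */
    	int shmid = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
    	if (shmid == -1)
    		err(1, "shmget");

    	/*
    	 * With this change, SHM_LOCK only wires the pages of the uvm
    	 * object backing the segment; existing mappings are left alone.
    	 */
    	if (shmctl(shmid, SHM_LOCK, NULL) == -1)
    		err(1, "shmctl(SHM_LOCK)");
    	if (shmctl(shmid, SHM_UNLOCK, NULL) == -1)
    		err(1, "shmctl(SHM_UNLOCK)");

    	/* Mark the segment for removal and exit. */
    	if (shmctl(shmid, IPC_RMID, NULL) == -1)
    		err(1, "shmctl(IPC_RMID)");
    	return 0;
    }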


To generate a diff of this commit:
cvs rdiff -u -r1.134 -r1.135 src/sys/kern/sysv_shm.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/kern/sysv_shm.c
diff -u src/sys/kern/sysv_shm.c:1.134 src/sys/kern/sysv_shm.c:1.135
--- src/sys/kern/sysv_shm.c:1.134	Wed Apr 10 10:03:50 2019
+++ src/sys/kern/sysv_shm.c	Mon Jun 10 00:35:47 2019
@@ -1,4 +1,4 @@
-/*	$NetBSD: sysv_shm.c,v 1.134 2019/04/10 10:03:50 pgoyette Exp $	*/
+/*	$NetBSD: sysv_shm.c,v 1.135 2019/06/10 00:35:47 chs Exp $	*/
 
 /*-
  * Copyright (c) 1999, 2007 The NetBSD Foundation, Inc.
@@ -61,7 +61,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: sysv_shm.c,v 1.134 2019/04/10 10:03:50 pgoyette Exp $");
+__KERNEL_RCSID(0, "$NetBSD: sysv_shm.c,v 1.135 2019/06/10 00:35:47 chs Exp $");
 
 #ifdef _KERNEL_OPT
 #include "opt_sysv.h"
@@ -249,55 +249,30 @@ shmmap_getprivate(struct proc *p)
 /*
  * Lock/unlock the memory.
  *  => must be called with shm_lock held;
- *  => called from one place, thus, inline;
  */
-static inline int
-shm_memlock(struct lwp *l, struct shmid_ds *shmseg, int shmid, int cmd)
+static int
+shm_memlock(struct shmid_ds *shmseg, int shmid, int cmd)
 {
-	struct proc *p = l->l_proc;
-	struct shmmap_entry *shmmap_se;
-	struct shmmap_state *shmmap_s;
 	size_t size;
 	int error;
 
 	KASSERT(mutex_owned(&shm_lock));
-	shmmap_s = shmmap_getprivate(p);
 
-	/* Find our shared memory address by shmid */
-	SLIST_FOREACH(shmmap_se, &shmmap_s->entries, next) {
-		if (shmmap_se->shmid != shmid)
-			continue;
-
-		size = (shmseg->shm_segsz + PGOFSET) & ~PGOFSET;
-
-		if (cmd == SHM_LOCK &&
-		    (shmseg->shm_perm.mode & SHMSEG_WIRED) == 0) {
-			/* Wire the object and map, then tag it */
-			error = uvm_obj_wirepages(shmseg->_shm_internal,
-			    0, size, NULL);
-			if (error)
-				return EIO;
-			error = uvm_map_pageable(&p->p_vmspace->vm_map,
-			    shmmap_se->va, shmmap_se->va + size, false, 0);
-			if (error) {
-				uvm_obj_unwirepages(shmseg->_shm_internal,
-				    0, size);
-				if (error == EFAULT)
-					error = ENOMEM;
-				return error;
-			}
-			shmseg->shm_perm.mode |= SHMSEG_WIRED;
-
-		} else if (cmd == SHM_UNLOCK &&
-		    (shmseg->shm_perm.mode & SHMSEG_WIRED) != 0) {
-			/* Unwire the object and map, then untag it */
-			uvm_obj_unwirepages(shmseg->_shm_internal, 0, size);
-			error = uvm_map_pageable(&p->p_vmspace->vm_map,
-			    shmmap_se->va, shmmap_se->va + size, true, 0);
-			if (error)
-				return EIO;
-			shmseg->shm_perm.mode &= ~SHMSEG_WIRED;
-		}
+	size = round_page(shmseg->shm_segsz);
+
+	if (cmd == SHM_LOCK && (shmseg->shm_perm.mode & SHMSEG_WIRED) == 0) {
+		/* Wire the object and map, then tag it */
+		error = uvm_obj_wirepages(shmseg->_shm_internal,
+		    0, size, NULL);
+		if (error)
+			return EIO;
+		shmseg->shm_perm.mode |= SHMSEG_WIRED;
+
+	} else if (cmd == SHM_UNLOCK &&
+	    (shmseg->shm_perm.mode & SHMSEG_WIRED) != 0) {
+		/* Unwire the object, then untag it */
+		uvm_obj_unwirepages(shmseg->_shm_internal, 0, size);
+		shmseg->shm_perm.mode &= ~SHMSEG_WIRED;
 	}
 
 	return 0;
@@ -462,16 +437,6 @@ sys_shmat(struct lwp *l, const struct sy
 	    UVM_MAPFLAG(prot, prot, UVM_INH_SHARE, UVM_ADV_RANDOM, flags));
 	if (error)
 		goto err_detach;
-	if (shm_use_phys || (shmseg->shm_perm.mode & SHMSEG_WIRED)) {
-		error = uvm_map_pageable(&vm->vm_map, attach_va,
-		    attach_va + size, false, 0);
-		if (error) {
-			if (error == EFAULT)
-				error = ENOMEM;
-			uvm_deallocate(&vm->vm_map, attach_va, size);
-			goto err_detach;
-		}
-	}
 
 	/* Set the new address, and update the time */
 	mutex_enter(&shm_lock);
@@ -595,7 +560,7 @@ shmctl1(struct lwp *l, int shmid, int cm
 		    (cmd == SHM_LOCK) ? KAUTH_REQ_SYSTEM_SYSVIPC_SHM_LOCK :
 		    KAUTH_REQ_SYSTEM_SYSVIPC_SHM_UNLOCK, NULL, NULL, NULL)) != 0)
 			break;
-		error = shm_memlock(l, shmseg, shmid, cmd);
+		error = shm_memlock(shmseg, shmid, cmd);
 		break;
 	default:
 		error = EINVAL;
@@ -717,7 +682,7 @@ sys_shmget(struct lwp *l, const struct s
 		mutex_exit(&shm_lock);
 		return ENOSPC;
 	}
-	size = (size + PGOFSET) & ~PGOFSET;
+	size = round_page(size);
 	if (shm_committed + btoc(size) > shminfo.shmall) {
 		mutex_exit(&shm_lock);
 		return ENOMEM;
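
A side note on the size rounding in the last hunk: round_page() is the same
page-size round-up that the old (size + PGOFSET) & ~PGOFSET expression
open-codes, so that part of the change is purely cosmetic.  A standalone
sketch (example page size; the macros are mirrored here as stand-ins rather
than taken from the kernel headers) showing that the two forms agree:

    #include <stdio.h>

    /* Assumed stand-ins for the kernel definitions: */
    #define PAGE_SIZE	4096UL				/* example page size */
    #define PGOFSET		(PAGE_SIZE - 1)		/* byte offset within a page */
    #define round_page(x)	(((x) + PGOFSET) & ~PGOFSET)

    int
    main(void)
    {
    	unsigned long segsz = 5000;	/* hypothetical segment size */

    	/* Old open-coded form and new round_page() form give the same value. */
    	printf("%lu %lu\n", (segsz + PGOFSET) & ~PGOFSET, round_page(segsz));
    	return 0;
    }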
