Module Name:    src
Committed By:   riastradh
Date:           Thu May 21 21:55:55 UTC 2015

Modified Files:
        src/sys/external/bsd/drm2/include/linux: ww_mutex.h
        src/sys/external/bsd/drm2/linux: linux_ww_mutex.c

Log Message:
Adapt ww_mutex to use LOCKDEBUG.

Should help track down PR 49862.
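
For readers without the LOCKDEBUG background: the change registers each
ww_mutex with LOCKDEBUG when it is initialized and reports every acquire
and release through it, so misuse is diagnosed like any native kernel
lock.  A minimal sketch of the lifecycle, paraphrasing the hunks below
(my_class is hypothetical; the hooks take effect only under the
LOCKDEBUG kernel option):

#include <linux/ww_mutex.h>

static struct ww_class my_class;	/* hypothetical ww_class */

static void
lockdebug_lifecycle(void)
{
	struct ww_mutex m;

	ww_mutex_init(&m, &my_class);	/* LOCKDEBUG_ALLOC(&m, &ww_lockops, ...) */
	ww_mutex_lock(&m, NULL);	/* WW_WANTLOCK(&m), then WW_LOCKED(&m) */
	ww_mutex_unlock(&m);		/* WW_UNLOCKED(&m) */
	ww_mutex_destroy(&m);		/* LOCKDEBUG_FREE(m.wwm_debug, &m) */
}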


To generate a diff of this commit:
cvs rdiff -u -r1.10 -r1.11 src/sys/external/bsd/drm2/include/linux/ww_mutex.h
cvs rdiff -u -r1.1 -r1.2 src/sys/external/bsd/drm2/linux/linux_ww_mutex.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/external/bsd/drm2/include/linux/ww_mutex.h
diff -u src/sys/external/bsd/drm2/include/linux/ww_mutex.h:1.10 src/sys/external/bsd/drm2/include/linux/ww_mutex.h:1.11
--- src/sys/external/bsd/drm2/include/linux/ww_mutex.h:1.10	Thu Jan  8 23:35:47 2015
+++ src/sys/external/bsd/drm2/include/linux/ww_mutex.h	Thu May 21 21:55:55 2015
@@ -1,4 +1,4 @@
-/*	$NetBSD: ww_mutex.h,v 1.10 2015/01/08 23:35:47 riastradh Exp $	*/
+/*	$NetBSD: ww_mutex.h,v 1.11 2015/05/21 21:55:55 riastradh Exp $	*/
 
 /*-
  * Copyright (c) 2014 The NetBSD Foundation, Inc.
@@ -56,7 +56,6 @@ struct ww_acquire_ctx {
 };
 
 struct ww_mutex {
-	kmutex_t		wwm_lock;
 	enum ww_mutex_state {
 		WW_UNLOCKED,	/* nobody owns it */
 		WW_OWNED,	/* owned by a lwp without a context */
@@ -67,9 +66,17 @@ struct ww_mutex {
 		struct lwp		*owner;
 		struct ww_acquire_ctx	*ctx;
 	}			wwm_u;
+	/*
+	 * XXX wwm_lock must *not* be first, so that the ww_mutex has a
+	 * different address from the kmutex for LOCKDEBUG purposes.
+	 */
+	kmutex_t		wwm_lock;
 	struct ww_class		*wwm_class;
 	struct rb_tree		wwm_waiters;
 	kcondvar_t		wwm_cv;
+#ifdef LOCKDEBUG
+	bool			wwm_debug;
+#endif
 };
 
 /* XXX Make the nm output a little more greppable...  */
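
A note on the XXX comment above: LOCKDEBUG keys its lock records by
address, so if wwm_lock stayed at offset 0 the ww_mutex and its embedded
kmutex would register under the same address and collide.  A
hypothetical compile-time guard, not part of the commit (assumes
offsetof and __CTASSERT are available from the kernel headers):

#include <sys/cdefs.h>

#include <linux/ww_mutex.h>

/* The embedded kmutex must not share the ww_mutex's address. */
__CTASSERT(offsetof(struct ww_mutex, wwm_lock) != 0);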

Index: src/sys/external/bsd/drm2/linux/linux_ww_mutex.c
diff -u src/sys/external/bsd/drm2/linux/linux_ww_mutex.c:1.1 src/sys/external/bsd/drm2/linux/linux_ww_mutex.c:1.2
--- src/sys/external/bsd/drm2/linux/linux_ww_mutex.c:1.1	Thu Jan  8 23:35:47 2015
+++ src/sys/external/bsd/drm2/linux/linux_ww_mutex.c	Thu May 21 21:55:55 2015
@@ -1,4 +1,4 @@
-/*	$NetBSD: linux_ww_mutex.c,v 1.1 2015/01/08 23:35:47 riastradh Exp $	*/
+/*	$NetBSD: linux_ww_mutex.c,v 1.2 2015/05/21 21:55:55 riastradh Exp $	*/
 
 /*-
  * Copyright (c) 2014 The NetBSD Foundation, Inc.
@@ -30,17 +30,28 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: linux_ww_mutex.c,v 1.1 2015/01/08 23:35:47 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: linux_ww_mutex.c,v 1.2 2015/05/21 21:55:55 riastradh Exp $");
 
 #include <sys/types.h>
 #include <sys/atomic.h>
 #include <sys/condvar.h>
+#include <sys/lockdebug.h>
 #include <sys/lwp.h>
 #include <sys/mutex.h>
 #include <sys/rbtree.h>
 
 #include <linux/ww_mutex.h>
 
+#define	WW_WANTLOCK(WW)							      \
+	LOCKDEBUG_WANTLOCK((WW)->wwm_debug, (WW),			      \
+	    (uintptr_t)__builtin_return_address(0), 0)
+#define	WW_LOCKED(WW)							      \
+	LOCKDEBUG_LOCKED((WW)->wwm_debug, (WW), NULL,			      \
+	    (uintptr_t)__builtin_return_address(0), 0)
+#define	WW_UNLOCKED(WW)							      \
+	LOCKDEBUG_UNLOCKED((WW)->wwm_debug, (WW),			      \
+	    (uintptr_t)__builtin_return_address(0), 0)
+
 static int
 ww_acquire_ctx_compare(void *cookie __unused, const void *va, const void *vb)
 {
@@ -109,6 +120,53 @@ ww_acquire_fini(struct ww_acquire_ctx *c
 	ctx->wwx_owner = NULL;
 }
 
+#ifdef LOCKDEBUG
+static void
+ww_dump(volatile void *cookie)
+{
+	volatile struct ww_mutex *mutex = cookie;
+
+	printf_nolog("%-13s: ", "state");
+	switch (mutex->wwm_state) {
+	case WW_UNLOCKED:
+		printf_nolog("unlocked\n");
+		break;
+	case WW_OWNED:
+		printf_nolog("owned by lwp\n");
+		printf_nolog("%-13s: %p\n", "owner", mutex->wwm_u.owner);
+		printf_nolog("%-13s: %s\n", "waiters",
+		    cv_has_waiters(__UNVOLATILE(&mutex->wwm_cv))
+			? "yes" : "no");
+		break;
+	case WW_CTX:
+		printf_nolog("owned via ctx\n");
+		printf_nolog("%-13s: %p\n", "context", mutex->wwm_u.ctx);
+		printf_nolog("%-13s: %p\n", "lwp",
+		    mutex->wwm_u.ctx->wwx_owner);
+		printf_nolog("%-13s: %s\n", "waiters",
+		    cv_has_waiters(__UNVOLATILE(&mutex->wwm_cv))
+			? "yes" : "no");
+		break;
+	case WW_WANTOWN:
+		printf_nolog("owned via ctx\n");
+		printf_nolog("%-13s: %p\n", "context", mutex->wwm_u.ctx);
+		printf_nolog("%-13s: %p\n", "lwp",
+		    mutex->wwm_u.ctx->wwx_owner);
+		printf_nolog("%-13s: %s\n", "waiters", "yes (noctx)");
+		break;
+	default:
+		printf_nolog("unknown\n");
+		break;
+	}
+}
+
+static lockops_t ww_lockops = {
+	.lo_name = "Wait/wound mutex",
+	.lo_type = LOCKOPS_SLEEP,
+	.lo_dump = ww_dump,
+};
+#endif
+
 void
 ww_mutex_init(struct ww_mutex *mutex, struct ww_class *class)
 {
@@ -122,12 +180,21 @@ ww_mutex_init(struct ww_mutex *mutex, st
 	mutex->wwm_class = class;
 	rb_tree_init(&mutex->wwm_waiters, &ww_acquire_ctx_rb_ops);
 	cv_init(&mutex->wwm_cv, "linuxwwm");
+#ifdef LOCKDEBUG
+	mutex->wwm_debug = LOCKDEBUG_ALLOC(mutex, &ww_lockops,
+	    (uintptr_t)__builtin_return_address(0));
+#endif
 }
 
 void
 ww_mutex_destroy(struct ww_mutex *mutex)
 {
 
+	KASSERT(mutex->wwm_state == WW_UNLOCKED);
+
+#ifdef LOCKDEBUG
+	LOCKDEBUG_FREE(mutex->wwm_debug, mutex);
+#endif
 	cv_destroy(&mutex->wwm_cv);
 #if 0
 	rb_tree_destroy(&mutex->wwm_waiters, &ww_acquire_ctx_rb_ops);
@@ -267,6 +334,7 @@ retry:	switch (mutex->wwm_state) {
 	case WW_UNLOCKED:
 		mutex->wwm_state = WW_OWNED;
 		mutex->wwm_u.owner = curlwp;
+		WW_LOCKED(mutex);
 		break;
 	case WW_OWNED:
 		KASSERTMSG((mutex->wwm_u.owner != curlwp),
@@ -301,6 +369,7 @@ retry:	switch (mutex->wwm_state) {
 	case WW_UNLOCKED:
 		mutex->wwm_state = WW_OWNED;
 		mutex->wwm_u.owner = curlwp;
+		WW_LOCKED(mutex);
 		break;
 	case WW_OWNED:
 		KASSERTMSG((mutex->wwm_u.owner != curlwp),
@@ -335,9 +404,15 @@ int
 ww_mutex_lock(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
 {
 
+	/*
+	 * We do not WW_WANTLOCK at the beginning because we may
+	 * correctly already hold it, if we have a context, in which
+	 * case we must return EALREADY to the caller.
+	 */
 	ASSERT_SLEEPABLE();
 
 	if (ctx == NULL) {
+		WW_WANTLOCK(mutex);
 		ww_mutex_lock_noctx(mutex);
 		return 0;
 	}
@@ -355,10 +430,13 @@ ww_mutex_lock(struct ww_mutex *mutex, st
 	mutex_enter(&mutex->wwm_lock);
 retry:	switch (mutex->wwm_state) {
 	case WW_UNLOCKED:
+		WW_WANTLOCK(mutex);
 		mutex->wwm_state = WW_CTX;
 		mutex->wwm_u.ctx = ctx;
+		WW_LOCKED(mutex);
 		goto locked;
 	case WW_OWNED:
+		WW_WANTLOCK(mutex);
 		KASSERTMSG((mutex->wwm_u.owner != curlwp),
 		    "locking %p against myself: %p", mutex, curlwp);
 		ww_mutex_state_wait(mutex, WW_OWNED);
@@ -372,10 +450,12 @@ retry:	switch (mutex->wwm_state) {
 		panic("wait/wound mutex %p in bad state: %d",
 		    mutex, (int)mutex->wwm_state);
 	}
+
 	KASSERT(mutex->wwm_state == WW_CTX);
 	KASSERT(mutex->wwm_u.ctx != NULL);
 	KASSERT((mutex->wwm_u.ctx == ctx) ||
 	    (mutex->wwm_u.ctx->wwx_owner != curlwp));
+
 	if (mutex->wwm_u.ctx == ctx) {
 		/*
 		 * We already own it.  Yes, this can happen correctly
@@ -384,7 +464,15 @@ retry:	switch (mutex->wwm_state) {
 		 */
 		mutex_exit(&mutex->wwm_lock);
 		return -EALREADY;
-	} else if (mutex->wwm_u.ctx->wwx_ticket < ctx->wwx_ticket) {
+	}
+
+	/*
+	 * We do not own it.  We can safely assert to LOCKDEBUG that we
+	 * want it.
+	 */
+	WW_WANTLOCK(mutex);
+
+	if (mutex->wwm_u.ctx->wwx_ticket < ctx->wwx_ticket) {
 		/*
 		 * Owned by a higher-priority party.  Tell the caller
 		 * to unlock everything and start over.
@@ -394,14 +482,14 @@ retry:	switch (mutex->wwm_state) {
 		    ctx->wwx_class, mutex->wwm_u.ctx->wwx_class);
 		mutex_exit(&mutex->wwm_lock);
 		return -EDEADLK;
-	} else {
-		/*
-		 * Owned by a lower-priority party.  Ask that party to
-		 * wake us when it is done or it realizes it needs to
-		 * back off.
-		 */
-		ww_mutex_lock_wait(mutex, ctx);
 	}
+
+	/*
+	 * Owned by a lower-priority party.  Ask that party to wake us
+	 * when it is done or it realizes it needs to back off.
+	 */
+	ww_mutex_lock_wait(mutex, ctx);
+
 locked:	ctx->wwx_acquired++;
 	KASSERT((mutex->wwm_state == WW_CTX) ||
 	    (mutex->wwm_state == WW_WANTOWN));
@@ -415,10 +503,17 @@ ww_mutex_lock_interruptible(struct ww_mu
 {
 	int ret;
 
+	/*
+	 * We do not WW_WANTLOCK at the beginning because we may
+	 * correctly already hold it, if we have a context, in which
+	 * case we must return EALREADY to the caller.
+	 */
 	ASSERT_SLEEPABLE();
 
-	if (ctx == NULL)
+	if (ctx == NULL) {
+		WW_WANTLOCK(mutex);
 		return ww_mutex_lock_noctx_sig(mutex);
+	}
 
 	KASSERTMSG((ctx->wwx_owner == curlwp),
 	    "ctx %p owned by %p, not self (%p)", ctx, ctx->wwx_owner, curlwp);
@@ -433,10 +528,13 @@ ww_mutex_lock_interruptible(struct ww_mu
 	mutex_enter(&mutex->wwm_lock);
 retry:	switch (mutex->wwm_state) {
 	case WW_UNLOCKED:
+		WW_WANTLOCK(mutex);
 		mutex->wwm_state = WW_CTX;
 		mutex->wwm_u.ctx = ctx;
+		WW_LOCKED(mutex);
 		goto locked;
 	case WW_OWNED:
+		WW_WANTLOCK(mutex);
 		KASSERTMSG((mutex->wwm_u.owner != curlwp),
 		    "locking %p against myself: %p", mutex, curlwp);
 		ret = ww_mutex_state_wait_sig(mutex, WW_OWNED);
@@ -454,10 +552,12 @@ retry:	switch (mutex->wwm_state) {
 		panic("wait/wound mutex %p in bad state: %d",
 		    mutex, (int)mutex->wwm_state);
 	}
+
 	KASSERT(mutex->wwm_state == WW_CTX);
 	KASSERT(mutex->wwm_u.ctx != NULL);
 	KASSERT((mutex->wwm_u.ctx == ctx) ||
 	    (mutex->wwm_u.ctx->wwx_owner != curlwp));
+
 	if (mutex->wwm_u.ctx == ctx) {
 		/*
 		 * We already own it.  Yes, this can happen correctly
@@ -466,7 +566,15 @@ retry:	switch (mutex->wwm_state) {
 		 */
 		mutex_exit(&mutex->wwm_lock);
 		return -EALREADY;
-	} else if (mutex->wwm_u.ctx->wwx_ticket < ctx->wwx_ticket) {
+	}
+
+	/*
+	 * We do not own it.  We can safely assert to LOCKDEBUG that we
+	 * want it.
+	 */
+	WW_WANTLOCK(mutex);
+
+	if (mutex->wwm_u.ctx->wwx_ticket < ctx->wwx_ticket) {
 		/*
 		 * Owned by a higher-priority party.  Tell the caller
 		 * to unlock everything and start over.
@@ -476,16 +584,16 @@ retry:	switch (mutex->wwm_state) {
 		    ctx->wwx_class, mutex->wwm_u.ctx->wwx_class);
 		mutex_exit(&mutex->wwm_lock);
 		return -EDEADLK;
-	} else {
-		/*
-		 * Owned by a lower-priority party.  Ask that party to
-		 * wake us when it is done or it realizes it needs to
-		 * back off.
-		 */
-		ret = ww_mutex_lock_wait_sig(mutex, ctx);
-		if (ret)
-			goto out;
 	}
+
+	/*
+	 * Owned by a lower-priority party.  Ask that party to wake us
+	 * when it is done or it realizes it needs to back off.
+	 */
+	ret = ww_mutex_lock_wait_sig(mutex, ctx);
+	if (ret)
+		goto out;
+
 locked:	KASSERT((mutex->wwm_state == WW_CTX) ||
 	    (mutex->wwm_state == WW_WANTOWN));
 	KASSERT(mutex->wwm_u.ctx == ctx);
@@ -499,6 +607,8 @@ void
 ww_mutex_lock_slow(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
 {
 
+	/* Caller must not try to lock against self here.  */
+	WW_WANTLOCK(mutex);
 	ASSERT_SLEEPABLE();
 
 	if (ctx == NULL) {
@@ -524,6 +634,7 @@ retry:	switch (mutex->wwm_state) {
 	case WW_UNLOCKED:
 		mutex->wwm_state = WW_CTX;
 		mutex->wwm_u.ctx = ctx;
+		WW_LOCKED(mutex);
 		goto locked;
 	case WW_OWNED:
 		KASSERTMSG((mutex->wwm_u.owner != curlwp),
@@ -539,15 +650,18 @@ retry:	switch (mutex->wwm_state) {
 		panic("wait/wound mutex %p in bad state: %d",
 		    mutex, (int)mutex->wwm_state);
 	}
+
 	KASSERT(mutex->wwm_state == WW_CTX);
 	KASSERT(mutex->wwm_u.ctx != NULL);
 	KASSERTMSG((mutex->wwm_u.ctx->wwx_owner != curlwp),
 	    "locking %p against myself: %p", mutex, curlwp);
+
 	/*
 	 * Owned by another party, of any priority.  Ask that party to
 	 * wake us when it's done.
 	 */
 	ww_mutex_lock_wait(mutex, ctx);
+
 locked:	KASSERT((mutex->wwm_state == WW_CTX) ||
 	    (mutex->wwm_state == WW_WANTOWN));
 	KASSERT(mutex->wwm_u.ctx == ctx);
@@ -561,6 +675,7 @@ ww_mutex_lock_slow_interruptible(struct 
 {
 	int ret;
 
+	WW_WANTLOCK(mutex);
 	ASSERT_SLEEPABLE();
 
 	if (ctx == NULL)
@@ -584,6 +699,7 @@ retry:	switch (mutex->wwm_state) {
 	case WW_UNLOCKED:
 		mutex->wwm_state = WW_CTX;
 		mutex->wwm_u.ctx = ctx;
+		WW_LOCKED(mutex);
 		goto locked;
 	case WW_OWNED:
 		KASSERTMSG((mutex->wwm_u.owner != curlwp),
@@ -603,10 +719,12 @@ retry:	switch (mutex->wwm_state) {
 		panic("wait/wound mutex %p in bad state: %d",
 		    mutex, (int)mutex->wwm_state);
 	}
+
 	KASSERT(mutex->wwm_state == WW_CTX);
 	KASSERT(mutex->wwm_u.ctx != NULL);
 	KASSERTMSG((mutex->wwm_u.ctx->wwx_owner != curlwp),
 	    "locking %p against myself: %p", mutex, curlwp);
+
 	/*
 	 * Owned by another party, of any priority.  Ask that party to
 	 * wake us when it's done.
@@ -614,6 +732,7 @@ retry:	switch (mutex->wwm_state) {
 	ret = ww_mutex_lock_wait_sig(mutex, ctx);
 	if (ret)
 		goto out;
+
 locked:	KASSERT((mutex->wwm_state == WW_CTX) ||
 	    (mutex->wwm_state == WW_WANTOWN));
 	KASSERT(mutex->wwm_u.ctx == ctx);
@@ -632,6 +751,8 @@ ww_mutex_trylock(struct ww_mutex *mutex)
 	if (mutex->wwm_state == WW_UNLOCKED) {
 		mutex->wwm_state = WW_OWNED;
 		mutex->wwm_u.owner = curlwp;
+		WW_WANTLOCK(mutex);
+		WW_LOCKED(mutex);
 		ret = 1;
 	} else {
 		KASSERTMSG(((mutex->wwm_state != WW_OWNED) ||
@@ -702,6 +823,7 @@ ww_mutex_unlock(struct ww_mutex *mutex)
 		mutex->wwm_state = WW_UNLOCKED;
 		break;
 	}
+	WW_UNLOCKED(mutex);
 	cv_broadcast(&mutex->wwm_cv);
 	mutex_exit(&mutex->wwm_lock);
 }
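
As context for the deferred WW_WANTLOCK calls: when a context is
supplied, ww_mutex_lock() may legitimately be invoked on a lock the
context already holds, in which case it must return -EALREADY rather
than trip LOCKDEBUG, so the want-lock assertion is made only once we
know we do not own the lock.  A hedged sketch of the caller-side
acquire/backoff loop this protocol serves (two locks of one class;
lock_both and my_ww_class are illustrative, not from the commit):

#include <sys/errno.h>

#include <linux/ww_mutex.h>

static struct ww_class my_ww_class;	/* hypothetical ww_class */

static void
lock_both(struct ww_mutex *a, struct ww_mutex *b)
{
	struct ww_acquire_ctx ctx;
	struct ww_mutex *contended = NULL;
	int ret;

	ww_acquire_init(&ctx, &my_ww_class);
retry:
	if (contended != NULL)
		/* We were wounded: wait our turn on the contended lock. */
		ww_mutex_lock_slow(contended, &ctx);
	ret = ww_mutex_lock(a, &ctx);	/* -EALREADY if contended == a */
	if (ret == -EDEADLK) {
		if (contended == b)
			ww_mutex_unlock(b);
		contended = a;
		goto retry;
	}
	ret = ww_mutex_lock(b, &ctx);	/* -EALREADY if contended == b */
	if (ret == -EDEADLK) {
		ww_mutex_unlock(a);
		contended = b;
		goto retry;
	}

	/* ...critical section covering a and b... */

	ww_mutex_unlock(b);
	ww_mutex_unlock(a);
	ww_acquire_fini(&ctx);
}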
