Module Name:    src
Committed By:   riastradh
Date:           Sun Dec 19 00:58:22 UTC 2021

Modified Files:
        src/sys/external/bsd/drm2/include/linux: srcu.h
        src/sys/external/bsd/drm2/linux: files.drmkms_linux
Added Files:
        src/sys/external/bsd/drm2/linux: linux_srcu.c

Log Message:
Draft SRCU implementation.


To generate a diff of this commit:
cvs rdiff -u -r1.1 -r1.2 src/sys/external/bsd/drm2/include/linux/srcu.h
cvs rdiff -u -r1.18 -r1.19 src/sys/external/bsd/drm2/linux/files.drmkms_linux
cvs rdiff -u -r0 -r1.1 src/sys/external/bsd/drm2/linux/linux_srcu.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/external/bsd/drm2/include/linux/srcu.h
diff -u src/sys/external/bsd/drm2/include/linux/srcu.h:1.1 src/sys/external/bsd/drm2/include/linux/srcu.h:1.2
--- src/sys/external/bsd/drm2/include/linux/srcu.h:1.1	Sun Dec 19 00:28:30 2021
+++ src/sys/external/bsd/drm2/include/linux/srcu.h	Sun Dec 19 00:58:22 2021
@@ -1,4 +1,4 @@
-/*	$NetBSD: srcu.h,v 1.1 2021/12/19 00:28:30 riastradh Exp $	*/
+/*	$NetBSD: srcu.h,v 1.2 2021/12/19 00:58:22 riastradh Exp $	*/
 
 /*-
  * Copyright (c) 2018 The NetBSD Foundation, Inc.
@@ -32,4 +32,28 @@
 #ifndef	_LINUX_SRCU_H_
 #define	_LINUX_SRCU_H_
 
+#include <sys/types.h>
+#include <sys/condvar.h>
+#include <sys/mutex.h>
+
+struct lwp;
+struct percpu;
+
+struct srcu {
+	struct percpu		*srcu_percpu;	/* struct srcu_cpu */
+	kmutex_t		srcu_lock;	/* serializes sync state below */
+	kcondvar_t		srcu_cv;	/* signalled when readers drain */
+	struct lwp		*srcu_sync;	/* lwp in synchronize_srcu, or NULL */
+	int64_t			srcu_total;	/* global draining-reader count */
+	volatile unsigned	srcu_gen;	/* generation; low bit = epoch */
+};
+
+/* Initialization and finalization; both may sleep.  */
+void	srcu_init(struct srcu *, const char *);
+void	srcu_fini(struct srcu *);
+
+/* Read sections: srcu_read_lock returns a ticket for srcu_read_unlock.  */
+int	srcu_read_lock(struct srcu *);
+void	srcu_read_unlock(struct srcu *, int);
+
+/* Wait for all current read sections to complete; may sleep.  */
+void	synchronize_srcu(struct srcu *);
+
 #endif	/* _LINUX_SRCU_H_ */

Index: src/sys/external/bsd/drm2/linux/files.drmkms_linux
diff -u src/sys/external/bsd/drm2/linux/files.drmkms_linux:1.18 src/sys/external/bsd/drm2/linux/files.drmkms_linux:1.19
--- src/sys/external/bsd/drm2/linux/files.drmkms_linux:1.18	Sun Dec 19 00:27:09 2021
+++ src/sys/external/bsd/drm2/linux/files.drmkms_linux	Sun Dec 19 00:58:22 2021
@@ -1,4 +1,4 @@
-#       $NetBSD: files.drmkms_linux,v 1.18 2021/12/19 00:27:09 riastradh Exp $
+#       $NetBSD: files.drmkms_linux,v 1.19 2021/12/19 00:58:22 riastradh Exp $
 
 define	drmkms_linux: i2cexec, i2c_bitbang
 
@@ -17,5 +17,6 @@ file	external/bsd/drm2/linux/linux_modul
 file	external/bsd/drm2/linux/linux_pci.c		drmkms_linux
 file	external/bsd/drm2/linux/linux_rcu.c		drmkms_linux
 file	external/bsd/drm2/linux/linux_reservation.c	drmkms_linux
+file	external/bsd/drm2/linux/linux_srcu.c		drmkms_linux
 file	external/bsd/drm2/linux/linux_writecomb.c	drmkms_linux
 file	external/bsd/drm2/linux/linux_ww_mutex.c	drmkms_linux

Added files:

Index: src/sys/external/bsd/drm2/linux/linux_srcu.c
diff -u /dev/null src/sys/external/bsd/drm2/linux/linux_srcu.c:1.1
--- /dev/null	Sun Dec 19 00:58:22 2021
+++ src/sys/external/bsd/drm2/linux/linux_srcu.c	Sun Dec 19 00:58:22 2021
@@ -0,0 +1,307 @@
+/*	$NetBSD: linux_srcu.c,v 1.1 2021/12/19 00:58:22 riastradh Exp $	*/
+
+/*-
+ * Copyright (c) 2018 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Taylor R. Campbell.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__KERNEL_RCSID(0, "$NetBSD: linux_srcu.c,v 1.1 2021/12/19 00:58:22 riastradh Exp $");
+
+/*
+ * SRCU: Sleepable RCU
+ *
+ *	(This is not exactly SRCU as Linux implements it; it is my
+ *	approximation of the semantics I think we need.)
+ *
+ *	For each srcu context, representing a related set of read
+ *	sections, on each CPU we store two counts of numbers of
+ *	readers in two epochs: active readers and draining readers.
+ *
+ *	All new srcu read sections get counted in the active epoch.
+ *	When there's no synchronize_srcu in progress, the draining
+ *	epoch has zero readers.  When a thread calls synchronize_srcu,
+ *	which must be serialized by the caller, it swaps the sense
+ *	of the epochs, issues an xcall to collect a global count of the
+ *	number of readers in the now-draining epoch, and waits for the
+ *	remainder to complete.
+ *
+ *	This is basically NetBSD localcount(9), but without the
+ *	restriction that the caller of localcount_drain must guarantee
+ *	no new readers -- srcu uses two counts per CPU instead of one
+ *	like localcount(9), and synchronize_srcu just waits for all
+ *	existing readers to drain while new ones count toward a new
+ *	epoch.
+ */
+
+#include <sys/types.h>
+#include <sys/condvar.h>
+#include <sys/mutex.h>
+#include <sys/percpu.h>
+#include <sys/proc.h>
+#include <sys/systm.h>
+#include <sys/xcall.h>
+
+#include <linux/srcu.h>
+
+/* Per-CPU state: reader counts for the two epochs (active/draining).  */
+struct srcu_cpu {
+	int64_t	src_count[2];
+};
+
+/*
+ * srcu_init(srcu, name)
+ *
+ *	Initialize the srcu state with the specified name.  Caller must
+ *	call srcu_fini when done.
+ *
+ *	name should be no longer than 8 characters; longer will be
+ *	truncated.
+ *
+ *	May sleep.
+ */
+void
+srcu_init(struct srcu *srcu, const char *name)
+{
+
+	ASSERT_SLEEPABLE();
+
+	/* No sync in progress, no draining readers, generation zero.  */
+	srcu->srcu_sync = NULL;
+	srcu->srcu_total = 0;
+	srcu->srcu_gen = 0;
+
+	/* Set up the per-CPU counts and the sync waiting machinery.  */
+	srcu->srcu_percpu = percpu_alloc(sizeof(struct srcu_cpu));
+	mutex_init(&srcu->srcu_lock, MUTEX_DEFAULT, IPL_VM);
+	cv_init(&srcu->srcu_cv, name);
+}
+
+/*
+ * srcu_fini(srcu)
+ *
+ *	Finalize an srcu state, which must not be in use right now.  If
+ *	any srcu read sections might be active, caller must wait for
+ *	them to complete with synchronize_srcu.
+ *
+ *	May sleep.
+ */
+void
+srcu_fini(struct srcu *srcu)
+{
+
+	ASSERT_SLEEPABLE();
+
+	/* Finalizing while a synchronize_srcu is in flight is a bug.  */
+	KASSERTMSG((srcu->srcu_sync == NULL),
+	    "srcu_fini in lwp %p while synchronize_srcu running in lwp %p",
+	    curlwp, srcu->srcu_sync);
+	/* Tear down in the reverse order of srcu_init.  */
+	cv_destroy(&srcu->srcu_cv);
+	mutex_destroy(&srcu->srcu_lock);
+	percpu_free(srcu->srcu_percpu, sizeof(struct srcu_cpu));
+}
+
+/*
+ * srcu_adjust(srcu, gen, delta)
+ *
+ *	Internal subroutine: Add delta to the local CPU's count of
+ *	readers in the generation gen.
+ *
+ *	Never sleeps.
+ */
+static void
+srcu_adjust(struct srcu *srcu, unsigned gen, int delta)
+{
+	struct srcu_cpu *sc;
+
+	/* The low bit of the generation selects the active epoch.  */
+	sc = percpu_getref(srcu->srcu_percpu);
+	sc->src_count[gen & 1] += delta;
+	percpu_putref(srcu->srcu_percpu);
+}
+
+/*
+ * srcu_read_lock(srcu)
+ *
+ *	Enter an srcu read section and return a ticket for it.  Any
+ *	subsequent synchronize_srcu will wait until this thread calls
+ *	srcu_read_unlock(srcu, ticket).
+ *
+ *	Never sleeps.
+ */
+int
+srcu_read_lock(struct srcu *srcu)
+{
+	unsigned gen;
+
+	/*
+	 * Prevent xcall while we fetch the generation and adjust the
+	 * count.
+	 */
+	kpreempt_disable();
+	gen = srcu->srcu_gen;
+	/* Fetch the generation once before incrementing the count.  */
+	__insn_barrier();
+	srcu_adjust(srcu, gen, +1);
+	kpreempt_enable();
+
+	/*
+	 * Increment the count in our generation before doing anything
+	 * else on this CPU.
+	 *
+	 * No stronger, inter-CPU memory barrier is needed: if there is
+	 * a concurrent synchronize_srcu, it will issue an xcall that
+	 * functions as a stronger memory barrier.
+	 */
+	__insn_barrier();
+
+	/* The generation doubles as the reader's ticket.  */
+	return gen;
+}
+
+/*
+ * srcu_read_unlock(srcu, ticket)
+ *
+ *	Exit an srcu read section started with srcu_read_lock returning
+ *	ticket.  If there is a pending synchronize_srcu and we might be
+ *	the last reader, notify it.
+ *
+ *	Never sleeps.
+ */
+void
+srcu_read_unlock(struct srcu *srcu, int ticket)
+{
+	/* The ticket is the generation this reader was counted in.  */
+	unsigned gen = ticket;
+
+	/*
+	 * Make sure all side effects have completed on this CPU before
+	 * decrementing the count.
+	 *
+	 * No stronger, inter-CPU memory barrier is needed: if there is
+	 * a concurrent synchronize_srcu, it will issue an xcall that
+	 * functions as a stronger memory barrier.
+	 */
+	__insn_barrier();
+
+	/*
+	 * Prevent xcall while we determine whether we need to notify a
+	 * sync and decrement the count in our generation.
+	 */
+	kpreempt_disable();
+	if (__predict_true(gen == srcu->srcu_gen)) {
+		/*
+		 * Fast path: just decrement the local count.  If a
+		 * sync has begun and incremented gen after we observed
+		 * it, it will issue an xcall that will run after this
+		 * kpreempt_disable section to collect our local count.
+		 */
+		srcu_adjust(srcu, gen, -1);
+	} else {
+		/*
+		 * Slow path: decrement the total count, and if it goes
+		 * to zero, notify the sync in progress.  The xcall may
+		 * have already run, or it may have yet to run; since
+		 * we can't tell which, we must contribute to the
+		 * global count, not to our local count.
+		 */
+		mutex_spin_enter(&srcu->srcu_lock);
+		KASSERT(srcu->srcu_sync != NULL);
+		if (--srcu->srcu_total == 0)
+			cv_broadcast(&srcu->srcu_cv);
+		mutex_spin_exit(&srcu->srcu_lock);
+	}
+	kpreempt_enable();
+}
+
+/*
+ * synchronize_srcu_xc(a, b)
+ *
+ *	Cross-call function for synchronize_srcu: a is the struct srcu
+ *	pointer; b is ignored.  Transfer the local count of srcu
+ *	readers on this CPU in the inactive epoch to the global count
+ *	under the srcu sync lock.
+ */
+static void
+synchronize_srcu_xc(void *a, void *b)
+{
+	struct srcu *srcu = a;
+	struct srcu_cpu *cpu;
+	unsigned gen, epoch;
+	uint64_t local;
+
+	/* Operate under the sync lock.  Blocks preemption as side effect.  */
+	mutex_spin_enter(&srcu->srcu_lock);
+
+	gen = srcu->srcu_gen;	/* active generation */
+	epoch = 1 ^ (gen & 1);	/* draining epoch */
+
+	/* Transfer the local count to the global count.  */
+	cpu = percpu_getref(srcu->srcu_percpu);
+	local = cpu->src_count[epoch];
+	srcu->srcu_total += local;
+	/* Subtract rather than assign zero so concurrent-safe if count moved. */
+	cpu->src_count[epoch] -= local; /* i.e., cpu->src_count[epoch] = 0 */
+	KASSERT(cpu->src_count[epoch] == 0);
+	percpu_putref(srcu->srcu_percpu);
+
+	mutex_spin_exit(&srcu->srcu_lock);
+}
+
+/*
+ * synchronize_srcu(srcu)
+ *
+ *	Wait for all srcu readers on all CPUs that may have begun
+ *	before synchronize_srcu to complete.
+ *
+ *	May sleep.  (Practically guaranteed to sleep!)
+ */
+void
+synchronize_srcu(struct srcu *srcu)
+{
+
+	ASSERT_SLEEPABLE();
+
+	/* Start a sync, and advance the active generation.  */
+	mutex_spin_enter(&srcu->srcu_lock);
+	/* Wait for any other sync in progress; syncs are serialized.  */
+	while (srcu->srcu_sync != NULL)
+		cv_wait(&srcu->srcu_cv, &srcu->srcu_lock);
+	KASSERT(srcu->srcu_total == 0);
+	srcu->srcu_sync = curlwp;
+	srcu->srcu_gen++;
+	mutex_spin_exit(&srcu->srcu_lock);
+
+	/*
+	 * Wait for all CPUs to witness the change to the active
+	 * generation, and collect their local counts in the draining
+	 * epoch into the global count.
+	 */
+	xc_wait(xc_broadcast(0, synchronize_srcu_xc, srcu, NULL));
+
+	/*
+	 * Wait for the global count of users in the draining epoch to
+	 * drain to zero.
+	 */
+	mutex_spin_enter(&srcu->srcu_lock);
+	while (srcu->srcu_total != 0)
+		cv_wait(&srcu->srcu_cv, &srcu->srcu_lock);
+	/* Done: release the sync slot and wake any queued sync.  */
+	srcu->srcu_sync = NULL;
+	cv_broadcast(&srcu->srcu_cv);
+	mutex_spin_exit(&srcu->srcu_lock);
+}

Reply via email to