Module Name:    src
Committed By:   riastradh
Date:           Mon Aug 26 15:19:22 UTC 2024

Modified Files:
        src/lib/libc/gen: arc4random.c

Log Message:
arc4random(3): Reseed if system entropy epoch changes.

This can happen, for example, if the system is a VM instance, and the
VM is cloned.

This incurs the cost of a system call on every arc4random call, which
is unfortunate, but

1. we don't currently have a (machine-independent) mechanism by which
   the kernel can share a read-only page with userland, which would
   enable a cheaper access path to the entropy epoch; and

2. the algorithm here -- a simple application of ChaCha -- is likely
   also a bottleneck and could be much cheaper by

   (a) using sys/crypto/chacha for machine-dependent vectorized
       ChaCha code, and

   (b) filling a buffer (somewhere between a cipher block and a page)
       in a batch at a time, instead of running ChaCha to generate
       only 32 bytes at a time.

So although this might be a performance hit, the security benefit is
worthwhile, and we have a clear path to later improvements that will
more than reverse the performance hit.

PR kern/58632: getentropy(2) and arc4random(3) do not reseed on VM
fork


To generate a diff of this commit:
cvs rdiff -u -r1.34 -r1.35 src/lib/libc/gen/arc4random.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/lib/libc/gen/arc4random.c
diff -u src/lib/libc/gen/arc4random.c:1.34 src/lib/libc/gen/arc4random.c:1.35
--- src/lib/libc/gen/arc4random.c:1.34	Sat Jan 20 14:52:47 2024
+++ src/lib/libc/gen/arc4random.c	Mon Aug 26 15:19:22 2024
@@ -1,4 +1,4 @@
-/*	$NetBSD: arc4random.c,v 1.34 2024/01/20 14:52:47 christos Exp $	*/
+/*	$NetBSD: arc4random.c,v 1.35 2024/08/26 15:19:22 riastradh Exp $	*/
 
 /*-
  * Copyright (c) 2014 The NetBSD Foundation, Inc.
@@ -52,7 +52,7 @@
  */
 
 #include <sys/cdefs.h>
-__RCSID("$NetBSD: arc4random.c,v 1.34 2024/01/20 14:52:47 christos Exp $");
+__RCSID("$NetBSD: arc4random.c,v 1.35 2024/08/26 15:19:22 riastradh Exp $");
 
 #include "namespace.h"
 #include "reentrant.h"
@@ -65,6 +65,7 @@ __RCSID("$NetBSD: arc4random.c,v 1.34 20
 
 #include <assert.h>
 #include <sha2.h>
+#include <stdatomic.h>
 #include <stdbool.h>
 #include <stdint.h>
 #include <stdlib.h>
@@ -398,11 +399,67 @@ crypto_onetimestream(const void *seed, v
 		(void)explicit_memset(block, 0, sizeof block);
 }
 
+/*
+ * entropy_epoch()
+ *
+ *	Return the current entropy epoch, from the sysctl node
+ *	kern.entropy.epoch.
+ *
+ *	The entropy epoch is never zero.  Initially, or on error, it is
+ *	(unsigned)-1.  It may wrap around but it skips (unsigned)-1 and
+ *	0 when it does.  Changes happen less than once per second, so
+ *	wraparound will only affect systems after 136 years of uptime.
+ *
+ *	XXX This should get it from a page shared read-only by kernel
+ *	with userland, but until we implement such a mechanism, this
+ *	sysctl -- incurring the cost of a syscall -- will have to
+ *	serve.
+ */
+static unsigned
+entropy_epoch(void)
+{
+	static atomic_int mib0[3];
+	static atomic_bool initialized = false;
+	int mib[3];
+	unsigned epoch = -1;
+	size_t epochlen = sizeof(epoch);
+
+	/*
+	 * Resolve kern.entropy.epoch if we haven't already.  Cache it
+	 * for the next caller.  Initialization is idempotent, so it's
+	 * OK if two threads do it at once.
+	 */
+	if (atomic_load_explicit(&initialized, memory_order_acquire)) {
+		mib[0] = atomic_load_explicit(&mib0[0], memory_order_relaxed);
+		mib[1] = atomic_load_explicit(&mib0[1], memory_order_relaxed);
+		mib[2] = atomic_load_explicit(&mib0[2], memory_order_relaxed);
+	} else {
+		size_t nmib = __arraycount(mib);
+
+		if (sysctlnametomib("kern.entropy.epoch", mib, &nmib) == -1)
+			return -1;
+		if (nmib != __arraycount(mib))
+			return -1;
+		atomic_store_explicit(&mib0[0], mib[0], memory_order_relaxed);
+		atomic_store_explicit(&mib0[1], mib[1], memory_order_relaxed);
+		atomic_store_explicit(&mib0[2], mib[2], memory_order_relaxed);
+		atomic_store_explicit(&initialized, true,
+		    memory_order_release);
+	}
+
+	if (sysctl(mib, __arraycount(mib), &epoch, &epochlen, NULL, 0) == -1)
+		return -1;
+	if (epochlen != sizeof(epoch))
+		return -1;
+
+	return epoch;
+}
+
 /* arc4random state: per-thread, per-process (zeroed in child on fork) */
 
 struct arc4random_prng {
 	struct crypto_prng	arc4_prng;
-	bool			arc4_seeded;
+	unsigned		arc4_epoch;
 };
 
 static void
@@ -413,6 +470,7 @@ arc4random_prng_addrandom(struct arc4ran
 	SHA256_CTX ctx;
 	uint8_t buf[crypto_prng_SEEDBYTES];
 	size_t buflen = sizeof buf;
+	unsigned epoch = entropy_epoch();
 
 	__CTASSERT(sizeof buf == SHA256_DIGEST_LENGTH);
 
@@ -436,7 +494,7 @@ arc4random_prng_addrandom(struct arc4ran
 	/* reseed(SHA256(prng() || sysctl(KERN_ARND) || data)) */
 	crypto_prng_seed(&prng->arc4_prng, buf);
 	(void)explicit_memset(buf, 0, sizeof buf);
-	prng->arc4_seeded = true;
+	prng->arc4_epoch = epoch;
 }
 
 #ifdef _REENTRANT
@@ -567,7 +625,7 @@ arc4random_prng_get(void)
 	}
 
 	/* Guarantee the PRNG is seeded.  */
-	if (__predict_false(!prng->arc4_seeded))
+	if (__predict_false(prng->arc4_epoch != entropy_epoch()))
 		arc4random_prng_addrandom(prng, NULL, 0);
 
 	return prng;

Reply via email to