commit:     4c9bb1563e46363720d3778468b068a8509a2f36
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Mon May 30 13:57:08 2022 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Mon May 30 13:57:08 2022 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=4c9bb156

Linux patch 5.18.1

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    4 +
 1000_linux-5.18.1.patch | 2933 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 2937 insertions(+)

diff --git a/0000_README b/0000_README
index 298c5715..62ab5b31 100644
--- a/0000_README
+++ b/0000_README
@@ -43,6 +43,10 @@ EXPERIMENTAL
 Individual Patch Descriptions:
 --------------------------------------------------------------------------
 
+Patch:  1000_linux-5.18.1.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.18.1
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1000_linux-5.18.1.patch b/1000_linux-5.18.1.patch
new file mode 100644
index 00000000..679abefd
--- /dev/null
+++ b/1000_linux-5.18.1.patch
@@ -0,0 +1,2933 @@
+diff --git a/Documentation/admin-guide/sysctl/kernel.rst b/Documentation/admin-guide/sysctl/kernel.rst
+index 1144ea3229a37..e9c18dabc5523 100644
+--- a/Documentation/admin-guide/sysctl/kernel.rst
++++ b/Documentation/admin-guide/sysctl/kernel.rst
+@@ -994,6 +994,9 @@ This is a directory, with the following entries:
+ * ``boot_id``: a UUID generated the first time this is retrieved, and
+   unvarying after that;
+ 
++* ``uuid``: a UUID generated every time this is retrieved (this can
++  thus be used to generate UUIDs at will);
++
+ * ``entropy_avail``: the pool's entropy count, in bits;
+ 
+ * ``poolsize``: the entropy pool size, in bits;
+@@ -1001,10 +1004,7 @@ This is a directory, with the following entries:
+ * ``urandom_min_reseed_secs``: obsolete (used to determine the minimum
+   number of seconds between urandom pool reseeding). This file is
+   writable for compatibility purposes, but writing to it has no effect
+-  on any RNG behavior.
+-
+-* ``uuid``: a UUID generated every time this is retrieved (this can
+-  thus be used to generate UUIDs at will);
++  on any RNG behavior;
+ 
+ * ``write_wakeup_threshold``: when the entropy count drops below this
+   (as a number of bits), processes waiting to write to ``/dev/random``
+diff --git a/Makefile b/Makefile
+index 7d5b0bfe79602..2bb168acb8f43 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 18
+-SUBLEVEL = 0
++SUBLEVEL = 1
+ EXTRAVERSION =
+ NAME = Superb Owl
+ 
+diff --git a/arch/alpha/include/asm/timex.h b/arch/alpha/include/asm/timex.h
+index b565cc6f408e9..f89798da8a147 100644
+--- a/arch/alpha/include/asm/timex.h
++++ b/arch/alpha/include/asm/timex.h
+@@ -28,5 +28,6 @@ static inline cycles_t get_cycles (void)
+       __asm__ __volatile__ ("rpcc %0" : "=r"(ret));
+       return ret;
+ }
++#define get_cycles get_cycles
+ 
+ #endif
+diff --git a/arch/arm/include/asm/timex.h b/arch/arm/include/asm/timex.h
+index 7c3b3671d6c25..6d1337c169cd3 100644
+--- a/arch/arm/include/asm/timex.h
++++ b/arch/arm/include/asm/timex.h
+@@ -11,5 +11,6 @@
+ 
+ typedef unsigned long cycles_t;
+ #define get_cycles()  ({ cycles_t c; read_current_timer(&c) ? 0 : c; })
+#define random_get_entropy() (((unsigned long)get_cycles()) ?: random_get_entropy_fallback())
+ 
+ #endif
+diff --git a/arch/ia64/include/asm/timex.h b/arch/ia64/include/asm/timex.h
+index 869a3ac6bf23a..7ccc077a60bed 100644
+--- a/arch/ia64/include/asm/timex.h
++++ b/arch/ia64/include/asm/timex.h
+@@ -39,6 +39,7 @@ get_cycles (void)
+       ret = ia64_getreg(_IA64_REG_AR_ITC);
+       return ret;
+ }
++#define get_cycles get_cycles
+ 
+ extern void ia64_cpu_local_tick (void);
+ extern unsigned long long ia64_native_sched_clock (void);
+diff --git a/arch/m68k/include/asm/timex.h b/arch/m68k/include/asm/timex.h
+index 6a21d93582805..f4a7a340f4cae 100644
+--- a/arch/m68k/include/asm/timex.h
++++ b/arch/m68k/include/asm/timex.h
+@@ -35,7 +35,7 @@ static inline unsigned long random_get_entropy(void)
+ {
+       if (mach_random_get_entropy)
+               return mach_random_get_entropy();
+-      return 0;
++      return random_get_entropy_fallback();
+ }
+ #define random_get_entropy    random_get_entropy
+ 
+diff --git a/arch/mips/include/asm/timex.h b/arch/mips/include/asm/timex.h
+index 8026baf46e729..2e107886f97ac 100644
+--- a/arch/mips/include/asm/timex.h
++++ b/arch/mips/include/asm/timex.h
+@@ -76,25 +76,24 @@ static inline cycles_t get_cycles(void)
+       else
+               return 0;       /* no usable counter */
+ }
++#define get_cycles get_cycles
+ 
+ /*
+  * Like get_cycles - but where c0_count is not available we desperately
+  * use c0_random in an attempt to get at least a little bit of entropy.
+- *
+- * R6000 and R6000A neither have a count register nor a random register.
+- * That leaves no entropy source in the CPU itself.
+  */
+ static inline unsigned long random_get_entropy(void)
+ {
+-      unsigned int prid = read_c0_prid();
+-      unsigned int imp = prid & PRID_IMP_MASK;
++      unsigned int c0_random;
+ 
+-      if (can_use_mips_counter(prid))
++      if (can_use_mips_counter(read_c0_prid()))
+               return read_c0_count();
+-      else if (likely(imp != PRID_IMP_R6000 && imp != PRID_IMP_R6000A))
+-              return read_c0_random();
++
++      if (cpu_has_3kex)
++              c0_random = (read_c0_random() >> 8) & 0x3f;
+       else
+-              return 0;       /* no usable register */
++              c0_random = read_c0_random() & 0x3f;
++      return (random_get_entropy_fallback() << 6) | (0x3f - c0_random);
+ }
+ #define random_get_entropy random_get_entropy
+ 
+diff --git a/arch/nios2/include/asm/timex.h b/arch/nios2/include/asm/timex.h
+index a769f871b28d9..40a1adc9bd03e 100644
+--- a/arch/nios2/include/asm/timex.h
++++ b/arch/nios2/include/asm/timex.h
+@@ -8,5 +8,8 @@
+ typedef unsigned long cycles_t;
+ 
+ extern cycles_t get_cycles(void);
++#define get_cycles get_cycles
++
+#define random_get_entropy() (((unsigned long)get_cycles()) ?: random_get_entropy_fallback())
+ 
+ #endif
+diff --git a/arch/parisc/include/asm/timex.h b/arch/parisc/include/asm/timex.h
+index 06b510f8172e3..b4622cb06a75e 100644
+--- a/arch/parisc/include/asm/timex.h
++++ b/arch/parisc/include/asm/timex.h
+@@ -13,9 +13,10 @@
+ 
+ typedef unsigned long cycles_t;
+ 
+-static inline cycles_t get_cycles (void)
++static inline cycles_t get_cycles(void)
+ {
+       return mfctl(16);
+ }
++#define get_cycles get_cycles
+ 
+ #endif
+diff --git a/arch/powerpc/include/asm/timex.h b/arch/powerpc/include/asm/timex.h
+index fa2e76e4093a3..14b4489de52c5 100644
+--- a/arch/powerpc/include/asm/timex.h
++++ b/arch/powerpc/include/asm/timex.h
+@@ -19,6 +19,7 @@ static inline cycles_t get_cycles(void)
+ {
+       return mftb();
+ }
++#define get_cycles get_cycles
+ 
+ #endif        /* __KERNEL__ */
+ #endif        /* _ASM_POWERPC_TIMEX_H */
+diff --git a/arch/riscv/include/asm/timex.h b/arch/riscv/include/asm/timex.h
+index 507cae273bc62..d6a7428f6248d 100644
+--- a/arch/riscv/include/asm/timex.h
++++ b/arch/riscv/include/asm/timex.h
+@@ -41,7 +41,7 @@ static inline u32 get_cycles_hi(void)
+ static inline unsigned long random_get_entropy(void)
+ {
+       if (unlikely(clint_time_val == NULL))
+-              return 0;
++              return random_get_entropy_fallback();
+       return get_cycles();
+ }
+ #define random_get_entropy()  random_get_entropy()
+diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
+index 2cfce42aa7fc4..ce878e85b6e4e 100644
+--- a/arch/s390/include/asm/timex.h
++++ b/arch/s390/include/asm/timex.h
+@@ -197,6 +197,7 @@ static inline cycles_t get_cycles(void)
+ {
+       return (cycles_t) get_tod_clock() >> 2;
+ }
++#define get_cycles get_cycles
+ 
+ int get_phys_clock(unsigned long *clock);
+ void init_cpu_timer(void);
+diff --git a/arch/sparc/include/asm/timex_32.h b/arch/sparc/include/asm/timex_32.h
+index 542915b462097..f86326a6f89e0 100644
+--- a/arch/sparc/include/asm/timex_32.h
++++ b/arch/sparc/include/asm/timex_32.h
+@@ -9,8 +9,6 @@
+ 
+ #define CLOCK_TICK_RATE       1193180 /* Underlying HZ */
+ 
+-/* XXX Maybe do something better at some point... -DaveM */
+-typedef unsigned long cycles_t;
+-#define get_cycles()  (0)
++#include <asm-generic/timex.h>
+ 
+ #endif
+diff --git a/arch/um/include/asm/timex.h b/arch/um/include/asm/timex.h
+index e392a9a5bc9bd..9f27176adb26d 100644
+--- a/arch/um/include/asm/timex.h
++++ b/arch/um/include/asm/timex.h
+@@ -2,13 +2,8 @@
+ #ifndef __UM_TIMEX_H
+ #define __UM_TIMEX_H
+ 
+-typedef unsigned long cycles_t;
+-
+-static inline cycles_t get_cycles (void)
+-{
+-      return 0;
+-}
+-
+ #define CLOCK_TICK_RATE (HZ)
+ 
++#include <asm-generic/timex.h>
++
+ #endif
+diff --git a/arch/x86/include/asm/timex.h b/arch/x86/include/asm/timex.h
+index a4a8b1b16c0c1..956e4145311b1 100644
+--- a/arch/x86/include/asm/timex.h
++++ b/arch/x86/include/asm/timex.h
+@@ -5,6 +5,15 @@
+ #include <asm/processor.h>
+ #include <asm/tsc.h>
+ 
++static inline unsigned long random_get_entropy(void)
++{
++      if (!IS_ENABLED(CONFIG_X86_TSC) &&
++          !cpu_feature_enabled(X86_FEATURE_TSC))
++              return random_get_entropy_fallback();
++      return rdtsc();
++}
++#define random_get_entropy random_get_entropy
++
+ /* Assume we use the PIT time source for the clock tick */
+ #define CLOCK_TICK_RATE               PIT_TICK_RATE
+ 
+diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h
+index 01a300a9700b9..fbdc3d9514943 100644
+--- a/arch/x86/include/asm/tsc.h
++++ b/arch/x86/include/asm/tsc.h
+@@ -20,13 +20,12 @@ extern void disable_TSC(void);
+ 
+ static inline cycles_t get_cycles(void)
+ {
+-#ifndef CONFIG_X86_TSC
+-      if (!boot_cpu_has(X86_FEATURE_TSC))
++      if (!IS_ENABLED(CONFIG_X86_TSC) &&
++          !cpu_feature_enabled(X86_FEATURE_TSC))
+               return 0;
+-#endif
+-
+       return rdtsc();
+ }
++#define get_cycles get_cycles
+ 
+ extern struct system_counterval_t convert_art_to_tsc(u64 art);
+ extern struct system_counterval_t convert_art_ns_to_tsc(u64 art_ns);
+diff --git a/arch/xtensa/include/asm/timex.h b/arch/xtensa/include/asm/timex.h
+index 233ec75e60c69..3f2462f2d0270 100644
+--- a/arch/xtensa/include/asm/timex.h
++++ b/arch/xtensa/include/asm/timex.h
+@@ -29,10 +29,6 @@
+ 
+ extern unsigned long ccount_freq;
+ 
+-typedef unsigned long long cycles_t;
+-
+-#define get_cycles()  (0)
+-
+ void local_timer_setup(unsigned cpu);
+ 
+ /*
+@@ -59,4 +55,6 @@ static inline void set_linux_timer (unsigned long ccompare)
+       xtensa_set_sr(ccompare, SREG_CCOMPARE + LINUX_TIMER);
+ }
+ 
++#include <asm-generic/timex.h>
++
+ #endif        /* _XTENSA_TIMEX_H */
+diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
+index a4b638bea6f16..cc2fe0618178e 100644
+--- a/drivers/acpi/sysfs.c
++++ b/drivers/acpi/sysfs.c
+@@ -415,19 +415,30 @@ static ssize_t acpi_data_show(struct file *filp, struct kobject *kobj,
+                             loff_t offset, size_t count)
+ {
+       struct acpi_data_attr *data_attr;
+-      void *base;
+-      ssize_t rc;
++      void __iomem *base;
++      ssize_t size;
+ 
+       data_attr = container_of(bin_attr, struct acpi_data_attr, attr);
++      size = data_attr->attr.size;
++
++      if (offset < 0)
++              return -EINVAL;
++
++      if (offset >= size)
++              return 0;
+ 
+-      base = acpi_os_map_memory(data_attr->addr, data_attr->attr.size);
++      if (count > size - offset)
++              count = size - offset;
++
++      base = acpi_os_map_iomem(data_attr->addr, size);
+       if (!base)
+               return -ENOMEM;
+-      rc = memory_read_from_buffer(buf, count, &offset, base,
+-                                   data_attr->attr.size);
+-      acpi_os_unmap_memory(base, data_attr->attr.size);
+ 
+-      return rc;
++      memcpy_fromio(buf, base + offset, count);
++
++      acpi_os_unmap_iomem(base, size);
++
++      return count;
+ }
+ 
+ static int acpi_bert_data_init(void *th, struct acpi_data_attr *data_attr)
+diff --git a/drivers/char/random.c b/drivers/char/random.c
+index 4c9adb4f3d5d7..7a66eec08e373 100644
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -15,14 +15,12 @@
+  *   - Sysctl interface.
+  *
+  * The high level overview is that there is one input pool, into which
+- * various pieces of data are hashed. Some of that data is then "credited" as
+- * having a certain number of bits of entropy. When enough bits of entropy are
+- * available, the hash is finalized and handed as a key to a stream cipher that
+- * expands it indefinitely for various consumers. This key is periodically
+- * refreshed as the various entropy collectors, described below, add data to the
+- * input pool and credit it. There is currently no Fortuna-like scheduler
+- * involved, which can lead to malicious entropy sources causing a premature
+- * reseed, and the entropy estimates are, at best, conservative guesses.
++ * various pieces of data are hashed. Prior to initialization, some of that
++ * data is then "credited" as having a certain number of bits of entropy.
++ * When enough bits of entropy are available, the hash is finalized and
++ * handed as a key to a stream cipher that expands it indefinitely for
++ * various consumers. This key is periodically refreshed as the various
++ * entropy collectors, described below, add data to the input pool.
+  */
+ 
+ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+@@ -53,6 +51,7 @@
+ #include <linux/completion.h>
+ #include <linux/uuid.h>
+ #include <linux/uaccess.h>
++#include <linux/siphash.h>
+ #include <crypto/chacha.h>
+ #include <crypto/blake2s.h>
+ #include <asm/processor.h>
+@@ -71,27 +70,27 @@
+  *********************************************************************/
+ 
+ /*
+- * crng_init =  0 --> Uninitialized
+- *            1 --> Initialized
+- *            2 --> Initialized from input_pool
+- *
+  * crng_init is protected by base_crng->lock, and only increases
+- * its value (from 0->1->2).
++ * its value (from empty->early->ready).
+  */
+-static int crng_init = 0;
+-#define crng_ready() (likely(crng_init > 1))
+-/* Various types of waiters for crng_init->2 transition. */
++static enum {
++      CRNG_EMPTY = 0, /* Little to no entropy collected */
++      CRNG_EARLY = 1, /* At least POOL_EARLY_BITS collected */
++      CRNG_READY = 2  /* Fully initialized with POOL_READY_BITS collected */
++} crng_init __read_mostly = CRNG_EMPTY;
++static DEFINE_STATIC_KEY_FALSE(crng_is_ready);
++#define crng_ready() (static_branch_likely(&crng_is_ready) || crng_init >= CRNG_READY)
++/* Various types of waiters for crng_init->CRNG_READY transition. */
+ static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);
+ static struct fasync_struct *fasync;
+ static DEFINE_SPINLOCK(random_ready_chain_lock);
+ static RAW_NOTIFIER_HEAD(random_ready_chain);
+ 
+ /* Control how we warn userspace. */
+-static struct ratelimit_state unseeded_warning =
+-      RATELIMIT_STATE_INIT("warn_unseeded_randomness", HZ, 3);
+ static struct ratelimit_state urandom_warning =
+       RATELIMIT_STATE_INIT("warn_urandom_randomness", HZ, 3);
+-static int ratelimit_disable __read_mostly;
++static int ratelimit_disable __read_mostly =
++      IS_ENABLED(CONFIG_WARN_ALL_UNSEEDED_RANDOM);
+ module_param_named(ratelimit_disable, ratelimit_disable, int, 0644);
+ MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression");
+ 
+@@ -110,6 +109,11 @@ bool rng_is_initialized(void)
+ }
+ EXPORT_SYMBOL(rng_is_initialized);
+ 
++static void __cold crng_set_ready(struct work_struct *work)
++{
++      static_branch_enable(&crng_is_ready);
++}
++
+ /* Used by wait_for_random_bytes(), and considered an entropy collector, below. */
+ static void try_to_generate_entropy(void);
+ 
+@@ -144,7 +148,7 @@ EXPORT_SYMBOL(wait_for_random_bytes);
+  * returns: 0 if callback is successfully added
+  *        -EALREADY if pool is already initialised (callback not called)
+  */
+-int register_random_ready_notifier(struct notifier_block *nb)
++int __cold register_random_ready_notifier(struct notifier_block *nb)
+ {
+       unsigned long flags;
+       int ret = -EALREADY;
+@@ -162,7 +166,7 @@ int unregister_random_ready_notifier(struct notifier_block *nb)
+ /*
+  * Delete a previously registered readiness callback function.
+  */
+-int unregister_random_ready_notifier(struct notifier_block *nb)
++int __cold unregister_random_ready_notifier(struct notifier_block *nb)
+ {
+       unsigned long flags;
+       int ret;
+@@ -173,7 +177,7 @@ int unregister_random_ready_notifier(struct notifier_block *nb)
+       return ret;
+ }
+ 
+-static void process_random_ready_list(void)
++static void __cold process_random_ready_list(void)
+ {
+       unsigned long flags;
+ 
+@@ -182,28 +186,10 @@ static void process_random_ready_list(void)
+       spin_unlock_irqrestore(&random_ready_chain_lock, flags);
+ }
+ 
+-#define warn_unseeded_randomness(previous) \
+-      _warn_unseeded_randomness(__func__, (void *)_RET_IP_, (previous))
+-
+-static void _warn_unseeded_randomness(const char *func_name, void *caller, void **previous)
+-{
+-#ifdef CONFIG_WARN_ALL_UNSEEDED_RANDOM
+-      const bool print_once = false;
+-#else
+-      static bool print_once __read_mostly;
+-#endif
+-
+-      if (print_once || crng_ready() ||
+-          (previous && (caller == READ_ONCE(*previous))))
+-              return;
+-      WRITE_ONCE(*previous, caller);
+-#ifndef CONFIG_WARN_ALL_UNSEEDED_RANDOM
+-      print_once = true;
+-#endif
+-      if (__ratelimit(&unseeded_warning))
+-              printk_deferred(KERN_NOTICE "random: %s called from %pS with crng_init=%d\n",
+-                              func_name, caller, crng_init);
+-}
++#define warn_unseeded_randomness() \
++      if (IS_ENABLED(CONFIG_WARN_ALL_UNSEEDED_RANDOM) && !crng_ready()) \
++              printk_deferred(KERN_NOTICE "random: %s called from %pS with crng_init=%d\n", \
++                              __func__, (void *)_RET_IP_, crng_init)
+ 
+ 
+ /*********************************************************************
+@@ -216,7 +202,7 @@ static void _warn_unseeded_randomness(const char *func_name, void *caller, void
+  *
+  * There are a few exported interfaces for use by other drivers:
+  *
+- *    void get_random_bytes(void *buf, size_t nbytes)
++ *    void get_random_bytes(void *buf, size_t len)
+  *    u32 get_random_u32()
+  *    u64 get_random_u64()
+  *    unsigned int get_random_int()
+@@ -232,8 +218,8 @@ static void _warn_unseeded_randomness(const char *func_name, void *caller, void
+  *********************************************************************/
+ 
+ enum {
+-      CRNG_RESEED_INTERVAL = 300 * HZ,
+-      CRNG_INIT_CNT_THRESH = 2 * CHACHA_KEY_SIZE
++      CRNG_RESEED_START_INTERVAL = HZ,
++      CRNG_RESEED_INTERVAL = 60 * HZ
+ };
+ 
+ static struct {
+@@ -256,24 +242,17 @@ static DEFINE_PER_CPU(struct crng, crngs) = {
+       .lock = INIT_LOCAL_LOCK(crngs.lock),
+ };
+ 
+-/* Used by crng_reseed() to extract a new seed from the input pool. */
+-static bool drain_entropy(void *buf, size_t nbytes, bool force);
++/* Used by crng_reseed() and crng_make_state() to extract a new seed from the input pool. */
++static void extract_entropy(void *buf, size_t len);
+ 
+-/*
+- * This extracts a new crng key from the input pool, but only if there is a
+- * sufficient amount of entropy available or force is true, in order to
+- * mitigate bruteforcing of newly added bits.
+- */
+-static void crng_reseed(bool force)
++/* This extracts a new crng key from the input pool. */
++static void crng_reseed(void)
+ {
+       unsigned long flags;
+       unsigned long next_gen;
+       u8 key[CHACHA_KEY_SIZE];
+-      bool finalize_init = false;
+ 
+-      /* Only reseed if we can, to prevent brute forcing a small amount of new bits. */
+-      if (!drain_entropy(key, sizeof(key), force))
+-              return;
++      extract_entropy(key, sizeof(key));
+ 
+       /*
+        * We copy the new key into the base_crng, overwriting the old one,
+@@ -288,28 +267,10 @@ static void crng_reseed(bool force)
+               ++next_gen;
+       WRITE_ONCE(base_crng.generation, next_gen);
+       WRITE_ONCE(base_crng.birth, jiffies);
+-      if (!crng_ready()) {
+-              crng_init = 2;
+-              finalize_init = true;
+-      }
++      if (!static_branch_likely(&crng_is_ready))
++              crng_init = CRNG_READY;
+       spin_unlock_irqrestore(&base_crng.lock, flags);
+       memzero_explicit(key, sizeof(key));
+-      if (finalize_init) {
+-              process_random_ready_list();
+-              wake_up_interruptible(&crng_init_wait);
+-              kill_fasync(&fasync, SIGIO, POLL_IN);
+-              pr_notice("crng init done\n");
+-              if (unseeded_warning.missed) {
+-                      pr_notice("%d get_random_xx warning(s) missed due to ratelimiting\n",
+-                                unseeded_warning.missed);
+-                      unseeded_warning.missed = 0;
+-              }
+-              if (urandom_warning.missed) {
+-                      pr_notice("%d urandom warning(s) missed due to ratelimiting\n",
+-                                urandom_warning.missed);
+-                      urandom_warning.missed = 0;
+-              }
+-      }
+ }
+ 
+ /*
+@@ -345,10 +306,10 @@ static void crng_fast_key_erasure(u8 key[CHACHA_KEY_SIZE],
+ }
+ 
+ /*
+- * Return whether the crng seed is considered to be sufficiently
+- * old that a reseeding might be attempted. This happens if the last
+- * reseeding was CRNG_RESEED_INTERVAL ago, or during early boot, at
+- * an interval proportional to the uptime.
++ * Return whether the crng seed is considered to be sufficiently old
++ * that a reseeding is needed. This happens if the last reseeding
++ * was CRNG_RESEED_INTERVAL ago, or during early boot, at an interval
++ * proportional to the uptime.
+  */
+ static bool crng_has_old_seed(void)
+ {
+@@ -360,10 +321,10 @@ static bool crng_has_old_seed(void)
+               if (uptime >= CRNG_RESEED_INTERVAL / HZ * 2)
+                       WRITE_ONCE(early_boot, false);
+               else
+-                      interval = max_t(unsigned int, 5 * HZ,
++                      interval = max_t(unsigned int, CRNG_RESEED_START_INTERVAL,
+                                        (unsigned int)uptime / 2 * HZ);
+       }
+-      return time_after(jiffies, READ_ONCE(base_crng.birth) + interval);
++      return time_is_before_jiffies(READ_ONCE(base_crng.birth) + interval);
+ }
+ 
+ /*
+@@ -382,28 +343,31 @@ static void crng_make_state(u32 chacha_state[CHACHA_STATE_WORDS],
+       /*
+        * For the fast path, we check whether we're ready, unlocked first, and
+        * then re-check once locked later. In the case where we're really not
+-       * ready, we do fast key erasure with the base_crng directly, because
+-       * this is what crng_pre_init_inject() mutates during early init.
++       * ready, we do fast key erasure with the base_crng directly, extracting
++       * when crng_init is CRNG_EMPTY.
+        */
+       if (!crng_ready()) {
+               bool ready;
+ 
+               spin_lock_irqsave(&base_crng.lock, flags);
+               ready = crng_ready();
+-              if (!ready)
++              if (!ready) {
++                      if (crng_init == CRNG_EMPTY)
++                              extract_entropy(base_crng.key, sizeof(base_crng.key));
+                       crng_fast_key_erasure(base_crng.key, chacha_state,
+                                             random_data, random_data_len);
++              }
+               spin_unlock_irqrestore(&base_crng.lock, flags);
+               if (!ready)
+                       return;
+       }
+ 
+       /*
+-       * If the base_crng is old enough, we try to reseed, which in turn
+-       * bumps the generation counter that we check below.
++       * If the base_crng is old enough, we reseed, which in turn bumps the
++       * generation counter that we check below.
+        */
+       if (unlikely(crng_has_old_seed()))
+-              crng_reseed(false);
++              crng_reseed();
+ 
+       local_lock_irqsave(&crngs.lock, flags);
+       crng = raw_cpu_ptr(&crngs);
+@@ -433,68 +397,24 @@ static void crng_make_state(u32 chacha_state[CHACHA_STATE_WORDS],
+       local_unlock_irqrestore(&crngs.lock, flags);
+ }
+ 
+-/*
+- * This function is for crng_init == 0 only. It loads entropy directly
+- * into the crng's key, without going through the input pool. It is,
+- * generally speaking, not very safe, but we use this only at early
+- * boot time when it's better to have something there rather than
+- * nothing.
+- *
+- * If account is set, then the crng_init_cnt counter is incremented.
+- * This shouldn't be set by functions like add_device_randomness(),
+- * where we can't trust the buffer passed to it is guaranteed to be
+- * unpredictable (so it might not have any entropy at all).
+- */
+-static void crng_pre_init_inject(const void *input, size_t len, bool account)
+-{
+-      static int crng_init_cnt = 0;
+-      struct blake2s_state hash;
+-      unsigned long flags;
+-
+-      blake2s_init(&hash, sizeof(base_crng.key));
+-
+-      spin_lock_irqsave(&base_crng.lock, flags);
+-      if (crng_init != 0) {
+-              spin_unlock_irqrestore(&base_crng.lock, flags);
+-              return;
+-      }
+-
+-      blake2s_update(&hash, base_crng.key, sizeof(base_crng.key));
+-      blake2s_update(&hash, input, len);
+-      blake2s_final(&hash, base_crng.key);
+-
+-      if (account) {
+-              crng_init_cnt += min_t(size_t, len, CRNG_INIT_CNT_THRESH - crng_init_cnt);
+-              if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
+-                      ++base_crng.generation;
+-                      crng_init = 1;
+-              }
+-      }
+-
+-      spin_unlock_irqrestore(&base_crng.lock, flags);
+-
+-      if (crng_init == 1)
+-              pr_notice("fast init done\n");
+-}
+-
+-static void _get_random_bytes(void *buf, size_t nbytes)
++static void _get_random_bytes(void *buf, size_t len)
+ {
+       u32 chacha_state[CHACHA_STATE_WORDS];
+       u8 tmp[CHACHA_BLOCK_SIZE];
+-      size_t len;
++      size_t first_block_len;
+ 
+-      if (!nbytes)
++      if (!len)
+               return;
+ 
+-      len = min_t(size_t, 32, nbytes);
+-      crng_make_state(chacha_state, buf, len);
+-      nbytes -= len;
+-      buf += len;
++      first_block_len = min_t(size_t, 32, len);
++      crng_make_state(chacha_state, buf, first_block_len);
++      len -= first_block_len;
++      buf += first_block_len;
+ 
+-      while (nbytes) {
+-              if (nbytes < CHACHA_BLOCK_SIZE) {
++      while (len) {
++              if (len < CHACHA_BLOCK_SIZE) {
+                       chacha20_block(chacha_state, tmp);
+-                      memcpy(buf, tmp, nbytes);
++                      memcpy(buf, tmp, len);
+                       memzero_explicit(tmp, sizeof(tmp));
+                       break;
+               }
+@@ -502,7 +422,7 @@ static void _get_random_bytes(void *buf, size_t nbytes)
+               chacha20_block(chacha_state, buf);
+               if (unlikely(chacha_state[12] == 0))
+                       ++chacha_state[13];
+-              nbytes -= CHACHA_BLOCK_SIZE;
++              len -= CHACHA_BLOCK_SIZE;
+               buf += CHACHA_BLOCK_SIZE;
+       }
+ 
+@@ -519,22 +439,20 @@ static void _get_random_bytes(void *buf, size_t nbytes)
+  * wait_for_random_bytes() should be called and return 0 at least once
+  * at any point prior.
+  */
+-void get_random_bytes(void *buf, size_t nbytes)
++void get_random_bytes(void *buf, size_t len)
+ {
+-      static void *previous;
+-
+-      warn_unseeded_randomness(&previous);
+-      _get_random_bytes(buf, nbytes);
++      warn_unseeded_randomness();
++      _get_random_bytes(buf, len);
+ }
+ EXPORT_SYMBOL(get_random_bytes);
+ 
+-static ssize_t get_random_bytes_user(void __user *buf, size_t nbytes)
++static ssize_t get_random_bytes_user(struct iov_iter *iter)
+ {
+-      size_t len, left, ret = 0;
+       u32 chacha_state[CHACHA_STATE_WORDS];
+-      u8 output[CHACHA_BLOCK_SIZE];
++      u8 block[CHACHA_BLOCK_SIZE];
++      size_t ret = 0, copied;
+ 
+-      if (!nbytes)
++      if (unlikely(!iov_iter_count(iter)))
+               return 0;
+ 
+       /*
+@@ -548,30 +466,22 @@ static ssize_t get_random_bytes_user(void __user *buf, size_t nbytes)
+        * use chacha_state after, so we can simply return those bytes to
+        * the user directly.
+        */
+-      if (nbytes <= CHACHA_KEY_SIZE) {
+-              ret = nbytes - copy_to_user(buf, &chacha_state[4], nbytes);
++      if (iov_iter_count(iter) <= CHACHA_KEY_SIZE) {
++              ret = copy_to_iter(&chacha_state[4], CHACHA_KEY_SIZE, iter);
+               goto out_zero_chacha;
+       }
+ 
+       for (;;) {
+-              chacha20_block(chacha_state, output);
++              chacha20_block(chacha_state, block);
+               if (unlikely(chacha_state[12] == 0))
+                       ++chacha_state[13];
+ 
+-              len = min_t(size_t, nbytes, CHACHA_BLOCK_SIZE);
+-              left = copy_to_user(buf, output, len);
+-              if (left) {
+-                      ret += len - left;
+-                      break;
+-              }
+-
+-              buf += len;
+-              ret += len;
+-              nbytes -= len;
+-              if (!nbytes)
++              copied = copy_to_iter(block, sizeof(block), iter);
++              ret += copied;
++              if (!iov_iter_count(iter) || copied != sizeof(block))
+                       break;
+ 
+-              BUILD_BUG_ON(PAGE_SIZE % CHACHA_BLOCK_SIZE != 0);
++              BUILD_BUG_ON(PAGE_SIZE % sizeof(block) != 0);
+               if (ret % PAGE_SIZE == 0) {
+                       if (signal_pending(current))
+                               break;
+@@ -579,7 +489,7 @@ static ssize_t get_random_bytes_user(void __user *buf, size_t nbytes)
+               }
+       }
+ 
+-      memzero_explicit(output, sizeof(output));
++      memzero_explicit(block, sizeof(block));
+ out_zero_chacha:
+       memzero_explicit(chacha_state, sizeof(chacha_state));
+       return ret ? ret : -EFAULT;
+@@ -591,98 +501,69 @@ out_zero_chacha:
+  * provided by this function is okay, the function wait_for_random_bytes()
+  * should be called and return 0 at least once at any point prior.
+  */
+-struct batched_entropy {
+-      union {
+-              /*
+-               * We make this 1.5x a ChaCha block, so that we get the
+-               * remaining 32 bytes from fast key erasure, plus one full
+-               * block from the detached ChaCha state. We can increase
+-               * the size of this later if needed so long as we keep the
+-               * formula of (integer_blocks + 0.5) * CHACHA_BLOCK_SIZE.
+-               */
+-              u64 entropy_u64[CHACHA_BLOCK_SIZE * 3 / (2 * sizeof(u64))];
+-              u32 entropy_u32[CHACHA_BLOCK_SIZE * 3 / (2 * sizeof(u32))];
+-      };
+-      local_lock_t lock;
+-      unsigned long generation;
+-      unsigned int position;
+-};
+-
+ 
+-static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64) = {
+-      .lock = INIT_LOCAL_LOCK(batched_entropy_u64.lock),
+-      .position = UINT_MAX
+-};
+-
+-u64 get_random_u64(void)
+-{
+-      u64 ret;
+-      unsigned long flags;
+-      struct batched_entropy *batch;
+-      static void *previous;
+-      unsigned long next_gen;
+-
+-      warn_unseeded_randomness(&previous);
+-
+-      local_lock_irqsave(&batched_entropy_u64.lock, flags);
+-      batch = raw_cpu_ptr(&batched_entropy_u64);
+-
+-      next_gen = READ_ONCE(base_crng.generation);
+-      if (batch->position >= ARRAY_SIZE(batch->entropy_u64) ||
+-          next_gen != batch->generation) {
+-              _get_random_bytes(batch->entropy_u64, sizeof(batch->entropy_u64));
+-              batch->position = 0;
+-              batch->generation = next_gen;
+-      }
+-
+-      ret = batch->entropy_u64[batch->position];
+-      batch->entropy_u64[batch->position] = 0;
+-      ++batch->position;
+-      local_unlock_irqrestore(&batched_entropy_u64.lock, flags);
+-      return ret;
+-}
+-EXPORT_SYMBOL(get_random_u64);
+-
+-static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32) = {
+-      .lock = INIT_LOCAL_LOCK(batched_entropy_u32.lock),
+-      .position = UINT_MAX
+-};
+-
+-u32 get_random_u32(void)
+-{
+-      u32 ret;
+-      unsigned long flags;
+-      struct batched_entropy *batch;
+-      static void *previous;
+-      unsigned long next_gen;
+-
+-      warn_unseeded_randomness(&previous);
+-
+-      local_lock_irqsave(&batched_entropy_u32.lock, flags);
+-      batch = raw_cpu_ptr(&batched_entropy_u32);
+-
+-      next_gen = READ_ONCE(base_crng.generation);
+-      if (batch->position >= ARRAY_SIZE(batch->entropy_u32) ||
+-          next_gen != batch->generation) {
+-              _get_random_bytes(batch->entropy_u32, sizeof(batch->entropy_u32));
+-              batch->position = 0;
+-              batch->generation = next_gen;
+-      }
+-
+-      ret = batch->entropy_u32[batch->position];
+-      batch->entropy_u32[batch->position] = 0;
+-      ++batch->position;
+-      local_unlock_irqrestore(&batched_entropy_u32.lock, flags);
+-      return ret;
+-}
+-EXPORT_SYMBOL(get_random_u32);
++#define DEFINE_BATCHED_ENTROPY(type)                                          \
++struct batch_ ##type {                                                        \
++      /*                                                                      \
++       * We make this 1.5x a ChaCha block, so that we get the                 \
++       * remaining 32 bytes from fast key erasure, plus one full              \
++       * block from the detached ChaCha state. We can increase                \
++       * the size of this later if needed so long as we keep the              \
++       * formula of (integer_blocks + 0.5) * CHACHA_BLOCK_SIZE.               \
++       */                                                                     \
++      type entropy[CHACHA_BLOCK_SIZE * 3 / (2 * sizeof(type))];               \
++      local_lock_t lock;                                                      \
++      unsigned long generation;                                               \
++      unsigned int position;                                                  \
++};                                                                            \
++                                                                              \
++static DEFINE_PER_CPU(struct batch_ ##type, batched_entropy_ ##type) = {      \
++      .lock = INIT_LOCAL_LOCK(batched_entropy_ ##type.lock),                  \
++      .position = UINT_MAX                                                    \
++};                                                                            \
++                                                                              \
++type get_random_ ##type(void)                                                 \
++{                                                                             \
++      type ret;                                                               \
++      unsigned long flags;                                                    \
++      struct batch_ ##type *batch;                                            \
++      unsigned long next_gen;                                                 \
++                                                                              \
++      warn_unseeded_randomness();                                             \
++                                                                              \
++      if  (!crng_ready()) {                                                   \
++              _get_random_bytes(&ret, sizeof(ret));                           \
++              return ret;                                                     \
++      }                                                                       \
++                                                                              \
++      local_lock_irqsave(&batched_entropy_ ##type.lock, flags);               \
++      batch = raw_cpu_ptr(&batched_entropy_##type);                           \
++                                                                              \
++      next_gen = READ_ONCE(base_crng.generation);                             \
++      if (batch->position >= ARRAY_SIZE(batch->entropy) ||                    \
++          next_gen != batch->generation) {                                    \
++              _get_random_bytes(batch->entropy, sizeof(batch->entropy));      \
++              batch->position = 0;                                            \
++              batch->generation = next_gen;                                   \
++      }                                                                       \
++                                                                              \
++      ret = batch->entropy[batch->position];                                  \
++      batch->entropy[batch->position] = 0;                                    \
++      ++batch->position;                                                      \
++      local_unlock_irqrestore(&batched_entropy_ ##type.lock, flags);          \
++      return ret;                                                             \
++}                                                                             \
++EXPORT_SYMBOL(get_random_ ##type);
++
++DEFINE_BATCHED_ENTROPY(u64)
++DEFINE_BATCHED_ENTROPY(u32)
+ 
+ #ifdef CONFIG_SMP
+ /*
+  * This function is called when the CPU is coming up, with entry
+  * CPUHP_RANDOM_PREPARE, which comes before CPUHP_WORKQUEUE_PREP.
+  */
+-int random_prepare_cpu(unsigned int cpu)
++int __cold random_prepare_cpu(unsigned int cpu)
+ {
+       /*
+        * When the cpu comes back online, immediately invalidate both
+@@ -696,62 +577,30 @@ int random_prepare_cpu(unsigned int cpu)
+ }
+ #endif
+ 
+-/**
+- * randomize_page - Generate a random, page aligned address
+- * @start:    The smallest acceptable address the caller will take.
+- * @range:    The size of the area, starting at @start, within which the
+- *            random address must fall.
+- *
+- * If @start + @range would overflow, @range is capped.
+- *
+- * NOTE: Historical use of randomize_range, which this replaces, presumed that
+- * @start was already page aligned.  We now align it regardless.
+- *
+- * Return: A page aligned address within [start, start + range).  On error,
+- * @start is returned.
+- */
+-unsigned long randomize_page(unsigned long start, unsigned long range)
+-{
+-      if (!PAGE_ALIGNED(start)) {
+-              range -= PAGE_ALIGN(start) - start;
+-              start = PAGE_ALIGN(start);
+-      }
+-
+-      if (start > ULONG_MAX - range)
+-              range = ULONG_MAX - start;
+-
+-      range >>= PAGE_SHIFT;
+-
+-      if (range == 0)
+-              return start;
+-
+-      return start + (get_random_long() % range << PAGE_SHIFT);
+-}
+-
+ /*
+  * This function will use the architecture-specific hardware random
+  * number generator if it is available. It is not recommended for
+  * use. Use get_random_bytes() instead. It returns the number of
+  * bytes filled in.
+  */
+-size_t __must_check get_random_bytes_arch(void *buf, size_t nbytes)
++size_t __must_check get_random_bytes_arch(void *buf, size_t len)
+ {
+-      size_t left = nbytes;
++      size_t left = len;
+       u8 *p = buf;
+ 
+       while (left) {
+               unsigned long v;
+-              size_t chunk = min_t(size_t, left, sizeof(unsigned long));
++              size_t block_len = min_t(size_t, left, sizeof(unsigned long));
+ 
+               if (!arch_get_random_long(&v))
+                       break;
+ 
+-              memcpy(p, &v, chunk);
+-              p += chunk;
+-              left -= chunk;
++              memcpy(p, &v, block_len);
++              p += block_len;
++              left -= block_len;
+       }
+ 
+-      return nbytes - left;
++      return len - left;
+ }
+ EXPORT_SYMBOL(get_random_bytes_arch);
+ 
+@@ -762,33 +611,28 @@ EXPORT_SYMBOL(get_random_bytes_arch);
+  *
+  * Callers may add entropy via:
+  *
+- *     static void mix_pool_bytes(const void *in, size_t nbytes)
++ *     static void mix_pool_bytes(const void *buf, size_t len)
+  *
+  * After which, if added entropy should be credited:
+  *
+- *     static void credit_entropy_bits(size_t nbits)
++ *     static void credit_init_bits(size_t bits)
+  *
+- * Finally, extract entropy via these two, with the latter one
+- * setting the entropy count to zero and extracting only if there
+- * is POOL_MIN_BITS entropy credited prior or force is true:
++ * Finally, extract entropy via:
+  *
+- *     static void extract_entropy(void *buf, size_t nbytes)
+- *     static bool drain_entropy(void *buf, size_t nbytes, bool force)
++ *     static void extract_entropy(void *buf, size_t len)
+  *
+  **********************************************************************/
+ 
+ enum {
+       POOL_BITS = BLAKE2S_HASH_SIZE * 8,
+-      POOL_MIN_BITS = POOL_BITS /* No point in settling for less. */
++      POOL_READY_BITS = POOL_BITS, /* When crng_init->CRNG_READY */
++      POOL_EARLY_BITS = POOL_READY_BITS / 2 /* When crng_init->CRNG_EARLY */
+ };
+ 
+-/* For notifying userspace should write into /dev/random. */
+-static DECLARE_WAIT_QUEUE_HEAD(random_write_wait);
+-
+ static struct {
+       struct blake2s_state hash;
+       spinlock_t lock;
+-      unsigned int entropy_count;
++      unsigned int init_bits;
+ } input_pool = {
+       .hash.h = { BLAKE2S_IV0 ^ (0x01010000 | BLAKE2S_HASH_SIZE),
+                   BLAKE2S_IV1, BLAKE2S_IV2, BLAKE2S_IV3, BLAKE2S_IV4,
+@@ -797,48 +641,30 @@ static struct {
+       .lock = __SPIN_LOCK_UNLOCKED(input_pool.lock),
+ };
+ 
+-static void _mix_pool_bytes(const void *in, size_t nbytes)
++static void _mix_pool_bytes(const void *buf, size_t len)
+ {
+-      blake2s_update(&input_pool.hash, in, nbytes);
++      blake2s_update(&input_pool.hash, buf, len);
+ }
+ 
+ /*
+- * This function adds bytes into the entropy "pool".  It does not
+- * update the entropy estimate.  The caller should call
+- * credit_entropy_bits if this is appropriate.
++ * This function adds bytes into the input pool. It does not
++ * update the initialization bit counter; the caller should call
++ * credit_init_bits if this is appropriate.
+  */
+-static void mix_pool_bytes(const void *in, size_t nbytes)
++static void mix_pool_bytes(const void *buf, size_t len)
+ {
+       unsigned long flags;
+ 
+       spin_lock_irqsave(&input_pool.lock, flags);
+-      _mix_pool_bytes(in, nbytes);
++      _mix_pool_bytes(buf, len);
+       spin_unlock_irqrestore(&input_pool.lock, flags);
+ }
+ 
+-static void credit_entropy_bits(size_t nbits)
+-{
+-      unsigned int entropy_count, orig, add;
+-
+-      if (!nbits)
+-              return;
+-
+-      add = min_t(size_t, nbits, POOL_BITS);
+-
+-      do {
+-              orig = READ_ONCE(input_pool.entropy_count);
+-              entropy_count = min_t(unsigned int, POOL_BITS, orig + add);
+-      } while (cmpxchg(&input_pool.entropy_count, orig, entropy_count) != orig);
+-
+-      if (!crng_ready() && entropy_count >= POOL_MIN_BITS)
+-              crng_reseed(false);
+-}
+-
+ /*
+  * This is an HKDF-like construction for using the hashed collected entropy
+  * as a PRF key, that's then expanded block-by-block.
+  */
+-static void extract_entropy(void *buf, size_t nbytes)
++static void extract_entropy(void *buf, size_t len)
+ {
+       unsigned long flags;
+       u8 seed[BLAKE2S_HASH_SIZE], next_key[BLAKE2S_HASH_SIZE];
+@@ -867,12 +693,12 @@ static void extract_entropy(void *buf, size_t nbytes)
+       spin_unlock_irqrestore(&input_pool.lock, flags);
+       memzero_explicit(next_key, sizeof(next_key));
+ 
+-      while (nbytes) {
+-              i = min_t(size_t, nbytes, BLAKE2S_HASH_SIZE);
++      while (len) {
++              i = min_t(size_t, len, BLAKE2S_HASH_SIZE);
+               /* output = HASHPRF(seed, RDSEED || ++counter) */
+               ++block.counter;
+               blake2s(buf, (u8 *)&block, seed, i, sizeof(block), sizeof(seed));
+-              nbytes -= i;
++              len -= i;
+               buf += i;
+       }
+ 
+@@ -880,23 +706,43 @@ static void extract_entropy(void *buf, size_t nbytes)
+       memzero_explicit(&block, sizeof(block));
+ }
+ 
+-/*
+- * First we make sure we have POOL_MIN_BITS of entropy in the pool unless force
+- * is true, and then we set the entropy count to zero (but don't actually touch
+- * any data). Only then can we extract a new key with extract_entropy().
+- */
+-static bool drain_entropy(void *buf, size_t nbytes, bool force)
++#define credit_init_bits(bits) if (!crng_ready()) _credit_init_bits(bits)
++
++static void __cold _credit_init_bits(size_t bits)
+ {
+-      unsigned int entropy_count;
++      static struct execute_work set_ready;
++      unsigned int new, orig, add;
++      unsigned long flags;
++
++      if (!bits)
++              return;
++
++      add = min_t(size_t, bits, POOL_BITS);
++
+       do {
+-              entropy_count = READ_ONCE(input_pool.entropy_count);
+-              if (!force && entropy_count < POOL_MIN_BITS)
+-                      return false;
+-      } while (cmpxchg(&input_pool.entropy_count, entropy_count, 0) != entropy_count);
+-      extract_entropy(buf, nbytes);
+-      wake_up_interruptible(&random_write_wait);
+-      kill_fasync(&fasync, SIGIO, POLL_OUT);
+-      return true;
++              orig = READ_ONCE(input_pool.init_bits);
++              new = min_t(unsigned int, POOL_BITS, orig + add);
++      } while (cmpxchg(&input_pool.init_bits, orig, new) != orig);
++
++      if (orig < POOL_READY_BITS && new >= POOL_READY_BITS) {
++              crng_reseed(); /* Sets crng_init to CRNG_READY under base_crng.lock. */
++              execute_in_process_context(crng_set_ready, &set_ready);
++              process_random_ready_list();
++              wake_up_interruptible(&crng_init_wait);
++              kill_fasync(&fasync, SIGIO, POLL_IN);
++              pr_notice("crng init done\n");
++              if (urandom_warning.missed)
++                      pr_notice("%d urandom warning(s) missed due to ratelimiting\n",
++                                urandom_warning.missed);
++      } else if (orig < POOL_EARLY_BITS && new >= POOL_EARLY_BITS) {
++              spin_lock_irqsave(&base_crng.lock, flags);
++              /* Check if crng_init is CRNG_EMPTY, to avoid race with crng_reseed(). */
++              if (crng_init == CRNG_EMPTY) {
++                      extract_entropy(base_crng.key, sizeof(base_crng.key));
++                      crng_init = CRNG_EARLY;
++              }
++              spin_unlock_irqrestore(&base_crng.lock, flags);
++      }
+ }
+ 
+ 
+@@ -907,15 +753,13 @@ static bool drain_entropy(void *buf, size_t nbytes, bool force)
+  * The following exported functions are used for pushing entropy into
+  * the above entropy accumulation routines:
+  *
+- *    void add_device_randomness(const void *buf, size_t size);
+- *    void add_input_randomness(unsigned int type, unsigned int code,
+- *                              unsigned int value);
+- *    void add_disk_randomness(struct gendisk *disk);
+- *    void add_hwgenerator_randomness(const void *buffer, size_t count,
+- *                                    size_t entropy);
+- *    void add_bootloader_randomness(const void *buf, size_t size);
+- *    void add_vmfork_randomness(const void *unique_vm_id, size_t size);
++ *    void add_device_randomness(const void *buf, size_t len);
++ *    void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy);
++ *    void add_bootloader_randomness(const void *buf, size_t len);
++ *    void add_vmfork_randomness(const void *unique_vm_id, size_t len);
+  *    void add_interrupt_randomness(int irq);
++ *    void add_input_randomness(unsigned int type, unsigned int code, unsigned int value);
++ *    void add_disk_randomness(struct gendisk *disk);
+  *
+  * add_device_randomness() adds data to the input pool that
+  * is likely to differ between two devices (or possibly even per boot).
+@@ -925,26 +769,13 @@ static bool drain_entropy(void *buf, size_t nbytes, bool force)
+  * that might otherwise be identical and have very little entropy
+  * available to them (particularly common in the embedded world).
+  *
+- * add_input_randomness() uses the input layer interrupt timing, as well
+- * as the event type information from the hardware.
+- *
+- * add_disk_randomness() uses what amounts to the seek time of block
+- * layer request events, on a per-disk_devt basis, as input to the
+- * entropy pool. Note that high-speed solid state drives with very low
+- * seek times do not make for good sources of entropy, as their seek
+- * times are usually fairly consistent.
+- *
+- * The above two routines try to estimate how many bits of entropy
+- * to credit. They do this by keeping track of the first and second
+- * order deltas of the event timings.
+- *
+  * add_hwgenerator_randomness() is for true hardware RNGs, and will credit
+  * entropy as specified by the caller. If the entropy pool is full it will
+  * block until more entropy is needed.
+  *
+- * add_bootloader_randomness() is the same as add_hwgenerator_randomness() or
+- * add_device_randomness(), depending on whether or not the configuration
+- * option CONFIG_RANDOM_TRUST_BOOTLOADER is set.
++ * add_bootloader_randomness() is called by bootloader drivers, such as EFI
++ * and device tree, and credits its input depending on whether or not the
++ * configuration option CONFIG_RANDOM_TRUST_BOOTLOADER is set.
+  *
+  * add_vmfork_randomness() adds a unique (but not necessarily secret) ID
+  * representing the current instance of a VM to the pool, without crediting,
+@@ -955,6 +786,19 @@ static bool drain_entropy(void *buf, size_t nbytes, bool force)
+  * as inputs, it feeds the input pool roughly once a second or after 64
+  * interrupts, crediting 1 bit of entropy for whichever comes first.
+  *
++ * add_input_randomness() uses the input layer interrupt timing, as well
++ * as the event type information from the hardware.
++ *
++ * add_disk_randomness() uses what amounts to the seek time of block
++ * layer request events, on a per-disk_devt basis, as input to the
++ * entropy pool. Note that high-speed solid state drives with very low
++ * seek times do not make for good sources of entropy, as their seek
++ * times are usually fairly consistent.
++ *
++ * The last two routines try to estimate how many bits of entropy
++ * to credit. They do this by keeping track of the first and second
++ * order deltas of the event timings.
++ *
+  **********************************************************************/
+ 
+ static bool trust_cpu __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU);
+@@ -972,46 +816,42 @@ early_param("random.trust_bootloader", parse_trust_bootloader);
+ 
+ /*
+  * The first collection of entropy occurs at system boot while interrupts
+- * are still turned off. Here we push in RDSEED, a timestamp, and utsname().
+- * Depending on the above configuration knob, RDSEED may be considered
+- * sufficient for initialization. Note that much earlier setup may already
+- * have pushed entropy into the input pool by the time we get here.
++ * are still turned off. Here we push in latent entropy, RDSEED, a timestamp,
++ * utsname(), and the command line. Depending on the above configuration knob,
++ * RDSEED may be considered sufficient for initialization. Note that much
++ * earlier setup may already have pushed entropy into the input pool by the
++ * time we get here.
+  */
+-int __init rand_initialize(void)
++int __init random_init(const char *command_line)
+ {
+-      size_t i;
+       ktime_t now = ktime_get_real();
+-      bool arch_init = true;
+-      unsigned long rv;
++      unsigned int i, arch_bytes;
++      unsigned long entropy;
+ 
+       static const u8 compiletime_seed[BLAKE2S_BLOCK_SIZE] __initconst __latent_entropy;
__latent_entropy;
+       _mix_pool_bytes(compiletime_seed, sizeof(compiletime_seed));
+ #endif
+ 
+-      for (i = 0; i < BLAKE2S_BLOCK_SIZE; i += sizeof(rv)) {
+-              if (!arch_get_random_seed_long_early(&rv) &&
+-                  !arch_get_random_long_early(&rv)) {
+-                      rv = random_get_entropy();
+-                      arch_init = false;
++      for (i = 0, arch_bytes = BLAKE2S_BLOCK_SIZE;
++           i < BLAKE2S_BLOCK_SIZE; i += sizeof(entropy)) {
++              if (!arch_get_random_seed_long_early(&entropy) &&
++                  !arch_get_random_long_early(&entropy)) {
++                      entropy = random_get_entropy();
++                      arch_bytes -= sizeof(entropy);
+               }
+-              _mix_pool_bytes(&rv, sizeof(rv));
++              _mix_pool_bytes(&entropy, sizeof(entropy));
+       }
+       _mix_pool_bytes(&now, sizeof(now));
+       _mix_pool_bytes(utsname(), sizeof(*(utsname())));
++      _mix_pool_bytes(command_line, strlen(command_line));
++      add_latent_entropy();
+ 
+-      extract_entropy(base_crng.key, sizeof(base_crng.key));
+-      ++base_crng.generation;
+-
+-      if (arch_init && trust_cpu && !crng_ready()) {
+-              crng_init = 2;
+-              pr_notice("crng init done (trusting CPU's manufacturer)\n");
+-      }
++      if (crng_ready())
++              crng_reseed();
++      else if (trust_cpu)
++              credit_init_bits(arch_bytes * 8);
+ 
+-      if (ratelimit_disable) {
+-              urandom_warning.interval = 0;
+-              unseeded_warning.interval = 0;
+-      }
+       return 0;
+ }
+ 
+@@ -1023,164 +863,46 @@ int __init rand_initialize(void)
+  * the entropy pool having similar initial state across largely
+  * identical devices.
+  */
+-void add_device_randomness(const void *buf, size_t size)
++void add_device_randomness(const void *buf, size_t len)
+ {
+-      unsigned long cycles = random_get_entropy();
+-      unsigned long flags, now = jiffies;
+-
+-      if (crng_init == 0 && size)
+-              crng_pre_init_inject(buf, size, false);
++      unsigned long entropy = random_get_entropy();
++      unsigned long flags;
+ 
+       spin_lock_irqsave(&input_pool.lock, flags);
+-      _mix_pool_bytes(&cycles, sizeof(cycles));
+-      _mix_pool_bytes(&now, sizeof(now));
+-      _mix_pool_bytes(buf, size);
++      _mix_pool_bytes(&entropy, sizeof(entropy));
++      _mix_pool_bytes(buf, len);
+       spin_unlock_irqrestore(&input_pool.lock, flags);
+ }
+ EXPORT_SYMBOL(add_device_randomness);
+ 
+-/* There is one of these per entropy source */
+-struct timer_rand_state {
+-      unsigned long last_time;
+-      long last_delta, last_delta2;
+-};
+-
+-/*
+- * This function adds entropy to the entropy "pool" by using timing
+- * delays.  It uses the timer_rand_state structure to make an estimate
+- * of how many bits of entropy this call has added to the pool.
+- *
+- * The number "num" is also added to the pool - it should somehow describe
+- * the type of event which just happened.  This is currently 0-255 for
+- * keyboard scan codes, and 256 upwards for interrupts.
+- */
+-static void add_timer_randomness(struct timer_rand_state *state, unsigned int num)
+-{
+-      unsigned long cycles = random_get_entropy(), now = jiffies, flags;
+-      long delta, delta2, delta3;
+-
+-      spin_lock_irqsave(&input_pool.lock, flags);
+-      _mix_pool_bytes(&cycles, sizeof(cycles));
+-      _mix_pool_bytes(&now, sizeof(now));
+-      _mix_pool_bytes(&num, sizeof(num));
+-      spin_unlock_irqrestore(&input_pool.lock, flags);
+-
+-      /*
+-       * Calculate number of bits of randomness we probably added.
+-       * We take into account the first, second and third-order deltas
+-       * in order to make our estimate.
+-       */
+-      delta = now - READ_ONCE(state->last_time);
+-      WRITE_ONCE(state->last_time, now);
+-
+-      delta2 = delta - READ_ONCE(state->last_delta);
+-      WRITE_ONCE(state->last_delta, delta);
+-
+-      delta3 = delta2 - READ_ONCE(state->last_delta2);
+-      WRITE_ONCE(state->last_delta2, delta2);
+-
+-      if (delta < 0)
+-              delta = -delta;
+-      if (delta2 < 0)
+-              delta2 = -delta2;
+-      if (delta3 < 0)
+-              delta3 = -delta3;
+-      if (delta > delta2)
+-              delta = delta2;
+-      if (delta > delta3)
+-              delta = delta3;
+-
+-      /*
+-       * delta is now minimum absolute delta.
+-       * Round down by 1 bit on general principles,
+-       * and limit entropy estimate to 12 bits.
+-       */
+-      credit_entropy_bits(min_t(unsigned int, fls(delta >> 1), 11));
+-}
+-
+-void add_input_randomness(unsigned int type, unsigned int code,
+-                        unsigned int value)
+-{
+-      static unsigned char last_value;
+-      static struct timer_rand_state input_timer_state = { INITIAL_JIFFIES };
+-
+-      /* Ignore autorepeat and the like. */
+-      if (value == last_value)
+-              return;
+-
+-      last_value = value;
+-      add_timer_randomness(&input_timer_state,
+-                           (type << 4) ^ code ^ (code >> 4) ^ value);
+-}
+-EXPORT_SYMBOL_GPL(add_input_randomness);
+-
+-#ifdef CONFIG_BLOCK
+-void add_disk_randomness(struct gendisk *disk)
+-{
+-      if (!disk || !disk->random)
+-              return;
+-      /* First major is 1, so we get >= 0x200 here. */
+-      add_timer_randomness(disk->random, 0x100 + disk_devt(disk));
+-}
+-EXPORT_SYMBOL_GPL(add_disk_randomness);
+-
+-void rand_initialize_disk(struct gendisk *disk)
+-{
+-      struct timer_rand_state *state;
+-
+-      /*
+-       * If kzalloc returns null, we just won't use that entropy
+-       * source.
+-       */
+-      state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
+-      if (state) {
+-              state->last_time = INITIAL_JIFFIES;
+-              disk->random = state;
+-      }
+-}
+-#endif
+-
+ /*
+  * Interface for in-kernel drivers of true hardware RNGs.
+  * Those devices may produce endless random bits and will be throttled
+  * when our pool is full.
+  */
+-void add_hwgenerator_randomness(const void *buffer, size_t count,
+-                              size_t entropy)
++void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy)
+ {
+-      if (unlikely(crng_init == 0 && entropy < POOL_MIN_BITS)) {
+-              crng_pre_init_inject(buffer, count, true);
+-              mix_pool_bytes(buffer, count);
+-              return;
+-      }
++      mix_pool_bytes(buf, len);
++      credit_init_bits(entropy);
+ 
+       /*
+-       * Throttle writing if we're above the trickle threshold.
+-       * We'll be woken up again once below POOL_MIN_BITS, when
+-       * the calling thread is about to terminate, or once
+-       * CRNG_RESEED_INTERVAL has elapsed.
++       * Throttle writing to once every CRNG_RESEED_INTERVAL, unless
++       * we're not yet initialized.
+        */
+-      wait_event_interruptible_timeout(random_write_wait,
+-                      !system_wq || kthread_should_stop() ||
+-                      input_pool.entropy_count < POOL_MIN_BITS,
+-                      CRNG_RESEED_INTERVAL);
+-      mix_pool_bytes(buffer, count);
+-      credit_entropy_bits(entropy);
++      if (!kthread_should_stop() && crng_ready())
++              schedule_timeout_interruptible(CRNG_RESEED_INTERVAL);
+ }
+ EXPORT_SYMBOL_GPL(add_hwgenerator_randomness);
+ 
+ /*
+- * Handle random seed passed by bootloader.
+- * If the seed is trustworthy, it would be regarded as hardware RNGs. Otherwise
+- * it would be regarded as device data.
+- * The decision is controlled by CONFIG_RANDOM_TRUST_BOOTLOADER.
++ * Handle random seed passed by bootloader, and credit it if
++ * CONFIG_RANDOM_TRUST_BOOTLOADER is set.
+  */
+-void add_bootloader_randomness(const void *buf, size_t size)
++void __cold add_bootloader_randomness(const void *buf, size_t len)
+ {
++      mix_pool_bytes(buf, len);
+       if (trust_bootloader)
+-              add_hwgenerator_randomness(buf, size, size * 8);
+-      else
+-              add_device_randomness(buf, size);
++              credit_init_bits(len * 8);
+ }
+ EXPORT_SYMBOL_GPL(add_bootloader_randomness);
+ 
+@@ -1192,11 +914,11 @@ static BLOCKING_NOTIFIER_HEAD(vmfork_chain);
+  * don't credit it, but we do immediately force a reseed after so
+  * that it's used by the crng posthaste.
+  */
+-void add_vmfork_randomness(const void *unique_vm_id, size_t size)
++void __cold add_vmfork_randomness(const void *unique_vm_id, size_t len)
+ {
+-      add_device_randomness(unique_vm_id, size);
++      add_device_randomness(unique_vm_id, len);
+       if (crng_ready()) {
+-              crng_reseed(true);
++              crng_reseed();
+               pr_notice("crng reseeded due to virtual machine fork\n");
+       }
+       blocking_notifier_call_chain(&vmfork_chain, 0, NULL);
+@@ -1205,13 +927,13 @@ void add_vmfork_randomness(const void *unique_vm_id, size_t size)
+ EXPORT_SYMBOL_GPL(add_vmfork_randomness);
+ #endif
+ 
+-int register_random_vmfork_notifier(struct notifier_block *nb)
++int __cold register_random_vmfork_notifier(struct notifier_block *nb)
+ {
+       return blocking_notifier_chain_register(&vmfork_chain, nb);
+ }
+ EXPORT_SYMBOL_GPL(register_random_vmfork_notifier);
+ 
+-int unregister_random_vmfork_notifier(struct notifier_block *nb)
++int __cold unregister_random_vmfork_notifier(struct notifier_block *nb)
+ {
+       return blocking_notifier_chain_unregister(&vmfork_chain, nb);
+ }
+@@ -1223,17 +945,15 @@ struct fast_pool {
+       unsigned long pool[4];
+       unsigned long last;
+       unsigned int count;
+-      u16 reg_idx;
+ };
+ 
+ static DEFINE_PER_CPU(struct fast_pool, irq_randomness) = {
+ #ifdef CONFIG_64BIT
+-      /* SipHash constants */
+-      .pool = { 0x736f6d6570736575UL, 0x646f72616e646f6dUL,
+-                0x6c7967656e657261UL, 0x7465646279746573UL }
++#define FASTMIX_PERM SIPHASH_PERMUTATION
++      .pool = { SIPHASH_CONST_0, SIPHASH_CONST_1, SIPHASH_CONST_2, SIPHASH_CONST_3 }
+ #else
+-      /* HalfSipHash constants */
+-      .pool = { 0, 0, 0x6c796765U, 0x74656462U }
++#define FASTMIX_PERM HSIPHASH_PERMUTATION
++      .pool = { HSIPHASH_CONST_0, HSIPHASH_CONST_1, HSIPHASH_CONST_2, HSIPHASH_CONST_3 }
+ #endif
+ };
+ 
+@@ -1241,27 +961,16 @@ static DEFINE_PER_CPU(struct fast_pool, irq_randomness) = {
+  * This is [Half]SipHash-1-x, starting from an empty key. Because
+  * the key is fixed, it assumes that its inputs are non-malicious,
+  * and therefore this has no security on its own. s represents the
+- * 128 or 256-bit SipHash state, while v represents a 128-bit input.
++ * four-word SipHash state, while v represents a two-word input.
+  */
+-static void fast_mix(unsigned long s[4], const unsigned long *v)
++static void fast_mix(unsigned long s[4], unsigned long v1, unsigned long v2)
+ {
+-      size_t i;
+-
+-      for (i = 0; i < 16 / sizeof(long); ++i) {
+-              s[3] ^= v[i];
+-#ifdef CONFIG_64BIT
+-              s[0] += s[1]; s[1] = rol64(s[1], 13); s[1] ^= s[0]; s[0] = rol64(s[0], 32);
+-              s[2] += s[3]; s[3] = rol64(s[3], 16); s[3] ^= s[2];
+-              s[0] += s[3]; s[3] = rol64(s[3], 21); s[3] ^= s[0];
+-              s[2] += s[1]; s[1] = rol64(s[1], 17); s[1] ^= s[2]; s[2] = rol64(s[2], 32);
+-#else
+-              s[0] += s[1]; s[1] = rol32(s[1],  5); s[1] ^= s[0]; s[0] = rol32(s[0], 16);
+-              s[2] += s[3]; s[3] = rol32(s[3],  8); s[3] ^= s[2];
+-              s[0] += s[3]; s[3] = rol32(s[3],  7); s[3] ^= s[0];
+-              s[2] += s[1]; s[1] = rol32(s[1], 13); s[1] ^= s[2]; s[2] = rol32(s[2], 16);
+-#endif
+-              s[0] ^= v[i];
+-      }
++      s[3] ^= v1;
++      FASTMIX_PERM(s[0], s[1], s[2], s[3]);
++      s[0] ^= v1;
++      s[3] ^= v2;
++      FASTMIX_PERM(s[0], s[1], s[2], s[3]);
++      s[0] ^= v2;
+ }
+ 
+ #ifdef CONFIG_SMP
+@@ -1269,7 +978,7 @@ static void fast_mix(unsigned long s[4], const unsigned long *v)
+  * This function is called when the CPU has just come online, with
+  * entry CPUHP_AP_RANDOM_ONLINE, just after CPUHP_AP_WORKQUEUE_ONLINE.
+  */
+-int random_online_cpu(unsigned int cpu)
++int __cold random_online_cpu(unsigned int cpu)
+ {
+       /*
+        * During CPU shutdown and before CPU onlining, add_interrupt_
+@@ -1287,33 +996,18 @@ int random_online_cpu(unsigned int cpu)
+ }
+ #endif
+ 
+-static unsigned long get_reg(struct fast_pool *f, struct pt_regs *regs)
+-{
+-      unsigned long *ptr = (unsigned long *)regs;
+-      unsigned int idx;
+-
+-      if (regs == NULL)
+-              return 0;
+-      idx = READ_ONCE(f->reg_idx);
+-      if (idx >= sizeof(struct pt_regs) / sizeof(unsigned long))
+-              idx = 0;
+-      ptr += idx++;
+-      WRITE_ONCE(f->reg_idx, idx);
+-      return *ptr;
+-}
+-
+ static void mix_interrupt_randomness(struct work_struct *work)
+ {
+       struct fast_pool *fast_pool = container_of(work, struct fast_pool, mix);
+       /*
+-       * The size of the copied stack pool is explicitly 16 bytes so that we
+-       * tax mix_pool_byte()'s compression function the same amount on all
+-       * platforms. This means on 64-bit we copy half the pool into this,
+-       * while on 32-bit we copy all of it. The entropy is supposed to be
+-       * sufficiently dispersed between bits that in the sponge-like
+-       * half case, on average we don't wind up "losing" some.
++       * The size of the copied stack pool is explicitly 2 longs so that we
++       * only ever ingest half of the siphash output each time, retaining
++       * the other half as the next "key" that carries over. The entropy is
++       * supposed to be sufficiently dispersed between bits so on average
++       * we don't wind up "losing" some.
+        */
+-      u8 pool[16];
++      unsigned long pool[2];
++      unsigned int count;
+ 
+       /* Check to see if we're running on the wrong CPU due to hotplug. */
+       local_irq_disable();
+@@ -1327,17 +1021,13 @@ static void mix_interrupt_randomness(struct work_struct *work)
+        * consistent view, before we reenable irqs again.
+        */
+       memcpy(pool, fast_pool->pool, sizeof(pool));
++      count = fast_pool->count;
+       fast_pool->count = 0;
+       fast_pool->last = jiffies;
+       local_irq_enable();
+ 
+-      if (unlikely(crng_init == 0)) {
+-              crng_pre_init_inject(pool, sizeof(pool), true);
+-              mix_pool_bytes(pool, sizeof(pool));
+-      } else {
+-              mix_pool_bytes(pool, sizeof(pool));
+-              credit_entropy_bits(1);
+-      }
++      mix_pool_bytes(pool, sizeof(pool));
++      credit_init_bits(max(1u, (count & U16_MAX) / 64));
+ 
+       memzero_explicit(pool, sizeof(pool));
+ }
+@@ -1345,37 +1035,19 @@ static void mix_interrupt_randomness(struct work_struct *work)
+ void add_interrupt_randomness(int irq)
+ {
+       enum { MIX_INFLIGHT = 1U << 31 };
+-      unsigned long cycles = random_get_entropy(), now = jiffies;
++      unsigned long entropy = random_get_entropy();
+       struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness);
+       struct pt_regs *regs = get_irq_regs();
+       unsigned int new_count;
+-      union {
+-              u32 u32[4];
+-              u64 u64[2];
+-              unsigned long longs[16 / sizeof(long)];
+-      } irq_data;
+-
+-      if (cycles == 0)
+-              cycles = get_reg(fast_pool, regs);
+-
+-      if (sizeof(unsigned long) == 8) {
+-              irq_data.u64[0] = cycles ^ rol64(now, 32) ^ irq;
+-              irq_data.u64[1] = regs ? instruction_pointer(regs) : _RET_IP_;
+-      } else {
+-              irq_data.u32[0] = cycles ^ irq;
+-              irq_data.u32[1] = now;
+-              irq_data.u32[2] = regs ? instruction_pointer(regs) : _RET_IP_;
+-              irq_data.u32[3] = get_reg(fast_pool, regs);
+-      }
+ 
+-      fast_mix(fast_pool->pool, irq_data.longs);
++      fast_mix(fast_pool->pool, entropy,
++               (regs ? instruction_pointer(regs) : _RET_IP_) ^ swab(irq));
+       new_count = ++fast_pool->count;
+ 
+       if (new_count & MIX_INFLIGHT)
+               return;
+ 
+-      if (new_count < 64 && (!time_after(now, fast_pool->last + HZ) ||
+-                             unlikely(crng_init == 0)))
++      if (new_count < 64 && !time_is_before_jiffies(fast_pool->last + HZ))
+               return;
+ 
+       if (unlikely(!fast_pool->mix.func))
+@@ -1385,6 +1057,126 @@ void add_interrupt_randomness(int irq)
+ }
+ EXPORT_SYMBOL_GPL(add_interrupt_randomness);
+ 
++/* There is one of these per entropy source */
++struct timer_rand_state {
++      unsigned long last_time;
++      long last_delta, last_delta2;
++};
++
++/*
++ * This function adds entropy to the entropy "pool" by using timing
++ * delays. It uses the timer_rand_state structure to make an estimate
++ * of how many bits of entropy this call has added to the pool. The
++ * value "num" is also added to the pool; it should somehow describe
++ * the type of event that just happened.
++ */
++static void add_timer_randomness(struct timer_rand_state *state, unsigned int num)
++{
++      unsigned long entropy = random_get_entropy(), now = jiffies, flags;
++      long delta, delta2, delta3;
++      unsigned int bits;
++
++      /*
++       * If we're in a hard IRQ, add_interrupt_randomness() will be called
++       * sometime after, so mix into the fast pool.
++       */
++      if (in_hardirq()) {
++              fast_mix(this_cpu_ptr(&irq_randomness)->pool, entropy, num);
++      } else {
++              spin_lock_irqsave(&input_pool.lock, flags);
++              _mix_pool_bytes(&entropy, sizeof(entropy));
++              _mix_pool_bytes(&num, sizeof(num));
++              spin_unlock_irqrestore(&input_pool.lock, flags);
++      }
++
++      if (crng_ready())
++              return;
++
++      /*
++       * Calculate number of bits of randomness we probably added.
++       * We take into account the first, second and third-order deltas
++       * in order to make our estimate.
++       */
++      delta = now - READ_ONCE(state->last_time);
++      WRITE_ONCE(state->last_time, now);
++
++      delta2 = delta - READ_ONCE(state->last_delta);
++      WRITE_ONCE(state->last_delta, delta);
++
++      delta3 = delta2 - READ_ONCE(state->last_delta2);
++      WRITE_ONCE(state->last_delta2, delta2);
++
++      if (delta < 0)
++              delta = -delta;
++      if (delta2 < 0)
++              delta2 = -delta2;
++      if (delta3 < 0)
++              delta3 = -delta3;
++      if (delta > delta2)
++              delta = delta2;
++      if (delta > delta3)
++              delta = delta3;
++
++      /*
++       * delta is now minimum absolute delta. Round down by 1 bit
++       * on general principles, and limit entropy estimate to 11 bits.
++       */
++      bits = min(fls(delta >> 1), 11);
++
++      /*
++       * As mentioned above, if we're in a hard IRQ, add_interrupt_randomness()
++       * will run after this, which uses a different crediting scheme of 1 bit
++       * per every 64 interrupts. In order to let that function do accounting
++       * close to the one in this function, we credit a full 64/64 bit per bit,
++       * and then subtract one to account for the extra one added.
++       */
++      if (in_hardirq())
++              this_cpu_ptr(&irq_randomness)->count += max(1u, bits * 64) - 1;
++      else
++              _credit_init_bits(bits);
++}
++
++void add_input_randomness(unsigned int type, unsigned int code, unsigned int value)
++{
++      static unsigned char last_value;
++      static struct timer_rand_state input_timer_state = { INITIAL_JIFFIES };
++
++      /* Ignore autorepeat and the like. */
++      if (value == last_value)
++              return;
++
++      last_value = value;
++      add_timer_randomness(&input_timer_state,
++                           (type << 4) ^ code ^ (code >> 4) ^ value);
++}
++EXPORT_SYMBOL_GPL(add_input_randomness);
++
++#ifdef CONFIG_BLOCK
++void add_disk_randomness(struct gendisk *disk)
++{
++      if (!disk || !disk->random)
++              return;
++      /* First major is 1, so we get >= 0x200 here. */
++      add_timer_randomness(disk->random, 0x100 + disk_devt(disk));
++}
++EXPORT_SYMBOL_GPL(add_disk_randomness);
++
++void __cold rand_initialize_disk(struct gendisk *disk)
++{
++      struct timer_rand_state *state;
++
++      /*
++       * If kzalloc returns null, we just won't use that entropy
++       * source.
++       */
++      state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
++      if (state) {
++              state->last_time = INITIAL_JIFFIES;
++              disk->random = state;
++      }
++}
++#endif
++
+ /*
+  * Each time the timer fires, we expect that we got an unpredictable
+  * jump in the cycle counter. Even if the timer is running on another
+@@ -1398,40 +1190,40 @@ EXPORT_SYMBOL_GPL(add_interrupt_randomness);
+  *
+  * So the re-arming always happens in the entropy loop itself.
+  */
+-static void entropy_timer(struct timer_list *t)
++static void __cold entropy_timer(struct timer_list *t)
+ {
+-      credit_entropy_bits(1);
++      credit_init_bits(1);
+ }
+ 
+ /*
+  * If we have an actual cycle counter, see if we can
+  * generate enough entropy with timing noise
+  */
+-static void try_to_generate_entropy(void)
++static void __cold try_to_generate_entropy(void)
+ {
+       struct {
+-              unsigned long cycles;
++              unsigned long entropy;
+               struct timer_list timer;
+       } stack;
+ 
+-      stack.cycles = random_get_entropy();
++      stack.entropy = random_get_entropy();
+ 
+       /* Slow counter - or none. Don't even bother */
+-      if (stack.cycles == random_get_entropy())
++      if (stack.entropy == random_get_entropy())
+               return;
+ 
+       timer_setup_on_stack(&stack.timer, entropy_timer, 0);
+       while (!crng_ready() && !signal_pending(current)) {
+               if (!timer_pending(&stack.timer))
+                       mod_timer(&stack.timer, jiffies + 1);
+-              mix_pool_bytes(&stack.cycles, sizeof(stack.cycles));
++              mix_pool_bytes(&stack.entropy, sizeof(stack.entropy));
+               schedule();
+-              stack.cycles = random_get_entropy();
++              stack.entropy = random_get_entropy();
+       }
+ 
+       del_timer_sync(&stack.timer);
+       destroy_timer_on_stack(&stack.timer);
+-      mix_pool_bytes(&stack.cycles, sizeof(stack.cycles));
++      mix_pool_bytes(&stack.entropy, sizeof(stack.entropy));
+ }
+ 
+ 
+@@ -1463,9 +1255,12 @@ static void try_to_generate_entropy(void)
+  *
+  **********************************************************************/
+ 
+-SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count, unsigned int,
+-              flags)
+SYSCALL_DEFINE3(getrandom, char __user *, ubuf, size_t, len, unsigned int, flags)
+ {
++      struct iov_iter iter;
++      struct iovec iov;
++      int ret;
++
+       if (flags & ~(GRND_NONBLOCK | GRND_RANDOM | GRND_INSECURE))
+               return -EINVAL;
+ 
+@@ -1476,72 +1271,60 @@ SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count, unsigned int,
+       if ((flags & (GRND_INSECURE | GRND_RANDOM)) == (GRND_INSECURE | GRND_RANDOM))
+               return -EINVAL;
+ 
+-      if (count > INT_MAX)
+-              count = INT_MAX;
+-
+-      if (!(flags & GRND_INSECURE) && !crng_ready()) {
+-              int ret;
+-
++      if (!crng_ready() && !(flags & GRND_INSECURE)) {
+               if (flags & GRND_NONBLOCK)
+                       return -EAGAIN;
+               ret = wait_for_random_bytes();
+               if (unlikely(ret))
+                       return ret;
+       }
+-      return get_random_bytes_user(buf, count);
++
++      ret = import_single_range(READ, ubuf, len, &iov, &iter);
++      if (unlikely(ret))
++              return ret;
++      return get_random_bytes_user(&iter);
+ }
+ 
+ static __poll_t random_poll(struct file *file, poll_table *wait)
+ {
+-      __poll_t mask;
+-
+       poll_wait(file, &crng_init_wait, wait);
+-      poll_wait(file, &random_write_wait, wait);
+-      mask = 0;
+-      if (crng_ready())
+-              mask |= EPOLLIN | EPOLLRDNORM;
+-      if (input_pool.entropy_count < POOL_MIN_BITS)
+-              mask |= EPOLLOUT | EPOLLWRNORM;
+-      return mask;
++      return crng_ready() ? EPOLLIN | EPOLLRDNORM : EPOLLOUT | EPOLLWRNORM;
+ }
+ 
+-static int write_pool(const char __user *ubuf, size_t count)
++static ssize_t write_pool_user(struct iov_iter *iter)
+ {
+-      size_t len;
+-      int ret = 0;
+       u8 block[BLAKE2S_BLOCK_SIZE];
++      ssize_t ret = 0;
++      size_t copied;
+ 
+-      while (count) {
+-              len = min(count, sizeof(block));
+-              if (copy_from_user(block, ubuf, len)) {
+-                      ret = -EFAULT;
+-                      goto out;
++      if (unlikely(!iov_iter_count(iter)))
++              return 0;
++
++      for (;;) {
++              copied = copy_from_iter(block, sizeof(block), iter);
++              ret += copied;
++              mix_pool_bytes(block, copied);
++              if (!iov_iter_count(iter) || copied != sizeof(block))
++                      break;
++
++              BUILD_BUG_ON(PAGE_SIZE % sizeof(block) != 0);
++              if (ret % PAGE_SIZE == 0) {
++                      if (signal_pending(current))
++                              break;
++                      cond_resched();
+               }
+-              count -= len;
+-              ubuf += len;
+-              mix_pool_bytes(block, len);
+-              cond_resched();
+       }
+ 
+-out:
+       memzero_explicit(block, sizeof(block));
+-      return ret;
++      return ret ? ret : -EFAULT;
+ }
+ 
+-static ssize_t random_write(struct file *file, const char __user *buffer,
+-                          size_t count, loff_t *ppos)
++static ssize_t random_write_iter(struct kiocb *kiocb, struct iov_iter *iter)
+ {
+-      int ret;
+-
+-      ret = write_pool(buffer, count);
+-      if (ret)
+-              return ret;
+-
+-      return (ssize_t)count;
++      return write_pool_user(iter);
+ }
+ 
+-static ssize_t urandom_read(struct file *file, char __user *buf, size_t nbytes,
+-                          loff_t *ppos)
++static ssize_t urandom_read_iter(struct kiocb *kiocb, struct iov_iter *iter)
+ {
+       static int maxwarn = 10;
+ 
+@@ -1552,37 +1335,38 @@ static ssize_t urandom_read(struct file *file, char __user *buf, size_t nbytes,
+       if (!crng_ready())
+               try_to_generate_entropy();
+ 
+-      if (!crng_ready() && maxwarn > 0) {
+-              maxwarn--;
+-              if (__ratelimit(&urandom_warning))
+-                      pr_notice("%s: uninitialized urandom read (%zd bytes read)\n",
+-                                current->comm, nbytes);
++      if (!crng_ready()) {
++              if (!ratelimit_disable && maxwarn <= 0)
++                      ++urandom_warning.missed;
++              else if (ratelimit_disable || __ratelimit(&urandom_warning)) {
++                      --maxwarn;
++                      pr_notice("%s: uninitialized urandom read (%zu bytes read)\n",
++                                current->comm, iov_iter_count(iter));
++              }
+       }
+ 
+-      return get_random_bytes_user(buf, nbytes);
++      return get_random_bytes_user(iter);
+ }
+ 
+-static ssize_t random_read(struct file *file, char __user *buf, size_t nbytes,
+-                         loff_t *ppos)
++static ssize_t random_read_iter(struct kiocb *kiocb, struct iov_iter *iter)
+ {
+       int ret;
+ 
+       ret = wait_for_random_bytes();
+       if (ret != 0)
+               return ret;
+-      return get_random_bytes_user(buf, nbytes);
++      return get_random_bytes_user(iter);
+ }
+ 
+ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
+ {
+-      int size, ent_count;
+       int __user *p = (int __user *)arg;
+-      int retval;
++      int ent_count;
+ 
+       switch (cmd) {
+       case RNDGETENTCNT:
+               /* Inherently racy, no point locking. */
+-              if (put_user(input_pool.entropy_count, p))
++              if (put_user(input_pool.init_bits, p))
+                       return -EFAULT;
+               return 0;
+       case RNDADDTOENTCNT:
+@@ -1592,41 +1376,46 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
+                       return -EFAULT;
+               if (ent_count < 0)
+                       return -EINVAL;
+-              credit_entropy_bits(ent_count);
++              credit_init_bits(ent_count);
+               return 0;
+-      case RNDADDENTROPY:
++      case RNDADDENTROPY: {
++              struct iov_iter iter;
++              struct iovec iov;
++              ssize_t ret;
++              int len;
++
+               if (!capable(CAP_SYS_ADMIN))
+                       return -EPERM;
+               if (get_user(ent_count, p++))
+                       return -EFAULT;
+               if (ent_count < 0)
+                       return -EINVAL;
+-              if (get_user(size, p++))
++              if (get_user(len, p++))
+                       return -EFAULT;
+-              retval = write_pool((const char __user *)p, size);
+-              if (retval < 0)
+-                      return retval;
+-              credit_entropy_bits(ent_count);
++              ret = import_single_range(WRITE, p, len, &iov, &iter);
++              if (unlikely(ret))
++                      return ret;
++              ret = write_pool_user(&iter);
++              if (unlikely(ret < 0))
++                      return ret;
++              /* Since we're crediting, enforce that it was all written into the pool. */
++              if (unlikely(ret != len))
++                      return -EFAULT;
++              credit_init_bits(ent_count);
+               return 0;
++      }
+       case RNDZAPENTCNT:
+       case RNDCLEARPOOL:
+-              /*
+-               * Clear the entropy pool counters. We no longer clear
+-               * the entropy pool, as that's silly.
+-               */
++              /* No longer has any effect. */
+               if (!capable(CAP_SYS_ADMIN))
+                       return -EPERM;
+-              if (xchg(&input_pool.entropy_count, 0) >= POOL_MIN_BITS) {
+-                      wake_up_interruptible(&random_write_wait);
+-                      kill_fasync(&fasync, SIGIO, POLL_OUT);
+-              }
+               return 0;
+       case RNDRESEEDCRNG:
+               if (!capable(CAP_SYS_ADMIN))
+                       return -EPERM;
+               if (!crng_ready())
+                       return -ENODATA;
+-              crng_reseed(false);
++              crng_reseed();
+               return 0;
+       default:
+               return -EINVAL;
+@@ -1639,22 +1428,26 @@ static int random_fasync(int fd, struct file *filp, int on)
+ }
+ 
+ const struct file_operations random_fops = {
+-      .read = random_read,
+-      .write = random_write,
++      .read_iter = random_read_iter,
++      .write_iter = random_write_iter,
+       .poll = random_poll,
+       .unlocked_ioctl = random_ioctl,
+       .compat_ioctl = compat_ptr_ioctl,
+       .fasync = random_fasync,
+       .llseek = noop_llseek,
++      .splice_read = generic_file_splice_read,
++      .splice_write = iter_file_splice_write,
+ };
+ 
+ const struct file_operations urandom_fops = {
+-      .read = urandom_read,
+-      .write = random_write,
++      .read_iter = urandom_read_iter,
++      .write_iter = random_write_iter,
+       .unlocked_ioctl = random_ioctl,
+       .compat_ioctl = compat_ptr_ioctl,
+       .fasync = random_fasync,
+       .llseek = noop_llseek,
++      .splice_read = generic_file_splice_read,
++      .splice_write = iter_file_splice_write,
+ };
+ 
+ 
+@@ -1678,7 +1471,7 @@ const struct file_operations urandom_fops = {
+  *
+  * - write_wakeup_threshold - the amount of entropy in the input pool
+  *   below which write polls to /dev/random will unblock, requesting
+- *   more entropy, tied to the POOL_MIN_BITS constant. It is writable
++ *   more entropy, tied to the POOL_READY_BITS constant. It is writable
+  *   to avoid breaking old userspaces, but writing to it does not
+  *   change any behavior of the RNG.
+  *
+@@ -1693,7 +1486,7 @@ const struct file_operations urandom_fops = {
+ #include <linux/sysctl.h>
+ 
+ static int sysctl_random_min_urandom_seed = CRNG_RESEED_INTERVAL / HZ;
+-static int sysctl_random_write_wakeup_bits = POOL_MIN_BITS;
++static int sysctl_random_write_wakeup_bits = POOL_READY_BITS;
+ static int sysctl_poolsize = POOL_BITS;
+ static u8 sysctl_bootid[UUID_SIZE];
+ 
+@@ -1702,7 +1495,7 @@ static u8 sysctl_bootid[UUID_SIZE];
+  * UUID. The difference is in whether table->data is NULL; if it is,
+  * then a new UUID is generated and returned to the user.
+  */
+-static int proc_do_uuid(struct ctl_table *table, int write, void *buffer,
++static int proc_do_uuid(struct ctl_table *table, int write, void *buf,
+                       size_t *lenp, loff_t *ppos)
+ {
+       u8 tmp_uuid[UUID_SIZE], *uuid;
+@@ -1729,14 +1522,14 @@ static int proc_do_uuid(struct ctl_table *table, int write, void *buffer,
+       }
+ 
+       snprintf(uuid_string, sizeof(uuid_string), "%pU", uuid);
+-      return proc_dostring(&fake_table, 0, buffer, lenp, ppos);
++      return proc_dostring(&fake_table, 0, buf, lenp, ppos);
+ }
+ 
+ /* The same as proc_dointvec, but writes don't change anything. */
+-static int proc_do_rointvec(struct ctl_table *table, int write, void *buffer,
++static int proc_do_rointvec(struct ctl_table *table, int write, void *buf,
+                           size_t *lenp, loff_t *ppos)
+ {
+-      return write ? 0 : proc_dointvec(table, 0, buffer, lenp, ppos);
++      return write ? 0 : proc_dointvec(table, 0, buf, lenp, ppos);
+ }
+ 
+ static struct ctl_table random_table[] = {
+@@ -1749,7 +1542,7 @@ static struct ctl_table random_table[] = {
+       },
+       {
+               .procname       = "entropy_avail",
+-              .data           = &input_pool.entropy_count,
++              .data           = &input_pool.init_bits,
+               .maxlen         = sizeof(int),
+               .mode           = 0444,
+               .proc_handler   = proc_dointvec,
+@@ -1783,8 +1576,8 @@ static struct ctl_table random_table[] = {
+ };
+ 
+ /*
+- * rand_initialize() is called before sysctl_init(),
+- * so we cannot call register_sysctl_init() in rand_initialize()
++ * random_init() is called before sysctl_init(),
++ * so we cannot call register_sysctl_init() in random_init()
+  */
+ static int __init random_sysctls_init(void)
+ {
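[Editor's illustration, not part of the patch.] The add_timer_randomness() hunk above now credits entropy from the minimum of the first-, second- and third-order absolute deltas between events, taking fls(delta >> 1) and capping the result at 11 bits. A rough standalone C sketch of that estimator, with a userspace stand-in for the kernel's fls():

/* Delta-based credit estimate, sketched in userspace; hypothetical helpers. */
#include <stdio.h>

struct timer_rand_state { long last_time, last_delta, last_delta2; };

static unsigned int fls_ul(unsigned long x)
{
	unsigned int r = 0;

	while (x) {		/* index of the highest set bit, 1-based, like fls() */
		x >>= 1;
		r++;
	}
	return r;
}

static unsigned int estimate_bits(struct timer_rand_state *s, long now)
{
	long delta = now - s->last_time;
	long delta2 = delta - s->last_delta;
	long delta3 = delta2 - s->last_delta2;
	unsigned int bits;

	s->last_time = now;
	s->last_delta = delta;
	s->last_delta2 = delta2;

	if (delta < 0) delta = -delta;
	if (delta2 < 0) delta2 = -delta2;
	if (delta3 < 0) delta3 = -delta3;
	if (delta > delta2) delta = delta2;
	if (delta > delta3) delta = delta3;

	/* Round down by one bit and cap at 11 bits, as the patch does. */
	bits = fls_ul((unsigned long)(delta >> 1));
	return bits > 11 ? 11 : bits;
}

int main(void)
{
	struct timer_rand_state s = { 0, 0, 0 };
	long t[] = { 100, 217, 340, 470, 603 };

	for (int i = 0; i < 5; i++)
		printf("event at %ld -> %u bit(s)\n", t[i], estimate_bits(&s, t[i]));
	return 0;
}

The in-IRQ path of the same hunk converts this estimate into fast-pool counts (bits * 64, minus one) so that the per-64-interrupt crediting in mix_interrupt_randomness() ends up roughly equivalent.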
+diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_client.c b/drivers/hid/amd-sfh-hid/amd_sfh_client.c
+index c5de0ec4f9d03..444acd9e2cd6a 100644
+--- a/drivers/hid/amd-sfh-hid/amd_sfh_client.c
++++ b/drivers/hid/amd-sfh-hid/amd_sfh_client.c
+@@ -227,6 +227,17 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
+               dev_dbg(dev, "sid 0x%x status 0x%x\n",
+                       cl_data->sensor_idx[i], cl_data->sensor_sts[i]);
+       }
++      if (privdata->mp2_ops->discovery_status &&
++          privdata->mp2_ops->discovery_status(privdata) == 0) {
++              amd_sfh_hid_client_deinit(privdata);
++              for (i = 0; i < cl_data->num_hid_devices; i++) {
++                      devm_kfree(dev, cl_data->feature_report[i]);
++                      devm_kfree(dev, in_data->input_report[i]);
++                      devm_kfree(dev, cl_data->report_descr[i]);
++              }
++              dev_warn(dev, "Failed to discover, sensors not enabled\n");
++              return -EOPNOTSUPP;
++      }
+       schedule_delayed_work(&cl_data->work_buffer, msecs_to_jiffies(AMD_SFH_IDLE_LOOP));
+       return 0;
+ 
+diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
+index 6b5fd90b0bd1b..e18a4efd8839e 100644
+--- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
++++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
+@@ -130,6 +130,12 @@ static int amd_sfh_irq_init_v2(struct amd_mp2_dev *privdata)
+       return 0;
+ }
+ 
++static int amd_sfh_dis_sts_v2(struct amd_mp2_dev *privdata)
++{
++      return (readl(privdata->mmio + AMD_P2C_MSG(1)) &
++                    SENSOR_DISCOVERY_STATUS_MASK) >> SENSOR_DISCOVERY_STATUS_SHIFT;
++}
++
+ void amd_start_sensor(struct amd_mp2_dev *privdata, struct amd_mp2_sensor_info info)
+ {
+       union sfh_cmd_param cmd_param;
+@@ -245,6 +251,7 @@ static const struct amd_mp2_ops amd_sfh_ops_v2 = {
+       .response = amd_sfh_wait_response_v2,
+       .clear_intr = amd_sfh_clear_intr_v2,
+       .init_intr = amd_sfh_irq_init_v2,
++      .discovery_status = amd_sfh_dis_sts_v2,
+ };
+ 
+ static const struct amd_mp2_ops amd_sfh_ops = {
+diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h
+index 97b99861fae25..9aa88a91ac8d1 100644
+--- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h
++++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h
+@@ -39,6 +39,9 @@
+ 
+ #define AMD_SFH_IDLE_LOOP     200
+ 
++#define SENSOR_DISCOVERY_STATUS_MASK          GENMASK(5, 3)
++#define SENSOR_DISCOVERY_STATUS_SHIFT         3
++
+ /* SFH Command register */
+ union sfh_cmd_base {
+       u32 ul;
+@@ -143,5 +146,6 @@ struct amd_mp2_ops {
+        int (*response)(struct amd_mp2_dev *mp2, u8 sid, u32 sensor_sts);
+        void (*clear_intr)(struct amd_mp2_dev *privdata);
+        int (*init_intr)(struct amd_mp2_dev *privdata);
++       int (*discovery_status)(struct amd_mp2_dev *privdata);
+ };
+ #endif
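[Editor's illustration, not part of the patch.] The new discovery_status hook reads P2C message register 1 and extracts bits 5:3 via GENMASK(5, 3) and a shift of 3; a result of 0 makes amd_sfh_hid_client_init() tear the client back down. A minimal sketch of that field extraction, using a made-up register value:

#include <stdio.h>
#include <stdint.h>

#define SENSOR_DISCOVERY_STATUS_MASK	(0x7u << 3)	/* same bits as GENMASK(5, 3) */
#define SENSOR_DISCOVERY_STATUS_SHIFT	3

int main(void)
{
	uint32_t p2c_msg1 = 0x28;	/* hypothetical register value: bits 5:3 = 0b101 */
	uint32_t status = (p2c_msg1 & SENSOR_DISCOVERY_STATUS_MASK) >>
			  SENSOR_DISCOVERY_STATUS_SHIFT;

	printf("discovery status = %u\n", status);	/* prints 5; 0 would mean "not discovered" */
	return 0;
}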
+diff --git a/include/linux/mm.h b/include/linux/mm.h
+index 9f44254af8ce9..b0183450e484b 100644
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -2677,6 +2677,7 @@ extern int install_special_mapping(struct mm_struct *mm,
+                                  unsigned long flags, struct page **pages);
+ 
+ unsigned long randomize_stack_top(unsigned long stack_top);
++unsigned long randomize_page(unsigned long start, unsigned long range);
+ 
+ extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
+ 
+diff --git a/include/linux/prandom.h b/include/linux/prandom.h
+index 056d31317e499..a4aadd2dc153e 100644
+--- a/include/linux/prandom.h
++++ b/include/linux/prandom.h
+@@ -10,6 +10,7 @@
+ 
+ #include <linux/types.h>
+ #include <linux/percpu.h>
++#include <linux/siphash.h>
+ 
+ u32 prandom_u32(void);
+ void prandom_bytes(void *buf, size_t nbytes);
+@@ -27,15 +28,10 @@ DECLARE_PER_CPU(unsigned long, net_rand_noise);
+  * The core SipHash round function.  Each line can be executed in
+  * parallel given enough CPU resources.
+  */
+-#define PRND_SIPROUND(v0, v1, v2, v3) ( \
+-      v0 += v1, v1 = rol64(v1, 13),  v2 += v3, v3 = rol64(v3, 16), \
+-      v1 ^= v0, v0 = rol64(v0, 32),  v3 ^= v2,                     \
+-      v0 += v3, v3 = rol64(v3, 21),  v2 += v1, v1 = rol64(v1, 17), \
+-      v3 ^= v0,                      v1 ^= v2, v2 = rol64(v2, 32)  \
+-)
++#define PRND_SIPROUND(v0, v1, v2, v3) SIPHASH_PERMUTATION(v0, v1, v2, v3)
+ 
+-#define PRND_K0 (0x736f6d6570736575 ^ 0x6c7967656e657261)
+-#define PRND_K1 (0x646f72616e646f6d ^ 0x7465646279746573)
++#define PRND_K0 (SIPHASH_CONST_0 ^ SIPHASH_CONST_2)
++#define PRND_K1 (SIPHASH_CONST_1 ^ SIPHASH_CONST_3)
+ 
+ #elif BITS_PER_LONG == 32
+ /*
+@@ -43,14 +39,9 @@ DECLARE_PER_CPU(unsigned long, net_rand_noise);
+  * This is weaker, but 32-bit machines are not used for high-traffic
+  * applications, so there is less output for an attacker to analyze.
+  */
+-#define PRND_SIPROUND(v0, v1, v2, v3) ( \
+-      v0 += v1, v1 = rol32(v1,  5),  v2 += v3, v3 = rol32(v3,  8), \
+-      v1 ^= v0, v0 = rol32(v0, 16),  v3 ^= v2,                     \
+-      v0 += v3, v3 = rol32(v3,  7),  v2 += v1, v1 = rol32(v1, 13), \
+-      v3 ^= v0,                      v1 ^= v2, v2 = rol32(v2, 16)  \
+-)
+-#define PRND_K0 0x6c796765
+-#define PRND_K1 0x74656462
++#define PRND_SIPROUND(v0, v1, v2, v3) HSIPHASH_PERMUTATION(v0, v1, v2, v3)
++#define PRND_K0 (HSIPHASH_CONST_0 ^ HSIPHASH_CONST_2)
++#define PRND_K1 (HSIPHASH_CONST_1 ^ HSIPHASH_CONST_3)
+ 
+ #else
+ #error Unsupported BITS_PER_LONG
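[Editor's illustration, not part of the patch.] The prandom.h hunk replaces the open-coded round macros and 64-bit key literals with the shared SipHash permutation and constants; the resulting key values are unchanged. A throwaway check of that equivalence (constants copied from the siphash.h hunk later in this patch):

#include <stdio.h>
#include <stdint.h>

#define SIPHASH_CONST_0 0x736f6d6570736575ULL
#define SIPHASH_CONST_1 0x646f72616e646f6dULL
#define SIPHASH_CONST_2 0x6c7967656e657261ULL
#define SIPHASH_CONST_3 0x7465646279746573ULL

int main(void)
{
	/* Same values the old literals spelled out inline. */
	uint64_t k0 = SIPHASH_CONST_0 ^ SIPHASH_CONST_2;
	uint64_t k1 = SIPHASH_CONST_1 ^ SIPHASH_CONST_3;

	printf("PRND_K0 = %#llx\n", (unsigned long long)k0);
	printf("PRND_K1 = %#llx\n", (unsigned long long)k1);
	return 0;
}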
+diff --git a/include/linux/random.h b/include/linux/random.h
+index f673fbb838b35..4364de2300be6 100644
+--- a/include/linux/random.h
++++ b/include/linux/random.h
+@@ -12,45 +12,33 @@
+ 
+ struct notifier_block;
+ 
+-extern void add_device_randomness(const void *, size_t);
+-extern void add_bootloader_randomness(const void *, size_t);
++void add_device_randomness(const void *buf, size_t len);
++void add_bootloader_randomness(const void *buf, size_t len);
++void add_input_randomness(unsigned int type, unsigned int code,
++                        unsigned int value) __latent_entropy;
++void add_interrupt_randomness(int irq) __latent_entropy;
++void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy);
+ 
+ #if defined(LATENT_ENTROPY_PLUGIN) && !defined(__CHECKER__)
+ static inline void add_latent_entropy(void)
+ {
+-      add_device_randomness((const void *)&latent_entropy,
+-                            sizeof(latent_entropy));
++      add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
+ }
+ #else
+-static inline void add_latent_entropy(void) {}
++static inline void add_latent_entropy(void) { }
+ #endif
+ 
+-extern void add_input_randomness(unsigned int type, unsigned int code,
+-                               unsigned int value) __latent_entropy;
+-extern void add_interrupt_randomness(int irq) __latent_entropy;
+-extern void add_hwgenerator_randomness(const void *buffer, size_t count,
+-                                     size_t entropy);
+ #if IS_ENABLED(CONFIG_VMGENID)
+-extern void add_vmfork_randomness(const void *unique_vm_id, size_t size);
+-extern int register_random_vmfork_notifier(struct notifier_block *nb);
+-extern int unregister_random_vmfork_notifier(struct notifier_block *nb);
++void add_vmfork_randomness(const void *unique_vm_id, size_t len);
++int register_random_vmfork_notifier(struct notifier_block *nb);
++int unregister_random_vmfork_notifier(struct notifier_block *nb);
+ #else
+ static inline int register_random_vmfork_notifier(struct notifier_block *nb) { return 0; }
+ static inline int unregister_random_vmfork_notifier(struct notifier_block *nb) { return 0; }
+ #endif
+ 
+-extern void get_random_bytes(void *buf, size_t nbytes);
+-extern int wait_for_random_bytes(void);
+-extern int __init rand_initialize(void);
+-extern bool rng_is_initialized(void);
+-extern int register_random_ready_notifier(struct notifier_block *nb);
+-extern int unregister_random_ready_notifier(struct notifier_block *nb);
+-extern size_t __must_check get_random_bytes_arch(void *buf, size_t nbytes);
+-
+-#ifndef MODULE
+-extern const struct file_operations random_fops, urandom_fops;
+-#endif
+-
++void get_random_bytes(void *buf, size_t len);
++size_t __must_check get_random_bytes_arch(void *buf, size_t len);
+ u32 get_random_u32(void);
+ u64 get_random_u64(void);
+ static inline unsigned int get_random_int(void)
+@@ -82,11 +70,15 @@ static inline unsigned long get_random_long(void)
+ 
+ static inline unsigned long get_random_canary(void)
+ {
+-      unsigned long val = get_random_long();
+-
+-      return val & CANARY_MASK;
++      return get_random_long() & CANARY_MASK;
+ }
+ 
++int __init random_init(const char *command_line);
++bool rng_is_initialized(void);
++int wait_for_random_bytes(void);
++int register_random_ready_notifier(struct notifier_block *nb);
++int unregister_random_ready_notifier(struct notifier_block *nb);
++
+ /* Calls wait_for_random_bytes() and then calls get_random_bytes(buf, nbytes).
+  * Returns the result of the call to wait_for_random_bytes. */
+ static inline int get_random_bytes_wait(void *buf, size_t nbytes)
+@@ -96,22 +88,20 @@ static inline int get_random_bytes_wait(void *buf, size_t nbytes)
+       return ret;
+ }
+ 
+-#define declare_get_random_var_wait(var) \
+-      static inline int get_random_ ## var ## _wait(var *out) { \
++#define declare_get_random_var_wait(name, ret_type) \
++      static inline int get_random_ ## name ## _wait(ret_type *out) { \
+               int ret = wait_for_random_bytes(); \
+               if (unlikely(ret)) \
+                       return ret; \
+-              *out = get_random_ ## var(); \
++              *out = get_random_ ## name(); \
+               return 0; \
+       }
+-declare_get_random_var_wait(u32)
+-declare_get_random_var_wait(u64)
+-declare_get_random_var_wait(int)
+-declare_get_random_var_wait(long)
++declare_get_random_var_wait(u32, u32)
++declare_get_random_var_wait(u64, u32)
++declare_get_random_var_wait(int, unsigned int)
++declare_get_random_var_wait(long, unsigned long)
+ #undef declare_get_random_var
+ 
+-unsigned long randomize_page(unsigned long start, unsigned long range);
+-
+ /*
+  * This is designed to be standalone for just prandom
+  * users, but for now we include it from <linux/random.h>
+@@ -122,22 +112,10 @@ unsigned long randomize_page(unsigned long start, unsigned long range);
+ #ifdef CONFIG_ARCH_RANDOM
+ # include <asm/archrandom.h>
+ #else
+-static inline bool __must_check arch_get_random_long(unsigned long *v)
+-{
+-      return false;
+-}
+-static inline bool __must_check arch_get_random_int(unsigned int *v)
+-{
+-      return false;
+-}
+-static inline bool __must_check arch_get_random_seed_long(unsigned long *v)
+-{
+-      return false;
+-}
+-static inline bool __must_check arch_get_random_seed_int(unsigned int *v)
+-{
+-      return false;
+-}
++static inline bool __must_check arch_get_random_long(unsigned long *v) { return false; }
++static inline bool __must_check arch_get_random_int(unsigned int *v) { return false; }
++static inline bool __must_check arch_get_random_seed_long(unsigned long *v) { return false; }
++static inline bool __must_check arch_get_random_seed_int(unsigned int *v) { return false; }
+ #endif
+ 
+ /*
+@@ -161,8 +139,12 @@ static inline bool __init arch_get_random_long_early(unsigned long *v)
+ #endif
+ 
+ #ifdef CONFIG_SMP
+-extern int random_prepare_cpu(unsigned int cpu);
+-extern int random_online_cpu(unsigned int cpu);
++int random_prepare_cpu(unsigned int cpu);
++int random_online_cpu(unsigned int cpu);
++#endif
++
++#ifndef MODULE
++extern const struct file_operations random_fops, urandom_fops;
+ #endif
+ 
+ #endif /* _LINUX_RANDOM_H */
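[Editor's illustration, not part of the patch.] The reworked declare_get_random_var_wait() macro above now takes the C return type separately from the name used for token pasting. A compilable toy expansion with stubbed-out backends (the stubs are hypothetical; in the kernel they are wait_for_random_bytes() and get_random_u32(), and unlikely() is omitted here):

#include <stdio.h>
#include <stdint.h>

/* Hypothetical stand-ins for the kernel helpers. */
static int wait_for_random_bytes(void) { return 0; }
static uint32_t get_random_u32(void) { return 0xdeadbeefu; /* fixed stub value */ }

#define declare_get_random_var_wait(name, ret_type) \
	static inline int get_random_ ## name ## _wait(ret_type *out) { \
		int ret = wait_for_random_bytes(); \
		if (ret) \
			return ret; \
		*out = get_random_ ## name(); \
		return 0; \
	}

declare_get_random_var_wait(u32, uint32_t)	/* defines get_random_u32_wait() */

int main(void)
{
	uint32_t v;

	if (!get_random_u32_wait(&v))
		printf("got %#x\n", v);
	return 0;
}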
+diff --git a/include/linux/security.h b/include/linux/security.h
+index 25b3ef71f495e..7fc4e9f49f542 100644
+--- a/include/linux/security.h
++++ b/include/linux/security.h
+@@ -121,10 +121,12 @@ enum lockdown_reason {
+       LOCKDOWN_DEBUGFS,
+       LOCKDOWN_XMON_WR,
+       LOCKDOWN_BPF_WRITE_USER,
++      LOCKDOWN_DBG_WRITE_KERNEL,
+       LOCKDOWN_INTEGRITY_MAX,
+       LOCKDOWN_KCORE,
+       LOCKDOWN_KPROBES,
+       LOCKDOWN_BPF_READ_KERNEL,
++      LOCKDOWN_DBG_READ_KERNEL,
+       LOCKDOWN_PERF,
+       LOCKDOWN_TRACEFS,
+       LOCKDOWN_XMON_RW,
+diff --git a/include/linux/siphash.h b/include/linux/siphash.h
+index cce8a9acc76cb..3af1428da5597 100644
+--- a/include/linux/siphash.h
++++ b/include/linux/siphash.h
+@@ -138,4 +138,32 @@ static inline u32 hsiphash(const void *data, size_t len,
+       return ___hsiphash_aligned(data, len, key);
+ }
+ 
++/*
++ * These macros expose the raw SipHash and HalfSipHash permutations.
++ * Do not use them directly! If you think you have a use for them,
++ * be sure to CC the maintainer of this file explaining why.
++ */
++
++#define SIPHASH_PERMUTATION(a, b, c, d) ( \
++      (a) += (b), (b) = rol64((b), 13), (b) ^= (a), (a) = rol64((a), 32), \
++      (c) += (d), (d) = rol64((d), 16), (d) ^= (c), \
++      (a) += (d), (d) = rol64((d), 21), (d) ^= (a), \
++      (c) += (b), (b) = rol64((b), 17), (b) ^= (c), (c) = rol64((c), 32))
++
++#define SIPHASH_CONST_0 0x736f6d6570736575ULL
++#define SIPHASH_CONST_1 0x646f72616e646f6dULL
++#define SIPHASH_CONST_2 0x6c7967656e657261ULL
++#define SIPHASH_CONST_3 0x7465646279746573ULL
++
++#define HSIPHASH_PERMUTATION(a, b, c, d) ( \
++      (a) += (b), (b) = rol32((b), 5), (b) ^= (a), (a) = rol32((a), 16), \
++      (c) += (d), (d) = rol32((d), 8), (d) ^= (c), \
++      (a) += (d), (d) = rol32((d), 7), (d) ^= (a), \
++      (c) += (b), (b) = rol32((b), 13), (b) ^= (c), (c) = rol32((c), 16))
++
++#define HSIPHASH_CONST_0 0U
++#define HSIPHASH_CONST_1 0U
++#define HSIPHASH_CONST_2 0x6c796765U
++#define HSIPHASH_CONST_3 0x74656462U
++
+ #endif /* _LINUX_SIPHASH_H */
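[Editor's illustration, not part of the patch.] The exported SIPHASH_PERMUTATION/HSIPHASH_PERMUTATION macros are what the rewritten fast_mix() and PRND_SIPROUND now expand to. A self-contained sketch of the 64-bit permutation driving a SipHash-1-x style absorb of two words, mirroring the fast_mix() hunk earlier in this patch (illustrative only):

#include <stdio.h>
#include <stdint.h>

static inline uint64_t rol64(uint64_t v, int n) { return (v << n) | (v >> (64 - n)); }

#define SIPHASH_PERMUTATION(a, b, c, d) ( \
	(a) += (b), (b) = rol64((b), 13), (b) ^= (a), (a) = rol64((a), 32), \
	(c) += (d), (d) = rol64((d), 16), (d) ^= (c), \
	(a) += (d), (d) = rol64((d), 21), (d) ^= (a), \
	(c) += (b), (b) = rol64((b), 17), (b) ^= (c), (c) = rol64((c), 32))

/* One permutation per absorbed word, keyed only by the running state. */
static void fast_mix_sketch(uint64_t s[4], uint64_t v1, uint64_t v2)
{
	s[3] ^= v1;
	SIPHASH_PERMUTATION(s[0], s[1], s[2], s[3]);
	s[0] ^= v1;
	s[3] ^= v2;
	SIPHASH_PERMUTATION(s[0], s[1], s[2], s[3]);
	s[0] ^= v2;
}

int main(void)
{
	uint64_t s[4] = { 0x736f6d6570736575ULL, 0x646f72616e646f6dULL,
			  0x6c7967656e657261ULL, 0x7465646279746573ULL };

	fast_mix_sketch(s, 0xdeadbeefULL, 0x12345678ULL);
	printf("%016llx %016llx\n", (unsigned long long)s[0], (unsigned long long)s[1]);
	return 0;
}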
+diff --git a/include/linux/timex.h b/include/linux/timex.h
+index 5745c90c88005..3871b06bd302c 100644
+--- a/include/linux/timex.h
++++ b/include/linux/timex.h
+@@ -62,6 +62,8 @@
+ #include <linux/types.h>
+ #include <linux/param.h>
+ 
++unsigned long random_get_entropy_fallback(void);
++
+ #include <asm/timex.h>
+ 
+ #ifndef random_get_entropy
+@@ -74,8 +76,14 @@
+  *
+  * By default we use get_cycles() for this purpose, but individual
+  * architectures may override this in their asm/timex.h header file.
++ * If a given arch does not have get_cycles(), then we fallback to
++ * using random_get_entropy_fallback().
+  */
++#ifdef get_cycles
+ #define random_get_entropy()  ((unsigned long)get_cycles())
++#else
++#define random_get_entropy()  random_get_entropy_fallback()
++#endif
+ #endif
+ 
+ /*
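[Editor's illustration, not part of the patch.] The timex.h change selects the cycle-counter source at compile time: if the architecture's asm/timex.h defines get_cycles(), random_get_entropy() wraps it, otherwise it falls back to the new timekeeping helper. A condensed, compilable sketch of that selection with made-up stand-ins:

#include <stdio.h>

/* Hypothetical stand-ins: an arch cycle counter and the timekeeping fallback. */
unsigned long random_get_entropy_fallback(void) { return 12345UL; }
static unsigned long arch_cycles(void) { return 67890UL; }

/* Comment this #define out to simulate an architecture without get_cycles(). */
#define get_cycles arch_cycles

#ifdef get_cycles
#define random_get_entropy() ((unsigned long)get_cycles())
#else
#define random_get_entropy() random_get_entropy_fallback()
#endif

int main(void)
{
	printf("random_get_entropy() = %lu\n", random_get_entropy());
	return 0;
}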
+diff --git a/init/main.c b/init/main.c
+index 98182c3c2c4b3..f057c49f1d9d8 100644
+--- a/init/main.c
++++ b/init/main.c
+@@ -1035,21 +1035,18 @@ asmlinkage __visible void __init __no_sanitize_address start_kernel(void)
+       softirq_init();
+       timekeeping_init();
+       kfence_init();
++      time_init();
+ 
+       /*
+        * For best initial stack canary entropy, prepare it after:
+        * - setup_arch() for any UEFI RNG entropy and boot cmdline access
+-       * - timekeeping_init() for ktime entropy used in rand_initialize()
+-       * - rand_initialize() to get any arch-specific entropy like RDRAND
+-       * - add_latent_entropy() to get any latent entropy
+-       * - adding command line entropy
++       * - timekeeping_init() for ktime entropy used in random_init()
++       * - time_init() for making random_get_entropy() work on some platforms
++       * - random_init() to initialize the RNG from from early entropy sources
+        */
+-      rand_initialize();
+-      add_latent_entropy();
+-      add_device_randomness(command_line, strlen(command_line));
++      random_init(command_line);
+       boot_init_stack_canary();
+ 
+-      time_init();
+       perf_event_init();
+       profile_init();
+       call_function_init();
+diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
+index da06a5553835b..7beceb447211d 100644
+--- a/kernel/debug/debug_core.c
++++ b/kernel/debug/debug_core.c
+@@ -53,6 +53,7 @@
+ #include <linux/vmacache.h>
+ #include <linux/rcupdate.h>
+ #include <linux/irq.h>
++#include <linux/security.h>
+ 
+ #include <asm/cacheflush.h>
+ #include <asm/byteorder.h>
+@@ -752,6 +753,29 @@ cpu_master_loop:
+                               continue;
+                       kgdb_connected = 0;
+               } else {
++                      /*
++                       * This is a brutal way to interfere with the debugger
++                       * and prevent gdb being used to poke at kernel memory.
++                       * This could cause trouble if lockdown is applied when
++                       * there is already an active gdb session. For now the
++                       * answer is simply "don't do that". Typically lockdown
++                       * *will* be applied before the debug core gets started
++                       * so only developers using kgdb for fairly advanced
++                       * early kernel debug can be biten by this. Hopefully
++                       * they are sophisticated enough to take care of
++                       * themselves, especially with help from the lockdown
++                       * message printed on the console!
++                       */
++                      if (security_locked_down(LOCKDOWN_DBG_WRITE_KERNEL)) {
++                              if (IS_ENABLED(CONFIG_KGDB_KDB)) {
++                                      /* Switch back to kdb if possible... */
++                                      dbg_kdb_mode = 1;
++                                      continue;
++                              } else {
++                                      /* ... otherwise just bail */
++                                      break;
++                              }
++                      }
+                       error = gdb_serial_stub(ks);
+               }
+ 
+diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
+index 0852a537dad4c..ead4da9471270 100644
+--- a/kernel/debug/kdb/kdb_main.c
++++ b/kernel/debug/kdb/kdb_main.c
+@@ -45,6 +45,7 @@
+ #include <linux/proc_fs.h>
+ #include <linux/uaccess.h>
+ #include <linux/slab.h>
++#include <linux/security.h>
+ #include "kdb_private.h"
+ 
+ #undef        MODULE_PARAM_PREFIX
+@@ -166,10 +167,62 @@ struct task_struct *kdb_curr_task(int cpu)
+ }
+ 
+ /*
+- * Check whether the flags of the current command and the permissions
+- * of the kdb console has allow a command to be run.
++ * Update the permissions flags (kdb_cmd_enabled) to match the
++ * current lockdown state.
++ *
++ * Within this function the calls to security_locked_down() are "lazy". We
++ * avoid calling them if the current value of kdb_cmd_enabled already excludes
++ * flags that might be subject to lockdown. Additionally we deliberately check
++ * the lockdown flags independently (even though read lockdown implies write
++ * lockdown) since that results in both simpler code and clearer messages to
++ * the user on first-time debugger entry.
++ *
++ * The permission masks during a read+write lockdown permits the following
++ * flags: INSPECT, SIGNAL, REBOOT (and ALWAYS_SAFE).
++ *
++ * The INSPECT commands are not blocked during lockdown because they are
++ * not arbitrary memory reads. INSPECT covers the backtrace family (sometimes
++ * forcing them to have no arguments) and lsmod. These commands do expose
++ * some kernel state but do not allow the developer seated at the console to
++ * choose what state is reported. SIGNAL and REBOOT should not be 
controversial,
++ * given these are allowed for root during lockdown already.
++ */
++static void kdb_check_for_lockdown(void)
++{
++      const int write_flags = KDB_ENABLE_MEM_WRITE |
++                              KDB_ENABLE_REG_WRITE |
++                              KDB_ENABLE_FLOW_CTRL;
++      const int read_flags = KDB_ENABLE_MEM_READ |
++                             KDB_ENABLE_REG_READ;
++
++      bool need_to_lockdown_write = false;
++      bool need_to_lockdown_read = false;
++
++      if (kdb_cmd_enabled & (KDB_ENABLE_ALL | write_flags))
++              need_to_lockdown_write =
++                      security_locked_down(LOCKDOWN_DBG_WRITE_KERNEL);
++
++      if (kdb_cmd_enabled & (KDB_ENABLE_ALL | read_flags))
++              need_to_lockdown_read =
++                      security_locked_down(LOCKDOWN_DBG_READ_KERNEL);
++
++      /* De-compose KDB_ENABLE_ALL if required */
++      if (need_to_lockdown_write || need_to_lockdown_read)
++              if (kdb_cmd_enabled & KDB_ENABLE_ALL)
++                      kdb_cmd_enabled = KDB_ENABLE_MASK & ~KDB_ENABLE_ALL;
++
++      if (need_to_lockdown_write)
++              kdb_cmd_enabled &= ~write_flags;
++
++      if (need_to_lockdown_read)
++              kdb_cmd_enabled &= ~read_flags;
++}
++
++/*
++ * Check whether the flags of the current command, the permissions of the kdb
++ * console and the lockdown state allow a command to be run.
+  */
+-static inline bool kdb_check_flags(kdb_cmdflags_t flags, int permissions,
++static bool kdb_check_flags(kdb_cmdflags_t flags, int permissions,
+                                  bool no_args)
+ {
+       /* permissions comes from userspace so needs massaging slightly */
+@@ -1180,6 +1233,9 @@ static int kdb_local(kdb_reason_t reason, int error, struct pt_regs *regs,
+               kdb_curr_task(raw_smp_processor_id());
+ 
+       KDB_DEBUG_STATE("kdb_local 1", reason);
++
++      kdb_check_for_lockdown();
++
+       kdb_go_count = 0;
+       if (reason == KDB_REASON_DEBUG) {
+               /* special case below */
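[Editor's illustration, not part of the patch.] kdb_check_for_lockdown() above first decomposes KDB_ENABLE_ALL into the full mask and then clears the write and/or read permission bits from kdb_cmd_enabled. A small sketch of the same flag surgery with made-up bit values (the real KDB_ENABLE_* constants live in linux/kdb.h and differ from these):

#include <stdio.h>

/* Hypothetical stand-ins for the real KDB_ENABLE_* flags. */
#define KDB_ENABLE_MEM_READ	0x01
#define KDB_ENABLE_MEM_WRITE	0x02
#define KDB_ENABLE_REG_READ	0x04
#define KDB_ENABLE_REG_WRITE	0x08
#define KDB_ENABLE_FLOW_CTRL	0x10
#define KDB_ENABLE_ALL		0x20
#define KDB_ENABLE_MASK		0x3f

static int kdb_cmd_enabled = KDB_ENABLE_ALL;

static void check_for_lockdown(int locked_down_write, int locked_down_read)
{
	const int write_flags = KDB_ENABLE_MEM_WRITE | KDB_ENABLE_REG_WRITE |
				KDB_ENABLE_FLOW_CTRL;
	const int read_flags = KDB_ENABLE_MEM_READ | KDB_ENABLE_REG_READ;

	/* De-compose KDB_ENABLE_ALL before masking individual rights away. */
	if ((locked_down_write || locked_down_read) && (kdb_cmd_enabled & KDB_ENABLE_ALL))
		kdb_cmd_enabled = KDB_ENABLE_MASK & ~KDB_ENABLE_ALL;
	if (locked_down_write)
		kdb_cmd_enabled &= ~write_flags;
	if (locked_down_read)
		kdb_cmd_enabled &= ~read_flags;
}

int main(void)
{
	check_for_lockdown(1, 0);	/* write lockdown only */
	printf("kdb_cmd_enabled = %#x\n", kdb_cmd_enabled);	/* read bits survive */
	return 0;
}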
+diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
+index 3b1398fbddaf8..871c912860ed5 100644
+--- a/kernel/time/timekeeping.c
++++ b/kernel/time/timekeeping.c
+@@ -17,6 +17,7 @@
+ #include <linux/clocksource.h>
+ #include <linux/jiffies.h>
+ #include <linux/time.h>
++#include <linux/timex.h>
+ #include <linux/tick.h>
+ #include <linux/stop_machine.h>
+ #include <linux/pvclock_gtod.h>
+@@ -2380,6 +2381,20 @@ static int timekeeping_validate_timex(const struct __kernel_timex *txc)
+       return 0;
+ }
+ 
++/**
++ * random_get_entropy_fallback - Returns the raw clock source value,
++ * used by random.c for platforms with no valid random_get_entropy().
++ */
++unsigned long random_get_entropy_fallback(void)
++{
++      struct tk_read_base *tkr = &tk_core.timekeeper.tkr_mono;
++      struct clocksource *clock = READ_ONCE(tkr->clock);
++
++      if (unlikely(timekeeping_suspended || !clock))
++              return 0;
++      return clock->read(clock);
++}
++EXPORT_SYMBOL_GPL(random_get_entropy_fallback);
+ 
+ /**
+  * do_adjtimex() - Accessor function to NTP __do_adjtimex function
+diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
+index 075cd25363ac3..7e282970177a8 100644
+--- a/lib/Kconfig.debug
++++ b/lib/Kconfig.debug
+@@ -1616,8 +1616,7 @@ config WARN_ALL_UNSEEDED_RANDOM
+         so architecture maintainers really need to do what they can
+         to get the CRNG seeded sooner after the system is booted.
+         However, since users cannot do anything actionable to
+-        address this, by default the kernel will issue only a single
+-        warning for the first use of unseeded randomness.
++        address this, by default this option is disabled.
+ 
+         Say Y here if you want to receive warnings for all uses of
+         unseeded randomness.  This will be of use primarily for
+diff --git a/lib/siphash.c b/lib/siphash.c
+index 72b9068ab57bf..71d315a6ad623 100644
+--- a/lib/siphash.c
++++ b/lib/siphash.c
+@@ -18,19 +18,13 @@
+ #include <asm/word-at-a-time.h>
+ #endif
+ 
+-#define SIPROUND \
+-      do { \
+-      v0 += v1; v1 = rol64(v1, 13); v1 ^= v0; v0 = rol64(v0, 32); \
+-      v2 += v3; v3 = rol64(v3, 16); v3 ^= v2; \
+-      v0 += v3; v3 = rol64(v3, 21); v3 ^= v0; \
+-      v2 += v1; v1 = rol64(v1, 17); v1 ^= v2; v2 = rol64(v2, 32); \
+-      } while (0)
++#define SIPROUND SIPHASH_PERMUTATION(v0, v1, v2, v3)
+ 
+ #define PREAMBLE(len) \
+-      u64 v0 = 0x736f6d6570736575ULL; \
+-      u64 v1 = 0x646f72616e646f6dULL; \
+-      u64 v2 = 0x6c7967656e657261ULL; \
+-      u64 v3 = 0x7465646279746573ULL; \
++      u64 v0 = SIPHASH_CONST_0; \
++      u64 v1 = SIPHASH_CONST_1; \
++      u64 v2 = SIPHASH_CONST_2; \
++      u64 v3 = SIPHASH_CONST_3; \
+       u64 b = ((u64)(len)) << 56; \
+       v3 ^= key->key[1]; \
+       v2 ^= key->key[0]; \
+@@ -389,19 +383,13 @@ u32 hsiphash_4u32(const u32 first, const u32 second, const u32 third,
+ }
+ EXPORT_SYMBOL(hsiphash_4u32);
+ #else
+-#define HSIPROUND \
+-      do { \
+-      v0 += v1; v1 = rol32(v1, 5); v1 ^= v0; v0 = rol32(v0, 16); \
+-      v2 += v3; v3 = rol32(v3, 8); v3 ^= v2; \
+-      v0 += v3; v3 = rol32(v3, 7); v3 ^= v0; \
+-      v2 += v1; v1 = rol32(v1, 13); v1 ^= v2; v2 = rol32(v2, 16); \
+-      } while (0)
++#define HSIPROUND HSIPHASH_PERMUTATION(v0, v1, v2, v3)
+ 
+ #define HPREAMBLE(len) \
+-      u32 v0 = 0; \
+-      u32 v1 = 0; \
+-      u32 v2 = 0x6c796765U; \
+-      u32 v3 = 0x74656462U; \
++      u32 v0 = HSIPHASH_CONST_0; \
++      u32 v1 = HSIPHASH_CONST_1; \
++      u32 v2 = HSIPHASH_CONST_2; \
++      u32 v3 = HSIPHASH_CONST_3; \
+       u32 b = ((u32)(len)) << 24; \
+       v3 ^= key->key[1]; \
+       v2 ^= key->key[0]; \
+diff --git a/mm/util.c b/mm/util.c
+index 3492a9e81aa3a..ac63e5ca8b211 100644
+--- a/mm/util.c
++++ b/mm/util.c
+@@ -343,6 +343,38 @@ unsigned long randomize_stack_top(unsigned long stack_top)
+ #endif
+ }
+ 
++/**
++ * randomize_page - Generate a random, page aligned address
++ * @start:    The smallest acceptable address the caller will take.
++ * @range:    The size of the area, starting at @start, within which the
++ *            random address must fall.
++ *
++ * If @start + @range would overflow, @range is capped.
++ *
++ * NOTE: Historical use of randomize_range, which this replaces, presumed that
++ * @start was already page aligned.  We now align it regardless.
++ *
++ * Return: A page aligned address within [start, start + range).  On error,
++ * @start is returned.
++ */
++unsigned long randomize_page(unsigned long start, unsigned long range)
++{
++      if (!PAGE_ALIGNED(start)) {
++              range -= PAGE_ALIGN(start) - start;
++              start = PAGE_ALIGN(start);
++      }
++
++      if (start > ULONG_MAX - range)
++              range = ULONG_MAX - start;
++
++      range >>= PAGE_SHIFT;
++
++      if (range == 0)
++              return start;
++
++      return start + (get_random_long() % range << PAGE_SHIFT);
++}
++
+ #ifdef CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
+ unsigned long arch_randomize_brk(struct mm_struct *mm)
+ {
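[Editor's illustration, not part of the patch.] randomize_page() as added above aligns @start up to a page boundary, caps @range against address-space overflow, and then picks a page-aligned offset within the remaining window. A userspace sketch of the same arithmetic, assuming 4 KiB pages and with rand() standing in for get_random_long():

#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
#define PAGE_ALIGNED(x)	(((x) & (PAGE_SIZE - 1)) == 0)

static unsigned long randomize_page_sketch(unsigned long start, unsigned long range)
{
	if (!PAGE_ALIGNED(start)) {
		range -= PAGE_ALIGN(start) - start;	/* shrink window by the alignment slack */
		start = PAGE_ALIGN(start);
	}
	if (start > ULONG_MAX - range)
		range = ULONG_MAX - start;		/* cap so start + range cannot overflow */
	range >>= PAGE_SHIFT;
	if (range == 0)
		return start;
	/* rand() is only a stand-in for get_random_long() here. */
	return start + (((unsigned long)rand() % range) << PAGE_SHIFT);
}

int main(void)
{
	printf("0x%lx\n", randomize_page_sketch(0x10000123UL, 16 * PAGE_SIZE));
	return 0;
}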
+diff --git a/security/security.c b/security/security.c
+index b7cf5cbfdc677..aaf6566deb9f0 100644
+--- a/security/security.c
++++ b/security/security.c
+@@ -59,10 +59,12 @@ const char *const lockdown_reasons[LOCKDOWN_CONFIDENTIALITY_MAX+1] = {
+       [LOCKDOWN_DEBUGFS] = "debugfs access",
+       [LOCKDOWN_XMON_WR] = "xmon write access",
+       [LOCKDOWN_BPF_WRITE_USER] = "use of bpf to write user RAM",
++      [LOCKDOWN_DBG_WRITE_KERNEL] = "use of kgdb/kdb to write kernel RAM",
+       [LOCKDOWN_INTEGRITY_MAX] = "integrity",
+       [LOCKDOWN_KCORE] = "/proc/kcore access",
+       [LOCKDOWN_KPROBES] = "use of kprobes",
+       [LOCKDOWN_BPF_READ_KERNEL] = "use of bpf to read kernel RAM",
++      [LOCKDOWN_DBG_READ_KERNEL] = "use of kgdb/kdb to read kernel RAM",
+       [LOCKDOWN_PERF] = "unsafe use of perf",
+       [LOCKDOWN_TRACEFS] = "use of tracefs",
+       [LOCKDOWN_XMON_RW] = "xmon read and write access",
+diff --git a/sound/pci/ctxfi/ctatc.c b/sound/pci/ctxfi/ctatc.c
+index 78f35e88aed6b..fbdb8a3d5b8e5 100644
+--- a/sound/pci/ctxfi/ctatc.c
++++ b/sound/pci/ctxfi/ctatc.c
+@@ -36,6 +36,7 @@
+                           | ((IEC958_AES3_CON_FS_48000) << 24))
+ 
+ static const struct snd_pci_quirk subsys_20k1_list[] = {
++      SND_PCI_QUIRK(PCI_VENDOR_ID_CREATIVE, 0x0021, "SB046x", CTSB046X),
+       SND_PCI_QUIRK(PCI_VENDOR_ID_CREATIVE, 0x0022, "SB055x", CTSB055X),
+       SND_PCI_QUIRK(PCI_VENDOR_ID_CREATIVE, 0x002f, "SB055x", CTSB055X),
+       SND_PCI_QUIRK(PCI_VENDOR_ID_CREATIVE, 0x0029, "SB073x", CTSB073X),
+@@ -64,6 +65,7 @@ static const struct snd_pci_quirk subsys_20k2_list[] = {
+ 
+ static const char *ct_subsys_name[NUM_CTCARDS] = {
+       /* 20k1 models */
++      [CTSB046X]      = "SB046x",
+       [CTSB055X]      = "SB055x",
+       [CTSB073X]      = "SB073x",
+       [CTUAA]         = "UAA",
+diff --git a/sound/pci/ctxfi/cthardware.h b/sound/pci/ctxfi/cthardware.h
+--- a/sound/pci/ctxfi/cthardware.h
++++ b/sound/pci/ctxfi/cthardware.h
+@@ -26,8 +26,9 @@ enum CHIPTYP {
+ 
+ enum CTCARDS {
+       /* 20k1 models */
++      CTSB046X,
++      CT20K1_MODEL_FIRST = CTSB046X,
+       CTSB055X,
+-      CT20K1_MODEL_FIRST = CTSB055X,
+       CTSB073X,
+       CTUAA,
+       CT20K1_UNKNOWN,
