commit:     71d470c013d9ba1388d32e220ff5cf87fb8eb6cd
Author:     Mike Pagano <mpagano@gentoo.org>
AuthorDate: Mon Jun  6 11:00:51 2022 +0000
Commit:     Mike Pagano <mpagano@gentoo.org>
CommitDate: Mon Jun  6 11:00:51 2022 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=71d470c0

Linux patch 5.18.2

Signed-off-by: Mike Pagano <mpagano@gentoo.org>

 0000_README             |    4 +
 1001_linux-5.18.2.patch | 2830 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 2834 insertions(+)

diff --git a/0000_README b/0000_README
index 62ab5b31..561c7140 100644
--- a/0000_README
+++ b/0000_README
@@ -47,6 +47,10 @@ Patch:  1000_linux-5.18.1.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.18.1
 
+Patch:  1001_linux-5.18.2.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.18.2
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1001_linux-5.18.2.patch b/1001_linux-5.18.2.patch
new file mode 100644
index 00000000..609efb82
--- /dev/null
+++ b/1001_linux-5.18.2.patch
@@ -0,0 +1,2830 @@
+diff --git a/Documentation/process/submitting-patches.rst b/Documentation/process/submitting-patches.rst
+index fb496b2ebfd38..92d3432460d75 100644
+--- a/Documentation/process/submitting-patches.rst
++++ b/Documentation/process/submitting-patches.rst
+@@ -77,7 +77,7 @@ as you intend it to.
+ 
+ The maintainer will thank you if you write your patch description in a
+ form which can be easily pulled into Linux's source code management
+-system, ``git``, as a "commit log".  See :ref:`explicit_in_reply_to`.
++system, ``git``, as a "commit log".  See :ref:`the_canonical_patch_format`.
+ 
+ Solve only one problem per patch.  If your description starts to get
+ long, that's a sign that you probably need to split up your patch.
+diff --git a/Makefile b/Makefile
+index 2bb168acb8f43..6b1d606a92f6f 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 18
+-SUBLEVEL = 1
++SUBLEVEL = 2
+ EXTRAVERSION =
+ NAME = Superb Owl
+ 
+diff --git a/arch/arm/boot/dts/s5pv210-aries.dtsi b/arch/arm/boot/dts/s5pv210-aries.dtsi
+index c8f1c324a6c26..26f2be2d9faa2 100644
+--- a/arch/arm/boot/dts/s5pv210-aries.dtsi
++++ b/arch/arm/boot/dts/s5pv210-aries.dtsi
+@@ -895,7 +895,7 @@
+               device-wakeup-gpios = <&gpg3 4 GPIO_ACTIVE_HIGH>;
+               interrupt-parent = <&gph2>;
+               interrupts = <5 IRQ_TYPE_LEVEL_HIGH>;
+-              interrupt-names = "host-wake";
++              interrupt-names = "host-wakeup";
+       };
+ };
+ 
+diff --git a/arch/powerpc/kvm/book3s_hv_uvmem.c b/arch/powerpc/kvm/book3s_hv_uvmem.c
+index 45c993dd05f5e..36f2314c58e5f 100644
+--- a/arch/powerpc/kvm/book3s_hv_uvmem.c
++++ b/arch/powerpc/kvm/book3s_hv_uvmem.c
+@@ -361,13 +361,15 @@ static bool kvmppc_gfn_is_uvmem_pfn(unsigned long gfn, struct kvm *kvm,
+ static bool kvmppc_next_nontransitioned_gfn(const struct kvm_memory_slot *memslot,
+               struct kvm *kvm, unsigned long *gfn)
+ {
+-      struct kvmppc_uvmem_slot *p;
++      struct kvmppc_uvmem_slot *p = NULL, *iter;
+       bool ret = false;
+       unsigned long i;
+ 
+-      list_for_each_entry(p, &kvm->arch.uvmem_pfns, list)
+-              if (*gfn >= p->base_pfn && *gfn < p->base_pfn + p->nr_pfns)
++      list_for_each_entry(iter, &kvm->arch.uvmem_pfns, list)
++              if (*gfn >= iter->base_pfn && *gfn < iter->base_pfn + iter->nr_pfns) {
++                      p = iter;
+                       break;
++              }
+       if (!p)
+               return ret;
+       /*
+diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
+index f78e2b3501a19..35f222aa66bfc 100644
+--- a/arch/x86/include/asm/uaccess.h
++++ b/arch/x86/include/asm/uaccess.h
+@@ -382,6 +382,103 @@ do {                                                                    \
+ 
+ #endif // CONFIG_CC_HAS_ASM_GOTO_OUTPUT
+ 
++#ifdef CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT
++#define __try_cmpxchg_user_asm(itype, ltype, _ptr, _pold, _new, label)        ({ \
++      bool success;                                                   \
++      __typeof__(_ptr) _old = (__typeof__(_ptr))(_pold);              \
++      __typeof__(*(_ptr)) __old = *_old;                              \
++      __typeof__(*(_ptr)) __new = (_new);                             \
++      asm_volatile_goto("\n"                                          \
++                   "1: " LOCK_PREFIX "cmpxchg"itype" %[new], %[ptr]\n"\
++                   _ASM_EXTABLE_UA(1b, %l[label])                     \
++                   : CC_OUT(z) (success),                             \
++                     [ptr] "+m" (*_ptr),                              \
++                     [old] "+a" (__old)                               \
++                   : [new] ltype (__new)                              \
++                   : "memory"                                         \
++                   : label);                                          \
++      if (unlikely(!success))                                         \
++              *_old = __old;                                          \
++      likely(success);                                        })
++
++#ifdef CONFIG_X86_32
++#define __try_cmpxchg64_user_asm(_ptr, _pold, _new, label)    ({      \
++      bool success;                                                   \
++      __typeof__(_ptr) _old = (__typeof__(_ptr))(_pold);              \
++      __typeof__(*(_ptr)) __old = *_old;                              \
++      __typeof__(*(_ptr)) __new = (_new);                             \
++      asm_volatile_goto("\n"                                          \
++                   "1: " LOCK_PREFIX "cmpxchg8b %[ptr]\n"             \
++                   _ASM_EXTABLE_UA(1b, %l[label])                     \
++                   : CC_OUT(z) (success),                             \
++                     "+A" (__old),                                    \
++                     [ptr] "+m" (*_ptr)                               \
++                   : "b" ((u32)__new),                                \
++                     "c" ((u32)((u64)__new >> 32))                    \
++                   : "memory"                                         \
++                   : label);                                          \
++      if (unlikely(!success))                                         \
++              *_old = __old;                                          \
++      likely(success);                                        })
++#endif // CONFIG_X86_32
++#else  // !CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT
++#define __try_cmpxchg_user_asm(itype, ltype, _ptr, _pold, _new, label)        ({ \
++      int __err = 0;                                                  \
++      bool success;                                                   \
++      __typeof__(_ptr) _old = (__typeof__(_ptr))(_pold);              \
++      __typeof__(*(_ptr)) __old = *_old;                              \
++      __typeof__(*(_ptr)) __new = (_new);                             \
++      asm volatile("\n"                                               \
++                   "1: " LOCK_PREFIX "cmpxchg"itype" %[new], %[ptr]\n"\
++                   CC_SET(z)                                          \
++                   "2:\n"                                             \
++                   _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG,  \
++                                         %[errout])                   \
++                   : CC_OUT(z) (success),                             \
++                     [errout] "+r" (__err),                           \
++                     [ptr] "+m" (*_ptr),                              \
++                     [old] "+a" (__old)                               \
++                   : [new] ltype (__new)                              \
++                   : "memory", "cc");                                 \
++      if (unlikely(__err))                                            \
++              goto label;                                             \
++      if (unlikely(!success))                                         \
++              *_old = __old;                                          \
++      likely(success);                                        })
++
++#ifdef CONFIG_X86_32
++/*
++ * Unlike the normal CMPXCHG, hardcode ECX for both success/fail and error.
++ * There are only six GPRs available and four (EAX, EBX, ECX, and EDX) are
++ * hardcoded by CMPXCHG8B, leaving only ESI and EDI.  If the compiler uses
++ * both ESI and EDI for the memory operand, compilation will fail if the error
++ * is an input+output as there will be no register available for input.
++ */
++#define __try_cmpxchg64_user_asm(_ptr, _pold, _new, label)    ({      \
++      int __result;                                                   \
++      __typeof__(_ptr) _old = (__typeof__(_ptr))(_pold);              \
++      __typeof__(*(_ptr)) __old = *_old;                              \
++      __typeof__(*(_ptr)) __new = (_new);                             \
++      asm volatile("\n"                                               \
++                   "1: " LOCK_PREFIX "cmpxchg8b %[ptr]\n"             \
++                   "mov $0, %%ecx\n\t"                                \
++                   "setz %%cl\n"                                      \
++                   "2:\n"                                             \
++                   _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG, %%ecx) \
++                   : [result]"=c" (__result),                         \
++                     "+A" (__old),                                    \
++                     [ptr] "+m" (*_ptr)                               \
++                   : "b" ((u32)__new),                                \
++                     "c" ((u32)((u64)__new >> 32))                    \
++                   : "memory", "cc");                                 \
++      if (unlikely(__result < 0))                                     \
++              goto label;                                             \
++      if (unlikely(!__result))                                        \
++              *_old = __old;                                          \
++      likely(__result);                                       })
++#endif // CONFIG_X86_32
++#endif // CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT
++
+ /* FIXME: this hack is definitely wrong -AK */
+ struct __large_struct { unsigned long buf[100]; };
+ #define __m(x) (*(struct __large_struct __user *)(x))
+@@ -474,6 +571,51 @@ do {                                                                           \
+ } while (0)
+ #endif // CONFIG_CC_HAS_ASM_GOTO_OUTPUT
+ 
++extern void __try_cmpxchg_user_wrong_size(void);
++
++#ifndef CONFIG_X86_32
++#define __try_cmpxchg64_user_asm(_ptr, _oldp, _nval, _label)          \
++      __try_cmpxchg_user_asm("q", "r", (_ptr), (_oldp), (_nval), _label)
++#endif
++
++/*
++ * Force the pointer to u<size> to match the size expected by the asm helper.
++ * clang/LLVM compiles all cases and only discards the unused paths after
++ * processing errors, which breaks i386 if the pointer is an 8-byte value.
++ */
++#define unsafe_try_cmpxchg_user(_ptr, _oldp, _nval, _label) ({                \
++      bool __ret;                                                             \
++      __chk_user_ptr(_ptr);                                                   \
++      switch (sizeof(*(_ptr))) {                                              \
++      case 1: __ret = __try_cmpxchg_user_asm("b", "q",                        \
++                                             (__force u8 *)(_ptr), (_oldp),   \
++                                             (_nval), _label);                \
++              break;                                                          \
++      case 2: __ret = __try_cmpxchg_user_asm("w", "r",                        \
++                                             (__force u16 *)(_ptr), (_oldp),  \
++                                             (_nval), _label);                \
++              break;                                                          \
++      case 4: __ret = __try_cmpxchg_user_asm("l", "r",                        \
++                                             (__force u32 *)(_ptr), (_oldp),  \
++                                             (_nval), _label);                \
++              break;                                                          \
++      case 8: __ret = __try_cmpxchg64_user_asm((__force u64 *)(_ptr), (_oldp),\
++                                               (_nval), _label);              \
++              break;                                                          \
++      default: __try_cmpxchg_user_wrong_size();                               \
++      }                                                                       \
++      __ret;                                          })
++
++/* "Returns" 0 on success, 1 on failure, -EFAULT if the access faults. */
++#define __try_cmpxchg_user(_ptr, _oldp, _nval, _label)        ({              \
++      int __ret = -EFAULT;                                            \
++      __uaccess_begin_nospec();                                       \
++      __ret = !unsafe_try_cmpxchg_user(_ptr, _oldp, _nval, _label);   \
++_label:                                                                \
++      __uaccess_end();                                                \
++      __ret;                                                          \
++                                                      })
++
+ /*
+  * We want the unsafe accessors to always be inlined and use
+  * the error labels - thus the macro games.
+diff --git a/arch/x86/kernel/cpu/sgx/encl.c b/arch/x86/kernel/cpu/sgx/encl.c
+index 7c63a1911fae9..3c24e6124d955 100644
+--- a/arch/x86/kernel/cpu/sgx/encl.c
++++ b/arch/x86/kernel/cpu/sgx/encl.c
+@@ -12,6 +12,92 @@
+ #include "encls.h"
+ #include "sgx.h"
+ 
++#define PCMDS_PER_PAGE (PAGE_SIZE / sizeof(struct sgx_pcmd))
++/*
++ * 32 PCMD entries share a PCMD page. PCMD_FIRST_MASK is used to
++ * determine the page index associated with the first PCMD entry
++ * within a PCMD page.
++ */
++#define PCMD_FIRST_MASK GENMASK(4, 0)
++
++/**
++ * reclaimer_writing_to_pcmd() - Query if any enclave page associated with
++ *                               a PCMD page is in process of being reclaimed.
++ * @encl:        Enclave to which PCMD page belongs
++ * @start_addr:  Address of enclave page using first entry within the PCMD page
++ *
++ * When an enclave page is reclaimed some Paging Crypto MetaData (PCMD) is
++ * stored. The PCMD data of a reclaimed enclave page contains enough
++ * information for the processor to verify the page at the time
++ * it is loaded back into the Enclave Page Cache (EPC).
++ *
++ * The backing storage to which enclave pages are reclaimed is laid out as
++ * follows:
++ * Encrypted enclave pages:SECS page:PCMD pages
++ *
++ * Each PCMD page contains the PCMD metadata of
++ * PAGE_SIZE/sizeof(struct sgx_pcmd) enclave pages.
++ *
++ * A PCMD page can only be truncated if it is (a) empty, and (b) not in the
++ * process of getting data (and thus soon being non-empty). (b) is tested with
++ * a check if an enclave page sharing the PCMD page is in the process of being
++ * reclaimed.
++ *
++ * The reclaimer sets the SGX_ENCL_PAGE_BEING_RECLAIMED flag when it
++ * intends to reclaim that enclave page - it means that the PCMD page
++ * associated with that enclave page is about to get some data and thus
++ * even if the PCMD page is empty, it should not be truncated.
++ *
++ * Context: Enclave mutex (&sgx_encl->lock) must be held.
++ * Return: 1 if the reclaimer is about to write to the PCMD page
++ *         0 if the reclaimer has no intention to write to the PCMD page
++ */
++static int reclaimer_writing_to_pcmd(struct sgx_encl *encl,
++                                   unsigned long start_addr)
++{
++      int reclaimed = 0;
++      int i;
++
++      /*
++       * PCMD_FIRST_MASK is based on number of PCMD entries within
++       * PCMD page being 32.
++       */
++      BUILD_BUG_ON(PCMDS_PER_PAGE != 32);
++
++      for (i = 0; i < PCMDS_PER_PAGE; i++) {
++              struct sgx_encl_page *entry;
++              unsigned long addr;
++
++              addr = start_addr + i * PAGE_SIZE;
++
++              /*
++               * Stop when reaching the SECS page - it does not
++               * have a page_array entry and its reclaim is
++               * started and completed with enclave mutex held so
++               * it does not use the SGX_ENCL_PAGE_BEING_RECLAIMED
++               * flag.
++               */
++              if (addr == encl->base + encl->size)
++                      break;
++
++              entry = xa_load(&encl->page_array, PFN_DOWN(addr));
++              if (!entry)
++                      continue;
++
++              /*
++               * VA page slot ID uses same bit as the flag so it is important
++               * to ensure that the page is not already in backing store.
++               */
++              if (entry->epc_page &&
++                  (entry->desc & SGX_ENCL_PAGE_BEING_RECLAIMED)) {
++                      reclaimed = 1;
++                      break;
++              }
++      }
++
++      return reclaimed;
++}
++
+ /*
+  * Calculate byte offset of a PCMD struct associated with an enclave page. PCMD's
+  * follow right after the EPC data in the backing storage. In addition to the
+@@ -47,6 +133,7 @@ static int __sgx_encl_eldu(struct sgx_encl_page *encl_page,
+       unsigned long va_offset = encl_page->desc & SGX_ENCL_PAGE_VA_OFFSET_MASK;
+       struct sgx_encl *encl = encl_page->encl;
+       pgoff_t page_index, page_pcmd_off;
++      unsigned long pcmd_first_page;
+       struct sgx_pageinfo pginfo;
+       struct sgx_backing b;
+       bool pcmd_page_empty;
+@@ -58,6 +145,11 @@ static int __sgx_encl_eldu(struct sgx_encl_page *encl_page,
+       else
+               page_index = PFN_DOWN(encl->size);
+ 
++      /*
++       * Address of enclave page using the first entry within the PCMD page.
++       */
++      pcmd_first_page = PFN_PHYS(page_index & ~PCMD_FIRST_MASK) + encl->base;
++
+       page_pcmd_off = sgx_encl_get_backing_page_pcmd_offset(encl, page_index);
+ 
+       ret = sgx_encl_get_backing(encl, page_index, &b);
+@@ -84,6 +176,7 @@ static int __sgx_encl_eldu(struct sgx_encl_page *encl_page,
+       }
+ 
+       memset(pcmd_page + b.pcmd_offset, 0, sizeof(struct sgx_pcmd));
++      set_page_dirty(b.pcmd);
+ 
+       /*
+        * The area for the PCMD in the page was zeroed above.  Check if the
+@@ -94,12 +187,20 @@ static int __sgx_encl_eldu(struct sgx_encl_page *encl_page,
+       kunmap_atomic(pcmd_page);
+       kunmap_atomic((void *)(unsigned long)pginfo.contents);
+ 
+-      sgx_encl_put_backing(&b, false);
++      get_page(b.pcmd);
++      sgx_encl_put_backing(&b);
+ 
+       sgx_encl_truncate_backing_page(encl, page_index);
+ 
+-      if (pcmd_page_empty)
++      if (pcmd_page_empty && !reclaimer_writing_to_pcmd(encl, pcmd_first_page)) {
+               sgx_encl_truncate_backing_page(encl, PFN_DOWN(page_pcmd_off));
++              pcmd_page = kmap_atomic(b.pcmd);
++              if (memchr_inv(pcmd_page, 0, PAGE_SIZE))
++                      pr_warn("PCMD page not empty after truncate.\n");
++              kunmap_atomic(pcmd_page);
++      }
++
++      put_page(b.pcmd);
+ 
+       return ret;
+ }
+@@ -645,15 +746,9 @@ int sgx_encl_get_backing(struct sgx_encl *encl, unsigned long page_index,
+ /**
+  * sgx_encl_put_backing() - Unpin the backing storage
+  * @backing:  data for accessing backing storage for the page
+- * @do_write: mark pages dirty
+  */
+-void sgx_encl_put_backing(struct sgx_backing *backing, bool do_write)
++void sgx_encl_put_backing(struct sgx_backing *backing)
+ {
+-      if (do_write) {
+-              set_page_dirty(backing->pcmd);
+-              set_page_dirty(backing->contents);
+-      }
+-
+       put_page(backing->pcmd);
+       put_page(backing->contents);
+ }
+diff --git a/arch/x86/kernel/cpu/sgx/encl.h b/arch/x86/kernel/cpu/sgx/encl.h
+index fec43ca65065b..d44e7372151f0 100644
+--- a/arch/x86/kernel/cpu/sgx/encl.h
++++ b/arch/x86/kernel/cpu/sgx/encl.h
+@@ -107,7 +107,7 @@ void sgx_encl_release(struct kref *ref);
+ int sgx_encl_mm_add(struct sgx_encl *encl, struct mm_struct *mm);
+ int sgx_encl_get_backing(struct sgx_encl *encl, unsigned long page_index,
+                        struct sgx_backing *backing);
+-void sgx_encl_put_backing(struct sgx_backing *backing, bool do_write);
++void sgx_encl_put_backing(struct sgx_backing *backing);
+ int sgx_encl_test_and_clear_young(struct mm_struct *mm,
+                                 struct sgx_encl_page *page);
+ 
+diff --git a/arch/x86/kernel/cpu/sgx/main.c b/arch/x86/kernel/cpu/sgx/main.c
+index 8e4bc6453d263..ab4ec54bbdd94 100644
+--- a/arch/x86/kernel/cpu/sgx/main.c
++++ b/arch/x86/kernel/cpu/sgx/main.c
+@@ -191,6 +191,8 @@ static int __sgx_encl_ewb(struct sgx_epc_page *epc_page, void *va_slot,
+                         backing->pcmd_offset;
+ 
+       ret = __ewb(&pginfo, sgx_get_epc_virt_addr(epc_page), va_slot);
++      set_page_dirty(backing->pcmd);
++      set_page_dirty(backing->contents);
+ 
+       kunmap_atomic((void *)(unsigned long)(pginfo.metadata -
+                                             backing->pcmd_offset));
+@@ -308,6 +310,7 @@ static void sgx_reclaimer_write(struct sgx_epc_page *epc_page,
+       sgx_encl_ewb(epc_page, backing);
+       encl_page->epc_page = NULL;
+       encl->secs_child_cnt--;
++      sgx_encl_put_backing(backing);
+ 
+       if (!encl->secs_child_cnt && test_bit(SGX_ENCL_INITIALIZED, &encl->flags)) {
+               ret = sgx_encl_get_backing(encl, PFN_DOWN(encl->size),
+@@ -320,7 +323,7 @@ static void sgx_reclaimer_write(struct sgx_epc_page *epc_page,
+               sgx_encl_free_epc_page(encl->secs.epc_page);
+               encl->secs.epc_page = NULL;
+ 
+-              sgx_encl_put_backing(&secs_backing, true);
++              sgx_encl_put_backing(&secs_backing);
+       }
+ 
+ out:
+@@ -379,11 +382,14 @@ static void sgx_reclaim_pages(void)
+                       goto skip;
+ 
+               page_index = PFN_DOWN(encl_page->desc - encl_page->encl->base);
++
++              mutex_lock(&encl_page->encl->lock);
+               ret = sgx_encl_get_backing(encl_page->encl, page_index, &backing[i]);
+-              if (ret)
++              if (ret) {
++                      mutex_unlock(&encl_page->encl->lock);
+                       goto skip;
++              }
+ 
+-              mutex_lock(&encl_page->encl->lock);
+               encl_page->desc |= SGX_ENCL_PAGE_BEING_RECLAIMED;
+               mutex_unlock(&encl_page->encl->lock);
+               continue;
+@@ -411,7 +417,6 @@ skip:
+ 
+               encl_page = epc_page->owner;
+               sgx_reclaimer_write(epc_page, &backing[i]);
+-              sgx_encl_put_backing(&backing[i], true);
+ 
+               kref_put(&encl_page->encl->refcount, sgx_encl_release);
+               epc_page->flags &= ~SGX_EPC_PAGE_RECLAIMER_TRACKED;
+diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
+index e28ab0ecc5378..0fdc807ae13f8 100644
+--- a/arch/x86/kernel/fpu/core.c
++++ b/arch/x86/kernel/fpu/core.c
+@@ -14,6 +14,8 @@
+ #include <asm/traps.h>
+ #include <asm/irq_regs.h>
+ 
++#include <uapi/asm/kvm.h>
++
+ #include <linux/hardirq.h>
+ #include <linux/pkeys.h>
+ #include <linux/vmalloc.h>
+@@ -232,7 +234,20 @@ bool fpu_alloc_guest_fpstate(struct fpu_guest *gfpu)
+       gfpu->fpstate           = fpstate;
+       gfpu->xfeatures         = fpu_user_cfg.default_features;
+       gfpu->perm              = fpu_user_cfg.default_features;
+-      gfpu->uabi_size         = fpu_user_cfg.default_size;
++
++      /*
++       * KVM sets the FP+SSE bits in the XSAVE header when copying FPU state
++       * to userspace, even when XSAVE is unsupported, so that restoring FPU
++       * state on a different CPU that does support XSAVE can cleanly load
++       * the incoming state using its natural XSAVE.  In other words, KVM's
++       * uABI size may be larger than this host's default size.  Conversely,
++       * the default size should never be larger than KVM's base uABI size;
++       * all features that can expand the uABI size must be opt-in.
++       */
++      gfpu->uabi_size         = sizeof(struct kvm_xsave);
++      if (WARN_ON_ONCE(fpu_user_cfg.default_size > gfpu->uabi_size))
++              gfpu->uabi_size = fpu_user_cfg.default_size;
++
+       fpu_init_guest_permissions(gfpu);
+ 
+       return true;
+diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
+index 8b1c45c9cda87..1a55bf700f926 100644
+--- a/arch/x86/kernel/kvm.c
++++ b/arch/x86/kernel/kvm.c
+@@ -191,7 +191,7 @@ void kvm_async_pf_task_wake(u32 token)
+ {
+       u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
+       struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
+-      struct kvm_task_sleep_node *n;
++      struct kvm_task_sleep_node *n, *dummy = NULL;
+ 
+       if (token == ~0) {
+               apf_task_wake_all();
+@@ -203,28 +203,41 @@ again:
+       n = _find_apf_task(b, token);
+       if (!n) {
+               /*
+-               * async PF was not yet handled.
+-               * Add dummy entry for the token.
++               * Async #PF not yet handled, add a dummy entry for the token.
++               * Allocating the token must be down outside of the raw lock
++               * as the allocator is preemptible on PREEMPT_RT kernels.
+                */
+-              n = kzalloc(sizeof(*n), GFP_ATOMIC);
+-              if (!n) {
++              if (!dummy) {
++                      raw_spin_unlock(&b->lock);
++                      dummy = kzalloc(sizeof(*dummy), GFP_ATOMIC);
++
+                       /*
+-                       * Allocation failed! Busy wait while other cpu
+-                       * handles async PF.
++                       * Continue looping on allocation failure, eventually
++                       * the async #PF will be handled and allocating a new
++                       * node will be unnecessary.
++                       */
++                      if (!dummy)
++                              cpu_relax();
++
++                      /*
++                       * Recheck for async #PF completion before enqueueing
++                       * the dummy token to avoid duplicate list entries.
+                        */
+-                      raw_spin_unlock(&b->lock);
+-                      cpu_relax();
+                       goto again;
+               }
+-              n->token = token;
+-              n->cpu = smp_processor_id();
+-              init_swait_queue_head(&n->wq);
+-              hlist_add_head(&n->link, &b->list);
++              dummy->token = token;
++              dummy->cpu = smp_processor_id();
++              init_swait_queue_head(&dummy->wq);
++              hlist_add_head(&dummy->link, &b->list);
++              dummy = NULL;
+       } else {
+               apf_task_wake_one(n);
+       }
+       raw_spin_unlock(&b->lock);
+-      return;
++
++      /* A dummy token might be allocated and ultimately not used.  */
++      if (dummy)
++              kfree(dummy);
+ }
+ EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);
+ 
+diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
+index 45e1573f8f1d3..cf48ac96ceecb 100644
+--- a/arch/x86/kvm/mmu/mmu.c
++++ b/arch/x86/kvm/mmu/mmu.c
+@@ -1843,17 +1843,14 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
+         &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)])     \
+               if ((_sp)->gfn != (_gfn) || (_sp)->role.direct) {} else
+ 
+-static bool kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
++static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
+                        struct list_head *invalid_list)
+ {
+       int ret = vcpu->arch.mmu->sync_page(vcpu, sp);
+ 
+-      if (ret < 0) {
++      if (ret < 0)
+               kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
+-              return false;
+-      }
+-
+-      return !!ret;
++      return ret;
+ }
+ 
+ static bool kvm_mmu_remote_flush_or_zap(struct kvm *kvm,
+@@ -1975,7 +1972,7 @@ static int mmu_sync_children(struct kvm_vcpu *vcpu,
+ 
+               for_each_sp(pages, sp, parents, i) {
+                       kvm_unlink_unsync_page(vcpu->kvm, sp);
+-                      flush |= kvm_sync_page(vcpu, sp, &invalid_list);
++                      flush |= kvm_sync_page(vcpu, sp, &invalid_list) > 0;
+                       mmu_pages_clear_parents(&parents);
+               }
+               if (need_resched() || rwlock_needbreak(&vcpu->kvm->mmu_lock)) {
+@@ -2016,6 +2013,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
+       struct hlist_head *sp_list;
+       unsigned quadrant;
+       struct kvm_mmu_page *sp;
++      int ret;
+       int collisions = 0;
+       LIST_HEAD(invalid_list);
+ 
+@@ -2068,11 +2066,13 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
+                        * If the sync fails, the page is zapped.  If so, break
+                        * in order to rebuild it.
+                        */
+-                      if (!kvm_sync_page(vcpu, sp, &invalid_list))
++                      ret = kvm_sync_page(vcpu, sp, &invalid_list);
++                      if (ret < 0)
+                               break;
+ 
+                       WARN_ON(!list_empty(&invalid_list));
+-                      kvm_flush_remote_tlbs(vcpu->kvm);
++                      if (ret > 0)
++                              kvm_flush_remote_tlbs(vcpu->kvm);
+               }
+ 
+               __clear_sp_write_flooding_count(sp);
+diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
+index 01fee5f67ac37..beb3ce8d94eb3 100644
+--- a/arch/x86/kvm/mmu/paging_tmpl.h
++++ b/arch/x86/kvm/mmu/paging_tmpl.h
+@@ -144,42 +144,6 @@ static bool FNAME(is_rsvd_bits_set)(struct kvm_mmu *mmu, u64 gpte, int level)
+              FNAME(is_bad_mt_xwr)(&mmu->guest_rsvd_check, gpte);
+ }
+ 
+-static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
+-                             pt_element_t __user *ptep_user, unsigned index,
+-                             pt_element_t orig_pte, pt_element_t new_pte)
+-{
+-      signed char r;
+-
+-      if (!user_access_begin(ptep_user, sizeof(pt_element_t)))
+-              return -EFAULT;
+-
+-#ifdef CMPXCHG
+-      asm volatile("1:" LOCK_PREFIX CMPXCHG " %[new], %[ptr]\n"
+-                   "setnz %b[r]\n"
+-                   "2:"
+-                   _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG, %k[r])
+-                   : [ptr] "+m" (*ptep_user),
+-                     [old] "+a" (orig_pte),
+-                     [r] "=q" (r)
+-                   : [new] "r" (new_pte)
+-                   : "memory");
+-#else
+-      asm volatile("1:" LOCK_PREFIX "cmpxchg8b %[ptr]\n"
+-                   "setnz %b[r]\n"
+-                   "2:"
+-                   _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG, %k[r])
+-                   : [ptr] "+m" (*ptep_user),
+-                     [old] "+A" (orig_pte),
+-                     [r] "=q" (r)
+-                   : [new_lo] "b" ((u32)new_pte),
+-                     [new_hi] "c" ((u32)(new_pte >> 32))
+-                   : "memory");
+-#endif
+-
+-      user_access_end();
+-      return r;
+-}
+-
+ static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu,
+                                 struct kvm_mmu_page *sp, u64 *spte,
+                                 u64 gpte)
+@@ -278,7 +242,7 @@ static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
+               if (unlikely(!walker->pte_writable[level - 1]))
+                       continue;
+ 
+-              ret = FNAME(cmpxchg_gpte)(vcpu, mmu, ptep_user, index, orig_pte, pte);
++              ret = __try_cmpxchg_user(ptep_user, &orig_pte, pte, fault);
+               if (ret)
+                       return ret;
+ 
+diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
+index 96bab464967f2..1a9b60cb6bcb8 100644
+--- a/arch/x86/kvm/svm/nested.c
++++ b/arch/x86/kvm/svm/nested.c
+@@ -819,9 +819,6 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
+       struct kvm_host_map map;
+       int rc;
+ 
+-      /* Triple faults in L2 should never escape. */
+-      WARN_ON_ONCE(kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu));
+-
+       rc = kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.vmcb12_gpa), &map);
+       if (rc) {
+               if (rc == -EINVAL)
+diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
+index 7c392873626fd..4b7d490c0b639 100644
+--- a/arch/x86/kvm/svm/sev.c
++++ b/arch/x86/kvm/svm/sev.c
+@@ -688,7 +688,7 @@ static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp)
+               if (params.len > SEV_FW_BLOB_MAX_SIZE)
+                       return -EINVAL;
+ 
+-              blob = kmalloc(params.len, GFP_KERNEL_ACCOUNT);
++              blob = kzalloc(params.len, GFP_KERNEL_ACCOUNT);
+               if (!blob)
+                       return -ENOMEM;
+ 
+@@ -808,7 +808,7 @@ static int __sev_dbg_decrypt_user(struct kvm *kvm, unsigned long paddr,
+       if (!IS_ALIGNED(dst_paddr, 16) ||
+           !IS_ALIGNED(paddr,     16) ||
+           !IS_ALIGNED(size,      16)) {
+-              tpage = (void *)alloc_page(GFP_KERNEL);
++              tpage = (void *)alloc_page(GFP_KERNEL | __GFP_ZERO);
+               if (!tpage)
+                       return -ENOMEM;
+ 
+@@ -1094,7 +1094,7 @@ static int sev_get_attestation_report(struct kvm *kvm, struct kvm_sev_cmd *argp)
+               if (params.len > SEV_FW_BLOB_MAX_SIZE)
+                       return -EINVAL;
+ 
+-              blob = kmalloc(params.len, GFP_KERNEL_ACCOUNT);
++              blob = kzalloc(params.len, GFP_KERNEL_ACCOUNT);
+               if (!blob)
+                       return -ENOMEM;
+ 
+@@ -1176,7 +1176,7 @@ static int sev_send_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
+               return -EINVAL;
+ 
+       /* allocate the memory to hold the session data blob */
+-      session_data = kmalloc(params.session_len, GFP_KERNEL_ACCOUNT);
++      session_data = kzalloc(params.session_len, GFP_KERNEL_ACCOUNT);
+       if (!session_data)
+               return -ENOMEM;
+ 
+@@ -1300,11 +1300,11 @@ static int sev_send_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
+ 
+       /* allocate memory for header and transport buffer */
+       ret = -ENOMEM;
+-      hdr = kmalloc(params.hdr_len, GFP_KERNEL_ACCOUNT);
++      hdr = kzalloc(params.hdr_len, GFP_KERNEL_ACCOUNT);
+       if (!hdr)
+               goto e_unpin;
+ 
+-      trans_data = kmalloc(params.trans_len, GFP_KERNEL_ACCOUNT);
++      trans_data = kzalloc(params.trans_len, GFP_KERNEL_ACCOUNT);
+       if (!trans_data)
+               goto e_free_hdr;
+ 
+diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
+index 856c875638833..880d0b0c9315b 100644
+--- a/arch/x86/kvm/vmx/nested.c
++++ b/arch/x86/kvm/vmx/nested.c
+@@ -4518,9 +4518,6 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
+       /* trying to cancel vmlaunch/vmresume is a bug */
+       WARN_ON_ONCE(vmx->nested.nested_run_pending);
+ 
+-      /* Similarly, triple faults in L2 should never escape. */
+-      WARN_ON_ONCE(kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu));
+-
+       if (kvm_check_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu)) {
+               /*
+                * KVM_REQ_GET_NESTED_STATE_PAGES is also used to map
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index 610355b9ccceb..982df9c000d31 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -7856,7 +7856,7 @@ static unsigned int vmx_handle_intel_pt_intr(void)
+       struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
+ 
+       /* '0' on failure so that the !PT case can use a RET0 static call. */
+-      if (!kvm_arch_pmi_in_guest(vcpu))
++      if (!vcpu || !kvm_handling_nmi_from_guest(vcpu))
+               return 0;
+ 
+       kvm_make_request(KVM_REQ_PMI, vcpu);
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 4790f0d7d40b8..39c571224ac28 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -7229,15 +7229,8 @@ static int emulator_write_emulated(struct x86_emulate_ctxt *ctxt,
+                                  exception, &write_emultor);
+ }
+ 
+-#define CMPXCHG_TYPE(t, ptr, old, new) \
+-      (cmpxchg((t *)(ptr), *(t *)(old), *(t *)(new)) == *(t *)(old))
+-
+-#ifdef CONFIG_X86_64
+-#  define CMPXCHG64(ptr, old, new) CMPXCHG_TYPE(u64, ptr, old, new)
+-#else
+-#  define CMPXCHG64(ptr, old, new) \
+-      (cmpxchg64((u64 *)(ptr), *(u64 *)(old), *(u64 *)(new)) == *(u64 *)(old))
+-#endif
++#define emulator_try_cmpxchg_user(t, ptr, old, new) \
++      (__try_cmpxchg_user((t __user *)(ptr), (t *)(old), *(t *)(new), efault ## t))
+ 
+ static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
+                                    unsigned long addr,
+@@ -7246,12 +7239,11 @@ static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
+                                    unsigned int bytes,
+                                    struct x86_exception *exception)
+ {
+-      struct kvm_host_map map;
+       struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
+       u64 page_line_mask;
++      unsigned long hva;
+       gpa_t gpa;
+-      char *kaddr;
+-      bool exchanged;
++      int r;
+ 
+       /* guests cmpxchg8b have to be emulated atomically */
+       if (bytes > 8 || (bytes & (bytes - 1)))
+@@ -7275,31 +7267,32 @@ static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
+       if (((gpa + bytes - 1) & page_line_mask) != (gpa & page_line_mask))
+               goto emul_write;
+ 
+-      if (kvm_vcpu_map(vcpu, gpa_to_gfn(gpa), &map))
++      hva = kvm_vcpu_gfn_to_hva(vcpu, gpa_to_gfn(gpa));
++      if (kvm_is_error_hva(hva))
+               goto emul_write;
+ 
+-      kaddr = map.hva + offset_in_page(gpa);
++      hva += offset_in_page(gpa);
+ 
+       switch (bytes) {
+       case 1:
+-              exchanged = CMPXCHG_TYPE(u8, kaddr, old, new);
++              r = emulator_try_cmpxchg_user(u8, hva, old, new);
+               break;
+       case 2:
+-              exchanged = CMPXCHG_TYPE(u16, kaddr, old, new);
++              r = emulator_try_cmpxchg_user(u16, hva, old, new);
+               break;
+       case 4:
+-              exchanged = CMPXCHG_TYPE(u32, kaddr, old, new);
++              r = emulator_try_cmpxchg_user(u32, hva, old, new);
+               break;
+       case 8:
+-              exchanged = CMPXCHG64(kaddr, old, new);
++              r = emulator_try_cmpxchg_user(u64, hva, old, new);
+               break;
+       default:
+               BUG();
+       }
+ 
+-      kvm_vcpu_unmap(vcpu, &map, true);
+-
+-      if (!exchanged)
++      if (r < 0)
++              goto emul_write;
++      if (r)
+               return X86EMUL_CMPXCHG_FAILED;
+ 
+       kvm_page_track_write(vcpu, gpa, new, bytes);
+@@ -8251,7 +8244,7 @@ int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu)
+ }
+ EXPORT_SYMBOL_GPL(kvm_skip_emulated_instruction);
+ 
+-static bool kvm_vcpu_check_breakpoint(struct kvm_vcpu *vcpu, int *r)
++static bool kvm_vcpu_check_code_breakpoint(struct kvm_vcpu *vcpu, int *r)
+ {
+       if (unlikely(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) &&
+           (vcpu->arch.guest_debug_dr7 & DR7_BP_EN_MASK)) {
+@@ -8320,25 +8313,23 @@ static bool is_vmware_backdoor_opcode(struct x86_emulate_ctxt *ctxt)
+ }
+ 
+ /*
+- * Decode to be emulated instruction. Return EMULATION_OK if success.
++ * Decode an instruction for emulation.  The caller is responsible for handling
++ * code breakpoints.  Note, manually detecting code breakpoints is unnecessary
++ * (and wrong) when emulating on an intercepted fault-like exception[*], as
++ * code breakpoints have higher priority and thus have already been done by
++ * hardware.
++ *
++ * [*] Except #MC, which is higher priority, but KVM should never emulate in
++ *     response to a machine check.
+  */
+ int x86_decode_emulated_instruction(struct kvm_vcpu *vcpu, int emulation_type,
+                                   void *insn, int insn_len)
+ {
+-      int r = EMULATION_OK;
+       struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
++      int r;
+ 
+       init_emulate_ctxt(vcpu);
+ 
+-      /*
+-       * We will reenter on the same instruction since we do not set
+-       * complete_userspace_io. This does not handle watchpoints yet,
+-       * those would be handled in the emulate_ops.
+-       */
+-      if (!(emulation_type & EMULTYPE_SKIP) &&
+-          kvm_vcpu_check_breakpoint(vcpu, &r))
+-              return r;
+-
+       r = x86_decode_insn(ctxt, insn, insn_len, emulation_type);
+ 
+       trace_kvm_emulate_insn_start(vcpu);
+@@ -8371,6 +8362,15 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
+       if (!(emulation_type & EMULTYPE_NO_DECODE)) {
+               kvm_clear_exception_queue(vcpu);
+ 
++              /*
++               * Return immediately if RIP hits a code breakpoint, such #DBs
++               * are fault-like and are higher priority than any faults on
++               * the code fetch itself.
++               */
++              if (!(emulation_type & EMULTYPE_SKIP) &&
++                  kvm_vcpu_check_code_breakpoint(vcpu, &r))
++                      return r;
++
+               r = x86_decode_emulated_instruction(vcpu, emulation_type,
+                                                   insn, insn_len);
+               if (r != EMULATION_OK)  {
+@@ -11747,20 +11747,15 @@ static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
+       vcpu_put(vcpu);
+ }
+ 
+-static void kvm_free_vcpus(struct kvm *kvm)
++static void kvm_unload_vcpu_mmus(struct kvm *kvm)
+ {
+       unsigned long i;
+       struct kvm_vcpu *vcpu;
+ 
+-      /*
+-       * Unpin any mmu pages first.
+-       */
+       kvm_for_each_vcpu(i, vcpu, kvm) {
+               kvm_clear_async_pf_completion_queue(vcpu);
+               kvm_unload_vcpu_mmu(vcpu);
+       }
+-
+-      kvm_destroy_vcpus(kvm);
+ }
+ 
+ void kvm_arch_sync_events(struct kvm *kvm)
+@@ -11866,11 +11861,12 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
+               __x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, 0, 0);
+               mutex_unlock(&kvm->slots_lock);
+       }
++      kvm_unload_vcpu_mmus(kvm);
+       static_call_cond(kvm_x86_vm_destroy)(kvm);
+       kvm_free_msr_filter(srcu_dereference_check(kvm->arch.msr_filter, &kvm->srcu, 1));
+       kvm_pic_destroy(kvm);
+       kvm_ioapic_destroy(kvm);
+-      kvm_free_vcpus(kvm);
++      kvm_destroy_vcpus(kvm);
+       kvfree(rcu_dereference_check(kvm->arch.apic_map, 1));
+       kfree(srcu_dereference_check(kvm->arch.pmu_event_filter, &kvm->srcu, 1));
+       kvm_mmu_uninit_vm(kvm);
+diff --git a/crypto/ecrdsa.c b/crypto/ecrdsa.c
+index b32ffcaad9adf..f3c6b5e15e75b 100644
+--- a/crypto/ecrdsa.c
++++ b/crypto/ecrdsa.c
+@@ -113,15 +113,15 @@ static int ecrdsa_verify(struct akcipher_request *req)
+ 
+       /* Step 1: verify that 0 < r < q, 0 < s < q */
+       if (vli_is_zero(r, ndigits) ||
+-          vli_cmp(r, ctx->curve->n, ndigits) == 1 ||
++          vli_cmp(r, ctx->curve->n, ndigits) >= 0 ||
+           vli_is_zero(s, ndigits) ||
+-          vli_cmp(s, ctx->curve->n, ndigits) == 1)
++          vli_cmp(s, ctx->curve->n, ndigits) >= 0)
+               return -EKEYREJECTED;
+ 
+       /* Step 2: calculate hash (h) of the message (passed as input) */
+       /* Step 3: calculate e = h \mod q */
+       vli_from_le64(e, digest, ndigits);
+-      if (vli_cmp(e, ctx->curve->n, ndigits) == 1)
++      if (vli_cmp(e, ctx->curve->n, ndigits) >= 0)
+               vli_sub(e, e, ctx->curve->n, ndigits);
+       if (vli_is_zero(e, ndigits))
+               e[0] = 1;
+@@ -137,7 +137,7 @@ static int ecrdsa_verify(struct akcipher_request *req)
+       /* Step 6: calculate point C = z_1P + z_2Q, and R = x_c \mod q */
+       ecc_point_mult_shamir(&cc, z1, &ctx->curve->g, z2, &ctx->pub_key,
+                             ctx->curve);
+-      if (vli_cmp(cc.x, ctx->curve->n, ndigits) == 1)
++      if (vli_cmp(cc.x, ctx->curve->n, ndigits) >= 0)
+               vli_sub(cc.x, cc.x, ctx->curve->n, ndigits);
+ 
+       /* Step 7: if R == r signature is valid */
+diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
+index f6e91fb432a3b..eab34e24d9446 100644
+--- a/drivers/bluetooth/hci_qca.c
++++ b/drivers/bluetooth/hci_qca.c
+@@ -696,9 +696,9 @@ static int qca_close(struct hci_uart *hu)
+       skb_queue_purge(&qca->tx_wait_q);
+       skb_queue_purge(&qca->txq);
+       skb_queue_purge(&qca->rx_memdump_q);
+-      del_timer(&qca->tx_idle_timer);
+-      del_timer(&qca->wake_retrans_timer);
+       destroy_workqueue(qca->workqueue);
++      del_timer_sync(&qca->tx_idle_timer);
++      del_timer_sync(&qca->wake_retrans_timer);
+       qca->hu = NULL;
+ 
+       kfree_skb(qca->rx_skb);
+diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
+index 4704fa553098b..04a3e23a4afc7 100644
+--- a/drivers/char/tpm/tpm2-cmd.c
++++ b/drivers/char/tpm/tpm2-cmd.c
+@@ -400,7 +400,16 @@ ssize_t tpm2_get_tpm_pt(struct tpm_chip *chip, u32 property_id,  u32 *value,
+       if (!rc) {
+               out = (struct tpm2_get_cap_out *)
+                       &buf.data[TPM_HEADER_SIZE];
+-              *value = be32_to_cpu(out->value);
++              /*
++               * To prevent failing boot up of some systems, Infineon TPM2.0
++               * returns SUCCESS on TPM2_Startup in field upgrade mode. Also
++               * the TPM2_Getcapability command returns a zero length list
++               * in field upgrade mode.
++               */
++              if (be32_to_cpu(out->property_cnt) > 0)
++                      *value = be32_to_cpu(out->value);
++              else
++                      rc = -ENODATA;
+       }
+       tpm_buf_destroy(&buf);
+       return rc;
+diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c
+index 3af4c07a9342f..d3989b257f422 100644
+--- a/drivers/char/tpm/tpm_ibmvtpm.c
++++ b/drivers/char/tpm/tpm_ibmvtpm.c
+@@ -681,6 +681,7 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
+       if (!wait_event_timeout(ibmvtpm->crq_queue.wq,
+                               ibmvtpm->rtce_buf != NULL,
+                               HZ)) {
++              rc = -ENODEV;
+               dev_err(dev, "CRQ response timed out\n");
+               goto init_irq_cleanup;
+       }
+diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
+index ca0361b2dbb07..f87aa2169e5f5 100644
+--- a/drivers/crypto/caam/ctrl.c
++++ b/drivers/crypto/caam/ctrl.c
+@@ -609,6 +609,13 @@ static bool check_version(struct fsl_mc_version *mc_version, u32 major,
+ }
+ #endif
+ 
++static bool needs_entropy_delay_adjustment(void)
++{
++      if (of_machine_is_compatible("fsl,imx6sx"))
++              return true;
++      return false;
++}
++
+ /* Probe routine for CAAM top (controller) level */
+ static int caam_probe(struct platform_device *pdev)
+ {
+@@ -855,6 +862,8 @@ static int caam_probe(struct platform_device *pdev)
+                        * Also, if a handle was instantiated, do not change
+                        * the TRNG parameters.
+                        */
++                      if (needs_entropy_delay_adjustment())
++                              ent_delay = 12000;
+                       if (!(ctrlpriv->rng4_sh_init || inst_handles)) {
+                               dev_info(dev,
+                                        "Entropy delay = %u\n",
+@@ -871,6 +880,15 @@ static int caam_probe(struct platform_device *pdev)
+                        */
+                       ret = instantiate_rng(dev, inst_handles,
+                                             gen_sk);
++                      /*
++                       * Entropy delay is determined via TRNG characterization.
++                       * TRNG characterization is run across different voltages
++                       * and temperatures.
++                       * If worst case value for ent_dly is identified,
++                       * the loop can be skipped for that platform.
++                       */
++                      if (needs_entropy_delay_adjustment())
++                              break;
+                       if (ret == -EAGAIN)
+                               /*
+                                * if here, the loop will rerun,
+diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h
+index a03c6cf723312..dfa7ee41c5e9c 100644
+--- a/drivers/crypto/qat/qat_common/adf_accel_devices.h
++++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h
+@@ -152,9 +152,9 @@ struct adf_pfvf_ops {
+       int (*enable_comms)(struct adf_accel_dev *accel_dev);
+       u32 (*get_pf2vf_offset)(u32 i);
+       u32 (*get_vf2pf_offset)(u32 i);
+-      u32 (*get_vf2pf_sources)(void __iomem *pmisc_addr);
+       void (*enable_vf2pf_interrupts)(void __iomem *pmisc_addr, u32 vf_mask);
+       void (*disable_vf2pf_interrupts)(void __iomem *pmisc_addr, u32 vf_mask);
++      u32 (*disable_pending_vf2pf_interrupts)(void __iomem *pmisc_addr);
+       int (*send_msg)(struct adf_accel_dev *accel_dev, struct pfvf_message msg,
+                       u32 pfvf_offset, struct mutex *csr_lock);
+       struct pfvf_message (*recv_msg)(struct adf_accel_dev *accel_dev,
+diff --git a/drivers/crypto/qat/qat_common/adf_gen2_pfvf.c b/drivers/crypto/qat/qat_common/adf_gen2_pfvf.c
+index 1a9072aac2ca9..def4cc8e1039a 100644
+--- a/drivers/crypto/qat/qat_common/adf_gen2_pfvf.c
++++ b/drivers/crypto/qat/qat_common/adf_gen2_pfvf.c
+@@ -13,6 +13,7 @@
+ #include "adf_pfvf_utils.h"
+ 
+  /* VF2PF interrupts */
++#define ADF_GEN2_VF_MSK                       0xFFFF
+ #define ADF_GEN2_ERR_REG_VF2PF(vf_src)        (((vf_src) & 0x01FFFE00) >> 9)
+ #define ADF_GEN2_ERR_MSK_VF2PF(vf_mask)       (((vf_mask) & 0xFFFF) << 9)
+ 
+@@ -50,23 +51,6 @@ static u32 adf_gen2_vf_get_pfvf_offset(u32 i)
+       return ADF_GEN2_VF_PF2VF_OFFSET;
+ }
+ 
+-static u32 adf_gen2_get_vf2pf_sources(void __iomem *pmisc_addr)
+-{
+-      u32 errsou3, errmsk3, vf_int_mask;
+-
+-      /* Get the interrupt sources triggered by VFs */
+-      errsou3 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRSOU3);
+-      vf_int_mask = ADF_GEN2_ERR_REG_VF2PF(errsou3);
+-
+-      /* To avoid adding duplicate entries to work queue, clear
+-       * vf_int_mask_sets bits that are already masked in ERRMSK register.
+-       */
+-      errmsk3 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3);
+-      vf_int_mask &= ~ADF_GEN2_ERR_REG_VF2PF(errmsk3);
+-
+-      return vf_int_mask;
+-}
+-
+ static void adf_gen2_enable_vf2pf_interrupts(void __iomem *pmisc_addr,
+                                            u32 vf_mask)
+ {
+@@ -89,6 +73,44 @@ static void adf_gen2_disable_vf2pf_interrupts(void __iomem *pmisc_addr,
+       }
+ }
+ 
++static u32 adf_gen2_disable_pending_vf2pf_interrupts(void __iomem *pmisc_addr)
++{
++      u32 sources, disabled, pending;
++      u32 errsou3, errmsk3;
++
++      /* Get the interrupt sources triggered by VFs */
++      errsou3 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRSOU3);
++      sources = ADF_GEN2_ERR_REG_VF2PF(errsou3);
++
++      if (!sources)
++              return 0;
++
++      /* Get the already disabled interrupts */
++      errmsk3 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3);
++      disabled = ADF_GEN2_ERR_REG_VF2PF(errmsk3);
++
++      pending = sources & ~disabled;
++      if (!pending)
++              return 0;
++
++      /* Due to HW limitations, when disabling the interrupts, we can't
++       * just disable the requested sources, as this would lead to missed
++       * interrupts if ERRSOU3 changes just before writing to ERRMSK3.
++       * To work around it, disable all and re-enable only the sources that
++       * are not in vf_mask and were not already disabled. Re-enabling will
++       * trigger a new interrupt for the sources that have changed in the
++       * meantime, if any.
++       */
++      errmsk3 |= ADF_GEN2_ERR_MSK_VF2PF(ADF_GEN2_VF_MSK);
++      ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, errmsk3);
++
++      errmsk3 &= ADF_GEN2_ERR_MSK_VF2PF(sources | disabled);
++      ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, errmsk3);
++
++      /* Return the sources of the (new) interrupt(s) */
++      return pending;
++}
++
+ static u32 gen2_csr_get_int_bit(enum gen2_csr_pos offset)
+ {
+       return ADF_PFVF_INT << offset;
+@@ -362,9 +384,9 @@ void adf_gen2_init_pf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops)
+       pfvf_ops->enable_comms = adf_enable_pf2vf_comms;
+       pfvf_ops->get_pf2vf_offset = adf_gen2_pf_get_pfvf_offset;
+       pfvf_ops->get_vf2pf_offset = adf_gen2_pf_get_pfvf_offset;
+-      pfvf_ops->get_vf2pf_sources = adf_gen2_get_vf2pf_sources;
+       pfvf_ops->enable_vf2pf_interrupts = adf_gen2_enable_vf2pf_interrupts;
+       pfvf_ops->disable_vf2pf_interrupts = adf_gen2_disable_vf2pf_interrupts;
++      pfvf_ops->disable_pending_vf2pf_interrupts = adf_gen2_disable_pending_vf2pf_interrupts;
+       pfvf_ops->send_msg = adf_gen2_pf2vf_send;
+       pfvf_ops->recv_msg = adf_gen2_vf2pf_recv;
+ }
+diff --git a/drivers/crypto/qat/qat_common/adf_gen4_pfvf.c b/drivers/crypto/qat/qat_common/adf_gen4_pfvf.c
+index d80d493a77568..40fdab857f959 100644
+--- a/drivers/crypto/qat/qat_common/adf_gen4_pfvf.c
++++ b/drivers/crypto/qat/qat_common/adf_gen4_pfvf.c
+@@ -15,6 +15,7 @@
+ /* VF2PF interrupt source registers */
+ #define ADF_4XXX_VM2PF_SOU            0x41A180
+ #define ADF_4XXX_VM2PF_MSK            0x41A1C0
++#define ADF_GEN4_VF_MSK                       0xFFFF
+ 
+ #define ADF_PFVF_GEN4_MSGTYPE_SHIFT   2
+ #define ADF_PFVF_GEN4_MSGTYPE_MASK    0x3F
+@@ -36,16 +37,6 @@ static u32 adf_gen4_pf_get_vf2pf_offset(u32 i)
+       return ADF_4XXX_VM2PF_OFFSET(i);
+ }
+ 
+-static u32 adf_gen4_get_vf2pf_sources(void __iomem *pmisc_addr)
+-{
+-      u32 sou, mask;
+-
+-      sou = ADF_CSR_RD(pmisc_addr, ADF_4XXX_VM2PF_SOU);
+-      mask = ADF_CSR_RD(pmisc_addr, ADF_4XXX_VM2PF_MSK);
+-
+-      return sou & ~mask;
+-}
+-
+ static void adf_gen4_enable_vf2pf_interrupts(void __iomem *pmisc_addr,
+                                            u32 vf_mask)
+ {
+@@ -64,6 +55,37 @@ static void adf_gen4_disable_vf2pf_interrupts(void __iomem *pmisc_addr,
+       ADF_CSR_WR(pmisc_addr, ADF_4XXX_VM2PF_MSK, val);
+ }
+ 
++static u32 adf_gen4_disable_pending_vf2pf_interrupts(void __iomem *pmisc_addr)
++{
++      u32 sources, disabled, pending;
++
++      /* Get the interrupt sources triggered by VFs */
++      sources = ADF_CSR_RD(pmisc_addr, ADF_4XXX_VM2PF_SOU);
++      if (!sources)
++              return 0;
++
++      /* Get the already disabled interrupts */
++      disabled = ADF_CSR_RD(pmisc_addr, ADF_4XXX_VM2PF_MSK);
++
++      pending = sources & ~disabled;
++      if (!pending)
++              return 0;
++
++      /* Due to HW limitations, when disabling the interrupts, we can't
++       * just disable the requested sources, as this would lead to missed
++       * interrupts if VM2PF_SOU changes just before writing to VM2PF_MSK.
++       * To work around it, disable all and re-enable only the sources that
++       * are not in vf_mask and were not already disabled. Re-enabling will
++       * trigger a new interrupt for the sources that have changed in the
++       * meantime, if any.
++       */
++      ADF_CSR_WR(pmisc_addr, ADF_4XXX_VM2PF_MSK, ADF_GEN4_VF_MSK);
++      ADF_CSR_WR(pmisc_addr, ADF_4XXX_VM2PF_MSK, disabled | sources);
++
++      /* Return the sources of the (new) interrupt(s) */
++      return pending;
++}
++
+ static int adf_gen4_pfvf_send(struct adf_accel_dev *accel_dev,
+                             struct pfvf_message msg, u32 pfvf_offset,
+                             struct mutex *csr_lock)
+@@ -115,9 +137,9 @@ void adf_gen4_init_pf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops)
+       pfvf_ops->enable_comms = adf_enable_pf2vf_comms;
+       pfvf_ops->get_pf2vf_offset = adf_gen4_pf_get_pf2vf_offset;
+       pfvf_ops->get_vf2pf_offset = adf_gen4_pf_get_vf2pf_offset;
+-      pfvf_ops->get_vf2pf_sources = adf_gen4_get_vf2pf_sources;
+       pfvf_ops->enable_vf2pf_interrupts = adf_gen4_enable_vf2pf_interrupts;
+       pfvf_ops->disable_vf2pf_interrupts = adf_gen4_disable_vf2pf_interrupts;
++      pfvf_ops->disable_pending_vf2pf_interrupts = adf_gen4_disable_pending_vf2pf_interrupts;
+       pfvf_ops->send_msg = adf_gen4_pfvf_send;
+       pfvf_ops->recv_msg = adf_gen4_pfvf_recv;
+ }
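
The gen4 hunk above folds the old get_vf2pf_sources() read into a single disable-and-report helper, so the pending-source snapshot and the mask update happen together. Below is a minimal user-space sketch of that mask-all-then-restore sequence with the CSR reads and writes replaced by plain variables; the register and function names are stand-ins, not the driver's.

/* Standalone illustration (not part of the patch): disable all VF2PF
 * sources, then re-enable everything that was neither pending nor already
 * masked.  A source that fires between the two writes stays latched and
 * raises a fresh interrupt once it is unmasked again, so nothing is lost
 * to the read-modify-write race the comment above describes. */
#include <stdint.h>
#include <stdio.h>

#define VF_MSK 0xFFFFu

/* Fake device state: SOU latches which VFs raised an interrupt,
 * MSK has a bit set for every source that is currently masked off. */
static uint32_t reg_sou, reg_msk;

static uint32_t disable_pending(void)
{
	uint32_t sources = reg_sou;          /* read interrupt sources      */
	uint32_t disabled = reg_msk;         /* read already-masked sources */
	uint32_t pending = sources & ~disabled;

	if (!pending)
		return 0;

	reg_msk = VF_MSK;                    /* mask everything first        */
	reg_msk = disabled | sources;        /* restore the untouched ones   */

	return pending;
}

int main(void)
{
	reg_sou = 0x0005;   /* VF0 and VF2 raised an interrupt */
	reg_msk = 0x0004;   /* VF2 was already masked          */
	printf("pending mask: 0x%04x\n", disable_pending()); /* 0x0001 */
	printf("new MSK:      0x%04x\n", reg_msk);           /* 0x0005 */
	return 0;
}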
+diff --git a/drivers/crypto/qat/qat_common/adf_isr.c b/drivers/crypto/qat/qat_common/adf_isr.c
+index a35149f8bf1ee..23f7fff32c642 100644
+--- a/drivers/crypto/qat/qat_common/adf_isr.c
++++ b/drivers/crypto/qat/qat_common/adf_isr.c
+@@ -76,32 +76,29 @@ void adf_disable_vf2pf_interrupts(struct adf_accel_dev 
*accel_dev, u32 vf_mask)
+       spin_unlock_irqrestore(&accel_dev->pf.vf2pf_ints_lock, flags);
+ }
+ 
+-static void adf_disable_vf2pf_interrupts_irq(struct adf_accel_dev *accel_dev,
+-                                           u32 vf_mask)
++static u32 adf_disable_pending_vf2pf_interrupts(struct adf_accel_dev *accel_dev)
+ {
+       void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
++      u32 pending;
+ 
+       spin_lock(&accel_dev->pf.vf2pf_ints_lock);
+-      GET_PFVF_OPS(accel_dev)->disable_vf2pf_interrupts(pmisc_addr, vf_mask);
++      pending = GET_PFVF_OPS(accel_dev)->disable_pending_vf2pf_interrupts(pmisc_addr);
+       spin_unlock(&accel_dev->pf.vf2pf_ints_lock);
++
++      return pending;
+ }
+ 
+ static bool adf_handle_vf2pf_int(struct adf_accel_dev *accel_dev)
+ {
+-      void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
+       bool irq_handled = false;
+       unsigned long vf_mask;
+ 
+-      /* Get the interrupt sources triggered by VFs */
+-      vf_mask = GET_PFVF_OPS(accel_dev)->get_vf2pf_sources(pmisc_addr);
+-
++      /* Get the interrupt sources triggered by VFs, except for those already disabled */
++      vf_mask = adf_disable_pending_vf2pf_interrupts(accel_dev);
+       if (vf_mask) {
+               struct adf_accel_vf_info *vf_info;
+               int i;
+ 
+-              /* Disable VF2PF interrupts for VFs with pending ints */
+-              adf_disable_vf2pf_interrupts_irq(accel_dev, vf_mask);
+-
+               /*
+                * Handle VF2PF interrupt unless the VF is malicious and
+                * is attempting to flood the host OS with VF2PF interrupts.
+diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
+index 09599fe4d2f3f..1e7bed8b011fe 100644
+--- a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
++++ b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
+@@ -7,6 +7,8 @@
+ #include "adf_dh895xcc_hw_data.h"
+ #include "icp_qat_hw.h"
+ 
++#define ADF_DH895XCC_VF_MSK   0xFFFFFFFF
++
+ /* Worker thread to service arbiter mappings */
+ static const u32 thrd_to_arb_map[ADF_DH895XCC_MAX_ACCELENGINES] = {
+       0x12222AAA, 0x11666666, 0x12222AAA, 0x11666666,
+@@ -114,29 +116,6 @@ static void adf_enable_ints(struct adf_accel_dev 
*accel_dev)
+                  ADF_DH895XCC_SMIA1_MASK);
+ }
+ 
+-static u32 get_vf2pf_sources(void __iomem *pmisc_bar)
+-{
+-      u32 errsou3, errmsk3, errsou5, errmsk5, vf_int_mask;
+-
+-      /* Get the interrupt sources triggered by VFs */
+-      errsou3 = ADF_CSR_RD(pmisc_bar, ADF_GEN2_ERRSOU3);
+-      vf_int_mask = ADF_DH895XCC_ERR_REG_VF2PF_L(errsou3);
+-
+-      /* To avoid adding duplicate entries to work queue, clear
+-       * vf_int_mask_sets bits that are already masked in ERRMSK register.
+-       */
+-      errmsk3 = ADF_CSR_RD(pmisc_bar, ADF_GEN2_ERRMSK3);
+-      vf_int_mask &= ~ADF_DH895XCC_ERR_REG_VF2PF_L(errmsk3);
+-
+-      /* Do the same for ERRSOU5 */
+-      errsou5 = ADF_CSR_RD(pmisc_bar, ADF_GEN2_ERRSOU5);
+-      errmsk5 = ADF_CSR_RD(pmisc_bar, ADF_GEN2_ERRMSK5);
+-      vf_int_mask |= ADF_DH895XCC_ERR_REG_VF2PF_U(errsou5);
+-      vf_int_mask &= ~ADF_DH895XCC_ERR_REG_VF2PF_U(errmsk5);
+-
+-      return vf_int_mask;
+-}
+-
+ static void enable_vf2pf_interrupts(void __iomem *pmisc_addr, u32 vf_mask)
+ {
+       /* Enable VF2PF Messaging Ints - VFs 0 through 15 per vf_mask[15:0] */
+@@ -150,7 +129,6 @@ static void enable_vf2pf_interrupts(void __iomem 
*pmisc_addr, u32 vf_mask)
+       if (vf_mask >> 16) {
+               u32 val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK5)
+                         & ~ADF_DH895XCC_ERR_MSK_VF2PF_U(vf_mask);
+-
+               ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK5, val);
+       }
+ }
+@@ -173,6 +151,54 @@ static void disable_vf2pf_interrupts(void __iomem 
*pmisc_addr, u32 vf_mask)
+       }
+ }
+ 
++static u32 disable_pending_vf2pf_interrupts(void __iomem *pmisc_addr)
++{
++      u32 sources, pending, disabled;
++      u32 errsou3, errmsk3;
++      u32 errsou5, errmsk5;
++
++      /* Get the interrupt sources triggered by VFs */
++      errsou3 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRSOU3);
++      errsou5 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRSOU5);
++      sources = ADF_DH895XCC_ERR_REG_VF2PF_L(errsou3)
++                | ADF_DH895XCC_ERR_REG_VF2PF_U(errsou5);
++
++      if (!sources)
++              return 0;
++
++      /* Get the already disabled interrupts */
++      errmsk3 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3);
++      errmsk5 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK5);
++      disabled = ADF_DH895XCC_ERR_REG_VF2PF_L(errmsk3)
++                 | ADF_DH895XCC_ERR_REG_VF2PF_U(errmsk5);
++
++      pending = sources & ~disabled;
++      if (!pending)
++              return 0;
++
++      /* Due to HW limitations, when disabling the interrupts, we can't
++       * just disable the requested sources, as this would lead to missed
++       * interrupts if sources changes just before writing to ERRMSK3 and
++       * ERRMSK5.
++       * To work around it, disable all and re-enable only the sources that
++       * are not in vf_mask and were not already disabled. Re-enabling will
++       * trigger a new interrupt for the sources that have changed in the
++       * meantime, if any.
++       */
++      errmsk3 |= ADF_DH895XCC_ERR_MSK_VF2PF_L(ADF_DH895XCC_VF_MSK);
++      errmsk5 |= ADF_DH895XCC_ERR_MSK_VF2PF_U(ADF_DH895XCC_VF_MSK);
++      ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, errmsk3);
++      ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK5, errmsk5);
++
++      errmsk3 &= ADF_DH895XCC_ERR_MSK_VF2PF_L(sources | disabled);
++      errmsk5 &= ADF_DH895XCC_ERR_MSK_VF2PF_U(sources | disabled);
++      ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, errmsk3);
++      ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK5, errmsk5);
++
++      /* Return the sources of the (new) interrupt(s) */
++      return pending;
++}
++
+ static void configure_iov_threads(struct adf_accel_dev *accel_dev, bool enable)
+ {
+       adf_gen2_cfg_iov_thds(accel_dev, enable,
+@@ -220,9 +246,9 @@ void adf_init_hw_data_dh895xcc(struct adf_hw_device_data 
*hw_data)
+       hw_data->disable_iov = adf_disable_sriov;
+ 
+       adf_gen2_init_pf_pfvf_ops(&hw_data->pfvf_ops);
+-      hw_data->pfvf_ops.get_vf2pf_sources = get_vf2pf_sources;
+       hw_data->pfvf_ops.enable_vf2pf_interrupts = enable_vf2pf_interrupts;
+       hw_data->pfvf_ops.disable_vf2pf_interrupts = disable_vf2pf_interrupts;
++      hw_data->pfvf_ops.disable_pending_vf2pf_interrupts = disable_pending_vf2pf_interrupts;
+       adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
+ }
+ 
+diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
+index 9333f732cda8e..5167d63010b99 100644
+--- a/drivers/gpu/drm/i915/intel_pm.c
++++ b/drivers/gpu/drm/i915/intel_pm.c
+@@ -2859,7 +2859,7 @@ static void ilk_compute_wm_level(const struct 
drm_i915_private *dev_priv,
+ }
+ 
+ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
+-                                u16 wm[8])
++                                u16 wm[])
+ {
+       struct intel_uncore *uncore = &dev_priv->uncore;
+ 
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index 053853a891c50..c297c63f3ec5c 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -768,6 +768,7 @@
+ #define USB_DEVICE_ID_LENOVO_X1_COVER 0x6085
+ #define USB_DEVICE_ID_LENOVO_X1_TAB   0x60a3
+ #define USB_DEVICE_ID_LENOVO_X1_TAB3  0x60b5
++#define USB_DEVICE_ID_LENOVO_X12_TAB  0x60fe
+ #define USB_DEVICE_ID_LENOVO_OPTICAL_USB_MOUSE_600E   0x600e
+ #define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_608D    0x608d
+ #define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_6019    0x6019
+diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
+index 99eabfb4145b5..6bb3890b0f2c9 100644
+--- a/drivers/hid/hid-multitouch.c
++++ b/drivers/hid/hid-multitouch.c
+@@ -2034,6 +2034,12 @@ static const struct hid_device_id mt_devices[] = {
+                          USB_VENDOR_ID_LENOVO,
+                          USB_DEVICE_ID_LENOVO_X1_TAB3) },
+ 
++      /* Lenovo X12 TAB Gen 1 */
++      { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT,
++              HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8,
++                         USB_VENDOR_ID_LENOVO,
++                         USB_DEVICE_ID_LENOVO_X12_TAB) },
++
+       /* MosArt panels */
+       { .driver_data = MT_CLS_CONFIDENCE_MINUS_ONE,
+               MT_USB_DEVICE(USB_VENDOR_ID_ASUS,
+@@ -2178,6 +2184,9 @@ static const struct hid_device_id mt_devices[] = {
+       { .driver_data = MT_CLS_GOOGLE,
+               HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY, USB_VENDOR_ID_GOOGLE,
+                       USB_DEVICE_ID_GOOGLE_TOUCH_ROSE) },
++      { .driver_data = MT_CLS_GOOGLE,
++              HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8, USB_VENDOR_ID_GOOGLE,
++                      USB_DEVICE_ID_GOOGLE_WHISKERS) },
+ 
+       /* Generic MT device */
+       { HID_DEVICE(HID_BUS_ANY, HID_GROUP_MULTITOUCH, HID_ANY_ID, HID_ANY_ID) },
+diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
+index c16157ee8c520..6078fa0c0d488 100644
+--- a/drivers/i2c/busses/i2c-ismt.c
++++ b/drivers/i2c/busses/i2c-ismt.c
+@@ -528,6 +528,9 @@ static int ismt_access(struct i2c_adapter *adap, u16 addr,
+ 
+       case I2C_SMBUS_BLOCK_PROC_CALL:
+               dev_dbg(dev, "I2C_SMBUS_BLOCK_PROC_CALL\n");
++              if (data->block[0] > I2C_SMBUS_BLOCK_MAX)
++                      return -EINVAL;
++
+               dma_size = I2C_SMBUS_BLOCK_MAX;
+               desc->tgtaddr_rw = ISMT_DESC_ADDR_RW(addr, 1);
+               desc->wr_len_cmd = data->block[0] + 1;
+diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
+index fb80539865d7c..159c6806c19b8 100644
+--- a/drivers/md/dm-crypt.c
++++ b/drivers/md/dm-crypt.c
+@@ -3439,6 +3439,11 @@ static int crypt_map(struct dm_target *ti, struct bio 
*bio)
+       return DM_MAPIO_SUBMITTED;
+ }
+ 
++static char hex2asc(unsigned char c)
++{
++      return c + '0' + ((unsigned)(9 - c) >> 4 & 0x27);
++}
++
+ static void crypt_status(struct dm_target *ti, status_type_t type,
+                        unsigned status_flags, char *result, unsigned maxlen)
+ {
+@@ -3457,9 +3462,12 @@ static void crypt_status(struct dm_target *ti, 
status_type_t type,
+               if (cc->key_size > 0) {
+                       if (cc->key_string)
+                               DMEMIT(":%u:%s", cc->key_size, cc->key_string);
+-                      else
+-                              for (i = 0; i < cc->key_size; i++)
+-                                      DMEMIT("%02x", cc->key[i]);
++                      else {
++                              for (i = 0; i < cc->key_size; i++) {
++                                      DMEMIT("%c%c", hex2asc(cc->key[i] >> 4),
++                                             hex2asc(cc->key[i] & 0xf));
++                              }
++                      }
+               } else
+                       DMEMIT("-");
+ 
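
A quick stand-alone check (not part of the patch) of the branchless hex2asc() helper dm-crypt now uses when formatting the key: for c up to 9 the (9 - c) term is non-negative, the masked shift contributes nothing and the result is '0' + c; for c of 10..15 the subtraction wraps and the & 0x27 leaves exactly the 39-byte offset from '0' + 10 to 'a'. The test harness and the example byte below are illustrative.

#include <assert.h>
#include <stdio.h>

static char hex2asc(unsigned char c)
{
	return c + '0' + ((unsigned)(9 - c) >> 4 & 0x27);
}

int main(void)
{
	static const char ref[] = "0123456789abcdef";

	/* Every nibble must map to the expected lowercase hex digit. */
	for (unsigned char c = 0; c < 16; c++)
		assert(hex2asc(c) == ref[c]);

	/* Format one example byte the way crypt_status() now does. */
	unsigned char key_byte = 0xd7;
	printf("%c%c\n", hex2asc(key_byte >> 4), hex2asc(key_byte & 0xf));
	return 0;
}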
+diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
+index 36ae30b73a6e0..3d5a0ce123c90 100644
+--- a/drivers/md/dm-integrity.c
++++ b/drivers/md/dm-integrity.c
+@@ -4494,8 +4494,6 @@ try_smaller_buffer:
+       }
+ 
+       if (should_write_sb) {
+-              int r;
+-
+               init_journal(ic, 0, ic->journal_sections, 0);
+               r = dm_integrity_failed(ic);
+               if (unlikely(r)) {
+diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
+index 0e039a8c0bf2e..a3f2050b9c9b4 100644
+--- a/drivers/md/dm-stats.c
++++ b/drivers/md/dm-stats.c
+@@ -225,6 +225,7 @@ void dm_stats_cleanup(struct dm_stats *stats)
+                                      atomic_read(&shared->in_flight[READ]),
+                                      atomic_read(&shared->in_flight[WRITE]));
+                       }
++                      cond_resched();
+               }
+               dm_stat_free(&s->rcu_head);
+       }
+@@ -330,6 +331,7 @@ static int dm_stats_create(struct dm_stats *stats, 
sector_t start, sector_t end,
+       for (ni = 0; ni < n_entries; ni++) {
+               atomic_set(&s->stat_shared[ni].in_flight[READ], 0);
+               atomic_set(&s->stat_shared[ni].in_flight[WRITE], 0);
++              cond_resched();
+       }
+ 
+       if (s->n_histogram_entries) {
+@@ -342,6 +344,7 @@ static int dm_stats_create(struct dm_stats *stats, 
sector_t start, sector_t end,
+               for (ni = 0; ni < n_entries; ni++) {
+                       s->stat_shared[ni].tmp.histogram = hi;
+                       hi += s->n_histogram_entries + 1;
++                      cond_resched();
+               }
+       }
+ 
+@@ -362,6 +365,7 @@ static int dm_stats_create(struct dm_stats *stats, 
sector_t start, sector_t end,
+                       for (ni = 0; ni < n_entries; ni++) {
+                               p[ni].histogram = hi;
+                               hi += s->n_histogram_entries + 1;
++                              cond_resched();
+                       }
+               }
+       }
+@@ -497,6 +501,7 @@ static int dm_stats_list(struct dm_stats *stats, const 
char *program,
+                       }
+                       DMEMIT("\n");
+               }
++              cond_resched();
+       }
+       mutex_unlock(&stats->mutex);
+ 
+@@ -774,6 +779,7 @@ static void __dm_stat_clear(struct dm_stat *s, size_t 
idx_start, size_t idx_end,
+                               local_irq_enable();
+                       }
+               }
++              cond_resched();
+       }
+ }
+ 
+@@ -889,6 +895,8 @@ static int dm_stats_print(struct dm_stats *stats, int id,
+ 
+               if (unlikely(sz + 1 >= maxlen))
+                       goto buffer_overflow;
++
++              cond_resched();
+       }
+ 
+       if (clear)
+diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
+index 80133aae0db37..d6dbd47492a85 100644
+--- a/drivers/md/dm-verity-target.c
++++ b/drivers/md/dm-verity-target.c
+@@ -1312,6 +1312,7 @@ bad:
+ 
+ static struct target_type verity_target = {
+       .name           = "verity",
++      .features       = DM_TARGET_IMMUTABLE,
+       .version        = {1, 8, 0},
+       .module         = THIS_MODULE,
+       .ctr            = verity_ctr,
+diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
+index 351d341a1ffa4..d6ce5a09fd358 100644
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -686,17 +686,17 @@ int raid5_calc_degraded(struct r5conf *conf)
+       return degraded;
+ }
+ 
+-static int has_failed(struct r5conf *conf)
++static bool has_failed(struct r5conf *conf)
+ {
+-      int degraded;
++      int degraded = conf->mddev->degraded;
+ 
+-      if (conf->mddev->reshape_position == MaxSector)
+-              return conf->mddev->degraded > conf->max_degraded;
++      if (test_bit(MD_BROKEN, &conf->mddev->flags))
++              return true;
+ 
+-      degraded = raid5_calc_degraded(conf);
+-      if (degraded > conf->max_degraded)
+-              return 1;
+-      return 0;
++      if (conf->mddev->reshape_position != MaxSector)
++              degraded = raid5_calc_degraded(conf);
++
++      return degraded > conf->max_degraded;
+ }
+ 
+ struct stripe_head *
+@@ -2863,34 +2863,31 @@ static void raid5_error(struct mddev *mddev, struct 
md_rdev *rdev)
+       unsigned long flags;
+       pr_debug("raid456: error called\n");
+ 
++      pr_crit("md/raid:%s: Disk failure on %s, disabling device.\n",
++              mdname(mddev), bdevname(rdev->bdev, b));
++
+       spin_lock_irqsave(&conf->device_lock, flags);
++      set_bit(Faulty, &rdev->flags);
++      clear_bit(In_sync, &rdev->flags);
++      mddev->degraded = raid5_calc_degraded(conf);
+ 
+-      if (test_bit(In_sync, &rdev->flags) &&
+-          mddev->degraded == conf->max_degraded) {
+-              /*
+-               * Don't allow to achieve failed state
+-               * Don't try to recover this device
+-               */
++      if (has_failed(conf)) {
++              set_bit(MD_BROKEN, &conf->mddev->flags);
+               conf->recovery_disabled = mddev->recovery_disabled;
+-              spin_unlock_irqrestore(&conf->device_lock, flags);
+-              return;
++
++              pr_crit("md/raid:%s: Cannot continue operation (%d/%d failed).\n",
++                      mdname(mddev), mddev->degraded, conf->raid_disks);
++      } else {
++              pr_crit("md/raid:%s: Operation continuing on %d devices.\n",
++                      mdname(mddev), conf->raid_disks - mddev->degraded);
+       }
+ 
+-      set_bit(Faulty, &rdev->flags);
+-      clear_bit(In_sync, &rdev->flags);
+-      mddev->degraded = raid5_calc_degraded(conf);
+       spin_unlock_irqrestore(&conf->device_lock, flags);
+       set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+ 
+       set_bit(Blocked, &rdev->flags);
+       set_mask_bits(&mddev->sb_flags, 0,
+                     BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
+-      pr_crit("md/raid:%s: Disk failure on %s, disabling device.\n"
+-              "md/raid:%s: Operation continuing on %d devices.\n",
+-              mdname(mddev),
+-              bdevname(rdev->bdev, b),
+-              mdname(mddev),
+-              conf->raid_disks - mddev->degraded);
+       r5c_update_on_rdev_error(mddev, rdev);
+ }
+ 
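
The raid5 hunk above turns has_failed() into a bool, lets MD_BROKEN short-circuit the check, and only recomputes the degraded count while a reshape is in progress. A simplified, self-contained model of that decision follows; the struct and flag below are stand-ins for r5conf/mddev, not the kernel types.

#include <stdbool.h>
#include <stdio.h>

#define MD_BROKEN_FLAG (1u << 0)
#define MAX_SECTOR     (~0ULL)

struct fake_conf {
	unsigned int flags;              /* mddev->flags (MD_BROKEN)      */
	int degraded;                    /* mddev->degraded               */
	int max_degraded;                /* conf->max_degraded            */
	unsigned long long reshape_pos;  /* mddev->reshape_position       */
	int reshape_degraded;            /* raid5_calc_degraded() result  */
};

static bool has_failed(const struct fake_conf *c)
{
	int degraded = c->degraded;

	if (c->flags & MD_BROKEN_FLAG)          /* a broken array stays failed */
		return true;

	if (c->reshape_pos != MAX_SECTOR)       /* recompute only during reshape */
		degraded = c->reshape_degraded;

	return degraded > c->max_degraded;
}

int main(void)
{
	struct fake_conf ok   = { 0, 1, 2, MAX_SECTOR, 0 };
	struct fake_conf dead = { 0, 3, 2, MAX_SECTOR, 0 };

	printf("degraded=1, max=2: %s\n", has_failed(&ok) ? "failed" : "running");
	printf("degraded=3, max=2: %s\n", has_failed(&dead) ? "failed" : "running");
	return 0;
}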
+diff --git a/drivers/media/i2c/imx412.c b/drivers/media/i2c/imx412.c
+index be3f6ea555597..84279a6808730 100644
+--- a/drivers/media/i2c/imx412.c
++++ b/drivers/media/i2c/imx412.c
+@@ -1011,7 +1011,7 @@ static int imx412_power_on(struct device *dev)
+       struct imx412 *imx412 = to_imx412(sd);
+       int ret;
+ 
+-      gpiod_set_value_cansleep(imx412->reset_gpio, 1);
++      gpiod_set_value_cansleep(imx412->reset_gpio, 0);
+ 
+       ret = clk_prepare_enable(imx412->inclk);
+       if (ret) {
+@@ -1024,7 +1024,7 @@ static int imx412_power_on(struct device *dev)
+       return 0;
+ 
+ error_reset:
+-      gpiod_set_value_cansleep(imx412->reset_gpio, 0);
++      gpiod_set_value_cansleep(imx412->reset_gpio, 1);
+ 
+       return ret;
+ }
+@@ -1040,10 +1040,10 @@ static int imx412_power_off(struct device *dev)
+       struct v4l2_subdev *sd = dev_get_drvdata(dev);
+       struct imx412 *imx412 = to_imx412(sd);
+ 
+-      gpiod_set_value_cansleep(imx412->reset_gpio, 0);
+-
+       clk_disable_unprepare(imx412->inclk);
+ 
++      gpiod_set_value_cansleep(imx412->reset_gpio, 1);
++
+       return 0;
+ }
+ 
+diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c
+index cea7b2e2ce969..53764f3c0c7e4 100644
+--- a/drivers/net/ipa/ipa_endpoint.c
++++ b/drivers/net/ipa/ipa_endpoint.c
+@@ -130,9 +130,10 @@ static bool ipa_endpoint_data_valid_one(struct ipa *ipa, 
u32 count,
+                */
+               if (data->endpoint.config.aggregation) {
+                       limit += SZ_1K * aggr_byte_limit_max(ipa->version);
+-                      if (buffer_size > limit) {
++                      if (buffer_size - NET_SKB_PAD > limit) {
+                               dev_err(dev, "RX buffer size too large for aggregated RX endpoint %u (%u > %u)\n",
+-                                      data->endpoint_id, buffer_size, limit);
++                                      data->endpoint_id,
++                                      buffer_size - NET_SKB_PAD, limit);
+ 
+                               return false;
+                       }
+@@ -739,6 +740,7 @@ static void ipa_endpoint_init_aggr(struct ipa_endpoint 
*endpoint)
+       if (endpoint->data->aggregation) {
+               if (!endpoint->toward_ipa) {
+                       const struct ipa_endpoint_rx_data *rx_data;
++                      u32 buffer_size;
+                       bool close_eof;
+                       u32 limit;
+ 
+@@ -746,7 +748,8 @@ static void ipa_endpoint_init_aggr(struct ipa_endpoint 
*endpoint)
+                       val |= u32_encode_bits(IPA_ENABLE_AGGR, AGGR_EN_FMASK);
+                       val |= u32_encode_bits(IPA_GENERIC, AGGR_TYPE_FMASK);
+ 
+-                      limit = ipa_aggr_size_kb(rx_data->buffer_size);
++                      buffer_size = rx_data->buffer_size;
++                      limit = ipa_aggr_size_kb(buffer_size - NET_SKB_PAD);
+                       val |= aggr_byte_limit_encoded(version, limit);
+ 
+                       limit = IPA_AGGR_TIME_LIMIT;
+diff --git a/fs/exfat/balloc.c b/fs/exfat/balloc.c
+index 03f1423071749..9f42f25fab920 100644
+--- a/fs/exfat/balloc.c
++++ b/fs/exfat/balloc.c
+@@ -148,7 +148,9 @@ int exfat_set_bitmap(struct inode *inode, unsigned int 
clu, bool sync)
+       struct super_block *sb = inode->i_sb;
+       struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ 
+-      WARN_ON(clu < EXFAT_FIRST_CLUSTER);
++      if (!is_valid_cluster(sbi, clu))
++              return -EINVAL;
++
+       ent_idx = CLUSTER_TO_BITMAP_ENT(clu);
+       i = BITMAP_OFFSET_SECTOR_INDEX(sb, ent_idx);
+       b = BITMAP_OFFSET_BIT_IN_SECTOR(sb, ent_idx);
+@@ -166,7 +168,9 @@ void exfat_clear_bitmap(struct inode *inode, unsigned int 
clu, bool sync)
+       struct exfat_sb_info *sbi = EXFAT_SB(sb);
+       struct exfat_mount_options *opts = &sbi->options;
+ 
+-      WARN_ON(clu < EXFAT_FIRST_CLUSTER);
++      if (!is_valid_cluster(sbi, clu))
++              return;
++
+       ent_idx = CLUSTER_TO_BITMAP_ENT(clu);
+       i = BITMAP_OFFSET_SECTOR_INDEX(sb, ent_idx);
+       b = BITMAP_OFFSET_BIT_IN_SECTOR(sb, ent_idx);
+diff --git a/fs/exfat/exfat_fs.h b/fs/exfat/exfat_fs.h
+index c6800b8809203..42d06c68d5c5e 100644
+--- a/fs/exfat/exfat_fs.h
++++ b/fs/exfat/exfat_fs.h
+@@ -381,6 +381,12 @@ static inline int exfat_sector_to_cluster(struct 
exfat_sb_info *sbi,
+               EXFAT_RESERVED_CLUSTERS;
+ }
+ 
++static inline bool is_valid_cluster(struct exfat_sb_info *sbi,
++              unsigned int clus)
++{
++      return clus >= EXFAT_FIRST_CLUSTER && clus < sbi->num_clusters;
++}
++
+ /* super.c */
+ int exfat_set_volume_dirty(struct super_block *sb);
+ int exfat_clear_volume_dirty(struct super_block *sb);
+diff --git a/fs/exfat/fatent.c b/fs/exfat/fatent.c
+index a3464e56a7e16..421c273531049 100644
+--- a/fs/exfat/fatent.c
++++ b/fs/exfat/fatent.c
+@@ -81,12 +81,6 @@ int exfat_ent_set(struct super_block *sb, unsigned int loc,
+       return 0;
+ }
+ 
+-static inline bool is_valid_cluster(struct exfat_sb_info *sbi,
+-              unsigned int clus)
+-{
+-      return clus >= EXFAT_FIRST_CLUSTER && clus < sbi->num_clusters;
+-}
+-
+ int exfat_ent_get(struct super_block *sb, unsigned int loc,
+               unsigned int *content)
+ {
+diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
+index 7eefa16ed381b..8f8cd6e2d4dbc 100644
+--- a/fs/nfs/internal.h
++++ b/fs/nfs/internal.h
+@@ -841,6 +841,7 @@ static inline bool nfs_error_is_fatal_on_server(int err)
+       case 0:
+       case -ERESTARTSYS:
+       case -EINTR:
++      case -ENOMEM:
+               return false;
+       }
+       return nfs_error_is_fatal(err);
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index 234e852fcdfad..d6e1f95ccfd8a 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -7330,16 +7330,12 @@ nfsd4_release_lockowner(struct svc_rqst *rqstp,
+               if (sop->so_is_open_owner || !same_owner_str(sop, owner))
+                       continue;
+ 
+-              /* see if there are still any locks associated with it */
+-              lo = lockowner(sop);
+-              list_for_each_entry(stp, &sop->so_stateids, st_perstateowner) {
+-                      if (check_for_locks(stp->st_stid.sc_file, lo)) {
+-                              status = nfserr_locks_held;
+-                              spin_unlock(&clp->cl_lock);
+-                              return status;
+-                      }
++              if (atomic_read(&sop->so_count) != 1) {
++                      spin_unlock(&clp->cl_lock);
++                      return nfserr_locks_held;
+               }
+ 
++              lo = lockowner(sop);
+               nfs4_get_stateowner(sop);
+               break;
+       }
+diff --git a/fs/ntfs3/super.c b/fs/ntfs3/super.c
+index 278dcf5024102..b2b54c4553f91 100644
+--- a/fs/ntfs3/super.c
++++ b/fs/ntfs3/super.c
+@@ -668,9 +668,11 @@ static u32 format_size_gb(const u64 bytes, u32 *mb)
+ 
+ static u32 true_sectors_per_clst(const struct NTFS_BOOT *boot)
+ {
+-      return boot->sectors_per_clusters <= 0x80
+-                     ? boot->sectors_per_clusters
+-                     : (1u << (0 - boot->sectors_per_clusters));
++      if (boot->sectors_per_clusters <= 0x80)
++              return boot->sectors_per_clusters;
++      if (boot->sectors_per_clusters >= 0xf4) /* limit shift to 2MB max */
++              return 1U << (0 - boot->sectors_per_clusters);
++      return -EINVAL;
+ }
+ 
+ /*
+@@ -713,6 +715,8 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 
sector_size,
+ 
+       /* cluster size: 512, 1K, 2K, 4K, ... 2M */
+       sct_per_clst = true_sectors_per_clst(boot);
++      if ((int)sct_per_clst < 0)
++              goto out;
+       if (!is_power_of_2(sct_per_clst))
+               goto out;
+ 
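
The ntfs3 change above rejects boot sectors whose sectors-per-cluster byte falls in the previously unchecked middle range. That byte is either a literal count (up to 0x80) or the cluster-size shift stored as a negative two's-complement value; the 0xf4 cut-off caps the shift at 12, i.e. 4096 sectors (2MB with 512-byte sectors). A stand-alone decoder sketch, not the kernel function:

#include <stdint.h>
#include <stdio.h>

/* Returns sectors per cluster, or 0 for an encoding the driver now rejects. */
static uint32_t true_sectors_per_clst(uint8_t v)
{
	if (v <= 0x80)
		return v;                       /* literal count               */
	if (v >= 0xf4)                          /* shift is 256 - v, at most 12 */
		return 1u << (uint8_t)(0 - v);
	return 0;                               /* previously accepted silently */
}

int main(void)
{
	const uint8_t samples[] = { 0x08, 0x80, 0xf6, 0xf4, 0x90 };

	for (unsigned int i = 0; i < sizeof(samples); i++)
		printf("0x%02x -> %u sectors/cluster\n",
		       samples[i], true_sectors_per_clst(samples[i]));
	return 0;
}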
+diff --git a/fs/pipe.c b/fs/pipe.c
+index e140ea150bbb1..74ae9fafd25a1 100644
+--- a/fs/pipe.c
++++ b/fs/pipe.c
+@@ -653,7 +653,7 @@ pipe_poll(struct file *filp, poll_table *wait)
+       unsigned int head, tail;
+ 
+       /* Epoll has some historical nasty semantics, this enables them */
+-      pipe->poll_usage = 1;
++      WRITE_ONCE(pipe->poll_usage, true);
+ 
+       /*
+        * Reading pipe state only -- no need for acquiring the semaphore.
+@@ -1245,30 +1245,33 @@ unsigned int round_pipe_size(unsigned long size)
+ 
+ /*
+  * Resize the pipe ring to a number of slots.
++ *
++ * Note the pipe can be reduced in capacity, but only if the current
++ * occupancy doesn't exceed nr_slots; if it does, EBUSY will be
++ * returned instead.
+  */
+ int pipe_resize_ring(struct pipe_inode_info *pipe, unsigned int nr_slots)
+ {
+       struct pipe_buffer *bufs;
+       unsigned int head, tail, mask, n;
+ 
+-      /*
+-       * We can shrink the pipe, if arg is greater than the ring occupancy.
+-       * Since we don't expect a lot of shrink+grow operations, just free and
+-       * allocate again like we would do for growing.  If the pipe currently
+-       * contains more buffers than arg, then return busy.
+-       */
+-      mask = pipe->ring_size - 1;
+-      head = pipe->head;
+-      tail = pipe->tail;
+-      n = pipe_occupancy(pipe->head, pipe->tail);
+-      if (nr_slots < n)
+-              return -EBUSY;
+-
+       bufs = kcalloc(nr_slots, sizeof(*bufs),
+                      GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
+       if (unlikely(!bufs))
+               return -ENOMEM;
+ 
++      spin_lock_irq(&pipe->rd_wait.lock);
++      mask = pipe->ring_size - 1;
++      head = pipe->head;
++      tail = pipe->tail;
++
++      n = pipe_occupancy(head, tail);
++      if (nr_slots < n) {
++              spin_unlock_irq(&pipe->rd_wait.lock);
++              kfree(bufs);
++              return -EBUSY;
++      }
++
+       /*
+        * The pipe array wraps around, so just start the new one at zero
+        * and adjust the indices.
+@@ -1300,6 +1303,8 @@ int pipe_resize_ring(struct pipe_inode_info *pipe, 
unsigned int nr_slots)
+       pipe->tail = tail;
+       pipe->head = head;
+ 
++      spin_unlock_irq(&pipe->rd_wait.lock);
++
+       /* This might have made more room for writers */
+       wake_up_interruptible(&pipe->wr_wait);
+       return 0;
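
The pipe hunk above moves the occupancy check and the head/tail snapshot under pipe->rd_wait.lock, after the allocation that may sleep, so a concurrent writer cannot invalidate the check. A user-space sketch of the same ordering on a generic power-of-two ring; the types, locking and names here are illustrative stand-ins, not the kernel's.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct ring {
	pthread_mutex_t lock;
	int *slots;
	unsigned int size;   /* power of two                 */
	unsigned int head;   /* free-running producer index  */
	unsigned int tail;   /* free-running consumer index  */
};

static int ring_resize(struct ring *r, unsigned int nr_slots)
{
	/* Allocate first: this may block, so do it before taking the lock. */
	int *slots = calloc(nr_slots, sizeof(*slots));
	unsigned int mask, n;

	if (!slots)
		return -ENOMEM;

	pthread_mutex_lock(&r->lock);
	mask = r->size - 1;
	n = r->head - r->tail;          /* occupancy, read under the lock */

	if (nr_slots < n) {             /* shrinking would drop queued items */
		pthread_mutex_unlock(&r->lock);
		free(slots);
		return -EBUSY;
	}

	/* Copy the occupied entries oldest-first so they start at slot 0
	 * of the new array, then rebase the indices. */
	for (unsigned int i = 0; i < n; i++)
		slots[i] = r->slots[(r->tail + i) & mask];

	free(r->slots);
	r->slots = slots;
	r->size = nr_slots;
	r->tail = 0;
	r->head = n;
	pthread_mutex_unlock(&r->lock);
	return 0;
}

int main(void)
{
	struct ring r;

	pthread_mutex_init(&r.lock, NULL);
	r.size = 4;
	r.head = r.tail = 0;
	r.slots = calloc(r.size, sizeof(int));

	for (int v = 1; v <= 3; v++)                 /* queue three items */
		r.slots[r.head++ & (r.size - 1)] = v;

	printf("grow to 8:   %d\n", ring_resize(&r, 8));  /* 0      */
	printf("shrink to 2: %d\n", ring_resize(&r, 2));  /* -EBUSY */
	free(r.slots);
	return 0;
}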
+diff --git a/include/linux/bpf_local_storage.h b/include/linux/bpf_local_storage.h
+index 493e632584970..7ea18d4da84b8 100644
+--- a/include/linux/bpf_local_storage.h
++++ b/include/linux/bpf_local_storage.h
+@@ -143,9 +143,9 @@ void bpf_selem_link_storage_nolock(struct 
bpf_local_storage *local_storage,
+ 
+ bool bpf_selem_unlink_storage_nolock(struct bpf_local_storage *local_storage,
+                                    struct bpf_local_storage_elem *selem,
+-                                   bool uncharge_omem);
++                                   bool uncharge_omem, bool use_trace_rcu);
+ 
+-void bpf_selem_unlink(struct bpf_local_storage_elem *selem);
++void bpf_selem_unlink(struct bpf_local_storage_elem *selem, bool use_trace_rcu);
+ 
+ void bpf_selem_link_map(struct bpf_local_storage_map *smap,
+                       struct bpf_local_storage_elem *selem);
+diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
+index c00c618ef290d..cb0fd633a6106 100644
+--- a/include/linux/pipe_fs_i.h
++++ b/include/linux/pipe_fs_i.h
+@@ -71,7 +71,7 @@ struct pipe_inode_info {
+       unsigned int files;
+       unsigned int r_counter;
+       unsigned int w_counter;
+-      unsigned int poll_usage;
++      bool poll_usage;
+       struct page *tmp_page;
+       struct fasync_struct *fasync_readers;
+       struct fasync_struct *fasync_writers;
+diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h
+index 13807ea94cd2b..2d524782f53b7 100644
+--- a/include/net/netfilter/nf_conntrack_core.h
++++ b/include/net/netfilter/nf_conntrack_core.h
+@@ -58,8 +58,13 @@ static inline int nf_conntrack_confirm(struct sk_buff *skb)
+       int ret = NF_ACCEPT;
+ 
+       if (ct) {
+-              if (!nf_ct_is_confirmed(ct))
++              if (!nf_ct_is_confirmed(ct)) {
+                       ret = __nf_conntrack_confirm(skb);
++
++                      if (ret == NF_ACCEPT)
++                              ct = (struct nf_conn *)skb_nfct(skb);
++              }
++
+               if (likely(ret == NF_ACCEPT))
+                       nf_ct_deliver_cached_events(ct);
+       }
+diff --git a/kernel/bpf/bpf_inode_storage.c b/kernel/bpf/bpf_inode_storage.c
+index 96be8d518885c..10424a1cda51d 100644
+--- a/kernel/bpf/bpf_inode_storage.c
++++ b/kernel/bpf/bpf_inode_storage.c
+@@ -90,7 +90,7 @@ void bpf_inode_storage_free(struct inode *inode)
+                */
+               bpf_selem_unlink_map(selem);
+               free_inode_storage = bpf_selem_unlink_storage_nolock(
+-                      local_storage, selem, false);
++                      local_storage, selem, false, false);
+       }
+       raw_spin_unlock_bh(&local_storage->lock);
+       rcu_read_unlock();
+@@ -149,7 +149,7 @@ static int inode_storage_delete(struct inode *inode, 
struct bpf_map *map)
+       if (!sdata)
+               return -ENOENT;
+ 
+-      bpf_selem_unlink(SELEM(sdata));
++      bpf_selem_unlink(SELEM(sdata), true);
+ 
+       return 0;
+ }
+diff --git a/kernel/bpf/bpf_local_storage.c b/kernel/bpf/bpf_local_storage.c
+index 01aa2b51ec4df..8ce40fd869f6a 100644
+--- a/kernel/bpf/bpf_local_storage.c
++++ b/kernel/bpf/bpf_local_storage.c
+@@ -106,7 +106,7 @@ static void bpf_selem_free_rcu(struct rcu_head *rcu)
+  */
+ bool bpf_selem_unlink_storage_nolock(struct bpf_local_storage *local_storage,
+                                    struct bpf_local_storage_elem *selem,
+-                                   bool uncharge_mem)
++                                   bool uncharge_mem, bool use_trace_rcu)
+ {
+       struct bpf_local_storage_map *smap;
+       bool free_local_storage;
+@@ -150,11 +150,16 @@ bool bpf_selem_unlink_storage_nolock(struct 
bpf_local_storage *local_storage,
+           SDATA(selem))
+               RCU_INIT_POINTER(local_storage->cache[smap->cache_idx], NULL);
+ 
+-      call_rcu_tasks_trace(&selem->rcu, bpf_selem_free_rcu);
++      if (use_trace_rcu)
++              call_rcu_tasks_trace(&selem->rcu, bpf_selem_free_rcu);
++      else
++              kfree_rcu(selem, rcu);
++
+       return free_local_storage;
+ }
+ 
+-static void __bpf_selem_unlink_storage(struct bpf_local_storage_elem *selem)
++static void __bpf_selem_unlink_storage(struct bpf_local_storage_elem *selem,
++                                     bool use_trace_rcu)
+ {
+       struct bpf_local_storage *local_storage;
+       bool free_local_storage = false;
+@@ -169,12 +174,16 @@ static void __bpf_selem_unlink_storage(struct 
bpf_local_storage_elem *selem)
+       raw_spin_lock_irqsave(&local_storage->lock, flags);
+       if (likely(selem_linked_to_storage(selem)))
+               free_local_storage = bpf_selem_unlink_storage_nolock(
+-                      local_storage, selem, true);
++                      local_storage, selem, true, use_trace_rcu);
+       raw_spin_unlock_irqrestore(&local_storage->lock, flags);
+ 
+-      if (free_local_storage)
+-              call_rcu_tasks_trace(&local_storage->rcu,
++      if (free_local_storage) {
++              if (use_trace_rcu)
++                      call_rcu_tasks_trace(&local_storage->rcu,
+                                    bpf_local_storage_free_rcu);
++              else
++                      kfree_rcu(local_storage, rcu);
++      }
+ }
+ 
+ void bpf_selem_link_storage_nolock(struct bpf_local_storage *local_storage,
+@@ -214,14 +223,14 @@ void bpf_selem_link_map(struct bpf_local_storage_map 
*smap,
+       raw_spin_unlock_irqrestore(&b->lock, flags);
+ }
+ 
+-void bpf_selem_unlink(struct bpf_local_storage_elem *selem)
++void bpf_selem_unlink(struct bpf_local_storage_elem *selem, bool use_trace_rcu)
+ {
+       /* Always unlink from map before unlinking from local_storage
+        * because selem will be freed after successfully unlinked from
+        * the local_storage.
+        */
+       bpf_selem_unlink_map(selem);
+-      __bpf_selem_unlink_storage(selem);
++      __bpf_selem_unlink_storage(selem, use_trace_rcu);
+ }
+ 
+ struct bpf_local_storage_data *
+@@ -466,7 +475,7 @@ bpf_local_storage_update(void *owner, struct 
bpf_local_storage_map *smap,
+       if (old_sdata) {
+               bpf_selem_unlink_map(SELEM(old_sdata));
+               bpf_selem_unlink_storage_nolock(local_storage, SELEM(old_sdata),
+-                                              false);
++                                              false, true);
+       }
+ 
+ unlock:
+@@ -548,7 +557,7 @@ void bpf_local_storage_map_free(struct 
bpf_local_storage_map *smap,
+                               migrate_disable();
+                               __this_cpu_inc(*busy_counter);
+                       }
+-                      bpf_selem_unlink(selem);
++                      bpf_selem_unlink(selem, false);
+                       if (busy_counter) {
+                               __this_cpu_dec(*busy_counter);
+                               migrate_enable();
+diff --git a/kernel/bpf/bpf_task_storage.c b/kernel/bpf/bpf_task_storage.c
+index 6638a0ecc3d21..57904263a710f 100644
+--- a/kernel/bpf/bpf_task_storage.c
++++ b/kernel/bpf/bpf_task_storage.c
+@@ -102,7 +102,7 @@ void bpf_task_storage_free(struct task_struct *task)
+                */
+               bpf_selem_unlink_map(selem);
+               free_task_storage = bpf_selem_unlink_storage_nolock(
+-                      local_storage, selem, false);
++                      local_storage, selem, false, false);
+       }
+       raw_spin_unlock_irqrestore(&local_storage->lock, flags);
+       bpf_task_storage_unlock();
+@@ -192,7 +192,7 @@ static int task_storage_delete(struct task_struct *task, 
struct bpf_map *map)
+       if (!sdata)
+               return -ENOENT;
+ 
+-      bpf_selem_unlink(SELEM(sdata));
++      bpf_selem_unlink(SELEM(sdata), true);
+ 
+       return 0;
+ }
+diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
+index 13e9dbeeedf36..05e701f0da81d 100644
+--- a/kernel/bpf/core.c
++++ b/kernel/bpf/core.c
+@@ -873,7 +873,7 @@ static size_t select_bpf_prog_pack_size(void)
+       return size;
+ }
+ 
+-static struct bpf_prog_pack *alloc_new_pack(void)
++static struct bpf_prog_pack *alloc_new_pack(bpf_jit_fill_hole_t bpf_fill_ill_insns)
+ {
+       struct bpf_prog_pack *pack;
+ 
+@@ -886,6 +886,7 @@ static struct bpf_prog_pack *alloc_new_pack(void)
+               kfree(pack);
+               return NULL;
+       }
++      bpf_fill_ill_insns(pack->ptr, bpf_prog_pack_size);
+       bitmap_zero(pack->bitmap, bpf_prog_pack_size / BPF_PROG_CHUNK_SIZE);
+       list_add_tail(&pack->list, &pack_list);
+ 
+@@ -895,7 +896,7 @@ static struct bpf_prog_pack *alloc_new_pack(void)
+       return pack;
+ }
+ 
+-static void *bpf_prog_pack_alloc(u32 size)
++static void *bpf_prog_pack_alloc(u32 size, bpf_jit_fill_hole_t bpf_fill_ill_insns)
+ {
+       unsigned int nbits = BPF_PROG_SIZE_TO_NBITS(size);
+       struct bpf_prog_pack *pack;
+@@ -910,6 +911,7 @@ static void *bpf_prog_pack_alloc(u32 size)
+               size = round_up(size, PAGE_SIZE);
+               ptr = module_alloc(size);
+               if (ptr) {
++                      bpf_fill_ill_insns(ptr, size);
+                       set_vm_flush_reset_perms(ptr);
+                       set_memory_ro((unsigned long)ptr, size / PAGE_SIZE);
+                       set_memory_x((unsigned long)ptr, size / PAGE_SIZE);
+@@ -923,7 +925,7 @@ static void *bpf_prog_pack_alloc(u32 size)
+                       goto found_free_area;
+       }
+ 
+-      pack = alloc_new_pack();
++      pack = alloc_new_pack(bpf_fill_ill_insns);
+       if (!pack)
+               goto out;
+ 
+@@ -1102,7 +1104,7 @@ bpf_jit_binary_pack_alloc(unsigned int proglen, u8 
**image_ptr,
+ 
+       if (bpf_jit_charge_modmem(size))
+               return NULL;
+-      ro_header = bpf_prog_pack_alloc(size);
++      ro_header = bpf_prog_pack_alloc(size, bpf_fill_ill_insns);
+       if (!ro_header) {
+               bpf_jit_uncharge_modmem(size);
+               return NULL;
+@@ -1434,6 +1436,16 @@ struct bpf_prog *bpf_jit_blind_constants(struct 
bpf_prog *prog)
+       insn = clone->insnsi;
+ 
+       for (i = 0; i < insn_cnt; i++, insn++) {
++              if (bpf_pseudo_func(insn)) {
++                      /* ld_imm64 with an address of bpf subprog is not
++                       * a user controlled constant. Don't randomize it,
++                       * since it will conflict with jit_subprogs() logic.
++                       */
++                      insn++;
++                      i++;
++                      continue;
++              }
++
+               /* We temporarily need to hold the original ld64 insn
+                * so that we can still access the first part in the
+                * second blinding run.
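
The core.c change above threads the JIT's fill callback into the pack allocator so freshly allocated executable memory is poisoned before any program is written into it. A stand-alone sketch of the allocate-then-poison pattern; the allocator, callback name and 0xCC fill byte below are illustrative, not the kernel's.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef void (*fill_hole_t)(void *area, size_t size);

static void fill_ill_insns(void *area, size_t size)
{
	memset(area, 0xCC, size);       /* e.g. x86 INT3 as a poison pattern */
}

/* Allocate a "pack" and poison it before any chunk is handed out, so gaps
 * that never receive real instructions cannot execute as anything useful. */
static void *pack_alloc(size_t size, fill_hole_t fill)
{
	void *pack = malloc(size);

	if (!pack)
		return NULL;
	fill(pack, size);
	return pack;
}

int main(void)
{
	unsigned char *pack = pack_alloc(64, fill_ill_insns);

	if (!pack)
		return 1;
	printf("first byte of fresh pack: 0x%02X\n", pack[0]);  /* 0xCC */
	free(pack);
	return 0;
}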
+diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
+index 34725bfa1e97b..5c6c96d0e634d 100644
+--- a/kernel/bpf/stackmap.c
++++ b/kernel/bpf/stackmap.c
+@@ -100,7 +100,6 @@ static struct bpf_map *stack_map_alloc(union bpf_attr 
*attr)
+               return ERR_PTR(-E2BIG);
+ 
+       cost = n_buckets * sizeof(struct stack_map_bucket *) + sizeof(*smap);
+-      cost += n_buckets * (value_size + sizeof(struct stack_map_bucket));
+       smap = bpf_map_area_alloc(cost, bpf_map_attr_numa_node(attr));
+       if (!smap)
+               return ERR_PTR(-ENOMEM);
+diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
+index ada97751ae1b2..5d8bfb5ef239d 100644
+--- a/kernel/bpf/trampoline.c
++++ b/kernel/bpf/trampoline.c
+@@ -411,7 +411,7 @@ int bpf_trampoline_link_prog(struct bpf_prog *prog, struct 
bpf_trampoline *tr)
+ {
+       enum bpf_tramp_prog_type kind;
+       int err = 0;
+-      int cnt;
++      int cnt = 0, i;
+ 
+       kind = bpf_attach_type_to_tramp(prog);
+       mutex_lock(&tr->mutex);
+@@ -422,7 +422,10 @@ int bpf_trampoline_link_prog(struct bpf_prog *prog, 
struct bpf_trampoline *tr)
+               err = -EBUSY;
+               goto out;
+       }
+-      cnt = tr->progs_cnt[BPF_TRAMP_FENTRY] + tr->progs_cnt[BPF_TRAMP_FEXIT];
++
++      for (i = 0; i < BPF_TRAMP_MAX; i++)
++              cnt += tr->progs_cnt[i];
++
+       if (kind == BPF_TRAMP_REPLACE) {
+               /* Cannot attach extension if fentry/fexit are in use. */
+               if (cnt) {
+@@ -500,16 +503,19 @@ out:
+ 
+ void bpf_trampoline_put(struct bpf_trampoline *tr)
+ {
++      int i;
++
+       if (!tr)
+               return;
+       mutex_lock(&trampoline_mutex);
+       if (!refcount_dec_and_test(&tr->refcnt))
+               goto out;
+       WARN_ON_ONCE(mutex_is_locked(&tr->mutex));
+-      if (WARN_ON_ONCE(!hlist_empty(&tr->progs_hlist[BPF_TRAMP_FENTRY])))
+-              goto out;
+-      if (WARN_ON_ONCE(!hlist_empty(&tr->progs_hlist[BPF_TRAMP_FEXIT])))
+-              goto out;
++
++      for (i = 0; i < BPF_TRAMP_MAX; i++)
++              if (WARN_ON_ONCE(!hlist_empty(&tr->progs_hlist[i])))
++                      goto out;
++
+       /* This code will be executed even when the last bpf_tramp_image
+        * is alive. All progs are detached from the trampoline and the
+        * trampoline image is patched with jmp into epilogue to skip
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index d175b70067b30..9c1a02b82ecd0 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -4861,6 +4861,11 @@ static int check_helper_mem_access(struct 
bpf_verifier_env *env, int regno,
+               return check_packet_access(env, regno, reg->off, access_size,
+                                          zero_size_allowed);
+       case PTR_TO_MAP_KEY:
++              if (meta && meta->raw_mode) {
++                      verbose(env, "R%d cannot write into %s\n", regno,
++                              reg_type_str(env, reg->type));
++                      return -EACCES;
++              }
+              return check_mem_region_access(env, regno, reg->off, access_size,
+                                              reg->map_ptr->key_size, false);
+       case PTR_TO_MAP_VALUE:
+@@ -4871,13 +4876,23 @@ static int check_helper_mem_access(struct 
bpf_verifier_env *env, int regno,
+               return check_map_access(env, regno, reg->off, access_size,
+                                       zero_size_allowed);
+       case PTR_TO_MEM:
++              if (type_is_rdonly_mem(reg->type)) {
++                      if (meta && meta->raw_mode) {
++                              verbose(env, "R%d cannot write into %s\n", regno,
++                                      reg_type_str(env, reg->type));
++                              return -EACCES;
++                      }
++              }
+               return check_mem_region_access(env, regno, reg->off,
+                                              access_size, reg->mem_size,
+                                              zero_size_allowed);
+       case PTR_TO_BUF:
+               if (type_is_rdonly_mem(reg->type)) {
+-                      if (meta && meta->raw_mode)
++                      if (meta && meta->raw_mode) {
++                              verbose(env, "R%d cannot write into %s\n", regno,
++                                      reg_type_str(env, reg->type));
+                               return -EACCES;
++                      }
+ 
+                       max_access = &env->prog->aux->max_rdonly_access;
+               } else {
+@@ -4919,8 +4934,7 @@ static int check_mem_size_reg(struct bpf_verifier_env 
*env,
+        * out. Only upper bounds can be learned because retval is an
+        * int type and negative retvals are allowed.
+        */
+-      if (meta)
+-              meta->msize_max_value = reg->umax_value;
++      meta->msize_max_value = reg->umax_value;
+ 
+       /* The register is SCALAR_VALUE; the access check
+        * happens using its boundaries.
+@@ -4963,24 +4977,33 @@ static int check_mem_size_reg(struct bpf_verifier_env 
*env,
+ int check_mem_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
+                  u32 regno, u32 mem_size)
+ {
++      bool may_be_null = type_may_be_null(reg->type);
++      struct bpf_reg_state saved_reg;
++      struct bpf_call_arg_meta meta;
++      int err;
++
+       if (register_is_null(reg))
+               return 0;
+ 
+-      if (type_may_be_null(reg->type)) {
+-              /* Assuming that the register contains a value check if the memory
+-               * access is safe. Temporarily save and restore the register's state as
+-               * the conversion shouldn't be visible to a caller.
+-               */
+-              const struct bpf_reg_state saved_reg = *reg;
+-              int rv;
+-
++      memset(&meta, 0, sizeof(meta));
++      /* Assuming that the register contains a value check if the memory
++       * access is safe. Temporarily save and restore the register's state as
++       * the conversion shouldn't be visible to a caller.
++       */
++      if (may_be_null) {
++              saved_reg = *reg;
+               mark_ptr_not_null_reg(reg);
+-              rv = check_helper_mem_access(env, regno, mem_size, true, NULL);
+-              *reg = saved_reg;
+-              return rv;
+       }
+ 
+-      return check_helper_mem_access(env, regno, mem_size, true, NULL);
++      err = check_helper_mem_access(env, regno, mem_size, true, &meta);
++      /* Check access for BPF_WRITE */
++      meta.raw_mode = true;
++      err = err ?: check_helper_mem_access(env, regno, mem_size, true, &meta);
++
++      if (may_be_null)
++              *reg = saved_reg;
++
++      return err;
+ }
+ 
+ int check_kfunc_mem_size_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
+@@ -4989,16 +5012,22 @@ int check_kfunc_mem_size_reg(struct bpf_verifier_env 
*env, struct bpf_reg_state
+       struct bpf_reg_state *mem_reg = &cur_regs(env)[regno - 1];
+       bool may_be_null = type_may_be_null(mem_reg->type);
+       struct bpf_reg_state saved_reg;
++      struct bpf_call_arg_meta meta;
+       int err;
+ 
+       WARN_ON_ONCE(regno < BPF_REG_2 || regno > BPF_REG_5);
+ 
++      memset(&meta, 0, sizeof(meta));
++
+       if (may_be_null) {
+               saved_reg = *mem_reg;
+               mark_ptr_not_null_reg(mem_reg);
+       }
+ 
+-      err = check_mem_size_reg(env, reg, regno, true, NULL);
++      err = check_mem_size_reg(env, reg, regno, true, &meta);
++      /* Check access for BPF_WRITE */
++      meta.raw_mode = true;
++      err = err ?: check_mem_size_reg(env, reg, regno, true, &meta);
+ 
+       if (may_be_null)
+               *mem_reg = saved_reg;
+diff --git a/lib/assoc_array.c b/lib/assoc_array.c
+index 079c72e26493e..ca0b4f360c1a0 100644
+--- a/lib/assoc_array.c
++++ b/lib/assoc_array.c
+@@ -1461,6 +1461,7 @@ int assoc_array_gc(struct assoc_array *array,
+       struct assoc_array_ptr *cursor, *ptr;
+       struct assoc_array_ptr *new_root, *new_parent, **new_ptr_pp;
+       unsigned long nr_leaves_on_tree;
++      bool retained;
+       int keylen, slot, nr_free, next_slot, i;
+ 
+       pr_devel("-->%s()\n", __func__);
+@@ -1536,6 +1537,7 @@ continue_node:
+               goto descend;
+       }
+ 
++retry_compress:
+       pr_devel("-- compress node %p --\n", new_n);
+ 
+       /* Count up the number of empty slots in this node and work out the
+@@ -1553,6 +1555,7 @@ continue_node:
+       pr_devel("free=%d, leaves=%lu\n", nr_free, new_n->nr_leaves_on_branch);
+ 
+       /* See what we can fold in */
++      retained = false;
+       next_slot = 0;
+       for (slot = 0; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
+               struct assoc_array_shortcut *s;
+@@ -1602,9 +1605,14 @@ continue_node:
+                       pr_devel("[%d] retain node %lu/%d [nx %d]\n",
+                                slot, child->nr_leaves_on_branch, nr_free + 1,
+                                next_slot);
++                      retained = true;
+               }
+       }
+ 
++      if (retained && new_n->nr_leaves_on_branch <= ASSOC_ARRAY_FAN_OUT) {
++              pr_devel("internal nodes remain despite enough space, retrying\n");
++              goto retry_compress;
++      }
+       pr_devel("after: %lu\n", new_n->nr_leaves_on_branch);
+ 
+       nr_leaves_on_tree = new_n->nr_leaves_on_branch;
+diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
+index 9152fbde33b50..5d5fc04385b8d 100644
+--- a/mm/zsmalloc.c
++++ b/mm/zsmalloc.c
+@@ -1718,11 +1718,40 @@ static enum fullness_group putback_zspage(struct 
size_class *class,
+  */
+ static void lock_zspage(struct zspage *zspage)
+ {
+-      struct page *page = get_first_page(zspage);
++      struct page *curr_page, *page;
+ 
+-      do {
+-              lock_page(page);
+-      } while ((page = get_next_page(page)) != NULL);
++      /*
++       * Pages we haven't locked yet can be migrated off the list while we're
++       * trying to lock them, so we need to be careful and only attempt to
++       * lock each page under migrate_read_lock(). Otherwise, the page we lock
++       * may no longer belong to the zspage. This means that we may wait for
++       * the wrong page to unlock, so we must take a reference to the page
++       * prior to waiting for it to unlock outside migrate_read_lock().
++       */
++      while (1) {
++              migrate_read_lock(zspage);
++              page = get_first_page(zspage);
++              if (trylock_page(page))
++                      break;
++              get_page(page);
++              migrate_read_unlock(zspage);
++              wait_on_page_locked(page);
++              put_page(page);
++      }
++
++      curr_page = page;
++      while ((page = get_next_page(curr_page))) {
++              if (trylock_page(page)) {
++                      curr_page = page;
++              } else {
++                      get_page(page);
++                      migrate_read_unlock(zspage);
++                      wait_on_page_locked(page);
++                      put_page(page);
++                      migrate_read_lock(zspage);
++              }
++      }
++      migrate_read_unlock(zspage);
+ }
+ 
+ static int zs_init_fs_context(struct fs_context *fc)
+diff --git a/net/core/bpf_sk_storage.c b/net/core/bpf_sk_storage.c
+index e3ac363805203..83d7641ef67b0 100644
+--- a/net/core/bpf_sk_storage.c
++++ b/net/core/bpf_sk_storage.c
+@@ -40,7 +40,7 @@ static int bpf_sk_storage_del(struct sock *sk, struct 
bpf_map *map)
+       if (!sdata)
+               return -ENOENT;
+ 
+-      bpf_selem_unlink(SELEM(sdata));
++      bpf_selem_unlink(SELEM(sdata), true);
+ 
+       return 0;
+ }
+@@ -75,8 +75,8 @@ void bpf_sk_storage_free(struct sock *sk)
+                * sk_storage.
+                */
+               bpf_selem_unlink_map(selem);
+-              free_sk_storage = bpf_selem_unlink_storage_nolock(sk_storage,
+-                                                                selem, true);
++              free_sk_storage = bpf_selem_unlink_storage_nolock(
++                      sk_storage, selem, true, false);
+       }
+       raw_spin_unlock_bh(&sk_storage->lock);
+       rcu_read_unlock();
+diff --git a/net/core/filter.c b/net/core/filter.c
+index 64470a727ef77..966796b345e78 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -1687,7 +1687,7 @@ BPF_CALL_5(bpf_skb_store_bytes, struct sk_buff *, skb, 
u32, offset,
+ 
+       if (unlikely(flags & ~(BPF_F_RECOMPUTE_CSUM | BPF_F_INVALIDATE_HASH)))
+               return -EINVAL;
+-      if (unlikely(offset > 0xffff))
++      if (unlikely(offset > INT_MAX))
+               return -EFAULT;
+       if (unlikely(bpf_try_make_writable(skb, offset + len)))
+               return -EFAULT;
+@@ -1722,7 +1722,7 @@ BPF_CALL_4(bpf_skb_load_bytes, const struct sk_buff *, 
skb, u32, offset,
+ {
+       void *ptr;
+ 
+-      if (unlikely(offset > 0xffff))
++      if (unlikely(offset > INT_MAX))
+               goto err_clear;
+ 
+       ptr = skb_header_pointer(skb, offset, len, to);
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index a096b9fbbbdff..b6a9208130051 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -222,12 +222,18 @@ err_register:
+ }
+ 
+ static void nft_netdev_unregister_hooks(struct net *net,
+-                                      struct list_head *hook_list)
++                                      struct list_head *hook_list,
++                                      bool release_netdev)
+ {
+-      struct nft_hook *hook;
++      struct nft_hook *hook, *next;
+ 
+-      list_for_each_entry(hook, hook_list, list)
++      list_for_each_entry_safe(hook, next, hook_list, list) {
+               nf_unregister_net_hook(net, &hook->ops);
++              if (release_netdev) {
++                      list_del(&hook->list);
++                      kfree_rcu(hook, rcu);
++              }
++      }
+ }
+ 
+ static int nf_tables_register_hook(struct net *net,
+@@ -253,9 +259,10 @@ static int nf_tables_register_hook(struct net *net,
+       return nf_register_net_hook(net, &basechain->ops);
+ }
+ 
+-static void nf_tables_unregister_hook(struct net *net,
+-                                    const struct nft_table *table,
+-                                    struct nft_chain *chain)
++static void __nf_tables_unregister_hook(struct net *net,
++                                      const struct nft_table *table,
++                                      struct nft_chain *chain,
++                                      bool release_netdev)
+ {
+       struct nft_base_chain *basechain;
+       const struct nf_hook_ops *ops;
+@@ -270,11 +277,19 @@ static void nf_tables_unregister_hook(struct net *net,
+               return basechain->type->ops_unregister(net, ops);
+ 
+       if (nft_base_chain_netdev(table->family, basechain->ops.hooknum))
+-              nft_netdev_unregister_hooks(net, &basechain->hook_list);
++              nft_netdev_unregister_hooks(net, &basechain->hook_list,
++                                          release_netdev);
+       else
+               nf_unregister_net_hook(net, &basechain->ops);
+ }
+ 
++static void nf_tables_unregister_hook(struct net *net,
++                                    const struct nft_table *table,
++                                    struct nft_chain *chain)
++{
++      return __nf_tables_unregister_hook(net, table, chain, false);
++}
++
+ static void nft_trans_commit_list_add_tail(struct net *net, struct nft_trans *trans)
+ {
+       struct nftables_pernet *nft_net = nft_pernet(net);
+@@ -2873,27 +2888,31 @@ static struct nft_expr *nft_expr_init(const struct 
nft_ctx *ctx,
+ 
+       err = nf_tables_expr_parse(ctx, nla, &expr_info);
+       if (err < 0)
+-              goto err1;
++              goto err_expr_parse;
++
++      err = -EOPNOTSUPP;
++      if (!(expr_info.ops->type->flags & NFT_EXPR_STATEFUL))
++              goto err_expr_stateful;
+ 
+       err = -ENOMEM;
+       expr = kzalloc(expr_info.ops->size, GFP_KERNEL_ACCOUNT);
+       if (expr == NULL)
+-              goto err2;
++              goto err_expr_stateful;
+ 
+       err = nf_tables_newexpr(ctx, &expr_info, expr);
+       if (err < 0)
+-              goto err3;
++              goto err_expr_new;
+ 
+       return expr;
+-err3:
++err_expr_new:
+       kfree(expr);
+-err2:
++err_expr_stateful:
+       owner = expr_info.ops->type->owner;
+       if (expr_info.ops->type->release_ops)
+               expr_info.ops->type->release_ops(expr_info.ops);
+ 
+       module_put(owner);
+-err1:
++err_expr_parse:
+       return ERR_PTR(err);
+ }
+ 
+@@ -4242,6 +4261,9 @@ static int nft_set_desc_concat_parse(const struct nlattr 
*attr,
+       u32 len;
+       int err;
+ 
++      if (desc->field_count >= ARRAY_SIZE(desc->field_len))
++              return -E2BIG;
++
+       err = nla_parse_nested_deprecated(tb, NFTA_SET_FIELD_MAX, attr,
+                                         nft_concat_policy, NULL);
+       if (err < 0)
+@@ -4251,9 +4273,8 @@ static int nft_set_desc_concat_parse(const struct nlattr 
*attr,
+               return -EINVAL;
+ 
+       len = ntohl(nla_get_be32(tb[NFTA_SET_FIELD_LEN]));
+-
+-      if (len * BITS_PER_BYTE / 32 > NFT_REG32_COUNT)
+-              return -E2BIG;
++      if (!len || len > U8_MAX)
++              return -EINVAL;
+ 
+       desc->field_len[desc->field_count++] = len;
+ 
+@@ -4264,7 +4285,8 @@ static int nft_set_desc_concat(struct nft_set_desc *desc,
+                              const struct nlattr *nla)
+ {
+       struct nlattr *attr;
+-      int rem, err;
++      u32 num_regs = 0;
++      int rem, err, i;
+ 
+       nla_for_each_nested(attr, nla, rem) {
+               if (nla_type(attr) != NFTA_LIST_ELEM)
+@@ -4275,6 +4297,12 @@ static int nft_set_desc_concat(struct nft_set_desc 
*desc,
+                       return err;
+       }
+ 
++      for (i = 0; i < desc->field_count; i++)
++              num_regs += DIV_ROUND_UP(desc->field_len[i], sizeof(u32));
++
++      if (num_regs > NFT_REG32_COUNT)
++              return -E2BIG;
++
+       return 0;
+ }
+ 
+@@ -5413,9 +5441,6 @@ struct nft_expr *nft_set_elem_expr_alloc(const struct nft_ctx *ctx,
+               return expr;
+ 
+       err = -EOPNOTSUPP;
+-      if (!(expr->ops->type->flags & NFT_EXPR_STATEFUL))
+-              goto err_set_elem_expr;
+-
+       if (expr->ops->type->flags & NFT_EXPR_GC) {
+               if (set->flags & NFT_SET_TIMEOUT)
+                       goto err_set_elem_expr;
+@@ -7291,13 +7316,25 @@ static void nft_unregister_flowtable_hook(struct net *net,
+                                   FLOW_BLOCK_UNBIND);
+ }
+ 
+-static void nft_unregister_flowtable_net_hooks(struct net *net,
+-                                             struct list_head *hook_list)
++static void __nft_unregister_flowtable_net_hooks(struct net *net,
++                                               struct list_head *hook_list,
++                                               bool release_netdev)
+ {
+-      struct nft_hook *hook;
++      struct nft_hook *hook, *next;
+ 
+-      list_for_each_entry(hook, hook_list, list)
++      list_for_each_entry_safe(hook, next, hook_list, list) {
+               nf_unregister_net_hook(net, &hook->ops);
++              if (release_netdev) {
++                      list_del(&hook->list);
++                      kfree_rcu(hook);
++              }
++      }
++}
++
++static void nft_unregister_flowtable_net_hooks(struct net *net,
++                                             struct list_head *hook_list)
++{
++      __nft_unregister_flowtable_net_hooks(net, hook_list, false);
+ }
+ 
+ static int nft_register_flowtable_net_hooks(struct net *net,
+@@ -9741,9 +9778,10 @@ static void __nft_release_hook(struct net *net, struct nft_table *table)
+       struct nft_chain *chain;
+ 
+       list_for_each_entry(chain, &table->chains, list)
+-              nf_tables_unregister_hook(net, table, chain);
++              __nf_tables_unregister_hook(net, table, chain, true);
+       list_for_each_entry(flowtable, &table->flowtables, list)
+-              nft_unregister_flowtable_net_hooks(net, &flowtable->hook_list);
++              __nft_unregister_flowtable_net_hooks(net, &flowtable->hook_list,
++                                                   true);
+ }
+ 
+ static void __nft_release_hooks(struct net *net)
+@@ -9882,7 +9920,11 @@ static int __net_init nf_tables_init_net(struct net *net)
+ 
+ static void __net_exit nf_tables_pre_exit_net(struct net *net)
+ {
++      struct nftables_pernet *nft_net = nft_pernet(net);
++
++      mutex_lock(&nft_net->commit_mutex);
+       __nft_release_hooks(net);
++      mutex_unlock(&nft_net->commit_mutex);
+ }
+ 
+ static void __net_exit nf_tables_exit_net(struct net *net)
+diff --git a/net/netfilter/nft_limit.c b/net/netfilter/nft_limit.c
+index 04ea8b9bf2028..981addb2d0515 100644
+--- a/net/netfilter/nft_limit.c
++++ b/net/netfilter/nft_limit.c
+@@ -213,6 +213,8 @@ static int nft_limit_pkts_clone(struct nft_expr *dst, const struct nft_expr *src
+       struct nft_limit_priv_pkts *priv_dst = nft_expr_priv(dst);
+       struct nft_limit_priv_pkts *priv_src = nft_expr_priv(src);
+ 
++      priv_dst->cost = priv_src->cost;
++
+       return nft_limit_clone(&priv_dst->limit, &priv_src->limit);
+ }
+ 
+diff --git a/sound/usb/clock.c b/sound/usb/clock.c
+index 4dfe76416794f..33db334e65566 100644
+--- a/sound/usb/clock.c
++++ b/sound/usb/clock.c
+@@ -572,6 +572,17 @@ static int set_sample_rate_v2v3(struct snd_usb_audio *chip,
+               /* continue processing */
+       }
+ 
++      /* FIXME - TEAC devices require the immediate interface setup */
++      if (USB_ID_VENDOR(chip->usb_id) == 0x0644) {
++              bool cur_base_48k = (rate % 48000 == 0);
++              bool prev_base_48k = (prev_rate % 48000 == 0);
++              if (cur_base_48k != prev_base_48k) {
++                      usb_set_interface(chip->dev, fmt->iface, fmt->altsetting);
++                      if (chip->quirk_flags & QUIRK_FLAG_IFACE_DELAY)
++                              msleep(50);
++              }
++      }
++
+ validation:
+       /* validate clock after rate change */
+       if (!uac_clock_source_is_valid(chip, fmt, clock))
+diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
+index 6d699065e81a2..b470404a5376c 100644
+--- a/sound/usb/pcm.c
++++ b/sound/usb/pcm.c
+@@ -439,16 +439,21 @@ static int configure_endpoints(struct snd_usb_audio *chip,
+               /* stop any running stream beforehand */
+               if (stop_endpoints(subs, false))
+                       sync_pending_stops(subs);
++              if (subs->sync_endpoint) {
++                      err = snd_usb_endpoint_configure(chip, subs->sync_endpoint);
++                      if (err < 0)
++                              return err;
++              }
+               err = snd_usb_endpoint_configure(chip, subs->data_endpoint);
+               if (err < 0)
+                       return err;
+               snd_usb_set_format_quirk(subs, subs->cur_audiofmt);
+-      }
+-
+-      if (subs->sync_endpoint) {
+-              err = snd_usb_endpoint_configure(chip, subs->sync_endpoint);
+-              if (err < 0)
+-                      return err;
++      } else {
++              if (subs->sync_endpoint) {
++                      err = snd_usb_endpoint_configure(chip, subs->sync_endpoint);
++                      if (err < 0)
++                              return err;
++              }
+       }
+ 
+       return 0;
+diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
+index 40a5e3eb4ef26..78eb41b621d63 100644
+--- a/sound/usb/quirks-table.h
++++ b/sound/usb/quirks-table.h
+@@ -2672,6 +2672,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+                                       .altset_idx = 1,
+                                       .attributes = 0,
+                                       .endpoint = 0x82,
++                                      .ep_idx = 1,
+                                       .ep_attr = USB_ENDPOINT_XFER_ISOC,
+                                       .datainterval = 1,
+                                       .maxpacksize = 0x0126,
+@@ -2875,6 +2876,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+                                       .altset_idx = 1,
+                                       .attributes = 0x4,
+                                       .endpoint = 0x81,
++                                      .ep_idx = 1,
+                                       .ep_attr = USB_ENDPOINT_XFER_ISOC |
+                                               USB_ENDPOINT_SYNC_ASYNC,
+                                       .maxpacksize = 0x130,
+@@ -3391,6 +3393,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+                                       .altset_idx = 1,
+                                       .attributes = 0,
+                                       .endpoint = 0x03,
++                                      .ep_idx = 1,
+                                       .rates = SNDRV_PCM_RATE_96000,
+                                       .ep_attr = USB_ENDPOINT_XFER_ISOC |
+                                                  USB_ENDPOINT_SYNC_ASYNC,
+diff --git a/tools/memory-model/README b/tools/memory-model/README
+index 9edd402704c4f..dab38904206a0 100644
+--- a/tools/memory-model/README
++++ b/tools/memory-model/README
+@@ -54,7 +54,8 @@ klitmus7 Compatibility Table
+            -- 4.14  7.48 --
+       4.15 -- 4.19  7.49 --
+       4.20 -- 5.5   7.54 --
+-      5.6  --       7.56 --
++      5.6  -- 5.16  7.56 --
++      5.17 --       7.56.1 --
+       ============  ==========
+ 
+ 
