commit:     be3f37519a9c9f700d9c4e582384b625b43a6996
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Apr 19 19:53:55 2019 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Apr 19 19:53:55 2019 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=be3f3751

Linux patch 4.9.169

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |    4 +
 1168_linux-4.9.169.patch | 3242 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 3246 insertions(+)

diff --git a/0000_README b/0000_README
index 31f02c0..3f7c1b9 100644
--- a/0000_README
+++ b/0000_README
@@ -715,6 +715,10 @@ Patch:  1167_linux-4.9.168.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.9.168
 
+Patch:  1168_linux-4.9.169.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.9.169
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1168_linux-4.9.169.patch b/1168_linux-4.9.169.patch
new file mode 100644
index 0000000..a466612
--- /dev/null
+++ b/1168_linux-4.9.169.patch
@@ -0,0 +1,3242 @@
+diff --git a/Makefile b/Makefile
+index f44094d2b147..23cc23c47adf 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 9
+-SUBLEVEL = 168
++SUBLEVEL = 169
+ EXTRAVERSION =
+ NAME = Roaring Lionus
+ 
+@@ -507,7 +507,7 @@ endif
+ ifeq ($(cc-name),clang)
+ ifneq ($(CROSS_COMPILE),)
+ CLANG_FLAGS   := --target=$(notdir $(CROSS_COMPILE:%-=%))
+-GCC_TOOLCHAIN_DIR := $(dir $(shell which $(LD)))
++GCC_TOOLCHAIN_DIR := $(dir $(shell which $(CROSS_COMPILE)elfedit))
+ CLANG_FLAGS   += --prefix=$(GCC_TOOLCHAIN_DIR)
+ GCC_TOOLCHAIN := $(realpath $(GCC_TOOLCHAIN_DIR)/..)
+ endif
+diff --git a/arch/arm/boot/dts/sama5d2-pinfunc.h b/arch/arm/boot/dts/sama5d2-pinfunc.h
+index 8a394f336003..ee65702f9645 100644
+--- a/arch/arm/boot/dts/sama5d2-pinfunc.h
++++ b/arch/arm/boot/dts/sama5d2-pinfunc.h
+@@ -517,7 +517,7 @@
+ #define PIN_PC9__GPIO                 PINMUX_PIN(PIN_PC9, 0, 0)
+ #define PIN_PC9__FIQ                  PINMUX_PIN(PIN_PC9, 1, 3)
+ #define PIN_PC9__GTSUCOMP             PINMUX_PIN(PIN_PC9, 2, 1)
+-#define PIN_PC9__ISC_D0                       PINMUX_PIN(PIN_PC9, 2, 1)
++#define PIN_PC9__ISC_D0                       PINMUX_PIN(PIN_PC9, 3, 1)
+ #define PIN_PC9__TIOA4                        PINMUX_PIN(PIN_PC9, 4, 2)
+ #define PIN_PC10                      74
+ #define PIN_PC10__GPIO                        PINMUX_PIN(PIN_PC10, 0, 0)
+diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
+index 2a5090fb9113..d7116f5935fb 100644
+--- a/arch/arm64/include/asm/futex.h
++++ b/arch/arm64/include/asm/futex.h
+@@ -33,8 +33,8 @@
+ "     prfm    pstl1strm, %2\n"                                        \
+ "1:   ldxr    %w1, %2\n"                                              \
+       insn "\n"                                                       \
+-"2:   stlxr   %w3, %w0, %2\n"                                         \
+-"     cbnz    %w3, 1b\n"                                              \
++"2:   stlxr   %w0, %w3, %2\n"                                         \
++"     cbnz    %w0, 1b\n"                                              \
+ "     dmb     ish\n"                                                  \
+ "3:\n"                                                                \
+ "     .pushsection .fixup,\"ax\"\n"                                   \
+@@ -53,29 +53,29 @@
+ static inline int
+ arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
+ {
+-      int oldval = 0, ret, tmp;
++      int oldval, ret, tmp;
+ 
+       pagefault_disable();
+ 
+       switch (op) {
+       case FUTEX_OP_SET:
+-              __futex_atomic_op("mov  %w0, %w4",
++              __futex_atomic_op("mov  %w3, %w4",
+                                 ret, oldval, uaddr, tmp, oparg);
+               break;
+       case FUTEX_OP_ADD:
+-              __futex_atomic_op("add  %w0, %w1, %w4",
++              __futex_atomic_op("add  %w3, %w1, %w4",
+                                 ret, oldval, uaddr, tmp, oparg);
+               break;
+       case FUTEX_OP_OR:
+-              __futex_atomic_op("orr  %w0, %w1, %w4",
++              __futex_atomic_op("orr  %w3, %w1, %w4",
+                                 ret, oldval, uaddr, tmp, oparg);
+               break;
+       case FUTEX_OP_ANDN:
+-              __futex_atomic_op("and  %w0, %w1, %w4",
++              __futex_atomic_op("and  %w3, %w1, %w4",
+                                 ret, oldval, uaddr, tmp, ~oparg);
+               break;
+       case FUTEX_OP_XOR:
+-              __futex_atomic_op("eor  %w0, %w1, %w4",
++              __futex_atomic_op("eor  %w3, %w1, %w4",
+                                 ret, oldval, uaddr, tmp, oparg);
+               break;
+       default:
+diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
+index fa6b2fad7a3d..5d3df68272f5 100644
+--- a/arch/arm64/mm/init.c
++++ b/arch/arm64/mm/init.c
+@@ -272,7 +272,7 @@ void __init arm64_memblock_init(void)
+                * memory spans, randomize the linear region as well.
+                */
+               if (memstart_offset_seed > 0 && range >= ARM64_MEMSTART_ALIGN) {
+-                      range = range / ARM64_MEMSTART_ALIGN + 1;
++                      range /= ARM64_MEMSTART_ALIGN;
+                       memstart_addr -= ARM64_MEMSTART_ALIGN *
+                                        ((range * memstart_offset_seed) >> 16);
+               }
+diff --git a/arch/parisc/include/asm/processor.h b/arch/parisc/include/asm/processor.h
+index 2e674e13e005..656984ec1958 100644
+--- a/arch/parisc/include/asm/processor.h
++++ b/arch/parisc/include/asm/processor.h
+@@ -323,6 +323,8 @@ extern int _parisc_requires_coherency;
+ #define parisc_requires_coherency()   (0)
+ #endif
+ 
++extern int running_on_qemu;
++
+ #endif /* __ASSEMBLY__ */
+ 
+ #endif /* __ASM_PARISC_PROCESSOR_H */
+diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
+index c3a532abac03..2e5216c28bb1 100644
+--- a/arch/parisc/kernel/process.c
++++ b/arch/parisc/kernel/process.c
+@@ -206,12 +206,6 @@ void __cpuidle arch_cpu_idle(void)
+ 
+ static int __init parisc_idle_init(void)
+ {
+-      const char *marker;
+-
+-      /* check QEMU/SeaBIOS marker in PAGE0 */
+-      marker = (char *) &PAGE0->pad0;
+-      running_on_qemu = (memcmp(marker, "SeaBIOS", 8) == 0);
+-
+       if (!running_on_qemu)
+               cpu_idle_poll_ctrl(1);
+ 
+diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c
+index 2e66a887788e..581b0c66e521 100644
+--- a/arch/parisc/kernel/setup.c
++++ b/arch/parisc/kernel/setup.c
+@@ -403,6 +403,9 @@ void start_parisc(void)
+       int ret, cpunum;
+       struct pdc_coproc_cfg coproc_cfg;
+ 
++      /* check QEMU/SeaBIOS marker in PAGE0 */
++      running_on_qemu = (memcmp(&PAGE0->pad0, "SeaBIOS", 8) == 0);
++
+       cpunum = smp_processor_id();
+ 
+       set_firmware_width_unlocked();
+diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c
+index 47ef8fdcd382..22754e0c3bda 100644
+--- a/arch/parisc/kernel/time.c
++++ b/arch/parisc/kernel/time.c
+@@ -299,7 +299,7 @@ static int __init init_cr16_clocksource(void)
+        * The cr16 interval timers are not syncronized across CPUs, so mark
+        * them unstable and lower rating on SMP systems.
+        */
+-      if (num_online_cpus() > 1) {
++      if (num_online_cpus() > 1 && !running_on_qemu) {
+               clocksource_cr16.flags = CLOCK_SOURCE_UNSTABLE;
+               clocksource_cr16.rating = 0;
+       }
+diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
+index 0a6bb48854e3..fa8f2aa88189 100644
+--- a/arch/powerpc/Kconfig
++++ b/arch/powerpc/Kconfig
+@@ -128,7 +128,7 @@ config PPC
+       select ARCH_HAS_GCOV_PROFILE_ALL
+       select GENERIC_SMP_IDLE_THREAD
+       select GENERIC_CMOS_UPDATE
+-      select GENERIC_CPU_VULNERABILITIES      if PPC_BOOK3S_64
++      select GENERIC_CPU_VULNERABILITIES      if PPC_BARRIER_NOSPEC
+       select GENERIC_TIME_VSYSCALL_OLD
+       select GENERIC_CLOCKEVENTS
+       select GENERIC_CLOCKEVENTS_BROADCAST if SMP
+@@ -164,6 +164,11 @@ config PPC
+       select HAVE_ARCH_HARDENED_USERCOPY
+       select HAVE_KERNEL_GZIP
+ 
++config PPC_BARRIER_NOSPEC
++    bool
++    default y
++    depends on PPC_BOOK3S_64 || PPC_FSL_BOOK3E
++
+ config GENERIC_CSUM
+       def_bool CPU_LITTLE_ENDIAN
+ 
+diff --git a/arch/powerpc/include/asm/asm-prototypes.h b/arch/powerpc/include/asm/asm-prototypes.h
+index e0baba1535e6..f3daa175f86c 100644
+--- a/arch/powerpc/include/asm/asm-prototypes.h
++++ b/arch/powerpc/include/asm/asm-prototypes.h
+@@ -121,4 +121,10 @@ extern s64 __ashrdi3(s64, int);
+ extern int __cmpdi2(s64, s64);
+ extern int __ucmpdi2(u64, u64);
+ 
++/* Patch sites */
++extern s32 patch__call_flush_count_cache;
++extern s32 patch__flush_count_cache_return;
++
++extern long flush_count_cache;
++
+ #endif /* _ASM_POWERPC_ASM_PROTOTYPES_H */
+diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
+index 798ab37c9930..80024c4f2093 100644
+--- a/arch/powerpc/include/asm/barrier.h
++++ b/arch/powerpc/include/asm/barrier.h
+@@ -77,6 +77,27 @@ do {                                                        \
+ 
+ #define smp_mb__before_spinlock()   smp_mb()
+ 
++#ifdef CONFIG_PPC_BOOK3S_64
++#define NOSPEC_BARRIER_SLOT   nop
++#elif defined(CONFIG_PPC_FSL_BOOK3E)
++#define NOSPEC_BARRIER_SLOT   nop; nop
++#endif
++
++#ifdef CONFIG_PPC_BARRIER_NOSPEC
++/*
++ * Prevent execution of subsequent instructions until preceding branches have
++ * been fully resolved and are no longer executing speculatively.
++ */
++#define barrier_nospec_asm NOSPEC_BARRIER_FIXUP_SECTION; NOSPEC_BARRIER_SLOT
++
++// This also acts as a compiler barrier due to the memory clobber.
++#define barrier_nospec() asm (stringify_in_c(barrier_nospec_asm) ::: "memory")
++
++#else /* !CONFIG_PPC_BARRIER_NOSPEC */
++#define barrier_nospec_asm
++#define barrier_nospec()
++#endif /* CONFIG_PPC_BARRIER_NOSPEC */
++
+ #include <asm-generic/barrier.h>
+ 
+ #endif /* _ASM_POWERPC_BARRIER_H */
+diff --git a/arch/powerpc/include/asm/code-patching-asm.h b/arch/powerpc/include/asm/code-patching-asm.h
+new file mode 100644
+index 000000000000..ed7b1448493a
+--- /dev/null
++++ b/arch/powerpc/include/asm/code-patching-asm.h
+@@ -0,0 +1,18 @@
++/* SPDX-License-Identifier: GPL-2.0+ */
++/*
++ * Copyright 2018, Michael Ellerman, IBM Corporation.
++ */
++#ifndef _ASM_POWERPC_CODE_PATCHING_ASM_H
++#define _ASM_POWERPC_CODE_PATCHING_ASM_H
++
++/* Define a "site" that can be patched */
++.macro patch_site label name
++      .pushsection ".rodata"
++      .balign 4
++      .global \name
++\name:
++      .4byte  \label - .
++      .popsection
++.endm
++
++#endif /* _ASM_POWERPC_CODE_PATCHING_ASM_H */
+diff --git a/arch/powerpc/include/asm/code-patching.h b/arch/powerpc/include/asm/code-patching.h
+index b4ab1f497335..ab934f8232bd 100644
+--- a/arch/powerpc/include/asm/code-patching.h
++++ b/arch/powerpc/include/asm/code-patching.h
+@@ -28,6 +28,8 @@ unsigned int create_cond_branch(const unsigned int *addr,
+                               unsigned long target, int flags);
+ int patch_branch(unsigned int *addr, unsigned long target, int flags);
+ int patch_instruction(unsigned int *addr, unsigned int instr);
++int patch_instruction_site(s32 *addr, unsigned int instr);
++int patch_branch_site(s32 *site, unsigned long target, int flags);
+ 
+ int instr_is_relative_branch(unsigned int instr);
+ int instr_is_relative_link_branch(unsigned int instr);
+diff --git a/arch/powerpc/include/asm/feature-fixups.h b/arch/powerpc/include/asm/feature-fixups.h
+index 0bf8202feca6..175128e19025 100644
+--- a/arch/powerpc/include/asm/feature-fixups.h
++++ b/arch/powerpc/include/asm/feature-fixups.h
+@@ -213,6 +213,25 @@ void setup_feature_keys(void);
+       FTR_ENTRY_OFFSET 951b-952b;                     \
+       .popsection;
+ 
++#define NOSPEC_BARRIER_FIXUP_SECTION                  \
++953:                                                  \
++      .pushsection __barrier_nospec_fixup,"a";        \
++      .align 2;                                       \
++954:                                                  \
++      FTR_ENTRY_OFFSET 953b-954b;                     \
++      .popsection;
++
++#define START_BTB_FLUSH_SECTION                       \
++955:                                                  \
++
++#define END_BTB_FLUSH_SECTION                 \
++956:                                                  \
++      .pushsection __btb_flush_fixup,"a";     \
++      .align 2;                                                       \
++957:                                          \
++      FTR_ENTRY_OFFSET 955b-957b;                     \
++      FTR_ENTRY_OFFSET 956b-957b;                     \
++      .popsection;
+ 
+ #ifndef __ASSEMBLY__
+ 
+@@ -220,6 +239,8 @@ extern long stf_barrier_fallback;
+extern long __start___stf_entry_barrier_fixup, __stop___stf_entry_barrier_fixup;
+ extern long __start___stf_exit_barrier_fixup, __stop___stf_exit_barrier_fixup;
+ extern long __start___rfi_flush_fixup, __stop___rfi_flush_fixup;
++extern long __start___barrier_nospec_fixup, __stop___barrier_nospec_fixup;
++extern long __start__btb_flush_fixup, __stop__btb_flush_fixup;
+ 
+ #endif
+ 
+diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
+index 9d978102bf0d..9587d301db55 100644
+--- a/arch/powerpc/include/asm/hvcall.h
++++ b/arch/powerpc/include/asm/hvcall.h
+@@ -316,10 +316,12 @@
+ #define H_CPU_CHAR_BRANCH_HINTS_HONORED       (1ull << 58) // IBM bit 5
+ #define H_CPU_CHAR_THREAD_RECONFIG_CTRL       (1ull << 57) // IBM bit 6
+ #define H_CPU_CHAR_COUNT_CACHE_DISABLED       (1ull << 56) // IBM bit 7
++#define H_CPU_CHAR_BCCTR_FLUSH_ASSIST (1ull << 54) // IBM bit 9
+ 
+ #define H_CPU_BEHAV_FAVOUR_SECURITY   (1ull << 63) // IBM bit 0
+ #define H_CPU_BEHAV_L1D_FLUSH_PR      (1ull << 62) // IBM bit 1
+ #define H_CPU_BEHAV_BNDS_CHK_SPEC_BAR (1ull << 61) // IBM bit 2
++#define H_CPU_BEHAV_FLUSH_COUNT_CACHE (1ull << 58) // IBM bit 5
+ 
+ #ifndef __ASSEMBLY__
+ #include <linux/types.h>
+diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
+index c73750b0d9fa..bbd35ba36a22 100644
+--- a/arch/powerpc/include/asm/ppc_asm.h
++++ b/arch/powerpc/include/asm/ppc_asm.h
+@@ -437,7 +437,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601)
+ .machine push ;                                       \
+ .machine "power4" ;                           \
+        lis     scratch,0x60000000@h;          \
+-       dcbt    r0,scratch,0b01010;            \
++       dcbt    0,scratch,0b01010;             \
+ .machine pop
+ 
+ /*
+@@ -780,4 +780,25 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601)
+       .long 0x2400004c  /* rfid                               */
+ #endif /* !CONFIG_PPC_BOOK3E */
+ #endif /*  __ASSEMBLY__ */
++
++/*
++ * Helper macro for exception table entries
++ */
++#define EX_TABLE(_fault, _target)             \
++      stringify_in_c(.section __ex_table,"a";)\
++      stringify_in_c(.balign 4;)              \
++      stringify_in_c(.long (_fault) - . ;)    \
++      stringify_in_c(.long (_target) - . ;)   \
++      stringify_in_c(.previous)
++
++#ifdef CONFIG_PPC_FSL_BOOK3E
++#define BTB_FLUSH(reg)                        \
++      lis reg,BUCSR_INIT@h;           \
++      ori reg,reg,BUCSR_INIT@l;       \
++      mtspr SPRN_BUCSR,reg;           \
++      isync;
++#else
++#define BTB_FLUSH(reg)
++#endif /* CONFIG_PPC_FSL_BOOK3E */
++
+ #endif /* _ASM_POWERPC_PPC_ASM_H */
+diff --git a/arch/powerpc/include/asm/security_features.h b/arch/powerpc/include/asm/security_features.h
+index 44989b22383c..759597bf0fd8 100644
+--- a/arch/powerpc/include/asm/security_features.h
++++ b/arch/powerpc/include/asm/security_features.h
+@@ -22,6 +22,7 @@ enum stf_barrier_type {
+ 
+ void setup_stf_barrier(void);
+ void do_stf_barrier_fixups(enum stf_barrier_type types);
++void setup_count_cache_flush(void);
+ 
+ static inline void security_ftr_set(unsigned long feature)
+ {
+@@ -59,6 +60,9 @@ static inline bool security_ftr_enabled(unsigned long feature)
+ // Indirect branch prediction cache disabled
+ #define SEC_FTR_COUNT_CACHE_DISABLED  0x0000000000000020ull
+ 
++// bcctr 2,0,0 triggers a hardware assisted count cache flush
++#define SEC_FTR_BCCTR_FLUSH_ASSIST    0x0000000000000800ull
++
+ 
+ // Features indicating need for Spectre/Meltdown mitigations
+ 
+@@ -74,6 +78,9 @@ static inline bool security_ftr_enabled(unsigned long feature)
+ // Firmware configuration indicates user favours security over performance
+ #define SEC_FTR_FAVOUR_SECURITY               0x0000000000000200ull
+ 
++// Software required to flush count cache on context switch
++#define SEC_FTR_FLUSH_COUNT_CACHE     0x0000000000000400ull
++
+ 
+ // Features enabled by default
+ #define SEC_FTR_DEFAULT \
+diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h
+index 3f160cd20107..862ebce3ae54 100644
+--- a/arch/powerpc/include/asm/setup.h
++++ b/arch/powerpc/include/asm/setup.h
+@@ -8,6 +8,7 @@ extern void ppc_printk_progress(char *s, unsigned short hex);
+ 
+ extern unsigned int rtas_data;
+ extern unsigned long long memory_limit;
++extern bool init_mem_is_free;
+ extern unsigned long klimit;
+ extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask);
+ 
+@@ -50,6 +51,26 @@ enum l1d_flush_type {
+ 
+ void setup_rfi_flush(enum l1d_flush_type, bool enable);
+ void do_rfi_flush_fixups(enum l1d_flush_type types);
++#ifdef CONFIG_PPC_BARRIER_NOSPEC
++void setup_barrier_nospec(void);
++#else
++static inline void setup_barrier_nospec(void) { };
++#endif
++void do_barrier_nospec_fixups(bool enable);
++extern bool barrier_nospec_enabled;
++
++#ifdef CONFIG_PPC_BARRIER_NOSPEC
++void do_barrier_nospec_fixups_range(bool enable, void *start, void *end);
++#else
++static inline void do_barrier_nospec_fixups_range(bool enable, void *start, void *end) { };
++#endif
++
++#ifdef CONFIG_PPC_FSL_BOOK3E
++void setup_spectre_v2(void);
++#else
++static inline void setup_spectre_v2(void) {};
++#endif
++void do_btb_flush_fixups(void);
+ 
+ #endif /* !__ASSEMBLY__ */
+ 
+diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
+index 31913b3ac7ab..da852153c1f8 100644
+--- a/arch/powerpc/include/asm/uaccess.h
++++ b/arch/powerpc/include/asm/uaccess.h
+@@ -269,6 +269,7 @@ do {                                                        \
+       __chk_user_ptr(ptr);                                    \
+       if (!is_kernel_addr((unsigned long)__gu_addr))          \
+               might_fault();                                  \
++      barrier_nospec();                                       \
+       __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
+       (x) = (__typeof__(*(ptr)))__gu_val;                     \
+       __gu_err;                                               \
+@@ -280,8 +281,10 @@ do {                                                       \
+       unsigned long  __gu_val = 0;                                    \
+       __typeof__(*(ptr)) __user *__gu_addr = (ptr);           \
+       might_fault();                                                  \
+-      if (access_ok(VERIFY_READ, __gu_addr, (size)))                  \
++      if (access_ok(VERIFY_READ, __gu_addr, (size))) {                \
++              barrier_nospec();                                       \
+               __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
++      }                                                               \
+      (x) = (__force __typeof__(*(ptr)))__gu_val;                       \
+       __gu_err;                                                       \
+ })
+@@ -292,6 +295,7 @@ do {                                                        \
+       unsigned long __gu_val;                                 \
+       __typeof__(*(ptr)) __user *__gu_addr = (ptr);   \
+       __chk_user_ptr(ptr);                                    \
++      barrier_nospec();                                       \
+       __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
+       (x) = (__force __typeof__(*(ptr)))__gu_val;                     \
+       __gu_err;                                               \
+@@ -348,15 +352,19 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
+ 
+               switch (n) {
+               case 1:
++                      barrier_nospec();
+                       __get_user_size(*(u8 *)to, from, 1, ret);
+                       break;
+               case 2:
++                      barrier_nospec();
+                       __get_user_size(*(u16 *)to, from, 2, ret);
+                       break;
+               case 4:
++                      barrier_nospec();
+                       __get_user_size(*(u32 *)to, from, 4, ret);
+                       break;
+               case 8:
++                      barrier_nospec();
+                       __get_user_size(*(u64 *)to, from, 8, ret);
+                       break;
+               }
+@@ -366,6 +374,7 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
+ 
+       check_object_size(to, n, false);
+ 
++      barrier_nospec();
+       return __copy_tofrom_user((__force void __user *)to, from, n);
+ }
+ 
+diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
+index 13885786282b..d80fbf0884ff 100644
+--- a/arch/powerpc/kernel/Makefile
++++ b/arch/powerpc/kernel/Makefile
+@@ -44,9 +44,10 @@ obj-$(CONFIG_PPC64)         += setup_64.o sys_ppc32.o \
+ obj-$(CONFIG_VDSO32)          += vdso32/
+ obj-$(CONFIG_HAVE_HW_BREAKPOINT)      += hw_breakpoint.o
+ obj-$(CONFIG_PPC_BOOK3S_64)   += cpu_setup_ppc970.o cpu_setup_pa6t.o
+-obj-$(CONFIG_PPC_BOOK3S_64)   += cpu_setup_power.o security.o
++obj-$(CONFIG_PPC_BOOK3S_64)   += cpu_setup_power.o
+ obj-$(CONFIG_PPC_BOOK3S_64)   += mce.o mce_power.o
+ obj-$(CONFIG_PPC_BOOK3E_64)   += exceptions-64e.o idle_book3e.o
++obj-$(CONFIG_PPC_BARRIER_NOSPEC) += security.o
+ obj-$(CONFIG_PPC64)           += vdso64/
+ obj-$(CONFIG_ALTIVEC)         += vecemu.o
+ obj-$(CONFIG_PPC_970_NAP)     += idle_power4.o
+diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
+index 370645687cc7..bdd88f9d7926 100644
+--- a/arch/powerpc/kernel/entry_32.S
++++ b/arch/powerpc/kernel/entry_32.S
+@@ -34,6 +34,7 @@
+ #include <asm/ftrace.h>
+ #include <asm/ptrace.h>
+ #include <asm/export.h>
++#include <asm/barrier.h>
+ 
+ /*
+  * MSR_KERNEL is > 0x10000 on 4xx/Book-E since it include MSR_CE.
+@@ -347,6 +348,15 @@ syscall_dotrace_cont:
+       ori     r10,r10,sys_call_table@l
+       slwi    r0,r0,2
+       bge-    66f
++
++      barrier_nospec_asm
++      /*
++       * Prevent the load of the handler below (based on the user-passed
++       * system call number) being speculatively executed until the test
++       * against NR_syscalls and branch to .66f above has
++       * committed.
++       */
++
+       lwzx    r10,r10,r0      /* Fetch system call handler [ptr] */
+       mtlr    r10
+       addi    r9,r1,STACK_FRAME_OVERHEAD
+diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
+index e24ae0fa80ed..390ebf4ef384 100644
+--- a/arch/powerpc/kernel/entry_64.S
++++ b/arch/powerpc/kernel/entry_64.S
+@@ -26,6 +26,7 @@
+ #include <asm/page.h>
+ #include <asm/mmu.h>
+ #include <asm/thread_info.h>
++#include <asm/code-patching-asm.h>
+ #include <asm/ppc_asm.h>
+ #include <asm/asm-offsets.h>
+ #include <asm/cputable.h>
+@@ -38,6 +39,7 @@
+ #include <asm/context_tracking.h>
+ #include <asm/tm.h>
+ #include <asm/ppc-opcode.h>
++#include <asm/barrier.h>
+ #include <asm/export.h>
+ #ifdef CONFIG_PPC_BOOK3S
+ #include <asm/exception-64s.h>
+@@ -78,6 +80,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_TM)
+       std     r0,GPR0(r1)
+       std     r10,GPR1(r1)
+       beq     2f                      /* if from kernel mode */
++#ifdef CONFIG_PPC_FSL_BOOK3E
++START_BTB_FLUSH_SECTION
++      BTB_FLUSH(r10)
++END_BTB_FLUSH_SECTION
++#endif
+       ACCOUNT_CPU_USER_ENTRY(r13, r10, r11)
+ 2:    std     r2,GPR2(r1)
+       std     r3,GPR3(r1)
+@@ -180,6 +187,15 @@ system_call:                      /* label this so stack traces look sane */
+       clrldi  r8,r8,32
+ 15:
+       slwi    r0,r0,4
++
++      barrier_nospec_asm
++      /*
++       * Prevent the load of the handler below (based on the user-passed
++       * system call number) being speculatively executed until the test
++       * against NR_syscalls and branch to .Lsyscall_enosys above has
++       * committed.
++       */
++
+       ldx     r12,r11,r0      /* Fetch system call handler [ptr] */
+       mtctr   r12
+       bctrl                   /* Call handler */
+@@ -473,6 +489,57 @@ _GLOBAL(ret_from_kernel_thread)
+       li      r3,0
+       b       .Lsyscall_exit
+ 
++#ifdef CONFIG_PPC_BOOK3S_64
++
++#define FLUSH_COUNT_CACHE     \
++1:    nop;                    \
++      patch_site 1b, patch__call_flush_count_cache
++
++
++#define BCCTR_FLUSH   .long 0x4c400420
++
++.macro nops number
++      .rept \number
++      nop
++      .endr
++.endm
++
++.balign 32
++.global flush_count_cache
++flush_count_cache:
++      /* Save LR into r9 */
++      mflr    r9
++
++      .rept 64
++      bl      .+4
++      .endr
++      b       1f
++      nops    6
++
++      .balign 32
++      /* Restore LR */
++1:    mtlr    r9
++      li      r9,0x7fff
++      mtctr   r9
++
++      BCCTR_FLUSH
++
++2:    nop
++      patch_site 2b patch__flush_count_cache_return
++
++      nops    3
++
++      .rept 278
++      .balign 32
++      BCCTR_FLUSH
++      nops    7
++      .endr
++
++      blr
++#else
++#define FLUSH_COUNT_CACHE
++#endif /* CONFIG_PPC_BOOK3S_64 */
++
+ /*
+  * This routine switches between two different tasks.  The process
+  * state of one is saved on its kernel stack.  Then the state
+@@ -504,6 +571,8 @@ _GLOBAL(_switch)
+       std     r23,_CCR(r1)
+       std     r1,KSP(r3)      /* Set old stack pointer */
+ 
++      FLUSH_COUNT_CACHE
++
+ #ifdef CONFIG_SMP
+       /* We need a sync somewhere here to make sure that if the
+        * previous task gets rescheduled on another CPU, it sees all
+diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
+index ca03eb229a9a..423b5257d3a1 100644
+--- a/arch/powerpc/kernel/exceptions-64e.S
++++ b/arch/powerpc/kernel/exceptions-64e.S
+@@ -295,7 +295,8 @@ ret_from_mc_except:
+       andi.   r10,r11,MSR_PR;         /* save stack pointer */            \
+       beq     1f;                     /* branch around if supervisor */   \
+       ld      r1,PACAKSAVE(r13);      /* get kernel stack coming from usr */\
+-1:    cmpdi   cr1,r1,0;               /* check if SP makes sense */       \
++1:    type##_BTB_FLUSH                \
++      cmpdi   cr1,r1,0;               /* check if SP makes sense */       \
+       bge-    cr1,exc_##n##_bad_stack;/* bad stack (TODO: out of line) */ \
+       mfspr   r10,SPRN_##type##_SRR0; /* read SRR0 before touching stack */
+ 
+@@ -327,6 +328,30 @@ ret_from_mc_except:
+ #define SPRN_MC_SRR0  SPRN_MCSRR0
+ #define SPRN_MC_SRR1  SPRN_MCSRR1
+ 
++#ifdef CONFIG_PPC_FSL_BOOK3E
++#define GEN_BTB_FLUSH                 \
++      START_BTB_FLUSH_SECTION         \
++              beq 1f;                 \
++              BTB_FLUSH(r10)                  \
++              1:              \
++      END_BTB_FLUSH_SECTION
++
++#define CRIT_BTB_FLUSH                        \
++      START_BTB_FLUSH_SECTION         \
++              BTB_FLUSH(r10)          \
++      END_BTB_FLUSH_SECTION
++
++#define DBG_BTB_FLUSH CRIT_BTB_FLUSH
++#define MC_BTB_FLUSH CRIT_BTB_FLUSH
++#define GDBELL_BTB_FLUSH GEN_BTB_FLUSH
++#else
++#define GEN_BTB_FLUSH
++#define CRIT_BTB_FLUSH
++#define DBG_BTB_FLUSH
++#define MC_BTB_FLUSH
++#define GDBELL_BTB_FLUSH
++#endif
++
+ #define NORMAL_EXCEPTION_PROLOG(n, intnum, addition)                      \
+       EXCEPTION_PROLOG(n, intnum, GEN, addition##_GEN(n))
+ 
+diff --git a/arch/powerpc/kernel/head_booke.h b/arch/powerpc/kernel/head_booke.h
+index a620203f7de3..7b98c7351f6c 100644
+--- a/arch/powerpc/kernel/head_booke.h
++++ b/arch/powerpc/kernel/head_booke.h
+@@ -31,6 +31,16 @@
+  */
+ #define THREAD_NORMSAVE(offset)       (THREAD_NORMSAVES + (offset * 4))
+ 
++#ifdef CONFIG_PPC_FSL_BOOK3E
++#define BOOKE_CLEAR_BTB(reg)                                          \
++START_BTB_FLUSH_SECTION                                               \
++      BTB_FLUSH(reg)                                                  \
++END_BTB_FLUSH_SECTION
++#else
++#define BOOKE_CLEAR_BTB(reg)
++#endif
++
++
+ #define NORMAL_EXCEPTION_PROLOG(intno)                                     \
+       mtspr   SPRN_SPRG_WSCRATCH0, r10;       /* save one register */      \
+       mfspr   r10, SPRN_SPRG_THREAD;                                       \
+@@ -42,6 +52,7 @@
+       andi.   r11, r11, MSR_PR;       /* check whether user or kernel    */\
+       mr      r11, r1;                                                     \
+       beq     1f;                                                          \
++      BOOKE_CLEAR_BTB(r11)                                            \
+       /* if from user, start at top of this thread's kernel stack */       \
+       lwz     r11, THREAD_INFO-THREAD(r10);                                \
+       ALLOC_STACK_FRAME(r11, THREAD_SIZE);                                 \
+@@ -127,6 +138,7 @@
+       stw     r9,_CCR(r8);            /* save CR on stack                */\
+       mfspr   r11,exc_level_srr1;     /* check whether user or kernel    */\
+       DO_KVM  BOOKE_INTERRUPT_##intno exc_level_srr1;                      \
++      BOOKE_CLEAR_BTB(r10)                                            \
+       andi.   r11,r11,MSR_PR;                                              \
+       mfspr   r11,SPRN_SPRG_THREAD;   /* if from user, start at top of   */\
+       lwz     r11,THREAD_INFO-THREAD(r11); /* this thread's kernel stack */\
+diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
+index bf4c6021515f..60a0aeefc4a7 100644
+--- a/arch/powerpc/kernel/head_fsl_booke.S
++++ b/arch/powerpc/kernel/head_fsl_booke.S
+@@ -452,6 +452,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
+       mfcr    r13
+       stw     r13, THREAD_NORMSAVE(3)(r10)
+       DO_KVM  BOOKE_INTERRUPT_DTLB_MISS SPRN_SRR1
++START_BTB_FLUSH_SECTION
++      mfspr r11, SPRN_SRR1
++      andi. r10,r11,MSR_PR
++      beq 1f
++      BTB_FLUSH(r10)
++1:
++END_BTB_FLUSH_SECTION
+       mfspr   r10, SPRN_DEAR          /* Get faulting address */
+ 
+       /* If we are faulting a kernel address, we have to use the
+@@ -546,6 +553,14 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
+       mfcr    r13
+       stw     r13, THREAD_NORMSAVE(3)(r10)
+       DO_KVM  BOOKE_INTERRUPT_ITLB_MISS SPRN_SRR1
++START_BTB_FLUSH_SECTION
++      mfspr r11, SPRN_SRR1
++      andi. r10,r11,MSR_PR
++      beq 1f
++      BTB_FLUSH(r10)
++1:
++END_BTB_FLUSH_SECTION
++
+       mfspr   r10, SPRN_SRR0          /* Get faulting address */
+ 
+       /* If we are faulting a kernel address, we have to use the
+diff --git a/arch/powerpc/kernel/module.c b/arch/powerpc/kernel/module.c
+index 30b89d5cbb03..3b1c3bb91025 100644
+--- a/arch/powerpc/kernel/module.c
++++ b/arch/powerpc/kernel/module.c
+@@ -72,7 +72,15 @@ int module_finalize(const Elf_Ehdr *hdr,
+               do_feature_fixups(powerpc_firmware_features,
+                                 (void *)sect->sh_addr,
+                                 (void *)sect->sh_addr + sect->sh_size);
+-#endif
++#endif /* CONFIG_PPC64 */
++
++#ifdef CONFIG_PPC_BARRIER_NOSPEC
++      sect = find_section(hdr, sechdrs, "__spec_barrier_fixup");
++      if (sect != NULL)
++              do_barrier_nospec_fixups_range(barrier_nospec_enabled,
++                                (void *)sect->sh_addr,
++                                (void *)sect->sh_addr + sect->sh_size);
++#endif /* CONFIG_PPC_BARRIER_NOSPEC */
+ 
+       sect = find_section(hdr, sechdrs, "__lwsync_fixup");
+       if (sect != NULL)
+diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c
+index 2277df84ef6e..30542e833ebe 100644
+--- a/arch/powerpc/kernel/security.c
++++ b/arch/powerpc/kernel/security.c
+@@ -9,11 +9,121 @@
+ #include <linux/device.h>
+ #include <linux/seq_buf.h>
+ 
++#include <asm/asm-prototypes.h>
++#include <asm/code-patching.h>
++#include <asm/debug.h>
+ #include <asm/security_features.h>
++#include <asm/setup.h>
+ 
+ 
+ unsigned long powerpc_security_features __read_mostly = SEC_FTR_DEFAULT;
+ 
++enum count_cache_flush_type {
++      COUNT_CACHE_FLUSH_NONE  = 0x1,
++      COUNT_CACHE_FLUSH_SW    = 0x2,
++      COUNT_CACHE_FLUSH_HW    = 0x4,
++};
++static enum count_cache_flush_type count_cache_flush_type = COUNT_CACHE_FLUSH_NONE;
++
++bool barrier_nospec_enabled;
++static bool no_nospec;
++static bool btb_flush_enabled;
++#ifdef CONFIG_PPC_FSL_BOOK3E
++static bool no_spectrev2;
++#endif
++
++static void enable_barrier_nospec(bool enable)
++{
++      barrier_nospec_enabled = enable;
++      do_barrier_nospec_fixups(enable);
++}
++
++void setup_barrier_nospec(void)
++{
++      bool enable;
++
++      /*
++       * It would make sense to check SEC_FTR_SPEC_BAR_ORI31 below as well.
++       * But there's a good reason not to. The two flags we check below are
++       * both are enabled by default in the kernel, so if the hcall is not
++       * functional they will be enabled.
++       * On a system where the host firmware has been updated (so the ori
++       * functions as a barrier), but on which the hypervisor (KVM/Qemu) has
++       * not been updated, we would like to enable the barrier. Dropping the
++       * check for SEC_FTR_SPEC_BAR_ORI31 achieves that. The only downside is
++       * we potentially enable the barrier on systems where the host firmware
++       * is not updated, but that's harmless as it's a no-op.
++       */
++      enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
++               security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR);
++
++      if (!no_nospec)
++              enable_barrier_nospec(enable);
++}
++
++static int __init handle_nospectre_v1(char *p)
++{
++      no_nospec = true;
++
++      return 0;
++}
++early_param("nospectre_v1", handle_nospectre_v1);
++
++#ifdef CONFIG_DEBUG_FS
++static int barrier_nospec_set(void *data, u64 val)
++{
++      switch (val) {
++      case 0:
++      case 1:
++              break;
++      default:
++              return -EINVAL;
++      }
++
++      if (!!val == !!barrier_nospec_enabled)
++              return 0;
++
++      enable_barrier_nospec(!!val);
++
++      return 0;
++}
++
++static int barrier_nospec_get(void *data, u64 *val)
++{
++      *val = barrier_nospec_enabled ? 1 : 0;
++      return 0;
++}
++
++DEFINE_SIMPLE_ATTRIBUTE(fops_barrier_nospec,
++                      barrier_nospec_get, barrier_nospec_set, "%llu\n");
++
++static __init int barrier_nospec_debugfs_init(void)
++{
++      debugfs_create_file("barrier_nospec", 0600, powerpc_debugfs_root, NULL,
++                          &fops_barrier_nospec);
++      return 0;
++}
++device_initcall(barrier_nospec_debugfs_init);
++#endif /* CONFIG_DEBUG_FS */
++
++#ifdef CONFIG_PPC_FSL_BOOK3E
++static int __init handle_nospectre_v2(char *p)
++{
++      no_spectrev2 = true;
++
++      return 0;
++}
++early_param("nospectre_v2", handle_nospectre_v2);
++void setup_spectre_v2(void)
++{
++      if (no_spectrev2)
++              do_btb_flush_fixups();
++      else
++              btb_flush_enabled = true;
++}
++#endif /* CONFIG_PPC_FSL_BOOK3E */
++
++#ifdef CONFIG_PPC_BOOK3S_64
+ ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
+ {
+       bool thread_priv;
+@@ -46,25 +156,39 @@ ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, cha
+ 
+       return sprintf(buf, "Vulnerable\n");
+ }
++#endif
+ 
+ ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
+ {
+-      if (!security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR))
+-              return sprintf(buf, "Not affected\n");
++      struct seq_buf s;
+ 
+-      return sprintf(buf, "Vulnerable\n");
++      seq_buf_init(&s, buf, PAGE_SIZE - 1);
++
++      if (security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR)) {
++              if (barrier_nospec_enabled)
++                      seq_buf_printf(&s, "Mitigation: __user pointer sanitization");
++              else
++                      seq_buf_printf(&s, "Vulnerable");
++
++              if (security_ftr_enabled(SEC_FTR_SPEC_BAR_ORI31))
++                      seq_buf_printf(&s, ", ori31 speculation barrier enabled");
++
++              seq_buf_printf(&s, "\n");
++      } else
++              seq_buf_printf(&s, "Not affected\n");
++
++      return s.len;
+ }
+ 
+ ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
+ {
+-      bool bcs, ccd, ori;
+       struct seq_buf s;
++      bool bcs, ccd;
+ 
+       seq_buf_init(&s, buf, PAGE_SIZE - 1);
+ 
+       bcs = security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED);
+       ccd = security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED);
+-      ori = security_ftr_enabled(SEC_FTR_SPEC_BAR_ORI31);
+ 
+       if (bcs || ccd) {
+               seq_buf_printf(&s, "Mitigation: ");
+@@ -77,17 +201,23 @@ ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, c
+ 
+               if (ccd)
+                       seq_buf_printf(&s, "Indirect branch cache disabled");
+-      } else
++      } else if (count_cache_flush_type != COUNT_CACHE_FLUSH_NONE) {
++              seq_buf_printf(&s, "Mitigation: Software count cache flush");
++
++              if (count_cache_flush_type == COUNT_CACHE_FLUSH_HW)
++                      seq_buf_printf(&s, " (hardware accelerated)");
++      } else if (btb_flush_enabled) {
++              seq_buf_printf(&s, "Mitigation: Branch predictor state flush");
++      } else {
+               seq_buf_printf(&s, "Vulnerable");
+-
+-      if (ori)
+-              seq_buf_printf(&s, ", ori31 speculation barrier enabled");
++      }
+ 
+       seq_buf_printf(&s, "\n");
+ 
+       return s.len;
+ }
+ 
++#ifdef CONFIG_PPC_BOOK3S_64
+ /*
+  * Store-forwarding barrier support.
+  */
+@@ -235,3 +365,71 @@ static __init int stf_barrier_debugfs_init(void)
+ }
+ device_initcall(stf_barrier_debugfs_init);
+ #endif /* CONFIG_DEBUG_FS */
++
++static void toggle_count_cache_flush(bool enable)
++{
++      if (!enable || !security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE)) {
++              patch_instruction_site(&patch__call_flush_count_cache, PPC_INST_NOP);
++              count_cache_flush_type = COUNT_CACHE_FLUSH_NONE;
++              pr_info("count-cache-flush: software flush disabled.\n");
++              return;
++      }
++
++      patch_branch_site(&patch__call_flush_count_cache,
++                        (u64)&flush_count_cache, BRANCH_SET_LINK);
++
++      if (!security_ftr_enabled(SEC_FTR_BCCTR_FLUSH_ASSIST)) {
++              count_cache_flush_type = COUNT_CACHE_FLUSH_SW;
++              pr_info("count-cache-flush: full software flush sequence enabled.\n");
++              return;
++      }
++
++      patch_instruction_site(&patch__flush_count_cache_return, PPC_INST_BLR);
++      count_cache_flush_type = COUNT_CACHE_FLUSH_HW;
++      pr_info("count-cache-flush: hardware assisted flush sequence enabled\n");
++}
++
++void setup_count_cache_flush(void)
++{
++      toggle_count_cache_flush(true);
++}
++
++#ifdef CONFIG_DEBUG_FS
++static int count_cache_flush_set(void *data, u64 val)
++{
++      bool enable;
++
++      if (val == 1)
++              enable = true;
++      else if (val == 0)
++              enable = false;
++      else
++              return -EINVAL;
++
++      toggle_count_cache_flush(enable);
++
++      return 0;
++}
++
++static int count_cache_flush_get(void *data, u64 *val)
++{
++      if (count_cache_flush_type == COUNT_CACHE_FLUSH_NONE)
++              *val = 0;
++      else
++              *val = 1;
++
++      return 0;
++}
++
++DEFINE_SIMPLE_ATTRIBUTE(fops_count_cache_flush, count_cache_flush_get,
++                      count_cache_flush_set, "%llu\n");
++
++static __init int count_cache_flush_debugfs_init(void)
++{
++      debugfs_create_file("count_cache_flush", 0600, powerpc_debugfs_root,
++                          NULL, &fops_count_cache_flush);
++      return 0;
++}
++device_initcall(count_cache_flush_debugfs_init);
++#endif /* CONFIG_DEBUG_FS */
++#endif /* CONFIG_PPC_BOOK3S_64 */
+diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
+index bf0f712ac0e0..5e7d70c5d065 100644
+--- a/arch/powerpc/kernel/setup-common.c
++++ b/arch/powerpc/kernel/setup-common.c
+@@ -918,6 +918,9 @@ void __init setup_arch(char **cmdline_p)
+       if (ppc_md.setup_arch)
+               ppc_md.setup_arch();
+ 
++      setup_barrier_nospec();
++      setup_spectre_v2();
++
+       paging_init();
+ 
+       /* Initialize the MMU context management stuff. */
+diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
+index d929afab7b24..bdf2f7b995bb 100644
+--- a/arch/powerpc/kernel/signal_64.c
++++ b/arch/powerpc/kernel/signal_64.c
+@@ -746,12 +746,25 @@ int sys_rt_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5,
+               if (restore_tm_sigcontexts(current, &uc->uc_mcontext,
+                                          &uc_transact->uc_mcontext))
+                       goto badframe;
+-      }
+-      else
+-      /* Fall through, for non-TM restore */
++      } else
+ #endif
+-      if (restore_sigcontext(current, NULL, 1, &uc->uc_mcontext))
+-              goto badframe;
++      {
++              /*
++               * Fall through, for non-TM restore
++               *
++               * Unset MSR[TS] on the thread regs since MSR from user
++               * context does not have MSR active, and recheckpoint was
++               * not called since restore_tm_sigcontexts() was not called
++               * also.
++               *
++               * If not unsetting it, the code can RFID to userspace with
++               * MSR[TS] set, but without CPU in the proper state,
++               * causing a TM bad thing.
++               */
++              current->thread.regs->msr &= ~MSR_TS_MASK;
++              if (restore_sigcontext(current, NULL, 1, &uc->uc_mcontext))
++                      goto badframe;
++      }
+ 
+       if (restore_altstack(&uc->uc_stack))
+               goto badframe;
+diff --git a/arch/powerpc/kernel/swsusp_asm64.S b/arch/powerpc/kernel/swsusp_asm64.S
+index 988f38dced0f..82d8aae81c6a 100644
+--- a/arch/powerpc/kernel/swsusp_asm64.S
++++ b/arch/powerpc/kernel/swsusp_asm64.S
+@@ -179,7 +179,7 @@ nothing_to_copy:
+       sld     r3, r3, r0
+       li      r0, 0
+ 1:
+-      dcbf    r0,r3
++      dcbf    0,r3
+       addi    r3,r3,0x20
+       bdnz    1b
+ 
+diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
+index c16fddbb6ab8..50d365060855 100644
+--- a/arch/powerpc/kernel/vmlinux.lds.S
++++ b/arch/powerpc/kernel/vmlinux.lds.S
+@@ -153,8 +153,25 @@ SECTIONS
+               *(__rfi_flush_fixup)
+               __stop___rfi_flush_fixup = .;
+       }
+-#endif
++#endif /* CONFIG_PPC64 */
++
++#ifdef CONFIG_PPC_BARRIER_NOSPEC
++      . = ALIGN(8);
++      __spec_barrier_fixup : AT(ADDR(__spec_barrier_fixup) - LOAD_OFFSET) {
++              __start___barrier_nospec_fixup = .;
++              *(__barrier_nospec_fixup)
++              __stop___barrier_nospec_fixup = .;
++      }
++#endif /* CONFIG_PPC_BARRIER_NOSPEC */
+ 
++#ifdef CONFIG_PPC_FSL_BOOK3E
++      . = ALIGN(8);
++      __spec_btb_flush_fixup : AT(ADDR(__spec_btb_flush_fixup) - LOAD_OFFSET) {
++              __start__btb_flush_fixup = .;
++              *(__btb_flush_fixup)
++              __stop__btb_flush_fixup = .;
++      }
++#endif
+       EXCEPTION_TABLE(0)
+ 
+       NOTES :kernel :notes
+diff --git a/arch/powerpc/kvm/bookehv_interrupts.S b/arch/powerpc/kvm/bookehv_interrupts.S
+index 81bd8a07aa51..612b7f6a887f 100644
+--- a/arch/powerpc/kvm/bookehv_interrupts.S
++++ b/arch/powerpc/kvm/bookehv_interrupts.S
+@@ -75,6 +75,10 @@
+       PPC_LL  r1, VCPU_HOST_STACK(r4)
+       PPC_LL  r2, HOST_R2(r1)
+ 
++START_BTB_FLUSH_SECTION
++      BTB_FLUSH(r10)
++END_BTB_FLUSH_SECTION
++
+       mfspr   r10, SPRN_PID
+       lwz     r8, VCPU_HOST_PID(r4)
+       PPC_LL  r11, VCPU_SHARED(r4)
+diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c
+index 990db69a1d0b..fa88f641ac03 100644
+--- a/arch/powerpc/kvm/e500_emulate.c
++++ b/arch/powerpc/kvm/e500_emulate.c
+@@ -277,6 +277,13 @@ int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu *vcpu, int sprn, ulong spr_va
+               vcpu->arch.pwrmgtcr0 = spr_val;
+               break;
+ 
++      case SPRN_BUCSR:
++              /*
++               * If we are here, it means that we have already flushed the
++               * branch predictor, so just return to guest.
++               */
++              break;
++
+       /* extra exceptions */
+ #ifdef CONFIG_SPE_POSSIBLE
+       case SPRN_IVOR32:
+diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
+index 753d591f1b52..14535ad4cdd1 100644
+--- a/arch/powerpc/lib/code-patching.c
++++ b/arch/powerpc/lib/code-patching.c
+@@ -14,12 +14,20 @@
+ #include <asm/page.h>
+ #include <asm/code-patching.h>
+ #include <asm/uaccess.h>
++#include <asm/setup.h>
++#include <asm/sections.h>
+ 
+ 
+ int patch_instruction(unsigned int *addr, unsigned int instr)
+ {
+       int err;
+ 
++      /* Make sure we aren't patching a freed init section */
++      if (init_mem_is_free && init_section_contains(addr, 4)) {
++              pr_debug("Skipping init section patching addr: 0x%px\n", addr);
++              return 0;
++      }
++
+       __put_user_size(instr, addr, 4, err);
+       if (err)
+               return err;
+@@ -32,6 +40,22 @@ int patch_branch(unsigned int *addr, unsigned long target, int flags)
+       return patch_instruction(addr, create_branch(addr, target, flags));
+ }
+ 
++int patch_branch_site(s32 *site, unsigned long target, int flags)
++{
++      unsigned int *addr;
++
++      addr = (unsigned int *)((unsigned long)site + *site);
++      return patch_instruction(addr, create_branch(addr, target, flags));
++}
++
++int patch_instruction_site(s32 *site, unsigned int instr)
++{
++      unsigned int *addr;
++
++      addr = (unsigned int *)((unsigned long)site + *site);
++      return patch_instruction(addr, instr);
++}
++
+ unsigned int create_branch(const unsigned int *addr,
+                          unsigned long target, int flags)
+ {
+diff --git a/arch/powerpc/lib/copypage_power7.S b/arch/powerpc/lib/copypage_power7.S
+index a84d333ecb09..ca5fc8fa7efc 100644
+--- a/arch/powerpc/lib/copypage_power7.S
++++ b/arch/powerpc/lib/copypage_power7.S
+@@ -45,13 +45,13 @@ _GLOBAL(copypage_power7)
+ .machine push
+ .machine "power4"
+       /* setup read stream 0  */
+-      dcbt    r0,r4,0b01000   /* addr from */
+-      dcbt    r0,r7,0b01010   /* length and depth from */
++      dcbt    0,r4,0b01000    /* addr from */
++      dcbt    0,r7,0b01010   /* length and depth from */
+       /* setup write stream 1 */
+-      dcbtst  r0,r9,0b01000   /* addr to */
+-      dcbtst  r0,r10,0b01010  /* length and depth to */
++      dcbtst  0,r9,0b01000   /* addr to */
++      dcbtst  0,r10,0b01010  /* length and depth to */
+       eieio
+-      dcbt    r0,r8,0b01010   /* all streams GO */
++      dcbt    0,r8,0b01010    /* all streams GO */
+ .machine pop
+ 
+ #ifdef CONFIG_ALTIVEC
+@@ -83,7 +83,7 @@ _GLOBAL(copypage_power7)
+       li      r12,112
+ 
+       .align  5
+-1:    lvx     v7,r0,r4
++1:    lvx     v7,0,r4
+       lvx     v6,r4,r6
+       lvx     v5,r4,r7
+       lvx     v4,r4,r8
+@@ -92,7 +92,7 @@ _GLOBAL(copypage_power7)
+       lvx     v1,r4,r11
+       lvx     v0,r4,r12
+       addi    r4,r4,128
+-      stvx    v7,r0,r3
++      stvx    v7,0,r3
+       stvx    v6,r3,r6
+       stvx    v5,r3,r7
+       stvx    v4,r3,r8
+diff --git a/arch/powerpc/lib/copyuser_power7.S b/arch/powerpc/lib/copyuser_power7.S
+index da0c568d18c4..391694814691 100644
+--- a/arch/powerpc/lib/copyuser_power7.S
++++ b/arch/powerpc/lib/copyuser_power7.S
+@@ -327,13 +327,13 @@ err1;    stb     r0,0(r3)
+ .machine push
+ .machine "power4"
+       /* setup read stream 0 */
+-      dcbt    r0,r6,0b01000   /* addr from */
+-      dcbt    r0,r7,0b01010   /* length and depth from */
++      dcbt    0,r6,0b01000   /* addr from */
++      dcbt    0,r7,0b01010   /* length and depth from */
+       /* setup write stream 1 */
+-      dcbtst  r0,r9,0b01000   /* addr to */
+-      dcbtst  r0,r10,0b01010  /* length and depth to */
++      dcbtst  0,r9,0b01000   /* addr to */
++      dcbtst  0,r10,0b01010  /* length and depth to */
+       eieio
+-      dcbt    r0,r8,0b01010   /* all streams GO */
++      dcbt    0,r8,0b01010    /* all streams GO */
+ .machine pop
+ 
+       beq     cr1,.Lunwind_stack_nonvmx_copy
+@@ -388,26 +388,26 @@ err3;    std     r0,0(r3)
+       li      r11,48
+ 
+       bf      cr7*4+3,5f
+-err3; lvx     v1,r0,r4
++err3; lvx     v1,0,r4
+       addi    r4,r4,16
+-err3; stvx    v1,r0,r3
++err3; stvx    v1,0,r3
+       addi    r3,r3,16
+ 
+ 5:    bf      cr7*4+2,6f
+-err3; lvx     v1,r0,r4
++err3; lvx     v1,0,r4
+ err3; lvx     v0,r4,r9
+       addi    r4,r4,32
+-err3; stvx    v1,r0,r3
++err3; stvx    v1,0,r3
+ err3; stvx    v0,r3,r9
+       addi    r3,r3,32
+ 
+ 6:    bf      cr7*4+1,7f
+-err3; lvx     v3,r0,r4
++err3; lvx     v3,0,r4
+ err3; lvx     v2,r4,r9
+ err3; lvx     v1,r4,r10
+ err3; lvx     v0,r4,r11
+       addi    r4,r4,64
+-err3; stvx    v3,r0,r3
++err3; stvx    v3,0,r3
+ err3; stvx    v2,r3,r9
+ err3; stvx    v1,r3,r10
+ err3; stvx    v0,r3,r11
+@@ -433,7 +433,7 @@ err3;      stvx    v0,r3,r11
+        */
+       .align  5
+ 8:
+-err4; lvx     v7,r0,r4
++err4; lvx     v7,0,r4
+ err4; lvx     v6,r4,r9
+ err4; lvx     v5,r4,r10
+ err4; lvx     v4,r4,r11
+@@ -442,7 +442,7 @@ err4;      lvx     v2,r4,r14
+ err4; lvx     v1,r4,r15
+ err4; lvx     v0,r4,r16
+       addi    r4,r4,128
+-err4; stvx    v7,r0,r3
++err4; stvx    v7,0,r3
+ err4; stvx    v6,r3,r9
+ err4; stvx    v5,r3,r10
+ err4; stvx    v4,r3,r11
+@@ -463,29 +463,29 @@ err4;    stvx    v0,r3,r16
+       mtocrf  0x01,r6
+ 
+       bf      cr7*4+1,9f
+-err3; lvx     v3,r0,r4
++err3; lvx     v3,0,r4
+ err3; lvx     v2,r4,r9
+ err3; lvx     v1,r4,r10
+ err3; lvx     v0,r4,r11
+       addi    r4,r4,64
+-err3; stvx    v3,r0,r3
++err3; stvx    v3,0,r3
+ err3; stvx    v2,r3,r9
+ err3; stvx    v1,r3,r10
+ err3; stvx    v0,r3,r11
+       addi    r3,r3,64
+ 
+ 9:    bf      cr7*4+2,10f
+-err3; lvx     v1,r0,r4
++err3; lvx     v1,0,r4
+ err3; lvx     v0,r4,r9
+       addi    r4,r4,32
+-err3; stvx    v1,r0,r3
++err3; stvx    v1,0,r3
+ err3; stvx    v0,r3,r9
+       addi    r3,r3,32
+ 
+ 10:   bf      cr7*4+3,11f
+-err3; lvx     v1,r0,r4
++err3; lvx     v1,0,r4
+       addi    r4,r4,16
+-err3; stvx    v1,r0,r3
++err3; stvx    v1,0,r3
+       addi    r3,r3,16
+ 
+       /* Up to 15B to go */
+@@ -565,25 +565,25 @@ err3;    lvx     v0,0,r4
+       addi    r4,r4,16
+ 
+       bf      cr7*4+3,5f
+-err3; lvx     v1,r0,r4
++err3; lvx     v1,0,r4
+       VPERM(v8,v0,v1,v16)
+       addi    r4,r4,16
+-err3; stvx    v8,r0,r3
++err3; stvx    v8,0,r3
+       addi    r3,r3,16
+       vor     v0,v1,v1
+ 
+ 5:    bf      cr7*4+2,6f
+-err3; lvx     v1,r0,r4
++err3; lvx     v1,0,r4
+       VPERM(v8,v0,v1,v16)
+ err3; lvx     v0,r4,r9
+       VPERM(v9,v1,v0,v16)
+       addi    r4,r4,32
+-err3; stvx    v8,r0,r3
++err3; stvx    v8,0,r3
+ err3; stvx    v9,r3,r9
+       addi    r3,r3,32
+ 
+ 6:    bf      cr7*4+1,7f
+-err3; lvx     v3,r0,r4
++err3; lvx     v3,0,r4
+       VPERM(v8,v0,v3,v16)
+ err3; lvx     v2,r4,r9
+       VPERM(v9,v3,v2,v16)
+@@ -592,7 +592,7 @@ err3;      lvx     v1,r4,r10
+ err3; lvx     v0,r4,r11
+       VPERM(v11,v1,v0,v16)
+       addi    r4,r4,64
+-err3; stvx    v8,r0,r3
++err3; stvx    v8,0,r3
+ err3; stvx    v9,r3,r9
+ err3; stvx    v10,r3,r10
+ err3; stvx    v11,r3,r11
+@@ -618,7 +618,7 @@ err3;      stvx    v11,r3,r11
+        */
+       .align  5
+ 8:
+-err4; lvx     v7,r0,r4
++err4; lvx     v7,0,r4
+       VPERM(v8,v0,v7,v16)
+ err4; lvx     v6,r4,r9
+       VPERM(v9,v7,v6,v16)
+@@ -635,7 +635,7 @@ err4;      lvx     v1,r4,r15
+ err4; lvx     v0,r4,r16
+       VPERM(v15,v1,v0,v16)
+       addi    r4,r4,128
+-err4; stvx    v8,r0,r3
++err4; stvx    v8,0,r3
+ err4; stvx    v9,r3,r9
+ err4; stvx    v10,r3,r10
+ err4; stvx    v11,r3,r11
+@@ -656,7 +656,7 @@ err4;      stvx    v15,r3,r16
+       mtocrf  0x01,r6
+ 
+       bf      cr7*4+1,9f
+-err3; lvx     v3,r0,r4
++err3; lvx     v3,0,r4
+       VPERM(v8,v0,v3,v16)
+ err3; lvx     v2,r4,r9
+       VPERM(v9,v3,v2,v16)
+@@ -665,27 +665,27 @@ err3;    lvx     v1,r4,r10
+ err3; lvx     v0,r4,r11
+       VPERM(v11,v1,v0,v16)
+       addi    r4,r4,64
+-err3; stvx    v8,r0,r3
++err3; stvx    v8,0,r3
+ err3; stvx    v9,r3,r9
+ err3; stvx    v10,r3,r10
+ err3; stvx    v11,r3,r11
+       addi    r3,r3,64
+ 
+ 9:    bf      cr7*4+2,10f
+-err3; lvx     v1,r0,r4
++err3; lvx     v1,0,r4
+       VPERM(v8,v0,v1,v16)
+ err3; lvx     v0,r4,r9
+       VPERM(v9,v1,v0,v16)
+       addi    r4,r4,32
+-err3; stvx    v8,r0,r3
++err3; stvx    v8,0,r3
+ err3; stvx    v9,r3,r9
+       addi    r3,r3,32
+ 
+ 10:   bf      cr7*4+3,11f
+-err3; lvx     v1,r0,r4
++err3; lvx     v1,0,r4
+       VPERM(v8,v0,v1,v16)
+       addi    r4,r4,16
+-err3; stvx    v8,r0,r3
++err3; stvx    v8,0,r3
+       addi    r3,r3,16
+ 
+       /* Up to 15B to go */
+diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
+index cf1398e3c2e0..e6ed0ec94bc8 100644
+--- a/arch/powerpc/lib/feature-fixups.c
++++ b/arch/powerpc/lib/feature-fixups.c
+@@ -277,8 +277,101 @@ void do_rfi_flush_fixups(enum l1d_flush_type types)
+               (types &  L1D_FLUSH_MTTRIG)     ? "mttrig type"
+                                               : "unknown");
+ }
++
++void do_barrier_nospec_fixups_range(bool enable, void *fixup_start, void *fixup_end)
++{
++      unsigned int instr, *dest;
++      long *start, *end;
++      int i;
++
++      start = fixup_start;
++      end = fixup_end;
++
++      instr = 0x60000000; /* nop */
++
++      if (enable) {
++              pr_info("barrier-nospec: using ORI speculation barrier\n");
++              instr = 0x63ff0000; /* ori 31,31,0 speculation barrier */
++      }
++
++      for (i = 0; start < end; start++, i++) {
++              dest = (void *)start + *start;
++
++              pr_devel("patching dest %lx\n", (unsigned long)dest);
++              patch_instruction(dest, instr);
++      }
++
++      printk(KERN_DEBUG "barrier-nospec: patched %d locations\n", i);
++}
++
+ #endif /* CONFIG_PPC_BOOK3S_64 */
+ 
++#ifdef CONFIG_PPC_BARRIER_NOSPEC
++void do_barrier_nospec_fixups(bool enable)
++{
++      void *start, *end;
++
++      start = PTRRELOC(&__start___barrier_nospec_fixup),
++      end = PTRRELOC(&__stop___barrier_nospec_fixup);
++
++      do_barrier_nospec_fixups_range(enable, start, end);
++}
++#endif /* CONFIG_PPC_BARRIER_NOSPEC */
++
++#ifdef CONFIG_PPC_FSL_BOOK3E
++void do_barrier_nospec_fixups_range(bool enable, void *fixup_start, void *fixup_end)
++{
++      unsigned int instr[2], *dest;
++      long *start, *end;
++      int i;
++
++      start = fixup_start;
++      end = fixup_end;
++
++      instr[0] = PPC_INST_NOP;
++      instr[1] = PPC_INST_NOP;
++
++      if (enable) {
++              pr_info("barrier-nospec: using isync; sync as speculation barrier\n");
++              instr[0] = PPC_INST_ISYNC;
++              instr[1] = PPC_INST_SYNC;
++      }
++
++      for (i = 0; start < end; start++, i++) {
++              dest = (void *)start + *start;
++
++              pr_devel("patching dest %lx\n", (unsigned long)dest);
++              patch_instruction(dest, instr[0]);
++              patch_instruction(dest + 1, instr[1]);
++      }
++
++      printk(KERN_DEBUG "barrier-nospec: patched %d locations\n", i);
++}
++
++static void patch_btb_flush_section(long *curr)
++{
++      unsigned int *start, *end;
++
++      start = (void *)curr + *curr;
++      end = (void *)curr + *(curr + 1);
++      for (; start < end; start++) {
++              pr_devel("patching dest %lx\n", (unsigned long)start);
++              patch_instruction(start, PPC_INST_NOP);
++      }
++}
++
++void do_btb_flush_fixups(void)
++{
++      long *start, *end;
++
++      start = PTRRELOC(&__start__btb_flush_fixup);
++      end = PTRRELOC(&__stop__btb_flush_fixup);
++
++      for (; start < end; start += 2)
++              patch_btb_flush_section(start);
++}
++#endif /* CONFIG_PPC_FSL_BOOK3E */
++
+ void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end)
+ {
+       long *start, *end;
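
The fixup loops above resolve entries that hold a self-relative byte offset to the patch site: dest = (void *)start + *start. A minimal standalone C sketch of that addressing scheme (table contents and names are illustrative, not the kernel's):

    #include <stdio.h>

    /* Each entry stores (target address - entry address), so the table
     * stays valid wherever the image is loaded.  Illustrative only. */
    static char site_a, site_b;
    static long fixup_table[2];

    int main(void)
    {
            fixup_table[0] = (char *)&site_a - (char *)&fixup_table[0];
            fixup_table[1] = (char *)&site_b - (char *)&fixup_table[1];

            for (long *start = fixup_table; start < fixup_table + 2; start++) {
                    void *dest = (char *)start + *start; /* same math as above */
                    printf("entry %p -> site %p\n", (void *)start, dest);
            }
            return 0;
    }
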
+diff --git a/arch/powerpc/lib/memcpy_power7.S b/arch/powerpc/lib/memcpy_power7.S
+index 786234fd4e91..193909abd18b 100644
+--- a/arch/powerpc/lib/memcpy_power7.S
++++ b/arch/powerpc/lib/memcpy_power7.S
+@@ -261,12 +261,12 @@ _GLOBAL(memcpy_power7)
+ 
+ .machine push
+ .machine "power4"
+-      dcbt    r0,r6,0b01000
+-      dcbt    r0,r7,0b01010
+-      dcbtst  r0,r9,0b01000
+-      dcbtst  r0,r10,0b01010
++      dcbt    0,r6,0b01000
++      dcbt    0,r7,0b01010
++      dcbtst  0,r9,0b01000
++      dcbtst  0,r10,0b01010
+       eieio
+-      dcbt    r0,r8,0b01010   /* GO */
++      dcbt    0,r8,0b01010    /* GO */
+ .machine pop
+ 
+       beq     cr1,.Lunwind_stack_nonvmx_copy
+@@ -321,26 +321,26 @@ _GLOBAL(memcpy_power7)
+       li      r11,48
+ 
+       bf      cr7*4+3,5f
+-      lvx     v1,r0,r4
++      lvx     v1,0,r4
+       addi    r4,r4,16
+-      stvx    v1,r0,r3
++      stvx    v1,0,r3
+       addi    r3,r3,16
+ 
+ 5:    bf      cr7*4+2,6f
+-      lvx     v1,r0,r4
++      lvx     v1,0,r4
+       lvx     v0,r4,r9
+       addi    r4,r4,32
+-      stvx    v1,r0,r3
++      stvx    v1,0,r3
+       stvx    v0,r3,r9
+       addi    r3,r3,32
+ 
+ 6:    bf      cr7*4+1,7f
+-      lvx     v3,r0,r4
++      lvx     v3,0,r4
+       lvx     v2,r4,r9
+       lvx     v1,r4,r10
+       lvx     v0,r4,r11
+       addi    r4,r4,64
+-      stvx    v3,r0,r3
++      stvx    v3,0,r3
+       stvx    v2,r3,r9
+       stvx    v1,r3,r10
+       stvx    v0,r3,r11
+@@ -366,7 +366,7 @@ _GLOBAL(memcpy_power7)
+        */
+       .align  5
+ 8:
+-      lvx     v7,r0,r4
++      lvx     v7,0,r4
+       lvx     v6,r4,r9
+       lvx     v5,r4,r10
+       lvx     v4,r4,r11
+@@ -375,7 +375,7 @@ _GLOBAL(memcpy_power7)
+       lvx     v1,r4,r15
+       lvx     v0,r4,r16
+       addi    r4,r4,128
+-      stvx    v7,r0,r3
++      stvx    v7,0,r3
+       stvx    v6,r3,r9
+       stvx    v5,r3,r10
+       stvx    v4,r3,r11
+@@ -396,29 +396,29 @@ _GLOBAL(memcpy_power7)
+       mtocrf  0x01,r6
+ 
+       bf      cr7*4+1,9f
+-      lvx     v3,r0,r4
++      lvx     v3,0,r4
+       lvx     v2,r4,r9
+       lvx     v1,r4,r10
+       lvx     v0,r4,r11
+       addi    r4,r4,64
+-      stvx    v3,r0,r3
++      stvx    v3,0,r3
+       stvx    v2,r3,r9
+       stvx    v1,r3,r10
+       stvx    v0,r3,r11
+       addi    r3,r3,64
+ 
+ 9:    bf      cr7*4+2,10f
+-      lvx     v1,r0,r4
++      lvx     v1,0,r4
+       lvx     v0,r4,r9
+       addi    r4,r4,32
+-      stvx    v1,r0,r3
++      stvx    v1,0,r3
+       stvx    v0,r3,r9
+       addi    r3,r3,32
+ 
+ 10:   bf      cr7*4+3,11f
+-      lvx     v1,r0,r4
++      lvx     v1,0,r4
+       addi    r4,r4,16
+-      stvx    v1,r0,r3
++      stvx    v1,0,r3
+       addi    r3,r3,16
+ 
+       /* Up to 15B to go */
+@@ -499,25 +499,25 @@ _GLOBAL(memcpy_power7)
+       addi    r4,r4,16
+ 
+       bf      cr7*4+3,5f
+-      lvx     v1,r0,r4
++      lvx     v1,0,r4
+       VPERM(v8,v0,v1,v16)
+       addi    r4,r4,16
+-      stvx    v8,r0,r3
++      stvx    v8,0,r3
+       addi    r3,r3,16
+       vor     v0,v1,v1
+ 
+ 5:    bf      cr7*4+2,6f
+-      lvx     v1,r0,r4
++      lvx     v1,0,r4
+       VPERM(v8,v0,v1,v16)
+       lvx     v0,r4,r9
+       VPERM(v9,v1,v0,v16)
+       addi    r4,r4,32
+-      stvx    v8,r0,r3
++      stvx    v8,0,r3
+       stvx    v9,r3,r9
+       addi    r3,r3,32
+ 
+ 6:    bf      cr7*4+1,7f
+-      lvx     v3,r0,r4
++      lvx     v3,0,r4
+       VPERM(v8,v0,v3,v16)
+       lvx     v2,r4,r9
+       VPERM(v9,v3,v2,v16)
+@@ -526,7 +526,7 @@ _GLOBAL(memcpy_power7)
+       lvx     v0,r4,r11
+       VPERM(v11,v1,v0,v16)
+       addi    r4,r4,64
+-      stvx    v8,r0,r3
++      stvx    v8,0,r3
+       stvx    v9,r3,r9
+       stvx    v10,r3,r10
+       stvx    v11,r3,r11
+@@ -552,7 +552,7 @@ _GLOBAL(memcpy_power7)
+        */
+       .align  5
+ 8:
+-      lvx     v7,r0,r4
++      lvx     v7,0,r4
+       VPERM(v8,v0,v7,v16)
+       lvx     v6,r4,r9
+       VPERM(v9,v7,v6,v16)
+@@ -569,7 +569,7 @@ _GLOBAL(memcpy_power7)
+       lvx     v0,r4,r16
+       VPERM(v15,v1,v0,v16)
+       addi    r4,r4,128
+-      stvx    v8,r0,r3
++      stvx    v8,0,r3
+       stvx    v9,r3,r9
+       stvx    v10,r3,r10
+       stvx    v11,r3,r11
+@@ -590,7 +590,7 @@ _GLOBAL(memcpy_power7)
+       mtocrf  0x01,r6
+ 
+       bf      cr7*4+1,9f
+-      lvx     v3,r0,r4
++      lvx     v3,0,r4
+       VPERM(v8,v0,v3,v16)
+       lvx     v2,r4,r9
+       VPERM(v9,v3,v2,v16)
+@@ -599,27 +599,27 @@ _GLOBAL(memcpy_power7)
+       lvx     v0,r4,r11
+       VPERM(v11,v1,v0,v16)
+       addi    r4,r4,64
+-      stvx    v8,r0,r3
++      stvx    v8,0,r3
+       stvx    v9,r3,r9
+       stvx    v10,r3,r10
+       stvx    v11,r3,r11
+       addi    r3,r3,64
+ 
+ 9:    bf      cr7*4+2,10f
+-      lvx     v1,r0,r4
++      lvx     v1,0,r4
+       VPERM(v8,v0,v1,v16)
+       lvx     v0,r4,r9
+       VPERM(v9,v1,v0,v16)
+       addi    r4,r4,32
+-      stvx    v8,r0,r3
++      stvx    v8,0,r3
+       stvx    v9,r3,r9
+       addi    r3,r3,32
+ 
+ 10:   bf      cr7*4+3,11f
+-      lvx     v1,r0,r4
++      lvx     v1,0,r4
+       VPERM(v8,v0,v1,v16)
+       addi    r4,r4,16
+-      stvx    v8,r0,r3
++      stvx    v8,0,r3
+       addi    r3,r3,16
+ 
+       /* Up to 15B to go */
+diff --git a/arch/powerpc/lib/string_64.S b/arch/powerpc/lib/string_64.S
+index 57ace356c949..11e6372537fd 100644
+--- a/arch/powerpc/lib/string_64.S
++++ b/arch/powerpc/lib/string_64.S
+@@ -192,7 +192,7 @@ err1;      std     r0,8(r3)
+       mtctr   r6
+       mr      r8,r3
+ 14:
+-err1; dcbz    r0,r3
++err1; dcbz    0,r3
+       add     r3,r3,r9
+       bdnz    14b
+ 
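
These assembler hunks are purely notational: in Power indexed loads, stores, and cache ops (lvx, stvx, dcbt, dcbz), an RA field of 0 encodes the literal value zero, not the contents of r0, so spelling the operand 0 documents what the instruction always did and avoids assemblers that reject the register spelling. A hedged inline-asm sketch of the same constraint from C (assumes a powerpc64 GCC; the "b" constraint keeps the base register out of r0):

    /* Prefetch one cache line; "dcbt 0,RB" uses RA=0, i.e. no base register. */
    static inline void prefetch_line(const void *p)
    {
            asm volatile("dcbt 0,%0" : : "b"(p) : "memory");
    }
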
+diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
+index 5f844337de21..1e93dbc88e80 100644
+--- a/arch/powerpc/mm/mem.c
++++ b/arch/powerpc/mm/mem.c
+@@ -62,6 +62,7 @@
+ #endif
+ 
+ unsigned long long memory_limit;
++bool init_mem_is_free;
+ 
+ #ifdef CONFIG_HIGHMEM
+ pte_t *kmap_pte;
+@@ -396,6 +397,7 @@ void __init mem_init(void)
+ void free_initmem(void)
+ {
+       ppc_md.progress = ppc_printk_progress;
++      init_mem_is_free = true;
+       free_initmem_default(POISON_FREE_INITMEM);
+ }
+ 
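
The new init_mem_is_free flag lets code-patching paths refuse writes into init text once it has been released. A minimal sketch of such a guard (the helper and range parameters are hypothetical; only the flag comes from the hunk above):

    #include <stdbool.h>

    extern bool init_mem_is_free;

    /* Hypothetical guard: reject patching an address inside the init
     * section after free_initmem() has run. */
    static bool patch_addr_ok(unsigned long addr,
                              unsigned long init_begin, unsigned long init_end)
    {
            if (init_mem_is_free && addr >= init_begin && addr < init_end)
                    return false;
            return true;
    }
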
+diff --git a/arch/powerpc/mm/tlb_low_64e.S b/arch/powerpc/mm/tlb_low_64e.S
+index eb82d787d99a..b7e9c09dfe19 100644
+--- a/arch/powerpc/mm/tlb_low_64e.S
++++ b/arch/powerpc/mm/tlb_low_64e.S
+@@ -69,6 +69,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
+       std     r15,EX_TLB_R15(r12)
+       std     r10,EX_TLB_CR(r12)
+ #ifdef CONFIG_PPC_FSL_BOOK3E
++START_BTB_FLUSH_SECTION
++      mfspr r11, SPRN_SRR1
++      andi. r10,r11,MSR_PR
++      beq 1f
++      BTB_FLUSH(r10)
++1:
++END_BTB_FLUSH_SECTION
+       std     r7,EX_TLB_R7(r12)
+ #endif
+       TLB_MISS_PROLOG_STATS
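
The BTB flush block above runs only when the interrupted context was userspace, which the asm decides by testing MSR_PR in SRR1. The equivalent predicate, sketched in C (MSR_PR is bit 14 per the Power ISA; the function name is illustrative):

    #include <stdbool.h>
    #include <stdint.h>

    #define MSR_PR (1UL << 14)  /* problem state: interrupted code ran in user mode */

    static bool needs_btb_flush(uint64_t srr1)
    {
            return (srr1 & MSR_PR) != 0; /* flush only on entry from userspace */
    }
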
+diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
+index 17203abf38e8..365e2b620201 100644
+--- a/arch/powerpc/platforms/powernv/setup.c
++++ b/arch/powerpc/platforms/powernv/setup.c
+@@ -77,6 +77,12 @@ static void init_fw_feat_flags(struct device_node *np)
+       if (fw_feature_is("enabled", "fw-count-cache-disabled", np))
+               security_ftr_set(SEC_FTR_COUNT_CACHE_DISABLED);
+ 
++      if (fw_feature_is("enabled", "fw-count-cache-flush-bcctr2,0,0", np))
++              security_ftr_set(SEC_FTR_BCCTR_FLUSH_ASSIST);
++
++      if (fw_feature_is("enabled", 
"needs-count-cache-flush-on-context-switch", np))
++              security_ftr_set(SEC_FTR_FLUSH_COUNT_CACHE);
++
+       /*
+        * The features below are enabled by default, so we instead look to see
+        * if firmware has *disabled* them, and clear them if so.
+@@ -123,6 +129,7 @@ static void pnv_setup_rfi_flush(void)
+                 security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV));
+ 
+       setup_rfi_flush(type, enable);
++      setup_count_cache_flush();
+ }
+ 
+ static void __init pnv_setup_arch(void)
+diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
+index 91ade7755823..adb09ab87f7c 100644
+--- a/arch/powerpc/platforms/pseries/setup.c
++++ b/arch/powerpc/platforms/pseries/setup.c
+@@ -475,6 +475,12 @@ static void init_cpu_char_feature_flags(struct h_cpu_char_result *result)
+       if (result->character & H_CPU_CHAR_COUNT_CACHE_DISABLED)
+               security_ftr_set(SEC_FTR_COUNT_CACHE_DISABLED);
+ 
++      if (result->character & H_CPU_CHAR_BCCTR_FLUSH_ASSIST)
++              security_ftr_set(SEC_FTR_BCCTR_FLUSH_ASSIST);
++
++      if (result->behaviour & H_CPU_BEHAV_FLUSH_COUNT_CACHE)
++              security_ftr_set(SEC_FTR_FLUSH_COUNT_CACHE);
++
+       /*
+        * The features below are enabled by default, so we instead look to see
+        * if firmware has *disabled* them, and clear them if so.
+@@ -525,6 +531,7 @@ void pseries_setup_rfi_flush(void)
+                security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR);
+ 
+       setup_rfi_flush(types, enable);
++      setup_count_cache_flush();
+ }
+ 
+ static void __init pSeries_setup_arch(void)
+diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile
+index d5409660f5de..756dc9432d15 100644
+--- a/arch/x86/entry/vdso/Makefile
++++ b/arch/x86/entry/vdso/Makefile
+@@ -47,10 +47,8 @@ targets += $(vdso_img_sodbg)
+ 
+ export CPPFLAGS_vdso.lds += -P -C
+ 
+-VDSO_LDFLAGS_vdso.lds = -m64 -Wl,-soname=linux-vdso.so.1 \
+-                      -Wl,--no-undefined \
+-                      -Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096 \
+-                      $(DISABLE_LTO)
++VDSO_LDFLAGS_vdso.lds = -m elf_x86_64 -soname linux-vdso.so.1 --no-undefined \
++                      -z max-page-size=4096
+ 
+ $(obj)/vdso64.so.dbg: $(src)/vdso.lds $(vobjs) FORCE
+       $(call if_changed,vdso)
+@@ -96,10 +94,8 @@ CFLAGS_REMOVE_vvar.o = -pg
+ #
+ 
+ CPPFLAGS_vdsox32.lds = $(CPPFLAGS_vdso.lds)
+-VDSO_LDFLAGS_vdsox32.lds = -Wl,-m,elf32_x86_64 \
+-                         -Wl,-soname=linux-vdso.so.1 \
+-                         -Wl,-z,max-page-size=4096 \
+-                         -Wl,-z,common-page-size=4096
++VDSO_LDFLAGS_vdsox32.lds = -m elf32_x86_64 -soname linux-vdso.so.1 \
++                         -z max-page-size=4096
+ 
+ # 64-bit objects to re-brand as x32
+ vobjs64-for-x32 := $(filter-out $(vobjs-nox32),$(vobjs-y))
+@@ -127,7 +123,7 @@ $(obj)/vdsox32.so.dbg: $(src)/vdsox32.lds $(vobjx32s) FORCE
+       $(call if_changed,vdso)
+ 
+ CPPFLAGS_vdso32.lds = $(CPPFLAGS_vdso.lds)
+-VDSO_LDFLAGS_vdso32.lds = -m32 -Wl,-m,elf_i386 -Wl,-soname=linux-gate.so.1
++VDSO_LDFLAGS_vdso32.lds = -m elf_i386 -soname linux-gate.so.1
+ 
+ # This makes sure the $(obj) subdirectory exists even though vdso32/
+ # is not a kbuild sub-make subdirectory.
+@@ -165,13 +161,13 @@ $(obj)/vdso32.so.dbg: FORCE \
+ # The DSO images are built using a special linker script.
+ #
+ quiet_cmd_vdso = VDSO    $@
+-      cmd_vdso = $(CC) -nostdlib -o $@ \
++      cmd_vdso = $(LD) -nostdlib -o $@ \
+                      $(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \
+-                     -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
++                     -T $(filter %.lds,$^) $(filter %.o,$^) && \
+                sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
+ 
+-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=both) \
+-      $(call cc-ldoption, -Wl$(comma)--build-id) -Wl,-Bsymbolic $(LTO_CFLAGS)
++VDSO_LDFLAGS = -shared $(call ld-option, --hash-style=both) \
++      $(call ld-option, --build-id) -Bsymbolic
+ GCOV_PROFILE := n
+ 
+ #
+diff --git a/arch/x86/include/asm/suspend_32.h b/arch/x86/include/asm/suspend_32.h
+index 8e9dbe7b73a1..5cc2ce4ab8a3 100644
+--- a/arch/x86/include/asm/suspend_32.h
++++ b/arch/x86/include/asm/suspend_32.h
+@@ -11,7 +11,13 @@
+ 
+ /* image of the saved processor state */
+ struct saved_context {
+-      u16 es, fs, gs, ss;
++      /*
++       * On x86_32, all segment registers, with the possible exception of
++       * gs, are saved at kernel entry in pt_regs.
++       */
++#ifdef CONFIG_X86_32_LAZY_GS
++      u16 gs;
++#endif
+       unsigned long cr0, cr2, cr3, cr4;
+       u64 misc_enable;
+       bool misc_enable_saved;
+diff --git a/arch/x86/include/asm/suspend_64.h b/arch/x86/include/asm/suspend_64.h
+index 2bd96b4df140..701751918921 100644
+--- a/arch/x86/include/asm/suspend_64.h
++++ b/arch/x86/include/asm/suspend_64.h
+@@ -19,8 +19,20 @@
+  */
+ struct saved_context {
+       struct pt_regs regs;
+-      u16 ds, es, fs, gs, ss;
+-      unsigned long gs_base, gs_kernel_base, fs_base;
++
++      /*
++       * User CS and SS are saved in current_pt_regs().  The rest of the
++       * segment selectors need to be saved and restored here.
++       */
++      u16 ds, es, fs, gs;
++
++      /*
++       * Usermode FSBASE and GSBASE may not match the fs and gs selectors,
++       * so we save them separately.  We save the kernelmode GSBASE to
++       * restore percpu access after resume.
++       */
++      unsigned long kernelmode_gs_base, usermode_gs_base, fs_base;
++
+       unsigned long cr0, cr2, cr3, cr4, cr8;
+       u64 misc_enable;
+       bool misc_enable_saved;
+@@ -29,8 +41,7 @@ struct saved_context {
+       u16 gdt_pad; /* Unused */
+       struct desc_ptr gdt_desc;
+       u16 idt_pad;
+-      u16 idt_limit;
+-      unsigned long idt_base;
++      struct desc_ptr idt;
+       u16 ldt;
+       u16 tss;
+       unsigned long tr;
+diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h
+index ccdc23d89b60..9f694537a103 100644
+--- a/arch/x86/include/asm/xen/hypercall.h
++++ b/arch/x86/include/asm/xen/hypercall.h
+@@ -216,6 +216,9 @@ privcmd_call(unsigned call,
+       __HYPERCALL_DECLS;
+       __HYPERCALL_5ARG(a1, a2, a3, a4, a5);
+ 
++      if (call >= PAGE_SIZE / sizeof(hypercall_page[0]))
++              return -EINVAL;
++
+       stac();
+       asm volatile(CALL_NOSPEC
+                    : __HYPERCALL_5PARAM
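
The added check rejects an out-of-range, attacker-controllable hypercall number before it is used to steer the indirect call into the fixed-size page of hypercall stubs. A standalone sketch of the arithmetic (stub and page sizes here are illustrative constants):

    #include <errno.h>

    #define HC_PAGE_SIZE    4096    /* one page of call stubs */
    #define HC_STUB_SIZE    32      /* bytes per stub; illustrative */

    static int check_call_index(unsigned int call)
    {
            if (call >= HC_PAGE_SIZE / HC_STUB_SIZE)
                    return -EINVAL; /* never jump past the stub page */
            return 0;
    }
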
+diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
+index 53cace2ec0e2..054e27671df9 100644
+--- a/arch/x86/power/cpu.c
++++ b/arch/x86/power/cpu.c
+@@ -82,12 +82,8 @@ static void __save_processor_state(struct saved_context *ctxt)
+       /*
+        * descriptor tables
+        */
+-#ifdef CONFIG_X86_32
+       store_idt(&ctxt->idt);
+-#else
+-/* CONFIG_X86_64 */
+-      store_idt((struct desc_ptr *)&ctxt->idt_limit);
+-#endif
++
+       /*
+        * We save it here, but restore it only in the hibernate case.
+        * For ACPI S3 resume, this is loaded via 'early_gdt_desc' in 64-bit
+@@ -103,22 +99,18 @@ static void __save_processor_state(struct saved_context *ctxt)
+       /*
+        * segment registers
+        */
+-#ifdef CONFIG_X86_32
+-      savesegment(es, ctxt->es);
+-      savesegment(fs, ctxt->fs);
++#ifdef CONFIG_X86_32_LAZY_GS
+       savesegment(gs, ctxt->gs);
+-      savesegment(ss, ctxt->ss);
+-#else
+-/* CONFIG_X86_64 */
+-      asm volatile ("movw %%ds, %0" : "=m" (ctxt->ds));
+-      asm volatile ("movw %%es, %0" : "=m" (ctxt->es));
+-      asm volatile ("movw %%fs, %0" : "=m" (ctxt->fs));
+-      asm volatile ("movw %%gs, %0" : "=m" (ctxt->gs));
+-      asm volatile ("movw %%ss, %0" : "=m" (ctxt->ss));
++#endif
++#ifdef CONFIG_X86_64
++      savesegment(gs, ctxt->gs);
++      savesegment(fs, ctxt->fs);
++      savesegment(ds, ctxt->ds);
++      savesegment(es, ctxt->es);
+ 
+       rdmsrl(MSR_FS_BASE, ctxt->fs_base);
+-      rdmsrl(MSR_GS_BASE, ctxt->gs_base);
+-      rdmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
++      rdmsrl(MSR_GS_BASE, ctxt->kernelmode_gs_base);
++      rdmsrl(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base);
+       mtrr_save_fixed_ranges(NULL);
+ 
+       rdmsrl(MSR_EFER, ctxt->efer);
+@@ -178,6 +170,9 @@ static void fix_processor_context(void)
+       write_gdt_entry(desc, GDT_ENTRY_TSS, &tss, DESC_TSS);
+ 
+       syscall_init();                         /* This sets MSR_*STAR and related */
++#else
++      if (boot_cpu_has(X86_FEATURE_SEP))
++              enable_sep_cpu();
+ #endif
+       load_TR_desc();                         /* This does ltr */
+       load_mm_ldt(current->active_mm);        /* This does lldt */
+@@ -186,9 +181,12 @@ static void fix_processor_context(void)
+ }
+ 
+ /**
+- *    __restore_processor_state - restore the contents of CPU registers saved
+- *            by __save_processor_state()
+- *    @ctxt - structure to load the registers contents from
++ * __restore_processor_state - restore the contents of CPU registers saved
++ *                             by __save_processor_state()
++ * @ctxt - structure to load the registers contents from
++ *
++ * The asm code that gets us here will have restored a usable GDT, although
++ * it will be pointing to the wrong alias.
+  */
+ static void notrace __restore_processor_state(struct saved_context *ctxt)
+ {
+@@ -211,46 +209,52 @@ static void notrace __restore_processor_state(struct saved_context *ctxt)
+       write_cr2(ctxt->cr2);
+       write_cr0(ctxt->cr0);
+ 
++      /* Restore the IDT. */
++      load_idt(&ctxt->idt);
++
+       /*
+-       * now restore the descriptor tables to their proper values
+-       * ltr is done i fix_processor_context().
++       * Just in case the asm code got us here with the SS, DS, or ES
++       * out of sync with the GDT, update them.
+        */
+-#ifdef CONFIG_X86_32
+-      load_idt(&ctxt->idt);
++      loadsegment(ss, __KERNEL_DS);
++      loadsegment(ds, __USER_DS);
++      loadsegment(es, __USER_DS);
++
++      /*
++       * Restore percpu access.  Percpu access can happen in exception
++       * handlers or in complicated helpers like load_gs_index().
++       */
++#ifdef CONFIG_X86_64
++      wrmsrl(MSR_GS_BASE, ctxt->kernelmode_gs_base);
+ #else
+-/* CONFIG_X86_64 */
+-      load_idt((const struct desc_ptr *)&ctxt->idt_limit);
++      loadsegment(fs, __KERNEL_PERCPU);
++      loadsegment(gs, __KERNEL_STACK_CANARY);
+ #endif
+ 
++      /* Restore the TSS, RO GDT, LDT, and usermode-relevant MSRs. */
++      fix_processor_context();
++
+       /*
+-       * segment registers
++       * Now that we have descriptor tables fully restored and working
++       * exception handling, restore the usermode segments.
+        */
+-#ifdef CONFIG_X86_32
++#ifdef CONFIG_X86_64
++      loadsegment(ds, ctxt->es);
+       loadsegment(es, ctxt->es);
+       loadsegment(fs, ctxt->fs);
+-      loadsegment(gs, ctxt->gs);
+-      loadsegment(ss, ctxt->ss);
++      load_gs_index(ctxt->gs);
+ 
+       /*
+-       * sysenter MSRs
++       * Restore FSBASE and GSBASE after restoring the selectors, since
++       * restoring the selectors clobbers the bases.  Keep in mind
++       * that MSR_KERNEL_GS_BASE is horribly misnamed.
+        */
+-      if (boot_cpu_has(X86_FEATURE_SEP))
+-              enable_sep_cpu();
+-#else
+-/* CONFIG_X86_64 */
+-      asm volatile ("movw %0, %%ds" :: "r" (ctxt->ds));
+-      asm volatile ("movw %0, %%es" :: "r" (ctxt->es));
+-      asm volatile ("movw %0, %%fs" :: "r" (ctxt->fs));
+-      load_gs_index(ctxt->gs);
+-      asm volatile ("movw %0, %%ss" :: "r" (ctxt->ss));
+-
+       wrmsrl(MSR_FS_BASE, ctxt->fs_base);
+-      wrmsrl(MSR_GS_BASE, ctxt->gs_base);
+-      wrmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
++      wrmsrl(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base);
++#elif defined(CONFIG_X86_32_LAZY_GS)
++      loadsegment(gs, ctxt->gs);
+ #endif
+ 
+-      fix_processor_context();
+-
+       do_fpu_end();
+       x86_platform.restore_sched_clock_state();
+       mtrr_bp_restore();
+diff --git a/arch/xtensa/kernel/stacktrace.c b/arch/xtensa/kernel/stacktrace.c
+index 7538d802b65a..483593068139 100644
+--- a/arch/xtensa/kernel/stacktrace.c
++++ b/arch/xtensa/kernel/stacktrace.c
+@@ -272,10 +272,14 @@ static int return_address_cb(struct stackframe *frame, void *data)
+       return 1;
+ }
+ 
++/*
++ * level == 0 is for the return address from the caller of this function,
++ * not from this function itself.
++ */
+ unsigned long return_address(unsigned level)
+ {
+       struct return_addr_data r = {
+-              .skip = level + 1,
++              .skip = level,
+       };
+       walk_stackframe(stack_pointer(NULL), return_address_cb, &r);
+       return r.addr;
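
With the off-by-one fix, level counts frames above the immediate caller. A usage sketch against the kernel API above, under that corrected convention (the wrapper name is hypothetical):

    extern unsigned long return_address(unsigned level);

    /* Hypothetical wrapper: level 0 now yields the address our own
     * caller will return to, not one frame further up. */
    static void *my_caller(void)
    {
            return (void *)return_address(0);
    }
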
+diff --git a/block/bio.c b/block/bio.c
+index 68972e3d3f5c..4c18a68913de 100644
+--- a/block/bio.c
++++ b/block/bio.c
+@@ -1214,8 +1214,11 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
+                       }
+               }
+ 
+-              if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes)
++              if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes) {
++                      if (!map_data)
++                              __free_page(page);
+                       break;
++              }
+ 
+               len -= bytes;
+               offset = 0;
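
The fix gives the failure path clear page ownership: when bio_add_pc_page() rejects the page and no caller-supplied map_data owns it, the loop must release the page before breaking out. The same acquire-then-attach pattern, sketched generically (names illustrative):

    #include <stdlib.h>

    /* Sketch: once attach() fails, the buffer still belongs to us and
     * must be freed unless it came from a caller-owned pool. */
    static int fill_one(int (*attach)(void *), void *pool_buf)
    {
            void *buf = pool_buf ? pool_buf : malloc(4096);

            if (!buf)
                    return -1;
            if (attach(buf) != 0) {
                    if (!pool_buf)
                            free(buf); /* we allocated it, we free it */
                    return -1;
            }
            return 0;
    }
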
+diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
+index 8453a49471d7..f4ae000eb285 100644
+--- a/drivers/char/Kconfig
++++ b/drivers/char/Kconfig
+@@ -377,7 +377,7 @@ config XILINX_HWICAP
+ 
+ config R3964
+       tristate "Siemens R3964 line discipline"
+-      depends on TTY
++      depends on TTY && BROKEN
+       ---help---
+         This driver allows synchronous communication with devices using the
+         Siemens R3964 packet protocol. Unless you are dealing with special
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index 737f0f6f4075..45ea2718c65d 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -959,6 +959,8 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
+       tpa_info = &rxr->rx_tpa[agg_id];
+ 
+       if (unlikely(cons != rxr->rx_next_cons)) {
++              netdev_warn(bp->dev, "TPA cons %x != expected cons %x\n",
++                          cons, rxr->rx_next_cons);
+               bnxt_sched_reset(bp, rxr);
+               return;
+       }
+@@ -1377,14 +1379,16 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
+       }
+ 
+       cons = rxcmp->rx_cmp_opaque;
+-      rx_buf = &rxr->rx_buf_ring[cons];
+-      data = rx_buf->data;
+       if (unlikely(cons != rxr->rx_next_cons)) {
+               int rc1 = bnxt_discard_rx(bp, bnapi, raw_cons, rxcmp);
+ 
++              netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
++                          cons, rxr->rx_next_cons);
+               bnxt_sched_reset(bp, rxr);
+               return rc1;
+       }
++      rx_buf = &rxr->rx_buf_ring[cons];
++      data = rx_buf->data;
+       prefetch(data);
+ 
+       agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) & RX_CMP_AGG_BUFS) >>
+@@ -1400,11 +1404,17 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
+ 
+       rx_buf->data = NULL;
+       if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
++              u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2);
++
+               bnxt_reuse_rx_data(rxr, cons, data);
+               if (agg_bufs)
+                       bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);
+ 
+               rc = -EIO;
++              if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
++                      netdev_warn(bp->dev, "RX buffer error %x\n", rx_err);
++                      bnxt_sched_reset(bp, rxr);
++              }
+               goto next_rx;
+       }
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
+index 029e856f72a0..dc809c2ea413 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
+@@ -45,7 +45,9 @@ int mlx5e_create_tir(struct mlx5_core_dev *mdev,
+       if (err)
+               return err;
+ 
++      mutex_lock(&mdev->mlx5e_res.td.list_lock);
+       list_add(&tir->list, &mdev->mlx5e_res.td.tirs_list);
++      mutex_unlock(&mdev->mlx5e_res.td.list_lock);
+ 
+       return 0;
+ }
+@@ -53,8 +55,10 @@ int mlx5e_create_tir(struct mlx5_core_dev *mdev,
+ void mlx5e_destroy_tir(struct mlx5_core_dev *mdev,
+                      struct mlx5e_tir *tir)
+ {
++      mutex_lock(&mdev->mlx5e_res.td.list_lock);
+       mlx5_core_destroy_tir(mdev, tir->tirn);
+       list_del(&tir->list);
++      mutex_unlock(&mdev->mlx5e_res.td.list_lock);
+ }
+ 
+ static int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn,
+@@ -114,6 +118,7 @@ int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev)
+       }
+ 
+       INIT_LIST_HEAD(&mdev->mlx5e_res.td.tirs_list);
++      mutex_init(&mdev->mlx5e_res.td.list_lock);
+ 
+       return 0;
+ 
+@@ -151,6 +156,7 @@ int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5_core_dev *mdev)
+ 
+       MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1);
+ 
++      mutex_lock(&mdev->mlx5e_res.td.list_lock);
+       list_for_each_entry(tir, &mdev->mlx5e_res.td.tirs_list, list) {
+               err = mlx5_core_modify_tir(mdev, tir->tirn, in, inlen);
+               if (err)
+@@ -159,6 +165,7 @@ int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5_core_dev *mdev)
+ 
+ out:
+       kvfree(in);
++      mutex_unlock(&mdev->mlx5e_res.td.list_lock);
+ 
+       return err;
+ }
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index 16e5c8cd104d..d51ad140f46d 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -890,6 +890,7 @@ static const struct usb_device_id products[] = {
+       {QMI_FIXED_INTF(0x19d2, 0x2002, 4)},    /* ZTE (Vodafone) K3765-Z */
+       {QMI_FIXED_INTF(0x2001, 0x7e19, 4)},    /* D-Link DWM-221 B1 */
+       {QMI_FIXED_INTF(0x2001, 0x7e35, 4)},    /* D-Link DWM-222 */
++      {QMI_FIXED_INTF(0x2020, 0x2031, 4)},    /* Olicard 600 */
+       {QMI_FIXED_INTF(0x2020, 0x2033, 4)},    /* BroadMobi BM806U */
+       {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)},    /* Sierra Wireless MC7700 */
+       {QMI_FIXED_INTF(0x114f, 0x68a2, 8)},    /* Sierra Wireless MC7750 */
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index dedb12083d86..6663b76934ad 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -3866,6 +3866,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9128,
+ /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c14 */
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9130,
+                        quirk_dma_func1_alias);
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9170,
++                       quirk_dma_func1_alias);
+ /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c47 + c57 */
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9172,
+                        quirk_dma_func1_alias);
+diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig
+index 95103054c0e4..fdd2860cb9bd 100644
+--- a/drivers/tty/Kconfig
++++ b/drivers/tty/Kconfig
+@@ -455,4 +455,27 @@ config MIPS_EJTAG_FDC_KGDB_CHAN
+       help
+         FDC channel number to use for KGDB.
+ 
++config LDISC_AUTOLOAD
++      bool "Automatically load TTY Line Disciplines"
++      default y
++      help
++        Historically the kernel has always automatically loaded any
++        line discipline that is in a kernel module when a user asks
++        for it to be loaded with the TIOCSETD ioctl, or through other
++        means.  This is not always the best thing to do on systems
++        where you know you will not be using some of the more
++        "ancient" line disciplines, so prevent the kernel from doing
++        this unless the request is coming from a process with the
++        CAP_SYS_MODULE permissions.
++
++        Say 'Y' here if you trust your userspace users to do the right
++        thing, or if you have only provided the line disciplines that
++        you know you will be using, or if you wish to continue to use
++        the traditional method of on-demand loading of these modules
++        by any user.
++
++        This functionality can be changed at runtime with the
++        dev.tty.ldisc_autoload sysctl, this configuration option will
++        only set the default value of this functionality.
++
+ endif # TTY
+diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
+index 19fe1e8fc124..15e0116e1232 100644
+--- a/drivers/tty/tty_io.c
++++ b/drivers/tty/tty_io.c
+@@ -520,6 +520,8 @@ void proc_clear_tty(struct task_struct *p)
+       tty_kref_put(tty);
+ }
+ 
++extern void tty_sysctl_init(void);
++
+ /**
+  * proc_set_tty -  set the controlling terminal
+  *
+@@ -3705,6 +3707,7 @@ void console_sysfs_notify(void)
+  */
+ int __init tty_init(void)
+ {
++      tty_sysctl_init();
+       cdev_init(&tty_cdev, &tty_fops);
+       if (cdev_add(&tty_cdev, MKDEV(TTYAUX_MAJOR, 0), 1) ||
+           register_chrdev_region(MKDEV(TTYAUX_MAJOR, 0), 1, "/dev/tty") < 0)
+diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
+index 4ab518d43758..3eb3f2a03bbb 100644
+--- a/drivers/tty/tty_ldisc.c
++++ b/drivers/tty/tty_ldisc.c
+@@ -155,6 +155,13 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
+  *            takes tty_ldiscs_lock to guard against ldisc races
+  */
+ 
++#if defined(CONFIG_LDISC_AUTOLOAD)
++      #define INITIAL_AUTOLOAD_STATE  1
++#else
++      #define INITIAL_AUTOLOAD_STATE  0
++#endif
++static int tty_ldisc_autoload = INITIAL_AUTOLOAD_STATE;
++
+ static struct tty_ldisc *tty_ldisc_get(struct tty_struct *tty, int disc)
+ {
+       struct tty_ldisc *ld;
+@@ -169,6 +176,8 @@ static struct tty_ldisc *tty_ldisc_get(struct tty_struct *tty, int disc)
+        */
+       ldops = get_ldops(disc);
+       if (IS_ERR(ldops)) {
++              if (!capable(CAP_SYS_MODULE) && !tty_ldisc_autoload)
++                      return ERR_PTR(-EPERM);
+               request_module("tty-ldisc-%d", disc);
+               ldops = get_ldops(disc);
+               if (IS_ERR(ldops))
+@@ -774,3 +783,41 @@ void tty_ldisc_deinit(struct tty_struct *tty)
+               tty_ldisc_put(tty->ldisc);
+       tty->ldisc = NULL;
+ }
++
++static int zero;
++static int one = 1;
++static struct ctl_table tty_table[] = {
++      {
++              .procname       = "ldisc_autoload",
++              .data           = &tty_ldisc_autoload,
++              .maxlen         = sizeof(tty_ldisc_autoload),
++              .mode           = 0644,
++              .proc_handler   = proc_dointvec,
++              .extra1         = &zero,
++              .extra2         = &one,
++      },
++      { }
++};
++
++static struct ctl_table tty_dir_table[] = {
++      {
++              .procname       = "tty",
++              .mode           = 0555,
++              .child          = tty_table,
++      },
++      { }
++};
++
++static struct ctl_table tty_root_table[] = {
++      {
++              .procname       = "dev",
++              .mode           = 0555,
++              .child          = tty_dir_table,
++      },
++      { }
++};
++
++void tty_sysctl_init(void)
++{
++      register_sysctl_table(tty_root_table);
++}
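
Once the tables above are registered, the knob shows up as dev.tty.ldisc_autoload. A small userspace sketch that reads it (path per the Kconfig help text earlier in this patch):

    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/proc/sys/dev/tty/ldisc_autoload", "r");
            int val;

            if (f && fscanf(f, "%d", &val) == 1)
                    printf("ldisc_autoload = %d\n", val);
            if (f)
                    fclose(f);
            return 0;
    }
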
+diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
+index 8977f40ea441..2f09294c5946 100644
+--- a/drivers/virtio/virtio_ring.c
++++ b/drivers/virtio/virtio_ring.c
+@@ -1040,6 +1040,8 @@ struct virtqueue *vring_create_virtqueue(
+                                         GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
+               if (queue)
+                       break;
++              if (!may_reduce_num)
++                      return NULL;
+       }
+ 
+       if (!num)
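
The added test makes the allocation fallback honor may_reduce_num: rather than silently retrying with half the ring size, the function now fails outright when the caller forbade shrinking. A condensed sketch of the loop's contract (the entry size is an illustrative stand-in):

    #include <stdbool.h>
    #include <stdlib.h>

    static void *alloc_ring(unsigned int *num, bool may_reduce_num)
    {
            void *queue = NULL;

            for (; *num; *num /= 2) {
                    queue = calloc(*num, 16); /* 16: stand-in descriptor size */
                    if (queue)
                            break;
                    if (!may_reduce_num)
                            return NULL; /* caller insisted on the full size */
            }
            return queue;
    }
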
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index 242584a0d3b5..a67143c579aa 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -385,6 +385,16 @@ static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg)
+       if (!capable(CAP_SYS_ADMIN))
+               return -EPERM;
+ 
++      /*
++       * If the fs is mounted with nologreplay, which requires it to be
++       * mounted in RO mode as well, we can not allow discard on free space
++       * inside block groups, because log trees refer to extents that are not
++       * pinned in a block group's free space cache (pinning the extents is
++       * precisely the first phase of replaying a log tree).
++       */
++      if (btrfs_test_opt(fs_info, NOLOGREPLAY))
++              return -EROFS;
++
+       rcu_read_lock();
+       list_for_each_entry_rcu(device, &fs_info->fs_devices->devices,
+                               dev_list) {
+diff --git a/include/linux/bitrev.h b/include/linux/bitrev.h
+index fb790b8449c1..333e42cf08de 100644
+--- a/include/linux/bitrev.h
++++ b/include/linux/bitrev.h
+@@ -31,32 +31,32 @@ static inline u32 __bitrev32(u32 x)
+ 
+ #define __constant_bitrev32(x)        \
+ ({                                    \
+-      u32 __x = x;                    \
+-      __x = (__x >> 16) | (__x << 16);        \
+-      __x = ((__x & (u32)0xFF00FF00UL) >> 8) | ((__x & (u32)0x00FF00FFUL) << 
8);      \
+-      __x = ((__x & (u32)0xF0F0F0F0UL) >> 4) | ((__x & (u32)0x0F0F0F0FUL) << 
4);      \
+-      __x = ((__x & (u32)0xCCCCCCCCUL) >> 2) | ((__x & (u32)0x33333333UL) << 
2);      \
+-      __x = ((__x & (u32)0xAAAAAAAAUL) >> 1) | ((__x & (u32)0x55555555UL) << 
1);      \
+-      __x;                                                            \
++      u32 ___x = x;                   \
++      ___x = (___x >> 16) | (___x << 16);     \
++      ___x = ((___x & (u32)0xFF00FF00UL) >> 8) | ((___x & (u32)0x00FF00FFUL) 
<< 8);   \
++      ___x = ((___x & (u32)0xF0F0F0F0UL) >> 4) | ((___x & (u32)0x0F0F0F0FUL) 
<< 4);   \
++      ___x = ((___x & (u32)0xCCCCCCCCUL) >> 2) | ((___x & (u32)0x33333333UL) 
<< 2);   \
++      ___x = ((___x & (u32)0xAAAAAAAAUL) >> 1) | ((___x & (u32)0x55555555UL) 
<< 1);   \
++      ___x;                                                           \
+ })
+ 
+ #define __constant_bitrev16(x)        \
+ ({                                    \
+-      u16 __x = x;                    \
+-      __x = (__x >> 8) | (__x << 8);  \
+-      __x = ((__x & (u16)0xF0F0U) >> 4) | ((__x & (u16)0x0F0FU) << 4);        
\
+-      __x = ((__x & (u16)0xCCCCU) >> 2) | ((__x & (u16)0x3333U) << 2);        
\
+-      __x = ((__x & (u16)0xAAAAU) >> 1) | ((__x & (u16)0x5555U) << 1);        
\
+-      __x;                                                            \
++      u16 ___x = x;                   \
++      ___x = (___x >> 8) | (___x << 8);       \
++      ___x = ((___x & (u16)0xF0F0U) >> 4) | ((___x & (u16)0x0F0FU) << 4);     
\
++      ___x = ((___x & (u16)0xCCCCU) >> 2) | ((___x & (u16)0x3333U) << 2);     
\
++      ___x = ((___x & (u16)0xAAAAU) >> 1) | ((___x & (u16)0x5555U) << 1);     
\
++      ___x;                                                           \
+ })
+ 
+ #define __constant_bitrev8(x) \
+ ({                                    \
+-      u8 __x = x;                     \
+-      __x = (__x >> 4) | (__x << 4);  \
+-      __x = ((__x & (u8)0xCCU) >> 2) | ((__x & (u8)0x33U) << 2);      \
+-      __x = ((__x & (u8)0xAAU) >> 1) | ((__x & (u8)0x55U) << 1);      \
+-      __x;                                                            \
++      u8 ___x = x;                    \
++      ___x = (___x >> 4) | (___x << 4);       \
++      ___x = ((___x & (u8)0xCCU) >> 2) | ((___x & (u8)0x33U) << 2);   \
++      ___x = ((___x & (u8)0xAAU) >> 1) | ((___x & (u8)0x55U) << 1);   \
++      ___x;                                                           \
+ })
+ 
+ #define bitrev32(x) \
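
The __x to ___x rename matters because these are statement-expression macros: if a caller's argument expression also uses a temporary named __x, the inner declaration shadows it and the initializer reads the wrong (indeterminate) variable. A compilable demonstration of the idiom after the rename, reduced to one swap stage:

    #include <stdint.h>
    #include <stdio.h>

    /* One stage of the pattern above; ___x avoids colliding with any
     * __x a caller may have used in the argument expression. */
    #define REV8(x) ({ uint8_t ___x = (x); (uint8_t)((___x >> 4) | (___x << 4)); })

    int main(void)
    {
            printf("%#x\n", REV8(0x12)); /* nibble swap: prints 0x21 */
            return 0;
    }
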
+diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
+index 859fd209603a..509e99076c57 100644
+--- a/include/linux/mlx5/driver.h
++++ b/include/linux/mlx5/driver.h
+@@ -578,6 +578,8 @@ enum mlx5_pci_status {
+ };
+ 
+ struct mlx5_td {
++      /* protects tirs list changes while tirs refresh */
++      struct mutex     list_lock;
+       struct list_head tirs_list;
+       u32              tdn;
+ };
+diff --git a/include/linux/string.h b/include/linux/string.h
+index 60042e5e88ff..42eed573ebb6 100644
+--- a/include/linux/string.h
++++ b/include/linux/string.h
+@@ -111,6 +111,9 @@ extern void * memscan(void *,int,__kernel_size_t);
+ #ifndef __HAVE_ARCH_MEMCMP
+ extern int memcmp(const void *,const void *,__kernel_size_t);
+ #endif
++#ifndef __HAVE_ARCH_BCMP
++extern int bcmp(const void *,const void *,__kernel_size_t);
++#endif
+ #ifndef __HAVE_ARCH_MEMCHR
+ extern void * memchr(const void *,int,__kernel_size_t);
+ #endif
+diff --git a/include/linux/virtio_ring.h b/include/linux/virtio_ring.h
+index e8d36938f09a..b38c1871b735 100644
+--- a/include/linux/virtio_ring.h
++++ b/include/linux/virtio_ring.h
+@@ -62,7 +62,7 @@ struct virtqueue;
+ /*
+  * Creates a virtqueue and allocates the descriptor ring.  If
+  * may_reduce_num is set, then this may allocate a smaller ring than
+- * expected.  The caller should query virtqueue_get_ring_size to learn
++ * expected.  The caller should query virtqueue_get_vring_size to learn
+  * the actual size of the ring.
+  */
+ struct virtqueue *vring_create_virtqueue(unsigned int index,
+diff --git a/include/net/ip.h b/include/net/ip.h
+index f06cd30bb44c..a3c1b9dfc9a1 100644
+--- a/include/net/ip.h
++++ b/include/net/ip.h
+@@ -580,7 +580,7 @@ int ip_options_get_from_user(struct net *net, struct ip_options_rcu **optp,
+                            unsigned char __user *data, int optlen);
+ void ip_options_undo(struct ip_options *opt);
+ void ip_forward_options(struct sk_buff *skb);
+-int ip_options_rcv_srr(struct sk_buff *skb);
++int ip_options_rcv_srr(struct sk_buff *skb, struct net_device *dev);
+ 
+ /*
+  *    Functions provided by ip_sockglue.c
+diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
+index c05db6ff2515..0cdafe3935a6 100644
+--- a/include/net/net_namespace.h
++++ b/include/net/net_namespace.h
+@@ -53,6 +53,7 @@ struct net {
+                                                */
+       spinlock_t              rules_mod_lock;
+ 
++      u32                     hash_mix;
+       atomic64_t              cookie_gen;
+ 
+       struct list_head        list;           /* list of network namespaces */
+diff --git a/include/net/netns/hash.h b/include/net/netns/hash.h
+index 69a6715d9f3f..a347b2f9e748 100644
+--- a/include/net/netns/hash.h
++++ b/include/net/netns/hash.h
+@@ -1,21 +1,10 @@
+ #ifndef __NET_NS_HASH_H__
+ #define __NET_NS_HASH_H__
+ 
+-#include <asm/cache.h>
+-
+-struct net;
++#include <net/net_namespace.h>
+ 
+ static inline u32 net_hash_mix(const struct net *net)
+ {
+-#ifdef CONFIG_NET_NS
+-      /*
+-       * shift this right to eliminate bits, that are
+-       * always zeroed
+-       */
+-
+-      return (u32)(((unsigned long)net) >> L1_CACHE_SHIFT);
+-#else
+-      return 0;
+-#endif
++      return net->hash_mix;
+ }
+ #endif
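
Replacing the shifted namespace pointer with a random per-namespace hash_mix (seeded via get_random_bytes() in setup_net(), later in this patch) keeps kernel address bits out of hash values and makes bucket placement unpredictable to remote peers. A sketch of the salted-hash idea (the mixing here is a stand-in for the kernel's jhash usage):

    #include <stdint.h>

    struct ns {
            uint32_t hash_mix; /* filled from a CSPRNG at namespace creation */
    };

    /* Stand-in mixer: real users feed hash_mix into jhash() as an
     * initval so each namespace gets its own bucket layout. */
    static uint32_t flow_bucket(const struct ns *ns, uint32_t key, uint32_t nbuckets)
    {
            return (key ^ ns->hash_mix) % nbuckets;
    }
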
+diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
+index 9e745cc0726d..9f13667ccb9c 100644
+--- a/kernel/irq/chip.c
++++ b/kernel/irq/chip.c
+@@ -1142,6 +1142,10 @@ int irq_chip_set_vcpu_affinity_parent(struct irq_data *data, void *vcpu_info)
+ int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on)
+ {
+       data = data->parent_data;
++
++      if (data->chip->flags & IRQCHIP_SKIP_SET_WAKE)
++              return 0;
++
+       if (data->chip->irq_set_wake)
+               return data->chip->irq_set_wake(data, on);
+ 
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 0c91d72f3e8f..1c630d94f86b 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -6634,10 +6634,10 @@ static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
+       if (cfs_rq->last_h_load_update == now)
+               return;
+ 
+-      cfs_rq->h_load_next = NULL;
++      WRITE_ONCE(cfs_rq->h_load_next, NULL);
+       for_each_sched_entity(se) {
+               cfs_rq = cfs_rq_of(se);
+-              cfs_rq->h_load_next = se;
++              WRITE_ONCE(cfs_rq->h_load_next, se);
+               if (cfs_rq->last_h_load_update == now)
+                       break;
+       }
+@@ -6647,7 +6647,7 @@ static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
+               cfs_rq->last_h_load_update = now;
+       }
+ 
+-      while ((se = cfs_rq->h_load_next) != NULL) {
++      while ((se = READ_ONCE(cfs_rq->h_load_next)) != NULL) {
+               load = cfs_rq->h_load;
+               load = div64_ul(load * se->avg.load_avg,
+                       cfs_rq_load_avg(cfs_rq) + 1);
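
h_load_next is written by one CPU while another walks the chain locklessly, so the accesses need READ_ONCE/WRITE_ONCE to forbid tearing and compiler re-reads. Userspace stand-ins for those annotations (the kernel's versions do more, e.g. instrumentation hooks):

    /* A single, untorn access through a volatile-qualified lvalue. */
    #define WRITE_ONCE_(x, v) (*(volatile __typeof__(x) *)&(x) = (v))
    #define READ_ONCE_(x)     (*(volatile __typeof__(x) *)&(x))

    struct node { struct node *next; };
    static struct node *head;

    static void publish(struct node *n)
    {
            WRITE_ONCE_(head, n); /* a concurrent reader never sees a torn pointer */
    }
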
+diff --git a/lib/string.c b/lib/string.c
+index ed83562a53ae..1cd9757291b1 100644
+--- a/lib/string.c
++++ b/lib/string.c
+@@ -772,6 +772,26 @@ __visible int memcmp(const void *cs, const void *ct, size_t count)
+ EXPORT_SYMBOL(memcmp);
+ #endif
+ 
++#ifndef __HAVE_ARCH_BCMP
++/**
++ * bcmp - returns 0 if and only if the buffers have identical contents.
++ * @a: pointer to first buffer.
++ * @b: pointer to second buffer.
++ * @len: size of buffers.
++ *
++ * The sign or magnitude of a non-zero return value has no particular
++ * meaning, and architectures may implement their own more efficient bcmp(). So
++ * while this particular implementation is a simple (tail) call to memcmp, do
++ * not rely on anything but whether the return value is zero or non-zero.
++ */
++#undef bcmp
++int bcmp(const void *a, const void *b, size_t len)
++{
++      return memcmp(a, b, len);
++}
++EXPORT_SYMBOL(bcmp);
++#endif
++
+ #ifndef __HAVE_ARCH_MEMSCAN
+ /**
+  * memscan - Find a character in an area of memory.
+diff --git a/net/core/ethtool.c b/net/core/ethtool.c
+index a8a9938aeceb..20ae57fbe009 100644
+--- a/net/core/ethtool.c
++++ b/net/core/ethtool.c
+@@ -1801,17 +1801,22 @@ static int ethtool_get_strings(struct net_device *dev, void __user *useraddr)
+ 
+       gstrings.len = ret;
+ 
+-      data = kcalloc(gstrings.len, ETH_GSTRING_LEN, GFP_USER);
+-      if (!data)
+-              return -ENOMEM;
++      if (gstrings.len) {
++              data = kcalloc(gstrings.len, ETH_GSTRING_LEN, GFP_USER);
++              if (!data)
++                      return -ENOMEM;
+ 
+-      __ethtool_get_strings(dev, gstrings.string_set, data);
++              __ethtool_get_strings(dev, gstrings.string_set, data);
++      } else {
++              data = NULL;
++      }
+ 
+       ret = -EFAULT;
+       if (copy_to_user(useraddr, &gstrings, sizeof(gstrings)))
+               goto out;
+       useraddr += sizeof(gstrings);
+-      if (copy_to_user(useraddr, data, gstrings.len * ETH_GSTRING_LEN))
++      if (gstrings.len &&
++          copy_to_user(useraddr, data, gstrings.len * ETH_GSTRING_LEN))
+               goto out;
+       ret = 0;
+ 
+@@ -1899,17 +1904,21 @@ static int ethtool_get_stats(struct net_device *dev, void __user *useraddr)
+               return -EFAULT;
+ 
+       stats.n_stats = n_stats;
+-      data = kmalloc(n_stats * sizeof(u64), GFP_USER);
+-      if (!data)
+-              return -ENOMEM;
++      if (n_stats) {
++              data = kmalloc(n_stats * sizeof(u64), GFP_USER);
++              if (!data)
++                      return -ENOMEM;
+ 
+-      ops->get_ethtool_stats(dev, &stats, data);
++              ops->get_ethtool_stats(dev, &stats, data);
++      } else {
++              data = NULL;
++      }
+ 
+       ret = -EFAULT;
+       if (copy_to_user(useraddr, &stats, sizeof(stats)))
+               goto out;
+       useraddr += sizeof(stats);
+-      if (copy_to_user(useraddr, data, stats.n_stats * sizeof(u64)))
++      if (n_stats && copy_to_user(useraddr, data, n_stats * sizeof(u64)))
+               goto out;
+       ret = 0;
+ 
+@@ -1938,19 +1947,23 @@ static int ethtool_get_phy_stats(struct net_device *dev, void __user *useraddr)
+               return -EFAULT;
+ 
+       stats.n_stats = n_stats;
+-      data = kmalloc_array(n_stats, sizeof(u64), GFP_USER);
+-      if (!data)
+-              return -ENOMEM;
++      if (n_stats) {
++              data = kmalloc_array(n_stats, sizeof(u64), GFP_USER);
++              if (!data)
++                      return -ENOMEM;
+ 
+-      mutex_lock(&phydev->lock);
+-      phydev->drv->get_stats(phydev, &stats, data);
+-      mutex_unlock(&phydev->lock);
++              mutex_lock(&phydev->lock);
++              phydev->drv->get_stats(phydev, &stats, data);
++              mutex_unlock(&phydev->lock);
++      } else {
++              data = NULL;
++      }
+ 
+       ret = -EFAULT;
+       if (copy_to_user(useraddr, &stats, sizeof(stats)))
+               goto out;
+       useraddr += sizeof(stats);
+-      if (copy_to_user(useraddr, data, stats.n_stats * sizeof(u64)))
++      if (n_stats && copy_to_user(useraddr, data, n_stats * sizeof(u64)))
+               goto out;
+       ret = 0;
+ 
+diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
+index 04fd04ccaa04..4509dec7bd1c 100644
+--- a/net/core/net_namespace.c
++++ b/net/core/net_namespace.c
+@@ -282,6 +282,7 @@ static __net_init int setup_net(struct net *net, struct user_namespace *user_ns)
+ 
+       atomic_set(&net->count, 1);
+       atomic_set(&net->passive, 1);
++      get_random_bytes(&net->hash_mix, sizeof(u32));
+       net->dev_base_seq = 1;
+       net->user_ns = user_ns;
+       idr_init(&net->netns_ids);
+diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
+index bcadca26523b..ce0ce1401f28 100644
+--- a/net/ipv4/ip_input.c
++++ b/net/ipv4/ip_input.c
+@@ -259,11 +259,10 @@ int ip_local_deliver(struct sk_buff *skb)
+                      ip_local_deliver_finish);
+ }
+ 
+-static inline bool ip_rcv_options(struct sk_buff *skb)
++static inline bool ip_rcv_options(struct sk_buff *skb, struct net_device *dev)
+ {
+       struct ip_options *opt;
+       const struct iphdr *iph;
+-      struct net_device *dev = skb->dev;
+ 
+       /* It looks as overkill, because not all
+          IP options require packet mangling.
+@@ -299,7 +298,7 @@ static inline bool ip_rcv_options(struct sk_buff *skb)
+                       }
+               }
+ 
+-              if (ip_options_rcv_srr(skb))
++              if (ip_options_rcv_srr(skb, dev))
+                       goto drop;
+       }
+ 
+@@ -361,7 +360,7 @@ static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
+       }
+ #endif
+ 
+-      if (iph->ihl > 5 && ip_rcv_options(skb))
++      if (iph->ihl > 5 && ip_rcv_options(skb, dev))
+               goto drop;
+ 
+       rt = skb_rtable(skb);
+diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
+index 4cd3b5ad9cee..570cdb547234 100644
+--- a/net/ipv4/ip_options.c
++++ b/net/ipv4/ip_options.c
+@@ -614,7 +614,7 @@ void ip_forward_options(struct sk_buff *skb)
+       }
+ }
+ 
+-int ip_options_rcv_srr(struct sk_buff *skb)
++int ip_options_rcv_srr(struct sk_buff *skb, struct net_device *dev)
+ {
+       struct ip_options *opt = &(IPCB(skb)->opt);
+       int srrspace, srrptr;
+@@ -649,7 +649,7 @@ int ip_options_rcv_srr(struct sk_buff *skb)
+ 
+               orefdst = skb->_skb_refdst;
+               skb_dst_set(skb, NULL);
+-              err = ip_route_input(skb, nexthop, iph->saddr, iph->tos, skb->dev);
++              err = ip_route_input(skb, nexthop, iph->saddr, iph->tos, dev);
+               rt2 = skb_rtable(skb);
+               if (err || (rt2->rt_type != RTN_UNICAST && rt2->rt_type != RTN_LOCAL)) {
+                       skb_dst_drop(skb);
+diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c
+index a08cedf9d286..910ef01759e7 100644
+--- a/net/ipv4/tcp_dctcp.c
++++ b/net/ipv4/tcp_dctcp.c
+@@ -66,11 +66,6 @@ static unsigned int dctcp_alpha_on_init __read_mostly = DCTCP_MAX_ALPHA;
+ module_param(dctcp_alpha_on_init, uint, 0644);
+ MODULE_PARM_DESC(dctcp_alpha_on_init, "parameter for initial alpha value");
+ 
+-static unsigned int dctcp_clamp_alpha_on_loss __read_mostly;
+-module_param(dctcp_clamp_alpha_on_loss, uint, 0644);
+-MODULE_PARM_DESC(dctcp_clamp_alpha_on_loss,
+-               "parameter for clamping alpha on loss");
+-
+ static struct tcp_congestion_ops dctcp_reno;
+ 
+ static void dctcp_reset(const struct tcp_sock *tp, struct dctcp *ca)
+@@ -211,21 +206,23 @@ static void dctcp_update_alpha(struct sock *sk, u32 flags)
+       }
+ }
+ 
+-static void dctcp_state(struct sock *sk, u8 new_state)
++static void dctcp_react_to_loss(struct sock *sk)
+ {
+-      if (dctcp_clamp_alpha_on_loss && new_state == TCP_CA_Loss) {
+-              struct dctcp *ca = inet_csk_ca(sk);
++      struct dctcp *ca = inet_csk_ca(sk);
++      struct tcp_sock *tp = tcp_sk(sk);
+ 
+-              /* If this extension is enabled, we clamp dctcp_alpha to
+-               * max on packet loss; the motivation is that dctcp_alpha
+-               * is an indicator to the extend of congestion and packet
+-               * loss is an indicator of extreme congestion; setting
+-               * this in practice turned out to be beneficial, and
+-               * effectively assumes total congestion which reduces the
+-               * window by half.
+-               */
+-              ca->dctcp_alpha = DCTCP_MAX_ALPHA;
+-      }
++      ca->loss_cwnd = tp->snd_cwnd;
++      tp->snd_ssthresh = max(tp->snd_cwnd >> 1U, 2U);
++}
++
++static void dctcp_state(struct sock *sk, u8 new_state)
++{
++      if (new_state == TCP_CA_Recovery &&
++          new_state != inet_csk(sk)->icsk_ca_state)
++              dctcp_react_to_loss(sk);
++      /* We handle RTO in dctcp_cwnd_event to ensure that we perform only
++       * one loss-adjustment per RTT.
++       */
+ }
+ 
+ static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev)
+@@ -237,6 +234,9 @@ static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev)
+       case CA_EVENT_ECN_NO_CE:
+               dctcp_ce_state_1_to_0(sk);
+               break;
++      case CA_EVENT_LOSS:
++              dctcp_react_to_loss(sk);
++              break;
+       default:
+               /* Don't care for the rest. */
+               break;
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index b723987761be..11407dd6bc7c 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -592,7 +592,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
+                               inet6_sk(skb->sk) : NULL;
+       struct ipv6hdr *tmp_hdr;
+       struct frag_hdr *fh;
+-      unsigned int mtu, hlen, left, len;
++      unsigned int mtu, hlen, left, len, nexthdr_offset;
+       int hroom, troom;
+       __be32 frag_id;
+       int ptr, offset = 0, err = 0;
+@@ -603,6 +603,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
+               goto fail;
+       hlen = err;
+       nexthdr = *prevhdr;
++      nexthdr_offset = prevhdr - skb_network_header(skb);
+ 
+       mtu = ip6_skb_dst_mtu(skb);
+ 
+@@ -637,6 +638,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
+           (err = skb_checksum_help(skb)))
+               goto fail;
+ 
++      prevhdr = skb_network_header(skb) + nexthdr_offset;
+       hroom = LL_RESERVED_SPACE(rt->dst.dev);
+       if (skb_has_frag_list(skb)) {
+               int first_len = skb_pagelen(skb);
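
The nexthdr_offset dance above exists because skb_checksum_help() may reallocate the packet's header area, leaving prevhdr dangling; saving an offset and rebuilding the pointer from the fresh skb_network_header() is the standard cure. The same pattern in plain C (names illustrative):

    #include <stdlib.h>

    /* Sketch: keep an *offset* across a call that may reallocate the
     * buffer, then rebuild the pointer from the new base. */
    static int append(char **base, size_t *len, char **cursor, char c)
    {
            size_t off = *cursor - *base;           /* survives reallocation */
            char *n = realloc(*base, *len + 1);

            if (!n)
                    return -1;
            *base = n;
            *cursor = n + off;                      /* recompute from new base */
            n[(*len)++] = c;
            return 0;
    }
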
+diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
+index f89516d04150..42f363661d25 100644
+--- a/net/ipv6/ip6_tunnel.c
++++ b/net/ipv6/ip6_tunnel.c
+@@ -634,7 +634,7 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+                                          IPPROTO_IPIP,
+                                          RT_TOS(eiph->tos), 0);
+               if (IS_ERR(rt) ||
+-                  rt->dst.dev->type != ARPHRD_TUNNEL) {
++                  rt->dst.dev->type != ARPHRD_TUNNEL6) {
+                       if (!IS_ERR(rt))
+                               ip_rt_put(rt);
+                       goto out;
+@@ -644,7 +644,7 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+               ip_rt_put(rt);
+               if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos,
+                                  skb2->dev) ||
+-                  skb_dst(skb2)->dev->type != ARPHRD_TUNNEL)
++                  skb_dst(skb2)->dev->type != ARPHRD_TUNNEL6)
+                       goto out;
+       }
+ 
+diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
+index c9c6a5e829ab..be74eee0e8ff 100644
+--- a/net/ipv6/sit.c
++++ b/net/ipv6/sit.c
+@@ -661,6 +661,10 @@ static int ipip6_rcv(struct sk_buff *skb)
+                   !net_eq(tunnel->net, dev_net(tunnel->dev))))
+                       goto out;
+ 
++              /* skb can be uncloned in iptunnel_pull_header, so
++               * old iph is no longer valid
++               */
++              iph = (const struct iphdr *)skb_mac_header(skb);
+               err = IP_ECN_decapsulate(iph, skb);
+               if (unlikely(err)) {
+                       if (log_ecn_error)
+diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
+index 553d0ad4a2fa..2f3cd09ee0df 100644
+--- a/net/kcm/kcmsock.c
++++ b/net/kcm/kcmsock.c
+@@ -2058,14 +2058,14 @@ static int __init kcm_init(void)
+       if (err)
+               goto fail;
+ 
+-      err = sock_register(&kcm_family_ops);
+-      if (err)
+-              goto sock_register_fail;
+-
+       err = register_pernet_device(&kcm_net_ops);
+       if (err)
+               goto net_ops_fail;
+ 
++      err = sock_register(&kcm_family_ops);
++      if (err)
++              goto sock_register_fail;
++
+       err = kcm_proc_init();
+       if (err)
+               goto proc_init_fail;
+@@ -2073,12 +2073,12 @@ static int __init kcm_init(void)
+       return 0;
+ 
+ proc_init_fail:
+-      unregister_pernet_device(&kcm_net_ops);
+-
+-net_ops_fail:
+       sock_unregister(PF_KCM);
+ 
+ sock_register_fail:
++      unregister_pernet_device(&kcm_net_ops);
++
++net_ops_fail:
+       proto_unregister(&kcm_proto);
+ 
+ fail:
+@@ -2094,8 +2094,8 @@ fail:
+ static void __exit kcm_exit(void)
+ {
+       kcm_proc_exit();
+-      unregister_pernet_device(&kcm_net_ops);
+       sock_unregister(PF_KCM);
++      unregister_pernet_device(&kcm_net_ops);
+       proto_unregister(&kcm_proto);
+       destroy_workqueue(kcm_wq);
+ 
+diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
+index 3bd4d5d0c346..50ea76180afa 100644
+--- a/net/openvswitch/flow_netlink.c
++++ b/net/openvswitch/flow_netlink.c
+@@ -1853,14 +1853,14 @@ static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa,
+ 
+       struct sw_flow_actions *acts;
+       int new_acts_size;
+-      int req_size = NLA_ALIGN(attr_len);
++      size_t req_size = NLA_ALIGN(attr_len);
+       int next_offset = offsetof(struct sw_flow_actions, actions) +
+                                       (*sfa)->actions_len;
+ 
+       if (req_size <= (ksize(*sfa) - next_offset))
+               goto out;
+ 
+-      new_acts_size = ksize(*sfa) * 2;
++      new_acts_size = max(next_offset + req_size, ksize(*sfa) * 2);
+ 
+       if (new_acts_size > MAX_ACTIONS_BUFSIZE) {
+               if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size) {
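+
The resize fix above guards against a single oversized request: doubling ksize(*sfa) alone could still come up short, so the new size must be at least next_offset + req_size. As a pure function (sketch):

    #include <stddef.h>

    static size_t next_buf_size(size_t cur, size_t need)
    {
            size_t doubled = cur * 2;

            return doubled >= need ? doubled : need; /* grow, but always enough */
    }
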
+diff --git a/net/rds/tcp.c b/net/rds/tcp.c
+index d36effbf7614..2daba5316caa 100644
+--- a/net/rds/tcp.c
++++ b/net/rds/tcp.c
+@@ -527,7 +527,7 @@ static void rds_tcp_kill_sock(struct net *net)
+       list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
+               struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net);
+ 
+-              if (net != c_net || !tc->t_sock)
++              if (net != c_net)
+                       continue;
+               if (!list_has_conn(&tmp_list, tc->t_cpath->cp_conn)) {
+                       list_move_tail(&tc->t_tcp_node, &tmp_list);
+diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
+index 8ea8217db960..d6af93a24aa0 100644
+--- a/net/sctp/protocol.c
++++ b/net/sctp/protocol.c
+@@ -600,6 +600,7 @@ out:
+ static int sctp_v4_addr_to_user(struct sctp_sock *sp, union sctp_addr *addr)
+ {
+       /* No address mapping for V4 sockets */
++      memset(addr->v4.sin_zero, 0, sizeof(addr->v4.sin_zero));
+       return sizeof(struct sockaddr_in);
+ }
+ 
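
The one-line memset closes an infoleak: the sin_zero padding was previously copied to userspace uninitialized. The same sanitization in isolation (userspace types):

    #include <string.h>
    #include <netinet/in.h>

    /* Zero the padding before the struct ever leaves our control. */
    static void sanitize_sockaddr(struct sockaddr_in *sin)
    {
            memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
    }
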
+diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
+index 965473d4129c..09491b27092e 100644
+--- a/sound/core/seq/seq_clientmgr.c
++++ b/sound/core/seq/seq_clientmgr.c
+@@ -1249,7 +1249,7 @@ static int snd_seq_ioctl_set_client_info(struct snd_seq_client *client,
+ 
+       /* fill the info fields */
+       if (client_info->name[0])
+-              strlcpy(client->name, client_info->name, sizeof(client->name));
++              strscpy(client->name, client_info->name, sizeof(client->name));
+ 
+       client->filter = client_info->filter;
+       client->event_lost = client_info->event_lost;
+@@ -1527,7 +1527,7 @@ static int snd_seq_ioctl_create_queue(struct snd_seq_client *client, void *arg)
+       /* set queue name */
+       if (!info->name[0])
+               snprintf(info->name, sizeof(info->name), "Queue-%d", q->queue);
+-      strlcpy(q->name, info->name, sizeof(q->name));
++      strscpy(q->name, info->name, sizeof(q->name));
+       snd_use_lock_free(&q->use_lock);
+ 
+       return 0;
+@@ -1589,7 +1589,7 @@ static int snd_seq_ioctl_set_queue_info(struct snd_seq_client *client,
+               queuefree(q);
+               return -EPERM;
+       }
+-      strlcpy(q->name, info->name, sizeof(q->name));
++      strscpy(q->name, info->name, sizeof(q->name));
+       queuefree(q);
+ 
+       return 0;
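The seq_clientmgr hunks swap strlcpy() for strscpy(). The difference matters because strlcpy() returns strlen(src) and therefore keeps reading the source past the destination bound, while strscpy() stops at the bound and signals truncation with -E2BIG. The simplified userland re-implementation below shows those semantics; it is a sketch, not the kernel's version:

#include <stdio.h>

#define E2BIG_SENTINEL (-7)     /* stand-in for the kernel's -E2BIG */

/* Copy at most size-1 chars, always NUL-terminate, never read src
 * beyond what fits; report truncation instead of the source length. */
static long my_strscpy(char *dst, const char *src, size_t size)
{
        size_t i;

        if (size == 0)
                return E2BIG_SENTINEL;
        for (i = 0; i < size - 1 && src[i]; i++)
                dst[i] = src[i];
        dst[i] = '\0';
        return src[i] ? E2BIG_SENTINEL : (long)i;
}

int main(void)
{
        char name[8];
        long ret = my_strscpy(name, "a-very-long-client-name", sizeof(name));

        printf("ret=%ld copied=\"%s\"\n", ret, name);   /* -7, "a-very-" */
        return 0;
}

Returning an error on truncation, rather than silently reporting the would-be length, is why the kernel has been migrating callers like these to strscpy().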
+diff --git a/sound/soc/fsl/fsl_esai.c b/sound/soc/fsl/fsl_esai.c
+index 23ab0d169c11..fa64cc2b1729 100644
+--- a/sound/soc/fsl/fsl_esai.c
++++ b/sound/soc/fsl/fsl_esai.c
+@@ -59,6 +59,8 @@ struct fsl_esai {
+       u32 fifo_depth;
+       u32 slot_width;
+       u32 slots;
++      u32 tx_mask;
++      u32 rx_mask;
+       u32 hck_rate[2];
+       u32 sck_rate[2];
+       bool hck_dir[2];
+@@ -359,21 +361,13 @@ static int fsl_esai_set_dai_tdm_slot(struct snd_soc_dai *dai, u32 tx_mask,
+       regmap_update_bits(esai_priv->regmap, REG_ESAI_TCCR,
+                          ESAI_xCCR_xDC_MASK, ESAI_xCCR_xDC(slots));
+ 
+-      regmap_update_bits(esai_priv->regmap, REG_ESAI_TSMA,
+-                         ESAI_xSMA_xS_MASK, ESAI_xSMA_xS(tx_mask));
+-      regmap_update_bits(esai_priv->regmap, REG_ESAI_TSMB,
+-                         ESAI_xSMB_xS_MASK, ESAI_xSMB_xS(tx_mask));
+-
+       regmap_update_bits(esai_priv->regmap, REG_ESAI_RCCR,
+                          ESAI_xCCR_xDC_MASK, ESAI_xCCR_xDC(slots));
+ 
+-      regmap_update_bits(esai_priv->regmap, REG_ESAI_RSMA,
+-                         ESAI_xSMA_xS_MASK, ESAI_xSMA_xS(rx_mask));
+-      regmap_update_bits(esai_priv->regmap, REG_ESAI_RSMB,
+-                         ESAI_xSMB_xS_MASK, ESAI_xSMB_xS(rx_mask));
+-
+       esai_priv->slot_width = slot_width;
+       esai_priv->slots = slots;
++      esai_priv->tx_mask = tx_mask;
++      esai_priv->rx_mask = rx_mask;
+ 
+       return 0;
+ }
+@@ -594,6 +588,7 @@ static int fsl_esai_trigger(struct snd_pcm_substream *substream, int cmd,
+       bool tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
+       u8 i, channels = substream->runtime->channels;
+       u32 pins = DIV_ROUND_UP(channels, esai_priv->slots);
++      u32 mask;
+ 
+       switch (cmd) {
+       case SNDRV_PCM_TRIGGER_START:
+@@ -606,15 +601,38 @@ static int fsl_esai_trigger(struct snd_pcm_substream *substream, int cmd,
+               for (i = 0; tx && i < channels; i++)
+                       regmap_write(esai_priv->regmap, REG_ESAI_ETDR, 0x0);
+ 
++              /*
++               * When set the TE/RE in the end of enablement flow, there
++               * will be channel swap issue for multi data line case.
++               * In order to workaround this issue, we switch the bit
++               * enablement sequence to below sequence
++               * 1) clear the xSMB & xSMA: which is done in probe and
++               *                           stop state.
++               * 2) set TE/RE
++               * 3) set xSMB
++               * 4) set xSMA:  xSMA is the last one in this flow, which
++               *               will trigger esai to start.
++               */
+               regmap_update_bits(esai_priv->regmap, REG_ESAI_xCR(tx),
+                                  tx ? ESAI_xCR_TE_MASK : ESAI_xCR_RE_MASK,
+                                  tx ? ESAI_xCR_TE(pins) : ESAI_xCR_RE(pins));
++              mask = tx ? esai_priv->tx_mask : esai_priv->rx_mask;
++
++              regmap_update_bits(esai_priv->regmap, REG_ESAI_xSMB(tx),
++                                 ESAI_xSMB_xS_MASK, ESAI_xSMB_xS(mask));
++              regmap_update_bits(esai_priv->regmap, REG_ESAI_xSMA(tx),
++                                 ESAI_xSMA_xS_MASK, ESAI_xSMA_xS(mask));
++
+               break;
+       case SNDRV_PCM_TRIGGER_SUSPEND:
+       case SNDRV_PCM_TRIGGER_STOP:
+       case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+               regmap_update_bits(esai_priv->regmap, REG_ESAI_xCR(tx),
+                                  tx ? ESAI_xCR_TE_MASK : ESAI_xCR_RE_MASK, 0);
++              regmap_update_bits(esai_priv->regmap, REG_ESAI_xSMA(tx),
++                                 ESAI_xSMA_xS_MASK, 0);
++              regmap_update_bits(esai_priv->regmap, REG_ESAI_xSMB(tx),
++                                 ESAI_xSMB_xS_MASK, 0);
+ 
+               /* Disable and reset FIFO */
+               regmap_update_bits(esai_priv->regmap, REG_ESAI_xFCR(tx),
+@@ -904,6 +922,15 @@ static int fsl_esai_probe(struct platform_device *pdev)
+               return ret;
+       }
+ 
++      esai_priv->tx_mask = 0xFFFFFFFF;
++      esai_priv->rx_mask = 0xFFFFFFFF;
++
++      /* Clear the TSMA, TSMB, RSMA, RSMB */
++      regmap_write(esai_priv->regmap, REG_ESAI_TSMA, 0);
++      regmap_write(esai_priv->regmap, REG_ESAI_TSMB, 0);
++      regmap_write(esai_priv->regmap, REG_ESAI_RSMA, 0);
++      regmap_write(esai_priv->regmap, REG_ESAI_RSMB, 0);
++
+       ret = devm_snd_soc_register_component(&pdev->dev, &fsl_esai_component,
+                                             &fsl_esai_dai, 1);
+       if (ret) {
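The fsl_esai changes defer the TSMA/TSMB/RSMA/RSMB writes from set_dai_tdm_slot() into the trigger path, so the slot masks are applied only after TE/RE is set, with xSMA written last to actually start the ESAI; stop goes the other way and parks the masks at zero, matching the cleared state the probe hunk establishes. The toy simulation below just prints that register order; reg_write() is a hypothetical stand-in for regmap_update_bits() and the register names are abbreviated:

#include <stdint.h>
#include <stdio.h>

static void reg_write(const char *reg, uint32_t val)
{
        printf("%-4s <= 0x%08x\n", reg, (unsigned int)val);
}

static void esai_start(uint32_t enable_bits, uint32_t slot_mask)
{
        reg_write("xCR", enable_bits);          /* 1) set TE/RE first */
        reg_write("xSMB", slot_mask >> 16);     /* 2) upper half of the mask */
        reg_write("xSMA", slot_mask & 0xffff);  /* 3) lower half last: this
                                                 *    write starts the ESAI */
}

static void esai_stop(void)
{
        reg_write("xCR", 0);    /* disable TE/RE */
        reg_write("xSMA", 0);   /* park the slot masks so the next start */
        reg_write("xSMB", 0);   /* begins from the cleared state above */
}

int main(void)
{
        esai_start(0x3, 0xFFFFFFFFu);
        esai_stop();
        return 0;
}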
