diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index a36a695318c6..f9f67be8d3c3 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1084,12 +1084,6 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
        nopku           [X86] Disable Memory Protection Keys CPU feature found
                        in some Intel CPUs.
 
-       eagerfpu=       [X86]
-                       on      enable eager fpu restore
-                       off     disable eager fpu restore
-                       auto    selects the default scheme, which automatically
-                               enables eagerfpu restore for xsaveopt.
-
        module.async_probe [KNL]
                        Enable asynchronous probe on this module.
 
diff --git a/Makefile b/Makefile
index a46c9788ca67..18090f899a7c 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 9
-SUBLEVEL = 132
+SUBLEVEL = 133
 EXTRAVERSION =
 NAME = Roaring Lionus
 
diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c
index 0e8c0151a390..3ce12137f94f 100644
--- a/arch/arc/kernel/process.c
+++ b/arch/arc/kernel/process.c
@@ -213,6 +213,26 @@ int copy_thread(unsigned long clone_flags,
                task_thread_info(current)->thr_ptr;
        }
 
+
+       /*
+        * setup usermode thread pointer #1:
+        * when child is picked by scheduler, __switch_to() uses @c_callee to
+        * populate usermode callee regs: this works (despite being in a kernel
+        * function) since special return path for child @ret_from_fork()
+        * ensures those regs are not clobbered all the way to RTIE to usermode
+        */
+       c_callee->r25 = task_thread_info(p)->thr_ptr;
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+       /*
+        * setup usermode thread pointer #2:
+        * however for this special use of r25 in kernel, __switch_to() sets
+        * r25 for kernel needs and only in the final return path is usermode
+        * r25 setup, from pt_regs->user_r25. So set that up as well
+        */
+       c_regs->user_r25 = c_callee->r25;
+#endif
+
        return 0;
 }
 
diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
index e3acf5c3480e..02925043575a 100644
--- a/arch/powerpc/kernel/fadump.c
+++ b/arch/powerpc/kernel/fadump.c
@@ -365,9 +365,9 @@ static int __init early_fadump_reserve_mem(char *p)
 }
 early_param("fadump_reserve_mem", early_fadump_reserve_mem);
 
-static void register_fw_dump(struct fadump_mem_struct *fdm)
+static int register_fw_dump(struct fadump_mem_struct *fdm)
 {
-       int rc;
+       int rc, err;
        unsigned int wait_time;
 
        pr_debug("Registering for firmware-assisted kernel dump...\n");
@@ -384,7 +384,11 @@ static void register_fw_dump(struct fadump_mem_struct *fdm)
 
        } while (wait_time);
 
+       err = -EIO;
        switch (rc) {
+       default:
+               pr_err("Failed to register. Unknown Error(%d).\n", rc);
+               break;
        case -1:
                printk(KERN_ERR "Failed to register firmware-assisted kernel"
                        " dump. Hardware Error(%d).\n", rc);
@@ -392,18 +396,22 @@ static void register_fw_dump(struct fadump_mem_struct *fdm)
        case -3:
                printk(KERN_ERR "Failed to register firmware-assisted kernel"
                        " dump. Parameter Error(%d).\n", rc);
+               err = -EINVAL;
                break;
        case -9:
                printk(KERN_ERR "firmware-assisted kernel dump is already "
                        " registered.");
                fw_dump.dump_registered = 1;
+               err = -EEXIST;
                break;
        case 0:
                printk(KERN_INFO "firmware-assisted kernel dump registration"
                        " is successful\n");
                fw_dump.dump_registered = 1;
+               err = 0;
                break;
        }
+       return err;
 }
 
 void crash_fadump(struct pt_regs *regs, const char *str)
@@ -1006,7 +1014,7 @@ static unsigned long init_fadump_header(unsigned long addr)
        return addr;
 }
 
-static void register_fadump(void)
+static int register_fadump(void)
 {
        unsigned long addr;
        void *vaddr;
@@ -1017,7 +1025,7 @@ static void register_fadump(void)
         * assisted dump.
         */
        if (!fw_dump.reserve_dump_area_size)
-               return;
+               return -ENODEV;
 
        ret = fadump_setup_crash_memory_ranges();
        if (ret)
@@ -1032,7 +1040,7 @@ static void register_fadump(void)
        fadump_create_elfcore_headers(vaddr);
 
        /* register the future kernel dump with firmware. */
-       register_fw_dump(&fdm);
+       return register_fw_dump(&fdm);
 }
 
 static int fadump_unregister_dump(struct fadump_mem_struct *fdm)
@@ -1218,7 +1226,6 @@ static ssize_t fadump_register_store(struct kobject *kobj,
        switch (buf[0]) {
        case '0':
                if (fw_dump.dump_registered == 0) {
-                       ret = -EINVAL;
                        goto unlock_out;
                }
                /* Un-register Firmware-assisted dump */
@@ -1226,11 +1233,11 @@ static ssize_t fadump_register_store(struct kobject *kobj,
                break;
        case '1':
                if (fw_dump.dump_registered == 1) {
-                       ret = -EINVAL;
+                       ret = -EEXIST;
                        goto unlock_out;
                }
                /* Register Firmware-assisted dump */
-               register_fadump();
+               ret = register_fadump();
                break;
        default:
                ret = -EINVAL;
diff --git a/arch/x86/crypto/crc32c-intel_glue.c b/arch/x86/crypto/crc32c-intel_glue.c
index dd1958436591..5773e1161072 100644
--- a/arch/x86/crypto/crc32c-intel_glue.c
+++ b/arch/x86/crypto/crc32c-intel_glue.c
@@ -48,21 +48,13 @@
 #ifdef CONFIG_X86_64
 /*
  * use carryless multiply version of crc32c when buffer
- * size is >= 512 (when eager fpu is enabled) or
- * >= 1024 (when eager fpu is disabled) to account
+ * size is >= 512 to account
  * for fpu state save/restore overhead.
  */
-#define CRC32C_PCL_BREAKEVEN_EAGERFPU  512
-#define CRC32C_PCL_BREAKEVEN_NOEAGERFPU        1024
+#define CRC32C_PCL_BREAKEVEN   512
 
 asmlinkage unsigned int crc_pcl(const u8 *buffer, int len,
                                unsigned int crc_init);
-static int crc32c_pcl_breakeven = CRC32C_PCL_BREAKEVEN_EAGERFPU;
-#define set_pcl_breakeven_point()                                      \
-do {                                                                   \
-       if (!use_eager_fpu())                                           \
-               crc32c_pcl_breakeven = CRC32C_PCL_BREAKEVEN_NOEAGERFPU; \
-} while (0)
 #endif /* CONFIG_X86_64 */
 
 static u32 crc32c_intel_le_hw_byte(u32 crc, unsigned char const *data, size_t length)
@@ -185,7 +177,7 @@ static int crc32c_pcl_intel_update(struct shash_desc *desc, const u8 *data,
         * use faster PCL version if datasize is large enough to
         * overcome kernel fpu state save/restore overhead
         */
-       if (len >= crc32c_pcl_breakeven && irq_fpu_usable()) {
+       if (len >= CRC32C_PCL_BREAKEVEN && irq_fpu_usable()) {
                kernel_fpu_begin();
                *crcp = crc_pcl(data, len, *crcp);
                kernel_fpu_end();
@@ -197,7 +189,7 @@ static int crc32c_pcl_intel_update(struct shash_desc *desc, const u8 *data,
 static int __crc32c_pcl_intel_finup(u32 *crcp, const u8 *data, unsigned int len,
                                u8 *out)
 {
-       if (len >= crc32c_pcl_breakeven && irq_fpu_usable()) {
+       if (len >= CRC32C_PCL_BREAKEVEN && irq_fpu_usable()) {
                kernel_fpu_begin();
                *(__le32 *)out = ~cpu_to_le32(crc_pcl(data, len, *crcp));
                kernel_fpu_end();
@@ -257,7 +249,6 @@ static int __init crc32c_intel_mod_init(void)
                alg.update = crc32c_pcl_intel_update;
                alg.finup = crc32c_pcl_intel_finup;
                alg.digest = crc32c_pcl_intel_digest;
-               set_pcl_breakeven_point();
        }
 #endif
        return crypto_register_shash(&alg);
diff --git a/arch/x86/entry/vdso/vclock_gettime.c b/arch/x86/entry/vdso/vclock_gettime.c
index 02223cb4bcfd..1e967099ae51 100644
--- a/arch/x86/entry/vdso/vclock_gettime.c
+++ b/arch/x86/entry/vdso/vclock_gettime.c
@@ -37,8 +37,9 @@ extern u8 pvclock_page
 notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
 {
        long ret;
-       asm("syscall" : "=a" (ret) :
-           "0" (__NR_clock_gettime), "D" (clock), "S" (ts) : "memory");
+       asm ("syscall" : "=a" (ret), "=m" (*ts) :
+            "0" (__NR_clock_gettime), "D" (clock), "S" (ts) :
+            "memory", "rcx", "r11");
        return ret;
 }
 
@@ -46,8 +47,9 @@ notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
 {
        long ret;
 
-       asm("syscall" : "=a" (ret) :
-           "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
+       asm ("syscall" : "=a" (ret), "=m" (*tv), "=m" (*tz) :
+            "0" (__NR_gettimeofday), "D" (tv), "S" (tz) :
+            "memory", "rcx", "r11");
        return ret;
 }
 
@@ -58,13 +60,13 @@ notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
 {
        long ret;
 
-       asm(
+       asm (
                "mov %%ebx, %%edx \n"
-               "mov %2, %%ebx \n"
+               "mov %[clock], %%ebx \n"
                "call __kernel_vsyscall \n"
                "mov %%edx, %%ebx \n"
-               : "=a" (ret)
-               : "0" (__NR_clock_gettime), "g" (clock), "c" (ts)
+               : "=a" (ret), "=m" (*ts)
+               : "0" (__NR_clock_gettime), [clock] "g" (clock), "c" (ts)
                : "memory", "edx");
        return ret;
 }
@@ -73,13 +75,13 @@ notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
 {
        long ret;
 
-       asm(
+       asm (
                "mov %%ebx, %%edx \n"
-               "mov %2, %%ebx \n"
+               "mov %[tv], %%ebx \n"
                "call __kernel_vsyscall \n"
                "mov %%edx, %%ebx \n"
-               : "=a" (ret)
-               : "0" (__NR_gettimeofday), "g" (tv), "c" (tz)
+               : "=a" (ret), "=m" (*tv), "=m" (*tz)
+               : "0" (__NR_gettimeofday), [tv] "g" (tv), "c" (tz)
                : "memory", "edx");
        return ret;
 }
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index fbc1474960e3..f6d1bc93589c 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -104,7 +104,6 @@
 #define X86_FEATURE_EXTD_APICID        ( 3*32+26) /* has extended APICID (8 bits) */
 #define X86_FEATURE_AMD_DCM     ( 3*32+27) /* multi-node processor */
 #define X86_FEATURE_APERFMPERF ( 3*32+28) /* APERFMPERF */
-/* free, was #define X86_FEATURE_EAGER_FPU     ( 3*32+29) * "eagerfpu" Non lazy FPU restore */
 #define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */
 
 /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index 8554f960e21b..25152843dd1f 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -14,6 +14,16 @@
 #ifndef _ASM_X86_FIXMAP_H
 #define _ASM_X86_FIXMAP_H
 
+/*
+ * Exposed to assembly code for setting up initial page tables. Cannot be
+ * calculated in assembly code (fixmap entries are an enum), but is sanity
+ * checked in the actual fixmap C code to make sure that the fixmap is
+ * covered fully.
+ */
+#define FIXMAP_PMD_NUM 2
+/* fixmap starts downwards from the 507th entry in level2_fixmap_pgt */
+#define FIXMAP_PMD_TOP 507
+
 #ifndef __ASSEMBLY__
 #include <linux/kernel.h>
 #include <asm/acpi.h>
diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
index 8852e3afa1ad..499d6ed0e376 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -60,11 +60,6 @@ extern u64 fpu__get_supported_xfeatures_mask(void);
 /*
  * FPU related CPU feature flag helper routines:
  */
-static __always_inline __pure bool use_eager_fpu(void)
-{
-       return true;
-}
-
 static __always_inline __pure bool use_xsaveopt(void)
 {
        return static_cpu_has(X86_FEATURE_XSAVEOPT);
@@ -501,24 +496,6 @@ static inline int fpu_want_lazy_restore(struct fpu *fpu, unsigned int cpu)
 }
 
 
-/*
- * Wrap lazy FPU TS handling in a 'hw fpregs activation/deactivation'
- * idiom, which is then paired with the sw-flag (fpregs_active) later on:
- */
-
-static inline void __fpregs_activate_hw(void)
-{
-       if (!use_eager_fpu())
-               clts();
-}
-
-static inline void __fpregs_deactivate_hw(void)
-{
-       if (!use_eager_fpu())
-               stts();
-}
-
-/* Must be paired with an 'stts' (fpregs_deactivate_hw()) after! */
 static inline void __fpregs_deactivate(struct fpu *fpu)
 {
        WARN_ON_FPU(!fpu->fpregs_active);
@@ -528,7 +505,6 @@ static inline void __fpregs_deactivate(struct fpu *fpu)
        trace_x86_fpu_regs_deactivated(fpu);
 }
 
-/* Must be paired with a 'clts' (fpregs_activate_hw()) before! */
 static inline void __fpregs_activate(struct fpu *fpu)
 {
        WARN_ON_FPU(fpu->fpregs_active);
@@ -554,22 +530,17 @@ static inline int fpregs_active(void)
 }
 
 /*
- * Encapsulate the CR0.TS handling together with the
- * software flag.
- *
  * These generally need preemption protection to work,
  * do try to avoid using these on their own.
  */
 static inline void fpregs_activate(struct fpu *fpu)
 {
-       __fpregs_activate_hw();
        __fpregs_activate(fpu);
 }
 
 static inline void fpregs_deactivate(struct fpu *fpu)
 {
        __fpregs_deactivate(fpu);
-       __fpregs_deactivate_hw();
 }
 
 /*
@@ -596,8 +567,7 @@ switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu)
         * or if the past 5 consecutive context-switches used math.
         */
        fpu.preload = static_cpu_has(X86_FEATURE_FPU) &&
-                     new_fpu->fpstate_active &&
-                     (use_eager_fpu() || new_fpu->counter > 5);
+                     new_fpu->fpstate_active;
 
        if (old_fpu->fpregs_active) {
                if (!copy_fpregs_to_fpstate(old_fpu))
@@ -611,18 +581,13 @@ switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu)
 
                /* Don't change CR0.TS if we just switch! */
                if (fpu.preload) {
-                       new_fpu->counter++;
                        __fpregs_activate(new_fpu);
                        trace_x86_fpu_regs_activated(new_fpu);
                        prefetch(&new_fpu->state);
-               } else {
-                       __fpregs_deactivate_hw();
                }
        } else {
-               old_fpu->counter = 0;
                old_fpu->last_cpu = -1;
                if (fpu.preload) {
-                       new_fpu->counter++;
                        if (fpu_want_lazy_restore(new_fpu, cpu))
                                fpu.preload = 0;
                        else
diff --git a/arch/x86/include/asm/fpu/types.h b/arch/x86/include/asm/fpu/types.h
index 48df486b02f9..3c80f5b9c09d 100644
--- a/arch/x86/include/asm/fpu/types.h
+++ b/arch/x86/include/asm/fpu/types.h
@@ -321,17 +321,6 @@ struct fpu {
         */
        unsigned char                   fpregs_active;
 
-       /*
-        * @counter:
-        *
-        * This counter contains the number of consecutive context switches
-        * during which the FPU stays used. If this is over a threshold, the
-        * lazy FPU restore logic becomes eager, to save the trap overhead.
-        * This is an unsigned char so that after 256 iterations the counter
-        * wraps and the context switch behavior turns lazy again; this is to
-        * deal with bursty apps that only use the FPU for a short time:
-        */
-       unsigned char                   counter;
        /*
         * @state:
         *
@@ -340,29 +329,6 @@ struct fpu {
         * the registers in the FPU are more recent than this state
         * copy. If the task context-switches away then they get
         * saved here and represent the FPU state.
-        *
-        * After context switches there may be a (short) time period
-        * during which the in-FPU hardware registers are unchanged
-        * and still perfectly match this state, if the tasks
-        * scheduled afterwards are not using the FPU.
-        *
-        * This is the 'lazy restore' window of optimization, which
-        * we track though 'fpu_fpregs_owner_ctx' and 'fpu->last_cpu'.
-        *
-        * We detect whether a subsequent task uses the FPU via setting
-        * CR0::TS to 1, which causes any FPU use to raise a #NM fault.
-        *
-        * During this window, if the task gets scheduled again, we
-        * might be able to skip having to do a restore from this
-        * memory buffer to the hardware registers - at the cost of
-        * incurring the overhead of #NM fault traps.
-        *
-        * Note that on modern CPUs that support the XSAVEOPT (or other
-        * optimized XSAVE instructions), we don't use #NM traps anymore,
-        * as the hardware can track whether FPU registers need saving
-        * or not. On such CPUs we activate the non-lazy ('eagerfpu')
-        * logic, which unconditionally saves/restores all FPU state
-        * across context switches. (if FPU state exists.)
         */
        union fpregs_state              state;
        /*
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
index 221a32ed1372..d5c4df98aac3 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -13,13 +13,14 @@
 #include <asm/processor.h>
 #include <linux/bitops.h>
 #include <linux/threads.h>
+#include <asm/fixmap.h>
 
 extern pud_t level3_kernel_pgt[512];
 extern pud_t level3_ident_pgt[512];
 extern pmd_t level2_kernel_pgt[512];
 extern pmd_t level2_fixmap_pgt[512];
 extern pmd_t level2_ident_pgt[512];
-extern pte_t level1_fixmap_pgt[512];
+extern pte_t level1_fixmap_pgt[512 * FIXMAP_PMD_NUM];
 extern pgd_t init_level4_pgt[];
 
 #define swapper_pg_dir init_level4_pgt
diff --git a/arch/x86/include/asm/trace/fpu.h b/arch/x86/include/asm/trace/fpu.h
index 9217ab1f5bf6..342e59789fcd 100644
--- a/arch/x86/include/asm/trace/fpu.h
+++ b/arch/x86/include/asm/trace/fpu.h
@@ -14,7 +14,6 @@ DECLARE_EVENT_CLASS(x86_fpu,
                __field(struct fpu *, fpu)
                __field(bool, fpregs_active)
                __field(bool, fpstate_active)
-               __field(int, counter)
                __field(u64, xfeatures)
                __field(u64, xcomp_bv)
                ),
@@ -23,17 +22,15 @@ DECLARE_EVENT_CLASS(x86_fpu,
                __entry->fpu            = fpu;
                __entry->fpregs_active  = fpu->fpregs_active;
                __entry->fpstate_active = fpu->fpstate_active;
-               __entry->counter        = fpu->counter;
                if (boot_cpu_has(X86_FEATURE_OSXSAVE)) {
                        __entry->xfeatures = fpu->state.xsave.header.xfeatures;
                        __entry->xcomp_bv  = fpu->state.xsave.header.xcomp_bv;
                }
        ),
-       TP_printk("x86/fpu: %p fpregs_active: %d fpstate_active: %d counter: %d xfeatures: %llx xcomp_bv: %llx",
+       TP_printk("x86/fpu: %p fpregs_active: %d fpstate_active: %d xfeatures: %llx xcomp_bv: %llx",
                        __entry->fpu,
                        __entry->fpregs_active,
                        __entry->fpstate_active,
-                       __entry->counter,
                        __entry->xfeatures,
                        __entry->xcomp_bv
        )
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index 430c095cfa0e..fc965118d2e6 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -59,27 +59,9 @@ static bool kernel_fpu_disabled(void)
        return this_cpu_read(in_kernel_fpu);
 }
 
-/*
- * Were we in an interrupt that interrupted kernel mode?
- *
- * On others, we can do a kernel_fpu_begin/end() pair *ONLY* if that
- * pair does nothing at all: the thread must not have fpu (so
- * that we don't try to save the FPU state), and TS must
- * be set (so that the clts/stts pair does nothing that is
- * visible in the interrupted kernel thread).
- *
- * Except for the eagerfpu case when we return true; in the likely case
- * the thread has FPU but we are not going to set/clear TS.
- */
 static bool interrupted_kernel_fpu_idle(void)
 {
-       if (kernel_fpu_disabled())
-               return false;
-
-       if (use_eager_fpu())
-               return true;
-
-       return !current->thread.fpu.fpregs_active && (read_cr0() & X86_CR0_TS);
+       return !kernel_fpu_disabled();
 }
 
 /*
@@ -127,7 +109,6 @@ void __kernel_fpu_begin(void)
                copy_fpregs_to_fpstate(fpu);
        } else {
                this_cpu_write(fpu_fpregs_owner_ctx, NULL);
-               __fpregs_activate_hw();
        }
 }
 EXPORT_SYMBOL(__kernel_fpu_begin);
@@ -138,8 +119,6 @@ void __kernel_fpu_end(void)
 
        if (fpu->fpregs_active)
                copy_kernel_to_fpregs(&fpu->state);
-       else
-               __fpregs_deactivate_hw();
 
        kernel_fpu_enable();
 }
@@ -201,10 +180,7 @@ void fpu__save(struct fpu *fpu)
        trace_x86_fpu_before_save(fpu);
        if (fpu->fpregs_active) {
                if (!copy_fpregs_to_fpstate(fpu)) {
-                       if (use_eager_fpu())
-                               copy_kernel_to_fpregs(&fpu->state);
-                       else
-                               fpregs_deactivate(fpu);
+                       copy_kernel_to_fpregs(&fpu->state);
                }
        }
        trace_x86_fpu_after_save(fpu);
@@ -249,7 +225,6 @@ EXPORT_SYMBOL_GPL(fpstate_init);
 
 int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
 {
-       dst_fpu->counter = 0;
        dst_fpu->fpregs_active = 0;
        dst_fpu->last_cpu = -1;
 
@@ -262,8 +237,7 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
         * Don't let 'init optimized' areas of the XSAVE area
         * leak into the child task:
         */
-       if (use_eager_fpu())
-               memset(&dst_fpu->state.xsave, 0, fpu_kernel_xstate_size);
+       memset(&dst_fpu->state.xsave, 0, fpu_kernel_xstate_size);
 
        /*
         * Save current FPU registers directly into the child
@@ -285,10 +259,7 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
                memcpy(&src_fpu->state, &dst_fpu->state,
                       fpu_kernel_xstate_size);
 
-               if (use_eager_fpu())
-                       copy_kernel_to_fpregs(&src_fpu->state);
-               else
-                       fpregs_deactivate(src_fpu);
+               copy_kernel_to_fpregs(&src_fpu->state);
        }
        preempt_enable();
 
@@ -461,7 +432,6 @@ void fpu__restore(struct fpu *fpu)
        trace_x86_fpu_before_restore(fpu);
        fpregs_activate(fpu);
        copy_kernel_to_fpregs(&fpu->state);
-       fpu->counter++;
        trace_x86_fpu_after_restore(fpu);
        kernel_fpu_enable();
 }
@@ -479,7 +449,6 @@ EXPORT_SYMBOL_GPL(fpu__restore);
 void fpu__drop(struct fpu *fpu)
 {
        preempt_disable();
-       fpu->counter = 0;
 
        if (fpu->fpregs_active) {
                /* Ignore delayed exceptions from user space */
diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c
index 3ec0d2d64601..3a9318610c4d 100644
--- a/arch/x86/kernel/fpu/signal.c
+++ b/arch/x86/kernel/fpu/signal.c
@@ -344,11 +344,9 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
                }
 
                fpu->fpstate_active = 1;
-               if (use_eager_fpu()) {
-                       preempt_disable();
-                       fpu__restore(fpu);
-                       preempt_enable();
-               }
+               preempt_disable();
+               fpu__restore(fpu);
+               preempt_enable();
 
                return err;
        } else {
diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
index abfbb61b18b8..e9d7f461b7fa 100644
--- a/arch/x86/kernel/fpu/xstate.c
+++ b/arch/x86/kernel/fpu/xstate.c
@@ -890,15 +890,6 @@ int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
         */
        if (!boot_cpu_has(X86_FEATURE_OSPKE))
                return -EINVAL;
-       /*
-        * For most XSAVE components, this would be an arduous task:
-        * brining fpstate up to date with fpregs, updating fpstate,
-        * then re-populating fpregs.  But, for components that are
-        * never lazily managed, we can just access the fpregs
-        * directly.  PKRU is never managed lazily, so we can just
-        * manipulate it directly.  Make sure it stays that way.
-        */
-       WARN_ON_ONCE(!use_eager_fpu());
 
        /* Set the bits we need in PKRU:  */
        if (init_val & PKEY_DISABLE_ACCESS)
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 9d72cf547c88..b0d6697ab153 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -23,6 +23,7 @@
 #include "../entry/calling.h"
 #include <asm/export.h>
 #include <asm/nospec-branch.h>
+#include <asm/fixmap.h>
 
 #ifdef CONFIG_PARAVIRT
 #include <asm/asm-offsets.h>
@@ -493,13 +494,20 @@ NEXT_PAGE(level2_kernel_pgt)
                KERNEL_IMAGE_SIZE/PMD_SIZE)
 
 NEXT_PAGE(level2_fixmap_pgt)
-       .fill   506,8,0
-       .quad   level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
-       /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
-       .fill   5,8,0
+       .fill   (512 - 4 - FIXMAP_PMD_NUM),8,0
+       pgtno = 0
+       .rept (FIXMAP_PMD_NUM)
+       .quad level1_fixmap_pgt + (pgtno << PAGE_SHIFT) - __START_KERNEL_map \
+               + _PAGE_TABLE;
+       pgtno = pgtno + 1
+       .endr
+       /* 6 MB reserved space + a 2MB hole */
+       .fill   4,8,0
 
 NEXT_PAGE(level1_fixmap_pgt)
+       .rept (FIXMAP_PMD_NUM)
        .fill   512,8,0
+       .endr
 
 #undef PMDS
 
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 7e5119c1d15c..c17d3893ae60 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -16,7 +16,6 @@
 #include <linux/export.h>
 #include <linux/vmalloc.h>
 #include <linux/uaccess.h>
-#include <asm/fpu/internal.h> /* For use_eager_fpu.  Ugh! */
 #include <asm/user.h>
 #include <asm/fpu/xstate.h>
 #include "cpuid.h"
@@ -114,8 +113,7 @@ int kvm_update_cpuid(struct kvm_vcpu *vcpu)
        if (best && (best->eax & (F(XSAVES) | F(XSAVEC))))
                best->ebx = xstate_required_size(vcpu->arch.xcr0, true);
 
-       if (use_eager_fpu())
-               kvm_x86_ops->fpu_activate(vcpu);
+       kvm_x86_ops->fpu_activate(vcpu);
 
        /*
         * The existing code assumes virtual address is 48-bit in the canonical
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 203d42340fc1..5013ef165f44 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -7631,16 +7631,6 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
        copy_fpregs_to_fpstate(&vcpu->arch.guest_fpu);
        __kernel_fpu_end();
        ++vcpu->stat.fpu_reload;
-       /*
-        * If using eager FPU mode, or if the guest is a frequent user
-        * of the FPU, just leave the FPU active for next time.
-        * Every 255 times fpu_counter rolls over to 0; a guest that uses
-        * the FPU in bursts will revert to loading it on demand.
-        */
-       if (!use_eager_fpu()) {
-               if (++vcpu->fpu_counter < 5)
-                       kvm_make_request(KVM_REQ_DEACTIVATE_FPU, vcpu);
-       }
        trace_kvm_fpu(0);
 }
 
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index e30baa8ad94f..8cbed30feb67 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -536,6 +536,15 @@ void __native_set_fixmap(enum fixed_addresses idx, pte_t pte)
 {
        unsigned long address = __fix_to_virt(idx);
 
+#ifdef CONFIG_X86_64
+       /*
+       * Ensure that the static initial page tables are covering the
+       * fixmap completely.
+       */
+       BUILD_BUG_ON(__end_of_permanent_fixed_addresses >
+                    (FIXMAP_PMD_NUM * PTRS_PER_PTE));
+#endif
+
        if (idx >= __end_of_fixed_addresses) {
                BUG();
                return;
diff --git a/arch/x86/mm/pkeys.c b/arch/x86/mm/pkeys.c
index 0bbec041c003..e2d2b3cd4276 100644
--- a/arch/x86/mm/pkeys.c
+++ b/arch/x86/mm/pkeys.c
@@ -142,8 +142,7 @@ u32 init_pkru_value = PKRU_AD_KEY( 1) | PKRU_AD_KEY( 2) | PKRU_AD_KEY( 3) |
  * Called from the FPU code when creating a fresh set of FPU
  * registers.  This is called from a very specific context where
  * we know the FPU regstiers are safe for use and we can use PKRU
- * directly.  The fact that PKRU is only available when we are
- * using eagerfpu mode makes this possible.
+ * directly.
  */
 void copy_init_pkru_to_fpregs(void)
 {
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index c92f75f7ae33..ebceaba20ad1 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1936,7 +1936,7 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
                 * L3_k[511] -> level2_fixmap_pgt */
                convert_pfn_mfn(level3_kernel_pgt);
 
-               /* L3_k[511][506] -> level1_fixmap_pgt */
+               /* L3_k[511][508-FIXMAP_PMD_NUM ... 507] -> level1_fixmap_pgt */
                convert_pfn_mfn(level2_fixmap_pgt);
        }
        /* We get [511][511] and have Xen's version of level2_kernel_pgt */
@@ -1970,7 +1970,11 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
                set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
                set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
                set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
-               set_page_prot(level1_fixmap_pgt, PAGE_KERNEL_RO);
+
+               for (i = 0; i < FIXMAP_PMD_NUM; i++) {
+                       set_page_prot(level1_fixmap_pgt + i * PTRS_PER_PTE,
+                                     PAGE_KERNEL_RO);
+               }
 
                /* Pin down new L4 */
                pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index dfffba39f723..98517216879d 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -1360,8 +1360,10 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
 
        dpm_wait_for_children(dev, async);
 
-       if (async_error)
+       if (async_error) {
+               dev->power.direct_complete = false;
                goto Complete;
+       }
 
        /*
         * If a device configured to wake up the system from sleep states
@@ -1373,6 +1375,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
                pm_wakeup_event(dev, 0);
 
        if (pm_wakeup_pending()) {
+               dev->power.direct_complete = false;
                async_error = -EBUSY;
                goto Complete;
        }
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 0fd0d82f80d2..fa9ef8ed5712 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -1720,6 +1720,8 @@ static int ucma_close(struct inode *inode, struct file *filp)
                mutex_lock(&mut);
                if (!ctx->closing) {
                        mutex_unlock(&mut);
+                       ucma_put_ctx(ctx);
+                       wait_for_completion(&ctx->comp);
                        /* rdma_destroy_id ensures that no event handlers are
                         * inflight for that id before releasing it.
                         */
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index a184c9830ca5..62eb4b7caff3 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -1262,8 +1262,8 @@ static int __load_mappings(struct dm_cache_metadata *cmd,
                if (hints_valid) {
                        r = dm_array_cursor_next(&cmd->hint_cursor);
                        if (r) {
-                               DMERR("dm_array_cursor_next for hint failed");
-                               goto out;
+                               dm_array_cursor_end(&cmd->hint_cursor);
+                               hints_valid = false;
                        }
                }
        }
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index c817627d09ca..58b97226050f 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -3390,8 +3390,13 @@ static dm_cblock_t get_cache_dev_size(struct cache *cache)
 
 static bool can_resize(struct cache *cache, dm_cblock_t new_size)
 {
-       if (from_cblock(new_size) > from_cblock(cache->cache_size))
-               return true;
+       if (from_cblock(new_size) > from_cblock(cache->cache_size)) {
+               if (cache->sized) {
+                       DMERR("%s: unable to extend cache due to missing cache table reload",
+                             cache_device_name(cache));
+                       return false;
+               }
+       }
 
        /*
         * We can't drop a dirty block when shrinking the cache.
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index 0dadc6044dba..b106a06d21cb 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -1,6 +1,7 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
  * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -161,6 +162,8 @@ void ath10k_debug_print_hwfw_info(struct ath10k *ar)
 void ath10k_debug_print_board_info(struct ath10k *ar)
 {
        char boardinfo[100];
+       const struct firmware *board;
+       u32 crc;
 
        if (ar->id.bmi_ids_valid)
                scnprintf(boardinfo, sizeof(boardinfo), "%d:%d",
@@ -168,11 +171,16 @@ void ath10k_debug_print_board_info(struct ath10k *ar)
        else
                scnprintf(boardinfo, sizeof(boardinfo), "N/A");
 
+       board = ar->normal_mode_fw.board;
+       if (!IS_ERR_OR_NULL(board))
+               crc = crc32_le(0, board->data, board->size);
+       else
+               crc = 0;
+
        ath10k_info(ar, "board_file api %d bmi_id %s crc32 %08x",
                    ar->bd_api,
                    boardinfo,
-                   crc32_le(0, ar->normal_mode_fw.board->data,
-                            ar->normal_mode_fw.board->size));
+                   crc);
 }
 
 void ath10k_debug_print_boot_info(struct ath10k *ar)
diff --git a/drivers/net/wireless/ath/ath10k/trace.h b/drivers/net/wireless/ath/ath10k/trace.h
index e0d00cef0bd8..5b974bb76e6c 100644
--- a/drivers/net/wireless/ath/ath10k/trace.h
+++ b/drivers/net/wireless/ath/ath10k/trace.h
@@ -152,10 +152,9 @@ TRACE_EVENT(ath10k_log_dbg_dump,
 );
 
 TRACE_EVENT(ath10k_wmi_cmd,
-       TP_PROTO(struct ath10k *ar, int id, const void *buf, size_t buf_len,
-                int ret),
+       TP_PROTO(struct ath10k *ar, int id, const void *buf, size_t buf_len),
 
-       TP_ARGS(ar, id, buf, buf_len, ret),
+       TP_ARGS(ar, id, buf, buf_len),
 
        TP_STRUCT__entry(
                __string(device, dev_name(ar->dev))
@@ -163,7 +162,6 @@ TRACE_EVENT(ath10k_wmi_cmd,
                __field(unsigned int, id)
                __field(size_t, buf_len)
                __dynamic_array(u8, buf, buf_len)
-               __field(int, ret)
        ),
 
        TP_fast_assign(
@@ -171,17 +169,15 @@ TRACE_EVENT(ath10k_wmi_cmd,
                __assign_str(driver, dev_driver_string(ar->dev));
                __entry->id = id;
                __entry->buf_len = buf_len;
-               __entry->ret = ret;
                memcpy(__get_dynamic_array(buf), buf, buf_len);
        ),
 
        TP_printk(
-               "%s %s id %d len %zu ret %d",
+               "%s %s id %d len %zu",
                __get_str(driver),
                __get_str(device),
                __entry->id,
-               __entry->buf_len,
-               __entry->ret
+               __entry->buf_len
        )
 );
 
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
index f69b98f4276b..642a441a6586 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
@@ -1486,10 +1486,10 @@ ath10k_wmi_tlv_op_gen_start_scan(struct ath10k *ar,
        bssid_len = arg->n_bssids * sizeof(struct wmi_mac_addr);
        ie_len = roundup(arg->ie_len, 4);
        len = (sizeof(*tlv) + sizeof(*cmd)) +
-             (arg->n_channels ? sizeof(*tlv) + chan_len : 0) +
-             (arg->n_ssids ? sizeof(*tlv) + ssid_len : 0) +
-             (arg->n_bssids ? sizeof(*tlv) + bssid_len : 0) +
-             (arg->ie_len ? sizeof(*tlv) + ie_len : 0);
+             sizeof(*tlv) + chan_len +
+             sizeof(*tlv) + ssid_len +
+             sizeof(*tlv) + bssid_len +
+             sizeof(*tlv) + ie_len;
 
        skb = ath10k_wmi_alloc_skb(ar, len);
        if (!skb)
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index e518b640aad0..75f7a7b549df 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -1711,8 +1711,8 @@ int ath10k_wmi_cmd_send_nowait(struct ath10k *ar, struct sk_buff *skb,
        cmd_hdr->cmd_id = __cpu_to_le32(cmd);
 
        memset(skb_cb, 0, sizeof(*skb_cb));
+       trace_ath10k_wmi_cmd(ar, cmd_id, skb->data, skb->len);
        ret = ath10k_htc_send(&ar->htc, ar->wmi.eid, skb);
-       trace_ath10k_wmi_cmd(ar, cmd_id, skb->data, skb->len, ret);
 
        if (ret)
                goto err_pull;
diff --git a/drivers/net/xen-netback/hash.c b/drivers/net/xen-netback/hash.c
index 3c4c58b9fe76..3b6fb5b3bdb2 100644
--- a/drivers/net/xen-netback/hash.c
+++ b/drivers/net/xen-netback/hash.c
@@ -332,20 +332,22 @@ u32 xenvif_set_hash_mapping_size(struct xenvif *vif, u32 size)
 u32 xenvif_set_hash_mapping(struct xenvif *vif, u32 gref, u32 len,
                            u32 off)
 {
-       u32 *mapping = &vif->hash.mapping[off];
+       u32 *mapping = vif->hash.mapping;
        struct gnttab_copy copy_op = {
                .source.u.ref = gref,
                .source.domid = vif->domid,
-               .dest.u.gmfn = virt_to_gfn(mapping),
                .dest.domid = DOMID_SELF,
-               .dest.offset = xen_offset_in_page(mapping),
-               .len = len * sizeof(u32),
+               .len = len * sizeof(*mapping),
                .flags = GNTCOPY_source_gref
        };
 
-       if ((off + len > vif->hash.size) || copy_op.len > XEN_PAGE_SIZE)
+       if ((off + len < off) || (off + len > vif->hash.size) ||
+           len > XEN_PAGE_SIZE / sizeof(*mapping))
                return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
 
+       copy_op.dest.u.gmfn = virt_to_gfn(mapping + off);
+       copy_op.dest.offset = xen_offset_in_page(mapping + off);
+
        while (len-- != 0)
                if (mapping[off++] >= vif->num_queues)
                        return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index 90b5a898d6b1..0a1ebbbd3f16 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -548,6 +548,9 @@ static void __init of_unittest_parse_interrupts(void)
        struct of_phandle_args args;
        int i, rc;
 
+       if (of_irq_workarounds & OF_IMAP_OLDWORLD_MAC)
+               return;
+
        np = of_find_node_by_path("/testcase-data/interrupts/interrupts0");
        if (!np) {
                pr_err("missing testcase data\n");
@@ -622,6 +625,9 @@ static void __init of_unittest_parse_interrupts_extended(void)
        struct of_phandle_args args;
        int i, rc;
 
+       if (of_irq_workarounds & OF_IMAP_OLDWORLD_MAC)
+               return;
+
        np = of_find_node_by_path("/testcase-data/interrupts/interrupts-extended0");
        if (!np) {
                pr_err("missing testcase data\n");
@@ -778,15 +784,19 @@ static void __init of_unittest_platform_populate(void)
        pdev = of_find_device_by_node(np);
        unittest(pdev, "device 1 creation failed\n");
 
-       irq = platform_get_irq(pdev, 0);
-       unittest(irq == -EPROBE_DEFER, "device deferred probe failed - %d\n", irq);
-
-       /* Test that a parsing failure does not return -EPROBE_DEFER */
-       np = of_find_node_by_path("/testcase-data/testcase-device2");
-       pdev = of_find_device_by_node(np);
-       unittest(pdev, "device 2 creation failed\n");
-       irq = platform_get_irq(pdev, 0);
-       unittest(irq < 0 && irq != -EPROBE_DEFER, "device parsing error failed - %d\n", irq);
+       if (!(of_irq_workarounds & OF_IMAP_OLDWORLD_MAC)) {
+               irq = platform_get_irq(pdev, 0);
+               unittest(irq == -EPROBE_DEFER,
+                        "device deferred probe failed - %d\n", irq);
+
+               /* Test that a parsing failure does not return -EPROBE_DEFER */
+               np = of_find_node_by_path("/testcase-data/testcase-device2");
+               pdev = of_find_device_by_node(np);
+               unittest(pdev, "device 2 creation failed\n");
+               irq = platform_get_irq(pdev, 0);
+               unittest(irq < 0 && irq != -EPROBE_DEFER,
+                        "device parsing error failed - %d\n", irq);
+       }
 
        np = of_find_node_by_path("/testcase-data/platform-tests");
        unittest(np, "No testcase data in device tree\n");
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 6b3c5c4cbb37..ccbbd4cde0f1 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1114,12 +1114,12 @@ int pci_save_state(struct pci_dev *dev)
 EXPORT_SYMBOL(pci_save_state);
 
 static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
-                                    u32 saved_val, int retry)
+                                    u32 saved_val, int retry, bool force)
 {
        u32 val;
 
        pci_read_config_dword(pdev, offset, &val);
-       if (val == saved_val)
+       if (!force && val == saved_val)
                return;
 
        for (;;) {
@@ -1138,25 +1138,36 @@ static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
 }
 
 static void pci_restore_config_space_range(struct pci_dev *pdev,
-                                          int start, int end, int retry)
+                                          int start, int end, int retry,
+                                          bool force)
 {
        int index;
 
        for (index = end; index >= start; index--)
                pci_restore_config_dword(pdev, 4 * index,
                                         pdev->saved_config_space[index],
-                                        retry);
+                                        retry, force);
 }
 
 static void pci_restore_config_space(struct pci_dev *pdev)
 {
        if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
-               pci_restore_config_space_range(pdev, 10, 15, 0);
+               pci_restore_config_space_range(pdev, 10, 15, 0, false);
                /* Restore BARs before the command register. */
-               pci_restore_config_space_range(pdev, 4, 9, 10);
-               pci_restore_config_space_range(pdev, 0, 3, 0);
+               pci_restore_config_space_range(pdev, 4, 9, 10, false);
+               pci_restore_config_space_range(pdev, 0, 3, 0, false);
+       } else if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
+               pci_restore_config_space_range(pdev, 12, 15, 0, false);
+
+               /*
+                * Force rewriting of prefetch registers to avoid S3 resume
+                * issues on Intel PCI bridges that occur when these
+                * registers are not explicitly written.
+                */
+               pci_restore_config_space_range(pdev, 9, 11, 0, true);
+               pci_restore_config_space_range(pdev, 0, 8, 0, false);
        } else {
-               pci_restore_config_space_range(pdev, 0, 15, 0);
+               pci_restore_config_space_range(pdev, 0, 15, 0, false);
        }
 }
 
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 789c81482542..e6429d419b80 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -1475,6 +1475,7 @@ static void tty_driver_remove_tty(struct tty_driver *driver, struct tty_struct *
 static int tty_reopen(struct tty_struct *tty)
 {
        struct tty_driver *driver = tty->driver;
+       int retval;
 
        if (driver->type == TTY_DRIVER_TYPE_PTY &&
            driver->subtype == PTY_TYPE_MASTER)
@@ -1488,10 +1489,14 @@ static int tty_reopen(struct tty_struct *tty)
 
        tty->count++;
 
-       if (!tty->ldisc)
-               return tty_ldisc_reinit(tty, tty->termios.c_line);
+       if (tty->ldisc)
+               return 0;
 
-       return 0;
+       retval = tty_ldisc_reinit(tty, tty->termios.c_line);
+       if (retval)
+               tty->count--;
+
+       return retval;
 }
 
 /**
diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c
index ce9e457e60c3..c10875834a5a 100644
--- a/drivers/usb/host/xhci-mtk.c
+++ b/drivers/usb/host/xhci-mtk.c
@@ -735,10 +735,10 @@ static int __maybe_unused xhci_mtk_resume(struct device *dev)
        xhci_mtk_host_enable(mtk);
 
        xhci_dbg(xhci, "%s: restart port polling\n", __func__);
-       set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
-       usb_hcd_poll_rh_status(hcd);
        set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
        usb_hcd_poll_rh_status(xhci->shared_hcd);
+       set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+       usb_hcd_poll_rh_status(hcd);
        return 0;
 }
 
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index f6782a347cde..b5140555a8d5 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -179,6 +179,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
        }
        if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
            (pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
+            pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI ||
+            pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI ||
             pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI ||
             pdev->device == PCI_DEVICE_ID_INTEL_DNV_XHCI))
                xhci->quirks |= XHCI_MISSING_CAS;
diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c
index 2674da40d9cd..6d6acf2c07c3 100644
--- a/drivers/usb/serial/usb-serial-simple.c
+++ b/drivers/usb/serial/usb-serial-simple.c
@@ -87,7 +87,8 @@ DEVICE(moto_modem, MOTO_IDS);
 
 /* Motorola Tetra driver */
 #define MOTOROLA_TETRA_IDS()                   \
-       { USB_DEVICE(0x0cad, 0x9011) }  /* Motorola Solutions TETRA PEI */
+       { USB_DEVICE(0x0cad, 0x9011) }, /* Motorola Solutions TETRA PEI */ \
+       { USB_DEVICE(0x0cad, 0x9012) }  /* MTP6550 */
 DEVICE(motorola_tetra, MOTOROLA_TETRA_IDS);
 
 /* Novatel Wireless GPS driver */
diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
index ef69273074ba..a3edb20ea4c3 100644
--- a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
+++ b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
@@ -496,6 +496,9 @@ static int omapfb_memory_read(struct fb_info *fbi,
        if (!access_ok(VERIFY_WRITE, mr->buffer, mr->buffer_size))
                return -EFAULT;
 
+       if (mr->w > 4096 || mr->h > 4096)
+               return -EINVAL;
+
        if (mr->w * mr->h * 3 > mr->buffer_size)
                return -EINVAL;
 
@@ -509,7 +512,7 @@ static int omapfb_memory_read(struct fb_info *fbi,
                        mr->x, mr->y, mr->w, mr->h);
 
        if (r > 0) {
-               if (copy_to_user(mr->buffer, buf, mr->buffer_size))
+               if (copy_to_user(mr->buffer, buf, r))
                        r = -EFAULT;
        }
 
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index c19c96840480..c10180d0b018 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -209,12 +209,12 @@ ext4_xattr_check_block(struct inode *inode, struct buffer_head *bh)
 {
        int error;
 
-       if (buffer_verified(bh))
-               return 0;
-
        if (BHDR(bh)->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC) ||
            BHDR(bh)->h_blocks != cpu_to_le32(1))
                return -EFSCORRUPTED;
+       if (buffer_verified(bh))
+               return 0;
+
        if (!ext4_xattr_block_csum_verify(inode, bh))
                return -EFSBADCRC;
        error = ext4_xattr_check_names(BFIRST(bh), bh->b_data + bh->b_size,
@@ -645,14 +645,20 @@ static size_t ext4_xattr_free_space(struct ext4_xattr_entry *last,
 }
 
 static int
-ext4_xattr_set_entry(struct ext4_xattr_info *i, struct ext4_xattr_search *s)
+ext4_xattr_set_entry(struct ext4_xattr_info *i, struct ext4_xattr_search *s,
+                    struct inode *inode)
 {
-       struct ext4_xattr_entry *last;
+       struct ext4_xattr_entry *last, *next;
        size_t free, min_offs = s->end - s->base, name_len = strlen(i->name);
 
        /* Compute min_offs and last. */
        last = s->first;
-       for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
+       for (; !IS_LAST_ENTRY(last); last = next) {
+               next = EXT4_XATTR_NEXT(last);
+               if ((void *)next >= s->end) {
+                       EXT4_ERROR_INODE(inode, "corrupted xattr entries");
+                       return -EIO;
+               }
                if (last->e_value_size) {
                        size_t offs = le16_to_cpu(last->e_value_offs);
                        if (offs < min_offs)
@@ -834,7 +840,7 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
                        mb_cache_entry_delete_block(ext4_mb_cache, hash,
                                                    bs->bh->b_blocknr);
                        ea_bdebug(bs->bh, "modifying in-place");
-                       error = ext4_xattr_set_entry(i, s);
+                       error = ext4_xattr_set_entry(i, s, inode);
                        if (!error) {
                                if (!IS_LAST_ENTRY(s->first))
                                        ext4_xattr_rehash(header(s->base),
@@ -881,7 +887,7 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
                s->end = s->base + sb->s_blocksize;
        }
 
-       error = ext4_xattr_set_entry(i, s);
+       error = ext4_xattr_set_entry(i, s, inode);
        if (error == -EFSCORRUPTED)
                goto bad_block;
        if (error)
@@ -1079,7 +1085,7 @@ int ext4_xattr_ibody_inline_set(handle_t *handle, struct inode *inode,
 
        if (EXT4_I(inode)->i_extra_isize == 0)
                return -ENOSPC;
-       error = ext4_xattr_set_entry(i, s);
+       error = ext4_xattr_set_entry(i, s, inode);
        if (error) {
                if (error == -ENOSPC &&
                    ext4_has_inline_data(inode)) {
@@ -1091,7 +1097,7 @@ int ext4_xattr_ibody_inline_set(handle_t *handle, struct inode *inode,
                        error = ext4_xattr_ibody_find(inode, i, is);
                        if (error)
                                return error;
-                       error = ext4_xattr_set_entry(i, s);
+                       error = ext4_xattr_set_entry(i, s, inode);
                }
                if (error)
                        return error;
@@ -1117,7 +1123,7 @@ static int ext4_xattr_ibody_set(handle_t *handle, struct inode *inode,
 
        if (EXT4_I(inode)->i_extra_isize == 0)
                return -ENOSPC;
-       error = ext4_xattr_set_entry(i, s);
+       error = ext4_xattr_set_entry(i, s, inode);
        if (error)
                return error;
        header = IHDR(inode, ext4_raw_inode(&is->iloc));
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index b4dbc2f59656..aee2a066a446 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -676,6 +676,7 @@ static int get_checkpoint_version(struct f2fs_sb_info *sbi, block_t cp_addr,
 
        crc_offset = le32_to_cpu((*cp_block)->checksum_offset);
        if (crc_offset >= blk_size) {
+               f2fs_put_page(*cp_page, 1);
                f2fs_msg(sbi->sb, KERN_WARNING,
                        "invalid crc_offset: %zu", crc_offset);
                return -EINVAL;
@@ -684,6 +685,7 @@ static int get_checkpoint_version(struct f2fs_sb_info *sbi, block_t cp_addr,
        crc = le32_to_cpu(*((__le32 *)((unsigned char *)*cp_block
                                                        + crc_offset)));
        if (!f2fs_crc_valid(sbi, crc, *cp_block, crc_offset)) {
+               f2fs_put_page(*cp_page, 1);
                f2fs_msg(sbi->sb, KERN_WARNING, "invalid crc value");
                return -EINVAL;
        }
@@ -703,14 +705,14 @@ static struct page *validate_checkpoint(struct f2fs_sb_info *sbi,
        err = get_checkpoint_version(sbi, cp_addr, &cp_block,
                                        &cp_page_1, version);
        if (err)
-               goto invalid_cp1;
+               return NULL;
        pre_version = *version;
 
        cp_addr += le32_to_cpu(cp_block->cp_pack_total_block_count) - 1;
        err = get_checkpoint_version(sbi, cp_addr, &cp_block,
                                        &cp_page_2, version);
        if (err)
-               goto invalid_cp2;
+               goto invalid_cp;
        cur_version = *version;
 
        if (cur_version == pre_version) {
@@ -718,9 +720,8 @@ static struct page *validate_checkpoint(struct f2fs_sb_info *sbi,
                f2fs_put_page(cp_page_2, 1);
                return cp_page_1;
        }
-invalid_cp2:
        f2fs_put_page(cp_page_2, 1);
-invalid_cp1:
+invalid_cp:
        f2fs_put_page(cp_page_1, 1);
        return NULL;
 }
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 03dda1cbe485..727a9e3fa806 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -1918,6 +1918,9 @@ static struct ubi_volume_desc *open_ubi(const char *name, int mode)
        int dev, vol;
        char *endptr;
 
+       if (!name || !*name)
+               return ERR_PTR(-EINVAL);
+
        /* First, try to open using the device node path method */
        ubi = ubi_open_volume_path(name, mode);
        if (!IS_ERR(ubi))
diff --git a/include/linux/netfilter_bridge/ebtables.h b/include/linux/netfilter_bridge/ebtables.h
index 984b2112c77b..ea8a97793d2d 100644
--- a/include/linux/netfilter_bridge/ebtables.h
+++ b/include/linux/netfilter_bridge/ebtables.h
@@ -123,4 +123,9 @@ extern unsigned int ebt_do_table(struct sk_buff *skb,
 /* True if the target is not a standard target */
 #define INVALID_TARGET (info->target < -NUM_STANDARD_TARGETS || info->target >= 0)
 
+static inline bool ebt_invalid_target(int target)
+{
+       return (target < -NUM_STANDARD_TARGETS || target >= 0);
+}
+
 #endif
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 4c233437ee1a..bb0cf1caf1cd 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -4386,7 +4386,11 @@ int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
         */
        do {
                css_task_iter_start(&from->self, &it);
-               task = css_task_iter_next(&it);
+
+               do {
+                       task = css_task_iter_next(&it);
+               } while (task && (task->flags & PF_EXITING));
+
                if (task)
                        get_task_struct(task);
                css_task_iter_end(&it);
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 5f658b6a684f..d31e801a467c 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1078,6 +1078,9 @@ const char * const vmstat_text[] = {
 #ifdef CONFIG_SMP
        "nr_tlb_remote_flush",
        "nr_tlb_remote_flush_received",
+#else
+       "", /* nr_tlb_remote_flush */
+       "", /* nr_tlb_remote_flush_received */
 #endif /* CONFIG_SMP */
        "nr_tlb_local_flush_all",
        "nr_tlb_local_flush_one",
diff --git a/net/bridge/netfilter/ebt_arpreply.c b/net/bridge/netfilter/ebt_arpreply.c
index 070cf134a22f..f2660c1b29e4 100644
--- a/net/bridge/netfilter/ebt_arpreply.c
+++ b/net/bridge/netfilter/ebt_arpreply.c
@@ -67,6 +67,9 @@ static int ebt_arpreply_tg_check(const struct xt_tgchk_param *par)
        if (e->ethproto != htons(ETH_P_ARP) ||
            e->invflags & EBT_IPROTO)
                return -EINVAL;
+       if (ebt_invalid_target(info->target))
+               return -EINVAL;
+
        return 0;
 }
 
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index e63fd12f923a..6ef9d32c34f1 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -386,7 +386,7 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev,
        case NL80211_IFTYPE_AP:
        case NL80211_IFTYPE_AP_VLAN:
                /* Keys without a station are used for TX only */
-               if (key->sta && test_sta_flag(key->sta, WLAN_STA_MFP))
+               if (sta && test_sta_flag(sta, WLAN_STA_MFP))
                        key->conf.flags |= IEEE80211_KEY_FLAG_RX_MGMT;
                break;
        case NL80211_IFTYPE_ADHOC:
diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h
index fbc1474960e3..f6d1bc93589c 100644
--- a/tools/arch/x86/include/asm/cpufeatures.h
+++ b/tools/arch/x86/include/asm/cpufeatures.h
@@ -104,7 +104,6 @@
 #define X86_FEATURE_EXTD_APICID        ( 3*32+26) /* has extended APICID (8 bits) */
 #define X86_FEATURE_AMD_DCM     ( 3*32+27) /* multi-node processor */
 #define X86_FEATURE_APERFMPERF ( 3*32+28) /* APERFMPERF */
-/* free, was #define X86_FEATURE_EAGER_FPU     ( 3*32+29) * "eagerfpu" Non lazy FPU restore */
 #define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */
 
 /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
