commit:     1a45eac4572b0b1eec06dc8c8827f0829aa21bba
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Jul 10 23:28:02 2015 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Jul 10 23:28:02 2015 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=1a45eac4

Linux patch 3.14.48

 0000_README              |    4 +
 1047_linux-3.14.48.patch | 1019 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1023 insertions(+)

diff --git a/0000_README b/0000_README
index 7bd96c3..1bd77d9 100644
--- a/0000_README
+++ b/0000_README
@@ -230,6 +230,10 @@ Patch:  1046_linux-3.14.47.patch
 From:   http://www.kernel.org
 Desc:   Linux 3.14.47
 
+Patch:  1047_linux-3.14.48.patch
+From:   http://www.kernel.org
+Desc:   Linux 3.14.48
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1047_linux-3.14.48.patch b/1047_linux-3.14.48.patch
new file mode 100644
index 0000000..b3cd94d
--- /dev/null
+++ b/1047_linux-3.14.48.patch
@@ -0,0 +1,1019 @@
+diff --git a/Makefile b/Makefile
+index f9041e6d4d19..25393e89051c 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 14
+-SUBLEVEL = 47
++SUBLEVEL = 48
+ EXTRAVERSION =
+ NAME = Remembering Coco
+ 
+diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
+index 9f7923193cda..7d35af3f3752 100644
+--- a/arch/arm/include/asm/kvm_mmu.h
++++ b/arch/arm/include/asm/kvm_mmu.h
+@@ -117,13 +117,14 @@ static inline void kvm_set_s2pmd_writable(pmd_t *pmd)
+       (__boundary - 1 < (end) - 1)? __boundary: (end);                \
+ })
+ 
++#define kvm_pgd_index(addr)                    pgd_index(addr)
++
+ static inline bool kvm_page_empty(void *ptr)
+ {
+       struct page *ptr_page = virt_to_page(ptr);
+       return page_count(ptr_page) == 1;
+ }
+ 
+-
+ #define kvm_pte_table_empty(ptep) kvm_page_empty(ptep)
+ #define kvm_pmd_table_empty(pmdp) kvm_page_empty(pmdp)
+ #define kvm_pud_table_empty(pudp) (0)
+diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
+index 2e74a617147d..f6a52a2a3724 100644
+--- a/arch/arm/kvm/arm.c
++++ b/arch/arm/kvm/arm.c
+@@ -441,6 +441,7 @@ static void update_vttbr(struct kvm *kvm)
+ 
+ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
+ {
++      struct kvm *kvm = vcpu->kvm;
+       int ret;
+ 
+       if (likely(vcpu->arch.has_run_once))
+@@ -452,12 +453,20 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
+        * Initialize the VGIC before running a vcpu the first time on
+        * this VM.
+        */
+-      if (unlikely(!vgic_initialized(vcpu->kvm))) {
+-              ret = kvm_vgic_init(vcpu->kvm);
++      if (unlikely(!vgic_initialized(kvm))) {
++              ret = kvm_vgic_init(kvm);
+               if (ret)
+                       return ret;
+       }
+ 
++      /*
++       * Enable the arch timers only if we have an in-kernel VGIC
++       * and it has been properly initialized, since we cannot handle
++       * interrupts from the virtual timer with a userspace gic.
++       */
++      if (irqchip_in_kernel(kvm) && vgic_initialized(kvm))
++              kvm_timer_enable(kvm);
++
+       return 0;
+ }
+ 
+diff --git a/arch/arm/kvm/interrupts.S b/arch/arm/kvm/interrupts.S
+index 0d68d4073068..a1467e7689f5 100644
+--- a/arch/arm/kvm/interrupts.S
++++ b/arch/arm/kvm/interrupts.S
+@@ -159,13 +159,9 @@ __kvm_vcpu_return:
+       @ Don't trap coprocessor accesses for host kernel
+       set_hstr vmexit
+       set_hdcr vmexit
+-      set_hcptr vmexit, (HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11))
++      set_hcptr vmexit, (HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11)), after_vfp_restore
+ 
+ #ifdef CONFIG_VFPv3
+-      @ Save floating point registers we if let guest use them.
+-      tst     r2, #(HCPTR_TCP(10) | HCPTR_TCP(11))
+-      bne     after_vfp_restore
+-
+       @ Switch VFP/NEON hardware state to the host's
+       add     r7, vcpu, #VCPU_VFP_GUEST
+       store_vfp_state r7
+@@ -177,6 +173,8 @@ after_vfp_restore:
+       @ Restore FPEXC_EN which we clobbered on entry
+       pop     {r2}
+       VFPFMXR FPEXC, r2
++#else
++after_vfp_restore:
+ #endif
+ 
+       @ Reset Hyp-role
+@@ -467,7 +465,7 @@ switch_to_guest_vfp:
+       push    {r3-r7}
+ 
+       @ NEON/VFP used.  Turn on VFP access.
+-      set_hcptr vmexit, (HCPTR_TCP(10) | HCPTR_TCP(11))
++      set_hcptr vmtrap, (HCPTR_TCP(10) | HCPTR_TCP(11))
+ 
+       @ Switch VFP/NEON hardware state to the guest's
+       add     r7, r0, #VCPU_VFP_HOST
+diff --git a/arch/arm/kvm/interrupts_head.S b/arch/arm/kvm/interrupts_head.S
+index 76af93025574..2973b2d342fa 100644
+--- a/arch/arm/kvm/interrupts_head.S
++++ b/arch/arm/kvm/interrupts_head.S
+@@ -578,8 +578,13 @@ vcpu      .req    r0              @ vcpu pointer always in r0
+ .endm
+ 
+ /* Configures the HCPTR (Hyp Coprocessor Trap Register) on entry/return
+- * (hardware reset value is 0). Keep previous value in r2. */
+-.macro set_hcptr operation, mask
++ * (hardware reset value is 0). Keep previous value in r2.
++ * An ISB is emited on vmexit/vmtrap, but executed on vmexit only if
++ * VFP wasn't already enabled (always executed on vmtrap).
++ * If a label is specified with vmexit, it is branched to if VFP wasn't
++ * enabled.
++ */
++.macro set_hcptr operation, mask, label = none
+       mrc     p15, 4, r2, c1, c1, 2
+       ldr     r3, =\mask
+       .if \operation == vmentry
+@@ -588,6 +593,17 @@ vcpu      .req    r0              @ vcpu pointer always in r0
+       bic     r3, r2, r3              @ Don't trap defined coproc-accesses
+       .endif
+       mcr     p15, 4, r3, c1, c1, 2
++      .if \operation != vmentry
++      .if \operation == vmexit
++      tst     r2, #(HCPTR_TCP(10) | HCPTR_TCP(11))
++      beq     1f
++      .endif
++      isb
++      .if \label != none
++      b       \label
++      .endif
++1:
++      .endif
+ .endm
+ 
+ /* Configures the HDCR (Hyp Debug Configuration Register) on entry/return
+diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
+index 524b4b57f650..c612e37166ad 100644
+--- a/arch/arm/kvm/mmu.c
++++ b/arch/arm/kvm/mmu.c
+@@ -194,7 +194,7 @@ static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
+       phys_addr_t addr = start, end = start + size;
+       phys_addr_t next;
+ 
+-      pgd = pgdp + pgd_index(addr);
++      pgd = pgdp + kvm_pgd_index(addr);
+       do {
+               next = kvm_pgd_addr_end(addr, end);
+               if (!pgd_none(*pgd))
+@@ -264,7 +264,7 @@ static void stage2_flush_memslot(struct kvm *kvm,
+       phys_addr_t next;
+       pgd_t *pgd;
+ 
+-      pgd = kvm->arch.pgd + pgd_index(addr);
++      pgd = kvm->arch.pgd + kvm_pgd_index(addr);
+       do {
+               next = kvm_pgd_addr_end(addr, end);
+               stage2_flush_puds(kvm, pgd, addr, next);
+@@ -649,7 +649,7 @@ static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache
+       pud_t *pud;
+       pmd_t *pmd;
+ 
+-      pgd = kvm->arch.pgd + pgd_index(addr);
++      pgd = kvm->arch.pgd + kvm_pgd_index(addr);
+       pud = pud_offset(pgd, addr);
+       if (pud_none(*pud)) {
+               if (!cache)
+diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
+index 681cb9080100..91f33c2051f2 100644
+--- a/arch/arm64/include/asm/kvm_emulate.h
++++ b/arch/arm64/include/asm/kvm_emulate.h
+@@ -41,6 +41,8 @@ void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
+ static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
+ {
+       vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
++      if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features))
++              vcpu->arch.hcr_el2 &= ~HCR_RW;
+ }
+ 
+ static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
+diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
+index 0d51874c838f..15a8a861264a 100644
+--- a/arch/arm64/include/asm/kvm_mmu.h
++++ b/arch/arm64/include/asm/kvm_mmu.h
+@@ -69,6 +69,8 @@
+ #define PTRS_PER_S2_PGD (1 << (KVM_PHYS_SHIFT - PGDIR_SHIFT))
+ #define S2_PGD_ORDER  get_order(PTRS_PER_S2_PGD * sizeof(pgd_t))
+ 
++#define kvm_pgd_index(addr)    (((addr) >> PGDIR_SHIFT) & (PTRS_PER_S2_PGD - 1))
++
+ int create_hyp_mappings(void *from, void *to);
+ int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
+ void free_boot_hyp_pgd(void);
+diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
+index 5dfc8331c385..3aaf3bc4ad8a 100644
+--- a/arch/arm64/kvm/hyp.S
++++ b/arch/arm64/kvm/hyp.S
+@@ -629,6 +629,7 @@ ENTRY(__kvm_tlb_flush_vmid_ipa)
+        * Instead, we invalidate Stage-2 for this IPA, and the
+        * whole of Stage-1. Weep...
+        */
++      lsr     x1, x1, #12
+       tlbi    ipas2e1is, x1
+       /*
+        * We have to ensure completion of the invalidation at Stage-2,
+diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
+index 70a7816535cd..0b4326578985 100644
+--- a/arch/arm64/kvm/reset.c
++++ b/arch/arm64/kvm/reset.c
+@@ -90,7 +90,6 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
+                       if (!cpu_has_32bit_el1())
+                               return -EINVAL;
+                       cpu_reset = &default_regs_reset32;
+-                      vcpu->arch.hcr_el2 &= ~HCR_RW;
+               } else {
+                       cpu_reset = &default_regs_reset;
+               }
+diff --git a/arch/mips/include/asm/mach-generic/spaces.h b/arch/mips/include/asm/mach-generic/spaces.h
+index 9488fa5f8866..afc96ecb9004 100644
+--- a/arch/mips/include/asm/mach-generic/spaces.h
++++ b/arch/mips/include/asm/mach-generic/spaces.h
+@@ -94,7 +94,11 @@
+ #endif
+ 
+ #ifndef FIXADDR_TOP
++#ifdef CONFIG_KVM_GUEST
++#define FIXADDR_TOP           ((unsigned long)(long)(int)0x7ffe0000)
++#else
+ #define FIXADDR_TOP           ((unsigned long)(long)(int)0xfffe0000)
+ #endif
++#endif
+ 
+ #endif /* __ASM_MACH_GENERIC_SPACES_H */
+diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
+index 38265dc85318..65dfbd0c196d 100644
+--- a/arch/powerpc/perf/core-book3s.c
++++ b/arch/powerpc/perf/core-book3s.c
+@@ -124,7 +124,16 @@ static inline void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw) {}
+ 
+ static bool regs_use_siar(struct pt_regs *regs)
+ {
+-      return !!regs->result;
++      /*
++       * When we take a performance monitor exception the regs are setup
++       * using perf_read_regs() which overloads some fields, in particular
++       * regs->result to tell us whether to use SIAR.
++       *
++       * However if the regs are from another exception, eg. a syscall, then
++       * they have not been setup using perf_read_regs() and so regs->result
++       * is something random.
++       */
++      return ((TRAP(regs) == 0xf00) && regs->result);
+ }
+ 
+ /*
+diff --git a/arch/sparc/kernel/ldc.c b/arch/sparc/kernel/ldc.c
+index 27bb55485472..7ef28625c199 100644
+--- a/arch/sparc/kernel/ldc.c
++++ b/arch/sparc/kernel/ldc.c
+@@ -2307,7 +2307,7 @@ void *ldc_alloc_exp_dring(struct ldc_channel *lp, unsigned int len,
+       if (len & (8UL - 1))
+               return ERR_PTR(-EINVAL);
+ 
+-      buf = kzalloc(len, GFP_KERNEL);
++      buf = kzalloc(len, GFP_ATOMIC);
+       if (!buf)
+               return ERR_PTR(-ENOMEM);
+ 
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index 5dab54accc56..96e743ac28f3 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -2440,9 +2440,19 @@ config X86_DMA_REMAP
+       depends on STA2X11
+ 
+ config IOSF_MBI
+-      tristate
+-      default m
++      tristate "Intel System On Chip IOSF Sideband support"
+       depends on PCI
++      ---help---
++        Enables sideband access to mailbox registers on SoC's. The sideband is
++        available on the following platforms. This list is not meant to be
++        exclusive.
++         - BayTrail
++         - Cherryview
++         - Braswell
++         - Quark
++
++        You should say Y if you are running a kernel on one of these
++        platforms.
+ 
+ source "net/Kconfig"
+ 
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index e9dc02968cf8..ac03bd7c8978 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -571,7 +571,7 @@ struct kvm_arch {
+       struct kvm_pic *vpic;
+       struct kvm_ioapic *vioapic;
+       struct kvm_pit *vpit;
+-      int vapics_in_nmi_mode;
++      atomic_t vapics_in_nmi_mode;
+       struct mutex apic_map_lock;
+       struct kvm_apic_map *apic_map;
+ 
+diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
+index 298781d4cfb4..1406ffde3e35 100644
+--- a/arch/x86/kvm/i8254.c
++++ b/arch/x86/kvm/i8254.c
+@@ -305,7 +305,7 @@ static void pit_do_work(struct kthread_work *work)
+                * LVT0 to NMI delivery. Other PIC interrupts are just sent to
+                * VCPU0, and only if its LVT0 is in EXTINT mode.
+                */
+-              if (kvm->arch.vapics_in_nmi_mode > 0)
++              if (atomic_read(&kvm->arch.vapics_in_nmi_mode) > 0)
+                       kvm_for_each_vcpu(i, vcpu, kvm)
+                               kvm_apic_nmi_wd_deliver(vcpu);
+       }
+diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
+index 453e5fbbb7ae..6456734a4ca6 100644
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -1109,10 +1109,10 @@ static void apic_manage_nmi_watchdog(struct kvm_lapic *apic, u32 lvt0_val)
+               if (!nmi_wd_enabled) {
+                       apic_debug("Receive NMI setting on APIC_LVT0 "
+                                  "for cpu %d\n", apic->vcpu->vcpu_id);
+-                      apic->vcpu->kvm->arch.vapics_in_nmi_mode++;
++                      atomic_inc(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
+               }
+       } else if (nmi_wd_enabled)
+-              apic->vcpu->kvm->arch.vapics_in_nmi_mode--;
++              atomic_dec(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
+ }
+ 
+ static int apic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
+diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
+index 4f25ec077552..bf001382d170 100644
+--- a/arch/x86/pci/acpi.c
++++ b/arch/x86/pci/acpi.c
+@@ -84,6 +84,17 @@ static const struct dmi_system_id pci_crs_quirks[] __initconst = {
+                       DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
+               },
+       },
++      /* https://bugs.launchpad.net/ubuntu/+source/alsa-driver/+bug/931368 */
++      /* https://bugs.launchpad.net/ubuntu/+source/alsa-driver/+bug/1033299 */
++      {
++              .callback = set_use_crs,
++              .ident = "Foxconn K8M890-8237A",
++              .matches = {
++                      DMI_MATCH(DMI_BOARD_VENDOR, "Foxconn"),
++                      DMI_MATCH(DMI_BOARD_NAME, "K8M890-8237A"),
++                      DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
++              },
++      },
+ 
+       /* Now for the blacklist.. */
+ 
+@@ -124,8 +135,10 @@ void __init pci_acpi_crs_quirks(void)
+ {
+       int year;
+ 
+-      if (dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL) && year < 2008)
+-              pci_use_crs = false;
++      if (dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL) && year < 2008) {
++              if (iomem_resource.end <= 0xffffffff)
++                      pci_use_crs = false;
++      }
+ 
+       dmi_check_system(pci_crs_quirks);
+ 
+diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
+index 533a509439ca..fbc693b7d24f 100644
+--- a/drivers/cpufreq/intel_pstate.c
++++ b/drivers/cpufreq/intel_pstate.c
+@@ -417,7 +417,7 @@ static void byt_set_pstate(struct cpudata *cpudata, int pstate)
+ 
+       val |= vid;
+ 
+-      wrmsrl(MSR_IA32_PERF_CTL, val);
++      wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val);
+ }
+ 
+ #define BYT_BCLK_FREQS 5
+diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
+index 5967667e1a8f..1f354879bd06 100644
+--- a/drivers/crypto/talitos.c
++++ b/drivers/crypto/talitos.c
+@@ -927,7 +927,8 @@ static int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
+               sg_count--;
+               link_tbl_ptr--;
+       }
+-      be16_add_cpu(&link_tbl_ptr->len, cryptlen);
++      link_tbl_ptr->len = cpu_to_be16(be16_to_cpu(link_tbl_ptr->len)
++                                      + cryptlen);
+ 
+       /* tag end of link table */
+       link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
+@@ -2563,6 +2564,7 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
+               break;
+       default:
+               dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
++              kfree(t_alg);
+               return ERR_PTR(-EINVAL);
+       }
+ 
+diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
+index 9cbef59d404a..935974090aa0 100644
+--- a/drivers/iommu/amd_iommu.c
++++ b/drivers/iommu/amd_iommu.c
+@@ -1922,9 +1922,15 @@ static void free_pt_##LVL (unsigned long __pt)                  \
+       pt = (u64 *)__pt;                                       \
+                                                               \
+       for (i = 0; i < 512; ++i) {                             \
++              /* PTE present? */                              \
+               if (!IOMMU_PTE_PRESENT(pt[i]))                  \
+                       continue;                               \
+                                                               \
++              /* Large PTE? */                                \
++              if (PM_PTE_LEVEL(pt[i]) == 0 ||                 \
++                  PM_PTE_LEVEL(pt[i]) == 7)                   \
++                      continue;                               \
++                                                              \
+               p = (unsigned long)IOMMU_PTE_PAGE(pt[i]);       \
+               FN(p);                                          \
+       }                                                       \
+diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
+index 25f74191a788..62c3fb91e76f 100644
+--- a/drivers/net/phy/phy_device.c
++++ b/drivers/net/phy/phy_device.c
+@@ -765,10 +765,11 @@ static int genphy_config_advert(struct phy_device *phydev)
+       if (phydev->supported & (SUPPORTED_1000baseT_Half |
+                                SUPPORTED_1000baseT_Full)) {
+               adv |= ethtool_adv_to_mii_ctrl1000_t(advertise);
+-              if (adv != oldadv)
+-                      changed = 1;
+       }
+ 
++      if (adv != oldadv)
++              changed = 1;
++
+       err = phy_write(phydev, MII_CTRL1000, adv);
+       if (err < 0)
+               return err;
+diff --git a/fs/dcache.c b/fs/dcache.c
+index 1d7e8a3fb6cd..aa24f7de1b92 100644
+--- a/fs/dcache.c
++++ b/fs/dcache.c
+@@ -2905,17 +2905,6 @@ restart:
+                               vfsmnt = &mnt->mnt;
+                               continue;
+                       }
+-                      /*
+-                       * Filesystems needing to implement special "root names"
+-                       * should do so with ->d_dname()
+-                       */
+-                      if (IS_ROOT(dentry) &&
+-                         (dentry->d_name.len != 1 ||
+-                          dentry->d_name.name[0] != '/')) {
+-                              WARN(1, "Root dentry has weird name <%.*s>\n",
+-                                   (int) dentry->d_name.len,
+-                                   dentry->d_name.name);
+-                      }
+                       if (!error)
+                               error = is_mounted(vfsmnt) ? 1 : 2;
+                       break;
+diff --git a/fs/inode.c b/fs/inode.c
+index e846a32e8d6e..644875bcc846 100644
+--- a/fs/inode.c
++++ b/fs/inode.c
+@@ -1631,8 +1631,8 @@ int file_remove_suid(struct file *file)
+               error = security_inode_killpriv(dentry);
+       if (!error && killsuid)
+               error = __remove_suid(dentry, killsuid);
+-      if (!error && (inode->i_sb->s_flags & MS_NOSEC))
+-              inode->i_flags |= S_NOSEC;
++      if (!error)
++              inode_has_no_xattr(inode);
+ 
+       return error;
+ }
+diff --git a/fs/namespace.c b/fs/namespace.c
+index 2faa7eacb62b..fc99d185a477 100644
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -3031,11 +3031,15 @@ bool fs_fully_visible(struct file_system_type *type)
+               if (mnt->mnt.mnt_root != mnt->mnt.mnt_sb->s_root)
+                       continue;
+ 
+-              /* This mount is not fully visible if there are any child mounts
+-               * that cover anything except for empty directories.
++              /* This mount is not fully visible if there are any
++               * locked child mounts that cover anything except for
++               * empty directories.
+                */
+               list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
+                       struct inode *inode = child->mnt_mountpoint->d_inode;
++                      /* Only worry about locked mounts */
++                      if (!(mnt->mnt.mnt_flags & MNT_LOCKED))
++                              continue;
+                       if (!S_ISDIR(inode->i_mode))
+                               goto next;
+                       if (inode->i_nlink > 2)
+diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h
+index 6d9aeddc09bf..327b155e7cc9 100644
+--- a/include/kvm/arm_arch_timer.h
++++ b/include/kvm/arm_arch_timer.h
+@@ -60,7 +60,8 @@ struct arch_timer_cpu {
+ 
+ #ifdef CONFIG_KVM_ARM_TIMER
+ int kvm_timer_hyp_init(void);
+-int kvm_timer_init(struct kvm *kvm);
++void kvm_timer_enable(struct kvm *kvm);
++void kvm_timer_init(struct kvm *kvm);
+ void kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
+                         const struct kvm_irq_level *irq);
+ void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu);
+@@ -73,11 +74,8 @@ static inline int kvm_timer_hyp_init(void)
+       return 0;
+ };
+ 
+-static inline int kvm_timer_init(struct kvm *kvm)
+-{
+-      return 0;
+-}
+-
++static inline void kvm_timer_enable(struct kvm *kvm) {}
++static inline void kvm_timer_init(struct kvm *kvm) {}
+ static inline void kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
+                                       const struct kvm_irq_level *irq) {}
+ static inline void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu) {}
+diff --git a/include/net/netns/sctp.h b/include/net/netns/sctp.h
+index 3573a81815ad..8ba379f9e467 100644
+--- a/include/net/netns/sctp.h
++++ b/include/net/netns/sctp.h
+@@ -31,6 +31,7 @@ struct netns_sctp {
+       struct list_head addr_waitq;
+       struct timer_list addr_wq_timer;
+       struct list_head auto_asconf_splist;
++      /* Lock that protects both addr_waitq and auto_asconf_splist */
+       spinlock_t addr_wq_lock;
+ 
+       /* Lock that protects the local_addr_list writers */
+diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
+index 0dfcc92600e8..2c2d388f884f 100644
+--- a/include/net/sctp/structs.h
++++ b/include/net/sctp/structs.h
+@@ -219,6 +219,10 @@ struct sctp_sock {
+       atomic_t pd_mode;
+       /* Receive to here while partial delivery is in effect. */
+       struct sk_buff_head pd_lobby;
++
++      /* These must be the last fields, as they will skipped on copies,
++       * like on accept and peeloff operations
++       */
+       struct list_head auto_asconf_list;
+       int do_auto_asconf;
+ };
+diff --git a/net/bridge/br_ioctl.c b/net/bridge/br_ioctl.c
+index a9a4a1b7863d..8d423bc649b9 100644
+--- a/net/bridge/br_ioctl.c
++++ b/net/bridge/br_ioctl.c
+@@ -247,9 +247,7 @@ static int old_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+               if (!ns_capable(dev_net(dev)->user_ns, CAP_NET_ADMIN))
+                       return -EPERM;
+ 
+-              spin_lock_bh(&br->lock);
+               br_stp_set_bridge_priority(br, args[1]);
+-              spin_unlock_bh(&br->lock);
+               return 0;
+ 
+       case BRCTL_SET_PORT_PRIORITY:
+diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
+index 11a2e6c8538f..7bbc8fe25261 100644
+--- a/net/bridge/br_multicast.c
++++ b/net/bridge/br_multicast.c
+@@ -1086,6 +1086,9 @@ static void br_multicast_add_router(struct net_bridge *br,
+       struct net_bridge_port *p;
+       struct hlist_node *slot = NULL;
+ 
++      if (!hlist_unhashed(&port->rlist))
++              return;
++
+       hlist_for_each_entry(p, &br->router_list, rlist) {
+               if ((unsigned long) port >= (unsigned long) p)
+                       break;
+@@ -1113,12 +1116,8 @@ static void br_multicast_mark_router(struct net_bridge *br,
+       if (port->multicast_router != 1)
+               return;
+ 
+-      if (!hlist_unhashed(&port->rlist))
+-              goto timer;
+-
+       br_multicast_add_router(br, port);
+ 
+-timer:
+       mod_timer(&port->multicast_router_timer,
+                 now + br->multicast_querier_interval);
+ }
+diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
+index 189ba1e7d851..9a0005aee9ad 100644
+--- a/net/bridge/br_stp_if.c
++++ b/net/bridge/br_stp_if.c
+@@ -243,12 +243,13 @@ bool br_stp_recalculate_bridge_id(struct net_bridge *br)
+       return true;
+ }
+ 
+-/* called under bridge lock */
++/* Acquires and releases bridge lock */
+ void br_stp_set_bridge_priority(struct net_bridge *br, u16 newprio)
+ {
+       struct net_bridge_port *p;
+       int wasroot;
+ 
++      spin_lock_bh(&br->lock);
+       wasroot = br_is_root_bridge(br);
+ 
+       list_for_each_entry(p, &br->port_list, list) {
+@@ -266,6 +267,7 @@ void br_stp_set_bridge_priority(struct net_bridge *br, u16 newprio)
+       br_port_state_selection(br);
+       if (br_is_root_bridge(br) && !wasroot)
+               br_become_root_bridge(br);
++      spin_unlock_bh(&br->lock);
+ }
+ 
+ /* called under bridge lock */
+diff --git a/net/core/neighbour.c b/net/core/neighbour.c
+index 7d95f69635c6..0f062c671da9 100644
+--- a/net/core/neighbour.c
++++ b/net/core/neighbour.c
+@@ -976,6 +976,8 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
+       rc = 0;
+       if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
+               goto out_unlock_bh;
++      if (neigh->dead)
++              goto out_dead;
+ 
+       if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
+               if (NEIGH_VAR(neigh->parms, MCAST_PROBES) +
+@@ -1032,6 +1034,13 @@ out_unlock_bh:
+               write_unlock(&neigh->lock);
+       local_bh_enable();
+       return rc;
++
++out_dead:
++      if (neigh->nud_state & NUD_STALE)
++              goto out_unlock_bh;
++      write_unlock_bh(&neigh->lock);
++      kfree_skb(skb);
++      return 1;
+ }
+ EXPORT_SYMBOL(__neigh_event_send);
+ 
+@@ -1095,6 +1104,8 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
+       if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
+           (old & (NUD_NOARP | NUD_PERMANENT)))
+               goto out;
++      if (neigh->dead)
++              goto out;
+ 
+       if (!(new & NUD_VALID)) {
+               neigh_del_timer(neigh);
+@@ -1244,6 +1255,8 @@ EXPORT_SYMBOL(neigh_update);
+  */
+ void __neigh_set_probe_once(struct neighbour *neigh)
+ {
++      if (neigh->dead)
++              return;
+       neigh->updated = jiffies;
+       if (!(neigh->nud_state & NUD_FAILED))
+               return;
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 69ec61abfb37..8207f8d7f665 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -368,9 +368,11 @@ refill:
+               for (order = NETDEV_FRAG_PAGE_MAX_ORDER; ;) {
+                       gfp_t gfp = gfp_mask;
+ 
+-                      if (order)
++                      if (order) {
+                               gfp |= __GFP_COMP | __GFP_NOWARN |
+                                      __GFP_NOMEMALLOC;
++                              gfp &= ~__GFP_WAIT;
++                      }
+                       nc->frag.page = alloc_pages(gfp, order);
+                       if (likely(nc->frag.page))
+                               break;
+diff --git a/net/core/sock.c b/net/core/sock.c
+index 650dd58ebd05..8ebfa52e5d70 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -1914,8 +1914,10 @@ bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio)
+       do {
+               gfp_t gfp = prio;
+ 
+-              if (order)
++              if (order) {
+                       gfp |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY;
++                      gfp &= ~__GFP_WAIT;
++              }
+               pfrag->page = alloc_pages(gfp, order);
+               if (likely(pfrag->page)) {
+                       pfrag->offset = 0;
+diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
+index 07bd8edef417..951fe55b1671 100644
+--- a/net/ipv4/af_inet.c
++++ b/net/ipv4/af_inet.c
+@@ -228,6 +228,8 @@ int inet_listen(struct socket *sock, int backlog)
+                               err = 0;
+                       if (err)
+                               goto out;
++
++                      tcp_fastopen_init_key_once(true);
+               }
+               err = inet_csk_listen_start(sk, backlog);
+               if (err)
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 29d240b87af1..dc45221dc692 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -2684,10 +2684,13 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
+ 
+       case TCP_FASTOPEN:
+               if (val >= 0 && ((1 << sk->sk_state) & (TCPF_CLOSE |
+-                  TCPF_LISTEN)))
++                  TCPF_LISTEN))) {
++                      tcp_fastopen_init_key_once(true);
++
+                       err = fastopen_init_queue(sk, val);
+-              else
++              } else {
+                       err = -EINVAL;
++              }
+               break;
+       case TCP_TIMESTAMP:
+               if (!tp->repair)
+diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
+index f195d9316e55..ee6518d1afe5 100644
+--- a/net/ipv4/tcp_fastopen.c
++++ b/net/ipv4/tcp_fastopen.c
+@@ -84,8 +84,6 @@ void tcp_fastopen_cookie_gen(__be32 src, __be32 dst,
+       __be32 path[4] = { src, dst, 0, 0 };
+       struct tcp_fastopen_context *ctx;
+ 
+-      tcp_fastopen_init_key_once(true);
+-
+       rcu_read_lock();
+       ctx = rcu_dereference(tcp_fastopen_ctx);
+       if (ctx) {
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index 48b181797d7b..84a60b82e235 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -1264,16 +1264,6 @@ static void packet_sock_destruct(struct sock *sk)
+       sk_refcnt_debug_dec(sk);
+ }
+ 
+-static int fanout_rr_next(struct packet_fanout *f, unsigned int num)
+-{
+-      int x = atomic_read(&f->rr_cur) + 1;
+-
+-      if (x >= num)
+-              x = 0;
+-
+-      return x;
+-}
+-
+ static unsigned int fanout_demux_hash(struct packet_fanout *f,
+                                     struct sk_buff *skb,
+                                     unsigned int num)
+@@ -1285,13 +1275,9 @@ static unsigned int fanout_demux_lb(struct packet_fanout *f,
+                                   struct sk_buff *skb,
+                                   unsigned int num)
+ {
+-      int cur, old;
++      unsigned int val = atomic_inc_return(&f->rr_cur);
+ 
+-      cur = atomic_read(&f->rr_cur);
+-      while ((old = atomic_cmpxchg(&f->rr_cur, cur,
+-                                   fanout_rr_next(f, num))) != cur)
+-              cur = old;
+-      return cur;
++      return val % num;
+ }
+ 
+ static unsigned int fanout_demux_cpu(struct packet_fanout *f,
+@@ -1345,7 +1331,7 @@ static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
+                            struct packet_type *pt, struct net_device *orig_dev)
+ {
+       struct packet_fanout *f = pt->af_packet_priv;
+-      unsigned int num = f->num_members;
++      unsigned int num = ACCESS_ONCE(f->num_members);
+       struct packet_sock *po;
+       unsigned int idx;
+ 
+diff --git a/net/sctp/output.c b/net/sctp/output.c
+index 740ca5f7add0..e39e6d561592 100644
+--- a/net/sctp/output.c
++++ b/net/sctp/output.c
+@@ -599,7 +599,9 @@ out:
+       return err;
+ no_route:
+       kfree_skb(nskb);
+-      IP_INC_STATS(sock_net(asoc->base.sk), IPSTATS_MIB_OUTNOROUTES);
++
++      if (asoc)
++              IP_INC_STATS(sock_net(asoc->base.sk), IPSTATS_MIB_OUTNOROUTES);
+ 
+       /* FIXME: Returning the 'err' will effect all the associations
+        * associated with a socket, although only one of the paths of the
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index 604a6acdf92e..f940fdc540f5 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -1532,8 +1532,10 @@ static void sctp_close(struct sock *sk, long timeout)
+ 
+       /* Supposedly, no process has access to the socket, but
+        * the net layers still may.
++       * Also, sctp_destroy_sock() needs to be called with addr_wq_lock
++       * held and that should be grabbed before socket lock.
+        */
+-      local_bh_disable();
++      spin_lock_bh(&net->sctp.addr_wq_lock);
+       bh_lock_sock(sk);
+ 
+       /* Hold the sock, since sk_common_release() will put sock_put()
+@@ -1543,7 +1545,7 @@ static void sctp_close(struct sock *sk, long timeout)
+       sk_common_release(sk);
+ 
+       bh_unlock_sock(sk);
+-      local_bh_enable();
++      spin_unlock_bh(&net->sctp.addr_wq_lock);
+ 
+       sock_put(sk);
+ 
+@@ -3511,6 +3513,7 @@ static int sctp_setsockopt_auto_asconf(struct sock *sk, char __user *optval,
+       if ((val && sp->do_auto_asconf) || (!val && !sp->do_auto_asconf))
+               return 0;
+ 
++      spin_lock_bh(&sock_net(sk)->sctp.addr_wq_lock);
+       if (val == 0 && sp->do_auto_asconf) {
+               list_del(&sp->auto_asconf_list);
+               sp->do_auto_asconf = 0;
+@@ -3519,6 +3522,7 @@ static int sctp_setsockopt_auto_asconf(struct sock *sk, char __user *optval,
+                   &sock_net(sk)->sctp.auto_asconf_splist);
+               sp->do_auto_asconf = 1;
+       }
++      spin_unlock_bh(&sock_net(sk)->sctp.addr_wq_lock);
+       return 0;
+ }
+ 
+@@ -4009,18 +4013,28 @@ static int sctp_init_sock(struct sock *sk)
+       local_bh_disable();
+       percpu_counter_inc(&sctp_sockets_allocated);
+       sock_prot_inuse_add(net, sk->sk_prot, 1);
++
++      /* Nothing can fail after this block, otherwise
++       * sctp_destroy_sock() will be called without addr_wq_lock held
++       */
+       if (net->sctp.default_auto_asconf) {
++              spin_lock(&sock_net(sk)->sctp.addr_wq_lock);
+               list_add_tail(&sp->auto_asconf_list,
+                   &net->sctp.auto_asconf_splist);
+               sp->do_auto_asconf = 1;
+-      } else
++              spin_unlock(&sock_net(sk)->sctp.addr_wq_lock);
++      } else {
+               sp->do_auto_asconf = 0;
++      }
++
+       local_bh_enable();
+ 
+       return 0;
+ }
+ 
+-/* Cleanup any SCTP per socket resources.  */
++/* Cleanup any SCTP per socket resources. Must be called with
++ * sock_net(sk)->sctp.addr_wq_lock held if sp->do_auto_asconf is true
++ */
+ static void sctp_destroy_sock(struct sock *sk)
+ {
+       struct sctp_sock *sp;
+@@ -6973,6 +6987,19 @@ void sctp_copy_sock(struct sock *newsk, struct sock *sk,
+       newinet->mc_list = NULL;
+ }
+ 
++static inline void sctp_copy_descendant(struct sock *sk_to,
++                                      const struct sock *sk_from)
++{
++      int ancestor_size = sizeof(struct inet_sock) +
++                          sizeof(struct sctp_sock) -
++                          offsetof(struct sctp_sock, auto_asconf_list);
++
++      if (sk_from->sk_family == PF_INET6)
++              ancestor_size += sizeof(struct ipv6_pinfo);
++
++      __inet_sk_copy_descendant(sk_to, sk_from, ancestor_size);
++}
++
+ /* Populate the fields of the newsk from the oldsk and migrate the assoc
+  * and its messages to the newsk.
+  */
+@@ -6987,7 +7014,6 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
+       struct sk_buff *skb, *tmp;
+       struct sctp_ulpevent *event;
+       struct sctp_bind_hashbucket *head;
+-      struct list_head tmplist;
+ 
+       /* Migrate socket buffer sizes and all the socket level options to the
+        * new socket.
+@@ -6995,12 +7021,7 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
+       newsk->sk_sndbuf = oldsk->sk_sndbuf;
+       newsk->sk_rcvbuf = oldsk->sk_rcvbuf;
+       /* Brute force copy old sctp opt. */
+-      if (oldsp->do_auto_asconf) {
+-              memcpy(&tmplist, &newsp->auto_asconf_list, sizeof(tmplist));
+-              inet_sk_copy_descendant(newsk, oldsk);
+-              memcpy(&newsp->auto_asconf_list, &tmplist, sizeof(tmplist));
+-      } else
+-              inet_sk_copy_descendant(newsk, oldsk);
++      sctp_copy_descendant(newsk, oldsk);
+ 
+       /* Restore the ep value that was overwritten with the above structure
+        * copy.
+diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
+index 5081e809821f..c6fe40568690 100644
+--- a/virt/kvm/arm/arch_timer.c
++++ b/virt/kvm/arm/arch_timer.c
+@@ -61,12 +61,14 @@ static void timer_disarm(struct arch_timer_cpu *timer)
+ 
+ static void kvm_timer_inject_irq(struct kvm_vcpu *vcpu)
+ {
++      int ret;
+       struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+ 
+       timer->cntv_ctl |= ARCH_TIMER_CTRL_IT_MASK;
+-      kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
+-                          timer->irq->irq,
+-                          timer->irq->level);
++      ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
++                                timer->irq->irq,
++                                timer->irq->level);
++      WARN_ON(ret);
+ }
+ 
+ static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
+@@ -307,12 +309,24 @@ void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu)
+       timer_disarm(timer);
+ }
+ 
+-int kvm_timer_init(struct kvm *kvm)
++void kvm_timer_enable(struct kvm *kvm)
+ {
+-      if (timecounter && wqueue) {
+-              kvm->arch.timer.cntvoff = kvm_phys_timer_read();
++      if (kvm->arch.timer.enabled)
++              return;
++
++      /*
++       * There is a potential race here between VCPUs starting for the first
++       * time, which may be enabling the timer multiple times.  That doesn't
++       * hurt though, because we're just setting a variable to the same
++       * variable that it already was.  The important thing is that all
++       * VCPUs have the enabled variable set, before entering the guest, if
++       * the arch timers are enabled.
++       */
++      if (timecounter && wqueue)
+               kvm->arch.timer.enabled = 1;
+-      }
++}
+ 
+-      return 0;
++void kvm_timer_init(struct kvm *kvm)
++{
++      kvm->arch.timer.cntvoff = kvm_phys_timer_read();
+ }
+diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
+index c324a52bb407..152ec76ccb42 100644
+--- a/virt/kvm/arm/vgic.c
++++ b/virt/kvm/arm/vgic.c
+@@ -1042,6 +1042,7 @@ static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
+                         lr, irq, vgic_cpu->vgic_lr[lr]);
+               BUG_ON(!test_bit(lr, vgic_cpu->lr_used));
+               vgic_cpu->vgic_lr[lr] |= GICH_LR_PENDING_BIT;
++              __clear_bit(lr, (unsigned long *)vgic_cpu->vgic_elrsr);
+               return true;
+       }
+ 
+@@ -1055,6 +1056,7 @@ static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
+       vgic_cpu->vgic_lr[lr] = MK_LR_PEND(sgi_source_id, irq);
+       vgic_cpu->vgic_irq_lr_map[irq] = lr;
+       set_bit(lr, vgic_cpu->lr_used);
++      __clear_bit(lr, (unsigned long *)vgic_cpu->vgic_elrsr);
+ 
+       if (!vgic_irq_is_edge(vcpu, irq))
+               vgic_cpu->vgic_lr[lr] |= GICH_LR_EOI;
+@@ -1209,6 +1211,14 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
+       if (vgic_cpu->vgic_misr & GICH_MISR_U)
+               vgic_cpu->vgic_hcr &= ~GICH_HCR_UIE;
+ 
++      /*
++       * In the next iterations of the vcpu loop, if we sync the vgic state
++       * after flushing it, but before entering the guest (this happens for
++       * pending signals and vmid rollovers), then make sure we don't pick
++       * up any old maintenance interrupts here.
++       */
++      memset(vgic_cpu->vgic_eisr, 0, sizeof(vgic_cpu->vgic_eisr[0]) * 2);
++
+       return level_pending;
+ }
+ 
