commit:     e5d1e5b00493ff84e00a64d865ea68bca791188e
Author:     Mike Pagano <mpagano@gentoo.org>
AuthorDate: Thu Oct  5 11:39:34 2017 +0000
Commit:     Mike Pagano <mpagano@gentoo.org>
CommitDate: Thu Oct  5 11:39:34 2017 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=e5d1e5b0

Linux patch 4.4.90

 0000_README             |    4 +
 1089_linux-4.4.90.patch | 1208 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1212 insertions(+)

diff --git a/0000_README b/0000_README
index 43c1c6e..fb5ca42 100644
--- a/0000_README
+++ b/0000_README
@@ -399,6 +399,10 @@ Patch:  1088_linux-4.4.89.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.4.89
 
+Patch:  1089_linux-4.4.90.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.4.90
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1089_linux-4.4.90.patch b/1089_linux-4.4.90.patch
new file mode 100644
index 0000000..658420d
--- /dev/null
+++ b/1089_linux-4.4.90.patch
@@ -0,0 +1,1208 @@
+diff --git a/Makefile b/Makefile
+index 7e4c46b375b3..ca5aaaf4aef7 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 4
+-SUBLEVEL = 89
++SUBLEVEL = 90
+ EXTRAVERSION =
+ NAME = Blurry Fish Butt
+ 
+diff --git a/arch/arm/boot/dts/pxa27x.dtsi b/arch/arm/boot/dts/pxa27x.dtsi
+index 7f68a1ee7073..210192c38df3 100644
+--- a/arch/arm/boot/dts/pxa27x.dtsi
++++ b/arch/arm/boot/dts/pxa27x.dtsi
+@@ -13,6 +13,7 @@
+                       interrupts = <25>;
+                       #dma-channels = <32>;
+                       #dma-cells = <2>;
++                      #dma-requests = <75>;
+                       status = "okay";
+               };
+ 
+diff --git a/arch/arm/boot/dts/pxa3xx.dtsi b/arch/arm/boot/dts/pxa3xx.dtsi
+index 564341af7e97..fec47bcd8292 100644
+--- a/arch/arm/boot/dts/pxa3xx.dtsi
++++ b/arch/arm/boot/dts/pxa3xx.dtsi
+@@ -12,6 +12,7 @@
+                       interrupts = <25>;
+                       #dma-channels = <32>;
+                       #dma-cells = <2>;
++                      #dma-requests = <100>;
+                       status = "okay";
+               };
+ 
+diff --git a/arch/arm/mach-pxa/devices.c b/arch/arm/mach-pxa/devices.c
+index 2a6e0ae2b920..614e9d8f0a54 100644
+--- a/arch/arm/mach-pxa/devices.c
++++ b/arch/arm/mach-pxa/devices.c
+@@ -1203,6 +1203,7 @@ void __init pxa2xx_set_spi_info(unsigned id, struct pxa2xx_spi_master *info)
+ 
+ static struct mmp_dma_platdata pxa_dma_pdata = {
+       .dma_channels   = 0,
++      .nb_requestors  = 0,
+ };
+ 
+ static struct resource pxa_dma_resource[] = {
+@@ -1231,8 +1232,9 @@ static struct platform_device pxa2xx_pxa_dma = {
+       .resource       = pxa_dma_resource,
+ };
+ 
+-void __init pxa2xx_set_dmac_info(int nb_channels)
++void __init pxa2xx_set_dmac_info(int nb_channels, int nb_requestors)
+ {
+       pxa_dma_pdata.dma_channels = nb_channels;
++      pxa_dma_pdata.nb_requestors = nb_requestors;
+       pxa_register_device(&pxa2xx_pxa_dma, &pxa_dma_pdata);
+ }
+diff --git a/arch/arm/mach-pxa/pxa25x.c b/arch/arm/mach-pxa/pxa25x.c
+index 1dc85ffc3e20..049b9cc22720 100644
+--- a/arch/arm/mach-pxa/pxa25x.c
++++ b/arch/arm/mach-pxa/pxa25x.c
+@@ -206,7 +206,7 @@ static int __init pxa25x_init(void)
+               register_syscore_ops(&pxa_irq_syscore_ops);
+               register_syscore_ops(&pxa2xx_mfp_syscore_ops);
+ 
+-              pxa2xx_set_dmac_info(16);
++              pxa2xx_set_dmac_info(16, 40);
+               pxa_register_device(&pxa25x_device_gpio, &pxa25x_gpio_info);
+               ret = platform_add_devices(pxa25x_devices,
+                                          ARRAY_SIZE(pxa25x_devices));
+diff --git a/arch/arm/mach-pxa/pxa27x.c b/arch/arm/mach-pxa/pxa27x.c
+index ffc424028557..2fb6430b7a34 100644
+--- a/arch/arm/mach-pxa/pxa27x.c
++++ b/arch/arm/mach-pxa/pxa27x.c
+@@ -309,7 +309,7 @@ static int __init pxa27x_init(void)
+               if (!of_have_populated_dt()) {
+                       pxa_register_device(&pxa27x_device_gpio,
+                                           &pxa27x_gpio_info);
+-                      pxa2xx_set_dmac_info(32);
++                      pxa2xx_set_dmac_info(32, 75);
+                       ret = platform_add_devices(devices,
+                                                  ARRAY_SIZE(devices));
+               }
+diff --git a/arch/arm/mach-pxa/pxa3xx.c b/arch/arm/mach-pxa/pxa3xx.c
+index 20ce2d386f17..ca06f082497c 100644
+--- a/arch/arm/mach-pxa/pxa3xx.c
++++ b/arch/arm/mach-pxa/pxa3xx.c
+@@ -450,7 +450,7 @@ static int __init pxa3xx_init(void)
+               if (of_have_populated_dt())
+                       return 0;
+ 
+-              pxa2xx_set_dmac_info(32);
++              pxa2xx_set_dmac_info(32, 100);
+               ret = platform_add_devices(devices, ARRAY_SIZE(devices));
+               if (ret)
+                       return ret;
+diff --git a/arch/arm/plat-pxa/include/plat/dma.h b/arch/arm/plat-pxa/include/plat/dma.h
+index 28848b344e2d..ceba3e4184fc 100644
+--- a/arch/arm/plat-pxa/include/plat/dma.h
++++ b/arch/arm/plat-pxa/include/plat/dma.h
+@@ -95,6 +95,6 @@ static inline int pxad_toggle_reserved_channel(int legacy_channel)
+ }
+ #endif
+ 
+-extern void __init pxa2xx_set_dmac_info(int nb_channels);
++extern void __init pxa2xx_set_dmac_info(int nb_channels, int nb_requestors);
+ 
+ #endif /* __PLAT_DMA_H */
+diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c
+index c5f9a9e3d1f3..28d83f536e93 100644
+--- a/arch/arm/xen/mm.c
++++ b/arch/arm/xen/mm.c
+@@ -199,6 +199,7 @@ static struct dma_map_ops xen_swiotlb_dma_ops = {
+       .unmap_page = xen_swiotlb_unmap_page,
+       .dma_supported = xen_swiotlb_dma_supported,
+       .set_dma_mask = xen_swiotlb_set_dma_mask,
++      .mmap = xen_swiotlb_dma_mmap,
+ };
+ 
+ int __init xen_mm_init(void)
+diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
+index 20ceb5edf7b8..d019c3a58cc2 100644
+--- a/arch/arm64/kernel/head.S
++++ b/arch/arm64/kernel/head.S
+@@ -446,6 +446,7 @@ ENDPROC(__mmap_switched)
+  * booted in EL1 or EL2 respectively.
+  */
+ ENTRY(el2_setup)
++      msr     SPsel, #1                       // We want to use SP_EL{1,2}
+       mrs     x0, CurrentEL
+       cmp     x0, #CurrentEL_EL2
+       b.ne    1f
+diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
+index 7fabf49f2aeb..86485415c5f0 100644
+--- a/arch/arm64/mm/fault.c
++++ b/arch/arm64/mm/fault.c
+@@ -447,7 +447,7 @@ static struct fault_info {
+       { do_translation_fault, SIGSEGV, SEGV_MAPERR,   "level 0 translation fault"     },
+       { do_translation_fault, SIGSEGV, SEGV_MAPERR,   "level 1 translation fault"     },
+       { do_translation_fault, SIGSEGV, SEGV_MAPERR,   "level 2 translation fault"     },
+-      { do_page_fault,        SIGSEGV, SEGV_MAPERR,   "level 3 translation fault"     },
++      { do_translation_fault, SIGSEGV, SEGV_MAPERR,   "level 3 translation fault"     },
+       { do_bad,               SIGBUS,  0,             "unknown 8"                     },
+       { do_page_fault,        SIGSEGV, SEGV_ACCERR,   "level 1 access flag fault"     },
+       { do_page_fault,        SIGSEGV, SEGV_ACCERR,   "level 2 access flag fault"     },
+diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
+index 54cf9bc94dad..3a095670b0c4 100644
+--- a/arch/powerpc/kvm/book3s_64_vio.c
++++ b/arch/powerpc/kvm/book3s_64_vio.c
+@@ -101,22 +101,17 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
+                                  struct kvm_create_spapr_tce *args)
+ {
+       struct kvmppc_spapr_tce_table *stt = NULL;
++      struct kvmppc_spapr_tce_table *siter;
+       long npages;
+       int ret = -ENOMEM;
+       int i;
+ 
+-      /* Check this LIOBN hasn't been previously allocated */
+-      list_for_each_entry(stt, &kvm->arch.spapr_tce_tables, list) {
+-              if (stt->liobn == args->liobn)
+-                      return -EBUSY;
+-      }
+-
+       npages = kvmppc_stt_npages(args->window_size);
+ 
+       stt = kzalloc(sizeof(*stt) + npages * sizeof(struct page *),
+                     GFP_KERNEL);
+       if (!stt)
+-              goto fail;
++              return ret;
+ 
+       stt->liobn = args->liobn;
+       stt->window_size = args->window_size;
+@@ -128,23 +123,36 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
+                       goto fail;
+       }
+ 
+-      kvm_get_kvm(kvm);
+-
+       mutex_lock(&kvm->lock);
+-      list_add(&stt->list, &kvm->arch.spapr_tce_tables);
++
++      /* Check this LIOBN hasn't been previously allocated */
++      ret = 0;
++      list_for_each_entry(siter, &kvm->arch.spapr_tce_tables, list) {
++              if (siter->liobn == args->liobn) {
++                      ret = -EBUSY;
++                      break;
++              }
++      }
++
++      if (!ret)
++              ret = anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops,
++                                     stt, O_RDWR | O_CLOEXEC);
++
++      if (ret >= 0) {
++              list_add(&stt->list, &kvm->arch.spapr_tce_tables);
++              kvm_get_kvm(kvm);
++      }
+ 
+       mutex_unlock(&kvm->lock);
+ 
+-      return anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops,
+-                              stt, O_RDWR | O_CLOEXEC);
++      if (ret >= 0)
++              return ret;
+ 
+-fail:
+-      if (stt) {
+-              for (i = 0; i < npages; i++)
+-                      if (stt->pages[i])
+-                              __free_page(stt->pages[i]);
++ fail:
++      for (i = 0; i < npages; i++)
++              if (stt->pages[i])
++                      __free_page(stt->pages[i]);
+ 
+-              kfree(stt);
+-      }
++      kfree(stt);
+       return ret;
+ }
+diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c
+index ceb18d349459..8dd0c8edefd6 100644
+--- a/arch/powerpc/platforms/pseries/mobility.c
++++ b/arch/powerpc/platforms/pseries/mobility.c
+@@ -225,8 +225,10 @@ static int add_dt_node(__be32 parent_phandle, __be32 drc_index)
+               return -ENOENT;
+ 
+       dn = dlpar_configure_connector(drc_index, parent_dn);
+-      if (!dn)
++      if (!dn) {
++              of_node_put(parent_dn);
+               return -ENOENT;
++      }
+ 
+       rc = dlpar_attach_node(dn);
+       if (rc)
+diff --git a/arch/x86/kernel/fpu/regset.c b/arch/x86/kernel/fpu/regset.c
+index 0bc3490420c5..72a483c295f2 100644
+--- a/arch/x86/kernel/fpu/regset.c
++++ b/arch/x86/kernel/fpu/regset.c
+@@ -116,6 +116,11 @@ int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
+       xsave = &fpu->state.xsave;
+ 
+       ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, xsave, 0, -1);
++
++      /* xcomp_bv must be 0 when using uncompacted format */
++      if (!ret && xsave->header.xcomp_bv)
++              ret = -EINVAL;
++
+       /*
+        * mxcsr reserved bits must be masked to zero for security reasons.
+        */
+@@ -126,6 +131,12 @@ int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
+        */
+       memset(&xsave->header.reserved, 0, 48);
+ 
++      /*
++       * In case of failure, mark all states as init:
++       */
++      if (ret)
++              fpstate_init(&fpu->state);
++
+       return ret;
+ }
+ 
+diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c
+index 31c6a60505e6..3de077116218 100644
+--- a/arch/x86/kernel/fpu/signal.c
++++ b/arch/x86/kernel/fpu/signal.c
+@@ -309,7 +309,9 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
+               fpu__drop(fpu);
+ 
+               if (__copy_from_user(&fpu->state.xsave, buf_fx, state_size) ||
+-                  __copy_from_user(&env, buf, sizeof(env))) {
++                  __copy_from_user(&env, buf, sizeof(env)) ||
++                  (state_size > offsetof(struct xregs_state, header) &&
++                   fpu->state.xsave.header.xcomp_bv)) {
+                       fpstate_init(&fpu->state);
+                       err = -1;
+               } else {
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index b12391119ce8..a018dff00808 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -2029,8 +2029,8 @@ static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
+ 
+               /* Allow posting non-urgent interrupts */
+               new.sn = 0;
+-      } while (cmpxchg(&pi_desc->control, old.control,
+-                      new.control) != old.control);
++      } while (cmpxchg64(&pi_desc->control, old.control,
++                         new.control) != old.control);
+ }
+ /*
+  * Switches to specified vcpu, until a matching vcpu_put(), but assumes
+@@ -4541,21 +4541,30 @@ static inline bool kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu)
+ {
+ #ifdef CONFIG_SMP
+       if (vcpu->mode == IN_GUEST_MODE) {
+-              struct vcpu_vmx *vmx = to_vmx(vcpu);
+-
+               /*
+-               * Currently, we don't support urgent interrupt,
+-               * all interrupts are recognized as non-urgent
+-               * interrupt, so we cannot post interrupts when
+-               * 'SN' is set.
++               * The vector of interrupt to be delivered to vcpu had
++               * been set in PIR before this function.
++               *
++               * Following cases will be reached in this block, and
++               * we always send a notification event in all cases as
++               * explained below.
+                *
+-               * If the vcpu is in guest mode, it means it is
+-               * running instead of being scheduled out and
+-               * waiting in the run queue, and that's the only
+-               * case when 'SN' is set currently, warning if
+-               * 'SN' is set.
++               * Case 1: vcpu keeps in non-root mode. Sending a
++               * notification event posts the interrupt to vcpu.
++               *
++               * Case 2: vcpu exits to root mode and is still
++               * runnable. PIR will be synced to vIRR before the
++               * next vcpu entry. Sending a notification event in
++               * this case has no effect, as vcpu is not in root
++               * mode.
++               *
++               * Case 3: vcpu exits to root mode and is blocked.
++               * vcpu_block() has already synced PIR to vIRR and
++               * never blocks vcpu if vIRR is not cleared. Therefore,
++               * a blocked vcpu here does not wait for any requested
++               * interrupts in PIR, and sending a notification event
++               * which has no effect is safe here.
+                */
+-              WARN_ON_ONCE(pi_test_sn(&vmx->pi_desc));
+ 
+               apic->send_IPI_mask(get_cpu_mask(vcpu->cpu),
+                               POSTED_INTR_VECTOR);
+@@ -9683,6 +9692,11 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
+               vmcs_write64(VIRTUAL_APIC_PAGE_ADDR,
+                               page_to_phys(vmx->nested.virtual_apic_page));
+               vmcs_write32(TPR_THRESHOLD, vmcs12->tpr_threshold);
++      } else {
++#ifdef CONFIG_X86_64
++              exec_control |= CPU_BASED_CR8_LOAD_EXITING |
++                              CPU_BASED_CR8_STORE_EXITING;
++#endif
+       }
+ 
+       if (cpu_has_vmx_msr_bitmap() &&
+@@ -10691,8 +10705,8 @@ static int vmx_pre_block(struct kvm_vcpu *vcpu)
+ 
+               /* set 'NV' to 'wakeup vector' */
+               new.nv = POSTED_INTR_WAKEUP_VECTOR;
+-      } while (cmpxchg(&pi_desc->control, old.control,
+-                      new.control) != old.control);
++      } while (cmpxchg64(&pi_desc->control, old.control,
++                         new.control) != old.control);
+ 
+       return 0;
+ }
+@@ -10723,8 +10737,8 @@ static void vmx_post_block(struct kvm_vcpu *vcpu)
+ 
+               /* set 'NV' to 'notification vector' */
+               new.nv = POSTED_INTR_VECTOR;
+-      } while (cmpxchg(&pi_desc->control, old.control,
+-                      new.control) != old.control);
++      } while (cmpxchg64(&pi_desc->control, old.control,
++                         new.control) != old.control);
+ 
+       if(vcpu->pre_pcpu != -1) {
+               spin_lock_irqsave(
+@@ -10755,7 +10769,7 @@ static int vmx_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
+       struct kvm_lapic_irq irq;
+       struct kvm_vcpu *vcpu;
+       struct vcpu_data vcpu_info;
+-      int idx, ret = -EINVAL;
++      int idx, ret = 0;
+ 
+       if (!kvm_arch_has_assigned_device(kvm) ||
+               !irq_remapping_cap(IRQ_POSTING_CAP))
+@@ -10763,7 +10777,12 @@ static int vmx_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
+ 
+       idx = srcu_read_lock(&kvm->irq_srcu);
+       irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
+-      BUG_ON(guest_irq >= irq_rt->nr_rt_entries);
++      if (guest_irq >= irq_rt->nr_rt_entries ||
++          hlist_empty(&irq_rt->map[guest_irq])) {
++              pr_warn_once("no route for guest_irq %u/%u (broken user 
space?)\n",
++                           guest_irq, irq_rt->nr_rt_entries);
++              goto out;
++      }
+ 
+       hlist_for_each_entry(e, &irq_rt->map[guest_irq], link) {
+               if (e->type != KVM_IRQ_ROUTING_MSI)
+@@ -10793,12 +10812,8 @@ static int vmx_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
+ 
+               if (set)
+                       ret = irq_set_vcpu_affinity(host_irq, &vcpu_info);
+-              else {
+-                      /* suppress notification event before unposting */
+-                      pi_set_sn(vcpu_to_pi_desc(vcpu));
++              else
+                       ret = irq_set_vcpu_affinity(host_irq, NULL);
+-                      pi_clear_sn(vcpu_to_pi_desc(vcpu));
+-              }
+ 
+               if (ret < 0) {
+                       printk(KERN_INFO "%s: failed to update PI IRTE\n",
+diff --git a/block/bsg-lib.c b/block/bsg-lib.c
+index 650f427d915b..341b8d858e67 100644
+--- a/block/bsg-lib.c
++++ b/block/bsg-lib.c
+@@ -147,7 +147,6 @@ static int bsg_create_job(struct device *dev, struct request *req)
+ failjob_rls_rqst_payload:
+       kfree(job->request_payload.sg_list);
+ failjob_rls_job:
+-      kfree(job);
+       return -ENOMEM;
+ }
+ 
+diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
+index 6a60936b46e0..62ce93568e11 100644
+--- a/drivers/crypto/talitos.c
++++ b/drivers/crypto/talitos.c
+@@ -1749,9 +1749,9 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
+               req_ctx->swinit = 0;
+       } else {
+               desc->ptr[1] = zero_entry;
+-              /* Indicate next op is not the first. */
+-              req_ctx->first = 0;
+       }
++      /* Indicate next op is not the first. */
++      req_ctx->first = 0;
+ 
+       /* HMAC key */
+       if (ctx->keylen)
+@@ -2770,7 +2770,8 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
+               t_alg->algt.alg.hash.final = ahash_final;
+               t_alg->algt.alg.hash.finup = ahash_finup;
+               t_alg->algt.alg.hash.digest = ahash_digest;
+-              t_alg->algt.alg.hash.setkey = ahash_setkey;
++              if (!strncmp(alg->cra_name, "hmac", 4))
++                      t_alg->algt.alg.hash.setkey = ahash_setkey;
+               t_alg->algt.alg.hash.import = ahash_import;
+               t_alg->algt.alg.hash.export = ahash_export;
+ 
+diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
+index 5eac08ffc697..d55bf85b76ce 100644
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -818,6 +818,14 @@ static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh
+                       spin_unlock(&head->batch_head->batch_lock);
+                       goto unlock_out;
+               }
++              /*
++               * We must assign batch_head of this stripe within the
++               * batch_lock, otherwise clear_batch_ready of batch head
++               * stripe could clear BATCH_READY bit of this stripe and
++               * this stripe->batch_head doesn't get assigned, which
++               * could confuse clear_batch_ready for this stripe
++               */
++              sh->batch_head = head->batch_head;
+ 
+               /*
+                * at this point, head's BATCH_READY could be cleared, but we
+@@ -825,8 +833,6 @@ static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh
+                */
+               list_add(&sh->batch_list, &head->batch_list);
+               spin_unlock(&head->batch_head->batch_lock);
+-
+-              sh->batch_head = head->batch_head;
+       } else {
+               head->batch_head = head;
+               sh->batch_head = head->batch_head;
+@@ -4258,7 +4264,8 @@ static void break_stripe_batch_list(struct stripe_head *head_sh,
+ 
+               set_mask_bits(&sh->state, ~(STRIPE_EXPAND_SYNC_FLAGS |
+                                           (1 << STRIPE_PREREAD_ACTIVE) |
+-                                          (1 << STRIPE_DEGRADED)),
++                                          (1 << STRIPE_DEGRADED) |
++                                          (1 << STRIPE_ON_UNPLUG_LIST)),
+                             head_sh->state & (1 << STRIPE_INSYNC));
+ 
+               sh->check_state = head_sh->check_state;
+diff --git a/drivers/misc/cxl/api.c b/drivers/misc/cxl/api.c
+index ea3eeb7011e1..690eb1a18caf 100644
+--- a/drivers/misc/cxl/api.c
++++ b/drivers/misc/cxl/api.c
+@@ -176,6 +176,10 @@ int cxl_start_context(struct cxl_context *ctx, u64 wed,
+               kernel = false;
+       }
+ 
++      /*
++       * Increment driver use count. Enables global TLBIs for hash
++       * and callbacks to handle the segment table
++       */
+       cxl_ctx_get();
+ 
+       if ((rc = cxl_attach_process(ctx, kernel, wed , 0))) {
+diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c
+index 10a02934bfc0..013558f4da4f 100644
+--- a/drivers/misc/cxl/file.c
++++ b/drivers/misc/cxl/file.c
+@@ -94,7 +94,6 @@ static int __afu_open(struct inode *inode, struct file *file, bool master)
+ 
+       pr_devel("afu_open pe: %i\n", ctx->pe);
+       file->private_data = ctx;
+-      cxl_ctx_get();
+ 
+       /* indicate success */
+       rc = 0;
+@@ -205,11 +204,18 @@ static long afu_ioctl_start_work(struct cxl_context *ctx,
+       ctx->pid = get_task_pid(current, PIDTYPE_PID);
+       ctx->glpid = get_task_pid(current->group_leader, PIDTYPE_PID);
+ 
++      /*
++       * Increment driver use count. Enables global TLBIs for hash
++       * and callbacks to handle the segment table
++       */
++      cxl_ctx_get();
++
+       trace_cxl_attach(ctx, work.work_element_descriptor, work.num_interrupts, amr);
+ 
+       if ((rc = cxl_attach_process(ctx, false, work.work_element_descriptor,
+                                    amr))) {
+               afu_release_irqs(ctx, ctx);
++              cxl_ctx_put();
+               goto out;
+       }
+ 
+diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
+index f8b2b5987ea9..ec91cd17bf34 100644
+--- a/drivers/pci/pci-sysfs.c
++++ b/drivers/pci/pci-sysfs.c
+@@ -522,7 +522,7 @@ static ssize_t driver_override_store(struct device *dev,
+                                    const char *buf, size_t count)
+ {
+       struct pci_dev *pdev = to_pci_dev(dev);
+-      char *driver_override, *old = pdev->driver_override, *cp;
++      char *driver_override, *old, *cp;
+ 
+       /* We need to keep extra room for a newline */
+       if (count >= (PAGE_SIZE - 1))
+@@ -536,12 +536,15 @@ static ssize_t driver_override_store(struct device *dev,
+       if (cp)
+               *cp = '\0';
+ 
++      device_lock(dev);
++      old = pdev->driver_override;
+       if (strlen(driver_override)) {
+               pdev->driver_override = driver_override;
+       } else {
+               kfree(driver_override);
+               pdev->driver_override = NULL;
+       }
++      device_unlock(dev);
+ 
+       kfree(old);
+ 
+@@ -552,8 +555,12 @@ static ssize_t driver_override_show(struct device *dev,
+                                   struct device_attribute *attr, char *buf)
+ {
+       struct pci_dev *pdev = to_pci_dev(dev);
++      ssize_t len;
+ 
+-      return snprintf(buf, PAGE_SIZE, "%s\n", pdev->driver_override);
++      device_lock(dev);
++      len = snprintf(buf, PAGE_SIZE, "%s\n", pdev->driver_override);
++      device_unlock(dev);
++      return len;
+ }
+ static DEVICE_ATTR_RW(driver_override);
+ 
+diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
+index e4b3d8f4fd85..bb4ed7b1f5df 100644
+--- a/drivers/scsi/scsi_transport_iscsi.c
++++ b/drivers/scsi/scsi_transport_iscsi.c
+@@ -3697,7 +3697,7 @@ iscsi_if_rx(struct sk_buff *skb)
+               uint32_t group;
+ 
+               nlh = nlmsg_hdr(skb);
+-              if (nlh->nlmsg_len < sizeof(*nlh) ||
++              if (nlh->nlmsg_len < sizeof(*nlh) + sizeof(*ev) ||
+                   skb->len < nlh->nlmsg_len) {
+                       break;
+               }
+diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c
+index f34ed47fcaf8..7f658fa4d22a 100644
+--- a/drivers/video/fbdev/aty/atyfb_base.c
++++ b/drivers/video/fbdev/aty/atyfb_base.c
+@@ -1861,7 +1861,7 @@ static int atyfb_ioctl(struct fb_info *info, u_int cmd, u_long arg)
+ #if defined(DEBUG) && defined(CONFIG_FB_ATY_CT)
+       case ATYIO_CLKR:
+               if (M64_HAS(INTEGRATED)) {
+-                      struct atyclk clk;
++                      struct atyclk clk = { 0 };
+                       union aty_pll *pll = &par->pll;
+                       u32 dsp_config = pll->ct.dsp_config;
+                       u32 dsp_on_off = pll->ct.dsp_on_off;
+diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
+index 8a58bbc14de2..f7b19c25c3a4 100644
+--- a/drivers/xen/swiotlb-xen.c
++++ b/drivers/xen/swiotlb-xen.c
+@@ -680,3 +680,22 @@ xen_swiotlb_set_dma_mask(struct device *dev, u64 dma_mask)
+       return 0;
+ }
+ EXPORT_SYMBOL_GPL(xen_swiotlb_set_dma_mask);
++
++/*
++ * Create userspace mapping for the DMA-coherent memory.
++ * This function should be called with the pages from the current domain only,
++ * passing pages mapped from other domains would lead to memory corruption.
++ */
++int
++xen_swiotlb_dma_mmap(struct device *dev, struct vm_area_struct *vma,
++                   void *cpu_addr, dma_addr_t dma_addr, size_t size,
++                   struct dma_attrs *attrs)
++{
++#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
++      if (__generic_dma_ops(dev)->mmap)
++              return __generic_dma_ops(dev)->mmap(dev, vma, cpu_addr,
++                                                  dma_addr, size, attrs);
++#endif
++      return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
++}
++EXPORT_SYMBOL_GPL(xen_swiotlb_dma_mmap);
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index 317b99acdf4b..9c3b9d07f341 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -2984,7 +2984,7 @@ static int btrfs_cmp_data_prepare(struct inode *src, u64 loff,
+ out:
+       if (ret)
+               btrfs_cmp_data_free(cmp);
+-      return 0;
++      return ret;
+ }
+ 
+ static int btrfs_cmp_data(struct inode *src, u64 loff, struct inode *dst,
+@@ -4118,6 +4118,10 @@ static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
+               ret = PTR_ERR(new_root);
+               goto out;
+       }
++      if (!is_fstree(new_root->objectid)) {
++              ret = -ENOENT;
++              goto out;
++      }
+ 
+       path = btrfs_alloc_path();
+       if (!path) {
+diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
+index 8ca9aa92972d..9ebe027cc4b7 100644
+--- a/fs/btrfs/relocation.c
++++ b/fs/btrfs/relocation.c
+@@ -2350,11 +2350,11 @@ void free_reloc_roots(struct list_head *list)
+       while (!list_empty(list)) {
+               reloc_root = list_entry(list->next, struct btrfs_root,
+                                       root_list);
++              __del_reloc_root(reloc_root);
+               free_extent_buffer(reloc_root->node);
+               free_extent_buffer(reloc_root->commit_root);
+               reloc_root->node = NULL;
+               reloc_root->commit_root = NULL;
+-              __del_reloc_root(reloc_root);
+       }
+ }
+ 
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index 53a827c6d8b1..b377aa8f266f 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -4060,6 +4060,14 @@ cifs_setup_session(const unsigned int xid, struct cifs_ses *ses,
+       cifs_dbg(FYI, "Security Mode: 0x%x Capabilities: 0x%x TimeAdjust: %d\n",
+                server->sec_mode, server->capabilities, server->timeAdj);
+ 
++      if (ses->auth_key.response) {
++              cifs_dbg(VFS, "Free previous auth_key.response = %p\n",
++                       ses->auth_key.response);
++              kfree(ses->auth_key.response);
++              ses->auth_key.response = NULL;
++              ses->auth_key.len = 0;
++      }
++
+       if (server->ops->sess_setup)
+               rc = server->ops->sess_setup(xid, ses, nls_info);
+ 
+diff --git a/fs/cifs/file.c b/fs/cifs/file.c
+index a0c0a49b6620..ec2d07bb9beb 100644
+--- a/fs/cifs/file.c
++++ b/fs/cifs/file.c
+@@ -224,6 +224,13 @@ cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
+       if (backup_cred(cifs_sb))
+               create_options |= CREATE_OPEN_BACKUP_INTENT;
+ 
++      /* O_SYNC also has bit for O_DSYNC so following check picks up either */
++      if (f_flags & O_SYNC)
++              create_options |= CREATE_WRITE_THROUGH;
++
++      if (f_flags & O_DIRECT)
++              create_options |= CREATE_NO_BUFFER;
++
+       oparms.tcon = tcon;
+       oparms.cifs_sb = cifs_sb;
+       oparms.desired_access = desired_access;
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+index 6c484ddf26a9..f2ff60e58ec8 100644
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -361,7 +361,7 @@ assemble_neg_contexts(struct smb2_negotiate_req *req)
+       build_encrypt_ctxt((struct smb2_encryption_neg_context *)pneg_ctxt);
+       req->NegotiateContextOffset = cpu_to_le32(OFFSET_OF_NEG_CONTEXT);
+       req->NegotiateContextCount = cpu_to_le16(2);
+-      inc_rfc1001_len(req, 4 + sizeof(struct smb2_preauth_neg_context) + 2
++      inc_rfc1001_len(req, 4 + sizeof(struct smb2_preauth_neg_context)
+                       + sizeof(struct smb2_encryption_neg_context)); /* calculate hash */
+ }
+ #else
+@@ -526,15 +526,22 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
+ 
+       /*
+        * validation ioctl must be signed, so no point sending this if we
+-       * can not sign it.  We could eventually change this to selectively
++       * can not sign it (ie are not known user).  Even if signing is not
++       * required (enabled but not negotiated), in those cases we selectively
+        * sign just this, the first and only signed request on a connection.
+-       * This is good enough for now since a user who wants better security
+-       * would also enable signing on the mount. Having validation of
+-       * negotiate info for signed connections helps reduce attack vectors
++       * Having validation of negotiate info  helps reduce attack vectors.
+        */
+-      if (tcon->ses->server->sign == false)
++      if (tcon->ses->session_flags & SMB2_SESSION_FLAG_IS_GUEST)
+               return 0; /* validation requires signing */
+ 
++      if (tcon->ses->user_name == NULL) {
++              cifs_dbg(FYI, "Can't validate negotiate: null user mount\n");
++              return 0; /* validation requires signing */
++      }
++
++      if (tcon->ses->session_flags & SMB2_SESSION_FLAG_IS_NULL)
++              cifs_dbg(VFS, "Unexpected null user (anonymous) auth flag sent 
by server\n");
++
+       vneg_inbuf.Capabilities =
+                       cpu_to_le32(tcon->ses->server->vals->req_capabilities);
+       memcpy(vneg_inbuf.Guid, tcon->ses->server->client_guid,
+diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
+index 070901e76653..ff36f5475d7e 100644
+--- a/fs/gfs2/glock.c
++++ b/fs/gfs2/glock.c
+@@ -1814,13 +1814,10 @@ static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
+ {
+       struct gfs2_glock_iter *gi = seq->private;
+       loff_t n = *pos;
+-      int ret;
+-
+-      if (gi->last_pos <= *pos)
+-              n = (*pos - gi->last_pos);
+ 
+-      ret = rhashtable_walk_start(&gi->hti);
+-      if (ret)
++      if (rhashtable_walk_init(&gl_hash_table, &gi->hti) != 0)
++              return NULL;
++      if (rhashtable_walk_start(&gi->hti) != 0)
+               return NULL;
+ 
+       do {
+@@ -1828,6 +1825,7 @@ static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
+       } while (gi->gl && n--);
+ 
+       gi->last_pos = *pos;
++
+       return gi->gl;
+ }
+ 
+@@ -1839,6 +1837,7 @@ static void *gfs2_glock_seq_next(struct seq_file *seq, void *iter_ptr,
+       (*pos)++;
+       gi->last_pos = *pos;
+       gfs2_glock_iter_next(gi);
++
+       return gi->gl;
+ }
+ 
+@@ -1847,7 +1846,10 @@ static void gfs2_glock_seq_stop(struct seq_file *seq, void *iter_ptr)
+       struct gfs2_glock_iter *gi = seq->private;
+ 
+       gi->gl = NULL;
+-      rhashtable_walk_stop(&gi->hti);
++      if (gi->hti.walker) {
++              rhashtable_walk_stop(&gi->hti);
++              rhashtable_walk_exit(&gi->hti);
++      }
+ }
+ 
+ static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr)
+@@ -1910,12 +1912,10 @@ static int gfs2_glocks_open(struct inode *inode, struct file *file)
+               struct gfs2_glock_iter *gi = seq->private;
+ 
+               gi->sdp = inode->i_private;
+-              gi->last_pos = 0;
+              seq->buf = kmalloc(GFS2_SEQ_GOODSIZE, GFP_KERNEL | __GFP_NOWARN);
+               if (seq->buf)
+                       seq->size = GFS2_SEQ_GOODSIZE;
+               gi->gl = NULL;
+-              ret = rhashtable_walk_init(&gl_hash_table, &gi->hti);
+       }
+       return ret;
+ }
+@@ -1926,7 +1926,6 @@ static int gfs2_glocks_release(struct inode *inode, struct file *file)
+       struct gfs2_glock_iter *gi = seq->private;
+ 
+       gi->gl = NULL;
+-      rhashtable_walk_exit(&gi->hti);
+       return seq_release_private(inode, file);
+ }
+ 
+@@ -1938,12 +1937,10 @@ static int gfs2_glstats_open(struct inode *inode, struct file *file)
+               struct seq_file *seq = file->private_data;
+               struct gfs2_glock_iter *gi = seq->private;
+               gi->sdp = inode->i_private;
+-              gi->last_pos = 0;
+              seq->buf = kmalloc(GFS2_SEQ_GOODSIZE, GFP_KERNEL | __GFP_NOWARN);
+               if (seq->buf)
+                       seq->size = GFS2_SEQ_GOODSIZE;
+               gi->gl = NULL;
+-              ret = rhashtable_walk_init(&gl_hash_table, &gi->hti);
+       }
+       return ret;
+ }
+diff --git a/fs/read_write.c b/fs/read_write.c
+index 819ef3faf1bb..bfd1a5dddf6e 100644
+--- a/fs/read_write.c
++++ b/fs/read_write.c
+@@ -112,7 +112,7 @@ generic_file_llseek_size(struct file *file, loff_t offset, int whence,
+                * In the generic case the entire file is data, so as long as
+                * offset isn't at the end of the file then the offset is data.
+                */
+-              if (offset >= eof)
++              if ((unsigned long long)offset >= eof)
+                       return -ENXIO;
+               break;
+       case SEEK_HOLE:
+@@ -120,7 +120,7 @@ generic_file_llseek_size(struct file *file, loff_t offset, int whence,
+                * There is a virtual hole at the end of the file, so as long as
+                * offset isn't i_size or larger, return i_size.
+                */
+-              if (offset >= eof)
++              if ((unsigned long long)offset >= eof)
+                       return -ENXIO;
+               offset = eof;
+               break;
+diff --git a/include/linux/key.h b/include/linux/key.h
+index 66f705243985..dcc115e8dd03 100644
+--- a/include/linux/key.h
++++ b/include/linux/key.h
+@@ -177,6 +177,7 @@ struct key {
+#define KEY_FLAG_TRUSTED_ONLY 9       /* set if keyring only accepts links to trusted keys */
+ #define KEY_FLAG_BUILTIN      10      /* set if key is builtin */
+#define KEY_FLAG_ROOT_CAN_INVAL       11      /* set if key can be invalidated by root without permission */
++#define KEY_FLAG_UID_KEYRING  12      /* set if key is a user or user session keyring */
+ 
+       /* the key type and key description string
+        * - the desc is used to match a key against search criteria
+@@ -218,6 +219,7 @@ extern struct key *key_alloc(struct key_type *type,
+#define KEY_ALLOC_QUOTA_OVERRUN       0x0001  /* add to quota, permit even if overrun */
+ #define KEY_ALLOC_NOT_IN_QUOTA        0x0002  /* not in quota */
+ #define KEY_ALLOC_TRUSTED     0x0004  /* Key should be flagged as trusted */
++#define KEY_ALLOC_UID_KEYRING 0x0010  /* allocating a user or user session keyring */
+ 
+ extern void key_revoke(struct key *key);
+ extern void key_invalidate(struct key *key);
+diff --git a/include/linux/platform_data/mmp_dma.h b/include/linux/platform_data/mmp_dma.h
+index 2a330ec9e2af..d1397c8ed94e 100644
+--- a/include/linux/platform_data/mmp_dma.h
++++ b/include/linux/platform_data/mmp_dma.h
+@@ -14,6 +14,7 @@
+ 
+ struct mmp_dma_platdata {
+       int dma_channels;
++      int nb_requestors;
+ };
+ 
+ #endif /* MMP_DMA_H */
+diff --git a/include/xen/swiotlb-xen.h b/include/xen/swiotlb-xen.h
+index 8b2eb93ae8ba..4d7fdbf20eff 100644
+--- a/include/xen/swiotlb-xen.h
++++ b/include/xen/swiotlb-xen.h
+@@ -58,4 +58,9 @@ xen_swiotlb_dma_supported(struct device *hwdev, u64 mask);
+ 
+ extern int
+ xen_swiotlb_set_dma_mask(struct device *dev, u64 dma_mask);
++
++extern int
++xen_swiotlb_dma_mmap(struct device *dev, struct vm_area_struct *vma,
++                   void *cpu_addr, dma_addr_t dma_addr, size_t size,
++                   struct dma_attrs *attrs);
+ #endif /* __LINUX_SWIOTLB_XEN_H */
+diff --git a/kernel/seccomp.c b/kernel/seccomp.c
+index 15a1795bbba1..efd384f3f852 100644
+--- a/kernel/seccomp.c
++++ b/kernel/seccomp.c
+@@ -457,14 +457,19 @@ static long seccomp_attach_filter(unsigned int flags,
+       return 0;
+ }
+ 
++void __get_seccomp_filter(struct seccomp_filter *filter)
++{
++      /* Reference count is bounded by the number of total processes. */
++      atomic_inc(&filter->usage);
++}
++
+/* get_seccomp_filter - increments the reference count of the filter on @tsk */
+ void get_seccomp_filter(struct task_struct *tsk)
+ {
+       struct seccomp_filter *orig = tsk->seccomp.filter;
+       if (!orig)
+               return;
+-      /* Reference count is bounded by the number of total processes. */
+-      atomic_inc(&orig->usage);
++      __get_seccomp_filter(orig);
+ }
+ 
+ static inline void seccomp_filter_free(struct seccomp_filter *filter)
+@@ -475,10 +480,8 @@ static inline void seccomp_filter_free(struct seccomp_filter *filter)
+       }
+ }
+ 
+-/* put_seccomp_filter - decrements the ref count of tsk->seccomp.filter */
+-void put_seccomp_filter(struct task_struct *tsk)
++static void __put_seccomp_filter(struct seccomp_filter *orig)
+ {
+-      struct seccomp_filter *orig = tsk->seccomp.filter;
+       /* Clean up single-reference branches iteratively. */
+       while (orig && atomic_dec_and_test(&orig->usage)) {
+               struct seccomp_filter *freeme = orig;
+@@ -487,6 +490,12 @@ void put_seccomp_filter(struct task_struct *tsk)
+       }
+ }
+ 
++/* put_seccomp_filter - decrements the ref count of tsk->seccomp.filter */
++void put_seccomp_filter(struct task_struct *tsk)
++{
++      __put_seccomp_filter(tsk->seccomp.filter);
++}
++
+ /**
+  * seccomp_send_sigsys - signals the task to allow in-process syscall emulation
+  * @syscall: syscall number to send to userland
+@@ -927,13 +936,13 @@ long seccomp_get_filter(struct task_struct *task, unsigned long filter_off,
+       if (!data)
+               goto out;
+ 
+-      get_seccomp_filter(task);
++      __get_seccomp_filter(filter);
+       spin_unlock_irq(&task->sighand->siglock);
+ 
+       if (copy_to_user(data, fprog->filter, bpf_classic_proglen(fprog)))
+               ret = -EFAULT;
+ 
+-      put_seccomp_filter(task);
++      __put_seccomp_filter(filter);
+       return ret;
+ 
+ out:
+diff --git a/kernel/sysctl.c b/kernel/sysctl.c
+index 002ec084124b..17c59e78661b 100644
+--- a/kernel/sysctl.c
++++ b/kernel/sysctl.c
+@@ -1159,6 +1159,8 @@ static struct ctl_table kern_table[] = {
+               .maxlen         = sizeof(unsigned int),
+               .mode           = 0644,
+               .proc_handler   = timer_migration_handler,
++              .extra1         = &zero,
++              .extra2         = &one,
+       },
+ #endif
+ #ifdef CONFIG_BPF_SYSCALL
+diff --git a/kernel/time/timer.c b/kernel/time/timer.c
+index bbc5d1114583..125407144c01 100644
+--- a/kernel/time/timer.c
++++ b/kernel/time/timer.c
+@@ -127,7 +127,7 @@ int timer_migration_handler(struct ctl_table *table, int write,
+       int ret;
+ 
+       mutex_lock(&mutex);
+-      ret = proc_dointvec(table, write, buffer, lenp, ppos);
++      ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+       if (!ret && write)
+               timers_update_migration(false);
+       mutex_unlock(&mutex);
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 4743066010c4..b64f35afee4e 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -3226,11 +3226,17 @@ static int tracing_open(struct inode *inode, struct file *file)
+       /* If this file was open for write, then erase contents */
+       if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
+               int cpu = tracing_get_cpu(inode);
++              struct trace_buffer *trace_buf = &tr->trace_buffer;
++
++#ifdef CONFIG_TRACER_MAX_TRACE
++              if (tr->current_trace->print_max)
++                      trace_buf = &tr->max_buffer;
++#endif
+ 
+               if (cpu == RING_BUFFER_ALL_CPUS)
+-                      tracing_reset_online_cpus(&tr->trace_buffer);
++                      tracing_reset_online_cpus(trace_buf);
+               else
+-                      tracing_reset(&tr->trace_buffer, cpu);
++                      tracing_reset(trace_buf, cpu);
+       }
+ 
+       if (file->f_mode & FMODE_READ) {
+@@ -4701,7 +4707,7 @@ static int tracing_wait_pipe(struct file *filp)
+                *
+                * iter->pos will be 0 if we haven't read anything.
+                */
+-              if (!tracing_is_on() && iter->pos)
++              if (!tracer_tracing_is_on(iter->tr) && iter->pos)
+                       break;
+ 
+               mutex_unlock(&iter->mutex);
+diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
+index 04401037140e..b6be51940ead 100644
+--- a/net/mac80211/offchannel.c
++++ b/net/mac80211/offchannel.c
+@@ -469,6 +469,8 @@ void ieee80211_roc_purge(struct ieee80211_local *local,
+       struct ieee80211_roc_work *roc, *tmp;
+       LIST_HEAD(tmp_list);
+ 
++      flush_work(&local->hw_roc_start);
++
+       mutex_lock(&local->mtx);
+       list_for_each_entry_safe(roc, tmp, &local->roc_list, list) {
+               if (sdata && roc->sdata != sdata)
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index de10e3c0e2a4..8ece212aa3d2 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -9786,6 +9786,9 @@ static int nl80211_set_rekey_data(struct sk_buff *skb, struct genl_info *info)
+       if (err)
+               return err;
+ 
++      if (!tb[NL80211_REKEY_DATA_REPLAY_CTR] || !tb[NL80211_REKEY_DATA_KEK] ||
++          !tb[NL80211_REKEY_DATA_KCK])
++              return -EINVAL;
+      if (nla_len(tb[NL80211_REKEY_DATA_REPLAY_CTR]) != NL80211_REPLAY_CTR_LEN)
+               return -ERANGE;
+       if (nla_len(tb[NL80211_REKEY_DATA_KEK]) != NL80211_KEK_LEN)
+diff --git a/security/keys/internal.h b/security/keys/internal.h
+index 5105c2c2da75..51ffb9cde073 100644
+--- a/security/keys/internal.h
++++ b/security/keys/internal.h
+@@ -136,7 +136,7 @@ extern key_ref_t keyring_search_aux(key_ref_t keyring_ref,
+extern key_ref_t search_my_process_keyrings(struct keyring_search_context *ctx);
+ extern key_ref_t search_process_keyrings(struct keyring_search_context *ctx);
+ 
+-extern struct key *find_keyring_by_name(const char *name, bool skip_perm_check);
++extern struct key *find_keyring_by_name(const char *name, bool uid_keyring);
+ 
+ extern int install_user_keyrings(void);
+ extern int install_thread_keyring_to_cred(struct cred *);
+diff --git a/security/keys/key.c b/security/keys/key.c
+index 09c10b181881..51d23c623424 100644
+--- a/security/keys/key.c
++++ b/security/keys/key.c
+@@ -296,6 +296,8 @@ struct key *key_alloc(struct key_type *type, const char *desc,
+               key->flags |= 1 << KEY_FLAG_IN_QUOTA;
+       if (flags & KEY_ALLOC_TRUSTED)
+               key->flags |= 1 << KEY_FLAG_TRUSTED;
++      if (flags & KEY_ALLOC_UID_KEYRING)
++              key->flags |= 1 << KEY_FLAG_UID_KEYRING;
+ 
+ #ifdef KEY_DEBUGGING
+       key->magic = KEY_DEBUG_MAGIC;
+diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
+index 671709d8610d..a009dc66eb8f 100644
+--- a/security/keys/keyctl.c
++++ b/security/keys/keyctl.c
+@@ -738,6 +738,11 @@ long keyctl_read_key(key_serial_t keyid, char __user *buffer, size_t buflen)
+ 
+       key = key_ref_to_ptr(key_ref);
+ 
++      if (test_bit(KEY_FLAG_NEGATIVE, &key->flags)) {
++              ret = -ENOKEY;
++              goto error2;
++      }
++
+       /* see if we can read it directly */
+       ret = key_permission(key_ref, KEY_NEED_READ);
+       if (ret == 0)
+diff --git a/security/keys/keyring.c b/security/keys/keyring.c
+index f931ccfeefb0..0c8dd4fbe130 100644
+--- a/security/keys/keyring.c
++++ b/security/keys/keyring.c
+@@ -416,7 +416,7 @@ static void keyring_describe(const struct key *keyring, struct seq_file *m)
+ }
+ 
+ struct keyring_read_iterator_context {
+-      size_t                  qty;
++      size_t                  buflen;
+       size_t                  count;
+       key_serial_t __user     *buffer;
+ };
+@@ -428,9 +428,9 @@ static int keyring_read_iterator(const void *object, void *data)
+       int ret;
+ 
+       kenter("{%s,%d},,{%zu/%zu}",
+-             key->type->name, key->serial, ctx->count, ctx->qty);
++             key->type->name, key->serial, ctx->count, ctx->buflen);
+ 
+-      if (ctx->count >= ctx->qty)
++      if (ctx->count >= ctx->buflen)
+               return 1;
+ 
+       ret = put_user(key->serial, ctx->buffer);
+@@ -465,16 +465,12 @@ static long keyring_read(const struct key *keyring,
+               return 0;
+ 
+       /* Calculate how much data we could return */
+-      ctx.qty = nr_keys * sizeof(key_serial_t);
+-
+       if (!buffer || !buflen)
+-              return ctx.qty;
+-
+-      if (buflen > ctx.qty)
+-              ctx.qty = buflen;
++              return nr_keys * sizeof(key_serial_t);
+ 
+       /* Copy the IDs of the subscribed keys into the buffer */
+       ctx.buffer = (key_serial_t __user *)buffer;
++      ctx.buflen = buflen;
+       ctx.count = 0;
+       ret = assoc_array_iterate(&keyring->keys, keyring_read_iterator, &ctx);
+       if (ret < 0) {
+@@ -965,15 +961,15 @@ found:
+ /*
+  * Find a keyring with the specified name.
+  *
+- * All named keyrings in the current user namespace are searched, provided they
+- * grant Search permission directly to the caller (unless this check is
+- * skipped).  Keyrings whose usage points have reached zero or who have been
+- * revoked are skipped.
++ * Only keyrings that have nonzero refcount, are not revoked, and are owned by a
++ * user in the current user namespace are considered.  If @uid_keyring is %true,
++ * the keyring additionally must have been allocated as a user or user session
++ * keyring; otherwise, it must grant Search permission directly to the caller.
+  *
+  * Returns a pointer to the keyring with the keyring's refcount having being
+  * incremented on success.  -ENOKEY is returned if a key could not be found.
+  */
+-struct key *find_keyring_by_name(const char *name, bool skip_perm_check)
++struct key *find_keyring_by_name(const char *name, bool uid_keyring)
+ {
+       struct key *keyring;
+       int bucket;
+@@ -1001,10 +997,15 @@ struct key *find_keyring_by_name(const char *name, bool skip_perm_check)
+                       if (strcmp(keyring->description, name) != 0)
+                               continue;
+ 
+-                      if (!skip_perm_check &&
+-                          key_permission(make_key_ref(keyring, 0),
+-                                         KEY_NEED_SEARCH) < 0)
+-                              continue;
++                      if (uid_keyring) {
++                              if (!test_bit(KEY_FLAG_UID_KEYRING,
++                                            &keyring->flags))
++                                      continue;
++                      } else {
++                              if (key_permission(make_key_ref(keyring, 0),
++                                                 KEY_NEED_SEARCH) < 0)
++                                      continue;
++                      }
+ 
+                       /* we've got a match but we might end up racing with
+                        * key_cleanup() if the keyring is currently 'dead'
+diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
+index 4ed909142956..7dd050f24261 100644
+--- a/security/keys/process_keys.c
++++ b/security/keys/process_keys.c
+@@ -76,7 +76,9 @@ int install_user_keyrings(void)
+               if (IS_ERR(uid_keyring)) {
+                       uid_keyring = keyring_alloc(buf, user->uid, INVALID_GID,
+                                                   cred, user_keyring_perm,
+-                                                  KEY_ALLOC_IN_QUOTA, NULL);
++                                                  KEY_ALLOC_UID_KEYRING |
++                                                      KEY_ALLOC_IN_QUOTA,
++                                                  NULL);
+                       if (IS_ERR(uid_keyring)) {
+                               ret = PTR_ERR(uid_keyring);
+                               goto error;
+@@ -92,7 +94,9 @@ int install_user_keyrings(void)
+                       session_keyring =
+                               keyring_alloc(buf, user->uid, INVALID_GID,
+                                             cred, user_keyring_perm,
+-                                            KEY_ALLOC_IN_QUOTA, NULL);
++                                            KEY_ALLOC_UID_KEYRING |
++                                                KEY_ALLOC_IN_QUOTA,
++                                            NULL);
+                       if (IS_ERR(session_keyring)) {
+                               ret = PTR_ERR(session_keyring);
+                               goto error_release;
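
For anyone consuming this outside the sys-kernel/gentoo-sources ebuild (which
applies these genpatches automatically), here is a minimal sketch of applying
the incremental patch by hand. The tree location below is an illustrative
assumption, not anything fixed by this commit:

    # Hypothetical manual application against a vanilla 4.4.89 tree.
    cd /usr/src/linux-4.4.89
    patch -p1 --dry-run < 1089_linux-4.4.90.patch  # verify it applies cleanly
    patch -p1 < 1089_linux-4.4.90.patch
    make kernelversion                             # should now report 4.4.90

The Makefile hunk at the top of the patch is what bumps SUBLEVEL from 89 to
90, so `make kernelversion` is a quick sanity check that the patch landed.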

Reply via email to