commit:     a7f210d59ff1a55ddbfcec02749239de6ba95528
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Feb  7 01:07:31 2015 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Feb  7 01:07:31 2015 +0000
URL:        http://sources.gentoo.org/gitweb/?p=proj/linux-patches.git;a=commit;h=a7f210d5

Linux patch 3.18.6

---
 0000_README             |    4 +
 1005_linux-3.18.6.patch | 4030 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 4034 insertions(+)

diff --git a/0000_README b/0000_README
index f8b4dcb..626b2f5 100644
--- a/0000_README
+++ b/0000_README
@@ -63,6 +63,10 @@ Patch:  1004_linux-3.18.5.patch
 From:   http://www.kernel.org
 Desc:   Linux 3.18.5
 
+Patch:  1005_linux-3.18.6.patch
+From:   http://www.kernel.org
+Desc:   Linux 3.18.6
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1005_linux-3.18.6.patch b/1005_linux-3.18.6.patch
new file mode 100644
index 0000000..b20fb1d
--- /dev/null
+++ b/1005_linux-3.18.6.patch
@@ -0,0 +1,4030 @@
+diff --git a/Makefile b/Makefile
+index 6276fcaabf21..d2bff2d5ae25 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 18
+-SUBLEVEL = 5
++SUBLEVEL = 6
+ EXTRAVERSION =
+ NAME = Diseased Newt
+ 
+diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
+index 98838a05ba6d..9d0ac091a52a 100644
+--- a/arch/alpha/mm/fault.c
++++ b/arch/alpha/mm/fault.c
+@@ -156,6 +156,8 @@ retry:
+       if (unlikely(fault & VM_FAULT_ERROR)) {
+               if (fault & VM_FAULT_OOM)
+                       goto out_of_memory;
++              else if (fault & VM_FAULT_SIGSEGV)
++                      goto bad_area;
+               else if (fault & VM_FAULT_SIGBUS)
+                       goto do_sigbus;
+               BUG();
+diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
+index 6f7e3a68803a..563cb27e37f5 100644
+--- a/arch/arc/mm/fault.c
++++ b/arch/arc/mm/fault.c
+@@ -161,6 +161,8 @@ good_area:
+ 
+       if (fault & VM_FAULT_OOM)
+               goto out_of_memory;
++      else if (fault & VM_FAULT_SIGSEGV)
++              goto bad_area;
+       else if (fault & VM_FAULT_SIGBUS)
+               goto do_sigbus;
+ 
+diff --git a/arch/arm/include/asm/xen/page.h b/arch/arm/include/asm/xen/page.h
+index 135c24a5ba26..68c739b3fdf4 100644
+--- a/arch/arm/include/asm/xen/page.h
++++ b/arch/arm/include/asm/xen/page.h
+@@ -107,4 +107,8 @@ static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
+ #define xen_remap(cookie, size) ioremap_cache((cookie), (size))
+ #define xen_unmap(cookie) iounmap((cookie))
+ 
++bool xen_arch_need_swiotlb(struct device *dev,
++                         unsigned long pfn,
++                         unsigned long mfn);
++
+ #endif /* _ASM_ARM_XEN_PAGE_H */
+diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c
+index 1163a3e9accd..2ffccd4eb084 100644
+--- a/arch/arm/mach-mvebu/coherency.c
++++ b/arch/arm/mach-mvebu/coherency.c
+@@ -342,6 +342,13 @@ static void __init armada_375_380_coherency_init(struct device_node *np)
+       arch_ioremap_caller = armada_pcie_wa_ioremap_caller;
+ 
+       /*
++       * We should switch the PL310 to I/O coherency mode only if
++       * I/O coherency is actually enabled.
++       */
++      if (!coherency_available())
++              return;
++
++      /*
+        * Add the PL310 property "arm,io-coherent". This makes sure the
+        * outer sync operation is not used, which allows to
+        * workaround the system erratum that causes deadlocks when
+diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c
+index b0e77de99148..f8a576b1d9bb 100644
+--- a/arch/arm/xen/mm.c
++++ b/arch/arm/xen/mm.c
+@@ -16,6 +16,13 @@
+ #include <asm/xen/hypercall.h>
+ #include <asm/xen/interface.h>
+ 
++bool xen_arch_need_swiotlb(struct device *dev,
++                         unsigned long pfn,
++                         unsigned long mfn)
++{
++      return (pfn != mfn);
++}
++
+ int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
+                                unsigned int address_bits,
+                                dma_addr_t *dma_handle)
+diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
+index 0eca93327195..d223a8b57c1e 100644
+--- a/arch/avr32/mm/fault.c
++++ b/arch/avr32/mm/fault.c
+@@ -142,6 +142,8 @@ good_area:
+       if (unlikely(fault & VM_FAULT_ERROR)) {
+               if (fault & VM_FAULT_OOM)
+                       goto out_of_memory;
++              else if (fault & VM_FAULT_SIGSEGV)
++                      goto bad_area;
+               else if (fault & VM_FAULT_SIGBUS)
+                       goto do_sigbus;
+               BUG();
+diff --git a/arch/cris/mm/fault.c b/arch/cris/mm/fault.c
+index 1790f22e71a2..2686a7aa8ec8 100644
+--- a/arch/cris/mm/fault.c
++++ b/arch/cris/mm/fault.c
+@@ -176,6 +176,8 @@ retry:
+       if (unlikely(fault & VM_FAULT_ERROR)) {
+               if (fault & VM_FAULT_OOM)
+                       goto out_of_memory;
++              else if (fault & VM_FAULT_SIGSEGV)
++                      goto bad_area;
+               else if (fault & VM_FAULT_SIGBUS)
+                       goto do_sigbus;
+               BUG();
+diff --git a/arch/frv/mm/fault.c b/arch/frv/mm/fault.c
+index 9a66372fc7c7..ec4917ddf678 100644
+--- a/arch/frv/mm/fault.c
++++ b/arch/frv/mm/fault.c
+@@ -168,6 +168,8 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
+       if (unlikely(fault & VM_FAULT_ERROR)) {
+               if (fault & VM_FAULT_OOM)
+                       goto out_of_memory;
++              else if (fault & VM_FAULT_SIGSEGV)
++                      goto bad_area;
+               else if (fault & VM_FAULT_SIGBUS)
+                       goto do_sigbus;
+               BUG();
+diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
+index 7225dad87094..ba5ba7accd0d 100644
+--- a/arch/ia64/mm/fault.c
++++ b/arch/ia64/mm/fault.c
+@@ -172,6 +172,8 @@ retry:
+                */
+               if (fault & VM_FAULT_OOM) {
+                       goto out_of_memory;
++              } else if (fault & VM_FAULT_SIGSEGV) {
++                      goto bad_area;
+               } else if (fault & VM_FAULT_SIGBUS) {
+                       signal = SIGBUS;
+                       goto bad_area;
+diff --git a/arch/m32r/mm/fault.c b/arch/m32r/mm/fault.c
+index e9c6a8014bd6..e3d4d4890104 100644
+--- a/arch/m32r/mm/fault.c
++++ b/arch/m32r/mm/fault.c
+@@ -200,6 +200,8 @@ good_area:
+       if (unlikely(fault & VM_FAULT_ERROR)) {
+               if (fault & VM_FAULT_OOM)
+                       goto out_of_memory;
++              else if (fault & VM_FAULT_SIGSEGV)
++                      goto bad_area;
+               else if (fault & VM_FAULT_SIGBUS)
+                       goto do_sigbus;
+               BUG();
+diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c
+index 2bd7487440c4..b2f04aee46ec 100644
+--- a/arch/m68k/mm/fault.c
++++ b/arch/m68k/mm/fault.c
+@@ -145,6 +145,8 @@ good_area:
+       if (unlikely(fault & VM_FAULT_ERROR)) {
+               if (fault & VM_FAULT_OOM)
+                       goto out_of_memory;
++              else if (fault & VM_FAULT_SIGSEGV)
++                      goto map_err;
+               else if (fault & VM_FAULT_SIGBUS)
+                       goto bus_err;
+               BUG();
+diff --git a/arch/metag/mm/fault.c b/arch/metag/mm/fault.c
+index 332680e5ebf2..2de5dc695a87 100644
+--- a/arch/metag/mm/fault.c
++++ b/arch/metag/mm/fault.c
+@@ -141,6 +141,8 @@ good_area:
+       if (unlikely(fault & VM_FAULT_ERROR)) {
+               if (fault & VM_FAULT_OOM)
+                       goto out_of_memory;
++              else if (fault & VM_FAULT_SIGSEGV)
++                      goto bad_area;
+               else if (fault & VM_FAULT_SIGBUS)
+                       goto do_sigbus;
+               BUG();
+diff --git a/arch/microblaze/mm/fault.c b/arch/microblaze/mm/fault.c
+index fa4cf52aa7a6..d46a5ebb7570 100644
+--- a/arch/microblaze/mm/fault.c
++++ b/arch/microblaze/mm/fault.c
+@@ -224,6 +224,8 @@ good_area:
+       if (unlikely(fault & VM_FAULT_ERROR)) {
+               if (fault & VM_FAULT_OOM)
+                       goto out_of_memory;
++              else if (fault & VM_FAULT_SIGSEGV)
++                      goto bad_area;
+               else if (fault & VM_FAULT_SIGBUS)
+                       goto do_sigbus;
+               BUG();
+diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
+index becc42bb1849..70ab5d664332 100644
+--- a/arch/mips/mm/fault.c
++++ b/arch/mips/mm/fault.c
+@@ -158,6 +158,8 @@ good_area:
+       if (unlikely(fault & VM_FAULT_ERROR)) {
+               if (fault & VM_FAULT_OOM)
+                       goto out_of_memory;
++              else if (fault & VM_FAULT_SIGSEGV)
++                      goto bad_area;
+               else if (fault & VM_FAULT_SIGBUS)
+                       goto do_sigbus;
+               BUG();
+diff --git a/arch/mn10300/mm/fault.c b/arch/mn10300/mm/fault.c
+index 3516cbdf1ee9..0c2cc5d39c8e 100644
+--- a/arch/mn10300/mm/fault.c
++++ b/arch/mn10300/mm/fault.c
+@@ -262,6 +262,8 @@ good_area:
+       if (unlikely(fault & VM_FAULT_ERROR)) {
+               if (fault & VM_FAULT_OOM)
+                       goto out_of_memory;
++              else if (fault & VM_FAULT_SIGSEGV)
++                      goto bad_area;
+               else if (fault & VM_FAULT_SIGBUS)
+                       goto do_sigbus;
+               BUG();
+diff --git a/arch/openrisc/mm/fault.c b/arch/openrisc/mm/fault.c
+index 0703acf7d327..230ac20ae794 100644
+--- a/arch/openrisc/mm/fault.c
++++ b/arch/openrisc/mm/fault.c
+@@ -171,6 +171,8 @@ good_area:
+       if (unlikely(fault & VM_FAULT_ERROR)) {
+               if (fault & VM_FAULT_OOM)
+                       goto out_of_memory;
++              else if (fault & VM_FAULT_SIGSEGV)
++                      goto bad_area;
+               else if (fault & VM_FAULT_SIGBUS)
+                       goto do_sigbus;
+               BUG();
+diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
+index 3ca9c1131cfe..e5120e653240 100644
+--- a/arch/parisc/mm/fault.c
++++ b/arch/parisc/mm/fault.c
+@@ -256,6 +256,8 @@ good_area:
+                */
+               if (fault & VM_FAULT_OOM)
+                       goto out_of_memory;
++              else if (fault & VM_FAULT_SIGSEGV)
++                      goto bad_area;
+               else if (fault & VM_FAULT_SIGBUS)
+                       goto bad_area;
+               BUG();
+diff --git a/arch/powerpc/mm/copro_fault.c b/arch/powerpc/mm/copro_fault.c
+index 5a236f082c78..1b5305d4bdab 100644
+--- a/arch/powerpc/mm/copro_fault.c
++++ b/arch/powerpc/mm/copro_fault.c
+@@ -76,7 +76,7 @@ int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
+               if (*flt & VM_FAULT_OOM) {
+                       ret = -ENOMEM;
+                       goto out_unlock;
+-              } else if (*flt & VM_FAULT_SIGBUS) {
++              } else if (*flt & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) {
+                       ret = -EFAULT;
+                       goto out_unlock;
+               }
+diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
+index 08d659a9fcdb..f06b56baf0b3 100644
+--- a/arch/powerpc/mm/fault.c
++++ b/arch/powerpc/mm/fault.c
+@@ -444,6 +444,8 @@ good_area:
+        */
+       fault = handle_mm_fault(mm, vma, address, flags);
+       if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) {
++              if (fault & VM_FAULT_SIGSEGV)
++                      goto bad_area;
+               rc = mm_fault_error(regs, address, fault);
+               if (rc >= MM_FAULT_RETURN)
+                       goto bail;
+diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
+index c8efbb37d6e0..e23f559faa47 100644
+--- a/arch/powerpc/xmon/xmon.c
++++ b/arch/powerpc/xmon/xmon.c
+@@ -293,6 +293,7 @@ static inline void disable_surveillance(void)
+       args.token = rtas_token("set-indicator");
+       if (args.token == RTAS_UNKNOWN_SERVICE)
+               return;
++      args.token = cpu_to_be32(args.token);
+       args.nargs = cpu_to_be32(3);
+       args.nret = cpu_to_be32(1);
+       args.rets = &args.args[3];
+diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
+index a2b81d6ce8a5..fbe8f2cf9245 100644
+--- a/arch/s390/mm/fault.c
++++ b/arch/s390/mm/fault.c
+@@ -374,6 +374,12 @@ static noinline void do_fault_error(struct pt_regs *regs, int fault)
+                               do_no_context(regs);
+                       else
+                               pagefault_out_of_memory();
++              } else if (fault & VM_FAULT_SIGSEGV) {
++                      /* Kernel mode? Handle exceptions or die */
++                      if (!user_mode(regs))
++                              do_no_context(regs);
++                      else
++                              do_sigsegv(regs, SEGV_MAPERR);
+               } else if (fault & VM_FAULT_SIGBUS) {
+                       /* Kernel mode? Handle exceptions or die */
+                       if (!user_mode(regs))
+diff --git a/arch/score/mm/fault.c b/arch/score/mm/fault.c
+index 52238983527d..6860beb2a280 100644
+--- a/arch/score/mm/fault.c
++++ b/arch/score/mm/fault.c
+@@ -114,6 +114,8 @@ good_area:
+       if (unlikely(fault & VM_FAULT_ERROR)) {
+               if (fault & VM_FAULT_OOM)
+                       goto out_of_memory;
++              else if (fault & VM_FAULT_SIGSEGV)
++                      goto bad_area;
+               else if (fault & VM_FAULT_SIGBUS)
+                       goto do_sigbus;
+               BUG();
+diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c
+index 541dc6101508..a58fec9b55e0 100644
+--- a/arch/sh/mm/fault.c
++++ b/arch/sh/mm/fault.c
+@@ -353,6 +353,8 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
+       } else {
+               if (fault & VM_FAULT_SIGBUS)
+                       do_sigbus(regs, error_code, address);
++              else if (fault & VM_FAULT_SIGSEGV)
++                      bad_area(regs, error_code, address);
+               else
+                       BUG();
+       }
+diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
+index 908e8c17c902..70d817154fe8 100644
+--- a/arch/sparc/mm/fault_32.c
++++ b/arch/sparc/mm/fault_32.c
+@@ -249,6 +249,8 @@ good_area:
+       if (unlikely(fault & VM_FAULT_ERROR)) {
+               if (fault & VM_FAULT_OOM)
+                       goto out_of_memory;
++              else if (fault & VM_FAULT_SIGSEGV)
++                      goto bad_area;
+               else if (fault & VM_FAULT_SIGBUS)
+                       goto do_sigbus;
+               BUG();
+diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
+index 18fcd7167095..479823249429 100644
+--- a/arch/sparc/mm/fault_64.c
++++ b/arch/sparc/mm/fault_64.c
+@@ -446,6 +446,8 @@ good_area:
+       if (unlikely(fault & VM_FAULT_ERROR)) {
+               if (fault & VM_FAULT_OOM)
+                       goto out_of_memory;
++              else if (fault & VM_FAULT_SIGSEGV)
++                      goto bad_area;
+               else if (fault & VM_FAULT_SIGBUS)
+                       goto do_sigbus;
+               BUG();
+diff --git a/arch/tile/mm/fault.c b/arch/tile/mm/fault.c
+index 6c0571216a9d..c6d2a76d91a8 100644
+--- a/arch/tile/mm/fault.c
++++ b/arch/tile/mm/fault.c
+@@ -444,6 +444,8 @@ good_area:
+       if (unlikely(fault & VM_FAULT_ERROR)) {
+               if (fault & VM_FAULT_OOM)
+                       goto out_of_memory;
++              else if (fault & VM_FAULT_SIGSEGV)
++                      goto bad_area;
+               else if (fault & VM_FAULT_SIGBUS)
+                       goto do_sigbus;
+               BUG();
+diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c
+index 5678c3571e7c..209617302df8 100644
+--- a/arch/um/kernel/trap.c
++++ b/arch/um/kernel/trap.c
+@@ -80,6 +80,8 @@ good_area:
+               if (unlikely(fault & VM_FAULT_ERROR)) {
+                       if (fault & VM_FAULT_OOM) {
+                               goto out_of_memory;
++                      } else if (fault & VM_FAULT_SIGSEGV) {
++                              goto out;
+                       } else if (fault & VM_FAULT_SIGBUS) {
+                               err = -EACCES;
+                               goto out;
+diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
+index 45abc363dd3e..6a1a8458c042 100644
+--- a/arch/x86/boot/compressed/Makefile
++++ b/arch/x86/boot/compressed/Makefile
+@@ -77,7 +77,7 @@ suffix-$(CONFIG_KERNEL_LZO)  := lzo
+ suffix-$(CONFIG_KERNEL_LZ4)   := lz4
+ 
+ RUN_SIZE = $(shell $(OBJDUMP) -h vmlinux | \
+-           perl $(srctree)/arch/x86/tools/calc_run_size.pl)
++           $(CONFIG_SHELL) $(srctree)/arch/x86/tools/calc_run_size.sh)
+ quiet_cmd_mkpiggy = MKPIGGY $@
+       cmd_mkpiggy = $(obj)/mkpiggy $< $(RUN_SIZE) > $@ || ( rm -f $@ ; false )
+ 
+diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
+index c949923a5668..f58ef6c0613b 100644
+--- a/arch/x86/include/asm/xen/page.h
++++ b/arch/x86/include/asm/xen/page.h
+@@ -236,4 +236,11 @@ void make_lowmem_page_readwrite(void *vaddr);
+ #define xen_remap(cookie, size) ioremap((cookie), (size));
+ #define xen_unmap(cookie) iounmap((cookie))
+ 
++static inline bool xen_arch_need_swiotlb(struct device *dev,
++                                       unsigned long pfn,
++                                       unsigned long mfn)
++{
++      return false;
++}
++
+ #endif /* _ASM_X86_XEN_PAGE_H */
+diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
+index 944bf019b74f..498b6d967138 100644
+--- a/arch/x86/kernel/cpu/perf_event_intel.c
++++ b/arch/x86/kernel/cpu/perf_event_intel.c
+@@ -2431,6 +2431,7 @@ __init int intel_pmu_init(void)
+               break;
+ 
+       case 55: /* 22nm Atom "Silvermont"                */
++      case 76: /* 14nm Atom "Airmont"                   */
+       case 77: /* 22nm Atom "Silvermont Avoton/Rangely" */
+               memcpy(hw_cache_event_ids, slm_hw_cache_event_ids,
+                       sizeof(hw_cache_event_ids));
+diff --git a/arch/x86/kernel/cpu/perf_event_intel_rapl.c b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
+index d64f275fe274..8c256749882c 100644
+--- a/arch/x86/kernel/cpu/perf_event_intel_rapl.c
++++ b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
+@@ -135,7 +135,7 @@ static inline u64 rapl_scale(u64 v)
+        * or use ldexp(count, -32).
+        * Watts = Joules/Time delta
+        */
+-      return v << (32 - __this_cpu_read(rapl_pmu->hw_unit));
++      return v << (32 - __this_cpu_read(rapl_pmu)->hw_unit);
+ }
+ 
+ static u64 rapl_event_update(struct perf_event *event)
+diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
+index d973e61e450d..a8612aafeca1 100644
+--- a/arch/x86/mm/fault.c
++++ b/arch/x86/mm/fault.c
+@@ -905,6 +905,8 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
+               if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
+                            VM_FAULT_HWPOISON_LARGE))
+                       do_sigbus(regs, error_code, address, fault);
++              else if (fault & VM_FAULT_SIGSEGV)
++                      bad_area_nosemaphore(regs, error_code, address);
+               else
+                       BUG();
+       }
+diff --git a/arch/x86/tools/calc_run_size.pl b/arch/x86/tools/calc_run_size.pl
+deleted file mode 100644
+index 23210baade2d..000000000000
+--- a/arch/x86/tools/calc_run_size.pl
++++ /dev/null
+@@ -1,39 +0,0 @@
+-#!/usr/bin/perl
+-#
+-# Calculate the amount of space needed to run the kernel, including room for
+-# the .bss and .brk sections.
+-#
+-# Usage:
+-# objdump -h a.out | perl calc_run_size.pl
+-use strict;
+-
+-my $mem_size = 0;
+-my $file_offset = 0;
+-
+-my $sections=" *[0-9]+ \.(?:bss|brk) +";
+-while (<>) {
+-      if (/^$sections([0-9a-f]+) +(?:[0-9a-f]+ +){2}([0-9a-f]+)/) {
+-              my $size = hex($1);
+-              my $offset = hex($2);
+-              $mem_size += $size;
+-              if ($file_offset == 0) {
+-                      $file_offset = $offset;
+-              } elsif ($file_offset != $offset) {
+-                      # BFD linker shows the same file offset in ELF.
+-                      # Gold linker shows them as consecutive.
+-                      next if ($file_offset + $mem_size == $offset + $size);
+-
+-                      printf STDERR "file_offset: 0x%lx\n", $file_offset;
+-                      printf STDERR "mem_size: 0x%lx\n", $mem_size;
+-                      printf STDERR "offset: 0x%lx\n", $offset;
+-                      printf STDERR "size: 0x%lx\n", $size;
+-
+-                      die ".bss and .brk are non-contiguous\n";
+-              }
+-      }
+-}
+-
+-if ($file_offset == 0) {
+-      die "Never found .bss or .brk file offset\n";
+-}
+-printf("%d\n", $mem_size + $file_offset);
+diff --git a/arch/x86/tools/calc_run_size.sh b/arch/x86/tools/calc_run_size.sh
+new file mode 100644
+index 000000000000..1a4c17bb3910
+--- /dev/null
++++ b/arch/x86/tools/calc_run_size.sh
+@@ -0,0 +1,42 @@
++#!/bin/sh
++#
++# Calculate the amount of space needed to run the kernel, including room for
++# the .bss and .brk sections.
++#
++# Usage:
++# objdump -h a.out | sh calc_run_size.sh
++
++NUM='\([0-9a-fA-F]*[ \t]*\)'
++OUT=$(sed -n 's/^[ \t0-9]*.b[sr][sk][ \t]*'"$NUM$NUM$NUM$NUM"'.*/\1\4/p')
++if [ -z "$OUT" ] ; then
++      echo "Never found .bss or .brk file offset" >&2
++      exit 1
++fi
++
++OUT=$(echo ${OUT# })
++sizeA=$(printf "%d" 0x${OUT%% *})
++OUT=${OUT#* }
++offsetA=$(printf "%d" 0x${OUT%% *})
++OUT=${OUT#* }
++sizeB=$(printf "%d" 0x${OUT%% *})
++OUT=${OUT#* }
++offsetB=$(printf "%d" 0x${OUT%% *})
++
++run_size=$(( $offsetA + $sizeA + $sizeB ))
++
++# BFD linker shows the same file offset in ELF.
++if [ "$offsetA" -ne "$offsetB" ] ; then
++      # Gold linker shows them as consecutive.
++      endB=$(( $offsetB + $sizeB ))
++      if [ "$endB" != "$run_size" ] ; then
++              printf "sizeA: 0x%x\n" $sizeA >&2
++              printf "offsetA: 0x%x\n" $offsetA >&2
++              printf "sizeB: 0x%x\n" $sizeB >&2
++              printf "offsetB: 0x%x\n" $offsetB >&2
++              echo ".bss and .brk are non-contiguous" >&2
++              exit 1
++      fi
++fi
++
++printf "%d\n" $run_size
++exit 0
+diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c
+index b57c4f91f487..9e3571a6535c 100644
+--- a/arch/xtensa/mm/fault.c
++++ b/arch/xtensa/mm/fault.c
+@@ -117,6 +117,8 @@ good_area:
+       if (unlikely(fault & VM_FAULT_ERROR)) {
+               if (fault & VM_FAULT_OOM)
+                       goto out_of_memory;
++              else if (fault & VM_FAULT_SIGSEGV)
++                      goto bad_area;
+               else if (fault & VM_FAULT_SIGBUS)
+                       goto do_sigbus;
+               BUG();
+diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
+index 27b71a0b72d0..76b5be937de6 100644
+--- a/drivers/block/rbd.c
++++ b/drivers/block/rbd.c
+@@ -2098,32 +2098,26 @@ static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
+  * If an image has a non-zero parent overlap, get a reference to its
+  * parent.
+  *
+- * We must get the reference before checking for the overlap to
+- * coordinate properly with zeroing the parent overlap in
+- * rbd_dev_v2_parent_info() when an image gets flattened.  We
+- * drop it again if there is no overlap.
+- *
+  * Returns true if the rbd device has a parent with a non-zero
+  * overlap and a reference for it was successfully taken, or
+  * false otherwise.
+  */
+ static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
+ {
+-      int counter;
++      int counter = 0;
+ 
+       if (!rbd_dev->parent_spec)
+               return false;
+ 
+-      counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
+-      if (counter > 0 && rbd_dev->parent_overlap)
+-              return true;
+-
+-      /* Image was flattened, but parent is not yet torn down */
++      down_read(&rbd_dev->header_rwsem);
++      if (rbd_dev->parent_overlap)
++              counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
++      up_read(&rbd_dev->header_rwsem);
+ 
+       if (counter < 0)
+               rbd_warn(rbd_dev, "parent reference overflow");
+ 
+-      return false;
++      return counter > 0;
+ }
+ 
+ /*
+@@ -4236,7 +4230,6 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
+                */
+               if (rbd_dev->parent_overlap) {
+                       rbd_dev->parent_overlap = 0;
+-                      smp_mb();
+                       rbd_dev_parent_put(rbd_dev);
+                       pr_info("%s: clone image has been flattened\n",
+                               rbd_dev->disk->disk_name);
+@@ -4282,7 +4275,6 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
+        * treat it specially.
+        */
+       rbd_dev->parent_overlap = overlap;
+-      smp_mb();
+       if (!overlap) {
+ 
+               /* A null parent_spec indicates it's the initial probe */
+@@ -5111,10 +5103,7 @@ static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
+ {
+       struct rbd_image_header *header;
+ 
+-      /* Drop parent reference unless it's already been done (or none) */
+-
+-      if (rbd_dev->parent_overlap)
+-              rbd_dev_parent_put(rbd_dev);
++      rbd_dev_parent_put(rbd_dev);
+ 
+       /* Free dynamic fields from the header, then zero it out */
+ 
+diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
+index 1fa2af957b18..84b4c8b7fbd1 100644
+--- a/drivers/clocksource/arm_arch_timer.c
++++ b/drivers/clocksource/arm_arch_timer.c
+@@ -462,7 +462,7 @@ static void __init arch_counter_register(unsigned type)
+ 
+       /* Register the CP15 based counter if we have one */
+       if (type & ARCH_CP15_TIMER) {
+-              if (arch_timer_use_virtual)
++              if (IS_ENABLED(CONFIG_ARM64) || arch_timer_use_virtual)
+                       arch_timer_read_counter = arch_counter_get_cntvct;
+               else
+                       arch_timer_read_counter = arch_counter_get_cntpct;
+diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
+index ef757f712a3d..e9a2827ad1c4 100644
+--- a/drivers/gpu/drm/drm_fb_helper.c
++++ b/drivers/gpu/drm/drm_fb_helper.c
+@@ -145,6 +145,31 @@ int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, struct drm_
+ }
+ EXPORT_SYMBOL(drm_fb_helper_add_one_connector);
+ 
++static void remove_from_modeset(struct drm_mode_set *set,
++              struct drm_connector *connector)
++{
++      int i, j;
++
++      for (i = 0; i < set->num_connectors; i++) {
++              if (set->connectors[i] == connector)
++                      break;
++      }
++
++      if (i == set->num_connectors)
++              return;
++
++      for (j = i + 1; j < set->num_connectors; j++) {
++              set->connectors[j - 1] = set->connectors[j];
++      }
++      set->num_connectors--;
++
++      /* because i915 is pissy about this..
++       * TODO maybe need to makes sure we set it back to !=NULL somewhere?
++       */
++      if (set->num_connectors == 0)
++              set->fb = NULL;
++}
++
+ int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
+                                      struct drm_connector *connector)
+ {
+@@ -167,6 +192,11 @@ int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
+       }
+       fb_helper->connector_count--;
+       kfree(fb_helper_connector);
++
++      /* also cleanup dangling references to the connector: */
++      for (i = 0; i < fb_helper->crtc_count; i++)
+              remove_from_modeset(&fb_helper->crtc_info[i].mode_set, connector);
++
+       return 0;
+ }
+ EXPORT_SYMBOL(drm_fb_helper_remove_one_connector);
+diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
+index 346aee828dc3..c33327d5c543 100644
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -2076,8 +2076,7 @@ struct drm_i915_cmd_table {
+ #define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \
+                                (INTEL_DEVID(dev) & 0xFF00) == 0x0C00)
+ #define IS_BDW_ULT(dev)               (IS_BROADWELL(dev) && \
+-                               ((INTEL_DEVID(dev) & 0xf) == 0x2  || \
+-                               (INTEL_DEVID(dev) & 0xf) == 0x6 || \
++                               ((INTEL_DEVID(dev) & 0xf) == 0x6 ||    \
+                                (INTEL_DEVID(dev) & 0xf) == 0xe))
+ #define IS_HSW_ULT(dev)               (IS_HASWELL(dev) && \
+                                (INTEL_DEVID(dev) & 0xFF00) == 0x0A00)
+diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
+index fd76933eed04..d88dbedeaa77 100644
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -3050,6 +3050,13 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
+               u32 size = i915_gem_obj_ggtt_size(obj);
+               uint64_t val;
+ 
++              /* Adjust fence size to match tiled area */
++              if (obj->tiling_mode != I915_TILING_NONE) {
++                      uint32_t row_size = obj->stride *
++                              (obj->tiling_mode == I915_TILING_Y ? 32 : 8);
++                      size = (size / row_size) * row_size;
++              }
++
+               val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
+                                0xfffff000) << 32;
+               val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
+@@ -4811,25 +4818,18 @@ i915_gem_init_hw(struct drm_device *dev)
+       for (i = 0; i < NUM_L3_SLICES(dev); i++)
+               i915_gem_l3_remap(&dev_priv->ring[RCS], i);
+ 
+-      /*
+-       * XXX: Contexts should only be initialized once. Doing a switch to the
+-       * default context switch however is something we'd like to do after
+-       * reset or thaw (the latter may not actually be necessary for HW, but
+-       * goes with our code better). Context switching requires rings (for
+-       * the do_switch), but before enabling PPGTT. So don't move this.
+-       */
+-      ret = i915_gem_context_enable(dev_priv);
++      ret = i915_ppgtt_init_hw(dev);
+       if (ret && ret != -EIO) {
+-              DRM_ERROR("Context enable failed %d\n", ret);
++              DRM_ERROR("PPGTT enable failed %d\n", ret);
+               i915_gem_cleanup_ringbuffer(dev);
+-
+-              return ret;
+       }
+ 
+-      ret = i915_ppgtt_init_hw(dev);
++      ret = i915_gem_context_enable(dev_priv);
+       if (ret && ret != -EIO) {
+-              DRM_ERROR("PPGTT enable failed %d\n", ret);
++              DRM_ERROR("Context enable failed %d\n", ret);
+               i915_gem_cleanup_ringbuffer(dev);
++
++              return ret;
+       }
+ 
+       return ret;
+diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
+index 41b3be217493..8bc193f81333 100644
+--- a/drivers/gpu/drm/i915/intel_panel.c
++++ b/drivers/gpu/drm/i915/intel_panel.c
+@@ -947,7 +947,7 @@ void intel_panel_enable_backlight(struct intel_connector *connector)
+ 
+       WARN_ON(panel->backlight.max == 0);
+ 
+-      if (panel->backlight.level == 0) {
++      if (panel->backlight.level <= panel->backlight.min) {
+               panel->backlight.level = panel->backlight.max;
+               if (panel->backlight.device)
+                       panel->backlight.device->props.brightness =
+diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
+index b53b31a7b76f..cdf6e2149539 100644
+--- a/drivers/gpu/drm/radeon/r100.c
++++ b/drivers/gpu/drm/radeon/r100.c
+@@ -644,6 +644,7 @@ int r100_pci_gart_init(struct radeon_device *rdev)
+               return r;
+       rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
+       rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush;
++      rdev->asic->gart.get_page_entry = &r100_pci_gart_get_page_entry;
+       rdev->asic->gart.set_page = &r100_pci_gart_set_page;
+       return radeon_gart_table_ram_alloc(rdev);
+ }
+@@ -681,11 +682,16 @@ void r100_pci_gart_disable(struct radeon_device *rdev)
+       WREG32(RADEON_AIC_HI_ADDR, 0);
+ }
+ 
++uint64_t r100_pci_gart_get_page_entry(uint64_t addr, uint32_t flags)
++{
++      return addr;
++}
++
+ void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i,
+-                          uint64_t addr, uint32_t flags)
++                          uint64_t entry)
+ {
+       u32 *gtt = rdev->gart.ptr;
+-      gtt[i] = cpu_to_le32(lower_32_bits(addr));
++      gtt[i] = cpu_to_le32(lower_32_bits(entry));
+ }
+ 
+ void r100_pci_gart_fini(struct radeon_device *rdev)
+diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
+index 1bc4704034ce..f3ef6257d669 100644
+--- a/drivers/gpu/drm/radeon/r300.c
++++ b/drivers/gpu/drm/radeon/r300.c
+@@ -73,11 +73,8 @@ void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev)
+ #define R300_PTE_WRITEABLE (1 << 2)
+ #define R300_PTE_READABLE  (1 << 3)
+ 
+-void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
+-                            uint64_t addr, uint32_t flags)
++uint64_t rv370_pcie_gart_get_page_entry(uint64_t addr, uint32_t flags)
+ {
+-      void __iomem *ptr = rdev->gart.ptr;
+-
+       addr = (lower_32_bits(addr) >> 8) |
+               ((upper_32_bits(addr) & 0xff) << 24);
+       if (flags & RADEON_GART_PAGE_READ)
+@@ -86,10 +83,18 @@ void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
+               addr |= R300_PTE_WRITEABLE;
+       if (!(flags & RADEON_GART_PAGE_SNOOP))
+               addr |= R300_PTE_UNSNOOPED;
++      return addr;
++}
++
++void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
++                            uint64_t entry)
++{
++      void __iomem *ptr = rdev->gart.ptr;
++
+       /* on x86 we want this to be CPU endian, on powerpc
+        * on powerpc without HW swappers, it'll get swapped on way
+        * into VRAM - so no need for cpu_to_le32 on VRAM tables */
+-      writel(addr, ((void __iomem *)ptr) + (i * 4));
++      writel(entry, ((void __iomem *)ptr) + (i * 4));
+ }
+ 
+ int rv370_pcie_gart_init(struct radeon_device *rdev)
+@@ -109,6 +114,7 @@ int rv370_pcie_gart_init(struct radeon_device *rdev)
+               DRM_ERROR("Failed to register debugfs file for PCIE gart !\n");
+       rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
+       rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush;
++      rdev->asic->gart.get_page_entry = &rv370_pcie_gart_get_page_entry;
+       rdev->asic->gart.set_page = &rv370_pcie_gart_set_page;
+       return radeon_gart_table_vram_alloc(rdev);
+ }
+diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
+index a9717b3fbf1b..dbe51bfe3ef4 100644
+--- a/drivers/gpu/drm/radeon/radeon.h
++++ b/drivers/gpu/drm/radeon/radeon.h
+@@ -245,6 +245,7 @@ bool radeon_get_bios(struct radeon_device *rdev);
+  * Dummy page
+  */
+ struct radeon_dummy_page {
++      uint64_t        entry;
+       struct page     *page;
+       dma_addr_t      addr;
+ };
+@@ -626,6 +627,7 @@ struct radeon_gart {
+       unsigned                        table_size;
+       struct page                     **pages;
+       dma_addr_t                      *pages_addr;
++      uint64_t                        *pages_entry;
+       bool                            ready;
+ };
+ 
+@@ -1819,8 +1821,9 @@ struct radeon_asic {
+       /* gart */
+       struct {
+               void (*tlb_flush)(struct radeon_device *rdev);
++              uint64_t (*get_page_entry)(uint64_t addr, uint32_t flags);
+               void (*set_page)(struct radeon_device *rdev, unsigned i,
+-                               uint64_t addr, uint32_t flags);
++                               uint64_t entry);
+       } gart;
+       struct {
+               int (*init)(struct radeon_device *rdev);
+@@ -2818,7 +2821,8 @@ static inline void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
+#define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state))
+ #define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev))
+ #define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev))
+-#define radeon_gart_set_page(rdev, i, p, f) (rdev)->asic->gart.set_page((rdev), (i), (p), (f))
++#define radeon_gart_get_page_entry(a, f) (rdev)->asic->gart.get_page_entry((a), (f))
++#define radeon_gart_set_page(rdev, i, e) (rdev)->asic->gart.set_page((rdev), (i), (e))
+ #define radeon_asic_vm_init(rdev) (rdev)->asic->vm.init((rdev))
+ #define radeon_asic_vm_fini(rdev) (rdev)->asic->vm.fini((rdev))
+#define radeon_asic_vm_copy_pages(rdev, ib, pe, src, count) ((rdev)->asic->vm.copy_pages((rdev), (ib), (pe), (src), (count)))
+diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
+index 121aff6a3b41..ed0e10eee2dc 100644
+--- a/drivers/gpu/drm/radeon/radeon_asic.c
++++ b/drivers/gpu/drm/radeon/radeon_asic.c
+@@ -159,11 +159,13 @@ void radeon_agp_disable(struct radeon_device *rdev)
+               DRM_INFO("Forcing AGP to PCIE mode\n");
+               rdev->flags |= RADEON_IS_PCIE;
+               rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush;
++              rdev->asic->gart.get_page_entry = &rv370_pcie_gart_get_page_entry;
+               rdev->asic->gart.set_page = &rv370_pcie_gart_set_page;
+       } else {
+               DRM_INFO("Forcing AGP to PCI mode\n");
+               rdev->flags |= RADEON_IS_PCI;
+               rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush;
++              rdev->asic->gart.get_page_entry = &r100_pci_gart_get_page_entry;
+               rdev->asic->gart.set_page = &r100_pci_gart_set_page;
+       }
+       rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
+@@ -199,6 +201,7 @@ static struct radeon_asic r100_asic = {
+       .mc_wait_for_idle = &r100_mc_wait_for_idle,
+       .gart = {
+               .tlb_flush = &r100_pci_gart_tlb_flush,
++              .get_page_entry = &r100_pci_gart_get_page_entry,
+               .set_page = &r100_pci_gart_set_page,
+       },
+       .ring = {
+@@ -265,6 +268,7 @@ static struct radeon_asic r200_asic = {
+       .mc_wait_for_idle = &r100_mc_wait_for_idle,
+       .gart = {
+               .tlb_flush = &r100_pci_gart_tlb_flush,
++              .get_page_entry = &r100_pci_gart_get_page_entry,
+               .set_page = &r100_pci_gart_set_page,
+       },
+       .ring = {
+@@ -359,6 +363,7 @@ static struct radeon_asic r300_asic = {
+       .mc_wait_for_idle = &r300_mc_wait_for_idle,
+       .gart = {
+               .tlb_flush = &r100_pci_gart_tlb_flush,
++              .get_page_entry = &r100_pci_gart_get_page_entry,
+               .set_page = &r100_pci_gart_set_page,
+       },
+       .ring = {
+@@ -425,6 +430,7 @@ static struct radeon_asic r300_asic_pcie = {
+       .mc_wait_for_idle = &r300_mc_wait_for_idle,
+       .gart = {
+               .tlb_flush = &rv370_pcie_gart_tlb_flush,
++              .get_page_entry = &rv370_pcie_gart_get_page_entry,
+               .set_page = &rv370_pcie_gart_set_page,
+       },
+       .ring = {
+@@ -491,6 +497,7 @@ static struct radeon_asic r420_asic = {
+       .mc_wait_for_idle = &r300_mc_wait_for_idle,
+       .gart = {
+               .tlb_flush = &rv370_pcie_gart_tlb_flush,
++              .get_page_entry = &rv370_pcie_gart_get_page_entry,
+               .set_page = &rv370_pcie_gart_set_page,
+       },
+       .ring = {
+@@ -557,6 +564,7 @@ static struct radeon_asic rs400_asic = {
+       .mc_wait_for_idle = &rs400_mc_wait_for_idle,
+       .gart = {
+               .tlb_flush = &rs400_gart_tlb_flush,
++              .get_page_entry = &rs400_gart_get_page_entry,
+               .set_page = &rs400_gart_set_page,
+       },
+       .ring = {
+@@ -623,6 +631,7 @@ static struct radeon_asic rs600_asic = {
+       .mc_wait_for_idle = &rs600_mc_wait_for_idle,
+       .gart = {
+               .tlb_flush = &rs600_gart_tlb_flush,
++              .get_page_entry = &rs600_gart_get_page_entry,
+               .set_page = &rs600_gart_set_page,
+       },
+       .ring = {
+@@ -691,6 +700,7 @@ static struct radeon_asic rs690_asic = {
+       .mc_wait_for_idle = &rs690_mc_wait_for_idle,
+       .gart = {
+               .tlb_flush = &rs400_gart_tlb_flush,
++              .get_page_entry = &rs400_gart_get_page_entry,
+               .set_page = &rs400_gart_set_page,
+       },
+       .ring = {
+@@ -759,6 +769,7 @@ static struct radeon_asic rv515_asic = {
+       .mc_wait_for_idle = &rv515_mc_wait_for_idle,
+       .gart = {
+               .tlb_flush = &rv370_pcie_gart_tlb_flush,
++              .get_page_entry = &rv370_pcie_gart_get_page_entry,
+               .set_page = &rv370_pcie_gart_set_page,
+       },
+       .ring = {
+@@ -825,6 +836,7 @@ static struct radeon_asic r520_asic = {
+       .mc_wait_for_idle = &r520_mc_wait_for_idle,
+       .gart = {
+               .tlb_flush = &rv370_pcie_gart_tlb_flush,
++              .get_page_entry = &rv370_pcie_gart_get_page_entry,
+               .set_page = &rv370_pcie_gart_set_page,
+       },
+       .ring = {
+@@ -919,6 +931,7 @@ static struct radeon_asic r600_asic = {
+       .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
+       .gart = {
+               .tlb_flush = &r600_pcie_gart_tlb_flush,
++              .get_page_entry = &rs600_gart_get_page_entry,
+               .set_page = &rs600_gart_set_page,
+       },
+       .ring = {
+@@ -1004,6 +1017,7 @@ static struct radeon_asic rv6xx_asic = {
+       .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
+       .gart = {
+               .tlb_flush = &r600_pcie_gart_tlb_flush,
++              .get_page_entry = &rs600_gart_get_page_entry,
+               .set_page = &rs600_gart_set_page,
+       },
+       .ring = {
+@@ -1095,6 +1109,7 @@ static struct radeon_asic rs780_asic = {
+       .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
+       .gart = {
+               .tlb_flush = &r600_pcie_gart_tlb_flush,
++              .get_page_entry = &rs600_gart_get_page_entry,
+               .set_page = &rs600_gart_set_page,
+       },
+       .ring = {
+@@ -1199,6 +1214,7 @@ static struct radeon_asic rv770_asic = {
+       .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
+       .gart = {
+               .tlb_flush = &r600_pcie_gart_tlb_flush,
++              .get_page_entry = &rs600_gart_get_page_entry,
+               .set_page = &rs600_gart_set_page,
+       },
+       .ring = {
+@@ -1317,6 +1333,7 @@ static struct radeon_asic evergreen_asic = {
+       .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
+       .gart = {
+               .tlb_flush = &evergreen_pcie_gart_tlb_flush,
++              .get_page_entry = &rs600_gart_get_page_entry,
+               .set_page = &rs600_gart_set_page,
+       },
+       .ring = {
+@@ -1409,6 +1426,7 @@ static struct radeon_asic sumo_asic = {
+       .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
+       .gart = {
+               .tlb_flush = &evergreen_pcie_gart_tlb_flush,
++              .get_page_entry = &rs600_gart_get_page_entry,
+               .set_page = &rs600_gart_set_page,
+       },
+       .ring = {
+@@ -1500,6 +1518,7 @@ static struct radeon_asic btc_asic = {
+       .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
+       .gart = {
+               .tlb_flush = &evergreen_pcie_gart_tlb_flush,
++              .get_page_entry = &rs600_gart_get_page_entry,
+               .set_page = &rs600_gart_set_page,
+       },
+       .ring = {
+@@ -1635,6 +1654,7 @@ static struct radeon_asic cayman_asic = {
+       .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
+       .gart = {
+               .tlb_flush = &cayman_pcie_gart_tlb_flush,
++              .get_page_entry = &rs600_gart_get_page_entry,
+               .set_page = &rs600_gart_set_page,
+       },
+       .vm = {
+@@ -1738,6 +1758,7 @@ static struct radeon_asic trinity_asic = {
+       .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
+       .gart = {
+               .tlb_flush = &cayman_pcie_gart_tlb_flush,
++              .get_page_entry = &rs600_gart_get_page_entry,
+               .set_page = &rs600_gart_set_page,
+       },
+       .vm = {
+@@ -1871,6 +1892,7 @@ static struct radeon_asic si_asic = {
+       .get_gpu_clock_counter = &si_get_gpu_clock_counter,
+       .gart = {
+               .tlb_flush = &si_pcie_gart_tlb_flush,
++              .get_page_entry = &rs600_gart_get_page_entry,
+               .set_page = &rs600_gart_set_page,
+       },
+       .vm = {
+@@ -2032,6 +2054,7 @@ static struct radeon_asic ci_asic = {
+       .get_gpu_clock_counter = &cik_get_gpu_clock_counter,
+       .gart = {
+               .tlb_flush = &cik_pcie_gart_tlb_flush,
++              .get_page_entry = &rs600_gart_get_page_entry,
+               .set_page = &rs600_gart_set_page,
+       },
+       .vm = {
+@@ -2139,6 +2162,7 @@ static struct radeon_asic kv_asic = {
+       .get_gpu_clock_counter = &cik_get_gpu_clock_counter,
+       .gart = {
+               .tlb_flush = &cik_pcie_gart_tlb_flush,
++              .get_page_entry = &rs600_gart_get_page_entry,
+               .set_page = &rs600_gart_set_page,
+       },
+       .vm = {
+diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
+index d8ace5b28a5b..0c1da2bf1fb4 100644
+--- a/drivers/gpu/drm/radeon/radeon_asic.h
++++ b/drivers/gpu/drm/radeon/radeon_asic.h
+@@ -67,8 +67,9 @@ bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
+ int r100_asic_reset(struct radeon_device *rdev);
+ u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc);
+ void r100_pci_gart_tlb_flush(struct radeon_device *rdev);
++uint64_t r100_pci_gart_get_page_entry(uint64_t addr, uint32_t flags);
+ void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i,
+-                          uint64_t addr, uint32_t flags);
++                          uint64_t entry);
+ void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring);
+ int r100_irq_set(struct radeon_device *rdev);
+ int r100_irq_process(struct radeon_device *rdev);
+@@ -172,8 +173,9 @@ extern void r300_fence_ring_emit(struct radeon_device *rdev,
+                               struct radeon_fence *fence);
+ extern int r300_cs_parse(struct radeon_cs_parser *p);
+ extern void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev);
++extern uint64_t rv370_pcie_gart_get_page_entry(uint64_t addr, uint32_t flags);
+ extern void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
+-                                   uint64_t addr, uint32_t flags);
++                                   uint64_t entry);
+ extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes);
+ extern int rv370_get_pcie_lanes(struct radeon_device *rdev);
+ extern void r300_set_reg_safe(struct radeon_device *rdev);
+@@ -208,8 +210,9 @@ extern void rs400_fini(struct radeon_device *rdev);
+ extern int rs400_suspend(struct radeon_device *rdev);
+ extern int rs400_resume(struct radeon_device *rdev);
+ void rs400_gart_tlb_flush(struct radeon_device *rdev);
++uint64_t rs400_gart_get_page_entry(uint64_t addr, uint32_t flags);
+ void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
+-                       uint64_t addr, uint32_t flags);
++                       uint64_t entry);
+ uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg);
+ void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
+ int rs400_gart_init(struct radeon_device *rdev);
+@@ -232,8 +235,9 @@ int rs600_irq_process(struct radeon_device *rdev);
+ void rs600_irq_disable(struct radeon_device *rdev);
+ u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc);
+ void rs600_gart_tlb_flush(struct radeon_device *rdev);
++uint64_t rs600_gart_get_page_entry(uint64_t addr, uint32_t flags);
+ void rs600_gart_set_page(struct radeon_device *rdev, unsigned i,
+-                       uint64_t addr, uint32_t flags);
++                       uint64_t entry);
+ uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg);
+ void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
+ void rs600_bandwidth_update(struct radeon_device *rdev);
+diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
+index 995a8b1770dd..bdf263a4a67c 100644
+--- a/drivers/gpu/drm/radeon/radeon_device.c
++++ b/drivers/gpu/drm/radeon/radeon_device.c
+@@ -743,6 +743,8 @@ int radeon_dummy_page_init(struct radeon_device *rdev)
+               rdev->dummy_page.page = NULL;
+               return -ENOMEM;
+       }
+      rdev->dummy_page.entry = radeon_gart_get_page_entry(rdev->dummy_page.addr,
+                                                          RADEON_GART_PAGE_DUMMY);
+       return 0;
+ }
+ 
+diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
+index 84146d5901aa..c7be612b60c9 100644
+--- a/drivers/gpu/drm/radeon/radeon_gart.c
++++ b/drivers/gpu/drm/radeon/radeon_gart.c
+@@ -165,6 +165,19 @@ int radeon_gart_table_vram_pin(struct radeon_device *rdev)
+               radeon_bo_unpin(rdev->gart.robj);
+       radeon_bo_unreserve(rdev->gart.robj);
+       rdev->gart.table_addr = gpu_addr;
++
++      if (!r) {
++              int i;
++
++              /* We might have dropped some GART table updates while it wasn't
++               * mapped, restore all entries
++               */
++              for (i = 0; i < rdev->gart.num_gpu_pages; i++)
++                      radeon_gart_set_page(rdev, i, rdev->gart.pages_entry[i]);
++              mb();
++              radeon_gart_tlb_flush(rdev);
++      }
++
+       return r;
+ }
+ 
+@@ -228,7 +241,6 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
+       unsigned t;
+       unsigned p;
+       int i, j;
+-      u64 page_base;
+ 
+       if (!rdev->gart.ready) {
+               WARN(1, "trying to unbind memory from uninitialized GART !\n");
+@@ -240,13 +252,12 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
+               if (rdev->gart.pages[p]) {
+                       rdev->gart.pages[p] = NULL;
+                       rdev->gart.pages_addr[p] = rdev->dummy_page.addr;
+-                      page_base = rdev->gart.pages_addr[p];
+                      for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
++                              rdev->gart.pages_entry[t] = rdev->dummy_page.entry;
+                               if (rdev->gart.ptr) {
+-                                      radeon_gart_set_page(rdev, t, page_base,
+-                                                           RADEON_GART_PAGE_DUMMY);
++                                      radeon_gart_set_page(rdev, t,
++                                                           rdev->dummy_page.entry);
+                               }
+-                              page_base += RADEON_GPU_PAGE_SIZE;
+                       }
+               }
+       }
+@@ -274,7 +285,7 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
+ {
+       unsigned t;
+       unsigned p;
+-      uint64_t page_base;
++      uint64_t page_base, page_entry;
+       int i, j;
+ 
+       if (!rdev->gart.ready) {
+@@ -287,12 +298,14 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
+       for (i = 0; i < pages; i++, p++) {
+               rdev->gart.pages_addr[p] = dma_addr[i];
+               rdev->gart.pages[p] = pagelist[i];
+-              if (rdev->gart.ptr) {
+-                      page_base = rdev->gart.pages_addr[p];
+-                      for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
+-                              radeon_gart_set_page(rdev, t, page_base, flags);
+-                              page_base += RADEON_GPU_PAGE_SIZE;
++              page_base = dma_addr[i];
++              for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
++                      page_entry = radeon_gart_get_page_entry(page_base, flags);
++                      rdev->gart.pages_entry[t] = page_entry;
++                      if (rdev->gart.ptr) {
++                              radeon_gart_set_page(rdev, t, page_entry);
+                       }
++                      page_base += RADEON_GPU_PAGE_SIZE;
+               }
+       }
+       mb();
+@@ -340,10 +353,17 @@ int radeon_gart_init(struct radeon_device *rdev)
+               radeon_gart_fini(rdev);
+               return -ENOMEM;
+       }
++      rdev->gart.pages_entry = vmalloc(sizeof(uint64_t) *
++                                       rdev->gart.num_gpu_pages);
++      if (rdev->gart.pages_entry == NULL) {
++              radeon_gart_fini(rdev);
++              return -ENOMEM;
++      }
+       /* set GART entry to point to the dummy page by default */
+-      for (i = 0; i < rdev->gart.num_cpu_pages; i++) {
++      for (i = 0; i < rdev->gart.num_cpu_pages; i++)
+               rdev->gart.pages_addr[i] = rdev->dummy_page.addr;
+-      }
++      for (i = 0; i < rdev->gart.num_gpu_pages; i++)
++              rdev->gart.pages_entry[i] = rdev->dummy_page.entry;
+       return 0;
+ }
+ 
+@@ -356,15 +376,17 @@ int radeon_gart_init(struct radeon_device *rdev)
+  */
+ void radeon_gart_fini(struct radeon_device *rdev)
+ {
+-      if (rdev->gart.pages && rdev->gart.pages_addr && rdev->gart.ready) {
++      if (rdev->gart.ready) {
+               /* unbind pages */
+               radeon_gart_unbind(rdev, 0, rdev->gart.num_cpu_pages);
+       }
+       rdev->gart.ready = false;
+       vfree(rdev->gart.pages);
+       vfree(rdev->gart.pages_addr);
++      vfree(rdev->gart.pages_entry);
+       rdev->gart.pages = NULL;
+       rdev->gart.pages_addr = NULL;
++      rdev->gart.pages_entry = NULL;
+ 
+       radeon_dummy_page_fini(rdev);
+ }
+diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
+index c5799f16aa4b..34e3235f41d2 100644
+--- a/drivers/gpu/drm/radeon/rs400.c
++++ b/drivers/gpu/drm/radeon/rs400.c
+@@ -212,11 +212,9 @@ void rs400_gart_fini(struct radeon_device *rdev)
+ #define RS400_PTE_WRITEABLE (1 << 2)
+ #define RS400_PTE_READABLE  (1 << 3)
+ 
+-void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
+-                       uint64_t addr, uint32_t flags)
++uint64_t rs400_gart_get_page_entry(uint64_t addr, uint32_t flags)
+ {
+       uint32_t entry;
+-      u32 *gtt = rdev->gart.ptr;
+ 
+       entry = (lower_32_bits(addr) & PAGE_MASK) |
+               ((upper_32_bits(addr) & 0xff) << 4);
+@@ -226,8 +224,14 @@ void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
+               entry |= RS400_PTE_WRITEABLE;
+       if (!(flags & RADEON_GART_PAGE_SNOOP))
+               entry |= RS400_PTE_UNSNOOPED;
+-      entry = cpu_to_le32(entry);
+-      gtt[i] = entry;
++      return entry;
++}
++
++void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
++                       uint64_t entry)
++{
++      u32 *gtt = rdev->gart.ptr;
++      gtt[i] = cpu_to_le32(lower_32_bits(entry));
+ }
+ 
+ int rs400_mc_wait_for_idle(struct radeon_device *rdev)
+diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
+index 9acb1c3c005b..74bce91aecc1 100644
+--- a/drivers/gpu/drm/radeon/rs600.c
++++ b/drivers/gpu/drm/radeon/rs600.c
+@@ -625,11 +625,8 @@ static void rs600_gart_fini(struct radeon_device *rdev)
+       radeon_gart_table_vram_free(rdev);
+ }
+ 
+-void rs600_gart_set_page(struct radeon_device *rdev, unsigned i,
+-                       uint64_t addr, uint32_t flags)
++uint64_t rs600_gart_get_page_entry(uint64_t addr, uint32_t flags)
+ {
+-      void __iomem *ptr = (void *)rdev->gart.ptr;
+-
+       addr = addr & 0xFFFFFFFFFFFFF000ULL;
+       addr |= R600_PTE_SYSTEM;
+       if (flags & RADEON_GART_PAGE_VALID)
+@@ -640,7 +637,14 @@ void rs600_gart_set_page(struct radeon_device *rdev, unsigned i,
+               addr |= R600_PTE_WRITEABLE;
+       if (flags & RADEON_GART_PAGE_SNOOP)
+               addr |= R600_PTE_SNOOPED;
+-      writeq(addr, ptr + (i * 8));
++      return addr;
++}
++
++void rs600_gart_set_page(struct radeon_device *rdev, unsigned i,
++                       uint64_t entry)
++{
++      void __iomem *ptr = (void *)rdev->gart.ptr;
++      writeq(entry, ptr + (i * 8));
+ }
+ 
+ int rs600_irq_set(struct radeon_device *rdev)
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+index daeca571b42f..810dac80179c 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+@@ -406,11 +406,9 @@ int vmw_3d_resource_inc(struct vmw_private *dev_priv,
+               if (unlikely(ret != 0))
+                       --dev_priv->num_3d_resources;
+       } else if (unhide_svga) {
+-              mutex_lock(&dev_priv->hw_mutex);
+               vmw_write(dev_priv, SVGA_REG_ENABLE,
+                         vmw_read(dev_priv, SVGA_REG_ENABLE) &
+                         ~SVGA_REG_ENABLE_HIDE);
+-              mutex_unlock(&dev_priv->hw_mutex);
+       }
+ 
+       mutex_unlock(&dev_priv->release_mutex);
+@@ -433,13 +431,10 @@ void vmw_3d_resource_dec(struct vmw_private *dev_priv,
+       mutex_lock(&dev_priv->release_mutex);
+       if (unlikely(--dev_priv->num_3d_resources == 0))
+               vmw_release_device(dev_priv);
+-      else if (hide_svga) {
+-              mutex_lock(&dev_priv->hw_mutex);
++      else if (hide_svga)
+               vmw_write(dev_priv, SVGA_REG_ENABLE,
+                         vmw_read(dev_priv, SVGA_REG_ENABLE) |
+                         SVGA_REG_ENABLE_HIDE);
+-              mutex_unlock(&dev_priv->hw_mutex);
+-      }
+ 
+       n3d = (int32_t) dev_priv->num_3d_resources;
+       mutex_unlock(&dev_priv->release_mutex);
+@@ -600,12 +595,14 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
+       dev_priv->dev = dev;
+       dev_priv->vmw_chipset = chipset;
+       dev_priv->last_read_seqno = (uint32_t) -100;
+-      mutex_init(&dev_priv->hw_mutex);
+       mutex_init(&dev_priv->cmdbuf_mutex);
+       mutex_init(&dev_priv->release_mutex);
+       mutex_init(&dev_priv->binding_mutex);
+       rwlock_init(&dev_priv->resource_lock);
+       ttm_lock_init(&dev_priv->reservation_sem);
++      spin_lock_init(&dev_priv->hw_lock);
++      spin_lock_init(&dev_priv->waiter_lock);
++      spin_lock_init(&dev_priv->cap_lock);
+ 
+       for (i = vmw_res_context; i < vmw_res_max; ++i) {
+               idr_init(&dev_priv->res_idr[i]);
+@@ -626,14 +623,11 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
+ 
+       dev_priv->enable_fb = enable_fbdev;
+ 
+-      mutex_lock(&dev_priv->hw_mutex);
+-
+       vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
+       svga_id = vmw_read(dev_priv, SVGA_REG_ID);
+       if (svga_id != SVGA_ID_2) {
+               ret = -ENOSYS;
+               DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
+-              mutex_unlock(&dev_priv->hw_mutex);
+               goto out_err0;
+       }
+ 
+@@ -683,10 +677,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
+               dev_priv->prim_bb_mem = dev_priv->vram_size;
+ 
+       ret = vmw_dma_masks(dev_priv);
+-      if (unlikely(ret != 0)) {
+-              mutex_unlock(&dev_priv->hw_mutex);
++      if (unlikely(ret != 0))
+               goto out_err0;
+-      }
+ 
+       /*
+        * Limit back buffer size to VRAM size.  Remove this once
+@@ -695,8 +687,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
+       if (dev_priv->prim_bb_mem > dev_priv->vram_size)
+               dev_priv->prim_bb_mem = dev_priv->vram_size;
+ 
+-      mutex_unlock(&dev_priv->hw_mutex);
+-
+       vmw_print_capabilities(dev_priv->capabilities);
+ 
+       if (dev_priv->capabilities & SVGA_CAP_GMR2) {
+@@ -1161,9 +1151,7 @@ static int vmw_master_set(struct drm_device *dev,
+               if (unlikely(ret != 0))
+                       return ret;
+               vmw_kms_save_vga(dev_priv);
+-              mutex_lock(&dev_priv->hw_mutex);
+               vmw_write(dev_priv, SVGA_REG_TRACES, 0);
+-              mutex_unlock(&dev_priv->hw_mutex);
+       }
+ 
+       if (active) {
+@@ -1197,9 +1185,7 @@ out_no_active_lock:
+       if (!dev_priv->enable_fb) {
+               vmw_kms_restore_vga(dev_priv);
+               vmw_3d_resource_dec(dev_priv, true);
+-              mutex_lock(&dev_priv->hw_mutex);
+               vmw_write(dev_priv, SVGA_REG_TRACES, 1);
+-              mutex_unlock(&dev_priv->hw_mutex);
+       }
+       return ret;
+ }
+@@ -1234,9 +1220,7 @@ static void vmw_master_drop(struct drm_device *dev,
+                       DRM_ERROR("Unable to clean VRAM on master drop.\n");
+               vmw_kms_restore_vga(dev_priv);
+               vmw_3d_resource_dec(dev_priv, true);
+-              mutex_lock(&dev_priv->hw_mutex);
+               vmw_write(dev_priv, SVGA_REG_TRACES, 1);
+-              mutex_unlock(&dev_priv->hw_mutex);
+       }
+ 
+       dev_priv->active_master = &dev_priv->fbdev_master;
+@@ -1368,10 +1352,8 @@ static void vmw_pm_complete(struct device *kdev)
+       struct drm_device *dev = pci_get_drvdata(pdev);
+       struct vmw_private *dev_priv = vmw_priv(dev);
+ 
+-      mutex_lock(&dev_priv->hw_mutex);
+       vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
+       (void) vmw_read(dev_priv, SVGA_REG_ID);
+-      mutex_unlock(&dev_priv->hw_mutex);
+ 
+       /**
+        * Reclaim 3d reference held by fbdev and potentially
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+index 4ee799b43d5d..d26a6daa9719 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+@@ -399,7 +399,8 @@ struct vmw_private {
+       uint32_t memory_size;
+       bool has_gmr;
+       bool has_mob;
+-      struct mutex hw_mutex;
++      spinlock_t hw_lock;
++      spinlock_t cap_lock;
+ 
+       /*
+        * VGA registers.
+@@ -449,8 +450,9 @@ struct vmw_private {
+       atomic_t marker_seq;
+       wait_queue_head_t fence_queue;
+       wait_queue_head_t fifo_queue;
+-      int fence_queue_waiters; /* Protected by hw_mutex */
+-      int goal_queue_waiters; /* Protected by hw_mutex */
++      spinlock_t waiter_lock;
++      int fence_queue_waiters; /* Protected by waiter_lock */
++      int goal_queue_waiters; /* Protected by waiter_lock */
+       atomic_t fifo_queue_waiters;
+       uint32_t last_read_seqno;
+       spinlock_t irq_lock;
+@@ -553,20 +555,35 @@ static inline struct vmw_master *vmw_master(struct drm_master *master)
+       return (struct vmw_master *) master->driver_priv;
+ }
+ 
++/*
++ * The locking here is fine-grained, so that it is performed once
++ * for every read- and write operation. This is of course costly, but we
++ * don't perform much register access in the timing critical paths anyway.
++ * Instead we have the extra benefit of being sure that we don't forget
++ * the hw lock around register accesses.
++ */
+ static inline void vmw_write(struct vmw_private *dev_priv,
+                            unsigned int offset, uint32_t value)
+ {
++      unsigned long irq_flags;
++
++      spin_lock_irqsave(&dev_priv->hw_lock, irq_flags);
+       outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
+       outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT);
++      spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags);
+ }
+ 
+ static inline uint32_t vmw_read(struct vmw_private *dev_priv,
+                               unsigned int offset)
+ {
+-      uint32_t val;
++      unsigned long irq_flags;
++      u32 val;
+ 
++      spin_lock_irqsave(&dev_priv->hw_lock, irq_flags);
+       outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
+       val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT);
++      spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags);
++
+       return val;
+ }
+ 
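The conversion from hw_mutex to hw_lock above is what makes the rest of this series legal: the SVGA index/value port pair must be accessed as an atomic unit, and some callers (fence signaling, IRQ masking) run in contexts that may not sleep, where a mutex is forbidden but a spinlock taken with irqsave is fine. A hedged sketch of the same indexed-register idiom, with assumed names and port offsets:

#include <linux/io.h>
#include <linux/spinlock.h>

struct my_hw {
	spinlock_t hw_lock;		/* serializes the index/value pair */
	unsigned long io_start;
};

static u32 my_reg_read(struct my_hw *hw, u32 index)
{
	unsigned long flags;
	u32 val;

	/* irqsave makes this callable from interrupt context, where
	 * the old mutex-based accessor would sleep in atomic context. */
	spin_lock_irqsave(&hw->hw_lock, flags);
	outl(index, hw->io_start + 0);	/* 0 and 4 are assumed offsets */
	val = inl(hw->io_start + 4);
	spin_unlock_irqrestore(&hw->hw_lock, flags);
	return val;
}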
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+index b7594cb758af..945f1e0dad92 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+@@ -35,7 +35,7 @@ struct vmw_fence_manager {
+       struct vmw_private *dev_priv;
+       spinlock_t lock;
+       struct list_head fence_list;
+-      struct work_struct work, ping_work;
++      struct work_struct work;
+       u32 user_fence_size;
+       u32 fence_size;
+       u32 event_fence_action_size;
+@@ -134,14 +134,6 @@ static const char *vmw_fence_get_timeline_name(struct fence *f)
+       return "svga";
+ }
+ 
+-static void vmw_fence_ping_func(struct work_struct *work)
+-{
+-      struct vmw_fence_manager *fman =
+-              container_of(work, struct vmw_fence_manager, ping_work);
+-
+-      vmw_fifo_ping_host(fman->dev_priv, SVGA_SYNC_GENERIC);
+-}
+-
+ static bool vmw_fence_enable_signaling(struct fence *f)
+ {
+       struct vmw_fence_obj *fence =
+@@ -155,11 +147,7 @@ static bool vmw_fence_enable_signaling(struct fence *f)
+       if (seqno - fence->base.seqno < VMW_FENCE_WRAP)
+               return false;
+ 
+-      if (mutex_trylock(&dev_priv->hw_mutex)) {
+-              vmw_fifo_ping_host_locked(dev_priv, SVGA_SYNC_GENERIC);
+-              mutex_unlock(&dev_priv->hw_mutex);
+-      } else
+-              schedule_work(&fman->ping_work);
++      vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
+ 
+       return true;
+ }
+@@ -305,7 +293,6 @@ struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
+       INIT_LIST_HEAD(&fman->fence_list);
+       INIT_LIST_HEAD(&fman->cleanup_list);
+       INIT_WORK(&fman->work, &vmw_fence_work_func);
+-      INIT_WORK(&fman->ping_work, &vmw_fence_ping_func);
+       fman->fifo_down = true;
+       fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence));
+       fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj));
+@@ -323,7 +310,6 @@ void vmw_fence_manager_takedown(struct vmw_fence_manager *fman)
+       bool lists_empty;
+ 
+       (void) cancel_work_sync(&fman->work);
+-      (void) cancel_work_sync(&fman->ping_work);
+ 
+       spin_lock_irqsave(&fman->lock, irq_flags);
+       lists_empty = list_empty(&fman->fence_list) &&
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+index 09e10aefcd8e..39f2b03888e7 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+@@ -44,10 +44,10 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
+               if (!dev_priv->has_mob)
+                       return false;
+ 
+-              mutex_lock(&dev_priv->hw_mutex);
++              spin_lock(&dev_priv->cap_lock);
+               vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_3D);
+               result = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
+-              mutex_unlock(&dev_priv->hw_mutex);
++              spin_unlock(&dev_priv->cap_lock);
+ 
+               return (result != 0);
+       }
+@@ -120,7 +120,6 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
+       DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT));
+       DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL));
+ 
+-      mutex_lock(&dev_priv->hw_mutex);
+       dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
+       dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
+       dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES);
+@@ -143,7 +142,6 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
+       mb();
+ 
+       vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);
+-      mutex_unlock(&dev_priv->hw_mutex);
+ 
+       max = ioread32(fifo_mem + SVGA_FIFO_MAX);
+       min = ioread32(fifo_mem  + SVGA_FIFO_MIN);
+@@ -160,31 +158,28 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
+       return vmw_fifo_send_fence(dev_priv, &dummy);
+ }
+ 
+-void vmw_fifo_ping_host_locked(struct vmw_private *dev_priv, uint32_t reason)
++void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
+ {
+       __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
++      static DEFINE_SPINLOCK(ping_lock);
++      unsigned long irq_flags;
+ 
++      /*
++       * The ping_lock is needed because we don't have an atomic
++       * test-and-set of the SVGA_FIFO_BUSY register.
++       */
++      spin_lock_irqsave(&ping_lock, irq_flags);
+       if (unlikely(ioread32(fifo_mem + SVGA_FIFO_BUSY) == 0)) {
+               iowrite32(1, fifo_mem + SVGA_FIFO_BUSY);
+               vmw_write(dev_priv, SVGA_REG_SYNC, reason);
+       }
+-}
+-
+-void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
+-{
+-      mutex_lock(&dev_priv->hw_mutex);
+-
+-      vmw_fifo_ping_host_locked(dev_priv, reason);
+-
+-      mutex_unlock(&dev_priv->hw_mutex);
++      spin_unlock_irqrestore(&ping_lock, irq_flags);
+ }
+ 
+ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
+ {
+       __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+ 
+-      mutex_lock(&dev_priv->hw_mutex);
+-
+       vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
+       while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
+               ;
+@@ -198,7 +193,6 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
+       vmw_write(dev_priv, SVGA_REG_TRACES,
+                 dev_priv->traces_state);
+ 
+-      mutex_unlock(&dev_priv->hw_mutex);
+       vmw_marker_queue_takedown(&fifo->marker_queue);
+ 
+       if (likely(fifo->static_buffer != NULL)) {
+@@ -271,7 +265,7 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv,
+               return vmw_fifo_wait_noirq(dev_priv, bytes,
+                                          interruptible, timeout);
+ 
+-      mutex_lock(&dev_priv->hw_mutex);
++      spin_lock(&dev_priv->waiter_lock);
+       if (atomic_add_return(1, &dev_priv->fifo_queue_waiters) > 0) {
+               spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
+               outl(SVGA_IRQFLAG_FIFO_PROGRESS,
+@@ -280,7 +274,7 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv,
+               vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
+               spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
+       }
+-      mutex_unlock(&dev_priv->hw_mutex);
++      spin_unlock(&dev_priv->waiter_lock);
+ 
+       if (interruptible)
+               ret = wait_event_interruptible_timeout
+@@ -296,14 +290,14 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv,
+       else if (likely(ret > 0))
+               ret = 0;
+ 
+-      mutex_lock(&dev_priv->hw_mutex);
++      spin_lock(&dev_priv->waiter_lock);
+       if (atomic_dec_and_test(&dev_priv->fifo_queue_waiters)) {
+               spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
+               dev_priv->irq_mask &= ~SVGA_IRQFLAG_FIFO_PROGRESS;
+               vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
+               spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
+       }
+-      mutex_unlock(&dev_priv->hw_mutex);
++      spin_unlock(&dev_priv->waiter_lock);
+ 
+       return ret;
+ }
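vmw_fifo_wait() above and the seqno/goal waiter helpers later in this patch all share one pattern under the new waiter_lock: a counted enable, where the first waiter unmasks the interrupt source and the last one masks it again. A compact sketch of that idiom, names assumed:

#include <linux/spinlock.h>

struct my_dev {
	spinlock_t waiter_lock;
	int waiters;			/* protected by waiter_lock */
};

static void my_waiter_add(struct my_dev *dev)
{
	spin_lock(&dev->waiter_lock);
	if (dev->waiters++ == 0) {
		/* first waiter: unmask the interrupt source here */
	}
	spin_unlock(&dev->waiter_lock);
}

static void my_waiter_remove(struct my_dev *dev)
{
	spin_lock(&dev->waiter_lock);
	if (--dev->waiters == 0) {
		/* last waiter: mask the interrupt source again */
	}
	spin_unlock(&dev->waiter_lock);
}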
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+index 37881ecf5d7a..69c8ce23123c 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+@@ -135,13 +135,13 @@ static int vmw_fill_compat_cap(struct vmw_private *dev_priv, void *bounce,
+               (pair_offset + max_size * sizeof(SVGA3dCapPair)) / sizeof(u32);
+       compat_cap->header.type = SVGA3DCAPS_RECORD_DEVCAPS;
+ 
+-      mutex_lock(&dev_priv->hw_mutex);
++      spin_lock(&dev_priv->cap_lock);
+       for (i = 0; i < max_size; ++i) {
+               vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
+               compat_cap->pairs[i][0] = i;
+               compat_cap->pairs[i][1] = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
+       }
+-      mutex_unlock(&dev_priv->hw_mutex);
++      spin_unlock(&dev_priv->cap_lock);
+ 
+       return 0;
+ }
+@@ -191,12 +191,12 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
+               if (num > SVGA3D_DEVCAP_MAX)
+                       num = SVGA3D_DEVCAP_MAX;
+ 
+-              mutex_lock(&dev_priv->hw_mutex);
++              spin_lock(&dev_priv->cap_lock);
+               for (i = 0; i < num; ++i) {
+                       vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
+                       *bounce32++ = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
+               }
+-              mutex_unlock(&dev_priv->hw_mutex);
++              spin_unlock(&dev_priv->cap_lock);
+       } else if (gb_objects) {
+               ret = vmw_fill_compat_cap(dev_priv, bounce, size);
+               if (unlikely(ret != 0))
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+index 0c423766c441..9fe9827ee499 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+@@ -62,13 +62,8 @@ irqreturn_t vmw_irq_handler(int irq, void *arg)
+ 
+ static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno)
+ {
+-      uint32_t busy;
+ 
+-      mutex_lock(&dev_priv->hw_mutex);
+-      busy = vmw_read(dev_priv, SVGA_REG_BUSY);
+-      mutex_unlock(&dev_priv->hw_mutex);
+-
+-      return (busy == 0);
++      return (vmw_read(dev_priv, SVGA_REG_BUSY) == 0);
+ }
+ 
+ void vmw_update_seqno(struct vmw_private *dev_priv,
+@@ -184,7 +179,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
+ 
+ void vmw_seqno_waiter_add(struct vmw_private *dev_priv)
+ {
+-      mutex_lock(&dev_priv->hw_mutex);
++      spin_lock(&dev_priv->waiter_lock);
+       if (dev_priv->fence_queue_waiters++ == 0) {
+               unsigned long irq_flags;
+ 
+@@ -195,12 +190,12 @@ void vmw_seqno_waiter_add(struct vmw_private *dev_priv)
+               vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
+               spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
+       }
+-      mutex_unlock(&dev_priv->hw_mutex);
++      spin_unlock(&dev_priv->waiter_lock);
+ }
+ 
+ void vmw_seqno_waiter_remove(struct vmw_private *dev_priv)
+ {
+-      mutex_lock(&dev_priv->hw_mutex);
++      spin_lock(&dev_priv->waiter_lock);
+       if (--dev_priv->fence_queue_waiters == 0) {
+               unsigned long irq_flags;
+ 
+@@ -209,13 +204,13 @@ void vmw_seqno_waiter_remove(struct vmw_private *dev_priv)
+               vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
+               spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
+       }
+-      mutex_unlock(&dev_priv->hw_mutex);
++      spin_unlock(&dev_priv->waiter_lock);
+ }
+ 
+ 
+ void vmw_goal_waiter_add(struct vmw_private *dev_priv)
+ {
+-      mutex_lock(&dev_priv->hw_mutex);
++      spin_lock(&dev_priv->waiter_lock);
+       if (dev_priv->goal_queue_waiters++ == 0) {
+               unsigned long irq_flags;
+ 
+@@ -226,12 +221,12 @@ void vmw_goal_waiter_add(struct vmw_private *dev_priv)
+               vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
+               spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
+       }
+-      mutex_unlock(&dev_priv->hw_mutex);
++      spin_unlock(&dev_priv->waiter_lock);
+ }
+ 
+ void vmw_goal_waiter_remove(struct vmw_private *dev_priv)
+ {
+-      mutex_lock(&dev_priv->hw_mutex);
++      spin_lock(&dev_priv->waiter_lock);
+       if (--dev_priv->goal_queue_waiters == 0) {
+               unsigned long irq_flags;
+ 
+@@ -240,7 +235,7 @@ void vmw_goal_waiter_remove(struct vmw_private *dev_priv)
+               vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
+               spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
+       }
+-      mutex_unlock(&dev_priv->hw_mutex);
++      spin_unlock(&dev_priv->waiter_lock);
+ }
+ 
+ int vmw_wait_seqno(struct vmw_private *dev_priv,
+@@ -315,9 +310,7 @@ void vmw_irq_uninstall(struct drm_device *dev)
+       if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
+               return;
+ 
+-      mutex_lock(&dev_priv->hw_mutex);
+       vmw_write(dev_priv, SVGA_REG_IRQMASK, 0);
+-      mutex_unlock(&dev_priv->hw_mutex);
+ 
+       status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
+       outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+index 941a7bc0b791..fddd53335237 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+@@ -1828,9 +1828,7 @@ vmw_du_connector_detect(struct drm_connector *connector, bool force)
+       struct vmw_private *dev_priv = vmw_priv(dev);
+       struct vmw_display_unit *du = vmw_connector_to_du(connector);
+ 
+-      mutex_lock(&dev_priv->hw_mutex);
+       num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS);
+-      mutex_unlock(&dev_priv->hw_mutex);
+ 
+       return ((vmw_connector_to_du(connector)->unit < num_displays &&
+                du->pref_active) ?
+diff --git a/drivers/hid/hid-rmi.c b/drivers/hid/hid-rmi.c
+index 3cccff73b9b9..a994477bd25a 100644
+--- a/drivers/hid/hid-rmi.c
++++ b/drivers/hid/hid-rmi.c
+@@ -584,11 +584,15 @@ static int rmi_populate_f11(struct hid_device *hdev)
+       bool has_query10 = false;
+       bool has_query11;
+       bool has_query12;
++      bool has_query27;
++      bool has_query28;
++      bool has_query36 = false;
+       bool has_physical_props;
+       bool has_gestures;
+       bool has_rel;
++      bool has_data40 = false;
+       unsigned x_size, y_size;
+-      u16 query12_offset;
++      u16 query_offset;
+ 
+       if (!data->f11.query_base_addr) {
+               hid_err(hdev, "No 2D sensor found, giving up.\n");
+@@ -604,6 +608,8 @@ static int rmi_populate_f11(struct hid_device *hdev)
+       has_query9 = !!(buf[0] & BIT(3));
+       has_query11 = !!(buf[0] & BIT(4));
+       has_query12 = !!(buf[0] & BIT(5));
++      has_query27 = !!(buf[0] & BIT(6));
++      has_query28 = !!(buf[0] & BIT(7));
+ 
+       /* query 1 to get the max number of fingers */
+       ret = rmi_read(hdev, data->f11.query_base_addr + 1, buf);
+@@ -642,27 +648,27 @@ static int rmi_populate_f11(struct hid_device *hdev)
+        * +1 for query 5 which is present since absolute events are
+        * reported and +1 for query 12.
+        */
+-      query12_offset = 6;
++      query_offset = 6;
+ 
+       if (has_rel)
+-              ++query12_offset; /* query 6 is present */
++              ++query_offset; /* query 6 is present */
+ 
+       if (has_gestures)
+-              query12_offset += 2; /* query 7 and 8 are present */
++              query_offset += 2; /* query 7 and 8 are present */
+ 
+       if (has_query9)
+-              ++query12_offset;
++              ++query_offset;
+ 
+       if (has_query10)
+-              ++query12_offset;
++              ++query_offset;
+ 
+       if (has_query11)
+-              ++query12_offset;
++              ++query_offset;
+ 
+       /* query 12 to know if the physical properties are reported */
+       if (has_query12) {
+               ret = rmi_read(hdev, data->f11.query_base_addr
+-                              + query12_offset, buf);
++                              + query_offset, buf);
+               if (ret) {
+                       hid_err(hdev, "can not get query 12: %d.\n", ret);
+                       return ret;
+@@ -670,9 +676,10 @@ static int rmi_populate_f11(struct hid_device *hdev)
+               has_physical_props = !!(buf[0] & BIT(5));
+ 
+               if (has_physical_props) {
++                      query_offset += 1;
+                       ret = rmi_read_block(hdev,
+                                       data->f11.query_base_addr
+-                                              + query12_offset + 1, buf, 4);
++                                              + query_offset, buf, 4);
+                       if (ret) {
+                               hid_err(hdev, "can not read query 15-18: %d.\n",
+                                       ret);
+@@ -687,9 +694,45 @@ static int rmi_populate_f11(struct hid_device *hdev)
+ 
+                       hid_info(hdev, "%s: size in mm: %d x %d\n",
+                                __func__, data->x_size_mm, data->y_size_mm);
++
++                      /*
++                       * query 15 - 18 contain the size of the sensor
++                       * and query 19 - 26 contain bezel dimensions
++                       */
++                      query_offset += 12;
++              }
++      }
++
++      if (has_query27)
++              ++query_offset;
++
++      if (has_query28) {
++              ret = rmi_read(hdev, data->f11.query_base_addr
++                              + query_offset, buf);
++              if (ret) {
++                      hid_err(hdev, "can not get query 28: %d.\n", ret);
++                      return ret;
++              }
++
++              has_query36 = !!(buf[0] & BIT(6));
++      }
++
++      if (has_query36) {
++              query_offset += 2;
++              ret = rmi_read(hdev, data->f11.query_base_addr
++                              + query_offset, buf);
++              if (ret) {
++                      hid_err(hdev, "can not get query 36: %d.\n", ret);
++                      return ret;
+               }
++
++              has_data40 = !!(buf[0] & BIT(5));
+       }
+ 
++
++      if (has_data40)
++              data->f11.report_size += data->max_fingers * 2;
++
+       /*
+        * retrieve the ctrl registers
+        * the ctrl register has a size of 20 but a fw bug split it into 16 + 4,
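The hid-rmi change above generalizes query12_offset into a running query_offset: a cursor that is advanced past every optional F11 query register whose presence was announced by an earlier capability bit, so that later reads (query 28, query 36) land on the right address. The cursor pattern itself, sketched with invented flags:

#include <linux/types.h>

struct query_cursor {
	u16 offset;			/* next query register to read */
};

/* Step past n registers iff the capability bit said they exist. */
static void skip_present(struct query_cursor *c, bool present, u16 n)
{
	if (present)
		c->offset += n;
}

Getting one skip wrong shifts every subsequent read, which is exactly the class of bug the added has_query27/has_query28 bookkeeping guards against.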
+diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
+index 65244774bfa3..c127af99a0e0 100644
+--- a/drivers/i2c/busses/i2c-s3c2410.c
++++ b/drivers/i2c/busses/i2c-s3c2410.c
+@@ -778,14 +778,16 @@ static int s3c24xx_i2c_xfer(struct i2c_adapter *adap,
+       int ret;
+ 
+       pm_runtime_get_sync(&adap->dev);
+-      clk_prepare_enable(i2c->clk);
++      ret = clk_enable(i2c->clk);
++      if (ret)
++              return ret;
+ 
+       for (retry = 0; retry < adap->retries; retry++) {
+ 
+               ret = s3c24xx_i2c_doxfer(i2c, msgs, num);
+ 
+               if (ret != -EAGAIN) {
+-                      clk_disable_unprepare(i2c->clk);
++                      clk_disable(i2c->clk);
+                       pm_runtime_put(&adap->dev);
+                       return ret;
+               }
+@@ -795,7 +797,7 @@ static int s3c24xx_i2c_xfer(struct i2c_adapter *adap,
+               udelay(100);
+       }
+ 
+-      clk_disable_unprepare(i2c->clk);
++      clk_disable(i2c->clk);
+       pm_runtime_put(&adap->dev);
+       return -EREMOTEIO;
+ }
+@@ -1174,7 +1176,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
+ 
+       clk_prepare_enable(i2c->clk);
+       ret = s3c24xx_i2c_init(i2c);
+-      clk_disable_unprepare(i2c->clk);
++      clk_disable(i2c->clk);
+       if (ret != 0) {
+               dev_err(&pdev->dev, "I2C controller init failed\n");
+               return ret;
+@@ -1187,6 +1189,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
+               i2c->irq = ret = platform_get_irq(pdev, 0);
+               if (ret <= 0) {
+                       dev_err(&pdev->dev, "cannot find IRQ\n");
++                      clk_unprepare(i2c->clk);
+                       return ret;
+               }
+ 
+@@ -1195,6 +1198,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
+ 
+               if (ret != 0) {
+                       dev_err(&pdev->dev, "cannot claim IRQ %d\n", i2c->irq);
++                      clk_unprepare(i2c->clk);
+                       return ret;
+               }
+       }
+@@ -1202,6 +1206,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
+       ret = s3c24xx_i2c_register_cpufreq(i2c);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "failed to register cpufreq notifier\n");
++              clk_unprepare(i2c->clk);
+               return ret;
+       }
+ 
+@@ -1218,6 +1223,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
+       if (ret < 0) {
+               dev_err(&pdev->dev, "failed to add bus to i2c core\n");
+               s3c24xx_i2c_deregister_cpufreq(i2c);
++              clk_unprepare(i2c->clk);
+               return ret;
+       }
+ 
+@@ -1239,6 +1245,8 @@ static int s3c24xx_i2c_remove(struct platform_device *pdev)
+ {
+       struct s3c24xx_i2c *i2c = platform_get_drvdata(pdev);
+ 
++      clk_unprepare(i2c->clk);
++
+       pm_runtime_disable(&i2c->adap.dev);
+       pm_runtime_disable(&pdev->dev);
+ 
+@@ -1267,10 +1275,13 @@ static int s3c24xx_i2c_resume_noirq(struct device *dev)
+ {
+       struct platform_device *pdev = to_platform_device(dev);
+       struct s3c24xx_i2c *i2c = platform_get_drvdata(pdev);
++      int ret;
+ 
+-      clk_prepare_enable(i2c->clk);
++      ret = clk_enable(i2c->clk);
++      if (ret)
++              return ret;
+       s3c24xx_i2c_init(i2c);
+-      clk_disable_unprepare(i2c->clk);
++      clk_disable(i2c->clk);
+       i2c->suspended = 0;
+ 
+       return 0;
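The whole i2c-s3c2410 change above rests on the clk API split: clk_prepare() may sleep and so runs once at probe, while clk_enable()/clk_disable() are cheap, atomic-safe gates wrapped around each transfer, with clk_unprepare() added to every error path and to remove(). A minimal sketch of that lifetime, with an assumed driver struct:

#include <linux/clk.h>
#include <linux/errno.h>

struct my_bus {
	struct clk *clk;
};

static int my_bus_probe(struct my_bus *bus)
{
	/* prepare + enable once; may sleep, so only legal at probe time */
	int ret = clk_prepare_enable(bus->clk);

	if (ret)
		return ret;
	clk_disable(bus->clk);		/* stay prepared, but gated off */
	return 0;
}

static int my_bus_xfer(struct my_bus *bus)
{
	int ret = clk_enable(bus->clk);	/* fast, atomic-safe path */

	if (ret)
		return ret;
	/* ... perform the transfer ... */
	clk_disable(bus->clk);
	return 0;
}

static void my_bus_remove(struct my_bus *bus)
{
	clk_unprepare(bus->clk);	/* undo the probe-time prepare */
}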
+diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
+index 77ecf6d32237..6e22682c8255 100644
+--- a/drivers/input/mouse/elantech.c
++++ b/drivers/input/mouse/elantech.c
+@@ -1097,6 +1097,8 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse,
+  * Asus UX31               0x361f00        20, 15, 0e      clickpad
+  * Asus UX32VD             0x361f02        00, 15, 0e      clickpad
+  * Avatar AVIU-145A2       0x361f00        ?               clickpad
++ * Fujitsu LIFEBOOK E544   0x470f00        d0, 12, 09      2 hw buttons
++ * Fujitsu LIFEBOOK E554   0x570f01        40, 14, 0c      2 hw buttons
+  * Fujitsu H730            0x570f00        c0, 14, 0c      3 hw buttons (**)
+  * Gigabyte U2442          0x450f01        58, 17, 0c      2 hw buttons
+  * Lenovo L430             0x350f02        b9, 15, 0c      2 hw buttons (*)
+@@ -1475,6 +1477,20 @@ static const struct dmi_system_id elantech_dmi_force_crc_enabled[] = {
+                       DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H730"),
+               },
+       },
++      {
++              /* Fujitsu LIFEBOOK E554  does not work with crc_enabled == 0 */
++              .matches = {
++                      DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
++                      DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E554"),
++              },
++      },
++      {
++              /* Fujitsu LIFEBOOK E544  does not work with crc_enabled == 0 */
++              .matches = {
++                      DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
++                      DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E544"),
++              },
++      },
+ #endif
+       { }
+ };
+diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
+index f9472920d986..23e26e0768b5 100644
+--- a/drivers/input/mouse/synaptics.c
++++ b/drivers/input/mouse/synaptics.c
+@@ -135,8 +135,9 @@ static const struct min_max_quirk min_max_pnpid_table[] = {
+               1232, 5710, 1156, 4696
+       },
+       {
+-              (const char * const []){"LEN0034", "LEN0036", "LEN0039",
+-                                      "LEN2002", "LEN2004", NULL},
++              (const char * const []){"LEN0034", "LEN0036", "LEN0037",
++                                      "LEN0039", "LEN2002", "LEN2004",
++                                      NULL},
+               1024, 5112, 2024, 4832
+       },
+       {
+@@ -165,7 +166,7 @@ static const char * const topbuttonpad_pnp_ids[] = {
+       "LEN0034", /* T431s, L440, L540, T540, W540, X1 Carbon 2nd */
+       "LEN0035", /* X240 */
+       "LEN0036", /* T440 */
+-      "LEN0037",
++      "LEN0037", /* X1 Carbon 2nd */
+       "LEN0038",
+       "LEN0039", /* T440s */
+       "LEN0041",
+diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
+index 1a858c86a72b..39bec4715f2c 100644
+--- a/drivers/input/serio/i8042-x86ia64io.h
++++ b/drivers/input/serio/i8042-x86ia64io.h
+@@ -152,6 +152,14 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
+               },
+       },
+       {
++              /* Medion Akoya E7225 */
++              .matches = {
++                      DMI_MATCH(DMI_SYS_VENDOR, "Medion"),
++                      DMI_MATCH(DMI_PRODUCT_NAME, "Akoya E7225"),
++                      DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
++              },
++      },
++      {
+               /* Blue FB5601 */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "blue"),
+diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
+index 97e3a6c07e31..1e64e9c50d85 100644
+--- a/drivers/md/dm-cache-metadata.c
++++ b/drivers/md/dm-cache-metadata.c
+@@ -683,7 +683,7 @@ static struct dm_cache_metadata *metadata_open(struct block_device *bdev,
+       cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+       if (!cmd) {
+               DMERR("could not allocate metadata struct");
+-              return NULL;
++              return ERR_PTR(-ENOMEM);
+       }
+ 
+       atomic_set(&cmd->ref_count, 1);
+@@ -745,7 +745,7 @@ static struct dm_cache_metadata *lookup_or_open(struct block_device *bdev,
+               return cmd;
+ 
+       cmd = metadata_open(bdev, data_block_size, may_format_device, policy_hint_size);
+-      if (cmd) {
++      if (!IS_ERR(cmd)) {
+               mutex_lock(&table_lock);
+               cmd2 = lookup(bdev);
+               if (cmd2) {
+@@ -780,9 +780,10 @@ struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
+ {
+       struct dm_cache_metadata *cmd = lookup_or_open(bdev, data_block_size,
+                                                      may_format_device, policy_hint_size);
+-      if (cmd && !same_params(cmd, data_block_size)) {
++
++      if (!IS_ERR(cmd) && !same_params(cmd, data_block_size)) {
+               dm_cache_metadata_close(cmd);
+-              return NULL;
++              return ERR_PTR(-EINVAL);
+       }
+ 
+       return cmd;
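The dm-cache-metadata hunks convert NULL returns into the kernel's ERR_PTR convention so callers can distinguish -ENOMEM from -EINVAL instead of collapsing every failure into NULL. The idiom in isolation, as a minimal self-contained sketch:

#include <linux/err.h>
#include <linux/slab.h>

struct blob {
	size_t len;
	char data[];
};

static struct blob *blob_open(size_t len)
{
	struct blob *b = kzalloc(sizeof(*b) + len, GFP_KERNEL);

	if (!b)
		return ERR_PTR(-ENOMEM);	/* error rides in the pointer */
	b->len = len;
	return b;
}

static int blob_use(size_t len)
{
	struct blob *b = blob_open(len);

	if (IS_ERR(b))
		return PTR_ERR(b);	/* recover the original errno */
	/* ... use b ... */
	kfree(b);
	return 0;
}

Every caller must be converted in the same patch, since IS_ERR() and a NULL check are not interchangeable -- which is why lookup_or_open() and dm_cache_metadata_open() change together here.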
+diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
+index aae19133cfac..ac6b0ff161ea 100644
+--- a/drivers/md/dm-thin.c
++++ b/drivers/md/dm-thin.c
+@@ -2978,6 +2978,12 @@ static int pool_message(struct dm_target *ti, unsigned argc, char **argv)
+       struct pool_c *pt = ti->private;
+       struct pool *pool = pt->pool;
+ 
++      if (get_pool_mode(pool) >= PM_READ_ONLY) {
++              DMERR("%s: unable to service pool target messages in READ_ONLY or FAIL mode",
++                    dm_device_name(pool->pool_md));
++              return -EINVAL;
++      }
++
+       if (!strcasecmp(argv[0], "create_thin"))
+               r = process_create_thin_mesg(argc, argv, pool);
+ 
+diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
+index 8e78bb48f5a4..60285820f7b4 100644
+--- a/drivers/net/can/c_can/c_can.c
++++ b/drivers/net/can/c_can/c_can.c
+@@ -611,6 +611,10 @@ static void c_can_stop(struct net_device *dev)
+       struct c_can_priv *priv = netdev_priv(dev);
+ 
+       c_can_irq_control(priv, false);
++
++      /* put ctrl to init on stop to end ongoing transmission */
++      priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_INIT);
++
+       priv->can.state = CAN_STATE_STOPPED;
+ }
+ 
+diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
+index cc7bfc0c0a71..8b255e777cc7 100644
+--- a/drivers/net/can/usb/kvaser_usb.c
++++ b/drivers/net/can/usb/kvaser_usb.c
+@@ -587,7 +587,7 @@ static int kvaser_usb_simple_msg_async(struct kvaser_usb_net_priv *priv,
+                         usb_sndbulkpipe(dev->udev,
+                                         dev->bulk_out->bEndpointAddress),
+                         buf, msg->len,
+-                        kvaser_usb_simple_msg_callback, priv);
++                        kvaser_usb_simple_msg_callback, netdev);
+       usb_anchor_urb(urb, &priv->tx_submitted);
+ 
+       err = usb_submit_urb(urb, GFP_ATOMIC);
+@@ -662,11 +662,6 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev,
+       priv = dev->nets[channel];
+       stats = &priv->netdev->stats;
+ 
+-      if (status & M16C_STATE_BUS_RESET) {
+-              kvaser_usb_unlink_tx_urbs(priv);
+-              return;
+-      }
+-
+       skb = alloc_can_err_skb(priv->netdev, &cf);
+       if (!skb) {
+               stats->rx_dropped++;
+@@ -677,7 +672,7 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev,
+ 
+       netdev_dbg(priv->netdev, "Error status: 0x%02x\n", status);
+ 
+-      if (status & M16C_STATE_BUS_OFF) {
++      if (status & (M16C_STATE_BUS_OFF | M16C_STATE_BUS_RESET)) {
+               cf->can_id |= CAN_ERR_BUSOFF;
+ 
+               priv->can.can_stats.bus_off++;
+@@ -703,9 +698,7 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev,
+               }
+ 
+               new_state = CAN_STATE_ERROR_PASSIVE;
+-      }
+-
+-      if (status == M16C_STATE_BUS_ERROR) {
++      } else if (status & M16C_STATE_BUS_ERROR) {
+               if ((priv->can.state < CAN_STATE_ERROR_WARNING) &&
+                   ((txerr >= 96) || (rxerr >= 96))) {
+                       cf->can_id |= CAN_ERR_CRTL;
+@@ -715,7 +708,8 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev,
+ 
+                       priv->can.can_stats.error_warning++;
+                       new_state = CAN_STATE_ERROR_WARNING;
+-              } else if (priv->can.state > CAN_STATE_ERROR_ACTIVE) {
++              } else if ((priv->can.state > CAN_STATE_ERROR_ACTIVE) &&
++                         ((txerr < 96) && (rxerr < 96))) {
+                       cf->can_id |= CAN_ERR_PROT;
+                       cf->data[2] = CAN_ERR_PROT_ACTIVE;
+ 
+@@ -1593,7 +1587,7 @@ static int kvaser_usb_probe(struct usb_interface *intf,
+ {
+       struct kvaser_usb *dev;
+       int err = -ENOMEM;
+-      int i;
++      int i, retry = 3;
+ 
+       dev = devm_kzalloc(&intf->dev, sizeof(*dev), GFP_KERNEL);
+       if (!dev)
+@@ -1611,7 +1605,15 @@ static int kvaser_usb_probe(struct usb_interface *intf,
+ 
+       usb_set_intfdata(intf, dev);
+ 
+-      err = kvaser_usb_get_software_info(dev);
++      /* On some x86 laptops, plugging a Kvaser device again after
++       * an unplug makes the firmware always ignore the very first
++       * command. For such a case, provide some room for retries
++       * instead of completely exiting the driver.
++       */
++      do {
++              err = kvaser_usb_get_software_info(dev);
++      } while (--retry && err == -ETIMEDOUT);
++
+       if (err) {
+               dev_err(&intf->dev,
+                       "Cannot get software infos, error %d\n", err);
+diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
+index 64d1cef4cda1..48645504106e 100644
+--- a/drivers/net/ethernet/ti/cpsw.c
++++ b/drivers/net/ethernet/ti/cpsw.c
+@@ -1676,6 +1676,19 @@ static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
+       if (vid == priv->data.default_vlan)
+               return 0;
+ 
++      if (priv->data.dual_emac) {
++              /* In dual EMAC, reserved VLAN id should not be used for
++               * creating VLAN interfaces as this can break the dual
++               * EMAC port separation
++               */
++              int i;
++
++              for (i = 0; i < priv->data.slaves; i++) {
++                      if (vid == priv->slaves[i].port_vlan)
++                              return -EINVAL;
++              }
++      }
++
+       dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid);
+       return cpsw_add_vlan_ale_entry(priv, vid);
+ }
+@@ -1689,6 +1702,15 @@ static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
+       if (vid == priv->data.default_vlan)
+               return 0;
+ 
++      if (priv->data.dual_emac) {
++              int i;
++
++              for (i = 0; i < priv->data.slaves; i++) {
++                      if (vid == priv->slaves[i].port_vlan)
++                              return -EINVAL;
++              }
++      }
++
+       dev_info(priv->dev, "removing vlanid %d from vlan filter\n", vid);
+       ret = cpsw_ale_del_vlan(priv->ale, vid, 0);
+       if (ret != 0)
+diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
+index 354a81d40925..d6380c187db6 100644
+--- a/drivers/pinctrl/pinctrl-at91.c
++++ b/drivers/pinctrl/pinctrl-at91.c
+@@ -179,7 +179,7 @@ struct at91_pinctrl {
+       struct device           *dev;
+       struct pinctrl_dev      *pctl;
+ 
+-      int                     nbanks;
++      int                     nactive_banks;
+ 
+       uint32_t                *mux_mask;
+       int                     nmux;
+@@ -655,12 +655,18 @@ static int pin_check_config(struct at91_pinctrl *info, const char *name,
+       int mux;
+ 
+       /* check if it's a valid config */
+-      if (pin->bank >= info->nbanks) {
++      if (pin->bank >= gpio_banks) {
+               dev_err(info->dev, "%s: pin conf %d bank_id %d >= nbanks %d\n",
+-                      name, index, pin->bank, info->nbanks);
++                      name, index, pin->bank, gpio_banks);
+               return -EINVAL;
+       }
+ 
++      if (!gpio_chips[pin->bank]) {
++              dev_err(info->dev, "%s: pin conf %d bank_id %d not enabled\n",
++                      name, index, pin->bank);
++              return -ENXIO;
++      }
++
+       if (pin->pin >= MAX_NB_GPIO_PER_BANK) {
+               dev_err(info->dev, "%s: pin conf %d pin_bank_id %d >= %d\n",
+                       name, index, pin->pin, MAX_NB_GPIO_PER_BANK);
+@@ -983,7 +989,8 @@ static void at91_pinctrl_child_count(struct at91_pinctrl *info,
+ 
+       for_each_child_of_node(np, child) {
+               if (of_device_is_compatible(child, gpio_compat)) {
+-                      info->nbanks++;
++                      if (of_device_is_available(child))
++                              info->nactive_banks++;
+               } else {
+                       info->nfunctions++;
+                       info->ngroups += of_get_child_count(child);
+@@ -1005,11 +1012,11 @@ static int at91_pinctrl_mux_mask(struct at91_pinctrl *info,
+       }
+ 
+       size /= sizeof(*list);
+-      if (!size || size % info->nbanks) {
+-              dev_err(info->dev, "wrong mux mask array should be by %d\n", info->nbanks);
++      if (!size || size % gpio_banks) {
++              dev_err(info->dev, "wrong mux mask array should be by %d\n", gpio_banks);
+               return -EINVAL;
+       }
+-      info->nmux = size / info->nbanks;
++      info->nmux = size / gpio_banks;
+ 
+       info->mux_mask = devm_kzalloc(info->dev, sizeof(u32) * size, GFP_KERNEL);
+       if (!info->mux_mask) {
+@@ -1133,7 +1140,7 @@ static int at91_pinctrl_probe_dt(struct platform_device *pdev,
+               of_match_device(at91_pinctrl_of_match, &pdev->dev)->data;
+       at91_pinctrl_child_count(info, np);
+ 
+-      if (info->nbanks < 1) {
++      if (gpio_banks < 1) {
+               dev_err(&pdev->dev, "you need to specify at least one gpio-controller\n");
+               return -EINVAL;
+       }
+@@ -1146,7 +1153,7 @@ static int at91_pinctrl_probe_dt(struct platform_device *pdev,
+ 
+       dev_dbg(&pdev->dev, "mux-mask\n");
+       tmp = info->mux_mask;
+-      for (i = 0; i < info->nbanks; i++) {
++      for (i = 0; i < gpio_banks; i++) {
+               for (j = 0; j < info->nmux; j++, tmp++) {
+                       dev_dbg(&pdev->dev, "%d:%d\t0x%x\n", i, j, tmp[0]);
+               }
+@@ -1164,7 +1171,7 @@ static int at91_pinctrl_probe_dt(struct platform_device *pdev,
+       if (!info->groups)
+               return -ENOMEM;
+ 
+-      dev_dbg(&pdev->dev, "nbanks = %d\n", info->nbanks);
++      dev_dbg(&pdev->dev, "nbanks = %d\n", gpio_banks);
+       dev_dbg(&pdev->dev, "nfunctions = %d\n", info->nfunctions);
+       dev_dbg(&pdev->dev, "ngroups = %d\n", info->ngroups);
+ 
+@@ -1187,7 +1194,7 @@ static int at91_pinctrl_probe(struct platform_device *pdev)
+ {
+       struct at91_pinctrl *info;
+       struct pinctrl_pin_desc *pdesc;
+-      int ret, i, j, k;
++      int ret, i, j, k, ngpio_chips_enabled = 0;
+ 
+       info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
+       if (!info)
+@@ -1202,23 +1209,27 @@ static int at91_pinctrl_probe(struct platform_device *pdev)
+        * to obtain references to the struct gpio_chip * for them, and we
+        * need this to proceed.
+        */
+-      for (i = 0; i < info->nbanks; i++) {
+-              if (!gpio_chips[i]) {
+-                      dev_warn(&pdev->dev, "GPIO chip %d not registered yet\n", i);
+-                      devm_kfree(&pdev->dev, info);
+-                      return -EPROBE_DEFER;
+-              }
++      for (i = 0; i < gpio_banks; i++)
++              if (gpio_chips[i])
++                      ngpio_chips_enabled++;
++
++      if (ngpio_chips_enabled < info->nactive_banks) {
++              dev_warn(&pdev->dev,
++                       "All GPIO chips are not registered yet (%d/%d)\n",
++                       ngpio_chips_enabled, info->nactive_banks);
++              devm_kfree(&pdev->dev, info);
++              return -EPROBE_DEFER;
+       }
+ 
+       at91_pinctrl_desc.name = dev_name(&pdev->dev);
+-      at91_pinctrl_desc.npins = info->nbanks * MAX_NB_GPIO_PER_BANK;
++      at91_pinctrl_desc.npins = gpio_banks * MAX_NB_GPIO_PER_BANK;
+       at91_pinctrl_desc.pins = pdesc =
+               devm_kzalloc(&pdev->dev, sizeof(*pdesc) * at91_pinctrl_desc.npins, GFP_KERNEL);
+ 
+       if (!at91_pinctrl_desc.pins)
+               return -ENOMEM;
+ 
+-      for (i = 0 , k = 0; i < info->nbanks; i++) {
++      for (i = 0, k = 0; i < gpio_banks; i++) {
+               for (j = 0; j < MAX_NB_GPIO_PER_BANK; j++, k++) {
+                       pdesc->number = k;
+                       pdesc->name = kasprintf(GFP_KERNEL, "pio%c%d", i + 'A', j);
+@@ -1236,8 +1247,9 @@ static int at91_pinctrl_probe(struct platform_device *pdev)
+       }
+ 
+       /* We will handle a range of GPIO pins */
+-      for (i = 0; i < info->nbanks; i++)
+-              pinctrl_add_gpio_range(info->pctl, &gpio_chips[i]->range);
++      for (i = 0; i < gpio_banks; i++)
++              if (gpio_chips[i])
++                      pinctrl_add_gpio_range(info->pctl, &gpio_chips[i]->range);
+ 
+       dev_info(&pdev->dev, "initialized AT91 pinctrl driver\n");
+ 
+@@ -1614,9 +1626,10 @@ static void gpio_irq_handler(unsigned irq, struct irq_desc *desc)
+ static int at91_gpio_of_irq_setup(struct platform_device *pdev,
+                                 struct at91_gpio_chip *at91_gpio)
+ {
++      struct gpio_chip        *gpiochip_prev = NULL;
+       struct at91_gpio_chip   *prev = NULL;
+       struct irq_data         *d = irq_get_irq_data(at91_gpio->pioc_virq);
+-      int ret;
++      int ret, i;
+ 
+       at91_gpio->pioc_hwirq = irqd_to_hwirq(d);
+ 
+@@ -1642,24 +1655,33 @@ static int at91_gpio_of_irq_setup(struct platform_device *pdev,
+               return ret;
+       }
+ 
+-      /* Setup chained handler */
+-      if (at91_gpio->pioc_idx)
+-              prev = gpio_chips[at91_gpio->pioc_idx - 1];
+-
+       /* The top level handler handles one bank of GPIOs, except
+        * on some SoC it can handle up to three...
+        * We only set up the handler for the first of the list.
+        */
+-      if (prev && prev->next == at91_gpio)
++      gpiochip_prev = irq_get_handler_data(at91_gpio->pioc_virq);
++      if (!gpiochip_prev) {
++              /* Then register the chain on the parent IRQ */
++              gpiochip_set_chained_irqchip(&at91_gpio->chip,
++                                           &gpio_irqchip,
++                                           at91_gpio->pioc_virq,
++                                           gpio_irq_handler);
+               return 0;
++      }
+ 
+-      /* Then register the chain on the parent IRQ */
+-      gpiochip_set_chained_irqchip(&at91_gpio->chip,
+-                                   &gpio_irqchip,
+-                                   at91_gpio->pioc_virq,
+-                                   gpio_irq_handler);
++      prev = container_of(gpiochip_prev, struct at91_gpio_chip, chip);
+ 
+-      return 0;
++      /* we can only have 2 banks before */
++      for (i = 0; i < 2; i++) {
++              if (prev->next) {
++                      prev = prev->next;
++              } else {
++                      prev->next = at91_gpio;
++                      return 0;
++              }
++      }
++
++      return -EINVAL;
+ }
+ 
+ /* This structure is replicated for each GPIO block allocated at probe time */
+@@ -1676,24 +1698,6 @@ static struct gpio_chip at91_gpio_template = {
+       .ngpio                  = MAX_NB_GPIO_PER_BANK,
+ };
+ 
+-static void at91_gpio_probe_fixup(void)
+-{
+-      unsigned i;
+-      struct at91_gpio_chip *at91_gpio, *last = NULL;
+-
+-      for (i = 0; i < gpio_banks; i++) {
+-              at91_gpio = gpio_chips[i];
+-
+-              /*
+-               * GPIO controller are grouped on some SoC:
+-               * PIOC, PIOD and PIOE can share the same IRQ line
+-               */
+-              if (last && last->pioc_virq == at91_gpio->pioc_virq)
+-                      last->next = at91_gpio;
+-              last = at91_gpio;
+-      }
+-}
+-
+ static struct of_device_id at91_gpio_of_match[] = {
+       { .compatible = "atmel,at91sam9x5-gpio", .data = &at91sam9x5_ops, },
+       { .compatible = "atmel,at91rm9200-gpio", .data = &at91rm9200_ops },
+@@ -1806,8 +1810,6 @@ static int at91_gpio_probe(struct platform_device *pdev)
+       gpio_chips[alias_idx] = at91_chip;
+       gpio_banks = max(gpio_banks, alias_idx + 1);
+ 
+-      at91_gpio_probe_fixup();
+-
+       ret = at91_gpio_of_irq_setup(pdev, at91_chip);
+       if (ret)
+               goto irq_setup_err;
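The reworked at91_gpio_of_irq_setup() above recovers its driver-private state from the generic handler data with container_of(gpiochip_prev, struct at91_gpio_chip, chip), replacing the fragile probe-order fixup it deletes. The idiom, fully self-contained:

#include <linux/kernel.h>

struct base_chip {
	int ngpio;
};

struct my_chip {
	int bank;
	struct base_chip chip;		/* embedded generic object */
};

/* Walk back from the embedded member to the enclosing structure. */
static struct my_chip *to_my_chip(struct base_chip *bc)
{
	return container_of(bc, struct my_chip, chip);
}

This works for any embedded member, and is why the driver can chain banks found via irq_get_handler_data() without caring about the order in which they probed.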
+diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
+index cd87c0c37034..fc6fb5422b6f 100644
+--- a/drivers/regulator/core.c
++++ b/drivers/regulator/core.c
+@@ -1488,7 +1488,7 @@ struct regulator *regulator_get_optional(struct device *dev, const char *id)
+ }
+ EXPORT_SYMBOL_GPL(regulator_get_optional);
+ 
+-/* Locks held by regulator_put() */
++/* regulator_list_mutex lock held by regulator_put() */
+ static void _regulator_put(struct regulator *regulator)
+ {
+       struct regulator_dev *rdev;
+@@ -1503,12 +1503,14 @@ static void _regulator_put(struct regulator *regulator)
+       /* remove any sysfs entries */
+       if (regulator->dev)
+               sysfs_remove_link(&rdev->dev.kobj, regulator->supply_name);
++      mutex_lock(&rdev->mutex);
+       kfree(regulator->supply_name);
+       list_del(&regulator->list);
+       kfree(regulator);
+ 
+       rdev->open_count--;
+       rdev->exclusive = 0;
++      mutex_unlock(&rdev->mutex);
+ 
+       module_put(rdev->owner);
+ }
+diff --git a/drivers/rtc/rtc-s5m.c b/drivers/rtc/rtc-s5m.c
+index 8754c33361e8..28799d39db8e 100644
+--- a/drivers/rtc/rtc-s5m.c
++++ b/drivers/rtc/rtc-s5m.c
+@@ -832,6 +832,7 @@ static SIMPLE_DEV_PM_OPS(s5m_rtc_pm_ops, s5m_rtc_suspend, s5m_rtc_resume);
+ static const struct platform_device_id s5m_rtc_id[] = {
+       { "s5m-rtc",            S5M8767X },
+       { "s2mps14-rtc",        S2MPS14X },
++      { },
+ };
+ 
+ static struct platform_driver s5m_rtc_driver = {
+diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c
+index 46c6d58e1fda..efff55537d8a 100644
+--- a/drivers/spi/spi-dw-mid.c
++++ b/drivers/spi/spi-dw-mid.c
+@@ -219,7 +219,6 @@ int dw_spi_mid_init(struct dw_spi *dws)
+       iounmap(clk_reg);
+ 
+       dws->num_cs = 16;
+-      dws->fifo_len = 40;     /* FIFO has 40 words buffer */
+ 
+ #ifdef CONFIG_SPI_DW_MID_DMA
+       dws->dma_priv = kzalloc(sizeof(struct mid_dma), GFP_KERNEL);
+diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c
+index d0d5542efc06..1a0f266c4268 100644
+--- a/drivers/spi/spi-dw.c
++++ b/drivers/spi/spi-dw.c
+@@ -621,13 +621,13 @@ static void spi_hw_init(struct dw_spi *dws)
+       if (!dws->fifo_len) {
+               u32 fifo;
+ 
+-              for (fifo = 2; fifo <= 257; fifo++) {
++              for (fifo = 2; fifo <= 256; fifo++) {
+                       dw_writew(dws, DW_SPI_TXFLTR, fifo);
+                       if (fifo != dw_readw(dws, DW_SPI_TXFLTR))
+                               break;
+               }
+ 
+-              dws->fifo_len = (fifo == 257) ? 0 : fifo;
++              dws->fifo_len = (fifo == 2) ? 0 : fifo - 1;
+               dw_writew(dws, DW_SPI_TXFLTR, 0);
+       }
+ }
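The spi-dw fix is an off-by-one in capability probing: the loop writes increasing thresholds to TXFLTR until one fails to read back, so the first failing value is one past the last working threshold. The old code returned that failing value itself and treated loop exhaustion (257) rather than an immediate failure (2) as "no FIFO". A plain C rendering of the corrected logic against a mock register, with an assumed limit of 40:

static unsigned int mock_limit = 40;	/* highest threshold that sticks */
static unsigned int txfltr;

static void mock_write(unsigned int v)
{
	if (v <= mock_limit)		/* out-of-range writes don't latch */
		txfltr = v;
}

static unsigned int probe_fifo_len(void)
{
	unsigned int fifo;

	for (fifo = 2; fifo <= 256; fifo++) {
		mock_write(fifo);
		if (txfltr != fifo)	/* first value that didn't stick */
			break;
	}
	/* fifo == 2: nothing stuck, no FIFO; otherwise last good value */
	return (fifo == 2) ? 0 : fifo - 1;
}

With detection fixed, the hardcoded "dws->fifo_len = 40" in spi-dw-mid.c becomes redundant, which is why the companion hunk simply deletes it.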
+diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
+index 9e9e0f971e6c..d95656d05eb6 100644
+--- a/drivers/spi/spi-pxa2xx.c
++++ b/drivers/spi/spi-pxa2xx.c
+@@ -402,8 +402,8 @@ static void giveback(struct driver_data *drv_data)
+                       cs_deassert(drv_data);
+       }
+ 
+-      spi_finalize_current_message(drv_data->master);
+       drv_data->cur_chip = NULL;
++      spi_finalize_current_message(drv_data->master);
+ }
+ 
+ static void reset_sccr1(struct driver_data *drv_data)
+diff --git a/drivers/staging/lustre/lustre/llite/vvp_io.c b/drivers/staging/lustre/lustre/llite/vvp_io.c
+index d3f967a78138..1f453b275dbc 100644
+--- a/drivers/staging/lustre/lustre/llite/vvp_io.c
++++ b/drivers/staging/lustre/lustre/llite/vvp_io.c
+@@ -632,7 +632,7 @@ static int vvp_io_kernel_fault(struct vvp_fault_io *cfio)
+               return 0;
+       }
+ 
+-      if (cfio->fault.ft_flags & VM_FAULT_SIGBUS) {
++      if (cfio->fault.ft_flags & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) {
+               CDEBUG(D_PAGE, "got addr %p - SIGBUS\n", vmf->virtual_address);
+               return -EFAULT;
+       }
+diff --git a/drivers/usb/core/otg_whitelist.h b/drivers/usb/core/otg_whitelist.h
+index de0c9c9d7091..a6315abe7b7c 100644
+--- a/drivers/usb/core/otg_whitelist.h
++++ b/drivers/usb/core/otg_whitelist.h
+@@ -55,6 +55,11 @@ static int is_targeted(struct usb_device *dev)
+            le16_to_cpu(dev->descriptor.idProduct) == 0xbadd))
+               return 0;
+ 
++      /* OTG PET device is always targeted (see OTG 2.0 ECN 6.4.2) */
++      if ((le16_to_cpu(dev->descriptor.idVendor) == 0x1a0a &&
++           le16_to_cpu(dev->descriptor.idProduct) == 0x0200))
++              return 1;
++
+       /* NOTE: can't use usb_match_id() since interface caches
+        * aren't set up yet. this is cut/paste from that code.
+        */
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index 0ffb4ed0a945..41e510ae8c83 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -179,6 +179,10 @@ static const struct usb_device_id usb_quirk_list[] = {
+       { USB_DEVICE(0x0b05, 0x17e0), .driver_info =
+                       USB_QUIRK_IGNORE_REMOTE_WAKEUP },
+ 
++      /* Protocol and OTG Electrical Test Device */
++      { USB_DEVICE(0x1a0a, 0x0200), .driver_info =
++                      USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL },
++
+       { }  /* terminating entry must be last */
+ };
+ 
+diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
+index 11c7a9676441..8adb53044079 100644
+--- a/drivers/usb/storage/unusual_devs.h
++++ b/drivers/usb/storage/unusual_devs.h
+@@ -1995,6 +1995,13 @@ UNUSUAL_DEV(  0x152d, 0x2329, 0x0100, 0x0100,
+               USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+               US_FL_IGNORE_RESIDUE | US_FL_SANE_SENSE ),
+ 
++/* Reported by Dmitry Nezhevenko <d...@dion.org.ua> */
++UNUSUAL_DEV(  0x152d, 0x2566, 0x0114, 0x0114,
++              "JMicron",
++              "USB to ATA/ATAPI Bridge",
++              USB_SC_DEVICE, USB_PR_DEVICE, NULL,
++              US_FL_BROKEN_FUA ),
++
+ /* Entrega Technologies U1-SC25 (later Xircom PortGear PGSCSI)
+  * and Mac USB Dock USB-SCSI */
+ UNUSUAL_DEV(  0x1645, 0x0007, 0x0100, 0x0133,
+diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
+index 1f430bb02ca1..2706a434fdbb 100644
+--- a/drivers/usb/storage/unusual_uas.h
++++ b/drivers/usb/storage/unusual_uas.h
+@@ -138,3 +138,10 @@ UNUSUAL_DEV(0x4971, 0x1012, 0x0000, 0x9999,
+               "External HDD",
+               USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+               US_FL_IGNORE_UAS),
++
++/* Reported-by: Richard Henderson <r...@redhat.com> */
++UNUSUAL_DEV(0x4971, 0x8017, 0x0000, 0x9999,
++              "SimpleTech",
++              "External HDD",
++              USB_SC_DEVICE, USB_PR_DEVICE, NULL,
++              US_FL_NO_REPORT_OPCODES),
+diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
+index f3a9d831d0f9..c9d0d5a0e662 100644
+--- a/drivers/xen/swiotlb-xen.c
++++ b/drivers/xen/swiotlb-xen.c
+@@ -397,7 +397,9 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
+        * buffering it.
+        */
+       if (dma_capable(dev, dev_addr, size) &&
+-          !range_straddles_page_boundary(phys, size) && !swiotlb_force) {
++          !range_straddles_page_boundary(phys, size) &&
++              !xen_arch_need_swiotlb(dev, PFN_DOWN(phys), PFN_DOWN(dev_addr)) &&
++              !swiotlb_force) {
+               /* we are not interested in the dma_addr returned by
+                * xen_dma_map_page, only in the potential cache flushes executed
+                * by the function. */
+@@ -555,6 +557,7 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
+               dma_addr_t dev_addr = xen_phys_to_bus(paddr);
+ 
+               if (swiotlb_force ||
++                  xen_arch_need_swiotlb(hwdev, PFN_DOWN(paddr), PFN_DOWN(dev_addr)) ||
+                   !dma_capable(hwdev, dev_addr, sg->length) ||
+                   range_straddles_page_boundary(paddr, sg->length)) {
+                       phys_addr_t map = swiotlb_tbl_map_single(hwdev,
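
(Aside: the two swiotlb-xen hunks above add xen_arch_need_swiotlb() as one more reason to bounce a buffer. One of the pre-existing predicates, the page-straddle test, reduces to comparing page frame numbers. A standalone C sketch, with the caveat that the kernel's range_straddles_page_boundary() additionally accounts for Xen machine-frame contiguity:)

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12                   /* assumes 4 KiB pages */
    #define PFN_DOWN(x) ((x) >> PAGE_SHIFT)

    /* True when [phys, phys + size) crosses a page boundary. */
    static int straddles_page(uint64_t phys, uint64_t size)
    {
        return PFN_DOWN(phys) != PFN_DOWN(phys + size - 1);
    }

    int main(void)
    {
        printf("%d\n", straddles_page(0x1ff8, 16)); /* crosses 0x2000 -> 1 */
        printf("%d\n", straddles_page(0x1000, 16)); /* stays in one page -> 0 */
        return 0;
    }
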
+diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
+index 64b29f7f6b4c..dc482ffff659 100644
+--- a/fs/gfs2/quota.c
++++ b/fs/gfs2/quota.c
+@@ -667,7 +667,7 @@ static void do_qc(struct gfs2_quota_data *qd, s64 change)
+ 
+ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
+                            s64 change, struct gfs2_quota_data *qd,
+-                           struct fs_disk_quota *fdq)
++                           struct qc_dqblk *fdq)
+ {
+       struct inode *inode = &ip->i_inode;
+       struct gfs2_sbd *sdp = GFS2_SB(inode);
+@@ -697,16 +697,16 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
+       be64_add_cpu(&q.qu_value, change);
+       qd->qd_qb.qb_value = q.qu_value;
+       if (fdq) {
+-              if (fdq->d_fieldmask & FS_DQ_BSOFT) {
+-                      q.qu_warn = cpu_to_be64(fdq->d_blk_softlimit >> sdp->sd_fsb2bb_shift);
++              if (fdq->d_fieldmask & QC_SPC_SOFT) {
++                      q.qu_warn = cpu_to_be64(fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift);
+                       qd->qd_qb.qb_warn = q.qu_warn;
+               }
+-              if (fdq->d_fieldmask & FS_DQ_BHARD) {
+-                      q.qu_limit = cpu_to_be64(fdq->d_blk_hardlimit >> sdp->sd_fsb2bb_shift);
++              if (fdq->d_fieldmask & QC_SPC_HARD) {
++                      q.qu_limit = cpu_to_be64(fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift);
+                       qd->qd_qb.qb_limit = q.qu_limit;
+               }
+-              if (fdq->d_fieldmask & FS_DQ_BCOUNT) {
+-                      q.qu_value = cpu_to_be64(fdq->d_bcount >> sdp->sd_fsb2bb_shift);
++              if (fdq->d_fieldmask & QC_SPACE) {
++                      q.qu_value = cpu_to_be64(fdq->d_space >> sdp->sd_sb.sb_bsize_shift);
+                       qd->qd_qb.qb_value = q.qu_value;
+               }
+       }
+@@ -1502,7 +1502,7 @@ static int gfs2_quota_get_xstate(struct super_block *sb,
+ }
+ 
+ static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid,
+-                        struct fs_disk_quota *fdq)
++                        struct qc_dqblk *fdq)
+ {
+       struct gfs2_sbd *sdp = sb->s_fs_info;
+       struct gfs2_quota_lvb *qlvb;
+@@ -1510,7 +1510,7 @@ static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid,
+       struct gfs2_holder q_gh;
+       int error;
+ 
+-      memset(fdq, 0, sizeof(struct fs_disk_quota));
++      memset(fdq, 0, sizeof(*fdq));
+ 
+       if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
+               return -ESRCH; /* Crazy XFS error code */
+@@ -1527,12 +1527,9 @@ static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid,
+               goto out;
+ 
+       qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
+-      fdq->d_version = FS_DQUOT_VERSION;
+-      fdq->d_flags = (qid.type == USRQUOTA) ? FS_USER_QUOTA : FS_GROUP_QUOTA;
+-      fdq->d_id = from_kqid_munged(current_user_ns(), qid);
+-      fdq->d_blk_hardlimit = be64_to_cpu(qlvb->qb_limit) << sdp->sd_fsb2bb_shift;
+-      fdq->d_blk_softlimit = be64_to_cpu(qlvb->qb_warn) << sdp->sd_fsb2bb_shift;
+-      fdq->d_bcount = be64_to_cpu(qlvb->qb_value) << sdp->sd_fsb2bb_shift;
++      fdq->d_spc_hardlimit = be64_to_cpu(qlvb->qb_limit) << sdp->sd_sb.sb_bsize_shift;
++      fdq->d_spc_softlimit = be64_to_cpu(qlvb->qb_warn) << sdp->sd_sb.sb_bsize_shift;
++      fdq->d_space = be64_to_cpu(qlvb->qb_value) << sdp->sd_sb.sb_bsize_shift;
+ 
+       gfs2_glock_dq_uninit(&q_gh);
+ out:
+@@ -1541,10 +1538,10 @@ out:
+ }
+ 
+ /* GFS2 only supports a subset of the XFS fields */
+-#define GFS2_FIELDMASK (FS_DQ_BSOFT|FS_DQ_BHARD|FS_DQ_BCOUNT)
++#define GFS2_FIELDMASK (QC_SPC_SOFT|QC_SPC_HARD|QC_SPACE)
+ 
+ static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid,
+-                        struct fs_disk_quota *fdq)
++                        struct qc_dqblk *fdq)
+ {
+       struct gfs2_sbd *sdp = sb->s_fs_info;
+       struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
+@@ -1588,17 +1585,17 @@ static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid,
+               goto out_i;
+ 
+       /* If nothing has changed, this is a no-op */
+-      if ((fdq->d_fieldmask & FS_DQ_BSOFT) &&
+-          ((fdq->d_blk_softlimit >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_warn)))
+-              fdq->d_fieldmask ^= FS_DQ_BSOFT;
++      if ((fdq->d_fieldmask & QC_SPC_SOFT) &&
++          ((fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_warn)))
++              fdq->d_fieldmask ^= QC_SPC_SOFT;
+ 
+-      if ((fdq->d_fieldmask & FS_DQ_BHARD) &&
+-          ((fdq->d_blk_hardlimit >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_limit)))
+-              fdq->d_fieldmask ^= FS_DQ_BHARD;
++      if ((fdq->d_fieldmask & QC_SPC_HARD) &&
++          ((fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_limit)))
++              fdq->d_fieldmask ^= QC_SPC_HARD;
+ 
+-      if ((fdq->d_fieldmask & FS_DQ_BCOUNT) &&
+-          ((fdq->d_bcount >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_value)))
+-              fdq->d_fieldmask ^= FS_DQ_BCOUNT;
++      if ((fdq->d_fieldmask & QC_SPACE) &&
++          ((fdq->d_space >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_value)))
++              fdq->d_fieldmask ^= QC_SPACE;
+ 
+       if (fdq->d_fieldmask == 0)
+               goto out_i;
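
(Aside: the net effect of the GFS2 hunks is a change of units at the quota interface: limits used to arrive as 512-byte basic blocks and be scaled with sd_fsb2bb_shift; with struct qc_dqblk they arrive in bytes and are scaled with the superblock's sb_bsize_shift. A standalone C sketch of the new conversion, assuming a 4 KiB filesystem block size:)

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        unsigned sb_bsize_shift = 12;          /* assumed: 4 KiB blocks */
        uint64_t d_spc_hardlimit = 1ULL << 30; /* limit in bytes: 1 GiB */

        /* bytes -> filesystem blocks, as gfs2_adjust_quota() now does */
        uint64_t fs_blocks = d_spc_hardlimit >> sb_bsize_shift;
        printf("%llu blocks\n", (unsigned long long)fs_blocks); /* 262144 */
        return 0;
    }
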
+diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
+index 10bf07280f4a..294692ff83b1 100644
+--- a/fs/nfs/direct.c
++++ b/fs/nfs/direct.c
+@@ -212,6 +212,12 @@ static int nfs_direct_cmp_commit_data_verf(struct nfs_direct_req *dreq,
+  */
+ ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter, loff_t pos)
+ {
++      struct inode *inode = iocb->ki_filp->f_mapping->host;
++
++      /* we only support swap file calling nfs_direct_IO */
++      if (!IS_SWAPFILE(inode))
++              return 0;
++
+ #ifndef CONFIG_NFS_SWAP
+       dprintk("NFS: nfs_direct_IO (%pD) off/no(%Ld/%lu) EINVAL\n",
+                       iocb->ki_filp, (long long) pos, iter->nr_segs);
+diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
+index 9588873d4c46..368a6b72290c 100644
+--- a/fs/nfs/nfs4client.c
++++ b/fs/nfs/nfs4client.c
+@@ -640,7 +640,7 @@ int nfs41_walk_client_list(struct nfs_client *new,
+                       prev = pos;
+ 
+                       status = nfs_wait_client_init_complete(pos);
+-                      if (status == 0) {
++                      if (pos->cl_cons_state == NFS_CS_SESSION_INITING) {
+                               nfs4_schedule_lease_recovery(pos);
+                               status = nfs4_wait_clnt_recover(pos);
+                       }
+diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
+index 6b4527216a7f..9340228aff6e 100644
+--- a/fs/quota/dquot.c
++++ b/fs/quota/dquot.c
+@@ -2391,30 +2391,25 @@ static inline qsize_t stoqb(qsize_t space)
+ }
+ 
+ /* Generic routine for getting common part of quota structure */
+-static void do_get_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
++static void do_get_dqblk(struct dquot *dquot, struct qc_dqblk *di)
+ {
+       struct mem_dqblk *dm = &dquot->dq_dqb;
+ 
+       memset(di, 0, sizeof(*di));
+-      di->d_version = FS_DQUOT_VERSION;
+-      di->d_flags = dquot->dq_id.type == USRQUOTA ?
+-                      FS_USER_QUOTA : FS_GROUP_QUOTA;
+-      di->d_id = from_kqid_munged(current_user_ns(), dquot->dq_id);
+-
+       spin_lock(&dq_data_lock);
+-      di->d_blk_hardlimit = stoqb(dm->dqb_bhardlimit);
+-      di->d_blk_softlimit = stoqb(dm->dqb_bsoftlimit);
++      di->d_spc_hardlimit = dm->dqb_bhardlimit;
++      di->d_spc_softlimit = dm->dqb_bsoftlimit;
+       di->d_ino_hardlimit = dm->dqb_ihardlimit;
+       di->d_ino_softlimit = dm->dqb_isoftlimit;
+-      di->d_bcount = dm->dqb_curspace + dm->dqb_rsvspace;
+-      di->d_icount = dm->dqb_curinodes;
+-      di->d_btimer = dm->dqb_btime;
+-      di->d_itimer = dm->dqb_itime;
++      di->d_space = dm->dqb_curspace + dm->dqb_rsvspace;
++      di->d_ino_count = dm->dqb_curinodes;
++      di->d_spc_timer = dm->dqb_btime;
++      di->d_ino_timer = dm->dqb_itime;
+       spin_unlock(&dq_data_lock);
+ }
+ 
+ int dquot_get_dqblk(struct super_block *sb, struct kqid qid,
+-                  struct fs_disk_quota *di)
++                  struct qc_dqblk *di)
+ {
+       struct dquot *dquot;
+ 
+@@ -2428,70 +2423,70 @@ int dquot_get_dqblk(struct super_block *sb, struct kqid qid,
+ }
+ EXPORT_SYMBOL(dquot_get_dqblk);
+ 
+-#define VFS_FS_DQ_MASK \
+-      (FS_DQ_BCOUNT | FS_DQ_BSOFT | FS_DQ_BHARD | \
+-       FS_DQ_ICOUNT | FS_DQ_ISOFT | FS_DQ_IHARD | \
+-       FS_DQ_BTIMER | FS_DQ_ITIMER)
++#define VFS_QC_MASK \
++      (QC_SPACE | QC_SPC_SOFT | QC_SPC_HARD | \
++       QC_INO_COUNT | QC_INO_SOFT | QC_INO_HARD | \
++       QC_SPC_TIMER | QC_INO_TIMER)
+ 
+ /* Generic routine for setting common part of quota structure */
+-static int do_set_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
++static int do_set_dqblk(struct dquot *dquot, struct qc_dqblk *di)
+ {
+       struct mem_dqblk *dm = &dquot->dq_dqb;
+       int check_blim = 0, check_ilim = 0;
+       struct mem_dqinfo *dqi = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type];
+ 
+-      if (di->d_fieldmask & ~VFS_FS_DQ_MASK)
++      if (di->d_fieldmask & ~VFS_QC_MASK)
+               return -EINVAL;
+ 
+-      if (((di->d_fieldmask & FS_DQ_BSOFT) &&
+-           (di->d_blk_softlimit > dqi->dqi_maxblimit)) ||
+-          ((di->d_fieldmask & FS_DQ_BHARD) &&
+-           (di->d_blk_hardlimit > dqi->dqi_maxblimit)) ||
+-          ((di->d_fieldmask & FS_DQ_ISOFT) &&
++      if (((di->d_fieldmask & QC_SPC_SOFT) &&
++           stoqb(di->d_spc_softlimit) > dqi->dqi_maxblimit) ||
++          ((di->d_fieldmask & QC_SPC_HARD) &&
++           stoqb(di->d_spc_hardlimit) > dqi->dqi_maxblimit) ||
++          ((di->d_fieldmask & QC_INO_SOFT) &&
+            (di->d_ino_softlimit > dqi->dqi_maxilimit)) ||
+-          ((di->d_fieldmask & FS_DQ_IHARD) &&
++          ((di->d_fieldmask & QC_INO_HARD) &&
+            (di->d_ino_hardlimit > dqi->dqi_maxilimit)))
+               return -ERANGE;
+ 
+       spin_lock(&dq_data_lock);
+-      if (di->d_fieldmask & FS_DQ_BCOUNT) {
+-              dm->dqb_curspace = di->d_bcount - dm->dqb_rsvspace;
++      if (di->d_fieldmask & QC_SPACE) {
++              dm->dqb_curspace = di->d_space - dm->dqb_rsvspace;
+               check_blim = 1;
+               set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags);
+       }
+ 
+-      if (di->d_fieldmask & FS_DQ_BSOFT)
+-              dm->dqb_bsoftlimit = qbtos(di->d_blk_softlimit);
+-      if (di->d_fieldmask & FS_DQ_BHARD)
+-              dm->dqb_bhardlimit = qbtos(di->d_blk_hardlimit);
+-      if (di->d_fieldmask & (FS_DQ_BSOFT | FS_DQ_BHARD)) {
++      if (di->d_fieldmask & QC_SPC_SOFT)
++              dm->dqb_bsoftlimit = di->d_spc_softlimit;
++      if (di->d_fieldmask & QC_SPC_HARD)
++              dm->dqb_bhardlimit = di->d_spc_hardlimit;
++      if (di->d_fieldmask & (QC_SPC_SOFT | QC_SPC_HARD)) {
+               check_blim = 1;
+               set_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags);
+       }
+ 
+-      if (di->d_fieldmask & FS_DQ_ICOUNT) {
+-              dm->dqb_curinodes = di->d_icount;
++      if (di->d_fieldmask & QC_INO_COUNT) {
++              dm->dqb_curinodes = di->d_ino_count;
+               check_ilim = 1;
+               set_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags);
+       }
+ 
+-      if (di->d_fieldmask & FS_DQ_ISOFT)
++      if (di->d_fieldmask & QC_INO_SOFT)
+               dm->dqb_isoftlimit = di->d_ino_softlimit;
+-      if (di->d_fieldmask & FS_DQ_IHARD)
++      if (di->d_fieldmask & QC_INO_HARD)
+               dm->dqb_ihardlimit = di->d_ino_hardlimit;
+-      if (di->d_fieldmask & (FS_DQ_ISOFT | FS_DQ_IHARD)) {
++      if (di->d_fieldmask & (QC_INO_SOFT | QC_INO_HARD)) {
+               check_ilim = 1;
+               set_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags);
+       }
+ 
+-      if (di->d_fieldmask & FS_DQ_BTIMER) {
+-              dm->dqb_btime = di->d_btimer;
++      if (di->d_fieldmask & QC_SPC_TIMER) {
++              dm->dqb_btime = di->d_spc_timer;
+               check_blim = 1;
+               set_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags);
+       }
+ 
+-      if (di->d_fieldmask & FS_DQ_ITIMER) {
+-              dm->dqb_itime = di->d_itimer;
++      if (di->d_fieldmask & QC_INO_TIMER) {
++              dm->dqb_itime = di->d_ino_timer;
+               check_ilim = 1;
+               set_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags);
+       }
+@@ -2501,7 +2496,7 @@ static int do_set_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
+                   dm->dqb_curspace < dm->dqb_bsoftlimit) {
+                       dm->dqb_btime = 0;
+                       clear_bit(DQ_BLKS_B, &dquot->dq_flags);
+-              } else if (!(di->d_fieldmask & FS_DQ_BTIMER))
++              } else if (!(di->d_fieldmask & QC_SPC_TIMER))
+                       /* Set grace only if user hasn't provided his own... */
+                       dm->dqb_btime = get_seconds() + dqi->dqi_bgrace;
+       }
+@@ -2510,7 +2505,7 @@ static int do_set_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
+                   dm->dqb_curinodes < dm->dqb_isoftlimit) {
+                       dm->dqb_itime = 0;
+                       clear_bit(DQ_INODES_B, &dquot->dq_flags);
+-              } else if (!(di->d_fieldmask & FS_DQ_ITIMER))
++              } else if (!(di->d_fieldmask & QC_INO_TIMER))
+                       /* Set grace only if user hasn't provided his own... */
+                       dm->dqb_itime = get_seconds() + dqi->dqi_igrace;
+       }
+@@ -2526,7 +2521,7 @@ static int do_set_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
+ }
+ 
+ int dquot_set_dqblk(struct super_block *sb, struct kqid qid,
+-                struct fs_disk_quota *di)
++                struct qc_dqblk *di)
+ {
+       struct dquot *dquot;
+       int rc;
+diff --git a/fs/quota/quota.c b/fs/quota/quota.c
+index 75621649dbd7..2ce66201c366 100644
+--- a/fs/quota/quota.c
++++ b/fs/quota/quota.c
+@@ -115,17 +115,27 @@ static int quota_setinfo(struct super_block *sb, int type, void __user *addr)
+       return sb->s_qcop->set_info(sb, type, &info);
+ }
+ 
+-static void copy_to_if_dqblk(struct if_dqblk *dst, struct fs_disk_quota *src)
++static inline qsize_t qbtos(qsize_t blocks)
++{
++      return blocks << QIF_DQBLKSIZE_BITS;
++}
++
++static inline qsize_t stoqb(qsize_t space)
++{
++      return (space + QIF_DQBLKSIZE - 1) >> QIF_DQBLKSIZE_BITS;
++}
++
++static void copy_to_if_dqblk(struct if_dqblk *dst, struct qc_dqblk *src)
+ {
+       memset(dst, 0, sizeof(*dst));
+-      dst->dqb_bhardlimit = src->d_blk_hardlimit;
+-      dst->dqb_bsoftlimit = src->d_blk_softlimit;
+-      dst->dqb_curspace = src->d_bcount;
++      dst->dqb_bhardlimit = stoqb(src->d_spc_hardlimit);
++      dst->dqb_bsoftlimit = stoqb(src->d_spc_softlimit);
++      dst->dqb_curspace = src->d_space;
+       dst->dqb_ihardlimit = src->d_ino_hardlimit;
+       dst->dqb_isoftlimit = src->d_ino_softlimit;
+-      dst->dqb_curinodes = src->d_icount;
+-      dst->dqb_btime = src->d_btimer;
+-      dst->dqb_itime = src->d_itimer;
++      dst->dqb_curinodes = src->d_ino_count;
++      dst->dqb_btime = src->d_spc_timer;
++      dst->dqb_itime = src->d_ino_timer;
+       dst->dqb_valid = QIF_ALL;
+ }
+ 
+@@ -133,7 +143,7 @@ static int quota_getquota(struct super_block *sb, int type, qid_t id,
+                         void __user *addr)
+ {
+       struct kqid qid;
+-      struct fs_disk_quota fdq;
++      struct qc_dqblk fdq;
+       struct if_dqblk idq;
+       int ret;
+ 
+@@ -151,36 +161,36 @@ static int quota_getquota(struct super_block *sb, int type, qid_t id,
+       return 0;
+ }
+ 
+-static void copy_from_if_dqblk(struct fs_disk_quota *dst, struct if_dqblk *src)
++static void copy_from_if_dqblk(struct qc_dqblk *dst, struct if_dqblk *src)
+ {
+-      dst->d_blk_hardlimit = src->dqb_bhardlimit;
+-      dst->d_blk_softlimit  = src->dqb_bsoftlimit;
+-      dst->d_bcount = src->dqb_curspace;
++      dst->d_spc_hardlimit = qbtos(src->dqb_bhardlimit);
++      dst->d_spc_softlimit = qbtos(src->dqb_bsoftlimit);
++      dst->d_space = src->dqb_curspace;
+       dst->d_ino_hardlimit = src->dqb_ihardlimit;
+       dst->d_ino_softlimit = src->dqb_isoftlimit;
+-      dst->d_icount = src->dqb_curinodes;
+-      dst->d_btimer = src->dqb_btime;
+-      dst->d_itimer = src->dqb_itime;
++      dst->d_ino_count = src->dqb_curinodes;
++      dst->d_spc_timer = src->dqb_btime;
++      dst->d_ino_timer = src->dqb_itime;
+ 
+       dst->d_fieldmask = 0;
+       if (src->dqb_valid & QIF_BLIMITS)
+-              dst->d_fieldmask |= FS_DQ_BSOFT | FS_DQ_BHARD;
++              dst->d_fieldmask |= QC_SPC_SOFT | QC_SPC_HARD;
+       if (src->dqb_valid & QIF_SPACE)
+-              dst->d_fieldmask |= FS_DQ_BCOUNT;
++              dst->d_fieldmask |= QC_SPACE;
+       if (src->dqb_valid & QIF_ILIMITS)
+-              dst->d_fieldmask |= FS_DQ_ISOFT | FS_DQ_IHARD;
++              dst->d_fieldmask |= QC_INO_SOFT | QC_INO_HARD;
+       if (src->dqb_valid & QIF_INODES)
+-              dst->d_fieldmask |= FS_DQ_ICOUNT;
++              dst->d_fieldmask |= QC_INO_COUNT;
+       if (src->dqb_valid & QIF_BTIME)
+-              dst->d_fieldmask |= FS_DQ_BTIMER;
++              dst->d_fieldmask |= QC_SPC_TIMER;
+       if (src->dqb_valid & QIF_ITIME)
+-              dst->d_fieldmask |= FS_DQ_ITIMER;
++              dst->d_fieldmask |= QC_INO_TIMER;
+ }
+ 
+ static int quota_setquota(struct super_block *sb, int type, qid_t id,
+                         void __user *addr)
+ {
+-      struct fs_disk_quota fdq;
++      struct qc_dqblk fdq;
+       struct if_dqblk idq;
+       struct kqid qid;
+ 
+@@ -244,10 +254,78 @@ static int quota_getxstatev(struct super_block *sb, void __user *addr)
+       return ret;
+ }
+ 
++/*
++ * XFS defines BBTOB and BTOBB macros inside fs/xfs/ and we cannot move them
++ * out of there as xfsprogs rely on definitions being in that header file. So
++ * just define same functions here for quota purposes.
++ */
++#define XFS_BB_SHIFT 9
++
++static inline u64 quota_bbtob(u64 blocks)
++{
++      return blocks << XFS_BB_SHIFT;
++}
++
++static inline u64 quota_btobb(u64 bytes)
++{
++      return (bytes + (1 << XFS_BB_SHIFT) - 1) >> XFS_BB_SHIFT;
++}
++
++static void copy_from_xfs_dqblk(struct qc_dqblk *dst, struct fs_disk_quota *src)
++{
++      dst->d_spc_hardlimit = quota_bbtob(src->d_blk_hardlimit);
++      dst->d_spc_softlimit = quota_bbtob(src->d_blk_softlimit);
++      dst->d_ino_hardlimit = src->d_ino_hardlimit;
++      dst->d_ino_softlimit = src->d_ino_softlimit;
++      dst->d_space = quota_bbtob(src->d_bcount);
++      dst->d_ino_count = src->d_icount;
++      dst->d_ino_timer = src->d_itimer;
++      dst->d_spc_timer = src->d_btimer;
++      dst->d_ino_warns = src->d_iwarns;
++      dst->d_spc_warns = src->d_bwarns;
++      dst->d_rt_spc_hardlimit = quota_bbtob(src->d_rtb_hardlimit);
++      dst->d_rt_spc_softlimit = quota_bbtob(src->d_rtb_softlimit);
++      dst->d_rt_space = quota_bbtob(src->d_rtbcount);
++      dst->d_rt_spc_timer = src->d_rtbtimer;
++      dst->d_rt_spc_warns = src->d_rtbwarns;
++      dst->d_fieldmask = 0;
++      if (src->d_fieldmask & FS_DQ_ISOFT)
++              dst->d_fieldmask |= QC_INO_SOFT;
++      if (src->d_fieldmask & FS_DQ_IHARD)
++              dst->d_fieldmask |= QC_INO_HARD;
++      if (src->d_fieldmask & FS_DQ_BSOFT)
++              dst->d_fieldmask |= QC_SPC_SOFT;
++      if (src->d_fieldmask & FS_DQ_BHARD)
++              dst->d_fieldmask |= QC_SPC_HARD;
++      if (src->d_fieldmask & FS_DQ_RTBSOFT)
++              dst->d_fieldmask |= QC_RT_SPC_SOFT;
++      if (src->d_fieldmask & FS_DQ_RTBHARD)
++              dst->d_fieldmask |= QC_RT_SPC_HARD;
++      if (src->d_fieldmask & FS_DQ_BTIMER)
++              dst->d_fieldmask |= QC_SPC_TIMER;
++      if (src->d_fieldmask & FS_DQ_ITIMER)
++              dst->d_fieldmask |= QC_INO_TIMER;
++      if (src->d_fieldmask & FS_DQ_RTBTIMER)
++              dst->d_fieldmask |= QC_RT_SPC_TIMER;
++      if (src->d_fieldmask & FS_DQ_BWARNS)
++              dst->d_fieldmask |= QC_SPC_WARNS;
++      if (src->d_fieldmask & FS_DQ_IWARNS)
++              dst->d_fieldmask |= QC_INO_WARNS;
++      if (src->d_fieldmask & FS_DQ_RTBWARNS)
++              dst->d_fieldmask |= QC_RT_SPC_WARNS;
++      if (src->d_fieldmask & FS_DQ_BCOUNT)
++              dst->d_fieldmask |= QC_SPACE;
++      if (src->d_fieldmask & FS_DQ_ICOUNT)
++              dst->d_fieldmask |= QC_INO_COUNT;
++      if (src->d_fieldmask & FS_DQ_RTBCOUNT)
++              dst->d_fieldmask |= QC_RT_SPACE;
++}
++
+ static int quota_setxquota(struct super_block *sb, int type, qid_t id,
+                          void __user *addr)
+ {
+       struct fs_disk_quota fdq;
++      struct qc_dqblk qdq;
+       struct kqid qid;
+ 
+       if (copy_from_user(&fdq, addr, sizeof(fdq)))
+@@ -257,13 +335,44 @@ static int quota_setxquota(struct super_block *sb, int type, qid_t id,
+       qid = make_kqid(current_user_ns(), type, id);
+       if (!qid_valid(qid))
+               return -EINVAL;
+-      return sb->s_qcop->set_dqblk(sb, qid, &fdq);
++      copy_from_xfs_dqblk(&qdq, &fdq);
++      return sb->s_qcop->set_dqblk(sb, qid, &qdq);
++}
++
++static void copy_to_xfs_dqblk(struct fs_disk_quota *dst, struct qc_dqblk *src,
++                            int type, qid_t id)
++{
++      memset(dst, 0, sizeof(*dst));
++      dst->d_version = FS_DQUOT_VERSION;
++      dst->d_id = id;
++      if (type == USRQUOTA)
++              dst->d_flags = FS_USER_QUOTA;
++      else if (type == PRJQUOTA)
++              dst->d_flags = FS_PROJ_QUOTA;
++      else
++              dst->d_flags = FS_GROUP_QUOTA;
++      dst->d_blk_hardlimit = quota_btobb(src->d_spc_hardlimit);
++      dst->d_blk_softlimit = quota_btobb(src->d_spc_softlimit);
++      dst->d_ino_hardlimit = src->d_ino_hardlimit;
++      dst->d_ino_softlimit = src->d_ino_softlimit;
++      dst->d_bcount = quota_btobb(src->d_space);
++      dst->d_icount = src->d_ino_count;
++      dst->d_itimer = src->d_ino_timer;
++      dst->d_btimer = src->d_spc_timer;
++      dst->d_iwarns = src->d_ino_warns;
++      dst->d_bwarns = src->d_spc_warns;
++      dst->d_rtb_hardlimit = quota_btobb(src->d_rt_spc_hardlimit);
++      dst->d_rtb_softlimit = quota_btobb(src->d_rt_spc_softlimit);
++      dst->d_rtbcount = quota_btobb(src->d_rt_space);
++      dst->d_rtbtimer = src->d_rt_spc_timer;
++      dst->d_rtbwarns = src->d_rt_spc_warns;
+ }
+ 
+ static int quota_getxquota(struct super_block *sb, int type, qid_t id,
+                          void __user *addr)
+ {
+       struct fs_disk_quota fdq;
++      struct qc_dqblk qdq;
+       struct kqid qid;
+       int ret;
+ 
+@@ -272,8 +381,11 @@ static int quota_getxquota(struct super_block *sb, int type, qid_t id,
+       qid = make_kqid(current_user_ns(), type, id);
+       if (!qid_valid(qid))
+               return -EINVAL;
+-      ret = sb->s_qcop->get_dqblk(sb, qid, &fdq);
+-      if (!ret && copy_to_user(addr, &fdq, sizeof(fdq)))
++      ret = sb->s_qcop->get_dqblk(sb, qid, &qdq);
++      if (ret)
++              return ret;
++      copy_to_xfs_dqblk(&fdq, &qdq, type, id);
++      if (copy_to_user(addr, &fdq, sizeof(fdq)))
+               return -EFAULT;
+       return ret;
+ }
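
(Aside: the quota_bbtob()/quota_btobb() helpers added above convert between bytes and XFS 512-byte basic blocks, rounding partially used blocks up. A standalone C copy of the two helpers, runnable as-is:)

    #include <stdio.h>
    #include <stdint.h>

    #define XFS_BB_SHIFT 9  /* a "basic block" is 512 bytes */

    static uint64_t quota_bbtob(uint64_t blocks)
    {
        return blocks << XFS_BB_SHIFT;
    }

    static uint64_t quota_btobb(uint64_t bytes)
    {
        /* round up so a partial block still counts as a whole one */
        return (bytes + (1 << XFS_BB_SHIFT) - 1) >> XFS_BB_SHIFT;
    }

    int main(void)
    {
        printf("%llu\n", (unsigned long long)quota_bbtob(8));    /* 4096 */
        printf("%llu\n", (unsigned long long)quota_btobb(4097)); /* 9 */
        return 0;
    }
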
+diff --git a/fs/udf/file.c b/fs/udf/file.c
+index bb15771b92ae..08f3555fbeac 100644
+--- a/fs/udf/file.c
++++ b/fs/udf/file.c
+@@ -224,7 +224,7 @@ out:
+ static int udf_release_file(struct inode *inode, struct file *filp)
+ {
+       if (filp->f_mode & FMODE_WRITE &&
+-          atomic_read(&inode->i_writecount) > 1) {
++          atomic_read(&inode->i_writecount) == 1) {
+               /*
+                * Grab i_mutex to avoid races with writes changing i_size
+                * while we are running.
+diff --git a/fs/xfs/xfs_qm.h b/fs/xfs/xfs_qm.h
+index 3a07a937e232..41f6c0b9d51c 100644
+--- a/fs/xfs/xfs_qm.h
++++ b/fs/xfs/xfs_qm.h
+@@ -166,9 +166,9 @@ extern void                xfs_qm_dqrele_all_inodes(struct xfs_mount *, uint);
+ /* quota ops */
+ extern int            xfs_qm_scall_trunc_qfiles(struct xfs_mount *, uint);
+ extern int            xfs_qm_scall_getquota(struct xfs_mount *, xfs_dqid_t,
+-                                      uint, struct fs_disk_quota *);
++                                      uint, struct qc_dqblk *);
+extern int            xfs_qm_scall_setqlim(struct xfs_mount *, xfs_dqid_t, uint,
+-                                      struct fs_disk_quota *);
++                                      struct qc_dqblk *);
+ extern int            xfs_qm_scall_getqstat(struct xfs_mount *,
+                                       struct fs_quota_stat *);
+ extern int            xfs_qm_scall_getqstatv(struct xfs_mount *,
+diff --git a/fs/xfs/xfs_qm_syscalls.c b/fs/xfs/xfs_qm_syscalls.c
+index 80f2d77d929a..327f85abbea8 100644
+--- a/fs/xfs/xfs_qm_syscalls.c
++++ b/fs/xfs/xfs_qm_syscalls.c
+@@ -40,7 +40,6 @@ STATIC int   xfs_qm_log_quotaoff(xfs_mount_t *, xfs_qoff_logitem_t **, uint);
+ STATIC int    xfs_qm_log_quotaoff_end(xfs_mount_t *, xfs_qoff_logitem_t *,
+                                       uint);
+ STATIC uint   xfs_qm_export_flags(uint);
+-STATIC uint   xfs_qm_export_qtype_flags(uint);
+ 
+ /*
+  * Turn off quota accounting and/or enforcement for all udquots and/or
+@@ -574,8 +573,8 @@ xfs_qm_scall_getqstatv(
+       return 0;
+ }
+ 
+-#define XFS_DQ_MASK \
+-      (FS_DQ_LIMIT_MASK | FS_DQ_TIMER_MASK | FS_DQ_WARNS_MASK)
++#define XFS_QC_MASK \
++      (QC_LIMIT_MASK | QC_TIMER_MASK | QC_WARNS_MASK)
+ 
+ /*
+  * Adjust quota limits, and start/stop timers accordingly.
+@@ -585,7 +584,7 @@ xfs_qm_scall_setqlim(
+       struct xfs_mount        *mp,
+       xfs_dqid_t              id,
+       uint                    type,
+-      fs_disk_quota_t         *newlim)
++      struct qc_dqblk         *newlim)
+ {
+       struct xfs_quotainfo    *q = mp->m_quotainfo;
+       struct xfs_disk_dquot   *ddq;
+@@ -594,9 +593,9 @@ xfs_qm_scall_setqlim(
+       int                     error;
+       xfs_qcnt_t              hard, soft;
+ 
+-      if (newlim->d_fieldmask & ~XFS_DQ_MASK)
++      if (newlim->d_fieldmask & ~XFS_QC_MASK)
+               return -EINVAL;
+-      if ((newlim->d_fieldmask & XFS_DQ_MASK) == 0)
++      if ((newlim->d_fieldmask & XFS_QC_MASK) == 0)
+               return 0;
+ 
+       /*
+@@ -634,11 +633,11 @@ xfs_qm_scall_setqlim(
+       /*
+        * Make sure that hardlimits are >= soft limits before changing.
+        */
+-      hard = (newlim->d_fieldmask & FS_DQ_BHARD) ?
+-              (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_blk_hardlimit) :
++      hard = (newlim->d_fieldmask & QC_SPC_HARD) ?
++              (xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_spc_hardlimit) :
+                       be64_to_cpu(ddq->d_blk_hardlimit);
+-      soft = (newlim->d_fieldmask & FS_DQ_BSOFT) ?
+-              (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_blk_softlimit) :
++      soft = (newlim->d_fieldmask & QC_SPC_SOFT) ?
++              (xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_spc_softlimit) :
+                       be64_to_cpu(ddq->d_blk_softlimit);
+       if (hard == 0 || hard >= soft) {
+               ddq->d_blk_hardlimit = cpu_to_be64(hard);
+@@ -651,11 +650,11 @@ xfs_qm_scall_setqlim(
+       } else {
+               xfs_debug(mp, "blkhard %Ld < blksoft %Ld", hard, soft);
+       }
+-      hard = (newlim->d_fieldmask & FS_DQ_RTBHARD) ?
+-              (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_hardlimit) :
++      hard = (newlim->d_fieldmask & QC_RT_SPC_HARD) ?
++              (xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_rt_spc_hardlimit) :
+                       be64_to_cpu(ddq->d_rtb_hardlimit);
+-      soft = (newlim->d_fieldmask & FS_DQ_RTBSOFT) ?
+-              (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_softlimit) :
++      soft = (newlim->d_fieldmask & QC_RT_SPC_SOFT) ?
++              (xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_rt_spc_softlimit) :
+                       be64_to_cpu(ddq->d_rtb_softlimit);
+       if (hard == 0 || hard >= soft) {
+               ddq->d_rtb_hardlimit = cpu_to_be64(hard);
+@@ -668,10 +667,10 @@ xfs_qm_scall_setqlim(
+               xfs_debug(mp, "rtbhard %Ld < rtbsoft %Ld", hard, soft);
+       }
+ 
+-      hard = (newlim->d_fieldmask & FS_DQ_IHARD) ?
++      hard = (newlim->d_fieldmask & QC_INO_HARD) ?
+               (xfs_qcnt_t) newlim->d_ino_hardlimit :
+                       be64_to_cpu(ddq->d_ino_hardlimit);
+-      soft = (newlim->d_fieldmask & FS_DQ_ISOFT) ?
++      soft = (newlim->d_fieldmask & QC_INO_SOFT) ?
+               (xfs_qcnt_t) newlim->d_ino_softlimit :
+                       be64_to_cpu(ddq->d_ino_softlimit);
+       if (hard == 0 || hard >= soft) {
+@@ -688,12 +687,12 @@ xfs_qm_scall_setqlim(
+       /*
+        * Update warnings counter(s) if requested
+        */
+-      if (newlim->d_fieldmask & FS_DQ_BWARNS)
+-              ddq->d_bwarns = cpu_to_be16(newlim->d_bwarns);
+-      if (newlim->d_fieldmask & FS_DQ_IWARNS)
+-              ddq->d_iwarns = cpu_to_be16(newlim->d_iwarns);
+-      if (newlim->d_fieldmask & FS_DQ_RTBWARNS)
+-              ddq->d_rtbwarns = cpu_to_be16(newlim->d_rtbwarns);
++      if (newlim->d_fieldmask & QC_SPC_WARNS)
++              ddq->d_bwarns = cpu_to_be16(newlim->d_spc_warns);
++      if (newlim->d_fieldmask & QC_INO_WARNS)
++              ddq->d_iwarns = cpu_to_be16(newlim->d_ino_warns);
++      if (newlim->d_fieldmask & QC_RT_SPC_WARNS)
++              ddq->d_rtbwarns = cpu_to_be16(newlim->d_rt_spc_warns);
+ 
+       if (id == 0) {
+               /*
+@@ -703,24 +702,24 @@ xfs_qm_scall_setqlim(
+                * soft and hard limit values (already done, above), and
+                * for warnings.
+                */
+-              if (newlim->d_fieldmask & FS_DQ_BTIMER) {
+-                      q->qi_btimelimit = newlim->d_btimer;
+-                      ddq->d_btimer = cpu_to_be32(newlim->d_btimer);
++              if (newlim->d_fieldmask & QC_SPC_TIMER) {
++                      q->qi_btimelimit = newlim->d_spc_timer;
++                      ddq->d_btimer = cpu_to_be32(newlim->d_spc_timer);
+               }
+-              if (newlim->d_fieldmask & FS_DQ_ITIMER) {
+-                      q->qi_itimelimit = newlim->d_itimer;
+-                      ddq->d_itimer = cpu_to_be32(newlim->d_itimer);
++              if (newlim->d_fieldmask & QC_INO_TIMER) {
++                      q->qi_itimelimit = newlim->d_ino_timer;
++                      ddq->d_itimer = cpu_to_be32(newlim->d_ino_timer);
+               }
+-              if (newlim->d_fieldmask & FS_DQ_RTBTIMER) {
+-                      q->qi_rtbtimelimit = newlim->d_rtbtimer;
+-                      ddq->d_rtbtimer = cpu_to_be32(newlim->d_rtbtimer);
++              if (newlim->d_fieldmask & QC_RT_SPC_TIMER) {
++                      q->qi_rtbtimelimit = newlim->d_rt_spc_timer;
++                      ddq->d_rtbtimer = cpu_to_be32(newlim->d_rt_spc_timer);
+               }
+-              if (newlim->d_fieldmask & FS_DQ_BWARNS)
+-                      q->qi_bwarnlimit = newlim->d_bwarns;
+-              if (newlim->d_fieldmask & FS_DQ_IWARNS)
+-                      q->qi_iwarnlimit = newlim->d_iwarns;
+-              if (newlim->d_fieldmask & FS_DQ_RTBWARNS)
+-                      q->qi_rtbwarnlimit = newlim->d_rtbwarns;
++              if (newlim->d_fieldmask & QC_SPC_WARNS)
++                      q->qi_bwarnlimit = newlim->d_spc_warns;
++              if (newlim->d_fieldmask & QC_INO_WARNS)
++                      q->qi_iwarnlimit = newlim->d_ino_warns;
++              if (newlim->d_fieldmask & QC_RT_SPC_WARNS)
++                      q->qi_rtbwarnlimit = newlim->d_rt_spc_warns;
+       } else {
+               /*
+                * If the user is now over quota, start the timelimit.
+@@ -831,7 +830,7 @@ xfs_qm_scall_getquota(
+       struct xfs_mount        *mp,
+       xfs_dqid_t              id,
+       uint                    type,
+-      struct fs_disk_quota    *dst)
++      struct qc_dqblk         *dst)
+ {
+       struct xfs_dquot        *dqp;
+       int                     error;
+@@ -855,28 +854,25 @@ xfs_qm_scall_getquota(
+       }
+ 
+       memset(dst, 0, sizeof(*dst));
+-      dst->d_version = FS_DQUOT_VERSION;
+-      dst->d_flags = xfs_qm_export_qtype_flags(dqp->q_core.d_flags);
+-      dst->d_id = be32_to_cpu(dqp->q_core.d_id);
+-      dst->d_blk_hardlimit =
+-              XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_blk_hardlimit));
+-      dst->d_blk_softlimit =
+-              XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_blk_softlimit));
++      dst->d_spc_hardlimit =
++              XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_blk_hardlimit));
++      dst->d_spc_softlimit =
++              XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_blk_softlimit));
+       dst->d_ino_hardlimit = be64_to_cpu(dqp->q_core.d_ino_hardlimit);
+       dst->d_ino_softlimit = be64_to_cpu(dqp->q_core.d_ino_softlimit);
+-      dst->d_bcount = XFS_FSB_TO_BB(mp, dqp->q_res_bcount);
+-      dst->d_icount = dqp->q_res_icount;
+-      dst->d_btimer = be32_to_cpu(dqp->q_core.d_btimer);
+-      dst->d_itimer = be32_to_cpu(dqp->q_core.d_itimer);
+-      dst->d_iwarns = be16_to_cpu(dqp->q_core.d_iwarns);
+-      dst->d_bwarns = be16_to_cpu(dqp->q_core.d_bwarns);
+-      dst->d_rtb_hardlimit =
+-              XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_rtb_hardlimit));
+-      dst->d_rtb_softlimit =
+-              XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_rtb_softlimit));
+-      dst->d_rtbcount = XFS_FSB_TO_BB(mp, dqp->q_res_rtbcount);
+-      dst->d_rtbtimer = be32_to_cpu(dqp->q_core.d_rtbtimer);
+-      dst->d_rtbwarns = be16_to_cpu(dqp->q_core.d_rtbwarns);
++      dst->d_space = XFS_FSB_TO_B(mp, dqp->q_res_bcount);
++      dst->d_ino_count = dqp->q_res_icount;
++      dst->d_spc_timer = be32_to_cpu(dqp->q_core.d_btimer);
++      dst->d_ino_timer = be32_to_cpu(dqp->q_core.d_itimer);
++      dst->d_ino_warns = be16_to_cpu(dqp->q_core.d_iwarns);
++      dst->d_spc_warns = be16_to_cpu(dqp->q_core.d_bwarns);
++      dst->d_rt_spc_hardlimit =
++              XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_rtb_hardlimit));
++      dst->d_rt_spc_softlimit =
++              XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_rtb_softlimit));
++      dst->d_rt_space = XFS_FSB_TO_B(mp, dqp->q_res_rtbcount);
++      dst->d_rt_spc_timer = be32_to_cpu(dqp->q_core.d_rtbtimer);
++      dst->d_rt_spc_warns = be16_to_cpu(dqp->q_core.d_rtbwarns);
+ 
+       /*
+        * Internally, we don't reset all the timers when quota enforcement
+@@ -889,23 +885,23 @@ xfs_qm_scall_getquota(
+            dqp->q_core.d_flags == XFS_DQ_GROUP) ||
+           (!XFS_IS_PQUOTA_ENFORCED(mp) &&
+            dqp->q_core.d_flags == XFS_DQ_PROJ)) {
+-              dst->d_btimer = 0;
+-              dst->d_itimer = 0;
+-              dst->d_rtbtimer = 0;
++              dst->d_spc_timer = 0;
++              dst->d_ino_timer = 0;
++              dst->d_rt_spc_timer = 0;
+       }
+ 
+ #ifdef DEBUG
+-      if (((XFS_IS_UQUOTA_ENFORCED(mp) && dst->d_flags == FS_USER_QUOTA) ||
+-           (XFS_IS_GQUOTA_ENFORCED(mp) && dst->d_flags == FS_GROUP_QUOTA) ||
+-           (XFS_IS_PQUOTA_ENFORCED(mp) && dst->d_flags == FS_PROJ_QUOTA)) &&
+-          dst->d_id != 0) {
+-              if ((dst->d_bcount > dst->d_blk_softlimit) &&
+-                  (dst->d_blk_softlimit > 0)) {
+-                      ASSERT(dst->d_btimer != 0);
++      if (((XFS_IS_UQUOTA_ENFORCED(mp) && type == XFS_DQ_USER) ||
++           (XFS_IS_GQUOTA_ENFORCED(mp) && type == XFS_DQ_GROUP) ||
++           (XFS_IS_PQUOTA_ENFORCED(mp) && type == XFS_DQ_PROJ)) &&
++          id != 0) {
++              if ((dst->d_space > dst->d_spc_softlimit) &&
++                  (dst->d_spc_softlimit > 0)) {
++                      ASSERT(dst->d_spc_timer != 0);
+               }
+-              if ((dst->d_icount > dst->d_ino_softlimit) &&
++              if ((dst->d_ino_count > dst->d_ino_softlimit) &&
+                   (dst->d_ino_softlimit > 0)) {
+-                      ASSERT(dst->d_itimer != 0);
++                      ASSERT(dst->d_ino_timer != 0);
+               }
+       }
+ #endif
+@@ -915,26 +911,6 @@ out_put:
+ }
+ 
+ STATIC uint
+-xfs_qm_export_qtype_flags(
+-      uint flags)
+-{
+-      /*
+-       * Can't be more than one, or none.
+-       */
+-      ASSERT((flags & (FS_PROJ_QUOTA | FS_USER_QUOTA)) !=
+-              (FS_PROJ_QUOTA | FS_USER_QUOTA));
+-      ASSERT((flags & (FS_PROJ_QUOTA | FS_GROUP_QUOTA)) !=
+-              (FS_PROJ_QUOTA | FS_GROUP_QUOTA));
+-      ASSERT((flags & (FS_USER_QUOTA | FS_GROUP_QUOTA)) !=
+-              (FS_USER_QUOTA | FS_GROUP_QUOTA));
+-      ASSERT((flags & (FS_PROJ_QUOTA|FS_USER_QUOTA|FS_GROUP_QUOTA)) != 0);
+-
+-      return (flags & XFS_DQ_USER) ?
+-              FS_USER_QUOTA : (flags & XFS_DQ_PROJ) ?
+-                      FS_PROJ_QUOTA : FS_GROUP_QUOTA;
+-}
+-
+-STATIC uint
+ xfs_qm_export_flags(
+       uint flags)
+ {
+diff --git a/fs/xfs/xfs_quotaops.c b/fs/xfs/xfs_quotaops.c
+index b238027df987..320c814bb9a5 100644
+--- a/fs/xfs/xfs_quotaops.c
++++ b/fs/xfs/xfs_quotaops.c
+@@ -133,7 +133,7 @@ STATIC int
+ xfs_fs_get_dqblk(
+       struct super_block      *sb,
+       struct kqid             qid,
+-      struct fs_disk_quota    *fdq)
++      struct qc_dqblk         *qdq)
+ {
+       struct xfs_mount        *mp = XFS_M(sb);
+ 
+@@ -143,14 +143,14 @@ xfs_fs_get_dqblk(
+               return -ESRCH;
+ 
+       return xfs_qm_scall_getquota(mp, from_kqid(&init_user_ns, qid),
+-                                    xfs_quota_type(qid.type), fdq);
++                                    xfs_quota_type(qid.type), qdq);
+ }
+ 
+ STATIC int
+ xfs_fs_set_dqblk(
+       struct super_block      *sb,
+       struct kqid             qid,
+-      struct fs_disk_quota    *fdq)
++      struct qc_dqblk         *qdq)
+ {
+       struct xfs_mount        *mp = XFS_M(sb);
+ 
+@@ -162,7 +162,7 @@ xfs_fs_set_dqblk(
+               return -ESRCH;
+ 
+       return xfs_qm_scall_setqlim(mp, from_kqid(&init_user_ns, qid),
+-                                   xfs_quota_type(qid.type), fdq);
++                                   xfs_quota_type(qid.type), qdq);
+ }
+ 
+ const struct quotactl_ops xfs_quotactl_operations = {
+diff --git a/include/linux/mm.h b/include/linux/mm.h
+index 5ab2da9811c1..86a977bf4f79 100644
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -1054,6 +1054,7 @@ static inline int page_mapped(struct page *page)
+ #define VM_FAULT_WRITE        0x0008  /* Special case for get_user_pages */
+ #define VM_FAULT_HWPOISON 0x0010      /* Hit poisoned small page */
+#define VM_FAULT_HWPOISON_LARGE 0x0020  /* Hit poisoned large page. Index encoded in upper bits */
++#define VM_FAULT_SIGSEGV 0x0040
+ 
+ #define VM_FAULT_NOPAGE       0x0100  /* ->fault installed the pte, not return page */
+ #define VM_FAULT_LOCKED       0x0200  /* ->fault locked the returned page */
+@@ -1062,8 +1063,9 @@ static inline int page_mapped(struct page *page)
+ 
+ #define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */
+ 
+-#define VM_FAULT_ERROR        (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_HWPOISON | \
+-                       VM_FAULT_FALLBACK | VM_FAULT_HWPOISON_LARGE)
++#define VM_FAULT_ERROR        (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | \
++                       VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE | \
++                       VM_FAULT_FALLBACK)
+ 
+ /* Encode hstate index for a hwpoisoned large page */
+ #define VM_FAULT_SET_HINDEX(x) ((x) << 12)
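
(Aside: the point of folding VM_FAULT_SIGSEGV into VM_FAULT_ERROR is that every generic `if (ret & VM_FAULT_ERROR)` test now catches it. A standalone C sketch; only SIGSEGV's value appears in the hunk, the others are taken from the same header, and the mask is trimmed to the flags involved:)

    #include <stdio.h>

    #define VM_FAULT_OOM     0x0001
    #define VM_FAULT_SIGBUS  0x0002
    #define VM_FAULT_SIGSEGV 0x0040
    /* trimmed: the real mask also includes HWPOISON(_LARGE) and FALLBACK */
    #define VM_FAULT_ERROR   (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)

    int main(void)
    {
        int ret = VM_FAULT_SIGSEGV;
        /* before the patch a SIGSEGV return slipped past this test */
        printf("is error: %d\n", !!(ret & VM_FAULT_ERROR));
        return 0;
    }
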
+diff --git a/include/linux/quota.h b/include/linux/quota.h
+index 80d345a3524c..224fb8154f8f 100644
+--- a/include/linux/quota.h
++++ b/include/linux/quota.h
+@@ -316,6 +316,49 @@ struct dquot_operations {
+ 
+ struct path;
+ 
++/* Structure for communicating via ->get_dqblk() & ->set_dqblk() */
++struct qc_dqblk {
++      int d_fieldmask;        /* mask of fields to change in ->set_dqblk() */
++      u64 d_spc_hardlimit;    /* absolute limit on used space */
++      u64 d_spc_softlimit;    /* preferred limit on used space */
++      u64 d_ino_hardlimit;    /* maximum # allocated inodes */
++      u64 d_ino_softlimit;    /* preferred inode limit */
++      u64 d_space;            /* Space owned by the user */
++      u64 d_ino_count;        /* # inodes owned by the user */
++      s64 d_ino_timer;        /* zero if within inode limits */
++                              /* if not, we refuse service */
++      s64 d_spc_timer;        /* similar to above; for space */
++      int d_ino_warns;        /* # warnings issued wrt num inodes */
++      int d_spc_warns;        /* # warnings issued wrt used space */
++      u64 d_rt_spc_hardlimit; /* absolute limit on realtime space */
++      u64 d_rt_spc_softlimit; /* preferred limit on RT space */
++      u64 d_rt_space;         /* realtime space owned */
++      s64 d_rt_spc_timer;     /* similar to above; for RT space */
++      int d_rt_spc_warns;     /* # warnings issued wrt RT space */
++};
++
++/* Field specifiers for ->set_dqblk() in struct qc_dqblk */
++#define       QC_INO_SOFT     (1<<0)
++#define       QC_INO_HARD     (1<<1)
++#define       QC_SPC_SOFT     (1<<2)
++#define       QC_SPC_HARD     (1<<3)
++#define       QC_RT_SPC_SOFT  (1<<4)
++#define       QC_RT_SPC_HARD  (1<<5)
++#define QC_LIMIT_MASK (QC_INO_SOFT | QC_INO_HARD | QC_SPC_SOFT | QC_SPC_HARD | \
++                     QC_RT_SPC_SOFT | QC_RT_SPC_HARD)
++#define       QC_SPC_TIMER    (1<<6)
++#define       QC_INO_TIMER    (1<<7)
++#define       QC_RT_SPC_TIMER (1<<8)
++#define QC_TIMER_MASK (QC_SPC_TIMER | QC_INO_TIMER | QC_RT_SPC_TIMER)
++#define       QC_SPC_WARNS    (1<<9)
++#define       QC_INO_WARNS    (1<<10)
++#define       QC_RT_SPC_WARNS (1<<11)
++#define QC_WARNS_MASK (QC_SPC_WARNS | QC_INO_WARNS | QC_RT_SPC_WARNS)
++#define       QC_SPACE        (1<<12)
++#define       QC_INO_COUNT    (1<<13)
++#define       QC_RT_SPACE     (1<<14)
++#define QC_ACCT_MASK (QC_SPACE | QC_INO_COUNT | QC_RT_SPACE)
++
+ /* Operations handling requests from userspace */
+ struct quotactl_ops {
+       int (*quota_on)(struct super_block *, int, int, struct path *);
+@@ -324,8 +367,8 @@ struct quotactl_ops {
+       int (*quota_sync)(struct super_block *, int);
+       int (*get_info)(struct super_block *, int, struct if_dqinfo *);
+       int (*set_info)(struct super_block *, int, struct if_dqinfo *);
+-      int (*get_dqblk)(struct super_block *, struct kqid, struct fs_disk_quota *);
+-      int (*set_dqblk)(struct super_block *, struct kqid, struct fs_disk_quota *);
++      int (*get_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *);
++      int (*set_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *);
+       int (*get_xstate)(struct super_block *, struct fs_quota_stat *);
+       int (*set_xstate)(struct super_block *, unsigned int, int);
+       int (*get_xstatev)(struct super_block *, struct fs_quota_statv *);
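
(Aside: a minimal sketch of how a caller fills the new struct qc_dqblk before handing it to a filesystem's ->set_dqblk(). Standalone C; the flag values are copied from the hunk above, and the struct is trimmed to the fields used here:)

    #include <stdio.h>

    #define QC_SPC_SOFT (1 << 2)
    #define QC_SPC_HARD (1 << 3)

    /* trimmed stand-in for struct qc_dqblk; see the full definition above */
    struct qc_dqblk_sketch {
        int d_fieldmask;
        unsigned long long d_spc_softlimit; /* bytes, not blocks */
        unsigned long long d_spc_hardlimit;
    };

    int main(void)
    {
        struct qc_dqblk_sketch blk = { 0 };

        blk.d_fieldmask     = QC_SPC_SOFT | QC_SPC_HARD;
        blk.d_spc_softlimit = 1ULL << 30; /* 1 GiB */
        blk.d_spc_hardlimit = 2ULL << 30; /* 2 GiB */
        printf("mask 0x%x\n", blk.d_fieldmask);
        return 0;
    }
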
+diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
+index 1d3eee594cd6..bfaf7138d5ee 100644
+--- a/include/linux/quotaops.h
++++ b/include/linux/quotaops.h
+@@ -98,9 +98,9 @@ int dquot_quota_sync(struct super_block *sb, int type);
+ int dquot_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii);
+ int dquot_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii);
+ int dquot_get_dqblk(struct super_block *sb, struct kqid id,
+-              struct fs_disk_quota *di);
++              struct qc_dqblk *di);
+ int dquot_set_dqblk(struct super_block *sb, struct kqid id,
+-              struct fs_disk_quota *di);
++              struct qc_dqblk *di);
+ 
+ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to);
+ int dquot_transfer(struct inode *inode, struct iattr *iattr);
+diff --git a/mm/gup.c b/mm/gup.c
+index cd62c8c90d4a..a0d57ec05510 100644
+--- a/mm/gup.c
++++ b/mm/gup.c
+@@ -296,7 +296,7 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
+                       return -ENOMEM;
+               if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
+                       return *flags & FOLL_HWPOISON ? -EHWPOISON : -EFAULT;
+-              if (ret & VM_FAULT_SIGBUS)
++              if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
+                       return -EFAULT;
+               BUG();
+       }
+@@ -571,7 +571,7 @@ int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
+                       return -ENOMEM;
+               if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
+                       return -EHWPOISON;
+-              if (ret & VM_FAULT_SIGBUS)
++              if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
+                       return -EFAULT;
+               BUG();
+       }
+diff --git a/mm/ksm.c b/mm/ksm.c
+index 6b2e337bc03c..a0ed043a1096 100644
+--- a/mm/ksm.c
++++ b/mm/ksm.c
+@@ -376,7 +376,7 @@ static int break_ksm(struct vm_area_struct *vma, unsigned long addr)
+               else
+                       ret = VM_FAULT_WRITE;
+               put_page(page);
+-      } while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_OOM)));
++      } while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | VM_FAULT_OOM)));
+       /*
+        * We must loop because handle_mm_fault() may back out if there's
+        * any difficulty e.g. if pte accessed bit gets updated concurrently.
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index d6ac0e33e150..4918b6eefae2 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -1638,9 +1638,9 @@ void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
+ 
+       pr_info("Task in ");
+       pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
+-      pr_info(" killed as a result of limit of ");
++      pr_cont(" killed as a result of limit of ");
+       pr_cont_cgroup_path(memcg->css.cgroup);
+-      pr_info("\n");
++      pr_cont("\n");
+ 
+       rcu_read_unlock();
+ 
+diff --git a/mm/memory.c b/mm/memory.c
+index 7f86cf6252bd..d442584fd281 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -2645,7 +2645,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
+ 
+       /* Check if we need to add a guard page to the stack */
+       if (check_stack_guard_page(vma, address) < 0)
+-              return VM_FAULT_SIGBUS;
++              return VM_FAULT_SIGSEGV;
+ 
+       /* Use the zero-page for reads */
+       if (!(flags & FAULT_FLAG_WRITE)) {
+diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
+index 4c5192e0d66c..4a95fe3cffbc 100644
+--- a/net/mac80211/pm.c
++++ b/net/mac80211/pm.c
+@@ -86,20 +86,6 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
+               }
+       }
+ 
+-      /* tear down aggregation sessions and remove STAs */
+-      mutex_lock(&local->sta_mtx);
+-      list_for_each_entry(sta, &local->sta_list, list) {
+-              if (sta->uploaded) {
+-                      enum ieee80211_sta_state state;
+-
+-                      state = sta->sta_state;
+-                      for (; state > IEEE80211_STA_NOTEXIST; state--)
+-                              WARN_ON(drv_sta_state(local, sta->sdata, sta,
+-                                                    state, state - 1));
+-              }
+-      }
+-      mutex_unlock(&local->sta_mtx);
+-
+       /* remove all interfaces that were created in the driver */
+       list_for_each_entry(sdata, &local->interfaces, list) {
+               if (!ieee80211_sdata_running(sdata))
+@@ -111,6 +97,21 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
+               case NL80211_IFTYPE_STATION:
+                       ieee80211_mgd_quiesce(sdata);
+                       break;
++              case NL80211_IFTYPE_WDS:
++                      /* tear down aggregation sessions and remove STAs */
++                      mutex_lock(&local->sta_mtx);
++                      sta = sdata->u.wds.sta;
++                      if (sta && sta->uploaded) {
++                              enum ieee80211_sta_state state;
++
++                              state = sta->sta_state;
++                              for (; state > IEEE80211_STA_NOTEXIST; state--)
++                                      WARN_ON(drv_sta_state(local, sta->sdata,
++                                                            sta, state,
++                                                            state - 1));
++                      }
++                      mutex_unlock(&local->sta_mtx);
++                      break;
+               default:
+                       break;
+               }
+diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
+index e60da9a062c2..7d6379bd2cb8 100644
+--- a/net/mac80211/rx.c
++++ b/net/mac80211/rx.c
+@@ -235,7 +235,7 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
+       else if (rate && rate->flags & IEEE80211_RATE_ERP_G)
+               channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ;
+       else if (rate)
+-              channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ;
++              channel_flags |= IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ;
+       else
+               channel_flags |= IEEE80211_CHAN_2GHZ;
+       put_unaligned_le16(channel_flags, pos);
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index ea558e07981f..213048ad31c7 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -2805,6 +2805,9 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info)
+       if (!rdev->ops->get_key)
+               return -EOPNOTSUPP;
+ 
++      if (!pairwise && mac_addr && !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN))
++              return -ENOENT;
++
+       msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+       if (!msg)
+               return -ENOMEM;
+@@ -2824,10 +2827,6 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info)
+           nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr))
+               goto nla_put_failure;
+ 
+-      if (pairwise && mac_addr &&
+-          !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN))
+-              return -ENOENT;
+-
+       err = rdev_get_key(rdev, dev, key_idx, pairwise, mac_addr, &cookie,
+                          get_key_callback);
+ 
+@@ -2998,7 +2997,7 @@ static int nl80211_del_key(struct sk_buff *skb, struct genl_info *info)
+       wdev_lock(dev->ieee80211_ptr);
+       err = nl80211_key_allowed(dev->ieee80211_ptr);
+ 
+-      if (key.type == NL80211_KEYTYPE_PAIRWISE && mac_addr &&
++      if (key.type == NL80211_KEYTYPE_GROUP && mac_addr &&
+           !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN))
+               err = -ENOENT;
+ 
+diff --git a/sound/core/seq/seq_dummy.c b/sound/core/seq/seq_dummy.c
+index ec667f158f19..5d905d90d504 100644
+--- a/sound/core/seq/seq_dummy.c
++++ b/sound/core/seq/seq_dummy.c
+@@ -82,36 +82,6 @@ struct snd_seq_dummy_port {
+ static int my_client = -1;
+ 
+ /*
+- * unuse callback - send ALL_SOUNDS_OFF and RESET_CONTROLLERS events
+- * to subscribers.
+- * Note: this callback is called only after all subscribers are removed.
+- */
+-static int
+-dummy_unuse(void *private_data, struct snd_seq_port_subscribe *info)
+-{
+-      struct snd_seq_dummy_port *p;
+-      int i;
+-      struct snd_seq_event ev;
+-
+-      p = private_data;
+-      memset(&ev, 0, sizeof(ev));
+-      if (p->duplex)
+-              ev.source.port = p->connect;
+-      else
+-              ev.source.port = p->port;
+-      ev.dest.client = SNDRV_SEQ_ADDRESS_SUBSCRIBERS;
+-      ev.type = SNDRV_SEQ_EVENT_CONTROLLER;
+-      for (i = 0; i < 16; i++) {
+-              ev.data.control.channel = i;
+-              ev.data.control.param = MIDI_CTL_ALL_SOUNDS_OFF;
+-              snd_seq_kernel_client_dispatch(p->client, &ev, 0, 0);
+-              ev.data.control.param = MIDI_CTL_RESET_CONTROLLERS;
+-              snd_seq_kernel_client_dispatch(p->client, &ev, 0, 0);
+-      }
+-      return 0;
+-}
+-
+-/*
+  * event input callback - just redirect events to subscribers
+  */
+ static int
+@@ -175,7 +145,6 @@ create_port(int idx, int type)
+               | SNDRV_SEQ_PORT_TYPE_PORT;
+       memset(&pcb, 0, sizeof(pcb));
+       pcb.owner = THIS_MODULE;
+-      pcb.unuse = dummy_unuse;
+       pcb.event_input = dummy_input;
+       pcb.private_free = dummy_free;
+       pcb.private_data = rec;
+diff --git a/sound/soc/codecs/pcm512x.c b/sound/soc/codecs/pcm512x.c
+index 0c8aefab404c..640c99198cda 100644
+--- a/sound/soc/codecs/pcm512x.c
++++ b/sound/soc/codecs/pcm512x.c
+@@ -188,8 +188,8 @@ static const DECLARE_TLV_DB_SCALE(boost_tlv, 0, 80, 0);
+ static const char * const pcm512x_dsp_program_texts[] = {
+       "FIR interpolation with de-emphasis",
+       "Low latency IIR with de-emphasis",
+-      "Fixed process flow",
+       "High attenuation with de-emphasis",
++      "Fixed process flow",
+       "Ringing-less low latency FIR",
+ };
+ 
+diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c
+index 4dc4e85116cd..641f940c138d 100644
+--- a/sound/soc/codecs/wm8960.c
++++ b/sound/soc/codecs/wm8960.c
+@@ -555,7 +555,7 @@ static struct {
+       { 22050, 2 },
+       { 24000, 2 },
+       { 16000, 3 },
+-      { 11250, 4 },
++      { 11025, 4 },
+       { 12000, 4 },
+       {  8000, 5 },
+ };
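
(Aside: the wm8960 fix corrects a typo in a sample-rate lookup table: 11250 is not an audio rate, 11025 Hz is. A standalone C sketch of the lookup over the rows visible in the hunk; rows not shown in the diff are omitted rather than guessed:)

    #include <stdio.h>

    static const struct { int rate; int val; } alc_rates[] = {
        { 22050, 2 }, { 24000, 2 }, { 16000, 3 },
        { 11025, 4 }, { 12000, 4 }, {  8000, 5 },
    };

    static int alc_lookup(int rate)
    {
        for (unsigned i = 0; i < sizeof(alc_rates) / sizeof(alc_rates[0]); i++)
            if (alc_rates[i].rate == rate)
                return alc_rates[i].val;
        return -1; /* unsupported rate */
    }

    int main(void)
    {
        printf("%d\n", alc_lookup(11025)); /* 4 after the fix; -1 before */
        return 0;
    }
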
+diff --git a/sound/soc/fsl/fsl_esai.h b/sound/soc/fsl/fsl_esai.h
+index 91a550f4a10d..5e793bbb6b02 100644
+--- a/sound/soc/fsl/fsl_esai.h
++++ b/sound/soc/fsl/fsl_esai.h
+@@ -302,7 +302,7 @@
+ #define ESAI_xCCR_xFP_MASK    (((1 << ESAI_xCCR_xFP_WIDTH) - 1) << ESAI_xCCR_xFP_SHIFT)
+ #define ESAI_xCCR_xFP(v)      ((((v) - 1) << ESAI_xCCR_xFP_SHIFT) & ESAI_xCCR_xFP_MASK)
+ #define ESAI_xCCR_xDC_SHIFT     9
+-#define ESAI_xCCR_xDC_WIDTH   4
++#define ESAI_xCCR_xDC_WIDTH   5
+ #define ESAI_xCCR_xDC_MASK    (((1 << ESAI_xCCR_xDC_WIDTH) - 1) << ESAI_xCCR_xDC_SHIFT)
+ #define ESAI_xCCR_xDC(v)      ((((v) - 1) << ESAI_xCCR_xDC_SHIFT) & ESAI_xCCR_xDC_MASK)
+ #define ESAI_xCCR_xPSR_SHIFT  8
+diff --git a/sound/soc/generic/simple-card.c b/sound/soc/generic/simple-card.c
+index d1b7293c133e..c6a6693bbfc9 100644
+--- a/sound/soc/generic/simple-card.c
++++ b/sound/soc/generic/simple-card.c
+@@ -453,9 +453,8 @@ static int asoc_simple_card_parse_of(struct device_node *node,
+ }
+ 
+ /* Decrease the reference count of the device nodes */
+-static int asoc_simple_card_unref(struct platform_device *pdev)
++static int asoc_simple_card_unref(struct snd_soc_card *card)
+ {
+-      struct snd_soc_card *card = platform_get_drvdata(pdev);
+       struct snd_soc_dai_link *dai_link;
+       struct device_node *np;
+       int num_links;
+@@ -562,7 +561,7 @@ static int asoc_simple_card_probe(struct platform_device *pdev)
+               return ret;
+ 
+ err:
+-      asoc_simple_card_unref(pdev);
++      asoc_simple_card_unref(&priv->snd_card);
+       return ret;
+ }
+ 
+@@ -578,7 +577,7 @@ static int asoc_simple_card_remove(struct platform_device *pdev)
+               snd_soc_jack_free_gpios(&simple_card_mic_jack, 1,
+                                       &simple_card_mic_jack_gpio);
+ 
+-      return asoc_simple_card_unref(pdev);
++      return asoc_simple_card_unref(card);
+ }
+ 
+ static const struct of_device_id asoc_simple_of_match[] = {
+diff --git a/sound/soc/omap/omap-mcbsp.c b/sound/soc/omap/omap-mcbsp.c
+index bd3ef2a88be0..aafc0686ab22 100644
+--- a/sound/soc/omap/omap-mcbsp.c
++++ b/sound/soc/omap/omap-mcbsp.c
+@@ -434,7 +434,7 @@ static int omap_mcbsp_dai_set_dai_fmt(struct snd_soc_dai *cpu_dai,
+       case SND_SOC_DAIFMT_CBM_CFS:
+               /* McBSP slave. FS clock as output */
+               regs->srgr2     |= FSGM;
+-              regs->pcr0      |= FSXM;
++              regs->pcr0      |= FSXM | FSRM;
+               break;
+       case SND_SOC_DAIFMT_CBM_CFM:
+               /* McBSP slave */
+diff --git a/sound/soc/soc-compress.c b/sound/soc/soc-compress.c
+index cecfab3cc948..08e430d664cd 100644
+--- a/sound/soc/soc-compress.c
++++ b/sound/soc/soc-compress.c
+@@ -666,7 +666,8 @@ int soc_new_compress(struct snd_soc_pcm_runtime *rtd, int num)
+                       rtd->dai_link->stream_name);
+ 
+               ret = snd_pcm_new_internal(rtd->card->snd_card, new_name, num,
+-                              1, 0, &be_pcm);
++                              rtd->dai_link->dpcm_playback,
++                              rtd->dai_link->dpcm_capture, &be_pcm);
+               if (ret < 0) {
+                       dev_err(rtd->card->dev, "ASoC: can't create compressed for %s\n",
+                               rtd->dai_link->name);
+@@ -675,8 +676,10 @@ int soc_new_compress(struct snd_soc_pcm_runtime *rtd, int num)
+ 
+               rtd->pcm = be_pcm;
+               rtd->fe_compr = 1;
+-              be_pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream->private_data = rtd;
+-              be_pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream->private_data = rtd;
++              if (rtd->dai_link->dpcm_playback)
++                      be_pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream->private_data = rtd;
++              else if (rtd->dai_link->dpcm_capture)
++                      be_pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream->private_data = rtd;
+               memcpy(compr->ops, &soc_compr_dyn_ops, sizeof(soc_compr_dyn_ops));
+       } else
+               memcpy(compr->ops, &soc_compr_ops, sizeof(soc_compr_ops));
