diff --git a/Makefile b/Makefile
index 320663d..bf1df55 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 3
 PATCHLEVEL = 4
-SUBLEVEL = 22
+SUBLEVEL = 23
 EXTRAVERSION =
 NAME = Saber-toothed Squirrel
 
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index e14ae11..7fe19a3 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -579,6 +579,7 @@ config ARCH_KIRKWOOD
        bool "Marvell Kirkwood"
        select CPU_FEROCEON
        select PCI
+       select PCI_QUIRKS
        select ARCH_REQUIRE_GPIOLIB
        select GENERIC_CLOCKEVENTS
        select NEED_MACH_IO_H
diff --git a/arch/arm/mach-dove/include/mach/pm.h b/arch/arm/mach-dove/include/mach/pm.h
index 3ad9f94..11799c3 100644
--- a/arch/arm/mach-dove/include/mach/pm.h
+++ b/arch/arm/mach-dove/include/mach/pm.h
@@ -45,7 +45,7 @@ static inline int pmu_to_irq(int pin)
 
 static inline int irq_to_pmu(int irq)
 {
-       if (IRQ_DOVE_PMU_START < irq && irq < NR_IRQS)
+       if (IRQ_DOVE_PMU_START <= irq && irq < NR_IRQS)
                return irq - IRQ_DOVE_PMU_START;
 
        return -EINVAL;
diff --git a/arch/arm/mach-dove/irq.c b/arch/arm/mach-dove/irq.c
index f07fd16..9f2fd10 100644
--- a/arch/arm/mach-dove/irq.c
+++ b/arch/arm/mach-dove/irq.c
@@ -61,8 +61,20 @@ static void pmu_irq_ack(struct irq_data *d)
        int pin = irq_to_pmu(d->irq);
        u32 u;
 
+       /*
+        * The PMU mask register is not RW0C: it is RW.  This means that
+        * the bits take whatever value is written to them; if you write
+        * a '1', you will set the interrupt.
+        *
+        * Unfortunately, this means there is no race-free way to clear
+        * these interrupts.
+        *
+        * So, let's structure the code so that the window is as small as
+        * possible.
+        */
        u = ~(1 << (pin & 31));
-       writel(u, PMU_INTERRUPT_CAUSE);
+       u &= readl_relaxed(PMU_INTERRUPT_CAUSE);
+       writel_relaxed(u, PMU_INTERRUPT_CAUSE);
 }
 
 static struct irq_chip pmu_irq_chip = {
diff --git a/arch/arm/mach-kirkwood/pcie.c b/arch/arm/mach-kirkwood/pcie.c
index f56a011..c46d20e 100644
--- a/arch/arm/mach-kirkwood/pcie.c
+++ b/arch/arm/mach-kirkwood/pcie.c
@@ -212,14 +212,19 @@ static int __init kirkwood_pcie_setup(int nr, struct pci_sys_data *sys)
        return 1;
 }
 
+/*
+ * The root complex has a hardwired class of PCI_CLASS_MEMORY_OTHER; when
+ * it is operating as a root complex this needs to be switched to
+ * PCI_CLASS_BRIDGE_HOST or Linux will erroneously try to process the BARs
+ * on the device. Decoding setup is handled by the orion code.
+ */
 static void __devinit rc_pci_fixup(struct pci_dev *dev)
 {
-       /*
-        * Prevent enumeration of root complex.
-        */
        if (dev->bus->parent == NULL && dev->devfn == 0) {
                int i;
 
+               dev->class &= 0xff;
+               dev->class |= PCI_CLASS_BRIDGE_HOST << 8;
                for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
                        dev->resource[i].start = 0;
                        dev->resource[i].end   = 0;
diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
index 4fa8815..92e05b6 100644
--- a/arch/x86/include/asm/fpu-internal.h
+++ b/arch/x86/include/asm/fpu-internal.h
@@ -334,14 +334,17 @@ static inline void __thread_fpu_begin(struct task_struct *tsk)
 typedef struct { int preload; } fpu_switch_t;
 
 /*
- * FIXME! We could do a totally lazy restore, but we need to
- * add a per-cpu "this was the task that last touched the FPU
- * on this CPU" variable, and the task needs to have a "I last
- * touched the FPU on this CPU" and check them.
+ * Must be run with preemption disabled: this clears the fpu_owner_task
+ * on this CPU.
  *
- * We don't do that yet, so "fpu_lazy_restore()" always returns
- * false, but some day..
+ * This will disable any lazy FPU state restore of the current FPU state,
+ * but if the current thread owns the FPU, its state will still be saved.
  */
+static inline void __cpu_disable_lazy_restore(unsigned int cpu)
+{
+       per_cpu(fpu_owner_task, cpu) = NULL;
+}
+
 static inline int fpu_lazy_restore(struct task_struct *new, unsigned int cpu)
 {
        return new == percpu_read_stable(fpu_owner_task) &&
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 6e1e406..849cdcf 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -66,6 +66,8 @@
 #include <asm/mwait.h>
 #include <asm/apic.h>
 #include <asm/io_apic.h>
+#include <asm/i387.h>
+#include <asm/fpu-internal.h>
 #include <asm/setup.h>
 #include <asm/uv/uv.h>
 #include <linux/mc146818rtc.h>
@@ -851,6 +853,9 @@ int __cpuinit native_cpu_up(unsigned int cpu)
 
        per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
 
+       /* the FPU context is blank, nobody can own it */
+       __cpu_disable_lazy_restore(cpu);
+
        err = do_boot_cpu(apicid, cpu);
        if (err) {
                pr_debug("do_boot_cpu failed %d\n", err);
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index bbac51e..4a2c131 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -407,6 +407,7 @@ static void acpi_processor_notify(struct acpi_device *device, u32 event)
                acpi_bus_generate_proc_event(device, event, 0);
                acpi_bus_generate_netlink_event(device->pnp.device_class,
                                                  dev_name(&device->dev), event, 0);
+               break;
        default:
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                  "Unsupported event [0x%x]\n", event));
diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c
index 3bafa3b..f4059e9 100644
--- a/drivers/edac/i7300_edac.c
+++ b/drivers/edac/i7300_edac.c
@@ -215,8 +215,8 @@ static const char *ferr_fat_fbd_name[] = {
        [0]  = "Memory Write error on non-redundant retry or "
               "FBD configuration Write error on retry",
 };
-#define GET_FBD_FAT_IDX(fbderr)        (fbderr & (3 << 28))
-#define FERR_FAT_FBD_ERR_MASK ((1 << 0) | (1 << 1) | (1 << 2) | (1 << 3))
+#define GET_FBD_FAT_IDX(fbderr)        (((fbderr) >> 28) & 3)
+#define FERR_FAT_FBD_ERR_MASK ((1 << 0) | (1 << 1) | (1 << 2) | (1 << 22))
 
 #define FERR_NF_FBD    0xa0
 static const char *ferr_nf_fbd_name[] = {
@@ -243,7 +243,7 @@ static const char *ferr_nf_fbd_name[] = {
        [1]  = "Aliased Uncorrectable Non-Mirrored Demand Data ECC",
        [0]  = "Uncorrectable Data ECC on Replay",
 };
-#define GET_FBD_NF_IDX(fbderr) (fbderr & (3 << 28))
+#define GET_FBD_NF_IDX(fbderr) (((fbderr) >> 28) & 3)
 #define FERR_NF_FBD_ERR_MASK ((1 << 24) | (1 << 23) | (1 << 22) | (1 << 21) |\
                              (1 << 18) | (1 << 17) | (1 << 16) | (1 << 15) |\
                              (1 << 14) | (1 << 13) | (1 << 11) | (1 << 10) |\
@@ -485,7 +485,7 @@ static void i7300_process_fbd_error(struct mem_ctl_info *mci)
                errnum = find_first_bit(&errors,
                                        ARRAY_SIZE(ferr_nf_fbd_name));
                specific = GET_ERR_FROM_TABLE(ferr_nf_fbd_name, errnum);
-               branch = (GET_FBD_FAT_IDX(error_reg) == 2) ? 1 : 0;
+               branch = (GET_FBD_NF_IDX(error_reg) == 2) ? 1 : 0;
 
                pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
                        REDMEMA, &syndrome);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 4ff7d5f..802fec2 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -785,6 +785,22 @@ static const struct dmi_system_id intel_no_lvds[] = {
                        DMI_MATCH(DMI_BOARD_NAME, "ZBOXSD-ID12/ID13"),
                },
        },
+       {
+               .callback = intel_no_lvds_dmi_callback,
+               .ident = "Gigabyte GA-D525TUD",
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., 
Ltd."),
+                       DMI_MATCH(DMI_BOARD_NAME, "D525TUD"),
+               },
+       },
+       {
+               .callback = intel_no_lvds_dmi_callback,
+               .ident = "Supermicro X7SPA-H",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "X7SPA-H"),
+               },
+       },
 
        { }     /* terminating entry */
 };
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index e5328da..4a1d8f3 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -37,6 +37,16 @@
 #define EVERGREEN_PFP_UCODE_SIZE 1120
 #define EVERGREEN_PM4_UCODE_SIZE 1376
 
+static const u32 crtc_offsets[6] =
+{
+       EVERGREEN_CRTC0_REGISTER_OFFSET,
+       EVERGREEN_CRTC1_REGISTER_OFFSET,
+       EVERGREEN_CRTC2_REGISTER_OFFSET,
+       EVERGREEN_CRTC3_REGISTER_OFFSET,
+       EVERGREEN_CRTC4_REGISTER_OFFSET,
+       EVERGREEN_CRTC5_REGISTER_OFFSET
+};
+
 static void evergreen_gpu_init(struct radeon_device *rdev);
 void evergreen_fini(struct radeon_device *rdev);
 void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
@@ -101,17 +111,19 @@ void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
 
 void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc)
 {
-       struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc];
        int i;
 
-       if (RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset) & EVERGREEN_CRTC_MASTER_EN) {
+       if (crtc >= rdev->num_crtc)
+               return;
+
+       if (RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[crtc]) & EVERGREEN_CRTC_MASTER_EN) {
                for (i = 0; i < rdev->usec_timeout; i++) {
-                       if (!(RREG32(EVERGREEN_CRTC_STATUS + radeon_crtc->crtc_offset) & EVERGREEN_CRTC_V_BLANK))
+                       if (!(RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK))
                                break;
                        udelay(1);
                }
                for (i = 0; i < rdev->usec_timeout; i++) {
-                       if (RREG32(EVERGREEN_CRTC_STATUS + radeon_crtc->crtc_offset) & EVERGREEN_CRTC_V_BLANK)
+                       if (RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK)
                                break;
                        udelay(1);
                }
@@ -1117,116 +1129,105 @@ void evergreen_agp_enable(struct radeon_device *rdev)
 
 void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
 {
+       u32 crtc_enabled, tmp, frame_count, blackout;
+       int i, j;
+
        save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
        save->vga_hdp_control = RREG32(VGA_HDP_CONTROL);
 
-       /* Stop all video */
+       /* disable VGA render */
        WREG32(VGA_RENDER_CONTROL, 0);
-       WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1);
-       WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1);
-       if (rdev->num_crtc >= 4) {
-               WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
-               WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
-       }
-       if (rdev->num_crtc >= 6) {
-               WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
-               WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
-       }
-       WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
-       WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
-       if (rdev->num_crtc >= 4) {
-               WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
-               WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
-       }
-       if (rdev->num_crtc >= 6) {
-               WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
-               WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
-       }
-       WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
-       WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
-       if (rdev->num_crtc >= 4) {
-               WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
-               WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
-       }
-       if (rdev->num_crtc >= 6) {
-               WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
-               WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+       /* blank the display controllers */
+       for (i = 0; i < rdev->num_crtc; i++) {
+               crtc_enabled = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]) & EVERGREEN_CRTC_MASTER_EN;
+               if (crtc_enabled) {
+                       save->crtc_enabled[i] = true;
+                       if (ASIC_IS_DCE6(rdev)) {
+                               tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
+                               if (!(tmp & EVERGREEN_CRTC_BLANK_DATA_EN)) {
+                                       radeon_wait_for_vblank(rdev, i);
+                                       tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
+                                       WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
+                               }
+                       } else {
+                               tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
+                               if (!(tmp & EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE)) {
+                                       radeon_wait_for_vblank(rdev, i);
+                                       tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
+                                       WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
+                               }
+                       }
+                       /* wait for the next frame */
+                       frame_count = radeon_get_vblank_counter(rdev, i);
+                       for (j = 0; j < rdev->usec_timeout; j++) {
+                               if (radeon_get_vblank_counter(rdev, i) != frame_count)
+                                       break;
+                               udelay(1);
+                       }
+               } else {
+                       save->crtc_enabled[i] = false;
+               }
        }
 
-       WREG32(D1VGA_CONTROL, 0);
-       WREG32(D2VGA_CONTROL, 0);
-       if (rdev->num_crtc >= 4) {
-               WREG32(EVERGREEN_D3VGA_CONTROL, 0);
-               WREG32(EVERGREEN_D4VGA_CONTROL, 0);
-       }
-       if (rdev->num_crtc >= 6) {
-               WREG32(EVERGREEN_D5VGA_CONTROL, 0);
-               WREG32(EVERGREEN_D6VGA_CONTROL, 0);
+       radeon_mc_wait_for_idle(rdev);
+
+       blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
+       if ((blackout & BLACKOUT_MODE_MASK) != 1) {
+               /* Block CPU access */
+               WREG32(BIF_FB_EN, 0);
+               /* blackout the MC */
+               blackout &= ~BLACKOUT_MODE_MASK;
+               WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1);
        }
 }
 
 void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save)
 {
-       WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC0_REGISTER_OFFSET,
-              upper_32_bits(rdev->mc.vram_start));
-       WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC0_REGISTER_OFFSET,
-              upper_32_bits(rdev->mc.vram_start));
-       WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC0_REGISTER_OFFSET,
-              (u32)rdev->mc.vram_start);
-       WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC0_REGISTER_OFFSET,
-              (u32)rdev->mc.vram_start);
-
-       WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC1_REGISTER_OFFSET,
-              upper_32_bits(rdev->mc.vram_start));
-       WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC1_REGISTER_OFFSET,
-              upper_32_bits(rdev->mc.vram_start));
-       WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET,
-              (u32)rdev->mc.vram_start);
-       WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET,
-              (u32)rdev->mc.vram_start);
-
-       if (rdev->num_crtc >= 4) {
-               WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
-                      upper_32_bits(rdev->mc.vram_start));
-               WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
-                      upper_32_bits(rdev->mc.vram_start));
-               WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET,
-                      (u32)rdev->mc.vram_start);
-               WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET,
-                      (u32)rdev->mc.vram_start);
-
-               WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET,
-                      upper_32_bits(rdev->mc.vram_start));
-               WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET,
-                      upper_32_bits(rdev->mc.vram_start));
-               WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET,
-                      (u32)rdev->mc.vram_start);
-               WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET,
-                      (u32)rdev->mc.vram_start);
-       }
-       if (rdev->num_crtc >= 6) {
-               WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
-                      upper_32_bits(rdev->mc.vram_start));
-               WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
-                      upper_32_bits(rdev->mc.vram_start));
-               WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET,
-                      (u32)rdev->mc.vram_start);
-               WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET,
-                      (u32)rdev->mc.vram_start);
+       u32 tmp, frame_count;
+       int i, j;
 
-               WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET,
+       /* update crtc base addresses */
+       for (i = 0; i < rdev->num_crtc; i++) {
+               WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
                       upper_32_bits(rdev->mc.vram_start));
-               WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET,
+               WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
                       upper_32_bits(rdev->mc.vram_start));
-               WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET,
+               WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
                       (u32)rdev->mc.vram_start);
-               WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET,
+               WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
                       (u32)rdev->mc.vram_start);
        }
-
        WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start));
        WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);
-       /* Unlock host access */
+
+       /* unblackout the MC */
+       tmp = RREG32(MC_SHARED_BLACKOUT_CNTL);
+       tmp &= ~BLACKOUT_MODE_MASK;
+       WREG32(MC_SHARED_BLACKOUT_CNTL, tmp);
+       /* allow CPU access */
+       WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN);
+
+       for (i = 0; i < rdev->num_crtc; i++) {
+               if (save->crtc_enabled[i]) {
+                       if (ASIC_IS_DCE6(rdev)) {
+                               tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
+                               tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
+                               WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
+                       } else {
+                               tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
+                               tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
+                               WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
+                       }
+                       /* wait for the next frame */
+                       frame_count = radeon_get_vblank_counter(rdev, i);
+                       for (j = 0; j < rdev->usec_timeout; j++) {
+                               if (radeon_get_vblank_counter(rdev, i) != frame_count)
+                                       break;
+                               udelay(1);
+                       }
+               }
+       }
+       /* Unlock vga access */
        WREG32(VGA_HDP_CONTROL, save->vga_hdp_control);
        mdelay(1);
        WREG32(VGA_RENDER_CONTROL, save->vga_render_control);
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
index 96c10b3..34a0e85 100644
--- a/drivers/gpu/drm/radeon/evergreen_reg.h
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -218,6 +218,8 @@
 #define EVERGREEN_CRTC_CONTROL                          0x6e70
 #       define EVERGREEN_CRTC_MASTER_EN                 (1 << 0)
 #       define EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE (1 << 24)
+#define EVERGREEN_CRTC_BLANK_CONTROL                    0x6e74
+#       define EVERGREEN_CRTC_BLANK_DATA_EN             (1 << 8)
 #define EVERGREEN_CRTC_STATUS                           0x6e8c
 #       define EVERGREEN_CRTC_V_BLANK                   (1 << 0)
 #define EVERGREEN_CRTC_STATUS_POSITION                  0x6e90
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index 2eaaea0..81e744f 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -77,6 +77,10 @@
 
 #define        CONFIG_MEMSIZE                                  0x5428
 
+#define        BIF_FB_EN                                               0x5490
+#define                FB_READ_EN                                      (1 << 0)
+#define                FB_WRITE_EN                                     (1 << 1)
+
 #define        CP_STRMOUT_CNTL                                 0x84FC
 
 #define        CP_COHER_CNTL                                   0x85F0
@@ -200,6 +204,9 @@
 #define                NOOFCHAN_MASK                                   0x00003000
 #define MC_SHARED_CHREMAP                                      0x2008
 
+#define MC_SHARED_BLACKOUT_CNTL                        0x20ac
+#define                BLACKOUT_MODE_MASK                      0x00000007
+
 #define        MC_ARB_RAMCFG                                   0x2760
 #define                NOOFBANK_SHIFT                                  0
 #define                NOOFBANK_MASK                                   0x00000003
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 665df87..917e49c 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -400,6 +400,7 @@ void r700_cp_fini(struct radeon_device *rdev);
 struct evergreen_mc_save {
        u32 vga_render_control;
        u32 vga_hdp_control;
+       bool crtc_enabled[RADEON_MAX_CRTCS];
 };
 
 void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev);
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index a2b5304..6137d00 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1182,18 +1182,21 @@ retry_write:
                        blocked_rdev = rrdev;
                        break;
                }
+               if (rdev && (test_bit(Faulty, &rdev->flags)
+                            || test_bit(Unmerged, &rdev->flags)))
+                       rdev = NULL;
                if (rrdev && (test_bit(Faulty, &rrdev->flags)
                              || test_bit(Unmerged, &rrdev->flags)))
                        rrdev = NULL;
 
                r10_bio->devs[i].bio = NULL;
                r10_bio->devs[i].repl_bio = NULL;
-               if (!rdev || test_bit(Faulty, &rdev->flags) ||
-                   test_bit(Unmerged, &rdev->flags)) {
+
+               if (!rdev && !rrdev) {
                        set_bit(R10BIO_Degraded, &r10_bio->state);
                        continue;
                }
-               if (test_bit(WriteErrorSeen, &rdev->flags)) {
+               if (rdev && test_bit(WriteErrorSeen, &rdev->flags)) {
                        sector_t first_bad;
                        sector_t dev_sector = r10_bio->devs[i].addr;
                        int bad_sectors;
@@ -1235,8 +1238,10 @@ retry_write:
                                        max_sectors = good_sectors;
                        }
                }
-               r10_bio->devs[i].bio = bio;
-               atomic_inc(&rdev->nr_pending);
+               if (rdev) {
+                       r10_bio->devs[i].bio = bio;
+                       atomic_inc(&rdev->nr_pending);
+               }
                if (rrdev) {
                        r10_bio->devs[i].repl_bio = bio;
                        atomic_inc(&rrdev->nr_pending);
@@ -1292,51 +1297,52 @@ retry_write:
        for (i = 0; i < conf->copies; i++) {
                struct bio *mbio;
                int d = r10_bio->devs[i].devnum;
-               if (!r10_bio->devs[i].bio)
-                       continue;
-
-               mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
-               md_trim_bio(mbio, r10_bio->sector - bio->bi_sector,
-                           max_sectors);
-               r10_bio->devs[i].bio = mbio;
-
-               mbio->bi_sector = (r10_bio->devs[i].addr+
-                                  conf->mirrors[d].rdev->data_offset);
-               mbio->bi_bdev = conf->mirrors[d].rdev->bdev;
-               mbio->bi_end_io = raid10_end_write_request;
-               mbio->bi_rw = WRITE | do_sync | do_fua;
-               mbio->bi_private = r10_bio;
-
-               atomic_inc(&r10_bio->remaining);
-               spin_lock_irqsave(&conf->device_lock, flags);
-               bio_list_add(&conf->pending_bio_list, mbio);
-               conf->pending_count++;
-               spin_unlock_irqrestore(&conf->device_lock, flags);
-
-               if (!r10_bio->devs[i].repl_bio)
-                       continue;
+               if (r10_bio->devs[i].bio) {
+                       struct md_rdev *rdev = conf->mirrors[d].rdev;
+                       mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
+                       md_trim_bio(mbio, r10_bio->sector - bio->bi_sector,
+                                   max_sectors);
+                       r10_bio->devs[i].bio = mbio;
+
+                       mbio->bi_sector = (r10_bio->devs[i].addr+
+                                          rdev->data_offset);
+                       mbio->bi_bdev = rdev->bdev;
+                       mbio->bi_end_io = raid10_end_write_request;
+                       mbio->bi_rw = WRITE | do_sync | do_fua;
+                       mbio->bi_private = r10_bio;
 
-               mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
-               md_trim_bio(mbio, r10_bio->sector - bio->bi_sector,
-                           max_sectors);
-               r10_bio->devs[i].repl_bio = mbio;
+                       atomic_inc(&r10_bio->remaining);
+                       spin_lock_irqsave(&conf->device_lock, flags);
+                       bio_list_add(&conf->pending_bio_list, mbio);
+                       conf->pending_count++;
+                       spin_unlock_irqrestore(&conf->device_lock, flags);
+               }
 
-               /* We are actively writing to the original device
-                * so it cannot disappear, so the replacement cannot
-                * become NULL here
-                */
-               mbio->bi_sector = (r10_bio->devs[i].addr+
-                                  conf->mirrors[d].replacement->data_offset);
-               mbio->bi_bdev = conf->mirrors[d].replacement->bdev;
-               mbio->bi_end_io = raid10_end_write_request;
-               mbio->bi_rw = WRITE | do_sync | do_fua;
-               mbio->bi_private = r10_bio;
+               if (r10_bio->devs[i].repl_bio) {
+                       struct md_rdev *rdev = conf->mirrors[d].replacement;
+                       if (rdev == NULL) {
+                               /* Replacement just got moved to main 'rdev' */
+                               smp_mb();
+                               rdev = conf->mirrors[d].rdev;
+                       }
+                       mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
+                       md_trim_bio(mbio, r10_bio->sector - bio->bi_sector,
+                                   max_sectors);
+                       r10_bio->devs[i].repl_bio = mbio;
+
+                       mbio->bi_sector = (r10_bio->devs[i].addr+
+                                          rdev->data_offset);
+                       mbio->bi_bdev = rdev->bdev;
+                       mbio->bi_end_io = raid10_end_write_request;
+                       mbio->bi_rw = WRITE | do_sync | do_fua;
+                       mbio->bi_private = r10_bio;
 
-               atomic_inc(&r10_bio->remaining);
-               spin_lock_irqsave(&conf->device_lock, flags);
-               bio_list_add(&conf->pending_bio_list, mbio);
-               conf->pending_count++;
-               spin_unlock_irqrestore(&conf->device_lock, flags);
+                       atomic_inc(&r10_bio->remaining);
+                       spin_lock_irqsave(&conf->device_lock, flags);
+                       bio_list_add(&conf->pending_bio_list, mbio);
+                       conf->pending_count++;
+                       spin_unlock_irqrestore(&conf->device_lock, flags);
+               }
        }
 
        /* Don't remove the bias on 'remaining' (one_write_done) until
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 6af3101..b8e7f3e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -9131,10 +9131,13 @@ static int __devinit bnx2x_prev_unload_common(struct bnx2x *bp)
  */
 static void __devinit bnx2x_prev_interrupted_dmae(struct bnx2x *bp)
 {
-       u32 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS);
-       if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) {
-               BNX2X_ERR("was error bit was found to be set in pglueb upon startup. Clearing");
-               REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, 1 << BP_FUNC(bp));
+       if (!CHIP_IS_E1x(bp)) {
+               u32 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS);
+               if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) {
+                       BNX2X_ERR("was error bit was found to be set in pglueb upon startup. Clearing");
+                       REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR,
+                              1 << BP_FUNC(bp));
+               }
        }
 }
 
diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
index 7f6a23f..d16dae2 100644
--- a/fs/nfs/blocklayout/blocklayout.c
+++ b/fs/nfs/blocklayout/blocklayout.c
@@ -162,25 +162,39 @@ static struct bio *bl_alloc_init_bio(int npg, sector_t isect,
        return bio;
 }
 
-static struct bio *bl_add_page_to_bio(struct bio *bio, int npg, int rw,
+static struct bio *do_add_page_to_bio(struct bio *bio, int npg, int rw,
                                      sector_t isect, struct page *page,
                                      struct pnfs_block_extent *be,
                                      void (*end_io)(struct bio *, int err),
-                                     struct parallel_io *par)
+                                     struct parallel_io *par,
+                                     unsigned int offset, int len)
 {
+       isect = isect + (offset >> SECTOR_SHIFT);
+       dprintk("%s: npg %d rw %d isect %llu offset %u len %d\n", __func__,
+               npg, rw, (unsigned long long)isect, offset, len);
 retry:
        if (!bio) {
                bio = bl_alloc_init_bio(npg, isect, be, end_io, par);
                if (!bio)
                        return ERR_PTR(-ENOMEM);
        }
-       if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
+       if (bio_add_page(bio, page, len, offset) < len) {
                bio = bl_submit_bio(rw, bio);
                goto retry;
        }
        return bio;
 }
 
+static struct bio *bl_add_page_to_bio(struct bio *bio, int npg, int rw,
+                                     sector_t isect, struct page *page,
+                                     struct pnfs_block_extent *be,
+                                     void (*end_io)(struct bio *, int err),
+                                     struct parallel_io *par)
+{
+       return do_add_page_to_bio(bio, npg, rw, isect, page, be,
+                                 end_io, par, 0, PAGE_CACHE_SIZE);
+}
+
 /* This is basically copied from mpage_end_io_read */
 static void bl_end_io_read(struct bio *bio, int err)
 {
@@ -443,6 +457,107 @@ map_block(struct buffer_head *bh, sector_t isect, struct pnfs_block_extent *be)
        return;
 }
 
+static void
+bl_read_single_end_io(struct bio *bio, int error)
+{
+       struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
+       struct page *page = bvec->bv_page;
+
+       /* Only one page in bvec */
+       unlock_page(page);
+}
+
+static int
+bl_do_readpage_sync(struct page *page, struct pnfs_block_extent *be,
+                   unsigned int offset, unsigned int len)
+{
+       struct bio *bio;
+       struct page *shadow_page;
+       sector_t isect;
+       char *kaddr, *kshadow_addr;
+       int ret = 0;
+
+       dprintk("%s: offset %u len %u\n", __func__, offset, len);
+
+       shadow_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
+       if (shadow_page == NULL)
+               return -ENOMEM;
+
+       bio = bio_alloc(GFP_NOIO, 1);
+       if (bio == NULL)
+               return -ENOMEM;
+
+       isect = (page->index << PAGE_CACHE_SECTOR_SHIFT) +
+               (offset / SECTOR_SIZE);
+
+       bio->bi_sector = isect - be->be_f_offset + be->be_v_offset;
+       bio->bi_bdev = be->be_mdev;
+       bio->bi_end_io = bl_read_single_end_io;
+
+       lock_page(shadow_page);
+       if (bio_add_page(bio, shadow_page,
+                        SECTOR_SIZE, round_down(offset, SECTOR_SIZE)) == 0) {
+               unlock_page(shadow_page);
+               bio_put(bio);
+               return -EIO;
+       }
+
+       submit_bio(READ, bio);
+       wait_on_page_locked(shadow_page);
+       if (unlikely(!test_bit(BIO_UPTODATE, &bio->bi_flags))) {
+               ret = -EIO;
+       } else {
+               kaddr = kmap_atomic(page);
+               kshadow_addr = kmap_atomic(shadow_page);
+               memcpy(kaddr + offset, kshadow_addr + offset, len);
+               kunmap_atomic(kshadow_addr);
+               kunmap_atomic(kaddr);
+       }
+       __free_page(shadow_page);
+       bio_put(bio);
+
+       return ret;
+}
+
+static int
+bl_read_partial_page_sync(struct page *page, struct pnfs_block_extent *be,
+                         unsigned int dirty_offset, unsigned int dirty_len,
+                         bool full_page)
+{
+       int ret = 0;
+       unsigned int start, end;
+
+       if (full_page) {
+               start = 0;
+               end = PAGE_CACHE_SIZE;
+       } else {
+               start = round_down(dirty_offset, SECTOR_SIZE);
+               end = round_up(dirty_offset + dirty_len, SECTOR_SIZE);
+       }
+
+       dprintk("%s: offset %u len %d\n", __func__, dirty_offset, dirty_len);
+       if (!be) {
+               zero_user_segments(page, start, dirty_offset,
+                                  dirty_offset + dirty_len, end);
+               if (start == 0 && end == PAGE_CACHE_SIZE &&
+                   trylock_page(page)) {
+                       SetPageUptodate(page);
+                       unlock_page(page);
+               }
+               return ret;
+       }
+
+       if (start != dirty_offset)
+               ret = bl_do_readpage_sync(page, be, start,
+                                         dirty_offset - start);
+
+       if (!ret && (dirty_offset + dirty_len < end))
+               ret = bl_do_readpage_sync(page, be, dirty_offset + dirty_len,
+                                         end - dirty_offset - dirty_len);
+
+       return ret;
+}
+
 /* Given an unmapped page, zero it or read in page for COW, page is locked
  * by caller.
  */
@@ -476,7 +591,6 @@ init_page_for_write(struct page *page, struct pnfs_block_extent *cow_read)
        SetPageUptodate(page);
 
 cleanup:
-       bl_put_extent(cow_read);
        if (bh)
                free_buffer_head(bh);
        if (ret) {
@@ -547,6 +661,7 @@ bl_write_pagelist(struct nfs_write_data *wdata, int sync)
        struct parallel_io *par;
        loff_t offset = wdata->args.offset;
        size_t count = wdata->args.count;
+       unsigned int pg_offset, pg_len, saved_len;
        struct page **pages = wdata->args.pages;
        struct page *page;
        pgoff_t index;
@@ -651,10 +766,11 @@ next_page:
                if (!extent_length) {
                        /* We've used up the previous extent */
                        bl_put_extent(be);
+                       bl_put_extent(cow_read);
                        bio = bl_submit_bio(WRITE, bio);
                        /* Get the next one */
                        be = bl_find_get_extent(BLK_LSEG2EXT(wdata->lseg),
-                                            isect, NULL);
+                                               isect, &cow_read);
                        if (!be || !is_writable(be, isect)) {
                                wdata->pnfs_error = -EINVAL;
                                goto out;
@@ -671,7 +787,26 @@ next_page:
                        extent_length = be->be_length -
                            (isect - be->be_f_offset);
                }
-               if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
+
+               dprintk("%s offset %lld count %Zu\n", __func__, offset, count);
+               pg_offset = offset & ~PAGE_CACHE_MASK;
+               if (pg_offset + count > PAGE_CACHE_SIZE)
+                       pg_len = PAGE_CACHE_SIZE - pg_offset;
+               else
+                       pg_len = count;
+
+               saved_len = pg_len;
+               if (be->be_state == PNFS_BLOCK_INVALID_DATA &&
+                   !bl_is_sector_init(be->be_inval, isect)) {
+                       ret = bl_read_partial_page_sync(pages[i], cow_read,
+                                               pg_offset, pg_len, true);
+                       if (ret) {
+                               dprintk("%s bl_read_partial_page_sync fail 
%d\n",
+                                       __func__, ret);
+                               wdata->pnfs_error = ret;
+                               goto out;
+                       }
+
                        ret = bl_mark_sectors_init(be->be_inval, isect,
                                                       PAGE_CACHE_SECTORS);
                        if (unlikely(ret)) {
@@ -680,15 +815,33 @@ next_page:
                                wdata->pnfs_error = ret;
                                goto out;
                        }
+
+                       /* Expand to full page write */
+                       pg_offset = 0;
+                       pg_len = PAGE_CACHE_SIZE;
+               } else if ((pg_offset & (SECTOR_SIZE - 1)) ||
+                           (pg_len & (SECTOR_SIZE - 1))) {
+                       /* ahh, nasty case. We have to do sync full sector
+                        * read-modify-write cycles.
+                        */
+                       unsigned int saved_offset = pg_offset;
+                       ret = bl_read_partial_page_sync(pages[i], be, pg_offset,
+                                                       pg_len, false);
+                       pg_offset = round_down(pg_offset, SECTOR_SIZE);
+                       pg_len = round_up(saved_offset + pg_len, SECTOR_SIZE)
+                                - pg_offset;
                }
-               bio = bl_add_page_to_bio(bio, wdata->npages - i, WRITE,
+               bio = do_add_page_to_bio(bio, wdata->npages - i, WRITE,
                                         isect, pages[i], be,
-                                        bl_end_io_write, par);
+                                        bl_end_io_write, par,
+                                        pg_offset, pg_len);
                if (IS_ERR(bio)) {
                        wdata->pnfs_error = PTR_ERR(bio);
                        bio = NULL;
                        goto out;
                }
+               offset += saved_len;
+               count -= saved_len;
                isect += PAGE_CACHE_SECTORS;
                last_isect = isect;
                extent_length -= PAGE_CACHE_SECTORS;
@@ -706,17 +859,16 @@ next_page:
        }
 
 write_done:
-       wdata->res.count = (last_isect << SECTOR_SHIFT) - (offset);
-       if (count < wdata->res.count) {
-               wdata->res.count = count;
-       }
+       wdata->res.count = wdata->args.count;
 out:
        bl_put_extent(be);
+       bl_put_extent(cow_read);
        bl_submit_bio(WRITE, bio);
        put_parallel(par);
        return PNFS_ATTEMPTED;
 out_mds:
        bl_put_extent(be);
+       bl_put_extent(cow_read);
        kfree(par);
        return PNFS_NOT_ATTEMPTED;
 }
diff --git a/fs/nfs/blocklayout/blocklayout.h b/fs/nfs/blocklayout/blocklayout.h
index 0335069..39bb51a 100644
--- a/fs/nfs/blocklayout/blocklayout.h
+++ b/fs/nfs/blocklayout/blocklayout.h
@@ -41,6 +41,7 @@
 
 #define PAGE_CACHE_SECTORS (PAGE_CACHE_SIZE >> SECTOR_SHIFT)
 #define PAGE_CACHE_SECTOR_SHIFT (PAGE_CACHE_SHIFT - SECTOR_SHIFT)
+#define SECTOR_SIZE (1 << SECTOR_SHIFT)
 
 struct block_mount_id {
        spinlock_t                      bm_lock;    /* protects list */
diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
index 0984a21..15f60d0 100644
--- a/kernel/sched/auto_group.c
+++ b/kernel/sched/auto_group.c
@@ -143,15 +143,11 @@ autogroup_move_group(struct task_struct *p, struct autogroup *ag)
 
        p->signal->autogroup = autogroup_kref_get(ag);
 
-       if (!ACCESS_ONCE(sysctl_sched_autogroup_enabled))
-               goto out;
-
        t = p;
        do {
                sched_move_task(t);
        } while_each_thread(p, t);
 
-out:
        unlock_task_sighand(p, &flags);
        autogroup_kref_put(prev);
 }
diff --git a/kernel/sched/auto_group.h b/kernel/sched/auto_group.h
index 8bd0471..443232e 100644
--- a/kernel/sched/auto_group.h
+++ b/kernel/sched/auto_group.h
@@ -4,11 +4,6 @@
 #include <linux/rwsem.h>
 
 struct autogroup {
-       /*
-        * reference doesn't mean how many thread attach to this
-        * autogroup now. It just stands for the number of task
-        * could use this autogroup.
-        */
        struct kref             kref;
        struct task_group       *tg;
        struct rw_semaphore     lock;
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 56f793d..bcb9d34 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2040,8 +2040,10 @@ static int rescuer_thread(void *__wq)
 repeat:
        set_current_state(TASK_INTERRUPTIBLE);
 
-       if (kthread_should_stop())
+       if (kthread_should_stop()) {
+               __set_current_state(TASK_RUNNING);
                return 0;
+       }
 
        /*
         * See whether any cpu is asking for help.  Unbounded
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 274c3cc..d86fb20 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1481,9 +1481,17 @@ int soft_offline_page(struct page *page, int flags)
 {
        int ret;
        unsigned long pfn = page_to_pfn(page);
+       struct page *hpage = compound_trans_head(page);
 
        if (PageHuge(page))
                return soft_offline_huge_page(page, flags);
+       if (PageTransHuge(hpage)) {
+               if (PageAnon(hpage) && unlikely(split_huge_page(hpage))) {
+                       pr_info("soft offline: %#lx: failed to split THP\n",
+                               pfn);
+                       return -EBUSY;
+               }
+       }
 
        ret = get_any_page(page, pfn, flags);
        if (ret < 0)
diff --git a/mm/sparse.c b/mm/sparse.c
index a8bc7d3..290dba2 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -619,7 +619,7 @@ static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
 {
        return; /* XXX: Not implemented yet */
 }
-static void free_map_bootmem(struct page *page, unsigned long nr_pages)
+static void free_map_bootmem(struct page *memmap, unsigned long nr_pages)
 {
 }
 #else
@@ -660,10 +660,11 @@ static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
                           get_order(sizeof(struct page) * nr_pages));
 }
 
-static void free_map_bootmem(struct page *page, unsigned long nr_pages)
+static void free_map_bootmem(struct page *memmap, unsigned long nr_pages)
 {
        unsigned long maps_section_nr, removing_section_nr, i;
        unsigned long magic;
+       struct page *page = virt_to_page(memmap);
 
        for (i = 0; i < nr_pages; i++, page++) {
                magic = (unsigned long) page->lru.next;
@@ -712,13 +713,10 @@ static void free_section_usemap(struct page *memmap, unsigned long *usemap)
         */
 
        if (memmap) {
-               struct page *memmap_page;
-               memmap_page = virt_to_page(memmap);
-
                nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
                        >> PAGE_SHIFT;
 
-               free_map_bootmem(memmap_page, nr_pages);
+               free_map_bootmem(memmap, nr_pages);
        }
 }
 
diff --git a/scripts/package/buildtar b/scripts/package/buildtar
index 8a7b155..d0d748e 100644
--- a/scripts/package/buildtar
+++ b/scripts/package/buildtar
@@ -109,7 +109,7 @@ esac
        if tar --owner=root --group=root --help >/dev/null 2>&1; then
                opts="--owner=root --group=root"
        fi
-       tar cf - . $opts | ${compress} > "${tarball}${file_ext}"
+       tar cf - boot/* lib/* $opts | ${compress} > "${tarball}${file_ext}"
 )
 
 echo "Tarball successfully created in ${tarball}${file_ext}"