diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index aa47be7..397ee05 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1764,6 +1764,11 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 
        noresidual      [PPC] Don't use residual data on PReP machines.
 
+       nordrand        [X86] Disable the direct use of the RDRAND
+                       instruction even if it is supported by the
+                       processor.  RDRAND is still available to user
+                       space applications.
+
        noresume        [SWSUSP] Disables resume and restores original swap
                        space.
 
diff --git a/Makefile b/Makefile
index 1cb8c1d..82f6dfe 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 3
 PATCHLEVEL = 0
-SUBLEVEL = 46
+SUBLEVEL = 47
 EXTRAVERSION =
 NAME = Sneaky Weasel
 
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 157781e..17d179c 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1260,6 +1260,16 @@ config PL310_ERRATA_769419
          on systems with an outer cache, the store buffer is drained
          explicitly.
 
+config ARM_ERRATA_775420
+       bool "ARM errata: A data cache maintenance operation which aborts, might lead to deadlock"
+       depends on CPU_V7
+       help
+        This option enables the workaround for the 775420 Cortex-A9 (r2p2,
+        r2p6,r2p8,r2p10,r3p0) erratum. In case a data cache maintenance
+        operation aborts with MMU exception, it might cause the processor
+        to deadlock. This workaround puts DSB before executing ISB if
+        an abort may occur on cache maintenance.
+
 endmenu
 
 source "arch/arm/common/Kconfig"
diff --git a/arch/arm/include/asm/vfpmacros.h b/arch/arm/include/asm/vfpmacros.h
index 3d5fc41..bf53047 100644
--- a/arch/arm/include/asm/vfpmacros.h
+++ b/arch/arm/include/asm/vfpmacros.h
@@ -28,7 +28,7 @@
        ldr     \tmp, =elf_hwcap                    @ may not have MVFR regs
        ldr     \tmp, [\tmp, #0]
        tst     \tmp, #HWCAP_VFPv3D16
-       ldceq   p11, cr0, [\base],#32*4             @ FLDMIAD \base!, {d16-d31}
+       ldceql  p11, cr0, [\base],#32*4             @ FLDMIAD \base!, {d16-d31}
        addne   \base, \base, #32*4                 @ step over unused register space
 #else
        VFPFMRX \tmp, MVFR0                         @ Media and VFP Feature Register 0
@@ -52,7 +52,7 @@
        ldr     \tmp, =elf_hwcap                    @ may not have MVFR regs
        ldr     \tmp, [\tmp, #0]
        tst     \tmp, #HWCAP_VFPv3D16
-       stceq   p11, cr0, [\base],#32*4             @ FSTMIAD \base!, {d16-d31}
+       stceql  p11, cr0, [\base],#32*4             @ FSTMIAD \base!, {d16-d31}
        addne   \base, \base, #32*4                 @ step over unused register space
 #else
        VFPFMRX \tmp, MVFR0                         @ Media and VFP Feature Register 0
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index 1ed1fd3..428b243 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -211,6 +211,9 @@ ENTRY(v7_coherent_user_range)
  * isn't mapped, just try the next page.
  */
 9001:
+#ifdef CONFIG_ARM_ERRATA_775420
+       dsb
+#endif
        mov     r12, r12, lsr #12
        mov     r12, r12, lsl #12
        add     r12, r12, #4096
diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c
index f4546e9..23817a6 100644
--- a/arch/mips/kernel/kgdb.c
+++ b/arch/mips/kernel/kgdb.c
@@ -283,6 +283,15 @@ static int kgdb_mips_notify(struct notifier_block *self, unsigned long cmd,
        struct pt_regs *regs = args->regs;
        int trap = (regs->cp0_cause & 0x7c) >> 2;
 
+#ifdef CONFIG_KPROBES
+       /*
+        * Return immediately if the kprobes fault notifier has set
+        * DIE_PAGE_FAULT.
+        */
+       if (cmd == DIE_PAGE_FAULT)
+               return NOTIFY_DONE;
+#endif /* CONFIG_KPROBES */
+
        /* Userspace events, ignore. */
        if (user_mode(regs))
                return NOTIFY_DONE;
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 37357a5..a0e9bda 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1451,6 +1451,15 @@ config ARCH_USES_PG_UNCACHED
        def_bool y
        depends on X86_PAT
 
+config ARCH_RANDOM
+       def_bool y
+       prompt "x86 architectural random number generator" if EXPERT
+       ---help---
+         Enable the x86 architectural RDRAND instruction
+         (Intel Bull Mountain technology) to generate random numbers.
+         If supported, this is a high bandwidth, cryptographically
+         secure hardware random number generator.
+
 config EFI
        bool "EFI runtime service support"
        depends on ACPI
diff --git a/arch/x86/include/asm/archrandom.h b/arch/x86/include/asm/archrandom.h
new file mode 100644
index 0000000..0d9ec77
--- /dev/null
+++ b/arch/x86/include/asm/archrandom.h
@@ -0,0 +1,75 @@
+/*
+ * This file is part of the Linux kernel.
+ *
+ * Copyright (c) 2011, Intel Corporation
+ * Authors: Fenghua Yu <fenghua...@intel.com>,
+ *          H. Peter Anvin <h...@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#ifndef ASM_X86_ARCHRANDOM_H
+#define ASM_X86_ARCHRANDOM_H
+
+#include <asm/processor.h>
+#include <asm/cpufeature.h>
+#include <asm/alternative.h>
+#include <asm/nops.h>
+
+#define RDRAND_RETRY_LOOPS     10
+
+#define RDRAND_INT     ".byte 0x0f,0xc7,0xf0"
+#ifdef CONFIG_X86_64
+# define RDRAND_LONG   ".byte 0x48,0x0f,0xc7,0xf0"
+#else
+# define RDRAND_LONG   RDRAND_INT
+#endif
+
+#ifdef CONFIG_ARCH_RANDOM
+
+#define GET_RANDOM(name, type, rdrand, nop)                    \
+static inline int name(type *v)                                        \
+{                                                              \
+       int ok;                                                 \
+       alternative_io("movl $0, %0\n\t"                        \
+                      nop,                                     \
+                      "\n1: " rdrand "\n\t"                    \
+                      "jc 2f\n\t"                              \
+                      "decl %0\n\t"                            \
+                      "jnz 1b\n\t"                             \
+                      "2:",                                    \
+                      X86_FEATURE_RDRAND,                      \
+                      ASM_OUTPUT2("=r" (ok), "=a" (*v)),       \
+                      "0" (RDRAND_RETRY_LOOPS));               \
+       return ok;                                              \
+}
+
+#ifdef CONFIG_X86_64
+
+GET_RANDOM(arch_get_random_long, unsigned long, RDRAND_LONG, ASM_NOP5);
+GET_RANDOM(arch_get_random_int, unsigned int, RDRAND_INT, ASM_NOP4);
+
+#else
+
+GET_RANDOM(arch_get_random_long, unsigned long, RDRAND_LONG, ASM_NOP3);
+GET_RANDOM(arch_get_random_int, unsigned int, RDRAND_INT, ASM_NOP3);
+
+#endif /* CONFIG_X86_64 */
+
+#endif  /* CONFIG_ARCH_RANDOM */
+
+extern void x86_init_rdrand(struct cpuinfo_x86 *c);
+
+#endif /* ASM_X86_ARCHRANDOM_H */
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index 6042981..0e3a82a 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -15,6 +15,7 @@ CFLAGS_common.o               := $(nostackp)
 obj-y                  := intel_cacheinfo.o scattered.o topology.o
 obj-y                  += proc.o capflags.o powerflags.o common.o
 obj-y                  += vmware.o hypervisor.o sched.o mshyperv.o
+obj-y                  += rdrand.o
 
 obj-$(CONFIG_X86_32)   += bugs.o
 obj-$(CONFIG_X86_64)   += bugs_64.o
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 0cb2883..1579ab9 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -15,6 +15,7 @@
 #include <asm/stackprotector.h>
 #include <asm/perf_event.h>
 #include <asm/mmu_context.h>
+#include <asm/archrandom.h>
 #include <asm/hypervisor.h>
 #include <asm/processor.h>
 #include <asm/sections.h>
@@ -852,6 +853,7 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 #endif
 
        init_hypervisor(c);
+       x86_init_rdrand(c);
 
        /*
         * Clear/Set all flags overriden by options, need do it
diff --git a/arch/x86/kernel/cpu/rdrand.c b/arch/x86/kernel/cpu/rdrand.c
new file mode 100644
index 0000000..feca286
--- /dev/null
+++ b/arch/x86/kernel/cpu/rdrand.c
@@ -0,0 +1,73 @@
+/*
+ * This file is part of the Linux kernel.
+ *
+ * Copyright (c) 2011, Intel Corporation
+ * Authors: Fenghua Yu <fenghua...@intel.com>,
+ *          H. Peter Anvin <h...@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#include <asm/processor.h>
+#include <asm/archrandom.h>
+#include <asm/sections.h>
+
+static int __init x86_rdrand_setup(char *s)
+{
+       setup_clear_cpu_cap(X86_FEATURE_RDRAND);
+       return 1;
+}
+__setup("nordrand", x86_rdrand_setup);
+
+/* We can't use arch_get_random_long() here since alternatives haven't run */
+static inline int rdrand_long(unsigned long *v)
+{
+       int ok;
+       asm volatile("1: " RDRAND_LONG "\n\t"
+                    "jc 2f\n\t"
+                    "decl %0\n\t"
+                    "jnz 1b\n\t"
+                    "2:"
+                    : "=r" (ok), "=a" (*v)
+                    : "0" (RDRAND_RETRY_LOOPS));
+       return ok;
+}
+
+/*
+ * Force a reseed cycle; we are architecturally guaranteed a reseed
+ * after no more than 512 128-bit chunks of random data.  This also
+ * acts as a test of the CPU capability.
+ */
+#define RESEED_LOOP ((512*128)/sizeof(unsigned long))
+
+void __cpuinit x86_init_rdrand(struct cpuinfo_x86 *c)
+{
+#ifdef CONFIG_ARCH_RANDOM
+       unsigned long tmp;
+       int i, count, ok;
+
+       if (!cpu_has(c, X86_FEATURE_RDRAND))
+               return;         /* Nothing to do */
+
+       for (count = i = 0; i < RESEED_LOOP; i++) {
+               ok = rdrand_long(&tmp);
+               if (ok)
+                       count++;
+       }
+
+       if (count != RESEED_LOOP)
+               clear_cpu_cap(c, X86_FEATURE_RDRAND);
+#endif
+}
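
Not part of the patch, but perhaps useful alongside it: the nordrand documentation above notes that RDRAND stays usable from user space, and the retry pattern there is the same one the GET_RANDOM() macro and rdrand_long() implement. A minimal, hypothetical user-space sketch follows (the file and function names, the retry constant and the CPUID helper are illustrative assumptions, not anything the patch adds; it assumes an x86 GCC/Clang toolchain):

    /* rdrand_demo.c - illustrative only, not part of this patch */
    #include <stdio.h>
    #include <cpuid.h>

    static int rdrand32(unsigned int *v)
    {
        unsigned char ok;
        int retries = 10;   /* mirrors RDRAND_RETRY_LOOPS in the patch */

        while (retries--) {
            /* 0x0f 0xc7 0xf0 is rdrand %eax; CF=1 means a value was returned */
            asm volatile(".byte 0x0f,0xc7,0xf0\n\tsetc %1"
                         : "=a" (*v), "=qm" (ok) : : "cc");
            if (ok)
                return 1;
        }
        return 0;           /* retries exhausted */
    }

    int main(void)
    {
        unsigned int a, b, c, d, r;

        /* CPUID leaf 1, ECX bit 30 advertises RDRAND support */
        if (!__get_cpuid(1, &a, &b, &c, &d) || !(c & (1u << 30))) {
            fprintf(stderr, "RDRAND not supported on this CPU\n");
            return 1;
        }
        if (rdrand32(&r))
            printf("rdrand: 0x%08x\n", r);
        return 0;
    }

Booting with the new "nordrand" parameter only disables the kernel's own use of the instruction; a program like the above still sees the CPUID bit and can execute RDRAND directly.
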
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 8385d1d..9f808af 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -803,7 +803,16 @@ static void xen_write_cr4(unsigned long cr4)
 
        native_write_cr4(cr4);
 }
-
+#ifdef CONFIG_X86_64
+static inline unsigned long xen_read_cr8(void)
+{
+       return 0;
+}
+static inline void xen_write_cr8(unsigned long val)
+{
+       BUG_ON(val);
+}
+#endif
 static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high)
 {
        int ret;
@@ -968,6 +977,11 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = {
        .read_cr4_safe = native_read_cr4_safe,
        .write_cr4 = xen_write_cr4,
 
+#ifdef CONFIG_X86_64
+       .read_cr8 = xen_read_cr8,
+       .write_cr8 = xen_write_cr8,
+#endif
+
        .wbinvd = native_wbinvd,
 
        .read_msr = native_read_msr_safe,
@@ -975,6 +989,8 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = {
        .read_tsc = native_read_tsc,
        .read_pmc = native_read_pmc,
 
+       .read_tscp = native_read_tscp,
+
        .iret = xen_iret,
        .irq_enable_sysexit = xen_sysexit,
 #ifdef CONFIG_X86_64
diff --git a/block/blk-core.c b/block/blk-core.c
index 35ae52d..2f49f43 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -524,7 +524,7 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
        q->request_fn           = rfn;
        q->prep_rq_fn           = NULL;
        q->unprep_rq_fn         = NULL;
-       q->queue_flags          = QUEUE_FLAG_DEFAULT;
+       q->queue_flags          |= QUEUE_FLAG_DEFAULT;
 
        /* Override internal queue lock with supplied lock pointer */
        if (lock)
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index b19a18d..d2519b2 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -71,9 +71,6 @@ enum ec_command {
 #define ACPI_EC_UDELAY_GLK     1000    /* Wait 1ms max. to get global lock */
 #define ACPI_EC_MSI_UDELAY     550     /* Wait 550us for MSI EC */
 
-#define ACPI_EC_STORM_THRESHOLD 8      /* number of false interrupts
-                                          per one transaction */
-
 enum {
        EC_FLAGS_QUERY_PENDING,         /* Query is pending */
        EC_FLAGS_GPE_STORM,             /* GPE storm detected */
@@ -87,6 +84,15 @@ static unsigned int ec_delay __read_mostly = ACPI_EC_DELAY;
 module_param(ec_delay, uint, 0644);
 MODULE_PARM_DESC(ec_delay, "Timeout(ms) waited until an EC command completes");
 
+/*
+ * If the number of false interrupts per one transaction exceeds
+ * this threshold, will think there is a GPE storm happened and
+ * will disable the GPE for normal transaction.
+ */
+static unsigned int ec_storm_threshold  __read_mostly = 8;
+module_param(ec_storm_threshold, uint, 0644);
+MODULE_PARM_DESC(ec_storm_threshold, "Maximum false GPE numbers not considered as GPE storm");
+
 /* If we find an EC via the ECDT, we need to keep a ptr to its context */
 /* External interfaces use first EC only, so remember */
 typedef int (*acpi_ec_query_func) (void *data);
@@ -319,7 +325,7 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
                msleep(1);
                /* It is safe to enable the GPE outside of the transaction. */
                acpi_enable_gpe(NULL, ec->gpe);
-       } else if (t->irq_count > ACPI_EC_STORM_THRESHOLD) {
+       } else if (t->irq_count > ec_storm_threshold) {
                pr_info(PREFIX "GPE storm detected, "
                        "transactions will use polling mode\n");
                set_bit(EC_FLAGS_GPE_STORM, &ec->flags);
@@ -914,6 +920,17 @@ static int ec_flag_msi(const struct dmi_system_id *id)
        return 0;
 }
 
+/*
+ * Clevo M720 notebook actually works ok with IRQ mode, if we lifted
+ * the GPE storm threshold back to 20
+ */
+static int ec_enlarge_storm_threshold(const struct dmi_system_id *id)
+{
+       pr_debug("Setting the EC GPE storm threshold to 20\n");
+       ec_storm_threshold  = 20;
+       return 0;
+}
+
 static struct dmi_system_id __initdata ec_dmi_table[] = {
        {
        ec_skip_dsdt_scan, "Compal JFL92", {
@@ -945,10 +962,13 @@ static struct dmi_system_id __initdata ec_dmi_table[] = {
        {
        ec_validate_ecdt, "ASUS hardware", {
        DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer Inc.") }, NULL},
+       {
+       ec_enlarge_storm_threshold, "CLEVO hardware", {
+       DMI_MATCH(DMI_SYS_VENDOR, "CLEVO Co."),
+       DMI_MATCH(DMI_PRODUCT_NAME, "M720T/M730T"),}, NULL},
        {},
 };
 
-
 int __init acpi_ec_ecdt_probe(void)
 {
        acpi_status status;
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
index b85ee76..65b9d6f 100644
--- a/drivers/char/tpm/tpm.c
+++ b/drivers/char/tpm/tpm.c
@@ -1019,17 +1019,20 @@ ssize_t tpm_write(struct file *file, const char __user *buf,
                  size_t size, loff_t *off)
 {
        struct tpm_chip *chip = file->private_data;
-       size_t in_size = size, out_size;
+       size_t in_size = size;
+       ssize_t out_size;
 
        /* cannot perform a write until the read has cleared
-          either via tpm_read or a user_read_timer timeout */
-       while (atomic_read(&chip->data_pending) != 0)
-               msleep(TPM_TIMEOUT);
-
-       mutex_lock(&chip->buffer_mutex);
+          either via tpm_read or a user_read_timer timeout.
+          This also prevents splitted buffered writes from blocking here.
+       */
+       if (atomic_read(&chip->data_pending) != 0)
+               return -EBUSY;
 
        if (in_size > TPM_BUFSIZE)
-               in_size = TPM_BUFSIZE;
+               return -E2BIG;
+
+       mutex_lock(&chip->buffer_mutex);
 
        if (copy_from_user
            (chip->data_buffer, (void __user *) buf, in_size)) {
@@ -1039,6 +1042,10 @@ ssize_t tpm_write(struct file *file, const char __user *buf,
 
        /* atomic tpm command send and result receive */
        out_size = tpm_transmit(chip, chip->data_buffer, TPM_BUFSIZE);
+       if (out_size < 0) {
+               mutex_unlock(&chip->buffer_mutex);
+               return out_size;
+       }
 
        atomic_set(&chip->data_pending, out_size);
        mutex_unlock(&chip->buffer_mutex);
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index 4799393..b97d4f0 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -471,8 +471,8 @@ static int ioctl_get_info(struct client *client, union ioctl_arg *arg)
        client->bus_reset_closure = a->bus_reset_closure;
        if (a->bus_reset != 0) {
                fill_bus_reset_event(&bus_reset, client);
-               ret = copy_to_user(u64_to_uptr(a->bus_reset),
-                                  &bus_reset, sizeof(bus_reset));
+               /* unaligned size of bus_reset is 36 bytes */
+               ret = copy_to_user(u64_to_uptr(a->bus_reset), &bus_reset, 36);
        }
        if (ret == 0 && list_empty(&client->link))
                list_add_tail(&client->link, &client->device->client_list);
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index 2f46e0c..3ad3cc6 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -973,11 +973,7 @@ static void radeon_legacy_tmds_ext_mode_set(struct drm_encoder *encoder,
 static void radeon_ext_tmds_enc_destroy(struct drm_encoder *encoder)
 {
        struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
-       struct radeon_encoder_ext_tmds *tmds = radeon_encoder->enc_priv;
-       if (tmds) {
-               if (tmds->i2c_bus)
-                       radeon_i2c_destroy(tmds->i2c_bus);
-       }
+       /* don't destroy the i2c bus record here, this will be done in radeon_i2c_fini */
        kfree(radeon_encoder->enc_priv);
        drm_encoder_cleanup(encoder);
        kfree(radeon_encoder);
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 85931ca..10a99e4 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -13689,8 +13689,9 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
         */
        tg3_flag_set(tp, 4G_DMA_BNDRY_BUG);
 
-       if (tg3_flag(tp, 5755_PLUS))
-               tg3_flag_set(tp, SHORT_DMA_BUG);
+       if (tg3_flag(tp, 5755_PLUS) ||
+               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+                       tg3_flag_set(tp, SHORT_DMA_BUG);
        else
                tg3_flag_set(tp, 40BIT_DMA_LIMIT_BUG);
 
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index de80171..3b2ce7d 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1496,6 +1496,9 @@ static const struct usb_device_id acm_ids[] = {
                                           Maybe we should define a new
                                           quirk for this. */
        },
+       { USB_DEVICE(0x0572, 0x1340), /* Conexant CX93010-2x UCMxx */
+       .driver_info = NO_UNION_NORMAL,
+       },
        { USB_DEVICE(0x1bbb, 0x0003), /* Alcatel OT-I650 */
        .driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
        },
diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
index 415e9b2..6a7725a 100644
--- a/drivers/video/udlfb.c
+++ b/drivers/video/udlfb.c
@@ -613,7 +613,7 @@ static ssize_t dlfb_ops_write(struct fb_info *info, const char __user *buf,
        result = fb_sys_write(info, buf, count, ppos);
 
        if (result > 0) {
-               int start = max((int)(offset / info->fix.line_length) - 1, 0);
+               int start = max((int)(offset / info->fix.line_length), 0);
                int lines = min((u32)((result / info->fix.line_length) + 1),
                                (u32)info->var.yres);
 
diff --git a/drivers/video/via/via_clock.c b/drivers/video/via/via_clock.c
index af8f26b..db1e392 100644
--- a/drivers/video/via/via_clock.c
+++ b/drivers/video/via/via_clock.c
@@ -25,6 +25,7 @@
 
 #include <linux/kernel.h>
 #include <linux/via-core.h>
+#include <asm/olpc.h>
 #include "via_clock.h"
 #include "global.h"
 #include "debug.h"
@@ -289,6 +290,10 @@ static void dummy_set_pll(struct via_pll_config config)
        printk(KERN_INFO "Using undocumented set PLL.\n%s", via_slap);
 }
 
+static void noop_set_clock_state(u8 state)
+{
+}
+
 void via_clock_init(struct via_clock *clock, int gfx_chip)
 {
        switch (gfx_chip) {
@@ -346,4 +351,18 @@ void via_clock_init(struct via_clock *clock, int gfx_chip)
                break;
 
        }
+
+       if (machine_is_olpc()) {
+               /* The OLPC XO-1.5 cannot suspend/resume reliably if the
+                * IGA1/IGA2 clocks are set as on or off (memory rot
+                * occasionally happens during suspend under such
+                * configurations).
+                *
+                * The only known stable scenario is to leave these bits as-is,
+                * which in their default states are documented to enable the
+                * clock only when it is needed.
+                */
+               clock->set_primary_clock_state = noop_set_clock_state;
+               clock->set_secondary_clock_state = noop_set_clock_state;
+       }
 }
diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
index f55ae23..790fa63 100644
--- a/fs/autofs4/root.c
+++ b/fs/autofs4/root.c
@@ -392,10 +392,12 @@ static struct vfsmount *autofs4_d_automount(struct path *path)
                ino->flags |= AUTOFS_INF_PENDING;
                spin_unlock(&sbi->fs_lock);
                status = autofs4_mount_wait(dentry);
-               if (status)
-                       return ERR_PTR(status);
                spin_lock(&sbi->fs_lock);
                ino->flags &= ~AUTOFS_INF_PENDING;
+               if (status) {
+                       spin_unlock(&sbi->fs_lock);
+                       return ERR_PTR(status);
+               }
        }
 done:
        if (!(ino->flags & AUTOFS_INF_EXPIRING)) {
diff --git a/fs/ceph/export.c b/fs/ceph/export.c
index f67b687..a080779 100644
--- a/fs/ceph/export.c
+++ b/fs/ceph/export.c
@@ -84,7 +84,7 @@ static int ceph_encode_fh(struct dentry *dentry, u32 *rawfh, int *max_len,
  * FIXME: we should try harder by querying the mds for the ino.
  */
 static struct dentry *__fh_to_dentry(struct super_block *sb,
-                                    struct ceph_nfs_fh *fh)
+                                    struct ceph_nfs_fh *fh, int fh_len)
 {
        struct ceph_mds_client *mdsc = ceph_sb_to_client(sb)->mdsc;
        struct inode *inode;
@@ -92,6 +92,9 @@ static struct dentry *__fh_to_dentry(struct super_block *sb,
        struct ceph_vino vino;
        int err;
 
+       if (fh_len < sizeof(*fh) / 4)
+               return ERR_PTR(-ESTALE);
+
        dout("__fh_to_dentry %llx\n", fh->ino);
        vino.ino = fh->ino;
        vino.snap = CEPH_NOSNAP;
@@ -136,7 +139,7 @@ static struct dentry *__fh_to_dentry(struct super_block *sb,
  * convert connectable fh to dentry
  */
 static struct dentry *__cfh_to_dentry(struct super_block *sb,
-                                     struct ceph_nfs_confh *cfh)
+                                     struct ceph_nfs_confh *cfh, int fh_len)
 {
        struct ceph_mds_client *mdsc = ceph_sb_to_client(sb)->mdsc;
        struct inode *inode;
@@ -144,6 +147,9 @@ static struct dentry *__cfh_to_dentry(struct super_block *sb,
        struct ceph_vino vino;
        int err;
 
+       if (fh_len < sizeof(*cfh) / 4)
+               return ERR_PTR(-ESTALE);
+
        dout("__cfh_to_dentry %llx (%llx/%x)\n",
             cfh->ino, cfh->parent_ino, cfh->parent_name_hash);
 
@@ -193,9 +199,11 @@ static struct dentry *ceph_fh_to_dentry(struct super_block *sb, struct fid *fid,
                                        int fh_len, int fh_type)
 {
        if (fh_type == 1)
-               return __fh_to_dentry(sb, (struct ceph_nfs_fh *)fid->raw);
+               return __fh_to_dentry(sb, (struct ceph_nfs_fh *)fid->raw,
+                                                               fh_len);
        else
-               return __cfh_to_dentry(sb, (struct ceph_nfs_confh *)fid->raw);
+               return __cfh_to_dentry(sb, (struct ceph_nfs_confh *)fid->raw,
+                                                               fh_len);
 }
 
 /*
@@ -216,6 +224,8 @@ static struct dentry *ceph_fh_to_parent(struct super_block *sb,
 
        if (fh_type == 1)
                return ERR_PTR(-ESTALE);
+       if (fh_len < sizeof(*cfh) / 4)
+               return ERR_PTR(-ESTALE);
 
        pr_debug("fh_to_parent %llx/%d\n", cfh->parent_ino,
                 cfh->parent_name_hash);
diff --git a/fs/gfs2/export.c b/fs/gfs2/export.c
index fe9945f..5235d6e 100644
--- a/fs/gfs2/export.c
+++ b/fs/gfs2/export.c
@@ -167,6 +167,8 @@ static struct dentry *gfs2_fh_to_dentry(struct super_block *sb, struct fid *fid,
        case GFS2_SMALL_FH_SIZE:
        case GFS2_LARGE_FH_SIZE:
        case GFS2_OLD_FH_SIZE:
+               if (fh_len < GFS2_SMALL_FH_SIZE)
+                       return NULL;
                this.no_formal_ino = ((u64)be32_to_cpu(fh[0])) << 32;
                this.no_formal_ino |= be32_to_cpu(fh[1]);
                this.no_addr = ((u64)be32_to_cpu(fh[2])) << 32;
@@ -186,6 +188,8 @@ static struct dentry *gfs2_fh_to_parent(struct super_block *sb, struct fid *fid,
        switch (fh_type) {
        case GFS2_LARGE_FH_SIZE:
        case GFS2_OLD_FH_SIZE:
+               if (fh_len < GFS2_LARGE_FH_SIZE)
+                       return NULL;
                parent.no_formal_ino = ((u64)be32_to_cpu(fh[4])) << 32;
                parent.no_formal_ino |= be32_to_cpu(fh[5]);
                parent.no_addr = ((u64)be32_to_cpu(fh[6])) << 32;
diff --git a/fs/isofs/export.c b/fs/isofs/export.c
index dd4687f..516eb21 100644
--- a/fs/isofs/export.c
+++ b/fs/isofs/export.c
@@ -179,7 +179,7 @@ static struct dentry *isofs_fh_to_parent(struct super_block *sb,
 {
        struct isofs_fid *ifid = (struct isofs_fid *)fid;
 
-       if (fh_type != 2)
+       if (fh_len < 2 || fh_type != 2)
                return NULL;
 
        return isofs_export_iget(sb,
diff --git a/fs/jbd/commit.c b/fs/jbd/commit.c
index 72ffa97..dcd23f8 100644
--- a/fs/jbd/commit.c
+++ b/fs/jbd/commit.c
@@ -85,7 +85,12 @@ nope:
 static void release_data_buffer(struct buffer_head *bh)
 {
        if (buffer_freed(bh)) {
+               WARN_ON_ONCE(buffer_dirty(bh));
                clear_buffer_freed(bh);
+               clear_buffer_mapped(bh);
+               clear_buffer_new(bh);
+               clear_buffer_req(bh);
+               bh->b_bdev = NULL;
                release_buffer_page(bh);
        } else
                put_bh(bh);
@@ -840,17 +845,35 @@ restart_loop:
                 * there's no point in keeping a checkpoint record for
                 * it. */
 
-               /* A buffer which has been freed while still being
-                * journaled by a previous transaction may end up still
-                * being dirty here, but we want to avoid writing back
-                * that buffer in the future after the "add to orphan"
-                * operation been committed,  That's not only a performance
-                * gain, it also stops aliasing problems if the buffer is
-                * left behind for writeback and gets reallocated for another
-                * use in a different page. */
-               if (buffer_freed(bh) && !jh->b_next_transaction) {
-                       clear_buffer_freed(bh);
-                       clear_buffer_jbddirty(bh);
+               /*
+                * A buffer which has been freed while still being journaled by
+                * a previous transaction.
+                */
+               if (buffer_freed(bh)) {
+                       /*
+                        * If the running transaction is the one containing
+                        * "add to orphan" operation (b_next_transaction !=
+                        * NULL), we have to wait for that transaction to
+                        * commit before we can really get rid of the buffer.
+                        * So just clear b_modified to not confuse transaction
+                        * credit accounting and refile the buffer to
+                        * BJ_Forget of the running transaction. If the just
+                        * committed transaction contains "add to orphan"
+                        * operation, we can completely invalidate the buffer
+                        * now. We are rather thorough in that since the
+                        * buffer may be still accessible when blocksize <
+                        * pagesize and it is attached to the last partial
+                        * page.
+                        */
+                       jh->b_modified = 0;
+                       if (!jh->b_next_transaction) {
+                               clear_buffer_freed(bh);
+                               clear_buffer_jbddirty(bh);
+                               clear_buffer_mapped(bh);
+                               clear_buffer_new(bh);
+                               clear_buffer_req(bh);
+                               bh->b_bdev = NULL;
+                       }
                }
 
                if (buffer_jbddirty(bh)) {
diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c
index f7ee81a..b0161a6 100644
--- a/fs/jbd/transaction.c
+++ b/fs/jbd/transaction.c
@@ -1837,15 +1837,16 @@ static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction)
  * We're outside-transaction here.  Either or both of j_running_transaction
  * and j_committing_transaction may be NULL.
  */
-static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
+static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh,
+                               int partial_page)
 {
        transaction_t *transaction;
        struct journal_head *jh;
        int may_free = 1;
-       int ret;
 
        BUFFER_TRACE(bh, "entry");
 
+retry:
        /*
         * It is safe to proceed here without the j_list_lock because the
         * buffers cannot be stolen by try_to_free_buffers as long as we are
@@ -1873,10 +1874,18 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
         * clear the buffer dirty bit at latest at the moment when the
         * transaction marking the buffer as freed in the filesystem
         * structures is committed because from that moment on the
-        * buffer can be reallocated and used by a different page.
+        * block can be reallocated and used by a different page.
         * Since the block hasn't been freed yet but the inode has
         * already been added to orphan list, it is safe for us to add
         * the buffer to BJ_Forget list of the newest transaction.
+        *
+        * Also we have to clear buffer_mapped flag of a truncated buffer
+        * because the buffer_head may be attached to the page straddling
+        * i_size (can happen only when blocksize < pagesize) and thus the
+        * buffer_head can be reused when the file is extended again. So we end
+        * up keeping around invalidated buffers attached to transactions'
+        * BJ_Forget list just to stop checkpointing code from cleaning up
+        * the transaction this buffer was modified in.
         */
        transaction = jh->b_transaction;
        if (transaction == NULL) {
@@ -1903,13 +1912,9 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
                         * committed, the buffer won't be needed any
                         * longer. */
                        JBUFFER_TRACE(jh, "checkpointed: add to BJ_Forget");
-                       ret = __dispose_buffer(jh,
+                       may_free = __dispose_buffer(jh,
                                        journal->j_running_transaction);
-                       journal_put_journal_head(jh);
-                       spin_unlock(&journal->j_list_lock);
-                       jbd_unlock_bh_state(bh);
-                       spin_unlock(&journal->j_state_lock);
-                       return ret;
+                       goto zap_buffer;
                } else {
                        /* There is no currently-running transaction. So the
                         * orphan record which we wrote for this file must have
@@ -1917,13 +1922,9 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
                         * the committing transaction, if it exists. */
                        if (journal->j_committing_transaction) {
                                JBUFFER_TRACE(jh, "give to committing trans");
-                               ret = __dispose_buffer(jh,
+                               may_free = __dispose_buffer(jh,
                                        journal->j_committing_transaction);
-                               journal_put_journal_head(jh);
-                               spin_unlock(&journal->j_list_lock);
-                               jbd_unlock_bh_state(bh);
-                               spin_unlock(&journal->j_state_lock);
-                               return ret;
+                               goto zap_buffer;
                        } else {
                                /* The orphan record's transaction has
                                 * committed.  We can cleanse this buffer */
@@ -1944,10 +1945,24 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
                }
                /*
                 * The buffer is committing, we simply cannot touch
-                * it. So we just set j_next_transaction to the
-                * running transaction (if there is one) and mark
-                * buffer as freed so that commit code knows it should
-                * clear dirty bits when it is done with the buffer.
+                * it. If the page is straddling i_size we have to wait
+                * for commit and try again.
+                */
+               if (partial_page) {
+                       tid_t tid = journal->j_committing_transaction->t_tid;
+
+                       journal_put_journal_head(jh);
+                       spin_unlock(&journal->j_list_lock);
+                       jbd_unlock_bh_state(bh);
+                       spin_unlock(&journal->j_state_lock);
+                       log_wait_commit(journal, tid);
+                       goto retry;
+               }
+               /*
+                * OK, buffer won't be reachable after truncate. We just set
+                * j_next_transaction to the running transaction (if there is
+                * one) and mark buffer as freed so that commit code knows it
+                * should clear dirty bits when it is done with the buffer.
                 */
                set_buffer_freed(bh);
                if (journal->j_running_transaction && buffer_jbddirty(bh))
@@ -1970,6 +1985,14 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
        }
 
 zap_buffer:
+       /*
+        * This is tricky. Although the buffer is truncated, it may be reused
+        * if blocksize < pagesize and it is attached to the page straddling
+        * EOF. Since the buffer might have been added to BJ_Forget list of the
+        * running transaction, journal_get_write_access() won't clear
+        * b_modified and credit accounting gets confused. So clear b_modified
+        * here. */
+       jh->b_modified = 0;
        journal_put_journal_head(jh);
 zap_buffer_no_jh:
        spin_unlock(&journal->j_list_lock);
@@ -2018,7 +2041,8 @@ void journal_invalidatepage(journal_t *journal,
                if (offset <= curr_off) {
                        /* This block is wholly outside the truncation point */
                        lock_buffer(bh);
-                       may_free &= journal_unmap_buffer(journal, bh);
+                       may_free &= journal_unmap_buffer(journal, bh,
+                                                        offset > 0);
                        unlock_buffer(bh);
                }
                curr_off = next_off;
diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c
index 23d7451..df753a1 100644
--- a/fs/lockd/mon.c
+++ b/fs/lockd/mon.c
@@ -40,6 +40,7 @@ struct nsm_args {
        u32                     proc;
 
        char                    *mon_name;
+       char                    *nodename;
 };
 
 struct nsm_res {
@@ -93,6 +94,7 @@ static int nsm_mon_unmon(struct nsm_handle *nsm, u32 proc, struct nsm_res *res)
                .vers           = 3,
                .proc           = NLMPROC_NSM_NOTIFY,
                .mon_name       = nsm->sm_mon_name,
+               .nodename       = utsname()->nodename,
        };
        struct rpc_message msg = {
                .rpc_argp       = &args,
@@ -429,7 +431,7 @@ static void encode_my_id(struct xdr_stream *xdr, const struct nsm_args *argp)
 {
        __be32 *p;
 
-       encode_nsm_string(xdr, utsname()->nodename);
+       encode_nsm_string(xdr, argp->nodename);
        p = xdr_reserve_space(xdr, 4 + 4 + 4);
        *p++ = cpu_to_be32(argp->prog);
        *p++ = cpu_to_be32(argp->vers);
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index 4fd5bb3..0363aa4 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -1568,8 +1568,10 @@ struct dentry *reiserfs_fh_to_dentry(struct super_block *sb, struct fid *fid,
                        reiserfs_warning(sb, "reiserfs-13077",
                                "nfsd/reiserfs, fhtype=%d, len=%d - odd",
                                fh_type, fh_len);
-               fh_type = 5;
+               fh_type = fh_len;
        }
+       if (fh_len < 2)
+               return NULL;
 
        return reiserfs_get_dentry(sb, fid->raw[0], fid->raw[1],
                (fh_type == 3 || fh_type >= 5) ? fid->raw[2] : 0);
@@ -1578,6 +1580,8 @@ struct dentry *reiserfs_fh_to_dentry(struct super_block *sb, struct fid *fid,
 struct dentry *reiserfs_fh_to_parent(struct super_block *sb, struct fid *fid,
                int fh_len, int fh_type)
 {
+       if (fh_type > fh_len)
+               fh_type = fh_len;
        if (fh_type < 4)
                return NULL;
 
diff --git a/fs/udf/super.c b/fs/udf/super.c
index a8e867a..b0c7b53 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -1316,6 +1316,7 @@ static int udf_load_logicalvol(struct super_block *sb, sector_t block,
                udf_error(sb, __func__, "error loading logical volume descriptor: "
                        "Partition table too long (%u > %lu)\n", table_len,
                        sb->s_blocksize - sizeof(*lvd));
+               ret = 1;
                goto out_bh;
        }
 
@@ -1360,8 +1361,10 @@ static int udf_load_logicalvol(struct super_block *sb, sector_t block,
                                                UDF_ID_SPARABLE,
                                                strlen(UDF_ID_SPARABLE))) {
                                if (udf_load_sparable_map(sb, map,
-                                   (struct sparablePartitionMap *)gpm) < 0)
+                                   (struct sparablePartitionMap *)gpm) < 0) {
+                                       ret = 1;
                                        goto out_bh;
+                               }
                        } else if (!strncmp(upm2->partIdent.ident,
                                                UDF_ID_METADATA,
                                                strlen(UDF_ID_METADATA))) {
diff --git a/fs/xfs/linux-2.6/xfs_export.c b/fs/xfs/linux-2.6/xfs_export.c
index fed3f3c..844b22b 100644
--- a/fs/xfs/linux-2.6/xfs_export.c
+++ b/fs/xfs/linux-2.6/xfs_export.c
@@ -195,6 +195,9 @@ xfs_fs_fh_to_parent(struct super_block *sb, struct fid *fid,
        struct xfs_fid64        *fid64 = (struct xfs_fid64 *)fid;
        struct inode            *inode = NULL;
 
+       if (fh_len < xfs_fileid_length(fileid_type))
+               return NULL;
+
        switch (fileid_type) {
        case FILEID_INO32_GEN_PARENT:
                inode = xfs_nfs_get_inode(sb, fid->i32.parent_ino,
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index 481f856..15b62bb 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -1361,7 +1361,7 @@ static inline void ip_vs_notrack(struct sk_buff *skb)
        struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
 
        if (!ct || !nf_ct_is_untracked(ct)) {
-               nf_reset(skb);
+               nf_conntrack_put(skb->nfct);
                skb->nfct = &nf_ct_untracked_get()->ct_general;
                skb->nfctinfo = IP_CT_NEW;
                nf_conntrack_get(skb->nfct);
diff --git a/include/net/netfilter/nf_conntrack_ecache.h b/include/net/netfilter/nf_conntrack_ecache.h
index 4283508..3a0feb1 100644
--- a/include/net/netfilter/nf_conntrack_ecache.h
+++ b/include/net/netfilter/nf_conntrack_ecache.h
@@ -18,6 +18,7 @@ struct nf_conntrack_ecache {
        u16 ctmask;             /* bitmask of ct events to be delivered */
        u16 expmask;            /* bitmask of expect events to be delivered */
        u32 pid;                /* netlink pid of destroyer */
+       struct timer_list timeout;
 };
 
 static inline struct nf_conntrack_ecache *
diff --git a/kernel/module.c b/kernel/module.c
index b9d0667..a8bd215 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -2605,6 +2605,10 @@ static int check_module_license_and_versions(struct module *mod)
        if (strcmp(mod->name, "driverloader") == 0)
                add_taint_module(mod, TAINT_PROPRIETARY_MODULE);
 
+       /* lve claims to be GPL but upstream won't provide source */
+       if (strcmp(mod->name, "lve") == 0)
+               add_taint_module(mod, TAINT_PROPRIETARY_MODULE);
+
 #ifdef CONFIG_MODVERSIONS
        if ((mod->num_syms && !mod->crcs)
            || (mod->num_gpl_syms && !mod->gpl_crcs)
diff --git a/kernel/timer.c b/kernel/timer.c
index 8cff361..27982d9 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -63,6 +63,7 @@ EXPORT_SYMBOL(jiffies_64);
 #define TVR_SIZE (1 << TVR_BITS)
 #define TVN_MASK (TVN_SIZE - 1)
 #define TVR_MASK (TVR_SIZE - 1)
+#define MAX_TVAL ((unsigned long)((1ULL << (TVR_BITS + 4*TVN_BITS)) - 1))
 
 struct tvec {
        struct list_head vec[TVN_SIZE];
@@ -356,11 +357,12 @@ static void internal_add_timer(struct tvec_base *base, struct timer_list *timer)
                vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK);
        } else {
                int i;
-               /* If the timeout is larger than 0xffffffff on 64-bit
-                * architectures then we use the maximum timeout:
+               /* If the timeout is larger than MAX_TVAL (on 64-bit
+                * architectures or with CONFIG_BASE_SMALL=1) then we
+                * use the maximum timeout.
                 */
-               if (idx > 0xffffffffUL) {
-                       idx = 0xffffffffUL;
+               if (idx > MAX_TVAL) {
+                       idx = MAX_TVAL;
                        expires = idx + base->timer_jiffies;
                }
                i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
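
An aside, not from the patch: with the default wheel geometry in kernel/timer.c (TVR_BITS=8, TVN_BITS=6) the new MAX_TVAL works out to the old hard-coded 0xffffffff, while CONFIG_BASE_SMALL=1 (TVR_BITS=6, TVN_BITS=4) shrinks it to 0x3fffff, which is the case the old constant got wrong. A stand-alone sketch of the arithmetic, purely illustrative:

    #include <stdio.h>

    int main(void)
    {
        /* default geometry: TVR_BITS=8, TVN_BITS=6 -> 2^32 - 1 */
        unsigned long long max_default = (1ULL << (8 + 4 * 6)) - 1;
        /* CONFIG_BASE_SMALL=1: TVR_BITS=6, TVN_BITS=4 -> 2^22 - 1 */
        unsigned long long max_small   = (1ULL << (6 + 4 * 4)) - 1;

        printf("MAX_TVAL default:    %#llx\n", max_default); /* 0xffffffff */
        printf("MAX_TVAL base_small: %#llx\n", max_small);   /* 0x3fffff */
        return 0;
    }
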
diff --git a/mm/shmem.c b/mm/shmem.c
index fcedf54..769941f 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2348,12 +2348,14 @@ static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
 {
        struct inode *inode;
        struct dentry *dentry = NULL;
-       u64 inum = fid->raw[2];
-       inum = (inum << 32) | fid->raw[1];
+       u64 inum;
 
        if (fh_len < 3)
                return NULL;
 
+       inum = fid->raw[2];
+       inum = (inum << 32) | fid->raw[1];
+
        inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
                        shmem_match, fid->raw);
        if (inode) {
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index c0e0f76..01890e1 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -2932,7 +2932,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
                  sizeof(struct ipv6hdr) - sizeof(struct udphdr) -
                  pkt_dev->pkt_overhead;
 
-       if (datalen < sizeof(struct pktgen_hdr)) {
+       if (datalen < 0 || datalen < sizeof(struct pktgen_hdr)) {
                datalen = sizeof(struct pktgen_hdr);
                if (net_ratelimit())
                        pr_info("increased datalen to %d\n", datalen);
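
A side note on the added "datalen < 0 ||" test (illustration, not from the patch): sizeof() has type size_t, so in the old comparison a negative datalen was converted to a huge unsigned value and the "too small" branch never fired. A stand-alone sketch, using a stand-in value instead of sizeof(struct pktgen_hdr):

    #include <stdio.h>

    int main(void)
    {
        int datalen = -4;   /* what the header arithmetic can produce for tiny packets */
        size_t need = 16;   /* stand-in for sizeof(struct pktgen_hdr) */

        if (datalen < need)     /* old check: datalen is converted to size_t here */
            printf("old check: caught\n");
        else
            printf("old check: missed, -4 compares as %zu\n", (size_t)datalen);

        if (datalen < 0 || (size_t)datalen < need)  /* the patched form */
            printf("new check: caught\n");
        return 0;
    }
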
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index de9da21..d7d63f4 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -84,6 +84,14 @@ static int ipv4_get_l4proto(const struct sk_buff *skb, unsigned int nhoff,
        *dataoff = nhoff + (iph->ihl << 2);
        *protonum = iph->protocol;
 
+       /* Check bogus IP headers */
+       if (*dataoff > skb->len) {
+               pr_debug("nf_conntrack_ipv4: bogus IPv4 packet: "
+                        "nhoff %u, ihl %u, skblen %u\n",
+                        nhoff, iph->ihl << 2, skb->len);
+               return -NF_ACCEPT;
+       }
+
        return NF_ACCEPT;
 }
 
diff --git a/net/ipv4/netfilter/nf_nat_sip.c b/net/ipv4/netfilter/nf_nat_sip.c
index e40cf78..cd6881e 100644
--- a/net/ipv4/netfilter/nf_nat_sip.c
+++ b/net/ipv4/netfilter/nf_nat_sip.c
@@ -148,7 +148,7 @@ static unsigned int ip_nat_sip(struct sk_buff *skb, unsigned int dataoff,
        if (ct_sip_parse_header_uri(ct, *dptr, NULL, *datalen,
                                    hdr, NULL, &matchoff, &matchlen,
                                    &addr, &port) > 0) {
-               unsigned int matchend, poff, plen, buflen, n;
+               unsigned int olen, matchend, poff, plen, buflen, n;
                char buffer[sizeof("nnn.nnn.nnn.nnn:nnnnn")];
 
                /* We're only interested in headers related to this
@@ -163,11 +163,12 @@ static unsigned int ip_nat_sip(struct sk_buff *skb, unsigned int dataoff,
                                goto next;
                }
 
+               olen = *datalen;
                if (!map_addr(skb, dataoff, dptr, datalen, matchoff, matchlen,
                              &addr, port))
                        return NF_DROP;
 
-               matchend = matchoff + matchlen;
+               matchend = matchoff + matchlen + *datalen - olen;
 
                /* The maddr= parameter (RFC 2361) specifies where to send
                 * the reply. */
@@ -501,7 +502,10 @@ static unsigned int ip_nat_sdp_media(struct sk_buff *skb, unsigned int dataoff,
                ret = nf_ct_expect_related(rtcp_exp);
                if (ret == 0)
                        break;
-               else if (ret != -EBUSY) {
+               else if (ret == -EBUSY) {
+                       nf_ct_unexpect_related(rtp_exp);
+                       continue;
+               } else if (ret < 0) {
                        nf_ct_unexpect_related(rtp_exp);
                        port = 0;
                        break;
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 9528ea0..d75eb39 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -1520,11 +1520,12 @@ static int ip_vs_dst_event(struct notifier_block *this, unsigned long event,
 {
        struct net_device *dev = ptr;
        struct net *net = dev_net(dev);
+       struct netns_ipvs *ipvs = net_ipvs(net);
        struct ip_vs_service *svc;
        struct ip_vs_dest *dest;
        unsigned int idx;
 
-       if (event != NETDEV_UNREGISTER)
+       if (event != NETDEV_UNREGISTER || !ipvs)
                return NOTIFY_DONE;
        IP_VS_DBG(3, "%s() dev=%s\n", __func__, dev->name);
        EnterFunction(2);
@@ -1550,7 +1551,7 @@ static int ip_vs_dst_event(struct notifier_block *this, unsigned long event,
                }
        }
 
-       list_for_each_entry(dest, &net_ipvs(net)->dest_trash, n_list) {
+       list_for_each_entry(dest, &ipvs->dest_trash, n_list) {
                __ip_vs_dev_reset(dest, dev);
        }
        mutex_unlock(&__ip_vs_mutex);
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index f7af8b8..dff164e 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -247,12 +247,15 @@ static void death_by_event(unsigned long ul_conntrack)
 {
        struct nf_conn *ct = (void *)ul_conntrack;
        struct net *net = nf_ct_net(ct);
+       struct nf_conntrack_ecache *ecache = nf_ct_ecache_find(ct);
+
+       BUG_ON(ecache == NULL);
 
        if (nf_conntrack_event(IPCT_DESTROY, ct) < 0) {
                /* bad luck, let's retry again */
-               ct->timeout.expires = jiffies +
+               ecache->timeout.expires = jiffies +
                        (random32() % net->ct.sysctl_events_retry_timeout);
-               add_timer(&ct->timeout);
+               add_timer(&ecache->timeout);
                return;
        }
        /* we've got the event delivered, now it's dying */
@@ -266,6 +269,9 @@ static void death_by_event(unsigned long ul_conntrack)
 void nf_ct_insert_dying_list(struct nf_conn *ct)
 {
        struct net *net = nf_ct_net(ct);
+       struct nf_conntrack_ecache *ecache = nf_ct_ecache_find(ct);
+
+       BUG_ON(ecache == NULL);
 
        /* add this conntrack to the dying list */
        spin_lock_bh(&nf_conntrack_lock);
@@ -273,10 +279,10 @@ void nf_ct_insert_dying_list(struct nf_conn *ct)
                             &net->ct.dying);
        spin_unlock_bh(&nf_conntrack_lock);
        /* set a new timer to retry event delivery */
-       setup_timer(&ct->timeout, death_by_event, (unsigned long)ct);
-       ct->timeout.expires = jiffies +
+       setup_timer(&ecache->timeout, death_by_event, (unsigned long)ct);
+       ecache->timeout.expires = jiffies +
                (random32() % net->ct.sysctl_events_retry_timeout);
-       add_timer(&ct->timeout);
+       add_timer(&ecache->timeout);
 }
 EXPORT_SYMBOL_GPL(nf_ct_insert_dying_list);
 
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index cd1e8e0..a3dffab 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -364,23 +364,6 @@ static void evict_oldest_expect(struct nf_conn *master,
        }
 }
 
-static inline int refresh_timer(struct nf_conntrack_expect *i)
-{
-       struct nf_conn_help *master_help = nfct_help(i->master);
-       const struct nf_conntrack_expect_policy *p;
-
-       if (!del_timer(&i->timeout))
-               return 0;
-
-       p = &rcu_dereference_protected(
-               master_help->helper,
-               lockdep_is_held(&nf_conntrack_lock)
-               )->expect_policy[i->class];
-       i->timeout.expires = jiffies + p->timeout * HZ;
-       add_timer(&i->timeout);
-       return 1;
-}
-
 static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
 {
        const struct nf_conntrack_expect_policy *p;
@@ -388,7 +371,7 @@ static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
        struct nf_conn *master = expect->master;
        struct nf_conn_help *master_help = nfct_help(master);
        struct net *net = nf_ct_exp_net(expect);
-       struct hlist_node *n;
+       struct hlist_node *n, *next;
        unsigned int h;
        int ret = 1;
 
@@ -399,12 +382,12 @@ static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
                goto out;
        }
        h = nf_ct_expect_dst_hash(&expect->tuple);
-       hlist_for_each_entry(i, n, &net->ct.expect_hash[h], hnode) {
+       hlist_for_each_entry_safe(i, n, next, &net->ct.expect_hash[h], hnode) {
                if (expect_matches(i, expect)) {
-                       /* Refresh timer: if it's dying, ignore.. */
-                       if (refresh_timer(i)) {
-                               ret = 0;
-                               goto out;
+                       if (del_timer(&i->timeout)) {
+                               nf_ct_unlink_expect(i);
+                               nf_ct_expect_put(i);
+                               break;
                        }
                } else if (expect_clash(i, expect)) {
                        ret = -EBUSY;
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
index 9228ee0d..6092b0c 100644
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
@@ -392,8 +392,7 @@ static void htable_put(struct xt_hashlimit_htable *hinfo)
 #define CREDITS_PER_JIFFY POW2_BELOW32(MAX_CPJ)
 
 /* Precision saver. */
-static inline u_int32_t
-user2credits(u_int32_t user)
+static u32 user2credits(u32 user)
 {
        /* If multiplying would overflow... */
        if (user > 0xFFFFFFFF / (HZ*CREDITS_PER_JIFFY))
@@ -403,7 +402,7 @@ user2credits(u_int32_t user)
        return (user * HZ * CREDITS_PER_JIFFY) / XT_HASHLIMIT_SCALE;
 }
 
-static inline void rateinfo_recalc(struct dsthash_ent *dh, unsigned long now)
+static void rateinfo_recalc(struct dsthash_ent *dh, unsigned long now)
 {
        dh->rateinfo.credit += (now - dh->rateinfo.prev) * CREDITS_PER_JIFFY;
        if (dh->rateinfo.credit > dh->rateinfo.credit_cap)
@@ -534,8 +533,7 @@ hashlimit_mt(const struct sk_buff *skb, struct xt_action_param *par)
                dh->rateinfo.prev = jiffies;
                dh->rateinfo.credit = user2credits(hinfo->cfg.avg *
                                      hinfo->cfg.burst);
-               dh->rateinfo.credit_cap = user2credits(hinfo->cfg.avg *
-                                         hinfo->cfg.burst);
+               dh->rateinfo.credit_cap = dh->rateinfo.credit;
                dh->rateinfo.cost = user2credits(hinfo->cfg.avg);
        } else {
                /* update expiration timeout */
diff --git a/net/netfilter/xt_limit.c b/net/netfilter/xt_limit.c
index 32b7a57..a4c1e45 100644
--- a/net/netfilter/xt_limit.c
+++ b/net/netfilter/xt_limit.c
@@ -88,8 +88,7 @@ limit_mt(const struct sk_buff *skb, struct xt_action_param *par)
 }
 
 /* Precision saver. */
-static u_int32_t
-user2credits(u_int32_t user)
+static u32 user2credits(u32 user)
 {
        /* If multiplying would overflow... */
        if (user > 0xFFFFFFFF / (HZ*CREDITS_PER_JIFFY))
@@ -118,12 +117,12 @@ static int limit_mt_check(const struct xt_mtchk_param *par)
 
        /* For SMP, we only want to use one set of state. */
        r->master = priv;
+       /* User avg in seconds * XT_LIMIT_SCALE: convert to jiffies *
+          128. */
+       priv->prev = jiffies;
+       priv->credit = user2credits(r->avg * r->burst); /* Credits full. */
        if (r->cost == 0) {
-               /* User avg in seconds * XT_LIMIT_SCALE: convert to jiffies *
-                  128. */
-               priv->prev = jiffies;
-               priv->credit = user2credits(r->avg * r->burst); /* Credits full. */
-               r->credit_cap = user2credits(r->avg * r->burst); /* Credits full. */
+               r->credit_cap = priv->credit; /* Credits full. */
                r->cost = user2credits(r->avg);
        }
        return 0;
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 554111f..cfd7d15 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -1015,6 +1015,16 @@ static void xs_udp_data_ready(struct sock *sk, int len)
        read_unlock_bh(&sk->sk_callback_lock);
 }
 
+/*
+ * Helper function to force a TCP close if the server is sending
+ * junk and/or it has put us in CLOSE_WAIT
+ */
+static void xs_tcp_force_close(struct rpc_xprt *xprt)
+{
+       set_bit(XPRT_CONNECTION_CLOSE, &xprt->state);
+       xprt_force_disconnect(xprt);
+}
+
 static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, struct xdr_skb_reader *desc)
 {
        struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
@@ -1041,7 +1051,7 @@ static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, struct xdr_skb_rea
        /* Sanity check of the record length */
        if (unlikely(transport->tcp_reclen < 8)) {
                dprintk("RPC:       invalid TCP record fragment length\n");
-               xprt_force_disconnect(xprt);
+               xs_tcp_force_close(xprt);
                return;
        }
        dprintk("RPC:       reading TCP record fragment of length %d\n",
@@ -1122,7 +1132,7 @@ static inline void xs_tcp_read_calldir(struct sock_xprt *transport,
                break;
        default:
                dprintk("RPC:       invalid request message type\n");
-               xprt_force_disconnect(&transport->xprt);
+               xs_tcp_force_close(&transport->xprt);
        }
        xs_tcp_check_fraghdr(transport);
 }
@@ -1445,6 +1455,8 @@ static void xs_tcp_cancel_linger_timeout(struct rpc_xprt *xprt)
 static void xs_sock_mark_closed(struct rpc_xprt *xprt)
 {
        smp_mb__before_clear_bit();
+       clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
+       clear_bit(XPRT_CONNECTION_CLOSE, &xprt->state);
        clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
        clear_bit(XPRT_CLOSING, &xprt->state);
        smp_mb__after_clear_bit();
@@ -1502,8 +1514,8 @@ static void xs_tcp_state_change(struct sock *sk)
                break;
        case TCP_CLOSE_WAIT:
                /* The server initiated a shutdown of the socket */
-               xprt_force_disconnect(xprt);
                xprt->connect_cookie++;
+               xs_tcp_force_close(xprt);
        case TCP_CLOSING:
                /*
                 * If the server closed down the connection, make sure that
@@ -2146,8 +2158,7 @@ static void xs_tcp_setup_socket(struct work_struct *work)
                /* We're probably in TIME_WAIT. Get rid of existing socket,
                 * and retry
                 */
-               set_bit(XPRT_CONNECTION_CLOSE, &xprt->state);
-               xprt_force_disconnect(xprt);
+               xs_tcp_force_close(xprt);
                break;
        case -ECONNREFUSED:
        case -ECONNRESET:
diff --git a/sound/pci/ac97/ac97_codec.c b/sound/pci/ac97/ac97_codec.c
index 7f4d619..11ccc23 100644
--- a/sound/pci/ac97/ac97_codec.c
+++ b/sound/pci/ac97/ac97_codec.c
@@ -1271,6 +1271,8 @@ static int snd_ac97_cvol_new(struct snd_card *card, char *name, int reg, unsigne
                tmp.index = ac97->num;
                kctl = snd_ctl_new1(&tmp, ac97);
        }
+       if (!kctl)
+               return -ENOMEM;
        if (reg >= AC97_PHONE && reg <= AC97_PCM)
                set_tlv_db_scale(kctl, db_scale_5bit_12db_max);
        else
diff --git a/sound/pci/emu10k1/emu10k1_main.c b/sound/pci/emu10k1/emu10k1_main.c
index 15f0161..0800bcc 100644
--- a/sound/pci/emu10k1/emu10k1_main.c
+++ b/sound/pci/emu10k1/emu10k1_main.c
@@ -1415,6 +1415,15 @@ static struct snd_emu_chip_details emu_chip_details[] = {
         .ca0108_chip = 1,
         .spk71 = 1,
         .emu_model = EMU_MODEL_EMU1010B}, /* EMU 1010 new revision */
+       /* Tested by Maxim Kachur <mcdebug...@duganet.ru> 17th Oct 2012. */
+       /* This is MAEM8986, 0202 is MAEM8980 */
+       {.vendor = 0x1102, .device = 0x0008, .subsystem = 0x40071102,
+        .driver = "Audigy2", .name = "E-mu 1010 PCIe [MAEM8986]",
+        .id = "EMU1010",
+        .emu10k2_chip = 1,
+        .ca0108_chip = 1,
+        .spk71 = 1,
+        .emu_model = EMU_MODEL_EMU1010B}, /* EMU 1010 PCIe */
        /* Tested by ja...@superbug.co.uk 8th July 2005. */
        /* This is MAEM8810, 0202 is MAEM8820 */
        {.vendor = 0x1102, .device = 0x0004, .subsystem = 0x40011102,
--