diff --git a/Makefile b/Makefile
index eabbd7748a24..39d7af0165a8 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
 PATCHLEVEL = 14
-SUBLEVEL = 6
+SUBLEVEL = 7
 EXTRAVERSION =
 NAME = Petit Gorille
 
@@ -373,9 +373,6 @@ LDFLAGS_MODULE  =
 CFLAGS_KERNEL  =
 AFLAGS_KERNEL  =
 LDFLAGS_vmlinux =
-CFLAGS_GCOV    := -fprofile-arcs -ftest-coverage -fno-tree-loop-im $(call cc-disable-warning,maybe-uninitialized,)
-CFLAGS_KCOV    := $(call cc-option,-fsanitize-coverage=trace-pc,)
-
 
 # Use USERINCLUDE when you must reference the UAPI directories only.
 USERINCLUDE    := \
@@ -394,21 +391,19 @@ LINUXINCLUDE    := \
                -I$(objtree)/include \
                $(USERINCLUDE)
 
-KBUILD_CPPFLAGS := -D__KERNEL__
-
+KBUILD_AFLAGS   := -D__ASSEMBLY__
 KBUILD_CFLAGS   := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
                   -fno-strict-aliasing -fno-common -fshort-wchar \
                   -Werror-implicit-function-declaration \
                   -Wno-format-security \
-                  -std=gnu89 $(call cc-option,-fno-PIE)
-
-
+                  -std=gnu89
+KBUILD_CPPFLAGS := -D__KERNEL__
 KBUILD_AFLAGS_KERNEL :=
 KBUILD_CFLAGS_KERNEL :=
-KBUILD_AFLAGS   := -D__ASSEMBLY__ $(call cc-option,-fno-PIE)
 KBUILD_AFLAGS_MODULE  := -DMODULE
 KBUILD_CFLAGS_MODULE  := -DMODULE
 KBUILD_LDFLAGS_MODULE := -T $(srctree)/scripts/module-common.lds
+GCC_PLUGINS_CFLAGS :=
 
 # Read KERNELRELEASE from include/config/kernel.release (if it exists)
 KERNELRELEASE = $(shell cat include/config/kernel.release 2> /dev/null)
@@ -421,7 +416,7 @@ export MAKE AWK GENKSYMS INSTALLKERNEL PERL PYTHON UTS_MACHINE
 export HOSTCXX HOSTCXXFLAGS LDFLAGS_MODULE CHECK CHECKFLAGS
 
 export KBUILD_CPPFLAGS NOSTDINC_FLAGS LINUXINCLUDE OBJCOPYFLAGS LDFLAGS
-export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE CFLAGS_GCOV CFLAGS_KCOV CFLAGS_KASAN CFLAGS_UBSAN
+export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE CFLAGS_KASAN CFLAGS_UBSAN
 export KBUILD_AFLAGS AFLAGS_KERNEL AFLAGS_MODULE
 export KBUILD_AFLAGS_MODULE KBUILD_CFLAGS_MODULE KBUILD_LDFLAGS_MODULE
 export KBUILD_AFLAGS_KERNEL KBUILD_CFLAGS_KERNEL
@@ -622,6 +617,12 @@ endif
 # Defaults to vmlinux, but the arch makefile usually adds further targets
 all: vmlinux
 
+KBUILD_CFLAGS  += $(call cc-option,-fno-PIE)
+KBUILD_AFLAGS  += $(call cc-option,-fno-PIE)
+CFLAGS_GCOV    := -fprofile-arcs -ftest-coverage -fno-tree-loop-im $(call cc-disable-warning,maybe-uninitialized,)
+CFLAGS_KCOV    := $(call cc-option,-fsanitize-coverage=trace-pc,)
+export CFLAGS_GCOV CFLAGS_KCOV
+
 # The arch Makefile can set ARCH_{CPP,A,C}FLAGS to override the default
 # values of the respective KBUILD_* variables
 ARCH_CPPFLAGS :=
diff --git a/arch/arm64/include/asm/compat.h b/arch/arm64/include/asm/compat.h
index e39d487bf724..a3c7f271ad4c 100644
--- a/arch/arm64/include/asm/compat.h
+++ b/arch/arm64/include/asm/compat.h
@@ -215,7 +215,6 @@ typedef struct compat_siginfo {
 } compat_siginfo_t;
 
 #define COMPAT_OFF_T_MAX       0x7fffffff
-#define COMPAT_LOFF_T_MAX      0x7fffffffffffffffL
 
 /*
  * A pointer passed in from user mode. This should not
diff --git a/arch/mips/include/asm/compat.h b/arch/mips/include/asm/compat.h
index 8e2b5b556488..49691331ada4 100644
--- a/arch/mips/include/asm/compat.h
+++ b/arch/mips/include/asm/compat.h
@@ -200,7 +200,6 @@ typedef struct compat_siginfo {
 } compat_siginfo_t;
 
 #define COMPAT_OFF_T_MAX       0x7fffffff
-#define COMPAT_LOFF_T_MAX      0x7fffffffffffffffL
 
 /*
  * A pointer passed in from user mode. This should not
diff --git a/arch/parisc/include/asm/compat.h b/arch/parisc/include/asm/compat.h
index 07f48827afda..acf8aa07cbe0 100644
--- a/arch/parisc/include/asm/compat.h
+++ b/arch/parisc/include/asm/compat.h
@@ -195,7 +195,6 @@ typedef struct compat_siginfo {
 } compat_siginfo_t;
 
 #define COMPAT_OFF_T_MAX       0x7fffffff
-#define COMPAT_LOFF_T_MAX      0x7fffffffffffffffL
 
 struct compat_ipc64_perm {
        compat_key_t key;
diff --git a/arch/powerpc/include/asm/compat.h b/arch/powerpc/include/asm/compat.h
index a035b1e5dfa7..8a2aecfe9b02 100644
--- a/arch/powerpc/include/asm/compat.h
+++ b/arch/powerpc/include/asm/compat.h
@@ -185,7 +185,6 @@ typedef struct compat_siginfo {
 } compat_siginfo_t;
 
 #define COMPAT_OFF_T_MAX       0x7fffffff
-#define COMPAT_LOFF_T_MAX      0x7fffffffffffffffL
 
 /*
  * A pointer passed in from user mode. This should not
diff --git a/arch/s390/include/asm/compat.h b/arch/s390/include/asm/compat.h
index 1b60eb3676d5..5e6a63641a5f 100644
--- a/arch/s390/include/asm/compat.h
+++ b/arch/s390/include/asm/compat.h
@@ -263,7 +263,6 @@ typedef struct compat_siginfo {
 #define si_overrun     _sifields._timer._overrun
 
 #define COMPAT_OFF_T_MAX       0x7fffffff
-#define COMPAT_LOFF_T_MAX      0x7fffffffffffffffL
 
 /*
  * A pointer passed in from user mode. This should not
diff --git a/arch/sparc/include/asm/compat.h b/arch/sparc/include/asm/compat.h
index 977c3f280ba1..fa38c78de0f0 100644
--- a/arch/sparc/include/asm/compat.h
+++ b/arch/sparc/include/asm/compat.h
@@ -209,7 +209,6 @@ typedef struct compat_siginfo {
 } compat_siginfo_t;
 
 #define COMPAT_OFF_T_MAX       0x7fffffff
-#define COMPAT_LOFF_T_MAX      0x7fffffffffffffffL
 
 /*
  * A pointer passed in from user mode. This should not
diff --git a/arch/tile/include/asm/compat.h b/arch/tile/include/asm/compat.h
index c14e36f008c8..62a7b83025dd 100644
--- a/arch/tile/include/asm/compat.h
+++ b/arch/tile/include/asm/compat.h
@@ -173,7 +173,6 @@ typedef struct compat_siginfo {
 } compat_siginfo_t;
 
 #define COMPAT_OFF_T_MAX       0x7fffffff
-#define COMPAT_LOFF_T_MAX      0x7fffffffffffffffL
 
 struct compat_ipc64_perm {
        compat_key_t key;
diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
index 9eef9cc64c68..70bc1df580b2 100644
--- a/arch/x86/include/asm/compat.h
+++ b/arch/x86/include/asm/compat.h
@@ -209,7 +209,6 @@ typedef struct compat_siginfo {
 } compat_siginfo_t;
 
 #define COMPAT_OFF_T_MAX       0x7fffffff
-#define COMPAT_LOFF_T_MAX      0x7fffffffffffffffL
 
 struct compat_ipc64_perm {
        compat_key_t key;
diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
index 3d433af856a5..7be35b600299 100644
--- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
+++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
@@ -1297,9 +1297,7 @@ static void rmdir_all_sub(void)
                kfree(rdtgrp);
        }
        /* Notify online CPUs to update per cpu storage and PQR_ASSOC MSR */
-       get_online_cpus();
        update_closid_rmid(cpu_online_mask, &rdtgroup_default);
-       put_online_cpus();
 
        kernfs_remove(kn_info);
        kernfs_remove(kn_mongrp);
@@ -1310,6 +1308,7 @@ static void rdt_kill_sb(struct super_block *sb)
 {
        struct rdt_resource *r;
 
+       cpus_read_lock();
        mutex_lock(&rdtgroup_mutex);
 
        /*Put everything back to default values. */
@@ -1317,11 +1316,12 @@ static void rdt_kill_sb(struct super_block *sb)
                reset_all_ctrls(r);
        cdp_disable();
        rmdir_all_sub();
-       static_branch_disable(&rdt_alloc_enable_key);
-       static_branch_disable(&rdt_mon_enable_key);
-       static_branch_disable(&rdt_enable_key);
+       static_branch_disable_cpuslocked(&rdt_alloc_enable_key);
+       static_branch_disable_cpuslocked(&rdt_mon_enable_key);
+       static_branch_disable_cpuslocked(&rdt_enable_key);
        kernfs_kill_sb(sb);
        mutex_unlock(&rdtgroup_mutex);
+       cpus_read_unlock();
 }
 
 static struct file_system_type rdt_fs_type = {
diff --git a/block/blk-core.c b/block/blk-core.c
index 516ce3174683..7b30bf10b1d4 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -339,6 +339,7 @@ void blk_sync_queue(struct request_queue *q)
                struct blk_mq_hw_ctx *hctx;
                int i;
 
+               cancel_delayed_work_sync(&q->requeue_work);
                queue_for_each_hw_ctx(q, hctx, i)
                        cancel_delayed_work_sync(&hctx->run_work);
        } else {
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index bc3984ffe867..c04aa11f0e21 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -242,6 +242,9 @@ struct smi_info {
        /* The timer for this si. */
        struct timer_list   si_timer;
 
+       /* This flag is set, if the timer can be set */
+       bool                timer_can_start;
+
        /* This flag is set, if the timer is running (timer_pending() isn't enough) */
        bool                timer_running;
 
@@ -417,6 +420,8 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info)
 
 static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
 {
+       if (!smi_info->timer_can_start)
+               return;
        smi_info->last_timeout_jiffies = jiffies;
        mod_timer(&smi_info->si_timer, new_val);
        smi_info->timer_running = true;
@@ -436,21 +441,18 @@ static void start_new_msg(struct smi_info *smi_info, unsigned char *msg,
        smi_info->handlers->start_transaction(smi_info->si_sm, msg, size);
 }
 
-static void start_check_enables(struct smi_info *smi_info, bool start_timer)
+static void start_check_enables(struct smi_info *smi_info)
 {
        unsigned char msg[2];
 
        msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
        msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
 
-       if (start_timer)
-               start_new_msg(smi_info, msg, 2);
-       else
-               smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
+       start_new_msg(smi_info, msg, 2);
        smi_info->si_state = SI_CHECKING_ENABLES;
 }
 
-static void start_clear_flags(struct smi_info *smi_info, bool start_timer)
+static void start_clear_flags(struct smi_info *smi_info)
 {
        unsigned char msg[3];
 
@@ -459,10 +461,7 @@ static void start_clear_flags(struct smi_info *smi_info, bool start_timer)
        msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
        msg[2] = WDT_PRE_TIMEOUT_INT;
 
-       if (start_timer)
-               start_new_msg(smi_info, msg, 3);
-       else
-               smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
+       start_new_msg(smi_info, msg, 3);
        smi_info->si_state = SI_CLEARING_FLAGS;
 }
 
@@ -497,11 +496,11 @@ static void start_getting_events(struct smi_info *smi_info)
  * Note that we cannot just use disable_irq(), since the interrupt may
  * be shared.
  */
-static inline bool disable_si_irq(struct smi_info *smi_info, bool start_timer)
+static inline bool disable_si_irq(struct smi_info *smi_info)
 {
        if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
                smi_info->interrupt_disabled = true;
-               start_check_enables(smi_info, start_timer);
+               start_check_enables(smi_info);
                return true;
        }
        return false;
@@ -511,7 +510,7 @@ static inline bool enable_si_irq(struct smi_info *smi_info)
 {
        if ((smi_info->irq) && (smi_info->interrupt_disabled)) {
                smi_info->interrupt_disabled = false;
-               start_check_enables(smi_info, true);
+               start_check_enables(smi_info);
                return true;
        }
        return false;
@@ -529,7 +528,7 @@ static struct ipmi_smi_msg *alloc_msg_handle_irq(struct smi_info *smi_info)
 
        msg = ipmi_alloc_smi_msg();
        if (!msg) {
-               if (!disable_si_irq(smi_info, true))
+               if (!disable_si_irq(smi_info))
                        smi_info->si_state = SI_NORMAL;
        } else if (enable_si_irq(smi_info)) {
                ipmi_free_smi_msg(msg);
@@ -545,7 +544,7 @@ static void handle_flags(struct smi_info *smi_info)
                /* Watchdog pre-timeout */
                smi_inc_stat(smi_info, watchdog_pretimeouts);
 
-               start_clear_flags(smi_info, true);
+               start_clear_flags(smi_info);
                smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
                if (smi_info->intf)
                        ipmi_smi_watchdog_pretimeout(smi_info->intf);
@@ -928,7 +927,7 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
                 * disable and messages disabled.
                 */
                if (smi_info->supports_event_msg_buff || smi_info->irq) {
-                       start_check_enables(smi_info, true);
+                       start_check_enables(smi_info);
                } else {
                        smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
                        if (!smi_info->curr_msg)
@@ -1235,6 +1234,7 @@ static int smi_start_processing(void       *send_info,
 
        /* Set up the timer that drives the interface. */
        setup_timer(&new_smi->si_timer, smi_timeout, (long)new_smi);
+       new_smi->timer_can_start = true;
        smi_mod_timer(new_smi, jiffies + SI_TIMEOUT_JIFFIES);
 
        /* Try to claim any interrupts. */
@@ -3416,10 +3416,12 @@ static void check_for_broken_irqs(struct smi_info *smi_info)
        check_set_rcv_irq(smi_info);
 }
 
-static inline void wait_for_timer_and_thread(struct smi_info *smi_info)
+static inline void stop_timer_and_thread(struct smi_info *smi_info)
 {
        if (smi_info->thread != NULL)
                kthread_stop(smi_info->thread);
+
+       smi_info->timer_can_start = false;
        if (smi_info->timer_running)
                del_timer_sync(&smi_info->si_timer);
 }
@@ -3605,7 +3607,7 @@ static int try_smi_init(struct smi_info *new_smi)
         * Start clearing the flags before we enable interrupts or the
         * timer to avoid racing with the timer.
         */
-       start_clear_flags(new_smi, false);
+       start_clear_flags(new_smi);
 
        /*
         * IRQ is defined to be set when non-zero.  req_events will
@@ -3674,7 +3676,7 @@ static int try_smi_init(struct smi_info *new_smi)
        return 0;
 
 out_err_stop_timer:
-       wait_for_timer_and_thread(new_smi);
+       stop_timer_and_thread(new_smi);
 
 out_err:
        new_smi->interrupt_disabled = true;
@@ -3866,7 +3868,7 @@ static void cleanup_one_si(struct smi_info *to_clean)
         */
        if (to_clean->irq_cleanup)
                to_clean->irq_cleanup(to_clean);
-       wait_for_timer_and_thread(to_clean);
+       stop_timer_and_thread(to_clean);
 
        /*
         * Timeouts are stopped, now make sure the interrupts are off
@@ -3878,7 +3880,7 @@ static void cleanup_one_si(struct smi_info *to_clean)
                schedule_timeout_uninterruptible(1);
        }
        if (to_clean->handlers)
-               disable_si_irq(to_clean, false);
+               disable_si_irq(to_clean);
        while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
                poll(to_clean);
                schedule_timeout_uninterruptible(1);
diff --git a/drivers/cpuidle/cpuidle-powernv.c b/drivers/cpuidle/cpuidle-powernv.c
index ed6531f075c6..e06605b21841 100644
--- a/drivers/cpuidle/cpuidle-powernv.c
+++ b/drivers/cpuidle/cpuidle-powernv.c
@@ -384,9 +384,9 @@ static int powernv_add_idle_states(void)
                 * Firmware passes residency and latency values in ns.
                 * cpuidle expects it in us.
                 */
-               exit_latency = latency_ns[i] / 1000;
+               exit_latency = DIV_ROUND_UP(latency_ns[i], 1000);
                if (!rc)
-                       target_residency = residency_ns[i] / 1000;
+                       target_residency = DIV_ROUND_UP(residency_ns[i], 1000);
                else
                        target_residency = 0;
 
diff --git a/drivers/ide/ide-atapi.c b/drivers/ide/ide-atapi.c
index 14d1e7d9a1d6..0e6bc631a1ca 100644
--- a/drivers/ide/ide-atapi.c
+++ b/drivers/ide/ide-atapi.c
@@ -282,7 +282,7 @@ int ide_cd_expiry(ide_drive_t *drive)
        struct request *rq = drive->hwif->rq;
        unsigned long wait = 0;
 
-       debug_log("%s: rq->cmd[0]: 0x%x\n", __func__, rq->cmd[0]);
+       debug_log("%s: scsi_req(rq)->cmd[0]: 0x%x\n", __func__, scsi_req(rq)->cmd[0]);
 
        /*
         * Some commands are *slow* and normally take a long time to complete.
@@ -463,7 +463,7 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
                                return ide_do_reset(drive);
                        }
 
-                       debug_log("[cmd %x]: check condition\n", rq->cmd[0]);
+                       debug_log("[cmd %x]: check condition\n", scsi_req(rq)->cmd[0]);
 
                        /* Retry operation */
                        ide_retry_pc(drive);
@@ -531,7 +531,7 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
                ide_pad_transfer(drive, write, bcount);
 
        debug_log("[cmd %x] transferred %d bytes, padded %d bytes, resid: %u\n",
-                 rq->cmd[0], done, bcount, scsi_req(rq)->resid_len);
+                 scsi_req(rq)->cmd[0], done, bcount, scsi_req(rq)->resid_len);
 
        /* And set the interrupt handler again */
        ide_set_handler(drive, ide_pc_intr, timeout);
diff --git a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
index 010c709ba3bb..58c531db4f4a 100644
--- a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
+++ b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
@@ -675,8 +675,8 @@ struct fw_ri_fr_nsmr_tpte_wr {
        __u16  wrid;
        __u8   r1[3];
        __u8   len16;
-       __u32  r2;
-       __u32  stag;
+       __be32  r2;
+       __be32  stag;
        struct fw_ri_tpte tpte;
        __u64  pbl[2];
 };
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index f425905c97fa..0cabf31fb163 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -2158,6 +2158,7 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
                                for (k = 0; k < page; k++) {
                                        kfree(new_bp[k].map);
                                }
+                               kfree(new_bp);
 
                                /* restore some fields from old_counts */
                                bitmap->counts.bp = old_counts.bp;
@@ -2208,6 +2209,14 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
                block += old_blocks;
        }
 
+       if (bitmap->counts.bp != old_counts.bp) {
+               unsigned long k;
+               for (k = 0; k < old_counts.pages; k++)
+                       if (!old_counts.bp[k].hijacked)
+                               kfree(old_counts.bp[k].map);
+               kfree(old_counts.bp);
+       }
+
        if (!init) {
                int i;
                while (block < (chunks << chunkshift)) {
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 2245d06d2045..a25eebd98996 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -2143,13 +2143,6 @@ static int super_load(struct md_rdev *rdev, struct md_rdev *refdev)
        struct dm_raid_superblock *refsb;
        uint64_t events_sb, events_refsb;
 
-       rdev->sb_start = 0;
-       rdev->sb_size = bdev_logical_block_size(rdev->meta_bdev);
-       if (rdev->sb_size < sizeof(*sb) || rdev->sb_size > PAGE_SIZE) {
-               DMERR("superblock size of a logical block is no longer valid");
-               return -EINVAL;
-       }
-
        r = read_disk_sb(rdev, rdev->sb_size, false);
        if (r)
                return r;
@@ -2494,6 +2487,17 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
                if (test_bit(Journal, &rdev->flags))
                        continue;
 
+               if (!rdev->meta_bdev)
+                       continue;
+
+               /* Set superblock offset/size for metadata device. */
+               rdev->sb_start = 0;
+               rdev->sb_size = bdev_logical_block_size(rdev->meta_bdev);
+               if (rdev->sb_size < sizeof(struct dm_raid_superblock) || rdev->sb_size > PAGE_SIZE) {
+                       DMERR("superblock size of a logical block is no longer valid");
+                       return -EINVAL;
+               }
+
                /*
                 * Skipping super_load due to CTR_FLAG_SYNC will cause
                 * the array to undergo initialization again as
@@ -2506,9 +2510,6 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
                if (test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags))
                        continue;
 
-               if (!rdev->meta_bdev)
-                       continue;
-
                r = super_load(rdev, freshest);
 
                switch (r) {
diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c
index 9139d01ba7ed..33d844fe2e70 100644
--- a/drivers/media/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb-core/dvb_frontend.c
@@ -145,15 +145,13 @@ static void __dvb_frontend_free(struct dvb_frontend *fe)
 {
        struct dvb_frontend_private *fepriv = fe->frontend_priv;
 
-       if (!fepriv)
-               return;
-
-       dvb_free_device(fepriv->dvbdev);
+       if (fepriv)
+               dvb_free_device(fepriv->dvbdev);
 
        dvb_frontend_invoke_release(fe, fe->ops.release);
 
-       kfree(fepriv);
-       fe->frontend_priv = NULL;
+       if (fepriv)
+               kfree(fepriv);
 }
 
 static void dvb_frontend_free(struct kref *ref)
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index d4496e9afcdf..a3d12dbde95b 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -1355,7 +1355,8 @@ nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry,
 
        /* Offload checksum calculation to HW */
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
-               hdr->csum_l3 = 1; /* Enable IP csum calculation */
+               if (ip.v4->version == 4)
+                       hdr->csum_l3 = 1; /* Enable IP csum calculation */
                hdr->l3_offset = skb_network_offset(skb);
                hdr->l4_offset = skb_transport_offset(skb);
 
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index a3c949ea7d1a..9541465e43e9 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -2025,21 +2025,6 @@ static int rtl8169_set_speed(struct net_device *dev,
        return ret;
 }
 
-static int rtl8169_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
-       struct rtl8169_private *tp = netdev_priv(dev);
-       int ret;
-
-       del_timer_sync(&tp->timer);
-
-       rtl_lock_work(tp);
-       ret = rtl8169_set_speed(dev, cmd->autoneg, ethtool_cmd_speed(cmd),
-                               cmd->duplex, cmd->advertising);
-       rtl_unlock_work(tp);
-
-       return ret;
-}
-
 static netdev_features_t rtl8169_fix_features(struct net_device *dev,
        netdev_features_t features)
 {
@@ -2166,6 +2151,27 @@ static int rtl8169_get_link_ksettings(struct net_device *dev,
        return rc;
 }
 
+static int rtl8169_set_link_ksettings(struct net_device *dev,
+                                     const struct ethtool_link_ksettings *cmd)
+{
+       struct rtl8169_private *tp = netdev_priv(dev);
+       int rc;
+       u32 advertising;
+
+       if (!ethtool_convert_link_mode_to_legacy_u32(&advertising,
+           cmd->link_modes.advertising))
+               return -EINVAL;
+
+       del_timer_sync(&tp->timer);
+
+       rtl_lock_work(tp);
+       rc = rtl8169_set_speed(dev, cmd->base.autoneg, cmd->base.speed,
+                              cmd->base.duplex, advertising);
+       rtl_unlock_work(tp);
+
+       return rc;
+}
+
 static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs,
                             void *p)
 {
@@ -2367,7 +2373,6 @@ static const struct ethtool_ops rtl8169_ethtool_ops = {
        .get_drvinfo            = rtl8169_get_drvinfo,
        .get_regs_len           = rtl8169_get_regs_len,
        .get_link               = ethtool_op_get_link,
-       .set_settings           = rtl8169_set_settings,
        .get_msglevel           = rtl8169_get_msglevel,
        .set_msglevel           = rtl8169_set_msglevel,
        .get_regs               = rtl8169_get_regs,
@@ -2379,6 +2384,7 @@ static const struct ethtool_ops rtl8169_ethtool_ops = {
        .get_ts_info            = ethtool_op_get_ts_info,
        .nway_reset             = rtl8169_nway_reset,
        .get_link_ksettings     = rtl8169_get_link_ksettings,
+       .set_link_ksettings     = rtl8169_set_link_ksettings,
 };
 
 static void rtl8169_get_mac_version(struct rtl8169_private *tp,
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 16bd50929084..28c4d6fa096c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -2564,6 +2564,7 @@ static int stmmac_open(struct net_device *dev)
 
        priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
        priv->rx_copybreak = STMMAC_RX_COPYBREAK;
+       priv->mss = 0;
 
        ret = alloc_dma_desc_resources(priv);
        if (ret < 0) {
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index 1f3295e274d0..8feb84fd4ca7 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -409,7 +409,7 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb)
        struct dst_entry *dst;
        int err, ret = NET_XMIT_DROP;
        struct flowi6 fl6 = {
-               .flowi6_iif = dev->ifindex,
+               .flowi6_oif = dev->ifindex,
                .daddr = ip6h->daddr,
                .saddr = ip6h->saddr,
                .flowi6_flags = FLOWI_FLAG_ANYSRC,
diff --git a/drivers/net/tap.c b/drivers/net/tap.c
index 6c0c84c33e1f..bfd4ded0a53f 100644
--- a/drivers/net/tap.c
+++ b/drivers/net/tap.c
@@ -829,8 +829,11 @@ static ssize_t tap_do_read(struct tap_queue *q,
        DEFINE_WAIT(wait);
        ssize_t ret = 0;
 
-       if (!iov_iter_count(to))
+       if (!iov_iter_count(to)) {
+               if (skb)
+                       kfree_skb(skb);
                return 0;
+       }
 
        if (skb)
                goto put;
@@ -1077,7 +1080,7 @@ static long tap_ioctl(struct file *file, unsigned int cmd,
        case TUNSETOFFLOAD:
                /* let the user check for future flags */
                if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
-                           TUN_F_TSO_ECN))
+                           TUN_F_TSO_ECN | TUN_F_UFO))
                        return -EINVAL;
 
                rtnl_lock();
@@ -1154,11 +1157,14 @@ static int tap_recvmsg(struct socket *sock, struct msghdr *m,
                       size_t total_len, int flags)
 {
        struct tap_queue *q = container_of(sock, struct tap_queue, sock);
+       struct sk_buff *skb = m->msg_control;
        int ret;
-       if (flags & ~(MSG_DONTWAIT|MSG_TRUNC))
+       if (flags & ~(MSG_DONTWAIT|MSG_TRUNC)) {
+               if (skb)
+                       kfree_skb(skb);
                return -EINVAL;
-       ret = tap_do_read(q, &m->msg_iter, flags & MSG_DONTWAIT,
-                         m->msg_control);
+       }
+       ret = tap_do_read(q, &m->msg_iter, flags & MSG_DONTWAIT, skb);
        if (ret > total_len) {
                m->msg_flags |= MSG_TRUNC;
                ret = flags & MSG_TRUNC ? ret : total_len;
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 42bb820a56c9..c91b110f2169 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1326,6 +1326,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
                        err = xdp_do_redirect(tun->dev, &xdp, xdp_prog);
                        if (err)
                                goto err_redirect;
+                       rcu_read_unlock();
                        return NULL;
                case XDP_TX:
                        xdp_xmit = true;
@@ -1358,7 +1359,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
        if (xdp_xmit) {
                skb->dev = tun->dev;
                generic_xdp_tx(skb, xdp_prog);
-               rcu_read_lock();
+               rcu_read_unlock();
                return NULL;
        }
 
@@ -1734,8 +1735,11 @@ static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
 
        tun_debug(KERN_INFO, tun, "tun_do_read\n");
 
-       if (!iov_iter_count(to))
+       if (!iov_iter_count(to)) {
+               if (skb)
+                       kfree_skb(skb);
                return 0;
+       }
 
        if (!skb) {
                /* Read frames from ring */
@@ -1851,22 +1855,24 @@ static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len,
 {
        struct tun_file *tfile = container_of(sock, struct tun_file, socket);
        struct tun_struct *tun = __tun_get(tfile);
+       struct sk_buff *skb = m->msg_control;
        int ret;
 
-       if (!tun)
-               return -EBADFD;
+       if (!tun) {
+               ret = -EBADFD;
+               goto out_free_skb;
+       }
 
        if (flags & ~(MSG_DONTWAIT|MSG_TRUNC|MSG_ERRQUEUE)) {
                ret = -EINVAL;
-               goto out;
+               goto out_put_tun;
        }
        if (flags & MSG_ERRQUEUE) {
                ret = sock_recv_errqueue(sock->sk, m, total_len,
                                         SOL_PACKET, TUN_TX_TIMESTAMP);
                goto out;
        }
-       ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT,
-                         m->msg_control);
+       ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT, skb);
        if (ret > (ssize_t)total_len) {
                m->msg_flags |= MSG_TRUNC;
                ret = flags & MSG_TRUNC ? ret : total_len;
@@ -1874,6 +1880,13 @@ static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len,
 out:
        tun_put(tun);
        return ret;
+
+out_put_tun:
+       tun_put(tun);
+out_free_skb:
+       if (skb)
+               kfree_skb(skb);
+       return ret;
 }
 
 static int tun_peek_len(struct socket *sock)
@@ -2144,6 +2157,8 @@ static int set_offload(struct tun_struct *tun, unsigned long arg)
                                features |= NETIF_F_TSO6;
                        arg &= ~(TUN_F_TSO4|TUN_F_TSO6);
                }
+
+               arg &= ~TUN_F_UFO;
        }
 
        /* This gives the user a way to test for new features in future by
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 8d4a6f7cba61..81394a4b2803 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -261,9 +261,11 @@ static void qmi_wwan_netdev_setup(struct net_device *net)
                net->hard_header_len = 0;
                net->addr_len        = 0;
                net->flags           = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
+               set_bit(EVENT_NO_IP_ALIGN, &dev->flags);
                netdev_dbg(net, "mode: raw IP\n");
        } else if (!net->header_ops) { /* don't bother if already set */
                ether_setup(net);
+               clear_bit(EVENT_NO_IP_ALIGN, &dev->flags);
                netdev_dbg(net, "mode: Ethernet\n");
        }
 
@@ -1239,6 +1241,7 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x1e0e, 0x9001, 5)},    /* SIMCom 7230E */
        {QMI_QUIRK_SET_DTR(0x2c7c, 0x0125, 4)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */
        {QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */
+       {QMI_FIXED_INTF(0x2c7c, 0x0296, 4)},    /* Quectel BG96 */
 
        /* 4. Gobi 1000 devices */
        {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)},    /* Acer Gobi Modem Device */
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 6510e5cc1817..42baad125a7d 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -484,7 +484,10 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
                return -ENOLINK;
        }
 
-       skb = __netdev_alloc_skb_ip_align(dev->net, size, flags);
+       if (test_bit(EVENT_NO_IP_ALIGN, &dev->flags))
+               skb = __netdev_alloc_skb(dev->net, size, flags);
+       else
+               skb = __netdev_alloc_skb_ip_align(dev->net, size, flags);
        if (!skb) {
                netif_dbg(dev, rx_err, dev->net, "no rx skb\n");
                usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 76d2bb793afe..3333d417b248 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -1512,15 +1512,17 @@ static struct nvmet_fabrics_ops nvmet_rdma_ops = {
 
 static void nvmet_rdma_remove_one(struct ib_device *ib_device, void *client_data)
 {
-       struct nvmet_rdma_queue *queue;
+       struct nvmet_rdma_queue *queue, *tmp;
 
        /* Device is being removed, delete all queues using this device */
        mutex_lock(&nvmet_rdma_queue_mutex);
-       list_for_each_entry(queue, &nvmet_rdma_queue_list, queue_list) {
+       list_for_each_entry_safe(queue, tmp, &nvmet_rdma_queue_list,
+                                queue_list) {
                if (queue->dev->device != ib_device)
                        continue;
 
                pr_info("Removing queue %d\n", queue->idx);
+               list_del_init(&queue->queue_list);
                __nvmet_rdma_queue_disconnect(queue);
        }
        mutex_unlock(&nvmet_rdma_queue_mutex);
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 47a13c5723c6..5340efc673a9 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -985,6 +985,9 @@ struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *,
 int qeth_set_features(struct net_device *, netdev_features_t);
 int qeth_recover_features(struct net_device *);
 netdev_features_t qeth_fix_features(struct net_device *, netdev_features_t);
+netdev_features_t qeth_features_check(struct sk_buff *skb,
+                                     struct net_device *dev,
+                                     netdev_features_t features);
 int qeth_vm_request_mac(struct qeth_card *card);
 int qeth_push_hdr(struct sk_buff *skb, struct qeth_hdr **hdr, unsigned int len);
 
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index bae7440abc01..330e5d3dadf3 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -19,6 +19,11 @@
 #include <linux/mii.h>
 #include <linux/kthread.h>
 #include <linux/slab.h>
+#include <linux/if_vlan.h>
+#include <linux/netdevice.h>
+#include <linux/netdev_features.h>
+#include <linux/skbuff.h>
+
 #include <net/iucv/af_iucv.h>
 #include <net/dsfield.h>
 
@@ -6505,6 +6510,32 @@ netdev_features_t qeth_fix_features(struct net_device *dev,
 }
 EXPORT_SYMBOL_GPL(qeth_fix_features);
 
+netdev_features_t qeth_features_check(struct sk_buff *skb,
+                                     struct net_device *dev,
+                                     netdev_features_t features)
+{
+       /* GSO segmentation builds skbs with
+        *      a (small) linear part for the headers, and
+        *      page frags for the data.
+        * Compared to a linear skb, the header-only part consumes an
+        * additional buffer element. This reduces buffer utilization, and
+        * hurts throughput. So compress small segments into one element.
+        */
+       if (netif_needs_gso(skb, features)) {
+               /* match skb_segment(): */
+               unsigned int doffset = skb->data - skb_mac_header(skb);
+               unsigned int hsize = skb_shinfo(skb)->gso_size;
+               unsigned int hroom = skb_headroom(skb);
+
+               /* linearize only if resulting skb allocations are order-0: */
+               if (SKB_DATA_ALIGN(hroom + doffset + hsize) <= SKB_MAX_HEAD(0))
+                       features &= ~NETIF_F_SG;
+       }
+
+       return vlan_features_check(skb, features);
+}
+EXPORT_SYMBOL_GPL(qeth_features_check);
+
 static int __init qeth_core_init(void)
 {
        int rc;
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 760b023eae95..5a973ebcb13c 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -963,6 +963,7 @@ static const struct net_device_ops qeth_l2_netdev_ops = {
        .ndo_stop               = qeth_l2_stop,
        .ndo_get_stats          = qeth_get_stats,
        .ndo_start_xmit         = qeth_l2_hard_start_xmit,
+       .ndo_features_check     = qeth_features_check,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_rx_mode        = qeth_l2_set_rx_mode,
        .ndo_do_ioctl           = qeth_do_ioctl,
@@ -1009,6 +1010,7 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
        if (card->info.type == QETH_CARD_TYPE_OSD && !card->info.guestlan) {
                card->dev->hw_features = NETIF_F_SG;
                card->dev->vlan_features = NETIF_F_SG;
+               card->dev->features |= NETIF_F_SG;
                /* OSA 3S and earlier has no RX/TX support */
                if (qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM)) {
                        card->dev->hw_features |= NETIF_F_IP_CSUM;
@@ -1027,8 +1029,6 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
 
        card->info.broadcast_capable = 1;
        qeth_l2_request_initial_mac(card);
-       card->dev->gso_max_size = (QETH_MAX_BUFFER_ELEMENTS(card) - 1) *
-                                 PAGE_SIZE;
        SET_NETDEV_DEV(card->dev, &card->gdev->dev);
        netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT);
        netif_carrier_off(card->dev);
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index ab661a431f7c..27185ab38136 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -1376,6 +1376,7 @@ qeth_l3_add_mc_to_hash(struct qeth_card *card, struct in_device *in4_dev)
 
                tmp->u.a4.addr = be32_to_cpu(im4->multiaddr);
                memcpy(tmp->mac, buf, sizeof(tmp->mac));
+               tmp->is_multicast = 1;
 
                ipm = qeth_l3_ip_from_hash(card, tmp);
                if (ipm) {
@@ -1553,7 +1554,7 @@ static void qeth_l3_free_vlan_addresses4(struct qeth_card *card,
 
        addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
        if (!addr)
-               return;
+               goto out;
 
        spin_lock_bh(&card->ip_lock);
 
@@ -1567,6 +1568,7 @@ static void qeth_l3_free_vlan_addresses4(struct qeth_card *card,
        spin_unlock_bh(&card->ip_lock);
 
        kfree(addr);
+out:
        in_dev_put(in_dev);
 }
 
@@ -1591,7 +1593,7 @@ static void qeth_l3_free_vlan_addresses6(struct qeth_card *card,
 
        addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV6);
        if (!addr)
-               return;
+               goto out;
 
        spin_lock_bh(&card->ip_lock);
 
@@ -1606,6 +1608,7 @@ static void qeth_l3_free_vlan_addresses6(struct qeth_card *card,
        spin_unlock_bh(&card->ip_lock);
 
        kfree(addr);
+out:
        in6_dev_put(in6_dev);
 #endif /* CONFIG_QETH_IPV6 */
 }
@@ -2920,6 +2923,7 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = {
        .ndo_stop               = qeth_l3_stop,
        .ndo_get_stats          = qeth_get_stats,
        .ndo_start_xmit         = qeth_l3_hard_start_xmit,
+       .ndo_features_check     = qeth_features_check,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_rx_mode        = qeth_l3_set_multicast_list,
        .ndo_do_ioctl           = qeth_do_ioctl,
@@ -2960,6 +2964,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
                                card->dev->vlan_features = NETIF_F_SG |
                                        NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
                                        NETIF_F_TSO;
+                               card->dev->features |= NETIF_F_SG;
                        }
                }
        } else if (card->info.type == QETH_CARD_TYPE_IQD) {
@@ -2987,8 +2992,8 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
                                NETIF_F_HW_VLAN_CTAG_RX |
                                NETIF_F_HW_VLAN_CTAG_FILTER;
        netif_keep_dst(card->dev);
-       card->dev->gso_max_size = (QETH_MAX_BUFFER_ELEMENTS(card) - 1) *
-                                 PAGE_SIZE;
+       netif_set_gso_max_size(card->dev, (QETH_MAX_BUFFER_ELEMENTS(card) - 1) *
+                                         PAGE_SIZE);
 
        SET_NETDEV_DEV(card->dev, &card->gdev->dev);
        netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT);
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 0202e5132fa7..876cdbec1307 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -1016,7 +1016,7 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
                else
                        ret = ep->status;
                goto error_mutex;
-       } else if (!(req = usb_ep_alloc_request(ep->ep, GFP_KERNEL))) {
+       } else if (!(req = usb_ep_alloc_request(ep->ep, GFP_ATOMIC))) {
                ret = -ENOMEM;
        } else {
                req->buf      = data;
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 58585ec8699e..bd15309ac5f1 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -782,16 +782,6 @@ static void handle_rx(struct vhost_net *net)
                /* On error, stop handling until the next kick. */
                if (unlikely(headcount < 0))
                        goto out;
-               if (nvq->rx_array)
-                       msg.msg_control = vhost_net_buf_consume(&nvq->rxq);
-               /* On overrun, truncate and discard */
-               if (unlikely(headcount > UIO_MAXIOV)) {
-                       iov_iter_init(&msg.msg_iter, READ, vq->iov, 1, 1);
-                       err = sock->ops->recvmsg(sock, &msg,
-                                                1, MSG_DONTWAIT | MSG_TRUNC);
-                       pr_debug("Discarded rx packet: len %zd\n", sock_len);
-                       continue;
-               }
                /* OK, now we need to know about added descriptors. */
                if (!headcount) {
                        if (unlikely(vhost_enable_notify(&net->dev, vq))) {
@@ -804,6 +794,16 @@ static void handle_rx(struct vhost_net *net)
                         * they refilled. */
                        goto out;
                }
+               if (nvq->rx_array)
+                       msg.msg_control = vhost_net_buf_consume(&nvq->rxq);
+               /* On overrun, truncate and discard */
+               if (unlikely(headcount > UIO_MAXIOV)) {
+                       iov_iter_init(&msg.msg_iter, READ, vq->iov, 1, 1);
+                       err = sock->ops->recvmsg(sock, &msg,
+                                                1, MSG_DONTWAIT | MSG_TRUNC);
+                       pr_debug("Discarded rx packet: len %zd\n", sock_len);
+                       continue;
+               }
                /* We don't need to be notified again. */
                iov_iter_init(&msg.msg_iter, READ, vq->iov, in, vhost_len);
                fixup = msg.msg_iter;
diff --git a/fs/fcntl.c b/fs/fcntl.c
index 6fd311367efc..0345a46b8856 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -563,6 +563,9 @@ static int put_compat_flock64(const struct flock *kfl, struct compat_flock64 __u
 {
        struct compat_flock64 fl;
 
+       BUILD_BUG_ON(sizeof(kfl->l_start) > sizeof(ufl->l_start));
+       BUILD_BUG_ON(sizeof(kfl->l_len) > sizeof(ufl->l_len));
+
        memset(&fl, 0, sizeof(struct compat_flock64));
        copy_flock_fields(&fl, kfl);
        if (copy_to_user(ufl, &fl, sizeof(struct compat_flock64)))
@@ -641,12 +644,8 @@ COMPAT_SYSCALL_DEFINE3(fcntl64, unsigned int, fd, unsigned int, cmd,
                if (err)
                        break;
                err = fcntl_getlk(f.file, convert_fcntl_cmd(cmd), &flock);
-               if (err)
-                       break;
-               err = fixup_compat_flock(&flock);
-               if (err)
-                       return err;
-               err = put_compat_flock64(&flock, compat_ptr(arg));
+               if (!err)
+                       err = put_compat_flock64(&flock, compat_ptr(arg));
                break;
        case F_SETLK:
        case F_SETLKW:
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
index dc8b4896b77b..b1b0ca7ccb2b 100644
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -54,8 +54,9 @@ enum {
        NETIF_F_GSO_TUNNEL_REMCSUM_BIT, /* ... TUNNEL with TSO & REMCSUM */
        NETIF_F_GSO_SCTP_BIT,           /* ... SCTP fragmentation */
        NETIF_F_GSO_ESP_BIT,            /* ... ESP with TSO */
+       NETIF_F_GSO_UDP_BIT,            /* ... UFO, deprecated except tuntap */
        /**/NETIF_F_GSO_LAST =          /* last bit, see GSO_MASK */
-               NETIF_F_GSO_ESP_BIT,
+               NETIF_F_GSO_UDP_BIT,
 
        NETIF_F_FCOE_CRC_BIT,           /* FCoE CRC32 */
        NETIF_F_SCTP_CRC_BIT,           /* SCTP checksum offload */
@@ -132,6 +133,7 @@ enum {
 #define NETIF_F_GSO_TUNNEL_REMCSUM __NETIF_F(GSO_TUNNEL_REMCSUM)
 #define NETIF_F_GSO_SCTP       __NETIF_F(GSO_SCTP)
 #define NETIF_F_GSO_ESP                __NETIF_F(GSO_ESP)
+#define NETIF_F_GSO_UDP                __NETIF_F(GSO_UDP)
 #define NETIF_F_HW_VLAN_STAG_FILTER __NETIF_F(HW_VLAN_STAG_FILTER)
 #define NETIF_F_HW_VLAN_STAG_RX        __NETIF_F(HW_VLAN_STAG_RX)
 #define NETIF_F_HW_VLAN_STAG_TX        __NETIF_F(HW_VLAN_STAG_TX)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 2eaac7d75af4..46bf7cc7d5d5 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -4101,6 +4101,7 @@ static inline bool net_gso_ok(netdev_features_t features, int gso_type)
        BUILD_BUG_ON(SKB_GSO_TUNNEL_REMCSUM != (NETIF_F_GSO_TUNNEL_REMCSUM >> NETIF_F_GSO_SHIFT));
        BUILD_BUG_ON(SKB_GSO_SCTP    != (NETIF_F_GSO_SCTP >> NETIF_F_GSO_SHIFT));
        BUILD_BUG_ON(SKB_GSO_ESP != (NETIF_F_GSO_ESP >> NETIF_F_GSO_SHIFT));
+       BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_GSO_UDP >> NETIF_F_GSO_SHIFT));
 
        return (features & feature) == feature;
 }
diff --git a/include/linux/rculist_nulls.h b/include/linux/rculist_nulls.h
index a328e8181e49..e4b257ff881b 100644
--- a/include/linux/rculist_nulls.h
+++ b/include/linux/rculist_nulls.h
@@ -100,44 +100,6 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n,
                first->pprev = &n->next;
 }
 
-/**
- * hlist_nulls_add_tail_rcu
- * @n: the element to add to the hash list.
- * @h: the list to add to.
- *
- * Description:
- * Adds the specified element to the end of the specified hlist_nulls,
- * while permitting racing traversals.  NOTE: tail insertion requires
- * list traversal.
- *
- * The caller must take whatever precautions are necessary
- * (such as holding appropriate locks) to avoid racing
- * with another list-mutation primitive, such as hlist_nulls_add_head_rcu()
- * or hlist_nulls_del_rcu(), running on this same list.
- * However, it is perfectly legal to run concurrently with
- * the _rcu list-traversal primitives, such as
- * hlist_nulls_for_each_entry_rcu(), used to prevent memory-consistency
- * problems on Alpha CPUs.  Regardless of the type of CPU, the
- * list-traversal primitive must be guarded by rcu_read_lock().
- */
-static inline void hlist_nulls_add_tail_rcu(struct hlist_nulls_node *n,
-                                       struct hlist_nulls_head *h)
-{
-       struct hlist_nulls_node *i, *last = NULL;
-
-       for (i = hlist_nulls_first_rcu(h); !is_a_nulls(i);
-            i = hlist_nulls_next_rcu(i))
-               last = i;
-
-       if (last) {
-               n->next = last->next;
-               n->pprev = &last->next;
-               rcu_assign_pointer(hlist_nulls_next_rcu(last), n);
-       } else {
-               hlist_nulls_add_head_rcu(n, h);
-       }
-}
-
 /**
  * hlist_nulls_for_each_entry_rcu - iterate over rcu list of given type
  * @tpos:      the type * to use as a loop cursor.
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index d448a4804aea..051e0939ec19 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -569,6 +569,8 @@ enum {
        SKB_GSO_SCTP = 1 << 14,
 
        SKB_GSO_ESP = 1 << 15,
+
+       SKB_GSO_UDP = 1 << 16,
 };
 
 #if BITS_PER_LONG > 32
diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
index 97116379db5f..e87a805cbfef 100644
--- a/include/linux/usb/usbnet.h
+++ b/include/linux/usb/usbnet.h
@@ -81,6 +81,7 @@ struct usbnet {
 #              define EVENT_RX_KILL    10
 #              define EVENT_LINK_CHANGE        11
 #              define EVENT_SET_RX_MODE        12
+#              define EVENT_NO_IP_ALIGN        13
 };
 
 static inline struct usb_driver *driver_of(struct usb_interface *intf)
diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
index 210034c896e3..f144216febc6 100644
--- a/include/linux/virtio_net.h
+++ b/include/linux/virtio_net.h
@@ -9,7 +9,7 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
                                        const struct virtio_net_hdr *hdr,
                                        bool little_endian)
 {
-       unsigned short gso_type = 0;
+       unsigned int gso_type = 0;
 
        if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
                switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
@@ -19,6 +19,9 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
                case VIRTIO_NET_HDR_GSO_TCPV6:
                        gso_type = SKB_GSO_TCPV6;
                        break;
+               case VIRTIO_NET_HDR_GSO_UDP:
+                       gso_type = SKB_GSO_UDP;
+                       break;
                default:
                        return -EINVAL;
                }
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 6eac5cf8f1e6..35e9dd2d18ba 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -727,7 +727,7 @@ static inline int ipv6_addr_diff(const struct in6_addr *a1, const struct in6_add
 __be32 ipv6_select_ident(struct net *net,
                         const struct in6_addr *daddr,
                         const struct in6_addr *saddr);
-void ipv6_proxy_select_ident(struct net *net, struct sk_buff *skb);
+__be32 ipv6_proxy_select_ident(struct net *net, struct sk_buff *skb);
 
 int ip6_dst_hoplimit(struct dst_entry *dst);
 
diff --git a/include/net/sock.h b/include/net/sock.h
index a6b9a8d1a6df..006580155a87 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -683,11 +683,7 @@ static inline void sk_add_node_rcu(struct sock *sk, struct hlist_head *list)
 
 static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
 {
-       if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
-           sk->sk_family == AF_INET6)
-               hlist_nulls_add_tail_rcu(&sk->sk_nulls_node, list);
-       else
-               hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
+       hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
 }
 
 static inline void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
diff --git a/include/net/tcp.h b/include/net/tcp.h
index e6d0002a1b0b..6ced69940f5c 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -563,7 +563,7 @@ void tcp_push_one(struct sock *, unsigned int mss_now);
 void tcp_send_ack(struct sock *sk);
 void tcp_send_delayed_ack(struct sock *sk);
 void tcp_send_loss_probe(struct sock *sk);
-bool tcp_schedule_loss_probe(struct sock *sk);
+bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto);
 void tcp_skb_collapse_tstamp(struct sk_buff *skb,
                             const struct sk_buff *next_skb);
 
@@ -874,12 +874,11 @@ static inline int tcp_v6_sdif(const struct sk_buff *skb)
 }
 #endif
 
-/* TCP_SKB_CB reference means this can not be used from early demux */
 static inline bool inet_exact_dif_match(struct net *net, struct sk_buff *skb)
 {
 #if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
        if (!net->ipv4.sysctl_tcp_l3mdev_accept &&
-           skb && ipv4_l3mdev_skb(TCP_SKB_CB(skb)->header.h4.flags))
+           skb && ipv4_l3mdev_skb(IPCB(skb)->flags))
                return true;
 #endif
        return false;
diff --git a/kernel/audit.c b/kernel/audit.c
index be1c28fd4d57..5b34d3114af4 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -85,13 +85,13 @@ static int  audit_initialized;
 #define AUDIT_OFF      0
 #define AUDIT_ON       1
 #define AUDIT_LOCKED   2
-u32            audit_enabled;
-u32            audit_ever_enabled;
+u32            audit_enabled = AUDIT_OFF;
+u32            audit_ever_enabled = !!AUDIT_OFF;
 
 EXPORT_SYMBOL_GPL(audit_enabled);
 
 /* Default state when kernel boots without any parameters. */
-static u32     audit_default;
+static u32     audit_default = AUDIT_OFF;
 
 /* If auditing cannot proceed, audit_failure selects what happens. */
 static u32     audit_failure = AUDIT_FAIL_PRINTK;
@@ -1197,25 +1197,28 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
                        pid_t auditd_pid;
                        struct pid *req_pid = task_tgid(current);
 
-                       /* sanity check - PID values must match */
-                       if (new_pid != pid_vnr(req_pid))
+                       /* Sanity check - PID values must match. Setting
+                        * pid to 0 is how auditd ends auditing. */
+                       if (new_pid && (new_pid != pid_vnr(req_pid)))
                                return -EINVAL;
 
                        /* test the auditd connection */
                        audit_replace(req_pid);
 
                        auditd_pid = auditd_pid_vnr();
-                       /* only the current auditd can unregister itself */
-                       if ((!new_pid) && (new_pid != auditd_pid)) {
-                               audit_log_config_change("audit_pid", new_pid,
-                                                       auditd_pid, 0);
-                               return -EACCES;
-                       }
-                       /* replacing a healthy auditd is not allowed */
-                       if (auditd_pid && new_pid) {
-                               audit_log_config_change("audit_pid", new_pid,
-                                                       auditd_pid, 0);
-                               return -EEXIST;
+                       if (auditd_pid) {
+                               /* replacing a healthy auditd is not allowed */
+                               if (new_pid) {
+                                       audit_log_config_change("audit_pid",
+                                                       new_pid, auditd_pid, 0);
+                                       return -EEXIST;
+                               }
+                               /* only current auditd can unregister itself */
+                               if (pid_vnr(req_pid) != auditd_pid) {
+                                       audit_log_config_change("audit_pid",
+                                                       new_pid, auditd_pid, 0);
+                                       return -EACCES;
+                               }
                        }
 
                        if (new_pid) {
@@ -1549,8 +1552,6 @@ static int __init audit_init(void)
        register_pernet_subsys(&audit_net_ops);
 
        audit_initialized = AUDIT_INITIALIZED;
-       audit_enabled = audit_default;
-       audit_ever_enabled |= !!audit_default;
 
        kauditd_task = kthread_run(kauditd_thread, NULL, "kauditd");
        if (IS_ERR(kauditd_task)) {
@@ -1572,6 +1573,8 @@ static int __init audit_enable(char *str)
        audit_default = !!simple_strtol(str, NULL, 0);
        if (!audit_default)
                audit_initialized = AUDIT_DISABLED;
+       audit_enabled = audit_default;
+       audit_ever_enabled = !!audit_enabled;
 
        pr_info("%s\n", audit_default ?
                "enabled (after initialization)" : "disabled (until reboot)");
diff --git a/net/core/dev.c b/net/core/dev.c
index 11596a302a26..27357fc1730b 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2735,7 +2735,8 @@ EXPORT_SYMBOL(skb_mac_gso_segment);
 static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
 {
        if (tx_path)
-               return skb->ip_summed != CHECKSUM_PARTIAL;
+               return skb->ip_summed != CHECKSUM_PARTIAL &&
+                      skb->ip_summed != CHECKSUM_UNNECESSARY;
 
        return skb->ip_summed == CHECKSUM_NONE;
 }
diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c
index abd07a443219..178bb9833311 100644
--- a/net/dccp/minisocks.c
+++ b/net/dccp/minisocks.c
@@ -57,10 +57,16 @@ void dccp_time_wait(struct sock *sk, int state, int timeo)
                if (state == DCCP_TIME_WAIT)
                        timeo = DCCP_TIMEWAIT_LEN;
 
+               /* tw_timer is pinned, so we need to make sure BH are disabled
+                * in following section, otherwise timer handler could run before
+                * we complete the initialization.
+                */
+               local_bh_disable();
                inet_twsk_schedule(tw, timeo);
                /* Linkage updates. */
                __inet_twsk_hashdance(tw, sk, &dccp_hashinfo);
                inet_twsk_put(tw);
+               local_bh_enable();
        } else {
                /* Sorry, if we're out of memory, just CLOSE this
                 * socket up.  We've got bigger problems than
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index e31108e5ef79..b9d9a2b8792c 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1221,9 +1221,10 @@ EXPORT_SYMBOL(inet_sk_rebuild_header);
 struct sk_buff *inet_gso_segment(struct sk_buff *skb,
                                 netdev_features_t features)
 {
-       bool fixedid = false, gso_partial, encap;
+       bool udpfrag = false, fixedid = false, gso_partial, encap;
        struct sk_buff *segs = ERR_PTR(-EINVAL);
        const struct net_offload *ops;
+       unsigned int offset = 0;
        struct iphdr *iph;
        int proto, tot_len;
        int nhoff;
@@ -1258,6 +1259,7 @@ struct sk_buff *inet_gso_segment(struct sk_buff *skb,
        segs = ERR_PTR(-EPROTONOSUPPORT);
 
        if (!skb->encapsulation || encap) {
+               udpfrag = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP);
                fixedid = !!(skb_shinfo(skb)->gso_type & SKB_GSO_TCP_FIXEDID);
 
                /* fixed ID is invalid if DF bit is not set */
@@ -1277,7 +1279,13 @@ struct sk_buff *inet_gso_segment(struct sk_buff *skb,
        skb = segs;
        do {
                iph = (struct iphdr *)(skb_mac_header(skb) + nhoff);
-               if (skb_is_gso(skb)) {
+               if (udpfrag) {
+                       iph->frag_off = htons(offset >> 3);
+                       if (skb->next)
+                               iph->frag_off |= htons(IP_MF);
+                       offset += skb->len - nhoff - ihl;
+                       tot_len = skb->len - nhoff;
+               } else if (skb_is_gso(skb)) {
                        if (!fixedid) {
                                iph->id = htons(id);
                                id += skb_shinfo(skb)->gso_segs;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index b6bb3cdfad09..c5447b9f8517 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -592,6 +592,7 @@ void tcp_rcv_space_adjust(struct sock *sk)
        int time;
        int copied;
 
+       tcp_mstamp_refresh(tp);
        time = tcp_stamp_us_delta(tp->tcp_mstamp, tp->rcvq_space.time);
        if (time < (tp->rcv_rtt_est.rtt_us >> 3) || tp->rcv_rtt_est.rtt_us == 0)
                return;
@@ -3020,7 +3021,7 @@ void tcp_rearm_rto(struct sock *sk)
 /* Try to schedule a loss probe; if that doesn't work, then schedule an RTO. */
 static void tcp_set_xmit_timer(struct sock *sk)
 {
-       if (!tcp_schedule_loss_probe(sk))
+       if (!tcp_schedule_loss_probe(sk, true))
                tcp_rearm_rto(sk);
 }
 
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 5b027c69cbc5..5a5ed4f14678 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1587,6 +1587,34 @@ int tcp_filter(struct sock *sk, struct sk_buff *skb)
 }
 EXPORT_SYMBOL(tcp_filter);
 
+static void tcp_v4_restore_cb(struct sk_buff *skb)
+{
+       memmove(IPCB(skb), &TCP_SKB_CB(skb)->header.h4,
+               sizeof(struct inet_skb_parm));
+}
+
+static void tcp_v4_fill_cb(struct sk_buff *skb, const struct iphdr *iph,
+                          const struct tcphdr *th)
+{
+       /* This is tricky : We move IPCB at its correct location into TCP_SKB_CB()
+        * barrier() makes sure compiler wont play fool^Waliasing games.
+        */
+       memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
+               sizeof(struct inet_skb_parm));
+       barrier();
+
+       TCP_SKB_CB(skb)->seq = ntohl(th->seq);
+       TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
+                                   skb->len - th->doff * 4);
+       TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
+       TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
+       TCP_SKB_CB(skb)->tcp_tw_isn = 0;
+       TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
+       TCP_SKB_CB(skb)->sacked  = 0;
+       TCP_SKB_CB(skb)->has_rxtstamp =
+                       skb->tstamp || skb_hwtstamps(skb)->hwtstamp;
+}
+
 /*
  *     From tcp_input.c
  */
@@ -1627,24 +1655,6 @@ int tcp_v4_rcv(struct sk_buff *skb)
 
        th = (const struct tcphdr *)skb->data;
        iph = ip_hdr(skb);
-       /* This is tricky : We move IPCB at its correct location into TCP_SKB_CB()
-        * barrier() makes sure compiler wont play fool^Waliasing games.
-        */
-       memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
-               sizeof(struct inet_skb_parm));
-       barrier();
-
-       TCP_SKB_CB(skb)->seq = ntohl(th->seq);
-       TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
-                                   skb->len - th->doff * 4);
-       TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
-       TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
-       TCP_SKB_CB(skb)->tcp_tw_isn = 0;
-       TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
-       TCP_SKB_CB(skb)->sacked  = 0;
-       TCP_SKB_CB(skb)->has_rxtstamp =
-                       skb->tstamp || skb_hwtstamps(skb)->hwtstamp;
-
 lookup:
        sk = __inet_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th), th->source,
                               th->dest, sdif, &refcounted);
@@ -1675,14 +1685,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
                sock_hold(sk);
                refcounted = true;
                nsk = NULL;
-               if (!tcp_filter(sk, skb))
+               if (!tcp_filter(sk, skb)) {
+                       th = (const struct tcphdr *)skb->data;
+                       iph = ip_hdr(skb);
+                       tcp_v4_fill_cb(skb, iph, th);
                        nsk = tcp_check_req(sk, skb, req, false);
+               }
                if (!nsk) {
                        reqsk_put(req);
                        goto discard_and_relse;
                }
                if (nsk == sk) {
                        reqsk_put(req);
+                       tcp_v4_restore_cb(skb);
                } else if (tcp_child_process(sk, nsk, skb)) {
                        tcp_v4_send_reset(nsk, skb);
                        goto discard_and_relse;
@@ -1708,6 +1723,7 @@ int tcp_v4_rcv(struct sk_buff *skb)
                goto discard_and_relse;
        th = (const struct tcphdr *)skb->data;
        iph = ip_hdr(skb);
+       tcp_v4_fill_cb(skb, iph, th);
 
        skb->dev = NULL;
 
@@ -1738,6 +1754,8 @@ int tcp_v4_rcv(struct sk_buff *skb)
        if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
                goto discard_it;
 
+       tcp_v4_fill_cb(skb, iph, th);
+
        if (tcp_checksum_complete(skb)) {
 csum_error:
                __TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
@@ -1764,6 +1782,8 @@ int tcp_v4_rcv(struct sk_buff *skb)
                goto discard_it;
        }
 
+       tcp_v4_fill_cb(skb, iph, th);
+
        if (tcp_checksum_complete(skb)) {
                inet_twsk_put(inet_twsk(sk));
                goto csum_error;
@@ -1780,6 +1800,7 @@ int tcp_v4_rcv(struct sk_buff *skb)
                if (sk2) {
                        inet_twsk_deschedule_put(inet_twsk(sk));
                        sk = sk2;
+                       tcp_v4_restore_cb(skb);
                        refcounted = false;
                        goto process;
                }
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 188a6f31356d..420fecbb98fe 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -312,10 +312,16 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
                if (state == TCP_TIME_WAIT)
                        timeo = TCP_TIMEWAIT_LEN;
 
+               /* tw_timer is pinned, so we need to make sure BH are disabled
+                * in following section, otherwise timer handler could run before
+                * we complete the initialization.
+                */
+               local_bh_disable();
                inet_twsk_schedule(tw, timeo);
                /* Linkage updates. */
                __inet_twsk_hashdance(tw, sk, &tcp_hashinfo);
                inet_twsk_put(tw);
+               local_bh_enable();
        } else {
                /* Sorry, if we're out of memory, just CLOSE this
                 * socket up.  We've got bigger problems than
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 478909f4694d..cd3d60bb7cc8 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2337,7 +2337,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
 
                /* Send one loss probe per tail loss episode. */
                if (push_one != 2)
-                       tcp_schedule_loss_probe(sk);
+                       tcp_schedule_loss_probe(sk, false);
                is_cwnd_limited |= (tcp_packets_in_flight(tp) >= tp->snd_cwnd);
                tcp_cwnd_validate(sk, is_cwnd_limited);
                return false;
@@ -2345,7 +2345,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
        return !tp->packets_out && tcp_send_head(sk);
 }
 
-bool tcp_schedule_loss_probe(struct sock *sk)
+bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto)
 {
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
@@ -2384,7 +2384,9 @@ bool tcp_schedule_loss_probe(struct sock *sk)
        }
 
        /* If the RTO formula yields an earlier time, then use that time. */
-       rto_delta_us = tcp_rto_delta_us(sk);  /* How far in future is RTO? */
+       rto_delta_us = advancing_rto ?
+                       jiffies_to_usecs(inet_csk(sk)->icsk_rto) :
+                       tcp_rto_delta_us(sk);  /* How far in future is RTO? */
        if (rto_delta_us > 0)
                timeout = min_t(u32, timeout, usecs_to_jiffies(rto_delta_us));
 
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index e360d55be555..01801b77bd0d 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -187,16 +187,57 @@ struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
 }
 EXPORT_SYMBOL(skb_udp_tunnel_segment);
 
-static struct sk_buff *udp4_tunnel_segment(struct sk_buff *skb,
-                                          netdev_features_t features)
+static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
+                                        netdev_features_t features)
 {
        struct sk_buff *segs = ERR_PTR(-EINVAL);
+       unsigned int mss;
+       __wsum csum;
+       struct udphdr *uh;
+       struct iphdr *iph;
 
        if (skb->encapsulation &&
            (skb_shinfo(skb)->gso_type &
-            (SKB_GSO_UDP_TUNNEL|SKB_GSO_UDP_TUNNEL_CSUM)))
+            (SKB_GSO_UDP_TUNNEL|SKB_GSO_UDP_TUNNEL_CSUM))) {
                segs = skb_udp_tunnel_segment(skb, features, false);
+               goto out;
+       }
+
+       if (!pskb_may_pull(skb, sizeof(struct udphdr)))
+               goto out;
+
+       mss = skb_shinfo(skb)->gso_size;
+       if (unlikely(skb->len <= mss))
+               goto out;
+
+       /* Do software UFO. Complete and fill in the UDP checksum as
+        * HW cannot do checksum of UDP packets sent as multiple
+        * IP fragments.
+        */
 
+       uh = udp_hdr(skb);
+       iph = ip_hdr(skb);
+
+       uh->check = 0;
+       csum = skb_checksum(skb, 0, skb->len, 0);
+       uh->check = udp_v4_check(skb->len, iph->saddr, iph->daddr, csum);
+       if (uh->check == 0)
+               uh->check = CSUM_MANGLED_0;
+
+       skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+       /* If there is no outer header we can fake a checksum offload
+        * due to the fact that we have already done the checksum in
+        * software prior to segmenting the frame.
+        */
+       if (!skb->encap_hdr_csum)
+               features |= NETIF_F_HW_CSUM;
+
+       /* Fragment the skb. IP headers of the fragments are updated in
+        * inet_gso_segment()
+        */
+       segs = skb_segment(skb, features);
+out:
        return segs;
 }
 
@@ -330,7 +371,7 @@ static int udp4_gro_complete(struct sk_buff *skb, int nhoff)
 
 static const struct net_offload udpv4_offload = {
        .callbacks = {
-               .gso_segment = udp4_tunnel_segment,
+               .gso_segment = udp4_ufo_fragment,
                .gro_receive  = udp4_gro_receive,
                .gro_complete = udp4_gro_complete,
        },
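
The restored UFO path above fills in the UDP checksum in software (skb_checksum() plus udp_v4_check()) before skb_segment() splits the packet, because hardware cannot checksum a datagram that leaves the stack as multiple IP fragments. As a standalone illustration of that software step, here is a plain RFC 1071 style ones' complement sum; inet_checksum() is a hypothetical helper written for this sketch, not the kernel's implementation. Note the convention the patch also follows: a computed value of zero is transmitted as 0xffff (CSUM_MANGLED_0), since zero means "no checksum" for UDP over IPv4.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* 16-bit ones' complement sum over a byte buffer, RFC 1071 style. */
static uint16_t inet_checksum(const uint8_t *data, size_t len)
{
	uint32_t sum = 0;

	while (len > 1) {
		sum += ((uint32_t)data[0] << 8) | data[1];
		data += 2;
		len -= 2;
	}
	if (len)                        /* odd trailing byte, padded with zero */
		sum += (uint32_t)data[0] << 8;
	while (sum >> 16)               /* fold carries back into 16 bits */
		sum = (sum & 0xffff) + (sum >> 16);

	return (uint16_t)~sum;
}

int main(void)
{
	const uint8_t payload[] = { 0x45, 0x00, 0x00, 0x1c };

	printf("checksum: 0x%04x\n", inet_checksum(payload, sizeof(payload)));
	return 0;
}
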
diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
index a338bbc33cf3..4fe7c90962dd 100644
--- a/net/ipv6/output_core.c
+++ b/net/ipv6/output_core.c
@@ -39,7 +39,7 @@ static u32 __ipv6_select_ident(struct net *net, u32 hashrnd,
  *
  * The network header must be set before calling this.
  */
-void ipv6_proxy_select_ident(struct net *net, struct sk_buff *skb)
+__be32 ipv6_proxy_select_ident(struct net *net, struct sk_buff *skb)
 {
        static u32 ip6_proxy_idents_hashrnd __read_mostly;
        struct in6_addr buf[2];
@@ -51,14 +51,14 @@ void ipv6_proxy_select_ident(struct net *net, struct sk_buff *skb)
                                   offsetof(struct ipv6hdr, saddr),
                                   sizeof(buf), buf);
        if (!addrs)
-               return;
+               return 0;
 
        net_get_random_once(&ip6_proxy_idents_hashrnd,
                            sizeof(ip6_proxy_idents_hashrnd));
 
        id = __ipv6_select_ident(net, ip6_proxy_idents_hashrnd,
                                 &addrs[1], &addrs[0]);
-       skb_shinfo(skb)->ip6_frag_id = htonl(id);
+       return htonl(id);
 }
 EXPORT_SYMBOL_GPL(ipv6_proxy_select_ident);
 
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index a96d5b385d8f..598efa8cfe25 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -960,7 +960,7 @@ static struct net_device *ip6_rt_get_dev_rcu(struct rt6_info *rt)
 {
        struct net_device *dev = rt->dst.dev;
 
-       if (rt->rt6i_flags & RTF_LOCAL) {
+       if (rt->rt6i_flags & (RTF_LOCAL | RTF_ANYCAST)) {
                /* for copies of local routes, dst->dev needs to be the
                 * device if it is a master device, the master device if
                 * device is enslaved, and the loopback as the default
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index ac912bb21747..e79854cc5790 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -1087,6 +1087,7 @@ static void ipip6_tunnel_update(struct ip_tunnel *t, struct ip_tunnel_parm *p,
        ipip6_tunnel_link(sitn, t);
        t->parms.iph.ttl = p->iph.ttl;
        t->parms.iph.tos = p->iph.tos;
+       t->parms.iph.frag_off = p->iph.frag_off;
        if (t->parms.link != p->link || t->fwmark != fwmark) {
                t->parms.link = p->link;
                t->fwmark = fwmark;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 64d94afa427f..32ded300633d 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1448,7 +1448,6 @@ static int tcp_v6_rcv(struct sk_buff *skb)
                struct sock *nsk;
 
                sk = req->rsk_listener;
-               tcp_v6_fill_cb(skb, hdr, th);
                if (tcp_v6_inbound_md5_hash(sk, skb)) {
                        sk_drops_add(sk, skb);
                        reqsk_put(req);
@@ -1461,8 +1460,12 @@ static int tcp_v6_rcv(struct sk_buff *skb)
                sock_hold(sk);
                refcounted = true;
                nsk = NULL;
-               if (!tcp_filter(sk, skb))
+               if (!tcp_filter(sk, skb)) {
+                       th = (const struct tcphdr *)skb->data;
+                       hdr = ipv6_hdr(skb);
+                       tcp_v6_fill_cb(skb, hdr, th);
                        nsk = tcp_check_req(sk, skb, req, false);
+               }
                if (!nsk) {
                        reqsk_put(req);
                        goto discard_and_relse;
@@ -1486,8 +1489,6 @@ static int tcp_v6_rcv(struct sk_buff *skb)
        if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
                goto discard_and_relse;
 
-       tcp_v6_fill_cb(skb, hdr, th);
-
        if (tcp_v6_inbound_md5_hash(sk, skb))
                goto discard_and_relse;
 
@@ -1495,6 +1496,7 @@ static int tcp_v6_rcv(struct sk_buff *skb)
                goto discard_and_relse;
        th = (const struct tcphdr *)skb->data;
        hdr = ipv6_hdr(skb);
+       tcp_v6_fill_cb(skb, hdr, th);
 
        skb->dev = NULL;
 
@@ -1583,7 +1585,6 @@ static int tcp_v6_rcv(struct sk_buff *skb)
                tcp_v6_timewait_ack(sk, skb);
                break;
        case TCP_TW_RST:
-               tcp_v6_restore_cb(skb);
                tcp_v6_send_reset(sk, skb);
                inet_twsk_deschedule_put(inet_twsk(sk));
                goto discard_it;
diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
index 455fd4e39333..a0f89ad76f9d 100644
--- a/net/ipv6/udp_offload.c
+++ b/net/ipv6/udp_offload.c
@@ -17,15 +17,94 @@
 #include <net/ip6_checksum.h>
 #include "ip6_offload.h"
 
-static struct sk_buff *udp6_tunnel_segment(struct sk_buff *skb,
-                                          netdev_features_t features)
+static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
+                                        netdev_features_t features)
 {
        struct sk_buff *segs = ERR_PTR(-EINVAL);
+       unsigned int mss;
+       unsigned int unfrag_ip6hlen, unfrag_len;
+       struct frag_hdr *fptr;
+       u8 *packet_start, *prevhdr;
+       u8 nexthdr;
+       u8 frag_hdr_sz = sizeof(struct frag_hdr);
+       __wsum csum;
+       int tnl_hlen;
+       int err;
+
+       mss = skb_shinfo(skb)->gso_size;
+       if (unlikely(skb->len <= mss))
+               goto out;
 
        if (skb->encapsulation && skb_shinfo(skb)->gso_type &
            (SKB_GSO_UDP_TUNNEL|SKB_GSO_UDP_TUNNEL_CSUM))
                segs = skb_udp_tunnel_segment(skb, features, true);
+       else {
+               const struct ipv6hdr *ipv6h;
+               struct udphdr *uh;
+
+               if (!pskb_may_pull(skb, sizeof(struct udphdr)))
+                       goto out;
+
+               /* Do software UFO. Complete and fill in the UDP checksum as HW cannot
+                * do checksum of UDP packets sent as multiple IP fragments.
+                */
+
+               uh = udp_hdr(skb);
+               ipv6h = ipv6_hdr(skb);
+
+               uh->check = 0;
+               csum = skb_checksum(skb, 0, skb->len, 0);
+               uh->check = udp_v6_check(skb->len, &ipv6h->saddr,
+                                         &ipv6h->daddr, csum);
+               if (uh->check == 0)
+                       uh->check = CSUM_MANGLED_0;
+
+               skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+               /* If there is no outer header we can fake a checksum offload
+                * due to the fact that we have already done the checksum in
+                * software prior to segmenting the frame.
+                */
+               if (!skb->encap_hdr_csum)
+                       features |= NETIF_F_HW_CSUM;
+
+               /* Check if there is enough headroom to insert fragment header. */
+               tnl_hlen = skb_tnl_header_len(skb);
+               if (skb->mac_header < (tnl_hlen + frag_hdr_sz)) {
+                       if (gso_pskb_expand_head(skb, tnl_hlen + frag_hdr_sz))
+                               goto out;
+               }
+
+               /* Find the unfragmentable header and shift it left by frag_hdr_sz
+                * bytes to insert fragment header.
+                */
+               err = ip6_find_1stfragopt(skb, &prevhdr);
+               if (err < 0)
+                       return ERR_PTR(err);
+               unfrag_ip6hlen = err;
+               nexthdr = *prevhdr;
+               *prevhdr = NEXTHDR_FRAGMENT;
+               unfrag_len = (skb_network_header(skb) - skb_mac_header(skb)) +
+                            unfrag_ip6hlen + tnl_hlen;
+               packet_start = (u8 *) skb->head + SKB_GSO_CB(skb)->mac_offset;
+               memmove(packet_start-frag_hdr_sz, packet_start, unfrag_len);
+
+               SKB_GSO_CB(skb)->mac_offset -= frag_hdr_sz;
+               skb->mac_header -= frag_hdr_sz;
+               skb->network_header -= frag_hdr_sz;
+
+               fptr = (struct frag_hdr *)(skb_network_header(skb) + unfrag_ip6hlen);
+               fptr->nexthdr = nexthdr;
+               fptr->reserved = 0;
+               fptr->identification = ipv6_proxy_select_ident(dev_net(skb->dev), skb);
+
+               /* Fragment the skb. ipv6 header and the remaining fields of the
+                * fragment header are updated in ipv6_gso_segment()
+                */
+               segs = skb_segment(skb, features);
+       }
 
+out:
        return segs;
 }
 
@@ -75,7 +154,7 @@ static int udp6_gro_complete(struct sk_buff *skb, int nhoff)
 
 static const struct net_offload udpv6_offload = {
        .callbacks = {
-               .gso_segment    =       udp6_tunnel_segment,
+               .gso_segment    =       udp6_ufo_fragment,
                .gro_receive    =       udp6_gro_receive,
                .gro_complete   =       udp6_gro_complete,
        },
diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
index af4e76ac88ff..c5fa634e63ca 100644
--- a/net/kcm/kcmsock.c
+++ b/net/kcm/kcmsock.c
@@ -1625,60 +1625,35 @@ static struct proto kcm_proto = {
 };
 
 /* Clone a kcm socket. */
-static int kcm_clone(struct socket *osock, struct kcm_clone *info,
-                    struct socket **newsockp)
+static struct file *kcm_clone(struct socket *osock)
 {
        struct socket *newsock;
        struct sock *newsk;
-       struct file *newfile;
-       int err, newfd;
+       struct file *file;
 
-       err = -ENFILE;
        newsock = sock_alloc();
        if (!newsock)
-               goto out;
+               return ERR_PTR(-ENFILE);
 
        newsock->type = osock->type;
        newsock->ops = osock->ops;
 
        __module_get(newsock->ops->owner);
 
-       newfd = get_unused_fd_flags(0);
-       if (unlikely(newfd < 0)) {
-               err = newfd;
-               goto out_fd_fail;
-       }
-
-       newfile = sock_alloc_file(newsock, 0, osock->sk->sk_prot_creator->name);
-       if (unlikely(IS_ERR(newfile))) {
-               err = PTR_ERR(newfile);
-               goto out_sock_alloc_fail;
-       }
-
        newsk = sk_alloc(sock_net(osock->sk), PF_KCM, GFP_KERNEL,
                         &kcm_proto, true);
        if (!newsk) {
-               err = -ENOMEM;
-               goto out_sk_alloc_fail;
+               sock_release(newsock);
+               return ERR_PTR(-ENOMEM);
        }
-
        sock_init_data(newsock, newsk);
        init_kcm_sock(kcm_sk(newsk), kcm_sk(osock->sk)->mux);
 
-       fd_install(newfd, newfile);
-       *newsockp = newsock;
-       info->fd = newfd;
-
-       return 0;
+       file = sock_alloc_file(newsock, 0, osock->sk->sk_prot_creator->name);
+       if (IS_ERR(file))
+               sock_release(newsock);
 
-out_sk_alloc_fail:
-       fput(newfile);
-out_sock_alloc_fail:
-       put_unused_fd(newfd);
-out_fd_fail:
-       sock_release(newsock);
-out:
-       return err;
+       return file;
 }
 
 static int kcm_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
@@ -1708,17 +1683,25 @@ static int kcm_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
        }
        case SIOCKCMCLONE: {
                struct kcm_clone info;
-               struct socket *newsock = NULL;
-
-               err = kcm_clone(sock, &info, &newsock);
-               if (!err) {
-                       if (copy_to_user((void __user *)arg, &info,
-                                        sizeof(info))) {
-                               err = -EFAULT;
-                               sys_close(info.fd);
-                       }
-               }
+               struct file *file;
+
+               info.fd = get_unused_fd_flags(0);
+               if (unlikely(info.fd < 0))
+                       return info.fd;
 
+               file = kcm_clone(sock);
+               if (IS_ERR(file)) {
+                       put_unused_fd(info.fd);
+                       return PTR_ERR(file);
+               }
+               if (copy_to_user((void __user *)arg, &info,
+                                sizeof(info))) {
+                       put_unused_fd(info.fd);
+                       fput(file);
+                       return -EFAULT;
+               }
+               fd_install(info.fd, file);
+               err = 0;
                break;
        }
        default:
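
The SIOCKCMCLONE rework above follows a common ordering rule: reserve the descriptor first, perform every step that can fail (kcm_clone(), copy_to_user()), and only call fd_install() last, so the error paths never have to revoke something user space can already see. Below is a hedged userspace analogy of that ordering; the slot table and helper names are invented for illustration and are not part of KCM.

#include <errno.h>
#include <stdlib.h>
#include <string.h>

#define TABLE_SIZE 16
#define RESERVED ((void *)1)          /* slot taken but not yet published */

static void *slot_table[TABLE_SIZE];  /* stand-in for the fd table */

static int reserve_slot(void)
{
	for (int i = 0; i < TABLE_SIZE; i++) {
		if (!slot_table[i]) {
			slot_table[i] = RESERVED;
			return i;
		}
	}
	return -EMFILE;
}

static void put_slot(int i)
{
	slot_table[i] = NULL;
}

/* Reserve first, do the fallible work, publish ("install") last. */
static int clone_and_publish(const char *name, int *out_slot)
{
	int slot = reserve_slot();

	if (slot < 0)
		return slot;

	char *obj = strdup(name);     /* the step that can fail (think kcm_clone) */
	if (!obj) {
		put_slot(slot);       /* nothing was published, just undo the reservation */
		return -ENOMEM;
	}

	*out_slot = slot;             /* report the slot number to the caller... */
	slot_table[slot] = obj;       /* ...and only then make the object visible */
	return 0;
}

int main(void)
{
	int slot;

	return clone_and_publish("kcm-clone", &slot) ? 1 : 0;
}
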
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index c3aec6227c91..363dd904733d 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -335,6 +335,8 @@ static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb,
                             const struct dp_upcall_info *upcall_info,
                                 uint32_t cutlen)
 {
+       unsigned int gso_type = skb_shinfo(skb)->gso_type;
+       struct sw_flow_key later_key;
        struct sk_buff *segs, *nskb;
        int err;
 
@@ -345,9 +347,21 @@ static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb,
        if (segs == NULL)
                return -EINVAL;
 
+       if (gso_type & SKB_GSO_UDP) {
+               /* The initial flow key extracted by ovs_flow_key_extract()
+                * in this case is for a first fragment, so we need to
+                * properly mark later fragments.
+                */
+               later_key = *key;
+               later_key.ip.frag = OVS_FRAG_TYPE_LATER;
+       }
+
        /* Queue all of the segments. */
        skb = segs;
        do {
+               if (gso_type & SKB_GSO_UDP && skb != segs)
+                       key = &later_key;
+
                err = queue_userspace_packet(dp, skb, key, upcall_info, cutlen);
                if (err)
                        break;
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index 8c94cef25a72..cfb652a4e007 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -584,7 +584,8 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
                        key->ip.frag = OVS_FRAG_TYPE_LATER;
                        return 0;
                }
-               if (nh->frag_off & htons(IP_MF))
+               if (nh->frag_off & htons(IP_MF) ||
+                       skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
                        key->ip.frag = OVS_FRAG_TYPE_FIRST;
                else
                        key->ip.frag = OVS_FRAG_TYPE_NONE;
@@ -700,6 +701,9 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
 
                if (key->ip.frag == OVS_FRAG_TYPE_LATER)
                        return 0;
+               if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
+                       key->ip.frag = OVS_FRAG_TYPE_FIRST;
+
                /* Transport layer. */
                if (key->ip.proto == NEXTHDR_TCP) {
                        if (tcphdr_ok(skb)) {
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 2986941164b1..f4a0587b7d5e 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1697,7 +1697,6 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
                atomic_long_set(&rollover->num, 0);
                atomic_long_set(&rollover->num_huge, 0);
                atomic_long_set(&rollover->num_failed, 0);
-               po->rollover = rollover;
        }
 
        if (type_flags & PACKET_FANOUT_FLAG_UNIQUEID) {
@@ -1755,6 +1754,8 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
                if (refcount_read(&match->sk_ref) < PACKET_FANOUT_MAX) {
                        __dev_remove_pack(&po->prot_hook);
                        po->fanout = match;
+                       po->rollover = rollover;
+                       rollover = NULL;
                        refcount_set(&match->sk_ref, 
refcount_read(&match->sk_ref) + 1);
                        __fanout_link(sk, po);
                        err = 0;
@@ -1768,10 +1769,7 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
        }
 
 out:
-       if (err && rollover) {
-               kfree_rcu(rollover, rcu);
-               po->rollover = NULL;
-       }
+       kfree(rollover);
        mutex_unlock(&fanout_mutex);
        return err;
 }
@@ -1795,11 +1793,6 @@ static struct packet_fanout *fanout_release(struct sock *sk)
                        list_del(&f->list);
                else
                        f = NULL;
-
-               if (po->rollover) {
-                       kfree_rcu(po->rollover, rcu);
-                       po->rollover = NULL;
-               }
        }
        mutex_unlock(&fanout_mutex);
 
@@ -3039,6 +3032,7 @@ static int packet_release(struct socket *sock)
        synchronize_net();
 
        if (f) {
+               kfree(po->rollover);
                fanout_release_data(f);
                kfree(f);
        }
@@ -3107,6 +3101,10 @@ static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
        if (need_rehook) {
                if (po->running) {
                        rcu_read_unlock();
+                       /* prevents packet_notifier() from calling
+                        * register_prot_hook()
+                        */
+                       po->num = 0;
                        __unregister_prot_hook(sk, true);
                        rcu_read_lock();
                        dev_curr = po->prot_hook.dev;
@@ -3115,6 +3113,7 @@ static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
                                                                 dev->ifindex);
                }
 
+               BUG_ON(po->running);
                po->num = proto;
                po->prot_hook.type = proto;
 
@@ -3853,7 +3852,6 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
        void *data = &val;
        union tpacket_stats_u st;
        struct tpacket_rollover_stats rstats;
-       struct packet_rollover *rollover;
 
        if (level != SOL_PACKET)
                return -ENOPROTOOPT;
@@ -3932,18 +3930,13 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
                       0);
                break;
        case PACKET_ROLLOVER_STATS:
-               rcu_read_lock();
-               rollover = rcu_dereference(po->rollover);
-               if (rollover) {
-                       rstats.tp_all = atomic_long_read(&rollover->num);
-                       rstats.tp_huge = atomic_long_read(&rollover->num_huge);
-                       rstats.tp_failed = atomic_long_read(&rollover->num_failed);
-                       data = &rstats;
-                       lv = sizeof(rstats);
-               }
-               rcu_read_unlock();
-               if (!rollover)
+               if (!po->rollover)
                        return -EINVAL;
+               rstats.tp_all = atomic_long_read(&po->rollover->num);
+               rstats.tp_huge = atomic_long_read(&po->rollover->num_huge);
+               rstats.tp_failed = atomic_long_read(&po->rollover->num_failed);
+               data = &rstats;
+               lv = sizeof(rstats);
                break;
        case PACKET_TX_HAS_OFF:
                val = po->tp_tx_has_off;
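
In the af_packet change above, fanout_add() now keeps the freshly allocated rollover in a local variable, hands it to po->rollover only on the success path (clearing the local), and frees the local unconditionally at the exit label, which is safe because kfree(NULL) is a no-op. The sketch below is a userspace analogy of that ownership hand-off; the struct and function names are hypothetical.

#include <stdlib.h>

struct rollover { long num; };
struct pkt_sock { struct rollover *rollover; };

static int attach_rollover(struct pkt_sock *po, int want_rollover, int later_step_fails)
{
	struct rollover *rollover = NULL;
	int err = 0;

	if (want_rollover) {
		rollover = calloc(1, sizeof(*rollover));
		if (!rollover)
			return -1;
	}

	if (later_step_fails) {           /* any failure after the allocation... */
		err = -1;
		goto out;                 /* ...just falls through to the common free */
	}

	po->rollover = rollover;          /* success: transfer ownership... */
	rollover = NULL;                  /* ...and drop it from the local */
out:
	free(rollover);                   /* free(NULL) is a no-op, so no flag needed */
	return err;
}

int main(void)
{
	struct pkt_sock po = { 0 };

	attach_rollover(&po, 1, 1);       /* failure path: allocation freed at out: */
	attach_rollover(&po, 1, 0);       /* success path: po.rollover now owns it  */
	free(po.rollover);
	return 0;
}
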
diff --git a/net/packet/internal.h b/net/packet/internal.h
index 562fbc155006..a1d2b2319ae9 100644
--- a/net/packet/internal.h
+++ b/net/packet/internal.h
@@ -95,7 +95,6 @@ struct packet_fanout {
 
 struct packet_rollover {
        int                     sock;
-       struct rcu_head         rcu;
        atomic_long_t           num;
        atomic_long_t           num_huge;
        atomic_long_t           num_failed;
diff --git a/net/rds/rdma.c b/net/rds/rdma.c
index 8886f15abe90..bc2f1e0977d6 100644
--- a/net/rds/rdma.c
+++ b/net/rds/rdma.c
@@ -183,7 +183,7 @@ static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
        long i;
        int ret;
 
-       if (rs->rs_bound_addr == 0) {
+       if (rs->rs_bound_addr == 0 || !rs->rs_transport) {
                ret = -ENOTCONN; /* XXX not a great errno */
                goto out;
        }
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index 1c40caadcff9..d836f998117b 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -229,6 +229,9 @@ static int tcf_csum_ipv4_udp(struct sk_buff *skb, unsigned int ihl,
        const struct iphdr *iph;
        u16 ul;
 
+       if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
+               return 1;
+
        /*
         * Support both UDP and UDPLITE checksum algorithms, Don't use
         * udph->len to get the real length without any protocol check,
@@ -282,6 +285,9 @@ static int tcf_csum_ipv6_udp(struct sk_buff *skb, unsigned int ihl,
        const struct ipv6hdr *ip6h;
        u16 ul;
 
+       if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
+               return 1;
+
        /*
         * Support both UDP and UDPLITE checksum algorithms, Don't use
         * udph->len to get the real length without any protocol check,
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index 990eb4d91d54..3a499530f321 100644
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -246,11 +246,8 @@ static int cls_bpf_init(struct tcf_proto *tp)
        return 0;
 }
 
-static void __cls_bpf_delete_prog(struct cls_bpf_prog *prog)
+static void cls_bpf_free_parms(struct cls_bpf_prog *prog)
 {
-       tcf_exts_destroy(&prog->exts);
-       tcf_exts_put_net(&prog->exts);
-
        if (cls_bpf_is_ebpf(prog))
                bpf_prog_put(prog->filter);
        else
@@ -258,6 +255,14 @@ static void __cls_bpf_delete_prog(struct cls_bpf_prog *prog)
 
        kfree(prog->bpf_name);
        kfree(prog->bpf_ops);
+}
+
+static void __cls_bpf_delete_prog(struct cls_bpf_prog *prog)
+{
+       tcf_exts_destroy(&prog->exts);
+       tcf_exts_put_net(&prog->exts);
+
+       cls_bpf_free_parms(prog);
        kfree(prog);
 }
 
@@ -509,10 +514,8 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
                goto errout;
 
        ret = cls_bpf_offload(tp, prog, oldprog);
-       if (ret) {
-               __cls_bpf_delete_prog(prog);
-               return ret;
-       }
+       if (ret)
+               goto errout_parms;
 
        if (!tc_in_hw(prog->gen_flags))
                prog->gen_flags |= TCA_CLS_FLAGS_NOT_IN_HW;
@@ -529,6 +532,8 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
        *arg = prog;
        return 0;
 
+errout_parms:
+       cls_bpf_free_parms(prog);
 errout:
        tcf_exts_destroy(&prog->exts);
        kfree(prog);
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index dcef97fa8047..aeffa320429d 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -1157,9 +1157,13 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt)
        if ((q->link.R_tab = qdisc_get_rtab(r, tb[TCA_CBQ_RTAB])) == NULL)
                return -EINVAL;
 
+       err = tcf_block_get(&q->link.block, &q->link.filter_list);
+       if (err)
+               goto put_rtab;
+
        err = qdisc_class_hash_init(&q->clhash);
        if (err < 0)
-               goto put_rtab;
+               goto put_block;
 
        q->link.sibling = &q->link;
        q->link.common.classid = sch->handle;
@@ -1193,6 +1197,9 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt)
        cbq_addprio(q, &q->link);
        return 0;
 
+put_block:
+       tcf_block_put(q->link.block);
+
 put_rtab:
        qdisc_put_rtab(q->link.R_tab);
        return err;
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 14c28fbfe6b8..d6163f7aefb1 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -187,13 +187,13 @@ static void sctp_for_each_tx_datachunk(struct sctp_association *asoc,
                list_for_each_entry(chunk, &t->transmitted, transmitted_list)
                        cb(chunk);
 
-       list_for_each_entry(chunk, &q->retransmit, list)
+       list_for_each_entry(chunk, &q->retransmit, transmitted_list)
                cb(chunk);
 
-       list_for_each_entry(chunk, &q->sacked, list)
+       list_for_each_entry(chunk, &q->sacked, transmitted_list)
                cb(chunk);
 
-       list_for_each_entry(chunk, &q->abandoned, list)
+       list_for_each_entry(chunk, &q->abandoned, transmitted_list)
                cb(chunk);
 
        list_for_each_entry(chunk, &q->out_chunk_list, list)
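
The sctp_for_each_tx_datachunk() fix above matters because a chunk sits on the retransmit, sacked and abandoned queues through its transmitted_list member, while the list member links it elsewhere; iterating with the wrong member makes the container_of() arithmetic land at the wrong offset inside the structure. A minimal userspace model of an intrusive list shows the idea; the singly linked node and the names here are simplified, hypothetical stand-ins for the kernel's list_head.

#include <stddef.h>
#include <stdio.h>

struct node { struct node *next; };

struct chunk {
	int tsn;
	struct node list;               /* linkage used by the out queue     */
	struct node transmitted_list;   /* linkage used by retransmit/sacked */
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
	struct chunk a = { .tsn = 1 }, b = { .tsn = 2 };
	struct node retransmit = { .next = &a.transmitted_list };

	a.transmitted_list.next = &b.transmitted_list;
	b.transmitted_list.next = NULL;

	/* Correct: walk the queue through the member it actually links. */
	for (struct node *n = retransmit.next; n; n = n->next) {
		struct chunk *c = container_of(n, struct chunk, transmitted_list);
		printf("tsn %d\n", c->tsn);
	}
	/* container_of(n, struct chunk, list) here would subtract the wrong
	 * offset and yield a bogus chunk pointer, which is the bug fixed above. */
	return 0;
}
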
diff --git a/net/tipc/server.c b/net/tipc/server.c
index 3cd6402e812c..f4c1b18c5fb0 100644
--- a/net/tipc/server.c
+++ b/net/tipc/server.c
@@ -313,6 +313,7 @@ static int tipc_accept_from_sock(struct tipc_conn *con)
        newcon->usr_data = s->tipc_conn_new(newcon->conid);
        if (!newcon->usr_data) {
                sock_release(newsock);
+               conn_put(newcon);
                return -ENOMEM;
        }
 
diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c
index ecca64fc6a6f..3deabcab4882 100644
--- a/net/tipc/udp_media.c
+++ b/net/tipc/udp_media.c
@@ -371,10 +371,6 @@ static int tipc_udp_recv(struct sock *sk, struct sk_buff *skb)
                        goto rcu_out;
        }
 
-       tipc_rcv(sock_net(sk), skb, b);
-       rcu_read_unlock();
-       return 0;
-
 rcu_out:
        rcu_read_unlock();
 out:
diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
index 3108e07526af..59ce2fb49821 100644
--- a/virt/kvm/arm/vgic/vgic-its.c
+++ b/virt/kvm/arm/vgic/vgic-its.c
@@ -393,6 +393,7 @@ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
        int ret = 0;
        u32 *intids;
        int nr_irqs, i;
+       u8 pendmask;
 
        nr_irqs = vgic_copy_lpi_list(vcpu, &intids);
        if (nr_irqs < 0)
@@ -400,7 +401,6 @@ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
 
        for (i = 0; i < nr_irqs; i++) {
                int byte_offset, bit_nr;
-               u8 pendmask;
 
                byte_offset = intids[i] / BITS_PER_BYTE;
                bit_nr = intids[i] % BITS_PER_BYTE;
