commit:     1e9d12596e3b32b5f8db872785dba97e0f54d942
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Tue Jun  4 11:08:46 2019 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue Jun  4 11:08:46 2019 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=1e9d1259

Linux patch 5.1.7

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README            |    4 +
 1006_linux-5.1.7.patch | 1551 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1555 insertions(+)

diff --git a/0000_README b/0000_README
index 7713f53..7c0827d 100644
--- a/0000_README
+++ b/0000_README
@@ -67,6 +67,10 @@ Patch:  1005_linux-5.1.6.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.1.6
 
+Patch:  1006_linux-5.1.7.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.1.7
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1006_linux-5.1.7.patch b/1006_linux-5.1.7.patch
new file mode 100644
index 0000000..6a91998
--- /dev/null
+++ b/1006_linux-5.1.7.patch
@@ -0,0 +1,1551 @@
+diff --git a/Makefile b/Makefile
+index d8bdd2bb55dc..299578ce385a 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 1
+-SUBLEVEL = 6
++SUBLEVEL = 7
+ EXTRAVERSION =
+ NAME = Shy Crocodile
+ 
+diff --git a/drivers/crypto/vmx/ghash.c b/drivers/crypto/vmx/ghash.c
+index dd8b8716467a..2d1a8cd35509 100644
+--- a/drivers/crypto/vmx/ghash.c
++++ b/drivers/crypto/vmx/ghash.c
+@@ -1,22 +1,14 @@
++// SPDX-License-Identifier: GPL-2.0
+ /**
+  * GHASH routines supporting VMX instructions on the Power 8
+  *
+- * Copyright (C) 2015 International Business Machines Inc.
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License as published by
+- * the Free Software Foundation; version 2 only.
+- *
+- * This program is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+- * GNU General Public License for more details.
+- *
+- * You should have received a copy of the GNU General Public License
+- * along with this program; if not, write to the Free Software
+- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ * Copyright (C) 2015, 2019 International Business Machines Inc.
+  *
+  * Author: Marcelo Henrique Cerri <mhce...@br.ibm.com>
++ *
++ * Extended by Daniel Axtens <d...@axtens.net> to replace the fallback
++ * mechanism. The new approach is based on arm64 code, which is:
++ *   Copyright (C) 2014 - 2018 Linaro Ltd. <ard.biesheu...@linaro.org>
+  */
+ 
+ #include <linux/types.h>
+@@ -39,71 +31,25 @@ void gcm_ghash_p8(u64 Xi[2], const u128 htable[16],
+                 const u8 *in, size_t len);
+ 
+ struct p8_ghash_ctx {
++      /* key used by vector asm */
+       u128 htable[16];
+-      struct crypto_shash *fallback;
++      /* key used by software fallback */
++      be128 key;
+ };
+ 
+ struct p8_ghash_desc_ctx {
+       u64 shash[2];
+       u8 buffer[GHASH_DIGEST_SIZE];
+       int bytes;
+-      struct shash_desc fallback_desc;
+ };
+ 
+-static int p8_ghash_init_tfm(struct crypto_tfm *tfm)
+-{
+-      const char *alg = "ghash-generic";
+-      struct crypto_shash *fallback;
+-      struct crypto_shash *shash_tfm = __crypto_shash_cast(tfm);
+-      struct p8_ghash_ctx *ctx = crypto_tfm_ctx(tfm);
+-
+-      fallback = crypto_alloc_shash(alg, 0, CRYPTO_ALG_NEED_FALLBACK);
+-      if (IS_ERR(fallback)) {
+-              printk(KERN_ERR
+-                     "Failed to allocate transformation for '%s': %ld\n",
+-                     alg, PTR_ERR(fallback));
+-              return PTR_ERR(fallback);
+-      }
+-
+-      crypto_shash_set_flags(fallback,
+-                             crypto_shash_get_flags((struct crypto_shash
+-                                                     *) tfm));
+-
+-      /* Check if the descsize defined in the algorithm is still enough. */
+-      if (shash_tfm->descsize < sizeof(struct p8_ghash_desc_ctx)
+-          + crypto_shash_descsize(fallback)) {
+-              printk(KERN_ERR
+-                     "Desc size of the fallback implementation (%s) does not match the expected value: %lu vs %u\n",
+-                     alg,
+-                     shash_tfm->descsize - sizeof(struct p8_ghash_desc_ctx),
+-                     crypto_shash_descsize(fallback));
+-              return -EINVAL;
+-      }
+-      ctx->fallback = fallback;
+-
+-      return 0;
+-}
+-
+-static void p8_ghash_exit_tfm(struct crypto_tfm *tfm)
+-{
+-      struct p8_ghash_ctx *ctx = crypto_tfm_ctx(tfm);
+-
+-      if (ctx->fallback) {
+-              crypto_free_shash(ctx->fallback);
+-              ctx->fallback = NULL;
+-      }
+-}
+-
+ static int p8_ghash_init(struct shash_desc *desc)
+ {
+-      struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm));
+       struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc);
+ 
+       dctx->bytes = 0;
+       memset(dctx->shash, 0, GHASH_DIGEST_SIZE);
+-      dctx->fallback_desc.tfm = ctx->fallback;
+-      dctx->fallback_desc.flags = desc->flags;
+-      return crypto_shash_init(&dctx->fallback_desc);
++      return 0;
+ }
+ 
+ static int p8_ghash_setkey(struct crypto_shash *tfm, const u8 *key,
+@@ -121,7 +67,51 @@ static int p8_ghash_setkey(struct crypto_shash *tfm, const u8 *key,
+       disable_kernel_vsx();
+       pagefault_enable();
+       preempt_enable();
+-      return crypto_shash_setkey(ctx->fallback, key, keylen);
++
++      memcpy(&ctx->key, key, GHASH_BLOCK_SIZE);
++
++      return 0;
++}
++
++static inline void __ghash_block(struct p8_ghash_ctx *ctx,
++                               struct p8_ghash_desc_ctx *dctx)
++{
++      if (!IN_INTERRUPT) {
++              preempt_disable();
++              pagefault_disable();
++              enable_kernel_vsx();
++              gcm_ghash_p8(dctx->shash, ctx->htable,
++                              dctx->buffer, GHASH_DIGEST_SIZE);
++              disable_kernel_vsx();
++              pagefault_enable();
++              preempt_enable();
++      } else {
++              crypto_xor((u8 *)dctx->shash, dctx->buffer, GHASH_BLOCK_SIZE);
++              gf128mul_lle((be128 *)dctx->shash, &ctx->key);
++      }
++}
++
++static inline void __ghash_blocks(struct p8_ghash_ctx *ctx,
++                                struct p8_ghash_desc_ctx *dctx,
++                                const u8 *src, unsigned int srclen)
++{
++      if (!IN_INTERRUPT) {
++              preempt_disable();
++              pagefault_disable();
++              enable_kernel_vsx();
++              gcm_ghash_p8(dctx->shash, ctx->htable,
++                              src, srclen);
++              disable_kernel_vsx();
++              pagefault_enable();
++              preempt_enable();
++      } else {
++              while (srclen >= GHASH_BLOCK_SIZE) {
++                      crypto_xor((u8 *)dctx->shash, src, GHASH_BLOCK_SIZE);
++                      gf128mul_lle((be128 *)dctx->shash, &ctx->key);
++                      srclen -= GHASH_BLOCK_SIZE;
++                      src += GHASH_BLOCK_SIZE;
++              }
++      }
+ }
+ 
+ static int p8_ghash_update(struct shash_desc *desc,
+@@ -131,49 +121,33 @@ static int p8_ghash_update(struct shash_desc *desc,
+       struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm));
+       struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc);
+ 
+-      if (IN_INTERRUPT) {
+-              return crypto_shash_update(&dctx->fallback_desc, src,
+-                                         srclen);
+-      } else {
+-              if (dctx->bytes) {
+-                      if (dctx->bytes + srclen < GHASH_DIGEST_SIZE) {
+-                              memcpy(dctx->buffer + dctx->bytes, src,
+-                                     srclen);
+-                              dctx->bytes += srclen;
+-                              return 0;
+-                      }
++      if (dctx->bytes) {
++              if (dctx->bytes + srclen < GHASH_DIGEST_SIZE) {
+                       memcpy(dctx->buffer + dctx->bytes, src,
+-                             GHASH_DIGEST_SIZE - dctx->bytes);
+-                      preempt_disable();
+-                      pagefault_disable();
+-                      enable_kernel_vsx();
+-                      gcm_ghash_p8(dctx->shash, ctx->htable,
+-                                   dctx->buffer, GHASH_DIGEST_SIZE);
+-                      disable_kernel_vsx();
+-                      pagefault_enable();
+-                      preempt_enable();
+-                      src += GHASH_DIGEST_SIZE - dctx->bytes;
+-                      srclen -= GHASH_DIGEST_SIZE - dctx->bytes;
+-                      dctx->bytes = 0;
+-              }
+-              len = srclen & ~(GHASH_DIGEST_SIZE - 1);
+-              if (len) {
+-                      preempt_disable();
+-                      pagefault_disable();
+-                      enable_kernel_vsx();
+-                      gcm_ghash_p8(dctx->shash, ctx->htable, src, len);
+-                      disable_kernel_vsx();
+-                      pagefault_enable();
+-                      preempt_enable();
+-                      src += len;
+-                      srclen -= len;
+-              }
+-              if (srclen) {
+-                      memcpy(dctx->buffer, src, srclen);
+-                      dctx->bytes = srclen;
++                              srclen);
++                      dctx->bytes += srclen;
++                      return 0;
+               }
+-              return 0;
++              memcpy(dctx->buffer + dctx->bytes, src,
++                      GHASH_DIGEST_SIZE - dctx->bytes);
++
++              __ghash_block(ctx, dctx);
++
++              src += GHASH_DIGEST_SIZE - dctx->bytes;
++              srclen -= GHASH_DIGEST_SIZE - dctx->bytes;
++              dctx->bytes = 0;
++      }
++      len = srclen & ~(GHASH_DIGEST_SIZE - 1);
++      if (len) {
++              __ghash_blocks(ctx, dctx, src, len);
++              src += len;
++              srclen -= len;
+       }
++      if (srclen) {
++              memcpy(dctx->buffer, src, srclen);
++              dctx->bytes = srclen;
++      }
++      return 0;
+ }
+ 
+ static int p8_ghash_final(struct shash_desc *desc, u8 *out)
+@@ -182,25 +156,14 @@ static int p8_ghash_final(struct shash_desc *desc, u8 *out)
+       struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm));
+       struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc);
+ 
+-      if (IN_INTERRUPT) {
+-              return crypto_shash_final(&dctx->fallback_desc, out);
+-      } else {
+-              if (dctx->bytes) {
+-                      for (i = dctx->bytes; i < GHASH_DIGEST_SIZE; i++)
+-                              dctx->buffer[i] = 0;
+-                      preempt_disable();
+-                      pagefault_disable();
+-                      enable_kernel_vsx();
+-                      gcm_ghash_p8(dctx->shash, ctx->htable,
+-                                   dctx->buffer, GHASH_DIGEST_SIZE);
+-                      disable_kernel_vsx();
+-                      pagefault_enable();
+-                      preempt_enable();
+-                      dctx->bytes = 0;
+-              }
+-              memcpy(out, dctx->shash, GHASH_DIGEST_SIZE);
+-              return 0;
++      if (dctx->bytes) {
++              for (i = dctx->bytes; i < GHASH_DIGEST_SIZE; i++)
++                      dctx->buffer[i] = 0;
++              __ghash_block(ctx, dctx);
++              dctx->bytes = 0;
+       }
++      memcpy(out, dctx->shash, GHASH_DIGEST_SIZE);
++      return 0;
+ }
+ 
+ struct shash_alg p8_ghash_alg = {
+@@ -215,11 +178,8 @@ struct shash_alg p8_ghash_alg = {
+                .cra_name = "ghash",
+                .cra_driver_name = "p8_ghash",
+                .cra_priority = 1000,
+-               .cra_flags = CRYPTO_ALG_NEED_FALLBACK,
+                .cra_blocksize = GHASH_BLOCK_SIZE,
+                .cra_ctxsize = sizeof(struct p8_ghash_ctx),
+                .cra_module = THIS_MODULE,
+-               .cra_init = p8_ghash_init_tfm,
+-               .cra_exit = p8_ghash_exit_tfm,
+       },
+ };
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index ee610721098e..f96efa363d34 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -3122,13 +3122,18 @@ static int bond_slave_netdev_event(unsigned long event,
+       case NETDEV_CHANGE:
+               /* For 802.3ad mode only:
+                * Getting invalid Speed/Duplex values here will put slave
+-               * in weird state. So mark it as link-fail for the time
+-               * being and let link-monitoring (miimon) set it right when
+-               * correct speeds/duplex are available.
++               * in weird state. Mark it as link-fail if the link was
++               * previously up or link-down if it hasn't yet come up, and
++               * let link-monitoring (miimon) set it right when correct
++               * speeds/duplex are available.
+                */
+               if (bond_update_speed_duplex(slave) &&
+-                  BOND_MODE(bond) == BOND_MODE_8023AD)
+-                      slave->link = BOND_LINK_FAIL;
++                  BOND_MODE(bond) == BOND_MODE_8023AD) {
++                      if (slave->last_link_up)
++                              slave->link = BOND_LINK_FAIL;
++                      else
++                              slave->link = BOND_LINK_DOWN;
++              }
+ 
+               if (BOND_MODE(bond) == BOND_MODE_8023AD)
+                       bond_3ad_adapter_speed_duplex_changed(slave);
+diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
+index f4e2db44ad91..720f1dde2c2d 100644
+--- a/drivers/net/dsa/mv88e6xxx/chip.c
++++ b/drivers/net/dsa/mv88e6xxx/chip.c
+@@ -910,7 +910,7 @@ static uint64_t _mv88e6xxx_get_ethtool_stat(struct mv88e6xxx_chip *chip,
+                       err = mv88e6xxx_port_read(chip, port, s->reg + 1, &reg);
+                       if (err)
+                               return U64_MAX;
+-                      high = reg;
++                      low |= ((u32)reg) << 16;
+               }
+               break;
+       case STATS_TYPE_BANK1:
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index 52ade133b57c..30cafe4cdb6e 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -1640,6 +1640,8 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
+               skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
+               bnxt_reuse_rx_data(rxr, cons, data);
+               if (!skb) {
++                      if (agg_bufs)
++                              bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs);
+                       rc = -ENOMEM;
+                       goto next_rx;
+               }
+@@ -6340,7 +6342,7 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
+       if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
+               return 0;
+ 
+-      if (bp->flags & BNXT_FLAG_ROCE_CAP) {
++      if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) {
+               pg_lvl = 2;
+               extra_qps = 65536;
+               extra_srqs = 8192;
+@@ -7512,22 +7514,23 @@ static void bnxt_clear_int_mode(struct bnxt *bp)
+       bp->flags &= ~BNXT_FLAG_USING_MSIX;
+ }
+ 
+-int bnxt_reserve_rings(struct bnxt *bp)
++int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
+ {
+       int tcs = netdev_get_num_tc(bp->dev);
+-      bool reinit_irq = false;
++      bool irq_cleared = false;
+       int rc;
+ 
+       if (!bnxt_need_reserve_rings(bp))
+               return 0;
+ 
+-      if (BNXT_NEW_RM(bp) && (bnxt_get_num_msix(bp) != bp->total_irqs)) {
++      if (irq_re_init && BNXT_NEW_RM(bp) &&
++          bnxt_get_num_msix(bp) != bp->total_irqs) {
+               bnxt_ulp_irq_stop(bp);
+               bnxt_clear_int_mode(bp);
+-              reinit_irq = true;
++              irq_cleared = true;
+       }
+       rc = __bnxt_reserve_rings(bp);
+-      if (reinit_irq) {
++      if (irq_cleared) {
+               if (!rc)
+                       rc = bnxt_init_int_mode(bp);
+               bnxt_ulp_irq_restart(bp, rc);
+@@ -8426,7 +8429,7 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
+                       return rc;
+               }
+       }
+-      rc = bnxt_reserve_rings(bp);
++      rc = bnxt_reserve_rings(bp, irq_re_init);
+       if (rc)
+               return rc;
+       if ((bp->flags & BNXT_FLAG_RFS) &&
+@@ -10337,7 +10340,7 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
+ 
+       if (sh)
+               bp->flags |= BNXT_FLAG_SHARED_RINGS;
+-      dflt_rings = netif_get_num_default_rss_queues();
++      dflt_rings = is_kdump_kernel() ? 1 : netif_get_num_default_rss_queues();
+       /* Reduce default rings on multi-port cards so that total default
+        * rings do not exceed CPU count.
+        */
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+index cf81ace7a6e6..0fb93280ad4e 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+@@ -20,6 +20,7 @@
+ 
+ #include <linux/interrupt.h>
+ #include <linux/rhashtable.h>
++#include <linux/crash_dump.h>
+ #include <net/devlink.h>
+ #include <net/dst_metadata.h>
+ #include <net/xdp.h>
+@@ -1367,7 +1368,8 @@ struct bnxt {
+ #define BNXT_CHIP_TYPE_NITRO_A0(bp) ((bp)->flags & BNXT_FLAG_CHIP_NITRO_A0)
+ #define BNXT_RX_PAGE_MODE(bp) ((bp)->flags & BNXT_FLAG_RX_PAGE_MODE)
+ #define BNXT_SUPPORTS_TPA(bp) (!BNXT_CHIP_TYPE_NITRO_A0(bp) &&        \
+-                               !(bp->flags & BNXT_FLAG_CHIP_P5))
++                               !(bp->flags & BNXT_FLAG_CHIP_P5) &&    \
++                               !is_kdump_kernel())
+ 
+ /* Chip class phase 5 */
+ #define BNXT_CHIP_P5(bp)                      \
+@@ -1778,7 +1780,7 @@ unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp);
+ unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp);
+ unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp);
+ int bnxt_get_avail_msix(struct bnxt *bp, int num);
+-int bnxt_reserve_rings(struct bnxt *bp);
++int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init);
+ void bnxt_tx_disable(struct bnxt *bp);
+ void bnxt_tx_enable(struct bnxt *bp);
+ int bnxt_hwrm_set_pause(struct bnxt *);
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+index adabbe94a259..e1460e391952 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+@@ -788,7 +788,7 @@ static int bnxt_set_channels(struct net_device *dev,
+                        */
+               }
+       } else {
+-              rc = bnxt_reserve_rings(bp);
++              rc = bnxt_reserve_rings(bp, true);
+       }
+ 
+       return rc;
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+index cf475873ce81..bfa342a98d08 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+@@ -147,7 +147,7 @@ static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id,
+                       bnxt_close_nic(bp, true, false);
+                       rc = bnxt_open_nic(bp, true, false);
+               } else {
+-                      rc = bnxt_reserve_rings(bp);
++                      rc = bnxt_reserve_rings(bp, true);
+               }
+       }
+       if (rc) {
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+index 82a8d1970060..35462bccd91a 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+@@ -197,6 +197,9 @@ static void cxgb4_process_flow_match(struct net_device *dev,
+               fs->val.ivlan = vlan_tci;
+               fs->mask.ivlan = vlan_tci_mask;
+ 
++              fs->val.ivlan_vld = 1;
++              fs->mask.ivlan_vld = 1;
++
+               /* Chelsio adapters use ivlan_vld bit to match vlan packets
+                * as 802.1Q. Also, when vlan tag is present in packets,
+                * ethtype match is used then to match on ethtype of inner
+@@ -207,8 +210,6 @@ static void cxgb4_process_flow_match(struct net_device *dev,
+                * ethtype value with ethtype of inner header.
+                */
+               if (fs->val.ethtype == ETH_P_8021Q) {
+-                      fs->val.ivlan_vld = 1;
+-                      fs->mask.ivlan_vld = 1;
+                       fs->val.ethtype = 0;
+                       fs->mask.ethtype = 0;
+               }
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+index a3544041ad32..8d63eed628d7 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+@@ -7206,10 +7206,21 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
+                        unsigned int cache_line_size)
+ {
+       unsigned int page_shift = fls(page_size) - 1;
++      unsigned int sge_hps = page_shift - 10;
+       unsigned int stat_len = cache_line_size > 64 ? 128 : 64;
+       unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
+       unsigned int fl_align_log = fls(fl_align) - 1;
+ 
++      t4_write_reg(adap, SGE_HOST_PAGE_SIZE_A,
++                   HOSTPAGESIZEPF0_V(sge_hps) |
++                   HOSTPAGESIZEPF1_V(sge_hps) |
++                   HOSTPAGESIZEPF2_V(sge_hps) |
++                   HOSTPAGESIZEPF3_V(sge_hps) |
++                   HOSTPAGESIZEPF4_V(sge_hps) |
++                   HOSTPAGESIZEPF5_V(sge_hps) |
++                   HOSTPAGESIZEPF6_V(sge_hps) |
++                   HOSTPAGESIZEPF7_V(sge_hps));
++
+       if (is_t4(adap->params.chip)) {
+               t4_set_reg_field(adap, SGE_CONTROL_A,
+                                INGPADBOUNDARY_V(INGPADBOUNDARY_M) |
+diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
+index a96ad20ee484..878ccce1dfcd 100644
+--- a/drivers/net/ethernet/freescale/fec_main.c
++++ b/drivers/net/ethernet/freescale/fec_main.c
+@@ -3556,7 +3556,7 @@ failed_init:
+       if (fep->reg_phy)
+               regulator_disable(fep->reg_phy);
+ failed_reset:
+-      pm_runtime_put(&pdev->dev);
++      pm_runtime_put_noidle(&pdev->dev);
+       pm_runtime_disable(&pdev->dev);
+ failed_regulator:
+       clk_disable_unprepare(fep->clk_ahb);
+diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
+index c0a3718b2e2a..c7f4b72b3c07 100644
+--- a/drivers/net/ethernet/marvell/mvneta.c
++++ b/drivers/net/ethernet/marvell/mvneta.c
+@@ -4674,7 +4674,7 @@ static int mvneta_probe(struct platform_device *pdev)
+       err = register_netdev(dev);
+       if (err < 0) {
+               dev_err(&pdev->dev, "failed to register\n");
+-              goto err_free_stats;
++              goto err_netdev;
+       }
+ 
+       netdev_info(dev, "Using %s mac address %pM\n", mac_from,
+@@ -4685,14 +4685,12 @@ static int mvneta_probe(struct platform_device *pdev)
+       return 0;
+ 
+ err_netdev:
+-      unregister_netdev(dev);
+       if (pp->bm_priv) {
+               mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
+               mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short,
+                                      1 << pp->id);
+               mvneta_bm_put(pp->bm_priv);
+       }
+-err_free_stats:
+       free_percpu(pp->stats);
+ err_free_ports:
+       free_percpu(pp->ports);
+diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+index 25fbed2b8d94..f4f076d7090e 100644
+--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
++++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+@@ -1455,7 +1455,7 @@ static inline void mvpp2_xlg_max_rx_size_set(struct mvpp2_port *port)
+ /* Set defaults to the MVPP2 port */
+ static void mvpp2_defaults_set(struct mvpp2_port *port)
+ {
+-      int tx_port_num, val, queue, ptxq, lrxq;
++      int tx_port_num, val, queue, lrxq;
+ 
+       if (port->priv->hw_version == MVPP21) {
+               /* Update TX FIFO MIN Threshold */
+@@ -1476,11 +1476,9 @@ static void mvpp2_defaults_set(struct mvpp2_port *port)
+       mvpp2_write(port->priv, MVPP2_TXP_SCHED_FIXED_PRIO_REG, 0);
+ 
+       /* Close bandwidth for all queues */
+-      for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) {
+-              ptxq = mvpp2_txq_phys(port->id, queue);
++      for (queue = 0; queue < MVPP2_MAX_TXQ; queue++)
+               mvpp2_write(port->priv,
+-                          MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(ptxq), 0);
+-      }
++                          MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(queue), 0);
+ 
+       /* Set refill period to 1 usec, refill tokens
+        * and bucket size to maximum
+@@ -2336,7 +2334,7 @@ static void mvpp2_txq_deinit(struct mvpp2_port *port,
+       txq->descs_dma         = 0;
+ 
+       /* Set minimum bandwidth for disabled TXQs */
+-      mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0);
++      mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->log_id), 0);
+ 
+       /* Set Tx descriptors queue starting address and size */
+       thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index 46157e2a1e5a..1e2688e2ed47 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -3750,6 +3750,12 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
+                       netdev_warn(netdev, "Disabling LRO, not supported in legacy RQ\n");
+       }
+ 
++      if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
++              features &= ~NETIF_F_RXHASH;
++              if (netdev->features & NETIF_F_RXHASH)
++                      netdev_warn(netdev, "Disabling rxhash, not supported when CQE compress is active\n");
++      }
++
+       mutex_unlock(&priv->state_lock);
+ 
+       return features;
+@@ -3875,6 +3881,9 @@ int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
+       memcpy(&priv->tstamp, &config, sizeof(config));
+       mutex_unlock(&priv->state_lock);
+ 
++      /* might need to fix some features */
++      netdev_update_features(priv->netdev);
++
+       return copy_to_user(ifr->ifr_data, &config,
+                           sizeof(config)) ? -EFAULT : 0;
+ }
+@@ -4734,6 +4743,10 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
+       if (!priv->channels.params.scatter_fcs_en)
+               netdev->features  &= ~NETIF_F_RXFCS;
+ 
++      /* prefere CQE compression over rxhash */
++      if (MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS))
++              netdev->features &= ~NETIF_F_RXHASH;
++
+#define FT_CAP(f) MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.f)
+       if (FT_CAP(flow_modify_en) &&
+           FT_CAP(modify_root) &&
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+index 581cc145795d..e29e5beb239d 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+@@ -2286,7 +2286,7 @@ static struct mlx5_flow_root_namespace
+               cmds = mlx5_fs_cmd_get_default_ipsec_fpga_cmds(table_type);
+ 
+       /* Create the root namespace */
+-      root_ns = kvzalloc(sizeof(*root_ns), GFP_KERNEL);
++      root_ns = kzalloc(sizeof(*root_ns), GFP_KERNEL);
+       if (!root_ns)
+               return NULL;
+ 
+@@ -2429,6 +2429,7 @@ static void cleanup_egress_acls_root_ns(struct mlx5_core_dev *dev)
+               cleanup_root_ns(steering->esw_egress_root_ns[i]);
+ 
+       kfree(steering->esw_egress_root_ns);
++      steering->esw_egress_root_ns = NULL;
+ }
+ 
+ static void cleanup_ingress_acls_root_ns(struct mlx5_core_dev *dev)
+@@ -2443,6 +2444,7 @@ static void cleanup_ingress_acls_root_ns(struct mlx5_core_dev *dev)
+               cleanup_root_ns(steering->esw_ingress_root_ns[i]);
+ 
+       kfree(steering->esw_ingress_root_ns);
++      steering->esw_ingress_root_ns = NULL;
+ }
+ 
+ void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
+@@ -2611,6 +2613,7 @@ cleanup_root_ns:
+       for (i--; i >= 0; i--)
+               cleanup_root_ns(steering->esw_egress_root_ns[i]);
+       kfree(steering->esw_egress_root_ns);
++      steering->esw_egress_root_ns = NULL;
+       return err;
+ }
+ 
+@@ -2638,6 +2641,7 @@ cleanup_root_ns:
+       for (i--; i >= 0; i--)
+               cleanup_root_ns(steering->esw_ingress_root_ns[i]);
+       kfree(steering->esw_ingress_root_ns);
++      steering->esw_ingress_root_ns = NULL;
+       return err;
+ }
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c
+index c1a9cc9a3292..4c98950380d5 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c
+@@ -1171,13 +1171,12 @@ mlxsw_sp_acl_erp_delta_fill(const struct mlxsw_sp_acl_erp_key *parent_key,
+                       return -EINVAL;
+       }
+       if (si == -1) {
+-              /* The masks are the same, this cannot happen.
+-               * That means the caller is broken.
++              /* The masks are the same, this can happen in case eRPs with
++               * the same mask were created in both A-TCAM and C-TCAM.
++               * The only possible condition under which this can happen
++               * is identical rule insertion. Delta is not possible here.
+                */
+-              WARN_ON(1);
+-              *delta_start = 0;
+-              *delta_mask = 0;
+-              return 0;
++              return -EINVAL;
+       }
+       pmask = (unsigned char) parent_key->mask[__MASK_IDX(si)];
+       mask = (unsigned char) key->mask[__MASK_IDX(si)];
+diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
+index ed651dde6ef9..6d176be51a6b 100644
+--- a/drivers/net/ethernet/realtek/r8169.c
++++ b/drivers/net/ethernet/realtek/r8169.c
+@@ -6914,6 +6914,8 @@ static int rtl8169_resume(struct device *device)
+       struct net_device *dev = dev_get_drvdata(device);
+       struct rtl8169_private *tp = netdev_priv(dev);
+ 
++      rtl_rar_set(tp, dev->dev_addr);
++
+       clk_prepare_enable(tp->clk);
+ 
+       if (netif_running(dev))
+@@ -6947,6 +6949,7 @@ static int rtl8169_runtime_resume(struct device *device)
+ {
+       struct net_device *dev = dev_get_drvdata(device);
+       struct rtl8169_private *tp = netdev_priv(dev);
++
+       rtl_rar_set(tp, dev->dev_addr);
+ 
+       if (!tp->TxDescArray)
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+index 3c749c327cbd..e09522c5509a 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+@@ -460,7 +460,7 @@ stmmac_get_pauseparam(struct net_device *netdev,
+       } else {
+               if (!linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+                                      netdev->phydev->supported) ||
+-                  linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
++                  !linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+                                     netdev->phydev->supported))
+                       return;
+       }
+@@ -491,7 +491,7 @@ stmmac_set_pauseparam(struct net_device *netdev,
+       } else {
+               if (!linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+                                      phy->supported) ||
+-                  linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
++                  !linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+                                     phy->supported))
+                       return -EOPNOTSUPP;
+       }
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index 48712437d0da..3c409862c52e 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -2208,6 +2208,10 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
+       if (priv->plat->axi)
+               stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
+ 
++      /* DMA CSR Channel configuration */
++      for (chan = 0; chan < dma_csr_ch; chan++)
++              stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
++
+       /* DMA RX Channel Configuration */
+       for (chan = 0; chan < rx_channels_count; chan++) {
+               rx_q = &priv->rx_queue[chan];
+@@ -2233,10 +2237,6 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
+                                      tx_q->tx_tail_addr, chan);
+       }
+ 
+-      /* DMA CSR Channel configuration */
+-      for (chan = 0; chan < dma_csr_ch; chan++)
+-              stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
+-
+       return ret;
+ }
+ 
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+index bdd351597b55..093a223fe408 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+@@ -267,7 +267,8 @@ int stmmac_mdio_reset(struct mii_bus *bus)
+                       of_property_read_u32_array(np,
+                               "snps,reset-delays-us", data->delays, 3);
+ 
+-                      if (gpio_request(data->reset_gpio, "mdio-reset"))
++                      if (devm_gpio_request(priv->device, data->reset_gpio,
++                                            "mdio-reset"))
+                               return 0;
+               }
+ 
+diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c
+index 100b401b1f4a..754cde873dde 100644
+--- a/drivers/net/phy/marvell10g.c
++++ b/drivers/net/phy/marvell10g.c
+@@ -31,6 +31,9 @@
+ #define MV_PHY_ALASKA_NBT_QUIRK_REV   (MARVELL_PHY_ID_88X3310 | 0xa)
+ 
+ enum {
++      MV_PMA_BOOT             = 0xc050,
++      MV_PMA_BOOT_FATAL       = BIT(0),
++
+       MV_PCS_BASE_T           = 0x0000,
+       MV_PCS_BASE_R           = 0x1000,
+       MV_PCS_1000BASEX        = 0x2000,
+@@ -211,6 +214,16 @@ static int mv3310_probe(struct phy_device *phydev)
+           (phydev->c45_ids.devices_in_package & mmd_mask) != mmd_mask)
+               return -ENODEV;
+ 
++      ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MV_PMA_BOOT);
++      if (ret < 0)
++              return ret;
++
++      if (ret & MV_PMA_BOOT_FATAL) {
++              dev_warn(&phydev->mdio.dev,
++                       "PHY failed to boot firmware, status=%04x\n", ret);
++              return -ENODEV;
++      }
++
+       priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL);
+       if (!priv)
+               return -ENOMEM;
+diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
+index 504282af27e5..921cc0571bd0 100644
+--- a/drivers/net/usb/usbnet.c
++++ b/drivers/net/usb/usbnet.c
+@@ -506,6 +506,7 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
+ 
+       if (netif_running (dev->net) &&
+           netif_device_present (dev->net) &&
++          test_bit(EVENT_DEV_OPEN, &dev->flags) &&
+           !test_bit (EVENT_RX_HALT, &dev->flags) &&
+           !test_bit (EVENT_DEV_ASLEEP, &dev->flags)) {
+               switch (retval = usb_submit_urb (urb, GFP_ATOMIC)) {
+@@ -1431,6 +1432,11 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
+               spin_unlock_irqrestore(&dev->txq.lock, flags);
+               goto drop;
+       }
++      if (netif_queue_stopped(net)) {
++              usb_autopm_put_interface_async(dev->intf);
++              spin_unlock_irqrestore(&dev->txq.lock, flags);
++              goto drop;
++      }
+ 
+ #ifdef CONFIG_PM
+       /* if this triggers the device is still a sleep */
+diff --git a/include/linux/siphash.h b/include/linux/siphash.h
+index fa7a6b9cedbf..bf21591a9e5e 100644
+--- a/include/linux/siphash.h
++++ b/include/linux/siphash.h
+@@ -21,6 +21,11 @@ typedef struct {
+       u64 key[2];
+ } siphash_key_t;
+ 
++static inline bool siphash_key_is_zero(const siphash_key_t *key)
++{
++      return !(key->key[0] | key->key[1]);
++}
++
+ u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key);
+ #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key);
+diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
+index 104a6669e344..7698460a3dd1 100644
+--- a/include/net/netns/ipv4.h
++++ b/include/net/netns/ipv4.h
+@@ -9,6 +9,7 @@
+ #include <linux/uidgid.h>
+ #include <net/inet_frag.h>
+ #include <linux/rcupdate.h>
++#include <linux/siphash.h>
+ 
+ struct tcpm_hash_bucket;
+ struct ctl_table_header;
+@@ -217,5 +218,6 @@ struct netns_ipv4 {
+       unsigned int    ipmr_seq;       /* protected by rtnl_mutex */
+ 
+       atomic_t        rt_genid;
++      siphash_key_t   ip_id_key;
+ };
+ #endif
+diff --git a/include/uapi/linux/tipc_config.h b/include/uapi/linux/tipc_config.h
+index 4b2c93b1934c..4955e1a9f1bc 100644
+--- a/include/uapi/linux/tipc_config.h
++++ b/include/uapi/linux/tipc_config.h
+@@ -307,8 +307,10 @@ static inline int TLV_SET(void *tlv, __u16 type, void *data, __u16 len)
+       tlv_ptr = (struct tlv_desc *)tlv;
+       tlv_ptr->tlv_type = htons(type);
+       tlv_ptr->tlv_len  = htons(tlv_len);
+-      if (len && data)
+-              memcpy(TLV_DATA(tlv_ptr), data, tlv_len);
++      if (len && data) {
++              memcpy(TLV_DATA(tlv_ptr), data, len);
++              memset(TLV_DATA(tlv_ptr) + len, 0, TLV_SPACE(len) - tlv_len);
++      }
+       return TLV_SPACE(len);
+ }
+ 
+@@ -405,8 +407,10 @@ static inline int TCM_SET(void *msg, __u16 cmd, __u16 flags,
+       tcm_hdr->tcm_len   = htonl(msg_len);
+       tcm_hdr->tcm_type  = htons(cmd);
+       tcm_hdr->tcm_flags = htons(flags);
+-      if (data_len && data)
++      if (data_len && data) {
+               memcpy(TCM_DATA(msg), data, data_len);
++              memset(TCM_DATA(msg) + data_len, 0, TCM_SPACE(data_len) - msg_len);
++      }
+       return TCM_SPACE(data_len);
+ }
+ 
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 255f99cb7c48..c6b2f6db0a9b 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -5804,7 +5804,6 @@ static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
+       skb_reset_mac_header(skb);
+       skb_gro_reset_offset(skb);
+ 
+-      eth = skb_gro_header_fast(skb, 0);
+       if (unlikely(skb_gro_header_hard(skb, hlen))) {
+               eth = skb_gro_header_slow(skb, hlen, 0);
+               if (unlikely(!eth)) {
+@@ -5814,6 +5813,7 @@ static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
+                       return NULL;
+               }
+       } else {
++              eth = (const struct ethhdr *)skb->data;
+               gro_pull_from_frag0(skb, hlen);
+               NAPI_GRO_CB(skb)->frag0 += hlen;
+               NAPI_GRO_CB(skb)->frag0_len -= hlen;
+diff --git a/net/core/ethtool.c b/net/core/ethtool.c
+index 36ed619faf36..014dcd63b451 100644
+--- a/net/core/ethtool.c
++++ b/net/core/ethtool.c
+@@ -3008,11 +3008,12 @@ ethtool_rx_flow_rule_create(const struct ethtool_rx_flow_spec_input *input)
+               const struct ethtool_flow_ext *ext_h_spec = &fs->h_ext;
+               const struct ethtool_flow_ext *ext_m_spec = &fs->m_ext;
+ 
+-              if (ext_m_spec->vlan_etype &&
+-                  ext_m_spec->vlan_tci) {
++              if (ext_m_spec->vlan_etype) {
+                       match->key.vlan.vlan_tpid = ext_h_spec->vlan_etype;
+                       match->mask.vlan.vlan_tpid = ext_m_spec->vlan_etype;
++              }
+ 
++              if (ext_m_spec->vlan_tci) {
+                       match->key.vlan.vlan_id =
+                               ntohs(ext_h_spec->vlan_tci) & 0x0fff;
+                       match->mask.vlan.vlan_id =
+@@ -3022,7 +3023,10 @@ ethtool_rx_flow_rule_create(const struct ethtool_rx_flow_spec_input *input)
+                               (ntohs(ext_h_spec->vlan_tci) & 0xe000) >> 13;
+                       match->mask.vlan.vlan_priority =
+                               (ntohs(ext_m_spec->vlan_tci) & 0xe000) >> 13;
++              }
+ 
++              if (ext_m_spec->vlan_etype ||
++                  ext_m_spec->vlan_tci) {
+                       match->dissector.used_keys |=
+                               BIT(FLOW_DISSECTOR_KEY_VLAN);
+                       match->dissector.offset[FLOW_DISSECTOR_KEY_VLAN] =
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 40796b8bf820..e5bfd42fd083 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -1001,7 +1001,11 @@ struct ubuf_info *sock_zerocopy_realloc(struct sock *sk, size_t size,
+                       uarg->len++;
+                       uarg->bytelen = bytelen;
+                       atomic_set(&sk->sk_zckey, ++next);
+-                      sock_zerocopy_get(uarg);
++
++                      /* no extra ref when appending to datagram (MSG_MORE) */
++                      if (sk->sk_type == SOCK_STREAM)
++                              sock_zerocopy_get(uarg);
++
+                       return uarg;
+               }
+       }
+diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
+index 6c2febc39dca..eb03153dfe12 100644
+--- a/net/ipv4/igmp.c
++++ b/net/ipv4/igmp.c
+@@ -188,6 +188,17 @@ static void ip_ma_put(struct ip_mc_list *im)
+            pmc != NULL;                                       \
+            pmc = rtnl_dereference(pmc->next_rcu))
+ 
++static void ip_sf_list_clear_all(struct ip_sf_list *psf)
++{
++      struct ip_sf_list *next;
++
++      while (psf) {
++              next = psf->sf_next;
++              kfree(psf);
++              psf = next;
++      }
++}
++
+ #ifdef CONFIG_IP_MULTICAST
+ 
+ /*
+@@ -633,6 +644,13 @@ static void igmpv3_clear_zeros(struct ip_sf_list **ppsf)
+       }
+ }
+ 
++static void kfree_pmc(struct ip_mc_list *pmc)
++{
++      ip_sf_list_clear_all(pmc->sources);
++      ip_sf_list_clear_all(pmc->tomb);
++      kfree(pmc);
++}
++
+ static void igmpv3_send_cr(struct in_device *in_dev)
+ {
+       struct ip_mc_list *pmc, *pmc_prev, *pmc_next;
+@@ -669,7 +687,7 @@ static void igmpv3_send_cr(struct in_device *in_dev)
+                       else
+                               in_dev->mc_tomb = pmc_next;
+                       in_dev_put(pmc->interface);
+-                      kfree(pmc);
++                      kfree_pmc(pmc);
+               } else
+                       pmc_prev = pmc;
+       }
+@@ -1215,14 +1233,18 @@ static void igmpv3_del_delrec(struct in_device *in_dev, struct ip_mc_list *im)
+               im->interface = pmc->interface;
+               if (im->sfmode == MCAST_INCLUDE) {
+                       im->tomb = pmc->tomb;
++                      pmc->tomb = NULL;
++
+                       im->sources = pmc->sources;
++                      pmc->sources = NULL;
++
+                       for (psf = im->sources; psf; psf = psf->sf_next)
+                               psf->sf_crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
+               } else {
+                       im->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
+               }
+               in_dev_put(pmc->interface);
+-              kfree(pmc);
++              kfree_pmc(pmc);
+       }
+       spin_unlock_bh(&im->lock);
+ }
+@@ -1243,21 +1265,18 @@ static void igmpv3_clear_delrec(struct in_device *in_dev)
+               nextpmc = pmc->next;
+               ip_mc_clear_src(pmc);
+               in_dev_put(pmc->interface);
+-              kfree(pmc);
++              kfree_pmc(pmc);
+       }
+       /* clear dead sources, too */
+       rcu_read_lock();
+       for_each_pmc_rcu(in_dev, pmc) {
+-              struct ip_sf_list *psf, *psf_next;
++              struct ip_sf_list *psf;
+ 
+               spin_lock_bh(&pmc->lock);
+               psf = pmc->tomb;
+               pmc->tomb = NULL;
+               spin_unlock_bh(&pmc->lock);
+-              for (; psf; psf = psf_next) {
+-                      psf_next = psf->sf_next;
+-                      kfree(psf);
+-              }
++              ip_sf_list_clear_all(psf);
+       }
+       rcu_read_unlock();
+ }
+@@ -2123,7 +2142,7 @@ static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
+ 
+ static void ip_mc_clear_src(struct ip_mc_list *pmc)
+ {
+-      struct ip_sf_list *psf, *nextpsf, *tomb, *sources;
++      struct ip_sf_list *tomb, *sources;
+ 
+       spin_lock_bh(&pmc->lock);
+       tomb = pmc->tomb;
+@@ -2135,14 +2154,8 @@ static void ip_mc_clear_src(struct ip_mc_list *pmc)
+       pmc->sfcount[MCAST_EXCLUDE] = 1;
+       spin_unlock_bh(&pmc->lock);
+ 
+-      for (psf = tomb; psf; psf = nextpsf) {
+-              nextpsf = psf->sf_next;
+-              kfree(psf);
+-      }
+-      for (psf = sources; psf; psf = nextpsf) {
+-              nextpsf = psf->sf_next;
+-              kfree(psf);
+-      }
++      ip_sf_list_clear_all(tomb);
++      ip_sf_list_clear_all(sources);
+ }
+ 
+ /* Join a multicast group
+diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
+index e8bb2e85c5a4..ac770940adb9 100644
+--- a/net/ipv4/ip_output.c
++++ b/net/ipv4/ip_output.c
+@@ -883,7 +883,7 @@ static int __ip_append_data(struct sock *sk,
+       int csummode = CHECKSUM_NONE;
+       struct rtable *rt = (struct rtable *)cork->dst;
+       unsigned int wmem_alloc_delta = 0;
+-      bool paged, extra_uref;
++      bool paged, extra_uref = false;
+       u32 tskey = 0;
+ 
+       skb = skb_peek_tail(queue);
+@@ -923,7 +923,7 @@ static int __ip_append_data(struct sock *sk,
+               uarg = sock_zerocopy_realloc(sk, length, skb_zcopy(skb));
+               if (!uarg)
+                       return -ENOBUFS;
+-              extra_uref = true;
++              extra_uref = !skb;      /* only extra ref if !MSG_MORE */
+               if (rt->dst.dev->features & NETIF_F_SG &&
+                   csummode == CHECKSUM_PARTIAL) {
+                       paged = true;
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index 6fdf1c195d8e..df6afb092936 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -500,15 +500,17 @@ EXPORT_SYMBOL(ip_idents_reserve);
+ 
+ void __ip_select_ident(struct net *net, struct iphdr *iph, int segs)
+ {
+-      static u32 ip_idents_hashrnd __read_mostly;
+       u32 hash, id;
+ 
+-      net_get_random_once(&ip_idents_hashrnd, sizeof(ip_idents_hashrnd));
++      /* Note the following code is not safe, but this is okay. */
++      if (unlikely(siphash_key_is_zero(&net->ipv4.ip_id_key)))
++              get_random_bytes(&net->ipv4.ip_id_key,
++                               sizeof(net->ipv4.ip_id_key));
+ 
+-      hash = jhash_3words((__force u32)iph->daddr,
++      hash = siphash_3u32((__force u32)iph->daddr,
+                           (__force u32)iph->saddr,
+-                          iph->protocol ^ net_hash_mix(net),
+-                          ip_idents_hashrnd);
++                          iph->protocol,
++                          &net->ipv4.ip_id_key);
+       id = ip_idents_reserve(hash, segs);
+       iph->id = htons(id);
+ }
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index e51f3c648b09..b5e0c85bcd57 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -1275,7 +1275,7 @@ static int __ip6_append_data(struct sock *sk,
+       int csummode = CHECKSUM_NONE;
+       unsigned int maxnonfragsize, headersize;
+       unsigned int wmem_alloc_delta = 0;
+-      bool paged, extra_uref;
++      bool paged, extra_uref = false;
+ 
+       skb = skb_peek_tail(queue);
+       if (!skb) {
+@@ -1344,7 +1344,7 @@ emsgsize:
+               uarg = sock_zerocopy_realloc(sk, length, skb_zcopy(skb));
+               if (!uarg)
+                       return -ENOBUFS;
+-              extra_uref = true;
++              extra_uref = !skb;      /* only extra ref if !MSG_MORE */
+               if (rt->dst.dev->features & NETIF_F_SG &&
+                   csummode == CHECKSUM_PARTIAL) {
+                       paged = true;
+diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
+index 4fe7c90962dd..868ae23dbae1 100644
+--- a/net/ipv6/output_core.c
++++ b/net/ipv6/output_core.c
+@@ -10,15 +10,25 @@
+ #include <net/secure_seq.h>
+ #include <linux/netfilter.h>
+ 
+-static u32 __ipv6_select_ident(struct net *net, u32 hashrnd,
++static u32 __ipv6_select_ident(struct net *net,
+                              const struct in6_addr *dst,
+                              const struct in6_addr *src)
+ {
++      const struct {
++              struct in6_addr dst;
++              struct in6_addr src;
++      } __aligned(SIPHASH_ALIGNMENT) combined = {
++              .dst = *dst,
++              .src = *src,
++      };
+       u32 hash, id;
+ 
+-      hash = __ipv6_addr_jhash(dst, hashrnd);
+-      hash = __ipv6_addr_jhash(src, hash);
+-      hash ^= net_hash_mix(net);
++      /* Note the following code is not safe, but this is okay. */
++      if (unlikely(siphash_key_is_zero(&net->ipv4.ip_id_key)))
++              get_random_bytes(&net->ipv4.ip_id_key,
++                               sizeof(net->ipv4.ip_id_key));
++
++      hash = siphash(&combined, sizeof(combined), &net->ipv4.ip_id_key);
+ 
+       /* Treat id of 0 as unset and if we get 0 back from ip_idents_reserve,
+        * set the hight order instead thus minimizing possible future
+@@ -41,7 +51,6 @@ static u32 __ipv6_select_ident(struct net *net, u32 hashrnd,
+  */
+ __be32 ipv6_proxy_select_ident(struct net *net, struct sk_buff *skb)
+ {
+-      static u32 ip6_proxy_idents_hashrnd __read_mostly;
+       struct in6_addr buf[2];
+       struct in6_addr *addrs;
+       u32 id;
+@@ -53,11 +62,7 @@ __be32 ipv6_proxy_select_ident(struct net *net, struct sk_buff *skb)
+       if (!addrs)
+               return 0;
+ 
+-      net_get_random_once(&ip6_proxy_idents_hashrnd,
+-                          sizeof(ip6_proxy_idents_hashrnd));
+-
+-      id = __ipv6_select_ident(net, ip6_proxy_idents_hashrnd,
+-                               &addrs[1], &addrs[0]);
++      id = __ipv6_select_ident(net, &addrs[1], &addrs[0]);
+       return htonl(id);
+ }
+ EXPORT_SYMBOL_GPL(ipv6_proxy_select_ident);
+@@ -66,12 +71,9 @@ __be32 ipv6_select_ident(struct net *net,
+                        const struct in6_addr *daddr,
+                        const struct in6_addr *saddr)
+ {
+-      static u32 ip6_idents_hashrnd __read_mostly;
+       u32 id;
+ 
+-      net_get_random_once(&ip6_idents_hashrnd, sizeof(ip6_idents_hashrnd));
+-
+-      id = __ipv6_select_ident(net, ip6_idents_hashrnd, daddr, saddr);
++      id = __ipv6_select_ident(net, daddr, saddr);
+       return htonl(id);
+ }
+ EXPORT_SYMBOL(ipv6_select_ident);
+diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
+index 5a426226c762..5cb14eabfc65 100644
+--- a/net/ipv6/raw.c
++++ b/net/ipv6/raw.c
+@@ -287,7 +287,9 @@ static int rawv6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+                       /* Binding to link-local address requires an interface */
+                       if (!sk->sk_bound_dev_if)
+                               goto out_unlock;
++              }
+ 
++              if (sk->sk_bound_dev_if) {
+                       err = -ENODEV;
+                       dev = dev_get_by_index_rcu(sock_net(sk),
+                                                  sk->sk_bound_dev_if);
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index e470589fb93b..ab348489bd8a 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -2442,6 +2442,12 @@ static struct rt6_info *__ip6_route_redirect(struct net *net,
+       struct fib6_info *rt;
+       struct fib6_node *fn;
+ 
++      /* l3mdev_update_flow overrides oif if the device is enslaved; in
++       * this case we must match on the real ingress device, so reset it
++       */
++      if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF)
++              fl6->flowi6_oif = skb->dev->ifindex;
++
+       /* Get the "current" route for this destination and
+        * check if the redirect has come from appropriate router.
+        *
+diff --git a/net/llc/llc_output.c b/net/llc/llc_output.c
+index 94425e421213..9e4b6bcf6920 100644
+--- a/net/llc/llc_output.c
++++ b/net/llc/llc_output.c
+@@ -72,6 +72,8 @@ int llc_build_and_send_ui_pkt(struct llc_sap *sap, struct sk_buff *skb,
+       rc = llc_mac_hdr_init(skb, skb->dev->dev_addr, dmac);
+       if (likely(!rc))
+               rc = dev_queue_xmit(skb);
++      else
++              kfree_skb(skb);
+       return rc;
+ }
+ 
+diff --git a/net/sched/act_api.c b/net/sched/act_api.c
+index 5a87e271d35a..5b56b1cb2417 100644
+--- a/net/sched/act_api.c
++++ b/net/sched/act_api.c
+@@ -800,7 +800,7 @@ int tcf_action_dump(struct sk_buff *skb, struct tc_action *actions[],
+ 
+       for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) {
+               a = actions[i];
+-              nest = nla_nest_start(skb, a->order);
++              nest = nla_nest_start(skb, i + 1);
+               if (nest == NULL)
+                       goto nla_put_failure;
+               err = tcf_action_dump_1(skb, a, bind, ref);
+@@ -1300,7 +1300,6 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
+                       ret = PTR_ERR(act);
+                       goto err;
+               }
+-              act->order = i;
+               attr_size += tcf_action_fill_size(act);
+               actions[i - 1] = act;
+       }
+diff --git a/net/tipc/core.c b/net/tipc/core.c
+index d7b0688c98dd..3ecca3b88bf8 100644
+--- a/net/tipc/core.c
++++ b/net/tipc/core.c
+@@ -66,10 +66,6 @@ static int __net_init tipc_init_net(struct net *net)
+       INIT_LIST_HEAD(&tn->node_list);
+       spin_lock_init(&tn->node_list_lock);
+ 
+-      err = tipc_socket_init();
+-      if (err)
+-              goto out_socket;
+-
+       err = tipc_sk_rht_init(net);
+       if (err)
+               goto out_sk_rht;
+@@ -79,9 +75,6 @@ static int __net_init tipc_init_net(struct net *net)
+               goto out_nametbl;
+ 
+       INIT_LIST_HEAD(&tn->dist_queue);
+-      err = tipc_topsrv_start(net);
+-      if (err)
+-              goto out_subscr;
+ 
+       err = tipc_bcast_init(net);
+       if (err)
+@@ -90,25 +83,19 @@ static int __net_init tipc_init_net(struct net *net)
+       return 0;
+ 
+ out_bclink:
+-      tipc_bcast_stop(net);
+-out_subscr:
+       tipc_nametbl_stop(net);
+ out_nametbl:
+       tipc_sk_rht_destroy(net);
+ out_sk_rht:
+-      tipc_socket_stop();
+-out_socket:
+       return err;
+ }
+ 
+ static void __net_exit tipc_exit_net(struct net *net)
+ {
+-      tipc_topsrv_stop(net);
+       tipc_net_stop(net);
+       tipc_bcast_stop(net);
+       tipc_nametbl_stop(net);
+       tipc_sk_rht_destroy(net);
+-      tipc_socket_stop();
+ }
+ 
+ static struct pernet_operations tipc_net_ops = {
+@@ -118,6 +105,11 @@ static struct pernet_operations tipc_net_ops = {
+       .size = sizeof(struct tipc_net),
+ };
+ 
++static struct pernet_operations tipc_topsrv_net_ops = {
++      .init = tipc_topsrv_init_net,
++      .exit = tipc_topsrv_exit_net,
++};
++
+ static int __init tipc_init(void)
+ {
+       int err;
+@@ -144,6 +136,14 @@ static int __init tipc_init(void)
+       if (err)
+               goto out_pernet;
+ 
++      err = tipc_socket_init();
++      if (err)
++              goto out_socket;
++
++      err = register_pernet_subsys(&tipc_topsrv_net_ops);
++      if (err)
++              goto out_pernet_topsrv;
++
+       err = tipc_bearer_setup();
+       if (err)
+               goto out_bearer;
+@@ -151,6 +151,10 @@ static int __init tipc_init(void)
+       pr_info("Started in single node mode\n");
+       return 0;
+ out_bearer:
++      unregister_pernet_subsys(&tipc_topsrv_net_ops);
++out_pernet_topsrv:
++      tipc_socket_stop();
++out_socket:
+       unregister_pernet_subsys(&tipc_net_ops);
+ out_pernet:
+       tipc_unregister_sysctl();
+@@ -166,6 +170,8 @@ out_netlink:
+ static void __exit tipc_exit(void)
+ {
+       tipc_bearer_cleanup();
++      unregister_pernet_subsys(&tipc_topsrv_net_ops);
++      tipc_socket_stop();
+       unregister_pernet_subsys(&tipc_net_ops);
+       tipc_netlink_stop();
+       tipc_netlink_compat_stop();
+diff --git a/net/tipc/subscr.h b/net/tipc/subscr.h
+index d793b4343885..aa015c233898 100644
+--- a/net/tipc/subscr.h
++++ b/net/tipc/subscr.h
+@@ -77,8 +77,9 @@ void tipc_sub_report_overlap(struct tipc_subscription *sub,
+                            u32 found_lower, u32 found_upper,
+                            u32 event, u32 port, u32 node,
+                            u32 scope, int must);
+-int tipc_topsrv_start(struct net *net);
+-void tipc_topsrv_stop(struct net *net);
++
++int __net_init tipc_topsrv_init_net(struct net *net);
++void __net_exit tipc_topsrv_exit_net(struct net *net);
+ 
+ void tipc_sub_put(struct tipc_subscription *subscription);
+ void tipc_sub_get(struct tipc_subscription *subscription);
+diff --git a/net/tipc/topsrv.c b/net/tipc/topsrv.c
+index b45932d78004..f345662890a6 100644
+--- a/net/tipc/topsrv.c
++++ b/net/tipc/topsrv.c
+@@ -635,7 +635,7 @@ static void tipc_topsrv_work_stop(struct tipc_topsrv *s)
+       destroy_workqueue(s->send_wq);
+ }
+ 
+-int tipc_topsrv_start(struct net *net)
++static int tipc_topsrv_start(struct net *net)
+ {
+       struct tipc_net *tn = tipc_net(net);
+       const char name[] = "topology_server";
+@@ -668,7 +668,7 @@ int tipc_topsrv_start(struct net *net)
+       return ret;
+ }
+ 
+-void tipc_topsrv_stop(struct net *net)
++static void tipc_topsrv_stop(struct net *net)
+ {
+       struct tipc_topsrv *srv = tipc_topsrv(net);
+       struct socket *lsock = srv->listener;
+@@ -693,3 +693,13 @@ void tipc_topsrv_stop(struct net *net)
+       idr_destroy(&srv->conn_idr);
+       kfree(srv);
+ }
++
++int __net_init tipc_topsrv_init_net(struct net *net)
++{
++      return tipc_topsrv_start(net);
++}
++
++void __net_exit tipc_topsrv_exit_net(struct net *net)
++{
++      tipc_topsrv_stop(net);
++}
+diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
+index 14dedb24fa7b..0fd8f0997ff5 100644
+--- a/net/tls/tls_device.c
++++ b/net/tls/tls_device.c
+@@ -943,12 +943,6 @@ void tls_device_offload_cleanup_rx(struct sock *sk)
+       if (!netdev)
+               goto out;
+ 
+-      if (!(netdev->features & NETIF_F_HW_TLS_RX)) {
+-              pr_err_ratelimited("%s: device is missing NETIF_F_HW_TLS_RX cap\n",
+-                                 __func__);
+-              goto out;
+-      }
+-
+       netdev->tlsdev_ops->tls_dev_del(netdev, tls_ctx,
+                                       TLS_OFFLOAD_CTX_DIR_RX);
+ 
+@@ -1007,7 +1001,8 @@ static int tls_dev_event(struct notifier_block *this, unsigned long event,
+ {
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ 
+-      if (!(dev->features & (NETIF_F_HW_TLS_RX | NETIF_F_HW_TLS_TX)))
++      if (!dev->tlsdev_ops &&
++          !(dev->features & (NETIF_F_HW_TLS_RX | NETIF_F_HW_TLS_TX)))
+               return NOTIFY_DONE;
+ 
+       switch (event) {
+diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
+index 29d6af43dd24..d350ff73a391 100644
+--- a/net/tls/tls_sw.c
++++ b/net/tls/tls_sw.c
+@@ -1685,15 +1685,14 @@ int tls_sw_recvmsg(struct sock *sk,
+               copied = err;
+       }
+ 
+-      len = len - copied;
+-      if (len) {
+-              target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
+-              timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
+-      } else {
++      if (len <= copied)
+               goto recv_end;
+-      }
+ 
+-      do {
++      target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
++      len = len - copied;
++      timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
++
++      while (len && (decrypted + copied < target || ctx->recv_pkt)) {
+               bool retain_skb = false;
+               bool zc = false;
+               int to_decrypt;
+@@ -1824,11 +1823,7 @@ pick_next_record:
+               } else {
+                       break;
+               }
+-
+-              /* If we have a new message from strparser, continue now. */
+-              if (decrypted >= target && !ctx->recv_pkt)
+-                      break;
+-      } while (len);
++      }
+ 
+ recv_end:
+       if (num_async) {
+diff --git a/tools/testing/selftests/net/tls.c b/tools/testing/selftests/net/tls.c
+index 47ddfc154036..278c86134556 100644
+--- a/tools/testing/selftests/net/tls.c
++++ b/tools/testing/selftests/net/tls.c
+@@ -442,6 +442,21 @@ TEST_F(tls, multiple_send_single_recv)
+       EXPECT_EQ(memcmp(send_mem, recv_mem + send_len, send_len), 0);
+ }
+ 
++TEST_F(tls, single_send_multiple_recv_non_align)
++{
++      const unsigned int total_len = 15;
++      const unsigned int recv_len = 10;
++      char recv_mem[recv_len * 2];
++      char send_mem[total_len];
++
++      EXPECT_GE(send(self->fd, send_mem, total_len, 0), 0);
++      memset(recv_mem, 0, total_len);
++
++      EXPECT_EQ(recv(self->cfd, recv_mem, recv_len, 0), recv_len);
++      EXPECT_EQ(recv(self->cfd, recv_mem + recv_len, recv_len, 0), 5);
++      EXPECT_EQ(memcmp(send_mem, recv_mem, total_len), 0);
++}
++
+ TEST_F(tls, recv_partial)
+ {
+       char const *test_str = "test_read_partial";
+@@ -575,6 +590,25 @@ TEST_F(tls, recv_peek_large_buf_mult_recs)
+       EXPECT_EQ(memcmp(test_str, buf, len), 0);
+ }
+ 
++TEST_F(tls, recv_lowat)
++{
++      char send_mem[10] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 };
++      char recv_mem[20];
++      int lowat = 8;
++
++      EXPECT_EQ(send(self->fd, send_mem, 10, 0), 10);
++      EXPECT_EQ(send(self->fd, send_mem, 5, 0), 5);
++
++      memset(recv_mem, 0, 20);
++      EXPECT_EQ(setsockopt(self->cfd, SOL_SOCKET, SO_RCVLOWAT,
++                           &lowat, sizeof(lowat)), 0);
++      EXPECT_EQ(recv(self->cfd, recv_mem, 1, MSG_WAITALL), 1);
++      EXPECT_EQ(recv(self->cfd, recv_mem + 1, 6, MSG_WAITALL), 6);
++      EXPECT_EQ(recv(self->cfd, recv_mem + 7, 10, 0), 8);
++
++      EXPECT_EQ(memcmp(send_mem, recv_mem, 10), 0);
++      EXPECT_EQ(memcmp(send_mem, recv_mem + 10, 5), 0);
++}
+ 
+ TEST_F(tls, pollin)
+ {
