commit:     10da52d34f75c039a20e3e60cb9dc3e05bc1cbb7
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Oct 15 12:42:37 2014 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Oct 15 12:42:37 2014 +0000
URL:        http://sources.gentoo.org/gitweb/?p=proj/linux-patches.git;a=commit;h=10da52d3

Linux patch 3.16.6

---
 0000_README             |    4 +
 1005_linux-3.16.6.patch | 2652 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 2656 insertions(+)

diff --git a/0000_README b/0000_README
index ede03f9..a7526a7 100644
--- a/0000_README
+++ b/0000_README
@@ -62,6 +62,10 @@ Patch:  1004_linux-3.16.5.patch
 From:   http://www.kernel.org
 Desc:   Linux 3.16.5
 
+Patch:  1005_linux-3.16.6.patch
+From:   http://www.kernel.org
+Desc:   Linux 3.16.6
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1005_linux-3.16.6.patch b/1005_linux-3.16.6.patch
new file mode 100644
index 0000000..422fde0
--- /dev/null
+++ b/1005_linux-3.16.6.patch
@@ -0,0 +1,2652 @@
+diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
+index f896f68a3ba3..c4da64b525b2 100644
+--- a/Documentation/kernel-parameters.txt
++++ b/Documentation/kernel-parameters.txt
+@@ -3459,6 +3459,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
+                                       READ_DISC_INFO command);
+                               e = NO_READ_CAPACITY_16 (don't use
+                                       READ_CAPACITY_16 command);
++                              f = NO_REPORT_OPCODES (don't use report opcodes
++                                      command, uas only);
+                               h = CAPACITY_HEURISTICS (decrease the
+                                       reported device capacity by one
+                                       sector if the number is odd);
+@@ -3478,6 +3480,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
+                                       bogus residue values);
+                               s = SINGLE_LUN (the device has only one
+                                       Logical Unit);
++                              t = NO_ATA_1X (don't allow ATA(12) and ATA(16)
++                                      commands, uas only);
+                               u = IGNORE_UAS (don't bind to the uas driver);
+                               w = NO_WP_DETECT (don't test whether the
+                                       medium is write-protected).
+diff --git a/Makefile b/Makefile
+index 41efc3d9f2e0..5c4bc3fc18c0 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 16
+-SUBLEVEL = 5
++SUBLEVEL = 6
+ EXTRAVERSION =
+ NAME = Museum of Fishiegoodies
+ 
+diff --git a/drivers/base/node.c b/drivers/base/node.c
+index 8f7ed9933a7c..40e4585f110a 100644
+--- a/drivers/base/node.c
++++ b/drivers/base/node.c
+@@ -603,7 +603,6 @@ void unregister_one_node(int nid)
+               return;
+ 
+       unregister_node(node_devices[nid]);
+-      kfree(node_devices[nid]);
+       node_devices[nid] = NULL;
+ }
+ 
+diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
+index 0d9284ef96a8..42e41f3b5cf1 100644
+--- a/drivers/crypto/caam/caamhash.c
++++ b/drivers/crypto/caam/caamhash.c
+@@ -1338,9 +1338,9 @@ static int ahash_update_first(struct ahash_request *req)
+       struct device *jrdev = ctx->jrdev;
+       gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+                      CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
+-      u8 *next_buf = state->buf_0 + state->current_buf *
+-                     CAAM_MAX_HASH_BLOCK_SIZE;
+-      int *next_buflen = &state->buflen_0 + state->current_buf;
++      u8 *next_buf = state->current_buf ? state->buf_1 : state->buf_0;
++      int *next_buflen = state->current_buf ?
++              &state->buflen_1 : &state->buflen_0;
+       int to_hash;
+       u32 *sh_desc = ctx->sh_desc_update_first, *desc;
+       dma_addr_t ptr = ctx->sh_desc_update_first_dma;
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 701f86cd5993..5f29c9a9a316 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -3667,8 +3667,14 @@ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev
+               else
+                       bond_xmit_slave_id(bond, skb, 0);
+       } else {
+-              slave_id = bond_rr_gen_slave_id(bond);
+-              bond_xmit_slave_id(bond, skb, slave_id % bond->slave_cnt);
++              int slave_cnt = ACCESS_ONCE(bond->slave_cnt);
++
++              if (likely(slave_cnt)) {
++                      slave_id = bond_rr_gen_slave_id(bond);
++                      bond_xmit_slave_id(bond, skb, slave_id % slave_cnt);
++              } else {
++                      dev_kfree_skb_any(skb);
++              }
+       }
+ 
+       return NETDEV_TX_OK;
+@@ -3699,8 +3705,13 @@ static int bond_xmit_activebackup(struct sk_buff *skb, struct net_device *bond_d
+ static int bond_xmit_xor(struct sk_buff *skb, struct net_device *bond_dev)
+ {
+       struct bonding *bond = netdev_priv(bond_dev);
++      int slave_cnt = ACCESS_ONCE(bond->slave_cnt);
+ 
+-              bond_xmit_slave_id(bond, skb, bond_xmit_hash(bond, skb) % bond->slave_cnt);
++      if (likely(slave_cnt))
++              bond_xmit_slave_id(bond, skb,
++                                 bond_xmit_hash(bond, skb) % slave_cnt);
++      else
++              dev_kfree_skb_any(skb);
+ 
+       return NETDEV_TX_OK;
+ }
+diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
+index 5776e503e4c5..6e4a6bddf56e 100644
+--- a/drivers/net/ethernet/broadcom/bcmsysport.c
++++ b/drivers/net/ethernet/broadcom/bcmsysport.c
+@@ -757,7 +757,8 @@ static irqreturn_t bcm_sysport_tx_isr(int irq, void *dev_id)
+       return IRQ_HANDLED;
+ }
+ 
+-static int bcm_sysport_insert_tsb(struct sk_buff *skb, struct net_device *dev)
++static struct sk_buff *bcm_sysport_insert_tsb(struct sk_buff *skb,
++                                            struct net_device *dev)
+ {
+       struct sk_buff *nskb;
+       struct bcm_tsb *tsb;
+@@ -773,7 +774,7 @@ static int bcm_sysport_insert_tsb(struct sk_buff *skb, struct net_device *dev)
+               if (!nskb) {
+                       dev->stats.tx_errors++;
+                       dev->stats.tx_dropped++;
+-                      return -ENOMEM;
++                      return NULL;
+               }
+               skb = nskb;
+       }
+@@ -792,7 +793,7 @@ static int bcm_sysport_insert_tsb(struct sk_buff *skb, struct net_device *dev)
+                       ip_proto = ipv6_hdr(skb)->nexthdr;
+                       break;
+               default:
+-                      return 0;
++                      return skb;
+               }
+ 
+               /* Get the checksum offset and the L4 (transport) offset */
+@@ -810,7 +811,7 @@ static int bcm_sysport_insert_tsb(struct sk_buff *skb, struct net_device *dev)
+               tsb->l4_ptr_dest_map = csum_info;
+       }
+ 
+-      return 0;
++      return skb;
+ }
+ 
+ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
+@@ -844,8 +845,8 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
+ 
+       /* Insert TSB and checksum infos */
+       if (priv->tsb_en) {
+-              ret = bcm_sysport_insert_tsb(skb, dev);
+-              if (ret) {
++              skb = bcm_sysport_insert_tsb(skb, dev);
++              if (!skb) {
+                       ret = NETDEV_TX_OK;
+                       goto out;
+               }
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+index 6a8b1453a1b9..73cfb21899a7 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+@@ -10044,6 +10044,8 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
+ }
+ 
+ #define BNX2X_PREV_UNDI_PROD_ADDR(p) (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4))
++#define BNX2X_PREV_UNDI_PROD_ADDR_H(f) (BAR_TSTRORM_INTMEM + \
++                                      0x1848 + ((f) << 4))
+ #define BNX2X_PREV_UNDI_RCQ(val)      ((val) & 0xffff)
+ #define BNX2X_PREV_UNDI_BD(val)               ((val) >> 16 & 0xffff)
+ #define BNX2X_PREV_UNDI_PROD(rcq, bd) ((bd) << 16 | (rcq))
+@@ -10051,8 +10053,6 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
+ #define BCM_5710_UNDI_FW_MF_MAJOR     (0x07)
+ #define BCM_5710_UNDI_FW_MF_MINOR     (0x08)
+ #define BCM_5710_UNDI_FW_MF_VERS      (0x05)
+-#define BNX2X_PREV_UNDI_MF_PORT(p) (BAR_TSTRORM_INTMEM + 0x150c + ((p) << 4))
+-#define BNX2X_PREV_UNDI_MF_FUNC(f) (BAR_TSTRORM_INTMEM + 0x184c + ((f) << 4))
+ 
+ static bool bnx2x_prev_is_after_undi(struct bnx2x *bp)
+ {
+@@ -10071,72 +10071,25 @@ static bool bnx2x_prev_is_after_undi(struct bnx2x *bp)
+       return false;
+ }
+ 
+-static bool bnx2x_prev_unload_undi_fw_supports_mf(struct bnx2x *bp)
+-{
+-      u8 major, minor, version;
+-      u32 fw;
+-
+-      /* Must check that FW is loaded */
+-      if (!(REG_RD(bp, MISC_REG_RESET_REG_1) &
+-           MISC_REGISTERS_RESET_REG_1_RST_XSEM)) {
+-              BNX2X_DEV_INFO("XSEM is reset - UNDI MF FW is not loaded\n");
+-              return false;
+-      }
+-
+-      /* Read Currently loaded FW version */
+-      fw = REG_RD(bp, XSEM_REG_PRAM);
+-      major = fw & 0xff;
+-      minor = (fw >> 0x8) & 0xff;
+-      version = (fw >> 0x10) & 0xff;
+-      BNX2X_DEV_INFO("Loaded FW: 0x%08x: Major 0x%02x Minor 0x%02x Version 0x%02x\n",
+-                     fw, major, minor, version);
+-
+-      if (major > BCM_5710_UNDI_FW_MF_MAJOR)
+-              return true;
+-
+-      if ((major == BCM_5710_UNDI_FW_MF_MAJOR) &&
+-          (minor > BCM_5710_UNDI_FW_MF_MINOR))
+-              return true;
+-
+-      if ((major == BCM_5710_UNDI_FW_MF_MAJOR) &&
+-          (minor == BCM_5710_UNDI_FW_MF_MINOR) &&
+-          (version >= BCM_5710_UNDI_FW_MF_VERS))
+-              return true;
+-
+-      return false;
+-}
+-
+-static void bnx2x_prev_unload_undi_mf(struct bnx2x *bp)
+-{
+-      int i;
+-
+-      /* Due to legacy (FW) code, the first function on each engine has a
+-       * different offset macro from the rest of the functions.
+-       * Setting this for all 8 functions is harmless regardless of whether
+-       * this is actually a multi-function device.
+-       */
+-      for (i = 0; i < 2; i++)
+-              REG_WR(bp, BNX2X_PREV_UNDI_MF_PORT(i), 1);
+-
+-      for (i = 2; i < 8; i++)
+-              REG_WR(bp, BNX2X_PREV_UNDI_MF_FUNC(i - 2), 1);
+-
+-      BNX2X_DEV_INFO("UNDI FW (MF) set to discard\n");
+-}
+-
+-static void bnx2x_prev_unload_undi_inc(struct bnx2x *bp, u8 port, u8 inc)
++static void bnx2x_prev_unload_undi_inc(struct bnx2x *bp, u8 inc)
+ {
+       u16 rcq, bd;
+-      u32 tmp_reg = REG_RD(bp, BNX2X_PREV_UNDI_PROD_ADDR(port));
++      u32 addr, tmp_reg;
+ 
++      if (BP_FUNC(bp) < 2)
++              addr = BNX2X_PREV_UNDI_PROD_ADDR(BP_PORT(bp));
++      else
++              addr = BNX2X_PREV_UNDI_PROD_ADDR_H(BP_FUNC(bp) - 2);
++
++      tmp_reg = REG_RD(bp, addr);
+       rcq = BNX2X_PREV_UNDI_RCQ(tmp_reg) + inc;
+       bd = BNX2X_PREV_UNDI_BD(tmp_reg) + inc;
+ 
+       tmp_reg = BNX2X_PREV_UNDI_PROD(rcq, bd);
+-      REG_WR(bp, BNX2X_PREV_UNDI_PROD_ADDR(port), tmp_reg);
++      REG_WR(bp, addr, tmp_reg);
+ 
+-      BNX2X_DEV_INFO("UNDI producer [%d] rings bd -> 0x%04x, rcq -> 0x%04x\n",
+-                     port, bd, rcq);
++      BNX2X_DEV_INFO("UNDI producer [%d/%d][%08x] rings bd -> 0x%04x, rcq -> 0x%04x\n",
++                     BP_PORT(bp), BP_FUNC(bp), addr, bd, rcq);
+ }
+ 
+ static int bnx2x_prev_mcp_done(struct bnx2x *bp)
+@@ -10375,7 +10328,6 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
+       /* Reset should be performed after BRB is emptied */
+       if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) {
+               u32 timer_count = 1000;
+-              bool need_write = true;
+ 
+               /* Close the MAC Rx to prevent BRB from filling up */
+               bnx2x_prev_unload_close_mac(bp, &mac_vals);
+@@ -10412,20 +10364,10 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
+                       else
+                               timer_count--;
+ 
+-                      /* New UNDI FW supports MF and contains better
+-                       * cleaning methods - might be redundant but harmless.
+-                       */
+-                      if (bnx2x_prev_unload_undi_fw_supports_mf(bp)) {
+-                              if (need_write) {
+-                                      bnx2x_prev_unload_undi_mf(bp);
+-                                      need_write = false;
+-                              }
+-                      } else if (prev_undi) {
+-                              /* If UNDI resides in memory,
+-                               * manually increment it
+-                               */
+-                              bnx2x_prev_unload_undi_inc(bp, BP_PORT(bp), 1);
+-                      }
++                      /* If UNDI resides in memory, manually increment it */
++                      if (prev_undi)
++                              bnx2x_prev_unload_undi_inc(bp, 1);
++
+                       udelay(10);
+               }
+ 
+diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
+index a3dd5dc64f4c..8345c6523799 100644
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -6918,7 +6918,8 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
+               skb->protocol = eth_type_trans(skb, tp->dev);
+ 
+               if (len > (tp->dev->mtu + ETH_HLEN) &&
+-                  skb->protocol != htons(ETH_P_8021Q)) {
++                  skb->protocol != htons(ETH_P_8021Q) &&
++                  skb->protocol != htons(ETH_P_8021AD)) {
+                       dev_kfree_skb_any(skb);
+                       goto drop_it_no_recycle;
+               }
+@@ -7914,8 +7915,6 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ 
+       entry = tnapi->tx_prod;
+       base_flags = 0;
+-      if (skb->ip_summed == CHECKSUM_PARTIAL)
+-              base_flags |= TXD_FLAG_TCPUDP_CSUM;
+ 
+       mss = skb_shinfo(skb)->gso_size;
+       if (mss) {
+@@ -7929,6 +7928,13 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ 
+               hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
+ 
++              /* HW/FW can not correctly segment packets that have been
++               * vlan encapsulated.
++               */
++              if (skb->protocol == htons(ETH_P_8021Q) ||
++                  skb->protocol == htons(ETH_P_8021AD))
++                      return tg3_tso_bug(tp, tnapi, txq, skb);
++
+               if (!skb_is_gso_v6(skb)) {
+                       if (unlikely((ETH_HLEN + hdr_len) > 80) &&
+                           tg3_flag(tp, TSO_BUG))
+@@ -7979,6 +7985,17 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
+                               base_flags |= tsflags << 12;
+                       }
+               }
++      } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
++              /* HW/FW can not correctly checksum packets that have been
++               * vlan encapsulated.
++               */
++              if (skb->protocol == htons(ETH_P_8021Q) ||
++                  skb->protocol == htons(ETH_P_8021AD)) {
++                      if (skb_checksum_help(skb))
++                              goto drop;
++              } else  {
++                      base_flags |= TXD_FLAG_TCPUDP_CSUM;
++              }
+       }
+ 
+       if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
+diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
+index e9daa072ebb4..45b13fda6bed 100644
+--- a/drivers/net/ethernet/cadence/macb.c
++++ b/drivers/net/ethernet/cadence/macb.c
+@@ -30,7 +30,6 @@
+ #include <linux/of_device.h>
+ #include <linux/of_mdio.h>
+ #include <linux/of_net.h>
+-#include <linux/pinctrl/consumer.h>
+ 
+ #include "macb.h"
+ 
+@@ -1803,7 +1802,6 @@ static int __init macb_probe(struct platform_device *pdev)
+       struct phy_device *phydev;
+       u32 config;
+       int err = -ENXIO;
+-      struct pinctrl *pinctrl;
+       const char *mac;
+ 
+       regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+@@ -1812,15 +1810,6 @@ static int __init macb_probe(struct platform_device *pdev)
+               goto err_out;
+       }
+ 
+-      pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
+-      if (IS_ERR(pinctrl)) {
+-              err = PTR_ERR(pinctrl);
+-              if (err == -EPROBE_DEFER)
+-                      goto err_out;
+-
+-              dev_warn(&pdev->dev, "No pinctrl provided\n");
+-      }
+-
+       err = -ENOMEM;
+       dev = alloc_etherdev(sizeof(*bp));
+       if (!dev)
+diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
+index 5d940a26055c..c9d2988e364d 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
++++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
+@@ -2380,6 +2380,22 @@ struct mlx4_slaves_pport mlx4_phys_to_slaves_pport_actv(
+ }
+ EXPORT_SYMBOL_GPL(mlx4_phys_to_slaves_pport_actv);
+ 
++static int mlx4_slaves_closest_port(struct mlx4_dev *dev, int slave, int port)
++{
++      struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
++      int min_port = find_first_bit(actv_ports.ports, dev->caps.num_ports)
++                      + 1;
++      int max_port = min_port +
++              bitmap_weight(actv_ports.ports, dev->caps.num_ports);
++
++      if (port < min_port)
++              port = min_port;
++      else if (port >= max_port)
++              port = max_port - 1;
++
++      return port;
++}
++
+ int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac)
+ {
+       struct mlx4_priv *priv = mlx4_priv(dev);
+@@ -2393,6 +2409,7 @@ int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac)
+       if (slave < 0)
+               return -EINVAL;
+ 
++      port = mlx4_slaves_closest_port(dev, slave, port);
+       s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
+       s_info->mac = mac;
+       mlx4_info(dev, "default mac on vf %d port %d to %llX will take afect only after vf restart\n",
+@@ -2419,6 +2436,7 @@ int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos)
+       if (slave < 0)
+               return -EINVAL;
+ 
++      port = mlx4_slaves_closest_port(dev, slave, port);
+       vf_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
+ 
+       if ((0 == vlan) && (0 == qos))
+@@ -2446,6 +2464,7 @@ bool mlx4_get_slave_default_vlan(struct mlx4_dev *dev, int port, int slave,
+       struct mlx4_priv *priv;
+ 
+       priv = mlx4_priv(dev);
++      port = mlx4_slaves_closest_port(dev, slave, port);
+       vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
+ 
+       if (MLX4_VGT != vp_oper->state.default_vlan) {
+@@ -2473,6 +2492,7 @@ int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting)
+       if (slave < 0)
+               return -EINVAL;
+ 
++      port = mlx4_slaves_closest_port(dev, slave, port);
+       s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
+       s_info->spoofchk = setting;
+ 
+@@ -2526,6 +2546,7 @@ int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_stat
+       if (slave < 0)
+               return -EINVAL;
+ 
++      port = mlx4_slaves_closest_port(dev, slave, port);
+       switch (link_state) {
+       case IFLA_VF_LINK_STATE_AUTO:
+               /* get current link state */
+diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
+index 82ab427290c3..3bdc11e44ec3 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/main.c
++++ b/drivers/net/ethernet/mellanox/mlx4/main.c
+@@ -78,13 +78,13 @@ MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");
+ #endif /* CONFIG_PCI_MSI */
+ 
+ static uint8_t num_vfs[3] = {0, 0, 0};
+-static int num_vfs_argc = 3;
++static int num_vfs_argc;
+ module_param_array(num_vfs, byte , &num_vfs_argc, 0444);
+ MODULE_PARM_DESC(num_vfs, "enable #num_vfs functions if num_vfs > 0\n"
+                         "num_vfs=port1,port2,port1+2");
+ 
+ static uint8_t probe_vf[3] = {0, 0, 0};
+-static int probe_vfs_argc = 3;
++static int probe_vfs_argc;
+ module_param_array(probe_vf, byte, &probe_vfs_argc, 0444);
+ MODULE_PARM_DESC(probe_vf, "number of vfs to probe by pf driver (num_vfs > 0)\n"
+                          "probe_vf=port1,port2,port1+2");
+diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+index f3d5d79f1cd1..a173c985aa73 100644
+--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
++++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+@@ -872,6 +872,10 @@ static int myri10ge_dma_test(struct myri10ge_priv *mgp, int test_type)
+               return -ENOMEM;
+       dmatest_bus = pci_map_page(mgp->pdev, dmatest_page, 0, PAGE_SIZE,
+                                  DMA_BIDIRECTIONAL);
++      if (unlikely(pci_dma_mapping_error(mgp->pdev, dmatest_bus))) {
++              __free_page(dmatest_page);
++              return -ENOMEM;
++      }
+ 
+       /* Run a small DMA test.
+        * The magic multipliers to the length tell the firmware
+@@ -1293,6 +1297,7 @@ myri10ge_alloc_rx_pages(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx,
+                       int bytes, int watchdog)
+ {
+       struct page *page;
++      dma_addr_t bus;
+       int idx;
+ #if MYRI10GE_ALLOC_SIZE > 4096
+       int end_offset;
+@@ -1317,11 +1322,21 @@ myri10ge_alloc_rx_pages(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx,
+                                       rx->watchdog_needed = 1;
+                               return;
+                       }
++
++                      bus = pci_map_page(mgp->pdev, page, 0,
++                                         MYRI10GE_ALLOC_SIZE,
++                                         PCI_DMA_FROMDEVICE);
++                      if (unlikely(pci_dma_mapping_error(mgp->pdev, bus))) {
++                              __free_pages(page, MYRI10GE_ALLOC_ORDER);
++                              if (rx->fill_cnt - rx->cnt < 16)
++                                      rx->watchdog_needed = 1;
++                              return;
++                      }
++
+                       rx->page = page;
+                       rx->page_offset = 0;
+-                      rx->bus = pci_map_page(mgp->pdev, page, 0,
+-                                             MYRI10GE_ALLOC_SIZE,
+-                                             PCI_DMA_FROMDEVICE);
++                      rx->bus = bus;
++
+               }
+               rx->info[idx].page = rx->page;
+               rx->info[idx].page_offset = rx->page_offset;
+@@ -2763,6 +2778,35 @@ myri10ge_submit_req(struct myri10ge_tx_buf *tx, struct mcp_kreq_ether_send *src,
+       mb();
+ }
+ 
++static void myri10ge_unmap_tx_dma(struct myri10ge_priv *mgp,
++                                struct myri10ge_tx_buf *tx, int idx)
++{
++      unsigned int len;
++      int last_idx;
++
++      /* Free any DMA resources we've alloced and clear out the skb slot */
++      last_idx = (idx + 1) & tx->mask;
++      idx = tx->req & tx->mask;
++      do {
++              len = dma_unmap_len(&tx->info[idx], len);
++              if (len) {
++                      if (tx->info[idx].skb != NULL)
++                              pci_unmap_single(mgp->pdev,
++                                               dma_unmap_addr(&tx->info[idx],
++                                                              bus), len,
++                                               PCI_DMA_TODEVICE);
++                      else
++                              pci_unmap_page(mgp->pdev,
++                                             dma_unmap_addr(&tx->info[idx],
++                                                            bus), len,
++                                             PCI_DMA_TODEVICE);
++                      dma_unmap_len_set(&tx->info[idx], len, 0);
++                      tx->info[idx].skb = NULL;
++              }
++              idx = (idx + 1) & tx->mask;
++      } while (idx != last_idx);
++}
++
+ /*
+  * Transmit a packet.  We need to split the packet so that a single
+  * segment does not cross myri10ge->tx_boundary, so this makes segment
+@@ -2786,7 +2830,7 @@ static netdev_tx_t myri10ge_xmit(struct sk_buff *skb,
+       u32 low;
+       __be32 high_swapped;
+       unsigned int len;
+-      int idx, last_idx, avail, frag_cnt, frag_idx, count, mss, max_segments;
++      int idx, avail, frag_cnt, frag_idx, count, mss, max_segments;
+       u16 pseudo_hdr_offset, cksum_offset, queue;
+       int cum_len, seglen, boundary, rdma_count;
+       u8 flags, odd_flag;
+@@ -2883,9 +2927,12 @@ again:
+ 
+       /* map the skb for DMA */
+       len = skb_headlen(skb);
++      bus = pci_map_single(mgp->pdev, skb->data, len, PCI_DMA_TODEVICE);
++      if (unlikely(pci_dma_mapping_error(mgp->pdev, bus)))
++              goto drop;
++
+       idx = tx->req & tx->mask;
+       tx->info[idx].skb = skb;
+-      bus = pci_map_single(mgp->pdev, skb->data, len, PCI_DMA_TODEVICE);
+       dma_unmap_addr_set(&tx->info[idx], bus, bus);
+       dma_unmap_len_set(&tx->info[idx], len, len);
+ 
+@@ -2984,12 +3031,16 @@ again:
+                       break;
+ 
+               /* map next fragment for DMA */
+-              idx = (count + tx->req) & tx->mask;
+               frag = &skb_shinfo(skb)->frags[frag_idx];
+               frag_idx++;
+               len = skb_frag_size(frag);
+               bus = skb_frag_dma_map(&mgp->pdev->dev, frag, 0, len,
+                                      DMA_TO_DEVICE);
++              if (unlikely(pci_dma_mapping_error(mgp->pdev, bus))) {
++                      myri10ge_unmap_tx_dma(mgp, tx, idx);
++                      goto drop;
++              }
++              idx = (count + tx->req) & tx->mask;
+               dma_unmap_addr_set(&tx->info[idx], bus, bus);
+               dma_unmap_len_set(&tx->info[idx], len, len);
+       }
+@@ -3020,31 +3071,8 @@ again:
+       return NETDEV_TX_OK;
+ 
+ abort_linearize:
+-      /* Free any DMA resources we've alloced and clear out the skb
+-       * slot so as to not trip up assertions, and to avoid a
+-       * double-free if linearizing fails */
++      myri10ge_unmap_tx_dma(mgp, tx, idx);
+ 
+-      last_idx = (idx + 1) & tx->mask;
+-      idx = tx->req & tx->mask;
+-      tx->info[idx].skb = NULL;
+-      do {
+-              len = dma_unmap_len(&tx->info[idx], len);
+-              if (len) {
+-                      if (tx->info[idx].skb != NULL)
+-                              pci_unmap_single(mgp->pdev,
+-                                               dma_unmap_addr(&tx->info[idx],
+-                                                              bus), len,
+-                                               PCI_DMA_TODEVICE);
+-                      else
+-                              pci_unmap_page(mgp->pdev,
+-                                             dma_unmap_addr(&tx->info[idx],
+-                                                            bus), len,
+-                                             PCI_DMA_TODEVICE);
+-                      dma_unmap_len_set(&tx->info[idx], len, 0);
+-                      tx->info[idx].skb = NULL;
+-              }
+-              idx = (idx + 1) & tx->mask;
+-      } while (idx != last_idx);
+       if (skb_is_gso(skb)) {
+               netdev_err(mgp->dev, "TSO but wanted to linearize?!?!?\n");
+               goto drop;
+diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
+index d97d5f39a04e..7edf976ecfa0 100644
+--- a/drivers/net/hyperv/netvsc.c
++++ b/drivers/net/hyperv/netvsc.c
+@@ -708,6 +708,7 @@ int netvsc_send(struct hv_device *device,
+       unsigned int section_index = NETVSC_INVALID_INDEX;
+       u32 msg_size = 0;
+       struct sk_buff *skb;
++      u16 q_idx = packet->q_idx;
+ 
+ 
+       net_device = get_outbound_net_device(device);
+@@ -772,24 +773,24 @@ int netvsc_send(struct hv_device *device,
+ 
+       if (ret == 0) {
+               atomic_inc(&net_device->num_outstanding_sends);
+-              atomic_inc(&net_device->queue_sends[packet->q_idx]);
++              atomic_inc(&net_device->queue_sends[q_idx]);
+ 
+               if (hv_ringbuf_avail_percent(&out_channel->outbound) <
+                       RING_AVAIL_PERCENT_LOWATER) {
+                       netif_tx_stop_queue(netdev_get_tx_queue(
+-                                          ndev, packet->q_idx));
++                                          ndev, q_idx));
+ 
+                       if (atomic_read(&net_device->
+-                              queue_sends[packet->q_idx]) < 1)
++                              queue_sends[q_idx]) < 1)
+                               netif_tx_wake_queue(netdev_get_tx_queue(
+-                                                  ndev, packet->q_idx));
++                                                  ndev, q_idx));
+               }
+       } else if (ret == -EAGAIN) {
+               netif_tx_stop_queue(netdev_get_tx_queue(
+-                                  ndev, packet->q_idx));
+-              if (atomic_read(&net_device->queue_sends[packet->q_idx]) < 1) {
++                                  ndev, q_idx));
++              if (atomic_read(&net_device->queue_sends[q_idx]) < 1) {
+                       netif_tx_wake_queue(netdev_get_tx_queue(
+-                                          ndev, packet->q_idx));
++                                          ndev, q_idx));
+                       ret = -ENOSPC;
+               }
+       } else {
+diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
+index 4fd71b75e666..f15297201777 100644
+--- a/drivers/net/hyperv/netvsc_drv.c
++++ b/drivers/net/hyperv/netvsc_drv.c
+@@ -387,6 +387,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
+       int  hdr_offset;
+       u32 net_trans_info;
+       u32 hash;
++      u32 skb_length = skb->len;
+ 
+ 
+       /* We will atmost need two pages to describe the rndis
+@@ -562,7 +563,7 @@ do_send:
+ 
+ drop:
+       if (ret == 0) {
+-              net->stats.tx_bytes += skb->len;
++              net->stats.tx_bytes += skb_length;
+               net->stats.tx_packets++;
+       } else {
+               kfree(packet);
+diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
+index ef8a5c20236a..f3008e3cf118 100644
+--- a/drivers/net/macvlan.c
++++ b/drivers/net/macvlan.c
+@@ -36,6 +36,7 @@
+ #include <linux/netpoll.h>
+ 
+ #define MACVLAN_HASH_SIZE     (1 << BITS_PER_BYTE)
++#define MACVLAN_BC_QUEUE_LEN  1000
+ 
+ struct macvlan_port {
+       struct net_device       *dev;
+@@ -45,10 +46,9 @@ struct macvlan_port {
+       struct sk_buff_head     bc_queue;
+       struct work_struct      bc_work;
+       bool                    passthru;
++      int                     count;
+ };
+ 
+-#define MACVLAN_PORT_IS_EMPTY(port)    list_empty(&port->vlans)
+-
+ struct macvlan_skb_cb {
+       const struct macvlan_dev *src;
+ };
+@@ -249,7 +249,7 @@ static void macvlan_broadcast_enqueue(struct macvlan_port *port,
+               goto err;
+ 
+       spin_lock(&port->bc_queue.lock);
+-      if (skb_queue_len(&port->bc_queue) < skb->dev->tx_queue_len) {
++      if (skb_queue_len(&port->bc_queue) < MACVLAN_BC_QUEUE_LEN) {
+               __skb_queue_tail(&port->bc_queue, nskb);
+               err = 0;
+       }
+@@ -667,7 +667,8 @@ static void macvlan_uninit(struct net_device *dev)
+ 
+       free_percpu(vlan->pcpu_stats);
+ 
+-      if (MACVLAN_PORT_IS_EMPTY(port))
++      port->count -= 1;
++      if (!port->count)
+               macvlan_port_destroy(port->dev);
+ }
+ 
+@@ -800,6 +801,7 @@ static netdev_features_t macvlan_fix_features(struct net_device *dev,
+                                            features,
+                                            mask);
+       features |= ALWAYS_ON_FEATURES;
++      features &= ~NETIF_F_NETNS_LOCAL;
+ 
+       return features;
+ }
+@@ -1020,12 +1022,13 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
+               vlan->flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]);
+ 
+       if (vlan->mode == MACVLAN_MODE_PASSTHRU) {
+-              if (!MACVLAN_PORT_IS_EMPTY(port))
++              if (port->count)
+                       return -EINVAL;
+               port->passthru = true;
+               eth_hw_addr_inherit(dev, lowerdev);
+       }
+ 
++      port->count += 1;
+       err = register_netdevice(dev);
+       if (err < 0)
+               goto destroy_port;
+@@ -1043,7 +1046,8 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
+ unregister_netdev:
+       unregister_netdevice(dev);
+ destroy_port:
+-      if (MACVLAN_PORT_IS_EMPTY(port))
++      port->count -= 1;
++      if (!port->count)
+               macvlan_port_destroy(lowerdev);
+ 
+       return err;
+diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
+index 3381c4f91a8c..0c6adaaf898c 100644
+--- a/drivers/net/macvtap.c
++++ b/drivers/net/macvtap.c
+@@ -112,17 +112,15 @@ out:
+       return err;
+ }
+ 
++/* Requires RTNL */
+ static int macvtap_set_queue(struct net_device *dev, struct file *file,
+                            struct macvtap_queue *q)
+ {
+       struct macvlan_dev *vlan = netdev_priv(dev);
+-      int err = -EBUSY;
+ 
+-      rtnl_lock();
+       if (vlan->numqueues == MAX_MACVTAP_QUEUES)
+-              goto out;
++              return -EBUSY;
+ 
+-      err = 0;
+       rcu_assign_pointer(q->vlan, vlan);
+       rcu_assign_pointer(vlan->taps[vlan->numvtaps], q);
+       sock_hold(&q->sk);
+@@ -136,9 +134,7 @@ static int macvtap_set_queue(struct net_device *dev, struct file *file,
+       vlan->numvtaps++;
+       vlan->numqueues++;
+ 
+-out:
+-      rtnl_unlock();
+-      return err;
++      return 0;
+ }
+ 
+ static int macvtap_disable_queue(struct macvtap_queue *q)
+@@ -454,11 +450,12 @@ static void macvtap_sock_destruct(struct sock *sk)
+ static int macvtap_open(struct inode *inode, struct file *file)
+ {
+       struct net *net = current->nsproxy->net_ns;
+-      struct net_device *dev = dev_get_by_macvtap_minor(iminor(inode));
++      struct net_device *dev;
+       struct macvtap_queue *q;
+-      int err;
++      int err = -ENODEV;
+ 
+-      err = -ENODEV;
++      rtnl_lock();
++      dev = dev_get_by_macvtap_minor(iminor(inode));
+       if (!dev)
+               goto out;
+ 
+@@ -498,6 +495,7 @@ out:
+       if (dev)
+               dev_put(dev);
+ 
++      rtnl_unlock();
+       return err;
+ }
+ 
+diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
+index 180c49479c42..a4b08198fb9f 100644
+--- a/drivers/net/phy/smsc.c
++++ b/drivers/net/phy/smsc.c
+@@ -43,6 +43,22 @@ static int smsc_phy_ack_interrupt(struct phy_device *phydev)
+ 
+ static int smsc_phy_config_init(struct phy_device *phydev)
+ {
++      int rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
++
++      if (rc < 0)
++              return rc;
++
++      /* Enable energy detect mode for this SMSC Transceivers */
++      rc = phy_write(phydev, MII_LAN83C185_CTRL_STATUS,
++                     rc | MII_LAN83C185_EDPWRDOWN);
++      if (rc < 0)
++              return rc;
++
++      return smsc_phy_ack_interrupt(phydev);
++}
++
++static int smsc_phy_reset(struct phy_device *phydev)
++{
+       int rc = phy_read(phydev, MII_LAN83C185_SPECIAL_MODES);
+       if (rc < 0)
+               return rc;
+@@ -66,18 +82,7 @@ static int smsc_phy_config_init(struct phy_device *phydev)
+                       rc = phy_read(phydev, MII_BMCR);
+               } while (rc & BMCR_RESET);
+       }
+-
+-      rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
+-      if (rc < 0)
+-              return rc;
+-
+-      /* Enable energy detect mode for this SMSC Transceivers */
+-      rc = phy_write(phydev, MII_LAN83C185_CTRL_STATUS,
+-                     rc | MII_LAN83C185_EDPWRDOWN);
+-      if (rc < 0)
+-              return rc;
+-
+-      return smsc_phy_ack_interrupt (phydev);
++      return 0;
+ }
+ 
+ static int lan911x_config_init(struct phy_device *phydev)
+@@ -142,6 +147,7 @@ static struct phy_driver smsc_phy_driver[] = {
+       .config_aneg    = genphy_config_aneg,
+       .read_status    = genphy_read_status,
+       .config_init    = smsc_phy_config_init,
++      .soft_reset     = smsc_phy_reset,
+ 
+       /* IRQ related */
+       .ack_interrupt  = smsc_phy_ack_interrupt,
+@@ -164,6 +170,7 @@ static struct phy_driver smsc_phy_driver[] = {
+       .config_aneg    = genphy_config_aneg,
+       .read_status    = genphy_read_status,
+       .config_init    = smsc_phy_config_init,
++      .soft_reset     = smsc_phy_reset,
+ 
+       /* IRQ related */
+       .ack_interrupt  = smsc_phy_ack_interrupt,
+@@ -186,6 +193,7 @@ static struct phy_driver smsc_phy_driver[] = {
+       .config_aneg    = genphy_config_aneg,
+       .read_status    = genphy_read_status,
+       .config_init    = smsc_phy_config_init,
++      .soft_reset     = smsc_phy_reset,
+ 
+       /* IRQ related */
+       .ack_interrupt  = smsc_phy_ack_interrupt,
+@@ -230,6 +238,7 @@ static struct phy_driver smsc_phy_driver[] = {
+       .config_aneg    = genphy_config_aneg,
+       .read_status    = lan87xx_read_status,
+       .config_init    = smsc_phy_config_init,
++      .soft_reset     = smsc_phy_reset,
+ 
+       /* IRQ related */
+       .ack_interrupt  = smsc_phy_ack_interrupt,
+diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
+index b4958c7ffa84..cb2a00e1d95a 100644
+--- a/drivers/net/team/team.c
++++ b/drivers/net/team/team.c
+@@ -647,7 +647,7 @@ static void team_notify_peers(struct team *team)
+ {
+       if (!team->notify_peers.count || !netif_running(team->dev))
+               return;
+-      atomic_set(&team->notify_peers.count_pending, team->notify_peers.count);
++      atomic_add(team->notify_peers.count, &team->notify_peers.count_pending);
+       schedule_delayed_work(&team->notify_peers.dw, 0);
+ }
+ 
+@@ -687,7 +687,7 @@ static void team_mcast_rejoin(struct team *team)
+ {
+       if (!team->mcast_rejoin.count || !netif_running(team->dev))
+               return;
+-      atomic_set(&team->mcast_rejoin.count_pending, team->mcast_rejoin.count);
++      atomic_add(team->mcast_rejoin.count, &team->mcast_rejoin.count_pending);
+       schedule_delayed_work(&team->mcast_rejoin.dw, 0);
+ }
+ 
+diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
+index 9f79192c9aa0..31a7ad0d7d5f 100644
+--- a/drivers/net/vxlan.c
++++ b/drivers/net/vxlan.c
+@@ -1325,7 +1325,7 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb)
+       } else if (vxlan->flags & VXLAN_F_L3MISS) {
+               union vxlan_addr ipa = {
+                       .sin.sin_addr.s_addr = tip,
+-                      .sa.sa_family = AF_INET,
++                      .sin.sin_family = AF_INET,
+               };
+ 
+               vxlan_ip_miss(dev, &ipa);
+@@ -1486,7 +1486,7 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb)
+       } else if (vxlan->flags & VXLAN_F_L3MISS) {
+               union vxlan_addr ipa = {
+                       .sin6.sin6_addr = msg->target,
+-                      .sa.sa_family = AF_INET6,
++                      .sin6.sin6_family = AF_INET6,
+               };
+ 
+               vxlan_ip_miss(dev, &ipa);
+@@ -1519,7 +1519,7 @@ static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb)
+               if (!n && (vxlan->flags & VXLAN_F_L3MISS)) {
+                       union vxlan_addr ipa = {
+                               .sin.sin_addr.s_addr = pip->daddr,
+-                              .sa.sa_family = AF_INET,
++                              .sin.sin_family = AF_INET,
+                       };
+ 
+                       vxlan_ip_miss(dev, &ipa);
+@@ -1540,7 +1540,7 @@ static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb)
+               if (!n && (vxlan->flags & VXLAN_F_L3MISS)) {
+                       union vxlan_addr ipa = {
+                               .sin6.sin6_addr = pip6->daddr,
+-                              .sa.sa_family = AF_INET6,
++                              .sin6.sin6_family = AF_INET6,
+                       };
+ 
+                       vxlan_ip_miss(dev, &ipa);
+diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
+index 33137b3ba94d..370f6e46caf5 100644
+--- a/drivers/tty/serial/8250/8250_pci.c
++++ b/drivers/tty/serial/8250/8250_pci.c
+@@ -1790,6 +1790,7 @@ pci_wch_ch353_setup(struct serial_private *priv,
+ #define PCI_DEVICE_ID_COMMTECH_4222PCIE       0x0022
+ #define PCI_DEVICE_ID_BROADCOM_TRUMANAGE 0x160a
+ #define PCI_DEVICE_ID_AMCC_ADDIDATA_APCI7800 0x818e
++#define PCI_DEVICE_ID_INTEL_QRK_UART  0x0936
+ 
+ #define PCI_VENDOR_ID_SUNIX           0x1fd4
+ #define PCI_DEVICE_ID_SUNIX_1999      0x1999
+@@ -1900,6 +1901,13 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
+               .subdevice      = PCI_ANY_ID,
+               .setup          = byt_serial_setup,
+       },
++      {
++              .vendor         = PCI_VENDOR_ID_INTEL,
++              .device         = PCI_DEVICE_ID_INTEL_QRK_UART,
++              .subvendor      = PCI_ANY_ID,
++              .subdevice      = PCI_ANY_ID,
++              .setup          = pci_default_setup,
++      },
+       /*
+        * ITE
+        */
+@@ -2742,6 +2750,7 @@ enum pci_board_num_t {
+       pbn_ADDIDATA_PCIe_8_3906250,
+       pbn_ce4100_1_115200,
+       pbn_byt,
++      pbn_qrk,
+       pbn_omegapci,
+       pbn_NETMOS9900_2s_115200,
+       pbn_brcm_trumanage,
+@@ -3492,6 +3501,12 @@ static struct pciserial_board pci_boards[] = {
+               .uart_offset    = 0x80,
+               .reg_shift      = 2,
+       },
++      [pbn_qrk] = {
++              .flags          = FL_BASE0,
++              .num_ports      = 1,
++              .base_baud      = 2764800,
++              .reg_shift      = 2,
++      },
+       [pbn_omegapci] = {
+               .flags          = FL_BASE0,
+               .num_ports      = 8,
+@@ -5194,6 +5209,12 @@ static struct pci_device_id serial_pci_tbl[] = {
+               pbn_byt },
+ 
+       /*
++       * Intel Quark x1000
++       */
++      {       PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_QRK_UART,
++              PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++              pbn_qrk },
++      /*
+        * Cronyx Omega PCI
+        */
+       {       PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_CRONYX_OMEGA,
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 50e854509f55..ba2a8f3b8059 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -1983,8 +1983,10 @@ void usb_set_device_state(struct usb_device *udev,
+                                       || new_state == USB_STATE_SUSPENDED)
+                               ;       /* No change to wakeup settings */
+                       else if (new_state == USB_STATE_CONFIGURED)
+-                              wakeup = udev->actconfig->desc.bmAttributes
+-                                       & USB_CONFIG_ATT_WAKEUP;
++                              wakeup = (udev->quirks &
++                                      USB_QUIRK_IGNORE_REMOTE_WAKEUP) ? 0 :
++                                      udev->actconfig->desc.bmAttributes &
++                                      USB_CONFIG_ATT_WAKEUP;
+                       else
+                               wakeup = 0;
+               }
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index 739ee8e8bdfd..5144d11d032c 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -160,6 +160,10 @@ static const struct usb_device_id usb_interface_quirk_list[] = {
+       { USB_VENDOR_AND_INTERFACE_INFO(0x046d, USB_CLASS_VIDEO, 1, 0),
+         .driver_info = USB_QUIRK_RESET_RESUME },
+ 
++      /* ASUS Base Station(T100) */
++      { USB_DEVICE(0x0b05, 0x17e0), .driver_info =
++                      USB_QUIRK_IGNORE_REMOTE_WAKEUP },
++
+       { }  /* terminating entry must be last */
+ };
+ 
+diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
+index 09529f94e72d..6983e805147b 100644
+--- a/drivers/usb/musb/musb_dsps.c
++++ b/drivers/usb/musb/musb_dsps.c
+@@ -780,6 +780,7 @@ static int dsps_suspend(struct device *dev)
+       struct musb *musb = platform_get_drvdata(glue->musb);
+       void __iomem *mbase = musb->ctrl_base;
+ 
++      del_timer_sync(&glue->timer);
+       glue->context.control = dsps_readl(mbase, wrp->control);
+       glue->context.epintr = dsps_readl(mbase, wrp->epintr_set);
+       glue->context.coreintr = dsps_readl(mbase, wrp->coreintr_set);
+@@ -805,6 +806,7 @@ static int dsps_resume(struct device *dev)
+       dsps_writel(mbase, wrp->mode, glue->context.mode);
+       dsps_writel(mbase, wrp->tx_mode, glue->context.tx_mode);
+       dsps_writel(mbase, wrp->rx_mode, glue->context.rx_mode);
++      setup_timer(&glue->timer, otg_timer, (unsigned long) musb);
+ 
+       return 0;
+ }
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index 330df5ce435b..63b2af2a87c0 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -122,6 +122,7 @@ static const struct usb_device_id id_table[] = {
+       { USB_DEVICE(0x10C4, 0x8665) }, /* AC-Services OBD-IF */
+       { USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */
+       { USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */
++      { USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */
+       { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
+       { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
+       { USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */
+@@ -155,6 +156,7 @@ static const struct usb_device_id id_table[] = {
+       { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
+       { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */
+       { USB_DEVICE(0x1BE3, 0x07A6) }, /* WAGO 750-923 USB Service Cable */
++      { USB_DEVICE(0x1D6F, 0x0010) }, /* Seluxit ApS RF Dongle */
+       { USB_DEVICE(0x1E29, 0x0102) }, /* Festo CPX-USB */
+       { USB_DEVICE(0x1E29, 0x0501) }, /* Festo CMSP */
+       { USB_DEVICE(0x1FB9, 0x0100) }, /* Lake Shore Model 121 Current Source */
+diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
+index 3f42785f653c..27136935fec3 100644
+--- a/drivers/usb/storage/uas.c
++++ b/drivers/usb/storage/uas.c
+@@ -28,6 +28,7 @@
+ #include <scsi/scsi_tcq.h>
+ 
+ #include "uas-detect.h"
++#include "scsiglue.h"
+ 
+ /*
+  * The r00-r01c specs define this version of the SENSE IU data structure.
+@@ -49,6 +50,7 @@ struct uas_dev_info {
+       struct usb_anchor cmd_urbs;
+       struct usb_anchor sense_urbs;
+       struct usb_anchor data_urbs;
++      unsigned long flags;
+       int qdepth, resetting;
+       struct response_iu response;
+       unsigned cmd_pipe, status_pipe, data_in_pipe, data_out_pipe;
+@@ -714,6 +716,15 @@ static int uas_queuecommand_lck(struct scsi_cmnd *cmnd,
+ 
+       BUILD_BUG_ON(sizeof(struct uas_cmd_info) > sizeof(struct scsi_pointer));
+ 
++      if ((devinfo->flags & US_FL_NO_ATA_1X) &&
++                      (cmnd->cmnd[0] == ATA_12 || cmnd->cmnd[0] == ATA_16)) {
++              memcpy(cmnd->sense_buffer, usb_stor_sense_invalidCDB,
++                     sizeof(usb_stor_sense_invalidCDB));
++              cmnd->result = SAM_STAT_CHECK_CONDITION;
++              cmnd->scsi_done(cmnd);
++              return 0;
++      }
++
+       spin_lock_irqsave(&devinfo->lock, flags);
+ 
+       if (devinfo->resetting) {
+@@ -950,6 +961,10 @@ static int uas_slave_alloc(struct scsi_device *sdev)
+ static int uas_slave_configure(struct scsi_device *sdev)
+ {
+       struct uas_dev_info *devinfo = sdev->hostdata;
++
++      if (devinfo->flags & US_FL_NO_REPORT_OPCODES)
++              sdev->no_report_opcodes = 1;
++
+       scsi_set_tag_type(sdev, MSG_ORDERED_TAG);
+       scsi_activate_tcq(sdev, devinfo->qdepth - 2);
+       return 0;
+@@ -1080,6 +1095,8 @@ static int uas_probe(struct usb_interface *intf, const struct usb_device_id *id)
+       devinfo->resetting = 0;
+       devinfo->running_task = 0;
+       devinfo->shutdown = 0;
++      devinfo->flags = id->driver_info;
++      usb_stor_adjust_quirks(udev, &devinfo->flags);
+       init_usb_anchor(&devinfo->cmd_urbs);
+       init_usb_anchor(&devinfo->sense_urbs);
+       init_usb_anchor(&devinfo->data_urbs);
+diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
+index 7244444df8ee..8511b54a65d9 100644
+--- a/drivers/usb/storage/unusual_uas.h
++++ b/drivers/usb/storage/unusual_uas.h
+@@ -40,13 +40,38 @@
+  * and don't forget to CC: the USB development list <linux-...@vger.kernel.org>
+  */
+ 
+-/*
+- * This is an example entry for the US_FL_IGNORE_UAS flag. Once we have an
+- * actual entry using US_FL_IGNORE_UAS this entry should be removed.
+- *
+- * UNUSUAL_DEV(  0xabcd, 0x1234, 0x0100, 0x0100,
+- *            "Example",
+- *            "Storage with broken UAS",
+- *            USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+- *            US_FL_IGNORE_UAS),
+- */
++/* https://bugzilla.kernel.org/show_bug.cgi?id=79511 */
++UNUSUAL_DEV(0x0bc2, 0x2312, 0x0000, 0x9999,
++              "Seagate",
++              "Expansion Desk",
++              USB_SC_DEVICE, USB_PR_DEVICE, NULL,
++              US_FL_NO_ATA_1X),
++
++/* https://bbs.archlinux.org/viewtopic.php?id=183190 */
++UNUSUAL_DEV(0x0bc2, 0x3312, 0x0000, 0x9999,
++              "Seagate",
++              "Expansion Desk",
++              USB_SC_DEVICE, USB_PR_DEVICE, NULL,
++              US_FL_NO_ATA_1X),
++
++/* https://bbs.archlinux.org/viewtopic.php?id=183190 */
++UNUSUAL_DEV(0x0bc2, 0xab20, 0x0000, 0x9999,
++              "Seagate",
++              "Backup+ BK",
++              USB_SC_DEVICE, USB_PR_DEVICE, NULL,
++              US_FL_NO_ATA_1X),
++
++/* Reported-by: Claudio Bizzarri <claudio.bizza...@gmail.com> */
++UNUSUAL_DEV(0x152d, 0x0567, 0x0000, 0x9999,
++              "JMicron",
++              "JMS567",
++              USB_SC_DEVICE, USB_PR_DEVICE, NULL,
++              US_FL_NO_REPORT_OPCODES),
++
++/* Most ASM1051 based devices have issues with uas, blacklist them all */
++/* Reported-by: Hans de Goede <hdego...@redhat.com> */
++UNUSUAL_DEV(0x174c, 0x5106, 0x0000, 0x9999,
++              "ASMedia",
++              "ASM1051",
++              USB_SC_DEVICE, USB_PR_DEVICE, NULL,
++              US_FL_IGNORE_UAS),
+diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
+index f1c96261a501..20c5bcc6d3df 100644
+--- a/drivers/usb/storage/usb.c
++++ b/drivers/usb/storage/usb.c
+@@ -476,7 +476,8 @@ void usb_stor_adjust_quirks(struct usb_device *udev, unsigned long *fflags)
+                       US_FL_CAPACITY_OK | US_FL_IGNORE_RESIDUE |
+                       US_FL_SINGLE_LUN | US_FL_NO_WP_DETECT |
+                       US_FL_NO_READ_DISC_INFO | US_FL_NO_READ_CAPACITY_16 |
+-                      US_FL_INITIAL_READ10 | US_FL_WRITE_CACHE);
++                      US_FL_INITIAL_READ10 | US_FL_WRITE_CACHE |
++                      US_FL_NO_ATA_1X | US_FL_NO_REPORT_OPCODES);
+ 
+       p = quirks;
+       while (*p) {
+@@ -514,6 +515,9 @@ void usb_stor_adjust_quirks(struct usb_device *udev, unsigned long *fflags)
+               case 'e':
+                       f |= US_FL_NO_READ_CAPACITY_16;
+                       break;
++              case 'f':
++                      f |= US_FL_NO_REPORT_OPCODES;
++                      break;
+               case 'h':
+                       f |= US_FL_CAPACITY_HEURISTICS;
+                       break;
+@@ -541,6 +545,9 @@ void usb_stor_adjust_quirks(struct usb_device *udev, unsigned long *fflags)
+               case 's':
+                       f |= US_FL_SINGLE_LUN;
+                       break;
++              case 't':
++                      f |= US_FL_NO_ATA_1X;
++                      break;
+               case 'u':
+                       f |= US_FL_IGNORE_UAS;
+                       break;
+diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
+index 4967916fe4ac..d69f0577a319 100644
+--- a/include/linux/if_vlan.h
++++ b/include/linux/if_vlan.h
+@@ -187,7 +187,6 @@ vlan_dev_get_egress_qos_mask(struct net_device *dev, u32 skprio)
+ }
+ 
+ extern bool vlan_do_receive(struct sk_buff **skb);
+-extern struct sk_buff *vlan_untag(struct sk_buff *skb);
+ 
+ extern int vlan_vid_add(struct net_device *dev, __be16 proto, u16 vid);
+ extern void vlan_vid_del(struct net_device *dev, __be16 proto, u16 vid);
+@@ -241,11 +240,6 @@ static inline bool vlan_do_receive(struct sk_buff **skb)
+       return false;
+ }
+ 
+-static inline struct sk_buff *vlan_untag(struct sk_buff *skb)
+-{
+-      return skb;
+-}
+-
+ static inline int vlan_vid_add(struct net_device *dev, __be16 proto, u16 vid)
+ {
+       return 0;
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index ec89301ada41..6bb6bd86b0dc 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -2549,6 +2549,7 @@ int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen);
+ void skb_scrub_packet(struct sk_buff *skb, bool xnet);
+ unsigned int skb_gso_transport_seglen(const struct sk_buff *skb);
+ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
++struct sk_buff *skb_vlan_untag(struct sk_buff *skb);
+ 
+ struct skb_checksum_ops {
+       __wsum (*update)(const void *mem, int len, __wsum wsum);
+diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h
+index 52f944dfe2fd..49587dc22f5d 100644
+--- a/include/linux/usb/quirks.h
++++ b/include/linux/usb/quirks.h
+@@ -30,4 +30,7 @@
+    descriptor */
+ #define USB_QUIRK_DELAY_INIT          0x00000040
+ 
++/* device generates spurious wakeup, ignore remote wakeup capability */
++#define USB_QUIRK_IGNORE_REMOTE_WAKEUP        0x00000200
++
+ #endif /* __LINUX_USB_QUIRKS_H */
+diff --git a/include/linux/usb_usual.h b/include/linux/usb_usual.h
+index 9b7de1b46437..a7f2604c5f25 100644
+--- a/include/linux/usb_usual.h
++++ b/include/linux/usb_usual.h
+@@ -73,6 +73,10 @@
+               /* Device advertises UAS but it is broken */    \
+       US_FLAG(BROKEN_FUA,     0x01000000)                     \
+               /* Cannot handle FUA in WRITE or READ CDBs */   \
++      US_FLAG(NO_ATA_1X,      0x02000000)                     \
++              /* Cannot handle ATA_12 or ATA_16 CDBs */       \
++      US_FLAG(NO_REPORT_OPCODES,      0x04000000)             \
++              /* Cannot handle MI_REPORT_SUPPORTED_OPERATION_CODES */ \
+ 
+ #define US_FLAG(name, value)  US_FL_##name = value ,
+ enum { US_DO_ALL_FLAGS };
+diff --git a/include/net/dst.h b/include/net/dst.h
+index 71c60f42be48..a8ae4e760778 100644
+--- a/include/net/dst.h
++++ b/include/net/dst.h
+@@ -480,6 +480,7 @@ void dst_init(void);
+ /* Flags for xfrm_lookup flags argument. */
+ enum {
+       XFRM_LOOKUP_ICMP = 1 << 0,
++      XFRM_LOOKUP_QUEUE = 1 << 1,
+ };
+ 
+ struct flowi;
+@@ -490,7 +491,16 @@ static inline struct dst_entry *xfrm_lookup(struct net *net,
+                                           int flags)
+ {
+       return dst_orig;
+-} 
++}
++
++static inline struct dst_entry *xfrm_lookup_route(struct net *net,
++                                                struct dst_entry *dst_orig,
++                                                const struct flowi *fl,
++                                                struct sock *sk,
++                                                int flags)
++{
++      return dst_orig;
++}
+ 
+ static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst)
+ {
+@@ -502,6 +512,10 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
+                             const struct flowi *fl, struct sock *sk,
+                             int flags);
+ 
++struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
++                                  const struct flowi *fl, struct sock *sk,
++                                  int flags);
++
+ /* skb attached with this dst needs transformation if dst->xfrm is valid */
+ static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst)
+ {
+diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
+index 7a4313887568..5fbe6568c3cf 100644
+--- a/include/net/inet_connection_sock.h
++++ b/include/net/inet_connection_sock.h
+@@ -62,6 +62,7 @@ struct inet_connection_sock_af_ops {
+       void        (*addr2sockaddr)(struct sock *sk, struct sockaddr *);
+       int         (*bind_conflict)(const struct sock *sk,
+                                    const struct inet_bind_bucket *tb, bool relax);
++      void        (*mtu_reduced)(struct sock *sk);
+ };
+ 
+ /** inet_connection_sock - INET connection oriented sock
+diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
+index 9bcb220bd4ad..cf485f9aa563 100644
+--- a/include/net/ip6_fib.h
++++ b/include/net/ip6_fib.h
+@@ -114,16 +114,13 @@ struct rt6_info {
+       u32                             rt6i_flags;
+       struct rt6key                   rt6i_src;
+       struct rt6key                   rt6i_prefsrc;
+-      u32                             rt6i_metric;
+ 
+       struct inet6_dev                *rt6i_idev;
+       unsigned long                   _rt6i_peer;
+ 
+-      u32                             rt6i_genid;
+-
++      u32                             rt6i_metric;
+       /* more non-fragment space at head required */
+       unsigned short                  rt6i_nfheader_len;
+-
+       u8                              rt6i_protocol;
+ };
+ 
+diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
+index 361d26077196..e0d64667a4b3 100644
+--- a/include/net/net_namespace.h
++++ b/include/net/net_namespace.h
+@@ -352,26 +352,12 @@ static inline void rt_genid_bump_ipv4(struct net *net)
+       atomic_inc(&net->ipv4.rt_genid);
+ }
+ 
+-#if IS_ENABLED(CONFIG_IPV6)
+-static inline int rt_genid_ipv6(struct net *net)
+-{
+-      return atomic_read(&net->ipv6.rt_genid);
+-}
+-
+-static inline void rt_genid_bump_ipv6(struct net *net)
+-{
+-      atomic_inc(&net->ipv6.rt_genid);
+-}
+-#else
+-static inline int rt_genid_ipv6(struct net *net)
+-{
+-      return 0;
+-}
+-
++extern void (*__fib6_flush_trees)(struct net *net);
+ static inline void rt_genid_bump_ipv6(struct net *net)
+ {
++      if (__fib6_flush_trees)
++              __fib6_flush_trees(net);
+ }
+-#endif
+ 
+ #if IS_ENABLED(CONFIG_IEEE802154_6LOWPAN)
+ static inline struct netns_ieee802154_lowpan *
+diff --git a/include/net/sctp/command.h b/include/net/sctp/command.h
+index 4b7cd695e431..cfcbc3f627bd 100644
+--- a/include/net/sctp/command.h
++++ b/include/net/sctp/command.h
+@@ -115,7 +115,7 @@ typedef enum {
+  * analysis of the state functions, but in reality just taken from
+  * thin air in the hopes othat we don't trigger a kernel panic.
+  */
+-#define SCTP_MAX_NUM_COMMANDS 14
++#define SCTP_MAX_NUM_COMMANDS 20
+ 
+ typedef union {
+       __s32 i32;
+diff --git a/include/net/sock.h b/include/net/sock.h
+index 156350745700..6cc7944d65bf 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -971,7 +971,6 @@ struct proto {
+                                               struct sk_buff *skb);
+ 
+       void            (*release_cb)(struct sock *sk);
+-      void            (*mtu_reduced)(struct sock *sk);
+ 
+       /* Keeping track of sk's, looking them up, and port selection methods. */
+       void                    (*hash)(struct sock *sk);
+diff --git a/include/net/tcp.h b/include/net/tcp.h
+index 7286db80e8b8..d587ff0f8828 100644
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -448,6 +448,7 @@ const u8 *tcp_parse_md5sig_option(const struct tcphdr *th);
+  */
+ 
+ void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
++void tcp_v4_mtu_reduced(struct sock *sk);
+ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
+ struct sock *tcp_create_openreq_child(struct sock *sk,
+                                     struct request_sock *req,
+@@ -718,8 +719,10 @@ struct tcp_skb_cb {
+ #define TCPCB_SACKED_RETRANS  0x02    /* SKB retransmitted            */
+ #define TCPCB_LOST            0x04    /* SKB is lost                  */
+ #define TCPCB_TAGBITS         0x07    /* All tag bits                 */
++#define TCPCB_REPAIRED                0x10    /* SKB repaired (no skb_mstamp) */
+ #define TCPCB_EVER_RETRANS    0x80    /* Ever retransmitted frame     */
+-#define TCPCB_RETRANS         (TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS)
++#define TCPCB_RETRANS         (TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS| \
++                              TCPCB_REPAIRED)
+ 
+       __u8            ip_dsfield;     /* IPv4 tos or IPv6 dsfield     */
+       /* 1 byte hole */
+diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
+index 75d427763992..90cc2bdd4064 100644
+--- a/net/8021q/vlan_core.c
++++ b/net/8021q/vlan_core.c
+@@ -112,59 +112,6 @@ __be16 vlan_dev_vlan_proto(const struct net_device *dev)
+ }
+ EXPORT_SYMBOL(vlan_dev_vlan_proto);
+ 
+-static struct sk_buff *vlan_reorder_header(struct sk_buff *skb)
+-{
+-      if (skb_cow(skb, skb_headroom(skb)) < 0) {
+-              kfree_skb(skb);
+-              return NULL;
+-      }
+-
+-      memmove(skb->data - ETH_HLEN, skb->data - VLAN_ETH_HLEN, 2 * ETH_ALEN);
+-      skb->mac_header += VLAN_HLEN;
+-      return skb;
+-}
+-
+-struct sk_buff *vlan_untag(struct sk_buff *skb)
+-{
+-      struct vlan_hdr *vhdr;
+-      u16 vlan_tci;
+-
+-      if (unlikely(vlan_tx_tag_present(skb))) {
+-              /* vlan_tci is already set-up so leave this for another time */
+-              return skb;
+-      }
+-
+-      skb = skb_share_check(skb, GFP_ATOMIC);
+-      if (unlikely(!skb))
+-              goto err_free;
+-
+-      if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
+-              goto err_free;
+-
+-      vhdr = (struct vlan_hdr *) skb->data;
+-      vlan_tci = ntohs(vhdr->h_vlan_TCI);
+-      __vlan_hwaccel_put_tag(skb, skb->protocol, vlan_tci);
+-
+-      skb_pull_rcsum(skb, VLAN_HLEN);
+-      vlan_set_encap_proto(skb, vhdr);
+-
+-      skb = vlan_reorder_header(skb);
+-      if (unlikely(!skb))
+-              goto err_free;
+-
+-      skb_reset_network_header(skb);
+-      skb_reset_transport_header(skb);
+-      skb_reset_mac_len(skb);
+-
+-      return skb;
+-
+-err_free:
+-      kfree_skb(skb);
+-      return NULL;
+-}
+-EXPORT_SYMBOL(vlan_untag);
+-
+-
+ /*
+  * vlan info and vid list
+  */
+diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
+index 23caf5b0309e..4fd47a1a0e9a 100644
+--- a/net/bridge/br_private.h
++++ b/net/bridge/br_private.h
+@@ -309,6 +309,9 @@ struct br_input_skb_cb {
+       int igmp;
+       int mrouters_only;
+ #endif
++#ifdef CONFIG_BRIDGE_VLAN_FILTERING
++      bool vlan_filtered;
++#endif
+ };
+ 
+ #define BR_INPUT_SKB_CB(__skb)        ((struct br_input_skb_cb *)(__skb)->cb)
+diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
+index 2b2774fe0703..b03e884fba3e 100644
+--- a/net/bridge/br_vlan.c
++++ b/net/bridge/br_vlan.c
+@@ -127,7 +127,8 @@ struct sk_buff *br_handle_vlan(struct net_bridge *br,
+ {
+       u16 vid;
+ 
+-      if (!br->vlan_enabled)
++      /* If this packet was not filtered at input, let it pass */
++      if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
+               goto out;
+ 
+       /* Vlan filter table must be configured at this point.  The
+@@ -166,8 +167,10 @@ bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
+       /* If VLAN filtering is disabled on the bridge, all packets are
+        * permitted.
+        */
+-      if (!br->vlan_enabled)
++      if (!br->vlan_enabled) {
++              BR_INPUT_SKB_CB(skb)->vlan_filtered = false;
+               return true;
++      }
+ 
+       /* If there are no vlan in the permitted list, all packets are
+        * rejected.
+@@ -175,6 +178,7 @@ bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
+       if (!v)
+               goto drop;
+ 
++      BR_INPUT_SKB_CB(skb)->vlan_filtered = true;
+       proto = br->vlan_proto;
+ 
+       /* If vlan tx offload is disabled on bridge device and frame was
+@@ -183,7 +187,7 @@ bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
+        */
+       if (unlikely(!vlan_tx_tag_present(skb) &&
+                    skb->protocol == proto)) {
+-              skb = vlan_untag(skb);
++              skb = skb_vlan_untag(skb);
+               if (unlikely(!skb))
+                       return false;
+       }
+@@ -253,7 +257,8 @@ bool br_allowed_egress(struct net_bridge *br,
+ {
+       u16 vid;
+ 
+-      if (!br->vlan_enabled)
++      /* If this packet was not filtered at input, let it pass */
++      if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
+               return true;
+ 
+       if (!v)
+@@ -272,6 +277,7 @@ bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid)
+       struct net_bridge *br = p->br;
+       struct net_port_vlans *v;
+ 
++      /* If filtering was disabled at input, let it pass. */
+       if (!br->vlan_enabled)
+               return true;
+ 
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 367a586d0c8a..2647b508eb4d 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -2576,13 +2576,19 @@ netdev_features_t netif_skb_features(struct sk_buff *skb)
+               return harmonize_features(skb, features);
+       }
+ 
+-      features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX |
+-                                             NETIF_F_HW_VLAN_STAG_TX);
++      features = netdev_intersect_features(features,
++                                           skb->dev->vlan_features |
++                                           NETIF_F_HW_VLAN_CTAG_TX |
++                                           NETIF_F_HW_VLAN_STAG_TX);
+ 
+       if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD))
+-              features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST |
+-                              NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
+-                              NETIF_F_HW_VLAN_STAG_TX;
++              features = netdev_intersect_features(features,
++                                                   NETIF_F_SG |
++                                                   NETIF_F_HIGHDMA |
++                                                   NETIF_F_FRAGLIST |
++                                                   NETIF_F_GEN_CSUM |
++                                                   NETIF_F_HW_VLAN_CTAG_TX |
++                                                   NETIF_F_HW_VLAN_STAG_TX);
+ 
+       return harmonize_features(skb, features);
+ }
+@@ -3588,7 +3594,7 @@ another_round:
+ 
+       if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
+           skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
+-              skb = vlan_untag(skb);
++              skb = skb_vlan_untag(skb);
+               if (unlikely(!skb))
+                       goto unlock;
+       }
+diff --git a/net/core/filter.c b/net/core/filter.c
+index 1dbf6462f766..3139f966a178 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -1318,6 +1318,7 @@ static int sk_store_orig_filter(struct sk_filter *fp,
+       fkprog->filter = kmemdup(fp->insns, fsize, GFP_KERNEL);
+       if (!fkprog->filter) {
+               kfree(fp->orig_prog);
++              fp->orig_prog = NULL;
+               return -ENOMEM;
+       }
+ 
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index 1063996f8317..e0b5ca349049 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -799,7 +799,8 @@ static inline int rtnl_vfinfo_size(const struct net_device *dev,
+                       (nla_total_size(sizeof(struct ifla_vf_mac)) +
+                        nla_total_size(sizeof(struct ifla_vf_vlan)) +
+                        nla_total_size(sizeof(struct ifla_vf_spoofchk)) +
+-                       nla_total_size(sizeof(struct ifla_vf_rate)));
++                       nla_total_size(sizeof(struct ifla_vf_rate)) +
++                       nla_total_size(sizeof(struct ifla_vf_link_state)));
+               return size;
+       } else
+               return 0;
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 58ff88edbefd..f5f14d54d6a2 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -62,6 +62,7 @@
+ #include <linux/scatterlist.h>
+ #include <linux/errqueue.h>
+ #include <linux/prefetch.h>
++#include <linux/if_vlan.h>
+ 
+ #include <net/protocol.h>
+ #include <net/dst.h>
+@@ -3151,6 +3152,9 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
+               NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD;
+               goto done;
+       }
++      /* switch back to head shinfo */
++      pinfo = skb_shinfo(p);
++
+       if (pinfo->frag_list)
+               goto merge;
+       if (skb_gro_len(p) != pinfo->gso_size)
+@@ -3959,3 +3963,55 @@ unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
+       return shinfo->gso_size;
+ }
+ EXPORT_SYMBOL_GPL(skb_gso_transport_seglen);
++
++static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
++{
++      if (skb_cow(skb, skb_headroom(skb)) < 0) {
++              kfree_skb(skb);
++              return NULL;
++      }
++
++      memmove(skb->data - ETH_HLEN, skb->data - VLAN_ETH_HLEN, 2 * ETH_ALEN);
++      skb->mac_header += VLAN_HLEN;
++      return skb;
++}
++
++struct sk_buff *skb_vlan_untag(struct sk_buff *skb)
++{
++      struct vlan_hdr *vhdr;
++      u16 vlan_tci;
++
++      if (unlikely(vlan_tx_tag_present(skb))) {
++              /* vlan_tci is already set-up so leave this for another time */
++              return skb;
++      }
++
++      skb = skb_share_check(skb, GFP_ATOMIC);
++      if (unlikely(!skb))
++              goto err_free;
++
++      if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
++              goto err_free;
++
++      vhdr = (struct vlan_hdr *)skb->data;
++      vlan_tci = ntohs(vhdr->h_vlan_TCI);
++      __vlan_hwaccel_put_tag(skb, skb->protocol, vlan_tci);
++
++      skb_pull_rcsum(skb, VLAN_HLEN);
++      vlan_set_encap_proto(skb, vhdr);
++
++      skb = skb_reorder_vlan_header(skb);
++      if (unlikely(!skb))
++              goto err_free;
++
++      skb_reset_network_header(skb);
++      skb_reset_transport_header(skb);
++      skb_reset_mac_len(skb);
++
++      return skb;
++
++err_free:
++      kfree_skb(skb);
++      return NULL;
++}
++EXPORT_SYMBOL(skb_vlan_untag);
+diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
+index 45920d928341..6c2719373bc5 100644
+--- a/net/ipv4/ip_tunnel.c
++++ b/net/ipv4/ip_tunnel.c
+@@ -764,9 +764,14 @@ int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd)
+ 
+               t = ip_tunnel_find(itn, p, itn->fb_tunnel_dev->type);
+ 
+-              if (!t && (cmd == SIOCADDTUNNEL)) {
+-                      t = ip_tunnel_create(net, itn, p);
+-                      err = PTR_ERR_OR_ZERO(t);
++              if (cmd == SIOCADDTUNNEL) {
++                      if (!t) {
++                              t = ip_tunnel_create(net, itn, p);
++                              err = PTR_ERR_OR_ZERO(t);
++                              break;
++                      }
++
++                      err = -EEXIST;
+                       break;
+               }
+               if (dev != itn->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index 190199851c9a..4b340c30a037 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -2267,9 +2267,9 @@ struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4,
+               return rt;
+ 
+       if (flp4->flowi4_proto)
+-              rt = (struct rtable *) xfrm_lookup(net, &rt->dst,
+-                                                 flowi4_to_flowi(flp4),
+-                                                 sk, 0);
++              rt = (struct rtable *)xfrm_lookup_route(net, &rt->dst,
++                                                      flowi4_to_flowi(flp4),
++                                                      sk, 0);
+ 
+       return rt;
+ }
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 9d2118e5fbc7..0717f45b5171 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -1175,13 +1175,6 @@ new_segment:
+                                       goto wait_for_memory;
+ 
+                               /*
+-                               * All packets are restored as if they have
+-                               * already been sent.
+-                               */
+-                              if (tp->repair)
+-                                      TCP_SKB_CB(skb)->when = tcp_time_stamp;
+-
+-                              /*
+                                * Check whether we can use HW checksum.
+                                */
+                               if (sk->sk_route_caps & NETIF_F_ALL_CSUM)
+@@ -1190,6 +1183,13 @@ new_segment:
+                               skb_entail(sk, skb);
+                               copy = size_goal;
+                               max = size_goal;
++
++                              /* All packets are restored as if they have
++                               * already been sent. skb_mstamp isn't set to
++                               * avoid wrong rtt estimation.
++                               */
++                              if (tp->repair)
++                                      TCP_SKB_CB(skb)->sacked |= TCPCB_REPAIRED;
+                       }
+ 
+                       /* Try to append data to the end of skb. */
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 40639c288dc2..a1bbebb03490 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -2680,7 +2680,6 @@ static void tcp_enter_recovery(struct sock *sk, bool ece_ack)
+  */
+ static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack)
+ {
+-      struct inet_connection_sock *icsk = inet_csk(sk);
+       struct tcp_sock *tp = tcp_sk(sk);
+       bool recovered = !before(tp->snd_una, tp->high_seq);
+ 
+@@ -2706,12 +2705,9 @@ static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack)
+ 
+       if (recovered) {
+               /* F-RTO RFC5682 sec 3.1 step 2.a and 1st part of step 3.a */
+-              icsk->icsk_retransmits = 0;
+               tcp_try_undo_recovery(sk);
+               return;
+       }
+-      if (flag & FLAG_DATA_ACKED)
+-              icsk->icsk_retransmits = 0;
+       if (tcp_is_reno(tp)) {
+               /* A Reno DUPACK means new data in F-RTO step 2.b above are
+                * delivered. Lower inflight to clock out (re)tranmissions.
+@@ -3393,8 +3389,10 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
+           icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)
+               tcp_rearm_rto(sk);
+ 
+-      if (after(ack, prior_snd_una))
++      if (after(ack, prior_snd_una)) {
+               flag |= FLAG_SND_UNA_ADVANCED;
++              icsk->icsk_retransmits = 0;
++      }
+ 
+       prior_fackets = tp->fackets_out;
+ 
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index 77cccda1ad0c..f63c524de5d9 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -269,7 +269,7 @@ EXPORT_SYMBOL(tcp_v4_connect);
+  * It can be called through tcp_release_cb() if socket was owned by user
+  * at the time tcp_v4_err() was called to handle ICMP message.
+  */
+-static void tcp_v4_mtu_reduced(struct sock *sk)
++void tcp_v4_mtu_reduced(struct sock *sk)
+ {
+       struct dst_entry *dst;
+       struct inet_sock *inet = inet_sk(sk);
+@@ -300,6 +300,7 @@ static void tcp_v4_mtu_reduced(struct sock *sk)
+               tcp_simple_retransmit(sk);
+       } /* else let the usual retransmit timer handle it */
+ }
++EXPORT_SYMBOL(tcp_v4_mtu_reduced);
+ 
+ static void do_redirect(struct sk_buff *skb, struct sock *sk)
+ {
+@@ -1880,6 +1881,7 @@ const struct inet_connection_sock_af_ops ipv4_specific = {
+       .compat_setsockopt = compat_ip_setsockopt,
+       .compat_getsockopt = compat_ip_getsockopt,
+ #endif
++      .mtu_reduced       = tcp_v4_mtu_reduced,
+ };
+ EXPORT_SYMBOL(ipv4_specific);
+ 
+@@ -2499,7 +2501,6 @@ struct proto tcp_prot = {
+       .sendpage               = tcp_sendpage,
+       .backlog_rcv            = tcp_v4_do_rcv,
+       .release_cb             = tcp_release_cb,
+-      .mtu_reduced            = tcp_v4_mtu_reduced,
+       .hash                   = inet_hash,
+       .unhash                 = inet_unhash,
+       .get_port               = inet_csk_get_port,
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index 179b51e6bda3..4e4932b5079b 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -800,7 +800,7 @@ void tcp_release_cb(struct sock *sk)
+               __sock_put(sk);
+       }
+       if (flags & (1UL << TCP_MTU_REDUCED_DEFERRED)) {
+-              sk->sk_prot->mtu_reduced(sk);
++              inet_csk(sk)->icsk_af_ops->mtu_reduced(sk);
+               __sock_put(sk);
+       }
+ }
+@@ -1916,8 +1916,11 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
+               tso_segs = tcp_init_tso_segs(sk, skb, mss_now);
+               BUG_ON(!tso_segs);
+ 
+-              if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE)
++              if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE) {
++                      /* "when" is used as a start point for the retransmit timer */
++                      TCP_SKB_CB(skb)->when = tcp_time_stamp;
+                       goto repair; /* Skip network transmission */
++              }
+ 
+               cwnd_quota = tcp_cwnd_test(tp, skb);
+               if (!cwnd_quota) {
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index 5667b3003af9..4a9a34954923 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -1679,14 +1679,12 @@ void addrconf_dad_failure(struct inet6_ifaddr *ifp)
+       addrconf_mod_dad_work(ifp, 0);
+ }
+ 
+-/* Join to solicited addr multicast group. */
+-
++/* Join to solicited addr multicast group.
++ * caller must hold RTNL */
+ void addrconf_join_solict(struct net_device *dev, const struct in6_addr *addr)
+ {
+       struct in6_addr maddr;
+ 
+-      ASSERT_RTNL();
+-
+       if (dev->flags&(IFF_LOOPBACK|IFF_NOARP))
+               return;
+ 
+@@ -1694,12 +1692,11 @@ void addrconf_join_solict(struct net_device *dev, const struct in6_addr *addr)
+       ipv6_dev_mc_inc(dev, &maddr);
+ }
+ 
++/* caller must hold RTNL */
+ void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr)
+ {
+       struct in6_addr maddr;
+ 
+-      ASSERT_RTNL();
+-
+       if (idev->dev->flags&(IFF_LOOPBACK|IFF_NOARP))
+               return;
+ 
+@@ -1707,12 +1704,11 @@ void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr)
+       __ipv6_dev_mc_dec(idev, &maddr);
+ }
+ 
++/* caller must hold RTNL */
+ static void addrconf_join_anycast(struct inet6_ifaddr *ifp)
+ {
+       struct in6_addr addr;
+ 
+-      ASSERT_RTNL();
+-
+       if (ifp->prefix_len >= 127) /* RFC 6164 */
+               return;
+       ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
+@@ -1721,12 +1717,11 @@ static void addrconf_join_anycast(struct inet6_ifaddr *ifp)
+       ipv6_dev_ac_inc(ifp->idev->dev, &addr);
+ }
+ 
++/* caller must hold RTNL */
+ static void addrconf_leave_anycast(struct inet6_ifaddr *ifp)
+ {
+       struct in6_addr addr;
+ 
+-      ASSERT_RTNL();
+-
+       if (ifp->prefix_len >= 127) /* RFC 6164 */
+               return;
+       ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
+@@ -4751,10 +4746,11 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
+ 
+               if (ip6_del_rt(ifp->rt))
+                       dst_free(&ifp->rt->dst);
++
++              rt_genid_bump_ipv6(net);
+               break;
+       }
+       atomic_inc(&net->ipv6.dev_addr_genid);
+-      rt_genid_bump_ipv6(net);
+ }
+ 
+ static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
+diff --git a/net/ipv6/addrconf_core.c b/net/ipv6/addrconf_core.c
+index e6960457f625..98cc4cd570e2 100644
+--- a/net/ipv6/addrconf_core.c
++++ b/net/ipv6/addrconf_core.c
+@@ -8,6 +8,13 @@
+ #include <net/addrconf.h>
+ #include <net/ip.h>
+ 
++/* if ipv6 module registers this function is used by xfrm to force all
++ * sockets to relookup their nodes - this is fairly expensive, be
++ * careful
++ */
++void (*__fib6_flush_trees)(struct net *);
++EXPORT_SYMBOL(__fib6_flush_trees);
++
+ #define IPV6_ADDR_SCOPE_TYPE(scope)   ((scope) << 16)
+ 
+ static inline unsigned int ipv6_addr_scope2type(unsigned int scope)
+diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c
+index 210183244689..ff2de7d9d8e6 100644
+--- a/net/ipv6/anycast.c
++++ b/net/ipv6/anycast.c
+@@ -77,6 +77,7 @@ int ipv6_sock_ac_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
+       pac->acl_next = NULL;
+       pac->acl_addr = *addr;
+ 
++      rtnl_lock();
+       rcu_read_lock();
+       if (ifindex == 0) {
+               struct rt6_info *rt;
+@@ -137,6 +138,7 @@ int ipv6_sock_ac_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
+ 
+ error:
+       rcu_read_unlock();
++      rtnl_unlock();
+       if (pac)
+               sock_kfree_s(sk, pac, sizeof(*pac));
+       return err;
+@@ -171,11 +173,13 @@ int ipv6_sock_ac_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
+ 
+       spin_unlock_bh(&ipv6_sk_ac_lock);
+ 
++      rtnl_lock();
+       rcu_read_lock();
+       dev = dev_get_by_index_rcu(net, pac->acl_ifindex);
+       if (dev)
+               ipv6_dev_ac_dec(dev, &pac->acl_addr);
+       rcu_read_unlock();
++      rtnl_unlock();
+ 
+       sock_kfree_s(sk, pac, sizeof(*pac));
+       return 0;
+@@ -198,6 +202,7 @@ void ipv6_sock_ac_close(struct sock *sk)
+       spin_unlock_bh(&ipv6_sk_ac_lock);
+ 
+       prev_index = 0;
++      rtnl_lock();
+       rcu_read_lock();
+       while (pac) {
+               struct ipv6_ac_socklist *next = pac->acl_next;
+@@ -212,6 +217,7 @@ void ipv6_sock_ac_close(struct sock *sk)
+               pac = next;
+       }
+       rcu_read_unlock();
++      rtnl_unlock();
+ }
+ 
+ static void aca_put(struct ifacaddr6 *ac)
+@@ -233,6 +239,8 @@ int ipv6_dev_ac_inc(struct net_device *dev, const struct in6_addr *addr)
+       struct rt6_info *rt;
+       int err;
+ 
++      ASSERT_RTNL();
++
+       idev = in6_dev_get(dev);
+ 
+       if (idev == NULL)
+@@ -302,6 +310,8 @@ int __ipv6_dev_ac_dec(struct inet6_dev *idev, const struct in6_addr *addr)
+ {
+       struct ifacaddr6 *aca, *prev_aca;
+ 
++      ASSERT_RTNL();
++
+       write_lock_bh(&idev->lock);
+       prev_aca = NULL;
+       for (aca = idev->ac_list; aca; aca = aca->aca_next) {
+diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
+index cb4459bd1d29..97b9fa8de377 100644
+--- a/net/ipv6/ip6_fib.c
++++ b/net/ipv6/ip6_fib.c
+@@ -643,7 +643,7 @@ static int fib6_commit_metrics(struct dst_entry *dst,
+       if (dst->flags & DST_HOST) {
+               mp = dst_metrics_write_ptr(dst);
+       } else {
+-              mp = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL);
++              mp = kzalloc(sizeof(u32) * RTAX_MAX, GFP_ATOMIC);
+               if (!mp)
+                       return -ENOMEM;
+               dst_init_metrics(dst, mp, 0);
+@@ -1605,6 +1605,24 @@ static void fib6_prune_clones(struct net *net, struct fib6_node *fn)
+       fib6_clean_tree(net, fn, fib6_prune_clone, 1, NULL);
+ }
+ 
++static int fib6_update_sernum(struct rt6_info *rt, void *arg)
++{
++      __u32 sernum = *(__u32 *)arg;
++
++      if (rt->rt6i_node &&
++          rt->rt6i_node->fn_sernum != sernum)
++              rt->rt6i_node->fn_sernum = sernum;
++
++      return 0;
++}
++
++static void fib6_flush_trees(struct net *net)
++{
++      __u32 new_sernum = fib6_new_sernum();
++
++      fib6_clean_all(net, fib6_update_sernum, &new_sernum);
++}
++
+ /*
+  *    Garbage collection
+  */
+@@ -1788,6 +1806,8 @@ int __init fib6_init(void)
+                             NULL);
+       if (ret)
+               goto out_unregister_subsys;
++
++      __fib6_flush_trees = fib6_flush_trees;
+ out:
+       return ret;
+ 
+diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
+index 3873181ed856..43bc1fc24621 100644
+--- a/net/ipv6/ip6_gre.c
++++ b/net/ipv6/ip6_gre.c
+@@ -778,7 +778,7 @@ static inline int ip6gre_xmit_ipv4(struct sk_buff *skb, struct net_device *dev)
+               encap_limit = t->parms.encap_limit;
+ 
+       memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
+-      fl6.flowi6_proto = IPPROTO_IPIP;
++      fl6.flowi6_proto = IPPROTO_GRE;
+ 
+       dsfield = ipv4_get_dsfield(iph);
+ 
+@@ -828,7 +828,7 @@ static inline int ip6gre_xmit_ipv6(struct sk_buff *skb, struct net_device *dev)
+               encap_limit = t->parms.encap_limit;
+ 
+       memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
+-      fl6.flowi6_proto = IPPROTO_IPV6;
++      fl6.flowi6_proto = IPPROTO_GRE;
+ 
+       dsfield = ipv6_get_dsfield(ipv6h);
+       if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index 45702b8cd141..59345af6d3a7 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -1008,7 +1008,7 @@ struct dst_entry *ip6_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
+       if (final_dst)
+               fl6->daddr = *final_dst;
+ 
+-      return xfrm_lookup(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
++      return xfrm_lookup_route(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
+ }
+ EXPORT_SYMBOL_GPL(ip6_dst_lookup_flow);
+ 
+@@ -1040,7 +1040,7 @@ struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
+       if (final_dst)
+               fl6->daddr = *final_dst;
+ 
+-      return xfrm_lookup(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
++      return xfrm_lookup_route(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
+ }
+ EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup_flow);
+ 
+diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
+index 617f0958e164..a23b655a7627 100644
+--- a/net/ipv6/mcast.c
++++ b/net/ipv6/mcast.c
+@@ -172,6 +172,7 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
+       mc_lst->next = NULL;
+       mc_lst->addr = *addr;
+ 
++      rtnl_lock();
+       rcu_read_lock();
+       if (ifindex == 0) {
+               struct rt6_info *rt;
+@@ -185,6 +186,7 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
+ 
+       if (dev == NULL) {
+               rcu_read_unlock();
++              rtnl_unlock();
+               sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
+               return -ENODEV;
+       }
+@@ -202,6 +204,7 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
+ 
+       if (err) {
+               rcu_read_unlock();
++              rtnl_unlock();
+               sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
+               return err;
+       }
+@@ -212,6 +215,7 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
+       spin_unlock(&ipv6_sk_mc_lock);
+ 
+       rcu_read_unlock();
++      rtnl_unlock();
+ 
+       return 0;
+ }
+@@ -229,6 +233,7 @@ int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
+       if (!ipv6_addr_is_multicast(addr))
+               return -EINVAL;
+ 
++      rtnl_lock();
+       spin_lock(&ipv6_sk_mc_lock);
+       for (lnk = &np->ipv6_mc_list;
+            (mc_lst = rcu_dereference_protected(*lnk,
+@@ -252,12 +257,15 @@ int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
+                       } else
+                               (void) ip6_mc_leave_src(sk, mc_lst, NULL);
+                       rcu_read_unlock();
++                      rtnl_unlock();
++
+                       atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc);
+                       kfree_rcu(mc_lst, rcu);
+                       return 0;
+               }
+       }
+       spin_unlock(&ipv6_sk_mc_lock);
++      rtnl_unlock();
+ 
+       return -EADDRNOTAVAIL;
+ }
+@@ -302,6 +310,7 @@ void ipv6_sock_mc_close(struct sock *sk)
+       if (!rcu_access_pointer(np->ipv6_mc_list))
+               return;
+ 
++      rtnl_lock();
+       spin_lock(&ipv6_sk_mc_lock);
+       while ((mc_lst = rcu_dereference_protected(np->ipv6_mc_list,
+                               lockdep_is_held(&ipv6_sk_mc_lock))) != NULL) {
+@@ -328,6 +337,7 @@ void ipv6_sock_mc_close(struct sock *sk)
+               spin_lock(&ipv6_sk_mc_lock);
+       }
+       spin_unlock(&ipv6_sk_mc_lock);
++      rtnl_unlock();
+ }
+ 
+ int ip6_mc_source(int add, int omode, struct sock *sk,
+@@ -845,6 +855,8 @@ int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr)
+       struct ifmcaddr6 *mc;
+       struct inet6_dev *idev;
+ 
++      ASSERT_RTNL();
++
+       /* we need to take a reference on idev */
+       idev = in6_dev_get(dev);
+ 
+@@ -916,6 +928,8 @@ int __ipv6_dev_mc_dec(struct inet6_dev *idev, const struct in6_addr *addr)
+ {
+       struct ifmcaddr6 *ma, **map;
+ 
++      ASSERT_RTNL();
++
+       write_lock_bh(&idev->lock);
+       for (map = &idev->mc_list; (ma=*map) != NULL; map = &ma->next) {
+               if (ipv6_addr_equal(&ma->mca_addr, addr)) {
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index f23fbd28a501..bafde82324c5 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -314,7 +314,6 @@ static inline struct rt6_info *ip6_dst_alloc(struct net *net,
+ 
+               memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst));
+               rt6_init_peer(rt, table ? &table->tb6_peers : net->ipv6.peers);
+-              rt->rt6i_genid = rt_genid_ipv6(net);
+               INIT_LIST_HEAD(&rt->rt6i_siblings);
+       }
+       return rt;
+@@ -1098,9 +1097,6 @@ static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
+        * DST_OBSOLETE_FORCE_CHK which forces validation calls down
+        * into this function always.
+        */
+-      if (rt->rt6i_genid != rt_genid_ipv6(dev_net(rt->dst.dev)))
+-              return NULL;
+-
+       if (!rt->rt6i_node || (rt->rt6i_node->fn_sernum != cookie))
+               return NULL;
+ 
+diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
+index 4f408176dc64..9906535ce9de 100644
+--- a/net/ipv6/sit.c
++++ b/net/ipv6/sit.c
+@@ -101,19 +101,19 @@ static struct ip_tunnel *ipip6_tunnel_lookup(struct net *net,
+       for_each_ip_tunnel_rcu(t, sitn->tunnels_r_l[h0 ^ h1]) {
+               if (local == t->parms.iph.saddr &&
+                   remote == t->parms.iph.daddr &&
+-                  (!dev || !t->parms.link || dev->iflink == t->parms.link) &&
++                  (!dev || !t->parms.link || dev->ifindex == t->parms.link) &&
+                   (t->dev->flags & IFF_UP))
+                       return t;
+       }
+       for_each_ip_tunnel_rcu(t, sitn->tunnels_r[h0]) {
+               if (remote == t->parms.iph.daddr &&
+-                  (!dev || !t->parms.link || dev->iflink == t->parms.link) &&
++                  (!dev || !t->parms.link || dev->ifindex == t->parms.link) &&
+                   (t->dev->flags & IFF_UP))
+                       return t;
+       }
+       for_each_ip_tunnel_rcu(t, sitn->tunnels_l[h1]) {
+               if (local == t->parms.iph.saddr &&
+-                  (!dev || !t->parms.link || dev->iflink == t->parms.link) &&
++                  (!dev || !t->parms.link || dev->ifindex == t->parms.link) &&
+                   (t->dev->flags & IFF_UP))
+                       return t;
+       }
+diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
+index 229239ad96b1..cb5125c5328d 100644
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -1681,6 +1681,7 @@ static const struct inet_connection_sock_af_ops ipv6_specific = {
+       .compat_setsockopt = compat_ipv6_setsockopt,
+       .compat_getsockopt = compat_ipv6_getsockopt,
+ #endif
++      .mtu_reduced       = tcp_v6_mtu_reduced,
+ };
+ 
+ #ifdef CONFIG_TCP_MD5SIG
+@@ -1711,6 +1712,7 @@ static const struct inet_connection_sock_af_ops ipv6_mapped = {
+       .compat_setsockopt = compat_ipv6_setsockopt,
+       .compat_getsockopt = compat_ipv6_getsockopt,
+ #endif
++      .mtu_reduced       = tcp_v4_mtu_reduced,
+ };
+ 
+ #ifdef CONFIG_TCP_MD5SIG
+@@ -1950,7 +1952,6 @@ struct proto tcpv6_prot = {
+       .sendpage               = tcp_sendpage,
+       .backlog_rcv            = tcp_v6_do_rcv,
+       .release_cb             = tcp_release_cb,
+-      .mtu_reduced            = tcp_v6_mtu_reduced,
+       .hash                   = tcp_v6_hash,
+       .unhash                 = inet_unhash,
+       .get_port               = inet_csk_get_port,
+diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
+index 13752d96275e..b704a9356208 100644
+--- a/net/l2tp/l2tp_ppp.c
++++ b/net/l2tp/l2tp_ppp.c
+@@ -755,7 +755,8 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
+       /* If PMTU discovery was enabled, use the MTU that was discovered */
+       dst = sk_dst_get(tunnel->sock);
+       if (dst != NULL) {
+-              u32 pmtu = dst_mtu(__sk_dst_get(tunnel->sock));
++              u32 pmtu = dst_mtu(dst);
++
+               if (pmtu != 0)
+                       session->mtu = session->mru = pmtu -
+                               PPPOL2TP_HEADER_OVERHEAD;
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index e6fac7e3db52..48fc607a211e 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -205,7 +205,7 @@ static int __netlink_deliver_tap_skb(struct sk_buff *skb,
+               nskb->protocol = htons((u16) sk->sk_protocol);
+               nskb->pkt_type = netlink_is_kernel(sk) ?
+                                PACKET_KERNEL : PACKET_USER;
+-
++              skb_reset_network_header(nskb);
+               ret = dev_queue_xmit(nskb);
+               if (unlikely(ret > 0))
+                       ret = net_xmit_errno(ret);
+diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
+index e70d8b18e962..10736e6b192b 100644
+--- a/net/openvswitch/actions.c
++++ b/net/openvswitch/actions.c
+@@ -42,6 +42,9 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
+ 
+ static int make_writable(struct sk_buff *skb, int write_len)
+ {
++      if (!pskb_may_pull(skb, write_len))
++              return -ENOMEM;
++
+       if (!skb_cloned(skb) || skb_clone_writable(skb, write_len))
+               return 0;
+ 
+@@ -70,6 +73,8 @@ static int __pop_vlan_tci(struct sk_buff *skb, __be16 *current_tci)
+ 
+       vlan_set_encap_proto(skb, vhdr);
+       skb->mac_header += VLAN_HLEN;
++      if (skb_network_offset(skb) < ETH_HLEN)
++              skb_set_network_header(skb, ETH_HLEN);
+       skb_reset_mac_len(skb);
+ 
+       return 0;
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index b85c67ccb797..3eb786fd3f22 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -636,6 +636,7 @@ static void init_prb_bdqc(struct packet_sock *po,
+       p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov);
+       p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv;
+ 
++      p1->max_frame_len = p1->kblk_size - BLK_PLUS_PRIV(p1->blk_sizeof_priv);
+       prb_init_ft_ops(p1, req_u);
+       prb_setup_retire_blk_timer(po, tx_ring);
+       prb_open_block(p1, pbd);
+@@ -1946,6 +1947,18 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
+                       if ((int)snaplen < 0)
+                               snaplen = 0;
+               }
++      } else if (unlikely(macoff + snaplen >
++                          GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) {
++              u32 nval;
++
++              nval = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len - macoff;
++              pr_err_once("tpacket_rcv: packet too big, clamped from %u to %u. macoff=%u\n",
++                          snaplen, nval, macoff);
++              snaplen = nval;
++              if (unlikely((int)snaplen < 0)) {
++                      snaplen = 0;
++                      macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len;
++              }
+       }
+       spin_lock(&sk->sk_receive_queue.lock);
+       h.raw = packet_current_rx_frame(po, skb,
+@@ -3789,6 +3802,10 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
+                       goto out;
+               if (unlikely(req->tp_block_size & (PAGE_SIZE - 1)))
+                       goto out;
++              if (po->tp_version >= TPACKET_V3 &&
++                  (int)(req->tp_block_size -
++                        BLK_PLUS_PRIV(req_u->req3.tp_sizeof_priv)) <= 0)
++                      goto out;
+               if (unlikely(req->tp_frame_size < po->tp_hdrlen +
+                                       po->tp_reserve))
+                       goto out;
+diff --git a/net/packet/internal.h b/net/packet/internal.h
+index eb9580a6b25f..cdddf6a30399 100644
+--- a/net/packet/internal.h
++++ b/net/packet/internal.h
+@@ -29,6 +29,7 @@ struct tpacket_kbdq_core {
+       char            *pkblk_start;
+       char            *pkblk_end;
+       int             kblk_size;
++      unsigned int    max_frame_len;
+       unsigned int    knum_blocks;
+       uint64_t        knxt_seq_num;
+       char            *prev;
+diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
+index 45527e6b52db..3b2617aa6bcd 100644
+--- a/net/sched/cls_api.c
++++ b/net/sched/cls_api.c
+@@ -549,6 +549,7 @@ void tcf_exts_change(struct tcf_proto *tp, struct tcf_exts *dst,
+       tcf_tree_lock(tp);
+       list_splice_init(&dst->actions, &tmp);
+       list_splice(&src->actions, &dst->actions);
++      dst->type = src->type;
+       tcf_tree_unlock(tp);
+       tcf_action_destroy(&tmp, TCA_ACT_UNBIND);
+ #endif
+diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
+index 5170a1ff95a1..7194fe8589b0 100644
+--- a/net/sctp/sm_statefuns.c
++++ b/net/sctp/sm_statefuns.c
+@@ -1775,9 +1775,22 @@ static sctp_disposition_t sctp_sf_do_dupcook_a(struct net *net,
+       /* Update the content of current association. */
+       sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc));
+       sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev));
+-      sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
+-                      SCTP_STATE(SCTP_STATE_ESTABLISHED));
+-      sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
++      if (sctp_state(asoc, SHUTDOWN_PENDING) &&
++          (sctp_sstate(asoc->base.sk, CLOSING) ||
++           sock_flag(asoc->base.sk, SOCK_DEAD))) {
++              /* if were currently in SHUTDOWN_PENDING, but the socket
++               * has been closed by user, don't transition to ESTABLISHED.
++               * Instead trigger SHUTDOWN bundled with COOKIE_ACK.
++               */
++              sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
++              return sctp_sf_do_9_2_start_shutdown(net, ep, asoc,
++                                                   SCTP_ST_CHUNK(0), NULL,
++                                                   commands);
++      } else {
++              sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
++                              SCTP_STATE(SCTP_STATE_ESTABLISHED));
++              sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
++      }
+       return SCTP_DISPOSITION_CONSUME;
+ 
+ nomem_ev:
+diff --git a/net/tipc/port.h b/net/tipc/port.h
+index cf4ca5b1d9a4..3f34cac07a2c 100644
+--- a/net/tipc/port.h
++++ b/net/tipc/port.h
+@@ -229,9 +229,12 @@ static inline int tipc_port_importance(struct tipc_port *port)
+       return msg_importance(&port->phdr);
+ }
+ 
+-static inline void tipc_port_set_importance(struct tipc_port *port, int imp)
++static inline int tipc_port_set_importance(struct tipc_port *port, int imp)
+ {
++      if (imp > TIPC_CRITICAL_IMPORTANCE)
++              return -EINVAL;
+       msg_set_importance(&port->phdr, (u32)imp);
++      return 0;
+ }
+ 
+ #endif
+diff --git a/net/tipc/socket.c b/net/tipc/socket.c
+index ef0475568f9e..4093fd81edd5 100644
+--- a/net/tipc/socket.c
++++ b/net/tipc/socket.c
+@@ -1841,7 +1841,7 @@ static int tipc_setsockopt(struct socket *sock, int lvl, int opt,
+ 
+       switch (opt) {
+       case TIPC_IMPORTANCE:
+-              tipc_port_set_importance(port, value);
++              res = tipc_port_set_importance(port, value);
+               break;
+       case TIPC_SRC_DROPPABLE:
+               if (sock->type != SOCK_STREAM)
+diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
+index 0525d78ba328..93e755b97486 100644
+--- a/net/xfrm/xfrm_policy.c
++++ b/net/xfrm/xfrm_policy.c
+@@ -39,6 +39,11 @@
+ #define XFRM_QUEUE_TMO_MAX ((unsigned)(60*HZ))
+ #define XFRM_MAX_QUEUE_LEN    100
+ 
++struct xfrm_flo {
++      struct dst_entry *dst_orig;
++      u8 flags;
++};
++
+ static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock);
+ static struct xfrm_policy_afinfo __rcu *xfrm_policy_afinfo[NPROTO]
+                                               __read_mostly;
+@@ -1877,13 +1882,14 @@ static int xdst_queue_output(struct sock *sk, struct sk_buff *skb)
+ }
+ 
+ static struct xfrm_dst *xfrm_create_dummy_bundle(struct net *net,
+-                                               struct dst_entry *dst,
++                                               struct xfrm_flo *xflo,
+                                                const struct flowi *fl,
+                                                int num_xfrms,
+                                                u16 family)
+ {
+       int err;
+       struct net_device *dev;
++      struct dst_entry *dst;
+       struct dst_entry *dst1;
+       struct xfrm_dst *xdst;
+ 
+@@ -1891,9 +1897,12 @@ static struct xfrm_dst *xfrm_create_dummy_bundle(struct net *net,
+       if (IS_ERR(xdst))
+               return xdst;
+ 
+-      if (net->xfrm.sysctl_larval_drop || num_xfrms <= 0)
++      if (!(xflo->flags & XFRM_LOOKUP_QUEUE) ||
++          net->xfrm.sysctl_larval_drop ||
++          num_xfrms <= 0)
+               return xdst;
+ 
++      dst = xflo->dst_orig;
+       dst1 = &xdst->u.dst;
+       dst_hold(dst);
+       xdst->route = dst;
+@@ -1935,7 +1944,7 @@ static struct flow_cache_object *
+ xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir,
+                  struct flow_cache_object *oldflo, void *ctx)
+ {
+-      struct dst_entry *dst_orig = (struct dst_entry *)ctx;
++      struct xfrm_flo *xflo = (struct xfrm_flo *)ctx;
+       struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
+       struct xfrm_dst *xdst, *new_xdst;
+       int num_pols = 0, num_xfrms = 0, i, err, pol_dead;
+@@ -1976,7 +1985,8 @@ xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir,
+                       goto make_dummy_bundle;
+       }
+ 
+-      new_xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family, dst_orig);
++      new_xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family,
++                                                xflo->dst_orig);
+       if (IS_ERR(new_xdst)) {
+               err = PTR_ERR(new_xdst);
+               if (err != -EAGAIN)
+@@ -2010,7 +2020,7 @@ make_dummy_bundle:
+       /* We found policies, but there's no bundles to instantiate:
+        * either because the policy blocks, has no transformations or
+        * we could not build template (no xfrm_states).*/
+-      xdst = xfrm_create_dummy_bundle(net, dst_orig, fl, num_xfrms, family);
++      xdst = xfrm_create_dummy_bundle(net, xflo, fl, num_xfrms, family);
+       if (IS_ERR(xdst)) {
+               xfrm_pols_put(pols, num_pols);
+               return ERR_CAST(xdst);
+@@ -2104,13 +2114,18 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
+       }
+ 
+       if (xdst == NULL) {
++              struct xfrm_flo xflo;
++
++              xflo.dst_orig = dst_orig;
++              xflo.flags = flags;
++
+               /* To accelerate a bit...  */
+               if ((dst_orig->flags & DST_NOXFRM) ||
+                   !net->xfrm.policy_count[XFRM_POLICY_OUT])
+                       goto nopol;
+ 
+               flo = flow_cache_lookup(net, fl, family, dir,
+-                                      xfrm_bundle_lookup, dst_orig);
++                                      xfrm_bundle_lookup, &xflo);
+               if (flo == NULL)
+                       goto nopol;
+               if (IS_ERR(flo)) {
+@@ -2138,7 +2153,7 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
+                       xfrm_pols_put(pols, drop_pols);
+                       XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
+ 
+-                      return make_blackhole(net, family, dst_orig);
++                      return ERR_PTR(-EREMOTE);
+               }
+ 
+               err = -EAGAIN;
+@@ -2195,6 +2210,23 @@ dropdst:
+ }
+ EXPORT_SYMBOL(xfrm_lookup);
+ 
++/* Callers of xfrm_lookup_route() must ensure a call to dst_output().
++ * Otherwise we may send out blackholed packets.
++ */
++struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
++                                  const struct flowi *fl,
++                                  struct sock *sk, int flags)
++{
++      struct dst_entry *dst = xfrm_lookup(net, dst_orig, fl, sk,
++                                          flags | XFRM_LOOKUP_QUEUE);
++
++      if (IS_ERR(dst) && PTR_ERR(dst) == -EREMOTE)
++              return make_blackhole(net, dst_orig->ops->family, dst_orig);
++
++      return dst;
++}
++EXPORT_SYMBOL(xfrm_lookup_route);
++
+ static inline int
+ xfrm_secpath_reject(int idx, struct sk_buff *skb, const struct flowi *fl)
+ {
+@@ -2460,7 +2492,7 @@ int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
+ 
+       skb_dst_force(skb);
+ 
+-      dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, 0);
++      dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, XFRM_LOOKUP_QUEUE);
+       if (IS_ERR(dst)) {
+               res = 0;
+               dst = NULL;
