diff --git a/Makefile b/Makefile
index 973e8568e52d..9c894e7e586d 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 3
 PATCHLEVEL = 18
-SUBLEVEL = 116
+SUBLEVEL = 117
 EXTRAVERSION =
 NAME = Diseased Newt
 
@@ -613,6 +613,7 @@ KBUILD_CFLAGS       += $(call cc-disable-warning,frame-address,)
 KBUILD_CFLAGS  += $(call cc-disable-warning, format-truncation)
 KBUILD_CFLAGS  += $(call cc-disable-warning, format-overflow)
 KBUILD_CFLAGS  += $(call cc-disable-warning, int-in-bool-context)
+KBUILD_CFLAGS  += $(call cc-disable-warning, attribute-alias)
 KBUILD_CFLAGS  += $(call cc-option,-fno-PIE)
 KBUILD_AFLAGS  += $(call cc-option,-fno-PIE)
 
diff --git a/arch/arc/include/asm/page.h b/arch/arc/include/asm/page.h
index 9c8aa41e45c2..25f6583a7a2c 100644
--- a/arch/arc/include/asm/page.h
+++ b/arch/arc/include/asm/page.h
@@ -97,7 +97,7 @@ typedef unsigned long pgtable_t;
 #define virt_addr_valid(kaddr)  pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
 
 /* Default Permissions for stack/heaps pages (Non Executable) */
-#define VM_DATA_DEFAULT_FLAGS   (VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE)
+#define VM_DATA_DEFAULT_FLAGS   (VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
 
 #define WANT_PAGE_VIRTUAL   1
 
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
index 7670f33b9ce2..0f12756d6389 100644
--- a/arch/arc/include/asm/pgtable.h
+++ b/arch/arc/include/asm/pgtable.h
@@ -372,7 +372,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
 
 /* Decode a PTE containing swap "identifier "into constituents */
 #define __swp_type(pte_lookalike)      (((pte_lookalike).val) & 0x1f)
-#define __swp_offset(pte_lookalike)    ((pte_lookalike).val << 13)
+#define __swp_offset(pte_lookalike)    ((pte_lookalike).val >> 13)
 
 /* NOPs, to keep generic kernel happy */
 #define __pte_to_swp_entry(pte)        ((swp_entry_t) { pte_val(pte) })
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 4767eb9caa78..69b6c3f125c2 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -220,7 +220,7 @@ extern int __put_user_8(void *, unsigned long long);
        ({                                                              \
                unsigned long __limit = current_thread_info()->addr_limit - 1; \
                const typeof(*(p)) __user *__tmp_p = (p);               \
-               register const typeof(*(p)) __r2 asm("r2") = (x);       \
+               register typeof(*(p)) __r2 asm("r2") = (x);     \
                register const typeof(*(p)) __user *__p asm("r0") = __tmp_p; \
                register unsigned long __l asm("r1") = __limit;         \
                register int __e asm("r0");                             \
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index d5d3af159bec..291cb502d141 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -2240,9 +2240,6 @@ static ssize_t store_int_with_restart(struct device *s,
        if (check_interval == old_check_interval)
                return ret;
 
-       if (check_interval < 1)
-               check_interval = 1;
-
        mutex_lock(&mce_sysfs_mutex);
        mce_restart();
        mutex_unlock(&mce_sysfs_mutex);
diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
index 8a998e3884ce..8ca23539c23a 100644
--- a/drivers/net/can/xilinx_can.c
+++ b/drivers/net/can/xilinx_can.c
@@ -25,8 +25,10 @@
 #include <linux/module.h>
 #include <linux/netdevice.h>
 #include <linux/of.h>
+#include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/skbuff.h>
+#include <linux/spinlock.h>
 #include <linux/string.h>
 #include <linux/types.h>
 #include <linux/can/dev.h>
@@ -100,7 +102,7 @@ enum xcan_reg {
 #define XCAN_INTR_ALL          (XCAN_IXR_TXOK_MASK | XCAN_IXR_BSOFF_MASK |\
                                 XCAN_IXR_WKUP_MASK | XCAN_IXR_SLP_MASK | \
                                 XCAN_IXR_RXNEMP_MASK | XCAN_IXR_ERROR_MASK | \
-                                XCAN_IXR_ARBLST_MASK | XCAN_IXR_RXOK_MASK)
+                                XCAN_IXR_RXOFLW_MASK | XCAN_IXR_ARBLST_MASK)
 
 /* CAN register bit shift - XCAN_<REG>_<BIT>_SHIFT */
 #define XCAN_BTR_SJW_SHIFT             7  /* Synchronous jump width */
@@ -117,6 +119,7 @@ enum xcan_reg {
 /**
  * struct xcan_priv - This definition define CAN driver instance
  * @can:                       CAN private data structure.
+ * @tx_lock:                   Lock for synchronizing TX interrupt handling
  * @tx_head:                   Tx CAN packets ready to send on the queue
  * @tx_tail:                   Tx CAN packets successfully sended on the queue
  * @tx_max:                    Maximum number packets the driver can send
@@ -131,6 +134,7 @@ enum xcan_reg {
  */
 struct xcan_priv {
        struct can_priv can;
+       spinlock_t tx_lock;
        unsigned int tx_head;
        unsigned int tx_tail;
        unsigned int tx_max;
@@ -158,6 +162,11 @@ static const struct can_bittiming_const xcan_bittiming_const = {
        .brp_inc = 1,
 };
 
+#define XCAN_CAP_WATERMARK     0x0001
+struct xcan_devtype_data {
+       unsigned int caps;
+};
+
 /**
  * xcan_write_reg_le - Write a value to the device register little endian
  * @priv:      Driver private data structure
@@ -237,6 +246,10 @@ static int set_reset_mode(struct net_device *ndev)
                usleep_range(500, 10000);
        }
 
+       /* reset clears FIFOs */
+       priv->tx_head = 0;
+       priv->tx_tail = 0;
+
        return 0;
 }
 
@@ -391,6 +404,7 @@ static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
        struct net_device_stats *stats = &ndev->stats;
        struct can_frame *cf = (struct can_frame *)skb->data;
        u32 id, dlc, data[2] = {0, 0};
+       unsigned long flags;
 
        if (can_dropped_invalid_skb(ndev, skb))
                return NETDEV_TX_OK;
@@ -438,6 +452,9 @@ static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
                data[1] = be32_to_cpup((__be32 *)(cf->data + 4));
 
        can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max);
+
+       spin_lock_irqsave(&priv->tx_lock, flags);
+
        priv->tx_head++;
 
        /* Write the Frame to Xilinx CAN TX FIFO */
@@ -453,10 +470,16 @@ static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
                stats->tx_bytes += cf->can_dlc;
        }
 
+       /* Clear TX-FIFO-empty interrupt for xcan_tx_interrupt() */
+       if (priv->tx_max > 1)
+               priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXFEMP_MASK);
+
        /* Check if the TX buffer is full */
        if ((priv->tx_head - priv->tx_tail) == priv->tx_max)
                netif_stop_queue(ndev);
 
+       spin_unlock_irqrestore(&priv->tx_lock, flags);
+
        return NETDEV_TX_OK;
 }
 
@@ -598,7 +621,6 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
        if (isr & XCAN_IXR_RXOFLW_MASK) {
                stats->rx_over_errors++;
                stats->rx_errors++;
-               priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
                if (skb) {
                        cf->can_id |= CAN_ERR_CRTL;
                        cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
@@ -710,15 +732,7 @@ static int xcan_rx_poll(struct napi_struct *napi, int quota)
 
        isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
        while ((isr & XCAN_IXR_RXNEMP_MASK) && (work_done < quota)) {
-               if (isr & XCAN_IXR_RXOK_MASK) {
-                       priv->write_reg(priv, XCAN_ICR_OFFSET,
-                               XCAN_IXR_RXOK_MASK);
-                       work_done += xcan_rx(ndev);
-               } else {
-                       priv->write_reg(priv, XCAN_ICR_OFFSET,
-                               XCAN_IXR_RXNEMP_MASK);
-                       break;
-               }
+               work_done += xcan_rx(ndev);
                priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_RXNEMP_MASK);
                isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
        }
@@ -729,7 +743,7 @@ static int xcan_rx_poll(struct napi_struct *napi, int quota)
        if (work_done < quota) {
                napi_complete(napi);
                ier = priv->read_reg(priv, XCAN_IER_OFFSET);
-               ier |= (XCAN_IXR_RXOK_MASK | XCAN_IXR_RXNEMP_MASK);
+               ier |= XCAN_IXR_RXNEMP_MASK;
                priv->write_reg(priv, XCAN_IER_OFFSET, ier);
        }
        return work_done;
@@ -801,9 +815,9 @@ static irqreturn_t xcan_interrupt(int irq, void *dev_id)
        }
 
        /* Check for the type of receive interrupt and Processing it */
-       if (isr & (XCAN_IXR_RXNEMP_MASK | XCAN_IXR_RXOK_MASK)) {
+       if (isr & XCAN_IXR_RXNEMP_MASK) {
                ier = priv->read_reg(priv, XCAN_IER_OFFSET);
-               ier &= ~(XCAN_IXR_RXNEMP_MASK | XCAN_IXR_RXOK_MASK);
+               ier &= ~XCAN_IXR_RXNEMP_MASK;
                priv->write_reg(priv, XCAN_IER_OFFSET, ier);
                napi_schedule(&priv->napi);
        }
@@ -1032,6 +1046,18 @@ static int __maybe_unused xcan_resume(struct device *dev)
 
 static SIMPLE_DEV_PM_OPS(xcan_dev_pm_ops, xcan_suspend, xcan_resume);
 
+static const struct xcan_devtype_data xcan_zynq_data = {
+       .caps = XCAN_CAP_WATERMARK,
+};
+
+/* Match table for OF platform binding */
+static const struct of_device_id xcan_of_match[] = {
+       { .compatible = "xlnx,zynq-can-1.0", .data = &xcan_zynq_data },
+       { .compatible = "xlnx,axi-can-1.00.a", },
+       { /* end of list */ },
+};
+MODULE_DEVICE_TABLE(of, xcan_of_match);
+
 /**
  * xcan_probe - Platform registration call
  * @pdev:      Handle to the platform device structure
@@ -1046,8 +1072,10 @@ static int xcan_probe(struct platform_device *pdev)
        struct resource *res; /* IO mem resources */
        struct net_device *ndev;
        struct xcan_priv *priv;
+       const struct of_device_id *of_id;
+       int caps = 0;
        void __iomem *addr;
-       int ret, rx_max, tx_max;
+       int ret, rx_max, tx_max, tx_fifo_depth;
 
        /* Get the virtual base address for the device */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1057,7 +1085,8 @@ static int xcan_probe(struct platform_device *pdev)
                goto err;
        }
 
-       ret = of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth", &tx_max);
+       ret = of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth",
+                                  &tx_fifo_depth);
        if (ret < 0)
                goto err;
 
@@ -1065,6 +1094,30 @@ static int xcan_probe(struct platform_device *pdev)
        if (ret < 0)
                goto err;
 
+       of_id = of_match_device(xcan_of_match, &pdev->dev);
+       if (of_id) {
+               const struct xcan_devtype_data *devtype_data = of_id->data;
+
+               if (devtype_data)
+                       caps = devtype_data->caps;
+       }
+
+       /* There is no way to directly figure out how many frames have been
+        * sent when the TXOK interrupt is processed. If watermark programming
+        * is supported, we can have 2 frames in the FIFO and use TXFEMP
+        * to determine if 1 or 2 frames have been sent.
+        * Theoretically we should be able to use TXFWMEMP to determine up
+        * to 3 frames, but it seems that after putting a second frame in the
+        * FIFO, with watermark at 2 frames, it can happen that TXFWMEMP (less
+        * than 2 frames in FIFO) is set anyway with no TXOK (a frame was
+        * sent), which is not a sensible state - possibly TXFWMEMP is not
+        * completely synchronized with the rest of the bits?
+        */
+       if (caps & XCAN_CAP_WATERMARK)
+               tx_max = min(tx_fifo_depth, 2);
+       else
+               tx_max = 1;
+
        /* Create a CAN device instance */
        ndev = alloc_candev(sizeof(struct xcan_priv), tx_max);
        if (!ndev)
@@ -1079,6 +1132,7 @@ static int xcan_probe(struct platform_device *pdev)
                                        CAN_CTRLMODE_BERR_REPORTING;
        priv->reg_base = addr;
        priv->tx_max = tx_max;
+       spin_lock_init(&priv->tx_lock);
 
        /* Get IRQ for the device */
        ndev->irq = platform_get_irq(pdev, 0);
@@ -1146,9 +1200,9 @@ static int xcan_probe(struct platform_device *pdev)
        devm_can_led_init(ndev);
        clk_disable_unprepare(priv->bus_clk);
        clk_disable_unprepare(priv->can_clk);
-       netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx fifo depth:%d\n",
+       netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx fifo depth: actual %d, using %d\n",
                        priv->reg_base, ndev->irq, priv->can.clock.freq,
-                       priv->tx_max);
+                       tx_fifo_depth, priv->tx_max);
 
        return 0;
 
@@ -1184,14 +1238,6 @@ static int xcan_remove(struct platform_device *pdev)
        return 0;
 }
 
-/* Match table for OF platform binding */
-static struct of_device_id xcan_of_match[] = {
-       { .compatible = "xlnx,zynq-can-1.0", },
-       { .compatible = "xlnx,axi-can-1.00.a", },
-       { /* end of list */ },
-};
-MODULE_DEVICE_TABLE(of, xcan_of_match);
-
 static struct platform_driver xcan_driver = {
        .probe = xcan_probe,
        .remove = xcan_remove,
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index fbb0c02276f9..816b614025b4 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -2709,7 +2709,7 @@ int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
        u32 srqn = qp_get_srqn(qpc) & 0xffffff;
        int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
        struct res_srq *srq;
-       int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;
+       int local_qpn = vhcr->in_modifier & 0xffffff;
 
        err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
        if (err)
diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
index f8a76090cbca..52d023514560 100644
--- a/drivers/ptp/ptp_chardev.c
+++ b/drivers/ptp/ptp_chardev.c
@@ -88,6 +88,7 @@ int ptp_set_pinfunc(struct ptp_clock *ptp, unsigned int pin,
        case PTP_PF_PHYSYNC:
                if (chan != 0)
                        return -EINVAL;
+               break;
        default:
                return -EINVAL;
        }
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index e01d39509172..fabf77fca4ad 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1785,6 +1785,9 @@ static const struct usb_device_id acm_ids[] = {
        { USB_DEVICE(0x09d8, 0x0320), /* Elatec GmbH TWN3 */
        .driver_info = NO_UNION_NORMAL, /* has misplaced union descriptor */
        },
+       { USB_DEVICE(0x0ca6, 0xa050), /* Castles VEGA3000 */
+       .driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
+       },
 
        { USB_DEVICE(0x2912, 0x0001), /* ATOL FPrint */
        .driver_info = CLEAR_HALT_CONDITIONS,
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 12486efb043f..3d15b5fc9336 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -1151,10 +1151,14 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
 
                if (!udev || udev->state == USB_STATE_NOTATTACHED) {
                        /* Tell hub_wq to disconnect the device or
-                        * check for a new connection
+                        * check for a new connection or over current condition.
+                        * Based on USB2.0 Spec Section 11.12.5,
+                        * C_PORT_OVER_CURRENT could be set while
+                        * PORT_OVER_CURRENT is not. So check for any of them.
                         */
                        if (udev || (portstatus & USB_PORT_STAT_CONNECTION) ||
-                           (portstatus & USB_PORT_STAT_OVERCURRENT))
+                           (portstatus & USB_PORT_STAT_OVERCURRENT) ||
+                           (portchange & USB_PORT_STAT_C_OVERCURRENT))
                                set_bit(port1, hub->change_bits);
 
                } else if (portstatus & USB_PORT_STAT_ENABLE) {
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index fc4cb35561b1..570e6100134f 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -2987,7 +2987,7 @@ static int ffs_func_setup(struct usb_function *f,
        __ffs_event_add(ffs, FUNCTIONFS_SETUP);
        spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags);
 
-       return USB_GADGET_DELAYED_STATUS;
+       return creq->wLength == 0 ? USB_GADGET_DELAYED_STATUS : 0;
 }
 
 static void ffs_func_suspend(struct usb_function *f)
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index 75b5a159d607..3df305d6783c 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -610,13 +610,21 @@ static void fat_set_state(struct super_block *sb,
        brelse(bh);
 }
 
+static void fat_reset_iocharset(struct fat_mount_options *opts)
+{
+       if (opts->iocharset != fat_default_iocharset) {
+               /* Note: opts->iocharset can be NULL here */
+               kfree(opts->iocharset);
+               opts->iocharset = fat_default_iocharset;
+       }
+}
+
 static void delayed_free(struct rcu_head *p)
 {
        struct msdos_sb_info *sbi = container_of(p, struct msdos_sb_info, rcu);
        unload_nls(sbi->nls_disk);
        unload_nls(sbi->nls_io);
-       if (sbi->options.iocharset != fat_default_iocharset)
-               kfree(sbi->options.iocharset);
+       fat_reset_iocharset(&sbi->options);
        kfree(sbi);
 }
 
@@ -1031,7 +1039,7 @@ static int parse_options(struct super_block *sb, char *options, int is_vfat,
        opts->fs_fmask = opts->fs_dmask = current_umask();
        opts->allow_utime = -1;
        opts->codepage = fat_default_codepage;
-       opts->iocharset = fat_default_iocharset;
+       fat_reset_iocharset(opts);
        if (is_vfat) {
                opts->shortname = VFAT_SFN_DISPLAY_WINNT|VFAT_SFN_CREATE_WIN95;
                opts->rodir = 0;
@@ -1181,8 +1189,7 @@ static int parse_options(struct super_block *sb, char *options, int is_vfat,
 
                /* vfat specific */
                case Opt_charset:
-                       if (opts->iocharset != fat_default_iocharset)
-                               kfree(opts->iocharset);
+                       fat_reset_iocharset(opts);
                        iocharset = match_strdup(&args[0]);
                        if (!iocharset)
                                return -ENOMEM;
@@ -1774,8 +1781,7 @@ out_fail:
                iput(fat_inode);
        unload_nls(sbi->nls_io);
        unload_nls(sbi->nls_disk);
-       if (sbi->options.iocharset != fat_default_iocharset)
-               kfree(sbi->options.iocharset);
+       fat_reset_iocharset(&sbi->options);
        sb->s_fs_info = NULL;
        kfree(sbi);
        return error;
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 008a270faf26..d3a85e0c1b00 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -475,6 +475,7 @@ static inline u32 skb_mstamp_us_delta(const struct skb_mstamp *t1,
  *     @hash: the packet hash
  *     @queue_mapping: Queue mapping for multiqueue devices
  *     @xmit_more: More SKBs are pending for this queue
+ *     @pfmemalloc: skbuff was allocated from PFMEMALLOC reserves
  *     @ndisc_nodetype: router type (from link layer)
  *     @ooo_okay: allow the mapping of a socket to a queue to be changed
  *     @l4_hash: indicate hash is a canonical 4-tuple hash over transport
@@ -551,8 +552,8 @@ struct sk_buff {
                                fclone:2,
                                peeked:1,
                                head_frag:1,
-                               xmit_more:1;
-       /* one bit hole */
+                               xmit_more:1,
+                               pfmemalloc:1;
        kmemcheck_bitfield_end(flags1);
 
        /* fields enclosed in headers_start/headers_end are copied
@@ -572,19 +573,18 @@ struct sk_buff {
 
        __u8                    __pkt_type_offset[0];
        __u8                    pkt_type:3;
-       __u8                    pfmemalloc:1;
        __u8                    ignore_df:1;
        __u8                    nfctinfo:3;
-
        __u8                    nf_trace:1;
+
        __u8                    ip_summed:2;
        __u8                    ooo_okay:1;
        __u8                    l4_hash:1;
        __u8                    sw_hash:1;
        __u8                    wifi_acked_valid:1;
        __u8                    wifi_acked:1;
-
        __u8                    no_fcs:1;
+
        /* Indicates the inner headers are valid in the skbuff. */
        __u8                    encapsulation:1;
        __u8                    encap_hdr_csum:1;
@@ -592,11 +592,11 @@ struct sk_buff {
        __u8                    csum_complete_sw:1;
        __u8                    csum_level:2;
        __u8                    csum_bad:1;
-
 #ifdef CONFIG_IPV6_NDISC_NODETYPE
        __u8                    ndisc_nodetype:2;
 #endif
        __u8                    ipvs_property:1;
+
        __u8                    inner_protocol_type:1;
        /* 4 or 6 bit hole */
 
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 15564a2cc28c..b362f9abae3b 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -372,6 +372,7 @@ ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
                        struct pipe_inode_info *pipe, size_t len,
                        unsigned int flags);
 
+void tcp_enter_quickack_mode(struct sock *sk);
 static inline void tcp_dec_quickack_mode(struct sock *sk,
                                         const unsigned int pkts)
 {
@@ -530,6 +531,7 @@ int tcp_send_synack(struct sock *);
 bool tcp_syn_flood_action(struct sock *sk, const struct sk_buff *skb,
                          const char *proto);
 void tcp_push_one(struct sock *, unsigned int mss_now);
+void __tcp_send_ack(struct sock *sk, u32 rcv_nxt);
 void tcp_send_ack(struct sock *sk);
 void tcp_send_delayed_ack(struct sock *sk);
 void tcp_send_loss_probe(struct sock *sk);
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 9fd2c9eb54e8..51301ad3c2d9 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1842,9 +1842,12 @@ int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm)
                        return err;
        }
 
-       dev->rtnl_link_state = RTNL_LINK_INITIALIZED;
-
-       __dev_notify_flags(dev, old_flags, ~0U);
+       if (dev->rtnl_link_state == RTNL_LINK_INITIALIZED) {
+               __dev_notify_flags(dev, old_flags, 0U);
+       } else {
+               dev->rtnl_link_state = RTNL_LINK_INITIALIZED;
+               __dev_notify_flags(dev, old_flags, ~0U);
+       }
        return 0;
 }
 EXPORT_SYMBOL(rtnl_configure_link);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index ab5b8d0bdccc..fe4c467a4a15 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -780,6 +780,7 @@ static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
        n->cloned = 1;
        n->nohdr = 0;
        n->peeked = 0;
+       C(pfmemalloc);
        n->destructor = NULL;
        C(tail);
        C(end);
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 88392ebcbc64..f6c8f825ae6b 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -459,6 +459,8 @@ static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
        to->dev = from->dev;
        to->mark = from->mark;
 
+       skb_copy_hash(to, from);
+
        /* Copy the flags to each fragment. */
        IPCB(to)->flags = IPCB(from)->flags;
 
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index d383bd6be279..d6ef0dd85eb4 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -134,8 +134,9 @@ static int ipv4_ping_group_range(struct ctl_table *table, int write,
        if (write && ret == 0) {
                low = make_kgid(user_ns, urange[0]);
                high = make_kgid(user_ns, urange[1]);
-               if (!gid_valid(low) || !gid_valid(high) ||
-                   (urange[1] < urange[0]) || gid_lt(high, low)) {
+               if (!gid_valid(low) || !gid_valid(high))
+                       return -EINVAL;
+               if (urange[1] < urange[0] || gid_lt(high, low)) {
                        low = make_kgid(&init_user_ns, 1);
                        high = make_kgid(&init_user_ns, 0);
                }
diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c
index b504371af742..ddd84ed7fb38 100644
--- a/net/ipv4/tcp_dctcp.c
+++ b/net/ipv4/tcp_dctcp.c
@@ -128,23 +128,14 @@ static void dctcp_ce_state_0_to_1(struct sock *sk)
        struct dctcp *ca = inet_csk_ca(sk);
        struct tcp_sock *tp = tcp_sk(sk);
 
-       /* State has changed from CE=0 to CE=1 and delayed
-        * ACK has not sent yet.
-        */
-       if (!ca->ce_state && ca->delayed_ack_reserved) {
-               u32 tmp_rcv_nxt;
-
-               /* Save current rcv_nxt. */
-               tmp_rcv_nxt = tp->rcv_nxt;
-
-               /* Generate previous ack with CE=0. */
-               tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
-               tp->rcv_nxt = ca->prior_rcv_nxt;
-
-               tcp_send_ack(sk);
-
-               /* Recover current rcv_nxt. */
-               tp->rcv_nxt = tmp_rcv_nxt;
+       if (!ca->ce_state) {
+               /* State has changed from CE=0 to CE=1, force an immediate
+                * ACK to reflect the new CE state. If an ACK was delayed,
+                * send that first to reflect the prior CE state.
+                */
+               if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER)
+                       __tcp_send_ack(sk, ca->prior_rcv_nxt);
+               tcp_enter_quickack_mode(sk);
        }
 
        ca->prior_rcv_nxt = tp->rcv_nxt;
@@ -158,23 +149,14 @@ static void dctcp_ce_state_1_to_0(struct sock *sk)
        struct dctcp *ca = inet_csk_ca(sk);
        struct tcp_sock *tp = tcp_sk(sk);
 
-       /* State has changed from CE=1 to CE=0 and delayed
-        * ACK has not sent yet.
-        */
-       if (ca->ce_state && ca->delayed_ack_reserved) {
-               u32 tmp_rcv_nxt;
-
-               /* Save current rcv_nxt. */
-               tmp_rcv_nxt = tp->rcv_nxt;
-
-               /* Generate previous ack with CE=1. */
-               tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
-               tp->rcv_nxt = ca->prior_rcv_nxt;
-
-               tcp_send_ack(sk);
-
-               /* Recover current rcv_nxt. */
-               tp->rcv_nxt = tmp_rcv_nxt;
+       if (ca->ce_state) {
+               /* State has changed from CE=1 to CE=0, force an immediate
+                * ACK to reflect the new CE state. If an ACK was delayed,
+                * send that first to reflect the prior CE state.
+                */
+               if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER)
+                       __tcp_send_ack(sk, ca->prior_rcv_nxt);
+               tcp_enter_quickack_mode(sk);
        }
 
        ca->prior_rcv_nxt = tp->rcv_nxt;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index e81a5711dd4f..dee70277f12f 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -182,13 +182,14 @@ static void tcp_incr_quickack(struct sock *sk)
                icsk->icsk_ack.quick = min(quickacks, TCP_MAX_QUICKACKS);
 }
 
-static void tcp_enter_quickack_mode(struct sock *sk)
+void tcp_enter_quickack_mode(struct sock *sk)
 {
        struct inet_connection_sock *icsk = inet_csk(sk);
        tcp_incr_quickack(sk);
        icsk->icsk_ack.pingpong = 0;
        icsk->icsk_ack.ato = TCP_ATO_MIN;
 }
+EXPORT_SYMBOL(tcp_enter_quickack_mode);
 
 /* Send ACKs quickly, if "quick" count is not exhausted
  * and the session is not interactive.
@@ -4651,6 +4652,7 @@ restart:
 static void tcp_collapse_ofo_queue(struct sock *sk)
 {
        struct tcp_sock *tp = tcp_sk(sk);
+       u32 range_truesize, sum_tiny = 0;
        struct sk_buff *skb = skb_peek(&tp->out_of_order_queue);
        struct sk_buff *head;
        u32 start, end;
@@ -4660,6 +4662,7 @@ static void tcp_collapse_ofo_queue(struct sock *sk)
 
        start = TCP_SKB_CB(skb)->seq;
        end = TCP_SKB_CB(skb)->end_seq;
+       range_truesize = skb->truesize;
        head = skb;
 
        for (;;) {
@@ -4674,14 +4677,24 @@ static void tcp_collapse_ofo_queue(struct sock *sk)
                if (!skb ||
                    after(TCP_SKB_CB(skb)->seq, end) ||
                    before(TCP_SKB_CB(skb)->end_seq, start)) {
-                       tcp_collapse(sk, &tp->out_of_order_queue,
-                                    head, skb, start, end);
+                       /* Do not attempt collapsing tiny skbs */
+                       if (range_truesize != head->truesize ||
+                           end - start >= SKB_WITH_OVERHEAD(SK_MEM_QUANTUM)) {
+                               tcp_collapse(sk, &tp->out_of_order_queue,
+                                            head, skb, start, end);
+                       } else {
+                               sum_tiny += range_truesize;
+                               if (sum_tiny > sk->sk_rcvbuf >> 3)
+                                       return;
+                       }
+
                        head = skb;
                        if (!skb)
                                break;
                        /* Start new segment */
                        start = TCP_SKB_CB(skb)->seq;
                        end = TCP_SKB_CB(skb)->end_seq;
+                       range_truesize = skb->truesize;
                } else {
                        if (before(TCP_SKB_CB(skb)->seq, start))
                                start = TCP_SKB_CB(skb)->seq;
@@ -4737,6 +4750,9 @@ static int tcp_prune_queue(struct sock *sk)
        else if (sk_under_memory_pressure(sk))
                tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);
 
+       if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
+               return 0;
+
        tcp_collapse_ofo_queue(sk);
        if (!skb_queue_empty(&sk->sk_receive_queue))
                tcp_collapse(sk, &sk->sk_receive_queue,
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index add262e8d8df..a3c701e079fd 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -183,8 +183,13 @@ static void tcp_event_data_sent(struct tcp_sock *tp,
 }
 
 /* Account for an ACK we sent. */
-static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts)
+static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts,
+                                     u32 rcv_nxt)
 {
+       struct tcp_sock *tp = tcp_sk(sk);
+
+       if (unlikely(rcv_nxt != tp->rcv_nxt))
+               return;  /* Special ACK sent by DCTCP to reflect ECN */
        tcp_dec_quickack_mode(sk, pkts);
        inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
 }
@@ -884,8 +889,8 @@ out:
  * We are working here with either a clone of the original
  * SKB, or a fresh unique copy made by the retransmit engine.
  */
-static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
-                           gfp_t gfp_mask)
+static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
+                             int clone_it, gfp_t gfp_mask, u32 rcv_nxt)
 {
        const struct inet_connection_sock *icsk = inet_csk(sk);
        struct inet_sock *inet;
@@ -948,7 +953,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
        th->source              = inet->inet_sport;
        th->dest                = inet->inet_dport;
        th->seq                 = htonl(tcb->seq);
-       th->ack_seq             = htonl(tp->rcv_nxt);
+       th->ack_seq             = htonl(rcv_nxt);
        *(((__be16 *)th) + 6)   = htons(((tcp_header_size >> 2) << 12) |
                                        tcb->tcp_flags);
 
@@ -990,7 +995,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
        icsk->icsk_af_ops->send_check(sk, skb);
 
        if (likely(tcb->tcp_flags & TCPHDR_ACK))
-               tcp_event_ack_sent(sk, tcp_skb_pcount(skb));
+               tcp_event_ack_sent(sk, tcp_skb_pcount(skb), rcv_nxt);
 
        if (skb->len != tcp_header_size)
                tcp_event_data_sent(tp, sk);
@@ -1019,6 +1024,13 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
        return net_xmit_eval(err);
 }
 
+static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
+                           gfp_t gfp_mask)
+{
+       return __tcp_transmit_skb(sk, skb, clone_it, gfp_mask,
+                                 tcp_sk(sk)->rcv_nxt);
+}
+
 /* This routine just queues the buffer for sending.
  *
  * NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames,
@@ -3220,7 +3232,7 @@ void tcp_send_delayed_ack(struct sock *sk)
 }
 
 /* This routine sends an ack and also updates the window. */
-void tcp_send_ack(struct sock *sk)
+void __tcp_send_ack(struct sock *sk, u32 rcv_nxt)
 {
        struct sk_buff *buff;
 
@@ -3249,9 +3261,14 @@ void tcp_send_ack(struct sock *sk)
 
        /* Send it off, this clears delayed acks for us. */
        skb_mstamp_get(&buff->skb_mstamp);
-       tcp_transmit_skb(sk, buff, 0, sk_gfp_atomic(sk, GFP_ATOMIC));
+       __tcp_transmit_skb(sk, buff, 0, sk_gfp_atomic(sk, GFP_ATOMIC), rcv_nxt);
+}
+EXPORT_SYMBOL_GPL(__tcp_send_ack);
+
+void tcp_send_ack(struct sock *sk)
+{
+       __tcp_send_ack(sk, tcp_sk(sk)->rcv_nxt);
 }
-EXPORT_SYMBOL_GPL(tcp_send_ack);
 
 /* This routine sends a packet with an out of date sequence
  * number. It assumes the other end will try to ack it.
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 436bd77859e5..087fe616bfaf 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -533,6 +533,8 @@ static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
        to->dev = from->dev;
        to->mark = from->mark;
 
+       skb_copy_hash(to, from);
+
 #ifdef CONFIG_NET_SCHED
        to->tc_index = from->tc_index;
 #endif
diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
index f7be2556eb93..57e4ab148fe3 100644
--- a/sound/core/rawmidi.c
+++ b/sound/core/rawmidi.c
@@ -645,7 +645,7 @@ static int snd_rawmidi_info_select_user(struct snd_card *card,
 int snd_rawmidi_output_params(struct snd_rawmidi_substream *substream,
                              struct snd_rawmidi_params * params)
 {
-       char *newbuf;
+       char *newbuf, *oldbuf;
        struct snd_rawmidi_runtime *runtime = substream->runtime;
        
        if (substream->append && substream->use_count > 1)
@@ -658,13 +658,17 @@ int snd_rawmidi_output_params(struct snd_rawmidi_substream *substream,
                return -EINVAL;
        }
        if (params->buffer_size != runtime->buffer_size) {
-               newbuf = krealloc(runtime->buffer, params->buffer_size,
-                                 GFP_KERNEL);
+               newbuf = kmalloc(params->buffer_size, GFP_KERNEL);
                if (!newbuf)
                        return -ENOMEM;
+               spin_lock_irq(&runtime->lock);
+               oldbuf = runtime->buffer;
                runtime->buffer = newbuf;
                runtime->buffer_size = params->buffer_size;
                runtime->avail = runtime->buffer_size;
+               runtime->appl_ptr = runtime->hw_ptr = 0;
+               spin_unlock_irq(&runtime->lock);
+               kfree(oldbuf);
        }
        runtime->avail_min = params->avail_min;
        substream->active_sensing = !params->no_active_sensing;
@@ -675,7 +679,7 @@ EXPORT_SYMBOL(snd_rawmidi_output_params);
 int snd_rawmidi_input_params(struct snd_rawmidi_substream *substream,
                             struct snd_rawmidi_params * params)
 {
-       char *newbuf;
+       char *newbuf, *oldbuf;
        struct snd_rawmidi_runtime *runtime = substream->runtime;
 
        snd_rawmidi_drain_input(substream);
@@ -686,12 +690,16 @@ int snd_rawmidi_input_params(struct snd_rawmidi_substream *substream,
                return -EINVAL;
        }
        if (params->buffer_size != runtime->buffer_size) {
-               newbuf = krealloc(runtime->buffer, params->buffer_size,
-                                 GFP_KERNEL);
+               newbuf = kmalloc(params->buffer_size, GFP_KERNEL);
                if (!newbuf)
                        return -ENOMEM;
+               spin_lock_irq(&runtime->lock);
+               oldbuf = runtime->buffer;
                runtime->buffer = newbuf;
                runtime->buffer_size = params->buffer_size;
+               runtime->appl_ptr = runtime->hw_ptr = 0;
+               spin_unlock_irq(&runtime->lock);
+               kfree(oldbuf);
        }
        runtime->avail_min = params->avail_min;
        return 0;
