commit:     b821cf4a2c525cf4101fdd015272504def813009
Author:     Anthony G. Basile <blueness <AT> gentoo <DOT> org>
AuthorDate: Wed Jan 28 22:02:56 2015 +0000
Commit:     Anthony G. Basile <blueness <AT> gentoo <DOT> org>
CommitDate: Wed Jan 28 22:02:56 2015 +0000
URL:        http://sources.gentoo.org/gitweb/?p=proj/linux-patches.git;a=commit;h=b821cf4a

Linux patch 3.14.30

---
 1029_linux-3.14.30.patch | 4387 ++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 4387 insertions(+)

diff --git a/1029_linux-3.14.30.patch b/1029_linux-3.14.30.patch
new file mode 100644
index 0000000..c5db3c7
--- /dev/null
+++ b/1029_linux-3.14.30.patch
@@ -0,0 +1,4387 @@
+diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
+index 7116fda7077f..5d91ba1606bb 100644
+--- a/Documentation/kernel-parameters.txt
++++ b/Documentation/kernel-parameters.txt
+@@ -1172,6 +1172,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
+       i8042.notimeout [HW] Ignore timeout condition signalled by controller
+       i8042.reset     [HW] Reset the controller during init and cleanup
+       i8042.unlock    [HW] Unlock (ignore) the keylock
++      i8042.kbdreset  [HW] Reset device connected to KBD port
+ 
+       i810=           [HW,DRM]
+ 
+diff --git a/Makefile b/Makefile
+index 7aff64ee4fb6..5b94752a85e3 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 14
+-SUBLEVEL = 29
++SUBLEVEL = 30
+ EXTRAVERSION =
+ NAME = Remembering Coco
+ 
+diff --git a/arch/arc/boot/dts/nsimosci.dts b/arch/arc/boot/dts/nsimosci.dts
+index 398064cef746..4c169d825415 100644
+--- a/arch/arc/boot/dts/nsimosci.dts
++++ b/arch/arc/boot/dts/nsimosci.dts
+@@ -20,7 +20,7 @@
+               /* this is for console on PGU */
+               /* bootargs = "console=tty0 consoleblank=0"; */
+               /* this is for console on serial */
+-              bootargs = "earlycon=uart8250,mmio32,0xc0000000,115200n8 console=tty0 console=ttyS0,115200n8 consoleblank=0 debug";
++              bootargs = "earlycon=uart8250,mmio32,0xf0000000,115200n8 console=tty0 console=ttyS0,115200n8 consoleblank=0 debug";
+       };
+ 
+       aliases {
+@@ -46,9 +46,9 @@
+                       #interrupt-cells = <1>;
+               };
+ 
+-              uart0: serial@c0000000 {
++              uart0: serial@f0000000 {
+                       compatible = "ns8250";
+-                      reg = <0xc0000000 0x2000>;
++                      reg = <0xf0000000 0x2000>;
+                       interrupts = <11>;
+                       clock-frequency = <3686400>;
+                       baud = <115200>;
+@@ -57,21 +57,21 @@
+                       no-loopback-test = <1>;
+               };
+ 
+-              pgu0: pgu@c9000000 {
++              pgu0: pgu@f9000000 {
+                       compatible = "snps,arcpgufb";
+-                      reg = <0xc9000000 0x400>;
++                      reg = <0xf9000000 0x400>;
+               };
+ 
+-              ps2: ps2@c9001000 {
++              ps2: ps2@f9001000 {
+                       compatible = "snps,arc_ps2";
+-                      reg = <0xc9000400 0x14>;
++                      reg = <0xf9000400 0x14>;
+                       interrupts = <13>;
+                       interrupt-names = "arc_ps2_irq";
+               };
+ 
+-              eth0: ethernet@c0003000 {
++              eth0: ethernet@f0003000 {
+                       compatible = "snps,oscilan";
+-                      reg = <0xc0003000 0x44>;
++                      reg = <0xf0003000 0x44>;
+                       interrupts = <7>, <8>;
+                       interrupt-names = "rx", "tx";
+               };
+diff --git a/arch/arc/include/asm/linkage.h b/arch/arc/include/asm/linkage.h
+index 66ee5527aefc..5faad17118b4 100644
+--- a/arch/arc/include/asm/linkage.h
++++ b/arch/arc/include/asm/linkage.h
+@@ -13,20 +13,6 @@
+ 
+ #define ASM_NL                 `      /* use '`' to mark new line in macro */
+ 
+-/* Can't use the ENTRY macro in linux/linkage.h
+- * gas considers ';' as comment vs. newline
+- */
+-.macro ARC_ENTRY name
+-      .global \name
+-      .align 4
+-      \name:
+-.endm
+-
+-.macro ARC_EXIT name
+-#define ASM_PREV_SYM_ADDR(name)  .-##name
+-      .size \ name, ASM_PREV_SYM_ADDR(\name)
+-.endm
+-
+ /* annotation for data we want in DCCM - if enabled in .config */
+ .macro ARCFP_DATA nm
+ #ifdef CONFIG_ARC_HAS_DCCM
+diff --git a/arch/arc/kernel/ctx_sw_asm.S b/arch/arc/kernel/ctx_sw_asm.S
+index 65690e7fcc8c..2ff0347a2fd7 100644
+--- a/arch/arc/kernel/ctx_sw_asm.S
++++ b/arch/arc/kernel/ctx_sw_asm.S
+@@ -62,4 +62,4 @@ __switch_to:
+       ld.ab   blink, [sp, 4]
+       j       [blink]
+ 
+-ARC_EXIT __switch_to
++END(__switch_to)
+diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S
+index 6e8f83a32522..29b82adbf0b4 100644
+--- a/arch/arc/kernel/entry.S
++++ b/arch/arc/kernel/entry.S
+@@ -141,7 +141,7 @@ VECTOR   EV_Extension            ; 0x130, Extn Intruction Excp  (0x26)
+ VECTOR   reserved                ; Reserved Exceptions
+ .endr
+ 
+-#include <linux/linkage.h>   /* ARC_{EXTRY,EXIT} */
++#include <linux/linkage.h>   /* {EXTRY,EXIT} */
+ #include <asm/entry.h>       /* SAVE_ALL_{INT1,INT2,SYS...} */
+ #include <asm/errno.h>
+ #include <asm/arcregs.h>
+@@ -184,7 +184,7 @@ reserved:          ; processor restart
+ ; ---------------------------------------------
+ ;  Level 2 ISR: Can interrupt a Level 1 ISR
+ ; ---------------------------------------------
+-ARC_ENTRY handle_interrupt_level2
++ENTRY(handle_interrupt_level2)
+ 
+       ; TODO-vineetg for SMP this wont work
+       ; free up r9 as scratchpad
+@@ -225,14 +225,14 @@ ARC_ENTRY handle_interrupt_level2
+ 
+       b   ret_from_exception
+ 
+-ARC_EXIT handle_interrupt_level2
++END(handle_interrupt_level2)
+ 
+ #endif
+ 
+ ; ---------------------------------------------
+ ;  Level 1 ISR
+ ; ---------------------------------------------
+-ARC_ENTRY handle_interrupt_level1
++ENTRY(handle_interrupt_level1)
+ 
+       /* free up r9 as scratchpad */
+ #ifdef CONFIG_SMP
+@@ -265,7 +265,7 @@ ARC_ENTRY handle_interrupt_level1
+       sr r8, [AUX_IRQ_LV12]       ; clear bit in Sticky Status Reg
+ 
+       b   ret_from_exception
+-ARC_EXIT handle_interrupt_level1
++END(handle_interrupt_level1)
+ 
+ ;################### Non TLB Exception Handling #############################
+ 
+@@ -273,7 +273,7 @@ ARC_EXIT handle_interrupt_level1
+ ; Instruction Error Exception Handler
+ ; ---------------------------------------------
+ 
+-ARC_ENTRY instr_service
++ENTRY(instr_service)
+ 
+       EXCEPTION_PROLOGUE
+ 
+@@ -284,13 +284,13 @@ ARC_ENTRY instr_service
+ 
+       bl  do_insterror_or_kprobe
+       b   ret_from_exception
+-ARC_EXIT instr_service
++END(instr_service)
+ 
+ ; ---------------------------------------------
+ ; Memory Error Exception Handler
+ ; ---------------------------------------------
+ 
+-ARC_ENTRY mem_service
++ENTRY(mem_service)
+ 
+       EXCEPTION_PROLOGUE
+ 
+@@ -301,13 +301,13 @@ ARC_ENTRY mem_service
+ 
+       bl  do_memory_error
+       b   ret_from_exception
+-ARC_EXIT mem_service
++END(mem_service)
+ 
+ ; ---------------------------------------------
+ ; Machine Check Exception Handler
+ ; ---------------------------------------------
+ 
+-ARC_ENTRY EV_MachineCheck
++ENTRY(EV_MachineCheck)
+ 
+       EXCEPTION_PROLOGUE
+ 
+@@ -331,13 +331,13 @@ ARC_ENTRY EV_MachineCheck
+ 
+       j  do_machine_check_fault
+ 
+-ARC_EXIT EV_MachineCheck
++END(EV_MachineCheck)
+ 
+ ; ---------------------------------------------
+ ; Protection Violation Exception Handler
+ ; ---------------------------------------------
+ 
+-ARC_ENTRY EV_TLBProtV
++ENTRY(EV_TLBProtV)
+ 
+       EXCEPTION_PROLOGUE
+ 
+@@ -385,12 +385,12 @@ ARC_ENTRY EV_TLBProtV
+ 
+       b   ret_from_exception
+ 
+-ARC_EXIT EV_TLBProtV
++END(EV_TLBProtV)
+ 
+ ; ---------------------------------------------
+ ; Privilege Violation Exception Handler
+ ; ---------------------------------------------
+-ARC_ENTRY EV_PrivilegeV
++ENTRY(EV_PrivilegeV)
+ 
+       EXCEPTION_PROLOGUE
+ 
+@@ -401,12 +401,12 @@ ARC_ENTRY EV_PrivilegeV
+ 
+       bl  do_privilege_fault
+       b   ret_from_exception
+-ARC_EXIT EV_PrivilegeV
++END(EV_PrivilegeV)
+ 
+ ; ---------------------------------------------
+ ; Extension Instruction Exception Handler
+ ; ---------------------------------------------
+-ARC_ENTRY EV_Extension
++ENTRY(EV_Extension)
+ 
+       EXCEPTION_PROLOGUE
+ 
+@@ -417,7 +417,7 @@ ARC_ENTRY EV_Extension
+ 
+       bl  do_extension_fault
+       b   ret_from_exception
+-ARC_EXIT EV_Extension
++END(EV_Extension)
+ 
+ ;######################### System Call Tracing #########################
+ 
+@@ -504,7 +504,7 @@ trap_with_param:
+ ;   (2) Break Points
+ ;------------------------------------------------------------------
+ 
+-ARC_ENTRY EV_Trap
++ENTRY(EV_Trap)
+ 
+       EXCEPTION_PROLOGUE
+ 
+@@ -534,9 +534,9 @@ ARC_ENTRY EV_Trap
+       jl      [r9]        ; Entry into Sys Call Handler
+ 
+       ; fall through to ret_from_system_call
+-ARC_EXIT EV_Trap
++END(EV_Trap)
+ 
+-ARC_ENTRY ret_from_system_call
++ENTRY(ret_from_system_call)
+ 
+       st  r0, [sp, PT_r0]     ; sys call return value in pt_regs
+ 
+@@ -546,7 +546,7 @@ ARC_ENTRY ret_from_system_call
+ ;
+ ; If ret to user mode do we need to handle signals, schedule() et al.
+ 
+-ARC_ENTRY ret_from_exception
++ENTRY(ret_from_exception)
+ 
+       ; Pre-{IRQ,Trap,Exception} K/U mode from pt_regs->status32
+       ld  r8, [sp, PT_status32]   ; returning to User/Kernel Mode
+@@ -728,9 +728,9 @@ not_level1_interrupt:
+ debug_marker_syscall:
+       rtie
+ 
+-ARC_EXIT ret_from_exception
++END(ret_from_exception)
+ 
+-ARC_ENTRY ret_from_fork
++ENTRY(ret_from_fork)
+       ; when the forked child comes here from the __switch_to function
+       ; r0 has the last task pointer.
+       ; put last task in scheduler queue
+@@ -747,11 +747,11 @@ ARC_ENTRY ret_from_fork
+       ; special case of kernel_thread entry point returning back due to
+       ; kernel_execve() - pretend return from syscall to ret to userland
+       b    ret_from_exception
+-ARC_EXIT ret_from_fork
++END(ret_from_fork)
+ 
+ ;################### Special Sys Call Wrappers ##########################
+ 
+-ARC_ENTRY sys_clone_wrapper
++ENTRY(sys_clone_wrapper)
+       SAVE_CALLEE_SAVED_USER
+       bl  @sys_clone
+       DISCARD_CALLEE_SAVED_USER
+@@ -761,7 +761,7 @@ ARC_ENTRY sys_clone_wrapper
+       bnz  tracesys_exit
+ 
+       b ret_from_system_call
+-ARC_EXIT sys_clone_wrapper
++END(sys_clone_wrapper)
+ 
+ #ifdef CONFIG_ARC_DW2_UNWIND
+ ; Workaround for bug 94179 (STAR ):
+diff --git a/arch/arc/lib/memcmp.S b/arch/arc/lib/memcmp.S
+index bc813d55b6c3..978bf8314dfb 100644
+--- a/arch/arc/lib/memcmp.S
++++ b/arch/arc/lib/memcmp.S
+@@ -6,7 +6,7 @@
+  * published by the Free Software Foundation.
+  */
+ 
+-#include <asm/linkage.h>
++#include <linux/linkage.h>
+ 
+ #ifdef __LITTLE_ENDIAN__
+ #define WORD2 r2
+@@ -16,7 +16,7 @@
+ #define SHIFT r2
+ #endif
+ 
+-ARC_ENTRY memcmp
++ENTRY(memcmp)
+       or      r12,r0,r1
+       asl_s   r12,r12,30
+       sub     r3,r2,1
+@@ -121,4 +121,4 @@ ARC_ENTRY memcmp
+ .Lnil:
+       j_s.d   [blink]
+       mov     r0,0
+-ARC_EXIT memcmp
++END(memcmp)
+diff --git a/arch/arc/lib/memcpy-700.S b/arch/arc/lib/memcpy-700.S
+index b64cc10ac918..3222573e50de 100644
+--- a/arch/arc/lib/memcpy-700.S
++++ b/arch/arc/lib/memcpy-700.S
+@@ -6,9 +6,9 @@
+  * published by the Free Software Foundation.
+  */
+ 
+-#include <asm/linkage.h>
++#include <linux/linkage.h>
+ 
+-ARC_ENTRY memcpy
++ENTRY(memcpy)
+       or      r3,r0,r1
+       asl_s   r3,r3,30
+       mov_s   r5,r0
+@@ -63,4 +63,4 @@ ARC_ENTRY memcpy
+ .Lendbloop:
+       j_s.d   [blink]
+       stb     r12,[r5,0]
+-ARC_EXIT memcpy
++END(memcpy)
+diff --git a/arch/arc/lib/memset.S b/arch/arc/lib/memset.S
+index 9b2d88d2e141..d36bd43fc98d 100644
+--- a/arch/arc/lib/memset.S
++++ b/arch/arc/lib/memset.S
+@@ -6,11 +6,11 @@
+  * published by the Free Software Foundation.
+  */
+ 
+-#include <asm/linkage.h>
++#include <linux/linkage.h>
+ 
+ #define SMALL 7 /* Must be at least 6 to deal with alignment/loop issues.  */
+ 
+-ARC_ENTRY memset
++ENTRY(memset)
+       mov_s   r4,r0
+       or      r12,r0,r2
+       bmsk.f  r12,r12,1
+@@ -46,14 +46,14 @@ ARC_ENTRY memset
+       stb.ab  r1,[r4,1]
+ .Ltiny_end:
+       j_s     [blink]
+-ARC_EXIT memset
++END(memset)
+ 
+ ; memzero: @r0 = mem, @r1 = size_t
+ ; memset:  @r0 = mem, @r1 = char, @r2 = size_t
+ 
+-ARC_ENTRY memzero
++ENTRY(memzero)
+     ; adjust bzero args to memset args
+     mov r2, r1
+     mov r1, 0
+     b  memset    ;tail call so need to tinker with blink
+-ARC_EXIT memzero
++END(memzero)
+diff --git a/arch/arc/lib/strchr-700.S b/arch/arc/lib/strchr-700.S
+index 9c548c7cf001..b725d5862107 100644
+--- a/arch/arc/lib/strchr-700.S
++++ b/arch/arc/lib/strchr-700.S
+@@ -11,9 +11,9 @@
+    presence of the norm instruction makes it easier to operate on whole
+    words branch-free.  */
+ 
+-#include <asm/linkage.h>
++#include <linux/linkage.h>
+ 
+-ARC_ENTRY strchr
++ENTRY(strchr)
+       extb_s  r1,r1
+       asl     r5,r1,8
+       bmsk    r2,r0,1
+@@ -130,4 +130,4 @@ ARC_ENTRY strchr
+       j_s.d   [blink]
+       mov.mi  r0,0
+ #endif /* ENDIAN */
+-ARC_EXIT strchr
++END(strchr)
+diff --git a/arch/arc/lib/strcmp.S b/arch/arc/lib/strcmp.S
+index 5dc802b45cf3..3544600fefe6 100644
+--- a/arch/arc/lib/strcmp.S
++++ b/arch/arc/lib/strcmp.S
+@@ -13,9 +13,9 @@
+    source 1; however, that would increase the overhead for loop setup / finish,
+    and strcmp might often terminate early.  */
+ 
+-#include <asm/linkage.h>
++#include <linux/linkage.h>
+ 
+-ARC_ENTRY strcmp
++ENTRY(strcmp)
+       or      r2,r0,r1
+       bmsk_s  r2,r2,1
+       brne    r2,0,.Lcharloop
+@@ -93,4 +93,4 @@ ARC_ENTRY strcmp
+ .Lcmpend:
+       j_s.d   [blink]
+       sub     r0,r2,r3
+-ARC_EXIT strcmp
++END(strcmp)
+diff --git a/arch/arc/lib/strcpy-700.S b/arch/arc/lib/strcpy-700.S
+index b7ca4ae81d88..8422f38e1218 100644
+--- a/arch/arc/lib/strcpy-700.S
++++ b/arch/arc/lib/strcpy-700.S
+@@ -16,9 +16,9 @@
+    there, but the it is not likely to be taken often, and it
+    would also be likey to cost an unaligned mispredict at the next call.  */
+ 
+-#include <asm/linkage.h>
++#include <linux/linkage.h>
+ 
+-ARC_ENTRY strcpy
++ENTRY(strcpy)
+       or      r2,r0,r1
+       bmsk_s  r2,r2,1
+       brne.d  r2,0,charloop
+@@ -67,4 +67,4 @@ charloop:
+       brne.d  r3,0,charloop
+       stb.ab  r3,[r10,1]
+       j       [blink]
+-ARC_EXIT strcpy
++END(strcpy)
+diff --git a/arch/arc/lib/strlen.S b/arch/arc/lib/strlen.S
+index 39759e099696..53cfd5685a5f 100644
+--- a/arch/arc/lib/strlen.S
++++ b/arch/arc/lib/strlen.S
+@@ -6,9 +6,9 @@
+  * published by the Free Software Foundation.
+  */
+ 
+-#include <asm/linkage.h>
++#include <linux/linkage.h>
+ 
+-ARC_ENTRY strlen
++ENTRY(strlen)
+       or      r3,r0,7
+       ld      r2,[r3,-7]
+       ld.a    r6,[r3,-3]
+@@ -80,4 +80,4 @@ ARC_ENTRY strlen
+ .Learly_end:
+       b.d     .Lend
+       sub_s.ne r1,r1,r1
+-ARC_EXIT strlen
++END(strlen)
+diff --git a/arch/arc/mm/tlbex.S b/arch/arc/mm/tlbex.S
+index 3fcfdb38d242..79bfc81358c9 100644
+--- a/arch/arc/mm/tlbex.S
++++ b/arch/arc/mm/tlbex.S
+@@ -260,7 +260,7 @@ ARCFP_CODE ;Fast Path Code, candidate for ICCM
+ ; I-TLB Miss Exception Handler
+ ;-----------------------------------------------------------------------------
+ 
+-ARC_ENTRY EV_TLBMissI
++ENTRY(EV_TLBMissI)
+ 
+       TLBMISS_FREEUP_REGS
+ 
+@@ -293,13 +293,13 @@ ARC_ENTRY EV_TLBMissI
+       TLBMISS_RESTORE_REGS
+       rtie
+ 
+-ARC_EXIT EV_TLBMissI
++END(EV_TLBMissI)
+ 
+ ;-----------------------------------------------------------------------------
+ ; D-TLB Miss Exception Handler
+ ;-----------------------------------------------------------------------------
+ 
+-ARC_ENTRY EV_TLBMissD
++ENTRY(EV_TLBMissD)
+ 
+       TLBMISS_FREEUP_REGS
+ 
+@@ -381,6 +381,4 @@ do_slow_path_pf:
+       bl  do_page_fault
+       b   ret_from_exception
+ 
+-ARC_EXIT EV_TLBMissD
+-
+-ARC_ENTRY EV_TLBMissB   ; Bogus entry to measure sz of DTLBMiss hdlr
++END(EV_TLBMissD)
+diff --git a/arch/arm/boot/dts/imx25.dtsi b/arch/arm/boot/dts/imx25.dtsi
+index de1611966d8b..6a26e79f0ef4 100644
+--- a/arch/arm/boot/dts/imx25.dtsi
++++ b/arch/arm/boot/dts/imx25.dtsi
+@@ -158,7 +158,7 @@
+                               #size-cells = <0>;
+                               compatible = "fsl,imx25-cspi", "fsl,imx35-cspi";
+                               reg = <0x43fa4000 0x4000>;
+-                              clocks = <&clks 62>, <&clks 62>;
++                              clocks = <&clks 78>, <&clks 78>;
+                               clock-names = "ipg", "per";
+                               interrupts = <14>;
+                               status = "disabled";
+diff --git a/arch/arm/mach-imx/clk-imx6q.c b/arch/arm/mach-imx/clk-imx6q.c
+index 4d677f442539..01a5765a8b26 100644
+--- a/arch/arm/mach-imx/clk-imx6q.c
++++ b/arch/arm/mach-imx/clk-imx6q.c
+@@ -161,8 +161,8 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
+               post_div_table[1].div = 1;
+               post_div_table[2].div = 1;
+               video_div_table[1].div = 1;
+-              video_div_table[2].div = 1;
+-      };
++              video_div_table[3].div = 1;
++      }
+ 
+       /*                   type                               name         parent_name  base     div_mask */
+       clk[pll1_sys]      = imx_clk_pllv3(IMX_PLLV3_SYS,       "pll1_sys",     "osc", base,        0x7f);
+diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c
+index 74044aaf438b..73d80b8576c3 100644
+--- a/arch/arm/mach-omap2/timer.c
++++ b/arch/arm/mach-omap2/timer.c
+@@ -513,11 +513,11 @@ static void __init realtime_counter_init(void)
+       rate = clk_get_rate(sys_clk);
+       /* Numerator/denumerator values refer TRM Realtime Counter section */
+       switch (rate) {
+-      case 1200000:
++      case 12000000:
+               num = 64;
+               den = 125;
+               break;
+-      case 1300000:
++      case 13000000:
+               num = 768;
+               den = 1625;
+               break;
+@@ -529,11 +529,11 @@ static void __init realtime_counter_init(void)
+               num = 192;
+               den = 625;
+               break;
+-      case 2600000:
++      case 26000000:
+               num = 384;
+               den = 1625;
+               break;
+-      case 2700000:
++      case 27000000:
+               num = 256;
+               den = 1125;
+               break;
+diff --git a/arch/arm/mach-shmobile/setup-sh73a0.c b/arch/arm/mach-shmobile/setup-sh73a0.c
+index f74ab530c71d..2b73c8a0c244 100644
+--- a/arch/arm/mach-shmobile/setup-sh73a0.c
++++ b/arch/arm/mach-shmobile/setup-sh73a0.c
+@@ -617,6 +617,7 @@ static struct platform_device ipmmu_device = {
+ 
+ static struct renesas_intc_irqpin_config irqpin0_platform_data = {
+       .irq_base = irq_pin(0), /* IRQ0 -> IRQ7 */
++      .control_parent = true,
+ };
+ 
+ static struct resource irqpin0_resources[] = {
+@@ -678,6 +679,7 @@ static struct platform_device irqpin1_device = {
+ 
+ static struct renesas_intc_irqpin_config irqpin2_platform_data = {
+       .irq_base = irq_pin(16), /* IRQ16 -> IRQ23 */
++      .control_parent = true,
+ };
+ 
+ static struct resource irqpin2_resources[] = {
+@@ -708,6 +710,7 @@ static struct platform_device irqpin2_device = {
+ 
+ static struct renesas_intc_irqpin_config irqpin3_platform_data = {
+       .irq_base = irq_pin(24), /* IRQ24 -> IRQ31 */
++      .control_parent = true,
+ };
+ 
+ static struct resource irqpin3_resources[] = {
+diff --git a/arch/parisc/include/asm/ldcw.h b/arch/parisc/include/asm/ldcw.h
+index d2d11b7055ba..8121aa6db2ff 100644
+--- a/arch/parisc/include/asm/ldcw.h
++++ b/arch/parisc/include/asm/ldcw.h
+@@ -33,11 +33,18 @@
+ 
+ #endif /*!CONFIG_PA20*/
+ 
+-/* LDCW, the only atomic read-write operation PA-RISC has. *sigh*.  */
++/* LDCW, the only atomic read-write operation PA-RISC has. *sigh*.
++   We don't explicitly expose that "*a" may be written as reload
++   fails to find a register in class R1_REGS when "a" needs to be
++   reloaded when generating 64-bit PIC code.  Instead, we clobber
++   memory to indicate to the compiler that the assembly code reads
++   or writes to items other than those listed in the input and output
++   operands.  This may pessimize the code somewhat but __ldcw is
++   usually used within code blocks surrounded by memory barriors.  */
+ #define __ldcw(a) ({                                          \
+       unsigned __ret;                                         \
+-      __asm__ __volatile__(__LDCW " 0(%2),%0"                 \
+-              : "=r" (__ret), "+m" (*(a)) : "r" (a));         \
++      __asm__ __volatile__(__LDCW " 0(%1),%0"                 \
++              : "=r" (__ret) : "r" (a) : "memory");           \
+       __ret;                                                  \
+ })
+ 
+diff --git a/arch/um/Kconfig.common b/arch/um/Kconfig.common
+index 21ca44c4f6d5..1f0ea5537e8a 100644
+--- a/arch/um/Kconfig.common
++++ b/arch/um/Kconfig.common
+@@ -2,6 +2,7 @@ config UML
+       bool
+       default y
+       select HAVE_UID16
++      select HAVE_FUTEX_CMPXCHG if FUTEX
+       select GENERIC_IRQ_SHOW
+       select GENERIC_CPU_DEVICES
+       select GENERIC_IO
+diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
+index 79a3f9682871..a1f5b1866cbe 100644
+--- a/arch/x86/kernel/kprobes/core.c
++++ b/arch/x86/kernel/kprobes/core.c
+@@ -1017,6 +1017,15 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
+       regs->flags &= ~X86_EFLAGS_IF;
+       trace_hardirqs_off();
+       regs->ip = (unsigned long)(jp->entry);
++
++      /*
++       * jprobes use jprobe_return() which skips the normal return
++       * path of the function, and this messes up the accounting of the
++       * function graph tracer to get messed up.
++       *
++       * Pause function graph tracing while performing the jprobe function.
++       */
++      pause_graph_tracing();
+       return 1;
+ }
+ 
+@@ -1042,24 +1051,25 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
+       struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+       u8 *addr = (u8 *) (regs->ip - 1);
+       struct jprobe *jp = container_of(p, struct jprobe, kp);
++      void *saved_sp = kcb->jprobe_saved_sp;
+ 
+       if ((addr > (u8 *) jprobe_return) &&
+           (addr < (u8 *) jprobe_return_end)) {
+-              if (stack_addr(regs) != kcb->jprobe_saved_sp) {
++              if (stack_addr(regs) != saved_sp) {
+                       struct pt_regs *saved_regs = &kcb->jprobe_saved_regs;
+                       printk(KERN_ERR
+                              "current sp %p does not match saved sp %p\n",
+-                             stack_addr(regs), kcb->jprobe_saved_sp);
++                             stack_addr(regs), saved_sp);
+                       printk(KERN_ERR "Saved registers for jprobe %p\n", jp);
+                       show_regs(saved_regs);
+                       printk(KERN_ERR "Current registers\n");
+                       show_regs(regs);
+                       BUG();
+               }
++              /* It's OK to start function graph tracing again */
++              unpause_graph_tracing();
+               *regs = kcb->jprobe_saved_regs;
+-              memcpy((kprobe_opcode_t *)(kcb->jprobe_saved_sp),
+-                     kcb->jprobes_stack,
+-                     MIN_STACK_SIZE(kcb->jprobe_saved_sp));
++              memcpy(saved_sp, kcb->jprobes_stack, MIN_STACK_SIZE(saved_sp));
+               preempt_enable_no_resched();
+               return 1;
+       }
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index 0c90f4b3f835..de426887b359 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -2320,12 +2320,12 @@ static __init void nested_vmx_setup_ctls_msrs(void)
+       nested_vmx_secondary_ctls_low = 0;
+       nested_vmx_secondary_ctls_high &=
+               SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
+-              SECONDARY_EXEC_UNRESTRICTED_GUEST |
+               SECONDARY_EXEC_WBINVD_EXITING;
+ 
+       if (enable_ept) {
+               /* nested EPT: emulate EPT also to L1 */
+-              nested_vmx_secondary_ctls_high |= SECONDARY_EXEC_ENABLE_EPT;
++              nested_vmx_secondary_ctls_high |= SECONDARY_EXEC_ENABLE_EPT |
++                      SECONDARY_EXEC_UNRESTRICTED_GUEST;
+               nested_vmx_ept_caps = VMX_EPT_PAGE_WALK_4_BIT |
+                        VMX_EPTP_WB_BIT | VMX_EPT_2MB_PAGE_BIT |
+                        VMX_EPT_INVEPT_BIT;
+diff --git a/arch/x86/um/sys_call_table_32.c b/arch/x86/um/sys_call_table_32.c
+index 531d4269e2e3..bd16d6c370ec 100644
+--- a/arch/x86/um/sys_call_table_32.c
++++ b/arch/x86/um/sys_call_table_32.c
+@@ -34,7 +34,7 @@ typedef asmlinkage void (*sys_call_ptr_t)(void);
+ 
+ extern asmlinkage void sys_ni_syscall(void);
+ 
+-const sys_call_ptr_t sys_call_table[] __cacheline_aligned = {
++const sys_call_ptr_t sys_call_table[] ____cacheline_aligned = {
+       /*
+        * Smells like a compiler bug -- it doesn't work
+        * when the & below is removed.
+diff --git a/arch/x86/um/sys_call_table_64.c b/arch/x86/um/sys_call_table_64.c
+index f2f0723070ca..95783087f0d3 100644
+--- a/arch/x86/um/sys_call_table_64.c
++++ b/arch/x86/um/sys_call_table_64.c
+@@ -46,7 +46,7 @@ typedef void (*sys_call_ptr_t)(void);
+ 
+ extern void sys_ni_syscall(void);
+ 
+-const sys_call_ptr_t sys_call_table[] __cacheline_aligned = {
++const sys_call_ptr_t sys_call_table[] ____cacheline_aligned = {
+       /*
+        * Smells like a compiler bug -- it doesn't work
+        * when the & below is removed.
+diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
+index f9c4632d4dd3..7145f6d93567 100644
+--- a/drivers/clk/clk.c
++++ b/drivers/clk/clk.c
+@@ -2232,14 +2232,17 @@ int __clk_get(struct clk *clk)
+ 
+ void __clk_put(struct clk *clk)
+ {
++      struct module *owner;
++
+       if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
+               return;
+ 
+       clk_prepare_lock();
++      owner = clk->owner;
+       kref_put(&clk->ref, __clk_release);
+       clk_prepare_unlock();
+ 
+-      module_put(clk->owner);
++      module_put(owner);
+ }
+ 
+ /***        clk rate change notifiers        ***/
+diff --git a/drivers/clk/samsung/clk-exynos-audss.c b/drivers/clk/samsung/clk-exynos-audss.c
+index 884187fbfe00..7f30b94c00a5 100644
+--- a/drivers/clk/samsung/clk-exynos-audss.c
++++ b/drivers/clk/samsung/clk-exynos-audss.c
+@@ -210,6 +210,10 @@ static int exynos_audss_clk_remove(struct platform_device *pdev)
+ {
+       int i;
+ 
++#ifdef CONFIG_PM_SLEEP
++      unregister_syscore_ops(&exynos_audss_clk_syscore_ops);
++#endif
++
+       of_clk_del_provider(pdev->dev.of_node);
+ 
+       for (i = 0; i < clk_data.clk_num; i++) {
+diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
+index e0a98f581f58..74ed17d6cfa1 100644
+--- a/drivers/gpio/gpiolib-of.c
++++ b/drivers/gpio/gpiolib-of.c
+@@ -44,8 +44,14 @@ static int of_gpiochip_find_and_xlate(struct gpio_chip *gc, void *data)
+               return false;
+ 
+       ret = gc->of_xlate(gc, &gg_data->gpiospec, gg_data->flags);
+-      if (ret < 0)
+-              return false;
++      if (ret < 0) {
++              /* We've found the gpio chip, but the translation failed.
++               * Return true to stop looking and return the translation
++               * error via out_gpio
++               */
++              gg_data->out_gpio = ERR_PTR(ret);
++              return true;
++       }
+ 
+       gg_data->out_gpio = gpio_to_desc(ret + gc->base);
+       return true;
+diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
+index 50c4922fe53a..5b88c83888d1 100644
+--- a/drivers/gpio/gpiolib.c
++++ b/drivers/gpio/gpiolib.c
+@@ -1222,6 +1222,9 @@ int gpiochip_add(struct gpio_chip *chip)
+ 
+       spin_unlock_irqrestore(&gpio_lock, flags);
+ 
++      if (status)
++              goto fail;
++
+ #ifdef CONFIG_PINCTRL
+       INIT_LIST_HEAD(&chip->pin_ranges);
+ #endif
+@@ -1229,12 +1232,12 @@ int gpiochip_add(struct gpio_chip *chip)
+       of_gpiochip_add(chip);
+       acpi_gpiochip_add(chip);
+ 
+-      if (status)
+-              goto fail;
+-
+       status = gpiochip_export(chip);
+-      if (status)
++      if (status) {
++              acpi_gpiochip_remove(chip);
++              of_gpiochip_remove(chip);
+               goto fail;
++      }
+ 
+       pr_debug("%s: registered GPIOs %d to %d on device: %s\n", __func__,
+               chip->base, chip->base + chip->ngpio - 1,
+diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
+index 3c78b2268209..800e06c28018 100644
+--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
++++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
+@@ -137,7 +137,11 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
+               r = devm_request_mem_region(dev->dev, base + 1,
+                                           dev_priv->gtt.stolen_size - 1,
+                                           "Graphics Stolen Memory");
+-              if (r == NULL) {
++              /*
++               * GEN3 firmware likes to smash pci bridges into the stolen
++               * range. Apparently this works.
++               */
++              if (r == NULL && !IS_GEN3(dev)) {
+                       DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n",
+                                 base, base + (uint32_t)dev_priv->gtt.stolen_size);
+                       base = 0;
+diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
+index 0a3b9386eb43..0c83b3dab58c 100644
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -320,6 +320,7 @@
+ #define   PIPE_CONTROL_GLOBAL_GTT_IVB                 (1<<24) /* gen7+ */
+ #define   PIPE_CONTROL_CS_STALL                               (1<<20)
+ #define   PIPE_CONTROL_TLB_INVALIDATE                 (1<<18)
++#define   PIPE_CONTROL_MEDIA_STATE_CLEAR              (1<<16)
+ #define   PIPE_CONTROL_QW_WRITE                               (1<<14)
+ #define   PIPE_CONTROL_DEPTH_STALL                    (1<<13)
+ #define   PIPE_CONTROL_WRITE_FLUSH                    (1<<12)
+diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
+index d488fc71ef49..d2af1e138c91 100644
+--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
++++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
+@@ -334,12 +334,15 @@ gen7_render_ring_flush(struct intel_ring_buffer *ring,
+               flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
+               flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
+               flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
++              flags |= PIPE_CONTROL_MEDIA_STATE_CLEAR;
+               /*
+                * TLB invalidate requires a post-sync write.
+                */
+               flags |= PIPE_CONTROL_QW_WRITE;
+               flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
+ 
++              flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD;
++
+               /* Workaround: we must issue a pipe_control with CS-stall bit
+                * set before a pipe_control command that has the state cache
+                * invalidate bit set. */
+diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
+index c8796316d242..b6c063cad59b 100644
+--- a/drivers/gpu/drm/i915/intel_uncore.c
++++ b/drivers/gpu/drm/i915/intel_uncore.c
+@@ -451,8 +451,8 @@ hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg)
+ static void
+ assert_device_not_suspended(struct drm_i915_private *dev_priv)
+ {
+-      WARN(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended,
+-           "Device suspended\n");
++      WARN_ONCE(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended,
++                "Device suspended\n");
+ }
+ 
+ #define REG_READ_HEADER(x) \
+diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c
+index a75c35ccf25c..165401c4045c 100644
+--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c
++++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c
+@@ -24,13 +24,6 @@
+ 
+ #include "nv04.h"
+ 
+-static void
+-nv4c_mc_msi_rearm(struct nouveau_mc *pmc)
+-{
+-      struct nv04_mc_priv *priv = (void *)pmc;
+-      nv_wr08(priv, 0x088050, 0xff);
+-}
+-
+ struct nouveau_oclass *
+ nv4c_mc_oclass = &(struct nouveau_mc_oclass) {
+       .base.handle = NV_SUBDEV(MC, 0x4c),
+@@ -41,5 +34,4 @@ nv4c_mc_oclass = &(struct nouveau_mc_oclass) {
+               .fini = _nouveau_mc_fini,
+       },
+       .intr = nv04_mc_intr,
+-      .msi_rearm = nv4c_mc_msi_rearm,
+ }.base;
+diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
+index 5727dbdeda7f..b4dbaded2caf 100644
+--- a/drivers/gpu/drm/radeon/atombios_dp.c
++++ b/drivers/gpu/drm/radeon/atombios_dp.c
+@@ -576,6 +576,10 @@ int radeon_dp_mode_valid_helper(struct drm_connector *connector,
+       struct radeon_connector_atom_dig *dig_connector;
+       int dp_clock;
+ 
++      if ((mode->clock > 340000) &&
++          (!radeon_connector_is_dp12_capable(connector)))
++              return MODE_CLOCK_HIGH;
++
+       if (!radeon_connector->con_priv)
+               return MODE_CLOCK_HIGH;
+       dig_connector = radeon_connector->con_priv;
+diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
+index 543ba2d4a659..c7c285646857 100644
+--- a/drivers/gpu/drm/radeon/ci_dpm.c
++++ b/drivers/gpu/drm/radeon/ci_dpm.c
+@@ -4733,7 +4733,7 @@ void ci_dpm_disable(struct radeon_device *rdev)
+       ci_enable_spread_spectrum(rdev, false);
+       ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, false);
+       ci_stop_dpm(rdev);
+-      ci_enable_ds_master_switch(rdev, true);
++      ci_enable_ds_master_switch(rdev, false);
+       ci_enable_ulv(rdev, false);
+       ci_clear_vc(rdev);
+       ci_reset_to_default(rdev);
+diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
+index ddf70d6c0270..8ef67cb4ef1e 100644
+--- a/drivers/gpu/drm/radeon/cik.c
++++ b/drivers/gpu/drm/radeon/cik.c
+@@ -5879,6 +5879,7 @@ static void cik_enable_mgcg(struct radeon_device *rdev, bool enable)
+               }
+ 
+               orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
++              data |= 0x00000001;
+               data &= 0xfffffffd;
+               if (orig != data)
+                       WREG32(RLC_CGTT_MGCG_OVERRIDE, data);
+@@ -5910,7 +5911,7 @@ static void cik_enable_mgcg(struct radeon_device *rdev, bool enable)
+               }
+       } else {
+               orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
+-              data |= 0x00000002;
++              data |= 0x00000003;
+               if (orig != data)
+                       WREG32(RLC_CGTT_MGCG_OVERRIDE, data);
+ 
+diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
+index 040a2a10ea17..45a9a03efc06 100644
+--- a/drivers/gpu/drm/radeon/radeon_ttm.c
++++ b/drivers/gpu/drm/radeon/radeon_ttm.c
+@@ -191,7 +191,7 @@ static void radeon_evict_flags(struct ttm_buffer_object *bo,
+       rbo = container_of(bo, struct radeon_bo, tbo);
+       switch (bo->mem.mem_type) {
+       case TTM_PL_VRAM:
+-              if (rbo->rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready == false)
++              if (rbo->rdev->ring[radeon_copy_ring_index(rbo->rdev)].ready == false)
+                       radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
+               else
+                       radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
+diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
+index cf4bad2c1d59..76329d27385b 100644
+--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
++++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
+@@ -297,11 +297,12 @@ static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
+  *
+  * @pool: to free the pages from
+  * @free_all: If set to true will free all pages in pool
+- * @gfp: GFP flags.
++ * @use_static: Safe to use static buffer
+  **/
+ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
+-                            gfp_t gfp)
++                            bool use_static)
+ {
++      static struct page *static_buf[NUM_PAGES_TO_ALLOC];
+       unsigned long irq_flags;
+       struct page *p;
+       struct page **pages_to_free;
+@@ -311,7 +312,11 @@ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
+       if (NUM_PAGES_TO_ALLOC < nr_free)
+               npages_to_free = NUM_PAGES_TO_ALLOC;
+ 
+-      pages_to_free = kmalloc(npages_to_free * sizeof(struct page *), gfp);
++      if (use_static)
++              pages_to_free = static_buf;
++      else
++              pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
++                                      GFP_KERNEL);
+       if (!pages_to_free) {
+               pr_err("Failed to allocate memory for pool free operation\n");
+               return 0;
+@@ -374,7 +379,8 @@ restart:
+       if (freed_pages)
+               ttm_pages_put(pages_to_free, freed_pages);
+ out:
+-      kfree(pages_to_free);
++      if (pages_to_free != static_buf)
++              kfree(pages_to_free);
+       return nr_free;
+ }
+ 
+@@ -383,8 +389,6 @@ out:
+  *
+  * XXX: (dchinner) Deadlock warning!
+  *
+- * We need to pass sc->gfp_mask to ttm_page_pool_free().
+- *
+  * This code is crying out for a shrinker per pool....
+  */
+ static unsigned long
+@@ -407,8 +411,8 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
+               if (shrink_pages == 0)
+                       break;
+               pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
+-              shrink_pages = ttm_page_pool_free(pool, nr_free,
+-                                                sc->gfp_mask);
++              /* OK to use static buffer since global mutex is held. */
++              shrink_pages = ttm_page_pool_free(pool, nr_free, true);
+               freed += nr_free - shrink_pages;
+       }
+       mutex_unlock(&lock);
+@@ -710,7 +714,7 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
+       }
+       spin_unlock_irqrestore(&pool->lock, irq_flags);
+       if (npages)
+-              ttm_page_pool_free(pool, npages, GFP_KERNEL);
++              ttm_page_pool_free(pool, npages, false);
+ }
+ 
+ /*
+@@ -849,9 +853,9 @@ void ttm_page_alloc_fini(void)
+       pr_info("Finalizing pool allocator\n");
+       ttm_pool_mm_shrink_fini(_manager);
+ 
++      /* OK to use static buffer since global mutex is no longer used. */
+       for (i = 0; i < NUM_POOLS; ++i)
+-              ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES,
+-                                 GFP_KERNEL);
++              ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES, true);
+ 
+       kobject_put(&_manager->kobj);
+       _manager = NULL;
+diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+index ca65df144765..3dfa97d04e51 100644
+--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
++++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+@@ -411,11 +411,12 @@ static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
+  *
+  * @pool: to free the pages from
+  * @nr_free: If set to true will free all pages in pool
+- * @gfp: GFP flags.
++ * @use_static: Safe to use static buffer
+  **/
+ static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
+-                                     gfp_t gfp)
++                                     bool use_static)
+ {
++      static struct page *static_buf[NUM_PAGES_TO_ALLOC];
+       unsigned long irq_flags;
+       struct dma_page *dma_p, *tmp;
+       struct page **pages_to_free;
+@@ -432,7 +433,11 @@ static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
+                        npages_to_free, nr_free);
+       }
+ #endif
+-      pages_to_free = kmalloc(npages_to_free * sizeof(struct page *), gfp);
++      if (use_static)
++              pages_to_free = static_buf;
++      else
++              pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
++                                      GFP_KERNEL);
+ 
+       if (!pages_to_free) {
+               pr_err("%s: Failed to allocate memory for pool free 
operation\n",
+@@ -502,7 +507,8 @@ restart:
+       if (freed_pages)
+               ttm_dma_pages_put(pool, &d_pages, pages_to_free, freed_pages);
+ out:
+-      kfree(pages_to_free);
++      if (pages_to_free != static_buf)
++              kfree(pages_to_free);
+       return nr_free;
+ }
+ 
+@@ -531,7 +537,8 @@ static void ttm_dma_free_pool(struct device *dev, enum pool_type type)
+               if (pool->type != type)
+                       continue;
+               /* Takes a spinlock.. */
+-              ttm_dma_page_pool_free(pool, FREE_ALL_PAGES, GFP_KERNEL);
++              /* OK to use static buffer since global mutex is held. */
++              ttm_dma_page_pool_free(pool, FREE_ALL_PAGES, true);
+               WARN_ON(((pool->npages_in_use + pool->npages_free) != 0));
+               /* This code path is called after _all_ references to the
+                * struct device has been dropped - so nobody should be
+@@ -984,7 +991,7 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
+ 
+       /* shrink pool if necessary (only on !is_cached pools)*/
+       if (npages)
+-              ttm_dma_page_pool_free(pool, npages, GFP_KERNEL);
++              ttm_dma_page_pool_free(pool, npages, false);
+       ttm->state = tt_unpopulated;
+ }
+ EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
+@@ -994,8 +1001,6 @@ EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
+  *
+  * XXX: (dchinner) Deadlock warning!
+  *
+- * We need to pass sc->gfp_mask to ttm_dma_page_pool_free().
+- *
+  * I'm getting sadder as I hear more pathetical whimpers about needing per-pool
+  * shrinkers
+  */
+@@ -1028,8 +1033,8 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
+               if (++idx < pool_offset)
+                       continue;
+               nr_free = shrink_pages;
+-              shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free,
+-                                                    sc->gfp_mask);
++              /* OK to use static buffer since global mutex is held. */
++              shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free, true);
+               freed += nr_free - shrink_pages;
+ 
+               pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+index 436b013b4231..b65272d7ea56 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+@@ -1049,6 +1049,8 @@ static int vmw_event_fence_action_create(struct drm_file *file_priv,
+       if (ret != 0)
+               goto out_no_queue;
+ 
++      return 0;
++
+ out_no_queue:
+       event->base.destroy(&event->base);
+ out_no_event:
+@@ -1124,17 +1126,10 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
+ 
+       BUG_ON(fence == NULL);
+ 
+-      if (arg->flags & DRM_VMW_FE_FLAG_REQ_TIME)
+-              ret = vmw_event_fence_action_create(file_priv, fence,
+-                                                  arg->flags,
+-                                                  arg->user_data,
+-                                                  true);
+-      else
+-              ret = vmw_event_fence_action_create(file_priv, fence,
+-                                                  arg->flags,
+-                                                  arg->user_data,
+-                                                  true);
+-
++      ret = vmw_event_fence_action_create(file_priv, fence,
++                                          arg->flags,
++                                          arg->user_data,
++                                          true);
+       if (unlikely(ret != 0)) {
+               if (ret != -ERESTARTSYS)
+                       DRM_ERROR("Failed to attach event to fence.\n");
+diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
+index a96cfc31372e..60142274fe4b 100644
+--- a/drivers/infiniband/ulp/isert/ib_isert.c
++++ b/drivers/infiniband/ulp/isert/ib_isert.c
+@@ -41,6 +41,7 @@ static DEFINE_MUTEX(device_list_mutex);
+ static LIST_HEAD(device_list);
+ static struct workqueue_struct *isert_rx_wq;
+ static struct workqueue_struct *isert_comp_wq;
++static struct workqueue_struct *isert_release_wq;
+ 
+ static void
+ isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
+@@ -52,6 +53,11 @@ isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
+ static int
+ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+              struct isert_rdma_wr *wr);
++static int
++isert_rdma_post_recvl(struct isert_conn *isert_conn);
++static int
++isert_rdma_accept(struct isert_conn *isert_conn);
++struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np);
+ 
+ static void
+ isert_qp_event_callback(struct ib_event *e, void *context)
+@@ -132,12 +138,18 @@ isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
+       ret = rdma_create_qp(cma_id, isert_conn->conn_pd, &attr);
+       if (ret) {
+               pr_err("rdma_create_qp failed for cma_id %d\n", ret);
+-              return ret;
++              goto err;
+       }
+       isert_conn->conn_qp = cma_id->qp;
+       pr_debug("rdma_create_qp() returned success 
>>>>>>>>>>>>>>>>>>>>>>>>>.\n");
+ 
+       return 0;
++err:
++      mutex_lock(&device_list_mutex);
++      device->cq_active_qps[min_index]--;
++      mutex_unlock(&device_list_mutex);
++
++      return ret;
+ }
+ 
+ static void
+@@ -489,8 +501,8 @@ err:
+ static int
+ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
+ {
+-      struct iscsi_np *np = cma_id->context;
+-      struct isert_np *isert_np = np->np_context;
++      struct isert_np *isert_np = cma_id->context;
++      struct iscsi_np *np = isert_np->np;
+       struct isert_conn *isert_conn;
+       struct isert_device *device;
+       struct ib_device *ib_dev = cma_id->device;
+@@ -515,6 +527,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
+       isert_conn->state = ISER_CONN_INIT;
+       INIT_LIST_HEAD(&isert_conn->conn_accept_node);
+       init_completion(&isert_conn->conn_login_comp);
++      init_completion(&isert_conn->login_req_comp);
+       init_completion(&isert_conn->conn_wait);
+       init_completion(&isert_conn->conn_wait_comp_err);
+       kref_init(&isert_conn->conn_kref);
+@@ -522,7 +535,6 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
+       spin_lock_init(&isert_conn->conn_lock);
+       INIT_LIST_HEAD(&isert_conn->conn_fr_pool);
+ 
+-      cma_id->context = isert_conn;
+       isert_conn->conn_cm_id = cma_id;
+       isert_conn->responder_resources = event->param.conn.responder_resources;
+       isert_conn->initiator_depth = event->param.conn.initiator_depth;
+@@ -596,6 +608,14 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
+       if (ret)
+               goto out_conn_dev;
+ 
++      ret = isert_rdma_post_recvl(isert_conn);
++      if (ret)
++              goto out_conn_dev;
++
++      ret = isert_rdma_accept(isert_conn);
++      if (ret)
++              goto out_conn_dev;
++
+       mutex_lock(&isert_np->np_accept_mutex);
+       list_add_tail(&isert_conn->conn_accept_node, &isert_np->np_accept_list);
+       mutex_unlock(&isert_np->np_accept_mutex);
+@@ -620,6 +640,7 @@ out_login_buf:
+       kfree(isert_conn->login_buf);
+ out:
+       kfree(isert_conn);
++      rdma_reject(cma_id, NULL, 0);
+       return ret;
+ }
+ 
+@@ -635,18 +656,20 @@ isert_connect_release(struct isert_conn *isert_conn)
+       if (device && device->use_fastreg)
+               isert_conn_free_fastreg_pool(isert_conn);
+ 
++      isert_free_rx_descriptors(isert_conn);
++      rdma_destroy_id(isert_conn->conn_cm_id);
++
+       if (isert_conn->conn_qp) {
+               cq_index = ((struct isert_cq_desc *)
+                       isert_conn->conn_qp->recv_cq->cq_context)->cq_index;
+               pr_debug("isert_connect_release: cq_index: %d\n", cq_index);
++              mutex_lock(&device_list_mutex);
+               isert_conn->conn_device->cq_active_qps[cq_index]--;
++              mutex_unlock(&device_list_mutex);
+ 
+-              rdma_destroy_qp(isert_conn->conn_cm_id);
++              ib_destroy_qp(isert_conn->conn_qp);
+       }
+ 
+-      isert_free_rx_descriptors(isert_conn);
+-      rdma_destroy_id(isert_conn->conn_cm_id);
+-
+       ib_dereg_mr(isert_conn->conn_mr);
+       ib_dealloc_pd(isert_conn->conn_pd);
+ 
+@@ -669,9 +692,19 @@ isert_connect_release(struct isert_conn *isert_conn)
+ static void
+ isert_connected_handler(struct rdma_cm_id *cma_id)
+ {
+-      struct isert_conn *isert_conn = cma_id->context;
++      struct isert_conn *isert_conn = cma_id->qp->qp_context;
++
++      pr_info("conn %p\n", isert_conn);
+ 
+-      kref_get(&isert_conn->conn_kref);
++      if (!kref_get_unless_zero(&isert_conn->conn_kref)) {
++              pr_warn("conn %p connect_release is running\n", isert_conn);
++              return;
++      }
++
++      mutex_lock(&isert_conn->conn_mutex);
++      if (isert_conn->state != ISER_CONN_FULL_FEATURE)
++              isert_conn->state = ISER_CONN_UP;
++      mutex_unlock(&isert_conn->conn_mutex);
+ }
+ 
+ static void
+@@ -692,65 +725,108 @@ isert_put_conn(struct isert_conn *isert_conn)
+       kref_put(&isert_conn->conn_kref, isert_release_conn_kref);
+ }
+ 
++/**
++ * isert_conn_terminate() - Initiate connection termination
++ * @isert_conn: isert connection struct
++ *
++ * Notes:
++ * In case the connection state is FULL_FEATURE, move state
++ * to TEMINATING and start teardown sequence (rdma_disconnect).
++ * In case the connection state is UP, complete flush as well.
++ *
++ * This routine must be called with conn_mutex held. Thus it is
++ * safe to call multiple times.
++ */
+ static void
+-isert_disconnect_work(struct work_struct *work)
++isert_conn_terminate(struct isert_conn *isert_conn)
+ {
+-      struct isert_conn *isert_conn = container_of(work,
+-                              struct isert_conn, conn_logout_work);
++      int err;
+ 
+-      pr_debug("isert_disconnect_work(): 
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
+-      mutex_lock(&isert_conn->conn_mutex);
+-      if (isert_conn->state == ISER_CONN_UP)
++      switch (isert_conn->state) {
++      case ISER_CONN_TERMINATING:
++              break;
++      case ISER_CONN_UP:
++              /*
++               * No flush completions will occur as we didn't
++               * get to ISER_CONN_FULL_FEATURE yet, complete
++               * to allow teardown progress.
++               */
++              complete(&isert_conn->conn_wait_comp_err);
++      case ISER_CONN_FULL_FEATURE: /* FALLTHRU */
++              pr_info("Terminating conn %p state %d\n",
++                         isert_conn, isert_conn->state);
+               isert_conn->state = ISER_CONN_TERMINATING;
+-
+-      if (isert_conn->post_recv_buf_count == 0 &&
+-          atomic_read(&isert_conn->post_send_buf_count) == 0) {
+-              mutex_unlock(&isert_conn->conn_mutex);
+-              goto wake_up;
+-      }
+-      if (!isert_conn->conn_cm_id) {
+-              mutex_unlock(&isert_conn->conn_mutex);
+-              isert_put_conn(isert_conn);
+-              return;
++              err = rdma_disconnect(isert_conn->conn_cm_id);
++              if (err)
++                      pr_warn("Failed rdma_disconnect isert_conn %p\n",
++                                 isert_conn);
++              break;
++      default:
++              pr_warn("conn %p teminating in state %d\n",
++                         isert_conn, isert_conn->state);
+       }
++}
+ 
+-      if (isert_conn->disconnect) {
+-              /* Send DREQ/DREP towards our initiator */
+-              rdma_disconnect(isert_conn->conn_cm_id);
+-      }
++static int
++isert_np_cma_handler(struct isert_np *isert_np,
++                   enum rdma_cm_event_type event)
++{
++      pr_debug("isert np %p, handling event %d\n", isert_np, event);
+ 
+-      mutex_unlock(&isert_conn->conn_mutex);
++      switch (event) {
++      case RDMA_CM_EVENT_DEVICE_REMOVAL:
++              isert_np->np_cm_id = NULL;
++              break;
++      case RDMA_CM_EVENT_ADDR_CHANGE:
++              isert_np->np_cm_id = isert_setup_id(isert_np);
++              if (IS_ERR(isert_np->np_cm_id)) {
++                      pr_err("isert np %p setup id failed: %ld\n",
++                               isert_np, PTR_ERR(isert_np->np_cm_id));
++                      isert_np->np_cm_id = NULL;
++              }
++              break;
++      default:
++              pr_err("isert np %p Unexpected event %d\n",
++                        isert_np, event);
++      }
+ 
+-wake_up:
+-      complete(&isert_conn->conn_wait);
++      return -1;
+ }
+ 
+ static int
+-isert_disconnected_handler(struct rdma_cm_id *cma_id, bool disconnect)
++isert_disconnected_handler(struct rdma_cm_id *cma_id,
++                         enum rdma_cm_event_type event)
+ {
++      struct isert_np *isert_np = cma_id->context;
+       struct isert_conn *isert_conn;
+ 
+-      if (!cma_id->qp) {
+-              struct isert_np *isert_np = cma_id->context;
++      if (isert_np->np_cm_id == cma_id)
++              return isert_np_cma_handler(cma_id->context, event);
+ 
+-              isert_np->np_cm_id = NULL;
+-              return -1;
+-      }
++      isert_conn = cma_id->qp->qp_context;
+ 
+-      isert_conn = (struct isert_conn *)cma_id->context;
++      mutex_lock(&isert_conn->conn_mutex);
++      isert_conn_terminate(isert_conn);
++      mutex_unlock(&isert_conn->conn_mutex);
+ 
+-      isert_conn->disconnect = disconnect;
+-      INIT_WORK(&isert_conn->conn_logout_work, isert_disconnect_work);
+-      schedule_work(&isert_conn->conn_logout_work);
++      pr_info("conn %p completing conn_wait\n", isert_conn);
++      complete(&isert_conn->conn_wait);
+ 
+       return 0;
+ }
+ 
++static void
++isert_connect_error(struct rdma_cm_id *cma_id)
++{
++      struct isert_conn *isert_conn = cma_id->qp->qp_context;
++
++      isert_put_conn(isert_conn);
++}
++
+ static int
+ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
+ {
+       int ret = 0;
+-      bool disconnect = false;
+ 
+       pr_debug("isert_cma_handler: event %d status %d conn %p id %p\n",
+                event->event, event->status, cma_id->context, cma_id);
+@@ -768,11 +844,14 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
+       case RDMA_CM_EVENT_ADDR_CHANGE:    /* FALLTHRU */
+       case RDMA_CM_EVENT_DISCONNECTED:   /* FALLTHRU */
+       case RDMA_CM_EVENT_DEVICE_REMOVAL: /* FALLTHRU */
+-              disconnect = true;
+       case RDMA_CM_EVENT_TIMEWAIT_EXIT:  /* FALLTHRU */
+-              ret = isert_disconnected_handler(cma_id, disconnect);
++              ret = isert_disconnected_handler(cma_id, event->event);
+               break;
++      case RDMA_CM_EVENT_REJECTED:       /* FALLTHRU */
++      case RDMA_CM_EVENT_UNREACHABLE:    /* FALLTHRU */
+       case RDMA_CM_EVENT_CONNECT_ERROR:
++              isert_connect_error(cma_id);
++              break;
+       default:
+               pr_err("Unhandled RDMA CMA event: %d\n", event->event);
+               break;
+@@ -906,7 +985,7 @@ isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
+        * bit for every ISERT_COMP_BATCH_COUNT number of ib_post_send() calls.
+        */
+       mutex_lock(&isert_conn->conn_mutex);
+-      if (coalesce && isert_conn->state == ISER_CONN_UP &&
++      if (coalesce && isert_conn->state == ISER_CONN_FULL_FEATURE &&
+           ++isert_conn->conn_comp_batch < ISERT_COMP_BATCH_COUNT) {
+               tx_desc->llnode_active = true;
+               llist_add(&tx_desc->comp_llnode, &isert_conn->conn_comp_llist);
+@@ -1003,7 +1082,10 @@ isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
+                       if (ret)
+                               return ret;
+ 
+-                      isert_conn->state = ISER_CONN_UP;
++                      /* Now we are in FULL_FEATURE phase */
++                      mutex_lock(&isert_conn->conn_mutex);
++                      isert_conn->state = ISER_CONN_FULL_FEATURE;
++                      mutex_unlock(&isert_conn->conn_mutex);
+                       goto post_send;
+               }
+ 
+@@ -1020,18 +1102,17 @@ post_send:
+ }
+ 
+ static void
+-isert_rx_login_req(struct iser_rx_desc *rx_desc, int rx_buflen,
+-                 struct isert_conn *isert_conn)
++isert_rx_login_req(struct isert_conn *isert_conn)
+ {
++      struct iser_rx_desc *rx_desc = (void *)isert_conn->login_req_buf;
++      int rx_buflen = isert_conn->login_req_len;
+       struct iscsi_conn *conn = isert_conn->conn;
+       struct iscsi_login *login = conn->conn_login;
+       int size;
+ 
+-      if (!login) {
+-              pr_err("conn->conn_login is NULL\n");
+-              dump_stack();
+-              return;
+-      }
++      pr_info("conn %p\n", isert_conn);
++
++      WARN_ON_ONCE(!login);
+ 
+       if (login->first_request) {
+               struct iscsi_login_req *login_req =
+@@ -1394,11 +1475,20 @@ isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn,
+                hdr->opcode, hdr->itt, hdr->flags,
+                (int)(xfer_len - ISER_HEADERS_LEN));
+ 
+-      if ((char *)desc == isert_conn->login_req_buf)
+-              isert_rx_login_req(desc, xfer_len - ISER_HEADERS_LEN,
+-                                 isert_conn);
+-      else
++      if ((char *)desc == isert_conn->login_req_buf) {
++              isert_conn->login_req_len = xfer_len - ISER_HEADERS_LEN;
++              if (isert_conn->conn) {
++                      struct iscsi_login *login = isert_conn->conn->conn_login;
++
++                      if (login && !login->first_request)
++                              isert_rx_login_req(isert_conn);
++              }
++              mutex_lock(&isert_conn->conn_mutex);
++              complete(&isert_conn->login_req_comp);
++              mutex_unlock(&isert_conn->conn_mutex);
++      } else {
+               isert_rx_do_work(desc, isert_conn);
++      }
+ 
+       ib_dma_sync_single_for_device(ib_dev, rx_dma, rx_buflen,
+                                     DMA_FROM_DEVICE);
+@@ -1799,7 +1889,7 @@ isert_cq_rx_comp_err(struct isert_conn *isert_conn)
+               msleep(3000);
+ 
+       mutex_lock(&isert_conn->conn_mutex);
+-      isert_conn->state = ISER_CONN_DOWN;
++      isert_conn_terminate(isert_conn);
+       mutex_unlock(&isert_conn->conn_mutex);
+ 
+       iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
+@@ -2579,13 +2669,51 @@ isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
+       return ret;
+ }
+ 
++struct rdma_cm_id *
++isert_setup_id(struct isert_np *isert_np)
++{
++      struct iscsi_np *np = isert_np->np;
++      struct rdma_cm_id *id;
++      struct sockaddr *sa;
++      int ret;
++
++      sa = (struct sockaddr *)&np->np_sockaddr;
++      pr_debug("ksockaddr: %p, sa: %p\n", &np->np_sockaddr, sa);
++
++      id = rdma_create_id(isert_cma_handler, isert_np,
++                          RDMA_PS_TCP, IB_QPT_RC);
++      if (IS_ERR(id)) {
++              pr_err("rdma_create_id() failed: %ld\n", PTR_ERR(id));
++              ret = PTR_ERR(id);
++              goto out;
++      }
++      pr_debug("id %p context %p\n", id, id->context);
++
++      ret = rdma_bind_addr(id, sa);
++      if (ret) {
++              pr_err("rdma_bind_addr() failed: %d\n", ret);
++              goto out_id;
++      }
++
++      ret = rdma_listen(id, ISERT_RDMA_LISTEN_BACKLOG);
++      if (ret) {
++              pr_err("rdma_listen() failed: %d\n", ret);
++              goto out_id;
++      }
++
++      return id;
++out_id:
++      rdma_destroy_id(id);
++out:
++      return ERR_PTR(ret);
++}
++
+ static int
+ isert_setup_np(struct iscsi_np *np,
+              struct __kernel_sockaddr_storage *ksockaddr)
+ {
+       struct isert_np *isert_np;
+       struct rdma_cm_id *isert_lid;
+-      struct sockaddr *sa;
+       int ret;
+ 
+       isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL);
+@@ -2597,9 +2725,8 @@ isert_setup_np(struct iscsi_np *np,
+       mutex_init(&isert_np->np_accept_mutex);
+       INIT_LIST_HEAD(&isert_np->np_accept_list);
+       init_completion(&isert_np->np_login_comp);
++      isert_np->np = np;
+ 
+-      sa = (struct sockaddr *)ksockaddr;
+-      pr_debug("ksockaddr: %p, sa: %p\n", ksockaddr, sa);
+       /*
+        * Setup the np->np_sockaddr from the passed sockaddr setup
+        * in iscsi_target_configfs.c code..
+@@ -2607,37 +2734,20 @@ isert_setup_np(struct iscsi_np *np,
+       memcpy(&np->np_sockaddr, ksockaddr,
+              sizeof(struct __kernel_sockaddr_storage));
+ 
+-      isert_lid = rdma_create_id(isert_cma_handler, np, RDMA_PS_TCP,
+-                              IB_QPT_RC);
++      isert_lid = isert_setup_id(isert_np);
+       if (IS_ERR(isert_lid)) {
+-              pr_err("rdma_create_id() for isert_listen_handler failed: %ld\n",
+-                     PTR_ERR(isert_lid));
+               ret = PTR_ERR(isert_lid);
+               goto out;
+       }
+ 
+-      ret = rdma_bind_addr(isert_lid, sa);
+-      if (ret) {
+-              pr_err("rdma_bind_addr() for isert_lid failed: %d\n", ret);
+-              goto out_lid;
+-      }
+-
+-      ret = rdma_listen(isert_lid, ISERT_RDMA_LISTEN_BACKLOG);
+-      if (ret) {
+-              pr_err("rdma_listen() for isert_lid failed: %d\n", ret);
+-              goto out_lid;
+-      }
+-
+       isert_np->np_cm_id = isert_lid;
+       np->np_context = isert_np;
+-      pr_debug("Setup isert_lid->context: %p\n", isert_lid->context);
+ 
+       return 0;
+ 
+-out_lid:
+-      rdma_destroy_id(isert_lid);
+ out:
+       kfree(isert_np);
++
+       return ret;
+ }
+ 
+@@ -2673,7 +2783,15 @@ isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
+       struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
+       int ret;
+ 
+-      pr_debug("isert_get_login_rx before conn_login_comp conn: %p\n", conn);
++      pr_info("before login_req comp conn: %p\n", isert_conn);
++      ret = wait_for_completion_interruptible(&isert_conn->login_req_comp);
++      if (ret) {
++              pr_err("isert_conn %p interrupted before got login req\n",
++                        isert_conn);
++              return ret;
++      }
++      reinit_completion(&isert_conn->login_req_comp);
++
+       /*
+        * For login requests after the first PDU, isert_rx_login_req() will
+        * kick schedule_delayed_work(&conn->login_work) as the packet is
+@@ -2683,11 +2801,15 @@ isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
+       if (!login->first_request)
+               return 0;
+ 
++      isert_rx_login_req(isert_conn);
++
++      pr_info("before conn_login_comp conn: %p\n", conn);
+       ret = wait_for_completion_interruptible(&isert_conn->conn_login_comp);
+       if (ret)
+               return ret;
+ 
+-      pr_debug("isert_get_login_rx processing login->req: %p\n", login->req);
++      pr_info("processing login->req: %p\n", login->req);
++
+       return 0;
+ }
+ 
+@@ -2765,17 +2887,10 @@ accept_wait:
+       isert_conn->conn = conn;
+       max_accept = 0;
+ 
+-      ret = isert_rdma_post_recvl(isert_conn);
+-      if (ret)
+-              return ret;
+-
+-      ret = isert_rdma_accept(isert_conn);
+-      if (ret)
+-              return ret;
+-
+       isert_set_conn_info(np, conn, isert_conn);
+ 
+-      pr_debug("Processing isert_accept_np: isert_conn: %p\n", isert_conn);
++      pr_debug("Processing isert_conn: %p\n", isert_conn);
++
+       return 0;
+ }
+ 
+@@ -2791,6 +2906,24 @@ isert_free_np(struct iscsi_np *np)
+       kfree(isert_np);
+ }
+ 
++static void isert_release_work(struct work_struct *work)
++{
++      struct isert_conn *isert_conn = container_of(work,
++                                                   struct isert_conn,
++                                                   release_work);
++
++      pr_info("Starting release conn %p\n", isert_conn);
++
++      wait_for_completion(&isert_conn->conn_wait);
++
++      mutex_lock(&isert_conn->conn_mutex);
++      isert_conn->state = ISER_CONN_DOWN;
++      mutex_unlock(&isert_conn->conn_mutex);
++
++      pr_info("Destroying conn %p\n", isert_conn);
++      isert_put_conn(isert_conn);
++}
++
+ static void isert_wait_conn(struct iscsi_conn *conn)
+ {
+       struct isert_conn *isert_conn = conn->context;
+@@ -2798,10 +2931,6 @@ static void isert_wait_conn(struct iscsi_conn *conn)
+       pr_debug("isert_wait_conn: Starting \n");
+ 
+       mutex_lock(&isert_conn->conn_mutex);
+-      if (isert_conn->conn_cm_id) {
+-              pr_debug("Calling rdma_disconnect from isert_wait_conn\n");
+-              rdma_disconnect(isert_conn->conn_cm_id);
+-      }
+       /*
+        * Only wait for conn_wait_comp_err if the isert_conn made it
+        * into full feature phase..
+@@ -2810,14 +2939,13 @@ static void isert_wait_conn(struct iscsi_conn *conn)
+               mutex_unlock(&isert_conn->conn_mutex);
+               return;
+       }
+-      if (isert_conn->state == ISER_CONN_UP)
+-              isert_conn->state = ISER_CONN_TERMINATING;
++      isert_conn_terminate(isert_conn);
+       mutex_unlock(&isert_conn->conn_mutex);
+ 
+       wait_for_completion(&isert_conn->conn_wait_comp_err);
+ 
+-      wait_for_completion(&isert_conn->conn_wait);
+-      isert_put_conn(isert_conn);
++      INIT_WORK(&isert_conn->release_work, isert_release_work);
++      queue_work(isert_release_wq, &isert_conn->release_work);
+ }
+ 
+ static void isert_free_conn(struct iscsi_conn *conn)
+@@ -2863,10 +2991,21 @@ static int __init isert_init(void)
+               goto destroy_rx_wq;
+       }
+ 
++      isert_release_wq = alloc_workqueue("isert_release_wq", WQ_UNBOUND,
++                                      WQ_UNBOUND_MAX_ACTIVE);
++      if (!isert_release_wq) {
++              pr_err("Unable to allocate isert_release_wq\n");
++              ret = -ENOMEM;
++              goto destroy_comp_wq;
++      }
++
+       iscsit_register_transport(&iser_target_transport);
+-      pr_debug("iSER_TARGET[0] - Loaded iser_target_transport\n");
++      pr_info("iSER_TARGET[0] - Loaded iser_target_transport\n");
++
+       return 0;
+ 
++destroy_comp_wq:
++      destroy_workqueue(isert_comp_wq);
+ destroy_rx_wq:
+       destroy_workqueue(isert_rx_wq);
+       return ret;
+@@ -2875,6 +3014,7 @@ destroy_rx_wq:
+ static void __exit isert_exit(void)
+ {
+       flush_scheduled_work();
++      destroy_workqueue(isert_release_wq);
+       destroy_workqueue(isert_comp_wq);
+       destroy_workqueue(isert_rx_wq);
+       iscsit_unregister_transport(&iser_target_transport);
+diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
+index cbecaabe90b9..1178c5b6800c 100644
+--- a/drivers/infiniband/ulp/isert/ib_isert.h
++++ b/drivers/infiniband/ulp/isert/ib_isert.h
+@@ -23,6 +23,7 @@ enum iser_ib_op_code {
+ enum iser_conn_state {
+       ISER_CONN_INIT,
+       ISER_CONN_UP,
++      ISER_CONN_FULL_FEATURE,
+       ISER_CONN_TERMINATING,
+       ISER_CONN_DOWN,
+ };
+@@ -102,6 +103,7 @@ struct isert_conn {
+       char                    *login_req_buf;
+       char                    *login_rsp_buf;
+       u64                     login_req_dma;
++      int                     login_req_len;
+       u64                     login_rsp_dma;
+       unsigned int            conn_rx_desc_head;
+       struct iser_rx_desc     *conn_rx_descs;
+@@ -109,13 +111,13 @@ struct isert_conn {
+       struct iscsi_conn       *conn;
+       struct list_head        conn_accept_node;
+       struct completion       conn_login_comp;
++      struct completion       login_req_comp;
+       struct iser_tx_desc     conn_login_tx_desc;
+       struct rdma_cm_id       *conn_cm_id;
+       struct ib_pd            *conn_pd;
+       struct ib_mr            *conn_mr;
+       struct ib_qp            *conn_qp;
+       struct isert_device     *conn_device;
+-      struct work_struct      conn_logout_work;
+       struct mutex            conn_mutex;
+       struct completion       conn_wait;
+       struct completion       conn_wait_comp_err;
+@@ -124,10 +126,10 @@ struct isert_conn {
+       int                     conn_fr_pool_size;
+       /* lock to protect fastreg pool */
+       spinlock_t              conn_lock;
++      struct work_struct      release_work;
+ #define ISERT_COMP_BATCH_COUNT        8
+       int                     conn_comp_batch;
+       struct llist_head       conn_comp_llist;
+-      bool                    disconnect;
+ };
+ 
+ #define ISERT_MAX_CQ 64
+@@ -158,6 +160,7 @@ struct isert_device {
+ };
+ 
+ struct isert_np {
++      struct iscsi_np         *np;
+       struct semaphore        np_sem;
+       struct rdma_cm_id       *np_cm_id;
+       struct mutex            np_accept_mutex;
+diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
+index 8fca488fdc15..c43c46f7dcd0 100644
+--- a/drivers/input/serio/i8042-x86ia64io.h
++++ b/drivers/input/serio/i8042-x86ia64io.h
+@@ -408,6 +408,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
+               },
+       },
+       {
++              /* Acer Aspire 7738 */
++              .matches = {
++                      DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
++                      DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 7738"),
++              },
++      },
++      {
+               /* Gericom Bellagio */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Gericom"),
+@@ -721,6 +728,35 @@ static const struct dmi_system_id __initconst i8042_dmi_dritek_table[] = {
+       { }
+ };
+ 
++/*
++ * Some laptops need keyboard reset before probing for the trackpad to get
++ * it detected, initialised & finally work.
++ */
++static const struct dmi_system_id __initconst i8042_dmi_kbdreset_table[] = {
++      {
++              /* Gigabyte P35 v2 - Elantech touchpad */
++              .matches = {
++                      DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
++                      DMI_MATCH(DMI_PRODUCT_NAME, "P35V2"),
++              },
++      },
++              {
++              /* Aorus branded Gigabyte X3 Plus - Elantech touchpad */
++              .matches = {
++                      DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
++                      DMI_MATCH(DMI_PRODUCT_NAME, "X3"),
++              },
++      },
++      {
++              /* Gigabyte P34 - Elantech touchpad */
++              .matches = {
++                      DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
++                      DMI_MATCH(DMI_PRODUCT_NAME, "P34"),
++              },
++      },
++      { }
++};
++
+ #endif /* CONFIG_X86 */
+ 
+ #ifdef CONFIG_PNP
+@@ -1016,6 +1052,9 @@ static int __init i8042_platform_init(void)
+       if (dmi_check_system(i8042_dmi_dritek_table))
+               i8042_dritek = true;
+ 
++      if (dmi_check_system(i8042_dmi_kbdreset_table))
++              i8042_kbdreset = true;
++
+       /*
+        * A20 was already enabled during early kernel init. But some buggy
+        * BIOSes (in MSI Laptops) require A20 to be enabled using 8042 to
+diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
+index 3807c3e971cc..eb796fff9e62 100644
+--- a/drivers/input/serio/i8042.c
++++ b/drivers/input/serio/i8042.c
+@@ -67,6 +67,10 @@ static bool i8042_notimeout;
+ module_param_named(notimeout, i8042_notimeout, bool, 0);
+ MODULE_PARM_DESC(notimeout, "Ignore timeouts signalled by i8042");
+ 
++static bool i8042_kbdreset;
++module_param_named(kbdreset, i8042_kbdreset, bool, 0);
++MODULE_PARM_DESC(kbdreset, "Reset device connected to KBD port");
++
+ #ifdef CONFIG_X86
+ static bool i8042_dritek;
+ module_param_named(dritek, i8042_dritek, bool, 0);
+@@ -790,6 +794,16 @@ static int __init i8042_check_aux(void)
+               return -1;
+ 
+ /*
++ * Reset keyboard (needed on some laptops to successfully detect
++ * touchpad, e.g., some Gigabyte laptop models with Elantech
++ * touchpads).
++ */
++      if (i8042_kbdreset) {
++              pr_warn("Attempting to reset device connected to KBD port\n");
++              i8042_kbd_write(NULL, (unsigned char) 0xff);
++      }
++
++/*
+  * Test AUX IRQ delivery to make sure BIOS did not grab the IRQ and
+  * used it for a PCI card or somethig else.
+  */
+diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
+index 5f9c2a665ca5..fbcb6225f794 100644
+--- a/drivers/md/bcache/btree.c
++++ b/drivers/md/bcache/btree.c
+@@ -199,7 +199,7 @@ void bch_btree_node_read_done(struct btree *b)
+       struct bset *i = btree_bset_first(b);
+       struct btree_iter *iter;
+ 
+-      iter = mempool_alloc(b->c->fill_iter, GFP_NOWAIT);
++      iter = mempool_alloc(b->c->fill_iter, GFP_NOIO);
+       iter->size = b->c->sb.bucket_size / b->c->sb.block_size;
+       iter->used = 0;
+ 
+diff --git a/drivers/media/i2c/smiapp-pll.c b/drivers/media/i2c/smiapp-pll.c
+index 2335529b195c..ab5d9a3adebf 100644
+--- a/drivers/media/i2c/smiapp-pll.c
++++ b/drivers/media/i2c/smiapp-pll.c
+@@ -67,7 +67,7 @@ static void print_pll(struct device *dev, struct smiapp_pll *pll)
+ {
+       dev_dbg(dev, "pre_pll_clk_div\t%d\n",  pll->pre_pll_clk_div);
+       dev_dbg(dev, "pll_multiplier \t%d\n",  pll->pll_multiplier);
+-      if (pll->flags != SMIAPP_PLL_FLAG_NO_OP_CLOCKS) {
++      if (!(pll->flags & SMIAPP_PLL_FLAG_NO_OP_CLOCKS)) {
+               dev_dbg(dev, "op_sys_clk_div \t%d\n", pll->op_sys_clk_div);
+               dev_dbg(dev, "op_pix_clk_div \t%d\n", pll->op_pix_clk_div);
+       }
+@@ -77,7 +77,7 @@ static void print_pll(struct device *dev, struct smiapp_pll *pll)
+       dev_dbg(dev, "ext_clk_freq_hz \t%d\n", pll->ext_clk_freq_hz);
+       dev_dbg(dev, "pll_ip_clk_freq_hz \t%d\n", pll->pll_ip_clk_freq_hz);
+       dev_dbg(dev, "pll_op_clk_freq_hz \t%d\n", pll->pll_op_clk_freq_hz);
+-      if (pll->flags & SMIAPP_PLL_FLAG_NO_OP_CLOCKS) {
++      if (!(pll->flags & SMIAPP_PLL_FLAG_NO_OP_CLOCKS)) {
+               dev_dbg(dev, "op_sys_clk_freq_hz \t%d\n",
+                       pll->op_sys_clk_freq_hz);
+               dev_dbg(dev, "op_pix_clk_freq_hz \t%d\n",
+diff --git a/drivers/media/i2c/smiapp/smiapp-core.c b/drivers/media/i2c/smiapp/smiapp-core.c
+index 7026ab08ec91..873d0627a75b 100644
+--- a/drivers/media/i2c/smiapp/smiapp-core.c
++++ b/drivers/media/i2c/smiapp/smiapp-core.c
+@@ -2624,7 +2624,9 @@ static int smiapp_registered(struct v4l2_subdev *subdev)
+               pll->flags |= SMIAPP_PLL_FLAG_OP_PIX_CLOCK_PER_LANE;
+       pll->scale_n = sensor->limits[SMIAPP_LIMIT_SCALER_N_MIN];
+ 
++      mutex_lock(&sensor->mutex);
+       rval = smiapp_update_mode(sensor);
++      mutex_unlock(&sensor->mutex);
+       if (rval) {
+               dev_err(&client->dev, "update mode failed\n");
+               goto out_nvm_release;
+diff --git a/drivers/media/usb/au0828/au0828-cards.c b/drivers/media/usb/au0828/au0828-cards.c
+index dd32decb237d..1d4b11038958 100644
+--- a/drivers/media/usb/au0828/au0828-cards.c
++++ b/drivers/media/usb/au0828/au0828-cards.c
+@@ -36,6 +36,11 @@ static void hvr950q_cs5340_audio(void *priv, int enable)
+               au0828_clear(dev, REG_000, 0x10);
+ }
+ 
++/*
++ * WARNING: There's a quirks table at sound/usb/quirks-table.h
++ * that should also be updated every time a new device with V4L2 support
++ * is added here.
++ */
+ struct au0828_board au0828_boards[] = {
+       [AU0828_BOARD_UNKNOWN] = {
+               .name   = "Unknown board",
+diff --git a/drivers/media/usb/dvb-usb/af9005.c b/drivers/media/usb/dvb-usb/af9005.c
+index af176b6ce738..e6d3561eea47 100644
+--- a/drivers/media/usb/dvb-usb/af9005.c
++++ b/drivers/media/usb/dvb-usb/af9005.c
+@@ -1081,9 +1081,12 @@ static int __init af9005_usb_module_init(void)
+               err("usb_register failed. (%d)", result);
+               return result;
+       }
++#if IS_MODULE(CONFIG_DVB_USB_AF9005) || defined(CONFIG_DVB_USB_AF9005_REMOTE)
++      /* FIXME: convert to todays kernel IR infrastructure */
+       rc_decode = symbol_request(af9005_rc_decode);
+       rc_keys = symbol_request(rc_map_af9005_table);
+       rc_keys_size = symbol_request(rc_map_af9005_table_size);
++#endif
+       if (rc_decode == NULL || rc_keys == NULL || rc_keys_size == NULL) {
+               err("af9005_rc_decode function not found, disabling remote");
+               af9005_properties.rc.legacy.rc_query = NULL;
+diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
+index 753ad4cfc118..45314412b4a3 100644
+--- a/drivers/media/usb/uvc/uvc_driver.c
++++ b/drivers/media/usb/uvc/uvc_driver.c
+@@ -1603,12 +1603,12 @@ static void uvc_delete(struct uvc_device *dev)
+ {
+       struct list_head *p, *n;
+ 
+-      usb_put_intf(dev->intf);
+-      usb_put_dev(dev->udev);
+-
+       uvc_status_cleanup(dev);
+       uvc_ctrl_cleanup_device(dev);
+ 
++      usb_put_intf(dev->intf);
++      usb_put_dev(dev->udev);
++
+       if (dev->vdev.dev)
+               v4l2_device_unregister(&dev->vdev);
+ #ifdef CONFIG_MEDIA_CONTROLLER
+diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
+index e77d11049747..4e65b35bebc0 100644
+--- a/drivers/net/can/usb/kvaser_usb.c
++++ b/drivers/net/can/usb/kvaser_usb.c
+@@ -1237,6 +1237,9 @@ static int kvaser_usb_close(struct net_device *netdev)
+       if (err)
+               netdev_warn(netdev, "Cannot stop device, error %d\n", err);
+ 
++      /* reset tx contexts */
++      kvaser_usb_unlink_tx_urbs(priv);
++
+       priv->can.state = CAN_STATE_STOPPED;
+       close_candev(priv->netdev);
+ 
+@@ -1285,12 +1288,14 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
+       if (!urb) {
+               netdev_err(netdev, "No memory left for URBs\n");
+               stats->tx_dropped++;
+-              goto nourbmem;
++              dev_kfree_skb(skb);
++              return NETDEV_TX_OK;
+       }
+ 
+       buf = kmalloc(sizeof(struct kvaser_msg), GFP_ATOMIC);
+       if (!buf) {
+               stats->tx_dropped++;
++              dev_kfree_skb(skb);
+               goto nobufmem;
+       }
+ 
+@@ -1325,6 +1330,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
+               }
+       }
+ 
++      /* This should never happen; it implies a flow control bug */
+       if (!context) {
+               netdev_warn(netdev, "cannot find free context\n");
+               ret =  NETDEV_TX_BUSY;
+@@ -1355,9 +1361,6 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
+       if (unlikely(err)) {
+               can_free_echo_skb(netdev, context->echo_index);
+ 
+-              skb = NULL; /* set to NULL to avoid double free in
+-                           * dev_kfree_skb(skb) */
+-
+               atomic_dec(&priv->active_tx_urbs);
+               usb_unanchor_urb(urb);
+ 
+@@ -1379,8 +1382,6 @@ releasebuf:
+       kfree(buf);
+ nobufmem:
+       usb_free_urb(urb);
+-nourbmem:
+-      dev_kfree_skb(skb);
+       return ret;
+ }
+ 
+@@ -1492,6 +1493,10 @@ static int kvaser_usb_init_one(struct usb_interface *intf,
+       struct kvaser_usb_net_priv *priv;
+       int i, err;
+ 
++      err = kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, channel);
++      if (err)
++              return err;
++
+       netdev = alloc_candev(sizeof(*priv), MAX_TX_URBS);
+       if (!netdev) {
+               dev_err(&intf->dev, "Cannot alloc candev\n");
+@@ -1595,9 +1600,6 @@ static int kvaser_usb_probe(struct usb_interface *intf,
+ 
+       usb_set_intfdata(intf, dev);
+ 
+-      for (i = 0; i < MAX_NET_DEVICES; i++)
+-              kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, i);
+-
+       err = kvaser_usb_get_software_info(dev);
+       if (err) {
+               dev_err(&intf->dev,
+diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
+index 380d24922049..3e1d7d29b4ec 100644
+--- a/drivers/net/ethernet/atheros/alx/main.c
++++ b/drivers/net/ethernet/atheros/alx/main.c
+@@ -184,15 +184,16 @@ static void alx_schedule_reset(struct alx_priv *alx)
+       schedule_work(&alx->reset_wk);
+ }
+ 
+-static bool alx_clean_rx_irq(struct alx_priv *alx, int budget)
++static int alx_clean_rx_irq(struct alx_priv *alx, int budget)
+ {
+       struct alx_rx_queue *rxq = &alx->rxq;
+       struct alx_rrd *rrd;
+       struct alx_buffer *rxb;
+       struct sk_buff *skb;
+       u16 length, rfd_cleaned = 0;
++      int work = 0;
+ 
+-      while (budget > 0) {
++      while (work < budget) {
+               rrd = &rxq->rrd[rxq->rrd_read_idx];
+               if (!(rrd->word3 & cpu_to_le32(1 << RRD_UPDATED_SHIFT)))
+                       break;
+@@ -203,7 +204,7 @@ static bool alx_clean_rx_irq(struct alx_priv *alx, int budget)
+                   ALX_GET_FIELD(le32_to_cpu(rrd->word0),
+                                 RRD_NOR) != 1) {
+                       alx_schedule_reset(alx);
+-                      return 0;
++                      return work;
+               }
+ 
+               rxb = &rxq->bufs[rxq->read_idx];
+@@ -243,7 +244,7 @@ static bool alx_clean_rx_irq(struct alx_priv *alx, int budget)
+               }
+ 
+               napi_gro_receive(&alx->napi, skb);
+-              budget--;
++              work++;
+ 
+ next_pkt:
+               if (++rxq->read_idx == alx->rx_ringsz)
+@@ -258,21 +259,22 @@ next_pkt:
+       if (rfd_cleaned)
+               alx_refill_rx_ring(alx, GFP_ATOMIC);
+ 
+-      return budget > 0;
++      return work;
+ }
+ 
+ static int alx_poll(struct napi_struct *napi, int budget)
+ {
+       struct alx_priv *alx = container_of(napi, struct alx_priv, napi);
+       struct alx_hw *hw = &alx->hw;
+-      bool complete = true;
+       unsigned long flags;
++      bool tx_complete;
++      int work;
+ 
+-      complete = alx_clean_tx_irq(alx) &&
+-                 alx_clean_rx_irq(alx, budget);
++      tx_complete = alx_clean_tx_irq(alx);
++      work = alx_clean_rx_irq(alx, budget);
+ 
+-      if (!complete)
+-              return 1;
++      if (!tx_complete || work == budget)
++              return budget;
+ 
+       napi_complete(&alx->napi);
+ 
+@@ -284,7 +286,7 @@ static int alx_poll(struct napi_struct *napi, int budget)
+ 
+       alx_post_write(hw);
+ 
+-      return 0;
++      return work;
+ }
+ 
+ static irqreturn_t alx_intr_handle(struct alx_priv *alx, u32 intr)
+diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
+index 086eac5af5c2..82061139b215 100644
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -17731,23 +17731,6 @@ static int tg3_init_one(struct pci_dev *pdev,
+               goto err_out_apeunmap;
+       }
+ 
+-      /*
+-       * Reset chip in case UNDI or EFI driver did not shutdown
+-       * DMA self test will enable WDMAC and we'll see (spurious)
+-       * pending DMA on the PCI bus at that point.
+-       */
+-      if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
+-          (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
+-              tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
+-              tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
+-      }
+-
+-      err = tg3_test_dma(tp);
+-      if (err) {
+-              dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
+-              goto err_out_apeunmap;
+-      }
+-
+       intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
+       rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
+       sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
+@@ -17792,6 +17775,23 @@ static int tg3_init_one(struct pci_dev *pdev,
+                       sndmbx += 0xc;
+       }
+ 
++      /*
++       * Reset chip in case UNDI or EFI driver did not shutdown
++       * DMA self test will enable WDMAC and we'll see (spurious)
++       * pending DMA on the PCI bus at that point.
++       */
++      if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
++          (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
++              tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
++              tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
++      }
++
++      err = tg3_test_dma(tp);
++      if (err) {
++              dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
++              goto err_out_apeunmap;
++      }
++
+       tg3_init_coal(tp);
+ 
+       pci_set_drvdata(pdev, dev);
+diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
+index b740bfce72ef..ff9b423805a0 100644
+--- a/drivers/net/ethernet/cisco/enic/enic_main.c
++++ b/drivers/net/ethernet/cisco/enic/enic_main.c
+@@ -1044,10 +1044,14 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
+                                    PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
+               }
+ 
+-              if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc) {
+-                      skb->csum = htons(checksum);
+-                      skb->ip_summed = CHECKSUM_COMPLETE;
+-              }
++              /* Hardware does not provide whole packet checksum. It only
++               * provides pseudo checksum. Since hw validates the packet
++               * checksum but not provide us the checksum value. use
++               * CHECSUM_UNNECESSARY.
++               */
++              if ((netdev->features & NETIF_F_RXCSUM) && tcp_udp_csum_ok &&
++                  ipv4_csum_ok)
++                      skb->ip_summed = CHECKSUM_UNNECESSARY;
+ 
+               if (vlan_stripped)
+                      __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
+diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
+index 921b9df2faca..316650c3b5d7 100644
+--- a/drivers/net/ethernet/ti/cpsw.c
++++ b/drivers/net/ethernet/ti/cpsw.c
+@@ -596,7 +596,7 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
+ 
+                       /* Clear all mcast from ALE */
+                       cpsw_ale_flush_multicast(ale, ALE_ALL_PORTS <<
+-                                               priv->host_port);
++                                               priv->host_port, -1);
+ 
+                       /* Flood All Unicast Packets to Host port */
+                       cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1);
+@@ -620,6 +620,12 @@ static void cpsw_set_promiscious(struct net_device *ndev, 
bool enable)
+ static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
+ {
+       struct cpsw_priv *priv = netdev_priv(ndev);
++      int vid;
++
++      if (priv->data.dual_emac)
++              vid = priv->slaves[priv->emac_port].port_vlan;
++      else
++              vid = priv->data.default_vlan;
+ 
+       if (ndev->flags & IFF_PROMISC) {
+               /* Enable promiscuous mode */
+@@ -631,7 +637,8 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
+       }
+ 
+       /* Clear all mcast from ALE */
+-      cpsw_ale_flush_multicast(priv->ale, ALE_ALL_PORTS << priv->host_port);
++      cpsw_ale_flush_multicast(priv->ale, ALE_ALL_PORTS << priv->host_port,
++                               vid);
+ 
+       if (!netdev_mc_empty(ndev)) {
+               struct netdev_hw_addr *ha;
+@@ -716,6 +723,14 @@ static void cpsw_rx_handler(void *token, int len, int status)
+ static irqreturn_t cpsw_interrupt(int irq, void *dev_id)
+ {
+       struct cpsw_priv *priv = dev_id;
++      int value = irq - priv->irqs_table[0];
++
++      /* NOTICE: Ending IRQ here. The trick with the 'value' variable above
++       * is to make sure we will always write the correct value to the EOI
++       * register. Namely 0 for RX_THRESH Interrupt, 1 for RX Interrupt, 2
++       * for TX Interrupt and 3 for MISC Interrupt.
++       */
++      cpdma_ctlr_eoi(priv->dma, value);
+ 
+       cpsw_intr_disable(priv);
+       if (priv->irq_enabled == true) {
+@@ -745,8 +760,6 @@ static int cpsw_poll(struct napi_struct *napi, int budget)
+       int                     num_tx, num_rx;
+ 
+       num_tx = cpdma_chan_process(priv->txch, 128);
+-      if (num_tx)
+-              cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
+ 
+       num_rx = cpdma_chan_process(priv->rxch, budget);
+       if (num_rx < budget) {
+@@ -754,7 +767,6 @@ static int cpsw_poll(struct napi_struct *napi, int budget)
+ 
+               napi_complete(napi);
+               cpsw_intr_enable(priv);
+-              cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
+               prim_cpsw = cpsw_get_slave_priv(priv, 0);
+               if (prim_cpsw->irq_enabled == false) {
+                       prim_cpsw->irq_enabled = true;
+@@ -1265,8 +1277,6 @@ static int cpsw_ndo_open(struct net_device *ndev)
+       napi_enable(&priv->napi);
+       cpdma_ctlr_start(priv->dma);
+       cpsw_intr_enable(priv);
+-      cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
+-      cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
+ 
+       if (priv->data.dual_emac)
+               priv->slaves[priv->emac_port].open_stat = true;
+@@ -1512,9 +1522,6 @@ static void cpsw_ndo_tx_timeout(struct net_device *ndev)
+       cpdma_chan_start(priv->txch);
+       cpdma_ctlr_int_ctrl(priv->dma, true);
+       cpsw_intr_enable(priv);
+-      cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
+-      cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
+-
+ }
+ 
+ static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
+@@ -1560,9 +1567,6 @@ static void cpsw_ndo_poll_controller(struct net_device *ndev)
+       cpsw_interrupt(ndev->irq, priv);
+       cpdma_ctlr_int_ctrl(priv->dma, true);
+       cpsw_intr_enable(priv);
+-      cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
+-      cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
+-
+ }
+ #endif
+ 
+diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
+index 7f893069c418..4eceb7e42c80 100644
+--- a/drivers/net/ethernet/ti/cpsw_ale.c
++++ b/drivers/net/ethernet/ti/cpsw_ale.c
+@@ -236,7 +236,7 @@ static void cpsw_ale_flush_mcast(struct cpsw_ale *ale, u32 *ale_entry,
+               cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
+ }
+ 
+-int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask)
++int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask, int vid)
+ {
+       u32 ale_entry[ALE_ENTRY_WORDS];
+       int ret, idx;
+@@ -247,6 +247,14 @@ int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask)
+               if (ret != ALE_TYPE_ADDR && ret != ALE_TYPE_VLAN_ADDR)
+                       continue;
+ 
++              /* if vid passed is -1 then remove all multicast entry from
++               * the table irrespective of vlan id, if a valid vlan id is
++               * passed then remove only multicast added to that vlan id.
++               * if vlan id doesn't match then move on to next entry.
++               */
++              if (vid != -1 && cpsw_ale_get_vlan_id(ale_entry) != vid)
++                      continue;
++
+               if (cpsw_ale_get_mcast(ale_entry)) {
+                       u8 addr[6];
+ 
+diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h
+index de409c33b250..e701358fd00b 100644
+--- a/drivers/net/ethernet/ti/cpsw_ale.h
++++ b/drivers/net/ethernet/ti/cpsw_ale.h
+@@ -88,7 +88,7 @@ void cpsw_ale_stop(struct cpsw_ale *ale);
+ 
+ int cpsw_ale_set_ageout(struct cpsw_ale *ale, int ageout);
+ int cpsw_ale_flush(struct cpsw_ale *ale, int port_mask);
+-int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask);
++int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask, int vid);
+ int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port,
+                      int flags, u16 vid);
+ int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port,
+diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
+index 979fe433278c..32efe8371ff8 100644
+--- a/drivers/net/team/team.c
++++ b/drivers/net/team/team.c
+@@ -629,6 +629,7 @@ static int team_change_mode(struct team *team, const char *kind)
+ static void team_notify_peers_work(struct work_struct *work)
+ {
+       struct team *team;
++      int val;
+ 
+       team = container_of(work, struct team, notify_peers.dw.work);
+ 
+@@ -636,9 +637,14 @@ static void team_notify_peers_work(struct work_struct *work)
+               schedule_delayed_work(&team->notify_peers.dw, 0);
+               return;
+       }
++      val = atomic_dec_if_positive(&team->notify_peers.count_pending);
++      if (val < 0) {
++              rtnl_unlock();
++              return;
++      }
+       call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, team->dev);
+       rtnl_unlock();
+-      if (!atomic_dec_and_test(&team->notify_peers.count_pending))
++      if (val)
+               schedule_delayed_work(&team->notify_peers.dw,
+                                     msecs_to_jiffies(team->notify_peers.interval));
+ }
+@@ -669,6 +675,7 @@ static void team_notify_peers_fini(struct team *team)
+ static void team_mcast_rejoin_work(struct work_struct *work)
+ {
+       struct team *team;
++      int val;
+ 
+       team = container_of(work, struct team, mcast_rejoin.dw.work);
+ 
+@@ -676,9 +683,14 @@ static void team_mcast_rejoin_work(struct work_struct *work)
+               schedule_delayed_work(&team->mcast_rejoin.dw, 0);
+               return;
+       }
++      val = atomic_dec_if_positive(&team->mcast_rejoin.count_pending);
++      if (val < 0) {
++              rtnl_unlock();
++              return;
++      }
+       call_netdevice_notifiers(NETDEV_RESEND_IGMP, team->dev);
+       rtnl_unlock();
+-      if (!atomic_dec_and_test(&team->mcast_rejoin.count_pending))
++      if (val)
+               schedule_delayed_work(&team->mcast_rejoin.dw,
+                                     msecs_to_jiffies(team->mcast_rejoin.interval));
+ }
+diff --git a/drivers/platform/x86/hp_accel.c b/drivers/platform/x86/hp_accel.c
+index 3dc934438c28..07fbcb0fb646 100644
+--- a/drivers/platform/x86/hp_accel.c
++++ b/drivers/platform/x86/hp_accel.c
+@@ -237,6 +237,7 @@ static struct dmi_system_id lis3lv02d_dmi_ids[] = {
+       AXIS_DMI_MATCH("HPB64xx", "HP ProBook 64", xy_swap),
+       AXIS_DMI_MATCH("HPB64xx", "HP EliteBook 84", xy_swap),
+       AXIS_DMI_MATCH("HPB65xx", "HP ProBook 65", x_inverted),
++      AXIS_DMI_MATCH("HPZBook15", "HP ZBook 15", x_inverted),
+       { NULL, }
+ /* Laptop models without axis info (yet):
+  * "NC6910" "HP Compaq 6910"
+diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
+index bb86494e2b7b..19915c5b256f 100644
+--- a/drivers/s390/char/con3215.c
++++ b/drivers/s390/char/con3215.c
+@@ -288,12 +288,16 @@ static void raw3215_timeout(unsigned long __data)
+       unsigned long flags;
+ 
+       spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
+-      if (raw->flags & RAW3215_TIMER_RUNS) {
+-              del_timer(&raw->timer);
+-              raw->flags &= ~RAW3215_TIMER_RUNS;
+-              if (!(raw->port.flags & ASYNC_SUSPENDED)) {
+-                      raw3215_mk_write_req(raw);
+-                      raw3215_start_io(raw);
++      raw->flags &= ~RAW3215_TIMER_RUNS;
++      if (!(raw->port.flags & ASYNC_SUSPENDED)) {
++              raw3215_mk_write_req(raw);
++              raw3215_start_io(raw);
++              if ((raw->queued_read || raw->queued_write) &&
++                  !(raw->flags & RAW3215_WORKING) &&
++                  !(raw->flags & RAW3215_TIMER_RUNS)) {
++                      raw->timer.expires = RAW3215_TIMEOUT + jiffies;
++                      add_timer(&raw->timer);
++                      raw->flags |= RAW3215_TIMER_RUNS;
+               }
+       }
+       spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
+@@ -317,17 +321,15 @@ static inline void raw3215_try_io(struct raw3215_info *raw)
+                   (raw->flags & RAW3215_FLUSHING)) {
+                       /* execute write requests bigger than minimum size */
+                       raw3215_start_io(raw);
+-                      if (raw->flags & RAW3215_TIMER_RUNS) {
+-                              del_timer(&raw->timer);
+-                              raw->flags &= ~RAW3215_TIMER_RUNS;
+-                      }
+-              } else if (!(raw->flags & RAW3215_TIMER_RUNS)) {
+-                      /* delay small writes */
+-                      raw->timer.expires = RAW3215_TIMEOUT + jiffies;
+-                      add_timer(&raw->timer);
+-                      raw->flags |= RAW3215_TIMER_RUNS;
+               }
+       }
++      if ((raw->queued_read || raw->queued_write) &&
++          !(raw->flags & RAW3215_WORKING) &&
++          !(raw->flags & RAW3215_TIMER_RUNS)) {
++              raw->timer.expires = RAW3215_TIMEOUT + jiffies;
++              add_timer(&raw->timer);
++              raw->flags |= RAW3215_TIMER_RUNS;
++      }
+ }
+ 
+ /*
+@@ -1027,12 +1029,26 @@ static int tty3215_write(struct tty_struct * tty,
+                        const unsigned char *buf, int count)
+ {
+       struct raw3215_info *raw;
++      int i, written;
+ 
+       if (!tty)
+               return 0;
+       raw = (struct raw3215_info *) tty->driver_data;
+-      raw3215_write(raw, buf, count);
+-      return count;
++      written = count;
++      while (count > 0) {
++              for (i = 0; i < count; i++)
++                      if (buf[i] == '\t' || buf[i] == '\n')
++                              break;
++              raw3215_write(raw, buf, i);
++              count -= i;
++              buf += i;
++              if (count > 0) {
++                      raw3215_putchar(raw, *buf);
++                      count--;
++                      buf++;
++              }
++      }
++      return written;
+ }
+ 
+ /*
+@@ -1180,7 +1196,7 @@ static int __init tty3215_init(void)
+       driver->subtype = SYSTEM_TYPE_TTY;
+       driver->init_termios = tty_std_termios;
+       driver->init_termios.c_iflag = IGNBRK | IGNPAR;
+-      driver->init_termios.c_oflag = ONLCR | XTABS;
++      driver->init_termios.c_oflag = ONLCR;
+       driver->init_termios.c_lflag = ISIG;
+       driver->flags = TTY_DRIVER_REAL_RAW;
+       tty_set_operations(driver, &tty3215_ops);
+diff --git a/drivers/scsi/mpt2sas/mpt2sas_transport.c b/drivers/scsi/mpt2sas/mpt2sas_transport.c
+index 410f4a3e8888..72f9c55d0e00 100644
+--- a/drivers/scsi/mpt2sas/mpt2sas_transport.c
++++ b/drivers/scsi/mpt2sas/mpt2sas_transport.c
+@@ -1006,12 +1006,9 @@ mpt2sas_transport_update_links(struct MPT2SAS_ADAPTER *ioc,
+                   &mpt2sas_phy->remote_identify);
+               _transport_add_phy_to_an_existing_port(ioc, sas_node,
+                   mpt2sas_phy, mpt2sas_phy->remote_identify.sas_address);
+-      } else {
++      } else
+               memset(&mpt2sas_phy->remote_identify, 0 , sizeof(struct
+                   sas_identify));
+-              _transport_del_phy_from_an_existing_port(ioc, sas_node,
+-                  mpt2sas_phy);
+-      }
+ 
+       if (mpt2sas_phy->phy)
+               mpt2sas_phy->phy->negotiated_linkrate =
+diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c
+index 65170cb1a00f..55aa597eb229 100644
+--- a/drivers/scsi/mpt3sas/mpt3sas_transport.c
++++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c
+@@ -1003,12 +1003,9 @@ mpt3sas_transport_update_links(struct MPT3SAS_ADAPTER *ioc,
+                   &mpt3sas_phy->remote_identify);
+               _transport_add_phy_to_an_existing_port(ioc, sas_node,
+                   mpt3sas_phy, mpt3sas_phy->remote_identify.sas_address);
+-      } else {
++      } else
+               memset(&mpt3sas_phy->remote_identify, 0 , sizeof(struct
+                   sas_identify));
+-              _transport_del_phy_from_an_existing_port(ioc, sas_node,
+-                  mpt3sas_phy);
+-      }
+ 
+       if (mpt3sas_phy->phy)
+               mpt3sas_phy->phy->negotiated_linkrate =
+diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
+index c1d04d4d3c6c..262ab837a704 100644
+--- a/drivers/scsi/scsi_devinfo.c
++++ b/drivers/scsi/scsi_devinfo.c
+@@ -211,6 +211,7 @@ static struct {
+       {"Medion", "Flash XL  MMC/SD", "2.6D", BLIST_FORCELUN},
+       {"MegaRAID", "LD", NULL, BLIST_FORCELUN},
+       {"MICROP", "4110", NULL, BLIST_NOTQ},
++      {"MSFT", "Virtual HD", NULL, BLIST_NO_RSOC},
+       {"MYLEX", "DACARMRB", "*", BLIST_REPORTLUN2},
+       {"nCipher", "Fastness Crypto", NULL, BLIST_FORCELUN},
+       {"NAKAMICH", "MJ-4.8S", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
+diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
+index ed0f899e8aa5..86b05151fdab 100644
+--- a/drivers/scsi/storvsc_drv.c
++++ b/drivers/scsi/storvsc_drv.c
+@@ -1690,13 +1690,12 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
+       if (ret == -EAGAIN) {
+               /* no more space */
+ 
+-              if (cmd_request->bounce_sgl_count) {
++              if (cmd_request->bounce_sgl_count)
+                       destroy_bounce_buffer(cmd_request->bounce_sgl,
+                                       cmd_request->bounce_sgl_count);
+ 
+-                      ret = SCSI_MLQUEUE_DEVICE_BUSY;
+-                      goto queue_error;
+-              }
++              ret = SCSI_MLQUEUE_DEVICE_BUSY;
++              goto queue_error;
+       }
+ 
+       return 0;
+diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
+index d509aa74cfa1..c5d3811a7b8c 100644
+--- a/drivers/target/iscsi/iscsi_target_login.c
++++ b/drivers/target/iscsi/iscsi_target_login.c
+@@ -1186,6 +1186,9 @@ old_sess_out:
+               conn->sock = NULL;
+       }
+ 
++      if (conn->conn_transport->iscsit_wait_conn)
++              conn->conn_transport->iscsit_wait_conn(conn);
++
+       if (conn->conn_transport->iscsit_free_conn)
+               conn->conn_transport->iscsit_free_conn(conn);
+ 
+diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
+index ab77f80ead2b..1e406af4ee47 100644
+--- a/drivers/target/iscsi/iscsi_target_util.c
++++ b/drivers/target/iscsi/iscsi_target_util.c
+@@ -1356,15 +1356,15 @@ static int iscsit_do_tx_data(
+       struct iscsi_conn *conn,
+       struct iscsi_data_count *count)
+ {
+-      int data = count->data_length, total_tx = 0, tx_loop = 0, iov_len;
++      int ret, iov_len;
+       struct kvec *iov_p;
+       struct msghdr msg;
+ 
+       if (!conn || !conn->sock || !conn->conn_ops)
+               return -1;
+ 
+-      if (data <= 0) {
+-              pr_err("Data length is: %d\n", data);
++      if (count->data_length <= 0) {
++              pr_err("Data length is: %d\n", count->data_length);
+               return -1;
+       }
+ 
+@@ -1373,20 +1373,16 @@ static int iscsit_do_tx_data(
+       iov_p = count->iov;
+       iov_len = count->iov_count;
+ 
+-      while (total_tx < data) {
+-              tx_loop = kernel_sendmsg(conn->sock, &msg, iov_p, iov_len,
+-                                      (data - total_tx));
+-              if (tx_loop <= 0) {
+-                      pr_debug("tx_loop: %d total_tx %d\n",
+-                              tx_loop, total_tx);
+-                      return tx_loop;
+-              }
+-              total_tx += tx_loop;
+-              pr_debug("tx_loop: %d, total_tx: %d, data: %d\n",
+-                                      tx_loop, total_tx, data);
++      ret = kernel_sendmsg(conn->sock, &msg, iov_p, iov_len,
++                           count->data_length);
++      if (ret != count->data_length) {
++              pr_err("Unexpected ret: %d send data %d\n",
++                     ret, count->data_length);
++              return -EPIPE;
+       }
++      pr_debug("ret: %d, sent data: %d\n", ret, count->data_length);
+ 
+-      return total_tx;
++      return ret;
+ }
+ 
+ int rx_data(
+diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
+index fadad7c5f635..67c802c93ef3 100644
+--- a/drivers/target/loopback/tcm_loop.c
++++ b/drivers/target/loopback/tcm_loop.c
+@@ -153,18 +153,11 @@ static int tcm_loop_change_queue_type(struct scsi_device *sdev, int tag)
+ /*
+  * Locate the SAM Task Attr from struct scsi_cmnd *
+  */
+-static int tcm_loop_sam_attr(struct scsi_cmnd *sc)
+-{
+-      if (sc->device->tagged_supported) {
+-              switch (sc->tag) {
+-              case HEAD_OF_QUEUE_TAG:
+-                      return MSG_HEAD_TAG;
+-              case ORDERED_QUEUE_TAG:
+-                      return MSG_ORDERED_TAG;
+-              default:
+-                      break;
+-              }
+-      }
++static int tcm_loop_sam_attr(struct scsi_cmnd *sc, int tag)
++{
++      if (sc->device->tagged_supported &&
++          sc->device->ordered_tags && tag >= 0)
++              return MSG_ORDERED_TAG;
+ 
+       return MSG_SIMPLE_TAG;
+ }
+@@ -197,7 +190,7 @@ static void tcm_loop_submission_work(struct work_struct *work)
+               set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
+               goto out_done;
+       }
+-      tl_nexus = tl_hba->tl_nexus;
++      tl_nexus = tl_tpg->tl_nexus;
+       if (!tl_nexus) {
+               scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus"
+                               " does not exist\n");
+@@ -214,7 +207,7 @@ static void tcm_loop_submission_work(struct work_struct *work)
+       }
+       rc = target_submit_cmd_map_sgls(se_cmd, tl_nexus->se_sess, sc->cmnd,
+                       &tl_cmd->tl_sense_buf[0], tl_cmd->sc->device->lun,
+-                      scsi_bufflen(sc), tcm_loop_sam_attr(sc),
+                      scsi_bufflen(sc), tcm_loop_sam_attr(sc, tl_cmd->sc_cmd_tag),
+                       sc->sc_data_direction, 0,
+                       scsi_sglist(sc), scsi_sg_count(sc),
+                       sgl_bidi, sgl_bidi_count,
+@@ -252,7 +245,7 @@ static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
+       }
+ 
+       tl_cmd->sc = sc;
+-      tl_cmd->sc_cmd_tag = sc->tag;
++      tl_cmd->sc_cmd_tag = sc->request->tag;
+       INIT_WORK(&tl_cmd->work, tcm_loop_submission_work);
+       queue_work(tcm_loop_workqueue, &tl_cmd->work);
+       return 0;
+@@ -263,16 +256,26 @@ static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
+  * to struct scsi_device
+  */
+ static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg,
+-                            struct tcm_loop_nexus *tl_nexus,
+                             int lun, int task, enum tcm_tmreq_table tmr)
+ {
+       struct se_cmd *se_cmd = NULL;
+       struct se_session *se_sess;
+       struct se_portal_group *se_tpg;
++      struct tcm_loop_nexus *tl_nexus;
+       struct tcm_loop_cmd *tl_cmd = NULL;
+       struct tcm_loop_tmr *tl_tmr = NULL;
+       int ret = TMR_FUNCTION_FAILED, rc;
+ 
++      /*
++       * Locate the tl_nexus and se_sess pointers
++       */
++      tl_nexus = tl_tpg->tl_nexus;
++      if (!tl_nexus) {
++              pr_err("Unable to perform device reset without"
++                              " active I_T Nexus\n");
++              return ret;
++      }
++
+       tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL);
+       if (!tl_cmd) {
+               pr_err("Unable to allocate memory for tl_cmd\n");
+@@ -288,7 +291,7 @@ static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg,
+ 
+       se_cmd = &tl_cmd->tl_se_cmd;
+       se_tpg = &tl_tpg->tl_se_tpg;
+-      se_sess = tl_nexus->se_sess;
++      se_sess = tl_tpg->tl_nexus->se_sess;
+       /*
+        * Initialize struct se_cmd descriptor from target_core_mod infrastructure
+        */
+@@ -333,7 +336,6 @@ release:
+ static int tcm_loop_abort_task(struct scsi_cmnd *sc)
+ {
+       struct tcm_loop_hba *tl_hba;
+-      struct tcm_loop_nexus *tl_nexus;
+       struct tcm_loop_tpg *tl_tpg;
+       int ret = FAILED;
+ 
+@@ -341,22 +343,9 @@ static int tcm_loop_abort_task(struct scsi_cmnd *sc)
+        * Locate the tcm_loop_hba_t pointer
+        */
+       tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
+-      /*
+-       * Locate the tl_nexus and se_sess pointers
+-       */
+-      tl_nexus = tl_hba->tl_nexus;
+-      if (!tl_nexus) {
+-              pr_err("Unable to perform device reset without"
+-                              " active I_T Nexus\n");
+-              return FAILED;
+-      }
+-
+-      /*
+-       * Locate the tl_tpg pointer from TargetID in sc->device->id
+-       */
+       tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
+-      ret = tcm_loop_issue_tmr(tl_tpg, tl_nexus, sc->device->lun,
+-                               sc->tag, TMR_ABORT_TASK);
++      ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun,
++                               sc->request->tag, TMR_ABORT_TASK);
+       return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
+ }
+ 
+@@ -367,7 +356,6 @@ static int tcm_loop_abort_task(struct scsi_cmnd *sc)
+ static int tcm_loop_device_reset(struct scsi_cmnd *sc)
+ {
+       struct tcm_loop_hba *tl_hba;
+-      struct tcm_loop_nexus *tl_nexus;
+       struct tcm_loop_tpg *tl_tpg;
+       int ret = FAILED;
+ 
+@@ -375,20 +363,9 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)
+        * Locate the tcm_loop_hba_t pointer
+        */
+       tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
+-      /*
+-       * Locate the tl_nexus and se_sess pointers
+-       */
+-      tl_nexus = tl_hba->tl_nexus;
+-      if (!tl_nexus) {
+-              pr_err("Unable to perform device reset without"
+-                              " active I_T Nexus\n");
+-              return FAILED;
+-      }
+-      /*
+-       * Locate the tl_tpg pointer from TargetID in sc->device->id
+-       */
+       tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
+-      ret = tcm_loop_issue_tmr(tl_tpg, tl_nexus, sc->device->lun,
++
++      ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun,
+                                0, TMR_LUN_RESET);
+       return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
+ }
+@@ -995,8 +972,8 @@ static int tcm_loop_make_nexus(
+       struct tcm_loop_nexus *tl_nexus;
+       int ret = -ENOMEM;
+ 
+-      if (tl_tpg->tl_hba->tl_nexus) {
+-              pr_debug("tl_tpg->tl_hba->tl_nexus already exists\n");
++      if (tl_tpg->tl_nexus) {
++              pr_debug("tl_tpg->tl_nexus already exists\n");
+               return -EEXIST;
+       }
+       se_tpg = &tl_tpg->tl_se_tpg;
+@@ -1031,7 +1008,7 @@ static int tcm_loop_make_nexus(
+        */
+       __transport_register_session(se_tpg, tl_nexus->se_sess->se_node_acl,
+                       tl_nexus->se_sess, tl_nexus);
+-      tl_tpg->tl_hba->tl_nexus = tl_nexus;
++      tl_tpg->tl_nexus = tl_nexus;
+       pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated"
+               " %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
+               name);
+@@ -1047,12 +1024,8 @@ static int tcm_loop_drop_nexus(
+ {
+       struct se_session *se_sess;
+       struct tcm_loop_nexus *tl_nexus;
+-      struct tcm_loop_hba *tl_hba = tpg->tl_hba;
+ 
+-      if (!tl_hba)
+-              return -ENODEV;
+-
+-      tl_nexus = tl_hba->tl_nexus;
++      tl_nexus = tpg->tl_nexus;
+       if (!tl_nexus)
+               return -ENODEV;
+ 
+@@ -1068,13 +1041,13 @@ static int tcm_loop_drop_nexus(
+       }
+ 
+       pr_debug("TCM_Loop_ConfigFS: Removing I_T Nexus to emulated"
+-              " %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
++              " %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tpg->tl_hba),
+               tl_nexus->se_sess->se_node_acl->initiatorname);
+       /*
+        * Release the SCSI I_T Nexus to the emulated SAS Target Port
+        */
+       transport_deregister_session(tl_nexus->se_sess);
+-      tpg->tl_hba->tl_nexus = NULL;
++      tpg->tl_nexus = NULL;
+       kfree(tl_nexus);
+       return 0;
+ }
+@@ -1090,7 +1063,7 @@ static ssize_t tcm_loop_tpg_show_nexus(
+       struct tcm_loop_nexus *tl_nexus;
+       ssize_t ret;
+ 
+-      tl_nexus = tl_tpg->tl_hba->tl_nexus;
++      tl_nexus = tl_tpg->tl_nexus;
+       if (!tl_nexus)
+               return -ENODEV;
+ 
+diff --git a/drivers/target/loopback/tcm_loop.h b/drivers/target/loopback/tcm_loop.h
+index 54c59d0b6608..6ae49f272ba6 100644
+--- a/drivers/target/loopback/tcm_loop.h
++++ b/drivers/target/loopback/tcm_loop.h
+@@ -27,11 +27,6 @@ struct tcm_loop_tmr {
+ };
+ 
+ struct tcm_loop_nexus {
+-      int it_nexus_active;
+-      /*
+-       * Pointer to Linux/SCSI HBA from linux/include/scsi_host.h
+-       */
+-      struct scsi_host *sh;
+       /*
+        * Pointer to TCM session for I_T Nexus
+        */
+@@ -51,6 +46,7 @@ struct tcm_loop_tpg {
+       atomic_t tl_tpg_port_count;
+       struct se_portal_group tl_se_tpg;
+       struct tcm_loop_hba *tl_hba;
++      struct tcm_loop_nexus *tl_nexus;
+ };
+ 
+ struct tcm_loop_hba {
+@@ -59,7 +55,6 @@ struct tcm_loop_hba {
+       struct se_hba_s *se_hba;
+       struct se_lun *tl_hba_lun;
+       struct se_port *tl_hba_lun_sep;
+-      struct tcm_loop_nexus *tl_nexus;
+       struct device dev;
+       struct Scsi_Host *sh;
+       struct tcm_loop_tpg tl_hba_tpgs[TL_TPGS_PER_HBA];
+diff --git a/drivers/thermal/intel_powerclamp.c b/drivers/thermal/intel_powerclamp.c
+index a084325f1386..6e75177915fa 100644
+--- a/drivers/thermal/intel_powerclamp.c
++++ b/drivers/thermal/intel_powerclamp.c
+@@ -435,7 +435,6 @@ static int clamp_thread(void *arg)
+                * allowed. thus jiffies are updated properly.
+                */
+               preempt_disable();
+-              tick_nohz_idle_enter();
+               /* mwait until target jiffies is reached */
+               while (time_before(jiffies, target_jiffies)) {
+                       unsigned long ecx = 1;
+@@ -451,7 +450,6 @@ static int clamp_thread(void *arg)
+                       start_critical_timings();
+                       atomic_inc(&idle_wakeup_counter);
+               }
+-              tick_nohz_idle_exit();
+               preempt_enable();
+       }
+       del_timer_sync(&wakeup_timer);
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index d90c70c23adb..8f6738d46b14 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -887,8 +887,7 @@ static void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting)
+ 
+                               if (i == (request->num_mapped_sgs - 1) ||
+                                               sg_is_last(s)) {
+-                                      if (list_is_last(&req->list,
+-                                                      &dep->request_list))
++                                      if (list_empty(&dep->request_list))
+                                               last_one = true;
+                                       chain = false;
+                               }
+@@ -906,6 +905,9 @@ static void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting)
+                               if (last_one)
+                                       break;
+                       }
++
++                      if (last_one)
++                              break;
+               } else {
+                       dma = req->request.dma;
+                       length = req->request.length;
+diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
+index e113fd73aeae..c399606f154e 100644
+--- a/drivers/usb/host/ehci-sched.c
++++ b/drivers/usb/host/ehci-sched.c
+@@ -1581,6 +1581,10 @@ iso_stream_schedule (
+       else
+               next = (now + 2 + 7) & ~0x07;   /* full frame cache */
+ 
++      /* If needed, initialize last_iso_frame so that this URB will be seen */
++      if (ehci->isoc_count == 0)
++              ehci->last_iso_frame = now >> 3;
++
+       /*
+        * Use ehci->last_iso_frame as the base.  There can't be any
+        * TDs scheduled for earlier than that.
+@@ -1671,10 +1675,6 @@ iso_stream_schedule (
+       urb->start_frame = start & (mod - 1);
+       if (!stream->highspeed)
+               urb->start_frame >>= 3;
+-
+-      /* Make sure scan_isoc() sees these */
+-      if (ehci->isoc_count == 0)
+-              ehci->last_iso_frame = now >> 3;
+       return status;
+ 
+  fail:
+diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
+index 2f3acebb577a..f4e6b945136c 100644
+--- a/drivers/usb/host/pci-quirks.c
++++ b/drivers/usb/host/pci-quirks.c
+@@ -571,7 +571,8 @@ static void quirk_usb_handoff_ohci(struct pci_dev *pdev)
+ {
+       void __iomem *base;
+       u32 control;
+-      u32 fminterval;
++      u32 fminterval = 0;
++      bool no_fminterval = false;
+       int cnt;
+ 
+       if (!mmio_resource_enabled(pdev, 0))
+@@ -581,6 +582,13 @@ static void quirk_usb_handoff_ohci(struct pci_dev *pdev)
+       if (base == NULL)
+               return;
+ 
++      /*
++       * ULi M5237 OHCI controller locks the whole system when accessing
++       * the OHCI_FMINTERVAL offset.
++       */
++      if (pdev->vendor == PCI_VENDOR_ID_AL && pdev->device == 0x5237)
++              no_fminterval = true;
++
+       control = readl(base + OHCI_CONTROL);
+ 
+ /* On PA-RISC, PDC can leave IR set incorrectly; ignore it there. */
+@@ -619,7 +627,9 @@ static void quirk_usb_handoff_ohci(struct pci_dev *pdev)
+       }
+ 
+       /* software reset of the controller, preserving HcFmInterval */
+-      fminterval = readl(base + OHCI_FMINTERVAL);
++      if (!no_fminterval)
++              fminterval = readl(base + OHCI_FMINTERVAL);
++
+       writel(OHCI_HCR, base + OHCI_CMDSTATUS);
+ 
+       /* reset requires max 10 us delay */
+@@ -628,7 +638,9 @@ static void quirk_usb_handoff_ohci(struct pci_dev *pdev)
+                       break;
+               udelay(1);
+       }
+-      writel(fminterval, base + OHCI_FMINTERVAL);
++
++      if (!no_fminterval)
++              writel(fminterval, base + OHCI_FMINTERVAL);
+ 
+       /* Now the controller is safely in SUSPEND and nothing can wake it up */
+       iounmap(base);
+diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
+index abb38c3833ef..6b0fb6af6815 100644
+--- a/drivers/usb/musb/musb_host.c
++++ b/drivers/usb/musb/musb_host.c
+@@ -2640,7 +2640,6 @@ void musb_host_cleanup(struct musb *musb)
+       if (musb->port_mode == MUSB_PORT_MODE_GADGET)
+               return;
+       usb_remove_hcd(musb->hcd);
+-      musb->hcd = NULL;
+ }
+ 
+ void musb_host_free(struct musb *musb)
+diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
+index 8d7fc48b1f30..29fa1c3d0089 100644
+--- a/drivers/usb/serial/console.c
++++ b/drivers/usb/serial/console.c
+@@ -46,6 +46,8 @@ static struct console usbcons;
+  * ------------------------------------------------------------
+  */
+ 
++static const struct tty_operations usb_console_fake_tty_ops = {
++};
+ 
+ /*
+  * The parsing of the command line works exactly like the
+@@ -137,13 +139,17 @@ static int usb_console_setup(struct console *co, char *options)
+                               goto reset_open_count;
+                       }
+                       kref_init(&tty->kref);
+-                      tty_port_tty_set(&port->port, tty);
+                       tty->driver = usb_serial_tty_driver;
+                       tty->index = co->index;
++                      init_ldsem(&tty->ldisc_sem);
++                      INIT_LIST_HEAD(&tty->tty_files);
++                      kref_get(&tty->driver->kref);
++                      tty->ops = &usb_console_fake_tty_ops;
+                       if (tty_init_termios(tty)) {
+                               retval = -ENOMEM;
+-                              goto free_tty;
++                              goto put_tty;
+                       }
++                      tty_port_tty_set(&port->port, tty);
+               }
+ 
+               /* only call the device specific open if this
+@@ -161,7 +167,7 @@ static int usb_console_setup(struct console *co, char *options)
+                       serial->type->set_termios(tty, port, &dummy);
+ 
+                       tty_port_tty_set(&port->port, NULL);
+-                      kfree(tty);
++                      tty_kref_put(tty);
+               }
+               set_bit(ASYNCB_INITIALIZED, &port->port.flags);
+       }
+@@ -177,8 +183,8 @@ static int usb_console_setup(struct console *co, char *options)
+ 
+  fail:
+       tty_port_tty_set(&port->port, NULL);
+- free_tty:
+-      kfree(tty);
++ put_tty:
++      tty_kref_put(tty);
+  reset_open_count:
+       port->port.count = 0;
+       usb_autopm_put_interface(serial->interface);
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index 5741e9405069..9e8708c5cbfa 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -120,10 +120,12 @@ static const struct usb_device_id id_table[] = {
+       { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */
+       { USB_DEVICE(0x10C4, 0x8664) }, /* AC-Services CAN-IF */
+       { USB_DEVICE(0x10C4, 0x8665) }, /* AC-Services OBD-IF */
+-      { USB_DEVICE(0x10C4, 0x8875) }, /* CEL MeshConnect USB Stick */
++      { USB_DEVICE(0x10C4, 0x8856) }, /* CEL EM357 ZigBee USB Stick - LR */
++      { USB_DEVICE(0x10C4, 0x8857) }, /* CEL EM357 ZigBee USB Stick */
+       { USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */
+       { USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */
+       { USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */
++      { USB_DEVICE(0x10C4, 0x8977) }, /* CEL MeshWorks DevKit Device */
+       { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
+       { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
+       { USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */
+diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c
+index 49101fe45d38..35297a845a63 100644
+--- a/drivers/usb/serial/keyspan.c
++++ b/drivers/usb/serial/keyspan.c
+@@ -421,6 +421,8 @@ static void        usa26_instat_callback(struct urb *urb)
+       }
+       port = serial->port[msg->port];
+       p_priv = usb_get_serial_port_data(port);
++      if (!p_priv)
++              goto resubmit;
+ 
+       /* Update handshaking pin state information */
+       old_dcd_state = p_priv->dcd_state;
+@@ -431,7 +433,7 @@ static void        usa26_instat_callback(struct urb *urb)
+ 
+       if (old_dcd_state != p_priv->dcd_state)
+               tty_port_tty_hangup(&port->port, true);
+-
++resubmit:
+       /* Resubmit urb so we continue receiving */
+       err = usb_submit_urb(urb, GFP_ATOMIC);
+       if (err != 0)
+@@ -541,6 +543,8 @@ static void        usa28_instat_callback(struct urb *urb)
+       }
+       port = serial->port[msg->port];
+       p_priv = usb_get_serial_port_data(port);
++      if (!p_priv)
++              goto resubmit;
+ 
+       /* Update handshaking pin state information */
+       old_dcd_state = p_priv->dcd_state;
+@@ -551,7 +555,7 @@ static void        usa28_instat_callback(struct urb *urb)
+ 
+       if (old_dcd_state != p_priv->dcd_state && old_dcd_state)
+               tty_port_tty_hangup(&port->port, true);
+-
++resubmit:
+               /* Resubmit urb so we continue receiving */
+       err = usb_submit_urb(urb, GFP_ATOMIC);
+       if (err != 0)
+@@ -624,6 +628,8 @@ static void        usa49_instat_callback(struct urb *urb)
+       }
+       port = serial->port[msg->portNumber];
+       p_priv = usb_get_serial_port_data(port);
++      if (!p_priv)
++              goto resubmit;
+ 
+       /* Update handshaking pin state information */
+       old_dcd_state = p_priv->dcd_state;
+@@ -634,7 +640,7 @@ static void        usa49_instat_callback(struct urb *urb)
+ 
+       if (old_dcd_state != p_priv->dcd_state && old_dcd_state)
+               tty_port_tty_hangup(&port->port, true);
+-
++resubmit:
+       /* Resubmit urb so we continue receiving */
+       err = usb_submit_urb(urb, GFP_ATOMIC);
+       if (err != 0)
+@@ -872,6 +878,8 @@ static void        usa90_instat_callback(struct urb *urb)
+ 
+       port = serial->port[0];
+       p_priv = usb_get_serial_port_data(port);
++      if (!p_priv)
++              goto resubmit;
+ 
+       /* Update handshaking pin state information */
+       old_dcd_state = p_priv->dcd_state;
+@@ -882,7 +890,7 @@ static void        usa90_instat_callback(struct urb *urb)
+ 
+       if (old_dcd_state != p_priv->dcd_state && old_dcd_state)
+               tty_port_tty_hangup(&port->port, true);
+-
++resubmit:
+       /* Resubmit urb so we continue receiving */
+       err = usb_submit_urb(urb, GFP_ATOMIC);
+       if (err != 0)
+@@ -943,6 +951,8 @@ static void        usa67_instat_callback(struct urb *urb)
+ 
+       port = serial->port[msg->port];
+       p_priv = usb_get_serial_port_data(port);
++      if (!p_priv)
++              goto resubmit;
+ 
+       /* Update handshaking pin state information */
+       old_dcd_state = p_priv->dcd_state;
+@@ -951,7 +961,7 @@ static void        usa67_instat_callback(struct urb *urb)
+ 
+       if (old_dcd_state != p_priv->dcd_state && old_dcd_state)
+               tty_port_tty_hangup(&port->port, true);
+-
++resubmit:
+       /* Resubmit urb so we continue receiving */
+       err = usb_submit_urb(urb, GFP_ATOMIC);
+       if (err != 0)
+diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
+index 7ba042498857..75e1d03b8da3 100644
+--- a/drivers/vfio/pci/vfio_pci.c
++++ b/drivers/vfio/pci/vfio_pci.c
+@@ -810,13 +810,11 @@ static const struct vfio_device_ops vfio_pci_ops = {
+ 
+ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ {
+-      u8 type;
+       struct vfio_pci_device *vdev;
+       struct iommu_group *group;
+       int ret;
+ 
+-      pci_read_config_byte(pdev, PCI_HEADER_TYPE, &type);
+-      if ((type & PCI_HEADER_TYPE) != PCI_HEADER_TYPE_NORMAL)
++      if (pdev->hdr_type != PCI_HEADER_TYPE_NORMAL)
+               return -EINVAL;
+ 
+       group = iommu_group_get(&pdev->dev);
+diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
+index 5d0b7b846440..486d710a5293 100644
+--- a/drivers/vhost/scsi.c
++++ b/drivers/vhost/scsi.c
+@@ -861,6 +861,23 @@ vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *cmd,
+       return 0;
+ }
+ 
++static int vhost_scsi_to_tcm_attr(int attr)
++{
++      switch (attr) {
++      case VIRTIO_SCSI_S_SIMPLE:
++              return MSG_SIMPLE_TAG;
++      case VIRTIO_SCSI_S_ORDERED:
++              return MSG_ORDERED_TAG;
++      case VIRTIO_SCSI_S_HEAD:
++              return MSG_HEAD_TAG;
++      case VIRTIO_SCSI_S_ACA:
++              return MSG_ACA_TAG;
++      default:
++              break;
++      }
++      return MSG_SIMPLE_TAG;
++}
++
+ static void tcm_vhost_submission_work(struct work_struct *work)
+ {
+       struct tcm_vhost_cmd *cmd =
+@@ -887,9 +904,10 @@ static void tcm_vhost_submission_work(struct work_struct *work)
+       rc = target_submit_cmd_map_sgls(se_cmd, tv_nexus->tvn_se_sess,
+                       cmd->tvc_cdb, &cmd->tvc_sense_buf[0],
+                       cmd->tvc_lun, cmd->tvc_exp_data_len,
+-                      cmd->tvc_task_attr, cmd->tvc_data_direction,
+-                      TARGET_SCF_ACK_KREF, sg_ptr, cmd->tvc_sgl_count,
+-                      sg_bidi_ptr, sg_no_bidi, NULL, 0);
++                      vhost_scsi_to_tcm_attr(cmd->tvc_task_attr),
++                      cmd->tvc_data_direction, TARGET_SCF_ACK_KREF,
++                      sg_ptr, cmd->tvc_sgl_count, sg_bidi_ptr, sg_no_bidi,
++                      NULL, 0);
+       if (rc < 0) {
+               transport_send_check_condition_and_sense(se_cmd,
+                               TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
+diff --git a/drivers/video/logo/logo.c b/drivers/video/logo/logo.c
+index b670cbda38e3..ffe024b830fc 100644
+--- a/drivers/video/logo/logo.c
++++ b/drivers/video/logo/logo.c
+@@ -21,6 +21,21 @@ static bool nologo;
+ module_param(nologo, bool, 0);
+ MODULE_PARM_DESC(nologo, "Disables startup logo");
+ 
++/*
++ * Logos are located in the initdata, and will be freed in kernel_init.
++ * Use late_init to mark the logos as freed to prevent any further use.
++ */
++
++static bool logos_freed;
++
++static int __init fb_logo_late_init(void)
++{
++      logos_freed = true;
++      return 0;
++}
++
++late_initcall(fb_logo_late_init);
++
+ /* logo's are marked __initdata. Use __init_refok to tell
+  * modpost that it is intended that this function uses data
+  * marked __initdata.
+@@ -29,7 +44,7 @@ const struct linux_logo * __init_refok fb_find_logo(int depth)
+ {
+       const struct linux_logo *logo = NULL;
+ 
+-      if (nologo)
++      if (nologo || logos_freed)
+               return NULL;
+ 
+       if (depth >= 1) {
+diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
+index 223e1cb14345..59a53f664005 100644
+--- a/fs/lockd/svc.c
++++ b/fs/lockd/svc.c
+@@ -137,10 +137,6 @@ lockd(void *vrqstp)
+ 
+       dprintk("NFS locking service started (ver " LOCKD_VERSION ").\n");
+ 
+-      if (!nlm_timeout)
+-              nlm_timeout = LOCKD_DFLT_TIMEO;
+-      nlmsvc_timeout = nlm_timeout * HZ;
+-
+       /*
+        * The main request loop. We don't terminate until the last
+        * NFS mount or NFS daemon has gone away.
+@@ -346,6 +342,10 @@ static struct svc_serv *lockd_create_svc(void)
+               printk(KERN_WARNING
+                       "lockd_up: no pid, %d users??\n", nlmsvc_users);
+ 
++      if (!nlm_timeout)
++              nlm_timeout = LOCKD_DFLT_TIMEO;
++      nlmsvc_timeout = nlm_timeout * HZ;
++
+       serv = svc_create(&nlmsvc_program, LOCKD_BUFSIZE, NULL);
+       if (!serv) {
+               printk(KERN_WARNING "lockd_up: create service failed\n");
+diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
+index 1abe4f55dea2..037f9572b94c 100644
+--- a/fs/nfs/nfs4client.c
++++ b/fs/nfs/nfs4client.c
+@@ -565,20 +565,14 @@ static bool nfs4_match_clientids(struct nfs_client *a, struct nfs_client *b)
+ }
+ 
+ /*
+- * Returns true if the server owners match
++ * Returns true if the server major ids match
+  */
+ static bool
+-nfs4_match_serverowners(struct nfs_client *a, struct nfs_client *b)
++nfs4_check_clientid_trunking(struct nfs_client *a, struct nfs_client *b)
+ {
+       struct nfs41_server_owner *o1 = a->cl_serverowner;
+       struct nfs41_server_owner *o2 = b->cl_serverowner;
+ 
+-      if (o1->minor_id != o2->minor_id) {
+-              dprintk("NFS: --> %s server owner minor IDs do not match\n",
+-                      __func__);
+-              return false;
+-      }
+-
+       if (o1->major_id_sz != o2->major_id_sz)
+               goto out_major_mismatch;
+       if (memcmp(o1->major_id, o2->major_id, o1->major_id_sz) != 0)
+@@ -654,7 +648,12 @@ int nfs41_walk_client_list(struct nfs_client *new,
+               if (!nfs4_match_clientids(pos, new))
+                       continue;
+ 
+-              if (!nfs4_match_serverowners(pos, new))
++              /*
++               * Note that session trunking is just a special subcase of
++               * client id trunking. In either case, we want to fall back
++               * to using the existing nfs_client.
++               */
++              if (!nfs4_check_clientid_trunking(pos, new))
+                       continue;
+ 
+               atomic_inc(&pos->cl_count);
+diff --git a/fs/notify/inode_mark.c b/fs/notify/inode_mark.c
+index 74825be65b7b..fbb9dfb7b1d2 100644
+--- a/fs/notify/inode_mark.c
++++ b/fs/notify/inode_mark.c
+@@ -288,20 +288,25 @@ void fsnotify_unmount_inodes(struct list_head *list)
+               spin_unlock(&inode->i_lock);
+ 
+               /* In case the dropping of a reference would nuke next_i. */
+-              if ((&next_i->i_sb_list != list) &&
+-                  atomic_read(&next_i->i_count)) {
++              while (&next_i->i_sb_list != list) {
+                       spin_lock(&next_i->i_lock);
+-                      if (!(next_i->i_state & (I_FREEING | I_WILL_FREE))) {
++                      if (!(next_i->i_state & (I_FREEING | I_WILL_FREE)) &&
++                                              atomic_read(&next_i->i_count)) {
+                               __iget(next_i);
+                               need_iput = next_i;
++                              spin_unlock(&next_i->i_lock);
++                              break;
+                       }
+                       spin_unlock(&next_i->i_lock);
++                      next_i = list_entry(next_i->i_sb_list.next,
++                                              struct inode, i_sb_list);
+               }
+ 
+               /*
+-               * We can safely drop inode_sb_list_lock here because we hold
+-               * references on both inode and next_i.  Also no new inodes
+-               * will be added since the umount has begun.
++               * We can safely drop inode_sb_list_lock here because either
++               * we actually hold references on both inode and next_i or
++               * end of list.  Also no new inodes will be added since the
++               * umount has begun.
+                */
+               spin_unlock(&inode_sb_list_lock);
+ 
+diff --git a/fs/proc/stat.c b/fs/proc/stat.c
+index 6f599c62f0cc..dbd027235440 100644
+--- a/fs/proc/stat.c
++++ b/fs/proc/stat.c
+@@ -159,7 +159,7 @@ static int show_stat(struct seq_file *p, void *v)
+ 
+       /* sum again ? it could be updated? */
+       for_each_irq_nr(j)
+-              seq_put_decimal_ull(p, ' ', kstat_irqs(j));
++              seq_put_decimal_ull(p, ' ', kstat_irqs_usr(j));
+ 
+       seq_printf(p,
+               "\nctxt %llu\n"
+diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h
+index 51c72be4a7c3..4b2053a232c9 100644
+--- a/include/linux/kernel_stat.h
++++ b/include/linux/kernel_stat.h
+@@ -74,6 +74,7 @@ static inline unsigned int kstat_softirqs_cpu(unsigned int irq, int cpu)
+  * Number of interrupts per specific IRQ source, since bootup
+  */
+ extern unsigned int kstat_irqs(unsigned int irq);
++extern unsigned int kstat_irqs_usr(unsigned int irq);
+ 
+ /*
+  * Number of interrupts per cpu, since bootup
+diff --git a/include/uapi/linux/in6.h b/include/uapi/linux/in6.h
+index e9a1d2d973b6..4c399ae04677 100644
+--- a/include/uapi/linux/in6.h
++++ b/include/uapi/linux/in6.h
+@@ -149,7 +149,7 @@ struct in6_flowlabel_req {
+ /*
+  *    IPV6 socket options
+  */
+-
++#if __UAPI_DEF_IPV6_OPTIONS
+ #define IPV6_ADDRFORM         1
+ #define IPV6_2292PKTINFO      2
+ #define IPV6_2292HOPOPTS      3
+@@ -192,6 +192,7 @@ struct in6_flowlabel_req {
+ 
+ #define IPV6_IPSEC_POLICY     34
+ #define IPV6_XFRM_POLICY      35
++#endif
+ 
+ /*
+  * Multicast:
+diff --git a/include/uapi/linux/libc-compat.h b/include/uapi/linux/libc-compat.h
+index c140620dad92..e28807ad17fa 100644
+--- a/include/uapi/linux/libc-compat.h
++++ b/include/uapi/linux/libc-compat.h
+@@ -69,6 +69,7 @@
+ #define __UAPI_DEF_SOCKADDR_IN6               0
+ #define __UAPI_DEF_IPV6_MREQ          0
+ #define __UAPI_DEF_IPPROTO_V6         0
++#define __UAPI_DEF_IPV6_OPTIONS               0
+ 
+ #else
+ 
+@@ -82,6 +83,7 @@
+ #define __UAPI_DEF_SOCKADDR_IN6               1
+ #define __UAPI_DEF_IPV6_MREQ          1
+ #define __UAPI_DEF_IPPROTO_V6         1
++#define __UAPI_DEF_IPV6_OPTIONS               1
+ 
+ #endif /* _NETINET_IN_H */
+ 
+@@ -103,6 +105,7 @@
+ #define __UAPI_DEF_SOCKADDR_IN6               1
+ #define __UAPI_DEF_IPV6_MREQ          1
+ #define __UAPI_DEF_IPPROTO_V6         1
++#define __UAPI_DEF_IPV6_OPTIONS               1
+ 
+ /* Definitions for xattr.h */
+ #define __UAPI_DEF_XATTR              1
+diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
+index 001fa5bab490..8a160e8a44e8 100644
+--- a/kernel/irq/internals.h
++++ b/kernel/irq/internals.h
+@@ -74,6 +74,14 @@ extern void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu);
+ extern void mask_irq(struct irq_desc *desc);
+ extern void unmask_irq(struct irq_desc *desc);
+ 
++#ifdef CONFIG_SPARSE_IRQ
++extern void irq_lock_sparse(void);
++extern void irq_unlock_sparse(void);
++#else
++static inline void irq_lock_sparse(void) { }
++static inline void irq_unlock_sparse(void) { }
++#endif
++
+ extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr);
+ 
+ irqreturn_t handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action);
+diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
+index 8ab8e9390297..07d45516b540 100644
+--- a/kernel/irq/irqdesc.c
++++ b/kernel/irq/irqdesc.c
+@@ -131,6 +131,16 @@ static void free_masks(struct irq_desc *desc)
+ static inline void free_masks(struct irq_desc *desc) { }
+ #endif
+ 
++void irq_lock_sparse(void)
++{
++      mutex_lock(&sparse_irq_lock);
++}
++
++void irq_unlock_sparse(void)
++{
++      mutex_unlock(&sparse_irq_lock);
++}
++
+ static struct irq_desc *alloc_desc(int irq, int node, struct module *owner)
+ {
+       struct irq_desc *desc;
+@@ -167,6 +177,12 @@ static void free_desc(unsigned int irq)
+ 
+       unregister_irq_proc(irq, desc);
+ 
++      /*
++       * sparse_irq_lock protects also show_interrupts() and
++       * kstat_irq_usr(). Once we deleted the descriptor from the
++       * sparse tree we can free it. Access in proc will fail to
++       * lookup the descriptor.
++       */
+       mutex_lock(&sparse_irq_lock);
+       delete_irq_desc(irq);
+       mutex_unlock(&sparse_irq_lock);
+@@ -489,6 +505,15 @@ void dynamic_irq_cleanup(unsigned int irq)
+       raw_spin_unlock_irqrestore(&desc->lock, flags);
+ }
+ 
++/**
++ * kstat_irqs_cpu - Get the statistics for an interrupt on a cpu
++ * @irq:      The interrupt number
++ * @cpu:      The cpu number
++ *
++ * Returns the sum of interrupt counts on @cpu since boot for
++ * @irq. The caller must ensure that the interrupt is not removed
++ * concurrently.
++ */
+ unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
+ {
+       struct irq_desc *desc = irq_to_desc(irq);
+@@ -497,6 +522,14 @@ unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
+                       *per_cpu_ptr(desc->kstat_irqs, cpu) : 0;
+ }
+ 
++/**
++ * kstat_irqs - Get the statistics for an interrupt
++ * @irq:      The interrupt number
++ *
++ * Returns the sum of interrupt counts on all cpus since boot for
++ * @irq. The caller must ensure that the interrupt is not removed
++ * concurrently.
++ */
+ unsigned int kstat_irqs(unsigned int irq)
+ {
+       struct irq_desc *desc = irq_to_desc(irq);
+@@ -509,3 +542,22 @@ unsigned int kstat_irqs(unsigned int irq)
+               sum += *per_cpu_ptr(desc->kstat_irqs, cpu);
+       return sum;
+ }
++
++/**
++ * kstat_irqs_usr - Get the statistics for an interrupt
++ * @irq:      The interrupt number
++ *
++ * Returns the sum of interrupt counts on all cpus since boot for
++ * @irq. Contrary to kstat_irqs() this can be called from any
++ * preemptible context. It's protected against concurrent removal of
++ * an interrupt descriptor when sparse irqs are enabled.
++ */
++unsigned int kstat_irqs_usr(unsigned int irq)
++{
++      int sum;
++
++      irq_lock_sparse();
++      sum = kstat_irqs(irq);
++      irq_unlock_sparse();
++      return sum;
++}
+diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
+index 36f6ee181b0c..095cd7230aef 100644
+--- a/kernel/irq/proc.c
++++ b/kernel/irq/proc.c
+@@ -15,6 +15,23 @@
+ 
+ #include "internals.h"
+ 
++/*
++ * Access rules:
++ *
++ * procfs protects read/write of /proc/irq/N/ files against a
++ * concurrent free of the interrupt descriptor. remove_proc_entry()
++ * immediately prevents new read/writes to happen and waits for
++ * already running read/write functions to complete.
++ *
++ * We remove the proc entries first and then delete the interrupt
++ * descriptor from the radix tree and free it. So it is guaranteed
++ * that irq_to_desc(N) is valid as long as the read/writes are
++ * permitted by procfs.
++ *
++ * The read from /proc/interrupts is a different problem because there
++ * is no protection. So the lookup and the access to irqdesc
++ * information must be protected by sparse_irq_lock.
++ */
+ static struct proc_dir_entry *root_irq_dir;
+ 
+ #ifdef CONFIG_SMP
+@@ -437,9 +454,10 @@ int show_interrupts(struct seq_file *p, void *v)
+               seq_putc(p, '\n');
+       }
+ 
++      irq_lock_sparse();
+       desc = irq_to_desc(i);
+       if (!desc)
+-              return 0;
++              goto outsparse;
+ 
+       raw_spin_lock_irqsave(&desc->lock, flags);
+       for_each_online_cpu(j)
+@@ -479,6 +497,8 @@ int show_interrupts(struct seq_file *p, void *v)
+       seq_putc(p, '\n');
+ out:
+       raw_spin_unlock_irqrestore(&desc->lock, flags);
++outsparse:
++      irq_unlock_sparse();
+       return 0;
+ }
+ #endif
+diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
+index 6558b7ac112d..8c08a6f9cca0 100644
+--- a/kernel/time/tick-sched.c
++++ b/kernel/time/tick-sched.c
+@@ -807,7 +807,6 @@ void tick_nohz_idle_enter(void)
+ 
+       local_irq_enable();
+ }
+-EXPORT_SYMBOL_GPL(tick_nohz_idle_enter);
+ 
+ /**
+  * tick_nohz_irq_exit - update next tick event from interrupt exit
+@@ -934,7 +933,6 @@ void tick_nohz_idle_exit(void)
+ 
+       local_irq_enable();
+ }
+-EXPORT_SYMBOL_GPL(tick_nohz_idle_exit);
+ 
+ static int tick_nohz_reprogram(struct tick_sched *ts, ktime_t now)
+ {
+diff --git a/lib/decompress_bunzip2.c b/lib/decompress_bunzip2.c
+index 31c5f7675fbf..f504027d66a8 100644
+--- a/lib/decompress_bunzip2.c
++++ b/lib/decompress_bunzip2.c
+@@ -184,7 +184,7 @@ static int INIT get_next_block(struct bunzip_data *bd)
+       if (get_bits(bd, 1))
+               return RETVAL_OBSOLETE_INPUT;
+       origPtr = get_bits(bd, 24);
+-      if (origPtr > dbufSize)
++      if (origPtr >= dbufSize)
+               return RETVAL_DATA_ERROR;
+       /* mapping table: if some byte values are never used (encoding things
+          like ascii text), the compression code removes the gaps to have fewer
+diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
+index c46387a46535..e5c5f573c0d4 100644
+--- a/net/batman-adv/fragmentation.c
++++ b/net/batman-adv/fragmentation.c
+@@ -251,7 +251,7 @@ batadv_frag_merge_packets(struct hlist_head *chain, struct sk_buff *skb)
+       kfree(entry);
+ 
+       /* Make room for the rest of the fragments. */
+-      if (pskb_expand_head(skb_out, 0, size - skb->len, GFP_ATOMIC) < 0) {
++      if (pskb_expand_head(skb_out, 0, size - skb_out->len, GFP_ATOMIC) < 0) {
+               kfree_skb(skb_out);
+               skb_out = NULL;
+               goto free;
+@@ -434,7 +434,7 @@ bool batadv_frag_send_packet(struct sk_buff *skb,
+        * fragments larger than BATADV_FRAG_MAX_FRAG_SIZE
+        */
+       mtu = min_t(unsigned, mtu, BATADV_FRAG_MAX_FRAG_SIZE);
+-      max_fragment_size = (mtu - header_size - ETH_HLEN);
++      max_fragment_size = mtu - header_size;
+       max_packet_size = max_fragment_size * BATADV_FRAG_MAX_FRAGMENTS;
+ 
+       /* Don't even try to fragment, if we need more than 16 fragments */
+diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
+index 36b9ae61f5e8..2393ea72d65f 100644
+--- a/net/batman-adv/gateway_client.c
++++ b/net/batman-adv/gateway_client.c
+@@ -812,7 +812,7 @@ bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
+               goto out;
+ 
+       gw_node = batadv_gw_node_get(bat_priv, orig_dst_node);
+-      if (!gw_node->bandwidth_down == 0)
++      if (!gw_node)
+               goto out;
+ 
+       switch (atomic_read(&bat_priv->gw_mode)) {
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 3ed11a555834..86bb9cc81f02 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -1709,6 +1709,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
+ 
+       skb_scrub_packet(skb, true);
+       skb->protocol = eth_type_trans(skb, dev);
++      skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
+ 
+       return netif_rx_internal(skb);
+ }
+@@ -2529,11 +2530,14 @@ netdev_features_t netif_skb_dev_features(struct sk_buff *skb,
+       if (skb_shinfo(skb)->gso_segs > dev->gso_max_segs)
+               features &= ~NETIF_F_GSO_MASK;
+ 
+-      if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) {
+-              struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
+-              protocol = veh->h_vlan_encapsulated_proto;
+-      } else if (!vlan_tx_tag_present(skb)) {
+-              return harmonize_features(skb, dev, features);
++      if (!vlan_tx_tag_present(skb)) {
++              if (unlikely(protocol == htons(ETH_P_8021Q) ||
++                           protocol == htons(ETH_P_8021AD))) {
++                      struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
++                      protocol = veh->h_vlan_encapsulated_proto;
++              } else {
++                      return harmonize_features(skb, dev, features);
++              }
+       }
+ 
+       features &= (dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX |
+@@ -4701,9 +4705,14 @@ static void netdev_adjacent_sysfs_del(struct net_device *dev,
+       sysfs_remove_link(&(dev->dev.kobj), linkname);
+ }
+ 
+-#define netdev_adjacent_is_neigh_list(dev, dev_list) \
+-              (dev_list == &dev->adj_list.upper || \
+-               dev_list == &dev->adj_list.lower)
++static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev,
++                                               struct net_device *adj_dev,
++                                               struct list_head *dev_list)
++{
++      return (dev_list == &dev->adj_list.upper ||
++              dev_list == &dev->adj_list.lower) &&
++              net_eq(dev_net(dev), dev_net(adj_dev));
++}
+ 
+ static int __netdev_adjacent_dev_insert(struct net_device *dev,
+                                       struct net_device *adj_dev,
+@@ -4733,7 +4742,7 @@ static int __netdev_adjacent_dev_insert(struct net_device *dev,
+       pr_debug("dev_hold for %s, because of link added from %s to %s\n",
+                adj_dev->name, dev->name, adj_dev->name);
+ 
+-      if (netdev_adjacent_is_neigh_list(dev, dev_list)) {
++      if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) {
+               ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list);
+               if (ret)
+                       goto free_adj;
+@@ -4754,7 +4763,7 @@ static int __netdev_adjacent_dev_insert(struct net_device *dev,
+       return 0;
+ 
+ remove_symlinks:
+-      if (netdev_adjacent_is_neigh_list(dev, dev_list))
++      if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
+               netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
+ free_adj:
+       kfree(adj);
+@@ -4787,7 +4796,7 @@ static void __netdev_adjacent_dev_remove(struct net_device *dev,
+       if (adj->master)
+               sysfs_remove_link(&(dev->dev.kobj), "master");
+ 
+-      if (netdev_adjacent_is_neigh_list(dev, dev_list))
++      if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
+               netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
+ 
+       list_del_rcu(&adj->list);
+@@ -5057,11 +5066,65 @@ void netdev_upper_dev_unlink(struct net_device *dev,
+ }
+ EXPORT_SYMBOL(netdev_upper_dev_unlink);
+ 
++void netdev_adjacent_add_links(struct net_device *dev)
++{
++      struct netdev_adjacent *iter;
++
++      struct net *net = dev_net(dev);
++
++      list_for_each_entry(iter, &dev->adj_list.upper, list) {
++              if (!net_eq(net,dev_net(iter->dev)))
++                      continue;
++              netdev_adjacent_sysfs_add(iter->dev, dev,
++                                        &iter->dev->adj_list.lower);
++              netdev_adjacent_sysfs_add(dev, iter->dev,
++                                        &dev->adj_list.upper);
++      }
++
++      list_for_each_entry(iter, &dev->adj_list.lower, list) {
++              if (!net_eq(net,dev_net(iter->dev)))
++                      continue;
++              netdev_adjacent_sysfs_add(iter->dev, dev,
++                                        &iter->dev->adj_list.upper);
++              netdev_adjacent_sysfs_add(dev, iter->dev,
++                                        &dev->adj_list.lower);
++      }
++}
++
++void netdev_adjacent_del_links(struct net_device *dev)
++{
++      struct netdev_adjacent *iter;
++
++      struct net *net = dev_net(dev);
++
++      list_for_each_entry(iter, &dev->adj_list.upper, list) {
++              if (!net_eq(net,dev_net(iter->dev)))
++                      continue;
++              netdev_adjacent_sysfs_del(iter->dev, dev->name,
++                                        &iter->dev->adj_list.lower);
++              netdev_adjacent_sysfs_del(dev, iter->dev->name,
++                                        &dev->adj_list.upper);
++      }
++
++      list_for_each_entry(iter, &dev->adj_list.lower, list) {
++              if (!net_eq(net,dev_net(iter->dev)))
++                      continue;
++              netdev_adjacent_sysfs_del(iter->dev, dev->name,
++                                        &iter->dev->adj_list.upper);
++              netdev_adjacent_sysfs_del(dev, iter->dev->name,
++                                        &dev->adj_list.lower);
++      }
++}
++
+ void netdev_adjacent_rename_links(struct net_device *dev, char *oldname)
+ {
+       struct netdev_adjacent *iter;
+ 
++      struct net *net = dev_net(dev);
++
+       list_for_each_entry(iter, &dev->adj_list.upper, list) {
++              if (!net_eq(net,dev_net(iter->dev)))
++                      continue;
+               netdev_adjacent_sysfs_del(iter->dev, oldname,
+                                         &iter->dev->adj_list.lower);
+               netdev_adjacent_sysfs_add(iter->dev, dev,
+@@ -5069,6 +5132,8 @@ void netdev_adjacent_rename_links(struct net_device *dev, char *oldname)
+       }
+ 
+       list_for_each_entry(iter, &dev->adj_list.lower, list) {
++              if (!net_eq(net,dev_net(iter->dev)))
++                      continue;
+               netdev_adjacent_sysfs_del(iter->dev, oldname,
+                                         &iter->dev->adj_list.upper);
+               netdev_adjacent_sysfs_add(iter->dev, dev,
+@@ -6675,6 +6740,7 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
+ 
+       /* Send a netdev-removed uevent to the old namespace */
+       kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
++      netdev_adjacent_del_links(dev);
+ 
+       /* Actually switch the network namespace */
+       dev_net_set(dev, net);
+@@ -6689,6 +6755,7 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
+ 
+       /* Send a netdev-add uevent to the new namespace */
+       kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
++      netdev_adjacent_add_links(dev);
+ 
+       /* Fixup kobjects */
+       err = device_rename(&dev->dev, dev->name);
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index baf6fc457df9..e2b1bba69882 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -3937,6 +3937,7 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet)
+       skb->local_df = 0;
+       skb_dst_drop(skb);
+       skb->mark = 0;
++      skb_init_secmark(skb);
+       secpath_reset(skb);
+       nf_reset(skb);
+       nf_reset_trace(skb);
+diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
+index 94213c891565..b40b90d3bd2b 100644
+--- a/net/ipv4/ip_gre.c
++++ b/net/ipv4/ip_gre.c
+@@ -250,10 +250,6 @@ static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
+       struct ip_tunnel *tunnel = netdev_priv(dev);
+       const struct iphdr *tnl_params;
+ 
+-      skb = gre_handle_offloads(skb, !!(tunnel->parms.o_flags&TUNNEL_CSUM));
+-      if (IS_ERR(skb))
+-              goto out;
+-
+       if (dev->header_ops) {
+               /* Need space for new headers */
+               if (skb_cow_head(skb, dev->needed_headroom -
+@@ -266,6 +262,7 @@ static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
+                * to gre header.
+                */
+               skb_pull(skb, tunnel->hlen + sizeof(struct iphdr));
++              skb_reset_mac_header(skb);
+       } else {
+               if (skb_cow_head(skb, dev->needed_headroom))
+                       goto free_skb;
+@@ -273,6 +270,10 @@ static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
+               tnl_params = &tunnel->parms.iph;
+       }
+ 
++      skb = gre_handle_offloads(skb, !!(tunnel->parms.o_flags&TUNNEL_CSUM));
++      if (IS_ERR(skb))
++              goto out;
++
+       __gre_xmit(skb, dev, tnl_params, skb->protocol);
+ 
+       return NETDEV_TX_OK;
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index 91b98e5a17aa..7efa26bb872c 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -1894,7 +1894,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
+               if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now)))
+                       break;
+ 
+-              if (tso_segs == 1) {
++              if (tso_segs == 1 || !sk->sk_gso_max_segs) {
+                       if (unlikely(!tcp_nagle_test(tp, skb, mss_now,
+                                                    (tcp_skb_is_last(sk, skb) ?
+                                                     nonagle : TCP_NAGLE_PUSH))))
+@@ -1931,7 +1931,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
+               }
+ 
+               limit = mss_now;
+-              if (tso_segs > 1 && !tcp_urg_mode(tp))
++              if (tso_segs > 1 && sk->sk_gso_max_segs && !tcp_urg_mode(tp))
+                       limit = tcp_mss_split_point(sk, skb, mss_now,
+                                                   min_t(unsigned int,
+                                                         cwnd_quota,
+diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
+index cf9937743abb..53ea1644a297 100644
+--- a/net/netfilter/ipset/ip_set_core.c
++++ b/net/netfilter/ipset/ip_set_core.c
+@@ -1839,6 +1839,12 @@ ip_set_sockfn_get(struct sock *sk, int optval, void __user *user, int *len)
+       if (*op < IP_SET_OP_VERSION) {
+               /* Check the version at the beginning of operations */
+               struct ip_set_req_version *req_version = data;
++
++              if (*len < sizeof(struct ip_set_req_version)) {
++                      ret = -EINVAL;
++                      goto done;
++              }
++
+               if (req_version->version != IPSET_PROTOCOL) {
+                       ret = -EPROTO;
+                       goto done;
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index 7c177bc43806..1d52506bda14 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -510,14 +510,14 @@ out:
+       return err;
+ }
+ 
+-static void netlink_frame_flush_dcache(const struct nl_mmap_hdr *hdr)
++static void netlink_frame_flush_dcache(const struct nl_mmap_hdr *hdr, unsigned int nm_len)
+ {
+ #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
+       struct page *p_start, *p_end;
+ 
+       /* First page is flushed through netlink_{get,set}_status */
+       p_start = pgvec_to_page(hdr + PAGE_SIZE);
+-      p_end   = pgvec_to_page((void *)hdr + NL_MMAP_HDRLEN + hdr->nm_len - 1);
++      p_end   = pgvec_to_page((void *)hdr + NL_MMAP_HDRLEN + nm_len - 1);
+       while (p_start <= p_end) {
+               flush_dcache_page(p_start);
+               p_start++;
+@@ -535,9 +535,9 @@ static enum nl_mmap_status netlink_get_status(const struct nl_mmap_hdr *hdr)
+ static void netlink_set_status(struct nl_mmap_hdr *hdr,
+                              enum nl_mmap_status status)
+ {
++      smp_mb();
+       hdr->nm_status = status;
+       flush_dcache_page(pgvec_to_page(hdr));
+-      smp_wmb();
+ }
+ 
+ static struct nl_mmap_hdr *
+@@ -699,24 +699,16 @@ static int netlink_mmap_sendmsg(struct sock *sk, struct msghdr *msg,
+       struct nl_mmap_hdr *hdr;
+       struct sk_buff *skb;
+       unsigned int maxlen;
+-      bool excl = true;
+       int err = 0, len = 0;
+ 
+-      /* Netlink messages are validated by the receiver before processing.
+-       * In order to avoid userspace changing the contents of the message
+-       * after validation, the socket and the ring may only be used by a
+-       * single process, otherwise we fall back to copying.
+-       */
+-      if (atomic_long_read(&sk->sk_socket->file->f_count) > 1 ||
+-          atomic_read(&nlk->mapped) > 1)
+-              excl = false;
+-
+       mutex_lock(&nlk->pg_vec_lock);
+ 
+       ring   = &nlk->tx_ring;
+       maxlen = ring->frame_size - NL_MMAP_HDRLEN;
+ 
+       do {
++              unsigned int nm_len;
++
+               hdr = netlink_current_frame(ring, NL_MMAP_STATUS_VALID);
+               if (hdr == NULL) {
+                       if (!(msg->msg_flags & MSG_DONTWAIT) &&
+@@ -724,35 +716,23 @@ static int netlink_mmap_sendmsg(struct sock *sk, struct msghdr *msg,
+                               schedule();
+                       continue;
+               }
+-              if (hdr->nm_len > maxlen) {
++
++              nm_len = ACCESS_ONCE(hdr->nm_len);
++              if (nm_len > maxlen) {
+                       err = -EINVAL;
+                       goto out;
+               }
+ 
+-              netlink_frame_flush_dcache(hdr);
++              netlink_frame_flush_dcache(hdr, nm_len);
+ 
+-              if (likely(dst_portid == 0 && dst_group == 0 && excl)) {
+-                      skb = alloc_skb_head(GFP_KERNEL);
+-                      if (skb == NULL) {
+-                              err = -ENOBUFS;
+-                              goto out;
+-                      }
+-                      sock_hold(sk);
+-                      netlink_ring_setup_skb(skb, sk, ring, hdr);
+-                      NETLINK_CB(skb).flags |= NETLINK_SKB_TX;
+-                      __skb_put(skb, hdr->nm_len);
+-                      netlink_set_status(hdr, NL_MMAP_STATUS_RESERVED);
+-                      atomic_inc(&ring->pending);
+-              } else {
+-                      skb = alloc_skb(hdr->nm_len, GFP_KERNEL);
+-                      if (skb == NULL) {
+-                              err = -ENOBUFS;
+-                              goto out;
+-                      }
+-                      __skb_put(skb, hdr->nm_len);
+-                      memcpy(skb->data, (void *)hdr + NL_MMAP_HDRLEN, hdr->nm_len);
+-                      netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);
++              skb = alloc_skb(nm_len, GFP_KERNEL);
++              if (skb == NULL) {
++                      err = -ENOBUFS;
++                      goto out;
+               }
++              __skb_put(skb, nm_len);
++              memcpy(skb->data, (void *)hdr + NL_MMAP_HDRLEN, nm_len);
++              netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);
+ 
+               netlink_increment_head(ring);
+ 
+@@ -798,7 +778,7 @@ static void netlink_queue_mmaped_skb(struct sock *sk, struct sk_buff *skb)
+       hdr->nm_pid     = NETLINK_CB(skb).creds.pid;
+       hdr->nm_uid     = from_kuid(sk_user_ns(sk), NETLINK_CB(skb).creds.uid);
+       hdr->nm_gid     = from_kgid(sk_user_ns(sk), NETLINK_CB(skb).creds.gid);
+-      netlink_frame_flush_dcache(hdr);
++      netlink_frame_flush_dcache(hdr, hdr->nm_len);
+       netlink_set_status(hdr, NL_MMAP_STATUS_VALID);
+ 
+       NETLINK_CB(skb).flags |= NETLINK_SKB_DELIVERED;
+diff --git a/net/wireless/chan.c b/net/wireless/chan.c
+index 78559b5bbd1f..27157a7801e8 100644
+--- a/net/wireless/chan.c
++++ b/net/wireless/chan.c
+@@ -516,7 +516,7 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy,
+ {
+       struct ieee80211_sta_ht_cap *ht_cap;
+       struct ieee80211_sta_vht_cap *vht_cap;
+-      u32 width, control_freq;
++      u32 width, control_freq, cap;
+ 
+       if (WARN_ON(!cfg80211_chandef_valid(chandef)))
+               return false;
+@@ -554,7 +554,8 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy,
+                       return false;
+               break;
+       case NL80211_CHAN_WIDTH_80P80:
+-              if (!(vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ))
++              cap = vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK;
++              if (cap != IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ)
+                       return false;
+       case NL80211_CHAN_WIDTH_80:
+               if (!vht_cap->vht_supported)
+@@ -565,7 +566,9 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy,
+       case NL80211_CHAN_WIDTH_160:
+               if (!vht_cap->vht_supported)
+                       return false;
+-              if (!(vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ))
++              cap = vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK;
++              if (cap != IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ &&
++                  cap != IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ)
+                       return false;
+               prohibited_flags |= IEEE80211_CHAN_NO_160MHZ;
+               width = 160;
+diff --git a/net/wireless/reg.c b/net/wireless/reg.c
+index 338794ea44d1..04d530560ec8 100644
+--- a/net/wireless/reg.c
++++ b/net/wireless/reg.c
+@@ -1547,7 +1547,7 @@ static enum reg_request_treatment
+ reg_process_hint_driver(struct wiphy *wiphy,
+                       struct regulatory_request *driver_request)
+ {
+-      const struct ieee80211_regdomain *regd;
++      const struct ieee80211_regdomain *regd, *tmp;
+       enum reg_request_treatment treatment;
+ 
+       treatment = __reg_process_hint_driver(driver_request);
+@@ -1566,7 +1566,10 @@ reg_process_hint_driver(struct wiphy *wiphy,
+                       kfree(driver_request);
+                       return REG_REQ_IGNORE;
+               }
++
++              tmp = get_wiphy_regdom(wiphy);
+               rcu_assign_pointer(wiphy->regd, regd);
++              rcu_free_regdom(tmp);
+       }
+ 
+ 
+@@ -1625,11 +1628,8 @@ __reg_process_hint_country_ie(struct wiphy *wiphy,
+                       return REG_REQ_IGNORE;
+               return REG_REQ_ALREADY_SET;
+       }
+-      /*
+-       * Two consecutive Country IE hints on the same wiphy.
+-       * This should be picked up early by the driver/stack
+-       */
+-      if (WARN_ON(regdom_changes(country_ie_request->alpha2)))
++
++      if (regdom_changes(country_ie_request->alpha2))
+               return REG_REQ_OK;
+       return REG_REQ_ALREADY_SET;
+ }
+diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
+index c657752a420c..83bddbdb90e9 100644
+--- a/sound/usb/quirks-table.h
++++ b/sound/usb/quirks-table.h
+@@ -2804,133 +2804,45 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+       }
+ },
+ 
+-/* Hauppauge HVR-950Q and HVR-850 */
+-{
+-      USB_DEVICE_VENDOR_SPEC(0x2040, 0x7200),
+-      .match_flags = USB_DEVICE_ID_MATCH_DEVICE |
+-                     USB_DEVICE_ID_MATCH_INT_CLASS |
+-                     USB_DEVICE_ID_MATCH_INT_SUBCLASS,
+-      .bInterfaceClass = USB_CLASS_AUDIO,
+-      .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
+-      .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+-              .vendor_name = "Hauppauge",
+-              .product_name = "HVR-950Q",
+-              .ifnum = QUIRK_ANY_INTERFACE,
+-              .type = QUIRK_AUDIO_ALIGN_TRANSFER,
+-      }
+-},
+-{
+-      USB_DEVICE_VENDOR_SPEC(0x2040, 0x7210),
+-      .match_flags = USB_DEVICE_ID_MATCH_DEVICE |
+-                     USB_DEVICE_ID_MATCH_INT_CLASS |
+-                     USB_DEVICE_ID_MATCH_INT_SUBCLASS,
+-      .bInterfaceClass = USB_CLASS_AUDIO,
+-      .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
+-      .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+-              .vendor_name = "Hauppauge",
+-              .product_name = "HVR-950Q",
+-              .ifnum = QUIRK_ANY_INTERFACE,
+-              .type = QUIRK_AUDIO_ALIGN_TRANSFER,
+-      }
+-},
+-{
+-      USB_DEVICE_VENDOR_SPEC(0x2040, 0x7217),
+-      .match_flags = USB_DEVICE_ID_MATCH_DEVICE |
+-                     USB_DEVICE_ID_MATCH_INT_CLASS |
+-                     USB_DEVICE_ID_MATCH_INT_SUBCLASS,
+-      .bInterfaceClass = USB_CLASS_AUDIO,
+-      .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
+-      .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+-              .vendor_name = "Hauppauge",
+-              .product_name = "HVR-950Q",
+-              .ifnum = QUIRK_ANY_INTERFACE,
+-              .type = QUIRK_AUDIO_ALIGN_TRANSFER,
+-      }
+-},
+-{
+-      USB_DEVICE_VENDOR_SPEC(0x2040, 0x721b),
+-      .match_flags = USB_DEVICE_ID_MATCH_DEVICE |
+-                     USB_DEVICE_ID_MATCH_INT_CLASS |
+-                     USB_DEVICE_ID_MATCH_INT_SUBCLASS,
+-      .bInterfaceClass = USB_CLASS_AUDIO,
+-      .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
+-      .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+-              .vendor_name = "Hauppauge",
+-              .product_name = "HVR-950Q",
+-              .ifnum = QUIRK_ANY_INTERFACE,
+-              .type = QUIRK_AUDIO_ALIGN_TRANSFER,
+-      }
+-},
+-{
+-      USB_DEVICE_VENDOR_SPEC(0x2040, 0x721e),
+-      .match_flags = USB_DEVICE_ID_MATCH_DEVICE |
+-                     USB_DEVICE_ID_MATCH_INT_CLASS |
+-                     USB_DEVICE_ID_MATCH_INT_SUBCLASS,
+-      .bInterfaceClass = USB_CLASS_AUDIO,
+-      .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
+-      .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+-              .vendor_name = "Hauppauge",
+-              .product_name = "HVR-950Q",
+-              .ifnum = QUIRK_ANY_INTERFACE,
+-              .type = QUIRK_AUDIO_ALIGN_TRANSFER,
+-      }
+-},
+-{
+-      USB_DEVICE_VENDOR_SPEC(0x2040, 0x721f),
+-      .match_flags = USB_DEVICE_ID_MATCH_DEVICE |
+-                     USB_DEVICE_ID_MATCH_INT_CLASS |
+-                     USB_DEVICE_ID_MATCH_INT_SUBCLASS,
+-      .bInterfaceClass = USB_CLASS_AUDIO,
+-      .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
+-      .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+-              .vendor_name = "Hauppauge",
+-              .product_name = "HVR-950Q",
+-              .ifnum = QUIRK_ANY_INTERFACE,
+-              .type = QUIRK_AUDIO_ALIGN_TRANSFER,
+-      }
+-},
+-{
+-      USB_DEVICE_VENDOR_SPEC(0x2040, 0x7240),
+-      .match_flags = USB_DEVICE_ID_MATCH_DEVICE |
+-                     USB_DEVICE_ID_MATCH_INT_CLASS |
+-                     USB_DEVICE_ID_MATCH_INT_SUBCLASS,
+-      .bInterfaceClass = USB_CLASS_AUDIO,
+-      .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
+-      .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+-              .vendor_name = "Hauppauge",
+-              .product_name = "HVR-850",
+-              .ifnum = QUIRK_ANY_INTERFACE,
+-              .type = QUIRK_AUDIO_ALIGN_TRANSFER,
+-      }
+-},
+-{
+-      USB_DEVICE_VENDOR_SPEC(0x2040, 0x7280),
+-      .match_flags = USB_DEVICE_ID_MATCH_DEVICE |
+-                     USB_DEVICE_ID_MATCH_INT_CLASS |
+-                     USB_DEVICE_ID_MATCH_INT_SUBCLASS,
+-      .bInterfaceClass = USB_CLASS_AUDIO,
+-      .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
+-      .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+-              .vendor_name = "Hauppauge",
+-              .product_name = "HVR-950Q",
+-              .ifnum = QUIRK_ANY_INTERFACE,
+-              .type = QUIRK_AUDIO_ALIGN_TRANSFER,
+-      }
+-},
+-{
+-      USB_DEVICE_VENDOR_SPEC(0x0fd9, 0x0008),
+-      .match_flags = USB_DEVICE_ID_MATCH_DEVICE |
+-                     USB_DEVICE_ID_MATCH_INT_CLASS |
+-                     USB_DEVICE_ID_MATCH_INT_SUBCLASS,
+-      .bInterfaceClass = USB_CLASS_AUDIO,
+-      .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
+-      .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+-              .vendor_name = "Hauppauge",
+-              .product_name = "HVR-950Q",
+-              .ifnum = QUIRK_ANY_INTERFACE,
+-              .type = QUIRK_AUDIO_ALIGN_TRANSFER,
+-      }
+-},
++/*
++ * Auvitek au0828 devices with audio interface.
++ * This should be kept in sync with drivers/media/usb/au0828/au0828-cards.c
++ * Please notice that some drivers are DVB only, and don't need to be
++ * here. That's the case, for example, of DVICO_FUSIONHDTV7.
++ */
++
++#define AU0828_DEVICE(vid, pid, vname, pname) { \
++      USB_DEVICE_VENDOR_SPEC(vid, pid), \
++      .match_flags = USB_DEVICE_ID_MATCH_DEVICE | \
++                     USB_DEVICE_ID_MATCH_INT_CLASS | \
++                     USB_DEVICE_ID_MATCH_INT_SUBCLASS, \
++      .bInterfaceClass = USB_CLASS_AUDIO, \
++      .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL, \
++      .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) { \
++              .vendor_name = vname, \
++              .product_name = pname, \
++              .ifnum = QUIRK_ANY_INTERFACE, \
++              .type = QUIRK_AUDIO_ALIGN_TRANSFER, \
++      } \
++}
++
++AU0828_DEVICE(0x2040, 0x7200, "Hauppauge", "HVR-950Q"),
++AU0828_DEVICE(0x2040, 0x7240, "Hauppauge", "HVR-850"),
++AU0828_DEVICE(0x2040, 0x7210, "Hauppauge", "HVR-950Q"),
++AU0828_DEVICE(0x2040, 0x7217, "Hauppauge", "HVR-950Q"),
++AU0828_DEVICE(0x2040, 0x721b, "Hauppauge", "HVR-950Q"),
++AU0828_DEVICE(0x2040, 0x721e, "Hauppauge", "HVR-950Q"),
++AU0828_DEVICE(0x2040, 0x721f, "Hauppauge", "HVR-950Q"),
++AU0828_DEVICE(0x2040, 0x7280, "Hauppauge", "HVR-950Q"),
++AU0828_DEVICE(0x0fd9, 0x0008, "Hauppauge", "HVR-950Q"),
++AU0828_DEVICE(0x2040, 0x7201, "Hauppauge", "HVR-950Q-MXL"),
++AU0828_DEVICE(0x2040, 0x7211, "Hauppauge", "HVR-950Q-MXL"),
++AU0828_DEVICE(0x2040, 0x7281, "Hauppauge", "HVR-950Q-MXL"),
++AU0828_DEVICE(0x05e1, 0x0480, "Hauppauge", "Woodbury"),
++AU0828_DEVICE(0x2040, 0x8200, "Hauppauge", "Woodbury"),
++AU0828_DEVICE(0x2040, 0x7260, "Hauppauge", "HVR-950Q"),
++AU0828_DEVICE(0x2040, 0x7213, "Hauppauge", "HVR-950Q"),
++AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
+ 
+ /* Digidesign Mbox */
+ {
