diff --git a/Makefile b/Makefile
index e2b10b9..7e4eee5 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 3
 PATCHLEVEL = 8
-SUBLEVEL = 10
+SUBLEVEL = 11
 EXTRAVERSION =
 NAME = Displaced Humerus Anterior
 
diff --git a/arch/arm/include/asm/hardware/iop3xx.h b/arch/arm/include/asm/hardware/iop3xx.h
index 02fe2fb..ed94b1a 100644
--- a/arch/arm/include/asm/hardware/iop3xx.h
+++ b/arch/arm/include/asm/hardware/iop3xx.h
@@ -37,7 +37,7 @@ extern int iop3xx_get_init_atu(void);
  * IOP3XX processor registers
  */
 #define IOP3XX_PERIPHERAL_PHYS_BASE    0xffffe000
-#define IOP3XX_PERIPHERAL_VIRT_BASE    0xfeffe000
+#define IOP3XX_PERIPHERAL_VIRT_BASE    0xfedfe000
 #define IOP3XX_PERIPHERAL_SIZE         0x00002000
 #define IOP3XX_PERIPHERAL_UPPER_PA (IOP3XX_PERIPHERAL_PHYS_BASE +\
                                        IOP3XX_PERIPHERAL_SIZE - 1)
diff --git a/arch/arm/kernel/sched_clock.c b/arch/arm/kernel/sched_clock.c
index bd6f56b..59d2adb 100644
--- a/arch/arm/kernel/sched_clock.c
+++ b/arch/arm/kernel/sched_clock.c
@@ -45,12 +45,12 @@ static u32 notrace jiffy_sched_clock_read(void)
 
 static u32 __read_mostly (*read_sched_clock)(void) = jiffy_sched_clock_read;
 
-static inline u64 cyc_to_ns(u64 cyc, u32 mult, u32 shift)
+static inline u64 notrace cyc_to_ns(u64 cyc, u32 mult, u32 shift)
 {
        return (cyc * mult) >> shift;
 }
 
-static unsigned long long cyc_to_sched_clock(u32 cyc, u32 mask)
+static unsigned long long notrace cyc_to_sched_clock(u32 cyc, u32 mask)
 {
        u64 epoch_ns;
        u32 epoch_cyc;
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index 08fcce9..7619f2f 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -915,6 +915,7 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma,
        return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot);
 }
 
+#include <asm/tlbflush.h>
 #include <asm-generic/pgtable.h>
 
 /* We provide our own get_unmapped_area to cope with VA holes and
diff --git a/arch/sparc/include/asm/switch_to_64.h b/arch/sparc/include/asm/switch_to_64.h
index cad36f5..c7de332 100644
--- a/arch/sparc/include/asm/switch_to_64.h
+++ b/arch/sparc/include/asm/switch_to_64.h
@@ -18,8 +18,7 @@ do {                                          \
         * and 2 stores in this critical code path.  -DaveM
         */
 #define switch_to(prev, next, last)                                    \
-do {   flush_tlb_pending();                                            \
-       save_and_clear_fpu();                                           \
+do {   save_and_clear_fpu();                                           \
        /* If you are tempted to conditionalize the following */        \
        /* so that ASI is only written if it changes, think again. */   \
        __asm__ __volatile__("wr %%g0, %0, %%asi"                       \
diff --git a/arch/sparc/include/asm/tlbflush_64.h b/arch/sparc/include/asm/tlbflush_64.h
index 2ef4634..f0d6a97 100644
--- a/arch/sparc/include/asm/tlbflush_64.h
+++ b/arch/sparc/include/asm/tlbflush_64.h
@@ -11,24 +11,40 @@
 struct tlb_batch {
        struct mm_struct *mm;
        unsigned long tlb_nr;
+       unsigned long active;
        unsigned long vaddrs[TLB_BATCH_NR];
 };
 
 extern void flush_tsb_kernel_range(unsigned long start, unsigned long end);
 extern void flush_tsb_user(struct tlb_batch *tb);
+extern void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr);
 
 /* TLB flush operations. */
 
-extern void flush_tlb_pending(void);
+static inline void flush_tlb_mm(struct mm_struct *mm)
+{
+}
+
+static inline void flush_tlb_page(struct vm_area_struct *vma,
+                                 unsigned long vmaddr)
+{
+}
+
+static inline void flush_tlb_range(struct vm_area_struct *vma,
+                                  unsigned long start, unsigned long end)
+{
+}
+
+#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
 
-#define flush_tlb_range(vma,start,end) \
-       do { (void)(start); flush_tlb_pending(); } while (0)
-#define flush_tlb_page(vma,addr)       flush_tlb_pending()
-#define flush_tlb_mm(mm)               flush_tlb_pending()
+extern void flush_tlb_pending(void);
+extern void arch_enter_lazy_mmu_mode(void);
+extern void arch_leave_lazy_mmu_mode(void);
+#define arch_flush_lazy_mmu_mode()      do {} while (0)
 
 /* Local cpu only.  */
 extern void __flush_tlb_all(void);
-
+extern void __flush_tlb_page(unsigned long context, unsigned long vaddr);
 extern void __flush_tlb_kernel_range(unsigned long start, unsigned long end);
 
 #ifndef CONFIG_SMP
@@ -38,15 +54,24 @@ do {        flush_tsb_kernel_range(start,end); \
        __flush_tlb_kernel_range(start,end); \
 } while (0)
 
+static inline void global_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
+{
+       __flush_tlb_page(CTX_HWBITS(mm->context), vaddr);
+}
+
 #else /* CONFIG_SMP */
 
 extern void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end);
+extern void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr);
 
 #define flush_tlb_kernel_range(start, end) \
 do {   flush_tsb_kernel_range(start,end); \
        smp_flush_tlb_kernel_range(start, end); \
 } while (0)
 
+#define global_flush_tlb_page(mm, vaddr) \
+       smp_flush_tlb_page(mm, vaddr)
+
 #endif /* ! CONFIG_SMP */
 
 #endif /* _SPARC64_TLBFLUSH_H */
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index 537eb66..ca64d2a 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -849,7 +849,7 @@ void smp_tsb_sync(struct mm_struct *mm)
 }
 
 extern unsigned long xcall_flush_tlb_mm;
-extern unsigned long xcall_flush_tlb_pending;
+extern unsigned long xcall_flush_tlb_page;
 extern unsigned long xcall_flush_tlb_kernel_range;
 extern unsigned long xcall_fetch_glob_regs;
 extern unsigned long xcall_fetch_glob_pmu;
@@ -1074,23 +1074,56 @@ local_flush_and_out:
        put_cpu();
 }
 
+struct tlb_pending_info {
+       unsigned long ctx;
+       unsigned long nr;
+       unsigned long *vaddrs;
+};
+
+static void tlb_pending_func(void *info)
+{
+       struct tlb_pending_info *t = info;
+
+       __flush_tlb_pending(t->ctx, t->nr, t->vaddrs);
+}
+
 void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs)
 {
        u32 ctx = CTX_HWBITS(mm->context);
+       struct tlb_pending_info info;
        int cpu = get_cpu();
 
+       info.ctx = ctx;
+       info.nr = nr;
+       info.vaddrs = vaddrs;
+
        if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
                cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
        else
-               smp_cross_call_masked(&xcall_flush_tlb_pending,
-                                     ctx, nr, (unsigned long) vaddrs,
-                                     mm_cpumask(mm));
+               smp_call_function_many(mm_cpumask(mm), tlb_pending_func,
+                                      &info, 1);
 
        __flush_tlb_pending(ctx, nr, vaddrs);
 
        put_cpu();
 }
 
+void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
+{
+       unsigned long context = CTX_HWBITS(mm->context);
+       int cpu = get_cpu();
+
+       if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
+               cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
+       else
+               smp_cross_call_masked(&xcall_flush_tlb_page,
+                                     context, vaddr, 0,
+                                     mm_cpumask(mm));
+       __flush_tlb_page(context, vaddr);
+
+       put_cpu();
+}
+
 void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end)
 {
        start &= PAGE_MASK;
diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
index ba6ae7f..83d89bc 100644
--- a/arch/sparc/mm/tlb.c
+++ b/arch/sparc/mm/tlb.c
@@ -24,11 +24,17 @@ static DEFINE_PER_CPU(struct tlb_batch, tlb_batch);
 void flush_tlb_pending(void)
 {
        struct tlb_batch *tb = &get_cpu_var(tlb_batch);
+       struct mm_struct *mm = tb->mm;
 
-       if (tb->tlb_nr) {
-               flush_tsb_user(tb);
+       if (!tb->tlb_nr)
+               goto out;
 
-               if (CTX_VALID(tb->mm->context)) {
+       flush_tsb_user(tb);
+
+       if (CTX_VALID(mm->context)) {
+               if (tb->tlb_nr == 1) {
+                       global_flush_tlb_page(mm, tb->vaddrs[0]);
+               } else {
 #ifdef CONFIG_SMP
                        smp_flush_tlb_pending(tb->mm, tb->tlb_nr,
                                              &tb->vaddrs[0]);
@@ -37,12 +43,30 @@ void flush_tlb_pending(void)
                                            tb->tlb_nr, &tb->vaddrs[0]);
 #endif
                }
-               tb->tlb_nr = 0;
        }
 
+       tb->tlb_nr = 0;
+
+out:
        put_cpu_var(tlb_batch);
 }
 
+void arch_enter_lazy_mmu_mode(void)
+{
+       struct tlb_batch *tb = &__get_cpu_var(tlb_batch);
+
+       tb->active = 1;
+}
+
+void arch_leave_lazy_mmu_mode(void)
+{
+       struct tlb_batch *tb = &__get_cpu_var(tlb_batch);
+
+       if (tb->tlb_nr)
+               flush_tlb_pending();
+       tb->active = 0;
+}
+
 static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
                              bool exec)
 {
@@ -60,6 +84,12 @@ static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
                nr = 0;
        }
 
+       if (!tb->active) {
+               global_flush_tlb_page(mm, vaddr);
+               flush_tsb_user_page(mm, vaddr);
+               goto out;
+       }
+
        if (nr == 0)
                tb->mm = mm;
 
@@ -68,6 +98,7 @@ static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
        if (nr >= TLB_BATCH_NR)
                flush_tlb_pending();
 
+out:
        put_cpu_var(tlb_batch);
 }
 
diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
index 428982b..2cc3bce 100644
--- a/arch/sparc/mm/tsb.c
+++ b/arch/sparc/mm/tsb.c
@@ -7,11 +7,10 @@
 #include <linux/preempt.h>
 #include <linux/slab.h>
 #include <asm/page.h>
-#include <asm/tlbflush.h>
-#include <asm/tlb.h>
-#include <asm/mmu_context.h>
 #include <asm/pgtable.h>
+#include <asm/mmu_context.h>
 #include <asm/tsb.h>
+#include <asm/tlb.h>
 #include <asm/oplib.h>
 
 extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];
@@ -46,23 +45,27 @@ void flush_tsb_kernel_range(unsigned long start, unsigned long end)
        }
 }
 
-static void __flush_tsb_one(struct tlb_batch *tb, unsigned long hash_shift,
-                           unsigned long tsb, unsigned long nentries)
+static void __flush_tsb_one_entry(unsigned long tsb, unsigned long v,
+                                 unsigned long hash_shift,
+                                 unsigned long nentries)
 {
-       unsigned long i;
+       unsigned long tag, ent, hash;
 
-       for (i = 0; i < tb->tlb_nr; i++) {
-               unsigned long v = tb->vaddrs[i];
-               unsigned long tag, ent, hash;
+       v &= ~0x1UL;
+       hash = tsb_hash(v, hash_shift, nentries);
+       ent = tsb + (hash * sizeof(struct tsb));
+       tag = (v >> 22UL);
 
-               v &= ~0x1UL;
+       tsb_flush(ent, tag);
+}
 
-               hash = tsb_hash(v, hash_shift, nentries);
-               ent = tsb + (hash * sizeof(struct tsb));
-               tag = (v >> 22UL);
+static void __flush_tsb_one(struct tlb_batch *tb, unsigned long hash_shift,
+                           unsigned long tsb, unsigned long nentries)
+{
+       unsigned long i;
 
-               tsb_flush(ent, tag);
-       }
+       for (i = 0; i < tb->tlb_nr; i++)
+               __flush_tsb_one_entry(tsb, tb->vaddrs[i], hash_shift, nentries);
 }
 
 void flush_tsb_user(struct tlb_batch *tb)
@@ -90,6 +93,30 @@ void flush_tsb_user(struct tlb_batch *tb)
        spin_unlock_irqrestore(&mm->context.lock, flags);
 }
 
+void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr)
+{
+       unsigned long nentries, base, flags;
+
+       spin_lock_irqsave(&mm->context.lock, flags);
+
+       base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
+       nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
+       if (tlb_type == cheetah_plus || tlb_type == hypervisor)
+               base = __pa(base);
+       __flush_tsb_one_entry(base, vaddr, PAGE_SHIFT, nentries);
+
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
+       if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
+               base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
+               nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
+               if (tlb_type == cheetah_plus || tlb_type == hypervisor)
+                       base = __pa(base);
+               __flush_tsb_one_entry(base, vaddr, HPAGE_SHIFT, nentries);
+       }
+#endif
+       spin_unlock_irqrestore(&mm->context.lock, flags);
+}
+
 #define HV_PGSZ_IDX_BASE       HV_PGSZ_IDX_8K
 #define HV_PGSZ_MASK_BASE      HV_PGSZ_MASK_8K
 
diff --git a/arch/sparc/mm/ultra.S b/arch/sparc/mm/ultra.S
index f8e13d4..29b9608 100644
--- a/arch/sparc/mm/ultra.S
+++ b/arch/sparc/mm/ultra.S
@@ -53,6 +53,33 @@ __flush_tlb_mm:              /* 18 insns */
        nop
 
        .align          32
+       .globl          __flush_tlb_page
+__flush_tlb_page:      /* 22 insns */
+       /* %o0 = context, %o1 = vaddr */
+       rdpr            %pstate, %g7
+       andn            %g7, PSTATE_IE, %g2
+       wrpr            %g2, %pstate
+       mov             SECONDARY_CONTEXT, %o4
+       ldxa            [%o4] ASI_DMMU, %g2
+       stxa            %o0, [%o4] ASI_DMMU
+       andcc           %o1, 1, %g0
+       andn            %o1, 1, %o3
+       be,pn           %icc, 1f
+        or             %o3, 0x10, %o3
+       stxa            %g0, [%o3] ASI_IMMU_DEMAP
+1:     stxa            %g0, [%o3] ASI_DMMU_DEMAP
+       membar          #Sync
+       stxa            %g2, [%o4] ASI_DMMU
+       sethi           %hi(KERNBASE), %o4
+       flush           %o4
+       retl
+        wrpr           %g7, 0x0, %pstate
+       nop
+       nop
+       nop
+       nop
+
+       .align          32
        .globl          __flush_tlb_pending
 __flush_tlb_pending:   /* 26 insns */
        /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
@@ -203,6 +230,31 @@ __cheetah_flush_tlb_mm: /* 19 insns */
        retl
         wrpr           %g7, 0x0, %pstate
 
+__cheetah_flush_tlb_page:      /* 22 insns */
+       /* %o0 = context, %o1 = vaddr */
+       rdpr            %pstate, %g7
+       andn            %g7, PSTATE_IE, %g2
+       wrpr            %g2, 0x0, %pstate
+       wrpr            %g0, 1, %tl
+       mov             PRIMARY_CONTEXT, %o4
+       ldxa            [%o4] ASI_DMMU, %g2
+       srlx            %g2, CTX_PGSZ1_NUC_SHIFT, %o3
+       sllx            %o3, CTX_PGSZ1_NUC_SHIFT, %o3
+       or              %o0, %o3, %o0   /* Preserve nucleus page size fields */
+       stxa            %o0, [%o4] ASI_DMMU
+       andcc           %o1, 1, %g0
+       be,pn           %icc, 1f
+        andn           %o1, 1, %o3
+       stxa            %g0, [%o3] ASI_IMMU_DEMAP
+1:     stxa            %g0, [%o3] ASI_DMMU_DEMAP
+       membar          #Sync
+       stxa            %g2, [%o4] ASI_DMMU
+       sethi           %hi(KERNBASE), %o4
+       flush           %o4
+       wrpr            %g0, 0, %tl
+       retl
+        wrpr           %g7, 0x0, %pstate
+
 __cheetah_flush_tlb_pending:   /* 27 insns */
        /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
        rdpr            %pstate, %g7
@@ -269,6 +321,20 @@ __hypervisor_flush_tlb_mm: /* 10 insns */
        retl
         nop
 
+__hypervisor_flush_tlb_page: /* 11 insns */
+       /* %o0 = context, %o1 = vaddr */
+       mov             %o0, %g2
+       mov             %o1, %o0              /* ARG0: vaddr + IMMU-bit */
+       mov             %g2, %o1              /* ARG1: mmu context */
+       mov             HV_MMU_ALL, %o2       /* ARG2: flags */
+       srlx            %o0, PAGE_SHIFT, %o0
+       sllx            %o0, PAGE_SHIFT, %o0
+       ta              HV_MMU_UNMAP_ADDR_TRAP
+       brnz,pn         %o0, __hypervisor_tlb_tl0_error
+        mov            HV_MMU_UNMAP_ADDR_TRAP, %o1
+       retl
+        nop
+
 __hypervisor_flush_tlb_pending: /* 16 insns */
        /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
        sllx            %o1, 3, %g1
@@ -339,6 +405,13 @@ cheetah_patch_cachetlbops:
        call            tlb_patch_one
         mov            19, %o2
 
+       sethi           %hi(__flush_tlb_page), %o0
+       or              %o0, %lo(__flush_tlb_page), %o0
+       sethi           %hi(__cheetah_flush_tlb_page), %o1
+       or              %o1, %lo(__cheetah_flush_tlb_page), %o1
+       call            tlb_patch_one
+        mov            22, %o2
+
        sethi           %hi(__flush_tlb_pending), %o0
        or              %o0, %lo(__flush_tlb_pending), %o0
        sethi           %hi(__cheetah_flush_tlb_pending), %o1
@@ -397,10 +470,9 @@ xcall_flush_tlb_mm:        /* 21 insns */
        nop
        nop
 
-       .globl          xcall_flush_tlb_pending
-xcall_flush_tlb_pending:       /* 21 insns */
-       /* %g5=context, %g1=nr, %g7=vaddrs[] */
-       sllx            %g1, 3, %g1
+       .globl          xcall_flush_tlb_page
+xcall_flush_tlb_page:  /* 17 insns */
+       /* %g5=context, %g1=vaddr */
        mov             PRIMARY_CONTEXT, %g4
        ldxa            [%g4] ASI_DMMU, %g2
        srlx            %g2, CTX_PGSZ1_NUC_SHIFT, %g4
@@ -408,20 +480,16 @@ xcall_flush_tlb_pending:  /* 21 insns */
        or              %g5, %g4, %g5
        mov             PRIMARY_CONTEXT, %g4
        stxa            %g5, [%g4] ASI_DMMU
-1:     sub             %g1, (1 << 3), %g1
-       ldx             [%g7 + %g1], %g5
-       andcc           %g5, 0x1, %g0
+       andcc           %g1, 0x1, %g0
        be,pn           %icc, 2f
-
-        andn           %g5, 0x1, %g5
+        andn           %g1, 0x1, %g5
        stxa            %g0, [%g5] ASI_IMMU_DEMAP
 2:     stxa            %g0, [%g5] ASI_DMMU_DEMAP
        membar          #Sync
-       brnz,pt         %g1, 1b
-        nop
        stxa            %g2, [%g4] ASI_DMMU
        retry
        nop
+       nop
 
        .globl          xcall_flush_tlb_kernel_range
 xcall_flush_tlb_kernel_range:  /* 25 insns */
@@ -656,15 +724,13 @@ __hypervisor_xcall_flush_tlb_mm: /* 21 insns */
        membar          #Sync
        retry
 
-       .globl          __hypervisor_xcall_flush_tlb_pending
-__hypervisor_xcall_flush_tlb_pending: /* 21 insns */
-       /* %g5=ctx, %g1=nr, %g7=vaddrs[], %g2,%g3,%g4,g6=scratch */
-       sllx            %g1, 3, %g1
+       .globl          __hypervisor_xcall_flush_tlb_page
+__hypervisor_xcall_flush_tlb_page: /* 17 insns */
+       /* %g5=ctx, %g1=vaddr */
        mov             %o0, %g2
        mov             %o1, %g3
        mov             %o2, %g4
-1:     sub             %g1, (1 << 3), %g1
-       ldx             [%g7 + %g1], %o0        /* ARG0: virtual address */
+       mov             %g1, %o0                /* ARG0: virtual address */
        mov             %g5, %o1                /* ARG1: mmu context */
        mov             HV_MMU_ALL, %o2         /* ARG2: flags */
        srlx            %o0, PAGE_SHIFT, %o0
@@ -673,8 +739,6 @@ __hypervisor_xcall_flush_tlb_pending: /* 21 insns */
        mov             HV_MMU_UNMAP_ADDR_TRAP, %g6
        brnz,a,pn       %o0, __hypervisor_tlb_xcall_error
         mov            %o0, %g5
-       brnz,pt         %g1, 1b
-        nop
        mov             %g2, %o0
        mov             %g3, %o1
        mov             %g4, %o2
@@ -757,6 +821,13 @@ hypervisor_patch_cachetlbops:
        call            tlb_patch_one
         mov            10, %o2
 
+       sethi           %hi(__flush_tlb_page), %o0
+       or              %o0, %lo(__flush_tlb_page), %o0
+       sethi           %hi(__hypervisor_flush_tlb_page), %o1
+       or              %o1, %lo(__hypervisor_flush_tlb_page), %o1
+       call            tlb_patch_one
+        mov            11, %o2
+
        sethi           %hi(__flush_tlb_pending), %o0
        or              %o0, %lo(__flush_tlb_pending), %o0
        sethi           %hi(__hypervisor_flush_tlb_pending), %o1
@@ -788,12 +859,12 @@ hypervisor_patch_cachetlbops:
        call            tlb_patch_one
         mov            21, %o2
 
-       sethi           %hi(xcall_flush_tlb_pending), %o0
-       or              %o0, %lo(xcall_flush_tlb_pending), %o0
-       sethi           %hi(__hypervisor_xcall_flush_tlb_pending), %o1
-       or              %o1, %lo(__hypervisor_xcall_flush_tlb_pending), %o1
+       sethi           %hi(xcall_flush_tlb_page), %o0
+       or              %o0, %lo(xcall_flush_tlb_page), %o0
+       sethi           %hi(__hypervisor_xcall_flush_tlb_page), %o1
+       or              %o1, %lo(__hypervisor_xcall_flush_tlb_page), %o1
        call            tlb_patch_one
-        mov            21, %o2
+        mov            17, %o2
 
        sethi           %hi(xcall_flush_tlb_kernel_range), %o0
        or              %o0, %lo(xcall_flush_tlb_kernel_range), %o0
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 27cdf1f..045dc53 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1888,6 +1888,7 @@ err_detach:
        write_unlock_bh(&bond->lock);
 
 err_close:
+       slave_dev->priv_flags &= ~IFF_BONDING;
        dev_close(slave_dev);
 
 err_unset_master:
@@ -3379,20 +3380,22 @@ static int bond_xmit_hash_policy_l2(struct sk_buff *skb, int count)
  */
 static int bond_xmit_hash_policy_l23(struct sk_buff *skb, int count)
 {
-       struct ethhdr *data = (struct ethhdr *)skb->data;
-       struct iphdr *iph;
-       struct ipv6hdr *ipv6h;
+       const struct ethhdr *data;
+       const struct iphdr *iph;
+       const struct ipv6hdr *ipv6h;
        u32 v6hash;
-       __be32 *s, *d;
+       const __be32 *s, *d;
 
        if (skb->protocol == htons(ETH_P_IP) &&
-           skb_network_header_len(skb) >= sizeof(*iph)) {
+           pskb_network_may_pull(skb, sizeof(*iph))) {
                iph = ip_hdr(skb);
+               data = (struct ethhdr *)skb->data;
                return ((ntohl(iph->saddr ^ iph->daddr) & 0xffff) ^
                        (data->h_dest[5] ^ data->h_source[5])) % count;
        } else if (skb->protocol == htons(ETH_P_IPV6) &&
-                  skb_network_header_len(skb) >= sizeof(*ipv6h)) {
+                  pskb_network_may_pull(skb, sizeof(*ipv6h))) {
                ipv6h = ipv6_hdr(skb);
+               data = (struct ethhdr *)skb->data;
                s = &ipv6h->saddr.s6_addr32[0];
                d = &ipv6h->daddr.s6_addr32[0];
                v6hash = (s[1] ^ d[1]) ^ (s[2] ^ d[2]) ^ (s[3] ^ d[3]);
@@ -3411,33 +3414,36 @@ static int bond_xmit_hash_policy_l23(struct sk_buff *skb, int count)
 static int bond_xmit_hash_policy_l34(struct sk_buff *skb, int count)
 {
        u32 layer4_xor = 0;
-       struct iphdr *iph;
-       struct ipv6hdr *ipv6h;
-       __be32 *s, *d;
-       __be16 *layer4hdr;
+       const struct iphdr *iph;
+       const struct ipv6hdr *ipv6h;
+       const __be32 *s, *d;
+       const __be16 *l4 = NULL;
+       __be16 _l4[2];
+       int noff = skb_network_offset(skb);
+       int poff;
 
        if (skb->protocol == htons(ETH_P_IP) &&
-           skb_network_header_len(skb) >= sizeof(*iph)) {
+           pskb_may_pull(skb, noff + sizeof(*iph))) {
                iph = ip_hdr(skb);
-               if (!ip_is_fragment(iph) &&
-                   (iph->protocol == IPPROTO_TCP ||
-                    iph->protocol == IPPROTO_UDP) &&
-                   (skb_headlen(skb) - skb_network_offset(skb) >=
-                    iph->ihl * sizeof(u32) + sizeof(*layer4hdr) * 2)) {
-                       layer4hdr = (__be16 *)((u32 *)iph + iph->ihl);
-                       layer4_xor = ntohs(*layer4hdr ^ *(layer4hdr + 1));
+               poff = proto_ports_offset(iph->protocol);
+
+               if (!ip_is_fragment(iph) && poff >= 0) {
+                       l4 = skb_header_pointer(skb, noff + (iph->ihl << 2) + poff,
+                                               sizeof(_l4), &_l4);
+                       if (l4)
+                               layer4_xor = ntohs(l4[0] ^ l4[1]);
                }
                return (layer4_xor ^
                        ((ntohl(iph->saddr ^ iph->daddr)) & 0xffff)) % count;
        } else if (skb->protocol == htons(ETH_P_IPV6) &&
-                  skb_network_header_len(skb) >= sizeof(*ipv6h)) {
+                  pskb_may_pull(skb, noff + sizeof(*ipv6h))) {
                ipv6h = ipv6_hdr(skb);
-               if ((ipv6h->nexthdr == IPPROTO_TCP ||
-                    ipv6h->nexthdr == IPPROTO_UDP) &&
-                   (skb_headlen(skb) - skb_network_offset(skb) >=
-                    sizeof(*ipv6h) + sizeof(*layer4hdr) * 2)) {
-                       layer4hdr = (__be16 *)(ipv6h + 1);
-                       layer4_xor = ntohs(*layer4hdr ^ *(layer4hdr + 1));
+               poff = proto_ports_offset(ipv6h->nexthdr);
+               if (poff >= 0) {
+                       l4 = skb_header_pointer(skb, noff + sizeof(*ipv6h) + poff,
+                                               sizeof(_l4), &_l4);
+                       if (l4)
+                               layer4_xor = ntohs(l4[0] ^ l4[1]);
                }
                s = &ipv6h->saddr.s6_addr32[0];
                d = &ipv6h->daddr.s6_addr32[0];
@@ -4919,9 +4925,18 @@ static int __net_init bond_net_init(struct net *net)
 static void __net_exit bond_net_exit(struct net *net)
 {
        struct bond_net *bn = net_generic(net, bond_net_id);
+       struct bonding *bond, *tmp_bond;
+       LIST_HEAD(list);
 
        bond_destroy_sysfs(bn);
        bond_destroy_proc_dir(bn);
+
+       /* Kill off any bonds created after unregistering bond rtnl ops */
+       rtnl_lock();
+       list_for_each_entry_safe(bond, tmp_bond, &bn->dev_list, bond_list)
+               unregister_netdevice_queue(bond->dev, &list);
+       unregister_netdevice_many(&list);
+       rtnl_unlock();
 }
 
 static struct pernet_operations bond_net_ops = {
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e.h b/drivers/net/ethernet/atheros/atl1e/atl1e.h
index edfdf6b..b5fd934 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e.h
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e.h
@@ -186,7 +186,7 @@ struct atl1e_tpd_desc {
 /* how about 0x2000 */
 #define MAX_TX_BUF_LEN      0x2000
 #define MAX_TX_BUF_SHIFT    13
-/*#define MAX_TX_BUF_LEN  0x3000 */
+#define MAX_TSO_SEG_SIZE    0x3c00
 
 /* rrs word 1 bit 0:31 */
 #define RRS_RX_CSUM_MASK       0xFFFF
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index 35faab7..ca33b28 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -2332,6 +2332,7 @@ static int atl1e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        INIT_WORK(&adapter->reset_task, atl1e_reset_task);
        INIT_WORK(&adapter->link_chg_task, atl1e_link_chg_task);
+       netif_set_gso_max_size(netdev, MAX_TSO_SEG_SIZE);
        err = register_netdev(netdev);
        if (err) {
                netdev_err(netdev, "register netdevice failed\n");
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
index edfba93..434e33c 100644
--- a/drivers/net/ethernet/marvell/Kconfig
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -33,6 +33,7 @@ config MV643XX_ETH
 
 config MVMDIO
        tristate "Marvell MDIO interface support"
+       select PHYLIB
        ---help---
          This driver supports the MDIO interface found in the network
          interface units of the Marvell EBU SoCs (Kirkwood, Orion5x,
@@ -45,7 +46,6 @@ config MVMDIO
 config MVNETA
        tristate "Marvell Armada 370/XP network interface support"
        depends on MACH_ARMADA_370_XP
-       select PHYLIB
        select MVMDIO
        ---help---
          This driver supports the network interface units in the
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index b6025c3..84b312ea 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -375,7 +375,6 @@ static int rxq_number = 8;
 static int txq_number = 8;
 
 static int rxq_def;
-static int txq_def;
 
 #define MVNETA_DRIVER_NAME "mvneta"
 #define MVNETA_DRIVER_VERSION "1.0"
@@ -1476,7 +1475,8 @@ error:
 static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
 {
        struct mvneta_port *pp = netdev_priv(dev);
-       struct mvneta_tx_queue *txq = &pp->txqs[txq_def];
+       u16 txq_id = skb_get_queue_mapping(skb);
+       struct mvneta_tx_queue *txq = &pp->txqs[txq_id];
        struct mvneta_tx_desc *tx_desc;
        struct netdev_queue *nq;
        int frags = 0;
@@ -1486,7 +1486,7 @@ static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
                goto out;
 
        frags = skb_shinfo(skb)->nr_frags + 1;
-       nq    = netdev_get_tx_queue(dev, txq_def);
+       nq    = netdev_get_tx_queue(dev, txq_id);
 
        /* Get a descriptor for the first part of the packet */
        tx_desc = mvneta_txq_next_desc_get(txq);
@@ -2690,7 +2690,7 @@ static int mvneta_probe(struct platform_device *pdev)
                return -EINVAL;
        }
 
-       dev = alloc_etherdev_mq(sizeof(struct mvneta_port), 8);
+       dev = alloc_etherdev_mqs(sizeof(struct mvneta_port), txq_number, rxq_number);
        if (!dev)
                return -ENOMEM;
 
@@ -2844,4 +2844,3 @@ module_param(rxq_number, int, S_IRUGO);
 module_param(txq_number, int, S_IRUGO);
 
 module_param(rxq_def, int, S_IRUGO);
-module_param(txq_def, int, S_IRUGO);
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
index 16c8429..6bd9167 100644
--- a/drivers/net/usb/cdc_mbim.c
+++ b/drivers/net/usb/cdc_mbim.c
@@ -134,7 +134,7 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb
                goto error;
 
        if (skb) {
-               if (skb->len <= sizeof(ETH_HLEN))
+               if (skb->len <= ETH_HLEN)
                        goto error;
 
                /* mapping VLANs to MBIM sessions:
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index da9fde8..892ecda 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -941,6 +941,14 @@ void start_tty(struct tty_struct *tty)
 
 EXPORT_SYMBOL(start_tty);
 
+static void tty_update_time(struct timespec *time)
+{
+       unsigned long sec = get_seconds();
+       sec -= sec % 60;
+       if ((long)(sec - time->tv_sec) > 0)
+               time->tv_sec = sec;
+}
+
 /**
  *     tty_read        -       read method for tty device files
  *     @file: pointer to tty file
@@ -977,8 +985,10 @@ static ssize_t tty_read(struct file *file, char __user *buf, size_t count,
        else
                i = -EIO;
        tty_ldisc_deref(ld);
+
        if (i > 0)
-               inode->i_atime = current_fs_time(inode->i_sb);
+               tty_update_time(&inode->i_atime);
+
        return i;
 }
 
@@ -1081,7 +1091,7 @@ static inline ssize_t do_tty_write(
        }
        if (written) {
                struct inode *inode = file->f_path.dentry->d_inode;
-               inode->i_mtime = current_fs_time(inode->i_sb);
+               tty_update_time(&inode->i_mtime);
                ret = written;
        }
 out:
diff --git a/fs/aio.c b/fs/aio.c
index 71f613c..ed762ae 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -1027,9 +1027,9 @@ static int aio_read_evt(struct kioctx *ioctx, struct io_event *ent)
        spin_unlock(&info->ring_lock);
 
 out:
-       kunmap_atomic(ring);
        dprintk("leaving aio_read_evt: %d  h%lu t%lu\n", ret,
                 (unsigned long)ring->head, (unsigned long)ring->tail);
+       kunmap_atomic(ring);
        return ret;
 }
 
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 9ef07d0..0e182f9 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -208,9 +208,9 @@ struct netdev_hw_addr {
 #define NETDEV_HW_ADDR_T_SLAVE         3
 #define NETDEV_HW_ADDR_T_UNICAST       4
 #define NETDEV_HW_ADDR_T_MULTICAST     5
-       bool                    synced;
        bool                    global_use;
        int                     refcount;
+       int                     synced;
        struct rcu_head         rcu_head;
 };
 
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 98399e2..9fe54b6 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -2597,6 +2597,13 @@ static inline void nf_reset(struct sk_buff *skb)
 #endif
 }
 
+static inline void nf_reset_trace(struct sk_buff *skb)
+{
+#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
+       skb->nf_trace = 0;
+#endif
+}
+
 /* Note: This doesn't put any conntrack and bridge info in dst. */
 static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src)
 {
diff --git a/include/net/scm.h b/include/net/scm.h
index 975cca0..b117081 100644
--- a/include/net/scm.h
+++ b/include/net/scm.h
@@ -56,8 +56,8 @@ static __inline__ void scm_set_cred(struct scm_cookie *scm,
        scm->pid  = get_pid(pid);
        scm->cred = cred ? get_cred(cred) : NULL;
        scm->creds.pid = pid_vnr(pid);
-       scm->creds.uid = cred ? cred->euid : INVALID_UID;
-       scm->creds.gid = cred ? cred->egid : INVALID_GID;
+       scm->creds.uid = cred ? cred->uid : INVALID_UID;
+       scm->creds.gid = cred ? cred->gid : INVALID_GID;
 }
 
 static __inline__ void scm_destroy_cred(struct scm_cookie *scm)
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index 4762316..5fc7aa5 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -452,7 +452,6 @@ trace_selftest_function_recursion(void)
        char *func_name;
        int len;
        int ret;
-       int cnt;
 
        /* The previous test PASSED */
        pr_cont("PASSED\n");
@@ -510,19 +509,10 @@ trace_selftest_function_recursion(void)
 
        unregister_ftrace_function(&test_recsafe_probe);
 
-       /*
-        * If arch supports all ftrace features, and no other task
-        * was on the list, we should be fine.
-        */
-       if (!ftrace_nr_registered_ops() && !FTRACE_FORCE_LIST_FUNC)
-               cnt = 2; /* Should have recursed */
-       else
-               cnt = 1;
-
        ret = -1;
-       if (trace_selftest_recursion_cnt != cnt) {
-               pr_cont("*callback not called expected %d times (%d)* ",
-                       cnt, trace_selftest_recursion_cnt);
+       if (trace_selftest_recursion_cnt != 2) {
+               pr_cont("*callback not called expected 2 times (%d)* ",
+                       trace_selftest_recursion_cnt);
                goto out;
        }
 
diff --git a/net/atm/common.c b/net/atm/common.c
index 806fc0a..cf4b7e6 100644
--- a/net/atm/common.c
+++ b/net/atm/common.c
@@ -532,6 +532,8 @@ int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
        struct sk_buff *skb;
        int copied, error = -EINVAL;
 
+       msg->msg_namelen = 0;
+
        if (sock->state != SS_CONNECTED)
                return -ENOTCONN;
 
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 779095d..d53a123 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -1647,6 +1647,7 @@ static int ax25_recvmsg(struct kiocb *iocb, struct socket *sock,
                ax25_address src;
                const unsigned char *mac = skb_mac_header(skb);
 
+               memset(sax, 0, sizeof(struct full_sockaddr_ax25));
                ax25_addr_parse(mac + 1, skb->data - mac - 1, &src, NULL,
                                &digi, NULL, NULL);
                sax->sax25_family = AF_AX25;
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 5355df6..b04795e 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -230,6 +230,8 @@ int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
        if (flags & (MSG_OOB))
                return -EOPNOTSUPP;
 
+       msg->msg_namelen = 0;
+
        skb = skb_recv_datagram(sk, flags, noblock, &err);
        if (!skb) {
                if (sk->sk_shutdown & RCV_SHUTDOWN)
@@ -237,8 +239,6 @@ int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
                return err;
        }
 
-       msg->msg_namelen = 0;
-
        copied = skb->len;
        if (len < copied) {
                msg->msg_flags |= MSG_TRUNC;
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index ce3f665..970fc13 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -610,6 +610,7 @@ static int rfcomm_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
 
        if (test_and_clear_bit(RFCOMM_DEFER_SETUP, &d->flags)) {
                rfcomm_dlc_accept(d);
+               msg->msg_namelen = 0;
                return 0;
        }
 
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index aaf1957..cc16d1b 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -667,6 +667,7 @@ static int sco_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
            test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
                hci_conn_accept(pi->conn->hcon, 0);
                sk->sk_state = BT_CONFIG;
+               msg->msg_namelen = 0;
 
                release_sock(sk);
                return 0;
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index 095259f..ff2ff3c 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -286,6 +286,8 @@ static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock,
        if (m->msg_flags&MSG_OOB)
                goto read_error;
 
+       m->msg_namelen = 0;
+
        skb = skb_recv_datagram(sk, flags, 0 , &ret);
        if (!skb)
                goto read_error;
diff --git a/net/core/dev.c b/net/core/dev.c
index 5d9c43d..d592214 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1737,6 +1737,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
        skb->mark = 0;
        secpath_reset(skb);
        nf_reset(skb);
+       nf_reset_trace(skb);
        return netif_rx(skb);
 }
 EXPORT_SYMBOL_GPL(dev_forward_skb);
@@ -2017,6 +2018,9 @@ static void skb_warn_bad_offload(const struct sk_buff *skb)
        struct net_device *dev = skb->dev;
        const char *driver = "";
 
+       if (!net_ratelimit())
+               return;
+
        if (dev && dev->dev.parent)
                driver = dev_driver_string(dev->dev.parent);
 
diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
index b079c7b..7841d87 100644
--- a/net/core/dev_addr_lists.c
+++ b/net/core/dev_addr_lists.c
@@ -38,7 +38,7 @@ static int __hw_addr_create_ex(struct netdev_hw_addr_list *list,
        ha->type = addr_type;
        ha->refcount = 1;
        ha->global_use = global;
-       ha->synced = false;
+       ha->synced = 0;
        list_add_tail_rcu(&ha->list, &list->list);
        list->count++;
 
@@ -166,7 +166,7 @@ int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
                                            addr_len, ha->type);
                        if (err)
                                break;
-                       ha->synced = true;
+                       ha->synced++;
                        ha->refcount++;
                } else if (ha->refcount == 1) {
                        __hw_addr_del(to_list, ha->addr, addr_len, ha->type);
@@ -187,7 +187,7 @@ void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
                if (ha->synced) {
                        __hw_addr_del(to_list, ha->addr,
                                      addr_len, ha->type);
-                       ha->synced = false;
+                       ha->synced--;
                        __hw_addr_del(from_list, ha->addr,
                                      addr_len, ha->type);
                }
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 6212ec9..055fb13 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1068,7 +1068,7 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
        rcu_read_lock();
        cb->seq = net->dev_base_seq;
 
-       if (nlmsg_parse(cb->nlh, sizeof(struct rtgenmsg), tb, IFLA_MAX,
+       if (nlmsg_parse(cb->nlh, sizeof(struct ifinfomsg), tb, IFLA_MAX,
                        ifla_policy) >= 0) {
 
                if (tb[IFLA_EXT_MASK])
@@ -1924,7 +1924,7 @@ static u16 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh)
        u32 ext_filter_mask = 0;
        u16 min_ifinfo_dump_size = 0;
 
-       if (nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, IFLA_MAX,
+       if (nlmsg_parse(nlh, sizeof(struct ifinfomsg), tb, IFLA_MAX,
                        ifla_policy) >= 0) {
                if (tb[IFLA_EXT_MASK])
                        ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 3b4f0cd..4cfe34d 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -139,8 +139,6 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
 
        /* skb is pure payload to encrypt */
 
-       err = -ENOMEM;
-
        esp = x->data;
        aead = esp->aead;
        alen = crypto_aead_authsize(aead);
@@ -176,8 +174,10 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
        }
 
        tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen);
-       if (!tmp)
+       if (!tmp) {
+               err = -ENOMEM;
                goto error;
+       }
 
        seqhi = esp_tmp_seqhi(tmp);
        iv = esp_tmp_iv(aead, tmp, seqhilen);
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index a8fc332..0fcfee3 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -255,8 +255,7 @@ static void ip_expire(unsigned long arg)
                if (!head->dev)
                        goto out_rcu_unlock;
 
-               /* skb dst is stale, drop it, and perform route lookup again */
-               skb_dst_drop(head);
+               /* skb has no dst, perform route lookup again */
                iph = ip_hdr(head);
                err = ip_route_input_noref(head, iph->daddr, iph->saddr,
                                           iph->tos, head->dev);
@@ -525,8 +524,16 @@ found:
                qp->q.max_size = skb->len + ihl;
 
        if (qp->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
-           qp->q.meat == qp->q.len)
-               return ip_frag_reasm(qp, prev, dev);
+           qp->q.meat == qp->q.len) {
+               unsigned long orefdst = skb->_skb_refdst;
+
+               skb->_skb_refdst = 0UL;
+               err = ip_frag_reasm(qp, prev, dev);
+               skb->_skb_refdst = orefdst;
+               return err;
+       }
+
+       skb_dst_drop(skb);
 
        write_lock(&ip4_frags.lock);
        list_move_tail(&qp->q.lru_list, &qp->q.net->lru_list);
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index b236ef0..f962f19 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -348,8 +348,8 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
         * hasn't changed since we received the original syn, but I see
         * no easy way to do this.
         */
-       flowi4_init_output(&fl4, 0, sk->sk_mark, RT_CONN_FLAGS(sk),
-                          RT_SCOPE_UNIVERSE, IPPROTO_TCP,
+       flowi4_init_output(&fl4, sk->sk_bound_dev_if, sk->sk_mark,
+                          RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, IPPROTO_TCP,
                           inet_sk_flowi_flags(sk),
                           (opt && opt->srr) ? opt->faddr : ireq->rmt_addr,
                           ireq->loc_addr, th->source, th->dest);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 9841a71..b4e8b79 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -116,6 +116,7 @@ int sysctl_tcp_early_retrans __read_mostly = 2;
 #define FLAG_DSACKING_ACK      0x800 /* SACK blocks contained D-SACK info */
 #define FLAG_NONHEAD_RETRANS_ACKED     0x1000 /* Non-head rexmitted data was ACKed */
 #define FLAG_SACK_RENEGING     0x2000 /* snd_una advanced to a sacked seq */
+#define FLAG_UPDATE_TS_RECENT  0x4000 /* tcp_replace_ts_recent() */
 
 #define FLAG_ACKED             (FLAG_DATA_ACKED|FLAG_SYN_ACKED)
 #define FLAG_NOT_DUP           (FLAG_DATA|FLAG_WIN_UPDATE|FLAG_ACKED)
@@ -3572,6 +3573,27 @@ static void tcp_send_challenge_ack(struct sock *sk)
        }
 }
 
+static void tcp_store_ts_recent(struct tcp_sock *tp)
+{
+       tp->rx_opt.ts_recent = tp->rx_opt.rcv_tsval;
+       tp->rx_opt.ts_recent_stamp = get_seconds();
+}
+
+static void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq)
+{
+       if (tp->rx_opt.saw_tstamp && !after(seq, tp->rcv_wup)) {
+               /* PAWS bug workaround wrt. ACK frames, the PAWS discard
+                * extra check below makes sure this can only happen
+                * for pure ACK frames.  -DaveM
+                *
+                * Not only, also it occurs for expired timestamps.
+                */
+
+               if (tcp_paws_check(&tp->rx_opt, 0))
+                       tcp_store_ts_recent(tp);
+       }
+}
+
 /* This routine deals with incoming acks, but not outgoing ones. */
 static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 {
@@ -3624,6 +3646,12 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
        prior_fackets = tp->fackets_out;
        prior_in_flight = tcp_packets_in_flight(tp);
 
+       /* ts_recent update must be made after we are sure that the packet
+        * is in window.
+        */
+       if (flag & FLAG_UPDATE_TS_RECENT)
+               tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
+
        if (!(flag & FLAG_SLOWPATH) && after(ack, prior_snd_una)) {
                /* Window is constant, pure forward advance.
                 * No more checks are required.
@@ -3940,27 +3968,6 @@ const u8 *tcp_parse_md5sig_option(const struct tcphdr *th)
 EXPORT_SYMBOL(tcp_parse_md5sig_option);
 #endif
 
-static inline void tcp_store_ts_recent(struct tcp_sock *tp)
-{
-       tp->rx_opt.ts_recent = tp->rx_opt.rcv_tsval;
-       tp->rx_opt.ts_recent_stamp = get_seconds();
-}
-
-static inline void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq)
-{
-       if (tp->rx_opt.saw_tstamp && !after(seq, tp->rcv_wup)) {
-               /* PAWS bug workaround wrt. ACK frames, the PAWS discard
-                * extra check below makes sure this can only happen
-                * for pure ACK frames.  -DaveM
-                *
-                * Not only, also it occurs for expired timestamps.
-                */
-
-               if (tcp_paws_check(&tp->rx_opt, 0))
-                       tcp_store_ts_recent(tp);
-       }
-}
-
 /* Sorry, PAWS as specified is broken wrt. pure-ACKs -DaveM
  *
  * It is not fatal. If this ACK does _not_ change critical state (seqs, window)
@@ -5556,14 +5563,9 @@ slow_path:
                return 0;
 
 step5:
-       if (tcp_ack(sk, skb, FLAG_SLOWPATH) < 0)
+       if (tcp_ack(sk, skb, FLAG_SLOWPATH | FLAG_UPDATE_TS_RECENT) < 0)
                goto discard;
 
-       /* ts_recent update must be made after we are sure that the packet
-        * is in window.
-        */
-       tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
-
        tcp_rcv_rtt_measure_ts(sk, skb);
 
        /* Process urgent data. */
@@ -5997,7 +5999,8 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 
        /* step 5: check the ACK field */
        if (true) {
-               int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH) > 0;
+               int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH |
+                                                 FLAG_UPDATE_TS_RECENT) > 0;
 
                switch (sk->sk_state) {
                case TCP_SYN_RECV:
@@ -6148,11 +6151,6 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
                }
        }
 
-       /* ts_recent update must be made after we are sure that the packet
-        * is in window.
-        */
-       tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
-
        /* step 6: check the URG bit */
        tcp_urg(sk, skb, th);
 
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 17d659e..a9f50ee 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2388,8 +2388,12 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
         */
        TCP_SKB_CB(skb)->when = tcp_time_stamp;
 
-       /* make sure skb->data is aligned on arches that require it */
-       if (unlikely(NET_IP_ALIGN && ((unsigned long)skb->data & 3))) {
+       /* make sure skb->data is aligned on arches that require it
+        * and check if ack-trimming & collapsing extended the headroom
+        * beyond what csum_start can cover.
+        */
+       if (unlikely((NET_IP_ALIGN && ((unsigned long)skb->data & 3)) ||
+                    skb_headroom(skb) >= 0xFFFF)) {
                struct sk_buff *nskb = __pskb_copy(skb, MAX_TCP_HEADER,
                                                   GFP_ATOMIC);
                return nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index a36d17e..e8676c2 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -2525,6 +2525,9 @@ static void sit_add_v4_addrs(struct inet6_dev *idev)
 static void init_loopback(struct net_device *dev)
 {
        struct inet6_dev  *idev;
+       struct net_device *sp_dev;
+       struct inet6_ifaddr *sp_ifa;
+       struct rt6_info *sp_rt;
 
        /* ::1 */
 
@@ -2536,6 +2539,30 @@ static void init_loopback(struct net_device *dev)
        }
 
        add_addr(idev, &in6addr_loopback, 128, IFA_HOST);
+
+       /* Add routes to other interface's IPv6 addresses */
+       for_each_netdev(dev_net(dev), sp_dev) {
+               if (!strcmp(sp_dev->name, dev->name))
+                       continue;
+
+               idev = __in6_dev_get(sp_dev);
+               if (!idev)
+                       continue;
+
+               read_lock_bh(&idev->lock);
+               list_for_each_entry(sp_ifa, &idev->addr_list, if_list) {
+
+                       if (sp_ifa->flags & (IFA_F_DADFAILED | IFA_F_TENTATIVE))
+                               continue;
+
+                       sp_rt = addrconf_dst_alloc(idev, &sp_ifa->addr, 0);
+
+                       /* Failure cases are ignored */
+                       if (!IS_ERR(sp_rt))
+                               ip6_ins_rt(sp_rt);
+               }
+               read_unlock_bh(&idev->lock);
+       }
 }
 
 static void addrconf_add_linklocal(struct inet6_dev *idev, const struct in6_addr *addr)
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index d9ba8a2..7a610a6 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -342,8 +342,17 @@ found:
        }
 
        if (fq->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
-           fq->q.meat == fq->q.len)
-               return ip6_frag_reasm(fq, prev, dev);
+           fq->q.meat == fq->q.len) {
+               int res;
+               unsigned long orefdst = skb->_skb_refdst;
+
+               skb->_skb_refdst = 0UL;
+               res = ip6_frag_reasm(fq, prev, dev);
+               skb->_skb_refdst = orefdst;
+               return res;
+       }
+
+       skb_dst_drop(skb);
 
        write_lock(&ip6_frags.lock);
        list_move_tail(&fq->q.lru_list, &fq->q.net->lru_list);
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 8d19346..89dfedd 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -386,6 +386,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 
                if (dst)
                        dst->ops->redirect(dst, sk, skb);
+               goto out;
        }
 
        if (type == ICMPV6_PKT_TOOBIG) {
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index 4d04105..3c9bd59 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -1386,6 +1386,8 @@ static int irda_recvmsg_dgram(struct kiocb *iocb, struct socket *sock,
 
        IRDA_DEBUG(4, "%s()\n", __func__);
 
+       msg->msg_namelen = 0;
+
        skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
                                flags & MSG_DONTWAIT, &err);
        if (!skb)
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index cd6f7a9..625bc50 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -1331,6 +1331,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
        struct sk_buff *skb, *rskb, *cskb;
        int err = 0;
 
+       msg->msg_namelen = 0;
+
        if ((sk->sk_state == IUCV_DISCONN) &&
            skb_queue_empty(&iucv->backlog_skb_q) &&
            skb_queue_empty(&sk->sk_receive_queue) &&
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index 8ee4a86..9e1822e 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -684,6 +684,7 @@ static int l2tp_ip6_recvmsg(struct kiocb *iocb, struct sock *sk,
                lsa->l2tp_addr = ipv6_hdr(skb)->saddr;
                lsa->l2tp_flowinfo = 0;
                lsa->l2tp_scope_id = 0;
+               lsa->l2tp_conn_id = 0;
                if (ipv6_addr_type(&lsa->l2tp_addr) & IPV6_ADDR_LINKLOCAL)
                        lsa->l2tp_scope_id = IP6CB(skb)->iif;
        }
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index 8870988..48aaa89 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -720,6 +720,8 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
        int target;     /* Read at least this many bytes */
        long timeo;
 
+       msg->msg_namelen = 0;
+
        lock_sock(sk);
        copied = -ENOTCONN;
        if (unlikely(sk->sk_type == SOCK_STREAM && sk->sk_state == TCP_LISTEN))
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 7261eb8..14c106b 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -1177,6 +1177,7 @@ static int nr_recvmsg(struct kiocb *iocb, struct socket *sock,
        }
 
        if (sax != NULL) {
+               memset(sax, 0, sizeof(sax));
                sax->sax25_family = AF_NETROM;
                skb_copy_from_linear_data_offset(skb, 7, sax->sax25_call.ax25_call,
                              AX25_ADDR_LEN);
diff --git a/net/nfc/llcp/sock.c b/net/nfc/llcp/sock.c
index fea22eb..48fb1de 100644
--- a/net/nfc/llcp/sock.c
+++ b/net/nfc/llcp/sock.c
@@ -644,6 +644,8 @@ static int llcp_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
 
        pr_debug("%p %zu\n", sk, len);
 
+       msg->msg_namelen = 0;
+
        lock_sock(sk);
 
        if (sk->sk_state == LLCP_CLOSED &&
@@ -684,6 +686,7 @@ static int llcp_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
 
                pr_debug("Datagram socket %d %d\n", ui_cb->dsap, ui_cb->ssap);
 
+               memset(&sockaddr, 0, sizeof(sockaddr));
                sockaddr.sa_family = AF_NFC;
                sockaddr.nfc_protocol = NFC_PROTO_NFC_DEP;
                sockaddr.dsap = ui_cb->dsap;
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index c4719ce..7f645d1 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -1257,6 +1257,7 @@ static int rose_recvmsg(struct kiocb *iocb, struct socket *sock,
        skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
 
        if (srose != NULL) {
+               memset(srose, 0, msg->msg_namelen);
                srose->srose_family = AF_ROSE;
                srose->srose_addr   = rose->dest_addr;
                srose->srose_call   = rose->dest_call;
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 0e19948..ced81a1 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -962,8 +962,11 @@ cbq_dequeue(struct Qdisc *sch)
                cbq_update(q);
                if ((incr -= incr2) < 0)
                        incr = 0;
+               q->now += incr;
+       } else {
+               if (now > q->now)
+                       q->now = now;
        }
-       q->now += incr;
        q->now_rt = now;
 
        for (;;) {
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 9b4e483..fc906d9 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -806,6 +806,7 @@ static void set_orig_addr(struct msghdr *m, struct tipc_msg *msg)
        if (addr) {
                addr->family = AF_TIPC;
                addr->addrtype = TIPC_ADDR_ID;
+               memset(&addr->addr, 0, sizeof(addr->addr));
                addr->addr.id.ref = msg_origport(msg);
                addr->addr.id.node = msg_orignode(msg);
                addr->addr.name.domain = 0;     /* could leave uninitialized */
@@ -920,6 +921,9 @@ static int recv_msg(struct kiocb *iocb, struct socket *sock,
                goto exit;
        }
 
+       /* will be updated in set_orig_addr() if needed */
+       m->msg_namelen = 0;
+
        timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
 restart:
 
@@ -1029,6 +1033,9 @@ static int recv_stream(struct kiocb *iocb, struct socket *sock,
                goto exit;
        }
 
+       /* will be updated in set_orig_addr() if needed */
+       m->msg_namelen = 0;
+
        target = sock_rcvlowat(sk, flags & MSG_WAITALL, buf_len);
        timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
 
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index b45eb65..f347754 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -1995,7 +1995,7 @@ again:
                        if ((UNIXCB(skb).pid  != siocb->scm->pid) ||
                            (UNIXCB(skb).cred != siocb->scm->cred))
                                break;
-               } else {
+               } else if (test_bit(SOCK_PASSCRED, &sock->flags)) {
                        /* Copy credentials */
                        scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).cred);
                        check_creds = 1;
--