commit:     94f39221cd730a186a590b0140ba24cc3c3334c5
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Sep 19 10:05:30 2019 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Sep 19 10:05:30 2019 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=94f39221

Linux patch 5.2.16

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    4 +
 1015_linux-5.2.16.patch | 3120 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 3124 insertions(+)

diff --git a/0000_README b/0000_README
index e8d3287..c046e8a 100644
--- a/0000_README
+++ b/0000_README
@@ -103,6 +103,10 @@ Patch:  1014_linux-5.2.15.patch
 From:   https://www.kernel.org
 Desc:   Linux 5.2.15
 
+Patch:  1015_linux-5.2.16.patch
+From:   https://www.kernel.org
+Desc:   Linux 5.2.16
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1015_linux-5.2.16.patch b/1015_linux-5.2.16.patch
new file mode 100644
index 0000000..7eee1f4
--- /dev/null
+++ b/1015_linux-5.2.16.patch
@@ -0,0 +1,3120 @@
+diff --git a/Makefile b/Makefile
+index 3c977aa66650..3cec03e93b40 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 2
+-SUBLEVEL = 15
++SUBLEVEL = 16
+ EXTRAVERSION =
+ NAME = Bobtail Squid
+ 
+diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
+index 76f34346b642..8b03eb44e876 100644
+--- a/arch/powerpc/include/asm/uaccess.h
++++ b/arch/powerpc/include/asm/uaccess.h
+@@ -312,6 +312,7 @@ raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
+ {
+       unsigned long ret;
+ 
++      barrier_nospec();
+       allow_user_access(to, from, n);
+       ret = __copy_tofrom_user(to, from, n);
+       prevent_user_access(to, from, n);
+diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
+index 9dde4d7d8704..149525b5df1b 100644
+--- a/arch/s390/kvm/interrupt.c
++++ b/arch/s390/kvm/interrupt.c
+@@ -1978,6 +1978,16 @@ int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
+       case KVM_S390_MCHK:
+               irq->u.mchk.mcic = s390int->parm64;
+               break;
++      case KVM_S390_INT_PFAULT_INIT:
++              irq->u.ext.ext_params = s390int->parm;
++              irq->u.ext.ext_params2 = s390int->parm64;
++              break;
++      case KVM_S390_RESTART:
++      case KVM_S390_INT_CLOCK_COMP:
++      case KVM_S390_INT_CPU_TIMER:
++              break;
++      default:
++              return -EINVAL;
+       }
+       return 0;
+ }
+diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
+index 28ebd647784c..4934141689d2 100644
+--- a/arch/s390/kvm/kvm-s390.c
++++ b/arch/s390/kvm/kvm-s390.c
+@@ -1013,6 +1013,8 @@ static int kvm_s390_vm_start_migration(struct kvm *kvm)
+       /* mark all the pages in active slots as dirty */
+       for (slotnr = 0; slotnr < slots->used_slots; slotnr++) {
+               ms = slots->memslots + slotnr;
++              if (!ms->dirty_bitmap)
++                      return -EINVAL;
+               /*
+                * The second half of the bitmap is only used on x86,
+                * and would be wasted otherwise, so we put it to good
+@@ -4325,7 +4327,7 @@ long kvm_arch_vcpu_async_ioctl(struct file *filp,
+       }
+       case KVM_S390_INTERRUPT: {
+               struct kvm_s390_interrupt s390int;
+-              struct kvm_s390_irq s390irq;
++              struct kvm_s390_irq s390irq = {};
+ 
+               if (copy_from_user(&s390int, argp, sizeof(s390int)))
+                       return -EFAULT;
+diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
+index 9825ca6a6020..5cdca4208647 100644
+--- a/arch/sparc/kernel/sys_sparc_64.c
++++ b/arch/sparc/kernel/sys_sparc_64.c
+@@ -336,25 +336,28 @@ SYSCALL_DEFINE6(sparc_ipc, unsigned int, call, int, first, unsigned long, second
+ {
+       long err;
+ 
++      if (!IS_ENABLED(CONFIG_SYSVIPC))
++              return -ENOSYS;
++
+       /* No need for backward compatibility. We can start fresh... */
+       if (call <= SEMTIMEDOP) {
+               switch (call) {
+               case SEMOP:
+-                      err = sys_semtimedop(first, ptr,
+-                                           (unsigned int)second, NULL);
++                      err = ksys_semtimedop(first, ptr,
++                                            (unsigned int)second, NULL);
+                       goto out;
+               case SEMTIMEDOP:
+-                      err = sys_semtimedop(first, ptr, (unsigned int)second,
++                      err = ksys_semtimedop(first, ptr, (unsigned int)second,
+                               (const struct __kernel_timespec __user *)
+-                                           (unsigned long) fifth);
++                                            (unsigned long) fifth);
+                       goto out;
+               case SEMGET:
+-                      err = sys_semget(first, (int)second, (int)third);
++                      err = ksys_semget(first, (int)second, (int)third);
+                       goto out;
+               case SEMCTL: {
+-                      err = sys_semctl(first, second,
+-                                       (int)third | IPC_64,
+-                                       (unsigned long) ptr);
++                      err = ksys_old_semctl(first, second,
++                                            (int)third | IPC_64,
++                                            (unsigned long) ptr);
+                       goto out;
+               }
+               default:
+@@ -365,18 +368,18 @@ SYSCALL_DEFINE6(sparc_ipc, unsigned int, call, int, first, unsigned long, second
+       if (call <= MSGCTL) {
+               switch (call) {
+               case MSGSND:
+-                      err = sys_msgsnd(first, ptr, (size_t)second,
++                      err = ksys_msgsnd(first, ptr, (size_t)second,
+                                        (int)third);
+                       goto out;
+               case MSGRCV:
+-                      err = sys_msgrcv(first, ptr, (size_t)second, fifth,
++                      err = ksys_msgrcv(first, ptr, (size_t)second, fifth,
+                                        (int)third);
+                       goto out;
+               case MSGGET:
+-                      err = sys_msgget((key_t)first, (int)second);
++                      err = ksys_msgget((key_t)first, (int)second);
+                       goto out;
+               case MSGCTL:
+-                      err = sys_msgctl(first, (int)second | IPC_64, ptr);
++                      err = ksys_old_msgctl(first, (int)second | IPC_64, ptr);
+                       goto out;
+               default:
+                       err = -ENOSYS;
+@@ -396,13 +399,13 @@ SYSCALL_DEFINE6(sparc_ipc, unsigned int, call, int, first, unsigned long, second
+                       goto out;
+               }
+               case SHMDT:
+-                      err = sys_shmdt(ptr);
++                      err = ksys_shmdt(ptr);
+                       goto out;
+               case SHMGET:
+-                      err = sys_shmget(first, (size_t)second, (int)third);
++                      err = ksys_shmget(first, (size_t)second, (int)third);
+                       goto out;
+               case SHMCTL:
+-                      err = sys_shmctl(first, (int)second | IPC_64, ptr);
++                      err = ksys_old_shmctl(first, (int)second | IPC_64, ptr);
+                       goto out;
+               default:
+                       err = -ENOSYS;
+diff --git a/arch/x86/Makefile b/arch/x86/Makefile
+index 56e748a7679f..94df0868804b 100644
+--- a/arch/x86/Makefile
++++ b/arch/x86/Makefile
+@@ -38,6 +38,7 @@ REALMODE_CFLAGS      := $(M16_CFLAGS) -g -Os -DDISABLE_BRANCH_PROFILING \
+ 
+ REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -ffreestanding)
+ REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -fno-stack-protector)
++REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -Wno-address-of-packed-member)
+ REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), $(cc_stack_align4))
+ export REALMODE_CFLAGS
+ 
+diff --git a/arch/x86/boot/compressed/acpi.c b/arch/x86/boot/compressed/acpi.c
+index ad84239e595e..15255f388a85 100644
+--- a/arch/x86/boot/compressed/acpi.c
++++ b/arch/x86/boot/compressed/acpi.c
+@@ -44,17 +44,109 @@ static acpi_physical_address get_acpi_rsdp(void)
+       return addr;
+ }
+ 
+-/* Search EFI system tables for RSDP. */
+-static acpi_physical_address efi_get_rsdp_addr(void)
++/*
++ * Search EFI system tables for RSDP.  If both ACPI_20_TABLE_GUID and
++ * ACPI_TABLE_GUID are found, take the former, which has more features.
++ */
++static acpi_physical_address
++__efi_get_rsdp_addr(unsigned long config_tables, unsigned int nr_tables,
++                  bool efi_64)
+ {
+       acpi_physical_address rsdp_addr = 0;
+ 
+ #ifdef CONFIG_EFI
+-      unsigned long systab, systab_tables, config_tables;
++      int i;
++
++      /* Get EFI tables from systab. */
++      for (i = 0; i < nr_tables; i++) {
++              acpi_physical_address table;
++              efi_guid_t guid;
++
++              if (efi_64) {
++                      efi_config_table_64_t *tbl = (efi_config_table_64_t *)config_tables + i;
++
++                      guid  = tbl->guid;
++                      table = tbl->table;
++
++                      if (!IS_ENABLED(CONFIG_X86_64) && table >> 32) {
++                              debug_putstr("Error getting RSDP address: EFI config table located above 4GB.\n");
++                              return 0;
++                      }
++              } else {
++                      efi_config_table_32_t *tbl = (efi_config_table_32_t *)config_tables + i;
++
++                      guid  = tbl->guid;
++                      table = tbl->table;
++              }
++
++              if (!(efi_guidcmp(guid, ACPI_TABLE_GUID)))
++                      rsdp_addr = table;
++              else if (!(efi_guidcmp(guid, ACPI_20_TABLE_GUID)))
++                      return table;
++      }
++#endif
++      return rsdp_addr;
++}
++
++/* EFI/kexec support is 64-bit only. */
++#ifdef CONFIG_X86_64
++static struct efi_setup_data *get_kexec_setup_data_addr(void)
++{
++      struct setup_data *data;
++      u64 pa_data;
++
++      pa_data = boot_params->hdr.setup_data;
++      while (pa_data) {
++              data = (struct setup_data *)pa_data;
++              if (data->type == SETUP_EFI)
++                      return (struct efi_setup_data *)(pa_data + sizeof(struct setup_data));
++
++              pa_data = data->next;
++      }
++      return NULL;
++}
++
++static acpi_physical_address kexec_get_rsdp_addr(void)
++{
++      efi_system_table_64_t *systab;
++      struct efi_setup_data *esd;
++      struct efi_info *ei;
++      char *sig;
++
++      esd = (struct efi_setup_data *)get_kexec_setup_data_addr();
++      if (!esd)
++              return 0;
++
++      if (!esd->tables) {
++              debug_putstr("Wrong kexec SETUP_EFI data.\n");
++              return 0;
++      }
++
++      ei = &boot_params->efi_info;
++      sig = (char *)&ei->efi_loader_signature;
++      if (strncmp(sig, EFI64_LOADER_SIGNATURE, 4)) {
++              debug_putstr("Wrong kexec EFI loader signature.\n");
++              return 0;
++      }
++
++      /* Get systab from boot params. */
++      systab = (efi_system_table_64_t *) (ei->efi_systab | ((__u64)ei->efi_systab_hi << 32));
++      if (!systab)
++              error("EFI system table not found in kexec boot_params.");
++
++      return __efi_get_rsdp_addr((unsigned long)esd->tables, systab->nr_tables, true);
++}
++#else
++static acpi_physical_address kexec_get_rsdp_addr(void) { return 0; }
++#endif /* CONFIG_X86_64 */
++
++static acpi_physical_address efi_get_rsdp_addr(void)
++{
++#ifdef CONFIG_EFI
++      unsigned long systab, config_tables;
+       unsigned int nr_tables;
+       struct efi_info *ei;
+       bool efi_64;
+-      int size, i;
+       char *sig;
+ 
+       ei = &boot_params->efi_info;
+@@ -88,49 +180,20 @@ static acpi_physical_address efi_get_rsdp_addr(void)
+ 
+               config_tables   = stbl->tables;
+               nr_tables       = stbl->nr_tables;
+-              size            = sizeof(efi_config_table_64_t);
+       } else {
+               efi_system_table_32_t *stbl = (efi_system_table_32_t *)systab;
+ 
+               config_tables   = stbl->tables;
+               nr_tables       = stbl->nr_tables;
+-              size            = sizeof(efi_config_table_32_t);
+       }
+ 
+       if (!config_tables)
+               error("EFI config tables not found.");
+ 
+-      /* Get EFI tables from systab. */
+-      for (i = 0; i < nr_tables; i++) {
+-              acpi_physical_address table;
+-              efi_guid_t guid;
+-
+-              config_tables += size;
+-
+-              if (efi_64) {
+-                      efi_config_table_64_t *tbl = (efi_config_table_64_t *)config_tables;
+-
+-                      guid  = tbl->guid;
+-                      table = tbl->table;
+-
+-                      if (!IS_ENABLED(CONFIG_X86_64) && table >> 32) {
+-                              debug_putstr("Error getting RSDP address: EFI config table located above 4GB.\n");
+-                              return 0;
+-                      }
+-              } else {
+-                      efi_config_table_32_t *tbl = (efi_config_table_32_t *)config_tables;
+-
+-                      guid  = tbl->guid;
+-                      table = tbl->table;
+-              }
+-
+-              if (!(efi_guidcmp(guid, ACPI_TABLE_GUID)))
+-                      rsdp_addr = table;
+-              else if (!(efi_guidcmp(guid, ACPI_20_TABLE_GUID)))
+-                      return table;
+-      }
++      return __efi_get_rsdp_addr(config_tables, nr_tables, efi_64);
++#else
++      return 0;
+ #endif
+-      return rsdp_addr;
+ }
+ 
+ static u8 compute_checksum(u8 *buffer, u32 length)
+@@ -220,6 +283,14 @@ acpi_physical_address get_rsdp_addr(void)
+       if (!pa)
+               pa = boot_params->acpi_rsdp_addr;
+ 
++      /*
++       * Try to get EFI data from setup_data. This can happen when we're a
++       * kexec'ed kernel and kexec(1) has passed all the required EFI info to
++       * us.
++       */
++      if (!pa)
++              pa = kexec_get_rsdp_addr();
++
+       if (!pa)
+               pa = efi_get_rsdp_addr();
+ 
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index 921c609c2af7..65d49452e6e0 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -333,6 +333,7 @@ struct kvm_mmu_page {
+       int root_count;          /* Currently serving as active root */
+       unsigned int unsync_children;
+       struct kvm_rmap_head parent_ptes; /* rmap pointers to parent sptes */
++      unsigned long mmu_valid_gen;
+       DECLARE_BITMAP(unsync_child_bitmap, 512);
+ 
+ #ifdef CONFIG_X86_32
+@@ -851,6 +852,7 @@ struct kvm_arch {
+       unsigned long n_requested_mmu_pages;
+       unsigned long n_max_mmu_pages;
+       unsigned int indirect_shadow_pages;
++      unsigned long mmu_valid_gen;
+       struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
+       /*
+        * Hash table of struct kvm_mmu_page.
+diff --git a/arch/x86/kernel/ima_arch.c b/arch/x86/kernel/ima_arch.c
+index 64b973f0e985..4c407833faca 100644
+--- a/arch/x86/kernel/ima_arch.c
++++ b/arch/x86/kernel/ima_arch.c
+@@ -11,10 +11,11 @@ extern struct boot_params boot_params;
+ static enum efi_secureboot_mode get_sb_mode(void)
+ {
+       efi_char16_t efi_SecureBoot_name[] = L"SecureBoot";
++      efi_char16_t efi_SetupMode_name[] = L"SecureBoot";
+       efi_guid_t efi_variable_guid = EFI_GLOBAL_VARIABLE_GUID;
+       efi_status_t status;
+       unsigned long size;
+-      u8 secboot;
++      u8 secboot, setupmode;
+ 
+       size = sizeof(secboot);
+ 
+@@ -36,7 +37,14 @@ static enum efi_secureboot_mode get_sb_mode(void)
+               return efi_secureboot_mode_unknown;
+       }
+ 
+-      if (secboot == 0) {
++      size = sizeof(setupmode);
++      status = efi.get_variable(efi_SetupMode_name, &efi_variable_guid,
++                                NULL, &size, &setupmode);
++
++      if (status != EFI_SUCCESS)      /* ignore unknown SetupMode */
++              setupmode = 0;
++
++      if (secboot == 0 || setupmode == 1) {
+               pr_info("ima: secureboot mode disabled\n");
+               return efi_secureboot_mode_disabled;
+       }
+diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
+index 01f04db1fa61..66055ca29b6b 100644
+--- a/arch/x86/kvm/mmu.c
++++ b/arch/x86/kvm/mmu.c
+@@ -2066,6 +2066,12 @@ static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu, int direct
+       if (!direct)
+               sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache);
+       set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
++
++      /*
++       * active_mmu_pages must be a FIFO list, as kvm_zap_obsolete_pages()
++       * depends on valid pages being added to the head of the list.  See
++       * comments in kvm_zap_obsolete_pages().
++       */
+       list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
+       kvm_mod_used_mmu_pages(vcpu->kvm, +1);
+       return sp;
+@@ -2215,7 +2221,7 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
+ #define for_each_valid_sp(_kvm, _sp, _gfn)                            \
+       hlist_for_each_entry(_sp,                                       \
+         &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)], hash_link) \
+-              if ((_sp)->role.invalid) {    \
++              if (is_obsolete_sp((_kvm), (_sp)) || (_sp)->role.invalid) {    \
+               } else
+ 
+ #define for_each_gfn_indirect_valid_sp(_kvm, _sp, _gfn)                       \
+@@ -2272,6 +2278,11 @@ static void kvm_mmu_audit(struct kvm_vcpu *vcpu, int point) { }
+ static void mmu_audit_disable(void) { }
+ #endif
+ 
++static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
++{
++      return unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen);
++}
++
+ static bool kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
+                        struct list_head *invalid_list)
+ {
+@@ -2496,6 +2507,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
+               if (level > PT_PAGE_TABLE_LEVEL && need_sync)
+                       flush |= kvm_sync_pages(vcpu, gfn, &invalid_list);
+       }
++      sp->mmu_valid_gen = vcpu->kvm->arch.mmu_valid_gen;
+       clear_page(sp->spt);
+       trace_kvm_mmu_get_page(sp, true);
+ 
+@@ -4229,6 +4241,13 @@ static bool fast_cr3_switch(struct kvm_vcpu *vcpu, gpa_t new_cr3,
+                       return false;
+ 
+               if (cached_root_available(vcpu, new_cr3, new_role)) {
++                      /*
++                       * It is possible that the cached previous root page is
++                       * obsolete because of a change in the MMU generation
++                       * number. However, changing the generation number is
++                       * accompanied by KVM_REQ_MMU_RELOAD, which will free
++                       * the root set here and allocate a new one.
++                       */
+                       kvm_make_request(KVM_REQ_LOAD_CR3, vcpu);
+                       if (!skip_tlb_flush) {
+                               kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
+@@ -5645,11 +5664,89 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu)
+       return alloc_mmu_pages(vcpu);
+ }
+ 
++
++static void kvm_zap_obsolete_pages(struct kvm *kvm)
++{
++      struct kvm_mmu_page *sp, *node;
++      LIST_HEAD(invalid_list);
++      int ign;
++
++restart:
++      list_for_each_entry_safe_reverse(sp, node,
++            &kvm->arch.active_mmu_pages, link) {
++              /*
++               * No obsolete valid page exists before a newly created page
++               * since active_mmu_pages is a FIFO list.
++               */
++              if (!is_obsolete_sp(kvm, sp))
++                      break;
++
++              /*
++               * Do not repeatedly zap a root page to avoid unnecessary
++               * KVM_REQ_MMU_RELOAD, otherwise we may not be able to
++               * progress:
++               *    vcpu 0                        vcpu 1
++               *                         call vcpu_enter_guest():
++               *                            1): handle KVM_REQ_MMU_RELOAD
++               *                                and require mmu-lock to
++               *                                load mmu
++               * repeat:
++               *    1): zap root page and
++               *        send KVM_REQ_MMU_RELOAD
++               *
++               *    2): if (cond_resched_lock(mmu-lock))
++               *
++               *                            2): hold mmu-lock and load mmu
++               *
++               *                            3): see KVM_REQ_MMU_RELOAD bit
++               *                                on vcpu->requests is set
++               *                                then return 1 to call
++               *                                vcpu_enter_guest() again.
++               *            goto repeat;
++               *
++               * Since we are reversely walking the list and the invalid
++               * list will be moved to the head, skip the invalid page
++               * can help us to avoid the infinity list walking.
++               */
++              if (sp->role.invalid)
++                      continue;
++
++              if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
++                      kvm_mmu_commit_zap_page(kvm, &invalid_list);
++                      cond_resched_lock(&kvm->mmu_lock);
++                      goto restart;
++              }
++
++              if (__kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, &ign))
++                      goto restart;
++      }
++
++      kvm_mmu_commit_zap_page(kvm, &invalid_list);
++}
++
++/*
++ * Fast invalidate all shadow pages and use lock-break technique
++ * to zap obsolete pages.
++ *
++ * It's required when memslot is being deleted or VM is being
++ * destroyed, in these cases, we should ensure that KVM MMU does
++ * not use any resource of the being-deleted slot or all slots
++ * after calling the function.
++ */
++static void kvm_mmu_zap_all_fast(struct kvm *kvm)
++{
++      spin_lock(&kvm->mmu_lock);
++      kvm->arch.mmu_valid_gen++;
++
++      kvm_zap_obsolete_pages(kvm);
++      spin_unlock(&kvm->mmu_lock);
++}
++
+ static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm,
+                       struct kvm_memory_slot *slot,
+                       struct kvm_page_track_notifier_node *node)
+ {
+-      kvm_mmu_zap_all(kvm);
++      kvm_mmu_zap_all_fast(kvm);
+ }
+ 
+ void kvm_mmu_init_vm(struct kvm *kvm)
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index 2c7daa3b968d..4ca86e70d3b4 100644
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -7116,13 +7116,41 @@ static int nested_enable_evmcs(struct kvm_vcpu *vcpu,
+ 
+ static bool svm_need_emulation_on_page_fault(struct kvm_vcpu *vcpu)
+ {
+-      bool is_user, smap;
+-
+-      is_user = svm_get_cpl(vcpu) == 3;
+-      smap = !kvm_read_cr4_bits(vcpu, X86_CR4_SMAP);
++      unsigned long cr4 = kvm_read_cr4(vcpu);
++      bool smep = cr4 & X86_CR4_SMEP;
++      bool smap = cr4 & X86_CR4_SMAP;
++      bool is_user = svm_get_cpl(vcpu) == 3;
+ 
+       /*
+-       * Detect and workaround Errata 1096 Fam_17h_00_0Fh
++       * Detect and workaround Errata 1096 Fam_17h_00_0Fh.
++       *
++       * Errata:
++       * When CPU raise #NPF on guest data access and vCPU CR4.SMAP=1, it is
++       * possible that CPU microcode implementing DecodeAssist will fail
++       * to read bytes of instruction which caused #NPF. In this case,
++       * GuestIntrBytes field of the VMCB on a VMEXIT will incorrectly
++       * return 0 instead of the correct guest instruction bytes.
++       *
++       * This happens because CPU microcode reading instruction bytes
++       * uses a special opcode which attempts to read data using CPL=0
++       * priviledges. The microcode reads CS:RIP and if it hits a SMAP
++       * fault, it gives up and returns no instruction bytes.
++       *
++       * Detection:
++       * We reach here in case CPU supports DecodeAssist, raised #NPF and
++       * returned 0 in GuestIntrBytes field of the VMCB.
++       * First, errata can only be triggered in case vCPU CR4.SMAP=1.
++       * Second, if vCPU CR4.SMEP=1, errata could only be triggered
++       * in case vCPU CPL==3 (Because otherwise guest would have triggered
++       * a SMEP fault instead of #NPF).
++       * Otherwise, vCPU CR4.SMEP=0, errata could be triggered by any vCPU CPL.
++       * As most guests enable SMAP if they have also enabled SMEP, use above
++       * logic in order to attempt minimize false-positive of detecting errata
++       * while still preserving all cases semantic correctness.
++       *
++       * Workaround:
++       * To determine what instruction the guest was executing, the hypervisor
++       * will have to decode the instruction at the instruction pointer.
+        *
+        * In non SEV guest, hypervisor will be able to read the guest
+        * memory to decode the instruction pointer when insn_len is zero
+@@ -7133,11 +7161,11 @@ static bool svm_need_emulation_on_page_fault(struct kvm_vcpu *vcpu)
+        * instruction pointer so we will not able to workaround it. Lets
+        * print the error and request to kill the guest.
+        */
+-      if (is_user && smap) {
++      if (smap && (!smep || is_user)) {
+               if (!sev_guest(vcpu->kvm))
+                       return true;
+ 
+-              pr_err_ratelimited("KVM: Guest triggered AMD Erratum 1096\n");
++              pr_err_ratelimited("KVM: SEV Guest triggered AMD Erratum 
1096\n");
+               kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
+       }
+ 
+diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
+index b96723294b2f..74ac35bbf1ef 100644
+--- a/arch/x86/kvm/vmx/nested.c
++++ b/arch/x86/kvm/vmx/nested.c
+@@ -4411,6 +4411,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
+       int len;
+       gva_t gva = 0;
+       struct vmcs12 *vmcs12;
++      struct x86_exception e;
+ 
+       if (!nested_vmx_check_permission(vcpu))
+               return 1;
+@@ -4451,7 +4452,8 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
+                               vmx_instruction_info, true, len, &gva))
+                       return 1;
+               /* _system ok, nested_vmx_check_permission has verified cpl=0 */
+-              kvm_write_guest_virt_system(vcpu, gva, &field_value, len, NULL);
++              if (kvm_write_guest_virt_system(vcpu, gva, &field_value, len, &e))
++                      kvm_inject_page_fault(vcpu, &e);
+       }
+ 
+       return nested_vmx_succeed(vcpu);
+@@ -4706,13 +4708,11 @@ static int handle_invept(struct kvm_vcpu *vcpu)
+ 
+       switch (type) {
+       case VMX_EPT_EXTENT_GLOBAL:
++      case VMX_EPT_EXTENT_CONTEXT:
+       /*
+-       * TODO: track mappings and invalidate
+-       * single context requests appropriately
++       * TODO: Sync the necessary shadow EPT roots here, rather than
++       * at the next emulated VM-entry.
+        */
+-      case VMX_EPT_EXTENT_CONTEXT:
+-              kvm_mmu_sync_roots(vcpu);
+-              kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
+               break;
+       default:
+               BUG_ON(1);
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 1f80fd560ede..4000bcff47b0 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -5265,6 +5265,13 @@ int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu, gva_t addr, void *val,
+       /* kvm_write_guest_virt_system can pull in tons of pages. */
+       vcpu->arch.l1tf_flush_l1d = true;
+ 
++      /*
++       * FIXME: this should call handle_emulation_failure if X86EMUL_IO_NEEDED
++       * is returned, but our callers are not ready for that and they blindly
++       * call kvm_inject_page_fault.  Ensure that they at least do not leak
++       * uninitialized kernel stack memory into cr2 and error code.
++       */
++      memset(exception, 0, sizeof(*exception));
+       return kvm_write_guest_virt_helper(addr, val, bytes, vcpu,
+                                          PFERR_WRITE_MASK, exception);
+ }
+diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile
+index 8901a1f89cf5..10fb42da0007 100644
+--- a/arch/x86/purgatory/Makefile
++++ b/arch/x86/purgatory/Makefile
+@@ -18,37 +18,40 @@ targets += purgatory.ro
+ KASAN_SANITIZE        := n
+ KCOV_INSTRUMENT := n
+ 
++# These are adjustments to the compiler flags used for objects that
++# make up the standalone purgatory.ro
++
++PURGATORY_CFLAGS_REMOVE := -mcmodel=kernel
++PURGATORY_CFLAGS := -mcmodel=large -ffreestanding -fno-zero-initialized-in-bss
++
+ # Default KBUILD_CFLAGS can have -pg option set when FTRACE is enabled. That
+ # in turn leaves some undefined symbols like __fentry__ in purgatory and not
+ # sure how to relocate those.
+ ifdef CONFIG_FUNCTION_TRACER
+-CFLAGS_REMOVE_sha256.o                += $(CC_FLAGS_FTRACE)
+-CFLAGS_REMOVE_purgatory.o     += $(CC_FLAGS_FTRACE)
+-CFLAGS_REMOVE_string.o                += $(CC_FLAGS_FTRACE)
+-CFLAGS_REMOVE_kexec-purgatory.o       += $(CC_FLAGS_FTRACE)
++PURGATORY_CFLAGS_REMOVE               += $(CC_FLAGS_FTRACE)
+ endif
+ 
+ ifdef CONFIG_STACKPROTECTOR
+-CFLAGS_REMOVE_sha256.o                += -fstack-protector
+-CFLAGS_REMOVE_purgatory.o     += -fstack-protector
+-CFLAGS_REMOVE_string.o                += -fstack-protector
+-CFLAGS_REMOVE_kexec-purgatory.o       += -fstack-protector
++PURGATORY_CFLAGS_REMOVE               += -fstack-protector
+ endif
+ 
+ ifdef CONFIG_STACKPROTECTOR_STRONG
+-CFLAGS_REMOVE_sha256.o                += -fstack-protector-strong
+-CFLAGS_REMOVE_purgatory.o     += -fstack-protector-strong
+-CFLAGS_REMOVE_string.o                += -fstack-protector-strong
+-CFLAGS_REMOVE_kexec-purgatory.o       += -fstack-protector-strong
++PURGATORY_CFLAGS_REMOVE               += -fstack-protector-strong
+ endif
+ 
+ ifdef CONFIG_RETPOLINE
+-CFLAGS_REMOVE_sha256.o                += $(RETPOLINE_CFLAGS)
+-CFLAGS_REMOVE_purgatory.o     += $(RETPOLINE_CFLAGS)
+-CFLAGS_REMOVE_string.o                += $(RETPOLINE_CFLAGS)
+-CFLAGS_REMOVE_kexec-purgatory.o       += $(RETPOLINE_CFLAGS)
++PURGATORY_CFLAGS_REMOVE               += $(RETPOLINE_CFLAGS)
+ endif
+ 
++CFLAGS_REMOVE_purgatory.o     += $(PURGATORY_CFLAGS_REMOVE)
++CFLAGS_purgatory.o            += $(PURGATORY_CFLAGS)
++
++CFLAGS_REMOVE_sha256.o                += $(PURGATORY_CFLAGS_REMOVE)
++CFLAGS_sha256.o                       += $(PURGATORY_CFLAGS)
++
++CFLAGS_REMOVE_string.o                += $(PURGATORY_CFLAGS_REMOVE)
++CFLAGS_string.o                       += $(PURGATORY_CFLAGS)
++
+ $(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE
+               $(call if_changed,ld)
+ 
+diff --git a/drivers/base/core.c b/drivers/base/core.c
+index eaf3aa0cb803..2dc0123cbba1 100644
+--- a/drivers/base/core.c
++++ b/drivers/base/core.c
+@@ -1820,12 +1820,63 @@ static inline struct kobject *get_glue_dir(struct device *dev)
+  */
+ static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir)
+ {
++      unsigned int ref;
++
+       /* see if we live in a "glue" directory */
+       if (!live_in_glue_dir(glue_dir, dev))
+               return;
+ 
+       mutex_lock(&gdp_mutex);
+-      if (!kobject_has_children(glue_dir))
++      /**
++       * There is a race condition between removing glue directory
++       * and adding a new device under the glue directory.
++       *
++       * CPU1:                                         CPU2:
++       *
++       * device_add()
++       *   get_device_parent()
++       *     class_dir_create_and_add()
++       *       kobject_add_internal()
++       *         create_dir()    // create glue_dir
++       *
++       *                                               device_add()
++       *                                                 get_device_parent()
++       *                                                   kobject_get() // get glue_dir
++       *
++       * device_del()
++       *   cleanup_glue_dir()
++       *     kobject_del(glue_dir)
++       *
++       *                                               kobject_add()
++       *                                                 kobject_add_internal()
++       *                                                   create_dir() // in glue_dir
++       *                                                     sysfs_create_dir_ns()
++       *                                                       kernfs_create_dir_ns(sd)
++       *
++       *       sysfs_remove_dir() // glue_dir->sd=NULL
++       *       sysfs_put()        // free glue_dir->sd
++       *
++       *                                                         // sd is freed
++       *                                                         kernfs_new_node(sd)
++       *                                                           kernfs_get(glue_dir)
++       *                                                           kernfs_add_one()
++       *                                                           kernfs_put()
++       *
++       * Before CPU1 remove last child device under glue dir, if CPU2 add
++       * a new device under glue dir, the glue_dir kobject reference count
++       * will be increase to 2 in kobject_get(k). And CPU2 has been called
++       * kernfs_create_dir_ns(). Meanwhile, CPU1 call sysfs_remove_dir()
++       * and sysfs_put(). This result in glue_dir->sd is freed.
++       *
++       * Then the CPU2 will see a stale "empty" but still potentially used
++       * glue dir around in kernfs_new_node().
++       *
++       * In order to avoid this happening, we also should make sure that
++       * kernfs_node for glue_dir is released in CPU1 only when refcount
++       * for glue_dir kobj is 1.
++       */
++      ref = kref_read(&glue_dir->kref);
++      if (!kobject_has_children(glue_dir) && !--ref)
+               kobject_del(glue_dir);
+       kobject_put(glue_dir);
+       mutex_unlock(&gdp_mutex);
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 6d61f5aafc78..7954a7924923 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -1162,10 +1162,6 @@ static int btusb_open(struct hci_dev *hdev)
+       }
+ 
+       data->intf->needs_remote_wakeup = 1;
+-      /* device specific wakeup source enabled and required for USB
+-       * remote wakeup while host is suspended
+-       */
+-      device_wakeup_enable(&data->udev->dev);
+ 
+       if (test_and_set_bit(BTUSB_INTR_RUNNING, &data->flags))
+               goto done;
+@@ -1229,7 +1225,6 @@ static int btusb_close(struct hci_dev *hdev)
+               goto failed;
+ 
+       data->intf->needs_remote_wakeup = 0;
+-      device_wakeup_disable(&data->udev->dev);
+       usb_autopm_put_interface(data->intf);
+ 
+ failed:
+diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
+index 3a4961dc5831..77d1d3894f8d 100644
+--- a/drivers/clk/clk.c
++++ b/drivers/clk/clk.c
+@@ -3020,15 +3020,49 @@ static int clk_flags_show(struct seq_file *s, void *data)
+ }
+ DEFINE_SHOW_ATTRIBUTE(clk_flags);
+ 
++static void possible_parent_show(struct seq_file *s, struct clk_core *core,
++                               unsigned int i, char terminator)
++{
++      struct clk_core *parent;
++
++      /*
++       * Go through the following options to fetch a parent's name.
++       *
++       * 1. Fetch the registered parent clock and use its name
++       * 2. Use the global (fallback) name if specified
++       * 3. Use the local fw_name if provided
++       * 4. Fetch parent clock's clock-output-name if DT index was set
++       *
++       * This may still fail in some cases, such as when the parent is
++       * specified directly via a struct clk_hw pointer, but it isn't
++       * registered (yet).
++       */
++      parent = clk_core_get_parent_by_index(core, i);
++      if (parent)
++              seq_printf(s, "%s", parent->name);
++      else if (core->parents[i].name)
++              seq_printf(s, "%s", core->parents[i].name);
++      else if (core->parents[i].fw_name)
++              seq_printf(s, "<%s>(fw)", core->parents[i].fw_name);
++      else if (core->parents[i].index >= 0)
++              seq_printf(s, "%s",
++                         of_clk_get_parent_name(core->of_node,
++                                                core->parents[i].index));
++      else
++              seq_puts(s, "(missing)");
++
++      seq_putc(s, terminator);
++}
++
+ static int possible_parents_show(struct seq_file *s, void *data)
+ {
+       struct clk_core *core = s->private;
+       int i;
+ 
+       for (i = 0; i < core->num_parents - 1; i++)
+-              seq_printf(s, "%s ", core->parents[i].name);
++              possible_parent_show(s, core, i, ' ');
+ 
+-      seq_printf(s, "%s\n", core->parents[i].name);
++      possible_parent_show(s, core, i, '\n');
+ 
+       return 0;
+ }
+diff --git a/drivers/clk/rockchip/clk-mmc-phase.c b/drivers/clk/rockchip/clk-mmc-phase.c
+index c61f4d3e52e2..2a841d38f8a7 100644
+--- a/drivers/clk/rockchip/clk-mmc-phase.c
++++ b/drivers/clk/rockchip/clk-mmc-phase.c
+@@ -52,10 +52,8 @@ static int rockchip_mmc_get_phase(struct clk_hw *hw)
+       u32 delay_num = 0;
+ 
+       /* See the comment for rockchip_mmc_set_phase below */
+-      if (!rate) {
+-              pr_err("%s: invalid clk rate\n", __func__);
++      if (!rate)
+               return -EINVAL;
+-      }
+ 
+       raw_value = readl(mmc_clock->reg) >> (mmc_clock->shift);
+ 
+diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
+index 710e09e28227..f9d7d6aaf3db 100644
+--- a/drivers/crypto/talitos.c
++++ b/drivers/crypto/talitos.c
+@@ -994,11 +994,13 @@ static void talitos_sg_unmap(struct device *dev,
+ 
+ static void ipsec_esp_unmap(struct device *dev,
+                           struct talitos_edesc *edesc,
+-                          struct aead_request *areq)
++                          struct aead_request *areq, bool encrypt)
+ {
+       struct crypto_aead *aead = crypto_aead_reqtfm(areq);
+       struct talitos_ctx *ctx = crypto_aead_ctx(aead);
+       unsigned int ivsize = crypto_aead_ivsize(aead);
++      unsigned int authsize = crypto_aead_authsize(aead);
++      unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
+       bool is_ipsec_esp = edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP;
+       struct talitos_ptr *civ_ptr = &edesc->desc.ptr[is_ipsec_esp ? 2 : 3];
+ 
+@@ -1007,7 +1009,7 @@ static void ipsec_esp_unmap(struct device *dev,
+                                        DMA_FROM_DEVICE);
+       unmap_single_talitos_ptr(dev, civ_ptr, DMA_TO_DEVICE);
+ 
+-      talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->cryptlen,
++      talitos_sg_unmap(dev, edesc, areq->src, areq->dst, cryptlen,
+                        areq->assoclen);
+ 
+       if (edesc->dma_len)
+@@ -1018,7 +1020,7 @@ static void ipsec_esp_unmap(struct device *dev,
+               unsigned int dst_nents = edesc->dst_nents ? : 1;
+ 
+               sg_pcopy_to_buffer(areq->dst, dst_nents, ctx->iv, ivsize,
+-                                 areq->assoclen + areq->cryptlen - ivsize);
++                                 areq->assoclen + cryptlen - ivsize);
+       }
+ }
+ 
+@@ -1040,7 +1042,7 @@ static void ipsec_esp_encrypt_done(struct device *dev,
+ 
+       edesc = container_of(desc, struct talitos_edesc, desc);
+ 
+-      ipsec_esp_unmap(dev, edesc, areq);
++      ipsec_esp_unmap(dev, edesc, areq, true);
+ 
+       /* copy the generated ICV to dst */
+       if (edesc->icv_ool) {
+@@ -1074,7 +1076,7 @@ static void ipsec_esp_decrypt_swauth_done(struct device *dev,
+ 
+       edesc = container_of(desc, struct talitos_edesc, desc);
+ 
+-      ipsec_esp_unmap(dev, edesc, req);
++      ipsec_esp_unmap(dev, edesc, req, false);
+ 
+       if (!err) {
+               char icvdata[SHA512_DIGEST_SIZE];
+@@ -1120,7 +1122,7 @@ static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
+ 
+       edesc = container_of(desc, struct talitos_edesc, desc);
+ 
+-      ipsec_esp_unmap(dev, edesc, req);
++      ipsec_esp_unmap(dev, edesc, req, false);
+ 
+       /* check ICV auth status */
+       if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
+@@ -1223,6 +1225,7 @@ static int talitos_sg_map(struct device *dev, struct scatterlist *src,
+  * fill in and submit ipsec_esp descriptor
+  */
+ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
++                   bool encrypt,
+                    void (*callback)(struct device *dev,
+                                     struct talitos_desc *desc,
+                                     void *context, int error))
+@@ -1232,7 +1235,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
+       struct talitos_ctx *ctx = crypto_aead_ctx(aead);
+       struct device *dev = ctx->dev;
+       struct talitos_desc *desc = &edesc->desc;
+-      unsigned int cryptlen = areq->cryptlen;
++      unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
+       unsigned int ivsize = crypto_aead_ivsize(aead);
+       int tbl_off = 0;
+       int sg_count, ret;
+@@ -1359,7 +1362,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
+ 
+       ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
+       if (ret != -EINPROGRESS) {
+-              ipsec_esp_unmap(dev, edesc, areq);
++              ipsec_esp_unmap(dev, edesc, areq, encrypt);
+               kfree(edesc);
+       }
+       return ret;
+@@ -1473,9 +1476,10 @@ static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
+       unsigned int authsize = crypto_aead_authsize(authenc);
+       struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
+       unsigned int ivsize = crypto_aead_ivsize(authenc);
++      unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
+ 
+       return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
+-                                 iv, areq->assoclen, areq->cryptlen,
++                                 iv, areq->assoclen, cryptlen,
+                                  authsize, ivsize, icv_stashing,
+                                  areq->base.flags, encrypt);
+ }
+@@ -1494,7 +1498,7 @@ static int aead_encrypt(struct aead_request *req)
+       /* set encrypt */
+       edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
+ 
+-      return ipsec_esp(edesc, req, ipsec_esp_encrypt_done);
++      return ipsec_esp(edesc, req, true, ipsec_esp_encrypt_done);
+ }
+ 
+ static int aead_decrypt(struct aead_request *req)
+@@ -1506,14 +1510,13 @@ static int aead_decrypt(struct aead_request *req)
+       struct talitos_edesc *edesc;
+       void *icvdata;
+ 
+-      req->cryptlen -= authsize;
+-
+       /* allocate extended descriptor */
+       edesc = aead_edesc_alloc(req, req->iv, 1, false);
+       if (IS_ERR(edesc))
+               return PTR_ERR(edesc);
+ 
+-      if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
++      if ((edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP) &&
++          (priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
+           ((!edesc->src_nents && !edesc->dst_nents) ||
+            priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {
+ 
+@@ -1524,7 +1527,8 @@ static int aead_decrypt(struct aead_request *req)
+ 
+               /* reset integrity check result bits */
+ 
+-              return ipsec_esp(edesc, req, ipsec_esp_decrypt_hwauth_done);
++              return ipsec_esp(edesc, req, false,
++                               ipsec_esp_decrypt_hwauth_done);
+       }
+ 
+       /* Have to check the ICV with software */
+@@ -1540,7 +1544,7 @@ static int aead_decrypt(struct aead_request *req)
+       sg_pcopy_to_buffer(req->src, edesc->src_nents ? : 1, icvdata, authsize,
+                          req->assoclen + req->cryptlen - authsize);
+ 
+-      return ipsec_esp(edesc, req, ipsec_esp_decrypt_swauth_done);
++      return ipsec_esp(edesc, req, false, ipsec_esp_decrypt_swauth_done);
+ }
+ 
+ static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
+@@ -1591,6 +1595,18 @@ static int ablkcipher_des3_setkey(struct crypto_ablkcipher *cipher,
+       return ablkcipher_setkey(cipher, key, keylen);
+ }
+ 
++static int ablkcipher_aes_setkey(struct crypto_ablkcipher *cipher,
++                                const u8 *key, unsigned int keylen)
++{
++      if (keylen == AES_KEYSIZE_128 || keylen == AES_KEYSIZE_192 ||
++          keylen == AES_KEYSIZE_256)
++              return ablkcipher_setkey(cipher, key, keylen);
++
++      crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
++
++      return -EINVAL;
++}
++
+ static void common_nonsnoop_unmap(struct device *dev,
+                                 struct talitos_edesc *edesc,
+                                 struct ablkcipher_request *areq)
+@@ -1713,6 +1729,14 @@ static int ablkcipher_encrypt(struct ablkcipher_request *areq)
+       struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+       struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+       struct talitos_edesc *edesc;
++      unsigned int blocksize =
++                      crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(cipher));
++
++      if (!areq->nbytes)
++              return 0;
++
++      if (areq->nbytes % blocksize)
++              return -EINVAL;
+ 
+       /* allocate extended descriptor */
+       edesc = ablkcipher_edesc_alloc(areq, true);
+@@ -1730,6 +1754,14 @@ static int ablkcipher_decrypt(struct ablkcipher_request *areq)
+       struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+       struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+       struct talitos_edesc *edesc;
++      unsigned int blocksize =
++                      crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(cipher));
++
++      if (!areq->nbytes)
++              return 0;
++
++      if (areq->nbytes % blocksize)
++              return -EINVAL;
+ 
+       /* allocate extended descriptor */
+       edesc = ablkcipher_edesc_alloc(areq, false);
+@@ -2752,6 +2784,7 @@ static struct talitos_alg_template driver_algs[] = {
+                               .min_keysize = AES_MIN_KEY_SIZE,
+                               .max_keysize = AES_MAX_KEY_SIZE,
+                               .ivsize = AES_BLOCK_SIZE,
++                              .setkey = ablkcipher_aes_setkey,
+                       }
+               },
+               .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+@@ -2768,6 +2801,7 @@ static struct talitos_alg_template driver_algs[] = {
+                               .min_keysize = AES_MIN_KEY_SIZE,
+                               .max_keysize = AES_MAX_KEY_SIZE,
+                               .ivsize = AES_BLOCK_SIZE,
++                              .setkey = ablkcipher_aes_setkey,
+                       }
+               },
+               .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+@@ -2778,13 +2812,13 @@ static struct talitos_alg_template driver_algs[] = {
+               .alg.crypto = {
+                       .cra_name = "ctr(aes)",
+                       .cra_driver_name = "ctr-aes-talitos",
+-                      .cra_blocksize = AES_BLOCK_SIZE,
++                      .cra_blocksize = 1,
+                       .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+                                    CRYPTO_ALG_ASYNC,
+                       .cra_ablkcipher = {
+                               .min_keysize = AES_MIN_KEY_SIZE,
+                               .max_keysize = AES_MAX_KEY_SIZE,
+-                              .ivsize = AES_BLOCK_SIZE,
++                              .setkey = ablkcipher_aes_setkey,
+                       }
+               },
+               .desc_hdr_template = DESC_HDR_TYPE_AESU_CTR_NONSNOOP |
+@@ -2818,7 +2852,6 @@ static struct talitos_alg_template driver_algs[] = {
+                       .cra_ablkcipher = {
+                               .min_keysize = DES_KEY_SIZE,
+                               .max_keysize = DES_KEY_SIZE,
+-                              .ivsize = DES_BLOCK_SIZE,
+                               .setkey = ablkcipher_des_setkey,
+                       }
+               },
+@@ -2854,7 +2887,6 @@ static struct talitos_alg_template driver_algs[] = {
+                       .cra_ablkcipher = {
+                               .min_keysize = DES3_EDE_KEY_SIZE,
+                               .max_keysize = DES3_EDE_KEY_SIZE,
+-                              .ivsize = DES3_EDE_BLOCK_SIZE,
+                               .setkey = ablkcipher_des3_setkey,
+                       }
+               },
+diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c
+index ef93406ace1b..36ce11a67235 100644
+--- a/drivers/firmware/ti_sci.c
++++ b/drivers/firmware/ti_sci.c
+@@ -466,9 +466,9 @@ static int ti_sci_cmd_get_revision(struct ti_sci_info *info)
+       struct ti_sci_xfer *xfer;
+       int ret;
+ 
+-      /* No need to setup flags since it is expected to respond */
+       xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_VERSION,
+-                                 0x0, sizeof(struct ti_sci_msg_hdr),
++                                 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
++                                 sizeof(struct ti_sci_msg_hdr),
+                                  sizeof(*rev_info));
+       if (IS_ERR(xfer)) {
+               ret = PTR_ERR(xfer);
+@@ -596,9 +596,9 @@ static int ti_sci_get_device_state(const struct ti_sci_handle *handle,
+       info = handle_to_ti_sci_info(handle);
+       dev = info->dev;
+ 
+-      /* Response is expected, so need of any flags */
+       xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_DEVICE_STATE,
+-                                 0, sizeof(*req), sizeof(*resp));
++                                 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
++                                 sizeof(*req), sizeof(*resp));
+       if (IS_ERR(xfer)) {
+               ret = PTR_ERR(xfer);
+               dev_err(dev, "Message alloc failed(%d)\n", ret);
+diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c
+index b6a4efce7c92..be8590d386b1 100644
+--- a/drivers/gpio/gpio-mockup.c
++++ b/drivers/gpio/gpio-mockup.c
+@@ -309,6 +309,7 @@ static const struct file_operations gpio_mockup_debugfs_ops = {
+       .read = gpio_mockup_debugfs_read,
+       .write = gpio_mockup_debugfs_write,
+       .llseek = no_llseek,
++      .release = single_release,
+ };
+ 
+ static void gpio_mockup_debugfs_setup(struct device *dev,
+diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
+index c9fc9e232aaf..4d5c285c46f8 100644
+--- a/drivers/gpio/gpiolib-acpi.c
++++ b/drivers/gpio/gpiolib-acpi.c
+@@ -7,6 +7,7 @@
+  *          Mika Westerberg <mika.westerb...@linux.intel.com>
+  */
+ 
++#include <linux/dmi.h>
+ #include <linux/errno.h>
+ #include <linux/gpio/consumer.h>
+ #include <linux/gpio/driver.h>
+@@ -19,6 +20,11 @@
+ 
+ #include "gpiolib.h"
+ 
++static int run_edge_events_on_boot = -1;
++module_param(run_edge_events_on_boot, int, 0444);
++MODULE_PARM_DESC(run_edge_events_on_boot,
++               "Run edge _AEI event-handlers at boot: 0=no, 1=yes, -1=auto");
++
+ /**
+  * struct acpi_gpio_event - ACPI GPIO event handler data
+  *
+@@ -170,10 +176,13 @@ static void acpi_gpiochip_request_irq(struct acpi_gpio_chip *acpi_gpio,
+       event->irq_requested = true;
+ 
+       /* Make sure we trigger the initial state of edge-triggered IRQs */
+-      value = gpiod_get_raw_value_cansleep(event->desc);
+-      if (((event->irqflags & IRQF_TRIGGER_RISING) && value == 1) ||
+-          ((event->irqflags & IRQF_TRIGGER_FALLING) && value == 0))
+-              event->handler(event->irq, event);
++      if (run_edge_events_on_boot &&
++          (event->irqflags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))) {
++              value = gpiod_get_raw_value_cansleep(event->desc);
++              if (((event->irqflags & IRQF_TRIGGER_RISING) && value == 1) ||
++                  ((event->irqflags & IRQF_TRIGGER_FALLING) && value == 0))
++                      event->handler(event->irq, event);
++      }
+ }
+ 
+ static void acpi_gpiochip_request_irqs(struct acpi_gpio_chip *acpi_gpio)
+@@ -1283,3 +1292,28 @@ static int acpi_gpio_handle_deferred_request_irqs(void)
+ }
+ /* We must use _sync so that this runs after the first deferred_probe run */
+ late_initcall_sync(acpi_gpio_handle_deferred_request_irqs);
++
++static const struct dmi_system_id run_edge_events_on_boot_blacklist[] = {
++      {
++              .matches = {
++                      DMI_MATCH(DMI_SYS_VENDOR, "MINIX"),
++                      DMI_MATCH(DMI_PRODUCT_NAME, "Z83-4"),
++              }
++      },
++      {} /* Terminating entry */
++};
++
++static int acpi_gpio_setup_params(void)
++{
++      if (run_edge_events_on_boot < 0) {
++              if (dmi_check_system(run_edge_events_on_boot_blacklist))
++                      run_edge_events_on_boot = 0;
++              else
++                      run_edge_events_on_boot = 1;
++      }
++
++      return 0;
++}
++
++/* Directly after dmi_setup() which runs as core_initcall() */
++postcore_initcall(acpi_gpio_setup_params);
+diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
+index f272b5143997..e806cd9a14ba 100644
+--- a/drivers/gpio/gpiolib.c
++++ b/drivers/gpio/gpiolib.c
+@@ -535,6 +535,14 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
+       if (lflags & ~GPIOHANDLE_REQUEST_VALID_FLAGS)
+               return -EINVAL;
+ 
++      /*
++       * Do not allow both INPUT & OUTPUT flags to be set as they are
++       * contradictory.
++       */
++      if ((lflags & GPIOHANDLE_REQUEST_INPUT) &&
++          (lflags & GPIOHANDLE_REQUEST_OUTPUT))
++              return -EINVAL;
++
+       /*
+        * Do not allow OPEN_SOURCE & OPEN_DRAIN flags in a single request. If
+        * the hardware actually supports enabling both at the same time the
+@@ -926,7 +934,9 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
+       }
+ 
+       /* This is just wrong: we don't look for events on output lines */
+-      if (lflags & GPIOHANDLE_REQUEST_OUTPUT) {
++      if ((lflags & GPIOHANDLE_REQUEST_OUTPUT) ||
++          (lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN) ||
++          (lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE)) {
+               ret = -EINVAL;
+               goto out_free_label;
+       }
+@@ -940,10 +950,6 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
+ 
+       if (lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW)
+               set_bit(FLAG_ACTIVE_LOW, &desc->flags);
+-      if (lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN)
+-              set_bit(FLAG_OPEN_DRAIN, &desc->flags);
+-      if (lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE)
+-              set_bit(FLAG_OPEN_SOURCE, &desc->flags);
+ 
+       ret = gpiod_direction_input(desc);
+       if (ret)
+diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
+index d8a0bcd02f34..ffd95bfeaa94 100644
+--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
++++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
+@@ -90,6 +90,12 @@ static const struct drm_dmi_panel_orientation_data itworks_tw891 = {
+       .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
+ };
+ 
++static const struct drm_dmi_panel_orientation_data lcd720x1280_rightside_up = {
++      .width = 720,
++      .height = 1280,
++      .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
++};
++
+ static const struct drm_dmi_panel_orientation_data lcd800x1280_rightside_up = {
+       .width = 800,
+       .height = 1280,
+@@ -123,6 +129,12 @@ static const struct dmi_system_id orientation_data[] = {
+                 DMI_EXACT_MATCH(DMI_BOARD_NAME, "Default string"),
+               },
+               .driver_data = (void *)&gpd_micropc,
++      }, {    /* GPD MicroPC (later BIOS versions with proper DMI strings) */
++              .matches = {
++                DMI_EXACT_MATCH(DMI_SYS_VENDOR, "GPD"),
++                DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "MicroPC"),
++              },
++              .driver_data = (void *)&lcd720x1280_rightside_up,
+       }, {    /*
+                * GPD Pocket, note that the the DMI data is less generic then
+                * it seems, devices with a board-vendor of "AMI Corporation"
+diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c 
b/drivers/gpu/drm/i915/intel_dp_mst.c
+index d89120dcac67..8e6a7b8dffca 100644
+--- a/drivers/gpu/drm/i915/intel_dp_mst.c
++++ b/drivers/gpu/drm/i915/intel_dp_mst.c
+@@ -125,7 +125,15 @@ static int intel_dp_mst_compute_config(struct 
intel_encoder *encoder,
+       limits.max_lane_count = intel_dp_max_lane_count(intel_dp);
+ 
+       limits.min_bpp = intel_dp_min_bpp(pipe_config);
+-      limits.max_bpp = pipe_config->pipe_bpp;
++      /*
++       * FIXME: If all the streams can't fit into the link with
++       * their current pipe_bpp we should reduce pipe_bpp across
++       * the board until things start to fit. Until then we
++       * limit to <= 8bpc since that's what was hardcoded for all
++       * MST streams previously. This hack should be removed once
++       * we have the proper retry logic in place.
++       */
++      limits.max_bpp = min(pipe_config->pipe_bpp, 24);
+ 
+       intel_dp_adjust_compliance_config(intel_dp, pipe_config, &limits);
+ 
+diff --git a/drivers/gpu/drm/i915/intel_workarounds.c 
b/drivers/gpu/drm/i915/intel_workarounds.c
+index edd57a5e0495..b50a7c3f22bf 100644
+--- a/drivers/gpu/drm/i915/intel_workarounds.c
++++ b/drivers/gpu/drm/i915/intel_workarounds.c
+@@ -294,11 +294,6 @@ static void gen9_ctx_workarounds_init(struct 
intel_engine_cs *engine)
+                         FLOW_CONTROL_ENABLE |
+                         PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
+ 
+-      /* Syncing dependencies between camera and graphics:skl,bxt,kbl */
+-      if (!IS_COFFEELAKE(i915))
+-              WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
+-                                GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
+-
+       /* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl,glk,cfl */
+       /* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl,cfl */
+       WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
+diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c
+index 477c0f766663..b609dc030d6c 100644
+--- a/drivers/gpu/drm/lima/lima_gem.c
++++ b/drivers/gpu/drm/lima/lima_gem.c
+@@ -342,7 +342,7 @@ int lima_gem_wait(struct drm_file *file, u32 handle, u32 
op, s64 timeout_ns)
+       timeout = drm_timeout_abs_to_jiffies(timeout_ns);
+ 
+       ret = drm_gem_reservation_object_wait(file, handle, write, timeout);
+-      if (ret == 0)
++      if (ret == -ETIME)
+               ret = timeout ? -ETIMEDOUT : -EBUSY;
+ 
+       return ret;
+diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c 
b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+index c021d4c8324f..7f5408cb2377 100644
+--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
++++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+@@ -567,12 +567,15 @@ static int mtk_drm_probe(struct platform_device *pdev)
+                       comp = devm_kzalloc(dev, sizeof(*comp), GFP_KERNEL);
+                       if (!comp) {
+                               ret = -ENOMEM;
++                              of_node_put(node);
+                               goto err_node;
+                       }
+ 
+                       ret = mtk_ddp_comp_init(dev, node, comp, comp_id, NULL);
+-                      if (ret)
++                      if (ret) {
++                              of_node_put(node);
+                               goto err_node;
++                      }
+ 
+                       private->ddp_comp[comp_id] = comp;
+               }
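
The mtk_drm_probe() hunk above is an instance of a common OF-iteration rule: the loop helper holds a reference on the node it hands you, so every early exit has to drop that reference itself. Below is a small userspace analogue of the pattern, with a plain refcount standing in for of_node_get()/of_node_put(); the names and the simulated failure are illustrative only.

#include <stdio.h>

struct node { int refcount; };

static struct node *node_get(struct node *n) { n->refcount++; return n; }
static void node_put(struct node *n)         { n->refcount--; }

/* returns 0 on success, -1 on a (simulated) component-init failure */
static int probe_children(struct node *nodes, int count, int fail_at)
{
        int ret = 0;

        for (int i = 0; i < count; i++) {
                struct node *node = node_get(&nodes[i]); /* iterator-style ref */

                if (i == fail_at) {
                        ret = -1;
                        node_put(node);  /* drop the current node's ref ... */
                        goto err;        /* ... before leaving the loop early */
                }
                node_put(node);          /* normal end-of-iteration drop */
        }
        return 0;
err:
        return ret;                      /* common unwind would go here */
}

int main(void)
{
        struct node nodes[3] = { {0}, {0}, {0} };

        probe_children(nodes, 3, 1);
        for (int i = 0; i < 3; i++)
                printf("node %d refcount %d\n", i, nodes[i].refcount); /* all 0 */
        return 0;
}
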
+diff --git a/drivers/gpu/drm/meson/meson_plane.c 
b/drivers/gpu/drm/meson/meson_plane.c
+index d90427b93a51..2cccbcf5b53c 100644
+--- a/drivers/gpu/drm/meson/meson_plane.c
++++ b/drivers/gpu/drm/meson/meson_plane.c
+@@ -153,6 +153,13 @@ static void meson_plane_atomic_update(struct drm_plane 
*plane,
+               priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_32 |
+                                             OSD_COLOR_MATRIX_32_ARGB;
+               break;
++      case DRM_FORMAT_XBGR8888:
++              /* For XBGR, replace the pixel's alpha by 0xFF */

++              writel_bits_relaxed(OSD_REPLACE_EN, OSD_REPLACE_EN,
++                                  priv->io_base + _REG(VIU_OSD1_CTRL_STAT2));
++              priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_32 |
++                                            OSD_COLOR_MATRIX_32_ABGR;
++              break;
+       case DRM_FORMAT_ARGB8888:
+               /* For ARGB, use the pixel's alpha */
+               writel_bits_relaxed(OSD_REPLACE_EN, 0,
+@@ -160,6 +167,13 @@ static void meson_plane_atomic_update(struct drm_plane 
*plane,
+               priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_32 |
+                                             OSD_COLOR_MATRIX_32_ARGB;
+               break;
++      case DRM_FORMAT_ABGR8888:
++              /* For ABGR, use the pixel's alpha */
++              writel_bits_relaxed(OSD_REPLACE_EN, 0,
++                                  priv->io_base + _REG(VIU_OSD1_CTRL_STAT2));
++              priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_32 |
++                                            OSD_COLOR_MATRIX_32_ABGR;
++              break;
+       case DRM_FORMAT_RGB888:
+               priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_24 |
+                                             OSD_COLOR_MATRIX_24_RGB;
+@@ -346,7 +360,9 @@ static const struct drm_plane_funcs meson_plane_funcs = {
+ 
+ static const uint32_t supported_drm_formats[] = {
+       DRM_FORMAT_ARGB8888,
++      DRM_FORMAT_ABGR8888,
+       DRM_FORMAT_XRGB8888,
++      DRM_FORMAT_XBGR8888,
+       DRM_FORMAT_RGB888,
+       DRM_FORMAT_RGB565,
+ };
+diff --git a/drivers/iio/adc/stm32-dfsdm-adc.c 
b/drivers/iio/adc/stm32-dfsdm-adc.c
+index 588907cc3b6b..6b90a40882f2 100644
+--- a/drivers/iio/adc/stm32-dfsdm-adc.c
++++ b/drivers/iio/adc/stm32-dfsdm-adc.c
+@@ -39,9 +39,16 @@
+ #define DFSDM_MAX_INT_OVERSAMPLING 256
+ #define DFSDM_MAX_FL_OVERSAMPLING 1024
+ 
+-/* Max sample resolutions */
+-#define DFSDM_MAX_RES BIT(31)
+-#define DFSDM_DATA_RES BIT(23)
++/* Limit filter output resolution to 31 bits. (i.e. sample range is +/-2^30) 
*/
++#define DFSDM_DATA_MAX BIT(30)
++/*
++ * Data are output as two's complement data in a 24 bit field.
++ * Data from filters are in the range +/-2^(n-1)
++ * 2^(n-1) maximum positive value cannot be coded in 2's complement n bits
++ * An extra bit is required to avoid wrap-around of the binary code for 
2^(n-1)
++ * So, the resolution of samples from filter is actually limited to 23 bits
++ */
++#define DFSDM_DATA_RES 24
+ 
+ /* Filter configuration */
+ #define DFSDM_CR1_CFG_MASK (DFSDM_CR1_RCH_MASK | DFSDM_CR1_RCONT_MASK | \
+@@ -181,14 +188,15 @@ static int stm32_dfsdm_get_jextsel(struct iio_dev 
*indio_dev,
+       return -EINVAL;
+ }
+ 
+-static int stm32_dfsdm_set_osrs(struct stm32_dfsdm_filter *fl,
+-                              unsigned int fast, unsigned int oversamp)
++static int stm32_dfsdm_compute_osrs(struct stm32_dfsdm_filter *fl,
++                                  unsigned int fast, unsigned int oversamp)
+ {
+       unsigned int i, d, fosr, iosr;
+-      u64 res;
+-      s64 delta;
++      u64 res, max;
++      int bits, shift;
+       unsigned int m = 1;     /* multiplication factor */
+       unsigned int p = fl->ford;      /* filter order (ford) */
++      struct stm32_dfsdm_filter_osr *flo = &fl->flo;
+ 
+       pr_debug("%s: Requested oversampling: %d\n",  __func__, oversamp);
+       /*
+@@ -207,11 +215,9 @@ static int stm32_dfsdm_set_osrs(struct stm32_dfsdm_filter 
*fl,
+ 
+       /*
+        * Look for filter and integrator oversampling ratios which allows
+-       * to reach 24 bits data output resolution.
+-       * Leave as soon as if exact resolution if reached.
+-       * Otherwise the higher resolution below 32 bits is kept.
++       * to maximize data output resolution.
+        */
+-      fl->res = 0;
++      flo->res = 0;
+       for (fosr = 1; fosr <= DFSDM_MAX_FL_OVERSAMPLING; fosr++) {
+               for (iosr = 1; iosr <= DFSDM_MAX_INT_OVERSAMPLING; iosr++) {
+                       if (fast)
+@@ -236,32 +242,68 @@ static int stm32_dfsdm_set_osrs(struct 
stm32_dfsdm_filter *fl,
+                       res = fosr;
+                       for (i = p - 1; i > 0; i--) {
+                               res = res * (u64)fosr;
+-                              if (res > DFSDM_MAX_RES)
++                              if (res > DFSDM_DATA_MAX)
+                                       break;
+                       }
+-                      if (res > DFSDM_MAX_RES)
++                      if (res > DFSDM_DATA_MAX)
+                               continue;
++
+                       res = res * (u64)m * (u64)iosr;
+-                      if (res > DFSDM_MAX_RES)
++                      if (res > DFSDM_DATA_MAX)
+                               continue;
+ 
+-                      delta = res - DFSDM_DATA_RES;
+-
+-                      if (res >= fl->res) {
+-                              fl->res = res;
+-                              fl->fosr = fosr;
+-                              fl->iosr = iosr;
+-                              fl->fast = fast;
+-                              pr_debug("%s: fosr = %d, iosr = %d\n",
+-                                       __func__, fl->fosr, fl->iosr);
++                      if (res >= flo->res) {
++                              flo->res = res;
++                              flo->fosr = fosr;
++                              flo->iosr = iosr;
++
++                              bits = fls(flo->res);
++                              /* 8 LSBs in data register contain chan info */
++                              max = flo->res << 8;
++
++                              /* if resolution is not a power of two */
++                              if (flo->res > BIT(bits - 1))
++                                      bits++;
++                              else
++                                      max--;
++
++                              shift = DFSDM_DATA_RES - bits;
++                              /*
++                               * Compute right/left shift
++                               * Right shift is performed by hardware
++                               * when transferring samples to data register.
++                               * Left shift is done by software on buffer
++                               */
++                              if (shift > 0) {
++                                      /* Resolution is lower than 24 bits */
++                                      flo->rshift = 0;
++                                      flo->lshift = shift;
++                              } else {
++                                      /*
++                                       * If resolution is 24 bits or more,
++                                       * max positive value may be ambiguous
++                                       * (equal to max negative value as sign
++                                       * bit is dropped).
++                                       * Reduce resolution to 23 bits (rshift)
++                                       * to keep the sign on bit 23 and treat
++                                       * saturation before rescaling on 24
++                                       * bits (lshift).
++                                       */
++                                      flo->rshift = 1 - shift;
++                                      flo->lshift = 1;
++                                      max >>= flo->rshift;
++                              }
++                              flo->max = (s32)max;
++
++                              pr_debug("%s: fast %d, fosr %d, iosr %d, res 
0x%llx/%d bits, rshift %d, lshift %d\n",
++                                       __func__, fast, flo->fosr, flo->iosr,
++                                       flo->res, bits, flo->rshift,
++                                       flo->lshift);
+                       }
+-
+-                      if (!delta)
+-                              return 0;
+               }
+       }
+ 
+-      if (!fl->res)
++      if (!flo->res)
+               return -EINVAL;
+ 
+       return 0;
+@@ -384,6 +426,36 @@ static int stm32_dfsdm_filter_set_trig(struct 
stm32_dfsdm_adc *adc,
+       return 0;
+ }
+ 
++static int stm32_dfsdm_channels_configure(struct stm32_dfsdm_adc *adc,
++                                        unsigned int fl_id,
++                                        struct iio_trigger *trig)
++{
++      struct iio_dev *indio_dev = iio_priv_to_dev(adc);
++      struct regmap *regmap = adc->dfsdm->regmap;
++      struct stm32_dfsdm_filter *fl = &adc->dfsdm->fl_list[fl_id];
++      struct stm32_dfsdm_filter_osr *flo = &fl->flo;
++      const struct iio_chan_spec *chan;
++      unsigned int bit;
++      int ret;
++
++      if (!flo->res)
++              return -EINVAL;
++
++      for_each_set_bit(bit, &adc->smask,
++                       sizeof(adc->smask) * BITS_PER_BYTE) {
++              chan = indio_dev->channels + bit;
++
++              ret = regmap_update_bits(regmap,
++                                       DFSDM_CHCFGR2(chan->channel),
++                                       DFSDM_CHCFGR2_DTRBS_MASK,
++                                       DFSDM_CHCFGR2_DTRBS(flo->rshift));
++              if (ret)
++                      return ret;
++      }
++
++      return 0;
++}
++
+ static int stm32_dfsdm_filter_configure(struct stm32_dfsdm_adc *adc,
+                                       unsigned int fl_id,
+                                       struct iio_trigger *trig)
+@@ -391,6 +463,7 @@ static int stm32_dfsdm_filter_configure(struct 
stm32_dfsdm_adc *adc,
+       struct iio_dev *indio_dev = iio_priv_to_dev(adc);
+       struct regmap *regmap = adc->dfsdm->regmap;
+       struct stm32_dfsdm_filter *fl = &adc->dfsdm->fl_list[fl_id];
++      struct stm32_dfsdm_filter_osr *flo = &fl->flo;
+       u32 cr1;
+       const struct iio_chan_spec *chan;
+       unsigned int bit, jchg = 0;
+@@ -398,13 +471,13 @@ static int stm32_dfsdm_filter_configure(struct 
stm32_dfsdm_adc *adc,
+ 
+       /* Average integrator oversampling */
+       ret = regmap_update_bits(regmap, DFSDM_FCR(fl_id), DFSDM_FCR_IOSR_MASK,
+-                               DFSDM_FCR_IOSR(fl->iosr - 1));
++                               DFSDM_FCR_IOSR(flo->iosr - 1));
+       if (ret)
+               return ret;
+ 
+       /* Filter order and Oversampling */
+       ret = regmap_update_bits(regmap, DFSDM_FCR(fl_id), DFSDM_FCR_FOSR_MASK,
+-                               DFSDM_FCR_FOSR(fl->fosr - 1));
++                               DFSDM_FCR_FOSR(flo->fosr - 1));
+       if (ret)
+               return ret;
+ 
+@@ -573,7 +646,7 @@ static int dfsdm_adc_set_samp_freq(struct iio_dev 
*indio_dev,
+                       "Rate not accurate. requested (%u), actual (%u)\n",
+                       sample_freq, spi_freq / oversamp);
+ 
+-      ret = stm32_dfsdm_set_osrs(fl, 0, oversamp);
++      ret = stm32_dfsdm_compute_osrs(fl, 0, oversamp);
+       if (ret < 0) {
+               dev_err(&indio_dev->dev, "No filter parameters that match!\n");
+               return ret;
+@@ -623,6 +696,10 @@ static int stm32_dfsdm_start_conv(struct stm32_dfsdm_adc 
*adc,
+       struct regmap *regmap = adc->dfsdm->regmap;
+       int ret;
+ 
++      ret = stm32_dfsdm_channels_configure(adc, adc->fl_id, trig);
++      if (ret < 0)
++              return ret;
++
+       ret = stm32_dfsdm_start_channel(adc);
+       if (ret < 0)
+               return ret;
+@@ -729,6 +806,8 @@ static void stm32_dfsdm_dma_buffer_done(void *data)
+ {
+       struct iio_dev *indio_dev = data;
+       struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
++      struct stm32_dfsdm_filter *fl = &adc->dfsdm->fl_list[adc->fl_id];
++      struct stm32_dfsdm_filter_osr *flo = &fl->flo;
+       int available = stm32_dfsdm_adc_dma_residue(adc);
+       size_t old_pos;
+ 
+@@ -751,10 +830,19 @@ static void stm32_dfsdm_dma_buffer_done(void *data)
+       old_pos = adc->bufi;
+ 
+       while (available >= indio_dev->scan_bytes) {
+-              u32 *buffer = (u32 *)&adc->rx_buf[adc->bufi];
++              s32 *buffer = (s32 *)&adc->rx_buf[adc->bufi];
+ 
+               /* Mask 8 LSB that contains the channel ID */
+-              *buffer = (*buffer & 0xFFFFFF00) << 8;
++              *buffer &= 0xFFFFFF00;
++              /* Convert 2^(n-1) sample to 2^(n-1)-1 to avoid wrap-around */
++              if (*buffer > flo->max)
++                      *buffer -= 1;
++              /*
++               * Samples from filter are retrieved with 23 bits resolution
++               * or less. Shift left to align MSB on 24 bits.
++               */
++              *buffer <<= flo->lshift;
++
+               available -= indio_dev->scan_bytes;
+               adc->bufi += indio_dev->scan_bytes;
+               if (adc->bufi >= adc->buf_sz) {
+@@ -1078,7 +1166,7 @@ static int stm32_dfsdm_write_raw(struct iio_dev 
*indio_dev,
+               ret = iio_device_claim_direct_mode(indio_dev);
+               if (ret)
+                       return ret;
+-              ret = stm32_dfsdm_set_osrs(fl, 0, val);
++              ret = stm32_dfsdm_compute_osrs(fl, 0, val);
+               if (!ret)
+                       adc->oversamp = val;
+               iio_device_release_direct_mode(indio_dev);
+@@ -1277,11 +1365,11 @@ static int stm32_dfsdm_adc_chan_init_one(struct 
iio_dev *indio_dev,
+                                       BIT(IIO_CHAN_INFO_SAMP_FREQ);
+ 
+       if (adc->dev_data->type == DFSDM_AUDIO) {
+-              ch->scan_type.sign = 's';
+               ch->ext_info = dfsdm_adc_audio_ext_info;
+       } else {
+-              ch->scan_type.sign = 'u';
++              ch->scan_type.shift = 8;
+       }
++      ch->scan_type.sign = 's';
+       ch->scan_type.realbits = 24;
+       ch->scan_type.storagebits = 32;
+ 
+@@ -1327,8 +1415,8 @@ static int stm32_dfsdm_adc_init(struct iio_dev 
*indio_dev)
+       int ret, chan_idx;
+ 
+       adc->oversamp = DFSDM_DEFAULT_OVERSAMPLING;
+-      ret = stm32_dfsdm_set_osrs(&adc->dfsdm->fl_list[adc->fl_id], 0,
+-                                 adc->oversamp);
++      ret = stm32_dfsdm_compute_osrs(&adc->dfsdm->fl_list[adc->fl_id], 0,
++                                     adc->oversamp);
+       if (ret < 0)
+               return ret;
+ 
+diff --git a/drivers/iio/adc/stm32-dfsdm.h b/drivers/iio/adc/stm32-dfsdm.h
+index 8708394b0725..18b06ee6ed7b 100644
+--- a/drivers/iio/adc/stm32-dfsdm.h
++++ b/drivers/iio/adc/stm32-dfsdm.h
+@@ -243,19 +243,33 @@ enum stm32_dfsdm_sinc_order {
+ };
+ 
+ /**
+- * struct stm32_dfsdm_filter - structure relative to stm32 FDSDM filter
++ * struct stm32_dfsdm_filter_osr - DFSDM filter settings linked to 
oversampling
+  * @iosr: integrator oversampling
+  * @fosr: filter oversampling
+- * @ford: filter order
++ * @rshift: output sample right shift (hardware shift)
++ * @lshift: output sample left shift (software shift)
+  * @res: output sample resolution
++ * @max: output sample maximum positive value
++ */
++struct stm32_dfsdm_filter_osr {
++      unsigned int iosr;
++      unsigned int fosr;
++      unsigned int rshift;
++      unsigned int lshift;
++      u64 res;
++      s32 max;
++};
++
++/**
++ * struct stm32_dfsdm_filter - structure relative to stm32 FDSDM filter
++ * @ford: filter order
++ * @flo: filter oversampling structure
+  * @sync_mode: filter synchronized with filter 0
+  * @fast: filter fast mode
+  */
+ struct stm32_dfsdm_filter {
+-      unsigned int iosr;
+-      unsigned int fosr;
+       enum stm32_dfsdm_sinc_order ford;
+-      u64 res;
++      struct stm32_dfsdm_filter_osr flo;
+       unsigned int sync_mode;
+       unsigned int fast;
+ };
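
To see what the new stm32_dfsdm_compute_osrs() bookkeeping produces, the shift computation can be replayed in isolation. The sketch below (userspace; DFSDM_DATA_RES of 24 and the 8 channel-info LSBs are taken from the hunks above, everything else is illustrative) reproduces the bits/rshift/lshift/max arithmetic for one example setting, sinc3 with fosr = 64 and iosr = 1, where res = 64^3 = 2^18 and the driver ends up left-shifting samples by 5 bits.

#include <stdint.h>
#include <stdio.h>

#define DFSDM_DATA_RES 24       /* 24-bit output field above the 8 channel-info LSBs */

/* like the kernel's fls(): index of the highest set bit, 1-based, 0 for 0 */
static int fls_u64(uint64_t v)
{
        int n = 0;

        while (v) {
                n++;
                v >>= 1;
        }
        return n;
}

static void compute_shifts(uint64_t res)
{
        int bits = fls_u64(res);
        uint64_t max = res << 8;        /* data sits above the 8 channel-info LSBs */
        unsigned int rshift, lshift;
        int shift;

        if (res > (1ULL << (bits - 1))) /* resolution is not a power of two */
                bits++;
        else
                max--;

        shift = DFSDM_DATA_RES - bits;
        if (shift > 0) {                /* fewer than 24 bits: pad left in software */
                rshift = 0;
                lshift = shift;
        } else {                        /* 24 bits or more: drop to 23 bits in hardware first */
                rshift = 1 - shift;
                lshift = 1;
                max >>= rshift;
        }

        printf("res=%llu bits=%d rshift=%u lshift=%u max=%lld\n",
               (unsigned long long)res, bits, rshift, lshift,
               (long long)(int32_t)max);
}

int main(void)
{
        compute_shifts(64ULL * 64 * 64);        /* sinc3, fosr=64, iosr=1, m=1 -> res = 2^18 */
        return 0;
}
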
+diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
+index 3c3ad42f22bf..c92b405b7646 100644
+--- a/drivers/isdn/capi/capi.c
++++ b/drivers/isdn/capi/capi.c
+@@ -688,6 +688,9 @@ capi_write(struct file *file, const char __user *buf, 
size_t count, loff_t *ppos
+       if (!cdev->ap.applid)
+               return -ENODEV;
+ 
++      if (count < CAPIMSG_BASELEN)
++              return -EINVAL;
++
+       skb = alloc_skb(count, GFP_USER);
+       if (!skb)
+               return -ENOMEM;
+@@ -698,7 +701,8 @@ capi_write(struct file *file, const char __user *buf, 
size_t count, loff_t *ppos
+       }
+       mlen = CAPIMSG_LEN(skb->data);
+       if (CAPIMSG_CMD(skb->data) == CAPI_DATA_B3_REQ) {
+-              if ((size_t)(mlen + CAPIMSG_DATALEN(skb->data)) != count) {
++              if (count < CAPI_DATA_B3_REQ_LEN ||
++                  (size_t)(mlen + CAPIMSG_DATALEN(skb->data)) != count) {
+                       kfree_skb(skb);
+                       return -EINVAL;
+               }
+@@ -711,6 +715,10 @@ capi_write(struct file *file, const char __user *buf, 
size_t count, loff_t *ppos
+       CAPIMSG_SETAPPID(skb->data, cdev->ap.applid);
+ 
+       if (CAPIMSG_CMD(skb->data) == CAPI_DISCONNECT_B3_RESP) {
++              if (count < CAPI_DISCONNECT_B3_RESP_LEN) {
++                      kfree_skb(skb);
++                      return -EINVAL;
++              }
+               mutex_lock(&cdev->lock);
+               capincci_free(cdev, CAPIMSG_NCCI(skb->data));
+               mutex_unlock(&cdev->lock);
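
The new checks in capi_write() are pure length validation: the message must contain at least a full base header, and the two command-specific paths must not read fixed fields that the user buffer does not hold. A compact userspace sketch follows; the *_LEN values are expanded the same way the capicmd.h hunk later in this patch defines them (base header of 8 bytes plus the fixed parameters), and the enum and the -1 error code are illustrative only.

#include <stddef.h>
#include <stdio.h>

#define CAPI_MSG_BASELEN                8
#define CAPI_DATA_B3_REQ_LEN            (CAPI_MSG_BASELEN + 4 + 4 + 2 + 2 + 2)  /* 22 */
#define CAPI_DISCONNECT_B3_RESP_LEN     (CAPI_MSG_BASELEN + 4)                  /* 12 */

enum cmd { DATA_B3_REQ, DISCONNECT_B3_RESP, OTHER };

static int check_len(enum cmd cmd, size_t count)
{
        if (count < CAPI_MSG_BASELEN)           /* too short to even read the header */
                return -1;
        if (cmd == DATA_B3_REQ && count < CAPI_DATA_B3_REQ_LEN)
                return -1;                      /* fixed part of DATA_B3_REQ missing */
        if (cmd == DISCONNECT_B3_RESP && count < CAPI_DISCONNECT_B3_RESP_LEN)
                return -1;                      /* NCCI field would be read past the end */
        return 0;
}

int main(void)
{
        printf("DATA_B3_REQ, 16 bytes: %d (expect -1)\n",
               check_len(DATA_B3_REQ, 16));
        printf("DISCONNECT_B3_RESP, 12 bytes: %d (expect 0)\n",
               check_len(DISCONNECT_B3_RESP, 12));
        return 0;
}
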
+diff --git a/drivers/mmc/host/bcm2835.c b/drivers/mmc/host/bcm2835.c
+index 7e0d3a49c06d..bb31e13648d6 100644
+--- a/drivers/mmc/host/bcm2835.c
++++ b/drivers/mmc/host/bcm2835.c
+@@ -597,7 +597,7 @@ static void bcm2835_finish_request(struct bcm2835_host 
*host)
+       struct dma_chan *terminate_chan = NULL;
+       struct mmc_request *mrq;
+ 
+-      cancel_delayed_work_sync(&host->timeout_work);
++      cancel_delayed_work(&host->timeout_work);
+ 
+       mrq = host->mrq;
+ 
+diff --git a/drivers/mmc/host/sdhci-pci-o2micro.c 
b/drivers/mmc/host/sdhci-pci-o2micro.c
+index 9dc4548271b4..19944b0049db 100644
+--- a/drivers/mmc/host/sdhci-pci-o2micro.c
++++ b/drivers/mmc/host/sdhci-pci-o2micro.c
+@@ -432,7 +432,6 @@ int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot)
+                                       mmc_hostname(host->mmc));
+                               host->flags &= ~SDHCI_SIGNALING_330;
+                               host->flags |= SDHCI_SIGNALING_180;
+-                              host->quirks2 |= 
SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD;
+                               host->mmc->caps2 |= MMC_CAP2_NO_SD;
+                               host->mmc->caps2 |= MMC_CAP2_NO_SDIO;
+                               pci_write_config_dword(chip->pdev,
+@@ -682,6 +681,7 @@ static const struct sdhci_ops sdhci_pci_o2_ops = {
+ const struct sdhci_pci_fixes sdhci_o2 = {
+       .probe = sdhci_pci_o2_probe,
+       .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
++      .quirks2 = SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD,
+       .probe_slot = sdhci_pci_o2_probe_slot,
+ #ifdef CONFIG_PM_SLEEP
+       .resume = sdhci_pci_o2_resume,
+diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
+index c5ba13fae399..2f0b092d6dcc 100644
+--- a/drivers/mmc/host/tmio_mmc.h
++++ b/drivers/mmc/host/tmio_mmc.h
+@@ -163,6 +163,7 @@ struct tmio_mmc_host {
+       unsigned long           last_req_ts;
+       struct mutex            ios_lock;       /* protect set_ios() context */
+       bool                    native_hotplug;
++      bool                    runtime_synced;
+       bool                    sdio_irq_enabled;
+ 
+       /* Mandatory callback */
+diff --git a/drivers/mmc/host/tmio_mmc_core.c 
b/drivers/mmc/host/tmio_mmc_core.c
+index 84cb7d2aacdf..29ec78486e69 100644
+--- a/drivers/mmc/host/tmio_mmc_core.c
++++ b/drivers/mmc/host/tmio_mmc_core.c
+@@ -1258,20 +1258,22 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host)
+       /* See if we also get DMA */
+       tmio_mmc_request_dma(_host, pdata);
+ 
+-      pm_runtime_set_active(&pdev->dev);
+       pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
+       pm_runtime_use_autosuspend(&pdev->dev);
+       pm_runtime_enable(&pdev->dev);
++      pm_runtime_get_sync(&pdev->dev);
+ 
+       ret = mmc_add_host(mmc);
+       if (ret)
+               goto remove_host;
+ 
+       dev_pm_qos_expose_latency_limit(&pdev->dev, 100);
++      pm_runtime_put(&pdev->dev);
+ 
+       return 0;
+ 
+ remove_host:
++      pm_runtime_put_noidle(&pdev->dev);
+       tmio_mmc_host_remove(_host);
+       return ret;
+ }
+@@ -1282,12 +1284,11 @@ void tmio_mmc_host_remove(struct tmio_mmc_host *host)
+       struct platform_device *pdev = host->pdev;
+       struct mmc_host *mmc = host->mmc;
+ 
++      pm_runtime_get_sync(&pdev->dev);
++
+       if (host->pdata->flags & TMIO_MMC_SDIO_IRQ)
+               sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0000);
+ 
+-      if (!host->native_hotplug)
+-              pm_runtime_get_sync(&pdev->dev);
+-
+       dev_pm_qos_hide_latency_limit(&pdev->dev);
+ 
+       mmc_remove_host(mmc);
+@@ -1296,6 +1297,8 @@ void tmio_mmc_host_remove(struct tmio_mmc_host *host)
+       tmio_mmc_release_dma(host);
+ 
+       pm_runtime_dont_use_autosuspend(&pdev->dev);
++      if (host->native_hotplug)
++              pm_runtime_put_noidle(&pdev->dev);
+       pm_runtime_put_sync(&pdev->dev);
+       pm_runtime_disable(&pdev->dev);
+ }
+@@ -1340,6 +1343,11 @@ int tmio_mmc_host_runtime_resume(struct device *dev)
+ {
+       struct tmio_mmc_host *host = dev_get_drvdata(dev);
+ 
++      if (!host->runtime_synced) {
++              host->runtime_synced = true;
++              return 0;
++      }
++
+       tmio_mmc_clk_enable(host);
+       tmio_mmc_hw_reset(host->mmc);
+ 
+diff --git a/drivers/mtd/nand/raw/mtk_nand.c b/drivers/mtd/nand/raw/mtk_nand.c
+index 23fe19397315..d6a1354f4f62 100644
+--- a/drivers/mtd/nand/raw/mtk_nand.c
++++ b/drivers/mtd/nand/raw/mtk_nand.c
+@@ -853,19 +853,21 @@ static int mtk_nfc_write_oob_std(struct nand_chip *chip, 
int page)
+       return mtk_nfc_write_page_raw(chip, NULL, 1, page);
+ }
+ 
+-static int mtk_nfc_update_ecc_stats(struct mtd_info *mtd, u8 *buf, u32 
sectors)
++static int mtk_nfc_update_ecc_stats(struct mtd_info *mtd, u8 *buf, u32 start,
++                                  u32 sectors)
+ {
+       struct nand_chip *chip = mtd_to_nand(mtd);
+       struct mtk_nfc *nfc = nand_get_controller_data(chip);
+       struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+       struct mtk_ecc_stats stats;
++      u32 reg_size = mtk_nand->fdm.reg_size;
+       int rc, i;
+ 
+       rc = nfi_readl(nfc, NFI_STA) & STA_EMP_PAGE;
+       if (rc) {
+               memset(buf, 0xff, sectors * chip->ecc.size);
+               for (i = 0; i < sectors; i++)
+-                      memset(oob_ptr(chip, i), 0xff, mtk_nand->fdm.reg_size);
++                      memset(oob_ptr(chip, start + i), 0xff, reg_size);
+               return 0;
+       }
+ 
+@@ -885,7 +887,7 @@ static int mtk_nfc_read_subpage(struct mtd_info *mtd, 
struct nand_chip *chip,
+       u32 spare = mtk_nand->spare_per_sector;
+       u32 column, sectors, start, end, reg;
+       dma_addr_t addr;
+-      int bitflips;
++      int bitflips = 0;
+       size_t len;
+       u8 *buf;
+       int rc;
+@@ -952,14 +954,11 @@ static int mtk_nfc_read_subpage(struct mtd_info *mtd, 
struct nand_chip *chip,
+       if (rc < 0) {
+               dev_err(nfc->dev, "subpage done timeout\n");
+               bitflips = -EIO;
+-      } else {
+-              bitflips = 0;
+-              if (!raw) {
+-                      rc = mtk_ecc_wait_done(nfc->ecc, ECC_DECODE);
+-                      bitflips = rc < 0 ? -ETIMEDOUT :
+-                              mtk_nfc_update_ecc_stats(mtd, buf, sectors);
+-                      mtk_nfc_read_fdm(chip, start, sectors);
+-              }
++      } else if (!raw) {
++              rc = mtk_ecc_wait_done(nfc->ecc, ECC_DECODE);
++              bitflips = rc < 0 ? -ETIMEDOUT :
++                      mtk_nfc_update_ecc_stats(mtd, buf, start, sectors);
++              mtk_nfc_read_fdm(chip, start, sectors);
+       }
+ 
+       dma_unmap_single(nfc->dev, addr, len, DMA_FROM_DEVICE);
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 
b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+index f7c049559c1a..f9f473ae4abe 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+@@ -36,6 +36,7 @@
+ #include <net/vxlan.h>
+ #include <net/mpls.h>
+ #include <net/xdp_sock.h>
++#include <net/xfrm.h>
+ 
+ #include "ixgbe.h"
+ #include "ixgbe_common.h"
+@@ -2621,7 +2622,7 @@ adjust_by_size:
+               /* 16K ints/sec to 9.2K ints/sec */
+               avg_wire_size *= 15;
+               avg_wire_size += 11452;
+-      } else if (avg_wire_size <= 1980) {
++      } else if (avg_wire_size < 1968) {
+               /* 9.2K ints/sec to 8K ints/sec */
+               avg_wire_size *= 5;
+               avg_wire_size += 22420;
+@@ -2654,6 +2655,8 @@ adjust_by_size:
+       case IXGBE_LINK_SPEED_2_5GB_FULL:
+       case IXGBE_LINK_SPEED_1GB_FULL:
+       case IXGBE_LINK_SPEED_10_FULL:
++              if (avg_wire_size > 8064)
++                      avg_wire_size = 8064;
+               itr += DIV_ROUND_UP(avg_wire_size,
+                                   IXGBE_ITR_ADAPTIVE_MIN_INC * 64) *
+                      IXGBE_ITR_ADAPTIVE_MIN_INC;
+@@ -8691,7 +8694,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
+ #endif /* IXGBE_FCOE */
+ 
+ #ifdef CONFIG_IXGBE_IPSEC
+-      if (secpath_exists(skb) &&
++      if (xfrm_offload(skb) &&
+           !ixgbe_ipsec_tx(tx_ring, first, &ipsec_tx))
+               goto out_drop;
+ #endif
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c 
b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+index bfe95ce0bd7f..1f5fe115bd99 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+@@ -679,19 +679,17 @@ static void ixgbe_clean_xdp_tx_buffer(struct ixgbe_ring 
*tx_ring,
+ bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
+                           struct ixgbe_ring *tx_ring, int napi_budget)
+ {
++      u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
+       unsigned int total_packets = 0, total_bytes = 0;
+-      u32 i = tx_ring->next_to_clean, xsk_frames = 0;
+-      unsigned int budget = q_vector->tx.work_limit;
+       struct xdp_umem *umem = tx_ring->xsk_umem;
+       union ixgbe_adv_tx_desc *tx_desc;
+       struct ixgbe_tx_buffer *tx_bi;
+-      bool xmit_done;
++      u32 xsk_frames = 0;
+ 
+-      tx_bi = &tx_ring->tx_buffer_info[i];
+-      tx_desc = IXGBE_TX_DESC(tx_ring, i);
+-      i -= tx_ring->count;
++      tx_bi = &tx_ring->tx_buffer_info[ntc];
++      tx_desc = IXGBE_TX_DESC(tx_ring, ntc);
+ 
+-      do {
++      while (ntc != ntu) {
+               if (!(tx_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
+                       break;
+ 
+@@ -708,22 +706,18 @@ bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector 
*q_vector,
+ 
+               tx_bi++;
+               tx_desc++;
+-              i++;
+-              if (unlikely(!i)) {
+-                      i -= tx_ring->count;
++              ntc++;
++              if (unlikely(ntc == tx_ring->count)) {
++                      ntc = 0;
+                       tx_bi = tx_ring->tx_buffer_info;
+                       tx_desc = IXGBE_TX_DESC(tx_ring, 0);
+               }
+ 
+               /* issue prefetch for next Tx descriptor */
+               prefetch(tx_desc);
++      }
+ 
+-              /* update budget accounting */
+-              budget--;
+-      } while (likely(budget));
+-
+-      i += tx_ring->count;
+-      tx_ring->next_to_clean = i;
++      tx_ring->next_to_clean = ntc;
+ 
+       u64_stats_update_begin(&tx_ring->syncp);
+       tx_ring->stats.bytes += total_bytes;
+@@ -735,8 +729,7 @@ bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector 
*q_vector,
+       if (xsk_frames)
+               xsk_umem_complete_tx(umem, xsk_frames);
+ 
+-      xmit_done = ixgbe_xmit_zc(tx_ring, q_vector->tx.work_limit);
+-      return budget > 0 && xmit_done;
++      return ixgbe_xmit_zc(tx_ring, q_vector->tx.work_limit);
+ }
+ 
+ int ixgbe_xsk_async_xmit(struct net_device *dev, u32 qid)
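
The ixgbe_clean_xdp_tx_irq() rewrite above drops the budget arithmetic and simply walks the descriptor ring from next_to_clean towards next_to_use, wrapping the index at the ring size. The following userspace sketch shows that walk on a toy ring; the "done" field stands in for the IXGBE_TXD_STAT_DD writeback bit and the ring size is arbitrary.

#include <stdbool.h>
#include <stdio.h>

#define RING_COUNT 8

struct desc { bool done; };

struct ring {
        struct desc desc[RING_COUNT];
        unsigned int next_to_clean;     /* ntc: first entry not yet reclaimed */
        unsigned int next_to_use;       /* ntu: first entry not yet submitted */
};

static unsigned int clean_ring(struct ring *r)
{
        unsigned int ntc = r->next_to_clean, ntu = r->next_to_use;
        unsigned int cleaned = 0;

        while (ntc != ntu) {                    /* never walk past what was submitted */
                if (!r->desc[ntc].done)         /* hardware has not finished this one */
                        break;
                cleaned++;
                if (++ntc == RING_COUNT)        /* wrap the index at the ring size */
                        ntc = 0;
        }
        r->next_to_clean = ntc;
        return cleaned;
}

int main(void)
{
        struct ring r = { .next_to_clean = 6, .next_to_use = 2 };

        for (unsigned int i = 6; i != 2; i = (i + 1) % RING_COUNT)
                r.desc[i].done = true;          /* entries 6,7,0,1 completed */
        printf("cleaned %u, ntc now %u\n", clean_ring(&r), r.next_to_clean);
        return 0;
}
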
+diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 
b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+index d189ed247665..ac6c18821958 100644
+--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
++++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+@@ -30,6 +30,7 @@
+ #include <linux/bpf.h>
+ #include <linux/bpf_trace.h>
+ #include <linux/atomic.h>
++#include <net/xfrm.h>
+ 
+ #include "ixgbevf.h"
+ 
+@@ -4158,7 +4159,7 @@ static int ixgbevf_xmit_frame_ring(struct sk_buff *skb,
+       first->protocol = vlan_get_protocol(skb);
+ 
+ #ifdef CONFIG_IXGBEVF_IPSEC
+-      if (secpath_exists(skb) && !ixgbevf_ipsec_tx(tx_ring, first, &ipsec_tx))
++      if (xfrm_offload(skb) && !ixgbevf_ipsec_tx(tx_ring, first, &ipsec_tx))
+               goto out_drop;
+ #endif
+       tso = ixgbevf_tso(tx_ring, first, &hdr_len, &ipsec_tx);
+diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
+index c45ee6e3fe01..a094d7197015 100644
+--- a/drivers/net/phy/phylink.c
++++ b/drivers/net/phy/phylink.c
+@@ -356,8 +356,8 @@ static void phylink_get_fixed_state(struct phylink *pl, 
struct phylink_link_stat
+  *  Local device  Link partner
+  *  Pause AsymDir Pause AsymDir Result
+  *    1     X       1     X     TX+RX
+- *    0     1       1     1     RX
+- *    1     1       0     1     TX
++ *    0     1       1     1     TX
++ *    1     1       0     1     RX
+  */
+ static void phylink_resolve_flow(struct phylink *pl,
+                                struct phylink_link_state *state)
+@@ -378,7 +378,7 @@ static void phylink_resolve_flow(struct phylink *pl,
+                       new_pause = MLO_PAUSE_TX | MLO_PAUSE_RX;
+               else if (pause & MLO_PAUSE_ASYM)
+                       new_pause = state->pause & MLO_PAUSE_SYM ?
+-                               MLO_PAUSE_RX : MLO_PAUSE_TX;
++                               MLO_PAUSE_TX : MLO_PAUSE_RX;
+       } else {
+               new_pause = pl->link_config.pause & MLO_PAUSE_TXRX_MASK;
+       }
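
The phylink change swaps the TX/RX results in the asymmetric-pause rows, both in the comment table and in phylink_resolve_flow(). The corrected table can be written out as a small standalone function, sketched below for userspace; TX here means "we transmit pause frames" and RX means "we act on received pause frames", and the flag values are illustrative rather than the MLO_PAUSE_* constants.

#include <stdbool.h>
#include <stdio.h>

enum { FLOW_NONE = 0, FLOW_TX = 1, FLOW_RX = 2 };

static int resolve_flow(bool lcl_pause, bool lcl_asym,
                        bool prt_pause, bool prt_asym)
{
        if (lcl_pause && prt_pause)                             /* 1 X | 1 X -> TX+RX */
                return FLOW_TX | FLOW_RX;
        if (!lcl_pause && lcl_asym && prt_pause && prt_asym)    /* 0 1 | 1 1 -> TX */
                return FLOW_TX;
        if (lcl_pause && lcl_asym && !prt_pause && prt_asym)    /* 1 1 | 0 1 -> RX */
                return FLOW_RX;
        return FLOW_NONE;
}

int main(void)
{
        printf("0 1 | 1 1 -> %d (expect TX=1)\n",
               resolve_flow(false, true, true, true));
        printf("1 1 | 0 1 -> %d (expect RX=2)\n",
               resolve_flow(true, true, false, true));
        return 0;
}
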
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index 192ac47fd055..3f42cd433605 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -788,7 +788,8 @@ static void tun_detach_all(struct net_device *dev)
+ }
+ 
+ static int tun_attach(struct tun_struct *tun, struct file *file,
+-                    bool skip_filter, bool napi, bool napi_frags)
++                    bool skip_filter, bool napi, bool napi_frags,
++                    bool publish_tun)
+ {
+       struct tun_file *tfile = file->private_data;
+       struct net_device *dev = tun->dev;
+@@ -871,7 +872,8 @@ static int tun_attach(struct tun_struct *tun, struct file 
*file,
+        * initialized tfile; otherwise we risk using half-initialized
+        * object.
+        */
+-      rcu_assign_pointer(tfile->tun, tun);
++      if (publish_tun)
++              rcu_assign_pointer(tfile->tun, tun);
+       rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
+       tun->numqueues++;
+       tun_set_real_num_queues(tun);
+@@ -2731,7 +2733,7 @@ static int tun_set_iff(struct net *net, struct file 
*file, struct ifreq *ifr)
+ 
+               err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER,
+                                ifr->ifr_flags & IFF_NAPI,
+-                               ifr->ifr_flags & IFF_NAPI_FRAGS);
++                               ifr->ifr_flags & IFF_NAPI_FRAGS, true);
+               if (err < 0)
+                       return err;
+ 
+@@ -2830,13 +2832,17 @@ static int tun_set_iff(struct net *net, struct file 
*file, struct ifreq *ifr)
+ 
+               INIT_LIST_HEAD(&tun->disabled);
+               err = tun_attach(tun, file, false, ifr->ifr_flags & IFF_NAPI,
+-                               ifr->ifr_flags & IFF_NAPI_FRAGS);
++                               ifr->ifr_flags & IFF_NAPI_FRAGS, false);
+               if (err < 0)
+                       goto err_free_flow;
+ 
+               err = register_netdevice(tun->dev);
+               if (err < 0)
+                       goto err_detach;
++              /* free_netdev() won't check refcnt; to avoid a race
++               * with dev_put() we need to publish tun after registration.
++               */
++              rcu_assign_pointer(tfile->tun, tun);
+       }
+ 
+       netif_carrier_on(tun->dev);
+@@ -2979,7 +2985,7 @@ static int tun_set_queue(struct file *file, struct ifreq 
*ifr)
+               if (ret < 0)
+                       goto unlock;
+               ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI,
+-                               tun->flags & IFF_NAPI_FRAGS);
++                               tun->flags & IFF_NAPI_FRAGS, true);
+       } else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
+               tun = rtnl_dereference(tfile->tun);
+               if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached)
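
The tun change delays publishing tfile->tun until register_netdevice() has succeeded, so no reader can reach a device that may still be torn down by free_netdev(). Below is a userspace sketch of that ordering, using C11 release/acquire atomics as a stand-in for rcu_assign_pointer()/rcu_dereference(); the stub registration function and the structure layout are invented for illustration.

#include <stdatomic.h>
#include <stdio.h>

struct tun { int registered; };

static _Atomic(struct tun *) published_tun;     /* plays the role of tfile->tun */

static int register_netdevice_stub(struct tun *tun)
{
        tun->registered = 1;                    /* pretend registration succeeded */
        return 0;
}

static int create_and_publish(struct tun *tun)
{
        int err = register_netdevice_stub(tun);

        if (err < 0)
                return err;                     /* nothing published: no stale readers */

        /* only now can readers find the device */
        atomic_store_explicit(&published_tun, tun, memory_order_release);
        return 0;
}

int main(void)
{
        struct tun tun = { 0 };
        struct tun *t;

        create_and_publish(&tun);
        t = atomic_load_explicit(&published_tun, memory_order_acquire);
        printf("visible: %s\n", t && t->registered ? "registered tun" : "nothing");
        return 0;
}
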
+diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
+index 8458e88c18e9..32f53de5b1fe 100644
+--- a/drivers/net/usb/cdc_ether.c
++++ b/drivers/net/usb/cdc_ether.c
+@@ -206,7 +206,15 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct 
usb_interface *intf)
+               goto bad_desc;
+       }
+ skip:
+-      if (rndis && header.usb_cdc_acm_descriptor &&
++      /* Communication class functions with bmCapabilities are not
++       * RNDIS.  But some Wireless class RNDIS functions use
++       * bmCapabilities for their own purpose. The failsafe is
++       * therefore applied only to Communication class RNDIS
++       * functions.  The rndis test is redundant, but a cheap
++       * optimization.
++       */
++      if (rndis && is_rndis(&intf->cur_altsetting->desc) &&
++          header.usb_cdc_acm_descriptor &&
+           header.usb_cdc_acm_descriptor->bmCapabilities) {
+               dev_dbg(&intf->dev,
+                       "ACM capabilities %02x, not really RNDIS?\n",
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c 
b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
+index 80e6b211f60b..8d7a47d1b205 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
+@@ -77,11 +77,12 @@ static int mt7615_add_interface(struct ieee80211_hw *hw,
+               goto out;
+       }
+ 
+-      mvif->omac_idx = get_omac_idx(vif->type, dev->omac_mask);
+-      if (mvif->omac_idx < 0) {
++      idx = get_omac_idx(vif->type, dev->omac_mask);
++      if (idx < 0) {
+               ret = -ENOSPC;
+               goto out;
+       }
++      mvif->omac_idx = idx;
+ 
+       /* TODO: DBDC support. Use band 0 and wmm 0 for now */
+       mvif->band_idx = 0;
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 
b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
+index ea67c6022fe6..dc1301effa24 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
+@@ -1270,7 +1270,6 @@ int mt7615_mcu_set_bcn(struct mt7615_dev *dev, struct 
ieee80211_vif *vif,
+       mt7615_mac_write_txwi(dev, (__le32 *)(req.pkt), skb, wcid, NULL,
+                             0, NULL);
+       memcpy(req.pkt + MT_TXD_SIZE, skb->data, skb->len);
+-      dev_kfree_skb(skb);
+ 
+       req.omac_idx = mvif->omac_idx;
+       req.enable = en;
+@@ -1281,6 +1280,7 @@ int mt7615_mcu_set_bcn(struct mt7615_dev *dev, struct 
ieee80211_vif *vif,
+       req.pkt_len = cpu_to_le16(MT_TXD_SIZE + skb->len);
+       req.tim_ie_pos = cpu_to_le16(MT_TXD_SIZE + tim_off);
+ 
++      dev_kfree_skb(skb);
+       skb = mt7615_mcu_msg_alloc(&req, sizeof(req));
+ 
+       return mt7615_mcu_msg_send(dev, skb, MCU_EXT_CMD_BCN_OFFLOAD,
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c 
b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
+index 40c0d536e20d..9d4426f6905f 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
++++ b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
+@@ -59,6 +59,11 @@ static void mt76x0_set_chip_cap(struct mt76x02_dev *dev)
+               dev_dbg(dev->mt76.dev, "mask out 2GHz support\n");
+       }
+ 
++      if (is_mt7630(dev)) {
++              dev->mt76.cap.has_5ghz = false;
++              dev_dbg(dev->mt76.dev, "mask out 5GHz support\n");
++      }
++
+       if (!mt76x02_field_valid(nic_conf1 & 0xff))
+               nic_conf1 &= 0xff00;
+ 
+diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c 
b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
+index 621cd4ce69e2..5673dd858811 100644
+--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
++++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
+@@ -4156,24 +4156,18 @@ static void rt2800_config_channel(struct rt2x00_dev 
*rt2x00dev,
+       switch (rt2x00dev->default_ant.rx_chain_num) {
+       case 3:
+               /* Turn on tertiary LNAs */
+-              rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A2_EN,
+-                                 rf->channel > 14);
+-              rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G2_EN,
+-                                 rf->channel <= 14);
++              rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A2_EN, 1);
++              rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G2_EN, 1);
+               /* fall-through */
+       case 2:
+               /* Turn on secondary LNAs */
+-              rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A1_EN,
+-                                 rf->channel > 14);
+-              rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G1_EN,
+-                                 rf->channel <= 14);
++              rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A1_EN, 1);
++              rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G1_EN, 1);
+               /* fall-through */
+       case 1:
+               /* Turn on primary LNAs */
+-              rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A0_EN,
+-                                 rf->channel > 14);
+-              rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G0_EN,
+-                                 rf->channel <= 14);
++              rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A0_EN, 1);
++              rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G0_EN, 1);
+               break;
+       }
+ 
+diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c 
b/drivers/net/wireless/rsi/rsi_91x_usb.c
+index f5048d4b8cb6..760eaffeebd6 100644
+--- a/drivers/net/wireless/rsi/rsi_91x_usb.c
++++ b/drivers/net/wireless/rsi/rsi_91x_usb.c
+@@ -645,7 +645,6 @@ fail_rx:
+       kfree(rsi_dev->tx_buffer);
+ 
+ fail_eps:
+-      kfree(rsi_dev);
+ 
+       return status;
+ }
+diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
+index 74c3df250d9c..9c8d619d5979 100644
+--- a/drivers/pci/pci-driver.c
++++ b/drivers/pci/pci-driver.c
+@@ -399,7 +399,8 @@ void __weak pcibios_free_irq(struct pci_dev *dev)
+ #ifdef CONFIG_PCI_IOV
+ static inline bool pci_device_can_probe(struct pci_dev *pdev)
+ {
+-      return (!pdev->is_virtfn || pdev->physfn->sriov->drivers_autoprobe);
++      return (!pdev->is_virtfn || pdev->physfn->sriov->drivers_autoprobe ||
++              pdev->driver_override);
+ }
+ #else
+ static inline bool pci_device_can_probe(struct pci_dev *pdev)
+diff --git a/drivers/platform/x86/pcengines-apuv2.c 
b/drivers/platform/x86/pcengines-apuv2.c
+index 7a8cbfb5d213..d35a73a24b3c 100644
+--- a/drivers/platform/x86/pcengines-apuv2.c
++++ b/drivers/platform/x86/pcengines-apuv2.c
+@@ -93,7 +93,7 @@ struct gpiod_lookup_table gpios_led_table = {
+ 
+ static struct gpio_keys_button apu2_keys_buttons[] = {
+       {
+-              .code                   = KEY_SETUP,
++              .code                   = KEY_RESTART,
+               .active_low             = 1,
+               .desc                   = "front button",
+               .type                   = EV_KEY,
+diff --git a/drivers/platform/x86/pmc_atom.c b/drivers/platform/x86/pmc_atom.c
+index be802fd2182d..551ed44dd361 100644
+--- a/drivers/platform/x86/pmc_atom.c
++++ b/drivers/platform/x86/pmc_atom.c
+@@ -412,6 +412,14 @@ static const struct dmi_system_id critclk_systems[] = {
+                       DMI_MATCH(DMI_BOARD_NAME, "CB3163"),
+               },
+       },
++      {
++              /* pmc_plt_clk* - are used for ethernet controllers */
++              .ident = "Beckhoff CB4063",
++              .matches = {
++                      DMI_MATCH(DMI_SYS_VENDOR, "Beckhoff Automation"),
++                      DMI_MATCH(DMI_BOARD_NAME, "CB4063"),
++              },
++      },
+       {
+               /* pmc_plt_clk* - are used for ethernet controllers */
+               .ident = "Beckhoff CB6263",
+diff --git a/drivers/regulator/twl-regulator.c 
b/drivers/regulator/twl-regulator.c
+index 6fa15b2d6fb3..866b4dd01da9 100644
+--- a/drivers/regulator/twl-regulator.c
++++ b/drivers/regulator/twl-regulator.c
+@@ -359,6 +359,17 @@ static const u16 VINTANA2_VSEL_table[] = {
+       2500, 2750,
+ };
+ 
++/* 600mV to 1450mV in 12.5 mV steps */
++static const struct regulator_linear_range VDD1_ranges[] = {
++      REGULATOR_LINEAR_RANGE(600000, 0, 68, 12500)
++};
++
++/* 600mV to 1450mV in 12.5 mV steps, everything above = 1500mV */
++static const struct regulator_linear_range VDD2_ranges[] = {
++      REGULATOR_LINEAR_RANGE(600000, 0, 68, 12500),
++      REGULATOR_LINEAR_RANGE(1500000, 69, 69, 12500)
++};
++
+ static int twl4030ldo_list_voltage(struct regulator_dev *rdev, unsigned index)
+ {
+       struct twlreg_info      *info = rdev_get_drvdata(rdev);
+@@ -427,6 +438,8 @@ static int twl4030smps_get_voltage(struct regulator_dev 
*rdev)
+ }
+ 
+ static const struct regulator_ops twl4030smps_ops = {
++      .list_voltage   = regulator_list_voltage_linear_range,
++
+       .set_voltage    = twl4030smps_set_voltage,
+       .get_voltage    = twl4030smps_get_voltage,
+ };
+@@ -466,7 +479,8 @@ static const struct twlreg_info TWL4030_INFO_##label = { \
+               }, \
+       }
+ 
+-#define TWL4030_ADJUSTABLE_SMPS(label, offset, num, turnon_delay, remap_conf) 
\
++#define TWL4030_ADJUSTABLE_SMPS(label, offset, num, turnon_delay, remap_conf, 
\
++              n_volt) \
+ static const struct twlreg_info TWL4030_INFO_##label = { \
+       .base = offset, \
+       .id = num, \
+@@ -479,6 +493,9 @@ static const struct twlreg_info TWL4030_INFO_##label = { \
+               .owner = THIS_MODULE, \
+               .enable_time = turnon_delay, \
+               .of_map_mode = twl4030reg_map_mode, \
++              .n_voltages = n_volt, \
++              .n_linear_ranges = ARRAY_SIZE(label ## _ranges), \
++              .linear_ranges = label ## _ranges, \
+               }, \
+       }
+ 
+@@ -518,8 +535,8 @@ TWL4030_ADJUSTABLE_LDO(VSIM, 0x37, 9, 100, 0x00);
+ TWL4030_ADJUSTABLE_LDO(VDAC, 0x3b, 10, 100, 0x08);
+ TWL4030_ADJUSTABLE_LDO(VINTANA2, 0x43, 12, 100, 0x08);
+ TWL4030_ADJUSTABLE_LDO(VIO, 0x4b, 14, 1000, 0x08);
+-TWL4030_ADJUSTABLE_SMPS(VDD1, 0x55, 15, 1000, 0x08);
+-TWL4030_ADJUSTABLE_SMPS(VDD2, 0x63, 16, 1000, 0x08);
++TWL4030_ADJUSTABLE_SMPS(VDD1, 0x55, 15, 1000, 0x08, 68);
++TWL4030_ADJUSTABLE_SMPS(VDD2, 0x63, 16, 1000, 0x08, 69);
+ /* VUSBCP is managed *only* by the USB subchip */
+ TWL4030_FIXED_LDO(VINTANA1, 0x3f, 1500, 11, 100, 0x08);
+ TWL4030_FIXED_LDO(VINTDIG, 0x47, 1500, 13, 100, 0x08);
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index 6c8297bcfeb7..1bfd7e34f31e 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -4985,7 +4985,7 @@ static int log_conflicting_inodes(struct 
btrfs_trans_handle *trans,
+                                                     BTRFS_I(inode),
+                                                     LOG_OTHER_INODE_ALL,
+                                                     0, LLONG_MAX, ctx);
+-                                      iput(inode);
++                                      btrfs_add_delayed_iput(inode);
+                               }
+                       }
+                       continue;
+@@ -5000,7 +5000,7 @@ static int log_conflicting_inodes(struct 
btrfs_trans_handle *trans,
+               ret = btrfs_log_inode(trans, root, BTRFS_I(inode),
+                                     LOG_OTHER_INODE, 0, LLONG_MAX, ctx);
+               if (ret) {
+-                      iput(inode);
++                      btrfs_add_delayed_iput(inode);
+                       continue;
+               }
+ 
+@@ -5009,7 +5009,7 @@ static int log_conflicting_inodes(struct 
btrfs_trans_handle *trans,
+               key.offset = 0;
+               ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+               if (ret < 0) {
+-                      iput(inode);
++                      btrfs_add_delayed_iput(inode);
+                       continue;
+               }
+ 
+@@ -5056,7 +5056,7 @@ static int log_conflicting_inodes(struct 
btrfs_trans_handle *trans,
+                       }
+                       path->slots[0]++;
+               }
+-              iput(inode);
++              btrfs_add_delayed_iput(inode);
+       }
+ 
+       return ret;
+@@ -5689,7 +5689,7 @@ process_leaf:
+                       }
+ 
+                       if (btrfs_inode_in_log(BTRFS_I(di_inode), 
trans->transid)) {
+-                              iput(di_inode);
++                              btrfs_add_delayed_iput(di_inode);
+                               break;
+                       }
+ 
+@@ -5701,7 +5701,7 @@ process_leaf:
+                       if (!ret &&
+                           btrfs_must_commit_transaction(trans, 
BTRFS_I(di_inode)))
+                               ret = 1;
+-                      iput(di_inode);
++                      btrfs_add_delayed_iput(di_inode);
+                       if (ret)
+                               goto next_dir_inode;
+                       if (ctx->log_new_dentries) {
+@@ -5848,7 +5848,7 @@ static int btrfs_log_all_parents(struct 
btrfs_trans_handle *trans,
+                       if (!ret && ctx && ctx->log_new_dentries)
+                               ret = log_new_dir_dentries(trans, root,
+                                                  BTRFS_I(dir_inode), ctx);
+-                      iput(dir_inode);
++                      btrfs_add_delayed_iput(dir_inode);
+                       if (ret)
+                               goto out;
+               }
+@@ -5891,7 +5891,7 @@ static int log_new_ancestors(struct btrfs_trans_handle 
*trans,
+                       ret = btrfs_log_inode(trans, root, BTRFS_I(inode),
+                                             LOG_INODE_EXISTS,
+                                             0, LLONG_MAX, ctx);
+-              iput(inode);
++              btrfs_add_delayed_iput(inode);
+               if (ret)
+                       return ret;
+ 
+diff --git a/fs/ubifs/tnc.c b/fs/ubifs/tnc.c
+index f5a823cb0e43..e8e7b0e9532e 100644
+--- a/fs/ubifs/tnc.c
++++ b/fs/ubifs/tnc.c
+@@ -1158,8 +1158,8 @@ static struct ubifs_znode *dirty_cow_bottom_up(struct 
ubifs_info *c,
+  *   o exact match, i.e. the found zero-level znode contains key @key, then %1
+  *     is returned and slot number of the matched branch is stored in @n;
+  *   o not exact match, which means that zero-level znode does not contain
+- *     @key, then %0 is returned and slot number of the closest branch is 
stored
+- *     in @n;
++ *     @key, then %0 is returned and slot number of the closest branch or %-1
++ *     is stored in @n; in this case, calling tnc_next() is mandatory.
+  *   o @key is so small that it is even less than the lowest key of the
+  *     leftmost zero-level node, then %0 is returned and %0 is stored in @n.
+  *
+@@ -1882,13 +1882,19 @@ int ubifs_tnc_lookup_nm(struct ubifs_info *c, const 
union ubifs_key *key,
+ 
+ static int search_dh_cookie(struct ubifs_info *c, const union ubifs_key *key,
+                           struct ubifs_dent_node *dent, uint32_t cookie,
+-                          struct ubifs_znode **zn, int *n)
++                          struct ubifs_znode **zn, int *n, int exact)
+ {
+       int err;
+       struct ubifs_znode *znode = *zn;
+       struct ubifs_zbranch *zbr;
+       union ubifs_key *dkey;
+ 
++      if (!exact) {
++              err = tnc_next(c, &znode, n);
++              if (err)
++                      return err;
++      }
++
+       for (;;) {
+               zbr = &znode->zbranch[*n];
+               dkey = &zbr->key;
+@@ -1930,7 +1936,7 @@ static int do_lookup_dh(struct ubifs_info *c, const 
union ubifs_key *key,
+       if (unlikely(err < 0))
+               goto out_unlock;
+ 
+-      err = search_dh_cookie(c, key, dent, cookie, &znode, &n);
++      err = search_dh_cookie(c, key, dent, cookie, &znode, &n, err);
+ 
+ out_unlock:
+       mutex_unlock(&c->tnc_mutex);
+@@ -2723,7 +2729,7 @@ int ubifs_tnc_remove_dh(struct ubifs_info *c, const 
union ubifs_key *key,
+               if (unlikely(err < 0))
+                       goto out_free;
+ 
+-              err = search_dh_cookie(c, key, dent, cookie, &znode, &n);
++              err = search_dh_cookie(c, key, dent, cookie, &znode, &n, err);
+               if (err)
+                       goto out_free;
+       }
+diff --git a/include/linux/phy_fixed.h b/include/linux/phy_fixed.h
+index 1e5d86ebdaeb..52bc8e487ef7 100644
+--- a/include/linux/phy_fixed.h
++++ b/include/linux/phy_fixed.h
+@@ -11,6 +11,7 @@ struct fixed_phy_status {
+ };
+ 
+ struct device_node;
++struct gpio_desc;
+ 
+ #if IS_ENABLED(CONFIG_FIXED_PHY)
+ extern int fixed_phy_change_carrier(struct net_device *dev, bool new_carrier);
+diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
+index 2bcef4c70183..4230b8532adb 100644
+--- a/include/linux/syscalls.h
++++ b/include/linux/syscalls.h
+@@ -1397,4 +1397,23 @@ static inline unsigned int ksys_personality(unsigned 
int personality)
+       return old;
+ }
+ 
++/* for __ARCH_WANT_SYS_IPC */
++long ksys_semtimedop(int semid, struct sembuf __user *tsops,
++                   unsigned int nsops,
++                   const struct __kernel_timespec __user *timeout);
++long ksys_semget(key_t key, int nsems, int semflg);
++long ksys_old_semctl(int semid, int semnum, int cmd, unsigned long arg);
++long ksys_msgget(key_t key, int msgflg);
++long ksys_old_msgctl(int msqid, int cmd, struct msqid_ds __user *buf);
++long ksys_msgrcv(int msqid, struct msgbuf __user *msgp, size_t msgsz,
++               long msgtyp, int msgflg);
++long ksys_msgsnd(int msqid, struct msgbuf __user *msgp, size_t msgsz,
++               int msgflg);
++long ksys_shmget(key_t key, size_t size, int shmflg);
++long ksys_shmdt(char __user *shmaddr);
++long ksys_old_shmctl(int shmid, int cmd, struct shmid_ds __user *buf);
++long compat_ksys_semtimedop(int semid, struct sembuf __user *tsems,
++                          unsigned int nsops,
++                          const struct old_timespec32 __user *timeout);
++
+ #endif
+diff --git a/include/uapi/asm-generic/unistd.h 
b/include/uapi/asm-generic/unistd.h
+index a87904daf103..ae31a7f87ec8 100644
+--- a/include/uapi/asm-generic/unistd.h
++++ b/include/uapi/asm-generic/unistd.h
+@@ -569,7 +569,7 @@ __SYSCALL(__NR_semget, sys_semget)
+ __SC_COMP(__NR_semctl, sys_semctl, compat_sys_semctl)
+ #if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
+ #define __NR_semtimedop 192
+-__SC_COMP(__NR_semtimedop, sys_semtimedop, sys_semtimedop_time32)
++__SC_3264(__NR_semtimedop, sys_semtimedop_time32, sys_semtimedop)
+ #endif
+ #define __NR_semop 193
+ __SYSCALL(__NR_semop, sys_semop)
+diff --git a/include/uapi/linux/isdn/capicmd.h 
b/include/uapi/linux/isdn/capicmd.h
+index 4941628a4fb9..5ec88e7548a9 100644
+--- a/include/uapi/linux/isdn/capicmd.h
++++ b/include/uapi/linux/isdn/capicmd.h
+@@ -16,6 +16,7 @@
+ #define CAPI_MSG_BASELEN              8
+ #define CAPI_DATA_B3_REQ_LEN          (CAPI_MSG_BASELEN+4+4+2+2+2)
+ #define CAPI_DATA_B3_RESP_LEN         (CAPI_MSG_BASELEN+4+2)
++#define CAPI_DISCONNECT_B3_RESP_LEN   (CAPI_MSG_BASELEN+4)
+ 
+ /*----- CAPI commands -----*/
+ #define CAPI_ALERT                0x01
+diff --git a/ipc/util.h b/ipc/util.h
+index 0fcf8e719b76..5766c61aed0e 100644
+--- a/ipc/util.h
++++ b/ipc/util.h
+@@ -276,29 +276,7 @@ static inline int compat_ipc_parse_version(int *cmd)
+       *cmd &= ~IPC_64;
+       return version;
+ }
+-#endif
+ 
+-/* for __ARCH_WANT_SYS_IPC */
+-long ksys_semtimedop(int semid, struct sembuf __user *tsops,
+-                   unsigned int nsops,
+-                   const struct __kernel_timespec __user *timeout);
+-long ksys_semget(key_t key, int nsems, int semflg);
+-long ksys_old_semctl(int semid, int semnum, int cmd, unsigned long arg);
+-long ksys_msgget(key_t key, int msgflg);
+-long ksys_old_msgctl(int msqid, int cmd, struct msqid_ds __user *buf);
+-long ksys_msgrcv(int msqid, struct msgbuf __user *msgp, size_t msgsz,
+-               long msgtyp, int msgflg);
+-long ksys_msgsnd(int msqid, struct msgbuf __user *msgp, size_t msgsz,
+-               int msgflg);
+-long ksys_shmget(key_t key, size_t size, int shmflg);
+-long ksys_shmdt(char __user *shmaddr);
+-long ksys_old_shmctl(int shmid, int cmd, struct shmid_ds __user *buf);
+-
+-/* for CONFIG_ARCH_WANT_OLD_COMPAT_IPC */
+-long compat_ksys_semtimedop(int semid, struct sembuf __user *tsems,
+-                          unsigned int nsops,
+-                          const struct old_timespec32 __user *timeout);
+-#ifdef CONFIG_COMPAT
+ long compat_ksys_old_semctl(int semid, int semnum, int cmd, int arg);
+ long compat_ksys_old_msgctl(int msqid, int cmd, void __user *uptr);
+ long compat_ksys_msgrcv(int msqid, compat_uptr_t msgp, compat_ssize_t msgsz,
+@@ -306,6 +284,7 @@ long compat_ksys_msgrcv(int msqid, compat_uptr_t msgp, compat_ssize_t msgsz,
+ long compat_ksys_msgsnd(int msqid, compat_uptr_t msgp,
+                      compat_ssize_t msgsz, int msgflg);
+ long compat_ksys_old_shmctl(int shmid, int cmd, void __user *uptr);
+-#endif /* CONFIG_COMPAT */
++
++#endif
+ 
+ #endif
+diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
+index bf9dbffd46b1..d2cba714d3ee 100644
+--- a/kernel/cgroup/cgroup.c
++++ b/kernel/cgroup/cgroup.c
+@@ -5213,8 +5213,16 @@ static struct cgroup *cgroup_create(struct cgroup *parent)
+        * if the parent has to be frozen, the child has too.
+        */
+       cgrp->freezer.e_freeze = parent->freezer.e_freeze;
+-      if (cgrp->freezer.e_freeze)
++      if (cgrp->freezer.e_freeze) {
++              /*
++               * Set the CGRP_FREEZE flag, so when a process will be
++               * attached to the child cgroup, it will become frozen.
++               * At this point the new cgroup is unpopulated, so we can
++               * consider it frozen immediately.
++               */
++              set_bit(CGRP_FREEZE, &cgrp->flags);
+               set_bit(CGRP_FROZEN, &cgrp->flags);
++      }
+ 
+       spin_lock_irq(&css_set_lock);
+       for (tcgrp = cgrp; tcgrp; tcgrp = cgroup_parent(tcgrp)) {
+diff --git a/kernel/irq/resend.c b/kernel/irq/resend.c
+index 95414ad3506a..98c04ca5fa43 100644
+--- a/kernel/irq/resend.c
++++ b/kernel/irq/resend.c
+@@ -36,6 +36,8 @@ static void resend_irqs(unsigned long arg)
+               irq = find_first_bit(irqs_resend, nr_irqs);
+               clear_bit(irq, irqs_resend);
+               desc = irq_to_desc(irq);
++              if (!desc)
++                      continue;
+               local_irq_disable();
+               desc->handle_irq(desc);
+               local_irq_enable();
+diff --git a/kernel/module.c b/kernel/module.c
+index 8431c3d47c97..dcf2cc656e7c 100644
+--- a/kernel/module.c
++++ b/kernel/module.c
+@@ -64,14 +64,9 @@
+ 
+ /*
+  * Modules' sections will be aligned on page boundaries
+- * to ensure complete separation of code and data, but
+- * only when CONFIG_STRICT_MODULE_RWX=y
++ * to ensure complete separation of code and data
+  */
+-#ifdef CONFIG_STRICT_MODULE_RWX
+ # define debug_align(X) ALIGN(X, PAGE_SIZE)
+-#else
+-# define debug_align(X) (X)
+-#endif
+ 
+ /* If this is set, the section belongs in the init part of the module */
+ #define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1))
+@@ -1697,6 +1692,8 @@ static int add_usage_links(struct module *mod)
+       return ret;
+ }
+ 
++static void module_remove_modinfo_attrs(struct module *mod, int end);
++
+ static int module_add_modinfo_attrs(struct module *mod)
+ {
+       struct module_attribute *attr;
+@@ -1711,24 +1708,34 @@ static int module_add_modinfo_attrs(struct module *mod)
+               return -ENOMEM;
+ 
+       temp_attr = mod->modinfo_attrs;
+-      for (i = 0; (attr = modinfo_attrs[i]) && !error; i++) {
++      for (i = 0; (attr = modinfo_attrs[i]); i++) {
+               if (!attr->test || attr->test(mod)) {
+                       memcpy(temp_attr, attr, sizeof(*temp_attr));
+                       sysfs_attr_init(&temp_attr->attr);
+                       error = sysfs_create_file(&mod->mkobj.kobj,
+                                       &temp_attr->attr);
++                      if (error)
++                              goto error_out;
+                       ++temp_attr;
+               }
+       }
++
++      return 0;
++
++error_out:
++      if (i > 0)
++              module_remove_modinfo_attrs(mod, --i);
+       return error;
+ }
+ 
+-static void module_remove_modinfo_attrs(struct module *mod)
++static void module_remove_modinfo_attrs(struct module *mod, int end)
+ {
+       struct module_attribute *attr;
+       int i;
+ 
+       for (i = 0; (attr = &mod->modinfo_attrs[i]); i++) {
++              if (end >= 0 && i > end)
++                      break;
+               /* pick a field to test for end of list */
+               if (!attr->attr.name)
+                       break;
+@@ -1816,7 +1823,7 @@ static int mod_sysfs_setup(struct module *mod,
+       return 0;
+ 
+ out_unreg_modinfo_attrs:
+-      module_remove_modinfo_attrs(mod);
++      module_remove_modinfo_attrs(mod, -1);
+ out_unreg_param:
+       module_param_sysfs_remove(mod);
+ out_unreg_holders:
+@@ -1852,7 +1859,7 @@ static void mod_sysfs_fini(struct module *mod)
+ {
+ }
+ 
+-static void module_remove_modinfo_attrs(struct module *mod)
++static void module_remove_modinfo_attrs(struct module *mod, int end)
+ {
+ }
+ 
+@@ -1868,14 +1875,14 @@ static void init_param_lock(struct module *mod)
+ static void mod_sysfs_teardown(struct module *mod)
+ {
+       del_usage_links(mod);
+-      module_remove_modinfo_attrs(mod);
++      module_remove_modinfo_attrs(mod, -1);
+       module_param_sysfs_remove(mod);
+       kobject_put(mod->mkobj.drivers_dir);
+       kobject_put(mod->holders_dir);
+       mod_sysfs_fini(mod);
+ }
+ 
+-#ifdef CONFIG_STRICT_MODULE_RWX
++#ifdef CONFIG_ARCH_HAS_STRICT_MODULE_RWX
+ /*
+  * LKM RO/NX protection: protect module's text/ro-data
+  * from modification and any data from execution.
+@@ -1898,6 +1905,7 @@ static void frob_text(const struct module_layout *layout,
+                  layout->text_size >> PAGE_SHIFT);
+ }
+ 
++#ifdef CONFIG_STRICT_MODULE_RWX
+ static void frob_rodata(const struct module_layout *layout,
+                       int (*set_memory)(unsigned long start, int num_pages))
+ {
+@@ -1949,13 +1957,9 @@ void module_enable_ro(const struct module *mod, bool after_init)
+       set_vm_flush_reset_perms(mod->core_layout.base);
+       set_vm_flush_reset_perms(mod->init_layout.base);
+       frob_text(&mod->core_layout, set_memory_ro);
+-      frob_text(&mod->core_layout, set_memory_x);
+ 
+       frob_rodata(&mod->core_layout, set_memory_ro);
+-
+       frob_text(&mod->init_layout, set_memory_ro);
+-      frob_text(&mod->init_layout, set_memory_x);
+-
+       frob_rodata(&mod->init_layout, set_memory_ro);
+ 
+       if (after_init)
+@@ -2014,9 +2018,19 @@ void set_all_modules_text_ro(void)
+       }
+       mutex_unlock(&module_mutex);
+ }
+-#else
++#else /* !CONFIG_STRICT_MODULE_RWX */
+ static void module_enable_nx(const struct module *mod) { }
+-#endif
++#endif /*  CONFIG_STRICT_MODULE_RWX */
++static void module_enable_x(const struct module *mod)
++{
++      frob_text(&mod->core_layout, set_memory_x);
++      frob_text(&mod->init_layout, set_memory_x);
++}
++#else /* !CONFIG_ARCH_HAS_STRICT_MODULE_RWX */
++static void module_enable_nx(const struct module *mod) { }
++static void module_enable_x(const struct module *mod) { }
++#endif /* CONFIG_ARCH_HAS_STRICT_MODULE_RWX */
++
+ 
+ #ifdef CONFIG_LIVEPATCH
+ /*
+@@ -3614,6 +3628,7 @@ static int complete_formation(struct module *mod, struct load_info *info)
+ 
+       module_enable_ro(mod, false);
+       module_enable_nx(mod);
++      module_enable_x(mod);
+ 
+       /* Mark state as coming so strong_try_module_get() ignores us,
+        * but kallsyms etc. can see us. */
+diff --git a/mm/z3fold.c b/mm/z3fold.c
+index 46686d0e3df8..8374b18ebe9a 100644
+--- a/mm/z3fold.c
++++ b/mm/z3fold.c
+@@ -1408,6 +1408,7 @@ static bool z3fold_page_isolate(struct page *page, isolate_mode_t mode)
+                                * should freak out.
+                                */
+                              WARN(1, "Z3fold is experiencing kref problems\n");
++                              z3fold_page_unlock(zhdr);
+                               return false;
+                       }
+                       z3fold_page_unlock(zhdr);
+@@ -1439,16 +1440,11 @@ static int z3fold_page_migrate(struct address_space *mapping, struct page *newpa
+       zhdr = page_address(page);
+       pool = zhdr_to_pool(zhdr);
+ 
+-      if (!trylock_page(page))
+-              return -EAGAIN;
+-
+       if (!z3fold_page_trylock(zhdr)) {
+-              unlock_page(page);
+               return -EAGAIN;
+       }
+       if (zhdr->mapped_count != 0) {
+               z3fold_page_unlock(zhdr);
+-              unlock_page(page);
+               return -EBUSY;
+       }
+       if (work_pending(&zhdr->work)) {
+@@ -1494,7 +1490,6 @@ static int z3fold_page_migrate(struct address_space *mapping, struct page *newpa
+       spin_unlock(&pool->lock);
+ 
+       page_mapcount_reset(page);
+-      unlock_page(page);
+       put_page(page);
+       return 0;
+ }
+diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
+index bf6acd34234d..63f9c08625f0 100644
+--- a/net/bridge/br_mdb.c
++++ b/net/bridge/br_mdb.c
+@@ -437,7 +437,7 @@ static int nlmsg_populate_rtr_fill(struct sk_buff *skb,
+       struct nlmsghdr *nlh;
+       struct nlattr *nest;
+ 
+-      nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), NLM_F_MULTI);
++      nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), 0);
+       if (!nlh)
+               return -EMSGSIZE;
+ 
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 29fcff2c3d51..2ff556906b5d 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -8768,6 +8768,8 @@ int register_netdevice(struct net_device *dev)
+       ret = notifier_to_errno(ret);
+       if (ret) {
+               rollback_registered(dev);
++              rcu_barrier();
++
+               dev->reg_state = NETREG_UNREGISTERED;
+       }
+       /*
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index c8cd99c3603f..74efd63f15e2 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -3531,6 +3531,25 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
+       int pos;
+       int dummy;
+ 
++      if (list_skb && !list_skb->head_frag && skb_headlen(list_skb) &&
++          (skb_shinfo(head_skb)->gso_type & SKB_GSO_DODGY)) {
++              /* gso_size is untrusted, and we have a frag_list with a linear
++               * non head_frag head.
++               *
++               * (we assume checking the first list_skb member suffices;
++               * i.e if either of the list_skb members have non head_frag
++               * head, then the first one has too).
++               *
++               * If head_skb's headlen does not fit requested gso_size, it
++               * means that the frag_list members do NOT terminate on exact
++               * gso_size boundaries. Hence we cannot perform skb_frag_t page
++               * sharing. Therefore we must fallback to copying the frag_list
++               * skbs; we do so by disabling SG.
++               */
++              if (mss != GSO_BY_FRAGS && mss != skb_headlen(head_skb))
++                      features &= ~NETIF_F_SG;
++      }
++
+       __skb_push(head_skb, doffset);
+       proto = skb_network_protocol(head_skb, &dummy);
+       if (unlikely(!proto))
+diff --git a/net/core/sock_map.c b/net/core/sock_map.c
+index 8a4a45e7c29d..3b14de0e36d2 100644
+--- a/net/core/sock_map.c
++++ b/net/core/sock_map.c
+@@ -661,6 +661,7 @@ static int sock_hash_update_common(struct bpf_map *map, void *key,
+                                  struct sock *sk, u64 flags)
+ {
+       struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
++      struct inet_connection_sock *icsk = inet_csk(sk);
+       u32 key_size = map->key_size, hash;
+       struct bpf_htab_elem *elem, *elem_new;
+       struct bpf_htab_bucket *bucket;
+@@ -671,6 +672,8 @@ static int sock_hash_update_common(struct bpf_map *map, void *key,
+       WARN_ON_ONCE(!rcu_read_lock_held());
+       if (unlikely(flags > BPF_EXIST))
+               return -EINVAL;
++      if (unlikely(icsk->icsk_ulp_data))
++              return -EINVAL;
+ 
+       link = sk_psock_init_link();
+       if (!link)
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index d95ee40df6c2..21ed010d7551 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -266,7 +266,7 @@ static void tcp_ecn_accept_cwr(struct sock *sk, const struct sk_buff *skb)
+ 
+ static void tcp_ecn_withdraw_cwr(struct tcp_sock *tp)
+ {
+-      tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
++      tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR;
+ }
+ 
+ static void __tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb)
+diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
+index 87d2d8c1db7c..98ac32b49d8c 100644
+--- a/net/ipv6/ping.c
++++ b/net/ipv6/ping.c
+@@ -223,7 +223,7 @@ static int __net_init ping_v6_proc_init_net(struct net *net)
+       return 0;
+ }
+ 
+-static void __net_init ping_v6_proc_exit_net(struct net *net)
++static void __net_exit ping_v6_proc_exit_net(struct net *net)
+ {
+       remove_proc_entry("icmp6", net->proc_net);
+ }
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 5f5a0a42ce60..6a6e403c71ac 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -3841,13 +3841,14 @@ struct fib6_info *addrconf_f6i_alloc(struct net *net,
+       struct fib6_config cfg = {
+               .fc_table = l3mdev_fib_table(idev->dev) ? : RT6_TABLE_LOCAL,
+               .fc_ifindex = idev->dev->ifindex,
+-              .fc_flags = RTF_UP | RTF_ADDRCONF | RTF_NONEXTHOP,
++              .fc_flags = RTF_UP | RTF_NONEXTHOP,
+               .fc_dst = *addr,
+               .fc_dst_len = 128,
+               .fc_protocol = RTPROT_KERNEL,
+               .fc_nlinfo.nl_net = net,
+               .fc_ignore_dev_down = true,
+       };
++      struct fib6_info *f6i;
+ 
+       if (anycast) {
+               cfg.fc_type = RTN_ANYCAST;
+@@ -3857,7 +3858,10 @@ struct fib6_info *addrconf_f6i_alloc(struct net *net,
+               cfg.fc_flags |= RTF_LOCAL;
+       }
+ 
+-      return ip6_route_info_create(&cfg, gfp_flags, NULL);
++      f6i = ip6_route_info_create(&cfg, gfp_flags, NULL);
++      if (!IS_ERR(f6i))
++              f6i->dst_nocount = true;
++      return f6i;
+ }
+ 
+ /* remove deleted ip from prefsrc entries */
+diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
+index 137db1cbde85..ac28f6a5d70e 100644
+--- a/net/sched/sch_generic.c
++++ b/net/sched/sch_generic.c
+@@ -46,6 +46,8 @@ EXPORT_SYMBOL(default_qdisc_ops);
+  * - updates to tree and tree walking are only done under the rtnl mutex.
+  */
+ 
++#define SKB_XOFF_MAGIC ((struct sk_buff *)1UL)
++
+ static inline struct sk_buff *__skb_dequeue_bad_txq(struct Qdisc *q)
+ {
+       const struct netdev_queue *txq = q->dev_queue;
+@@ -71,7 +73,7 @@ static inline struct sk_buff *__skb_dequeue_bad_txq(struct Qdisc *q)
+                               q->q.qlen--;
+                       }
+               } else {
+-                      skb = NULL;
++                      skb = SKB_XOFF_MAGIC;
+               }
+       }
+ 
+@@ -253,8 +255,11 @@ validate:
+               return skb;
+ 
+       skb = qdisc_dequeue_skb_bad_txq(q);
+-      if (unlikely(skb))
++      if (unlikely(skb)) {
++              if (skb == SKB_XOFF_MAGIC)
++                      return NULL;
+               goto bulk;
++      }
+       skb = q->dequeue(q);
+       if (skb) {
+ bulk:
+diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c
+index cee6971c1c82..23cd1c873a2c 100644
+--- a/net/sched/sch_hhf.c
++++ b/net/sched/sch_hhf.c
+@@ -531,7 +531,7 @@ static int hhf_change(struct Qdisc *sch, struct nlattr *opt,
+               new_hhf_non_hh_weight = nla_get_u32(tb[TCA_HHF_NON_HH_WEIGHT]);
+ 
+       non_hh_quantum = (u64)new_quantum * new_hhf_non_hh_weight;
+-      if (non_hh_quantum > INT_MAX)
++      if (non_hh_quantum == 0 || non_hh_quantum > INT_MAX)
+               return -EINVAL;
+ 
+       sch_tree_lock(sch);
+diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
+index 23af232c0a25..e2b4a440416b 100644
+--- a/net/sctp/protocol.c
++++ b/net/sctp/protocol.c
+@@ -1336,7 +1336,7 @@ static int __net_init sctp_ctrlsock_init(struct net *net)
+       return status;
+ }
+ 
+-static void __net_init sctp_ctrlsock_exit(struct net *net)
++static void __net_exit sctp_ctrlsock_exit(struct net *net)
+ {
+       /* Free the control endpoint.  */
+       inet_ctl_sock_destroy(net->sctp.ctl_sock);
+diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
+index 1cf5bb5b73c4..e52b2128e43b 100644
+--- a/net/sctp/sm_sideeffect.c
++++ b/net/sctp/sm_sideeffect.c
+@@ -547,7 +547,7 @@ static void sctp_do_8_2_transport_strike(struct sctp_cmd_seq *commands,
+       if (net->sctp.pf_enable &&
+          (transport->state == SCTP_ACTIVE) &&
+          (transport->error_count < transport->pathmaxrxt) &&
+-         (transport->error_count > asoc->pf_retrans)) {
++         (transport->error_count > transport->pf_retrans)) {
+ 
+               sctp_assoc_control_transport(asoc, transport,
+                                            SCTP_TRANSPORT_PF,
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index f33aa9ee9e27..d0324796f0b3 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -7176,7 +7176,7 @@ static int sctp_getsockopt_paddr_thresholds(struct sock *sk,
+               val.spt_pathmaxrxt = trans->pathmaxrxt;
+               val.spt_pathpfthld = trans->pf_retrans;
+ 
+-              return 0;
++              goto out;
+       }
+ 
+       asoc = sctp_id2assoc(sk, val.spt_assoc_id);
+@@ -7194,6 +7194,7 @@ static int sctp_getsockopt_paddr_thresholds(struct sock *sk,
+               val.spt_pathmaxrxt = sp->pathmaxrxt;
+       }
+ 
++out:
+       if (put_user(len, optlen) || copy_to_user(optval, &val, len))
+               return -EFAULT;
+ 
+diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
+index 61219f0b9677..836e629e8f4a 100644
+--- a/net/tipc/name_distr.c
++++ b/net/tipc/name_distr.c
+@@ -223,7 +223,8 @@ static void tipc_publ_purge(struct net *net, struct publication *publ, u32 addr)
+                      publ->key);
+       }
+ 
+-      kfree_rcu(p, rcu);
++      if (p)
++              kfree_rcu(p, rcu);
+ }
+ 
+ /**
