commit: ee77ed5cd54e726d06c811541727b80b2472cd96 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org> AuthorDate: Thu May 14 11:34:11 2020 +0000 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org> CommitDate: Thu May 14 11:34:11 2020 +0000 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=ee77ed5c
Linux patch 5.6.13 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org> 0000_README | 4 + 1012_linux-5.6.13.patch | 3958 +++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 3962 insertions(+) diff --git a/0000_README b/0000_README index f4994be..6a6ec25 100644 --- a/0000_README +++ b/0000_README @@ -91,6 +91,10 @@ Patch: 1011_linux-5.6.12.patch From: http://www.kernel.org Desc: Linux 5.6.12 +Patch: 1012_linux-5.6.13.patch +From: http://www.kernel.org +Desc: Linux 5.6.13 + Patch: 1500_XATTR_USER_PREFIX.patch From: https://bugs.gentoo.org/show_bug.cgi?id=470644 Desc: Support for namespace user.pax.* on tmpfs. diff --git a/1012_linux-5.6.13.patch b/1012_linux-5.6.13.patch new file mode 100644 index 0000000..cf736d2 --- /dev/null +++ b/1012_linux-5.6.13.patch @@ -0,0 +1,3958 @@ +diff --git a/Makefile b/Makefile +index 97e4c4d9ac95..d252219666fd 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 5 + PATCHLEVEL = 6 +-SUBLEVEL = 12 ++SUBLEVEL = 13 + EXTRAVERSION = + NAME = Kleptomaniac Octopus + +diff --git a/arch/arm/crypto/chacha-glue.c b/arch/arm/crypto/chacha-glue.c +index 6fdb0ac62b3d..59da6c0b63b6 100644 +--- a/arch/arm/crypto/chacha-glue.c ++++ b/arch/arm/crypto/chacha-glue.c +@@ -91,9 +91,17 @@ void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src, unsigned int bytes, + return; + } + +- kernel_neon_begin(); +- chacha_doneon(state, dst, src, bytes, nrounds); +- kernel_neon_end(); ++ do { ++ unsigned int todo = min_t(unsigned int, bytes, SZ_4K); ++ ++ kernel_neon_begin(); ++ chacha_doneon(state, dst, src, todo, nrounds); ++ kernel_neon_end(); ++ ++ bytes -= todo; ++ src += todo; ++ dst += todo; ++ } while (bytes); + } + EXPORT_SYMBOL(chacha_crypt_arch); + +diff --git a/arch/arm/crypto/nhpoly1305-neon-glue.c b/arch/arm/crypto/nhpoly1305-neon-glue.c +index ae5aefc44a4d..ffa8d73fe722 100644 +--- a/arch/arm/crypto/nhpoly1305-neon-glue.c ++++ b/arch/arm/crypto/nhpoly1305-neon-glue.c +@@ -30,7 +30,7 @@ static int nhpoly1305_neon_update(struct shash_desc *desc, + return crypto_nhpoly1305_update(desc, src, srclen); + + do { +- unsigned int n = min_t(unsigned int, srclen, PAGE_SIZE); ++ unsigned int n = min_t(unsigned int, srclen, SZ_4K); + + kernel_neon_begin(); + crypto_nhpoly1305_update_helper(desc, src, n, _nh_neon); +diff --git a/arch/arm/crypto/poly1305-glue.c b/arch/arm/crypto/poly1305-glue.c +index ceec04ec2f40..13cfef4ae22e 100644 +--- a/arch/arm/crypto/poly1305-glue.c ++++ b/arch/arm/crypto/poly1305-glue.c +@@ -160,13 +160,20 @@ void poly1305_update_arch(struct poly1305_desc_ctx *dctx, const u8 *src, + unsigned int len = round_down(nbytes, POLY1305_BLOCK_SIZE); + + if (static_branch_likely(&have_neon) && do_neon) { +- kernel_neon_begin(); +- poly1305_blocks_neon(&dctx->h, src, len, 1); +- kernel_neon_end(); ++ do { ++ unsigned int todo = min_t(unsigned int, len, SZ_4K); ++ ++ kernel_neon_begin(); ++ poly1305_blocks_neon(&dctx->h, src, todo, 1); ++ kernel_neon_end(); ++ ++ len -= todo; ++ src += todo; ++ } while (len); + } else { + poly1305_blocks_arm(&dctx->h, src, len, 1); ++ src += len; + } +- src += len; + nbytes %= POLY1305_BLOCK_SIZE; + } + +diff --git a/arch/arm64/crypto/chacha-neon-glue.c b/arch/arm64/crypto/chacha-neon-glue.c +index 37ca3e889848..af2bbca38e70 100644 +--- a/arch/arm64/crypto/chacha-neon-glue.c ++++ b/arch/arm64/crypto/chacha-neon-glue.c +@@ -87,9 +87,17 @@ void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src, unsigned int bytes, + !crypto_simd_usable()) + return chacha_crypt_generic(state, 
dst, src, bytes, nrounds); + +- kernel_neon_begin(); +- chacha_doneon(state, dst, src, bytes, nrounds); +- kernel_neon_end(); ++ do { ++ unsigned int todo = min_t(unsigned int, bytes, SZ_4K); ++ ++ kernel_neon_begin(); ++ chacha_doneon(state, dst, src, todo, nrounds); ++ kernel_neon_end(); ++ ++ bytes -= todo; ++ src += todo; ++ dst += todo; ++ } while (bytes); + } + EXPORT_SYMBOL(chacha_crypt_arch); + +diff --git a/arch/arm64/crypto/nhpoly1305-neon-glue.c b/arch/arm64/crypto/nhpoly1305-neon-glue.c +index 895d3727c1fb..c5405e6a6db7 100644 +--- a/arch/arm64/crypto/nhpoly1305-neon-glue.c ++++ b/arch/arm64/crypto/nhpoly1305-neon-glue.c +@@ -30,7 +30,7 @@ static int nhpoly1305_neon_update(struct shash_desc *desc, + return crypto_nhpoly1305_update(desc, src, srclen); + + do { +- unsigned int n = min_t(unsigned int, srclen, PAGE_SIZE); ++ unsigned int n = min_t(unsigned int, srclen, SZ_4K); + + kernel_neon_begin(); + crypto_nhpoly1305_update_helper(desc, src, n, _nh_neon); +diff --git a/arch/arm64/crypto/poly1305-glue.c b/arch/arm64/crypto/poly1305-glue.c +index e97b092f56b8..f33ada70c4ed 100644 +--- a/arch/arm64/crypto/poly1305-glue.c ++++ b/arch/arm64/crypto/poly1305-glue.c +@@ -143,13 +143,20 @@ void poly1305_update_arch(struct poly1305_desc_ctx *dctx, const u8 *src, + unsigned int len = round_down(nbytes, POLY1305_BLOCK_SIZE); + + if (static_branch_likely(&have_neon) && crypto_simd_usable()) { +- kernel_neon_begin(); +- poly1305_blocks_neon(&dctx->h, src, len, 1); +- kernel_neon_end(); ++ do { ++ unsigned int todo = min_t(unsigned int, len, SZ_4K); ++ ++ kernel_neon_begin(); ++ poly1305_blocks_neon(&dctx->h, src, todo, 1); ++ kernel_neon_end(); ++ ++ len -= todo; ++ src += todo; ++ } while (len); + } else { + poly1305_blocks(&dctx->h, src, len, 1); ++ src += len; + } +- src += len; + nbytes %= POLY1305_BLOCK_SIZE; + } + +diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c +index 2bd92301d32f..6194cb3309d0 100644 +--- a/arch/arm64/kvm/guest.c ++++ b/arch/arm64/kvm/guest.c +@@ -201,6 +201,13 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) + } + + memcpy((u32 *)regs + off, valp, KVM_REG_SIZE(reg->id)); ++ ++ if (*vcpu_cpsr(vcpu) & PSR_MODE32_BIT) { ++ int i; ++ ++ for (i = 0; i < 16; i++) ++ *vcpu_reg32(vcpu, i) = (u32)*vcpu_reg32(vcpu, i); ++ } + out: + return err; + } +diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c +index bbeb6a5a6ba6..0be3355e3499 100644 +--- a/arch/arm64/mm/hugetlbpage.c ++++ b/arch/arm64/mm/hugetlbpage.c +@@ -230,6 +230,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, + ptep = (pte_t *)pudp; + } else if (sz == (CONT_PTE_SIZE)) { + pmdp = pmd_alloc(mm, pudp, addr); ++ if (!pmdp) ++ return NULL; + + WARN_ON(addr & (sz - 1)); + /* +diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c +index fab855963c73..157924baa191 100644 +--- a/arch/riscv/mm/init.c ++++ b/arch/riscv/mm/init.c +@@ -149,7 +149,8 @@ void __init setup_bootmem(void) + memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start); + + set_max_mapnr(PFN_DOWN(mem_size)); +- max_low_pfn = PFN_DOWN(memblock_end_of_DRAM()); ++ max_pfn = PFN_DOWN(memblock_end_of_DRAM()); ++ max_low_pfn = max_pfn; + + #ifdef CONFIG_BLK_DEV_INITRD + setup_initrd(); +diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c +index ed52ffa8d5d4..560310e29e27 100644 +--- a/arch/s390/kvm/priv.c ++++ b/arch/s390/kvm/priv.c +@@ -626,10 +626,12 @@ static int handle_pqap(struct kvm_vcpu *vcpu) + * available for the guest are AQIC and TAPQ with the t bit set + * since we do not 
set IC.3 (FIII) we currently will only intercept + * the AQIC function code. ++ * Note: running nested under z/VM can result in intercepts for other ++ * function codes, e.g. PQAP(QCI). We do not support this and bail out. + */ + reg0 = vcpu->run->s.regs.gprs[0]; + fc = (reg0 >> 24) & 0xff; +- if (WARN_ON_ONCE(fc != 0x03)) ++ if (fc != 0x03) + return -EOPNOTSUPP; + + /* PQAP instruction is allowed for guest kernel only */ +diff --git a/arch/x86/crypto/blake2s-glue.c b/arch/x86/crypto/blake2s-glue.c +index 06ef2d4a4701..6737bcea1fa1 100644 +--- a/arch/x86/crypto/blake2s-glue.c ++++ b/arch/x86/crypto/blake2s-glue.c +@@ -32,16 +32,16 @@ void blake2s_compress_arch(struct blake2s_state *state, + const u32 inc) + { + /* SIMD disables preemption, so relax after processing each page. */ +- BUILD_BUG_ON(PAGE_SIZE / BLAKE2S_BLOCK_SIZE < 8); ++ BUILD_BUG_ON(SZ_4K / BLAKE2S_BLOCK_SIZE < 8); + + if (!static_branch_likely(&blake2s_use_ssse3) || !crypto_simd_usable()) { + blake2s_compress_generic(state, block, nblocks, inc); + return; + } + +- for (;;) { ++ do { + const size_t blocks = min_t(size_t, nblocks, +- PAGE_SIZE / BLAKE2S_BLOCK_SIZE); ++ SZ_4K / BLAKE2S_BLOCK_SIZE); + + kernel_fpu_begin(); + if (IS_ENABLED(CONFIG_AS_AVX512) && +@@ -52,10 +52,8 @@ void blake2s_compress_arch(struct blake2s_state *state, + kernel_fpu_end(); + + nblocks -= blocks; +- if (!nblocks) +- break; + block += blocks * BLAKE2S_BLOCK_SIZE; +- } ++ } while (nblocks); + } + EXPORT_SYMBOL(blake2s_compress_arch); + +diff --git a/arch/x86/crypto/chacha_glue.c b/arch/x86/crypto/chacha_glue.c +index 68a74953efaf..ebf2cd7ff2f0 100644 +--- a/arch/x86/crypto/chacha_glue.c ++++ b/arch/x86/crypto/chacha_glue.c +@@ -154,9 +154,17 @@ void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src, unsigned int bytes, + bytes <= CHACHA_BLOCK_SIZE) + return chacha_crypt_generic(state, dst, src, bytes, nrounds); + +- kernel_fpu_begin(); +- chacha_dosimd(state, dst, src, bytes, nrounds); +- kernel_fpu_end(); ++ do { ++ unsigned int todo = min_t(unsigned int, bytes, SZ_4K); ++ ++ kernel_fpu_begin(); ++ chacha_dosimd(state, dst, src, todo, nrounds); ++ kernel_fpu_end(); ++ ++ bytes -= todo; ++ src += todo; ++ dst += todo; ++ } while (bytes); + } + EXPORT_SYMBOL(chacha_crypt_arch); + +diff --git a/arch/x86/crypto/nhpoly1305-avx2-glue.c b/arch/x86/crypto/nhpoly1305-avx2-glue.c +index f7567cbd35b6..80fcb85736e1 100644 +--- a/arch/x86/crypto/nhpoly1305-avx2-glue.c ++++ b/arch/x86/crypto/nhpoly1305-avx2-glue.c +@@ -29,7 +29,7 @@ static int nhpoly1305_avx2_update(struct shash_desc *desc, + return crypto_nhpoly1305_update(desc, src, srclen); + + do { +- unsigned int n = min_t(unsigned int, srclen, PAGE_SIZE); ++ unsigned int n = min_t(unsigned int, srclen, SZ_4K); + + kernel_fpu_begin(); + crypto_nhpoly1305_update_helper(desc, src, n, _nh_avx2); +diff --git a/arch/x86/crypto/nhpoly1305-sse2-glue.c b/arch/x86/crypto/nhpoly1305-sse2-glue.c +index a661ede3b5cf..cc6b7c1a2705 100644 +--- a/arch/x86/crypto/nhpoly1305-sse2-glue.c ++++ b/arch/x86/crypto/nhpoly1305-sse2-glue.c +@@ -29,7 +29,7 @@ static int nhpoly1305_sse2_update(struct shash_desc *desc, + return crypto_nhpoly1305_update(desc, src, srclen); + + do { +- unsigned int n = min_t(unsigned int, srclen, PAGE_SIZE); ++ unsigned int n = min_t(unsigned int, srclen, SZ_4K); + + kernel_fpu_begin(); + crypto_nhpoly1305_update_helper(desc, src, n, _nh_sse2); +diff --git a/arch/x86/crypto/poly1305_glue.c b/arch/x86/crypto/poly1305_glue.c +index 79bb58737d52..61b2bc8b6986 100644 +--- a/arch/x86/crypto/poly1305_glue.c 
++++ b/arch/x86/crypto/poly1305_glue.c +@@ -91,8 +91,8 @@ static void poly1305_simd_blocks(void *ctx, const u8 *inp, size_t len, + struct poly1305_arch_internal *state = ctx; + + /* SIMD disables preemption, so relax after processing each page. */ +- BUILD_BUG_ON(PAGE_SIZE < POLY1305_BLOCK_SIZE || +- PAGE_SIZE % POLY1305_BLOCK_SIZE); ++ BUILD_BUG_ON(SZ_4K < POLY1305_BLOCK_SIZE || ++ SZ_4K % POLY1305_BLOCK_SIZE); + + if (!IS_ENABLED(CONFIG_AS_AVX) || !static_branch_likely(&poly1305_use_avx) || + (len < (POLY1305_BLOCK_SIZE * 18) && !state->is_base2_26) || +@@ -102,8 +102,8 @@ static void poly1305_simd_blocks(void *ctx, const u8 *inp, size_t len, + return; + } + +- for (;;) { +- const size_t bytes = min_t(size_t, len, PAGE_SIZE); ++ do { ++ const size_t bytes = min_t(size_t, len, SZ_4K); + + kernel_fpu_begin(); + if (IS_ENABLED(CONFIG_AS_AVX512) && static_branch_likely(&poly1305_use_avx512)) +@@ -113,11 +113,10 @@ static void poly1305_simd_blocks(void *ctx, const u8 *inp, size_t len, + else + poly1305_blocks_avx(ctx, inp, bytes, padbit); + kernel_fpu_end(); ++ + len -= bytes; +- if (!len) +- break; + inp += bytes; +- } ++ } while (len); + } + + static void poly1305_simd_emit(void *ctx, u8 mac[POLY1305_DIGEST_SIZE], +diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h +index 0789e13ece90..1c7f13bb6728 100644 +--- a/arch/x86/entry/calling.h ++++ b/arch/x86/entry/calling.h +@@ -98,13 +98,6 @@ For 32-bit we have the following conventions - kernel is built with + #define SIZEOF_PTREGS 21*8 + + .macro PUSH_AND_CLEAR_REGS rdx=%rdx rax=%rax save_ret=0 +- /* +- * Push registers and sanitize registers of values that a +- * speculation attack might otherwise want to exploit. The +- * lower registers are likely clobbered well before they +- * could be put to use in a speculative execution gadget. +- * Interleave XOR with PUSH for better uop scheduling: +- */ + .if \save_ret + pushq %rsi /* pt_regs->si */ + movq 8(%rsp), %rsi /* temporarily store the return address in %rsi */ +@@ -114,34 +107,43 @@ For 32-bit we have the following conventions - kernel is built with + pushq %rsi /* pt_regs->si */ + .endif + pushq \rdx /* pt_regs->dx */ +- xorl %edx, %edx /* nospec dx */ + pushq %rcx /* pt_regs->cx */ +- xorl %ecx, %ecx /* nospec cx */ + pushq \rax /* pt_regs->ax */ + pushq %r8 /* pt_regs->r8 */ +- xorl %r8d, %r8d /* nospec r8 */ + pushq %r9 /* pt_regs->r9 */ +- xorl %r9d, %r9d /* nospec r9 */ + pushq %r10 /* pt_regs->r10 */ +- xorl %r10d, %r10d /* nospec r10 */ + pushq %r11 /* pt_regs->r11 */ +- xorl %r11d, %r11d /* nospec r11*/ + pushq %rbx /* pt_regs->rbx */ +- xorl %ebx, %ebx /* nospec rbx*/ + pushq %rbp /* pt_regs->rbp */ +- xorl %ebp, %ebp /* nospec rbp*/ + pushq %r12 /* pt_regs->r12 */ +- xorl %r12d, %r12d /* nospec r12*/ + pushq %r13 /* pt_regs->r13 */ +- xorl %r13d, %r13d /* nospec r13*/ + pushq %r14 /* pt_regs->r14 */ +- xorl %r14d, %r14d /* nospec r14*/ + pushq %r15 /* pt_regs->r15 */ +- xorl %r15d, %r15d /* nospec r15*/ + UNWIND_HINT_REGS ++ + .if \save_ret + pushq %rsi /* return address on top of stack */ + .endif ++ ++ /* ++ * Sanitize registers of values that a speculation attack might ++ * otherwise want to exploit. The lower registers are likely clobbered ++ * well before they could be put to use in a speculative execution ++ * gadget. 
++ */ ++ xorl %edx, %edx /* nospec dx */ ++ xorl %ecx, %ecx /* nospec cx */ ++ xorl %r8d, %r8d /* nospec r8 */ ++ xorl %r9d, %r9d /* nospec r9 */ ++ xorl %r10d, %r10d /* nospec r10 */ ++ xorl %r11d, %r11d /* nospec r11 */ ++ xorl %ebx, %ebx /* nospec rbx */ ++ xorl %ebp, %ebp /* nospec rbp */ ++ xorl %r12d, %r12d /* nospec r12 */ ++ xorl %r13d, %r13d /* nospec r13 */ ++ xorl %r14d, %r14d /* nospec r14 */ ++ xorl %r15d, %r15d /* nospec r15 */ ++ + .endm + + .macro POP_REGS pop_rdi=1 skip_r11rcx=0 +diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S +index f2bb91e87877..faa53fee0663 100644 +--- a/arch/x86/entry/entry_64.S ++++ b/arch/x86/entry/entry_64.S +@@ -249,7 +249,6 @@ SYM_INNER_LABEL(entry_SYSCALL_64_after_hwframe, SYM_L_GLOBAL) + */ + syscall_return_via_sysret: + /* rcx and r11 are already restored (see code above) */ +- UNWIND_HINT_EMPTY + POP_REGS pop_rdi=0 skip_r11rcx=1 + + /* +@@ -258,6 +257,7 @@ syscall_return_via_sysret: + */ + movq %rsp, %rdi + movq PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp ++ UNWIND_HINT_EMPTY + + pushq RSP-RDI(%rdi) /* RSP */ + pushq (%rdi) /* RDI */ +@@ -279,8 +279,7 @@ SYM_CODE_END(entry_SYSCALL_64) + * %rdi: prev task + * %rsi: next task + */ +-SYM_CODE_START(__switch_to_asm) +- UNWIND_HINT_FUNC ++SYM_FUNC_START(__switch_to_asm) + /* + * Save callee-saved registers + * This must match the order in inactive_task_frame +@@ -321,7 +320,7 @@ SYM_CODE_START(__switch_to_asm) + popq %rbp + + jmp __switch_to +-SYM_CODE_END(__switch_to_asm) ++SYM_FUNC_END(__switch_to_asm) + + /* + * A newly forked process directly context switches into this address. +@@ -512,7 +511,7 @@ SYM_CODE_END(spurious_entries_start) + * +----------------------------------------------------+ + */ + SYM_CODE_START(interrupt_entry) +- UNWIND_HINT_FUNC ++ UNWIND_HINT_IRET_REGS offset=16 + ASM_CLAC + cld + +@@ -544,9 +543,9 @@ SYM_CODE_START(interrupt_entry) + pushq 5*8(%rdi) /* regs->eflags */ + pushq 4*8(%rdi) /* regs->cs */ + pushq 3*8(%rdi) /* regs->ip */ ++ UNWIND_HINT_IRET_REGS + pushq 2*8(%rdi) /* regs->orig_ax */ + pushq 8(%rdi) /* return address */ +- UNWIND_HINT_FUNC + + movq (%rdi), %rdi + jmp 2f +@@ -637,6 +636,7 @@ SYM_INNER_LABEL(swapgs_restore_regs_and_return_to_usermode, SYM_L_GLOBAL) + */ + movq %rsp, %rdi + movq PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp ++ UNWIND_HINT_EMPTY + + /* Copy the IRET frame to the trampoline stack. 
*/ + pushq 6*8(%rdi) /* SS */ +@@ -1739,7 +1739,7 @@ SYM_CODE_START(rewind_stack_do_exit) + + movq PER_CPU_VAR(cpu_current_top_of_stack), %rax + leaq -PTREGS_SIZE(%rax), %rsp +- UNWIND_HINT_FUNC sp_offset=PTREGS_SIZE ++ UNWIND_HINT_REGS + + call do_exit + SYM_CODE_END(rewind_stack_do_exit) +diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h +index d79b40cd8283..7ba99c0759cf 100644 +--- a/arch/x86/include/asm/kvm_host.h ++++ b/arch/x86/include/asm/kvm_host.h +@@ -1664,8 +1664,8 @@ void kvm_set_msi_irq(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e, + static inline bool kvm_irq_is_postable(struct kvm_lapic_irq *irq) + { + /* We can only post Fixed and LowPrio IRQs */ +- return (irq->delivery_mode == dest_Fixed || +- irq->delivery_mode == dest_LowestPrio); ++ return (irq->delivery_mode == APIC_DM_FIXED || ++ irq->delivery_mode == APIC_DM_LOWEST); + } + + static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) +diff --git a/arch/x86/include/asm/unwind.h b/arch/x86/include/asm/unwind.h +index 499578f7e6d7..70fc159ebe69 100644 +--- a/arch/x86/include/asm/unwind.h ++++ b/arch/x86/include/asm/unwind.h +@@ -19,7 +19,7 @@ struct unwind_state { + #if defined(CONFIG_UNWINDER_ORC) + bool signal, full_regs; + unsigned long sp, bp, ip; +- struct pt_regs *regs; ++ struct pt_regs *regs, *prev_regs; + #elif defined(CONFIG_UNWINDER_FRAME_POINTER) + bool got_irq; + unsigned long *bp, *orig_sp, ip; +diff --git a/arch/x86/kernel/unwind_orc.c b/arch/x86/kernel/unwind_orc.c +index e9cc182aa97e..80537dcbddef 100644 +--- a/arch/x86/kernel/unwind_orc.c ++++ b/arch/x86/kernel/unwind_orc.c +@@ -142,9 +142,6 @@ static struct orc_entry *orc_find(unsigned long ip) + { + static struct orc_entry *orc; + +- if (!orc_init) +- return NULL; +- + if (ip == 0) + return &null_orc_entry; + +@@ -381,9 +378,38 @@ static bool deref_stack_iret_regs(struct unwind_state *state, unsigned long addr + return true; + } + ++/* ++ * If state->regs is non-NULL, and points to a full pt_regs, just get the reg ++ * value from state->regs. ++ * ++ * Otherwise, if state->regs just points to IRET regs, and the previous frame ++ * had full regs, it's safe to get the value from the previous regs. This can ++ * happen when early/late IRQ entry code gets interrupted by an NMI. 
++ */ ++static bool get_reg(struct unwind_state *state, unsigned int reg_off, ++ unsigned long *val) ++{ ++ unsigned int reg = reg_off/8; ++ ++ if (!state->regs) ++ return false; ++ ++ if (state->full_regs) { ++ *val = ((unsigned long *)state->regs)[reg]; ++ return true; ++ } ++ ++ if (state->prev_regs) { ++ *val = ((unsigned long *)state->prev_regs)[reg]; ++ return true; ++ } ++ ++ return false; ++} ++ + bool unwind_next_frame(struct unwind_state *state) + { +- unsigned long ip_p, sp, orig_ip = state->ip, prev_sp = state->sp; ++ unsigned long ip_p, sp, tmp, orig_ip = state->ip, prev_sp = state->sp; + enum stack_type prev_type = state->stack_info.type; + struct orc_entry *orc; + bool indirect = false; +@@ -445,39 +471,35 @@ bool unwind_next_frame(struct unwind_state *state) + break; + + case ORC_REG_R10: +- if (!state->regs || !state->full_regs) { ++ if (!get_reg(state, offsetof(struct pt_regs, r10), &sp)) { + orc_warn("missing regs for base reg R10 at ip %pB\n", + (void *)state->ip); + goto err; + } +- sp = state->regs->r10; + break; + + case ORC_REG_R13: +- if (!state->regs || !state->full_regs) { ++ if (!get_reg(state, offsetof(struct pt_regs, r13), &sp)) { + orc_warn("missing regs for base reg R13 at ip %pB\n", + (void *)state->ip); + goto err; + } +- sp = state->regs->r13; + break; + + case ORC_REG_DI: +- if (!state->regs || !state->full_regs) { ++ if (!get_reg(state, offsetof(struct pt_regs, di), &sp)) { + orc_warn("missing regs for base reg DI at ip %pB\n", + (void *)state->ip); + goto err; + } +- sp = state->regs->di; + break; + + case ORC_REG_DX: +- if (!state->regs || !state->full_regs) { ++ if (!get_reg(state, offsetof(struct pt_regs, dx), &sp)) { + orc_warn("missing regs for base reg DX at ip %pB\n", + (void *)state->ip); + goto err; + } +- sp = state->regs->dx; + break; + + default: +@@ -504,6 +526,7 @@ bool unwind_next_frame(struct unwind_state *state) + + state->sp = sp; + state->regs = NULL; ++ state->prev_regs = NULL; + state->signal = false; + break; + +@@ -515,6 +538,7 @@ bool unwind_next_frame(struct unwind_state *state) + } + + state->regs = (struct pt_regs *)sp; ++ state->prev_regs = NULL; + state->full_regs = true; + state->signal = true; + break; +@@ -526,6 +550,8 @@ bool unwind_next_frame(struct unwind_state *state) + goto err; + } + ++ if (state->full_regs) ++ state->prev_regs = state->regs; + state->regs = (void *)sp - IRET_FRAME_OFFSET; + state->full_regs = false; + state->signal = true; +@@ -534,14 +560,14 @@ bool unwind_next_frame(struct unwind_state *state) + default: + orc_warn("unknown .orc_unwind entry type %d for ip %pB\n", + orc->type, (void *)orig_ip); +- break; ++ goto err; + } + + /* Find BP: */ + switch (orc->bp_reg) { + case ORC_REG_UNDEFINED: +- if (state->regs && state->full_regs) +- state->bp = state->regs->bp; ++ if (get_reg(state, offsetof(struct pt_regs, bp), &tmp)) ++ state->bp = tmp; + break; + + case ORC_REG_PREV_SP: +@@ -585,6 +611,9 @@ EXPORT_SYMBOL_GPL(unwind_next_frame); + void __unwind_start(struct unwind_state *state, struct task_struct *task, + struct pt_regs *regs, unsigned long *first_frame) + { ++ if (!orc_init) ++ goto done; ++ + memset(state, 0, sizeof(*state)); + state->task = task; + +@@ -651,7 +680,7 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task, + /* Otherwise, skip ahead to the user-specified starting frame: */ + while (!unwind_done(state) && + (!on_stack(&state->stack_info, first_frame, sizeof(long)) || +- state->sp <= (unsigned long)first_frame)) ++ state->sp < (unsigned long)first_frame)) + 
unwind_next_frame(state); + + return; +diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c +index 750ff0b29404..d057376bd3d3 100644 +--- a/arch/x86/kvm/ioapic.c ++++ b/arch/x86/kvm/ioapic.c +@@ -225,12 +225,12 @@ static int ioapic_set_irq(struct kvm_ioapic *ioapic, unsigned int irq, + } + + /* +- * AMD SVM AVIC accelerate EOI write and do not trap, +- * in-kernel IOAPIC will not be able to receive the EOI. +- * In this case, we do lazy update of the pending EOI when +- * trying to set IOAPIC irq. ++ * AMD SVM AVIC accelerate EOI write iff the interrupt is edge ++ * triggered, in which case the in-kernel IOAPIC will not be able ++ * to receive the EOI. In this case, we do a lazy update of the ++ * pending EOI when trying to set IOAPIC irq. + */ +- if (kvm_apicv_activated(ioapic->kvm)) ++ if (edge && kvm_apicv_activated(ioapic->kvm)) + ioapic_lazy_update_eoi(ioapic, irq); + + /* +diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c +index 451377533bcb..c974c49221eb 100644 +--- a/arch/x86/kvm/svm.c ++++ b/arch/x86/kvm/svm.c +@@ -1886,7 +1886,7 @@ static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr, + return NULL; + + /* Pin the user virtual address. */ +- npinned = get_user_pages_fast(uaddr, npages, FOLL_WRITE, pages); ++ npinned = get_user_pages_fast(uaddr, npages, write ? FOLL_WRITE : 0, pages); + if (npinned != npages) { + pr_err("SEV: Failure locking %lu pages.\n", npages); + goto err; +diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S +index 861ae40e7144..99410f372c41 100644 +--- a/arch/x86/kvm/vmx/vmenter.S ++++ b/arch/x86/kvm/vmx/vmenter.S +@@ -86,6 +86,9 @@ SYM_FUNC_START(vmx_vmexit) + /* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */ + FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE + ++ /* Clear RFLAGS.CF and RFLAGS.ZF to preserve VM-Exit, i.e. !VM-Fail. */ ++ or $1, %_ASM_AX ++ + pop %_ASM_AX + .Lvmexit_skip_rsb: + #endif +diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c +index c4aedd00c1ba..7ab317e3184e 100644 +--- a/arch/x86/mm/pat/set_memory.c ++++ b/arch/x86/mm/pat/set_memory.c +@@ -42,7 +42,8 @@ struct cpa_data { + unsigned long pfn; + unsigned int flags; + unsigned int force_split : 1, +- force_static_prot : 1; ++ force_static_prot : 1, ++ force_flush_all : 1; + struct page **pages; + }; + +@@ -352,10 +353,10 @@ static void cpa_flush(struct cpa_data *data, int cache) + return; + } + +- if (cpa->numpages <= tlb_single_page_flush_ceiling) +- on_each_cpu(__cpa_flush_tlb, cpa, 1); +- else ++ if (cpa->force_flush_all || cpa->numpages > tlb_single_page_flush_ceiling) + flush_tlb_all(); ++ else ++ on_each_cpu(__cpa_flush_tlb, cpa, 1); + + if (!cache) + return; +@@ -1595,6 +1596,8 @@ static int cpa_process_alias(struct cpa_data *cpa) + alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY); + alias_cpa.curpage = 0; + ++ cpa->force_flush_all = 1; ++ + ret = __change_page_attr_set_clr(&alias_cpa, 0); + if (ret) + return ret; +@@ -1615,6 +1618,7 @@ static int cpa_process_alias(struct cpa_data *cpa) + alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY); + alias_cpa.curpage = 0; + ++ cpa->force_flush_all = 1; + /* + * The high mapping range is imprecise, so ignore the + * return value. 
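Every crypto hunk above (ARM/arm64/x86 ChaCha, Poly1305, BLAKE2s, NHPoly1305) applies the same fix: instead of holding the NEON/FPU context, which disables preemption, across an arbitrarily large input, the input is walked in fixed SZ_4K steps and the SIMD context is dropped between steps. A minimal userspace sketch of that pattern follows; simd_begin(), simd_end() and process_chunk() are hypothetical stand-ins for kernel_neon_begin()/kernel_fpu_begin(), their _end() counterparts, and the per-arch block functions, not kernel APIs.

#include <stddef.h>
#include <stdio.h>

#define SZ_4K 4096

/* Stand-ins: in the kernel these disable/re-enable preemption. */
static void simd_begin(void) { }
static void simd_end(void) { }

/* Stand-in for chacha_doneon(), poly1305_blocks_neon(), etc. */
static void process_chunk(const unsigned char *src, size_t len)
{
	(void)src;
	printf("processed %zu bytes\n", len);
}

/*
 * Walk the input in at most 4 KiB steps, dropping the SIMD context
 * between steps so preemption latency stays bounded regardless of
 * total input size.
 */
static void process_all(const unsigned char *src, size_t bytes)
{
	do {
		size_t todo = bytes < SZ_4K ? bytes : SZ_4K;

		simd_begin();
		process_chunk(src, todo);
		simd_end();

		bytes -= todo;
		src += todo;
	} while (bytes);
}

int main(void)
{
	static unsigned char buf[10000];

	process_all(buf, sizeof(buf));	/* 4096 + 4096 + 1808 */
	return 0;
}

The explicit SZ_4K (rather than PAGE_SIZE, which the old code used) keeps the worst-case preempt-off window the same on configurations with 64K pages.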
+diff --git a/block/blk-iocost.c b/block/blk-iocost.c +index 2dc5dc54e257..d083f7704082 100644 +--- a/block/blk-iocost.c ++++ b/block/blk-iocost.c +@@ -469,7 +469,7 @@ struct ioc_gq { + */ + atomic64_t vtime; + atomic64_t done_vtime; +- atomic64_t abs_vdebt; ++ u64 abs_vdebt; + u64 last_vtime; + + /* +@@ -1145,7 +1145,7 @@ static void iocg_kick_waitq(struct ioc_gq *iocg, struct ioc_now *now) + struct iocg_wake_ctx ctx = { .iocg = iocg }; + u64 margin_ns = (u64)(ioc->period_us * + WAITQ_TIMER_MARGIN_PCT / 100) * NSEC_PER_USEC; +- u64 abs_vdebt, vdebt, vshortage, expires, oexpires; ++ u64 vdebt, vshortage, expires, oexpires; + s64 vbudget; + u32 hw_inuse; + +@@ -1155,18 +1155,15 @@ static void iocg_kick_waitq(struct ioc_gq *iocg, struct ioc_now *now) + vbudget = now->vnow - atomic64_read(&iocg->vtime); + + /* pay off debt */ +- abs_vdebt = atomic64_read(&iocg->abs_vdebt); +- vdebt = abs_cost_to_cost(abs_vdebt, hw_inuse); ++ vdebt = abs_cost_to_cost(iocg->abs_vdebt, hw_inuse); + if (vdebt && vbudget > 0) { + u64 delta = min_t(u64, vbudget, vdebt); + u64 abs_delta = min(cost_to_abs_cost(delta, hw_inuse), +- abs_vdebt); ++ iocg->abs_vdebt); + + atomic64_add(delta, &iocg->vtime); + atomic64_add(delta, &iocg->done_vtime); +- atomic64_sub(abs_delta, &iocg->abs_vdebt); +- if (WARN_ON_ONCE(atomic64_read(&iocg->abs_vdebt) < 0)) +- atomic64_set(&iocg->abs_vdebt, 0); ++ iocg->abs_vdebt -= abs_delta; + } + + /* +@@ -1222,12 +1219,18 @@ static bool iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now, u64 cost) + u64 expires, oexpires; + u32 hw_inuse; + ++ lockdep_assert_held(&iocg->waitq.lock); ++ + /* debt-adjust vtime */ + current_hweight(iocg, NULL, &hw_inuse); +- vtime += abs_cost_to_cost(atomic64_read(&iocg->abs_vdebt), hw_inuse); ++ vtime += abs_cost_to_cost(iocg->abs_vdebt, hw_inuse); + +- /* clear or maintain depending on the overage */ +- if (time_before_eq64(vtime, now->vnow)) { ++ /* ++ * Clear or maintain depending on the overage. Non-zero vdebt is what ++ * guarantees that @iocg is online and future iocg_kick_delay() will ++ * clear use_delay. Don't leave it on when there's no vdebt. ++ */ ++ if (!iocg->abs_vdebt || time_before_eq64(vtime, now->vnow)) { + blkcg_clear_delay(blkg); + return false; + } +@@ -1261,9 +1264,12 @@ static enum hrtimer_restart iocg_delay_timer_fn(struct hrtimer *timer) + { + struct ioc_gq *iocg = container_of(timer, struct ioc_gq, delay_timer); + struct ioc_now now; ++ unsigned long flags; + ++ spin_lock_irqsave(&iocg->waitq.lock, flags); + ioc_now(iocg->ioc, &now); + iocg_kick_delay(iocg, &now, 0); ++ spin_unlock_irqrestore(&iocg->waitq.lock, flags); + + return HRTIMER_NORESTART; + } +@@ -1371,14 +1377,13 @@ static void ioc_timer_fn(struct timer_list *timer) + * should have woken up in the last period and expire idle iocgs. 
+ */ + list_for_each_entry_safe(iocg, tiocg, &ioc->active_iocgs, active_list) { +- if (!waitqueue_active(&iocg->waitq) && +- !atomic64_read(&iocg->abs_vdebt) && !iocg_is_idle(iocg)) ++ if (!waitqueue_active(&iocg->waitq) && !iocg->abs_vdebt && ++ !iocg_is_idle(iocg)) + continue; + + spin_lock(&iocg->waitq.lock); + +- if (waitqueue_active(&iocg->waitq) || +- atomic64_read(&iocg->abs_vdebt)) { ++ if (waitqueue_active(&iocg->waitq) || iocg->abs_vdebt) { + /* might be oversleeping vtime / hweight changes, kick */ + iocg_kick_waitq(iocg, &now); + iocg_kick_delay(iocg, &now, 0); +@@ -1721,28 +1726,49 @@ static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio) + * tests are racy but the races aren't systemic - we only miss once + * in a while which is fine. + */ +- if (!waitqueue_active(&iocg->waitq) && +- !atomic64_read(&iocg->abs_vdebt) && ++ if (!waitqueue_active(&iocg->waitq) && !iocg->abs_vdebt && + time_before_eq64(vtime + cost, now.vnow)) { + iocg_commit_bio(iocg, bio, cost); + return; + } + + /* +- * We're over budget. If @bio has to be issued regardless, +- * remember the abs_cost instead of advancing vtime. +- * iocg_kick_waitq() will pay off the debt before waking more IOs. ++ * We activated above but w/o any synchronization. Deactivation is ++ * synchronized with waitq.lock and we won't get deactivated as long ++ * as we're waiting or has debt, so we're good if we're activated ++ * here. In the unlikely case that we aren't, just issue the IO. ++ */ ++ spin_lock_irq(&iocg->waitq.lock); ++ ++ if (unlikely(list_empty(&iocg->active_list))) { ++ spin_unlock_irq(&iocg->waitq.lock); ++ iocg_commit_bio(iocg, bio, cost); ++ return; ++ } ++ ++ /* ++ * We're over budget. If @bio has to be issued regardless, remember ++ * the abs_cost instead of advancing vtime. iocg_kick_waitq() will pay ++ * off the debt before waking more IOs. ++ * + * This way, the debt is continuously paid off each period with the +- * actual budget available to the cgroup. If we just wound vtime, +- * we would incorrectly use the current hw_inuse for the entire +- * amount which, for example, can lead to the cgroup staying +- * blocked for a long time even with substantially raised hw_inuse. ++ * actual budget available to the cgroup. If we just wound vtime, we ++ * would incorrectly use the current hw_inuse for the entire amount ++ * which, for example, can lead to the cgroup staying blocked for a ++ * long time even with substantially raised hw_inuse. ++ * ++ * An iocg with vdebt should stay online so that the timer can keep ++ * deducting its vdebt and [de]activate use_delay mechanism ++ * accordingly. We don't want to race against the timer trying to ++ * clear them and leave @iocg inactive w/ dangling use_delay heavily ++ * penalizing the cgroup and its descendants. + */ + if (bio_issue_as_root_blkg(bio) || fatal_signal_pending(current)) { +- atomic64_add(abs_cost, &iocg->abs_vdebt); ++ iocg->abs_vdebt += abs_cost; + if (iocg_kick_delay(iocg, &now, cost)) + blkcg_schedule_throttle(rqos->q, + (bio->bi_opf & REQ_SWAP) == REQ_SWAP); ++ spin_unlock_irq(&iocg->waitq.lock); + return; + } + +@@ -1759,20 +1785,6 @@ static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio) + * All waiters are on iocg->waitq and the wait states are + * synchronized using waitq.lock. + */ +- spin_lock_irq(&iocg->waitq.lock); +- +- /* +- * We activated above but w/o any synchronization.
Deactivation is +- * synchronized with waitq.lock and we won't get deactivated as +- * long as we're waiting, so we're good if we're activated here. +- * In the unlikely case that we are deactivated, just issue the IO. +- */ +- if (unlikely(list_empty(&iocg->active_list))) { +- spin_unlock_irq(&iocg->waitq.lock); +- iocg_commit_bio(iocg, bio, cost); +- return; +- } +- + init_waitqueue_func_entry(&wait.wait, iocg_wake_fn); + wait.wait.private = current; + wait.bio = bio; +@@ -1804,6 +1816,7 @@ static void ioc_rqos_merge(struct rq_qos *rqos, struct request *rq, + struct ioc_now now; + u32 hw_inuse; + u64 abs_cost, cost; ++ unsigned long flags; + + /* bypass if disabled or for root cgroup */ + if (!ioc->enabled || !iocg->level) +@@ -1823,15 +1836,28 @@ static void ioc_rqos_merge(struct rq_qos *rqos, struct request *rq, + iocg->cursor = bio_end; + + /* +- * Charge if there's enough vtime budget and the existing request +- * has cost assigned. Otherwise, account it as debt. See debt +- * handling in ioc_rqos_throttle() for details. ++ * Charge if there's enough vtime budget and the existing request has ++ * cost assigned. + */ + if (rq->bio && rq->bio->bi_iocost_cost && +- time_before_eq64(atomic64_read(&iocg->vtime) + cost, now.vnow)) ++ time_before_eq64(atomic64_read(&iocg->vtime) + cost, now.vnow)) { + iocg_commit_bio(iocg, bio, cost); +- else +- atomic64_add(abs_cost, &iocg->abs_vdebt); ++ return; ++ } ++ ++ /* ++ * Otherwise, account it as debt if @iocg is online, which it should ++ * be for the vast majority of cases. See debt handling in ++ * ioc_rqos_throttle() for details. ++ */ ++ spin_lock_irqsave(&iocg->waitq.lock, flags); ++ if (likely(!list_empty(&iocg->active_list))) { ++ iocg->abs_vdebt += abs_cost; ++ iocg_kick_delay(iocg, &now, cost); ++ } else { ++ iocg_commit_bio(iocg, bio, cost); ++ } ++ spin_unlock_irqrestore(&iocg->waitq.lock, flags); + } + + static void ioc_rqos_done_bio(struct rq_qos *rqos, struct bio *bio) +@@ -2001,7 +2027,6 @@ static void ioc_pd_init(struct blkg_policy_data *pd) + iocg->ioc = ioc; + atomic64_set(&iocg->vtime, now.vnow); + atomic64_set(&iocg->done_vtime, now.vnow); +- atomic64_set(&iocg->abs_vdebt, 0); + atomic64_set(&iocg->active_period, atomic64_read(&ioc->cur_period)); + INIT_LIST_HEAD(&iocg->active_list); + iocg->hweight_active = HWEIGHT_WHOLE; +diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c +index fe1523664816..8558b629880b 100644 +--- a/drivers/amba/bus.c ++++ b/drivers/amba/bus.c +@@ -645,6 +645,7 @@ static void amba_device_initialize(struct amba_device *dev, const char *name) + dev->dev.release = amba_device_release; + dev->dev.bus = &amba_bustype; + dev->dev.dma_mask = &dev->dev.coherent_dma_mask; ++ dev->dev.dma_parms = &dev->dma_parms; + dev->res.name = dev_name(&dev->dev); + } + +diff --git a/drivers/base/platform.c b/drivers/base/platform.c +index b5ce7b085795..c81b68d5d66d 100644 +--- a/drivers/base/platform.c ++++ b/drivers/base/platform.c +@@ -361,6 +361,8 @@ struct platform_object { + */ + static void setup_pdev_dma_masks(struct platform_device *pdev) + { ++ pdev->dev.dma_parms = &pdev->dma_parms; ++ + if (!pdev->dev.coherent_dma_mask) + pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); + if (!pdev->dev.dma_mask) { +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +index f184cdca938d..5fcbacddb9b0 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +@@ -3325,15 +3325,12 @@ int amdgpu_device_suspend(struct drm_device *dev, 
bool fbcon) + } + } + +- amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE); +- amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE); +- +- amdgpu_amdkfd_suspend(adev); +- + amdgpu_ras_suspend(adev); + + r = amdgpu_device_ip_suspend_phase1(adev); + ++ amdgpu_amdkfd_suspend(adev); ++ + /* evict vram memory */ + amdgpu_bo_evict_vram(adev); + +diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +index e310d67c399a..1b0bca9587d0 100644 +--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c ++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +@@ -3034,25 +3034,32 @@ validate_out: + return out; + } + +- +-bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, +- bool fast_validate) ++/* ++ * This must be noinline to ensure anything that deals with FP registers ++ * is contained within this call; previously our compiling with hard-float ++ * would result in fp instructions being emitted outside of the boundaries ++ * of the DC_FP_START/END macros, which makes sense as the compiler has no ++ * idea about what is wrapped and what is not ++ * ++ * This is largely just a workaround to avoid breakage introduced with 5.6, ++ * ideally all fp-using code should be moved into its own file, only that ++ * should be compiled with hard-float, and all code exported from there ++ * should be strictly wrapped with DC_FP_START/END ++ */ ++static noinline bool dcn20_validate_bandwidth_fp(struct dc *dc, ++ struct dc_state *context, bool fast_validate) + { + bool voltage_supported = false; + bool full_pstate_supported = false; + bool dummy_pstate_supported = false; + double p_state_latency_us; + +- DC_FP_START(); + p_state_latency_us = context->bw_ctx.dml.soc.dram_clock_change_latency_us; + context->bw_ctx.dml.soc.disable_dram_clock_change_vactive_support = + dc->debug.disable_dram_clock_change_vactive_support; + + if (fast_validate) { +- voltage_supported = dcn20_validate_bandwidth_internal(dc, context, true); +- +- DC_FP_END(); +- return voltage_supported; ++ return dcn20_validate_bandwidth_internal(dc, context, true); + } + + // Best case, we support full UCLK switch latency +@@ -3081,7 +3088,15 @@ bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, + + restore_dml_state: + context->bw_ctx.dml.soc.dram_clock_change_latency_us = p_state_latency_us; ++ return voltage_supported; ++} + ++bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, ++ bool fast_validate) ++{ ++ bool voltage_supported = false; ++ DC_FP_START(); ++ voltage_supported = dcn20_validate_bandwidth_fp(dc, context, fast_validate); + DC_FP_END(); + return voltage_supported; + } +diff --git a/drivers/gpu/drm/ingenic/ingenic-drm.c b/drivers/gpu/drm/ingenic/ingenic-drm.c +index 6d47ef7b148c..bcba2f024842 100644 +--- a/drivers/gpu/drm/ingenic/ingenic-drm.c ++++ b/drivers/gpu/drm/ingenic/ingenic-drm.c +@@ -843,6 +843,7 @@ static const struct of_device_id ingenic_drm_of_match[] = { + { .compatible = "ingenic,jz4770-lcd", .data = &jz4770_soc_info }, + { /* sentinel */ }, + }; ++MODULE_DEVICE_TABLE(of, ingenic_drm_of_match); + + static struct platform_driver ingenic_drm_driver = { + .driver = { +diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c +index c7bc9db5b192..17a638f15082 100644 +--- a/drivers/hid/usbhid/hid-core.c ++++ b/drivers/hid/usbhid/hid-core.c +@@ -682,16 +682,21 @@ static int usbhid_open(struct hid_device *hid) + struct usbhid_device *usbhid = hid->driver_data; + int res; + ++ 
mutex_lock(&usbhid->mutex); ++ + set_bit(HID_OPENED, &usbhid->iofl); + +- if (hid->quirks & HID_QUIRK_ALWAYS_POLL) +- return 0; ++ if (hid->quirks & HID_QUIRK_ALWAYS_POLL) { ++ res = 0; ++ goto Done; ++ } + + res = usb_autopm_get_interface(usbhid->intf); + /* the device must be awake to reliably request remote wakeup */ + if (res < 0) { + clear_bit(HID_OPENED, &usbhid->iofl); +- return -EIO; ++ res = -EIO; ++ goto Done; + } + + usbhid->intf->needs_remote_wakeup = 1; +@@ -725,6 +730,9 @@ static int usbhid_open(struct hid_device *hid) + msleep(50); + + clear_bit(HID_RESUME_RUNNING, &usbhid->iofl); ++ ++ Done: ++ mutex_unlock(&usbhid->mutex); + return res; + } + +@@ -732,6 +740,8 @@ static void usbhid_close(struct hid_device *hid) + { + struct usbhid_device *usbhid = hid->driver_data; + ++ mutex_lock(&usbhid->mutex); ++ + /* + * Make sure we don't restart data acquisition due to + * a resumption we no longer care about by avoiding racing +@@ -743,12 +753,13 @@ static void usbhid_close(struct hid_device *hid) + clear_bit(HID_IN_POLLING, &usbhid->iofl); + spin_unlock_irq(&usbhid->lock); + +- if (hid->quirks & HID_QUIRK_ALWAYS_POLL) +- return; ++ if (!(hid->quirks & HID_QUIRK_ALWAYS_POLL)) { ++ hid_cancel_delayed_stuff(usbhid); ++ usb_kill_urb(usbhid->urbin); ++ usbhid->intf->needs_remote_wakeup = 0; ++ } + +- hid_cancel_delayed_stuff(usbhid); +- usb_kill_urb(usbhid->urbin); +- usbhid->intf->needs_remote_wakeup = 0; ++ mutex_unlock(&usbhid->mutex); + } + + /* +@@ -1057,6 +1068,8 @@ static int usbhid_start(struct hid_device *hid) + unsigned int n, insize = 0; + int ret; + ++ mutex_lock(&usbhid->mutex); ++ + clear_bit(HID_DISCONNECTED, &usbhid->iofl); + + usbhid->bufsize = HID_MIN_BUFFER_SIZE; +@@ -1177,6 +1190,8 @@ static int usbhid_start(struct hid_device *hid) + usbhid_set_leds(hid); + device_set_wakeup_enable(&dev->dev, 1); + } ++ ++ mutex_unlock(&usbhid->mutex); + return 0; + + fail: +@@ -1187,6 +1202,7 @@ fail: + usbhid->urbout = NULL; + usbhid->urbctrl = NULL; + hid_free_buffers(dev, hid); ++ mutex_unlock(&usbhid->mutex); + return ret; + } + +@@ -1202,6 +1218,8 @@ static void usbhid_stop(struct hid_device *hid) + usbhid->intf->needs_remote_wakeup = 0; + } + ++ mutex_lock(&usbhid->mutex); ++ + clear_bit(HID_STARTED, &usbhid->iofl); + spin_lock_irq(&usbhid->lock); /* Sync with error and led handlers */ + set_bit(HID_DISCONNECTED, &usbhid->iofl); +@@ -1222,6 +1240,8 @@ static void usbhid_stop(struct hid_device *hid) + usbhid->urbout = NULL; + + hid_free_buffers(hid_to_usb_dev(hid), hid); ++ ++ mutex_unlock(&usbhid->mutex); + } + + static int usbhid_power(struct hid_device *hid, int lvl) +@@ -1382,6 +1402,7 @@ static int usbhid_probe(struct usb_interface *intf, const struct usb_device_id * + INIT_WORK(&usbhid->reset_work, hid_reset); + timer_setup(&usbhid->io_retry, hid_retry_timeout, 0); + spin_lock_init(&usbhid->lock); ++ mutex_init(&usbhid->mutex); + + ret = hid_add_device(hid); + if (ret) { +diff --git a/drivers/hid/usbhid/usbhid.h b/drivers/hid/usbhid/usbhid.h +index 8620408bd7af..75fe85d3d27a 100644 +--- a/drivers/hid/usbhid/usbhid.h ++++ b/drivers/hid/usbhid/usbhid.h +@@ -80,6 +80,7 @@ struct usbhid_device { + dma_addr_t outbuf_dma; /* Output buffer dma */ + unsigned long last_out; /* record of last output for timeouts */ + ++ struct mutex mutex; /* start/stop/open/close */ + spinlock_t lock; /* fifo spinlock */ + unsigned long iofl; /* I/O flags (CTRL_RUNNING, OUT_RUNNING) */ + struct timer_list io_retry; /* Retry timer */ +diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c 
+index 5ded94b7bf68..cd71e7133944 100644 +--- a/drivers/hid/wacom_sys.c ++++ b/drivers/hid/wacom_sys.c +@@ -319,9 +319,11 @@ static void wacom_feature_mapping(struct hid_device *hdev, + data[0] = field->report->id; + ret = wacom_get_report(hdev, HID_FEATURE_REPORT, + data, n, WAC_CMD_RETRIES); +- if (ret == n) { ++ if (ret == n && features->type == HID_GENERIC) { + ret = hid_report_raw_event(hdev, + HID_FEATURE_REPORT, data, n, 0); ++ } else if (ret == 2 && features->type != HID_GENERIC) { ++ features->touch_max = data[1]; + } else { + features->touch_max = 16; + hid_warn(hdev, "wacom_feature_mapping: " +diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c +index d99a9d407671..1c96809b51c9 100644 +--- a/drivers/hid/wacom_wac.c ++++ b/drivers/hid/wacom_wac.c +@@ -1427,11 +1427,13 @@ static void wacom_intuos_pro2_bt_pad(struct wacom_wac *wacom) + { + struct input_dev *pad_input = wacom->pad_input; + unsigned char *data = wacom->data; ++ int nbuttons = wacom->features.numbered_buttons; + +- int buttons = data[282] | ((data[281] & 0x40) << 2); ++ int expresskeys = data[282]; ++ int center = (data[281] & 0x40) >> 6; + int ring = data[285] & 0x7F; + bool ringstatus = data[285] & 0x80; +- bool prox = buttons || ringstatus; ++ bool prox = expresskeys || center || ringstatus; + + /* Fix touchring data: userspace expects 0 at left and increasing clockwise */ + ring = 71 - ring; +@@ -1439,7 +1441,8 @@ static void wacom_intuos_pro2_bt_pad(struct wacom_wac *wacom) + if (ring > 71) + ring -= 72; + +- wacom_report_numbered_buttons(pad_input, 9, buttons); ++ wacom_report_numbered_buttons(pad_input, nbuttons, ++ expresskeys | (center << (nbuttons - 1))); + + input_report_abs(pad_input, ABS_WHEEL, ringstatus ? ring : 0); + +@@ -2637,9 +2640,25 @@ static void wacom_wac_finger_pre_report(struct hid_device *hdev, + case HID_DG_TIPSWITCH: + hid_data->last_slot_field = equivalent_usage; + break; ++ case HID_DG_CONTACTCOUNT: ++ hid_data->cc_report = report->id; ++ hid_data->cc_index = i; ++ hid_data->cc_value_index = j; ++ break; + } + } + } ++ ++ if (hid_data->cc_report != 0 && ++ hid_data->cc_index >= 0) { ++ struct hid_field *field = report->field[hid_data->cc_index]; ++ int value = field->value[hid_data->cc_value_index]; ++ if (value) ++ hid_data->num_expected = value; ++ } ++ else { ++ hid_data->num_expected = wacom_wac->features.touch_max; ++ } + } + + static void wacom_wac_finger_report(struct hid_device *hdev, +@@ -2649,7 +2668,6 @@ static void wacom_wac_finger_report(struct hid_device *hdev, + struct wacom_wac *wacom_wac = &wacom->wacom_wac; + struct input_dev *input = wacom_wac->touch_input; + unsigned touch_max = wacom_wac->features.touch_max; +- struct hid_data *hid_data = &wacom_wac->hid_data; + + /* If more packets of data are expected, give us a chance to + * process them rather than immediately syncing a partial +@@ -2663,7 +2681,6 @@ static void wacom_wac_finger_report(struct hid_device *hdev, + + input_sync(input); + wacom_wac->hid_data.num_received = 0; +- hid_data->num_expected = 0; + + /* keep touch state for pen event */ + wacom_wac->shared->touch_down = wacom_wac_finger_count_touches(wacom_wac); +@@ -2738,73 +2755,12 @@ static void wacom_report_events(struct hid_device *hdev, + } + } + +-static void wacom_set_num_expected(struct hid_device *hdev, +- struct hid_report *report, +- int collection_index, +- struct hid_field *field, +- int field_index) +-{ +- struct wacom *wacom = hid_get_drvdata(hdev); +- struct wacom_wac *wacom_wac = &wacom->wacom_wac; +- struct hid_data *hid_data = 
&wacom_wac->hid_data; +- unsigned int original_collection_level = +- hdev->collection[collection_index].level; +- bool end_collection = false; +- int i; +- +- if (hid_data->num_expected) +- return; +- +- // find the contact count value for this segment +- for (i = field_index; i < report->maxfield && !end_collection; i++) { +- struct hid_field *field = report->field[i]; +- unsigned int field_level = +- hdev->collection[field->usage[0].collection_index].level; +- unsigned int j; +- +- if (field_level != original_collection_level) +- continue; +- +- for (j = 0; j < field->maxusage; j++) { +- struct hid_usage *usage = &field->usage[j]; +- +- if (usage->collection_index != collection_index) { +- end_collection = true; +- break; +- } +- if (wacom_equivalent_usage(usage->hid) == HID_DG_CONTACTCOUNT) { +- hid_data->cc_report = report->id; +- hid_data->cc_index = i; +- hid_data->cc_value_index = j; +- +- if (hid_data->cc_report != 0 && +- hid_data->cc_index >= 0) { +- +- struct hid_field *field = +- report->field[hid_data->cc_index]; +- int value = +- field->value[hid_data->cc_value_index]; +- +- if (value) +- hid_data->num_expected = value; +- } +- } +- } +- } +- +- if (hid_data->cc_report == 0 || hid_data->cc_index < 0) +- hid_data->num_expected = wacom_wac->features.touch_max; +-} +- + static int wacom_wac_collection(struct hid_device *hdev, struct hid_report *report, + int collection_index, struct hid_field *field, + int field_index) + { + struct wacom *wacom = hid_get_drvdata(hdev); + +- if (WACOM_FINGER_FIELD(field)) +- wacom_set_num_expected(hdev, report, collection_index, field, +- field_index); + wacom_report_events(hdev, report, collection_index, field_index); + + /* +diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c +index 5eed75cd121f..e5dcbe80cf85 100644 +--- a/drivers/iommu/virtio-iommu.c ++++ b/drivers/iommu/virtio-iommu.c +@@ -453,7 +453,7 @@ static int viommu_add_resv_mem(struct viommu_endpoint *vdev, + if (!region) + return -ENOMEM; + +- list_add(&vdev->resv_regions, ®ion->list); ++ list_add(®ion->list, &vdev->resv_regions); + return 0; + } + +diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c +index 668418d7ea77..f620442addf5 100644 +--- a/drivers/misc/mei/hw-me.c ++++ b/drivers/misc/mei/hw-me.c +@@ -1465,6 +1465,13 @@ static const struct mei_cfg mei_me_pch12_cfg = { + MEI_CFG_DMA_128, + }; + ++/* LBG with quirk for SPS Firmware exclusion */ ++static const struct mei_cfg mei_me_pch12_sps_cfg = { ++ MEI_CFG_PCH8_HFS, ++ MEI_CFG_FW_VER_SUPP, ++ MEI_CFG_FW_SPS, ++}; ++ + /* Tiger Lake and newer devices */ + static const struct mei_cfg mei_me_pch15_cfg = { + MEI_CFG_PCH8_HFS, +@@ -1487,6 +1494,7 @@ static const struct mei_cfg *const mei_cfg_list[] = { + [MEI_ME_PCH8_CFG] = &mei_me_pch8_cfg, + [MEI_ME_PCH8_SPS_CFG] = &mei_me_pch8_sps_cfg, + [MEI_ME_PCH12_CFG] = &mei_me_pch12_cfg, ++ [MEI_ME_PCH12_SPS_CFG] = &mei_me_pch12_sps_cfg, + [MEI_ME_PCH15_CFG] = &mei_me_pch15_cfg, + }; + +diff --git a/drivers/misc/mei/hw-me.h b/drivers/misc/mei/hw-me.h +index 4a8d4dcd5a91..b6b94e211464 100644 +--- a/drivers/misc/mei/hw-me.h ++++ b/drivers/misc/mei/hw-me.h +@@ -80,6 +80,9 @@ struct mei_me_hw { + * servers platforms with quirk for + * SPS firmware exclusion. + * @MEI_ME_PCH12_CFG: Platform Controller Hub Gen12 and newer ++ * @MEI_ME_PCH12_SPS_CFG: Platform Controller Hub Gen12 and newer ++ * servers platforms with quirk for ++ * SPS firmware exclusion. + * @MEI_ME_PCH15_CFG: Platform Controller Hub Gen15 and newer + * @MEI_ME_NUM_CFG: Upper Sentinel. 
+ */ +@@ -93,6 +96,7 @@ enum mei_cfg_idx { + MEI_ME_PCH8_CFG, + MEI_ME_PCH8_SPS_CFG, + MEI_ME_PCH12_CFG, ++ MEI_ME_PCH12_SPS_CFG, + MEI_ME_PCH15_CFG, + MEI_ME_NUM_CFG, + }; +diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c +index 2eb7b2968e5d..0dd2922aa06d 100644 +--- a/drivers/misc/mei/pci-me.c ++++ b/drivers/misc/mei/pci-me.c +@@ -79,7 +79,7 @@ static const struct pci_device_id mei_me_pci_tbl[] = { + {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_2, MEI_ME_PCH8_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, MEI_ME_PCH8_SPS_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, MEI_ME_PCH8_SPS_CFG)}, +- {MEI_PCI_DEVICE(MEI_DEV_ID_LBG, MEI_ME_PCH12_CFG)}, ++ {MEI_PCI_DEVICE(MEI_DEV_ID_LBG, MEI_ME_PCH12_SPS_CFG)}, + + {MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, MEI_ME_PCH8_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, MEI_ME_PCH8_CFG)}, +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c +index d28b406a26b1..d0ddd08c4112 100644 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c +@@ -6662,7 +6662,7 @@ static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp, + int rc; + + if (!mem_size) +- return 0; ++ return -EINVAL; + + ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE); + if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) { +@@ -9794,6 +9794,7 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev, + netdev_features_t features) + { + struct bnxt *bp = netdev_priv(dev); ++ netdev_features_t vlan_features; + + if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp)) + features &= ~NETIF_F_NTUPLE; +@@ -9810,12 +9811,14 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev, + /* Both CTAG and STAG VLAN accelaration on the RX side have to be + * turned on or off together. 
+ */ +- if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) != +- (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) { ++ vlan_features = features & (NETIF_F_HW_VLAN_CTAG_RX | ++ NETIF_F_HW_VLAN_STAG_RX); ++ if (vlan_features != (NETIF_F_HW_VLAN_CTAG_RX | ++ NETIF_F_HW_VLAN_STAG_RX)) { + if (dev->features & NETIF_F_HW_VLAN_CTAG_RX) + features &= ~(NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_STAG_RX); +- else ++ else if (vlan_features) + features |= NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_STAG_RX; + } +@@ -12173,12 +12176,15 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev) + bnxt_ulp_start(bp, err); + } + +- if (result != PCI_ERS_RESULT_RECOVERED && netif_running(netdev)) +- dev_close(netdev); ++ if (result != PCI_ERS_RESULT_RECOVERED) { ++ if (netif_running(netdev)) ++ dev_close(netdev); ++ pci_disable_device(pdev); ++ } + + rtnl_unlock(); + +- return PCI_ERS_RESULT_RECOVERED; ++ return result; + } + + /** +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h +index 63b170658532..ef0268649822 100644 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h +@@ -1064,7 +1064,6 @@ struct bnxt_vf_info { + #define BNXT_VF_LINK_FORCED 0x4 + #define BNXT_VF_LINK_UP 0x8 + #define BNXT_VF_TRUST 0x10 +- u32 func_flags; /* func cfg flags */ + u32 min_tx_rate; + u32 max_tx_rate; + void *hwrm_cmd_req_addr; +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h +index 95f893f2a74d..d5c8bd49383a 100644 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h +@@ -43,7 +43,7 @@ static inline void bnxt_link_bp_to_dl(struct bnxt *bp, struct devlink *dl) + #define BNXT_NVM_CFG_VER_BITS 24 + #define BNXT_NVM_CFG_VER_BYTES 4 + +-#define BNXT_MSIX_VEC_MAX 1280 ++#define BNXT_MSIX_VEC_MAX 512 + #define BNXT_MSIX_VEC_MIN_MAX 128 + + enum bnxt_nvm_dir_type { +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +index 2aba1e02a8f4..1259d135c9cc 100644 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +@@ -85,11 +85,10 @@ int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting) + if (old_setting == setting) + return 0; + +- func_flags = vf->func_flags; + if (setting) +- func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE; ++ func_flags = FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE; + else +- func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE; ++ func_flags = FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE; + /*TODO: if the driver supports VLAN filter on guest VLAN, + * the spoof check should also include vlan anti-spoofing + */ +@@ -98,7 +97,6 @@ int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting) + req.flags = cpu_to_le32(func_flags); + rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (!rc) { +- vf->func_flags = func_flags; + if (setting) + vf->flags |= BNXT_VF_SPOOFCHK; + else +@@ -230,7 +228,6 @@ int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac) + memcpy(vf->mac_addr, mac, ETH_ALEN); + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); + req.fid = cpu_to_le16(vf->fw_fid); +- req.flags = cpu_to_le32(vf->func_flags); + req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR); + memcpy(req.dflt_mac_addr, mac, ETH_ALEN); + return hwrm_send_message(bp, &req, 
sizeof(req), HWRM_CMD_TIMEOUT); +@@ -268,7 +265,6 @@ int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos, + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); + req.fid = cpu_to_le16(vf->fw_fid); +- req.flags = cpu_to_le32(vf->func_flags); + req.dflt_vlan = cpu_to_le16(vlan_tag); + req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN); + rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); +@@ -307,7 +303,6 @@ int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate, + return 0; + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); + req.fid = cpu_to_le16(vf->fw_fid); +- req.flags = cpu_to_le32(vf->func_flags); + req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW); + req.max_bw = cpu_to_le32(max_tx_rate); + req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW); +@@ -479,7 +474,6 @@ static void __bnxt_set_vf_params(struct bnxt *bp, int vf_id) + vf = &bp->pf.vf[vf_id]; + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); + req.fid = cpu_to_le16(vf->fw_fid); +- req.flags = cpu_to_le32(vf->func_flags); + + if (is_valid_ether_addr(vf->mac_addr)) { + req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR); +diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c +index b3a51935e8e0..f42382c2ecd0 100644 +--- a/drivers/net/ethernet/cadence/macb_main.c ++++ b/drivers/net/ethernet/cadence/macb_main.c +@@ -334,8 +334,10 @@ static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum) + int status; + + status = pm_runtime_get_sync(&bp->pdev->dev); +- if (status < 0) ++ if (status < 0) { ++ pm_runtime_put_noidle(&bp->pdev->dev); + goto mdio_pm_exit; ++ } + + status = macb_mdio_wait_for_idle(bp); + if (status < 0) +@@ -386,8 +388,10 @@ static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum, + int status; + + status = pm_runtime_get_sync(&bp->pdev->dev); +- if (status < 0) ++ if (status < 0) { ++ pm_runtime_put_noidle(&bp->pdev->dev); + goto mdio_pm_exit; ++ } + + status = macb_mdio_wait_for_idle(bp); + if (status < 0) +@@ -3803,8 +3807,10 @@ static int at91ether_open(struct net_device *dev) + int ret; + + ret = pm_runtime_get_sync(&lp->pdev->dev); +- if (ret < 0) ++ if (ret < 0) { ++ pm_runtime_put_noidle(&lp->pdev->dev); + return ret; ++ } + + /* Clear internal statistics */ + ctl = macb_readl(lp, NCR); +@@ -4159,15 +4165,9 @@ static int fu540_c000_clk_init(struct platform_device *pdev, struct clk **pclk, + + static int fu540_c000_init(struct platform_device *pdev) + { +- struct resource *res; +- +- res = platform_get_resource(pdev, IORESOURCE_MEM, 1); +- if (!res) +- return -ENODEV; +- +- mgmt->reg = ioremap(res->start, resource_size(res)); +- if (!mgmt->reg) +- return -ENOMEM; ++ mgmt->reg = devm_platform_ioremap_resource(pdev, 1); ++ if (IS_ERR(mgmt->reg)) ++ return PTR_ERR(mgmt->reg); + + return macb_init(pdev); + } +diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c +index cab3d17e0e1a..d6eebd640753 100644 +--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c ++++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c +@@ -2202,6 +2202,9 @@ static void ethofld_hard_xmit(struct net_device *dev, + if (unlikely(skip_eotx_wr)) { + start = (u64 *)wr; + eosw_txq->state = next_state; ++ eosw_txq->cred -= wrlen16; ++ eosw_txq->ncompl++; ++ eosw_txq->last_compl = 0; + goto write_wr_headers; + } + +@@ -2360,6 +2363,34 @@ netdev_tx_t t4_start_xmit(struct sk_buff *skb, struct net_device *dev) + return cxgb4_eth_xmit(skb, dev); + } + 
++static void eosw_txq_flush_pending_skbs(struct sge_eosw_txq *eosw_txq) ++{ ++ int pktcount = eosw_txq->pidx - eosw_txq->last_pidx; ++ int pidx = eosw_txq->pidx; ++ struct sk_buff *skb; ++ ++ if (!pktcount) ++ return; ++ ++ if (pktcount < 0) ++ pktcount += eosw_txq->ndesc; ++ ++ while (pktcount--) { ++ pidx--; ++ if (pidx < 0) ++ pidx += eosw_txq->ndesc; ++ ++ skb = eosw_txq->desc[pidx].skb; ++ if (skb) { ++ dev_consume_skb_any(skb); ++ eosw_txq->desc[pidx].skb = NULL; ++ eosw_txq->inuse--; ++ } ++ } ++ ++ eosw_txq->pidx = eosw_txq->last_pidx + 1; ++} ++ + /** + * cxgb4_ethofld_send_flowc - Send ETHOFLD flowc request to bind eotid to tc. + * @dev - netdevice +@@ -2435,9 +2466,11 @@ int cxgb4_ethofld_send_flowc(struct net_device *dev, u32 eotid, u32 tc) + FW_FLOWC_MNEM_EOSTATE_CLOSING : + FW_FLOWC_MNEM_EOSTATE_ESTABLISHED); + +- eosw_txq->cred -= len16; +- eosw_txq->ncompl++; +- eosw_txq->last_compl = 0; ++ /* Free up any pending skbs to ensure there's room for ++ * termination FLOWC. ++ */ ++ if (tc == FW_SCHED_CLS_NONE) ++ eosw_txq_flush_pending_skbs(eosw_txq); + + ret = eosw_txq_enqueue(eosw_txq, skb); + if (ret) { +diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c b/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c +index ebc635f8a4cc..15f37c5b8dc1 100644 +--- a/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c ++++ b/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c +@@ -74,8 +74,8 @@ err_pci_mem_reg: + pci_disable_device(pdev); + err_pci_enable: + err_mdiobus_alloc: +- iounmap(port_regs); + err_hw_alloc: ++ iounmap(port_regs); + err_ioremap: + return err; + } +diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c +index 35478cba2aa5..4344a59c823f 100644 +--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c ++++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c +@@ -1422,6 +1422,9 @@ int mvpp2_ethtool_cls_rule_del(struct mvpp2_port *port, + struct mvpp2_ethtool_fs *efs; + int ret; + ++ if (info->fs.location >= MVPP2_N_RFS_ENTRIES_PER_FLOW) ++ return -EINVAL; ++ + efs = port->rfs_rules[info->fs.location]; + if (!efs) + return -EINVAL; +diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +index 72133cbe55d4..eb78a948bee3 100644 +--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c ++++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +@@ -4325,6 +4325,8 @@ static int mvpp2_ethtool_get_rxfh_context(struct net_device *dev, u32 *indir, + + if (!mvpp22_rss_is_supported()) + return -EOPNOTSUPP; ++ if (rss_context >= MVPP22_N_RSS_TABLES) ++ return -EINVAL; + + if (hfunc) + *hfunc = ETH_RSS_HASH_CRC32; +diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c +index 5716c3d2bb86..c72c4e1ea383 100644 +--- a/drivers/net/ethernet/mellanox/mlx4/main.c ++++ b/drivers/net/ethernet/mellanox/mlx4/main.c +@@ -2550,6 +2550,7 @@ static int mlx4_allocate_default_counters(struct mlx4_dev *dev) + + if (!err || err == -ENOSPC) { + priv->def_counter[port] = idx; ++ err = 0; + } else if (err == -ENOENT) { + err = 0; + continue; +@@ -2600,7 +2601,8 @@ int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx, u8 usage) + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); + if (!err) + *idx = get_param_l(&out_param); +- ++ if (WARN_ON(err == -ENOSPC)) ++ err = -EINVAL; + return err; + } + return __mlx4_counter_alloc(dev, idx); +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +index 
34cba97f7bf4..cede5bdfd598 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +@@ -888,7 +888,6 @@ static void cmd_work_handler(struct work_struct *work) + } + + cmd->ent_arr[ent->idx] = ent; +- set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state); + lay = get_inst(cmd, ent->idx); + ent->lay = lay; + memset(lay, 0, sizeof(*lay)); +@@ -910,6 +909,7 @@ static void cmd_work_handler(struct work_struct *work) + + if (ent->callback) + schedule_delayed_work(&ent->cb_timeout_work, cb_timeout); ++ set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state); + + /* Skip sending command to fw if internal error */ + if (pci_channel_offline(dev->pdev) || +@@ -922,6 +922,10 @@ static void cmd_work_handler(struct work_struct *work) + MLX5_SET(mbox_out, ent->out, syndrome, drv_synd); + + mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true); ++ /* no doorbell, no need to keep the entry */ ++ free_ent(cmd, ent->idx); ++ if (ent->callback) ++ free_cmd(ent); + return; + } + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +index ffc193c4ad43..2ad0d09cc9bd 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +@@ -1692,19 +1692,14 @@ static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv) + + static int mlx5e_init_ul_rep_rx(struct mlx5e_priv *priv) + { +- int err = mlx5e_init_rep_rx(priv); +- +- if (err) +- return err; +- + mlx5e_create_q_counters(priv); +- return 0; ++ return mlx5e_init_rep_rx(priv); + } + + static void mlx5e_cleanup_ul_rep_rx(struct mlx5e_priv *priv) + { +- mlx5e_destroy_q_counters(priv); + mlx5e_cleanup_rep_rx(priv); ++ mlx5e_destroy_q_counters(priv); + } + + static int mlx5e_init_uplink_rep_tx(struct mlx5e_rep_priv *rpriv) +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c +index 095ec7b1399d..7c77378accf0 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c +@@ -689,6 +689,12 @@ static void dr_cq_event(struct mlx5_core_cq *mcq, + pr_info("CQ event %u on CQ #%u\n", event, mcq->cqn); + } + ++static void dr_cq_complete(struct mlx5_core_cq *mcq, ++ struct mlx5_eqe *eqe) ++{ ++ pr_err("CQ completion CQ: #%u\n", mcq->cqn); ++} ++ + static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev, + struct mlx5_uars_page *uar, + size_t ncqe) +@@ -750,6 +756,7 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev, + mlx5_fill_page_frag_array(&cq->wq_ctrl.buf, pas); + + cq->mcq.event = dr_cq_event; ++ cq->mcq.comp = dr_cq_complete; + + err = mlx5_core_create_cq(mdev, &cq->mcq, in, inlen, out, sizeof(out)); + kvfree(in); +@@ -761,7 +768,12 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev, + cq->mcq.set_ci_db = cq->wq_ctrl.db.db; + cq->mcq.arm_db = cq->wq_ctrl.db.db + 1; + *cq->mcq.set_ci_db = 0; +- *cq->mcq.arm_db = 0; ++ ++ /* set no-zero value, in order to avoid the HW to run db-recovery on ++ * CQ that used in polling mode. 
++ */ ++ *cq->mcq.arm_db = cpu_to_be32(2 << 28); ++ + cq->mcq.vector = 0; + cq->mcq.irqn = irqn; + cq->mcq.uar = uar; +diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c +index e993159e8e4c..295b27112d36 100644 +--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c ++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c +@@ -986,8 +986,9 @@ mlxsw_sp_acl_tcam_vchunk_create(struct mlxsw_sp *mlxsw_sp, + unsigned int priority, + struct mlxsw_afk_element_usage *elusage) + { ++ struct mlxsw_sp_acl_tcam_vchunk *vchunk, *vchunk2; + struct mlxsw_sp_acl_tcam_vregion *vregion; +- struct mlxsw_sp_acl_tcam_vchunk *vchunk; ++ struct list_head *pos; + int err; + + if (priority == MLXSW_SP_ACL_TCAM_CATCHALL_PRIO) +@@ -1025,7 +1026,14 @@ mlxsw_sp_acl_tcam_vchunk_create(struct mlxsw_sp *mlxsw_sp, + } + + mlxsw_sp_acl_tcam_rehash_ctx_vregion_changed(vregion); +- list_add_tail(&vchunk->list, &vregion->vchunk_list); ++ ++ /* Position the vchunk inside the list according to priority */ ++ list_for_each(pos, &vregion->vchunk_list) { ++ vchunk2 = list_entry(pos, typeof(*vchunk2), list); ++ if (vchunk2->priority > priority) ++ break; ++ } ++ list_add_tail(&vchunk->list, pos); + mutex_unlock(&vregion->lock); + + return vchunk; +diff --git a/drivers/net/ethernet/netronome/nfp/abm/main.c b/drivers/net/ethernet/netronome/nfp/abm/main.c +index 9183b3e85d21..354efffac0f9 100644 +--- a/drivers/net/ethernet/netronome/nfp/abm/main.c ++++ b/drivers/net/ethernet/netronome/nfp/abm/main.c +@@ -283,6 +283,7 @@ nfp_abm_vnic_set_mac(struct nfp_pf *pf, struct nfp_abm *abm, struct nfp_net *nn, + if (!nfp_nsp_has_hwinfo_lookup(nsp)) { + nfp_warn(pf->cpp, "NSP doesn't support PF MAC generation\n"); + eth_hw_addr_random(nn->dp.netdev); ++ nfp_nsp_close(nsp); + return; + } + +diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c +index 3fd43d30b20d..a1066fbb93b5 100644 +--- a/drivers/net/ethernet/toshiba/tc35815.c ++++ b/drivers/net/ethernet/toshiba/tc35815.c +@@ -643,7 +643,7 @@ static int tc_mii_probe(struct net_device *dev) + linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, mask); + linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, mask); + } +- linkmode_and(phydev->supported, phydev->supported, mask); ++ linkmode_andnot(phydev->supported, phydev->supported, mask); + linkmode_copy(phydev->advertising, phydev->supported); + + lp->link = 0; +diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c +index 35aa7b0a2aeb..11028ef8be4e 100644 +--- a/drivers/net/macsec.c ++++ b/drivers/net/macsec.c +@@ -1226,7 +1226,8 @@ static struct crypto_aead *macsec_alloc_tfm(char *key, int key_len, int icv_len) + struct crypto_aead *tfm; + int ret; + +- tfm = crypto_alloc_aead("gcm(aes)", 0, 0); ++ /* Pick a sync gcm(aes) cipher to ensure order is preserved. 
*/ ++ tfm = crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC); + + if (IS_ERR(tfm)) + return tfm; +diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c +index ac72a324fcd1..b1d771325c57 100644 +--- a/drivers/net/phy/dp83640.c ++++ b/drivers/net/phy/dp83640.c +@@ -1120,7 +1120,7 @@ static struct dp83640_clock *dp83640_clock_get_bus(struct mii_bus *bus) + goto out; + } + dp83640_clock_init(clock, bus); +- list_add_tail(&phyter_clocks, &clock->list); ++ list_add_tail(&clock->list, &phyter_clocks); + out: + mutex_unlock(&phyter_clocks_lock); + +diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c +index 64c9f3bba2cd..e2658dace15d 100644 +--- a/drivers/net/phy/marvell10g.c ++++ b/drivers/net/phy/marvell10g.c +@@ -44,6 +44,9 @@ enum { + MV_PCS_PAIRSWAP_AB = 0x0002, + MV_PCS_PAIRSWAP_NONE = 0x0003, + ++ /* Temperature read register (88E2110 only) */ ++ MV_PCS_TEMP = 0x8042, ++ + /* These registers appear at 0x800X and 0xa00X - the 0xa00X control + * registers appear to set themselves to the 0x800X when AN is + * restarted, but status registers appear readable from either. +@@ -54,6 +57,7 @@ enum { + /* Vendor2 MMD registers */ + MV_V2_PORT_CTRL = 0xf001, + MV_V2_PORT_CTRL_PWRDOWN = 0x0800, ++ /* Temperature control/read registers (88X3310 only) */ + MV_V2_TEMP_CTRL = 0xf08a, + MV_V2_TEMP_CTRL_MASK = 0xc000, + MV_V2_TEMP_CTRL_SAMPLE = 0x0000, +@@ -79,6 +83,24 @@ static umode_t mv3310_hwmon_is_visible(const void *data, + return 0; + } + ++static int mv3310_hwmon_read_temp_reg(struct phy_device *phydev) ++{ ++ return phy_read_mmd(phydev, MDIO_MMD_VEND2, MV_V2_TEMP); ++} ++ ++static int mv2110_hwmon_read_temp_reg(struct phy_device *phydev) ++{ ++ return phy_read_mmd(phydev, MDIO_MMD_PCS, MV_PCS_TEMP); ++} ++ ++static int mv10g_hwmon_read_temp_reg(struct phy_device *phydev) ++{ ++ if (phydev->drv->phy_id == MARVELL_PHY_ID_88X3310) ++ return mv3310_hwmon_read_temp_reg(phydev); ++ else /* MARVELL_PHY_ID_88E2110 */ ++ return mv2110_hwmon_read_temp_reg(phydev); ++} ++ + static int mv3310_hwmon_read(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long *value) + { +@@ -91,7 +113,7 @@ static int mv3310_hwmon_read(struct device *dev, enum hwmon_sensor_types type, + } + + if (type == hwmon_temp && attr == hwmon_temp_input) { +- temp = phy_read_mmd(phydev, MDIO_MMD_VEND2, MV_V2_TEMP); ++ temp = mv10g_hwmon_read_temp_reg(phydev); + if (temp < 0) + return temp; + +@@ -144,6 +166,9 @@ static int mv3310_hwmon_config(struct phy_device *phydev, bool enable) + u16 val; + int ret; + ++ if (phydev->drv->phy_id != MARVELL_PHY_ID_88X3310) ++ return 0; ++ + ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, MV_V2_TEMP, + MV_V2_TEMP_UNKNOWN); + if (ret < 0) +diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c +index 6c738a271257..4bb8552a00d3 100644 +--- a/drivers/net/usb/qmi_wwan.c ++++ b/drivers/net/usb/qmi_wwan.c +@@ -1359,6 +1359,7 @@ static const struct usb_device_id products[] = { + {QMI_FIXED_INTF(0x413c, 0x81b3, 8)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */ + {QMI_FIXED_INTF(0x413c, 0x81b6, 8)}, /* Dell Wireless 5811e */ + {QMI_FIXED_INTF(0x413c, 0x81b6, 10)}, /* Dell Wireless 5811e */ ++ {QMI_FIXED_INTF(0x413c, 0x81cc, 8)}, /* Dell Wireless 5816e */ + {QMI_FIXED_INTF(0x413c, 0x81d7, 0)}, /* Dell Wireless 5821e */ + {QMI_FIXED_INTF(0x413c, 0x81d7, 1)}, /* Dell Wireless 5821e preproduction config */ + {QMI_FIXED_INTF(0x413c, 0x81e0, 0)}, /* Dell Wireless 5821e with eSIM support*/ +diff --git 
a/drivers/net/wireguard/queueing.c b/drivers/net/wireguard/queueing.c +index 5c964fcb994e..71b8e80b58e1 100644 +--- a/drivers/net/wireguard/queueing.c ++++ b/drivers/net/wireguard/queueing.c +@@ -35,8 +35,10 @@ int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function, + if (multicore) { + queue->worker = wg_packet_percpu_multicore_worker_alloc( + function, queue); +- if (!queue->worker) ++ if (!queue->worker) { ++ ptr_ring_cleanup(&queue->ring, NULL); + return -ENOMEM; ++ } + } else { + INIT_WORK(&queue->work, function); + } +diff --git a/drivers/net/wireguard/receive.c b/drivers/net/wireguard/receive.c +index da3b782ab7d3..2566e13a292d 100644 +--- a/drivers/net/wireguard/receive.c ++++ b/drivers/net/wireguard/receive.c +@@ -393,13 +393,11 @@ static void wg_packet_consume_data_done(struct wg_peer *peer, + len = ntohs(ip_hdr(skb)->tot_len); + if (unlikely(len < sizeof(struct iphdr))) + goto dishonest_packet_size; +- if (INET_ECN_is_ce(PACKET_CB(skb)->ds)) +- IP_ECN_set_ce(ip_hdr(skb)); ++ INET_ECN_decapsulate(skb, PACKET_CB(skb)->ds, ip_hdr(skb)->tos); + } else if (skb->protocol == htons(ETH_P_IPV6)) { + len = ntohs(ipv6_hdr(skb)->payload_len) + + sizeof(struct ipv6hdr); +- if (INET_ECN_is_ce(PACKET_CB(skb)->ds)) +- IP6_ECN_set_ce(skb, ipv6_hdr(skb)); ++ INET_ECN_decapsulate(skb, PACKET_CB(skb)->ds, ipv6_get_dsfield(ipv6_hdr(skb))); + } else { + goto dishonest_packet_type; + } +@@ -518,6 +516,8 @@ void wg_packet_decrypt_worker(struct work_struct *work) + &PACKET_CB(skb)->keypair->receiving)) ? + PACKET_STATE_CRYPTED : PACKET_STATE_DEAD; + wg_queue_enqueue_per_peer_napi(skb, state); ++ if (need_resched()) ++ cond_resched(); + } + } + +diff --git a/drivers/net/wireguard/send.c b/drivers/net/wireguard/send.c +index 7348c10cbae3..e8a7d0a0cb88 100644 +--- a/drivers/net/wireguard/send.c ++++ b/drivers/net/wireguard/send.c +@@ -281,6 +281,8 @@ void wg_packet_tx_worker(struct work_struct *work) + + wg_noise_keypair_put(keypair, false); + wg_peer_put(peer); ++ if (need_resched()) ++ cond_resched(); + } + } + +@@ -305,6 +307,8 @@ void wg_packet_encrypt_worker(struct work_struct *work) + wg_queue_enqueue_per_peer(&PACKET_PEER(first)->tx_queue, first, + state); + ++ if (need_resched()) ++ cond_resched(); + } + } + +diff --git a/drivers/net/wireguard/socket.c b/drivers/net/wireguard/socket.c +index b0d6541582d3..f9018027fc13 100644 +--- a/drivers/net/wireguard/socket.c ++++ b/drivers/net/wireguard/socket.c +@@ -76,12 +76,6 @@ static int send4(struct wg_device *wg, struct sk_buff *skb, + net_dbg_ratelimited("%s: No route to %pISpfsc, error %d\n", + wg->dev->name, &endpoint->addr, ret); + goto err; +- } else if (unlikely(rt->dst.dev == skb->dev)) { +- ip_rt_put(rt); +- ret = -ELOOP; +- net_dbg_ratelimited("%s: Avoiding routing loop to %pISpfsc\n", +- wg->dev->name, &endpoint->addr); +- goto err; + } + if (cache) + dst_cache_set_ip4(cache, &rt->dst, fl.saddr); +@@ -149,12 +143,6 @@ static int send6(struct wg_device *wg, struct sk_buff *skb, + net_dbg_ratelimited("%s: No route to %pISpfsc, error %d\n", + wg->dev->name, &endpoint->addr, ret); + goto err; +- } else if (unlikely(dst->dev == skb->dev)) { +- dst_release(dst); +- ret = -ELOOP; +- net_dbg_ratelimited("%s: Avoiding routing loop to %pISpfsc\n", +- wg->dev->name, &endpoint->addr); +- goto err; + } + if (cache) + dst_cache_set_ip6(cache, dst, &fl.saddr); +diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c +index fb4c35a43065..84f20369d846 100644 +--- a/drivers/nvme/host/core.c ++++ b/drivers/nvme/host/core.c +@@ -1075,8 
+1075,17 @@ static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid, + + status = nvme_submit_sync_cmd(ctrl->admin_q, &c, data, + NVME_IDENTIFY_DATA_SIZE); +- if (status) ++ if (status) { ++ dev_warn(ctrl->device, ++ "Identify Descriptors failed (%d)\n", status); ++ /* ++ * Don't treat an error as fatal, as we potentially already ++ * have a NGUID or EUI-64. ++ */ ++ if (status > 0 && !(status & NVME_SC_DNR)) ++ status = 0; + goto free_data; ++ } + + for (pos = 0; pos < NVME_IDENTIFY_DATA_SIZE; pos += len) { + struct nvme_ns_id_desc *cur = data + pos; +@@ -1734,26 +1743,15 @@ static void nvme_config_write_zeroes(struct gendisk *disk, struct nvme_ns *ns) + static int nvme_report_ns_ids(struct nvme_ctrl *ctrl, unsigned int nsid, + struct nvme_id_ns *id, struct nvme_ns_ids *ids) + { +- int ret = 0; +- + memset(ids, 0, sizeof(*ids)); + + if (ctrl->vs >= NVME_VS(1, 1, 0)) + memcpy(ids->eui64, id->eui64, sizeof(id->eui64)); + if (ctrl->vs >= NVME_VS(1, 2, 0)) + memcpy(ids->nguid, id->nguid, sizeof(id->nguid)); +- if (ctrl->vs >= NVME_VS(1, 3, 0)) { +- /* Don't treat error as fatal we potentially +- * already have a NGUID or EUI-64 +- */ +- ret = nvme_identify_ns_descs(ctrl, nsid, ids); +- if (ret) +- dev_warn(ctrl->device, +- "Identify Descriptors failed (%d)\n", ret); +- if (ret > 0) +- ret = 0; +- } +- return ret; ++ if (ctrl->vs >= NVME_VS(1, 3, 0)) ++ return nvme_identify_ns_descs(ctrl, nsid, ids); ++ return 0; + } + + static bool nvme_ns_ids_valid(struct nvme_ns_ids *ids) +diff --git a/drivers/staging/gasket/gasket_core.c b/drivers/staging/gasket/gasket_core.c +index be6b50f454b4..d3f255c740e9 100644 +--- a/drivers/staging/gasket/gasket_core.c ++++ b/drivers/staging/gasket/gasket_core.c +@@ -926,6 +926,10 @@ do_map_region(const struct gasket_dev *gasket_dev, struct vm_area_struct *vma, + gasket_get_bar_index(gasket_dev, + (vma->vm_pgoff << PAGE_SHIFT) + + driver_desc->legacy_mmap_address_offset); ++ ++ if (bar_index < 0) ++ return DO_MAP_REGION_INVALID; ++ + phys_base = gasket_dev->bar_data[bar_index].phys_base + phys_offset; + while (mapped_bytes < map_length) { + /* +diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c +index b341fc60c4ba..114fbe51527c 100644 +--- a/drivers/thunderbolt/usb4.c ++++ b/drivers/thunderbolt/usb4.c +@@ -182,6 +182,9 @@ static int usb4_switch_op(struct tb_switch *sw, u16 opcode, u8 *status) + return ret; + + ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1); ++ if (ret) ++ return ret; ++ + if (val & ROUTER_CS_26_ONS) + return -EOPNOTSUPP; + +diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c +index 7a9b360b0438..1d8b6993a435 100644 +--- a/drivers/tty/serial/xilinx_uartps.c ++++ b/drivers/tty/serial/xilinx_uartps.c +@@ -1471,6 +1471,7 @@ static int cdns_uart_probe(struct platform_device *pdev) + cdns_uart_uart_driver.nr = CDNS_UART_NR_PORTS; + #ifdef CONFIG_SERIAL_XILINX_PS_UART_CONSOLE + cdns_uart_uart_driver.cons = &cdns_uart_console; ++ cdns_uart_console.index = id; + #endif + + rc = uart_register_driver(&cdns_uart_uart_driver); +diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c +index cc1a04191365..699d8b56cbe7 100644 +--- a/drivers/tty/vt/vt.c ++++ b/drivers/tty/vt/vt.c +@@ -365,9 +365,14 @@ static struct uni_screen *vc_uniscr_alloc(unsigned int cols, unsigned int rows) + return uniscr; + } + ++static void vc_uniscr_free(struct uni_screen *uniscr) ++{ ++ vfree(uniscr); ++} ++ + static void vc_uniscr_set(struct vc_data *vc, struct uni_screen *new_uniscr) + { +- 
vfree(vc->vc_uni_screen); ++ vc_uniscr_free(vc->vc_uni_screen); + vc->vc_uni_screen = new_uniscr; + } + +@@ -1230,7 +1235,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc, + err = resize_screen(vc, new_cols, new_rows, user); + if (err) { + kfree(newscreen); +- kfree(new_uniscr); ++ vc_uniscr_free(new_uniscr); + return err; + } + +diff --git a/drivers/usb/chipidea/ci_hdrc_msm.c b/drivers/usb/chipidea/ci_hdrc_msm.c +index af648ba6544d..46105457e1ca 100644 +--- a/drivers/usb/chipidea/ci_hdrc_msm.c ++++ b/drivers/usb/chipidea/ci_hdrc_msm.c +@@ -114,7 +114,7 @@ static int ci_hdrc_msm_notify_event(struct ci_hdrc *ci, unsigned event) + hw_write_id_reg(ci, HS_PHY_GENCONFIG_2, + HS_PHY_ULPI_TX_PKT_EN_CLR_FIX, 0); + +- if (!IS_ERR(ci->platdata->vbus_extcon.edev)) { ++ if (!IS_ERR(ci->platdata->vbus_extcon.edev) || ci->role_switch) { + hw_write_id_reg(ci, HS_PHY_GENCONFIG_2, + HS_PHY_SESS_VLD_CTRL_EN, + HS_PHY_SESS_VLD_CTRL_EN); +diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c +index ffd984142171..d63072fee099 100644 +--- a/drivers/usb/serial/garmin_gps.c ++++ b/drivers/usb/serial/garmin_gps.c +@@ -1138,8 +1138,8 @@ static void garmin_read_process(struct garmin_data *garmin_data_p, + send it directly to the tty port */ + if (garmin_data_p->flags & FLAGS_QUEUING) { + pkt_add(garmin_data_p, data, data_length); +- } else if (bulk_data || +- getLayerId(data) == GARMIN_LAYERID_APPL) { ++ } else if (bulk_data || (data_length >= sizeof(u32) && ++ getLayerId(data) == GARMIN_LAYERID_APPL)) { + + spin_lock_irqsave(&garmin_data_p->lock, flags); + garmin_data_p->flags |= APP_RESP_SEEN; +diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c +index 613f91add03d..ce0401d3137f 100644 +--- a/drivers/usb/serial/qcserial.c ++++ b/drivers/usb/serial/qcserial.c +@@ -173,6 +173,7 @@ static const struct usb_device_id id_table[] = { + {DEVICE_SWI(0x413c, 0x81b3)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */ + {DEVICE_SWI(0x413c, 0x81b5)}, /* Dell Wireless 5811e QDL */ + {DEVICE_SWI(0x413c, 0x81b6)}, /* Dell Wireless 5811e QDL */ ++ {DEVICE_SWI(0x413c, 0x81cc)}, /* Dell Wireless 5816e */ + {DEVICE_SWI(0x413c, 0x81cf)}, /* Dell Wireless 5819 */ + {DEVICE_SWI(0x413c, 0x81d0)}, /* Dell Wireless 5819 */ + {DEVICE_SWI(0x413c, 0x81d1)}, /* Dell Wireless 5818 */ +diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h +index 1b23741036ee..37157ed9a881 100644 +--- a/drivers/usb/storage/unusual_uas.h ++++ b/drivers/usb/storage/unusual_uas.h +@@ -28,6 +28,13 @@ + * and don't forget to CC: the USB development list <linux-...@vger.kernel.org> + */ + ++/* Reported-by: Julian Groß <julia...@posteo.de> */ ++UNUSUAL_DEV(0x059f, 0x105f, 0x0000, 0x9999, ++ "LaCie", ++ "2Big Quadra USB3", ++ USB_SC_DEVICE, USB_PR_DEVICE, NULL, ++ US_FL_NO_REPORT_OPCODES), ++ + /* + * Apricorn USB3 dongle sometimes returns "USBSUSBSUSBS" in response to SCSI + * commands in UAS mode. Observed with the 1.28 firmware; are there others? 
+diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c +index bbbbddf71326..da7d5c9e3133 100644 +--- a/fs/ceph/mds_client.c ++++ b/fs/ceph/mds_client.c +@@ -3116,8 +3116,7 @@ static void handle_session(struct ceph_mds_session *session, + void *end = p + msg->front.iov_len; + struct ceph_mds_session_head *h; + u32 op; +- u64 seq; +- unsigned long features = 0; ++ u64 seq, features = 0; + int wake = 0; + bool blacklisted = false; + +@@ -3136,9 +3135,8 @@ static void handle_session(struct ceph_mds_session *session, + goto bad; + /* version >= 3, feature bits */ + ceph_decode_32_safe(&p, end, len, bad); +- ceph_decode_need(&p, end, len, bad); +- memcpy(&features, p, min_t(size_t, len, sizeof(features))); +- p += len; ++ ceph_decode_64_safe(&p, end, features, bad); ++ p += len - sizeof(features); + } + + mutex_lock(&mdsc->mutex); +diff --git a/fs/ceph/quota.c b/fs/ceph/quota.c +index de56dee60540..19507e2fdb57 100644 +--- a/fs/ceph/quota.c ++++ b/fs/ceph/quota.c +@@ -159,8 +159,8 @@ static struct inode *lookup_quotarealm_inode(struct ceph_mds_client *mdsc, + } + + if (IS_ERR(in)) { +- pr_warn("Can't lookup inode %llx (err: %ld)\n", +- realm->ino, PTR_ERR(in)); ++ dout("Can't lookup inode %llx (err: %ld)\n", ++ realm->ino, PTR_ERR(in)); + qri->timeout = jiffies + msecs_to_jiffies(60 * 1000); /* XXX */ + } else { + qri->timeout = 0; +diff --git a/fs/coredump.c b/fs/coredump.c +index 408418e6aa13..478a0d810136 100644 +--- a/fs/coredump.c ++++ b/fs/coredump.c +@@ -788,6 +788,14 @@ void do_coredump(const kernel_siginfo_t *siginfo) + if (displaced) + put_files_struct(displaced); + if (!dump_interrupted()) { ++ /* ++ * umh disabled with CONFIG_STATIC_USERMODEHELPER_PATH="" would ++ * have this set to NULL. ++ */ ++ if (!cprm.file) { ++ pr_info("Core dump to |%s disabled\n", cn.corename); ++ goto close_fail; ++ } + file_start_write(cprm.file); + core_dumped = binfmt->core_dump(&cprm); + file_end_write(cprm.file); +diff --git a/fs/eventpoll.c b/fs/eventpoll.c +index eee3c92a9ebf..b0a097274cfe 100644 +--- a/fs/eventpoll.c ++++ b/fs/eventpoll.c +@@ -1149,6 +1149,10 @@ static inline bool chain_epi_lockless(struct epitem *epi) + { + struct eventpoll *ep = epi->ep; + ++ /* Fast preliminary check */ ++ if (epi->next != EP_UNACTIVE_PTR) ++ return false; ++ + /* Check that the same epi has not been just chained from another CPU */ + if (cmpxchg(&epi->next, EP_UNACTIVE_PTR, NULL) != EP_UNACTIVE_PTR) + return false; +@@ -1215,16 +1219,12 @@ static int ep_poll_callback(wait_queue_entry_t *wait, unsigned mode, int sync, v + * chained in ep->ovflist and requeued later on. + */ + if (READ_ONCE(ep->ovflist) != EP_UNACTIVE_PTR) { +- if (epi->next == EP_UNACTIVE_PTR && +- chain_epi_lockless(epi)) ++ if (chain_epi_lockless(epi)) ++ ep_pm_stay_awake_rcu(epi); ++ } else if (!ep_is_linked(epi)) { ++ /* In the usual case, add event to ready list. 
*/ ++ if (list_add_tail_lockless(&epi->rdllink, &ep->rdllist)) + ep_pm_stay_awake_rcu(epi); +- goto out_unlock; +- } +- +- /* If this file is already in the ready list we exit soon */ +- if (!ep_is_linked(epi) && +- list_add_tail_lockless(&epi->rdllink, &ep->rdllist)) { +- ep_pm_stay_awake_rcu(epi); + } + + /* +@@ -1800,7 +1800,6 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events, + { + int res = 0, eavail, timed_out = 0; + u64 slack = 0; +- bool waiter = false; + wait_queue_entry_t wait; + ktime_t expires, *to = NULL; + +@@ -1845,21 +1844,23 @@ fetch_events: + */ + ep_reset_busy_poll_napi_id(ep); + +- /* +- * We don't have any available event to return to the caller. We need +- * to sleep here, and we will be woken by ep_poll_callback() when events +- * become available. +- */ +- if (!waiter) { +- waiter = true; +- init_waitqueue_entry(&wait, current); +- ++ do { ++ /* ++ * Internally init_wait() uses autoremove_wake_function(), ++ * thus wait entry is removed from the wait queue on each ++ * wakeup. Why it is important? In case of several waiters ++ * each new wakeup will hit the next waiter, giving it the ++ * chance to harvest new event. Otherwise wakeup can be ++ * lost. This is also good performance-wise, because on ++ * normal wakeup path no need to call __remove_wait_queue() ++ * explicitly, thus ep->lock is not taken, which halts the ++ * event delivery. ++ */ ++ init_wait(&wait); + write_lock_irq(&ep->lock); + __add_wait_queue_exclusive(&ep->wq, &wait); + write_unlock_irq(&ep->lock); +- } + +- for (;;) { + /* + * We don't want to sleep if the ep_poll_callback() sends us + * a wakeup in between. That's why we set the task state +@@ -1889,10 +1890,20 @@ fetch_events: + timed_out = 1; + break; + } +- } ++ ++ /* We were woken up, thus go and try to harvest some events */ ++ eavail = 1; ++ ++ } while (0); + + __set_current_state(TASK_RUNNING); + ++ if (!list_empty_careful(&wait.entry)) { ++ write_lock_irq(&ep->lock); ++ __remove_wait_queue(&ep->wq, &wait); ++ write_unlock_irq(&ep->lock); ++ } ++ + send_events: + /* + * Try to transfer events to user space. 
In case we get 0 events and +@@ -1903,12 +1914,6 @@ send_events: + !(res = ep_send_events(ep, events, maxevents)) && !timed_out) + goto fetch_events; + +- if (waiter) { +- write_lock_irq(&ep->lock); +- __remove_wait_queue(&ep->wq, &wait); +- write_unlock_irq(&ep->lock); +- } +- + return res; + } + +diff --git a/fs/ext4/ext4_jbd2.h b/fs/ext4/ext4_jbd2.h +index 7ea4f6fa173b..4b9002f0e84c 100644 +--- a/fs/ext4/ext4_jbd2.h ++++ b/fs/ext4/ext4_jbd2.h +@@ -512,6 +512,9 @@ static inline int ext4_should_dioread_nolock(struct inode *inode) + return 0; + if (ext4_should_journal_data(inode)) + return 0; ++ /* temporary fix to prevent generic/422 test failures */ ++ if (!test_opt(inode->i_sb, DELALLOC)) ++ return 0; + return 1; + } + +diff --git a/fs/ext4/super.c b/fs/ext4/super.c +index 446158ab507d..70796de7c468 100644 +--- a/fs/ext4/super.c ++++ b/fs/ext4/super.c +@@ -2181,6 +2181,14 @@ static int parse_options(char *options, struct super_block *sb, + } + } + #endif ++ if (test_opt(sb, DIOREAD_NOLOCK)) { ++ int blocksize = ++ BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size); ++ if (blocksize < PAGE_SIZE) ++ ext4_msg(sb, KERN_WARNING, "Warning: mounting with an " ++ "experimental mount option 'dioread_nolock' " ++ "for blocksize < PAGE_SIZE"); ++ } + return 1; + } + +@@ -3787,7 +3795,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) + set_opt(sb, NO_UID32); + /* xattr user namespace & acls are now defaulted on */ + set_opt(sb, XATTR_USER); +- set_opt(sb, DIOREAD_NOLOCK); + #ifdef CONFIG_EXT4_FS_POSIX_ACL + set_opt(sb, POSIX_ACL); + #endif +@@ -3837,6 +3844,10 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) + sbi->s_li_wait_mult = EXT4_DEF_LI_WAIT_MULT; + + blocksize = BLOCK_SIZE << le32_to_cpu(es->s_log_block_size); ++ ++ if (blocksize == PAGE_SIZE) ++ set_opt(sb, DIOREAD_NOLOCK); ++ + if (blocksize < EXT4_MIN_BLOCK_SIZE || + blocksize > EXT4_MAX_BLOCK_SIZE) { + ext4_msg(sb, KERN_ERR, +diff --git a/fs/io_uring.c b/fs/io_uring.c +index 38b25f599896..9690c845a3e4 100644 +--- a/fs/io_uring.c ++++ b/fs/io_uring.c +@@ -696,8 +696,6 @@ static const struct io_op_def io_op_defs[] = { + .needs_file = 1, + }, + [IORING_OP_OPENAT] = { +- .needs_file = 1, +- .fd_non_neg = 1, + .file_table = 1, + .needs_fs = 1, + }, +@@ -711,8 +709,6 @@ static const struct io_op_def io_op_defs[] = { + }, + [IORING_OP_STATX] = { + .needs_mm = 1, +- .needs_file = 1, +- .fd_non_neg = 1, + .needs_fs = 1, + .file_table = 1, + }, +@@ -743,8 +739,6 @@ static const struct io_op_def io_op_defs[] = { + .unbound_nonreg_file = 1, + }, + [IORING_OP_OPENAT2] = { +- .needs_file = 1, +- .fd_non_neg = 1, + .file_table = 1, + .needs_fs = 1, + }, +diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c +index 5778d1347b35..f5d30573f4a9 100644 +--- a/fs/notify/fanotify/fanotify.c ++++ b/fs/notify/fanotify/fanotify.c +@@ -26,7 +26,7 @@ static bool should_merge(struct fsnotify_event *old_fsn, + old = FANOTIFY_E(old_fsn); + new = FANOTIFY_E(new_fsn); + +- if (old_fsn->inode != new_fsn->inode || old->pid != new->pid || ++ if (old_fsn->objectid != new_fsn->objectid || old->pid != new->pid || + old->fh_type != new->fh_type || old->fh_len != new->fh_len) + return false; + +@@ -314,7 +314,12 @@ struct fanotify_event *fanotify_alloc_event(struct fsnotify_group *group, + if (!event) + goto out; + init: __maybe_unused +- fsnotify_init_event(&event->fse, inode); ++ /* ++ * Use the victim inode instead of the watching inode as the id for ++ * event queue, so event reported on 
parent is merged with event ++ * reported on child when both directory and child watches exist. ++ */ ++ fsnotify_init_event(&event->fse, (unsigned long)id); + event->mask = mask; + if (FAN_GROUP_FLAG(group, FAN_REPORT_TID)) + event->pid = get_pid(task_pid(current)); +diff --git a/fs/notify/inotify/inotify_fsnotify.c b/fs/notify/inotify/inotify_fsnotify.c +index d510223d302c..589dee962993 100644 +--- a/fs/notify/inotify/inotify_fsnotify.c ++++ b/fs/notify/inotify/inotify_fsnotify.c +@@ -39,7 +39,7 @@ static bool event_compare(struct fsnotify_event *old_fsn, + if (old->mask & FS_IN_IGNORED) + return false; + if ((old->mask == new->mask) && +- (old_fsn->inode == new_fsn->inode) && ++ (old_fsn->objectid == new_fsn->objectid) && + (old->name_len == new->name_len) && + (!old->name_len || !strcmp(old->name, new->name))) + return true; +@@ -118,7 +118,7 @@ int inotify_handle_event(struct fsnotify_group *group, + mask &= ~IN_ISDIR; + + fsn_event = &event->fse; +- fsnotify_init_event(fsn_event, inode); ++ fsnotify_init_event(fsn_event, (unsigned long)inode); + event->mask = mask; + event->wd = i_mark->wd; + event->sync_cookie = cookie; +diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c +index 107537a543fd..81ffc8629fc4 100644 +--- a/fs/notify/inotify/inotify_user.c ++++ b/fs/notify/inotify/inotify_user.c +@@ -635,7 +635,7 @@ static struct fsnotify_group *inotify_new_group(unsigned int max_events) + return ERR_PTR(-ENOMEM); + } + group->overflow_event = &oevent->fse; +- fsnotify_init_event(group->overflow_event, NULL); ++ fsnotify_init_event(group->overflow_event, 0); + oevent->mask = FS_Q_OVERFLOW; + oevent->wd = -1; + oevent->sync_cookie = 0; +diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h +index 26f0ecf401ea..0bbfd647f5c6 100644 +--- a/include/linux/amba/bus.h ++++ b/include/linux/amba/bus.h +@@ -65,6 +65,7 @@ struct amba_device { + struct device dev; + struct resource res; + struct clk *pclk; ++ struct device_dma_parameters dma_parms; + unsigned int periphid; + unsigned int cid; + struct amba_cs_uci_id uci; +diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h +index 4fc87dee005a..2849bdbb3acb 100644 +--- a/include/linux/backing-dev-defs.h ++++ b/include/linux/backing-dev-defs.h +@@ -220,6 +220,7 @@ struct backing_dev_info { + wait_queue_head_t wb_waitq; + + struct device *dev; ++ char dev_name[64]; + struct device *owner; + + struct timer_list laptop_mode_wb_timer; +diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h +index f88197c1ffc2..c9ad5c3b7b4b 100644 +--- a/include/linux/backing-dev.h ++++ b/include/linux/backing-dev.h +@@ -505,13 +505,6 @@ static inline int bdi_rw_congested(struct backing_dev_info *bdi) + (1 << WB_async_congested)); + } + +-extern const char *bdi_unknown_name; +- +-static inline const char *bdi_dev_name(struct backing_dev_info *bdi) +-{ +- if (!bdi || !bdi->dev) +- return bdi_unknown_name; +- return dev_name(bdi->dev); +-} ++const char *bdi_dev_name(struct backing_dev_info *bdi); + + #endif /* _LINUX_BACKING_DEV_H */ +diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h +index 1915bdba2fad..64cfb5446f4d 100644 +--- a/include/linux/fsnotify_backend.h ++++ b/include/linux/fsnotify_backend.h +@@ -133,8 +133,7 @@ struct fsnotify_ops { + */ + struct fsnotify_event { + struct list_head list; +- /* inode may ONLY be dereferenced during handle_event(). 
*/ +- struct inode *inode; /* either the inode the event happened to or its parent */ ++ unsigned long objectid; /* identifier for queue merges */ + }; + + /* +@@ -500,10 +499,10 @@ extern void fsnotify_finish_user_wait(struct fsnotify_iter_info *iter_info); + extern bool fsnotify_prepare_user_wait(struct fsnotify_iter_info *iter_info); + + static inline void fsnotify_init_event(struct fsnotify_event *event, +- struct inode *inode) ++ unsigned long objectid) + { + INIT_LIST_HEAD(&event->list); +- event->inode = inode; ++ event->objectid = objectid; + } + + #else +diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h +index 041bfa412aa0..81900b3cbe37 100644 +--- a/include/linux/platform_device.h ++++ b/include/linux/platform_device.h +@@ -25,6 +25,7 @@ struct platform_device { + bool id_auto; + struct device dev; + u64 platform_dma_mask; ++ struct device_dma_parameters dma_parms; + u32 num_resources; + struct resource *resource; + +diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h +index 0d1fe9297ac6..6f6ade63b04c 100644 +--- a/include/linux/virtio_net.h ++++ b/include/linux/virtio_net.h +@@ -3,6 +3,8 @@ + #define _LINUX_VIRTIO_NET_H + + #include <linux/if_vlan.h> ++#include <uapi/linux/tcp.h> ++#include <uapi/linux/udp.h> + #include <uapi/linux/virtio_net.h> + + static inline int virtio_net_hdr_set_proto(struct sk_buff *skb, +@@ -28,17 +30,25 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb, + bool little_endian) + { + unsigned int gso_type = 0; ++ unsigned int thlen = 0; ++ unsigned int ip_proto; + + if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) { + switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) { + case VIRTIO_NET_HDR_GSO_TCPV4: + gso_type = SKB_GSO_TCPV4; ++ ip_proto = IPPROTO_TCP; ++ thlen = sizeof(struct tcphdr); + break; + case VIRTIO_NET_HDR_GSO_TCPV6: + gso_type = SKB_GSO_TCPV6; ++ ip_proto = IPPROTO_TCP; ++ thlen = sizeof(struct tcphdr); + break; + case VIRTIO_NET_HDR_GSO_UDP: + gso_type = SKB_GSO_UDP; ++ ip_proto = IPPROTO_UDP; ++ thlen = sizeof(struct udphdr); + break; + default: + return -EINVAL; +@@ -57,16 +67,22 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb, + + if (!skb_partial_csum_set(skb, start, off)) + return -EINVAL; ++ ++ if (skb_transport_offset(skb) + thlen > skb_headlen(skb)) ++ return -EINVAL; + } else { + /* gso packets without NEEDS_CSUM do not set transport_offset. + * probe and drop if does not match one of the above types. 
+ */ + if (gso_type && skb->network_header) { ++ struct flow_keys_basic keys; ++ + if (!skb->protocol) + virtio_net_hdr_set_proto(skb, hdr); + retry: +- skb_probe_transport_header(skb); +- if (!skb_transport_header_was_set(skb)) { ++ if (!skb_flow_dissect_flow_keys_basic(NULL, skb, &keys, ++ NULL, 0, 0, 0, ++ 0)) { + /* UFO does not specify ipv4 or 6: try both */ + if (gso_type & SKB_GSO_UDP && + skb->protocol == htons(ETH_P_IP)) { +@@ -75,6 +91,12 @@ retry: + } + return -EINVAL; + } ++ ++ if (keys.control.thoff + thlen > skb_headlen(skb) || ++ keys.basic.ip_proto != ip_proto) ++ return -EINVAL; ++ ++ skb_set_transport_header(skb, keys.control.thoff); + } + } + +diff --git a/include/net/inet_ecn.h b/include/net/inet_ecn.h +index c8e2bebd8d93..0f0d1efe06dd 100644 +--- a/include/net/inet_ecn.h ++++ b/include/net/inet_ecn.h +@@ -99,6 +99,20 @@ static inline int IP_ECN_set_ce(struct iphdr *iph) + return 1; + } + ++static inline int IP_ECN_set_ect1(struct iphdr *iph) ++{ ++ u32 check = (__force u32)iph->check; ++ ++ if ((iph->tos & INET_ECN_MASK) != INET_ECN_ECT_0) ++ return 0; ++ ++ check += (__force u16)htons(0x100); ++ ++ iph->check = (__force __sum16)(check + (check>=0xFFFF)); ++ iph->tos ^= INET_ECN_MASK; ++ return 1; ++} ++ + static inline void IP_ECN_clear(struct iphdr *iph) + { + iph->tos &= ~INET_ECN_MASK; +@@ -134,6 +148,22 @@ static inline int IP6_ECN_set_ce(struct sk_buff *skb, struct ipv6hdr *iph) + return 1; + } + ++static inline int IP6_ECN_set_ect1(struct sk_buff *skb, struct ipv6hdr *iph) ++{ ++ __be32 from, to; ++ ++ if ((ipv6_get_dsfield(iph) & INET_ECN_MASK) != INET_ECN_ECT_0) ++ return 0; ++ ++ from = *(__be32 *)iph; ++ to = from ^ htonl(INET_ECN_MASK << 20); ++ *(__be32 *)iph = to; ++ if (skb->ip_summed == CHECKSUM_COMPLETE) ++ skb->csum = csum_add(csum_sub(skb->csum, (__force __wsum)from), ++ (__force __wsum)to); ++ return 1; ++} ++ + static inline void ipv6_copy_dscp(unsigned int dscp, struct ipv6hdr *inner) + { + dscp &= ~INET_ECN_MASK; +@@ -159,6 +189,25 @@ static inline int INET_ECN_set_ce(struct sk_buff *skb) + return 0; + } + ++static inline int INET_ECN_set_ect1(struct sk_buff *skb) ++{ ++ switch (skb->protocol) { ++ case cpu_to_be16(ETH_P_IP): ++ if (skb_network_header(skb) + sizeof(struct iphdr) <= ++ skb_tail_pointer(skb)) ++ return IP_ECN_set_ect1(ip_hdr(skb)); ++ break; ++ ++ case cpu_to_be16(ETH_P_IPV6): ++ if (skb_network_header(skb) + sizeof(struct ipv6hdr) <= ++ skb_tail_pointer(skb)) ++ return IP6_ECN_set_ect1(skb, ipv6_hdr(skb)); ++ break; ++ } ++ ++ return 0; ++} ++ + /* + * RFC 6040 4.2 + * To decapsulate the inner header at the tunnel egress, a compliant +@@ -208,8 +257,12 @@ static inline int INET_ECN_decapsulate(struct sk_buff *skb, + int rc; + + rc = __INET_ECN_decapsulate(outer, inner, &set_ce); +- if (!rc && set_ce) +- INET_ECN_set_ce(skb); ++ if (!rc) { ++ if (set_ce) ++ INET_ECN_set_ce(skb); ++ else if ((outer & INET_ECN_MASK) == INET_ECN_ECT_1) ++ INET_ECN_set_ect1(skb); ++ } + + return rc; + } +diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h +index fd60a8ac02ee..98ec56e2fae2 100644 +--- a/include/net/ip6_fib.h ++++ b/include/net/ip6_fib.h +@@ -204,6 +204,7 @@ struct fib6_info { + struct rt6_info { + struct dst_entry dst; + struct fib6_info __rcu *from; ++ int sernum; + + struct rt6key rt6i_dst; + struct rt6key rt6i_src; +@@ -292,6 +293,9 @@ static inline u32 rt6_get_cookie(const struct rt6_info *rt) + struct fib6_info *from; + u32 cookie = 0; + ++ if (rt->sernum) ++ return rt->sernum; ++ + rcu_read_lock(); + + from = 
rcu_dereference(rt->from); +diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h +index 854d39ef1ca3..9cdb67e3a553 100644 +--- a/include/net/net_namespace.h ++++ b/include/net/net_namespace.h +@@ -432,6 +432,13 @@ static inline int rt_genid_ipv4(const struct net *net) + return atomic_read(&net->ipv4.rt_genid); + } + ++#if IS_ENABLED(CONFIG_IPV6) ++static inline int rt_genid_ipv6(const struct net *net) ++{ ++ return atomic_read(&net->ipv6.fib6_sernum); ++} ++#endif ++ + static inline void rt_genid_bump_ipv4(struct net *net) + { + atomic_inc(&net->ipv4.rt_genid); +diff --git a/ipc/mqueue.c b/ipc/mqueue.c +index 49a05ba3000d..3ba0ea3d5920 100644 +--- a/ipc/mqueue.c ++++ b/ipc/mqueue.c +@@ -142,6 +142,7 @@ struct mqueue_inode_info { + + struct sigevent notify; + struct pid *notify_owner; ++ u32 notify_self_exec_id; + struct user_namespace *notify_user_ns; + struct user_struct *user; /* user who created, for accounting */ + struct sock *notify_sock; +@@ -774,28 +775,44 @@ static void __do_notify(struct mqueue_inode_info *info) + * synchronously. */ + if (info->notify_owner && + info->attr.mq_curmsgs == 1) { +- struct kernel_siginfo sig_i; + switch (info->notify.sigev_notify) { + case SIGEV_NONE: + break; +- case SIGEV_SIGNAL: +- /* sends signal */ ++ case SIGEV_SIGNAL: { ++ struct kernel_siginfo sig_i; ++ struct task_struct *task; ++ ++ /* do_mq_notify() accepts sigev_signo == 0, why?? */ ++ if (!info->notify.sigev_signo) ++ break; + + clear_siginfo(&sig_i); + sig_i.si_signo = info->notify.sigev_signo; + sig_i.si_errno = 0; + sig_i.si_code = SI_MESGQ; + sig_i.si_value = info->notify.sigev_value; +- /* map current pid/uid into info->owner's namespaces */ + rcu_read_lock(); ++ /* map current pid/uid into info->owner's namespaces */ + sig_i.si_pid = task_tgid_nr_ns(current, + ns_of_pid(info->notify_owner)); +- sig_i.si_uid = from_kuid_munged(info->notify_user_ns, current_uid()); ++ sig_i.si_uid = from_kuid_munged(info->notify_user_ns, ++ current_uid()); ++ /* ++ * We can't use kill_pid_info(), this signal should ++ * bypass check_kill_permission(). It is from kernel ++ * but si_fromuser() can't know this. ++ * We do check the self_exec_id, to avoid sending ++ * signals to programs that don't expect them. 
++ */ ++ task = pid_task(info->notify_owner, PIDTYPE_TGID); ++ if (task && task->self_exec_id == ++ info->notify_self_exec_id) { ++ do_send_sig_info(info->notify.sigev_signo, ++ &sig_i, task, PIDTYPE_TGID); ++ } + rcu_read_unlock(); +- +- kill_pid_info(info->notify.sigev_signo, +- &sig_i, info->notify_owner); + break; ++ } + case SIGEV_THREAD: + set_cookie(info->notify_cookie, NOTIFY_WOKENUP); + netlink_sendskb(info->notify_sock, info->notify_cookie); +@@ -1384,6 +1401,7 @@ retry: + info->notify.sigev_signo = notification->sigev_signo; + info->notify.sigev_value = notification->sigev_value; + info->notify.sigev_notify = SIGEV_SIGNAL; ++ info->notify_self_exec_id = current->self_exec_id; + break; + } + +diff --git a/kernel/trace/preemptirq_delay_test.c b/kernel/trace/preemptirq_delay_test.c +index 31c0fad4cb9e..c4c86de63cf9 100644 +--- a/kernel/trace/preemptirq_delay_test.c ++++ b/kernel/trace/preemptirq_delay_test.c +@@ -113,22 +113,42 @@ static int preemptirq_delay_run(void *data) + + for (i = 0; i < s; i++) + (testfuncs[i])(i); ++ ++ set_current_state(TASK_INTERRUPTIBLE); ++ while (!kthread_should_stop()) { ++ schedule(); ++ set_current_state(TASK_INTERRUPTIBLE); ++ } ++ ++ __set_current_state(TASK_RUNNING); ++ + return 0; + } + +-static struct task_struct *preemptirq_start_test(void) ++static int preemptirq_run_test(void) + { ++ struct task_struct *task; ++ + char task_name[50]; + + snprintf(task_name, sizeof(task_name), "%s_test", test_mode); +- return kthread_run(preemptirq_delay_run, NULL, task_name); ++ task = kthread_run(preemptirq_delay_run, NULL, task_name); ++ if (IS_ERR(task)) ++ return PTR_ERR(task); ++ if (task) ++ kthread_stop(task); ++ return 0; + } + + + static ssize_t trigger_store(struct kobject *kobj, struct kobj_attribute *attr, + const char *buf, size_t count) + { +- preemptirq_start_test(); ++ ssize_t ret; ++ ++ ret = preemptirq_run_test(); ++ if (ret) ++ return ret; + return count; + } + +@@ -148,11 +168,9 @@ static struct kobject *preemptirq_delay_kobj; + + static int __init preemptirq_delay_init(void) + { +- struct task_struct *test_task; + int retval; + +- test_task = preemptirq_start_test(); +- retval = PTR_ERR_OR_ZERO(test_task); ++ retval = preemptirq_run_test(); + if (retval != 0) + return retval; + +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c +index 6b11e4e2150c..5f0aa5d66e22 100644 +--- a/kernel/trace/trace.c ++++ b/kernel/trace/trace.c +@@ -8452,6 +8452,19 @@ static int allocate_trace_buffers(struct trace_array *tr, int size) + */ + allocate_snapshot = false; + #endif ++ ++ /* ++ * Because of some magic with the way alloc_percpu() works on ++ * x86_64, we need to synchronize the pgd of all the tables, ++ * otherwise the trace events that happen in x86_64 page fault ++ * handlers can't cope with accessing the chance that a ++ * alloc_percpu()'d memory might be touched in the page fault trace ++ * event. Oh, and we need to audit all other alloc_percpu() and vmalloc() ++ * calls in tracing, because something might get triggered within a ++ * page fault trace event! 
++ */ ++ vmalloc_sync_mappings(); ++ + return 0; + } + +diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c +index 06d7feb5255f..9de29bb45a27 100644 +--- a/kernel/trace/trace_boot.c ++++ b/kernel/trace/trace_boot.c +@@ -95,24 +95,20 @@ trace_boot_add_kprobe_event(struct xbc_node *node, const char *event) + struct xbc_node *anode; + char buf[MAX_BUF_LEN]; + const char *val; +- int ret; ++ int ret = 0; + +- kprobe_event_cmd_init(&cmd, buf, MAX_BUF_LEN); ++ xbc_node_for_each_array_value(node, "probes", anode, val) { ++ kprobe_event_cmd_init(&cmd, buf, MAX_BUF_LEN); + +- ret = kprobe_event_gen_cmd_start(&cmd, event, NULL); +- if (ret) +- return ret; ++ ret = kprobe_event_gen_cmd_start(&cmd, event, val); ++ if (ret) ++ break; + +- xbc_node_for_each_array_value(node, "probes", anode, val) { +- ret = kprobe_event_add_field(&cmd, val); ++ ret = kprobe_event_gen_cmd_end(&cmd); + if (ret) +- return ret; ++ pr_err("Failed to add probe: %s\n", buf); + } + +- ret = kprobe_event_gen_cmd_end(&cmd); +- if (ret) +- pr_err("Failed to add probe: %s\n", buf); +- + return ret; + } + #else +diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c +index d0568af4a0ef..35989383ae11 100644 +--- a/kernel/trace/trace_kprobe.c ++++ b/kernel/trace/trace_kprobe.c +@@ -453,7 +453,7 @@ static bool __within_notrace_func(unsigned long addr) + + static bool within_notrace_func(struct trace_kprobe *tk) + { +- unsigned long addr = addr = trace_kprobe_address(tk); ++ unsigned long addr = trace_kprobe_address(tk); + char symname[KSYM_NAME_LEN], *p; + + if (!__within_notrace_func(addr)) +@@ -940,6 +940,9 @@ EXPORT_SYMBOL_GPL(kprobe_event_cmd_init); + * complete command or only the first part of it; in the latter case, + * kprobe_event_add_fields() can be used to add more fields following this. + * ++ * Unlikely the synth_event_gen_cmd_start(), @loc must be specified. This ++ * returns -EINVAL if @loc == NULL. ++ * + * Return: 0 if successful, error otherwise. + */ + int __kprobe_event_gen_cmd_start(struct dynevent_cmd *cmd, bool kretprobe, +@@ -953,6 +956,9 @@ int __kprobe_event_gen_cmd_start(struct dynevent_cmd *cmd, bool kretprobe, + if (cmd->type != DYNEVENT_TYPE_KPROBE) + return -EINVAL; + ++ if (!loc) ++ return -EINVAL; ++ + if (kretprobe) + snprintf(buf, MAX_EVENT_NAME_LEN, "r:kprobes/%s", name); + else +diff --git a/kernel/umh.c b/kernel/umh.c +index 7f255b5a8845..11bf5eea474c 100644 +--- a/kernel/umh.c ++++ b/kernel/umh.c +@@ -544,6 +544,11 @@ EXPORT_SYMBOL_GPL(fork_usermode_blob); + * Runs a user-space application. The application is started + * asynchronously if wait is not set, and runs as a child of system workqueues. + * (ie. it runs with full root capabilities and optimized affinity). ++ * ++ * Note: successful return value does not guarantee the helper was called at ++ * all. You can't rely on sub_info->{init,cleanup} being called even for ++ * UMH_WAIT_* wait modes as STATIC_USERMODEHELPER_PATH="" turns all helpers ++ * into a successful no-op. + */ + int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait) + { +diff --git a/mm/backing-dev.c b/mm/backing-dev.c +index 62f05f605fb5..3f2480e4c5af 100644 +--- a/mm/backing-dev.c ++++ b/mm/backing-dev.c +@@ -21,7 +21,7 @@ struct backing_dev_info noop_backing_dev_info = { + EXPORT_SYMBOL_GPL(noop_backing_dev_info); + + static struct class *bdi_class; +-const char *bdi_unknown_name = "(unknown)"; ++static const char *bdi_unknown_name = "(unknown)"; + + /* + * bdi_lock protects bdi_tree and updates to bdi_list. 
bdi_list has RCU +@@ -938,7 +938,8 @@ int bdi_register_va(struct backing_dev_info *bdi, const char *fmt, va_list args) + if (bdi->dev) /* The driver needs to use separate queues per device */ + return 0; + +- dev = device_create_vargs(bdi_class, NULL, MKDEV(0, 0), bdi, fmt, args); ++ vsnprintf(bdi->dev_name, sizeof(bdi->dev_name), fmt, args); ++ dev = device_create(bdi_class, NULL, MKDEV(0, 0), bdi, bdi->dev_name); + if (IS_ERR(dev)) + return PTR_ERR(dev); + +@@ -1043,6 +1044,14 @@ void bdi_put(struct backing_dev_info *bdi) + } + EXPORT_SYMBOL(bdi_put); + ++const char *bdi_dev_name(struct backing_dev_info *bdi) ++{ ++ if (!bdi || !bdi->dev) ++ return bdi_unknown_name; ++ return bdi->dev_name; ++} ++EXPORT_SYMBOL_GPL(bdi_dev_name); ++ + static wait_queue_head_t congestion_wqh[2] = { + __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]), + __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1]) +diff --git a/mm/memcontrol.c b/mm/memcontrol.c +index 615d73acd0da..537eae162ed3 100644 +--- a/mm/memcontrol.c ++++ b/mm/memcontrol.c +@@ -4977,19 +4977,22 @@ static struct mem_cgroup *mem_cgroup_alloc(void) + unsigned int size; + int node; + int __maybe_unused i; ++ long error = -ENOMEM; + + size = sizeof(struct mem_cgroup); + size += nr_node_ids * sizeof(struct mem_cgroup_per_node *); + + memcg = kzalloc(size, GFP_KERNEL); + if (!memcg) +- return NULL; ++ return ERR_PTR(error); + + memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL, + 1, MEM_CGROUP_ID_MAX, + GFP_KERNEL); +- if (memcg->id.id < 0) ++ if (memcg->id.id < 0) { ++ error = memcg->id.id; + goto fail; ++ } + + memcg->vmstats_local = alloc_percpu(struct memcg_vmstats_percpu); + if (!memcg->vmstats_local) +@@ -5033,7 +5036,7 @@ static struct mem_cgroup *mem_cgroup_alloc(void) + fail: + mem_cgroup_id_remove(memcg); + __mem_cgroup_free(memcg); +- return NULL; ++ return ERR_PTR(error); + } + + static struct cgroup_subsys_state * __ref +@@ -5044,8 +5047,8 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) + long error = -ENOMEM; + + memcg = mem_cgroup_alloc(); +- if (!memcg) +- return ERR_PTR(error); ++ if (IS_ERR(memcg)) ++ return ERR_CAST(memcg); + + memcg->high = PAGE_COUNTER_MAX; + memcg->soft_limit = PAGE_COUNTER_MAX; +@@ -5095,7 +5098,7 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) + fail: + mem_cgroup_id_remove(memcg); + mem_cgroup_free(memcg); +- return ERR_PTR(-ENOMEM); ++ return ERR_PTR(error); + } + + static int mem_cgroup_css_online(struct cgroup_subsys_state *css) +diff --git a/mm/page_alloc.c b/mm/page_alloc.c +index 3c4eb750a199..a97de355a13c 100644 +--- a/mm/page_alloc.c ++++ b/mm/page_alloc.c +@@ -1555,6 +1555,7 @@ void set_zone_contiguous(struct zone *zone) + if (!__pageblock_pfn_to_page(block_start_pfn, + block_end_pfn, zone)) + return; ++ cond_resched(); + } + + /* We confirm that there is no hole */ +@@ -2350,6 +2351,14 @@ static inline void boost_watermark(struct zone *zone) + + if (!watermark_boost_factor) + return; ++ /* ++ * Don't bother in zones that are unlikely to produce results. ++ * On small machines, including kdump capture kernels running ++ * in a small area, boosting the watermark can cause an out of ++ * memory situation immediately. 
++ */ ++ if ((pageblock_nr_pages * 4) > zone_managed_pages(zone)) ++ return; + + max_boost = mult_frac(zone->_watermark[WMARK_HIGH], + watermark_boost_factor, 10000); +diff --git a/net/batman-adv/bat_v_ogm.c b/net/batman-adv/bat_v_ogm.c +index 969466218999..80b87b1f4e3a 100644 +--- a/net/batman-adv/bat_v_ogm.c ++++ b/net/batman-adv/bat_v_ogm.c +@@ -893,7 +893,7 @@ static void batadv_v_ogm_process(const struct sk_buff *skb, int ogm_offset, + + orig_node = batadv_v_ogm_orig_get(bat_priv, ogm_packet->orig); + if (!orig_node) +- return; ++ goto out; + + neigh_node = batadv_neigh_node_get_or_create(orig_node, if_incoming, + ethhdr->h_source); +diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c +index 8f0717c3f7b5..b0469d15da0e 100644 +--- a/net/batman-adv/network-coding.c ++++ b/net/batman-adv/network-coding.c +@@ -1009,15 +1009,8 @@ static struct batadv_nc_path *batadv_nc_get_path(struct batadv_priv *bat_priv, + */ + static u8 batadv_nc_random_weight_tq(u8 tq) + { +- u8 rand_val, rand_tq; +- +- get_random_bytes(&rand_val, sizeof(rand_val)); +- + /* randomize the estimated packet loss (max TQ - estimated TQ) */ +- rand_tq = rand_val * (BATADV_TQ_MAX_VALUE - tq); +- +- /* normalize the randomized packet loss */ +- rand_tq /= BATADV_TQ_MAX_VALUE; ++ u8 rand_tq = prandom_u32_max(BATADV_TQ_MAX_VALUE + 1 - tq); + + /* convert to (randomized) estimated tq again */ + return BATADV_TQ_MAX_VALUE - rand_tq; +diff --git a/net/batman-adv/sysfs.c b/net/batman-adv/sysfs.c +index c45962d8527b..0f962dcd239e 100644 +--- a/net/batman-adv/sysfs.c ++++ b/net/batman-adv/sysfs.c +@@ -1150,7 +1150,7 @@ static ssize_t batadv_store_throughput_override(struct kobject *kobj, + ret = batadv_parse_throughput(net_dev, buff, "throughput_override", + &tp_override); + if (!ret) +- return count; ++ goto out; + + old_tp_override = atomic_read(&hard_iface->bat_v.throughput_override); + if (old_tp_override == tp_override) +@@ -1190,6 +1190,7 @@ static ssize_t batadv_show_throughput_override(struct kobject *kobj, + + tp_override = atomic_read(&hard_iface->bat_v.throughput_override); + ++ batadv_hardif_put(hard_iface); + return sprintf(buff, "%u.%u MBit\n", tp_override / 10, + tp_override % 10); + } +diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c +index 43dab4066f91..a0f5dbee8f9c 100644 +--- a/net/bridge/br_netlink.c ++++ b/net/bridge/br_netlink.c +@@ -612,6 +612,7 @@ int br_process_vlan_info(struct net_bridge *br, + v - 1, rtm_cmd); + v_change_start = 0; + } ++ cond_resched(); + } + /* v_change_start is set only if the last/whole range changed */ + if (v_change_start) +diff --git a/net/core/devlink.c b/net/core/devlink.c +index b831c5545d6a..b4e26b702352 100644 +--- a/net/core/devlink.c ++++ b/net/core/devlink.c +@@ -4030,6 +4030,11 @@ static int devlink_nl_cmd_region_read_dumpit(struct sk_buff *skb, + end_offset = nla_get_u64(attrs[DEVLINK_ATTR_REGION_CHUNK_ADDR]); + end_offset += nla_get_u64(attrs[DEVLINK_ATTR_REGION_CHUNK_LEN]); + dump = false; ++ ++ if (start_offset == end_offset) { ++ err = 0; ++ goto nla_put_failure; ++ } + } + + err = devlink_nl_region_read_snapshot_fill(skb, devlink, +@@ -5029,6 +5034,7 @@ int devlink_health_report(struct devlink_health_reporter *reporter, + { + enum devlink_health_reporter_state prev_health_state; + struct devlink *devlink = reporter->devlink; ++ unsigned long recover_ts_threshold; + + /* write a log message of the current error */ + WARN_ON(!msg); +@@ -5039,10 +5045,12 @@ int devlink_health_report(struct devlink_health_reporter *reporter, + 
devlink_recover_notify(reporter, DEVLINK_CMD_HEALTH_REPORTER_RECOVER); + + /* abort if the previous error wasn't recovered */ ++ recover_ts_threshold = reporter->last_recovery_ts + ++ msecs_to_jiffies(reporter->graceful_period); + if (reporter->auto_recover && + (prev_health_state != DEVLINK_HEALTH_REPORTER_STATE_HEALTHY || +- jiffies - reporter->last_recovery_ts < +- msecs_to_jiffies(reporter->graceful_period))) { ++ (reporter->last_recovery_ts && reporter->recovery_count && ++ time_is_after_jiffies(recover_ts_threshold)))) { + trace_devlink_health_recover_aborted(devlink, + reporter->ops->name, + reporter->health_state, +diff --git a/net/core/neighbour.c b/net/core/neighbour.c +index 789a73aa7bd8..04953e5f2530 100644 +--- a/net/core/neighbour.c ++++ b/net/core/neighbour.c +@@ -1954,6 +1954,9 @@ static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, + NEIGH_UPDATE_F_OVERRIDE_ISROUTER); + } + ++ if (protocol) ++ neigh->protocol = protocol; ++ + if (ndm->ndm_flags & NTF_EXT_LEARNED) + flags |= NEIGH_UPDATE_F_EXT_LEARNED; + +@@ -1967,9 +1970,6 @@ static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, + err = __neigh_update(neigh, lladdr, ndm->ndm_state, flags, + NETLINK_CB(skb).portid, extack); + +- if (protocol) +- neigh->protocol = protocol; +- + neigh_release(neigh); + + out: +diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c +index e7c30b472034..154b639d27b8 100644 +--- a/net/dsa/dsa2.c ++++ b/net/dsa/dsa2.c +@@ -459,7 +459,7 @@ static int dsa_tree_setup_switches(struct dsa_switch_tree *dst) + list_for_each_entry(dp, &dst->ports, list) { + err = dsa_port_setup(dp); + if (err) +- goto teardown; ++ continue; + } + + return 0; +diff --git a/net/dsa/master.c b/net/dsa/master.c +index bd44bde272f4..4f5219e2e63c 100644 +--- a/net/dsa/master.c ++++ b/net/dsa/master.c +@@ -289,7 +289,8 @@ static void dsa_master_ndo_teardown(struct net_device *dev) + { + struct dsa_port *cpu_dp = dev->dsa_ptr; + +- dev->netdev_ops = cpu_dp->orig_ndo_ops; ++ if (cpu_dp->orig_ndo_ops) ++ dev->netdev_ops = cpu_dp->orig_ndo_ops; + cpu_dp->orig_ndo_ops = NULL; + } + +diff --git a/net/ipv6/route.c b/net/ipv6/route.c +index 2931224b674e..42d0596dd398 100644 +--- a/net/ipv6/route.c ++++ b/net/ipv6/route.c +@@ -1388,9 +1388,18 @@ static struct rt6_info *ip6_rt_pcpu_alloc(const struct fib6_result *res) + } + ip6_rt_copy_init(pcpu_rt, res); + pcpu_rt->rt6i_flags |= RTF_PCPU; ++ ++ if (f6i->nh) ++ pcpu_rt->sernum = rt_genid_ipv6(dev_net(dev)); ++ + return pcpu_rt; + } + ++static bool rt6_is_valid(const struct rt6_info *rt6) ++{ ++ return rt6->sernum == rt_genid_ipv6(dev_net(rt6->dst.dev)); ++} ++ + /* It should be called with rcu_read_lock() acquired */ + static struct rt6_info *rt6_get_pcpu_route(const struct fib6_result *res) + { +@@ -1398,6 +1407,19 @@ static struct rt6_info *rt6_get_pcpu_route(const struct fib6_result *res) + + pcpu_rt = this_cpu_read(*res->nh->rt6i_pcpu); + ++ if (pcpu_rt && pcpu_rt->sernum && !rt6_is_valid(pcpu_rt)) { ++ struct rt6_info *prev, **p; ++ ++ p = this_cpu_ptr(res->nh->rt6i_pcpu); ++ prev = xchg(p, NULL); ++ if (prev) { ++ dst_dev_put(&prev->dst); ++ dst_release(&prev->dst); ++ } ++ ++ pcpu_rt = NULL; ++ } ++ + return pcpu_rt; + } + +@@ -2596,6 +2618,9 @@ static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie) + + rt = container_of(dst, struct rt6_info, dst); + ++ if (rt->sernum) ++ return rt6_is_valid(rt) ? 
dst : NULL; ++ + rcu_read_lock(); + + /* All IPV6 dsts are created with ->obsolete set to the value +diff --git a/net/netfilter/nf_nat_proto.c b/net/netfilter/nf_nat_proto.c +index 3d816a1e5442..59151dc07fdc 100644 +--- a/net/netfilter/nf_nat_proto.c ++++ b/net/netfilter/nf_nat_proto.c +@@ -68,15 +68,13 @@ static bool udp_manip_pkt(struct sk_buff *skb, + enum nf_nat_manip_type maniptype) + { + struct udphdr *hdr; +- bool do_csum; + + if (skb_ensure_writable(skb, hdroff + sizeof(*hdr))) + return false; + + hdr = (struct udphdr *)(skb->data + hdroff); +- do_csum = hdr->check || skb->ip_summed == CHECKSUM_PARTIAL; ++ __udp_manip_pkt(skb, iphdroff, hdr, tuple, maniptype, !!hdr->check); + +- __udp_manip_pkt(skb, iphdroff, hdr, tuple, maniptype, do_csum); + return true; + } + +diff --git a/net/netfilter/nfnetlink_osf.c b/net/netfilter/nfnetlink_osf.c +index 9f5dea0064ea..916a3c7f9eaf 100644 +--- a/net/netfilter/nfnetlink_osf.c ++++ b/net/netfilter/nfnetlink_osf.c +@@ -165,12 +165,12 @@ static bool nf_osf_match_one(const struct sk_buff *skb, + static const struct tcphdr *nf_osf_hdr_ctx_init(struct nf_osf_hdr_ctx *ctx, + const struct sk_buff *skb, + const struct iphdr *ip, +- unsigned char *opts) ++ unsigned char *opts, ++ struct tcphdr *_tcph) + { + const struct tcphdr *tcp; +- struct tcphdr _tcph; + +- tcp = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(struct tcphdr), &_tcph); ++ tcp = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(struct tcphdr), _tcph); + if (!tcp) + return NULL; + +@@ -205,10 +205,11 @@ nf_osf_match(const struct sk_buff *skb, u_int8_t family, + int fmatch = FMATCH_WRONG; + struct nf_osf_hdr_ctx ctx; + const struct tcphdr *tcp; ++ struct tcphdr _tcph; + + memset(&ctx, 0, sizeof(ctx)); + +- tcp = nf_osf_hdr_ctx_init(&ctx, skb, ip, opts); ++ tcp = nf_osf_hdr_ctx_init(&ctx, skb, ip, opts, &_tcph); + if (!tcp) + return false; + +@@ -265,10 +266,11 @@ bool nf_osf_find(const struct sk_buff *skb, + const struct nf_osf_finger *kf; + struct nf_osf_hdr_ctx ctx; + const struct tcphdr *tcp; ++ struct tcphdr _tcph; + + memset(&ctx, 0, sizeof(ctx)); + +- tcp = nf_osf_hdr_ctx_init(&ctx, skb, ip, opts); ++ tcp = nf_osf_hdr_ctx_init(&ctx, skb, ip, opts, &_tcph); + if (!tcp) + return false; + +diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c +index a36974e9c601..1bcf8fbfd40e 100644 +--- a/net/sched/sch_choke.c ++++ b/net/sched/sch_choke.c +@@ -323,7 +323,8 @@ static void choke_reset(struct Qdisc *sch) + + sch->q.qlen = 0; + sch->qstats.backlog = 0; +- memset(q->tab, 0, (q->tab_mask + 1) * sizeof(struct sk_buff *)); ++ if (q->tab) ++ memset(q->tab, 0, (q->tab_mask + 1) * sizeof(struct sk_buff *)); + q->head = q->tail = 0; + red_restart(&q->vars); + } +diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c +index 968519ff36e9..436160be9c18 100644 +--- a/net/sched/sch_fq_codel.c ++++ b/net/sched/sch_fq_codel.c +@@ -416,7 +416,7 @@ static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt, + q->quantum = max(256U, nla_get_u32(tb[TCA_FQ_CODEL_QUANTUM])); + + if (tb[TCA_FQ_CODEL_DROP_BATCH_SIZE]) +- q->drop_batch_size = min(1U, nla_get_u32(tb[TCA_FQ_CODEL_DROP_BATCH_SIZE])); ++ q->drop_batch_size = max(1U, nla_get_u32(tb[TCA_FQ_CODEL_DROP_BATCH_SIZE])); + + if (tb[TCA_FQ_CODEL_MEMORY_LIMIT]) + q->memory_limit = min(1U << 31, nla_get_u32(tb[TCA_FQ_CODEL_MEMORY_LIMIT])); +diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c +index c787d4d46017..5a6def5e4e6d 100644 +--- a/net/sched/sch_sfq.c ++++ b/net/sched/sch_sfq.c +@@ -637,6 +637,15 @@ static int sfq_change(struct 
Qdisc *sch, struct nlattr *opt) + if (ctl->divisor && + (!is_power_of_2(ctl->divisor) || ctl->divisor > 65536)) + return -EINVAL; ++ ++ /* slot->allot is a short, make sure quantum is not too big. */ ++ if (ctl->quantum) { ++ unsigned int scaled = SFQ_ALLOT_SIZE(ctl->quantum); ++ ++ if (scaled <= 0 || scaled > SHRT_MAX) ++ return -EINVAL; ++ } ++ + if (ctl_v1 && !red_check_params(ctl_v1->qth_min, ctl_v1->qth_max, + ctl_v1->Wlog)) + return -EINVAL; +diff --git a/net/sched/sch_skbprio.c b/net/sched/sch_skbprio.c +index 0fb10abf7579..7a5e4c454715 100644 +--- a/net/sched/sch_skbprio.c ++++ b/net/sched/sch_skbprio.c +@@ -169,6 +169,9 @@ static int skbprio_change(struct Qdisc *sch, struct nlattr *opt, + { + struct tc_skbprio_qopt *ctl = nla_data(opt); + ++ if (opt->nla_len != nla_attr_size(sizeof(*ctl))) ++ return -EINVAL; ++ + sch->limit = ctl->limit; + return 0; + } +diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c +index 6a16af4b1ef6..26788f4a3b9e 100644 +--- a/net/sctp/sm_statefuns.c ++++ b/net/sctp/sm_statefuns.c +@@ -1865,7 +1865,7 @@ static enum sctp_disposition sctp_sf_do_dupcook_a( + */ + sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl)); + return sctp_sf_do_9_2_start_shutdown(net, ep, asoc, +- SCTP_ST_CHUNK(0), NULL, ++ SCTP_ST_CHUNK(0), repl, + commands); + } else { + sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, +@@ -5470,7 +5470,7 @@ enum sctp_disposition sctp_sf_do_9_2_start_shutdown( + * in the Cumulative TSN Ack field the last sequential TSN it + * has received from the peer. + */ +- reply = sctp_make_shutdown(asoc, NULL); ++ reply = sctp_make_shutdown(asoc, arg); + if (!reply) + goto nomem; + +@@ -6068,7 +6068,7 @@ enum sctp_disposition sctp_sf_autoclose_timer_expire( + disposition = SCTP_DISPOSITION_CONSUME; + if (sctp_outq_is_empty(&asoc->outqueue)) { + disposition = sctp_sf_do_9_2_start_shutdown(net, ep, asoc, type, +- arg, commands); ++ NULL, commands); + } + + return disposition; +diff --git a/net/tipc/topsrv.c b/net/tipc/topsrv.c +index 3a12fc18239b..73dbed0c4b6b 100644 +--- a/net/tipc/topsrv.c ++++ b/net/tipc/topsrv.c +@@ -402,10 +402,11 @@ static int tipc_conn_rcv_from_sock(struct tipc_conn *con) + read_lock_bh(&sk->sk_callback_lock); + ret = tipc_conn_rcv_sub(srv, con, &s); + read_unlock_bh(&sk->sk_callback_lock); ++ if (!ret) ++ return 0; + } +- if (ret < 0) +- tipc_conn_close(con); + ++ tipc_conn_close(con); + return ret; + } + +diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c +index c98e602a1a2d..e23f94a5549b 100644 +--- a/net/tls/tls_sw.c ++++ b/net/tls/tls_sw.c +@@ -800,6 +800,8 @@ static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk, + *copied -= sk_msg_free(sk, msg); + tls_free_open_rec(sk); + } ++ if (psock) ++ sk_psock_put(sk, psock); + return err; + } + more_data: +@@ -2081,8 +2083,9 @@ static void tls_data_ready(struct sock *sk) + strp_data_ready(&ctx->strp); + + psock = sk_psock_get(sk); +- if (psock && !list_empty(&psock->ingress_msg)) { +- ctx->saved_data_ready(sk); ++ if (psock) { ++ if (!list_empty(&psock->ingress_msg)) ++ ctx->saved_data_ready(sk); + sk_psock_put(sk, psock); + } + } +diff --git a/scripts/decodecode b/scripts/decodecode +index ba8b8d5834e6..fbdb325cdf4f 100755 +--- a/scripts/decodecode ++++ b/scripts/decodecode +@@ -126,7 +126,7 @@ faultlinenum=$(( $(wc -l $T.oo | cut -d" " -f1) - \ + faultline=`cat $T.dis | head -1 | cut -d":" -f2-` + faultline=`echo "$faultline" | sed -e 's/\[/\\\[/g; s/\]/\\\]/g'` + +-cat $T.oo | sed -e "${faultlinenum}s/^\(.*:\)\(.*\)/\1\*\2\t\t<-- trapping instruction/" ++cat 
$T.oo | sed -e "${faultlinenum}s/^\([^:]*:\)\(.*\)/\1\*\2\t\t<-- trapping instruction/" + echo + cat $T.aa + cleanup +diff --git a/tools/cgroup/iocost_monitor.py b/tools/cgroup/iocost_monitor.py +index 7427a5ee761b..9d8e9613008a 100644 +--- a/tools/cgroup/iocost_monitor.py ++++ b/tools/cgroup/iocost_monitor.py +@@ -159,7 +159,12 @@ class IocgStat: + else: + self.inflight_pct = 0 + +- self.debt_ms = iocg.abs_vdebt.counter.value_() / VTIME_PER_USEC / 1000 ++ # vdebt used to be an atomic64_t and is now u64, support both ++ try: ++ self.debt_ms = iocg.abs_vdebt.counter.value_() / VTIME_PER_USEC / 1000 ++ except: ++ self.debt_ms = iocg.abs_vdebt.value_() / VTIME_PER_USEC / 1000 ++ + self.use_delay = blkg.use_delay.counter.value_() + self.delay_ms = blkg.delay_nsec.counter.value_() / 1_000_000 + +diff --git a/tools/objtool/check.c b/tools/objtool/check.c +index 95c485d3d4d8..f9ffb548b4fa 100644 +--- a/tools/objtool/check.c ++++ b/tools/objtool/check.c +@@ -1403,7 +1403,7 @@ static int update_insn_state_regs(struct instruction *insn, struct insn_state *s + struct cfi_reg *cfa = &state->cfa; + struct stack_op *op = &insn->stack_op; + +- if (cfa->base != CFI_SP) ++ if (cfa->base != CFI_SP && cfa->base != CFI_SP_INDIRECT) + return 0; + + /* push */ +diff --git a/tools/testing/selftests/net/tcp_mmap.c b/tools/testing/selftests/net/tcp_mmap.c +index 35505b31e5cc..4555f88252ba 100644 +--- a/tools/testing/selftests/net/tcp_mmap.c ++++ b/tools/testing/selftests/net/tcp_mmap.c +@@ -165,9 +165,10 @@ void *child_thread(void *arg) + socklen_t zc_len = sizeof(zc); + int res; + ++ memset(&zc, 0, sizeof(zc)); + zc.address = (__u64)((unsigned long)addr); + zc.length = chunk_size; +- zc.recv_skip_hint = 0; ++ + res = getsockopt(fd, IPPROTO_TCP, TCP_ZEROCOPY_RECEIVE, + &zc, &zc_len); + if (res == -1) +@@ -281,12 +282,14 @@ static void setup_sockaddr(int domain, const char *str_addr, + static void do_accept(int fdlisten) + { + pthread_attr_t attr; ++ int rcvlowat; + + pthread_attr_init(&attr); + pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED); + ++ rcvlowat = chunk_size; + if (setsockopt(fdlisten, SOL_SOCKET, SO_RCVLOWAT, +- &chunk_size, sizeof(chunk_size)) == -1) { ++ &rcvlowat, sizeof(rcvlowat)) == -1) { + perror("setsockopt SO_RCVLOWAT"); + } + +diff --git a/tools/testing/selftests/wireguard/netns.sh b/tools/testing/selftests/wireguard/netns.sh +index 936e1ca9410e..17a1f53ceba0 100755 +--- a/tools/testing/selftests/wireguard/netns.sh ++++ b/tools/testing/selftests/wireguard/netns.sh +@@ -48,8 +48,11 @@ cleanup() { + exec 2>/dev/null + printf "$orig_message_cost" > /proc/sys/net/core/message_cost + ip0 link del dev wg0 ++ ip0 link del dev wg1 + ip1 link del dev wg0 ++ ip1 link del dev wg1 + ip2 link del dev wg0 ++ ip2 link del dev wg1 + local to_kill="$(ip netns pids $netns0) $(ip netns pids $netns1) $(ip netns pids $netns2)" + [[ -n $to_kill ]] && kill $to_kill + pp ip netns del $netns1 +@@ -77,18 +80,20 @@ ip0 link set wg0 netns $netns2 + key1="$(pp wg genkey)" + key2="$(pp wg genkey)" + key3="$(pp wg genkey)" ++key4="$(pp wg genkey)" + pub1="$(pp wg pubkey <<<"$key1")" + pub2="$(pp wg pubkey <<<"$key2")" + pub3="$(pp wg pubkey <<<"$key3")" ++pub4="$(pp wg pubkey <<<"$key4")" + psk="$(pp wg genpsk)" + [[ -n $key1 && -n $key2 && -n $psk ]] + + configure_peers() { + ip1 addr add 192.168.241.1/24 dev wg0 +- ip1 addr add fd00::1/24 dev wg0 ++ ip1 addr add fd00::1/112 dev wg0 + + ip2 addr add 192.168.241.2/24 dev wg0 +- ip2 addr add fd00::2/24 dev wg0 ++ ip2 addr add fd00::2/112 dev wg0 + + n1 wg set wg0 
\ + private-key <(echo "$key1") \ +@@ -230,9 +235,38 @@ n1 ping -W 1 -c 1 192.168.241.2 + n1 wg set wg0 private-key <(echo "$key3") + n2 wg set wg0 peer "$pub3" preshared-key <(echo "$psk") allowed-ips 192.168.241.1/32 peer "$pub1" remove + n1 ping -W 1 -c 1 192.168.241.2 ++n2 wg set wg0 peer "$pub3" remove ++ ++# Test that we can route wg through wg ++ip1 addr flush dev wg0 ++ip2 addr flush dev wg0 ++ip1 addr add fd00::5:1/112 dev wg0 ++ip2 addr add fd00::5:2/112 dev wg0 ++n1 wg set wg0 private-key <(echo "$key1") peer "$pub2" preshared-key <(echo "$psk") allowed-ips fd00::5:2/128 endpoint 127.0.0.1:2 ++n2 wg set wg0 private-key <(echo "$key2") listen-port 2 peer "$pub1" preshared-key <(echo "$psk") allowed-ips fd00::5:1/128 endpoint 127.212.121.99:9998 ++ip1 link add wg1 type wireguard ++ip2 link add wg1 type wireguard ++ip1 addr add 192.168.241.1/24 dev wg1 ++ip1 addr add fd00::1/112 dev wg1 ++ip2 addr add 192.168.241.2/24 dev wg1 ++ip2 addr add fd00::2/112 dev wg1 ++ip1 link set mtu 1340 up dev wg1 ++ip2 link set mtu 1340 up dev wg1 ++n1 wg set wg1 listen-port 5 private-key <(echo "$key3") peer "$pub4" allowed-ips 192.168.241.2/32,fd00::2/128 endpoint [fd00::5:2]:5 ++n2 wg set wg1 listen-port 5 private-key <(echo "$key4") peer "$pub3" allowed-ips 192.168.241.1/32,fd00::1/128 endpoint [fd00::5:1]:5 ++tests ++# Try to set up a routing loop between the two namespaces ++ip1 link set netns $netns0 dev wg1 ++ip0 addr add 192.168.241.1/24 dev wg1 ++ip0 link set up dev wg1 ++n0 ping -W 1 -c 1 192.168.241.2 ++n1 wg set wg0 peer "$pub2" endpoint 192.168.241.2:7 ++ip2 link del wg0 ++ip2 link del wg1 ++! n0 ping -W 1 -c 10 -f 192.168.241.2 || false # Should not crash kernel + ++ip0 link del wg1 + ip1 link del wg0 +-ip2 link del wg0 + + # Test using NAT. We now change the topology to this: + # ┌────────────────────────────────────────┐ ┌────────────────────────────────────────────────┐ ┌────────────────────────────────────────┐ +@@ -282,6 +316,20 @@ pp sleep 3 + n2 ping -W 1 -c 1 192.168.241.1 + n1 wg set wg0 peer "$pub2" persistent-keepalive 0 + ++# Test that onion routing works, even when it loops ++n1 wg set wg0 peer "$pub3" allowed-ips 192.168.242.2/32 endpoint 192.168.241.2:5 ++ip1 addr add 192.168.242.1/24 dev wg0 ++ip2 link add wg1 type wireguard ++ip2 addr add 192.168.242.2/24 dev wg1 ++n2 wg set wg1 private-key <(echo "$key3") listen-port 5 peer "$pub1" allowed-ips 192.168.242.1/32 ++ip2 link set wg1 up ++n1 ping -W 1 -c 1 192.168.242.2 ++ip2 link del wg1 ++n1 wg set wg0 peer "$pub3" endpoint 192.168.242.2:5 ++! n1 ping -W 1 -c 1 192.168.242.2 || false # Should not crash kernel ++n1 wg set wg0 peer "$pub3" remove ++ip1 addr del 192.168.242.1/24 dev wg0 ++ + # Do a wg-quick(8)-style policy routing for the default route, making sure vethc has a v6 address to tease out bugs. 
+ ip1 -6 addr add fc00::9/96 dev vethc + ip1 -6 route add default via fc00::1 +diff --git a/virt/kvm/arm/hyp/aarch32.c b/virt/kvm/arm/hyp/aarch32.c +index d31f267961e7..25c0e47d57cb 100644 +--- a/virt/kvm/arm/hyp/aarch32.c ++++ b/virt/kvm/arm/hyp/aarch32.c +@@ -125,12 +125,16 @@ static void __hyp_text kvm_adjust_itstate(struct kvm_vcpu *vcpu) + */ + void __hyp_text kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr) + { ++ u32 pc = *vcpu_pc(vcpu); + bool is_thumb; + + is_thumb = !!(*vcpu_cpsr(vcpu) & PSR_AA32_T_BIT); + if (is_thumb && !is_wide_instr) +- *vcpu_pc(vcpu) += 2; ++ pc += 2; + else +- *vcpu_pc(vcpu) += 4; ++ pc += 4; ++ ++ *vcpu_pc(vcpu) = pc; ++ + kvm_adjust_itstate(vcpu); + } +diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c +index 97fb2a40e6ba..e7abd05ea896 100644 +--- a/virt/kvm/arm/vgic/vgic-mmio.c ++++ b/virt/kvm/arm/vgic/vgic-mmio.c +@@ -368,7 +368,7 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq, + static void vgic_change_active_prepare(struct kvm_vcpu *vcpu, u32 intid) + { + if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 || +- intid > VGIC_NR_PRIVATE_IRQS) ++ intid >= VGIC_NR_PRIVATE_IRQS) + kvm_arm_halt_guest(vcpu->kvm); + } + +@@ -376,7 +376,7 @@ static void vgic_change_active_prepare(struct kvm_vcpu *vcpu, u32 intid) + static void vgic_change_active_finish(struct kvm_vcpu *vcpu, u32 intid) + { + if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 || +- intid > VGIC_NR_PRIVATE_IRQS) ++ intid >= VGIC_NR_PRIVATE_IRQS) + kvm_arm_resume_guest(vcpu->kvm); + } +