commit:     3a924a86ce265b7be84b1d3032ede91669b0962a
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun May 18 14:35:25 2025 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun May 18 14:35:25 2025 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=3a924a86
Linux patch 5.15.183 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org> 0000_README | 4 + 1182_linux-5.15.183.patch | 1930 +++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 1934 insertions(+) diff --git a/0000_README b/0000_README index ba74306a..f44e48d0 100644 --- a/0000_README +++ b/0000_README @@ -771,6 +771,10 @@ Patch: 1181_linux-5.15.182.patch From: https://www.kernel.org Desc: Linux 5.15.182 +Patch: 1182_linux-5.15.183.patch +From: https://www.kernel.org +Desc: Linux 5.15.183 + Patch: 1500_XATTR_USER_PREFIX.patch From: https://bugs.gentoo.org/show_bug.cgi?id=470644 Desc: Support for namespace user.pax.* on tmpfs. diff --git a/1182_linux-5.15.183.patch b/1182_linux-5.15.183.patch new file mode 100644 index 00000000..fb59a452 --- /dev/null +++ b/1182_linux-5.15.183.patch @@ -0,0 +1,1930 @@ +diff --git a/Makefile b/Makefile +index 2288ad8ae88a9c..09de195b86f268 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 5 + PATCHLEVEL = 15 +-SUBLEVEL = 182 ++SUBLEVEL = 183 + EXTRAVERSION = + NAME = Trick or Treat + +diff --git a/arch/mips/include/asm/ptrace.h b/arch/mips/include/asm/ptrace.h +index 428b9f1cf1de27..b1da249dcd71c5 100644 +--- a/arch/mips/include/asm/ptrace.h ++++ b/arch/mips/include/asm/ptrace.h +@@ -65,7 +65,8 @@ static inline void instruction_pointer_set(struct pt_regs *regs, + + /* Query offset/name of register from its name/offset */ + extern int regs_query_register_offset(const char *name); +-#define MAX_REG_OFFSET (offsetof(struct pt_regs, __last)) ++#define MAX_REG_OFFSET \ ++ (offsetof(struct pt_regs, __last) - sizeof(unsigned long)) + + /** + * regs_get_register() - get register value from its offset +diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c +index 75cd45f2338dcc..c10d93d2773b4b 100644 +--- a/arch/x86/kernel/cpu/bugs.c ++++ b/arch/x86/kernel/cpu/bugs.c +@@ -1656,10 +1656,11 @@ static void __init bhi_select_mitigation(void) + return; + } + +- if (spec_ctrl_bhi_dis()) ++ if (!IS_ENABLED(CONFIG_X86_64)) + return; + +- if (!IS_ENABLED(CONFIG_X86_64)) ++ /* Mitigate in hardware if supported */ ++ if (spec_ctrl_bhi_dis()) + return; + + /* Mitigate KVM by default */ +diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c +index dbaea8a6175b51..1ebd67c95d869a 100644 +--- a/arch/x86/kernel/cpu/common.c ++++ b/arch/x86/kernel/cpu/common.c +@@ -1345,9 +1345,12 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c) + if (vulnerable_to_rfds(x86_arch_cap_msr)) + setup_force_cpu_bug(X86_BUG_RFDS); + +- /* When virtualized, eIBRS could be hidden, assume vulnerable */ +- if (!(x86_arch_cap_msr & ARCH_CAP_BHI_NO) && +- !cpu_matches(cpu_vuln_whitelist, NO_BHI) && ++ /* ++ * Intel parts with eIBRS are vulnerable to BHI attacks. Parts with ++ * BHI_NO still need to use the BHI mitigation to prevent Intra-mode ++ * attacks. When virtualized, eIBRS could be hidden, assume vulnerable. ++ */ ++ if (!cpu_matches(cpu_vuln_whitelist, NO_BHI) && + (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED) || + boot_cpu_has(X86_FEATURE_HYPERVISOR))) + setup_force_cpu_bug(X86_BUG_BHI); +diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c +index 94a23fcb207372..11a43d373baee3 100644 +--- a/arch/x86/mm/tlb.c ++++ b/arch/x86/mm/tlb.c +@@ -616,7 +616,11 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, + + choose_new_asid(next, next_tlb_gen, &new_asid, &need_flush); + +- /* Let nmi_uaccess_okay() know that we're changing CR3. */ ++ /* ++ * Indicate that CR3 is about to change. 
nmi_uaccess_okay() ++ * and others are sensitive to the window where mm_cpumask(), ++ * CR3 and cpu_tlbstate.loaded_mm are not all in sync. ++ */ + this_cpu_write(cpu_tlbstate.loaded_mm, LOADED_MM_SWITCHING); + barrier(); + } +@@ -856,8 +860,16 @@ static void flush_tlb_func(void *info) + + static bool should_flush_tlb(int cpu, void *data) + { ++ struct mm_struct *loaded_mm = per_cpu(cpu_tlbstate.loaded_mm, cpu); + struct flush_tlb_info *info = data; + ++ /* ++ * Order the 'loaded_mm' and 'is_lazy' against their ++ * write ordering in switch_mm_irqs_off(). Ensure ++ * 'is_lazy' is at least as new as 'loaded_mm'. ++ */ ++ smp_rmb(); ++ + /* Lazy TLB will get flushed at the next context switch. */ + if (per_cpu(cpu_tlbstate_shared.is_lazy, cpu)) + return false; +@@ -866,8 +878,15 @@ static bool should_flush_tlb(int cpu, void *data) + if (!info->mm) + return true; + ++ /* ++ * While switching, the remote CPU could have state from ++ * either the prev or next mm. Assume the worst and flush. ++ */ ++ if (loaded_mm == LOADED_MM_SWITCHING) ++ return true; ++ + /* The target mm is loaded, and the CPU is not lazy. */ +- if (per_cpu(cpu_tlbstate.loaded_mm, cpu) == info->mm) ++ if (loaded_mm == info->mm) + return true; + + /* In cpumask, but not the loaded mm? Periodically remove by flushing. */ +diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c +index ac06f53391ec19..f62ebeee8b14ef 100644 +--- a/arch/x86/net/bpf_jit_comp.c ++++ b/arch/x86/net/bpf_jit_comp.c +@@ -36,6 +36,8 @@ static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len) + #define EMIT2(b1, b2) EMIT((b1) + ((b2) << 8), 2) + #define EMIT3(b1, b2, b3) EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3) + #define EMIT4(b1, b2, b3, b4) EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4) ++#define EMIT5(b1, b2, b3, b4, b5) \ ++ do { EMIT1(b1); EMIT4(b2, b3, b4, b5); } while (0) + + #define EMIT1_off32(b1, off) \ + do { EMIT1(b1); EMIT(off, 4); } while (0) +@@ -932,6 +934,47 @@ static void emit_nops(u8 **pprog, int len) + + #define INSN_SZ_DIFF (((addrs[i] - addrs[i - 1]) - (prog - temp))) + ++static int emit_spectre_bhb_barrier(u8 **pprog, u8 *ip, ++ struct bpf_prog *bpf_prog) ++{ ++ u8 *prog = *pprog; ++ u8 *func; ++ ++ if (cpu_feature_enabled(X86_FEATURE_CLEAR_BHB_LOOP)) { ++ /* The clearing sequence clobbers eax and ecx. */ ++ EMIT1(0x50); /* push rax */ ++ EMIT1(0x51); /* push rcx */ ++ ip += 2; ++ ++ func = (u8 *)clear_bhb_loop; ++ ++ if (emit_call(&prog, func, ip)) ++ return -EINVAL; ++ EMIT1(0x59); /* pop rcx */ ++ EMIT1(0x58); /* pop rax */ ++ } ++ /* Insert IBHF instruction */ ++ if ((cpu_feature_enabled(X86_FEATURE_CLEAR_BHB_LOOP) && ++ cpu_feature_enabled(X86_FEATURE_HYPERVISOR)) || ++ cpu_feature_enabled(X86_FEATURE_CLEAR_BHB_HW)) { ++ /* ++ * Add an Indirect Branch History Fence (IBHF). IBHF acts as a ++ * fence preventing branch history from before the fence from ++ * affecting indirect branches after the fence. This is ++ * specifically used in cBPF jitted code to prevent Intra-mode ++ * BHI attacks. The IBHF instruction is designed to be a NOP on ++ * hardware that doesn't need or support it. The REP and REX.W ++ * prefixes are required by the microcode, and they also ensure ++ * that the NOP is unlikely to be used in existing code. ++ * ++ * IBHF is not a valid instruction in 32-bit mode. 
++ */ ++ EMIT5(0xF3, 0x48, 0x0F, 0x1E, 0xF8); /* ibhf */ ++ } ++ *pprog = prog; ++ return 0; ++} ++ + static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, + int oldproglen, struct jit_context *ctx, bool jmp_padding) + { +@@ -1737,6 +1780,15 @@ st: if (is_imm8(insn->off)) + seen_exit = true; + /* Update cleanup_addr */ + ctx->cleanup_addr = proglen; ++ ++ if (bpf_prog_was_classic(bpf_prog) && ++ !capable(CAP_SYS_ADMIN)) { ++ u8 *ip = image + addrs[i - 1]; ++ ++ if (emit_spectre_bhb_barrier(&prog, ip, bpf_prog)) ++ return -EINVAL; ++ } ++ + pop_callee_regs(&prog, callee_regs_used); + EMIT1(0xC9); /* leave */ + emit_return(&prog, image + addrs[i - 1] + (prog - temp)); +diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +index 38e59fff4c61c3..001fe6cfe74ef6 100644 +--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c ++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +@@ -53,6 +53,9 @@ + #include "dc/dcn20/dcn20_resource.h" + #endif + ++/* ++ * This function handles both native AUX and I2C-Over-AUX transactions. ++ */ + static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, + struct drm_dp_aux_msg *msg) + { +@@ -87,15 +90,25 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, + if (adev->dm.aux_hpd_discon_quirk) { + if (msg->address == DP_SIDEBAND_MSG_DOWN_REQ_BASE && + operation_result == AUX_RET_ERROR_HPD_DISCON) { +- result = 0; ++ result = msg->size; + operation_result = AUX_RET_SUCCESS; + } + } + +- if (payload.write && result >= 0) +- result = msg->size; ++ /* ++ * result equals to 0 includes the cases of AUX_DEFER/I2C_DEFER ++ */ ++ if (payload.write && result >= 0) { ++ if (result) { ++ /*one byte indicating partially written bytes. 
Force 0 to retry*/ ++ drm_info(adev_to_drm(adev), "amdgpu: AUX partially written\n"); ++ result = 0; ++ } else if (!payload.reply[0]) ++ /*I2C_ACK|AUX_ACK*/ ++ result = msg->size; ++ } + +- if (result < 0) ++ if (result < 0) { + switch (operation_result) { + case AUX_RET_SUCCESS: + break; +@@ -114,6 +127,13 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, + break; + } + ++ drm_info(adev_to_drm(adev), "amdgpu: DP AUX transfer fail:%d\n", operation_result); ++ } ++ ++ if (payload.reply[0]) ++ drm_info(adev_to_drm(adev), "amdgpu: AUX reply command not ACK: 0x%02x.", ++ payload.reply[0]); ++ + return result; + } + +diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c +index 67412115b4b30f..ecea88d63f0d8a 100644 +--- a/drivers/gpu/drm/panel/panel-simple.c ++++ b/drivers/gpu/drm/panel/panel-simple.c +@@ -1210,27 +1210,28 @@ static const struct panel_desc auo_g070vvn01 = { + }, + }; + +-static const struct drm_display_mode auo_g101evn010_mode = { +- .clock = 68930, +- .hdisplay = 1280, +- .hsync_start = 1280 + 82, +- .hsync_end = 1280 + 82 + 2, +- .htotal = 1280 + 82 + 2 + 84, +- .vdisplay = 800, +- .vsync_start = 800 + 8, +- .vsync_end = 800 + 8 + 2, +- .vtotal = 800 + 8 + 2 + 6, ++static const struct display_timing auo_g101evn010_timing = { ++ .pixelclock = { 64000000, 68930000, 85000000 }, ++ .hactive = { 1280, 1280, 1280 }, ++ .hfront_porch = { 8, 64, 256 }, ++ .hback_porch = { 8, 64, 256 }, ++ .hsync_len = { 40, 168, 767 }, ++ .vactive = { 800, 800, 800 }, ++ .vfront_porch = { 4, 8, 100 }, ++ .vback_porch = { 4, 8, 100 }, ++ .vsync_len = { 8, 16, 223 }, + }; + + static const struct panel_desc auo_g101evn010 = { +- .modes = &auo_g101evn010_mode, +- .num_modes = 1, ++ .timings = &auo_g101evn010_timing, ++ .num_timings = 1, + .bpc = 6, + .size = { + .width = 216, + .height = 135, + }, + .bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG, ++ .bus_flags = DRM_BUS_FLAG_DE_HIGH, + .connector_type = DRM_MODE_CONNECTOR_LVDS, + }; + +diff --git a/drivers/iio/accel/adis16201.c b/drivers/iio/accel/adis16201.c +index dfb8e2e5bdf58d..f063226a42f375 100644 +--- a/drivers/iio/accel/adis16201.c ++++ b/drivers/iio/accel/adis16201.c +@@ -211,9 +211,9 @@ static const struct iio_chan_spec adis16201_channels[] = { + BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 14), + ADIS_AUX_ADC_CHAN(ADIS16201_AUX_ADC_REG, ADIS16201_SCAN_AUX_ADC, 0, 12), + ADIS_INCLI_CHAN(X, ADIS16201_XINCL_OUT_REG, ADIS16201_SCAN_INCLI_X, +- BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 14), ++ BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 12), + ADIS_INCLI_CHAN(Y, ADIS16201_YINCL_OUT_REG, ADIS16201_SCAN_INCLI_Y, +- BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 14), ++ BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 12), + IIO_CHAN_SOFT_TIMESTAMP(7) + }; + +diff --git a/drivers/iio/adc/ad7606_spi.c b/drivers/iio/adc/ad7606_spi.c +index e1ad2cd61b7f9b..e9f4043966aedb 100644 +--- a/drivers/iio/adc/ad7606_spi.c ++++ b/drivers/iio/adc/ad7606_spi.c +@@ -127,7 +127,7 @@ static int ad7606_spi_reg_read(struct ad7606_state *st, unsigned int addr) + { + .tx_buf = &st->d16[0], + .len = 2, +- .cs_change = 0, ++ .cs_change = 1, + }, { + .rx_buf = &st->d16[1], + .len = 2, +diff --git a/drivers/iio/adc/dln2-adc.c b/drivers/iio/adc/dln2-adc.c +index 97d162a3cba4ea..49a2588e7431ed 100644 +--- a/drivers/iio/adc/dln2-adc.c ++++ b/drivers/iio/adc/dln2-adc.c +@@ -483,7 +483,7 @@ static irqreturn_t dln2_adc_trigger_h(int irq, void *p) + struct iio_dev *indio_dev = pf->indio_dev; + struct { + __le16 values[DLN2_ADC_MAX_CHANNELS]; +- int64_t timestamp_space; ++ aligned_s64 timestamp_space; 
+ } data; + struct dln2_adc_get_all_vals dev_data; + struct dln2_adc *dln2 = iio_priv(indio_dev); +diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c +index 16730a78096436..e78b699a044ed5 100644 +--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c ++++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c +@@ -369,6 +369,9 @@ int st_lsm6dsx_read_fifo(struct st_lsm6dsx_hw *hw) + if (fifo_status & cpu_to_le16(ST_LSM6DSX_FIFO_EMPTY_MASK)) + return 0; + ++ if (!pattern_len) ++ pattern_len = ST_LSM6DSX_SAMPLE_SIZE; ++ + fifo_len = (le16_to_cpu(fifo_status) & fifo_diff_mask) * + ST_LSM6DSX_CHAN_SIZE; + fifo_len = (fifo_len / pattern_len) * pattern_len; +@@ -586,6 +589,9 @@ int st_lsm6dsx_read_tagged_fifo(struct st_lsm6dsx_hw *hw) + if (!fifo_len) + return 0; + ++ if (!pattern_len) ++ pattern_len = ST_LSM6DSX_TAGGED_SAMPLE_SIZE; ++ + for (read_len = 0; read_len < fifo_len; read_len += pattern_len) { + err = st_lsm6dsx_read_block(hw, + ST_LSM6DSX_REG_FIFO_OUT_TAG_ADDR, +diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c +index f64f25079481ae..058a152769829d 100644 +--- a/drivers/input/mouse/synaptics.c ++++ b/drivers/input/mouse/synaptics.c +@@ -163,6 +163,7 @@ static const char * const topbuttonpad_pnp_ids[] = { + + static const char * const smbus_pnp_ids[] = { + /* all of the topbuttonpad_pnp_ids are valid, we just add some extras */ ++ "DLL060d", /* Dell Precision M3800 */ + "LEN0048", /* X1 Carbon 3 */ + "LEN0046", /* X250 */ + "LEN0049", /* Yoga 11e */ +@@ -187,11 +188,15 @@ static const char * const smbus_pnp_ids[] = { + "LEN2054", /* E480 */ + "LEN2055", /* E580 */ + "LEN2068", /* T14 Gen 1 */ ++ "SYN1221", /* TUXEDO InfinityBook Pro 14 v5 */ ++ "SYN3003", /* HP EliteBook 850 G1 */ + "SYN3015", /* HP EliteBook 840 G2 */ + "SYN3052", /* HP EliteBook 840 G4 */ + "SYN3221", /* HP 15-ay000 */ + "SYN323d", /* HP Spectre X360 13-w013dx */ + "SYN3257", /* HP Envy 13-ad105ng */ ++ "TOS01f6", /* Dynabook Portege X30L-G */ ++ "TOS0213", /* Dynabook Portege X30-D */ + NULL + }; + +diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c +index a4edd7ab37eb2e..e8b35661c53891 100644 +--- a/drivers/net/can/m_can/m_can.c ++++ b/drivers/net/can/m_can/m_can.c +@@ -2066,9 +2066,9 @@ EXPORT_SYMBOL_GPL(m_can_class_register); + + void m_can_class_unregister(struct m_can_classdev *cdev) + { ++ unregister_candev(cdev->net); + if (cdev->is_peripheral) + can_rx_offload_del(&cdev->offload); +- unregister_candev(cdev->net); + } + EXPORT_SYMBOL_GPL(m_can_class_unregister); + +diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c +index 3a0f022b15625d..3fbe79c22286f7 100644 +--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c ++++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c +@@ -3020,8 +3020,8 @@ static int mcp251xfd_remove(struct spi_device *spi) + struct mcp251xfd_priv *priv = spi_get_drvdata(spi); + struct net_device *ndev = priv->ndev; + +- can_rx_offload_del(&priv->offload); + mcp251xfd_unregister(priv); ++ can_rx_offload_del(&priv->offload); + spi->max_speed_hz = priv->spi_max_speed_hz_orig; + free_candev(ndev); + +diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c +index 27025ca5a75981..3bd0d1632b657c 100644 +--- a/drivers/net/dsa/b53/b53_common.c ++++ b/drivers/net/dsa/b53/b53_common.c +@@ -373,15 +373,17 @@ static void b53_enable_vlan(struct b53_device *dev, int port, bool enable, + b53_read8(dev, B53_VLAN_PAGE, 
B53_VLAN_CTRL5, &vc5); + } + ++ vc1 &= ~VC1_RX_MCST_FWD_EN; ++ + if (enable) { + vc0 |= VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID; +- vc1 |= VC1_RX_MCST_UNTAG_EN | VC1_RX_MCST_FWD_EN; ++ vc1 |= VC1_RX_MCST_UNTAG_EN; + vc4 &= ~VC4_ING_VID_CHECK_MASK; + if (enable_filtering) { + vc4 |= VC4_ING_VID_VIO_DROP << VC4_ING_VID_CHECK_S; + vc5 |= VC5_DROP_VTABLE_MISS; + } else { +- vc4 |= VC4_ING_VID_VIO_FWD << VC4_ING_VID_CHECK_S; ++ vc4 |= VC4_NO_ING_VID_CHK << VC4_ING_VID_CHECK_S; + vc5 &= ~VC5_DROP_VTABLE_MISS; + } + +@@ -393,7 +395,7 @@ static void b53_enable_vlan(struct b53_device *dev, int port, bool enable, + + } else { + vc0 &= ~(VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID); +- vc1 &= ~(VC1_RX_MCST_UNTAG_EN | VC1_RX_MCST_FWD_EN); ++ vc1 &= ~VC1_RX_MCST_UNTAG_EN; + vc4 &= ~VC4_ING_VID_CHECK_MASK; + vc5 &= ~VC5_DROP_VTABLE_MISS; + +@@ -1500,12 +1502,21 @@ int b53_vlan_add(struct dsa_switch *ds, int port, + bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; + bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID; + struct b53_vlan *vl; ++ u16 old_pvid, new_pvid; + int err; + + err = b53_vlan_prepare(ds, port, vlan); + if (err) + return err; + ++ b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), &old_pvid); ++ if (pvid) ++ new_pvid = vlan->vid; ++ else if (!pvid && vlan->vid == old_pvid) ++ new_pvid = b53_default_pvid(dev); ++ else ++ new_pvid = old_pvid; ++ + vl = &dev->vlans[vlan->vid]; + + b53_get_vlan_entry(dev, vlan->vid, vl); +@@ -1522,10 +1533,10 @@ int b53_vlan_add(struct dsa_switch *ds, int port, + b53_set_vlan_entry(dev, vlan->vid, vl); + b53_fast_age_vlan(dev, vlan->vid); + +- if (pvid && !dsa_is_cpu_port(ds, port)) { ++ if (!dsa_is_cpu_port(ds, port) && new_pvid != old_pvid) { + b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), +- vlan->vid); +- b53_fast_age_vlan(dev, vlan->vid); ++ new_pvid); ++ b53_fast_age_vlan(dev, old_pvid); + } + + return 0; +@@ -1915,7 +1926,7 @@ EXPORT_SYMBOL(b53_br_join); + void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *br) + { + struct b53_device *dev = ds->priv; +- struct b53_vlan *vl = &dev->vlans[0]; ++ struct b53_vlan *vl; + s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index; + unsigned int i; + u16 pvlan, reg, pvid; +@@ -1941,6 +1952,7 @@ void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *br) + dev->ports[port].vlan_ctl_mask = pvlan; + + pvid = b53_default_pvid(dev); ++ vl = &dev->vlans[pvid]; + + /* Make this port join all VLANs without VLAN entries */ + if (is58xx(dev)) { +@@ -1949,12 +1961,12 @@ void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *br) + if (!(reg & BIT(cpu_port))) + reg |= BIT(cpu_port); + b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, reg); +- } else { +- b53_get_vlan_entry(dev, pvid, vl); +- vl->members |= BIT(port) | BIT(cpu_port); +- vl->untag |= BIT(port) | BIT(cpu_port); +- b53_set_vlan_entry(dev, pvid, vl); + } ++ ++ b53_get_vlan_entry(dev, pvid, vl); ++ vl->members |= BIT(port) | BIT(cpu_port); ++ vl->untag |= BIT(port) | BIT(cpu_port); ++ b53_set_vlan_entry(dev, pvid, vl); + } + EXPORT_SYMBOL(b53_br_leave); + +diff --git a/drivers/net/phy/microchip.c b/drivers/net/phy/microchip.c +index 7c8bcec0a8fab0..230f2fcf9c46a1 100644 +--- a/drivers/net/phy/microchip.c ++++ b/drivers/net/phy/microchip.c +@@ -31,6 +31,47 @@ static int lan88xx_write_page(struct phy_device *phydev, int page) + return __phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, page); + } + ++static int lan88xx_phy_config_intr(struct phy_device *phydev) ++{ ++ int rc; ++ ++ 
if (phydev->interrupts == PHY_INTERRUPT_ENABLED) { ++ /* unmask all source and clear them before enable */ ++ rc = phy_write(phydev, LAN88XX_INT_MASK, 0x7FFF); ++ rc = phy_read(phydev, LAN88XX_INT_STS); ++ rc = phy_write(phydev, LAN88XX_INT_MASK, ++ LAN88XX_INT_MASK_MDINTPIN_EN_ | ++ LAN88XX_INT_MASK_LINK_CHANGE_); ++ } else { ++ rc = phy_write(phydev, LAN88XX_INT_MASK, 0); ++ if (rc) ++ return rc; ++ ++ /* Ack interrupts after they have been disabled */ ++ rc = phy_read(phydev, LAN88XX_INT_STS); ++ } ++ ++ return rc < 0 ? rc : 0; ++} ++ ++static irqreturn_t lan88xx_handle_interrupt(struct phy_device *phydev) ++{ ++ int irq_status; ++ ++ irq_status = phy_read(phydev, LAN88XX_INT_STS); ++ if (irq_status < 0) { ++ phy_error(phydev); ++ return IRQ_NONE; ++ } ++ ++ if (!(irq_status & LAN88XX_INT_STS_LINK_CHANGE_)) ++ return IRQ_NONE; ++ ++ phy_trigger_machine(phydev); ++ ++ return IRQ_HANDLED; ++} ++ + static int lan88xx_suspend(struct phy_device *phydev) + { + struct lan88xx_priv *priv = phydev->priv; +@@ -347,9 +388,8 @@ static struct phy_driver microchip_phy_driver[] = { + .config_aneg = lan88xx_config_aneg, + .link_change_notify = lan88xx_link_change_notify, + +- /* Interrupt handling is broken, do not define related +- * functions to force polling. +- */ ++ .config_intr = lan88xx_phy_config_intr, ++ .handle_interrupt = lan88xx_handle_interrupt, + + .suspend = lan88xx_suspend, + .resume = genphy_resume, +diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c +index b9ba20f4048d13..7065f66ef8cf4b 100644 +--- a/drivers/nvme/host/core.c ++++ b/drivers/nvme/host/core.c +@@ -4389,7 +4389,8 @@ static void nvme_fw_act_work(struct work_struct *work) + msleep(100); + } + +- if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE)) ++ if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING) || ++ !nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE)) + return; + + nvme_start_queues(ctrl); +diff --git a/drivers/staging/axis-fifo/axis-fifo.c b/drivers/staging/axis-fifo/axis-fifo.c +index b23eabb863d175..46b79c945079ef 100644 +--- a/drivers/staging/axis-fifo/axis-fifo.c ++++ b/drivers/staging/axis-fifo/axis-fifo.c +@@ -401,16 +401,14 @@ static ssize_t axis_fifo_read(struct file *f, char __user *buf, + + bytes_available = ioread32(fifo->base_addr + XLLF_RLR_OFFSET); + if (!bytes_available) { +- dev_err(fifo->dt_device, "received a packet of length 0 - fifo core will be reset\n"); +- reset_ip_core(fifo); ++ dev_err(fifo->dt_device, "received a packet of length 0\n"); + ret = -EIO; + goto end_unlock; + } + + if (bytes_available > len) { +- dev_err(fifo->dt_device, "user read buffer too small (available bytes=%zu user buffer bytes=%zu) - fifo core will be reset\n", ++ dev_err(fifo->dt_device, "user read buffer too small (available bytes=%zu user buffer bytes=%zu)\n", + bytes_available, len); +- reset_ip_core(fifo); + ret = -EINVAL; + goto end_unlock; + } +@@ -419,8 +417,7 @@ static ssize_t axis_fifo_read(struct file *f, char __user *buf, + /* this probably can't happen unless IP + * registers were previously mishandled + */ +- dev_err(fifo->dt_device, "received a packet that isn't word-aligned - fifo core will be reset\n"); +- reset_ip_core(fifo); ++ dev_err(fifo->dt_device, "received a packet that isn't word-aligned\n"); + ret = -EIO; + goto end_unlock; + } +@@ -441,7 +438,6 @@ static ssize_t axis_fifo_read(struct file *f, char __user *buf, + + if (copy_to_user(buf + copied * sizeof(u32), tmp_buf, + copy * sizeof(u32))) { +- reset_ip_core(fifo); + ret = -EFAULT; + goto end_unlock; + } +@@ -552,7 +548,6 @@ 
static ssize_t axis_fifo_write(struct file *f, const char __user *buf, + + if (copy_from_user(tmp_buf, buf + copied * sizeof(u32), + copy * sizeof(u32))) { +- reset_ip_core(fifo); + ret = -EFAULT; + goto end_unlock; + } +@@ -785,9 +780,6 @@ static int axis_fifo_parse_dt(struct axis_fifo *fifo) + goto end; + } + +- /* IP sets TDFV to fifo depth - 4 so we will do the same */ +- fifo->tx_fifo_depth -= 4; +- + ret = get_dts_property(fifo, "xlnx,use-rx-data", &fifo->has_rx_fifo); + if (ret) { + dev_err(fifo->dt_device, "missing xlnx,use-rx-data property\n"); +diff --git a/drivers/staging/iio/adc/ad7816.c b/drivers/staging/iio/adc/ad7816.c +index 6c14d7bcdd6750..081b17f498638b 100644 +--- a/drivers/staging/iio/adc/ad7816.c ++++ b/drivers/staging/iio/adc/ad7816.c +@@ -136,7 +136,7 @@ static ssize_t ad7816_store_mode(struct device *dev, + struct iio_dev *indio_dev = dev_to_iio_dev(dev); + struct ad7816_chip_info *chip = iio_priv(indio_dev); + +- if (strcmp(buf, "full")) { ++ if (strcmp(buf, "full") == 0) { + gpiod_set_value(chip->rdwr_pin, 1); + chip->mode = AD7816_FULL; + } else { +diff --git a/drivers/usb/cdns3/cdnsp-gadget.c b/drivers/usb/cdns3/cdnsp-gadget.c +index d9fb29eb99db12..1c29491ee56d5a 100644 +--- a/drivers/usb/cdns3/cdnsp-gadget.c ++++ b/drivers/usb/cdns3/cdnsp-gadget.c +@@ -138,6 +138,26 @@ static void cdnsp_clear_port_change_bit(struct cdnsp_device *pdev, + (portsc & PORT_CHANGE_BITS), port_regs); + } + ++static void cdnsp_set_apb_timeout_value(struct cdnsp_device *pdev) ++{ ++ struct cdns *cdns = dev_get_drvdata(pdev->dev); ++ __le32 __iomem *reg; ++ void __iomem *base; ++ u32 offset = 0; ++ u32 val; ++ ++ if (!cdns->override_apb_timeout) ++ return; ++ ++ base = &pdev->cap_regs->hc_capbase; ++ offset = cdnsp_find_next_ext_cap(base, offset, D_XEC_PRE_REGS_CAP); ++ reg = base + offset + REG_CHICKEN_BITS_3_OFFSET; ++ ++ val = le32_to_cpu(readl(reg)); ++ val = CHICKEN_APB_TIMEOUT_SET(val, cdns->override_apb_timeout); ++ writel(cpu_to_le32(val), reg); ++} ++ + static void cdnsp_set_chicken_bits_2(struct cdnsp_device *pdev, u32 bit) + { + __le32 __iomem *reg; +@@ -1779,6 +1799,8 @@ static void cdnsp_get_rev_cap(struct cdnsp_device *pdev) + reg += cdnsp_find_next_ext_cap(reg, 0, RTL_REV_CAP); + pdev->rev_cap = reg; + ++ pdev->rtl_revision = readl(&pdev->rev_cap->rtl_revision); ++ + dev_info(pdev->dev, "Rev: %08x/%08x, eps: %08x, buff: %08x/%08x\n", + readl(&pdev->rev_cap->ctrl_revision), + readl(&pdev->rev_cap->rtl_revision), +@@ -1804,6 +1826,15 @@ static int cdnsp_gen_setup(struct cdnsp_device *pdev) + pdev->hci_version = HC_VERSION(pdev->hcc_params); + pdev->hcc_params = readl(&pdev->cap_regs->hcc_params); + ++ /* ++ * Override the APB timeout value to give the controller more time for ++ * enabling UTMI clock and synchronizing APB and UTMI clock domains. ++ * This fix is platform specific and is required to fixes issue with ++ * reading incorrect value from PORTSC register after resuming ++ * from L1 state. ++ */ ++ cdnsp_set_apb_timeout_value(pdev); ++ + cdnsp_get_rev_cap(pdev); + + /* Make sure the Device Controller is halted. 
*/ +diff --git a/drivers/usb/cdns3/cdnsp-gadget.h b/drivers/usb/cdns3/cdnsp-gadget.h +index a61aef0dc273ca..5cffc1444d3a0c 100644 +--- a/drivers/usb/cdns3/cdnsp-gadget.h ++++ b/drivers/usb/cdns3/cdnsp-gadget.h +@@ -520,6 +520,9 @@ struct cdnsp_rev_cap { + #define REG_CHICKEN_BITS_2_OFFSET 0x48 + #define CHICKEN_XDMA_2_TP_CACHE_DIS BIT(28) + ++#define REG_CHICKEN_BITS_3_OFFSET 0x4C ++#define CHICKEN_APB_TIMEOUT_SET(p, val) (((p) & ~GENMASK(21, 0)) | (val)) ++ + /* XBUF Extended Capability ID. */ + #define XBUF_CAP_ID 0xCB + #define XBUF_RX_TAG_MASK_0_OFFSET 0x1C +@@ -1359,6 +1362,7 @@ struct cdnsp_port { + * @rev_cap: Controller Capabilities Registers. + * @hcs_params1: Cached register copies of read-only HCSPARAMS1 + * @hcc_params: Cached register copies of read-only HCCPARAMS1 ++ * @rtl_revision: Cached controller rtl revision. + * @setup: Temporary buffer for setup packet. + * @ep0_preq: Internal allocated request used during enumeration. + * @ep0_stage: ep0 stage during enumeration process. +@@ -1413,6 +1417,8 @@ struct cdnsp_device { + __u32 hcs_params1; + __u32 hcs_params3; + __u32 hcc_params; ++ #define RTL_REVISION_NEW_LPM 0x2700 ++ __u32 rtl_revision; + /* Lock used in interrupt thread context. */ + spinlock_t lock; + struct usb_ctrlrequest setup; +diff --git a/drivers/usb/cdns3/cdnsp-pci.c b/drivers/usb/cdns3/cdnsp-pci.c +index a85db23fa19f23..b7a1f28faa1fec 100644 +--- a/drivers/usb/cdns3/cdnsp-pci.c ++++ b/drivers/usb/cdns3/cdnsp-pci.c +@@ -33,6 +33,8 @@ + #define CDNS_DRD_ID 0x0100 + #define CDNS_DRD_IF (PCI_CLASS_SERIAL_USB << 8 | 0x80) + ++#define CHICKEN_APB_TIMEOUT_VALUE 0x1C20 ++ + static struct pci_dev *cdnsp_get_second_fun(struct pci_dev *pdev) + { + /* +@@ -144,6 +146,14 @@ static int cdnsp_pci_probe(struct pci_dev *pdev, + cdnsp->otg_irq = pdev->irq; + } + ++ /* ++ * Cadence PCI based platform require some longer timeout for APB ++ * to fixes domain clock synchronization issue after resuming ++ * controller from L1 state. ++ */ ++ cdnsp->override_apb_timeout = CHICKEN_APB_TIMEOUT_VALUE; ++ pci_set_drvdata(pdev, cdnsp); ++ + if (pci_is_enabled(func)) { + cdnsp->dev = dev; + cdnsp->gadget_init = cdnsp_gadget_init; +@@ -153,8 +163,6 @@ static int cdnsp_pci_probe(struct pci_dev *pdev, + goto free_cdnsp; + } + +- pci_set_drvdata(pdev, cdnsp); +- + device_wakeup_enable(&pdev->dev); + if (pci_dev_run_wake(pdev)) + pm_runtime_put_noidle(&pdev->dev); +diff --git a/drivers/usb/cdns3/cdnsp-ring.c b/drivers/usb/cdns3/cdnsp-ring.c +index 47096b8e317939..6247584cb93913 100644 +--- a/drivers/usb/cdns3/cdnsp-ring.c ++++ b/drivers/usb/cdns3/cdnsp-ring.c +@@ -308,7 +308,8 @@ static bool cdnsp_ring_ep_doorbell(struct cdnsp_device *pdev, + + writel(db_value, reg_addr); + +- cdnsp_force_l0_go(pdev); ++ if (pdev->rtl_revision < RTL_REVISION_NEW_LPM) ++ cdnsp_force_l0_go(pdev); + + /* Doorbell was set. */ + return true; +diff --git a/drivers/usb/cdns3/core.h b/drivers/usb/cdns3/core.h +index 7d4b8311051d88..847d738b909e08 100644 +--- a/drivers/usb/cdns3/core.h ++++ b/drivers/usb/cdns3/core.h +@@ -79,6 +79,8 @@ struct cdns3_platform_data { + * @pdata: platform data from glue layer + * @lock: spinlock structure + * @xhci_plat_data: xhci private data structure pointer ++ * @override_apb_timeout: hold value of APB timeout. For value 0 the default ++ * value in CHICKEN_BITS_3 will be preserved. 
+ * @gadget_init: pointer to gadget initialization function + */ + struct cdns { +@@ -117,6 +119,7 @@ struct cdns { + struct cdns3_platform_data *pdata; + spinlock_t lock; + struct xhci_plat_priv *xhci_plat_data; ++ u32 override_apb_timeout; + + int (*gadget_init)(struct cdns *cdns); + }; +diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c +index c2e666e82857c1..2f92905e05cad0 100644 +--- a/drivers/usb/class/usbtmc.c ++++ b/drivers/usb/class/usbtmc.c +@@ -482,6 +482,7 @@ static int usbtmc_get_stb(struct usbtmc_file_data *file_data, __u8 *stb) + u8 *buffer; + u8 tag; + int rv; ++ long wait_rv; + + dev_dbg(dev, "Enter ioctl_read_stb iin_ep_present: %d\n", + data->iin_ep_present); +@@ -511,16 +512,17 @@ static int usbtmc_get_stb(struct usbtmc_file_data *file_data, __u8 *stb) + } + + if (data->iin_ep_present) { +- rv = wait_event_interruptible_timeout( ++ wait_rv = wait_event_interruptible_timeout( + data->waitq, + atomic_read(&data->iin_data_valid) != 0, + file_data->timeout); +- if (rv < 0) { +- dev_dbg(dev, "wait interrupted %d\n", rv); ++ if (wait_rv < 0) { ++ dev_dbg(dev, "wait interrupted %ld\n", wait_rv); ++ rv = wait_rv; + goto exit; + } + +- if (rv == 0) { ++ if (wait_rv == 0) { + dev_dbg(dev, "wait timed out\n"); + rv = -ETIMEDOUT; + goto exit; +@@ -539,6 +541,8 @@ static int usbtmc_get_stb(struct usbtmc_file_data *file_data, __u8 *stb) + + dev_dbg(dev, "stb:0x%02x received %d\n", (unsigned int)*stb, rv); + ++ rv = 0; ++ + exit: + /* bump interrupt bTag */ + data->iin_bTag += 1; +@@ -602,9 +606,9 @@ static int usbtmc488_ioctl_wait_srq(struct usbtmc_file_data *file_data, + { + struct usbtmc_device_data *data = file_data->data; + struct device *dev = &data->intf->dev; +- int rv; + u32 timeout; + unsigned long expire; ++ long wait_rv; + + if (!data->iin_ep_present) { + dev_dbg(dev, "no interrupt endpoint present\n"); +@@ -618,25 +622,24 @@ static int usbtmc488_ioctl_wait_srq(struct usbtmc_file_data *file_data, + + mutex_unlock(&data->io_mutex); + +- rv = wait_event_interruptible_timeout( +- data->waitq, +- atomic_read(&file_data->srq_asserted) != 0 || +- atomic_read(&file_data->closing), +- expire); ++ wait_rv = wait_event_interruptible_timeout( ++ data->waitq, ++ atomic_read(&file_data->srq_asserted) != 0 || ++ atomic_read(&file_data->closing), ++ expire); + + mutex_lock(&data->io_mutex); + + /* Note! disconnect or close could be called in the meantime */ + if (atomic_read(&file_data->closing) || data->zombie) +- rv = -ENODEV; ++ return -ENODEV; + +- if (rv < 0) { +- /* dev can be invalid now! 
*/ +- pr_debug("%s - wait interrupted %d\n", __func__, rv); +- return rv; ++ if (wait_rv < 0) { ++ dev_dbg(dev, "%s - wait interrupted %ld\n", __func__, wait_rv); ++ return wait_rv; + } + +- if (rv == 0) { ++ if (wait_rv == 0) { + dev_dbg(dev, "%s - wait timed out\n", __func__); + return -ETIMEDOUT; + } +@@ -830,6 +833,7 @@ static ssize_t usbtmc_generic_read(struct usbtmc_file_data *file_data, + unsigned long expire; + int bufcount = 1; + int again = 0; ++ long wait_rv; + + /* mutex already locked */ + +@@ -942,19 +946,24 @@ static ssize_t usbtmc_generic_read(struct usbtmc_file_data *file_data, + if (!(flags & USBTMC_FLAG_ASYNC)) { + dev_dbg(dev, "%s: before wait time %lu\n", + __func__, expire); +- retval = wait_event_interruptible_timeout( ++ wait_rv = wait_event_interruptible_timeout( + file_data->wait_bulk_in, + usbtmc_do_transfer(file_data), + expire); + +- dev_dbg(dev, "%s: wait returned %d\n", +- __func__, retval); ++ dev_dbg(dev, "%s: wait returned %ld\n", ++ __func__, wait_rv); ++ ++ if (wait_rv < 0) { ++ retval = wait_rv; ++ goto error; ++ } + +- if (retval <= 0) { +- if (retval == 0) +- retval = -ETIMEDOUT; ++ if (wait_rv == 0) { ++ retval = -ETIMEDOUT; + goto error; + } ++ + } + + urb = usb_get_from_anchor(&file_data->in_anchor); +@@ -1380,7 +1389,10 @@ static ssize_t usbtmc_read(struct file *filp, char __user *buf, + if (!buffer) + return -ENOMEM; + +- mutex_lock(&data->io_mutex); ++ retval = mutex_lock_interruptible(&data->io_mutex); ++ if (retval < 0) ++ goto exit_nolock; ++ + if (data->zombie) { + retval = -ENODEV; + goto exit; +@@ -1503,6 +1515,7 @@ static ssize_t usbtmc_read(struct file *filp, char __user *buf, + + exit: + mutex_unlock(&data->io_mutex); ++exit_nolock: + kfree(buffer); + return retval; + } +diff --git a/drivers/usb/gadget/udc/tegra-xudc.c b/drivers/usb/gadget/udc/tegra-xudc.c +index fdbb9d73aa8e49..13a2f5276072ad 100644 +--- a/drivers/usb/gadget/udc/tegra-xudc.c ++++ b/drivers/usb/gadget/udc/tegra-xudc.c +@@ -1737,6 +1737,10 @@ static int __tegra_xudc_ep_disable(struct tegra_xudc_ep *ep) + val = xudc_readl(xudc, CTRL); + val &= ~CTRL_RUN; + xudc_writel(xudc, val, CTRL); ++ ++ val = xudc_readl(xudc, ST); ++ if (val & ST_RC) ++ xudc_writel(xudc, ST_RC, ST); + } + + dev_info(xudc->dev, "ep %u disabled\n", ep->index); +diff --git a/drivers/usb/host/uhci-platform.c b/drivers/usb/host/uhci-platform.c +index be9e9db7cad104..c0834bac4c953f 100644 +--- a/drivers/usb/host/uhci-platform.c ++++ b/drivers/usb/host/uhci-platform.c +@@ -122,7 +122,7 @@ static int uhci_hcd_platform_probe(struct platform_device *pdev) + } + + /* Get and enable clock if any specified */ +- uhci->clk = devm_clk_get(&pdev->dev, NULL); ++ uhci->clk = devm_clk_get_optional(&pdev->dev, NULL); + if (IS_ERR(uhci->clk)) { + ret = PTR_ERR(uhci->clk); + goto err_rmr; +diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c +index 51eabc5e877012..14a772feab7946 100644 +--- a/drivers/usb/host/xhci-tegra.c ++++ b/drivers/usb/host/xhci-tegra.c +@@ -1228,6 +1228,7 @@ static void tegra_xhci_id_work(struct work_struct *work) + tegra->otg_usb3_port = tegra_xusb_padctl_get_usb3_companion(tegra->padctl, + tegra->otg_usb2_port); + ++ pm_runtime_get_sync(tegra->dev); + if (tegra->host_mode) { + /* switch to host mode */ + if (tegra->otg_usb3_port >= 0) { +@@ -1257,6 +1258,7 @@ static void tegra_xhci_id_work(struct work_struct *work) + } + + tegra_xhci_set_port_power(tegra, true, true); ++ pm_runtime_mark_last_busy(tegra->dev); + + } else { + if (tegra->otg_usb3_port >= 0) +@@ -1264,6 +1266,7 
@@ static void tegra_xhci_id_work(struct work_struct *work) + + tegra_xhci_set_port_power(tegra, true, false); + } ++ pm_runtime_put_autosuspend(tegra->dev); + } + + #if IS_ENABLED(CONFIG_PM) || IS_ENABLED(CONFIG_PM_SLEEP) +diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c +index b645bbc0c57353..d8e299bcbf8408 100644 +--- a/drivers/usb/typec/tcpm/tcpm.c ++++ b/drivers/usb/typec/tcpm/tcpm.c +@@ -5055,7 +5055,7 @@ static void _tcpm_cc_change(struct tcpm_port *port, enum typec_cc_status cc1, + case SNK_TRY_WAIT_DEBOUNCE: + if (!tcpm_port_is_sink(port)) { + port->max_wait = 0; +- tcpm_set_state(port, SRC_TRYWAIT, 0); ++ tcpm_set_state(port, SRC_TRYWAIT, PD_T_PD_DEBOUNCE); + } + break; + case SRC_TRY_WAIT: +diff --git a/drivers/usb/typec/ucsi/displayport.c b/drivers/usb/typec/ucsi/displayport.c +index 2431febc461516..8c19081c325542 100644 +--- a/drivers/usb/typec/ucsi/displayport.c ++++ b/drivers/usb/typec/ucsi/displayport.c +@@ -296,6 +296,8 @@ void ucsi_displayport_remove_partner(struct typec_altmode *alt) + if (!dp) + return; + ++ cancel_work_sync(&dp->work); ++ + dp->data.conf = 0; + dp->data.status = 0; + dp->initialized = false; +diff --git a/drivers/xen/xenbus/xenbus.h b/drivers/xen/xenbus/xenbus.h +index 2754bdfadcb89c..4ba73320694a4c 100644 +--- a/drivers/xen/xenbus/xenbus.h ++++ b/drivers/xen/xenbus/xenbus.h +@@ -77,6 +77,7 @@ enum xb_req_state { + struct xb_req_data { + struct list_head list; + wait_queue_head_t wq; ++ struct kref kref; + struct xsd_sockmsg msg; + uint32_t caller_req_id; + enum xsd_sockmsg_type type; +@@ -103,6 +104,7 @@ int xb_init_comms(void); + void xb_deinit_comms(void); + int xs_watch_msg(struct xs_watch_event *event); + void xs_request_exit(struct xb_req_data *req); ++void xs_free_req(struct kref *kref); + + int xenbus_match(struct device *_dev, struct device_driver *_drv); + int xenbus_dev_probe(struct device *_dev); +diff --git a/drivers/xen/xenbus/xenbus_comms.c b/drivers/xen/xenbus/xenbus_comms.c +index e5fda0256feb3d..82df2da1b880b8 100644 +--- a/drivers/xen/xenbus/xenbus_comms.c ++++ b/drivers/xen/xenbus/xenbus_comms.c +@@ -309,8 +309,8 @@ static int process_msg(void) + virt_wmb(); + req->state = xb_req_state_got_reply; + req->cb(req); +- } else +- kfree(req); ++ } ++ kref_put(&req->kref, xs_free_req); + } + + mutex_unlock(&xs_response_mutex); +@@ -386,14 +386,13 @@ static int process_writes(void) + state.req->msg.type = XS_ERROR; + state.req->err = err; + list_del(&state.req->list); +- if (state.req->state == xb_req_state_aborted) +- kfree(state.req); +- else { ++ if (state.req->state != xb_req_state_aborted) { + /* write err, then update state */ + virt_wmb(); + state.req->state = xb_req_state_got_reply; + wake_up(&state.req->wq); + } ++ kref_put(&state.req->kref, xs_free_req); + + mutex_unlock(&xb_write_mutex); + +diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c +index 0792fda49a15f3..c495cff3da308b 100644 +--- a/drivers/xen/xenbus/xenbus_dev_frontend.c ++++ b/drivers/xen/xenbus/xenbus_dev_frontend.c +@@ -406,7 +406,7 @@ void xenbus_dev_queue_reply(struct xb_req_data *req) + mutex_unlock(&u->reply_mutex); + + kfree(req->body); +- kfree(req); ++ kref_put(&req->kref, xs_free_req); + + kref_put(&u->kref, xenbus_file_free); + +diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c +index 12e02eb01f5991..a4dd92719e7ae8 100644 +--- a/drivers/xen/xenbus/xenbus_xs.c ++++ b/drivers/xen/xenbus/xenbus_xs.c +@@ -112,6 +112,12 @@ static void xs_suspend_exit(void) + 
wake_up_all(&xs_state_enter_wq); + } + ++void xs_free_req(struct kref *kref) ++{ ++ struct xb_req_data *req = container_of(kref, struct xb_req_data, kref); ++ kfree(req); ++} ++ + static uint32_t xs_request_enter(struct xb_req_data *req) + { + uint32_t rq_id; +@@ -237,6 +243,12 @@ static void xs_send(struct xb_req_data *req, struct xsd_sockmsg *msg) + req->caller_req_id = req->msg.req_id; + req->msg.req_id = xs_request_enter(req); + ++ /* ++ * Take 2nd ref. One for this thread, and the second for the ++ * xenbus_thread. ++ */ ++ kref_get(&req->kref); ++ + mutex_lock(&xb_write_mutex); + list_add_tail(&req->list, &xb_write_list); + notify = list_is_singular(&xb_write_list); +@@ -261,8 +273,8 @@ static void *xs_wait_for_reply(struct xb_req_data *req, struct xsd_sockmsg *msg) + if (req->state == xb_req_state_queued || + req->state == xb_req_state_wait_reply) + req->state = xb_req_state_aborted; +- else +- kfree(req); ++ ++ kref_put(&req->kref, xs_free_req); + mutex_unlock(&xb_write_mutex); + + return ret; +@@ -291,6 +303,7 @@ int xenbus_dev_request_and_reply(struct xsd_sockmsg *msg, void *par) + req->cb = xenbus_dev_queue_reply; + req->par = par; + req->user_req = true; ++ kref_init(&req->kref); + + xs_send(req, msg); + +@@ -319,6 +332,7 @@ static void *xs_talkv(struct xenbus_transaction t, + req->num_vecs = num_vecs; + req->cb = xs_wake_up; + req->user_req = false; ++ kref_init(&req->kref); + + msg.req_id = 0; + msg.tx_id = t.id; +diff --git a/fs/namespace.c b/fs/namespace.c +index 642baef4d9aaad..27ec6d0a68ff53 100644 +--- a/fs/namespace.c ++++ b/fs/namespace.c +@@ -586,7 +586,7 @@ int __legitimize_mnt(struct vfsmount *bastard, unsigned seq) + return 0; + mnt = real_mount(bastard); + mnt_add_count(mnt, 1); +- smp_mb(); // see mntput_no_expire() ++ smp_mb(); // see mntput_no_expire() and do_umount() + if (likely(!read_seqretry(&mount_lock, seq))) + return 0; + if (bastard->mnt_flags & MNT_SYNC_UMOUNT) { +@@ -1664,6 +1664,7 @@ static int do_umount(struct mount *mnt, int flags) + umount_tree(mnt, UMOUNT_PROPAGATE); + retval = 0; + } else { ++ smp_mb(); // paired with __legitimize_mnt() + shrink_submounts(mnt); + retval = -EBUSY; + if (!propagate_mount_busy(mnt, 2)) { +diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c +index 9b645137fb00b6..e707a679f44c6e 100644 +--- a/fs/ocfs2/journal.c ++++ b/fs/ocfs2/journal.c +@@ -173,7 +173,7 @@ int ocfs2_recovery_init(struct ocfs2_super *osb) + struct ocfs2_recovery_map *rm; + + mutex_init(&osb->recovery_lock); +- osb->disable_recovery = 0; ++ osb->recovery_state = OCFS2_REC_ENABLED; + osb->recovery_thread_task = NULL; + init_waitqueue_head(&osb->recovery_event); + +@@ -192,31 +192,53 @@ int ocfs2_recovery_init(struct ocfs2_super *osb) + return 0; + } + +-/* we can't grab the goofy sem lock from inside wait_event, so we use +- * memory barriers to make sure that we'll see the null task before +- * being woken up */ + static int ocfs2_recovery_thread_running(struct ocfs2_super *osb) + { +- mb(); + return osb->recovery_thread_task != NULL; + } + +-void ocfs2_recovery_exit(struct ocfs2_super *osb) ++static void ocfs2_recovery_disable(struct ocfs2_super *osb, ++ enum ocfs2_recovery_state state) + { +- struct ocfs2_recovery_map *rm; +- +- /* disable any new recovery threads and wait for any currently +- * running ones to exit. Do this before setting the vol_state. */ + mutex_lock(&osb->recovery_lock); +- osb->disable_recovery = 1; ++ /* ++ * If recovery thread is not running, we can directly transition to ++ * final state. 
++ */ ++ if (!ocfs2_recovery_thread_running(osb)) { ++ osb->recovery_state = state + 1; ++ goto out_lock; ++ } ++ osb->recovery_state = state; ++ /* Wait for recovery thread to acknowledge state transition */ ++ wait_event_cmd(osb->recovery_event, ++ !ocfs2_recovery_thread_running(osb) || ++ osb->recovery_state >= state + 1, ++ mutex_unlock(&osb->recovery_lock), ++ mutex_lock(&osb->recovery_lock)); ++out_lock: + mutex_unlock(&osb->recovery_lock); +- wait_event(osb->recovery_event, !ocfs2_recovery_thread_running(osb)); + +- /* At this point, we know that no more recovery threads can be +- * launched, so wait for any recovery completion work to +- * complete. */ ++ /* ++ * At this point we know that no more recovery work can be queued so ++ * wait for any recovery completion work to complete. ++ */ + if (osb->ocfs2_wq) + flush_workqueue(osb->ocfs2_wq); ++} ++ ++void ocfs2_recovery_disable_quota(struct ocfs2_super *osb) ++{ ++ ocfs2_recovery_disable(osb, OCFS2_REC_QUOTA_WANT_DISABLE); ++} ++ ++void ocfs2_recovery_exit(struct ocfs2_super *osb) ++{ ++ struct ocfs2_recovery_map *rm; ++ ++ /* disable any new recovery threads and wait for any currently ++ * running ones to exit. Do this before setting the vol_state. */ ++ ocfs2_recovery_disable(osb, OCFS2_REC_WANT_DISABLE); + + /* + * Now that recovery is shut down, and the osb is about to be +@@ -1410,6 +1432,18 @@ static int __ocfs2_recovery_thread(void *arg) + } + } + restart: ++ if (quota_enabled) { ++ mutex_lock(&osb->recovery_lock); ++ /* Confirm that recovery thread will no longer recover quotas */ ++ if (osb->recovery_state == OCFS2_REC_QUOTA_WANT_DISABLE) { ++ osb->recovery_state = OCFS2_REC_QUOTA_DISABLED; ++ wake_up(&osb->recovery_event); ++ } ++ if (osb->recovery_state >= OCFS2_REC_QUOTA_DISABLED) ++ quota_enabled = 0; ++ mutex_unlock(&osb->recovery_lock); ++ } ++ + status = ocfs2_super_lock(osb, 1); + if (status < 0) { + mlog_errno(status); +@@ -1507,13 +1541,13 @@ static int __ocfs2_recovery_thread(void *arg) + + ocfs2_free_replay_slots(osb); + osb->recovery_thread_task = NULL; +- mb(); /* sync with ocfs2_recovery_thread_running */ ++ if (osb->recovery_state == OCFS2_REC_WANT_DISABLE) ++ osb->recovery_state = OCFS2_REC_DISABLED; + wake_up(&osb->recovery_event); + + mutex_unlock(&osb->recovery_lock); + +- if (quota_enabled) +- kfree(rm_quota); ++ kfree(rm_quota); + + /* no one is callint kthread_stop() for us so the kthread() api + * requires that we call do_exit(). And it isn't exported, but +@@ -1523,14 +1557,16 @@ static int __ocfs2_recovery_thread(void *arg) + + void ocfs2_recovery_thread(struct ocfs2_super *osb, int node_num) + { ++ int was_set = -1; ++ + mutex_lock(&osb->recovery_lock); ++ if (osb->recovery_state < OCFS2_REC_WANT_DISABLE) ++ was_set = ocfs2_recovery_map_set(osb, node_num); + + trace_ocfs2_recovery_thread(node_num, osb->node_num, +- osb->disable_recovery, osb->recovery_thread_task, +- osb->disable_recovery ? 
+- -1 : ocfs2_recovery_map_set(osb, node_num)); ++ osb->recovery_state, osb->recovery_thread_task, was_set); + +- if (osb->disable_recovery) ++ if (osb->recovery_state >= OCFS2_REC_WANT_DISABLE) + goto out; + + if (osb->recovery_thread_task) +diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h +index 405066a8779b28..6036eb150e1aeb 100644 +--- a/fs/ocfs2/journal.h ++++ b/fs/ocfs2/journal.h +@@ -148,6 +148,7 @@ void ocfs2_wait_for_recovery(struct ocfs2_super *osb); + + int ocfs2_recovery_init(struct ocfs2_super *osb); + void ocfs2_recovery_exit(struct ocfs2_super *osb); ++void ocfs2_recovery_disable_quota(struct ocfs2_super *osb); + + int ocfs2_compute_replay_slots(struct ocfs2_super *osb); + void ocfs2_free_replay_slots(struct ocfs2_super *osb); +diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h +index cf21aecdf54760..adec276bf4c524 100644 +--- a/fs/ocfs2/ocfs2.h ++++ b/fs/ocfs2/ocfs2.h +@@ -284,6 +284,21 @@ enum ocfs2_mount_options + #define OCFS2_OSB_ERROR_FS 0x0004 + #define OCFS2_DEFAULT_ATIME_QUANTUM 60 + ++enum ocfs2_recovery_state { ++ OCFS2_REC_ENABLED = 0, ++ OCFS2_REC_QUOTA_WANT_DISABLE, ++ /* ++ * Must be OCFS2_REC_QUOTA_WANT_DISABLE + 1 for ++ * ocfs2_recovery_disable_quota() to work. ++ */ ++ OCFS2_REC_QUOTA_DISABLED, ++ OCFS2_REC_WANT_DISABLE, ++ /* ++ * Must be OCFS2_REC_WANT_DISABLE + 1 for ocfs2_recovery_exit() to work ++ */ ++ OCFS2_REC_DISABLED, ++}; ++ + struct ocfs2_journal; + struct ocfs2_slot_info; + struct ocfs2_recovery_map; +@@ -346,7 +361,7 @@ struct ocfs2_super + struct ocfs2_recovery_map *recovery_map; + struct ocfs2_replay_map *replay_map; + struct task_struct *recovery_thread_task; +- int disable_recovery; ++ enum ocfs2_recovery_state recovery_state; + wait_queue_head_t checkpoint_event; + struct ocfs2_journal *journal; + unsigned long osb_commit_interval; +diff --git a/fs/ocfs2/quota_local.c b/fs/ocfs2/quota_local.c +index 77d5aa90338f1e..1baa68c01c6715 100644 +--- a/fs/ocfs2/quota_local.c ++++ b/fs/ocfs2/quota_local.c +@@ -453,8 +453,7 @@ struct ocfs2_quota_recovery *ocfs2_begin_quota_recovery( + + /* Sync changes in local quota file into global quota file and + * reinitialize local quota file. +- * The function expects local quota file to be already locked and +- * s_umount locked in shared mode. */ ++ * The function expects local quota file to be already locked. */ + static int ocfs2_recover_local_quota_file(struct inode *lqinode, + int type, + struct ocfs2_quota_recovery *rec) +@@ -585,7 +584,6 @@ int ocfs2_finish_quota_recovery(struct ocfs2_super *osb, + { + unsigned int ino[OCFS2_MAXQUOTAS] = { LOCAL_USER_QUOTA_SYSTEM_INODE, + LOCAL_GROUP_QUOTA_SYSTEM_INODE }; +- struct super_block *sb = osb->sb; + struct ocfs2_local_disk_dqinfo *ldinfo; + struct buffer_head *bh; + handle_t *handle; +@@ -597,7 +595,6 @@ int ocfs2_finish_quota_recovery(struct ocfs2_super *osb, + printk(KERN_NOTICE "ocfs2: Finishing quota recovery on device (%s) for " + "slot %u\n", osb->dev_str, slot_num); + +- down_read(&sb->s_umount); + for (type = 0; type < OCFS2_MAXQUOTAS; type++) { + if (list_empty(&(rec->r_list[type]))) + continue; +@@ -674,7 +671,6 @@ int ocfs2_finish_quota_recovery(struct ocfs2_super *osb, + break; + } + out: +- up_read(&sb->s_umount); + kfree(rec); + return status; + } +@@ -840,8 +836,7 @@ static int ocfs2_local_free_info(struct super_block *sb, int type) + ocfs2_release_local_quota_bitmaps(&oinfo->dqi_chunk); + + /* +- * s_umount held in exclusive mode protects us against racing with +- * recovery thread... 
++ * ocfs2_dismount_volume() has already aborted quota recovery... + */ + if (oinfo->dqi_rec) { + ocfs2_free_quota_recovery(oinfo->dqi_rec); +diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c +index 25f745f4b63a15..bb174009206e71 100644 +--- a/fs/ocfs2/super.c ++++ b/fs/ocfs2/super.c +@@ -1872,6 +1872,9 @@ static void ocfs2_dismount_volume(struct super_block *sb, int mnt_err) + /* Orphan scan should be stopped as early as possible */ + ocfs2_orphan_scan_stop(osb); + ++ /* Stop quota recovery so that we can disable quotas */ ++ ocfs2_recovery_disable_quota(osb); ++ + ocfs2_disable_quotas(osb); + + /* All dquots should be freed by now */ +diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h +index d908af5917339d..978769e545b5ff 100644 +--- a/include/linux/rcupdate.h ++++ b/include/linux/rcupdate.h +@@ -979,6 +979,9 @@ static inline notrace void rcu_read_unlock_sched_notrace(void) + #define kvfree_rcu(...) KVFREE_GET_MACRO(__VA_ARGS__, \ + kvfree_rcu_arg_2, kvfree_rcu_arg_1)(__VA_ARGS__) + ++#define kvfree_rcu_mightsleep(ptr) kvfree_rcu_arg_1(ptr) ++#define kfree_rcu_mightsleep(ptr) kvfree_rcu_mightsleep(ptr) ++ + #define KVFREE_GET_MACRO(_1, _2, NAME, ...) NAME + #define kvfree_rcu_arg_2(ptr, rhf) \ + do { \ +diff --git a/include/linux/types.h b/include/linux/types.h +index ac825ad90e44ab..be939d088638bc 100644 +--- a/include/linux/types.h ++++ b/include/linux/types.h +@@ -109,8 +109,9 @@ typedef u64 u_int64_t; + typedef s64 int64_t; + #endif + +-/* this is a special 64bit data type that is 8-byte aligned */ ++/* These are the special 64-bit data types that are 8-byte aligned */ + #define aligned_u64 __aligned_u64 ++#define aligned_s64 __aligned_s64 + #define aligned_be64 __aligned_be64 + #define aligned_le64 __aligned_le64 + +diff --git a/include/uapi/linux/types.h b/include/uapi/linux/types.h +index f6d2f83cbe297f..aa96c4589b71f1 100644 +--- a/include/uapi/linux/types.h ++++ b/include/uapi/linux/types.h +@@ -46,6 +46,7 @@ typedef __u32 __bitwise __wsum; + * No conversions are necessary between 32-bit user-space and a 64-bit kernel. 
+ */ + #define __aligned_u64 __u64 __attribute__((aligned(8))) ++#define __aligned_s64 __s64 __attribute__((aligned(8))) + #define __aligned_be64 __be64 __attribute__((aligned(8))) + #define __aligned_le64 __le64 __attribute__((aligned(8))) + +diff --git a/kernel/params.c b/kernel/params.c +index 8299bd764e42e7..1b856942d82d40 100644 +--- a/kernel/params.c ++++ b/kernel/params.c +@@ -945,7 +945,9 @@ int module_sysfs_initialized; + static void module_kobj_release(struct kobject *kobj) + { + struct module_kobject *mk = to_module_kobject(kobj); +- complete(mk->kobj_completion); ++ ++ if (mk->kobj_completion) ++ complete(mk->kobj_completion); + } + + struct kobj_type module_ktype = { +diff --git a/net/can/gw.c b/net/can/gw.c +index d8861e862f157a..c48e8cf5e65062 100644 +--- a/net/can/gw.c ++++ b/net/can/gw.c +@@ -130,7 +130,7 @@ struct cgw_job { + u32 handled_frames; + u32 dropped_frames; + u32 deleted_frames; +- struct cf_mod mod; ++ struct cf_mod __rcu *cf_mod; + union { + /* CAN frame data source */ + struct net_device *dev; +@@ -459,6 +459,7 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data) + struct cgw_job *gwj = (struct cgw_job *)data; + struct canfd_frame *cf; + struct sk_buff *nskb; ++ struct cf_mod *mod; + int modidx = 0; + + /* process strictly Classic CAN or CAN FD frames */ +@@ -506,7 +507,8 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data) + * When there is at least one modification function activated, + * we need to copy the skb as we want to modify skb->data. + */ +- if (gwj->mod.modfunc[0]) ++ mod = rcu_dereference(gwj->cf_mod); ++ if (mod->modfunc[0]) + nskb = skb_copy(skb, GFP_ATOMIC); + else + nskb = skb_clone(skb, GFP_ATOMIC); +@@ -529,8 +531,8 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data) + cf = (struct canfd_frame *)nskb->data; + + /* perform preprocessed modification functions if there are any */ +- while (modidx < MAX_MODFUNCTIONS && gwj->mod.modfunc[modidx]) +- (*gwj->mod.modfunc[modidx++])(cf, &gwj->mod); ++ while (modidx < MAX_MODFUNCTIONS && mod->modfunc[modidx]) ++ (*mod->modfunc[modidx++])(cf, mod); + + /* Has the CAN frame been modified? */ + if (modidx) { +@@ -546,11 +548,11 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data) + } + + /* check for checksum updates */ +- if (gwj->mod.csumfunc.crc8) +- (*gwj->mod.csumfunc.crc8)(cf, &gwj->mod.csum.crc8); ++ if (mod->csumfunc.crc8) ++ (*mod->csumfunc.crc8)(cf, &mod->csum.crc8); + +- if (gwj->mod.csumfunc.xor) +- (*gwj->mod.csumfunc.xor)(cf, &gwj->mod.csum.xor); ++ if (mod->csumfunc.xor) ++ (*mod->csumfunc.xor)(cf, &mod->csum.xor); + } + + /* clear the skb timestamp if not configured the other way */ +@@ -577,6 +579,24 @@ static inline void cgw_unregister_filter(struct net *net, struct cgw_job *gwj) + gwj->ccgw.filter.can_mask, can_can_gw_rcv, gwj); + } + ++static void cgw_job_free_rcu(struct rcu_head *rcu_head) ++{ ++ struct cgw_job *gwj = container_of(rcu_head, struct cgw_job, rcu); ++ ++ /* cgw_job::cf_mod is always accessed from the same cgw_job object within ++ * the same RCU read section. Once cgw_job is scheduled for removal, ++ * cf_mod can also be removed without mandating an additional grace period. 
++ */ ++ kfree(rcu_access_pointer(gwj->cf_mod)); ++ kmem_cache_free(cgw_cache, gwj); ++} ++ ++/* Return cgw_job::cf_mod with RTNL protected section */ ++static struct cf_mod *cgw_job_cf_mod(struct cgw_job *gwj) ++{ ++ return rcu_dereference_protected(gwj->cf_mod, rtnl_is_locked()); ++} ++ + static int cgw_notifier(struct notifier_block *nb, + unsigned long msg, void *ptr) + { +@@ -596,8 +616,7 @@ static int cgw_notifier(struct notifier_block *nb, + if (gwj->src.dev == dev || gwj->dst.dev == dev) { + hlist_del(&gwj->list); + cgw_unregister_filter(net, gwj); +- synchronize_rcu(); +- kmem_cache_free(cgw_cache, gwj); ++ call_rcu(&gwj->rcu, cgw_job_free_rcu); + } + } + } +@@ -610,6 +629,7 @@ static int cgw_put_job(struct sk_buff *skb, struct cgw_job *gwj, int type, + { + struct rtcanmsg *rtcan; + struct nlmsghdr *nlh; ++ struct cf_mod *mod; + + nlh = nlmsg_put(skb, pid, seq, type, sizeof(*rtcan), flags); + if (!nlh) +@@ -644,82 +664,83 @@ static int cgw_put_job(struct sk_buff *skb, struct cgw_job *gwj, int type, + goto cancel; + } + ++ mod = cgw_job_cf_mod(gwj); + if (gwj->flags & CGW_FLAGS_CAN_FD) { + struct cgw_fdframe_mod mb; + +- if (gwj->mod.modtype.and) { +- memcpy(&mb.cf, &gwj->mod.modframe.and, sizeof(mb.cf)); +- mb.modtype = gwj->mod.modtype.and; ++ if (mod->modtype.and) { ++ memcpy(&mb.cf, &mod->modframe.and, sizeof(mb.cf)); ++ mb.modtype = mod->modtype.and; + if (nla_put(skb, CGW_FDMOD_AND, sizeof(mb), &mb) < 0) + goto cancel; + } + +- if (gwj->mod.modtype.or) { +- memcpy(&mb.cf, &gwj->mod.modframe.or, sizeof(mb.cf)); +- mb.modtype = gwj->mod.modtype.or; ++ if (mod->modtype.or) { ++ memcpy(&mb.cf, &mod->modframe.or, sizeof(mb.cf)); ++ mb.modtype = mod->modtype.or; + if (nla_put(skb, CGW_FDMOD_OR, sizeof(mb), &mb) < 0) + goto cancel; + } + +- if (gwj->mod.modtype.xor) { +- memcpy(&mb.cf, &gwj->mod.modframe.xor, sizeof(mb.cf)); +- mb.modtype = gwj->mod.modtype.xor; ++ if (mod->modtype.xor) { ++ memcpy(&mb.cf, &mod->modframe.xor, sizeof(mb.cf)); ++ mb.modtype = mod->modtype.xor; + if (nla_put(skb, CGW_FDMOD_XOR, sizeof(mb), &mb) < 0) + goto cancel; + } + +- if (gwj->mod.modtype.set) { +- memcpy(&mb.cf, &gwj->mod.modframe.set, sizeof(mb.cf)); +- mb.modtype = gwj->mod.modtype.set; ++ if (mod->modtype.set) { ++ memcpy(&mb.cf, &mod->modframe.set, sizeof(mb.cf)); ++ mb.modtype = mod->modtype.set; + if (nla_put(skb, CGW_FDMOD_SET, sizeof(mb), &mb) < 0) + goto cancel; + } + } else { + struct cgw_frame_mod mb; + +- if (gwj->mod.modtype.and) { +- memcpy(&mb.cf, &gwj->mod.modframe.and, sizeof(mb.cf)); +- mb.modtype = gwj->mod.modtype.and; ++ if (mod->modtype.and) { ++ memcpy(&mb.cf, &mod->modframe.and, sizeof(mb.cf)); ++ mb.modtype = mod->modtype.and; + if (nla_put(skb, CGW_MOD_AND, sizeof(mb), &mb) < 0) + goto cancel; + } + +- if (gwj->mod.modtype.or) { +- memcpy(&mb.cf, &gwj->mod.modframe.or, sizeof(mb.cf)); +- mb.modtype = gwj->mod.modtype.or; ++ if (mod->modtype.or) { ++ memcpy(&mb.cf, &mod->modframe.or, sizeof(mb.cf)); ++ mb.modtype = mod->modtype.or; + if (nla_put(skb, CGW_MOD_OR, sizeof(mb), &mb) < 0) + goto cancel; + } + +- if (gwj->mod.modtype.xor) { +- memcpy(&mb.cf, &gwj->mod.modframe.xor, sizeof(mb.cf)); +- mb.modtype = gwj->mod.modtype.xor; ++ if (mod->modtype.xor) { ++ memcpy(&mb.cf, &mod->modframe.xor, sizeof(mb.cf)); ++ mb.modtype = mod->modtype.xor; + if (nla_put(skb, CGW_MOD_XOR, sizeof(mb), &mb) < 0) + goto cancel; + } + +- if (gwj->mod.modtype.set) { +- memcpy(&mb.cf, &gwj->mod.modframe.set, sizeof(mb.cf)); +- mb.modtype = gwj->mod.modtype.set; ++ if (mod->modtype.set) { ++ 
memcpy(&mb.cf, &mod->modframe.set, sizeof(mb.cf)); ++ mb.modtype = mod->modtype.set; + if (nla_put(skb, CGW_MOD_SET, sizeof(mb), &mb) < 0) + goto cancel; + } + } + +- if (gwj->mod.uid) { +- if (nla_put_u32(skb, CGW_MOD_UID, gwj->mod.uid) < 0) ++ if (mod->uid) { ++ if (nla_put_u32(skb, CGW_MOD_UID, mod->uid) < 0) + goto cancel; + } + +- if (gwj->mod.csumfunc.crc8) { ++ if (mod->csumfunc.crc8) { + if (nla_put(skb, CGW_CS_CRC8, CGW_CS_CRC8_LEN, +- &gwj->mod.csum.crc8) < 0) ++ &mod->csum.crc8) < 0) + goto cancel; + } + +- if (gwj->mod.csumfunc.xor) { ++ if (mod->csumfunc.xor) { + if (nla_put(skb, CGW_CS_XOR, CGW_CS_XOR_LEN, +- &gwj->mod.csum.xor) < 0) ++ &mod->csum.xor) < 0) + goto cancel; + } + +@@ -1053,7 +1074,7 @@ static int cgw_create_job(struct sk_buff *skb, struct nlmsghdr *nlh, + struct net *net = sock_net(skb->sk); + struct rtcanmsg *r; + struct cgw_job *gwj; +- struct cf_mod mod; ++ struct cf_mod *mod; + struct can_can_gw ccgw; + u8 limhops = 0; + int err = 0; +@@ -1072,37 +1093,48 @@ static int cgw_create_job(struct sk_buff *skb, struct nlmsghdr *nlh, + if (r->gwtype != CGW_TYPE_CAN_CAN) + return -EINVAL; + +- err = cgw_parse_attr(nlh, &mod, CGW_TYPE_CAN_CAN, &ccgw, &limhops); ++ mod = kmalloc(sizeof(*mod), GFP_KERNEL); ++ if (!mod) ++ return -ENOMEM; ++ ++ err = cgw_parse_attr(nlh, mod, CGW_TYPE_CAN_CAN, &ccgw, &limhops); + if (err < 0) +- return err; ++ goto out_free_cf; + +- if (mod.uid) { ++ if (mod->uid) { + ASSERT_RTNL(); + + /* check for updating an existing job with identical uid */ + hlist_for_each_entry(gwj, &net->can.cgw_list, list) { +- if (gwj->mod.uid != mod.uid) ++ struct cf_mod *old_cf; ++ ++ old_cf = cgw_job_cf_mod(gwj); ++ if (old_cf->uid != mod->uid) + continue; + + /* interfaces & filters must be identical */ +- if (memcmp(&gwj->ccgw, &ccgw, sizeof(ccgw))) +- return -EINVAL; ++ if (memcmp(&gwj->ccgw, &ccgw, sizeof(ccgw))) { ++ err = -EINVAL; ++ goto out_free_cf; ++ } + +- /* update modifications with disabled softirq & quit */ +- local_bh_disable(); +- memcpy(&gwj->mod, &mod, sizeof(mod)); +- local_bh_enable(); ++ rcu_assign_pointer(gwj->cf_mod, mod); ++ kfree_rcu_mightsleep(old_cf); + return 0; + } + } + + /* ifindex == 0 is not allowed for job creation */ +- if (!ccgw.src_idx || !ccgw.dst_idx) +- return -ENODEV; ++ if (!ccgw.src_idx || !ccgw.dst_idx) { ++ err = -ENODEV; ++ goto out_free_cf; ++ } + + gwj = kmem_cache_alloc(cgw_cache, GFP_KERNEL); +- if (!gwj) +- return -ENOMEM; ++ if (!gwj) { ++ err = -ENOMEM; ++ goto out_free_cf; ++ } + + gwj->handled_frames = 0; + gwj->dropped_frames = 0; +@@ -1112,7 +1144,7 @@ static int cgw_create_job(struct sk_buff *skb, struct nlmsghdr *nlh, + gwj->limit_hops = limhops; + + /* insert already parsed information */ +- memcpy(&gwj->mod, &mod, sizeof(mod)); ++ RCU_INIT_POINTER(gwj->cf_mod, mod); + memcpy(&gwj->ccgw, &ccgw, sizeof(ccgw)); + + err = -ENODEV; +@@ -1139,9 +1171,11 @@ static int cgw_create_job(struct sk_buff *skb, struct nlmsghdr *nlh, + if (!err) + hlist_add_head_rcu(&gwj->list, &net->can.cgw_list); + out: +- if (err) ++ if (err) { + kmem_cache_free(cgw_cache, gwj); +- ++out_free_cf: ++ kfree(mod); ++ } + return err; + } + +@@ -1155,8 +1189,7 @@ static void cgw_remove_all_jobs(struct net *net) + hlist_for_each_entry_safe(gwj, nx, &net->can.cgw_list, list) { + hlist_del(&gwj->list); + cgw_unregister_filter(net, gwj); +- synchronize_rcu(); +- kmem_cache_free(cgw_cache, gwj); ++ call_rcu(&gwj->rcu, cgw_job_free_rcu); + } + } + +@@ -1202,19 +1235,22 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr 
*nlh, + + /* remove only the first matching entry */ + hlist_for_each_entry_safe(gwj, nx, &net->can.cgw_list, list) { ++ struct cf_mod *cf_mod; ++ + if (gwj->flags != r->flags) + continue; + + if (gwj->limit_hops != limhops) + continue; + ++ cf_mod = cgw_job_cf_mod(gwj); + /* we have a match when uid is enabled and identical */ +- if (gwj->mod.uid || mod.uid) { +- if (gwj->mod.uid != mod.uid) ++ if (cf_mod->uid || mod.uid) { ++ if (cf_mod->uid != mod.uid) + continue; + } else { + /* no uid => check for identical modifications */ +- if (memcmp(&gwj->mod, &mod, sizeof(mod))) ++ if (memcmp(cf_mod, &mod, sizeof(mod))) + continue; + } + +@@ -1224,8 +1260,7 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh, + + hlist_del(&gwj->list); + cgw_unregister_filter(net, gwj); +- synchronize_rcu(); +- kmem_cache_free(cgw_cache, gwj); ++ call_rcu(&gwj->rcu, cgw_job_free_rcu); + err = 0; + break; + } +diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c +index 07b3487e3ae97a..47c4a3e72bcd92 100644 +--- a/net/ipv6/addrconf.c ++++ b/net/ipv6/addrconf.c +@@ -3145,16 +3145,13 @@ static void add_v4_addrs(struct inet6_dev *idev) + struct in6_addr addr; + struct net_device *dev; + struct net *net = dev_net(idev->dev); +- int scope, plen, offset = 0; ++ int scope, plen; + u32 pflags = 0; + + ASSERT_RTNL(); + + memset(&addr, 0, sizeof(struct in6_addr)); +- /* in case of IP6GRE the dev_addr is an IPv6 and therefore we use only the last 4 bytes */ +- if (idev->dev->addr_len == sizeof(struct in6_addr)) +- offset = sizeof(struct in6_addr) - 4; +- memcpy(&addr.s6_addr32[3], idev->dev->dev_addr + offset, 4); ++ memcpy(&addr.s6_addr32[3], idev->dev->dev_addr, 4); + + if (!(idev->dev->flags & IFF_POINTOPOINT) && idev->dev->type == ARPHRD_SIT) { + scope = IPV6_ADDR_COMPATv4; +@@ -3462,7 +3459,13 @@ static void addrconf_gre_config(struct net_device *dev) + return; + } + +- if (dev->type == ARPHRD_ETHER) { ++ /* Generate the IPv6 link-local address using addrconf_addr_gen(), ++ * unless we have an IPv4 GRE device not bound to an IP address and ++ * which is in EUI64 mode (as __ipv6_isatap_ifid() would fail in this ++ * case). Such devices fall back to add_v4_addrs() instead. ++ */ ++ if (!(dev->type == ARPHRD_IPGRE && *(__be32 *)dev->dev_addr == 0 && ++ idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_EUI64)) { + addrconf_addr_gen(idev, true); + return; + } +diff --git a/net/netfilter/ipset/ip_set_hash_gen.h b/net/netfilter/ipset/ip_set_hash_gen.h +index ef04e556aadb41..0bd6bf46f05f3e 100644 +--- a/net/netfilter/ipset/ip_set_hash_gen.h ++++ b/net/netfilter/ipset/ip_set_hash_gen.h +@@ -63,7 +63,7 @@ struct hbucket { + #define ahash_sizeof_regions(htable_bits) \ + (ahash_numof_locks(htable_bits) * sizeof(struct ip_set_region)) + #define ahash_region(n, htable_bits) \ +- ((n) % ahash_numof_locks(htable_bits)) ++ ((n) / jhash_size(HTABLE_REGION_BITS)) + #define ahash_bucket_start(h, htable_bits) \ + ((htable_bits) < HTABLE_REGION_BITS ? 
0 \ + : (h) * jhash_size(HTABLE_REGION_BITS)) +diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c +index fd484b38133ed6..0de165ed04ebae 100644 +--- a/net/openvswitch/actions.c ++++ b/net/openvswitch/actions.c +@@ -954,8 +954,7 @@ static int output_userspace(struct datapath *dp, struct sk_buff *skb, + upcall.cmd = OVS_PACKET_CMD_ACTION; + upcall.mru = OVS_CB(skb)->mru; + +- for (a = nla_data(attr), rem = nla_len(attr); rem > 0; +- a = nla_next(a, &rem)) { ++ nla_for_each_nested(a, attr, rem) { + switch (nla_type(a)) { + case OVS_USERSPACE_ATTR_USERDATA: + upcall.userdata = a;
