commit:     83cca7dd300b947197be6e9985981a75365e37c3
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Jan 23 17:03:02 2025 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Jan 23 17:03:02 2025 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=83cca7dd
Linux patch 6.6.74 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org> 0000_README | 4 + 1073_linux-6.6.74.patch | 2591 +++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 2595 insertions(+) diff --git a/0000_README b/0000_README index f5a8b027..cb5fec62 100644 --- a/0000_README +++ b/0000_README @@ -335,6 +335,10 @@ Patch: 1072_linux-6.6.73.patch From: https://www.kernel.org Desc: Linux 6.6.73 +Patch: 1073_linux-6.6.74.patch +From: https://www.kernel.org +Desc: Linux 6.6.74 + Patch: 1510_fs-enable-link-security-restrictions-by-default.patch From: http://sources.debian.net/src/linux/3.16.7-ckt4-3/debian/patches/debian/fs-enable-link-security-restrictions-by-default.patch Desc: Enable link security restrictions by default. diff --git a/1073_linux-6.6.74.patch b/1073_linux-6.6.74.patch new file mode 100644 index 00000000..cc7a6c85 --- /dev/null +++ b/1073_linux-6.6.74.patch @@ -0,0 +1,2591 @@ +diff --git a/Makefile b/Makefile +index 2ba627f545901e..b8e5c65910862e 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 6 + PATCHLEVEL = 6 +-SUBLEVEL = 73 ++SUBLEVEL = 74 + EXTRAVERSION = + NAME = Pinguïn Aangedreven + +diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h +index 48f8dd47cf6882..1c5513b04f0387 100644 +--- a/arch/x86/include/asm/special_insns.h ++++ b/arch/x86/include/asm/special_insns.h +@@ -217,7 +217,7 @@ static inline int write_user_shstk_64(u64 __user *addr, u64 val) + + #define nop() asm volatile ("nop") + +-static inline void serialize(void) ++static __always_inline void serialize(void) + { + /* Instruction opcode for SERIALIZE; supported in binutils >= 2.35. */ + asm volatile(".byte 0xf, 0x1, 0xe8" ::: "memory"); +diff --git a/arch/x86/xen/xen-asm.S b/arch/x86/xen/xen-asm.S +index 901b605166834b..6231f6efb4ee13 100644 +--- a/arch/x86/xen/xen-asm.S ++++ b/arch/x86/xen/xen-asm.S +@@ -221,7 +221,7 @@ SYM_CODE_END(xen_early_idt_handler_array) + push %rax + mov $__HYPERVISOR_iret, %eax + syscall /* Do the IRET. */ +-#ifdef CONFIG_MITIGATION_SLS ++#ifdef CONFIG_SLS + int3 + #endif + .endm +diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c +index 63e4812623361d..4990a19e601334 100644 +--- a/block/blk-sysfs.c ++++ b/block/blk-sysfs.c +@@ -842,10 +842,8 @@ int blk_register_queue(struct gendisk *disk) + * faster to shut down and is made fully functional here as + * request_queues for non-existent devices never get registered. + */ +- if (!blk_queue_init_done(q)) { +- blk_queue_flag_set(QUEUE_FLAG_INIT_DONE, q); +- percpu_ref_switch_to_percpu(&q->q_usage_counter); +- } ++ blk_queue_flag_set(QUEUE_FLAG_INIT_DONE, q); ++ percpu_ref_switch_to_percpu(&q->q_usage_counter); + + return ret; + +diff --git a/block/genhd.c b/block/genhd.c +index 203c880c3e1cd2..6d704c37f26e71 100644 +--- a/block/genhd.c ++++ b/block/genhd.c +@@ -710,13 +710,10 @@ void del_gendisk(struct gendisk *disk) + * If the disk does not own the queue, allow using passthrough requests + * again. Else leave the queue frozen to fail all I/O. 
+ */ +- if (!test_bit(GD_OWNS_QUEUE, &disk->state)) { +- blk_queue_flag_clear(QUEUE_FLAG_INIT_DONE, q); ++ if (!test_bit(GD_OWNS_QUEUE, &disk->state)) + __blk_mq_unfreeze_queue(q, true); +- } else { +- if (queue_is_mq(q)) +- blk_mq_exit_queue(q); +- } ++ else if (queue_is_mq(q)) ++ blk_mq_exit_queue(q); + } + EXPORT_SYMBOL(del_gendisk); + +diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c +index c82b255f82bc41..64d83ff3c0d90c 100644 +--- a/drivers/acpi/resource.c ++++ b/drivers/acpi/resource.c +@@ -680,11 +680,11 @@ static bool acpi_dev_irq_override(u32 gsi, u8 triggering, u8 polarity, + for (i = 0; i < ARRAY_SIZE(override_table); i++) { + const struct irq_override_cmp *entry = &override_table[i]; + +- if (dmi_check_system(entry->system) && +- entry->irq == gsi && ++ if (entry->irq == gsi && + entry->triggering == triggering && + entry->polarity == polarity && +- entry->shareable == shareable) ++ entry->shareable == shareable && ++ dmi_check_system(entry->system)) + return entry->override; + } + +diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c +index 1e257ecd624db1..b73038ad86f7f3 100644 +--- a/drivers/block/zram/zram_drv.c ++++ b/drivers/block/zram/zram_drv.c +@@ -1262,6 +1262,7 @@ static bool zram_meta_alloc(struct zram *zram, u64 disksize) + zram->mem_pool = zs_create_pool(zram->disk->disk_name); + if (!zram->mem_pool) { + vfree(zram->table); ++ zram->table = NULL; + return false; + } + +diff --git a/drivers/gpio/gpio-xilinx.c b/drivers/gpio/gpio-xilinx.c +index a16945e8319e3f..956ea29578336c 100644 +--- a/drivers/gpio/gpio-xilinx.c ++++ b/drivers/gpio/gpio-xilinx.c +@@ -66,7 +66,7 @@ struct xgpio_instance { + DECLARE_BITMAP(state, 64); + DECLARE_BITMAP(last_irq_read, 64); + DECLARE_BITMAP(dir, 64); +- spinlock_t gpio_lock; /* For serializing operations */ ++ raw_spinlock_t gpio_lock; /* For serializing operations */ + int irq; + DECLARE_BITMAP(enable, 64); + DECLARE_BITMAP(rising_edge, 64); +@@ -180,14 +180,14 @@ static void xgpio_set(struct gpio_chip *gc, unsigned int gpio, int val) + struct xgpio_instance *chip = gpiochip_get_data(gc); + int bit = xgpio_to_bit(chip, gpio); + +- spin_lock_irqsave(&chip->gpio_lock, flags); ++ raw_spin_lock_irqsave(&chip->gpio_lock, flags); + + /* Write to GPIO signal and set its direction to output */ + __assign_bit(bit, chip->state, val); + + xgpio_write_ch(chip, XGPIO_DATA_OFFSET, bit, chip->state); + +- spin_unlock_irqrestore(&chip->gpio_lock, flags); ++ raw_spin_unlock_irqrestore(&chip->gpio_lock, flags); + } + + /** +@@ -211,7 +211,7 @@ static void xgpio_set_multiple(struct gpio_chip *gc, unsigned long *mask, + bitmap_remap(hw_mask, mask, chip->sw_map, chip->hw_map, 64); + bitmap_remap(hw_bits, bits, chip->sw_map, chip->hw_map, 64); + +- spin_lock_irqsave(&chip->gpio_lock, flags); ++ raw_spin_lock_irqsave(&chip->gpio_lock, flags); + + bitmap_replace(state, chip->state, hw_bits, hw_mask, 64); + +@@ -219,7 +219,7 @@ static void xgpio_set_multiple(struct gpio_chip *gc, unsigned long *mask, + + bitmap_copy(chip->state, state, 64); + +- spin_unlock_irqrestore(&chip->gpio_lock, flags); ++ raw_spin_unlock_irqrestore(&chip->gpio_lock, flags); + } + + /** +@@ -237,13 +237,13 @@ static int xgpio_dir_in(struct gpio_chip *gc, unsigned int gpio) + struct xgpio_instance *chip = gpiochip_get_data(gc); + int bit = xgpio_to_bit(chip, gpio); + +- spin_lock_irqsave(&chip->gpio_lock, flags); ++ raw_spin_lock_irqsave(&chip->gpio_lock, flags); + + /* Set the GPIO bit in shadow register and set direction as input */ + 
__set_bit(bit, chip->dir); + xgpio_write_ch(chip, XGPIO_TRI_OFFSET, bit, chip->dir); + +- spin_unlock_irqrestore(&chip->gpio_lock, flags); ++ raw_spin_unlock_irqrestore(&chip->gpio_lock, flags); + + return 0; + } +@@ -266,7 +266,7 @@ static int xgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val) + struct xgpio_instance *chip = gpiochip_get_data(gc); + int bit = xgpio_to_bit(chip, gpio); + +- spin_lock_irqsave(&chip->gpio_lock, flags); ++ raw_spin_lock_irqsave(&chip->gpio_lock, flags); + + /* Write state of GPIO signal */ + __assign_bit(bit, chip->state, val); +@@ -276,7 +276,7 @@ static int xgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val) + __clear_bit(bit, chip->dir); + xgpio_write_ch(chip, XGPIO_TRI_OFFSET, bit, chip->dir); + +- spin_unlock_irqrestore(&chip->gpio_lock, flags); ++ raw_spin_unlock_irqrestore(&chip->gpio_lock, flags); + + return 0; + } +@@ -404,7 +404,7 @@ static void xgpio_irq_mask(struct irq_data *irq_data) + int bit = xgpio_to_bit(chip, irq_offset); + u32 mask = BIT(bit / 32), temp; + +- spin_lock_irqsave(&chip->gpio_lock, flags); ++ raw_spin_lock_irqsave(&chip->gpio_lock, flags); + + __clear_bit(bit, chip->enable); + +@@ -414,7 +414,7 @@ static void xgpio_irq_mask(struct irq_data *irq_data) + temp &= ~mask; + xgpio_writereg(chip->regs + XGPIO_IPIER_OFFSET, temp); + } +- spin_unlock_irqrestore(&chip->gpio_lock, flags); ++ raw_spin_unlock_irqrestore(&chip->gpio_lock, flags); + + gpiochip_disable_irq(&chip->gc, irq_offset); + } +@@ -434,7 +434,7 @@ static void xgpio_irq_unmask(struct irq_data *irq_data) + + gpiochip_enable_irq(&chip->gc, irq_offset); + +- spin_lock_irqsave(&chip->gpio_lock, flags); ++ raw_spin_lock_irqsave(&chip->gpio_lock, flags); + + __set_bit(bit, chip->enable); + +@@ -453,7 +453,7 @@ static void xgpio_irq_unmask(struct irq_data *irq_data) + xgpio_writereg(chip->regs + XGPIO_IPIER_OFFSET, val); + } + +- spin_unlock_irqrestore(&chip->gpio_lock, flags); ++ raw_spin_unlock_irqrestore(&chip->gpio_lock, flags); + } + + /** +@@ -518,7 +518,7 @@ static void xgpio_irqhandler(struct irq_desc *desc) + + chained_irq_enter(irqchip, desc); + +- spin_lock(&chip->gpio_lock); ++ raw_spin_lock(&chip->gpio_lock); + + xgpio_read_ch_all(chip, XGPIO_DATA_OFFSET, all); + +@@ -535,7 +535,7 @@ static void xgpio_irqhandler(struct irq_desc *desc) + bitmap_copy(chip->last_irq_read, all, 64); + bitmap_or(all, rising, falling, 64); + +- spin_unlock(&chip->gpio_lock); ++ raw_spin_unlock(&chip->gpio_lock); + + dev_dbg(gc->parent, "IRQ rising %*pb falling %*pb\n", 64, rising, 64, falling); + +@@ -626,7 +626,7 @@ static int xgpio_probe(struct platform_device *pdev) + bitmap_set(chip->hw_map, 0, width[0]); + bitmap_set(chip->hw_map, 32, width[1]); + +- spin_lock_init(&chip->gpio_lock); ++ raw_spin_lock_init(&chip->gpio_lock); + + chip->gc.base = -1; + chip->gc.ngpio = bitmap_weight(chip->hw_map, 64); +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +index f4c1cc6df1c830..2e739b80cfccf1 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +@@ -3172,7 +3172,7 @@ static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev) + * + * @adev: amdgpu_device pointer + * +- * Second resume function for hardware IPs. The list of all the hardware ++ * First resume function for hardware IPs. The list of all the hardware + * IPs that make up the asic is walked and the resume callbacks are run for + * all blocks except COMMON, GMC, and IH. 
resume puts the hardware into a + * functional state after a suspend and updates the software state as +@@ -3190,7 +3190,6 @@ static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev) + if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON || + adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC || + adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH || +- adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE || + adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) + continue; + r = adev->ip_blocks[i].version->funcs->resume(adev); +@@ -3205,36 +3204,6 @@ static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev) + return 0; + } + +-/** +- * amdgpu_device_ip_resume_phase3 - run resume for hardware IPs +- * +- * @adev: amdgpu_device pointer +- * +- * Third resume function for hardware IPs. The list of all the hardware +- * IPs that make up the asic is walked and the resume callbacks are run for +- * all DCE. resume puts the hardware into a functional state after a suspend +- * and updates the software state as necessary. This function is also used +- * for restoring the GPU after a GPU reset. +- * +- * Returns 0 on success, negative error code on failure. +- */ +-static int amdgpu_device_ip_resume_phase3(struct amdgpu_device *adev) +-{ +- int i, r; +- +- for (i = 0; i < adev->num_ip_blocks; i++) { +- if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw) +- continue; +- if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) { +- r = adev->ip_blocks[i].version->funcs->resume(adev); +- if (r) +- return r; +- } +- } +- +- return 0; +-} +- + /** + * amdgpu_device_ip_resume - run resume for hardware IPs + * +@@ -3261,13 +3230,6 @@ static int amdgpu_device_ip_resume(struct amdgpu_device *adev) + + r = amdgpu_device_ip_resume_phase2(adev); + +- if (r) +- return r; +- +- amdgpu_fence_driver_hw_init(adev); +- +- r = amdgpu_device_ip_resume_phase3(adev); +- + return r; + } + +@@ -4267,6 +4229,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon) + dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r); + goto exit; + } ++ amdgpu_fence_driver_hw_init(adev); + + r = amdgpu_device_ip_late_init(adev); + if (r) +@@ -5036,10 +4999,6 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle, + if (r) + goto out; + +- r = amdgpu_device_ip_resume_phase3(tmp_adev); +- if (r) +- goto out; +- + if (vram_lost) + amdgpu_device_fill_reset_magic(tmp_adev); + +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +index 6aa3b1d845abe1..806ec5d021995c 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +@@ -193,8 +193,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs, + need_ctx_switch = ring->current_ctx != fence_ctx; + if (ring->funcs->emit_pipeline_sync && job && + ((tmp = amdgpu_sync_get_fence(&job->explicit_sync)) || +- (amdgpu_sriov_vf(adev) && need_ctx_switch) || +- amdgpu_vm_need_pipeline_sync(ring, job))) { ++ need_ctx_switch || amdgpu_vm_need_pipeline_sync(ring, job))) { ++ + need_pipe_sync = true; + + if (tmp) +diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c +index d1a25fe6c44faa..8dffa5b6426e1c 100644 +--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c ++++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c +@@ -1315,7 +1315,7 @@ static struct link_encoder *dcn21_link_encoder_create( + 
kzalloc(sizeof(struct dcn21_link_encoder), GFP_KERNEL); + int link_regs_id; + +- if (!enc21) ++ if (!enc21 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) + return NULL; + + link_regs_id = +diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c +index 689b7c16d30072..22ae38bacb44a5 100644 +--- a/drivers/gpu/drm/i915/display/intel_fb.c ++++ b/drivers/gpu/drm/i915/display/intel_fb.c +@@ -1625,7 +1625,7 @@ int intel_fill_fb_info(struct drm_i915_private *i915, struct intel_framebuffer * + * arithmetic related to alignment and offset calculation. + */ + if (is_gen12_ccs_cc_plane(&fb->base, i)) { +- if (IS_ALIGNED(fb->base.offsets[i], PAGE_SIZE)) ++ if (IS_ALIGNED(fb->base.offsets[i], 64)) + continue; + else + return -EINVAL; +diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c +index 93f08f9479d89b..03eacb22648ef7 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_fence.c ++++ b/drivers/gpu/drm/nouveau/nouveau_fence.c +@@ -386,11 +386,13 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, + if (f) { + struct nouveau_channel *prev; + bool must_wait = true; ++ bool local; + + rcu_read_lock(); + prev = rcu_dereference(f->channel); +- if (prev && (prev == chan || +- fctx->sync(f, prev, chan) == 0)) ++ local = prev && prev->cli->drm == chan->cli->drm; ++ if (local && (prev == chan || ++ fctx->sync(f, prev, chan) == 0)) + must_wait = false; + rcu_read_unlock(); + if (!must_wait) +diff --git a/drivers/gpu/drm/v3d/v3d_irq.c b/drivers/gpu/drm/v3d/v3d_irq.c +index e714d5318f3095..76806039691a2c 100644 +--- a/drivers/gpu/drm/v3d/v3d_irq.c ++++ b/drivers/gpu/drm/v3d/v3d_irq.c +@@ -103,6 +103,7 @@ v3d_irq(int irq, void *arg) + + trace_v3d_bcl_irq(&v3d->drm, fence->seqno); + dma_fence_signal(&fence->base); ++ v3d->bin_job = NULL; + status = IRQ_HANDLED; + } + +@@ -112,6 +113,7 @@ v3d_irq(int irq, void *arg) + + trace_v3d_rcl_irq(&v3d->drm, fence->seqno); + dma_fence_signal(&fence->base); ++ v3d->render_job = NULL; + status = IRQ_HANDLED; + } + +@@ -121,6 +123,7 @@ v3d_irq(int irq, void *arg) + + trace_v3d_csd_irq(&v3d->drm, fence->seqno); + dma_fence_signal(&fence->base); ++ v3d->csd_job = NULL; + status = IRQ_HANDLED; + } + +@@ -157,6 +160,7 @@ v3d_hub_irq(int irq, void *arg) + + trace_v3d_tfu_irq(&v3d->drm, fence->seqno); + dma_fence_signal(&fence->base); ++ v3d->tfu_job = NULL; + status = IRQ_HANDLED; + } + +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c +index fdc34283eeb97f..ec6ca264ce11ff 100644 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c +@@ -412,7 +412,8 @@ static int vmw_bo_init(struct vmw_private *dev_priv, + + if (params->pin) + ttm_bo_pin(&vmw_bo->tbo); +- ttm_bo_unreserve(&vmw_bo->tbo); ++ if (!params->keep_resv) ++ ttm_bo_unreserve(&vmw_bo->tbo); + + return 0; + } +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h +index 156ea612fc2a48..a3ac61b991bf66 100644 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h +@@ -53,8 +53,9 @@ struct vmw_bo_params { + u32 domain; + u32 busy_domain; + enum ttm_bo_type bo_type; +- size_t size; + bool pin; ++ bool keep_resv; ++ size_t size; + struct dma_resv *resv; + struct sg_table *sg; + }; +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +index bea576434e475c..4655c266924fed 100644 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c ++++ 
b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +@@ -399,7 +399,8 @@ static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv) + .busy_domain = VMW_BO_DOMAIN_SYS, + .bo_type = ttm_bo_type_kernel, + .size = PAGE_SIZE, +- .pin = true ++ .pin = true, ++ .keep_resv = true, + }; + + /* +@@ -411,10 +412,6 @@ static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv) + if (unlikely(ret != 0)) + return ret; + +- ret = ttm_bo_reserve(&vbo->tbo, false, true, NULL); +- BUG_ON(ret != 0); +- vmw_bo_pin_reserved(vbo, true); +- + ret = ttm_bo_kmap(&vbo->tbo, 0, 1, &map); + if (likely(ret == 0)) { + result = ttm_kmap_obj_virtual(&map, &dummy); +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c +index d6bcaf078b1f40..0dc3dacc5beee8 100644 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c +@@ -163,6 +163,7 @@ struct drm_gem_object *vmw_prime_import_sg_table(struct drm_device *dev, + .bo_type = ttm_bo_type_sg, + .size = attach->dmabuf->size, + .pin = false, ++ .keep_resv = true, + .resv = attach->dmabuf->resv, + .sg = table, + +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c +index a01ca3226d0af8..7fb1c88bcc475f 100644 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c +@@ -896,7 +896,8 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv, + .busy_domain = VMW_BO_DOMAIN_SYS, + .bo_type = ttm_bo_type_device, + .size = size, +- .pin = true ++ .pin = true, ++ .keep_resv = true, + }; + + if (!vmw_shader_id_ok(user_key, shader_type)) +@@ -906,10 +907,6 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv, + if (unlikely(ret != 0)) + goto out; + +- ret = ttm_bo_reserve(&buf->tbo, false, true, NULL); +- if (unlikely(ret != 0)) +- goto no_reserve; +- + /* Map and copy shader bytecode. */ + ret = ttm_bo_kmap(&buf->tbo, 0, PFN_UP(size), &map); + if (unlikely(ret != 0)) { +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c +index fcb87d83760ef6..75cf9e76df2ed4 100644 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c +@@ -604,15 +604,14 @@ int vmw_bo_create_and_populate(struct vmw_private *dev_priv, + .busy_domain = domain, + .bo_type = ttm_bo_type_kernel, + .size = bo_size, +- .pin = true ++ .pin = true, ++ .keep_resv = true, + }; + + ret = vmw_bo_create(dev_priv, &bo_params, &vbo); + if (unlikely(ret != 0)) + return ret; + +- ret = ttm_bo_reserve(&vbo->tbo, false, true, NULL); +- BUG_ON(ret != 0); + ret = vmw_ttm_populate(vbo->tbo.bdev, vbo->tbo.ttm, &ctx); + if (likely(ret == 0)) { + struct vmw_ttm_tt *vmw_tt = +diff --git a/drivers/hwmon/tmp513.c b/drivers/hwmon/tmp513.c +index 070f93226ed696..62d31aadda4bb8 100644 +--- a/drivers/hwmon/tmp513.c ++++ b/drivers/hwmon/tmp513.c +@@ -203,7 +203,8 @@ static int tmp51x_get_value(struct tmp51x_data *data, u8 reg, u8 pos, + *val = sign_extend32(regval, + reg == TMP51X_SHUNT_CURRENT_RESULT ? 
+ 16 - tmp51x_get_pga_shift(data) : 15); +- *val = DIV_ROUND_CLOSEST(*val * 10 * MILLI, data->shunt_uohms); ++ *val = DIV_ROUND_CLOSEST(*val * 10 * (long)MILLI, (long)data->shunt_uohms); ++ + break; + case TMP51X_BUS_VOLTAGE_RESULT: + case TMP51X_BUS_VOLTAGE_H_LIMIT: +@@ -219,7 +220,7 @@ static int tmp51x_get_value(struct tmp51x_data *data, u8 reg, u8 pos, + case TMP51X_BUS_CURRENT_RESULT: + // Current = (ShuntVoltage * CalibrationRegister) / 4096 + *val = sign_extend32(regval, 15) * (long)data->curr_lsb_ua; +- *val = DIV_ROUND_CLOSEST(*val, MILLI); ++ *val = DIV_ROUND_CLOSEST(*val, (long)MILLI); + break; + case TMP51X_LOCAL_TEMP_RESULT: + case TMP51X_REMOTE_TEMP_RESULT_1: +@@ -259,7 +260,7 @@ static int tmp51x_set_value(struct tmp51x_data *data, u8 reg, long val) + * The user enter current value and we convert it to + * voltage. 1lsb = 10uV + */ +- val = DIV_ROUND_CLOSEST(val * data->shunt_uohms, 10 * MILLI); ++ val = DIV_ROUND_CLOSEST(val * (long)data->shunt_uohms, 10 * (long)MILLI); + max_val = U16_MAX >> tmp51x_get_pga_shift(data); + regval = clamp_val(val, -max_val, max_val); + break; +diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c +index 84fdd3f5cc8445..610df67cedaadc 100644 +--- a/drivers/i2c/busses/i2c-rcar.c ++++ b/drivers/i2c/busses/i2c-rcar.c +@@ -110,6 +110,8 @@ + #define ID_P_PM_BLOCKED BIT(31) + #define ID_P_MASK GENMASK(31, 28) + ++#define ID_SLAVE_NACK BIT(0) ++ + enum rcar_i2c_type { + I2C_RCAR_GEN1, + I2C_RCAR_GEN2, +@@ -143,6 +145,7 @@ struct rcar_i2c_priv { + int irq; + + struct i2c_client *host_notify_client; ++ u8 slave_flags; + }; + + #define rcar_i2c_priv_to_dev(p) ((p)->adap.dev.parent) +@@ -597,6 +600,7 @@ static bool rcar_i2c_slave_irq(struct rcar_i2c_priv *priv) + { + u32 ssr_raw, ssr_filtered; + u8 value; ++ int ret; + + ssr_raw = rcar_i2c_read(priv, ICSSR) & 0xff; + ssr_filtered = ssr_raw & rcar_i2c_read(priv, ICSIER); +@@ -612,7 +616,10 @@ static bool rcar_i2c_slave_irq(struct rcar_i2c_priv *priv) + rcar_i2c_write(priv, ICRXTX, value); + rcar_i2c_write(priv, ICSIER, SDE | SSR | SAR); + } else { +- i2c_slave_event(priv->slave, I2C_SLAVE_WRITE_REQUESTED, &value); ++ ret = i2c_slave_event(priv->slave, I2C_SLAVE_WRITE_REQUESTED, &value); ++ if (ret) ++ priv->slave_flags |= ID_SLAVE_NACK; ++ + rcar_i2c_read(priv, ICRXTX); /* dummy read */ + rcar_i2c_write(priv, ICSIER, SDR | SSR | SAR); + } +@@ -625,18 +632,21 @@ static bool rcar_i2c_slave_irq(struct rcar_i2c_priv *priv) + if (ssr_filtered & SSR) { + i2c_slave_event(priv->slave, I2C_SLAVE_STOP, &value); + rcar_i2c_write(priv, ICSCR, SIE | SDBS); /* clear our NACK */ ++ priv->slave_flags &= ~ID_SLAVE_NACK; + rcar_i2c_write(priv, ICSIER, SAR); + rcar_i2c_write(priv, ICSSR, ~SSR & 0xff); + } + + /* master wants to write to us */ + if (ssr_filtered & SDR) { +- int ret; +- + value = rcar_i2c_read(priv, ICRXTX); + ret = i2c_slave_event(priv->slave, I2C_SLAVE_WRITE_RECEIVED, &value); +- /* Send NACK in case of error */ +- rcar_i2c_write(priv, ICSCR, SIE | SDBS | (ret < 0 ? FNA : 0)); ++ if (ret) ++ priv->slave_flags |= ID_SLAVE_NACK; ++ ++ /* Send NACK in case of error, but it will come 1 byte late :( */ ++ rcar_i2c_write(priv, ICSCR, SIE | SDBS | ++ (priv->slave_flags & ID_SLAVE_NACK ? 
FNA : 0)); + rcar_i2c_write(priv, ICSSR, ~SDR & 0xff); + } + +diff --git a/drivers/i2c/i2c-atr.c b/drivers/i2c/i2c-atr.c +index 8ca1daadec9373..c03196da116351 100644 +--- a/drivers/i2c/i2c-atr.c ++++ b/drivers/i2c/i2c-atr.c +@@ -412,7 +412,7 @@ static int i2c_atr_bus_notifier_call(struct notifier_block *nb, + dev_name(dev), ret); + break; + +- case BUS_NOTIFY_DEL_DEVICE: ++ case BUS_NOTIFY_REMOVED_DEVICE: + i2c_atr_detach_client(client->adapter, client); + break; + +diff --git a/drivers/i2c/muxes/i2c-demux-pinctrl.c b/drivers/i2c/muxes/i2c-demux-pinctrl.c +index 9f2e4aa2815933..299abb6dd9423d 100644 +--- a/drivers/i2c/muxes/i2c-demux-pinctrl.c ++++ b/drivers/i2c/muxes/i2c-demux-pinctrl.c +@@ -261,7 +261,9 @@ static int i2c_demux_pinctrl_probe(struct platform_device *pdev) + pm_runtime_no_callbacks(&pdev->dev); + + /* switch to first parent as active master */ +- i2c_demux_activate_master(priv, 0); ++ err = i2c_demux_activate_master(priv, 0); ++ if (err) ++ goto err_rollback; + + err = device_create_file(&pdev->dev, &dev_attr_available_masters); + if (err) +diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600.h b/drivers/iio/imu/inv_icm42600/inv_icm42600.h +index 0e290c807b0f91..94c0eb0bf8748a 100644 +--- a/drivers/iio/imu/inv_icm42600/inv_icm42600.h ++++ b/drivers/iio/imu/inv_icm42600/inv_icm42600.h +@@ -362,6 +362,7 @@ struct inv_icm42600_state { + typedef int (*inv_icm42600_bus_setup)(struct inv_icm42600_state *); + + extern const struct regmap_config inv_icm42600_regmap_config; ++extern const struct regmap_config inv_icm42600_spi_regmap_config; + extern const struct dev_pm_ops inv_icm42600_pm_ops; + + const struct iio_mount_matrix * +diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c +index d938bc45439729..da65aa4e27242f 100644 +--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c ++++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c +@@ -44,6 +44,17 @@ const struct regmap_config inv_icm42600_regmap_config = { + }; + EXPORT_SYMBOL_NS_GPL(inv_icm42600_regmap_config, IIO_ICM42600); + ++/* define specific regmap for SPI not supporting burst write */ ++const struct regmap_config inv_icm42600_spi_regmap_config = { ++ .reg_bits = 8, ++ .val_bits = 8, ++ .max_register = 0x4FFF, ++ .ranges = inv_icm42600_regmap_ranges, ++ .num_ranges = ARRAY_SIZE(inv_icm42600_regmap_ranges), ++ .use_single_write = true, ++}; ++EXPORT_SYMBOL_NS_GPL(inv_icm42600_spi_regmap_config, IIO_ICM42600); ++ + struct inv_icm42600_hw { + uint8_t whoami; + const char *name; +diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_spi.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_spi.c +index 6be4ac79493794..abfa1b73cf4d35 100644 +--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_spi.c ++++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_spi.c +@@ -59,7 +59,8 @@ static int inv_icm42600_probe(struct spi_device *spi) + return -EINVAL; + chip = (uintptr_t)match; + +- regmap = devm_regmap_init_spi(spi, &inv_icm42600_regmap_config); ++ /* use SPI specific regmap */ ++ regmap = devm_regmap_init_spi(spi, &inv_icm42600_spi_regmap_config); + if (IS_ERR(regmap)) + return PTR_ERR(regmap); + +diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c +index 13c65ec5825687..08da793969ee55 100644 +--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c ++++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c +@@ -2220,6 +2220,7 @@ int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr, + qp_attr->retry_cnt = qplib_qp->retry_cnt; + 
qp_attr->rnr_retry = qplib_qp->rnr_retry; + qp_attr->min_rnr_timer = qplib_qp->min_rnr_timer; ++ qp_attr->port_num = __to_ib_port_num(qplib_qp->port_id); + qp_attr->rq_psn = qplib_qp->rq.psn; + qp_attr->max_rd_atomic = qplib_qp->max_rd_atomic; + qp_attr->sq_psn = qplib_qp->sq.psn; +diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h +index 98baea98fc1761..ef910e6e2ccb73 100644 +--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h ++++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h +@@ -245,6 +245,10 @@ void bnxt_re_dealloc_ucontext(struct ib_ucontext *context); + int bnxt_re_mmap(struct ib_ucontext *context, struct vm_area_struct *vma); + void bnxt_re_mmap_free(struct rdma_user_mmap_entry *rdma_entry); + ++static inline u32 __to_ib_port_num(u16 port_id) ++{ ++ return (u32)port_id + 1; ++} + + unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp); + void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp, unsigned long flags); +diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c +index 871a49315c880f..c4f10498c79d87 100644 +--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c ++++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c +@@ -1460,6 +1460,7 @@ int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) + qp->dest_qpn = le32_to_cpu(sb->dest_qp_id); + memcpy(qp->smac, sb->src_mac, 6); + qp->vlan_id = le16_to_cpu(sb->vlan_pcp_vlan_dei_vlan_id); ++ qp->port_id = le16_to_cpu(sb->port_id); + bail: + dma_free_coherent(&rcfw->pdev->dev, sbuf.size, + sbuf.sb, sbuf.dma_addr); +diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h +index b5c53e864fbb39..55fd840359ef23 100644 +--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h ++++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h +@@ -297,6 +297,7 @@ struct bnxt_qplib_qp { + u32 dest_qpn; + u8 smac[6]; + u16 vlan_id; ++ u16 port_id; + u8 nw_type; + struct bnxt_qplib_ah ah; + +diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c +index b1e60c13c1e1e7..a1934fe4ad5ab0 100644 +--- a/drivers/irqchip/irq-gic-v3-its.c ++++ b/drivers/irqchip/irq-gic-v3-its.c +@@ -1970,7 +1970,7 @@ static int its_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info) + if (!is_v4(its_dev->its)) + return -EINVAL; + +- guard(raw_spinlock_irq)(&its_dev->event_map.vlpi_lock); ++ guard(raw_spinlock)(&its_dev->event_map.vlpi_lock); + + /* Unmap request? 
*/ + if (!info) +diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c +index e7f000f90bb467..6c7943c516eb09 100644 +--- a/drivers/irqchip/irq-gic-v3.c ++++ b/drivers/irqchip/irq-gic-v3.c +@@ -1460,7 +1460,7 @@ static int gic_retrigger(struct irq_data *data) + static int gic_cpu_pm_notifier(struct notifier_block *self, + unsigned long cmd, void *v) + { +- if (cmd == CPU_PM_EXIT) { ++ if (cmd == CPU_PM_EXIT || cmd == CPU_PM_ENTER_FAILED) { + if (gic_dist_security_disabled()) + gic_enable_redist(true); + gic_cpu_sys_reg_init(); +diff --git a/drivers/irqchip/irqchip.c b/drivers/irqchip/irqchip.c +index 1eeb0d0156ce9e..0ee7b6b71f5fa5 100644 +--- a/drivers/irqchip/irqchip.c ++++ b/drivers/irqchip/irqchip.c +@@ -35,11 +35,10 @@ void __init irqchip_init(void) + int platform_irqchip_probe(struct platform_device *pdev) + { + struct device_node *np = pdev->dev.of_node; +- struct device_node *par_np = of_irq_find_parent(np); ++ struct device_node *par_np __free(device_node) = of_irq_find_parent(np); + of_irq_init_cb_t irq_init_cb = of_device_get_match_data(&pdev->dev); + + if (!irq_init_cb) { +- of_node_put(par_np); + return -EINVAL; + } + +@@ -55,7 +54,6 @@ int platform_irqchip_probe(struct platform_device *pdev) + * interrupt controller can check for specific domains as necessary. + */ + if (par_np && !irq_find_matching_host(par_np, DOMAIN_BUS_ANY)) { +- of_node_put(par_np); + return -EPROBE_DEFER; + } + +diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c +index 8d75a66775cb1f..1b0c6770c14e46 100644 +--- a/drivers/mtd/spi-nor/core.c ++++ b/drivers/mtd/spi-nor/core.c +@@ -89,7 +89,7 @@ void spi_nor_spimem_setup_op(const struct spi_nor *nor, + op->addr.buswidth = spi_nor_get_protocol_addr_nbits(proto); + + if (op->dummy.nbytes) +- op->dummy.buswidth = spi_nor_get_protocol_data_nbits(proto); ++ op->dummy.buswidth = spi_nor_get_protocol_addr_nbits(proto); + + if (op->data.nbytes) + op->data.buswidth = spi_nor_get_protocol_data_nbits(proto); +diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c +index 6a716337f48be1..268399dfcf22f0 100644 +--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c ++++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c +@@ -923,7 +923,6 @@ static void xgbe_phy_free_phy_device(struct xgbe_prv_data *pdata) + + static bool xgbe_phy_finisar_phy_quirks(struct xgbe_prv_data *pdata) + { +- __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = { 0, }; + struct xgbe_phy_data *phy_data = pdata->phy_data; + unsigned int phy_id = phy_data->phydev->phy_id; + +@@ -945,14 +944,7 @@ static bool xgbe_phy_finisar_phy_quirks(struct xgbe_prv_data *pdata) + phy_write(phy_data->phydev, 0x04, 0x0d01); + phy_write(phy_data->phydev, 0x00, 0x9140); + +- linkmode_set_bit_array(phy_10_100_features_array, +- ARRAY_SIZE(phy_10_100_features_array), +- supported); +- linkmode_set_bit_array(phy_gbit_features_array, +- ARRAY_SIZE(phy_gbit_features_array), +- supported); +- +- linkmode_copy(phy_data->phydev->supported, supported); ++ linkmode_copy(phy_data->phydev->supported, PHY_GBIT_FEATURES); + + phy_support_asym_pause(phy_data->phydev); + +@@ -964,7 +956,6 @@ static bool xgbe_phy_finisar_phy_quirks(struct xgbe_prv_data *pdata) + + static bool xgbe_phy_belfuse_phy_quirks(struct xgbe_prv_data *pdata) + { +- __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = { 0, }; + struct xgbe_phy_data *phy_data = pdata->phy_data; + struct xgbe_sfp_eeprom *sfp_eeprom = &phy_data->sfp_eeprom; + unsigned int phy_id = phy_data->phydev->phy_id; +@@ -1028,13 
+1019,7 @@ static bool xgbe_phy_belfuse_phy_quirks(struct xgbe_prv_data *pdata) + reg = phy_read(phy_data->phydev, 0x00); + phy_write(phy_data->phydev, 0x00, reg & ~0x00800); + +- linkmode_set_bit_array(phy_10_100_features_array, +- ARRAY_SIZE(phy_10_100_features_array), +- supported); +- linkmode_set_bit_array(phy_gbit_features_array, +- ARRAY_SIZE(phy_gbit_features_array), +- supported); +- linkmode_copy(phy_data->phydev->supported, supported); ++ linkmode_copy(phy_data->phydev->supported, PHY_GBIT_FEATURES); + phy_support_asym_pause(phy_data->phydev); + + netif_dbg(pdata, drv, pdata->netdev, +diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c +index e8d9a0eba4d6b5..8f5cc1f2331884 100644 +--- a/drivers/net/ethernet/freescale/fec_main.c ++++ b/drivers/net/ethernet/freescale/fec_main.c +@@ -1572,19 +1572,22 @@ static void fec_enet_tx(struct net_device *ndev, int budget) + fec_enet_tx_queue(ndev, i, budget); + } + +-static void fec_enet_update_cbd(struct fec_enet_priv_rx_q *rxq, ++static int fec_enet_update_cbd(struct fec_enet_priv_rx_q *rxq, + struct bufdesc *bdp, int index) + { + struct page *new_page; + dma_addr_t phys_addr; + + new_page = page_pool_dev_alloc_pages(rxq->page_pool); +- WARN_ON(!new_page); +- rxq->rx_skb_info[index].page = new_page; ++ if (unlikely(!new_page)) ++ return -ENOMEM; + ++ rxq->rx_skb_info[index].page = new_page; + rxq->rx_skb_info[index].offset = FEC_ENET_XDP_HEADROOM; + phys_addr = page_pool_get_dma_addr(new_page) + FEC_ENET_XDP_HEADROOM; + bdp->cbd_bufaddr = cpu_to_fec32(phys_addr); ++ ++ return 0; + } + + static u32 +@@ -1679,6 +1682,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) + int cpu = smp_processor_id(); + struct xdp_buff xdp; + struct page *page; ++ __fec32 cbd_bufaddr; + u32 sub_len = 4; + + #if !defined(CONFIG_M5272) +@@ -1743,12 +1747,17 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) + + index = fec_enet_get_bd_index(bdp, &rxq->bd); + page = rxq->rx_skb_info[index].page; ++ cbd_bufaddr = bdp->cbd_bufaddr; ++ if (fec_enet_update_cbd(rxq, bdp, index)) { ++ ndev->stats.rx_dropped++; ++ goto rx_processing_done; ++ } ++ + dma_sync_single_for_cpu(&fep->pdev->dev, +- fec32_to_cpu(bdp->cbd_bufaddr), ++ fec32_to_cpu(cbd_bufaddr), + pkt_len, + DMA_FROM_DEVICE); + prefetch(page_address(page)); +- fec_enet_update_cbd(rxq, bdp, index); + + if (xdp_prog) { + xdp_buff_clear_frags_flag(&xdp); +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c +index 015faddabc8e09..463c23ae0ad1ec 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c +@@ -719,6 +719,12 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x, + /* check esn */ + if (x->props.flags & XFRM_STATE_ESN) + mlx5e_ipsec_update_esn_state(sa_entry); ++ else ++ /* According to RFC4303, section "3.3.3. Sequence Number Generation", ++ * the first packet sent using a given SA will contain a sequence ++ * number of 1. 
++ */ ++ sa_entry->esn_state.esn = 1; + + mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &sa_entry->attrs); + +@@ -763,9 +769,12 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x, + MLX5_IPSEC_RESCHED); + + if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET && +- x->props.mode == XFRM_MODE_TUNNEL) +- xa_set_mark(&ipsec->sadb, sa_entry->ipsec_obj_id, +- MLX5E_IPSEC_TUNNEL_SA); ++ x->props.mode == XFRM_MODE_TUNNEL) { ++ xa_lock_bh(&ipsec->sadb); ++ __xa_set_mark(&ipsec->sadb, sa_entry->ipsec_obj_id, ++ MLX5E_IPSEC_TUNNEL_SA); ++ xa_unlock_bh(&ipsec->sadb); ++ } + + out: + x->xso.offload_handle = (unsigned long)sa_entry; +@@ -792,7 +801,6 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x, + static void mlx5e_xfrm_del_state(struct xfrm_state *x) + { + struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x); +- struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs; + struct mlx5e_ipsec *ipsec = sa_entry->ipsec; + struct mlx5e_ipsec_sa_entry *old; + +@@ -801,12 +809,6 @@ static void mlx5e_xfrm_del_state(struct xfrm_state *x) + + old = xa_erase_bh(&ipsec->sadb, sa_entry->ipsec_obj_id); + WARN_ON(old != sa_entry); +- +- if (attrs->mode == XFRM_MODE_TUNNEL && +- attrs->type == XFRM_DEV_OFFLOAD_PACKET) +- /* Make sure that no ARP requests are running in parallel */ +- flush_workqueue(ipsec->wq); +- + } + + static void mlx5e_xfrm_free_state(struct xfrm_state *x) +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c +index 61288066830d94..2382c712898574 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c +@@ -1442,23 +1442,21 @@ static int tx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry) + goto err_alloc; + } + +- if (attrs->family == AF_INET) +- setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4); +- else +- setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6); +- + setup_fte_no_frags(spec); + setup_fte_upper_proto_match(spec, &attrs->upspec); + + switch (attrs->type) { + case XFRM_DEV_OFFLOAD_CRYPTO: ++ if (attrs->family == AF_INET) ++ setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4); ++ else ++ setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6); + setup_fte_spi(spec, attrs->spi, false); + setup_fte_esp(spec); + setup_fte_reg_a(spec); + break; + case XFRM_DEV_OFFLOAD_PACKET: +- if (attrs->reqid) +- setup_fte_reg_c4(spec, attrs->reqid); ++ setup_fte_reg_c4(spec, attrs->reqid); + err = setup_pkt_reformat(ipsec, attrs, &flow_act); + if (err) + goto err_pkt_reformat; +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c +index de83567aae7913..940e350058d10e 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c +@@ -90,8 +90,9 @@ u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev) + EXPORT_SYMBOL_GPL(mlx5_ipsec_device_caps); + + static void mlx5e_ipsec_packet_setup(void *obj, u32 pdn, +- struct mlx5_accel_esp_xfrm_attrs *attrs) ++ struct mlx5e_ipsec_sa_entry *sa_entry) + { ++ struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs; + void *aso_ctx; + + aso_ctx = MLX5_ADDR_OF(ipsec_obj, obj, ipsec_aso); +@@ -119,8 +120,12 @@ static void mlx5e_ipsec_packet_setup(void *obj, u32 pdn, + * active. 
+ */ + MLX5_SET(ipsec_obj, obj, aso_return_reg, MLX5_IPSEC_ASO_REG_C_4_5); +- if (attrs->dir == XFRM_DEV_OFFLOAD_OUT) ++ if (attrs->dir == XFRM_DEV_OFFLOAD_OUT) { + MLX5_SET(ipsec_aso, aso_ctx, mode, MLX5_IPSEC_ASO_INC_SN); ++ if (!attrs->replay_esn.trigger) ++ MLX5_SET(ipsec_aso, aso_ctx, mode_parameter, ++ sa_entry->esn_state.esn); ++ } + + if (attrs->lft.hard_packet_limit != XFRM_INF) { + MLX5_SET(ipsec_aso, aso_ctx, remove_flow_pkt_cnt, +@@ -173,7 +178,7 @@ static int mlx5_create_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry) + + res = &mdev->mlx5e_res.hw_objs; + if (attrs->type == XFRM_DEV_OFFLOAD_PACKET) +- mlx5e_ipsec_packet_setup(obj, res->pdn, attrs); ++ mlx5e_ipsec_packet_setup(obj, res->pdn, sa_entry); + + err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); + if (!err) +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +index 474e63d02ba492..d2dc375f5e49cb 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +@@ -2490,6 +2490,7 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev, + break; + case MLX5_FLOW_NAMESPACE_RDMA_TX: + root_ns = steering->rdma_tx_root_ns; ++ prio = RDMA_TX_BYPASS_PRIO; + break; + case MLX5_FLOW_NAMESPACE_RDMA_RX_COUNTERS: + root_ns = steering->rdma_rx_root_ns; +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c +index 005661248c7e9c..9faa9ef863a1b6 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c +@@ -540,7 +540,7 @@ int mlx5_lag_port_sel_create(struct mlx5_lag *ldev, + set_tt_map(port_sel, hash_type); + err = mlx5_lag_create_definers(ldev, hash_type, ports); + if (err) +- return err; ++ goto clear_port_sel; + + if (port_sel->tunnel) { + err = mlx5_lag_create_inner_ttc_table(ldev); +@@ -559,6 +559,8 @@ int mlx5_lag_port_sel_create(struct mlx5_lag *ldev, + mlx5_destroy_ttc_table(port_sel->inner.ttc); + destroy_definers: + mlx5_lag_destroy_definers(ldev); ++clear_port_sel: ++ memset(port_sel, 0, sizeof(*port_sel)); + return err; + } + +diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c +index 9d97cd281f18e4..c03558adda91eb 100644 +--- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c ++++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c +@@ -458,7 +458,8 @@ int nfp_bpf_event_output(struct nfp_app_bpf *bpf, const void *data, + map_id_full = be64_to_cpu(cbe->map_ptr); + map_id = map_id_full; + +- if (len < sizeof(struct cmsg_bpf_event) + pkt_size + data_size) ++ if (size_add(pkt_size, data_size) > INT_MAX || ++ len < sizeof(struct cmsg_bpf_event) + pkt_size + data_size) + return -EINVAL; + if (cbe->hdr.ver != NFP_CCM_ABI_VERSION) + return -EINVAL; +diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c +index 64bf22cd860c9a..9eccc7064c2b05 100644 +--- a/drivers/net/ethernet/ti/cpsw_ale.c ++++ b/drivers/net/ethernet/ti/cpsw_ale.c +@@ -106,15 +106,15 @@ struct cpsw_ale_dev_id { + + static inline int cpsw_ale_get_field(u32 *ale_entry, u32 start, u32 bits) + { +- int idx, idx2; ++ int idx, idx2, index; + u32 hi_val = 0; + + idx = start / 32; + idx2 = (start + bits - 1) / 32; + /* Check if bits to be fetched exceed a word */ + if (idx != idx2) { +- idx2 = 2 - idx2; /* flip */ +- hi_val = ale_entry[idx2] << ((idx2 * 32) - start); ++ index = 2 - 
idx2; /* flip */ ++ hi_val = ale_entry[index] << ((idx2 * 32) - start); + } + start -= idx * 32; + idx = 2 - idx; /* flip */ +@@ -124,16 +124,16 @@ static inline int cpsw_ale_get_field(u32 *ale_entry, u32 start, u32 bits) + static inline void cpsw_ale_set_field(u32 *ale_entry, u32 start, u32 bits, + u32 value) + { +- int idx, idx2; ++ int idx, idx2, index; + + value &= BITMASK(bits); + idx = start / 32; + idx2 = (start + bits - 1) / 32; + /* Check if bits to be set exceed a word */ + if (idx != idx2) { +- idx2 = 2 - idx2; /* flip */ +- ale_entry[idx2] &= ~(BITMASK(bits + start - (idx2 * 32))); +- ale_entry[idx2] |= (value >> ((idx2 * 32) - start)); ++ index = 2 - idx2; /* flip */ ++ ale_entry[index] &= ~(BITMASK(bits + start - (idx2 * 32))); ++ ale_entry[index] |= (value >> ((idx2 * 32) - start)); + } + start -= idx * 32; + idx = 2 - idx; /* flip */ +diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +index 9f779653ed6225..02e11827440b5c 100644 +--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c ++++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +@@ -1571,6 +1571,12 @@ axienet_ethtools_set_coalesce(struct net_device *ndev, + return -EFAULT; + } + ++ if (ecoalesce->rx_max_coalesced_frames > 255 || ++ ecoalesce->tx_max_coalesced_frames > 255) { ++ NL_SET_ERR_MSG(extack, "frames must be less than 256"); ++ return -EINVAL; ++ } ++ + if (ecoalesce->rx_max_coalesced_frames) + lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames; + if (ecoalesce->rx_coalesce_usecs) +diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c +index 9dd8f66610ce6b..47238c3ec82e75 100644 +--- a/drivers/net/gtp.c ++++ b/drivers/net/gtp.c +@@ -1095,8 +1095,8 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev, + goto out_encap; + } + +- gn = net_generic(dev_net(dev), gtp_net_id); +- list_add_rcu(>p->list, &gn->gtp_dev_list); ++ gn = net_generic(src_net, gtp_net_id); ++ list_add(>p->list, &gn->gtp_dev_list); + dev->priv_destructor = gtp_destructor; + + netdev_dbg(dev, "registered new GTP interface\n"); +@@ -1122,7 +1122,7 @@ static void gtp_dellink(struct net_device *dev, struct list_head *head) + hlist_for_each_entry_safe(pctx, next, >p->tid_hash[i], hlist_tid) + pdp_context_delete(pctx); + +- list_del_rcu(>p->list); ++ list_del(>p->list); + unregister_netdevice_queue(dev, head); + } + +@@ -1690,16 +1690,19 @@ static int gtp_genl_dump_pdp(struct sk_buff *skb, + struct gtp_dev *last_gtp = (struct gtp_dev *)cb->args[2], *gtp; + int i, j, bucket = cb->args[0], skip = cb->args[1]; + struct net *net = sock_net(skb->sk); ++ struct net_device *dev; + struct pdp_ctx *pctx; +- struct gtp_net *gn; +- +- gn = net_generic(net, gtp_net_id); + + if (cb->args[4]) + return 0; + + rcu_read_lock(); +- list_for_each_entry_rcu(gtp, &gn->gtp_dev_list, list) { ++ for_each_netdev_rcu(net, dev) { ++ if (dev->rtnl_link_ops != >p_link_ops) ++ continue; ++ ++ gtp = netdev_priv(dev); ++ + if (last_gtp && last_gtp != gtp) + continue; + else +@@ -1884,23 +1887,28 @@ static int __net_init gtp_net_init(struct net *net) + return 0; + } + +-static void __net_exit gtp_net_exit(struct net *net) ++static void __net_exit gtp_net_exit_batch_rtnl(struct list_head *net_list, ++ struct list_head *dev_to_kill) + { +- struct gtp_net *gn = net_generic(net, gtp_net_id); +- struct gtp_dev *gtp; +- LIST_HEAD(list); ++ struct net *net; + +- rtnl_lock(); +- list_for_each_entry(gtp, &gn->gtp_dev_list, list) +- gtp_dellink(gtp->dev, &list); ++ list_for_each_entry(net, net_list, 
exit_list) { ++ struct gtp_net *gn = net_generic(net, gtp_net_id); ++ struct gtp_dev *gtp, *gtp_next; ++ struct net_device *dev; + +- unregister_netdevice_many(&list); +- rtnl_unlock(); ++ for_each_netdev(net, dev) ++ if (dev->rtnl_link_ops == >p_link_ops) ++ gtp_dellink(dev, dev_to_kill); ++ ++ list_for_each_entry_safe(gtp, gtp_next, &gn->gtp_dev_list, list) ++ gtp_dellink(gtp->dev, dev_to_kill); ++ } + } + + static struct pernet_operations gtp_net_ops = { + .init = gtp_net_init, +- .exit = gtp_net_exit, ++ .exit_batch_rtnl = gtp_net_exit_batch_rtnl, + .id = >p_net_id, + .size = sizeof(struct gtp_net), + }; +diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c +index 468833675cc949..c0b342cc93db39 100644 +--- a/drivers/nvme/target/io-cmd-bdev.c ++++ b/drivers/nvme/target/io-cmd-bdev.c +@@ -36,7 +36,7 @@ void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id) + */ + id->nsfeat |= 1 << 4; + /* NPWG = Namespace Preferred Write Granularity. 0's based */ +- id->npwg = lpp0b; ++ id->npwg = to0based(bdev_io_min(bdev) / bdev_logical_block_size(bdev)); + /* NPWA = Namespace Preferred Write Alignment. 0's based */ + id->npwa = id->npwg; + /* NPDG = Namespace Preferred Deallocate Granularity. 0's based */ +diff --git a/drivers/pci/controller/pci-host-common.c b/drivers/pci/controller/pci-host-common.c +index e2602e38ae4526..6be3266cd7b5b2 100644 +--- a/drivers/pci/controller/pci-host-common.c ++++ b/drivers/pci/controller/pci-host-common.c +@@ -73,6 +73,10 @@ int pci_host_common_probe(struct platform_device *pdev) + if (IS_ERR(cfg)) + return PTR_ERR(cfg); + ++ /* Do not reassign resources if probe only */ ++ if (!pci_has_flag(PCI_PROBE_ONLY)) ++ pci_add_flags(PCI_REASSIGN_ALL_BUS); ++ + bridge->sysdata = cfg; + bridge->ops = (struct pci_ops *)&ops->pci_ops; + bridge->msi_domain = true; +diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c +index 7e84e472b3383c..03b519a2284038 100644 +--- a/drivers/pci/probe.c ++++ b/drivers/pci/probe.c +@@ -3096,18 +3096,20 @@ int pci_host_probe(struct pci_host_bridge *bridge) + + bus = bridge->bus; + +- /* If we must preserve the resource configuration, claim now */ +- if (bridge->preserve_config) +- pci_bus_claim_resources(bus); +- + /* +- * Assign whatever was left unassigned. If we didn't claim above, +- * this will reassign everything. ++ * We insert PCI resources into the iomem_resource and ++ * ioport_resource trees in either pci_bus_claim_resources() ++ * or pci_bus_assign_resources(). 
+ */ +- pci_assign_unassigned_root_bus_resources(bus); ++ if (pci_has_flag(PCI_PROBE_ONLY)) { ++ pci_bus_claim_resources(bus); ++ } else { ++ pci_bus_size_bridges(bus); ++ pci_bus_assign_resources(bus); + +- list_for_each_entry(child, &bus->children, node) +- pcie_bus_configure_settings(child); ++ list_for_each_entry(child, &bus->children, node) ++ pcie_bus_configure_settings(child); ++ } + + pci_bus_add_devices(bus); + return 0; +diff --git a/drivers/pmdomain/imx/imx8mp-blk-ctrl.c b/drivers/pmdomain/imx/imx8mp-blk-ctrl.c +index 31693add7d633f..faf643a4a5d06b 100644 +--- a/drivers/pmdomain/imx/imx8mp-blk-ctrl.c ++++ b/drivers/pmdomain/imx/imx8mp-blk-ctrl.c +@@ -767,7 +767,7 @@ static int imx8mp_blk_ctrl_remove(struct platform_device *pdev) + + of_genpd_del_provider(pdev->dev.of_node); + +- for (i = 0; bc->onecell_data.num_domains; i++) { ++ for (i = 0; i < bc->onecell_data.num_domains; i++) { + struct imx8mp_blk_ctrl_domain *domain = &bc->domains[i]; + + pm_genpd_remove(&domain->genpd); +diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c +index 02696c7f9beff9..0ac0b6aaf9c62c 100644 +--- a/drivers/ufs/core/ufshcd.c ++++ b/drivers/ufs/core/ufshcd.c +@@ -10483,14 +10483,17 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) + } + + /* +- * Set the default power management level for runtime and system PM. ++ * Set the default power management level for runtime and system PM if ++ * not set by the host controller drivers. + * Default power saving mode is to keep UFS link in Hibern8 state + * and UFS device in sleep state. + */ +- hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state( ++ if (!hba->rpm_lvl) ++ hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state( + UFS_SLEEP_PWR_MODE, + UIC_LINK_HIBERN8_STATE); +- hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state( ++ if (!hba->spm_lvl) ++ hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state( + UFS_SLEEP_PWR_MODE, + UIC_LINK_HIBERN8_STATE); + +diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c +index 89b11336a83697..1806bff8e59bc3 100644 +--- a/fs/cachefiles/daemon.c ++++ b/fs/cachefiles/daemon.c +@@ -15,6 +15,7 @@ + #include <linux/namei.h> + #include <linux/poll.h> + #include <linux/mount.h> ++#include <linux/security.h> + #include <linux/statfs.h> + #include <linux/ctype.h> + #include <linux/string.h> +@@ -576,7 +577,7 @@ static int cachefiles_daemon_dir(struct cachefiles_cache *cache, char *args) + */ + static int cachefiles_daemon_secctx(struct cachefiles_cache *cache, char *args) + { +- char *secctx; ++ int err; + + _enter(",%s", args); + +@@ -585,16 +586,16 @@ static int cachefiles_daemon_secctx(struct cachefiles_cache *cache, char *args) + return -EINVAL; + } + +- if (cache->secctx) { ++ if (cache->have_secid) { + pr_err("Second security context specified\n"); + return -EINVAL; + } + +- secctx = kstrdup(args, GFP_KERNEL); +- if (!secctx) +- return -ENOMEM; ++ err = security_secctx_to_secid(args, strlen(args), &cache->secid); ++ if (err) ++ return err; + +- cache->secctx = secctx; ++ cache->have_secid = true; + return 0; + } + +@@ -820,7 +821,6 @@ static void cachefiles_daemon_unbind(struct cachefiles_cache *cache) + put_cred(cache->cache_cred); + + kfree(cache->rootdirname); +- kfree(cache->secctx); + kfree(cache->tag); + + _leave(""); +diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h +index 111ad6ecd4baf3..4421a12960a662 100644 +--- a/fs/cachefiles/internal.h ++++ b/fs/cachefiles/internal.h +@@ -122,7 +122,6 @@ struct cachefiles_cache { + #define 
CACHEFILES_STATE_CHANGED 3 /* T if state changed (poll trigger) */ + #define CACHEFILES_ONDEMAND_MODE 4 /* T if in on-demand read mode */ + char *rootdirname; /* name of cache root directory */ +- char *secctx; /* LSM security context */ + char *tag; /* cache binding tag */ + refcount_t unbind_pincount;/* refcount to do daemon unbind */ + struct xarray reqs; /* xarray of pending on-demand requests */ +@@ -130,6 +129,8 @@ struct cachefiles_cache { + struct xarray ondemand_ids; /* xarray for ondemand_id allocation */ + u32 ondemand_id_next; + u32 msg_id_next; ++ u32 secid; /* LSM security id */ ++ bool have_secid; /* whether "secid" was set */ + }; + + static inline bool cachefiles_in_ondemand_mode(struct cachefiles_cache *cache) +diff --git a/fs/cachefiles/security.c b/fs/cachefiles/security.c +index fe777164f1d894..fc6611886b3b5e 100644 +--- a/fs/cachefiles/security.c ++++ b/fs/cachefiles/security.c +@@ -18,7 +18,7 @@ int cachefiles_get_security_ID(struct cachefiles_cache *cache) + struct cred *new; + int ret; + +- _enter("{%s}", cache->secctx); ++ _enter("{%u}", cache->have_secid ? cache->secid : 0); + + new = prepare_kernel_cred(current); + if (!new) { +@@ -26,8 +26,8 @@ int cachefiles_get_security_ID(struct cachefiles_cache *cache) + goto error; + } + +- if (cache->secctx) { +- ret = set_security_override_from_ctx(new, cache->secctx); ++ if (cache->have_secid) { ++ ret = set_security_override(new, cache->secid); + if (ret < 0) { + put_cred(new); + pr_err("Security denies permission to nominate security context: error %d\n", +diff --git a/fs/file.c b/fs/file.c +index bd817e31d79866..a178efc8cf4b5c 100644 +--- a/fs/file.c ++++ b/fs/file.c +@@ -21,6 +21,7 @@ + #include <linux/rcupdate.h> + #include <linux/close_range.h> + #include <net/sock.h> ++#include <linux/init_task.h> + + #include "internal.h" + +diff --git a/fs/hfs/super.c b/fs/hfs/super.c +index 6764afa98a6ff1..431bdc65f72312 100644 +--- a/fs/hfs/super.c ++++ b/fs/hfs/super.c +@@ -418,11 +418,13 @@ static int hfs_fill_super(struct super_block *sb, void *data, int silent) + goto bail_no_root; + res = hfs_cat_find_brec(sb, HFS_ROOT_CNID, &fd); + if (!res) { +- if (fd.entrylength > sizeof(rec) || fd.entrylength < 0) { ++ if (fd.entrylength != sizeof(rec.dir)) { + res = -EIO; + goto bail_hfs_find; + } + hfs_bnode_read(fd.bnode, &rec, fd.entryoffset, fd.entrylength); ++ if (rec.type != HFS_CDR_DIR) ++ res = -EIO; + } + if (res) + goto bail_hfs_find; +diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c +index a05ee2cbb77936..e7e6701806ad26 100644 +--- a/fs/iomap/buffered-io.c ++++ b/fs/iomap/buffered-io.c +@@ -1095,7 +1095,7 @@ static int iomap_write_delalloc_scan(struct inode *inode, + } + + /* move offset to start of next folio in range */ +- start_byte = folio_next_index(folio) << PAGE_SHIFT; ++ start_byte = folio_pos(folio) + folio_size(folio); + folio_unlock(folio); + folio_put(folio); + } +diff --git a/fs/nfsd/filecache.c b/fs/nfsd/filecache.c +index 6f2bcbfde45e69..0ff07d53931f2f 100644 +--- a/fs/nfsd/filecache.c ++++ b/fs/nfsd/filecache.c +@@ -219,6 +219,7 @@ nfsd_file_alloc(struct net *net, struct inode *inode, unsigned char need, + return NULL; + + INIT_LIST_HEAD(&nf->nf_lru); ++ INIT_LIST_HEAD(&nf->nf_gc); + nf->nf_birthtime = ktime_get(); + nf->nf_file = NULL; + nf->nf_cred = get_current_cred(); +@@ -396,8 +397,8 @@ nfsd_file_dispose_list(struct list_head *dispose) + struct nfsd_file *nf; + + while (!list_empty(dispose)) { +- nf = list_first_entry(dispose, struct nfsd_file, nf_lru); +- list_del_init(&nf->nf_lru); ++ 
nf = list_first_entry(dispose, struct nfsd_file, nf_gc); ++ list_del_init(&nf->nf_gc); + nfsd_file_free(nf); + } + } +@@ -414,12 +415,12 @@ nfsd_file_dispose_list_delayed(struct list_head *dispose) + { + while(!list_empty(dispose)) { + struct nfsd_file *nf = list_first_entry(dispose, +- struct nfsd_file, nf_lru); ++ struct nfsd_file, nf_gc); + struct nfsd_net *nn = net_generic(nf->nf_net, nfsd_net_id); + struct nfsd_fcache_disposal *l = nn->fcache_disposal; + + spin_lock(&l->lock); +- list_move_tail(&nf->nf_lru, &l->freeme); ++ list_move_tail(&nf->nf_gc, &l->freeme); + spin_unlock(&l->lock); + queue_work(nfsd_filecache_wq, &l->work); + } +@@ -476,7 +477,8 @@ nfsd_file_lru_cb(struct list_head *item, struct list_lru_one *lru, + + /* Refcount went to zero. Unhash it and queue it to the dispose list */ + nfsd_file_unhash(nf); +- list_lru_isolate_move(lru, &nf->nf_lru, head); ++ list_lru_isolate(lru, &nf->nf_lru); ++ list_add(&nf->nf_gc, head); + this_cpu_inc(nfsd_file_evictions); + trace_nfsd_file_gc_disposed(nf); + return LRU_REMOVED; +@@ -555,7 +557,7 @@ nfsd_file_cond_queue(struct nfsd_file *nf, struct list_head *dispose) + + /* If refcount goes to 0, then put on the dispose list */ + if (refcount_sub_and_test(decrement, &nf->nf_ref)) { +- list_add(&nf->nf_lru, dispose); ++ list_add(&nf->nf_gc, dispose); + trace_nfsd_file_closing(nf); + } + } +@@ -631,8 +633,8 @@ nfsd_file_close_inode_sync(struct inode *inode) + + nfsd_file_queue_for_close(inode, &dispose); + while (!list_empty(&dispose)) { +- nf = list_first_entry(&dispose, struct nfsd_file, nf_lru); +- list_del_init(&nf->nf_lru); ++ nf = list_first_entry(&dispose, struct nfsd_file, nf_gc); ++ list_del_init(&nf->nf_gc); + nfsd_file_free(nf); + } + flush_delayed_fput(); +diff --git a/fs/nfsd/filecache.h b/fs/nfsd/filecache.h +index e54165a3224f0b..bf7a630f1a4561 100644 +--- a/fs/nfsd/filecache.h ++++ b/fs/nfsd/filecache.h +@@ -44,6 +44,7 @@ struct nfsd_file { + + struct nfsd_file_mark *nf_mark; + struct list_head nf_lru; ++ struct list_head nf_gc; + struct rcu_head nf_rcu; + ktime_t nf_birthtime; + }; +diff --git a/fs/notify/fdinfo.c b/fs/notify/fdinfo.c +index 5c430736ec12c4..26655572975d3d 100644 +--- a/fs/notify/fdinfo.c ++++ b/fs/notify/fdinfo.c +@@ -51,10 +51,8 @@ static void show_mark_fhandle(struct seq_file *m, struct inode *inode) + size = f.handle.handle_bytes >> 2; + + ret = exportfs_encode_fid(inode, (struct fid *)f.handle.f_handle, &size); +- if ((ret == FILEID_INVALID) || (ret < 0)) { +- WARN_ONCE(1, "Can't encode file handler for inotify: %d\n", ret); ++ if ((ret == FILEID_INVALID) || (ret < 0)) + return; +- } + + f.handle.handle_type = ret; + f.handle.handle_bytes = size * sizeof(u32); +diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c +index 70a768b623cf40..f7672472fa8279 100644 +--- a/fs/ocfs2/extent_map.c ++++ b/fs/ocfs2/extent_map.c +@@ -973,7 +973,13 @@ int ocfs2_read_virt_blocks(struct inode *inode, u64 v_block, int nr, + } + + while (done < nr) { +- down_read(&OCFS2_I(inode)->ip_alloc_sem); ++ if (!down_read_trylock(&OCFS2_I(inode)->ip_alloc_sem)) { ++ rc = -EAGAIN; ++ mlog(ML_ERROR, ++ "Inode #%llu ip_alloc_sem is temporarily unavailable\n", ++ (unsigned long long)OCFS2_I(inode)->ip_blkno); ++ break; ++ } + rc = ocfs2_extent_map_get_blocks(inode, v_block + done, + &p_block, &p_count, NULL); + up_read(&OCFS2_I(inode)->ip_alloc_sem); +diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c +index ada3fcc9c6d501..18e018cb181179 100644 +--- a/fs/overlayfs/copy_up.c ++++ b/fs/overlayfs/copy_up.c +@@ -371,13 
+371,13 @@ int ovl_set_attr(struct ovl_fs *ofs, struct dentry *upperdentry, + return err; + } + +-struct ovl_fh *ovl_encode_real_fh(struct ovl_fs *ofs, struct dentry *real, ++struct ovl_fh *ovl_encode_real_fh(struct ovl_fs *ofs, struct inode *realinode, + bool is_upper) + { + struct ovl_fh *fh; + int fh_type, dwords; + int buflen = MAX_HANDLE_SZ; +- uuid_t *uuid = &real->d_sb->s_uuid; ++ uuid_t *uuid = &realinode->i_sb->s_uuid; + int err; + + /* Make sure the real fid stays 32bit aligned */ +@@ -394,13 +394,13 @@ struct ovl_fh *ovl_encode_real_fh(struct ovl_fs *ofs, struct dentry *real, + * the price or reconnecting the dentry. + */ + dwords = buflen >> 2; +- fh_type = exportfs_encode_fh(real, (void *)fh->fb.fid, &dwords, 0); ++ fh_type = exportfs_encode_inode_fh(realinode, (void *)fh->fb.fid, ++ &dwords, NULL, 0); + buflen = (dwords << 2); + + err = -EIO; +- if (WARN_ON(fh_type < 0) || +- WARN_ON(buflen > MAX_HANDLE_SZ) || +- WARN_ON(fh_type == FILEID_INVALID)) ++ if (fh_type < 0 || fh_type == FILEID_INVALID || ++ WARN_ON(buflen > MAX_HANDLE_SZ)) + goto out_err; + + fh->fb.version = OVL_FH_VERSION; +@@ -438,7 +438,7 @@ int ovl_set_origin(struct ovl_fs *ofs, struct dentry *lower, + * up and a pure upper inode. + */ + if (ovl_can_decode_fh(lower->d_sb)) { +- fh = ovl_encode_real_fh(ofs, lower, false); ++ fh = ovl_encode_real_fh(ofs, d_inode(lower), false); + if (IS_ERR(fh)) + return PTR_ERR(fh); + } +@@ -461,7 +461,7 @@ static int ovl_set_upper_fh(struct ovl_fs *ofs, struct dentry *upper, + const struct ovl_fh *fh; + int err; + +- fh = ovl_encode_real_fh(ofs, upper, true); ++ fh = ovl_encode_real_fh(ofs, d_inode(upper), true); + if (IS_ERR(fh)) + return PTR_ERR(fh); + +diff --git a/fs/overlayfs/export.c b/fs/overlayfs/export.c +index 611ff567a1aa6f..3a17e4366f28c0 100644 +--- a/fs/overlayfs/export.c ++++ b/fs/overlayfs/export.c +@@ -181,35 +181,37 @@ static int ovl_connect_layer(struct dentry *dentry) + * + * Return 0 for upper file handle, > 0 for lower file handle or < 0 on error. + */ +-static int ovl_check_encode_origin(struct dentry *dentry) ++static int ovl_check_encode_origin(struct inode *inode) + { +- struct ovl_fs *ofs = OVL_FS(dentry->d_sb); ++ struct ovl_fs *ofs = OVL_FS(inode->i_sb); + bool decodable = ofs->config.nfs_export; ++ struct dentry *dentry; ++ int err; + + /* No upper layer? */ + if (!ovl_upper_mnt(ofs)) + return 1; + + /* Lower file handle for non-upper non-decodable */ +- if (!ovl_dentry_upper(dentry) && !decodable) ++ if (!ovl_inode_upper(inode) && !decodable) + return 1; + + /* Upper file handle for pure upper */ +- if (!ovl_dentry_lower(dentry)) ++ if (!ovl_inode_lower(inode)) + return 0; + + /* + * Root is never indexed, so if there's an upper layer, encode upper for + * root. + */ +- if (dentry == dentry->d_sb->s_root) ++ if (inode == d_inode(inode->i_sb->s_root)) + return 0; + + /* + * Upper decodable file handle for non-indexed upper. + */ +- if (ovl_dentry_upper(dentry) && decodable && +- !ovl_test_flag(OVL_INDEX, d_inode(dentry))) ++ if (ovl_inode_upper(inode) && decodable && ++ !ovl_test_flag(OVL_INDEX, inode)) + return 0; + + /* +@@ -218,14 +220,23 @@ static int ovl_check_encode_origin(struct dentry *dentry) + * ovl_connect_layer() will try to make origin's layer "connected" by + * copying up a "connectable" ancestor. 
+ */ +- if (d_is_dir(dentry) && decodable) +- return ovl_connect_layer(dentry); ++ if (!decodable || !S_ISDIR(inode->i_mode)) ++ return 1; ++ ++ dentry = d_find_any_alias(inode); ++ if (!dentry) ++ return -ENOENT; ++ ++ err = ovl_connect_layer(dentry); ++ dput(dentry); ++ if (err < 0) ++ return err; + + /* Lower file handle for indexed and non-upper dir/non-dir */ + return 1; + } + +-static int ovl_dentry_to_fid(struct ovl_fs *ofs, struct dentry *dentry, ++static int ovl_dentry_to_fid(struct ovl_fs *ofs, struct inode *inode, + u32 *fid, int buflen) + { + struct ovl_fh *fh = NULL; +@@ -236,13 +247,13 @@ static int ovl_dentry_to_fid(struct ovl_fs *ofs, struct dentry *dentry, + * Check if we should encode a lower or upper file handle and maybe + * copy up an ancestor to make lower file handle connectable. + */ +- err = enc_lower = ovl_check_encode_origin(dentry); ++ err = enc_lower = ovl_check_encode_origin(inode); + if (enc_lower < 0) + goto fail; + + /* Encode an upper or lower file handle */ +- fh = ovl_encode_real_fh(ofs, enc_lower ? ovl_dentry_lower(dentry) : +- ovl_dentry_upper(dentry), !enc_lower); ++ fh = ovl_encode_real_fh(ofs, enc_lower ? ovl_inode_lower(inode) : ++ ovl_inode_upper(inode), !enc_lower); + if (IS_ERR(fh)) + return PTR_ERR(fh); + +@@ -256,8 +267,8 @@ static int ovl_dentry_to_fid(struct ovl_fs *ofs, struct dentry *dentry, + return err; + + fail: +- pr_warn_ratelimited("failed to encode file handle (%pd2, err=%i)\n", +- dentry, err); ++ pr_warn_ratelimited("failed to encode file handle (ino=%lu, err=%i)\n", ++ inode->i_ino, err); + goto out; + } + +@@ -265,19 +276,13 @@ static int ovl_encode_fh(struct inode *inode, u32 *fid, int *max_len, + struct inode *parent) + { + struct ovl_fs *ofs = OVL_FS(inode->i_sb); +- struct dentry *dentry; + int bytes, buflen = *max_len << 2; + + /* TODO: encode connectable file handles */ + if (parent) + return FILEID_INVALID; + +- dentry = d_find_any_alias(inode); +- if (!dentry) +- return FILEID_INVALID; +- +- bytes = ovl_dentry_to_fid(ofs, dentry, fid, buflen); +- dput(dentry); ++ bytes = ovl_dentry_to_fid(ofs, inode, fid, buflen); + if (bytes <= 0) + return FILEID_INVALID; + +diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c +index 80391c687c2ad8..273a39d3e95133 100644 +--- a/fs/overlayfs/namei.c ++++ b/fs/overlayfs/namei.c +@@ -523,7 +523,7 @@ int ovl_verify_set_fh(struct ovl_fs *ofs, struct dentry *dentry, + struct ovl_fh *fh; + int err; + +- fh = ovl_encode_real_fh(ofs, real, is_upper); ++ fh = ovl_encode_real_fh(ofs, d_inode(real), is_upper); + err = PTR_ERR(fh); + if (IS_ERR(fh)) { + fh = NULL; +@@ -720,7 +720,7 @@ int ovl_get_index_name(struct ovl_fs *ofs, struct dentry *origin, + struct ovl_fh *fh; + int err; + +- fh = ovl_encode_real_fh(ofs, origin, false); ++ fh = ovl_encode_real_fh(ofs, d_inode(origin), false); + if (IS_ERR(fh)) + return PTR_ERR(fh); + +diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h +index 09ca82ed0f8ced..981967e507b3e1 100644 +--- a/fs/overlayfs/overlayfs.h ++++ b/fs/overlayfs/overlayfs.h +@@ -821,7 +821,7 @@ int ovl_copy_up_with_data(struct dentry *dentry); + int ovl_maybe_copy_up(struct dentry *dentry, int flags); + int ovl_copy_xattr(struct super_block *sb, const struct path *path, struct dentry *new); + int ovl_set_attr(struct ovl_fs *ofs, struct dentry *upper, struct kstat *stat); +-struct ovl_fh *ovl_encode_real_fh(struct ovl_fs *ofs, struct dentry *real, ++struct ovl_fh *ovl_encode_real_fh(struct ovl_fs *ofs, struct inode *realinode, + bool is_upper); + int ovl_set_origin(struct 
ovl_fs *ofs, struct dentry *lower, + struct dentry *upper); +diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c +index 8319bcbe3ee36b..3303cb04e12c36 100644 +--- a/fs/proc/vmcore.c ++++ b/fs/proc/vmcore.c +@@ -404,6 +404,8 @@ static ssize_t __read_vmcore(struct iov_iter *iter, loff_t *fpos) + if (!iov_iter_count(iter)) + return acc; + } ++ ++ cond_resched(); + } + + return acc; +diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c +index 20f303f2a5d75a..dbcaaa274abdbf 100644 +--- a/fs/smb/client/connect.c ++++ b/fs/smb/client/connect.c +@@ -1061,6 +1061,7 @@ clean_demultiplex_info(struct TCP_Server_Info *server) + /* Release netns reference for this server. */ + put_net(cifs_net_ns(server)); + kfree(server->leaf_fullpath); ++ kfree(server->hostname); + kfree(server); + + length = atomic_dec_return(&tcpSesAllocCount); +@@ -1684,8 +1685,6 @@ cifs_put_tcp_session(struct TCP_Server_Info *server, int from_reconnect) + kfree_sensitive(server->session_key.response); + server->session_key.response = NULL; + server->session_key.len = 0; +- kfree(server->hostname); +- server->hostname = NULL; + + task = xchg(&server->tsk, NULL); + if (task) +diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h +index 254d4a898179c0..8f77bb0f4ae0ca 100644 +--- a/include/linux/hrtimer.h ++++ b/include/linux/hrtimer.h +@@ -532,6 +532,7 @@ extern void __init hrtimers_init(void); + extern void sysrq_timer_list_show(void); + + int hrtimers_prepare_cpu(unsigned int cpu); ++int hrtimers_cpu_starting(unsigned int cpu); + #ifdef CONFIG_HOTPLUG_CPU + int hrtimers_cpu_dying(unsigned int cpu); + #else +diff --git a/include/linux/poll.h b/include/linux/poll.h +index d1ea4f3714a848..fc641b50f1298e 100644 +--- a/include/linux/poll.h ++++ b/include/linux/poll.h +@@ -41,8 +41,16 @@ typedef struct poll_table_struct { + + static inline void poll_wait(struct file * filp, wait_queue_head_t * wait_address, poll_table *p) + { +- if (p && p->_qproc && wait_address) ++ if (p && p->_qproc && wait_address) { + p->_qproc(filp, wait_address, p); ++ /* ++ * This memory barrier is paired in the wq_has_sleeper(). ++ * See the comment above prepare_to_wait(), we need to ++ * ensure that subsequent tests in this thread can't be ++ * reordered with __add_wait_queue() in _qproc() paths. 
++ */ ++ smp_mb(); ++ } + } + + /* +diff --git a/include/linux/pruss_driver.h b/include/linux/pruss_driver.h +index c9a31c567e85bf..2e18fef1a2e109 100644 +--- a/include/linux/pruss_driver.h ++++ b/include/linux/pruss_driver.h +@@ -144,32 +144,32 @@ static inline int pruss_release_mem_region(struct pruss *pruss, + static inline int pruss_cfg_get_gpmux(struct pruss *pruss, + enum pruss_pru_id pru_id, u8 *mux) + { +- return ERR_PTR(-EOPNOTSUPP); ++ return -EOPNOTSUPP; + } + + static inline int pruss_cfg_set_gpmux(struct pruss *pruss, + enum pruss_pru_id pru_id, u8 mux) + { +- return ERR_PTR(-EOPNOTSUPP); ++ return -EOPNOTSUPP; + } + + static inline int pruss_cfg_gpimode(struct pruss *pruss, + enum pruss_pru_id pru_id, + enum pruss_gpi_mode mode) + { +- return ERR_PTR(-EOPNOTSUPP); ++ return -EOPNOTSUPP; + } + + static inline int pruss_cfg_miirt_enable(struct pruss *pruss, bool enable) + { +- return ERR_PTR(-EOPNOTSUPP); ++ return -EOPNOTSUPP; + } + + static inline int pruss_cfg_xfr_enable(struct pruss *pruss, + enum pru_type pru_type, +- bool enable); ++ bool enable) + { +- return ERR_PTR(-EOPNOTSUPP); ++ return -EOPNOTSUPP; + } + + #endif /* CONFIG_TI_PRUSS */ +diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h +index 958c805df1915b..1befad79a67349 100644 +--- a/include/net/net_namespace.h ++++ b/include/net/net_namespace.h +@@ -442,6 +442,9 @@ struct pernet_operations { + void (*pre_exit)(struct net *net); + void (*exit)(struct net *net); + void (*exit_batch)(struct list_head *net_exit_list); ++ /* Following method is called with RTNL held. */ ++ void (*exit_batch_rtnl)(struct list_head *net_exit_list, ++ struct list_head *dev_kill_list); + unsigned int *id; + size_t size; + }; +diff --git a/kernel/cpu.c b/kernel/cpu.c +index 0c72b94ed076a3..7ab11b45976842 100644 +--- a/kernel/cpu.c ++++ b/kernel/cpu.c +@@ -2206,7 +2206,7 @@ static struct cpuhp_step cpuhp_hp_states[] = { + }, + [CPUHP_AP_HRTIMERS_DYING] = { + .name = "hrtimers:dying", +- .startup.single = NULL, ++ .startup.single = hrtimers_cpu_starting, + .teardown.single = hrtimers_cpu_dying, + }, + +diff --git a/kernel/gen_kheaders.sh b/kernel/gen_kheaders.sh +index 383fd43ac61222..7e1340da5acae6 100755 +--- a/kernel/gen_kheaders.sh ++++ b/kernel/gen_kheaders.sh +@@ -89,6 +89,7 @@ find $cpio_dir -type f -print0 | + + # Create archive and try to normalize metadata for reproducibility. + tar "${KBUILD_BUILD_TIMESTAMP:+--mtime=$KBUILD_BUILD_TIMESTAMP}" \ ++ --exclude=".__afs*" --exclude=".nfs*" \ + --owner=0 --group=0 --sort=name --numeric-owner --mode=u=rw,go=r,a+X \ + -I $XZ -cf $tarfile -C $cpio_dir/ . 
> /dev/null + +diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c +index 57e5cb36f1bc93..e99b1305e1a5f4 100644 +--- a/kernel/time/hrtimer.c ++++ b/kernel/time/hrtimer.c +@@ -2180,6 +2180,15 @@ int hrtimers_prepare_cpu(unsigned int cpu) + } + + cpu_base->cpu = cpu; ++ hrtimer_cpu_base_init_expiry_lock(cpu_base); ++ return 0; ++} ++ ++int hrtimers_cpu_starting(unsigned int cpu) ++{ ++ struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases); ++ ++ /* Clear out any left over state from a CPU down operation */ + cpu_base->active_bases = 0; + cpu_base->hres_active = 0; + cpu_base->hang_detected = 0; +@@ -2188,7 +2197,6 @@ int hrtimers_prepare_cpu(unsigned int cpu) + cpu_base->expires_next = KTIME_MAX; + cpu_base->softirq_expires_next = KTIME_MAX; + cpu_base->online = 1; +- hrtimer_cpu_base_init_expiry_lock(cpu_base); + return 0; + } + +@@ -2266,6 +2274,7 @@ int hrtimers_cpu_dying(unsigned int dying_cpu) + void __init hrtimers_init(void) + { + hrtimers_prepare_cpu(smp_processor_id()); ++ hrtimers_cpu_starting(smp_processor_id()); + open_softirq(HRTIMER_SOFTIRQ, hrtimer_run_softirq); + } + +diff --git a/mm/filemap.c b/mm/filemap.c +index 2c308413387ffb..6a3d62de1cca7b 100644 +--- a/mm/filemap.c ++++ b/mm/filemap.c +@@ -3037,7 +3037,7 @@ static inline loff_t folio_seek_hole_data(struct xa_state *xas, + if (ops->is_partially_uptodate(folio, offset, bsz) == + seek_data) + break; +- start = (start + bsz) & ~(bsz - 1); ++ start = (start + bsz) & ~((u64)bsz - 1); + offset += bsz; + } while (offset < folio_size(folio)); + unlock: +diff --git a/net/core/filter.c b/net/core/filter.c +index 34320ce70096ac..5881944f1681c9 100644 +--- a/net/core/filter.c ++++ b/net/core/filter.c +@@ -11190,6 +11190,7 @@ BPF_CALL_4(sk_select_reuseport, struct sk_reuseport_kern *, reuse_kern, + bool is_sockarray = map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY; + struct sock_reuseport *reuse; + struct sock *selected_sk; ++ int err; + + selected_sk = map->ops->map_lookup_elem(map, key); + if (!selected_sk) +@@ -11197,10 +11198,6 @@ BPF_CALL_4(sk_select_reuseport, struct sk_reuseport_kern *, reuse_kern, + + reuse = rcu_dereference(selected_sk->sk_reuseport_cb); + if (!reuse) { +- /* Lookup in sock_map can return TCP ESTABLISHED sockets. */ +- if (sk_is_refcounted(selected_sk)) +- sock_put(selected_sk); +- + /* reuseport_array has only sk with non NULL sk_reuseport_cb. + * The only (!reuse) case here is - the sk has already been + * unhashed (e.g. by close()), so treat it as -ENOENT. +@@ -11208,24 +11205,33 @@ BPF_CALL_4(sk_select_reuseport, struct sk_reuseport_kern *, reuse_kern, + * Other maps (e.g. sock_map) do not provide this guarantee and + * the sk may never be in the reuseport group to begin with. + */ +- return is_sockarray ? -ENOENT : -EINVAL; ++ err = is_sockarray ? -ENOENT : -EINVAL; ++ goto error; + } + + if (unlikely(reuse->reuseport_id != reuse_kern->reuseport_id)) { + struct sock *sk = reuse_kern->sk; + +- if (sk->sk_protocol != selected_sk->sk_protocol) +- return -EPROTOTYPE; +- else if (sk->sk_family != selected_sk->sk_family) +- return -EAFNOSUPPORT; +- +- /* Catch all. Likely bound to a different sockaddr. */ +- return -EBADFD; ++ if (sk->sk_protocol != selected_sk->sk_protocol) { ++ err = -EPROTOTYPE; ++ } else if (sk->sk_family != selected_sk->sk_family) { ++ err = -EAFNOSUPPORT; ++ } else { ++ /* Catch all. Likely bound to a different sockaddr. 
*/ ++ err = -EBADFD; ++ } ++ goto error; + } + + reuse_kern->selected_sk = selected_sk; + + return 0; ++error: ++ /* Lookup in sock_map can return TCP ESTABLISHED sockets. */ ++ if (sk_is_refcounted(selected_sk)) ++ sock_put(selected_sk); ++ ++ return err; + } + + static const struct bpf_func_proto sk_select_reuseport_proto = { +diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c +index 92b7fea4d495cf..70ac9d9bc87708 100644 +--- a/net/core/net_namespace.c ++++ b/net/core/net_namespace.c +@@ -321,8 +321,9 @@ static __net_init int setup_net(struct net *net, struct user_namespace *user_ns) + { + /* Must be called with pernet_ops_rwsem held */ + const struct pernet_operations *ops, *saved_ops; +- int error = 0; + LIST_HEAD(net_exit_list); ++ LIST_HEAD(dev_kill_list); ++ int error = 0; + + refcount_set(&net->ns.count, 1); + ref_tracker_dir_init(&net->refcnt_tracker, 128, "net refcnt"); +@@ -360,6 +361,15 @@ static __net_init int setup_net(struct net *net, struct user_namespace *user_ns) + + synchronize_rcu(); + ++ ops = saved_ops; ++ rtnl_lock(); ++ list_for_each_entry_continue_reverse(ops, &pernet_list, list) { ++ if (ops->exit_batch_rtnl) ++ ops->exit_batch_rtnl(&net_exit_list, &dev_kill_list); ++ } ++ unregister_netdevice_many(&dev_kill_list); ++ rtnl_unlock(); ++ + ops = saved_ops; + list_for_each_entry_continue_reverse(ops, &pernet_list, list) + ops_exit_list(ops, &net_exit_list); +@@ -588,6 +598,7 @@ static void cleanup_net(struct work_struct *work) + struct net *net, *tmp, *last; + struct llist_node *net_kill_list; + LIST_HEAD(net_exit_list); ++ LIST_HEAD(dev_kill_list); + + /* Atomically snapshot the list of namespaces to cleanup */ + net_kill_list = llist_del_all(&cleanup_list); +@@ -628,6 +639,14 @@ static void cleanup_net(struct work_struct *work) + */ + synchronize_rcu(); + ++ rtnl_lock(); ++ list_for_each_entry_reverse(ops, &pernet_list, list) { ++ if (ops->exit_batch_rtnl) ++ ops->exit_batch_rtnl(&net_exit_list, &dev_kill_list); ++ } ++ unregister_netdevice_many(&dev_kill_list); ++ rtnl_unlock(); ++ + /* Run all of the network namespace exit methods */ + list_for_each_entry_reverse(ops, &pernet_list, list) + ops_exit_list(ops, &net_exit_list); +@@ -1170,7 +1189,17 @@ static void free_exit_list(struct pernet_operations *ops, struct list_head *net_ + { + ops_pre_exit_list(ops, net_exit_list); + synchronize_rcu(); ++ ++ if (ops->exit_batch_rtnl) { ++ LIST_HEAD(dev_kill_list); ++ ++ rtnl_lock(); ++ ops->exit_batch_rtnl(net_exit_list, &dev_kill_list); ++ unregister_netdevice_many(&dev_kill_list); ++ rtnl_unlock(); ++ } + ops_exit_list(ops, net_exit_list); ++ + ops_free_list(ops, net_exit_list); + } + +diff --git a/net/core/pktgen.c b/net/core/pktgen.c +index 0e472f6fab8538..359e24c3f22cab 100644 +--- a/net/core/pktgen.c ++++ b/net/core/pktgen.c +@@ -850,6 +850,9 @@ static ssize_t get_imix_entries(const char __user *buffer, + unsigned long weight; + unsigned long size; + ++ if (pkt_dev->n_imix_entries >= MAX_IMIX_ENTRIES) ++ return -E2BIG; ++ + len = num_arg(&buffer[i], max_digits, &size); + if (len < 0) + return len; +@@ -879,9 +882,6 @@ static ssize_t get_imix_entries(const char __user *buffer, + + i++; + pkt_dev->n_imix_entries++; +- +- if (pkt_dev->n_imix_entries > MAX_IMIX_ENTRIES) +- return -E2BIG; + } while (c == ' '); + + return i; +diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c +index d25e962b18a53e..2839ca8053ba6d 100644 +--- a/net/dccp/ipv6.c ++++ b/net/dccp/ipv6.c +@@ -616,7 +616,7 @@ static int dccp_v6_do_rcv(struct sock *sk, struct sk_buff *skb) + by tcp. 
Feel free to propose better solution. + --ANK (980728) + */ +- if (np->rxopt.all) ++ if (np->rxopt.all && sk->sk_state != DCCP_LISTEN) + opt_skb = skb_clone_and_charge_r(skb, sk); + + if (sk->sk_state == DCCP_OPEN) { /* Fast path */ +diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c +index 64bdb6d978eed4..f285e52b8b8579 100644 +--- a/net/ipv6/tcp_ipv6.c ++++ b/net/ipv6/tcp_ipv6.c +@@ -1456,7 +1456,7 @@ int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb) + by tcp. Feel free to propose better solution. + --ANK (980728) + */ +- if (np->rxopt.all) ++ if (np->rxopt.all && sk->sk_state != TCP_LISTEN) + opt_skb = skb_clone_and_charge_r(skb, sk); + + reason = SKB_DROP_REASON_NOT_SPECIFIED; +@@ -1495,8 +1495,6 @@ int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb) + if (nsk != sk) { + if (tcp_child_process(sk, nsk, skb)) + goto reset; +- if (opt_skb) +- __kfree_skb(opt_skb); + return 0; + } + } else +diff --git a/net/mac802154/iface.c b/net/mac802154/iface.c +index c0e2da5072bea2..9e4631fade90c9 100644 +--- a/net/mac802154/iface.c ++++ b/net/mac802154/iface.c +@@ -684,6 +684,10 @@ void ieee802154_if_remove(struct ieee802154_sub_if_data *sdata) + ASSERT_RTNL(); + + mutex_lock(&sdata->local->iflist_mtx); ++ if (list_empty(&sdata->local->interfaces)) { ++ mutex_unlock(&sdata->local->iflist_mtx); ++ return; ++ } + list_del_rcu(&sdata->list); + mutex_unlock(&sdata->local->iflist_mtx); + +diff --git a/net/mptcp/options.c b/net/mptcp/options.c +index 2e1539027e6d33..8e6a6dc6e0a409 100644 +--- a/net/mptcp/options.c ++++ b/net/mptcp/options.c +@@ -607,7 +607,6 @@ static bool mptcp_established_options_dss(struct sock *sk, struct sk_buff *skb, + } + opts->ext_copy.use_ack = 1; + opts->suboptions = OPTION_MPTCP_DSS; +- WRITE_ONCE(msk->old_wspace, __mptcp_space((struct sock *)msk)); + + /* Add kind/length/subtype/flag overhead if mapping is not populated */ + if (dss_size == 0) +@@ -1287,7 +1286,7 @@ static void mptcp_set_rwin(struct tcp_sock *tp, struct tcphdr *th) + } + MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_RCVWNDCONFLICT); + } +- return; ++ goto update_wspace; + } + + if (rcv_wnd_new != rcv_wnd_old) { +@@ -1312,6 +1311,9 @@ static void mptcp_set_rwin(struct tcp_sock *tp, struct tcphdr *th) + th->window = htons(new_win); + MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_RCVWNDSHARED); + } ++ ++update_wspace: ++ WRITE_ONCE(msk->old_wspace, tp->rcv_wnd); + } + + __sum16 __mptcp_make_csum(u64 data_seq, u32 subflow_seq, u16 data_len, __wsum sum) +diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h +index 89d1c299ff2b9f..88c762de772875 100644 +--- a/net/mptcp/protocol.h ++++ b/net/mptcp/protocol.h +@@ -685,10 +685,15 @@ static inline u64 mptcp_data_avail(const struct mptcp_sock *msk) + + static inline bool mptcp_epollin_ready(const struct sock *sk) + { ++ u64 data_avail = mptcp_data_avail(mptcp_sk(sk)); ++ ++ if (!data_avail) ++ return false; ++ + /* mptcp doesn't have to deal with small skbs in the receive queue, +- * at it can always coalesce them ++ * as it can always coalesce them + */ +- return (mptcp_data_avail(mptcp_sk(sk)) >= sk->sk_rcvlowat) || ++ return (data_avail >= sk->sk_rcvlowat) || + (mem_cgroup_sockets_enabled && sk->sk_memcg && + mem_cgroup_under_socket_pressure(sk->sk_memcg)) || + READ_ONCE(tcp_memory_pressure); +diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c +index 4f5cbcaa38386f..9445ca97163b40 100644 +--- a/net/openvswitch/actions.c ++++ b/net/openvswitch/actions.c +@@ -918,7 +918,9 @@ static void do_output(struct datapath *dp, struct sk_buff *skb, int 
out_port, + { + struct vport *vport = ovs_vport_rcu(dp, out_port); + +- if (likely(vport && netif_carrier_ok(vport->dev))) { ++ if (likely(vport && ++ netif_running(vport->dev) && ++ netif_carrier_ok(vport->dev))) { + u16 mru = OVS_CB(skb)->mru; + u32 cutlen = OVS_CB(skb)->cutlen; + +diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c +index 6e1cd71d33a599..2050d888df2ae1 100644 +--- a/net/vmw_vsock/af_vsock.c ++++ b/net/vmw_vsock/af_vsock.c +@@ -490,6 +490,15 @@ int vsock_assign_transport(struct vsock_sock *vsk, struct vsock_sock *psk) + */ + vsk->transport->release(vsk); + vsock_deassign_transport(vsk); ++ ++ /* transport's release() and destruct() can touch some socket ++ * state, since we are reassigning the socket to a new transport ++ * during vsock_connect(), let's reset these fields to have a ++ * clean state. ++ */ ++ sock_reset_flag(sk, SOCK_DONE); ++ sk->sk_state = TCP_CLOSE; ++ vsk->peer_shutdown = 0; + } + + /* We increase the module refcnt to prevent the transport unloading +@@ -866,6 +875,9 @@ EXPORT_SYMBOL_GPL(vsock_create_connected); + + s64 vsock_stream_has_data(struct vsock_sock *vsk) + { ++ if (WARN_ON(!vsk->transport)) ++ return 0; ++ + return vsk->transport->stream_has_data(vsk); + } + EXPORT_SYMBOL_GPL(vsock_stream_has_data); +@@ -874,6 +886,9 @@ s64 vsock_connectible_has_data(struct vsock_sock *vsk) + { + struct sock *sk = sk_vsock(vsk); + ++ if (WARN_ON(!vsk->transport)) ++ return 0; ++ + if (sk->sk_type == SOCK_SEQPACKET) + return vsk->transport->seqpacket_has_data(vsk); + else +@@ -883,6 +898,9 @@ EXPORT_SYMBOL_GPL(vsock_connectible_has_data); + + s64 vsock_stream_has_space(struct vsock_sock *vsk) + { ++ if (WARN_ON(!vsk->transport)) ++ return 0; ++ + return vsk->transport->stream_has_space(vsk); + } + EXPORT_SYMBOL_GPL(vsock_stream_has_space); +diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c +index 43495820b64fb1..c57fe7ddcf73bf 100644 +--- a/net/vmw_vsock/virtio_transport_common.c ++++ b/net/vmw_vsock/virtio_transport_common.c +@@ -26,6 +26,9 @@ + /* Threshold for detecting small packets to copy */ + #define GOOD_COPY_LEN 128 + ++static void virtio_transport_cancel_close_work(struct vsock_sock *vsk, ++ bool cancel_timeout); ++ + static const struct virtio_transport * + virtio_transport_get_ops(struct vsock_sock *vsk) + { +@@ -922,6 +925,8 @@ void virtio_transport_destruct(struct vsock_sock *vsk) + { + struct virtio_vsock_sock *vvs = vsk->trans; + ++ virtio_transport_cancel_close_work(vsk, true); ++ + kfree(vvs); + vsk->trans = NULL; + } +@@ -1004,17 +1009,11 @@ static void virtio_transport_wait_close(struct sock *sk, long timeout) + } + } + +-static void virtio_transport_do_close(struct vsock_sock *vsk, +- bool cancel_timeout) ++static void virtio_transport_cancel_close_work(struct vsock_sock *vsk, ++ bool cancel_timeout) + { + struct sock *sk = sk_vsock(vsk); + +- sock_set_flag(sk, SOCK_DONE); +- vsk->peer_shutdown = SHUTDOWN_MASK; +- if (vsock_stream_has_data(vsk) <= 0) +- sk->sk_state = TCP_CLOSING; +- sk->sk_state_change(sk); +- + if (vsk->close_work_scheduled && + (!cancel_timeout || cancel_delayed_work(&vsk->close_work))) { + vsk->close_work_scheduled = false; +@@ -1026,6 +1025,20 @@ static void virtio_transport_do_close(struct vsock_sock *vsk, + } + } + ++static void virtio_transport_do_close(struct vsock_sock *vsk, ++ bool cancel_timeout) ++{ ++ struct sock *sk = sk_vsock(vsk); ++ ++ sock_set_flag(sk, SOCK_DONE); ++ vsk->peer_shutdown = SHUTDOWN_MASK; ++ if (vsock_stream_has_data(vsk) <= 0) ++ 
sk->sk_state = TCP_CLOSING; ++ sk->sk_state_change(sk); ++ ++ virtio_transport_cancel_close_work(vsk, cancel_timeout); ++} ++ + static void virtio_transport_close_timeout(struct work_struct *work) + { + struct vsock_sock *vsk = +@@ -1428,8 +1441,11 @@ void virtio_transport_recv_pkt(struct virtio_transport *t, + + lock_sock(sk); + +- /* Check if sk has been closed before lock_sock */ +- if (sock_flag(sk, SOCK_DONE)) { ++ /* Check if sk has been closed or assigned to another transport before ++ * lock_sock (note: listener sockets are not assigned to any transport) ++ */ ++ if (sock_flag(sk, SOCK_DONE) || ++ (sk->sk_state != TCP_LISTEN && vsk->transport != &t->transport)) { + (void)virtio_transport_reset_no_sock(t, skb); + release_sock(sk); + sock_put(sk); +diff --git a/net/vmw_vsock/vsock_bpf.c b/net/vmw_vsock/vsock_bpf.c +index 4aa6e74ec2957b..f201d9eca1df2f 100644 +--- a/net/vmw_vsock/vsock_bpf.c ++++ b/net/vmw_vsock/vsock_bpf.c +@@ -77,6 +77,7 @@ static int vsock_bpf_recvmsg(struct sock *sk, struct msghdr *msg, + size_t len, int flags, int *addr_len) + { + struct sk_psock *psock; ++ struct vsock_sock *vsk; + int copied; + + psock = sk_psock_get(sk); +@@ -84,6 +85,13 @@ static int vsock_bpf_recvmsg(struct sock *sk, struct msghdr *msg, + return __vsock_recvmsg(sk, msg, len, flags); + + lock_sock(sk); ++ vsk = vsock_sk(sk); ++ ++ if (!vsk->transport) { ++ copied = -ENODEV; ++ goto out; ++ } ++ + if (vsock_has_data(sk, psock) && sk_psock_queue_empty(psock)) { + release_sock(sk); + sk_psock_put(sk, psock); +@@ -108,6 +116,7 @@ static int vsock_bpf_recvmsg(struct sock *sk, struct msghdr *msg, + copied = sk_msg_recvmsg(sk, psock, msg, len, flags); + } + ++out: + release_sock(sk); + sk_psock_put(sk, psock); + +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c +index fc93af80f0bffe..739f8fd1792bd5 100644 +--- a/sound/pci/hda/patch_realtek.c ++++ b/sound/pci/hda/patch_realtek.c +@@ -10430,6 +10430,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = { + SND_PCI_QUIRK(0x1d72, 0x1901, "RedmiBook 14", ALC256_FIXUP_ASUS_HEADSET_MIC), + SND_PCI_QUIRK(0x1d72, 0x1945, "Redmi G", ALC256_FIXUP_ASUS_HEADSET_MIC), + SND_PCI_QUIRK(0x1d72, 0x1947, "RedmiBook Air", ALC255_FIXUP_XIAOMI_HEADSET_MIC), ++ SND_PCI_QUIRK(0x1f66, 0x0105, "Ayaneo Portable Game Player", ALC287_FIXUP_CS35L41_I2C_2), + SND_PCI_QUIRK(0x2782, 0x0214, "VAIO VJFE-CL", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), + SND_PCI_QUIRK(0x2782, 0x0228, "Infinix ZERO BOOK 13", ALC269VB_FIXUP_INFINIX_ZERO_BOOK_13), + SND_PCI_QUIRK(0x2782, 0x0232, "CHUWI CoreBook XPro", ALC269VB_FIXUP_CHUWI_COREBOOK_XPRO), +diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.c b/tools/testing/selftests/net/mptcp/mptcp_connect.c +index 4209b95690394b..414addef9a4514 100644 +--- a/tools/testing/selftests/net/mptcp/mptcp_connect.c ++++ b/tools/testing/selftests/net/mptcp/mptcp_connect.c +@@ -25,6 +25,8 @@ + #include <sys/types.h> + #include <sys/mman.h> + ++#include <arpa/inet.h> ++ + #include <netdb.h> + #include <netinet/in.h> + +@@ -1211,23 +1213,42 @@ static void parse_setsock_options(const char *name) + exit(1); + } + +-void xdisconnect(int fd, int addrlen) ++void xdisconnect(int fd) + { +- struct sockaddr_storage empty; ++ socklen_t addrlen = sizeof(struct sockaddr_storage); ++ struct sockaddr_storage addr, empty; + int msec_sleep = 10; +- int queued = 1; +- int i; ++ void *raw_addr; ++ int i, cmdlen; ++ char cmd[128]; ++ ++ /* get the local address and convert it to string */ ++ if (getsockname(fd, (struct sockaddr *)&addr, &addrlen) < 0) ++ 
xerror("getsockname"); ++ ++ if (addr.ss_family == AF_INET) ++ raw_addr = &(((struct sockaddr_in *)&addr)->sin_addr); ++ else if (addr.ss_family == AF_INET6) ++ raw_addr = &(((struct sockaddr_in6 *)&addr)->sin6_addr); ++ else ++ xerror("bad family"); ++ ++ strcpy(cmd, "ss -M | grep -q "); ++ cmdlen = strlen(cmd); ++ if (!inet_ntop(addr.ss_family, raw_addr, &cmd[cmdlen], ++ sizeof(cmd) - cmdlen)) ++ xerror("inet_ntop"); + + shutdown(fd, SHUT_WR); + +- /* while until the pending data is completely flushed, the later ++ /* ++ * wait until the pending data is completely flushed and all ++ * the MPTCP sockets reached the closed status. + * disconnect will bypass/ignore/drop any pending data. + */ + for (i = 0; ; i += msec_sleep) { +- if (ioctl(fd, SIOCOUTQ, &queued) < 0) +- xerror("can't query out socket queue: %d", errno); +- +- if (!queued) ++ /* closed socket are not listed by 'ss' */ ++ if (system(cmd) != 0) + break; + + if (i > poll_timeout) +@@ -1281,9 +1302,9 @@ int main_loop(void) + return ret; + + if (cfg_truncate > 0) { +- xdisconnect(fd, peer->ai_addrlen); ++ xdisconnect(fd); + } else if (--cfg_repeat > 0) { +- xdisconnect(fd, peer->ai_addrlen); ++ xdisconnect(fd); + + /* the socket could be unblocking at this point, we need the + * connect to be blocking +diff --git a/tools/testing/selftests/tc-testing/tc-tests/filters/flow.json b/tools/testing/selftests/tc-testing/tc-tests/filters/flow.json +index 58189327f6444a..383fbda07245c8 100644 +--- a/tools/testing/selftests/tc-testing/tc-tests/filters/flow.json ++++ b/tools/testing/selftests/tc-testing/tc-tests/filters/flow.json +@@ -78,10 +78,10 @@ + "setup": [ + "$TC qdisc add dev $DEV1 ingress" + ], +- "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 prio 1 protocol ip flow map key dst rshift 0xff", ++ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 prio 1 protocol ip flow map key dst rshift 0x1f", + "expExitCode": "0", + "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 protocol ip prio 1 flow", +- "matchPattern": "filter parent ffff: protocol ip pref 1 flow chain [0-9]+ handle 0x1 map keys dst rshift 255 baseclass", ++ "matchPattern": "filter parent ffff: protocol ip pref 1 flow chain [0-9]+ handle 0x1 map keys dst rshift 31 baseclass", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DEV1 ingress"