commit:     1169eada98fc086c3d9e56cfa8212f7ee5f73dc7
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed May 27 16:24:47 2020 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed May 27 16:24:47 2020 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=1169eada
Linux patch 4.19.125 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org> 0000_README | 4 + 1124_linux-4.19.125.patch | 2436 +++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 2440 insertions(+) diff --git a/0000_README b/0000_README index 63f9a11..a36c8a4 100644 --- a/0000_README +++ b/0000_README @@ -535,6 +535,10 @@ Patch: 1123_linux-4.19.124.patch From: https://www.kernel.org Desc: Linux 4.19.124 +Patch: 1124_linux-4.19.125.patch +From: https://www.kernel.org +Desc: Linux 4.19.125 + Patch: 1500_XATTR_USER_PREFIX.patch From: https://bugs.gentoo.org/show_bug.cgi?id=470644 Desc: Support for namespace user.pax.* on tmpfs. diff --git a/1124_linux-4.19.125.patch b/1124_linux-4.19.125.patch new file mode 100644 index 0000000..d340902 --- /dev/null +++ b/1124_linux-4.19.125.patch @@ -0,0 +1,2436 @@ +diff --git a/Makefile b/Makefile +index 292c92c8369d..93c63bda7115 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 4 + PATCHLEVEL = 19 +-SUBLEVEL = 124 ++SUBLEVEL = 125 + EXTRAVERSION = + NAME = "People's Front" + +diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h +index ffebe7b7a5b7..91ca80035fc4 100644 +--- a/arch/arm/include/asm/futex.h ++++ b/arch/arm/include/asm/futex.h +@@ -163,8 +163,13 @@ arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr) + preempt_enable(); + #endif + +- if (!ret) +- *oval = oldval; ++ /* ++ * Store unconditionally. If ret != 0 the extra store is the least ++ * of the worries but GCC cannot figure out that __futex_atomic_op() ++ * is either setting ret to -EFAULT or storing the old value in ++ * oldval which results in a uninitialized warning at the call site. ++ */ ++ *oval = oldval; + + return ret; + } +diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig +index 6f475dc5829b..f38d153d2586 100644 +--- a/arch/powerpc/Kconfig ++++ b/arch/powerpc/Kconfig +@@ -139,7 +139,7 @@ config PPC + select ARCH_HAS_MEMBARRIER_CALLBACKS + select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE + select ARCH_HAS_SG_CHAIN +- select ARCH_HAS_STRICT_KERNEL_RWX if ((PPC_BOOK3S_64 || PPC32) && !RELOCATABLE && !HIBERNATION) ++ select ARCH_HAS_STRICT_KERNEL_RWX if (PPC32 && !HIBERNATION) + select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST + select ARCH_HAS_UACCESS_FLUSHCACHE if PPC64 + select ARCH_HAS_UBSAN_SANITIZE_ALL +diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c +index 9713d4e8c22b..6558617bd2ce 100644 +--- a/arch/riscv/kernel/setup.c ++++ b/arch/riscv/kernel/setup.c +@@ -19,6 +19,7 @@ + * to the Free Software Foundation, Inc., + */ + ++#include <linux/bootmem.h> + #include <linux/init.h> + #include <linux/mm.h> + #include <linux/memblock.h> +@@ -187,6 +188,7 @@ static void __init setup_bootmem(void) + + set_max_mapnr(PFN_DOWN(mem_size)); + max_low_pfn = PFN_DOWN(memblock_end_of_DRAM()); ++ max_pfn = max_low_pfn; + + #ifdef CONFIG_BLK_DEV_INITRD + setup_initrd(); +diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c +index 1ca76ca944ba..53dc8492f02f 100644 +--- a/arch/x86/kernel/apic/apic.c ++++ b/arch/x86/kernel/apic/apic.c +@@ -345,8 +345,6 @@ static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen) + * According to Intel, MFENCE can do the serialization here. 
+ */ + asm volatile("mfence" : : : "memory"); +- +- printk_once(KERN_DEBUG "TSC deadline timer enabled\n"); + return; + } + +@@ -545,7 +543,7 @@ static DEFINE_PER_CPU(struct clock_event_device, lapic_events); + #define DEADLINE_MODEL_MATCH_REV(model, rev) \ + { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)rev } + +-static u32 hsx_deadline_rev(void) ++static __init u32 hsx_deadline_rev(void) + { + switch (boot_cpu_data.x86_stepping) { + case 0x02: return 0x3a; /* EP */ +@@ -555,7 +553,7 @@ static u32 hsx_deadline_rev(void) + return ~0U; + } + +-static u32 bdx_deadline_rev(void) ++static __init u32 bdx_deadline_rev(void) + { + switch (boot_cpu_data.x86_stepping) { + case 0x02: return 0x00000011; +@@ -567,7 +565,7 @@ static u32 bdx_deadline_rev(void) + return ~0U; + } + +-static u32 skx_deadline_rev(void) ++static __init u32 skx_deadline_rev(void) + { + switch (boot_cpu_data.x86_stepping) { + case 0x03: return 0x01000136; +@@ -580,7 +578,7 @@ static u32 skx_deadline_rev(void) + return ~0U; + } + +-static const struct x86_cpu_id deadline_match[] = { ++static const struct x86_cpu_id deadline_match[] __initconst = { + DEADLINE_MODEL_MATCH_FUNC( INTEL_FAM6_HASWELL_X, hsx_deadline_rev), + DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_BROADWELL_X, 0x0b000020), + DEADLINE_MODEL_MATCH_FUNC( INTEL_FAM6_BROADWELL_XEON_D, bdx_deadline_rev), +@@ -602,18 +600,19 @@ static const struct x86_cpu_id deadline_match[] = { + {}, + }; + +-static void apic_check_deadline_errata(void) ++static __init bool apic_validate_deadline_timer(void) + { + const struct x86_cpu_id *m; + u32 rev; + +- if (!boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER) || +- boot_cpu_has(X86_FEATURE_HYPERVISOR)) +- return; ++ if (!boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER)) ++ return false; ++ if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) ++ return true; + + m = x86_match_cpu(deadline_match); + if (!m) +- return; ++ return true; + + /* + * Function pointers will have the MSB set due to address layout, +@@ -625,11 +624,12 @@ static void apic_check_deadline_errata(void) + rev = (u32)m->driver_data; + + if (boot_cpu_data.microcode >= rev) +- return; ++ return true; + + setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER); + pr_err(FW_BUG "TSC_DEADLINE disabled due to Errata; " + "please update microcode to version: 0x%x (or later)\n", rev); ++ return false; + } + + /* +@@ -2023,7 +2023,8 @@ void __init init_apic_mappings(void) + { + unsigned int new_apicid; + +- apic_check_deadline_errata(); ++ if (apic_validate_deadline_timer()) ++ pr_debug("TSC deadline timer available\n"); + + if (x2apic_mode) { + boot_cpu_physical_apicid = read_apic_id(); +diff --git a/arch/x86/kernel/unwind_orc.c b/arch/x86/kernel/unwind_orc.c +index b48e2686440b..2701b370e58f 100644 +--- a/arch/x86/kernel/unwind_orc.c ++++ b/arch/x86/kernel/unwind_orc.c +@@ -300,12 +300,19 @@ EXPORT_SYMBOL_GPL(unwind_get_return_address); + + unsigned long *unwind_get_return_address_ptr(struct unwind_state *state) + { ++ struct task_struct *task = state->task; ++ + if (unwind_done(state)) + return NULL; + + if (state->regs) + return &state->regs->ip; + ++ if (task != current && state->sp == task->thread.sp) { ++ struct inactive_task_frame *frame = (void *)task->thread.sp; ++ return &frame->ret_addr; ++ } ++ + if (state->sp) + return (unsigned long *)state->sp - 1; + +diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c +index df2274414640..226db3dc490b 100644 +--- a/arch/x86/kvm/svm.c ++++ b/arch/x86/kvm/svm.c +@@ -998,33 +998,32 @@ static void svm_cpu_uninit(int cpu) + static int 
svm_cpu_init(int cpu) + { + struct svm_cpu_data *sd; +- int r; + + sd = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL); + if (!sd) + return -ENOMEM; + sd->cpu = cpu; +- r = -ENOMEM; + sd->save_area = alloc_page(GFP_KERNEL); + if (!sd->save_area) +- goto err_1; ++ goto free_cpu_data; + + if (svm_sev_enabled()) { +- r = -ENOMEM; + sd->sev_vmcbs = kmalloc_array(max_sev_asid + 1, + sizeof(void *), + GFP_KERNEL); + if (!sd->sev_vmcbs) +- goto err_1; ++ goto free_save_area; + } + + per_cpu(svm_data, cpu) = sd; + + return 0; + +-err_1: ++free_save_area: ++ __free_page(sd->save_area); ++free_cpu_data: + kfree(sd); +- return r; ++ return -ENOMEM; + + } + +diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c +index 8340c81b258b..dd4c7289610e 100644 +--- a/drivers/acpi/nfit/core.c ++++ b/drivers/acpi/nfit/core.c +@@ -1773,9 +1773,17 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc, + dev_set_drvdata(&adev_dimm->dev, nfit_mem); + + /* +- * Until standardization materializes we need to consider 4 +- * different command sets. Note, that checking for function0 (bit0) +- * tells us if any commands are reachable through this GUID. ++ * There are 4 "legacy" NVDIMM command sets ++ * (NVDIMM_FAMILY_{INTEL,MSFT,HPE1,HPE2}) that were created before ++ * an EFI working group was established to constrain this ++ * proliferation. The nfit driver probes for the supported command ++ * set by GUID. Note, if you're a platform developer looking to add ++ * a new command set to this probe, consider using an existing set, ++ * or otherwise seek approval to publish the command set at ++ * http://www.uefi.org/RFIC_LIST. ++ * ++ * Note, that checking for function0 (bit0) tells us if any commands ++ * are reachable through this GUID. + */ + for (i = 0; i <= NVDIMM_FAMILY_MAX; i++) + if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 1)) +@@ -1798,6 +1806,8 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc, + dsm_mask &= ~(1 << 8); + } else if (nfit_mem->family == NVDIMM_FAMILY_MSFT) { + dsm_mask = 0xffffffff; ++ } else if (nfit_mem->family == NVDIMM_FAMILY_HYPERV) { ++ dsm_mask = 0x1f; + } else { + dev_dbg(dev, "unknown dimm command family\n"); + nfit_mem->family = -1; +@@ -3622,6 +3632,7 @@ static __init int nfit_init(void) + guid_parse(UUID_NFIT_DIMM_N_HPE1, &nfit_uuid[NFIT_DEV_DIMM_N_HPE1]); + guid_parse(UUID_NFIT_DIMM_N_HPE2, &nfit_uuid[NFIT_DEV_DIMM_N_HPE2]); + guid_parse(UUID_NFIT_DIMM_N_MSFT, &nfit_uuid[NFIT_DEV_DIMM_N_MSFT]); ++ guid_parse(UUID_NFIT_DIMM_N_HYPERV, &nfit_uuid[NFIT_DEV_DIMM_N_HYPERV]); + + nfit_wq = create_singlethread_workqueue("nfit"); + if (!nfit_wq) +diff --git a/drivers/acpi/nfit/nfit.h b/drivers/acpi/nfit/nfit.h +index 68848fc4b7c9..cc2ec62951de 100644 +--- a/drivers/acpi/nfit/nfit.h ++++ b/drivers/acpi/nfit/nfit.h +@@ -34,11 +34,14 @@ + /* https://msdn.microsoft.com/library/windows/hardware/mt604741 */ + #define UUID_NFIT_DIMM_N_MSFT "1ee68b36-d4bd-4a1a-9a16-4f8e53d46e05" + ++/* http://www.uefi.org/RFIC_LIST (see "Virtual NVDIMM 0x1901") */ ++#define UUID_NFIT_DIMM_N_HYPERV "5746c5f2-a9a2-4264-ad0e-e4ddc9e09e80" ++ + #define ACPI_NFIT_MEM_FAILED_MASK (ACPI_NFIT_MEM_SAVE_FAILED \ + | ACPI_NFIT_MEM_RESTORE_FAILED | ACPI_NFIT_MEM_FLUSH_FAILED \ + | ACPI_NFIT_MEM_NOT_ARMED | ACPI_NFIT_MEM_MAP_FAILED) + +-#define NVDIMM_FAMILY_MAX NVDIMM_FAMILY_MSFT ++#define NVDIMM_FAMILY_MAX NVDIMM_FAMILY_HYPERV + + #define NVDIMM_STANDARD_CMDMASK \ + (1 << ND_CMD_SMART | 1 << ND_CMD_SMART_THRESHOLD | 1 << ND_CMD_DIMM_FLAGS \ +@@ -75,6 +78,7 @@ enum nfit_uuids { 
+ NFIT_DEV_DIMM_N_HPE1 = NVDIMM_FAMILY_HPE1, + NFIT_DEV_DIMM_N_HPE2 = NVDIMM_FAMILY_HPE2, + NFIT_DEV_DIMM_N_MSFT = NVDIMM_FAMILY_MSFT, ++ NFIT_DEV_DIMM_N_HYPERV = NVDIMM_FAMILY_HYPERV, + NFIT_SPA_VOLATILE, + NFIT_SPA_PM, + NFIT_SPA_DCR, +diff --git a/drivers/base/component.c b/drivers/base/component.c +index 7f7c4233cd31..ee4d3b388f44 100644 +--- a/drivers/base/component.c ++++ b/drivers/base/component.c +@@ -235,7 +235,8 @@ static int try_to_bring_up_master(struct master *master, + ret = master->ops->bind(master->dev); + if (ret < 0) { + devres_release_group(master->dev, NULL); +- dev_info(master->dev, "master bind failed: %d\n", ret); ++ if (ret != -EPROBE_DEFER) ++ dev_info(master->dev, "master bind failed: %d\n", ret); + return ret; + } + +@@ -506,8 +507,9 @@ static int component_bind(struct component *component, struct master *master, + devres_release_group(component->dev, NULL); + devres_release_group(master->dev, NULL); + +- dev_err(master->dev, "failed to bind %s (ops %ps): %d\n", +- dev_name(component->dev), component->ops, ret); ++ if (ret != -EPROBE_DEFER) ++ dev_err(master->dev, "failed to bind %s (ops %ps): %d\n", ++ dev_name(component->dev), component->ops, ret); + } + + return ret; +diff --git a/drivers/dma/owl-dma.c b/drivers/dma/owl-dma.c +index 7812a6338acd..7ff04bf04b31 100644 +--- a/drivers/dma/owl-dma.c ++++ b/drivers/dma/owl-dma.c +@@ -172,13 +172,11 @@ struct owl_dma_txd { + * @id: physical index to this channel + * @base: virtual memory base for the dma channel + * @vchan: the virtual channel currently being served by this physical channel +- * @lock: a lock to use when altering an instance of this struct + */ + struct owl_dma_pchan { + u32 id; + void __iomem *base; + struct owl_dma_vchan *vchan; +- spinlock_t lock; + }; + + /** +@@ -396,14 +394,14 @@ static struct owl_dma_pchan *owl_dma_get_pchan(struct owl_dma *od, + for (i = 0; i < od->nr_pchans; i++) { + pchan = &od->pchans[i]; + +- spin_lock_irqsave(&pchan->lock, flags); ++ spin_lock_irqsave(&od->lock, flags); + if (!pchan->vchan) { + pchan->vchan = vchan; +- spin_unlock_irqrestore(&pchan->lock, flags); ++ spin_unlock_irqrestore(&od->lock, flags); + break; + } + +- spin_unlock_irqrestore(&pchan->lock, flags); ++ spin_unlock_irqrestore(&od->lock, flags); + } + + return pchan; +diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c +index 4f4733d831a1..045351f3549c 100644 +--- a/drivers/dma/tegra210-adma.c ++++ b/drivers/dma/tegra210-adma.c +@@ -793,7 +793,7 @@ static int tegra_adma_probe(struct platform_device *pdev) + ret = dma_async_device_register(&tdma->dma_dev); + if (ret < 0) { + dev_err(&pdev->dev, "ADMA registration failed: %d\n", ret); +- goto irq_dispose; ++ goto rpm_put; + } + + ret = of_dma_controller_register(pdev->dev.of_node, +diff --git a/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c +index 3ce77cbad4ae..b3464d2dc2b4 100644 +--- a/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c ++++ b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c +@@ -453,7 +453,7 @@ static const struct etnaviv_pm_domain *pm_domain(const struct etnaviv_gpu *gpu, + if (!(gpu->identity.features & meta->feature)) + continue; + +- if (meta->nr_domains < (index - offset)) { ++ if (index - offset >= meta->nr_domains) { + offset += meta->nr_domains; + continue; + } +diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c +index 3019dbc39aef..83f30d7b6abe 100644 +--- a/drivers/gpu/drm/i915/gvt/display.c ++++ b/drivers/gpu/drm/i915/gvt/display.c +@@ -206,14 +206,41 
@@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) + SKL_FUSE_PG_DIST_STATUS(SKL_PG0) | + SKL_FUSE_PG_DIST_STATUS(SKL_PG1) | + SKL_FUSE_PG_DIST_STATUS(SKL_PG2); +- vgpu_vreg_t(vgpu, LCPLL1_CTL) |= +- LCPLL_PLL_ENABLE | +- LCPLL_PLL_LOCK; +- vgpu_vreg_t(vgpu, LCPLL2_CTL) |= LCPLL_PLL_ENABLE; +- ++ /* ++ * Only 1 PIPE enabled in current vGPU display and PIPE_A is ++ * tied to TRANSCODER_A in HW, so it's safe to assume PIPE_A, ++ * TRANSCODER_A can be enabled. PORT_x depends on the input of ++ * setup_virtual_dp_monitor, we can bind DPLL0 to any PORT_x ++ * so we fixed to DPLL0 here. ++ * Setup DPLL0: DP link clk 1620 MHz, non SSC, DP Mode ++ */ ++ vgpu_vreg_t(vgpu, DPLL_CTRL1) = ++ DPLL_CTRL1_OVERRIDE(DPLL_ID_SKL_DPLL0); ++ vgpu_vreg_t(vgpu, DPLL_CTRL1) |= ++ DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, DPLL_ID_SKL_DPLL0); ++ vgpu_vreg_t(vgpu, LCPLL1_CTL) = ++ LCPLL_PLL_ENABLE | LCPLL_PLL_LOCK; ++ vgpu_vreg_t(vgpu, DPLL_STATUS) = DPLL_LOCK(DPLL_ID_SKL_DPLL0); ++ /* ++ * Golden M/N are calculated based on: ++ * 24 bpp, 4 lanes, 154000 pixel clk (from virtual EDID), ++ * DP link clk 1620 MHz and non-constant_n. ++ * TODO: calculate DP link symbol clk and stream clk m/n. ++ */ ++ vgpu_vreg_t(vgpu, PIPE_DATA_M1(TRANSCODER_A)) = 63 << TU_SIZE_SHIFT; ++ vgpu_vreg_t(vgpu, PIPE_DATA_M1(TRANSCODER_A)) |= 0x5b425e; ++ vgpu_vreg_t(vgpu, PIPE_DATA_N1(TRANSCODER_A)) = 0x800000; ++ vgpu_vreg_t(vgpu, PIPE_LINK_M1(TRANSCODER_A)) = 0x3cd6e; ++ vgpu_vreg_t(vgpu, PIPE_LINK_N1(TRANSCODER_A)) = 0x80000; + } + + if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) { ++ vgpu_vreg_t(vgpu, DPLL_CTRL2) &= ++ ~DPLL_CTRL2_DDI_CLK_OFF(PORT_B); ++ vgpu_vreg_t(vgpu, DPLL_CTRL2) |= ++ DPLL_CTRL2_DDI_CLK_SEL(DPLL_ID_SKL_DPLL0, PORT_B); ++ vgpu_vreg_t(vgpu, DPLL_CTRL2) |= ++ DPLL_CTRL2_DDI_SEL_OVERRIDE(PORT_B); + vgpu_vreg_t(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIB_DETECTED; + vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &= + ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK | +@@ -234,6 +261,12 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) + } + + if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) { ++ vgpu_vreg_t(vgpu, DPLL_CTRL2) &= ++ ~DPLL_CTRL2_DDI_CLK_OFF(PORT_C); ++ vgpu_vreg_t(vgpu, DPLL_CTRL2) |= ++ DPLL_CTRL2_DDI_CLK_SEL(DPLL_ID_SKL_DPLL0, PORT_C); ++ vgpu_vreg_t(vgpu, DPLL_CTRL2) |= ++ DPLL_CTRL2_DDI_SEL_OVERRIDE(PORT_C); + vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTC_HOTPLUG_CPT; + vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &= + ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK | +@@ -254,6 +287,12 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) + } + + if (intel_vgpu_has_monitor_on_port(vgpu, PORT_D)) { ++ vgpu_vreg_t(vgpu, DPLL_CTRL2) &= ++ ~DPLL_CTRL2_DDI_CLK_OFF(PORT_D); ++ vgpu_vreg_t(vgpu, DPLL_CTRL2) |= ++ DPLL_CTRL2_DDI_CLK_SEL(DPLL_ID_SKL_DPLL0, PORT_D); ++ vgpu_vreg_t(vgpu, DPLL_CTRL2) |= ++ DPLL_CTRL2_DDI_SEL_OVERRIDE(PORT_D); + vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTD_HOTPLUG_CPT; + vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &= + ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK | +diff --git a/drivers/hid/hid-alps.c b/drivers/hid/hid-alps.c +index 895f49b565ee..3489f0af7409 100644 +--- a/drivers/hid/hid-alps.c ++++ b/drivers/hid/hid-alps.c +@@ -806,6 +806,7 @@ static int alps_probe(struct hid_device *hdev, const struct hid_device_id *id) + break; + case HID_DEVICE_ID_ALPS_U1_DUAL: + case HID_DEVICE_ID_ALPS_U1: ++ case HID_DEVICE_ID_ALPS_U1_UNICORN_LEGACY: + data->dev_type = U1; + break; + default: +diff --git 
a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h +index b2fff44c8109..c1fed1aaecdf 100644 +--- a/drivers/hid/hid-ids.h ++++ b/drivers/hid/hid-ids.h +@@ -81,10 +81,10 @@ + #define HID_DEVICE_ID_ALPS_U1_DUAL_PTP 0x121F + #define HID_DEVICE_ID_ALPS_U1_DUAL_3BTN_PTP 0x1220 + #define HID_DEVICE_ID_ALPS_U1 0x1215 ++#define HID_DEVICE_ID_ALPS_U1_UNICORN_LEGACY 0x121E + #define HID_DEVICE_ID_ALPS_T4_BTNLESS 0x120C + #define HID_DEVICE_ID_ALPS_1222 0x1222 + +- + #define USB_VENDOR_ID_AMI 0x046b + #define USB_DEVICE_ID_AMI_VIRT_KEYBOARD_AND_MOUSE 0xff10 + +@@ -378,6 +378,7 @@ + #define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7349 0x7349 + #define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_73F7 0x73f7 + #define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001 0xa001 ++#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_C002 0xc002 + + #define USB_VENDOR_ID_ELAN 0x04f3 + #define USB_DEVICE_ID_TOSHIBA_CLICK_L9W 0x0401 +@@ -1063,6 +1064,9 @@ + #define USB_DEVICE_ID_SYMBOL_SCANNER_2 0x1300 + #define USB_DEVICE_ID_SYMBOL_SCANNER_3 0x1200 + ++#define I2C_VENDOR_ID_SYNAPTICS 0x06cb ++#define I2C_PRODUCT_ID_SYNAPTICS_SYNA2393 0x7a13 ++ + #define USB_VENDOR_ID_SYNAPTICS 0x06cb + #define USB_DEVICE_ID_SYNAPTICS_TP 0x0001 + #define USB_DEVICE_ID_SYNAPTICS_INT_TP 0x0002 +@@ -1077,6 +1081,7 @@ + #define USB_DEVICE_ID_SYNAPTICS_LTS2 0x1d10 + #define USB_DEVICE_ID_SYNAPTICS_HD 0x0ac3 + #define USB_DEVICE_ID_SYNAPTICS_QUAD_HD 0x1ac3 ++#define USB_DEVICE_ID_SYNAPTICS_DELL_K12A 0x2819 + #define USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_012 0x2968 + #define USB_DEVICE_ID_SYNAPTICS_TP_V103 0x5710 + +diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c +index 19dfd8acd0da..8baf10beb1d5 100644 +--- a/drivers/hid/hid-multitouch.c ++++ b/drivers/hid/hid-multitouch.c +@@ -1909,6 +1909,9 @@ static const struct hid_device_id mt_devices[] = { + { .driver_data = MT_CLS_EGALAX_SERIAL, + MT_USB_DEVICE(USB_VENDOR_ID_DWAV, + USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001) }, ++ { .driver_data = MT_CLS_EGALAX, ++ MT_USB_DEVICE(USB_VENDOR_ID_DWAV, ++ USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_C002) }, + + /* Elitegroup panel */ + { .driver_data = MT_CLS_SERIAL, +diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c +index b9529bed4d76..e5beee3e8582 100644 +--- a/drivers/hid/hid-quirks.c ++++ b/drivers/hid/hid-quirks.c +@@ -163,6 +163,7 @@ static const struct hid_device_id hid_quirks[] = { + { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_LTS2), HID_QUIRK_NO_INIT_REPORTS }, + { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_QUAD_HD), HID_QUIRK_NO_INIT_REPORTS }, + { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_TP_V103), HID_QUIRK_NO_INIT_REPORTS }, ++ { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_DELL_K12A), HID_QUIRK_NO_INIT_REPORTS }, + { HID_USB_DEVICE(USB_VENDOR_ID_TOPMAX, USB_DEVICE_ID_TOPMAX_COBRAPAD), HID_QUIRK_BADPAD }, + { HID_USB_DEVICE(USB_VENDOR_ID_TOUCHPACK, USB_DEVICE_ID_TOUCHPACK_RTS), HID_QUIRK_MULTI_INPUT }, + { HID_USB_DEVICE(USB_VENDOR_ID_TPV, USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN_8882), HID_QUIRK_NOGET }, +diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c +index f2c8c59fc582..f17ebbe53abf 100644 +--- a/drivers/hid/i2c-hid/i2c-hid-core.c ++++ b/drivers/hid/i2c-hid/i2c-hid-core.c +@@ -187,6 +187,8 @@ static const struct i2c_hid_quirks { + I2C_HID_QUIRK_BOGUS_IRQ }, + { USB_VENDOR_ID_ALPS_JP, HID_ANY_ID, + I2C_HID_QUIRK_RESET_ON_RESUME }, ++ { I2C_VENDOR_ID_SYNAPTICS, I2C_PRODUCT_ID_SYNAPTICS_SYNA2393, ++ 
I2C_HID_QUIRK_RESET_ON_RESUME }, + { USB_VENDOR_ID_ITE, I2C_DEVICE_ID_ITE_LENOVO_LEGION_Y720, + I2C_HID_QUIRK_BAD_INPUT_SIZE }, + { 0, 0 } +diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c +index cb07651f4b46..cbda91a0cb5f 100644 +--- a/drivers/i2c/i2c-dev.c ++++ b/drivers/i2c/i2c-dev.c +@@ -48,7 +48,7 @@ + struct i2c_dev { + struct list_head list; + struct i2c_adapter *adap; +- struct device *dev; ++ struct device dev; + struct cdev cdev; + }; + +@@ -92,12 +92,14 @@ static struct i2c_dev *get_free_i2c_dev(struct i2c_adapter *adap) + return i2c_dev; + } + +-static void put_i2c_dev(struct i2c_dev *i2c_dev) ++static void put_i2c_dev(struct i2c_dev *i2c_dev, bool del_cdev) + { + spin_lock(&i2c_dev_list_lock); + list_del(&i2c_dev->list); + spin_unlock(&i2c_dev_list_lock); +- kfree(i2c_dev); ++ if (del_cdev) ++ cdev_device_del(&i2c_dev->cdev, &i2c_dev->dev); ++ put_device(&i2c_dev->dev); + } + + static ssize_t name_show(struct device *dev, +@@ -636,6 +638,14 @@ static const struct file_operations i2cdev_fops = { + + static struct class *i2c_dev_class; + ++static void i2cdev_dev_release(struct device *dev) ++{ ++ struct i2c_dev *i2c_dev; ++ ++ i2c_dev = container_of(dev, struct i2c_dev, dev); ++ kfree(i2c_dev); ++} ++ + static int i2cdev_attach_adapter(struct device *dev, void *dummy) + { + struct i2c_adapter *adap; +@@ -652,27 +662,23 @@ static int i2cdev_attach_adapter(struct device *dev, void *dummy) + + cdev_init(&i2c_dev->cdev, &i2cdev_fops); + i2c_dev->cdev.owner = THIS_MODULE; +- res = cdev_add(&i2c_dev->cdev, MKDEV(I2C_MAJOR, adap->nr), 1); +- if (res) +- goto error_cdev; +- +- /* register this i2c device with the driver core */ +- i2c_dev->dev = device_create(i2c_dev_class, &adap->dev, +- MKDEV(I2C_MAJOR, adap->nr), NULL, +- "i2c-%d", adap->nr); +- if (IS_ERR(i2c_dev->dev)) { +- res = PTR_ERR(i2c_dev->dev); +- goto error; ++ ++ device_initialize(&i2c_dev->dev); ++ i2c_dev->dev.devt = MKDEV(I2C_MAJOR, adap->nr); ++ i2c_dev->dev.class = i2c_dev_class; ++ i2c_dev->dev.parent = &adap->dev; ++ i2c_dev->dev.release = i2cdev_dev_release; ++ dev_set_name(&i2c_dev->dev, "i2c-%d", adap->nr); ++ ++ res = cdev_device_add(&i2c_dev->cdev, &i2c_dev->dev); ++ if (res) { ++ put_i2c_dev(i2c_dev, false); ++ return res; + } + + pr_debug("i2c-dev: adapter [%s] registered as minor %d\n", + adap->name, adap->nr); + return 0; +-error: +- cdev_del(&i2c_dev->cdev); +-error_cdev: +- put_i2c_dev(i2c_dev); +- return res; + } + + static int i2cdev_detach_adapter(struct device *dev, void *dummy) +@@ -688,9 +694,7 @@ static int i2cdev_detach_adapter(struct device *dev, void *dummy) + if (!i2c_dev) /* attach_adapter must have failed */ + return 0; + +- cdev_del(&i2c_dev->cdev); +- put_i2c_dev(i2c_dev); +- device_destroy(i2c_dev_class, MKDEV(I2C_MAJOR, adap->nr)); ++ put_i2c_dev(i2c_dev, true); + + pr_debug("i2c-dev: adapter [%s] unregistered\n", adap->name); + return 0; +diff --git a/drivers/i2c/muxes/i2c-demux-pinctrl.c b/drivers/i2c/muxes/i2c-demux-pinctrl.c +index 035032e20327..9ba9ce5696e1 100644 +--- a/drivers/i2c/muxes/i2c-demux-pinctrl.c ++++ b/drivers/i2c/muxes/i2c-demux-pinctrl.c +@@ -273,6 +273,7 @@ static int i2c_demux_pinctrl_probe(struct platform_device *pdev) + err_rollback_available: + device_remove_file(&pdev->dev, &dev_attr_available_masters); + err_rollback: ++ i2c_demux_deactivate_master(priv); + for (j = 0; j < i; j++) { + of_node_put(priv->chan[j].parent_np); + of_changeset_destroy(&priv->chan[j].chgset); +diff --git a/drivers/iio/accel/sca3000.c b/drivers/iio/accel/sca3000.c +index 
4964561595f5..7218acf1a907 100644 +--- a/drivers/iio/accel/sca3000.c ++++ b/drivers/iio/accel/sca3000.c +@@ -982,7 +982,7 @@ static int sca3000_read_data(struct sca3000_state *st, + st->tx[0] = SCA3000_READ_REG(reg_address_high); + ret = spi_sync_transfer(st->us, xfer, ARRAY_SIZE(xfer)); + if (ret) { +- dev_err(get_device(&st->us->dev), "problem reading register"); ++ dev_err(&st->us->dev, "problem reading register\n"); + return ret; + } + +diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c +index 24d5d049567a..59fd8b620c50 100644 +--- a/drivers/iio/adc/stm32-adc.c ++++ b/drivers/iio/adc/stm32-adc.c +@@ -1682,15 +1682,27 @@ static int stm32_adc_chan_of_init(struct iio_dev *indio_dev) + return 0; + } + +-static int stm32_adc_dma_request(struct iio_dev *indio_dev) ++static int stm32_adc_dma_request(struct device *dev, struct iio_dev *indio_dev) + { + struct stm32_adc *adc = iio_priv(indio_dev); + struct dma_slave_config config; + int ret; + +- adc->dma_chan = dma_request_slave_channel(&indio_dev->dev, "rx"); +- if (!adc->dma_chan) ++ adc->dma_chan = dma_request_chan(dev, "rx"); ++ if (IS_ERR(adc->dma_chan)) { ++ ret = PTR_ERR(adc->dma_chan); ++ if (ret != -ENODEV) { ++ if (ret != -EPROBE_DEFER) ++ dev_err(dev, ++ "DMA channel request failed with %d\n", ++ ret); ++ return ret; ++ } ++ ++ /* DMA is optional: fall back to IRQ mode */ ++ adc->dma_chan = NULL; + return 0; ++ } + + adc->rx_buf = dma_alloc_coherent(adc->dma_chan->device->dev, + STM32_DMA_BUFFER_SIZE, +@@ -1804,7 +1816,7 @@ static int stm32_adc_probe(struct platform_device *pdev) + if (ret < 0) + goto err_clk_disable; + +- ret = stm32_adc_dma_request(indio_dev); ++ ret = stm32_adc_dma_request(dev, indio_dev); + if (ret < 0) + goto err_clk_disable; + +diff --git a/drivers/iio/adc/stm32-dfsdm-adc.c b/drivers/iio/adc/stm32-dfsdm-adc.c +index f5586dd6414d..1c492a7f4587 100644 +--- a/drivers/iio/adc/stm32-dfsdm-adc.c ++++ b/drivers/iio/adc/stm32-dfsdm-adc.c +@@ -45,7 +45,7 @@ enum sd_converter_type { + + struct stm32_dfsdm_dev_data { + int type; +- int (*init)(struct iio_dev *indio_dev); ++ int (*init)(struct device *dev, struct iio_dev *indio_dev); + unsigned int num_channels; + const struct regmap_config *regmap_cfg; + }; +@@ -923,7 +923,8 @@ static void stm32_dfsdm_dma_release(struct iio_dev *indio_dev) + } + } + +-static int stm32_dfsdm_dma_request(struct iio_dev *indio_dev) ++static int stm32_dfsdm_dma_request(struct device *dev, ++ struct iio_dev *indio_dev) + { + struct stm32_dfsdm_adc *adc = iio_priv(indio_dev); + struct dma_slave_config config = { +@@ -933,9 +934,13 @@ static int stm32_dfsdm_dma_request(struct iio_dev *indio_dev) + }; + int ret; + +- adc->dma_chan = dma_request_slave_channel(&indio_dev->dev, "rx"); +- if (!adc->dma_chan) +- return -EINVAL; ++ adc->dma_chan = dma_request_chan(dev, "rx"); ++ if (IS_ERR(adc->dma_chan)) { ++ int ret = PTR_ERR(adc->dma_chan); ++ ++ adc->dma_chan = NULL; ++ return ret; ++ } + + adc->rx_buf = dma_alloc_coherent(adc->dma_chan->device->dev, + DFSDM_DMA_BUFFER_SIZE, +@@ -993,7 +998,7 @@ static int stm32_dfsdm_adc_chan_init_one(struct iio_dev *indio_dev, + &adc->dfsdm->ch_list[ch->channel]); + } + +-static int stm32_dfsdm_audio_init(struct iio_dev *indio_dev) ++static int stm32_dfsdm_audio_init(struct device *dev, struct iio_dev *indio_dev) + { + struct iio_chan_spec *ch; + struct stm32_dfsdm_adc *adc = iio_priv(indio_dev); +@@ -1023,10 +1028,10 @@ static int stm32_dfsdm_audio_init(struct iio_dev *indio_dev) + indio_dev->num_channels = 1; + indio_dev->channels = ch; + 
+- return stm32_dfsdm_dma_request(indio_dev); ++ return stm32_dfsdm_dma_request(dev, indio_dev); + } + +-static int stm32_dfsdm_adc_init(struct iio_dev *indio_dev) ++static int stm32_dfsdm_adc_init(struct device *dev, struct iio_dev *indio_dev) + { + struct iio_chan_spec *ch; + struct stm32_dfsdm_adc *adc = iio_priv(indio_dev); +@@ -1170,7 +1175,7 @@ static int stm32_dfsdm_adc_probe(struct platform_device *pdev) + adc->dfsdm->fl_list[adc->fl_id].sync_mode = val; + + adc->dev_data = dev_data; +- ret = dev_data->init(iio); ++ ret = dev_data->init(dev, iio); + if (ret < 0) + return ret; + +diff --git a/drivers/iio/dac/vf610_dac.c b/drivers/iio/dac/vf610_dac.c +index 5dccdd16cab3..9cf2e2933b66 100644 +--- a/drivers/iio/dac/vf610_dac.c ++++ b/drivers/iio/dac/vf610_dac.c +@@ -234,6 +234,7 @@ static int vf610_dac_probe(struct platform_device *pdev) + return 0; + + error_iio_device_register: ++ vf610_dac_exit(info); + clk_disable_unprepare(info->clk); + + return ret; +diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c +index 2557ed112bc2..c7d0bb3b4a30 100644 +--- a/drivers/iommu/amd_iommu_init.c ++++ b/drivers/iommu/amd_iommu_init.c +@@ -1334,8 +1334,8 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu, + } + case IVHD_DEV_ACPI_HID: { + u16 devid; +- u8 hid[ACPIHID_HID_LEN] = {0}; +- u8 uid[ACPIHID_UID_LEN] = {0}; ++ u8 hid[ACPIHID_HID_LEN]; ++ u8 uid[ACPIHID_UID_LEN]; + int ret; + + if (h->type != 0x40) { +@@ -1352,6 +1352,7 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu, + break; + } + ++ uid[0] = '\0'; + switch (e->uidf) { + case UID_NOT_PRESENT: + +@@ -1366,8 +1367,8 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu, + break; + case UID_IS_CHARACTER: + +- memcpy(uid, (u8 *)(&e->uid), ACPIHID_UID_LEN - 1); +- uid[ACPIHID_UID_LEN - 1] = '\0'; ++ memcpy(uid, &e->uid, e->uidl); ++ uid[e->uidl] = '\0'; + + break; + default: +diff --git a/drivers/ipack/carriers/tpci200.c b/drivers/ipack/carriers/tpci200.c +index 8a9c169b6f99..b5eec18ad59a 100644 +--- a/drivers/ipack/carriers/tpci200.c ++++ b/drivers/ipack/carriers/tpci200.c +@@ -309,6 +309,7 @@ static int tpci200_register(struct tpci200_board *tpci200) + "(bn 0x%X, sn 0x%X) failed to map driver user space!", + tpci200->info->pdev->bus->number, + tpci200->info->pdev->devfn); ++ res = -ENOMEM; + goto out_release_mem8_space; + } + +diff --git a/drivers/media/platform/rcar_fdp1.c b/drivers/media/platform/rcar_fdp1.c +index 5a30f1d84fe1..2bd5898a6204 100644 +--- a/drivers/media/platform/rcar_fdp1.c ++++ b/drivers/media/platform/rcar_fdp1.c +@@ -2368,7 +2368,7 @@ static int fdp1_probe(struct platform_device *pdev) + dprintk(fdp1, "FDP1 Version R-Car H3\n"); + break; + case FD1_IP_M3N: +- dprintk(fdp1, "FDP1 Version R-Car M3N\n"); ++ dprintk(fdp1, "FDP1 Version R-Car M3-N\n"); + break; + case FD1_IP_E3: + dprintk(fdp1, "FDP1 Version R-Car E3\n"); +diff --git a/drivers/misc/cardreader/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c +index da445223f4cc..5c5d0241603a 100644 +--- a/drivers/misc/cardreader/rtsx_pcr.c ++++ b/drivers/misc/cardreader/rtsx_pcr.c +@@ -155,6 +155,9 @@ static void rtsx_comm_pm_full_on(struct rtsx_pcr *pcr) + + rtsx_disable_aspm(pcr); + ++ /* Fixes DMA transfer timout issue after disabling ASPM on RTS5260 */ ++ msleep(1); ++ + if (option->ltr_enabled) + rtsx_set_ltr_latency(pcr, option->ltr_active_latency); + +diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c +index ebdcf0b450e2..524b8c0d371b 100644 +--- a/drivers/misc/mei/client.c ++++ 
b/drivers/misc/mei/client.c +@@ -276,6 +276,7 @@ void mei_me_cl_rm_by_uuid(struct mei_device *dev, const uuid_le *uuid) + down_write(&dev->me_clients_rwsem); + me_cl = __mei_me_cl_by_uuid(dev, uuid); + __mei_me_cl_del(dev, me_cl); ++ mei_me_cl_put(me_cl); + up_write(&dev->me_clients_rwsem); + } + +@@ -297,6 +298,7 @@ void mei_me_cl_rm_by_uuid_id(struct mei_device *dev, const uuid_le *uuid, u8 id) + down_write(&dev->me_clients_rwsem); + me_cl = __mei_me_cl_by_uuid_id(dev, uuid, id); + __mei_me_cl_del(dev, me_cl); ++ mei_me_cl_put(me_cl); + up_write(&dev->me_clients_rwsem); + } + +diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c +index a2f38b3b9776..1d61ae7aaa66 100644 +--- a/drivers/mtd/nand/spi/core.c ++++ b/drivers/mtd/nand/spi/core.c +@@ -1045,6 +1045,10 @@ static int spinand_init(struct spinand_device *spinand) + + mtd->oobavail = ret; + ++ /* Propagate ECC information to mtd_info */ ++ mtd->ecc_strength = nand->eccreq.strength; ++ mtd->ecc_step_size = nand->eccreq.step_size; ++ + return 0; + + err_cleanup_nanddev: +diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c +index 7bc96294ae4d..b108e1f04bf6 100644 +--- a/drivers/mtd/ubi/debug.c ++++ b/drivers/mtd/ubi/debug.c +@@ -405,9 +405,6 @@ static void *eraseblk_count_seq_start(struct seq_file *s, loff_t *pos) + { + struct ubi_device *ubi = s->private; + +- if (*pos == 0) +- return SEQ_START_TOKEN; +- + if (*pos < ubi->peb_count) + return pos; + +@@ -421,8 +418,6 @@ static void *eraseblk_count_seq_next(struct seq_file *s, void *v, loff_t *pos) + { + struct ubi_device *ubi = s->private; + +- if (v == SEQ_START_TOKEN) +- return pos; + (*pos)++; + + if (*pos < ubi->peb_count) +@@ -444,11 +439,8 @@ static int eraseblk_count_seq_show(struct seq_file *s, void *iter) + int err; + + /* If this is the start, print a header */ +- if (iter == SEQ_START_TOKEN) { +- seq_puts(s, +- "physical_block_number\terase_count\tblock_status\tread_status\n"); +- return 0; +- } ++ if (*block_number == 0) ++ seq_puts(s, "physical_block_number\terase_count\n"); + + err = ubi_io_is_bad(ubi, *block_number); + if (err) +diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c +index 750007513f9d..43dbfb228b0e 100644 +--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c ++++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c +@@ -60,7 +60,7 @@ static const struct aq_board_revision_s hw_atl_boards[] = { + { AQ_DEVICE_ID_D108, AQ_HWREV_2, &hw_atl_ops_b0, &hw_atl_b0_caps_aqc108, }, + { AQ_DEVICE_ID_D109, AQ_HWREV_2, &hw_atl_ops_b0, &hw_atl_b0_caps_aqc109, }, + +- { AQ_DEVICE_ID_AQC100, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc107, }, ++ { AQ_DEVICE_ID_AQC100, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc100, }, + { AQ_DEVICE_ID_AQC107, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc107, }, + { AQ_DEVICE_ID_AQC108, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc108, }, + { AQ_DEVICE_ID_AQC109, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc109, }, +diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c +index 89cc146d2c5c..047fc0cf0263 100644 +--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c ++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c +@@ -3616,36 +3616,6 @@ static int bcmgenet_remove(struct platform_device *pdev) + } + + #ifdef CONFIG_PM_SLEEP +-static int bcmgenet_suspend(struct device *d) +-{ +- struct net_device *dev = dev_get_drvdata(d); +- struct bcmgenet_priv *priv = 
netdev_priv(dev); +- int ret = 0; +- +- if (!netif_running(dev)) +- return 0; +- +- netif_device_detach(dev); +- +- bcmgenet_netif_stop(dev); +- +- if (!device_may_wakeup(d)) +- phy_suspend(dev->phydev); +- +- /* Prepare the device for Wake-on-LAN and switch to the slow clock */ +- if (device_may_wakeup(d) && priv->wolopts) { +- ret = bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC); +- clk_prepare_enable(priv->clk_wol); +- } else if (priv->internal_phy) { +- ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE); +- } +- +- /* Turn off the clocks */ +- clk_disable_unprepare(priv->clk); +- +- return ret; +-} +- + static int bcmgenet_resume(struct device *d) + { + struct net_device *dev = dev_get_drvdata(d); +@@ -3724,6 +3694,39 @@ out_clk_disable: + clk_disable_unprepare(priv->clk); + return ret; + } ++ ++static int bcmgenet_suspend(struct device *d) ++{ ++ struct net_device *dev = dev_get_drvdata(d); ++ struct bcmgenet_priv *priv = netdev_priv(dev); ++ int ret = 0; ++ ++ if (!netif_running(dev)) ++ return 0; ++ ++ netif_device_detach(dev); ++ ++ bcmgenet_netif_stop(dev); ++ ++ if (!device_may_wakeup(d)) ++ phy_suspend(dev->phydev); ++ ++ /* Prepare the device for Wake-on-LAN and switch to the slow clock */ ++ if (device_may_wakeup(d) && priv->wolopts) { ++ ret = bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC); ++ clk_prepare_enable(priv->clk_wol); ++ } else if (priv->internal_phy) { ++ ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE); ++ } ++ ++ /* Turn off the clocks */ ++ clk_disable_unprepare(priv->clk); ++ ++ if (ret) ++ bcmgenet_resume(d); ++ ++ return ret; ++} + #endif /* CONFIG_PM_SLEEP */ + + static SIMPLE_DEV_PM_OPS(bcmgenet_pm_ops, bcmgenet_suspend, bcmgenet_resume); +diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c +index 2fbd027f0148..b3596e0ee47b 100644 +--- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c ++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c +@@ -186,9 +186,15 @@ void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv, + } + + reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL); ++ if (!(reg & MPD_EN)) ++ return; /* already powered up so skip the rest */ + reg &= ~MPD_EN; + bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL); + ++ reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL); ++ reg &= ~(RBUF_HFB_EN | RBUF_ACPI_EN); ++ bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL); ++ + /* Disable CRC Forward */ + reg = bcmgenet_umac_readl(priv, UMAC_CMD); + reg &= ~CMD_CRC_FWD; +diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +index c81d6c330548..9d1d77125826 100644 +--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c ++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +@@ -2281,8 +2281,6 @@ static int cxgb_up(struct adapter *adap) + #if IS_ENABLED(CONFIG_IPV6) + update_clip(adap); + #endif +- /* Initialize hash mac addr list*/ +- INIT_LIST_HEAD(&adap->mac_hlist); + return err; + + irq_err: +@@ -2304,6 +2302,7 @@ static void cxgb_down(struct adapter *adapter) + + t4_sge_stop(adapter); + t4_free_sge_resources(adapter); ++ + adapter->flags &= ~FULL_INIT_DONE; + } + +@@ -5602,6 +5601,9 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) + (is_t5(adapter->params.chip) ? 
STATMODE_V(0) : + T6_STATMODE_V(0))); + ++ /* Initialize hash mac addr list */ ++ INIT_LIST_HEAD(&adapter->mac_hlist); ++ + for_each_port(adapter, i) { + netdev = alloc_etherdev_mq(sizeof(struct port_info), + MAX_ETH_QSETS); +@@ -5876,6 +5878,7 @@ fw_attach_fail: + static void remove_one(struct pci_dev *pdev) + { + struct adapter *adapter = pci_get_drvdata(pdev); ++ struct hash_mac_addr *entry, *tmp; + + if (!adapter) { + pci_release_regions(pdev); +@@ -5923,6 +5926,12 @@ static void remove_one(struct pci_dev *pdev) + if (adapter->num_uld || adapter->num_ofld_uld) + t4_uld_mem_free(adapter); + free_some_resources(adapter); ++ list_for_each_entry_safe(entry, tmp, &adapter->mac_hlist, ++ list) { ++ list_del(&entry->list); ++ kfree(entry); ++ } ++ + #if IS_ENABLED(CONFIG_IPV6) + t4_cleanup_clip_tbl(adapter); + #endif +diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c +index 972dc7bd721d..15029a5e62b9 100644 +--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c ++++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c +@@ -723,9 +723,6 @@ static int adapter_up(struct adapter *adapter) + if (adapter->flags & USING_MSIX) + name_msix_vecs(adapter); + +- /* Initialize hash mac addr list*/ +- INIT_LIST_HEAD(&adapter->mac_hlist); +- + adapter->flags |= FULL_INIT_DONE; + } + +@@ -3038,6 +3035,9 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev, + if (err) + goto err_unmap_bar; + ++ /* Initialize hash mac addr list */ ++ INIT_LIST_HEAD(&adapter->mac_hlist); ++ + /* + * Allocate our "adapter ports" and stitch everything together. + */ +diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c +index 8a1916443235..abfd990ba4d8 100644 +--- a/drivers/net/ethernet/ibm/ibmvnic.c ++++ b/drivers/net/ethernet/ibm/ibmvnic.c +@@ -2731,10 +2731,12 @@ static int enable_scrq_irq(struct ibmvnic_adapter *adapter, + + if (adapter->resetting && + adapter->reset_reason == VNIC_RESET_MOBILITY) { +- struct irq_desc *desc = irq_to_desc(scrq->irq); +- struct irq_chip *chip = irq_desc_get_chip(desc); ++ u64 val = (0xff000000) | scrq->hw_irq; + +- chip->irq_eoi(&desc->irq_data); ++ rc = plpar_hcall_norets(H_EOI, val); ++ if (rc) ++ dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n", ++ val, rc); + } + + rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address, +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +index 9c7b1d8e8220..c41879a955b5 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +@@ -3684,7 +3684,7 @@ static int stmmac_set_features(struct net_device *netdev, + /** + * stmmac_interrupt - main ISR + * @irq: interrupt number. +- * @dev_id: to pass the net device pointer. ++ * @dev_id: to pass the net device pointer (must be valid). + * Description: this is the main driver interrupt service routine. 
+ * It can call: + * o DMA service routine (to manage incoming frame reception and transmission +@@ -3708,11 +3708,6 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id) + if (priv->irq_wake) + pm_wakeup_event(priv->device, 0); + +- if (unlikely(!dev)) { +- netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__); +- return IRQ_NONE; +- } +- + /* Check if adapter is up */ + if (test_bit(STMMAC_DOWN, &priv->state)) + return IRQ_HANDLED; +diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c +index eab9984f73a8..d73850ebb671 100644 +--- a/drivers/net/gtp.c ++++ b/drivers/net/gtp.c +@@ -1177,11 +1177,11 @@ out_unlock: + static struct genl_family gtp_genl_family; + + static int gtp_genl_fill_info(struct sk_buff *skb, u32 snd_portid, u32 snd_seq, +- u32 type, struct pdp_ctx *pctx) ++ int flags, u32 type, struct pdp_ctx *pctx) + { + void *genlh; + +- genlh = genlmsg_put(skb, snd_portid, snd_seq, >p_genl_family, 0, ++ genlh = genlmsg_put(skb, snd_portid, snd_seq, >p_genl_family, flags, + type); + if (genlh == NULL) + goto nlmsg_failure; +@@ -1235,8 +1235,8 @@ static int gtp_genl_get_pdp(struct sk_buff *skb, struct genl_info *info) + goto err_unlock; + } + +- err = gtp_genl_fill_info(skb2, NETLINK_CB(skb).portid, +- info->snd_seq, info->nlhdr->nlmsg_type, pctx); ++ err = gtp_genl_fill_info(skb2, NETLINK_CB(skb).portid, info->snd_seq, ++ 0, info->nlhdr->nlmsg_type, pctx); + if (err < 0) + goto err_unlock_free; + +@@ -1279,6 +1279,7 @@ static int gtp_genl_dump_pdp(struct sk_buff *skb, + gtp_genl_fill_info(skb, + NETLINK_CB(cb->skb).portid, + cb->nlh->nlmsg_seq, ++ NLM_F_MULTI, + cb->nlh->nlmsg_type, pctx)) { + cb->args[0] = i; + cb->args[1] = j; +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c +index e0211321fe9e..96870d1b3b73 100644 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c +@@ -1933,6 +1933,8 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes) + if (brcmf_sdio_hdparse(bus, bus->rxhdr, &rd_new, + BRCMF_SDIO_FT_NORMAL)) { + rd->len = 0; ++ brcmf_sdio_rxfail(bus, true, true); ++ sdio_release_host(bus->sdiodev->func1); + brcmu_pkt_buf_free_skb(pkt); + continue; + } +diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c +index 75ae2c508a04..1064a703ccec 100644 +--- a/drivers/nvdimm/btt.c ++++ b/drivers/nvdimm/btt.c +@@ -541,9 +541,9 @@ static int arena_clear_freelist_error(struct arena_info *arena, u32 lane) + + static int btt_freelist_init(struct arena_info *arena) + { +- int old, new, ret; +- u32 i, map_entry; +- struct log_entry log_new, log_old; ++ int new, ret; ++ struct log_entry log_new; ++ u32 i, map_entry, log_oldmap, log_newmap; + + arena->freelist = kcalloc(arena->nfree, sizeof(struct free_entry), + GFP_KERNEL); +@@ -551,24 +551,26 @@ static int btt_freelist_init(struct arena_info *arena) + return -ENOMEM; + + for (i = 0; i < arena->nfree; i++) { +- old = btt_log_read(arena, i, &log_old, LOG_OLD_ENT); +- if (old < 0) +- return old; +- + new = btt_log_read(arena, i, &log_new, LOG_NEW_ENT); + if (new < 0) + return new; + ++ /* old and new map entries with any flags stripped out */ ++ log_oldmap = ent_lba(le32_to_cpu(log_new.old_map)); ++ log_newmap = ent_lba(le32_to_cpu(log_new.new_map)); ++ + /* sub points to the next one to be overwritten */ + arena->freelist[i].sub = 1 - new; + arena->freelist[i].seq = nd_inc_seq(le32_to_cpu(log_new.seq)); +- arena->freelist[i].block = 
le32_to_cpu(log_new.old_map); ++ arena->freelist[i].block = log_oldmap; + + /* + * FIXME: if error clearing fails during init, we want to make + * the BTT read-only + */ +- if (ent_e_flag(log_new.old_map)) { ++ if (ent_e_flag(log_new.old_map) && ++ !ent_normal(log_new.old_map)) { ++ arena->freelist[i].has_err = 1; + ret = arena_clear_freelist_error(arena, i); + if (ret) + dev_err_ratelimited(to_dev(arena), +@@ -576,7 +578,7 @@ static int btt_freelist_init(struct arena_info *arena) + } + + /* This implies a newly created or untouched flog entry */ +- if (log_new.old_map == log_new.new_map) ++ if (log_oldmap == log_newmap) + continue; + + /* Check if map recovery is needed */ +@@ -584,8 +586,15 @@ static int btt_freelist_init(struct arena_info *arena) + NULL, NULL, 0); + if (ret) + return ret; +- if ((le32_to_cpu(log_new.new_map) != map_entry) && +- (le32_to_cpu(log_new.old_map) == map_entry)) { ++ ++ /* ++ * The map_entry from btt_read_map is stripped of any flag bits, ++ * so use the stripped out versions from the log as well for ++ * testing whether recovery is needed. For restoration, use the ++ * 'raw' version of the log entries as that captured what we ++ * were going to write originally. ++ */ ++ if ((log_newmap != map_entry) && (log_oldmap == map_entry)) { + /* + * Last transaction wrote the flog, but wasn't able + * to complete the map write. So fix up the map. +diff --git a/drivers/nvdimm/btt.h b/drivers/nvdimm/btt.h +index db3cb6d4d0d4..ddff49c707b0 100644 +--- a/drivers/nvdimm/btt.h ++++ b/drivers/nvdimm/btt.h +@@ -44,6 +44,8 @@ + #define ent_e_flag(ent) (!!(ent & MAP_ERR_MASK)) + #define ent_z_flag(ent) (!!(ent & MAP_TRIM_MASK)) + #define set_e_flag(ent) (ent |= MAP_ERR_MASK) ++/* 'normal' is both e and z flags set */ ++#define ent_normal(ent) (ent_e_flag(ent) && ent_z_flag(ent)) + + enum btt_init_state { + INIT_UNCHECKED = 0, +diff --git a/drivers/nvdimm/btt_devs.c b/drivers/nvdimm/btt_devs.c +index e341498876ca..9486acc08402 100644 +--- a/drivers/nvdimm/btt_devs.c ++++ b/drivers/nvdimm/btt_devs.c +@@ -159,11 +159,19 @@ static ssize_t size_show(struct device *dev, + } + static DEVICE_ATTR_RO(size); + ++static ssize_t log_zero_flags_show(struct device *dev, ++ struct device_attribute *attr, char *buf) ++{ ++ return sprintf(buf, "Y\n"); ++} ++static DEVICE_ATTR_RO(log_zero_flags); ++ + static struct attribute *nd_btt_attributes[] = { + &dev_attr_sector_size.attr, + &dev_attr_namespace.attr, + &dev_attr_uuid.attr, + &dev_attr_size.attr, ++ &dev_attr_log_zero_flags.attr, + NULL, + }; + +diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c +index 59f3a37a44d7..8db2dc05b8cf 100644 +--- a/drivers/platform/x86/asus-nb-wmi.c ++++ b/drivers/platform/x86/asus-nb-wmi.c +@@ -517,9 +517,33 @@ static struct asus_wmi_driver asus_nb_wmi_driver = { + .detect_quirks = asus_nb_wmi_quirks, + }; + ++static const struct dmi_system_id asus_nb_wmi_blacklist[] __initconst = { ++ { ++ /* ++ * asus-nb-wm adds no functionality. The T100TA has a detachable ++ * USB kbd, so no hotkeys and it has no WMI rfkill; and loading ++ * asus-nb-wm causes the camera LED to turn and _stay_ on. 
++ */ ++ .matches = { ++ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), ++ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100TA"), ++ }, ++ }, ++ { ++ /* The Asus T200TA has the same issue as the T100TA */ ++ .matches = { ++ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), ++ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T200TA"), ++ }, ++ }, ++ {} /* Terminating entry */ ++}; + + static int __init asus_nb_wmi_init(void) + { ++ if (dmi_check_system(asus_nb_wmi_blacklist)) ++ return -ENODEV; ++ + return asus_wmi_register_driver(&asus_nb_wmi_driver); + } + +diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c +index fa0bbda4b3f2..5940780648e0 100644 +--- a/drivers/rapidio/devices/rio_mport_cdev.c ++++ b/drivers/rapidio/devices/rio_mport_cdev.c +@@ -879,6 +879,11 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode, + rmcd_error("pinned %ld out of %ld pages", + pinned, nr_pages); + ret = -EFAULT; ++ /* ++ * Set nr_pages up to mean "how many pages to unpin, in ++ * the error handler: ++ */ ++ nr_pages = pinned; + goto err_pg; + } + +diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c +index e60822f07653..b99ded6b9e0b 100644 +--- a/drivers/scsi/ibmvscsi/ibmvscsi.c ++++ b/drivers/scsi/ibmvscsi/ibmvscsi.c +@@ -2296,16 +2296,12 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id) + static int ibmvscsi_remove(struct vio_dev *vdev) + { + struct ibmvscsi_host_data *hostdata = dev_get_drvdata(&vdev->dev); +- unsigned long flags; + + srp_remove_host(hostdata->host); + scsi_remove_host(hostdata->host); + + purge_requests(hostdata, DID_ERROR); +- +- spin_lock_irqsave(hostdata->host->host_lock, flags); + release_event_pool(&hostdata->pool, hostdata); +- spin_unlock_irqrestore(hostdata->host->host_lock, flags); + + ibmvscsi_release_crq_queue(&hostdata->queue, hostdata, + max_events); +diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c +index b008d583dd6e..0ab9d2fd4a14 100644 +--- a/drivers/scsi/qla2xxx/qla_attr.c ++++ b/drivers/scsi/qla2xxx/qla_attr.c +@@ -2162,11 +2162,11 @@ qla24xx_vport_delete(struct fc_vport *fc_vport) + test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags)) + msleep(1000); + +- qla_nvme_delete(vha); + + qla24xx_disable_vp(vha); + qla2x00_wait_for_sess_deletion(vha); + ++ qla_nvme_delete(vha); + vha->flags.delete_progress = 1; + + qlt_remove_target(ha, vha); +diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c +index bef9faea5eee..ac5d2d34aeea 100644 +--- a/drivers/scsi/qla2xxx/qla_mbx.c ++++ b/drivers/scsi/qla2xxx/qla_mbx.c +@@ -3077,7 +3077,7 @@ qla24xx_abort_command(srb_t *sp) + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c, + "Entered %s.\n", __func__); + +- if (vha->flags.qpairs_available && sp->qpair) ++ if (sp->qpair) + req = sp->qpair->req; + + if (ql2xasynctmfenable) +diff --git a/drivers/staging/greybus/uart.c b/drivers/staging/greybus/uart.c +index 8a006323c3c1..f36d470aed24 100644 +--- a/drivers/staging/greybus/uart.c ++++ b/drivers/staging/greybus/uart.c +@@ -537,9 +537,9 @@ static void gb_tty_set_termios(struct tty_struct *tty, + } + + if (C_CRTSCTS(tty) && C_BAUD(tty) != B0) +- newline.flow_control |= GB_SERIAL_AUTO_RTSCTS_EN; ++ newline.flow_control = GB_SERIAL_AUTO_RTSCTS_EN; + else +- newline.flow_control &= ~GB_SERIAL_AUTO_RTSCTS_EN; ++ newline.flow_control = 0; + + if (memcmp(&gb_tty->line_coding, &newline, sizeof(newline))) { + memcpy(&gb_tty->line_coding, &newline, sizeof(newline)); +diff --git 
a/drivers/staging/iio/resolver/ad2s1210.c b/drivers/staging/iio/resolver/ad2s1210.c +index ac13b99bd9cb..aca983f34f5e 100644 +--- a/drivers/staging/iio/resolver/ad2s1210.c ++++ b/drivers/staging/iio/resolver/ad2s1210.c +@@ -114,17 +114,24 @@ static int ad2s1210_config_write(struct ad2s1210_state *st, u8 data) + static int ad2s1210_config_read(struct ad2s1210_state *st, + unsigned char address) + { +- struct spi_transfer xfer = { +- .len = 2, +- .rx_buf = st->rx, +- .tx_buf = st->tx, ++ struct spi_transfer xfers[] = { ++ { ++ .len = 1, ++ .rx_buf = &st->rx[0], ++ .tx_buf = &st->tx[0], ++ .cs_change = 1, ++ }, { ++ .len = 1, ++ .rx_buf = &st->rx[1], ++ .tx_buf = &st->tx[1], ++ }, + }; + int ret = 0; + + ad2s1210_set_mode(MOD_CONFIG, st); + st->tx[0] = address | AD2S1210_MSB_IS_HIGH; + st->tx[1] = AD2S1210_REG_FAULT; +- ret = spi_sync_transfer(st->sdev, &xfer, 1); ++ ret = spi_sync_transfer(st->sdev, xfers, 2); + if (ret < 0) + return ret; + +diff --git a/drivers/staging/most/core.c b/drivers/staging/most/core.c +index 25a077f4ea94..08f60ce6293d 100644 +--- a/drivers/staging/most/core.c ++++ b/drivers/staging/most/core.c +@@ -1412,7 +1412,7 @@ int most_register_interface(struct most_interface *iface) + + INIT_LIST_HEAD(&iface->p->channel_list); + iface->p->dev_id = id; +- strcpy(iface->p->name, iface->description); ++ strscpy(iface->p->name, iface->description, sizeof(iface->p->name)); + iface->dev.init_name = iface->p->name; + iface->dev.bus = &mc.bus; + iface->dev.parent = &mc.dev; +diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c +index 8490a1b6b615..2b83d8b02f81 100644 +--- a/drivers/thunderbolt/icm.c ++++ b/drivers/thunderbolt/icm.c +@@ -801,9 +801,11 @@ icm_fr_xdomain_connected(struct tb *tb, const struct icm_pkg_header *hdr) + * connected another host to the same port, remove the switch + * first. + */ +- sw = get_switch_at_route(tb->root_switch, route); +- if (sw) ++ sw = tb_switch_find_by_route(tb, route); ++ if (sw) { + remove_switch(sw); ++ tb_switch_put(sw); ++ } + + sw = tb_switch_find_by_link_depth(tb, link, depth); + if (!sw) { +@@ -1146,9 +1148,11 @@ icm_tr_xdomain_connected(struct tb *tb, const struct icm_pkg_header *hdr) + * connected another host to the same port, remove the switch + * first. + */ +- sw = get_switch_at_route(tb->root_switch, route); +- if (sw) ++ sw = tb_switch_find_by_route(tb, route); ++ if (sw) { + remove_switch(sw); ++ tb_switch_put(sw); ++ } + + sw = tb_switch_find_by_route(tb, get_parent_route(route)); + if (!sw) { +diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c +index 42d90ceec279..010a50ac4881 100644 +--- a/drivers/thunderbolt/switch.c ++++ b/drivers/thunderbolt/switch.c +@@ -664,24 +664,6 @@ int tb_switch_reset(struct tb *tb, u64 route) + return res.err; + } + +-struct tb_switch *get_switch_at_route(struct tb_switch *sw, u64 route) +-{ +- u8 next_port = route; /* +- * Routes use a stride of 8 bits, +- * eventhough a port index has 6 bits at most. 
+- * */ +- if (route == 0) +- return sw; +- if (next_port > sw->config.max_port_number) +- return NULL; +- if (tb_is_upstream_port(&sw->ports[next_port])) +- return NULL; +- if (!sw->ports[next_port].remote) +- return NULL; +- return get_switch_at_route(sw->ports[next_port].remote->sw, +- route >> TB_ROUTE_SHIFT); +-} +- + /** + * tb_plug_events_active() - enable/disable plug events on a switch + * +diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c +index 1424581fd9af..146f261bf2c3 100644 +--- a/drivers/thunderbolt/tb.c ++++ b/drivers/thunderbolt/tb.c +@@ -258,7 +258,7 @@ static void tb_handle_hotplug(struct work_struct *work) + if (!tcm->hotplug_active) + goto out; /* during init, suspend or shutdown */ + +- sw = get_switch_at_route(tb->root_switch, ev->route); ++ sw = tb_switch_find_by_route(tb, ev->route); + if (!sw) { + tb_warn(tb, + "hotplug event from non existent switch %llx:%x (unplug: %d)\n", +@@ -269,14 +269,14 @@ static void tb_handle_hotplug(struct work_struct *work) + tb_warn(tb, + "hotplug event from non existent port %llx:%x (unplug: %d)\n", + ev->route, ev->port, ev->unplug); +- goto out; ++ goto put_sw; + } + port = &sw->ports[ev->port]; + if (tb_is_upstream_port(port)) { + tb_warn(tb, + "hotplug event for upstream port %llx:%x (unplug: %d)\n", + ev->route, ev->port, ev->unplug); +- goto out; ++ goto put_sw; + } + if (ev->unplug) { + if (port->remote) { +@@ -306,6 +306,9 @@ static void tb_handle_hotplug(struct work_struct *work) + tb_activate_pcie_devices(tb); + } + } ++ ++put_sw: ++ tb_switch_put(sw); + out: + mutex_unlock(&tb->lock); + kfree(ev); +diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h +index 7a0ee9836a8a..d927cf7b14d2 100644 +--- a/drivers/thunderbolt/tb.h ++++ b/drivers/thunderbolt/tb.h +@@ -397,7 +397,6 @@ void tb_switch_suspend(struct tb_switch *sw); + int tb_switch_resume(struct tb_switch *sw); + int tb_switch_reset(struct tb *tb, u64 route); + void tb_sw_set_unplugged(struct tb_switch *sw); +-struct tb_switch *get_switch_at_route(struct tb_switch *sw, u64 route); + struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link, + u8 depth); + struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid); +diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c +index 4458419f053b..0d405cc58e72 100644 +--- a/drivers/tty/serial/qcom_geni_serial.c ++++ b/drivers/tty/serial/qcom_geni_serial.c +@@ -705,7 +705,7 @@ static void qcom_geni_serial_handle_tx(struct uart_port *uport, bool done, + avail *= port->tx_bytes_pw; + + tail = xmit->tail; +- chunk = min3(avail, pending, (size_t)(UART_XMIT_SIZE - tail)); ++ chunk = min(avail, pending); + if (!chunk) + goto out_write_wakeup; + +@@ -727,19 +727,21 @@ static void qcom_geni_serial_handle_tx(struct uart_port *uport, bool done, + + memset(buf, 0, ARRAY_SIZE(buf)); + tx_bytes = min_t(size_t, remaining, port->tx_bytes_pw); +- for (c = 0; c < tx_bytes ; c++) +- buf[c] = xmit->buf[tail + c]; ++ ++ for (c = 0; c < tx_bytes ; c++) { ++ buf[c] = xmit->buf[tail++]; ++ tail &= UART_XMIT_SIZE - 1; ++ } + + iowrite32_rep(uport->membase + SE_GENI_TX_FIFOn, buf, 1); + + i += tx_bytes; +- tail += tx_bytes; + uport->icount.tx += tx_bytes; + remaining -= tx_bytes; + port->tx_remaining -= tx_bytes; + } + +- xmit->tail = tail & (UART_XMIT_SIZE - 1); ++ xmit->tail = tail; + + /* + * The tx fifo watermark is level triggered and latched. 
Though we had +diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c +index fcf84bfc08e3..f705ea52eb97 100644 +--- a/drivers/usb/core/message.c ++++ b/drivers/usb/core/message.c +@@ -1143,11 +1143,11 @@ void usb_disable_endpoint(struct usb_device *dev, unsigned int epaddr, + + if (usb_endpoint_out(epaddr)) { + ep = dev->ep_out[epnum]; +- if (reset_hardware) ++ if (reset_hardware && epnum != 0) + dev->ep_out[epnum] = NULL; + } else { + ep = dev->ep_in[epnum]; +- if (reset_hardware) ++ if (reset_hardware && epnum != 0) + dev->ep_in[epnum] = NULL; + } + if (ep) { +diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c +index bac1365cc81b..7891bd40ebd8 100644 +--- a/drivers/vhost/vsock.c ++++ b/drivers/vhost/vsock.c +@@ -182,14 +182,14 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock, + break; + } + +- vhost_add_used(vq, head, sizeof(pkt->hdr) + payload_len); +- added = true; +- +- /* Deliver to monitoring devices all correctly transmitted +- * packets. ++ /* Deliver to monitoring devices all packets that we ++ * will transmit. + */ + virtio_transport_deliver_tap_pkt(pkt); + ++ vhost_add_used(vq, head, sizeof(pkt->hdr) + payload_len); ++ added = true; ++ + pkt->off += payload_len; + total_len += payload_len; + +diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c +index 5241102b81a8..a2d4eed27f80 100644 +--- a/fs/ceph/caps.c ++++ b/fs/ceph/caps.c +@@ -3632,6 +3632,7 @@ retry: + WARN_ON(1); + tsession = NULL; + target = -1; ++ mutex_lock(&session->s_mutex); + } + goto retry; + +diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c +index 2cc6b1c49d34..f9628fc20fec 100644 +--- a/fs/configfs/dir.c ++++ b/fs/configfs/dir.c +@@ -1537,6 +1537,7 @@ static int configfs_rmdir(struct inode *dir, struct dentry *dentry) + spin_lock(&configfs_dirent_lock); + configfs_detach_rollback(dentry); + spin_unlock(&configfs_dirent_lock); ++ config_item_put(parent_item); + return -EINTR; + } + frag->frag_dead = true; +diff --git a/fs/file.c b/fs/file.c +index 780d29e58847..3762a3f136fd 100644 +--- a/fs/file.c ++++ b/fs/file.c +@@ -70,7 +70,7 @@ static void copy_fd_bitmaps(struct fdtable *nfdt, struct fdtable *ofdt, + */ + static void copy_fdtable(struct fdtable *nfdt, struct fdtable *ofdt) + { +- unsigned int cpy, set; ++ size_t cpy, set; + + BUG_ON(nfdt->max_fds < ofdt->max_fds); + +diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c +index f8a5eef3d014..ccdd8c821abd 100644 +--- a/fs/gfs2/glock.c ++++ b/fs/gfs2/glock.c +@@ -636,9 +636,6 @@ __acquires(&gl->gl_lockref.lock) + goto out_unlock; + if (nonblock) + goto out_sched; +- smp_mb(); +- if (atomic_read(&gl->gl_revokes) != 0) +- goto out_sched; + set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags); + GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE); + gl->gl_target = gl->gl_demote_state; +diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c +index 65b4f63349c7..d7d2fdda4bbd 100644 +--- a/fs/ubifs/file.c ++++ b/fs/ubifs/file.c +@@ -1391,7 +1391,6 @@ int ubifs_update_time(struct inode *inode, struct timespec64 *time, + struct ubifs_info *c = inode->i_sb->s_fs_info; + struct ubifs_budget_req req = { .dirtied_ino = 1, + .dirtied_ino_d = ALIGN(ui->data_len, 8) }; +- int iflags = I_DIRTY_TIME; + int err, release; + + err = ubifs_budget_space(c, &req); +@@ -1406,11 +1405,8 @@ int ubifs_update_time(struct inode *inode, struct timespec64 *time, + if (flags & S_MTIME) + inode->i_mtime = *time; + +- if (!(inode->i_sb->s_flags & SB_LAZYTIME)) +- iflags |= I_DIRTY_SYNC; +- + release = ui->dirty; +- __mark_inode_dirty(inode, iflags); ++ __mark_inode_dirty(inode, 
I_DIRTY_SYNC); + mutex_unlock(&ui->ui_mutex); + if (release) + ubifs_release_budget(c, &req); +diff --git a/include/linux/padata.h b/include/linux/padata.h +index 5d13d25da2c8..d803397a28f7 100644 +--- a/include/linux/padata.h ++++ b/include/linux/padata.h +@@ -24,7 +24,6 @@ + #include <linux/workqueue.h> + #include <linux/spinlock.h> + #include <linux/list.h> +-#include <linux/timer.h> + #include <linux/notifier.h> + #include <linux/kobject.h> + +@@ -85,18 +84,14 @@ struct padata_serial_queue { + * @serial: List to wait for serialization after reordering. + * @pwork: work struct for parallelization. + * @swork: work struct for serialization. +- * @pd: Backpointer to the internal control structure. + * @work: work struct for parallelization. +- * @reorder_work: work struct for reordering. + * @num_obj: Number of objects that are processed by this cpu. + * @cpu_index: Index of the cpu. + */ + struct padata_parallel_queue { + struct padata_list parallel; + struct padata_list reorder; +- struct parallel_data *pd; + struct work_struct work; +- struct work_struct reorder_work; + atomic_t num_obj; + int cpu_index; + }; +@@ -122,10 +117,10 @@ struct padata_cpumask { + * @reorder_objects: Number of objects waiting in the reorder queues. + * @refcnt: Number of objects holding a reference on this parallel_data. + * @max_seq_nr: Maximal used sequence number. ++ * @cpu: Next CPU to be processed. + * @cpumask: The cpumasks in use for parallel and serial workers. ++ * @reorder_work: work struct for reordering. + * @lock: Reorder lock. +- * @processed: Number of already processed objects. +- * @timer: Reorder timer. + */ + struct parallel_data { + struct padata_instance *pinst; +@@ -134,10 +129,10 @@ struct parallel_data { + atomic_t reorder_objects; + atomic_t refcnt; + atomic_t seq_nr; ++ int cpu; + struct padata_cpumask cpumask; ++ struct work_struct reorder_work; + spinlock_t lock ____cacheline_aligned; +- unsigned int processed; +- struct timer_list timer; + }; + + /** +diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h +index 0924119bcfa4..bc5b232440b6 100644 +--- a/include/trace/events/rxrpc.h ++++ b/include/trace/events/rxrpc.h +@@ -1549,6 +1549,41 @@ TRACE_EVENT(rxrpc_notify_socket, + __entry->serial) + ); + ++TRACE_EVENT(rxrpc_rx_discard_ack, ++ TP_PROTO(unsigned int debug_id, rxrpc_serial_t serial, ++ rxrpc_seq_t first_soft_ack, rxrpc_seq_t call_ackr_first, ++ rxrpc_seq_t prev_pkt, rxrpc_seq_t call_ackr_prev), ++ ++ TP_ARGS(debug_id, serial, first_soft_ack, call_ackr_first, ++ prev_pkt, call_ackr_prev), ++ ++ TP_STRUCT__entry( ++ __field(unsigned int, debug_id ) ++ __field(rxrpc_serial_t, serial ) ++ __field(rxrpc_seq_t, first_soft_ack) ++ __field(rxrpc_seq_t, call_ackr_first) ++ __field(rxrpc_seq_t, prev_pkt) ++ __field(rxrpc_seq_t, call_ackr_prev) ++ ), ++ ++ TP_fast_assign( ++ __entry->debug_id = debug_id; ++ __entry->serial = serial; ++ __entry->first_soft_ack = first_soft_ack; ++ __entry->call_ackr_first = call_ackr_first; ++ __entry->prev_pkt = prev_pkt; ++ __entry->call_ackr_prev = call_ackr_prev; ++ ), ++ ++ TP_printk("c=%08x r=%08x %08x<%08x %08x<%08x", ++ __entry->debug_id, ++ __entry->serial, ++ __entry->first_soft_ack, ++ __entry->call_ackr_first, ++ __entry->prev_pkt, ++ __entry->call_ackr_prev) ++ ); ++ + #endif /* _TRACE_RXRPC_H */ + + /* This part must be outside protection */ +diff --git a/include/uapi/linux/ndctl.h b/include/uapi/linux/ndctl.h +index 2f2c43d633c5..7b0189d6dfa9 100644 +--- a/include/uapi/linux/ndctl.h ++++ b/include/uapi/linux/ndctl.h +@@ 
-247,6 +247,7 @@ struct nd_cmd_pkg { + #define NVDIMM_FAMILY_HPE1 1 + #define NVDIMM_FAMILY_HPE2 2 + #define NVDIMM_FAMILY_MSFT 3 ++#define NVDIMM_FAMILY_HYPERV 4 + + #define ND_IOCTL_CALL _IOWR(ND_IOCTL, ND_CMD_CALL,\ + struct nd_cmd_pkg) +diff --git a/kernel/padata.c b/kernel/padata.c +index c280cb153915..93e4fb2d9f2e 100644 +--- a/kernel/padata.c ++++ b/kernel/padata.c +@@ -167,23 +167,12 @@ EXPORT_SYMBOL(padata_do_parallel); + */ + static struct padata_priv *padata_get_next(struct parallel_data *pd) + { +- int cpu, num_cpus; +- unsigned int next_nr, next_index; + struct padata_parallel_queue *next_queue; + struct padata_priv *padata; + struct padata_list *reorder; ++ int cpu = pd->cpu; + +- num_cpus = cpumask_weight(pd->cpumask.pcpu); +- +- /* +- * Calculate the percpu reorder queue and the sequence +- * number of the next object. +- */ +- next_nr = pd->processed; +- next_index = next_nr % num_cpus; +- cpu = padata_index_to_cpu(pd, next_index); + next_queue = per_cpu_ptr(pd->pqueue, cpu); +- + reorder = &next_queue->reorder; + + spin_lock(&reorder->lock); +@@ -194,7 +183,8 @@ static struct padata_priv *padata_get_next(struct parallel_data *pd) + list_del_init(&padata->list); + atomic_dec(&pd->reorder_objects); + +- pd->processed++; ++ pd->cpu = cpumask_next_wrap(cpu, pd->cpumask.pcpu, -1, ++ false); + + spin_unlock(&reorder->lock); + goto out; +@@ -217,6 +207,7 @@ static void padata_reorder(struct parallel_data *pd) + struct padata_priv *padata; + struct padata_serial_queue *squeue; + struct padata_instance *pinst = pd->pinst; ++ struct padata_parallel_queue *next_queue; + + /* + * We need to ensure that only one cpu can work on dequeueing of +@@ -248,7 +239,6 @@ static void padata_reorder(struct parallel_data *pd) + * so exit immediately. + */ + if (PTR_ERR(padata) == -ENODATA) { +- del_timer(&pd->timer); + spin_unlock_bh(&pd->lock); + return; + } +@@ -267,70 +257,29 @@ static void padata_reorder(struct parallel_data *pd) + + /* + * The next object that needs serialization might have arrived to +- * the reorder queues in the meantime, we will be called again +- * from the timer function if no one else cares for it. ++ * the reorder queues in the meantime. + * +- * Ensure reorder_objects is read after pd->lock is dropped so we see +- * an increment from another task in padata_do_serial. Pairs with ++ * Ensure reorder queue is read after pd->lock is dropped so we see ++ * new objects from another task in padata_do_serial. Pairs with + * smp_mb__after_atomic in padata_do_serial. + */ + smp_mb(); +- if (atomic_read(&pd->reorder_objects) +- && !(pinst->flags & PADATA_RESET)) +- mod_timer(&pd->timer, jiffies + HZ); +- else +- del_timer(&pd->timer); + +- return; ++ next_queue = per_cpu_ptr(pd->pqueue, pd->cpu); ++ if (!list_empty(&next_queue->reorder.list)) ++ queue_work(pinst->wq, &pd->reorder_work); + } + + static void invoke_padata_reorder(struct work_struct *work) + { +- struct padata_parallel_queue *pqueue; + struct parallel_data *pd; + + local_bh_disable(); +- pqueue = container_of(work, struct padata_parallel_queue, reorder_work); +- pd = pqueue->pd; ++ pd = container_of(work, struct parallel_data, reorder_work); + padata_reorder(pd); + local_bh_enable(); + } + +-static void padata_reorder_timer(struct timer_list *t) +-{ +- struct parallel_data *pd = from_timer(pd, t, timer); +- unsigned int weight; +- int target_cpu, cpu; +- +- cpu = get_cpu(); +- +- /* We don't lock pd here to not interfere with parallel processing +- * padata_reorder() calls on other CPUs. 
We just need any CPU out of +- * the cpumask.pcpu set. It would be nice if it's the right one but +- * it doesn't matter if we're off to the next one by using an outdated +- * pd->processed value. +- */ +- weight = cpumask_weight(pd->cpumask.pcpu); +- target_cpu = padata_index_to_cpu(pd, pd->processed % weight); +- +- /* ensure to call the reorder callback on the correct CPU */ +- if (cpu != target_cpu) { +- struct padata_parallel_queue *pqueue; +- struct padata_instance *pinst; +- +- /* The timer function is serialized wrt itself -- no locking +- * needed. +- */ +- pinst = pd->pinst; +- pqueue = per_cpu_ptr(pd->pqueue, target_cpu); +- queue_work_on(target_cpu, pinst->wq, &pqueue->reorder_work); +- } else { +- padata_reorder(pd); +- } +- +- put_cpu(); +-} +- + static void padata_serial_worker(struct work_struct *serial_work) + { + struct padata_serial_queue *squeue; +@@ -375,47 +324,23 @@ static void padata_serial_worker(struct work_struct *serial_work) + */ + void padata_do_serial(struct padata_priv *padata) + { +- int cpu; +- struct padata_parallel_queue *pqueue; +- struct parallel_data *pd; +- int reorder_via_wq = 0; +- +- pd = padata->pd; +- +- cpu = get_cpu(); +- +- /* We need to run on the same CPU padata_do_parallel(.., padata, ..) +- * was called on -- or, at least, enqueue the padata object into the +- * correct per-cpu queue. +- */ +- if (cpu != padata->cpu) { +- reorder_via_wq = 1; +- cpu = padata->cpu; +- } +- +- pqueue = per_cpu_ptr(pd->pqueue, cpu); ++ struct parallel_data *pd = padata->pd; ++ struct padata_parallel_queue *pqueue = per_cpu_ptr(pd->pqueue, ++ padata->cpu); + + spin_lock(&pqueue->reorder.lock); +- atomic_inc(&pd->reorder_objects); + list_add_tail(&padata->list, &pqueue->reorder.list); ++ atomic_inc(&pd->reorder_objects); + spin_unlock(&pqueue->reorder.lock); + + /* +- * Ensure the atomic_inc of reorder_objects above is ordered correctly ++ * Ensure the addition to the reorder list is ordered correctly + * with the trylock of pd->lock in padata_reorder. Pairs with smp_mb + * in padata_reorder. + */ + smp_mb__after_atomic(); + +- put_cpu(); +- +- /* If we're running on the wrong CPU, call padata_reorder() via a +- * kernel worker. 
+- */ +- if (reorder_via_wq) +- queue_work_on(cpu, pd->pinst->wq, &pqueue->reorder_work); +- else +- padata_reorder(pd); ++ padata_reorder(pd); + } + EXPORT_SYMBOL(padata_do_serial); + +@@ -471,14 +396,12 @@ static void padata_init_pqueues(struct parallel_data *pd) + continue; + } + +- pqueue->pd = pd; + pqueue->cpu_index = cpu_index; + cpu_index++; + + __padata_list_init(&pqueue->reorder); + __padata_list_init(&pqueue->parallel); + INIT_WORK(&pqueue->work, padata_parallel_worker); +- INIT_WORK(&pqueue->reorder_work, invoke_padata_reorder); + atomic_set(&pqueue->num_obj, 0); + } + } +@@ -506,12 +429,13 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst, + + padata_init_pqueues(pd); + padata_init_squeues(pd); +- timer_setup(&pd->timer, padata_reorder_timer, 0); + atomic_set(&pd->seq_nr, -1); + atomic_set(&pd->reorder_objects, 0); + atomic_set(&pd->refcnt, 1); + pd->pinst = pinst; + spin_lock_init(&pd->lock); ++ pd->cpu = cpumask_first(pd->cpumask.pcpu); ++ INIT_WORK(&pd->reorder_work, invoke_padata_reorder); + + return pd; + +diff --git a/lib/Makefile b/lib/Makefile +index 0ab808318202..1d7a705d7207 100644 +--- a/lib/Makefile ++++ b/lib/Makefile +@@ -269,6 +269,8 @@ obj-$(CONFIG_UCS2_STRING) += ucs2_string.o + obj-$(CONFIG_UBSAN) += ubsan.o + + UBSAN_SANITIZE_ubsan.o := n ++KASAN_SANITIZE_ubsan.o := n ++CFLAGS_ubsan.o := $(call cc-option, -fno-stack-protector) $(DISABLE_STACKLEAK_PLUGIN) + + obj-$(CONFIG_SBITMAP) += sbitmap.o + +diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c +index d9beb28fc32f..e65b230fce4c 100644 +--- a/net/rxrpc/input.c ++++ b/net/rxrpc/input.c +@@ -814,6 +814,30 @@ static void rxrpc_input_soft_acks(struct rxrpc_call *call, u8 *acks, + } + } + ++/* ++ * Return true if the ACK is valid - ie. it doesn't appear to have regressed ++ * with respect to the ack state conveyed by preceding ACKs. ++ */ ++static bool rxrpc_is_ack_valid(struct rxrpc_call *call, ++ rxrpc_seq_t first_pkt, rxrpc_seq_t prev_pkt) ++{ ++ rxrpc_seq_t base = READ_ONCE(call->ackr_first_seq); ++ ++ if (after(first_pkt, base)) ++ return true; /* The window advanced */ ++ ++ if (before(first_pkt, base)) ++ return false; /* firstPacket regressed */ ++ ++ if (after_eq(prev_pkt, call->ackr_prev_seq)) ++ return true; /* previousPacket hasn't regressed. */ ++ ++ /* Some rx implementations put a serial number in previousPacket. */ ++ if (after_eq(prev_pkt, base + call->tx_winsize)) ++ return false; ++ return true; ++} ++ + /* + * Process an ACK packet. + * +@@ -878,9 +902,12 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb, + } + + /* Discard any out-of-order or duplicate ACKs (outside lock). */ +- if (before(first_soft_ack, call->ackr_first_seq) || +- before(prev_pkt, call->ackr_prev_seq)) ++ if (!rxrpc_is_ack_valid(call, first_soft_ack, prev_pkt)) { ++ trace_rxrpc_rx_discard_ack(call->debug_id, sp->hdr.serial, ++ first_soft_ack, call->ackr_first_seq, ++ prev_pkt, call->ackr_prev_seq); + return; ++ } + + buf.info.rxMTU = 0; + ioffset = offset + nr_acks + 3; +@@ -891,9 +918,12 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb, + spin_lock(&call->input_lock); + + /* Discard any out-of-order or duplicate ACKs (inside lock). 
*/ +- if (before(first_soft_ack, call->ackr_first_seq) || +- before(prev_pkt, call->ackr_prev_seq)) ++ if (!rxrpc_is_ack_valid(call, first_soft_ack, prev_pkt)) { ++ trace_rxrpc_rx_discard_ack(call->debug_id, sp->hdr.serial, ++ first_soft_ack, call->ackr_first_seq, ++ prev_pkt, call->ackr_prev_seq); + goto out; ++ } + call->acks_latest_ts = skb->tstamp; + call->acks_latest = sp->hdr.serial; + +diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c +index cea16838d588..dce7bdc73de4 100644 +--- a/net/rxrpc/rxkad.c ++++ b/net/rxrpc/rxkad.c +@@ -1118,7 +1118,7 @@ static int rxkad_verify_response(struct rxrpc_connection *conn, + ret = rxkad_decrypt_ticket(conn, skb, ticket, ticket_len, &session_key, + &expiry, _abort_code); + if (ret < 0) +- goto temporary_error_free_resp; ++ goto temporary_error_free_ticket; + + /* use the session key from inside the ticket to decrypt the + * response */ +@@ -1200,7 +1200,6 @@ protocol_error: + + temporary_error_free_ticket: + kfree(ticket); +-temporary_error_free_resp: + kfree(response); + temporary_error: + /* Ignore the response packet if we got a temporary error such as +diff --git a/scripts/gcc-plugins/Makefile b/scripts/gcc-plugins/Makefile +index aa0d0ec6936d..9e95862f2788 100644 +--- a/scripts/gcc-plugins/Makefile ++++ b/scripts/gcc-plugins/Makefile +@@ -11,6 +11,7 @@ else + HOST_EXTRACXXFLAGS += -I$(GCC_PLUGINS_DIR)/include -I$(src) -std=gnu++98 -fno-rtti + HOST_EXTRACXXFLAGS += -fno-exceptions -fasynchronous-unwind-tables -ggdb + HOST_EXTRACXXFLAGS += -Wno-narrowing -Wno-unused-variable ++ HOST_EXTRACXXFLAGS += -Wno-format-diag + export HOST_EXTRACXXFLAGS + endif + +diff --git a/scripts/gcc-plugins/gcc-common.h b/scripts/gcc-plugins/gcc-common.h +index 17f06079a712..9ad76b7f3f10 100644 +--- a/scripts/gcc-plugins/gcc-common.h ++++ b/scripts/gcc-plugins/gcc-common.h +@@ -35,7 +35,9 @@ + #include "ggc.h" + #include "timevar.h" + ++#if BUILDING_GCC_VERSION < 10000 + #include "params.h" ++#endif + + #if BUILDING_GCC_VERSION <= 4009 + #include "pointer-set.h" +@@ -847,6 +849,7 @@ static inline gimple gimple_build_assign_with_ops(enum tree_code subcode, tree l + return gimple_build_assign(lhs, subcode, op1, op2 PASS_MEM_STAT); + } + ++#if BUILDING_GCC_VERSION < 10000 + template <> + template <> + inline bool is_a_helper<const ggoto *>::test(const_gimple gs) +@@ -860,6 +863,7 @@ inline bool is_a_helper<const greturn *>::test(const_gimple gs) + { + return gs->code == GIMPLE_RETURN; + } ++#endif + + static inline gasm *as_a_gasm(gimple stmt) + { +diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c +index 0a57d105cc5b..1ec1e928cc09 100644 +--- a/security/apparmor/apparmorfs.c ++++ b/security/apparmor/apparmorfs.c +@@ -424,7 +424,7 @@ static ssize_t policy_update(u32 mask, const char __user *buf, size_t size, + */ + error = aa_may_manage_policy(label, ns, mask); + if (error) +- return error; ++ goto end_section; + + data = aa_simple_write_to_buffer(buf, size, size, pos); + error = PTR_ERR(data); +@@ -432,6 +432,7 @@ static ssize_t policy_update(u32 mask, const char __user *buf, size_t size, + error = aa_replace_profiles(ns, label, mask, data); + aa_put_loaddata(data); + } ++end_section: + end_current_label_crit_section(label); + + return error; +diff --git a/security/apparmor/audit.c b/security/apparmor/audit.c +index eeaddfe0c0fb..70b9730c0be6 100644 +--- a/security/apparmor/audit.c ++++ b/security/apparmor/audit.c +@@ -201,8 +201,9 @@ int aa_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule) + rule->label = 
aa_label_parse(&root_ns->unconfined->label, rulestr, + GFP_KERNEL, true, false); + if (IS_ERR(rule->label)) { ++ int err = PTR_ERR(rule->label); + aa_audit_rule_free(rule); +- return PTR_ERR(rule->label); ++ return err; + } + + *vrule = rule; +diff --git a/security/apparmor/domain.c b/security/apparmor/domain.c +index bdf9e4cefe25..b9d5b3459705 100644 +--- a/security/apparmor/domain.c ++++ b/security/apparmor/domain.c +@@ -1338,6 +1338,7 @@ int aa_change_profile(const char *fqname, int flags) + ctx->nnp = aa_get_label(label); + + if (!fqname || !*fqname) { ++ aa_put_label(label); + AA_DEBUG("no profile name"); + return -EINVAL; + } +@@ -1356,8 +1357,6 @@ int aa_change_profile(const char *fqname, int flags) + op = OP_CHANGE_PROFILE; + } + +- label = aa_get_current_label(); +- + if (*fqname == '&') { + stack = true; + /* don't have label_parse() do stacking */ +diff --git a/security/integrity/evm/evm_crypto.c b/security/integrity/evm/evm_crypto.c +index 6a314fb0d480..f0878d81dcef 100644 +--- a/security/integrity/evm/evm_crypto.c ++++ b/security/integrity/evm/evm_crypto.c +@@ -96,7 +96,7 @@ static struct shash_desc *init_desc(char type, uint8_t hash_algo) + algo = hash_algo_name[hash_algo]; + } + +- if (*tfm == NULL) { ++ if (IS_ERR_OR_NULL(*tfm)) { + mutex_lock(&mutex); + if (*tfm) + goto out; +diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c +index f63b4bd45d60..6a6d19ada66a 100644 +--- a/security/integrity/ima/ima_crypto.c ++++ b/security/integrity/ima/ima_crypto.c +@@ -415,7 +415,7 @@ int ima_calc_file_hash(struct file *file, struct ima_digest_data *hash) + loff_t i_size; + int rc; + struct file *f = file; +- bool new_file_instance = false, modified_flags = false; ++ bool new_file_instance = false, modified_mode = false; + + /* + * For consistency, fail file's opened with the O_DIRECT flag on +@@ -435,13 +435,13 @@ int ima_calc_file_hash(struct file *file, struct ima_digest_data *hash) + f = dentry_open(&file->f_path, flags, file->f_cred); + if (IS_ERR(f)) { + /* +- * Cannot open the file again, lets modify f_flags ++ * Cannot open the file again, lets modify f_mode + * of original and continue + */ + pr_info_ratelimited("Unable to reopen file for reading.\n"); + f = file; +- f->f_flags |= FMODE_READ; +- modified_flags = true; ++ f->f_mode |= FMODE_READ; ++ modified_mode = true; + } else { + new_file_instance = true; + } +@@ -459,8 +459,8 @@ int ima_calc_file_hash(struct file *file, struct ima_digest_data *hash) + out: + if (new_file_instance) + fput(f); +- else if (modified_flags) +- f->f_flags &= ~FMODE_READ; ++ else if (modified_mode) ++ f->f_mode &= ~FMODE_READ; + return rc; + } + +diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c +index cfb8cc3b975e..604cdac63d84 100644 +--- a/security/integrity/ima/ima_fs.c ++++ b/security/integrity/ima/ima_fs.c +@@ -343,8 +343,7 @@ static ssize_t ima_write_policy(struct file *file, const char __user *buf, + integrity_audit_msg(AUDIT_INTEGRITY_STATUS, NULL, NULL, + "policy_update", "signed policy required", + 1, 0); +- if (ima_appraise & IMA_APPRAISE_ENFORCE) +- result = -EACCES; ++ result = -EACCES; + } else { + result = ima_parse_add_rule(data); + } +diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c +index ad52126d3d22..56295936387c 100644 +--- a/sound/core/pcm_lib.c ++++ b/sound/core/pcm_lib.c +@@ -438,6 +438,7 @@ static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream, + + no_delta_check: + if (runtime->status->hw_ptr == new_hw_ptr) { ++ 
runtime->hw_ptr_jiffies = curr_jiffies; + update_audio_tstamp(substream, &curr_tstamp, &audio_tstamp); + return 0; + } +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c +index ffe1340890c9..09a37d4c81ec 100644 +--- a/sound/pci/hda/patch_realtek.c ++++ b/sound/pci/hda/patch_realtek.c +@@ -2443,6 +2443,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = { + SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte EP45-DS3/Z87X-UD3H", ALC889_FIXUP_FRONT_HP_NO_PRESENCE), + SND_PCI_QUIRK(0x1458, 0xa0b8, "Gigabyte AZ370-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS), + SND_PCI_QUIRK(0x1458, 0xa0cd, "Gigabyte X570 Aorus Master", ALC1220_FIXUP_CLEVO_P950), ++ SND_PCI_QUIRK(0x1458, 0xa0ce, "Gigabyte X570 Aorus Xtreme", ALC1220_FIXUP_CLEVO_P950), + SND_PCI_QUIRK(0x1462, 0x1228, "MSI-GP63", ALC1220_FIXUP_CLEVO_P950), + SND_PCI_QUIRK(0x1462, 0x1275, "MSI-GL63", ALC1220_FIXUP_CLEVO_P950), + SND_PCI_QUIRK(0x1462, 0x1276, "MSI-GL73", ALC1220_FIXUP_CLEVO_P950), +@@ -2458,6 +2459,9 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = { + SND_PCI_QUIRK(0x1558, 0x97e1, "Clevo P970[ER][CDFN]", ALC1220_FIXUP_CLEVO_P950), + SND_PCI_QUIRK(0x1558, 0x65d1, "Clevo PB51[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS), + SND_PCI_QUIRK(0x1558, 0x67d1, "Clevo PB71[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS), ++ SND_PCI_QUIRK(0x1558, 0x50d3, "Clevo PC50[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS), ++ SND_PCI_QUIRK(0x1558, 0x70d1, "Clevo PC70[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS), ++ SND_PCI_QUIRK(0x1558, 0x7714, "Clevo X170", ALC1220_FIXUP_CLEVO_PB51ED_PINS), + SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD), + SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD), + SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Y530", ALC882_FIXUP_LENOVO_Y530), +diff --git a/sound/pci/ice1712/ice1712.c b/sound/pci/ice1712/ice1712.c +index f1fe497c2f9d..7702b62dc18b 100644 +--- a/sound/pci/ice1712/ice1712.c ++++ b/sound/pci/ice1712/ice1712.c +@@ -2377,7 +2377,8 @@ static int snd_ice1712_chip_init(struct snd_ice1712 *ice) + pci_write_config_byte(ice->pci, 0x61, ice->eeprom.data[ICE_EEP1_ACLINK]); + pci_write_config_byte(ice->pci, 0x62, ice->eeprom.data[ICE_EEP1_I2SID]); + pci_write_config_byte(ice->pci, 0x63, ice->eeprom.data[ICE_EEP1_SPDIF]); +- if (ice->eeprom.subvendor != ICE1712_SUBDEVICE_STDSP24) { ++ if (ice->eeprom.subvendor != ICE1712_SUBDEVICE_STDSP24 && ++ ice->eeprom.subvendor != ICE1712_SUBDEVICE_STAUDIO_ADCIII) { + ice->gpio.write_mask = ice->eeprom.gpiomask; + ice->gpio.direction = ice->eeprom.gpiodir; + snd_ice1712_write(ice, ICE1712_IREG_GPIO_WRITE_MASK,