commit:     b4205fc874c8fda736c920edbb6ec18d708ecb78
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Mon Sep 16 12:26:54 2019 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Mon Sep 16 12:26:54 2019 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=b4205fc8

Linux patch 5.2.15

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    4 +
 1014_linux-5.2.15.patch | 1595 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1599 insertions(+)

diff --git a/0000_README b/0000_README
index 6458e28..e8d3287 100644
--- a/0000_README
+++ b/0000_README
@@ -99,6 +99,10 @@ Patch:  1013_linux-5.2.14.patch
 From:   https://www.kernel.org
 Desc:   Linux 5.2.14
 
+Patch:  1014_linux-5.2.15.patch
+From:   https://www.kernel.org
+Desc:   Linux 5.2.15
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1014_linux-5.2.15.patch b/1014_linux-5.2.15.patch
new file mode 100644
index 0000000..19f9e8e
--- /dev/null
+++ b/1014_linux-5.2.15.patch
@@ -0,0 +1,1595 @@
+diff --git a/Makefile b/Makefile
+index d019994462ba..3c977aa66650 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 2
+-SUBLEVEL = 14
++SUBLEVEL = 15
+ EXTRAVERSION =
+ NAME = Bobtail Squid
+ 
+diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
+index f0fbbf6a6a1f..4f9bfe9fd960 100644
+--- a/arch/powerpc/kernel/process.c
++++ b/arch/powerpc/kernel/process.c
+@@ -101,21 +101,8 @@ static void check_if_tm_restore_required(struct task_struct *tsk)
+       }
+ }
+ 
+-static bool tm_active_with_fp(struct task_struct *tsk)
+-{
+-      return MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
+-              (tsk->thread.ckpt_regs.msr & MSR_FP);
+-}
+-
+-static bool tm_active_with_altivec(struct task_struct *tsk)
+-{
+-      return MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
+-              (tsk->thread.ckpt_regs.msr & MSR_VEC);
+-}
+ #else
+ static inline void check_if_tm_restore_required(struct task_struct *tsk) { }
+-static inline bool tm_active_with_fp(struct task_struct *tsk) { return false; }
+-static inline bool tm_active_with_altivec(struct task_struct *tsk) { return false; }
+ #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
+ 
+ bool strict_msr_control;
+@@ -252,7 +239,7 @@ EXPORT_SYMBOL(enable_kernel_fp);
+ 
+ static int restore_fp(struct task_struct *tsk)
+ {
+-      if (tsk->thread.load_fp || tm_active_with_fp(tsk)) {
++      if (tsk->thread.load_fp) {
+               load_fp_state(&current->thread.fp_state);
+               current->thread.load_fp++;
+               return 1;
+@@ -334,8 +321,7 @@ EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
+ 
+ static int restore_altivec(struct task_struct *tsk)
+ {
+-      if (cpu_has_feature(CPU_FTR_ALTIVEC) &&
+-              (tsk->thread.load_vec || tm_active_with_altivec(tsk))) {
++      if (cpu_has_feature(CPU_FTR_ALTIVEC) && (tsk->thread.load_vec)) {
+               load_vr_state(&tsk->thread.vr_state);
+               tsk->thread.used_vr = 1;
+               tsk->thread.load_vec++;
+@@ -497,13 +483,14 @@ void giveup_all(struct task_struct *tsk)
+       if (!tsk->thread.regs)
+               return;
+ 
++      check_if_tm_restore_required(tsk);
++
+       usermsr = tsk->thread.regs->msr;
+ 
+       if ((usermsr & msr_all_available) == 0)
+               return;
+ 
+       msr_check_and_set(msr_all_available);
+-      check_if_tm_restore_required(tsk);
+ 
+       WARN_ON((usermsr & MSR_VSX) && !((usermsr & MSR_FP) && (usermsr & MSR_VEC)));
+ 
+diff --git a/arch/powerpc/mm/nohash/tlb.c b/arch/powerpc/mm/nohash/tlb.c
+index d4acf6fa0596..bf60983a58c7 100644
+--- a/arch/powerpc/mm/nohash/tlb.c
++++ b/arch/powerpc/mm/nohash/tlb.c
+@@ -630,7 +630,6 @@ static void early_init_this_mmu(void)
+ #ifdef CONFIG_PPC_FSL_BOOK3E
+       if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
+               unsigned int num_cams;
+-              int __maybe_unused cpu = smp_processor_id();
+               bool map = true;
+ 
+               /* use a quarter of the TLBCAM for bolted linear map */
+diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
+index cfe827cefad8..96d42f571a18 100644
+--- a/drivers/gpio/gpio-pca953x.c
++++ b/drivers/gpio/gpio-pca953x.c
+@@ -604,10 +604,9 @@ static void pca953x_irq_bus_sync_unlock(struct irq_data *d)
+       u8 new_irqs;
+       int level, i;
+       u8 invert_irq_mask[MAX_BANK];
+-      int reg_direction[MAX_BANK];
++      u8 reg_direction[MAX_BANK];
+ 
+-      regmap_bulk_read(chip->regmap, chip->regs->direction, reg_direction,
+-                       NBANK(chip));
++      pca953x_read_regs(chip, chip->regs->direction, reg_direction);
+ 
+       if (chip->driver_data & PCA_PCAL) {
+               /* Enable latch on interrupt-enabled inputs */
+@@ -679,7 +678,7 @@ static bool pca953x_irq_pending(struct pca953x_chip *chip, u8 *pending)
+       bool pending_seen = false;
+       bool trigger_seen = false;
+       u8 trigger[MAX_BANK];
+-      int reg_direction[MAX_BANK];
++      u8 reg_direction[MAX_BANK];
+       int ret, i;
+ 
+       if (chip->driver_data & PCA_PCAL) {
+@@ -710,8 +709,7 @@ static bool pca953x_irq_pending(struct pca953x_chip *chip, u8 *pending)
+               return false;
+ 
+       /* Remove output pins from the equation */
+-      regmap_bulk_read(chip->regmap, chip->regs->direction, reg_direction,
+-                       NBANK(chip));
++      pca953x_read_regs(chip, chip->regs->direction, reg_direction);
+       for (i = 0; i < NBANK(chip); i++)
+               cur_stat[i] &= reg_direction[i];
+ 
+@@ -768,7 +766,7 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
+ {
+       struct i2c_client *client = chip->client;
+       struct irq_chip *irq_chip = &chip->irq_chip;
+-      int reg_direction[MAX_BANK];
++      u8 reg_direction[MAX_BANK];
+       int ret, i;
+ 
+       if (!client->irq)
+@@ -789,8 +787,7 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
+        * interrupt.  We have to rely on the previous read for
+        * this purpose.
+        */
+-      regmap_bulk_read(chip->regmap, chip->regs->direction, reg_direction,
+-                       NBANK(chip));
++      pca953x_read_regs(chip, chip->regs->direction, reg_direction);
+       for (i = 0; i < NBANK(chip); i++)
+               chip->irq_stat[i] &= reg_direction[i];
+       mutex_init(&chip->irq_lock);
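Note on the pca953x hunks above: the bulk register read fills the buffer one byte per bank, so declaring `reg_direction` as `int[]` scattered the bank bytes across the first elements of the array and left the rest as garbage; the fix narrows the buffer to `u8[]` and routes reads through `pca953x_read_regs()`. A minimal userspace model of the type mismatch (names and values are illustrative, and the little-endian byte packing is an assumption of the demo):

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    #define MAX_BANK 5

    /* Model of a bulk register read that writes one byte per bank,
     * as the driver's per-byte register accessor does. */
    static void bulk_read_bytes(void *buf, const uint8_t *hw, int nbanks)
    {
        memcpy(buf, hw, nbanks);        /* one byte per bank */
    }

    int main(void)
    {
        const uint8_t hw[MAX_BANK] = { 0xff, 0x0f, 0xf0, 0xaa, 0x55 };
        int wrong[MAX_BANK] = { 0 };    /* 4 bytes per element: mismatched */
        uint8_t right[MAX_BANK];        /* 1 byte per element: matches the read */

        bulk_read_bytes(wrong, hw, MAX_BANK);   /* packs 5 bytes into wrong[0..1] */
        bulk_read_bytes(right, hw, MAX_BANK);

        /* On little-endian, wrong[1] reads back 0x55 instead of bank 1's 0x0f. */
        printf("wrong[1] = 0x%x, right[1] = 0x%02x\n", wrong[1], right[1]);
        return 0;
    }
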
+diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
+index 13d6bd4e17b2..cf748b80e640 100644
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -2510,6 +2510,13 @@ enum i915_power_well_id {
+ #define   RING_WAIT_SEMAPHORE (1 << 10) /* gen6+ */
+ 
+ #define RING_FORCE_TO_NONPRIV(base, i) _MMIO(((base) + 0x4D0) + (i) * 4)
++#define   RING_FORCE_TO_NONPRIV_RW            (0 << 28)    /* CFL+ & Gen11+ */
++#define   RING_FORCE_TO_NONPRIV_RD            (1 << 28)
++#define   RING_FORCE_TO_NONPRIV_WR            (2 << 28)
++#define   RING_FORCE_TO_NONPRIV_RANGE_1               (0 << 0)     /* CFL+ & Gen11+ */
++#define   RING_FORCE_TO_NONPRIV_RANGE_4               (1 << 0)
++#define   RING_FORCE_TO_NONPRIV_RANGE_16      (2 << 0)
++#define   RING_FORCE_TO_NONPRIV_RANGE_64      (3 << 0)
+ #define   RING_MAX_NONPRIV_SLOTS  12
+ 
+ #define GEN7_TLB_RD_ADDR      _MMIO(0x4700)
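The new `RING_FORCE_TO_NONPRIV_*` definitions above encode an access mode in bits 31:28 and an entry range in bits 1:0; as the intel_workarounds.c hunk further down shows, `whitelist_reg_ext()` simply ORs these flags into the register offset stored in the whitelist slot. A hedged sketch of that packing (the offset value is only an example, not necessarily the real PS_INVOCATION_COUNT address):

    #include <stdint.h>
    #include <stdio.h>

    #define RING_FORCE_TO_NONPRIV_RD      (1u << 28)
    #define RING_FORCE_TO_NONPRIV_RANGE_4 (1u << 0)

    /* Illustrative: pack access mode and range into a whitelist slot value,
     * mirroring how whitelist_reg_ext() ORs flags into the register offset. */
    static uint32_t nonpriv_entry(uint32_t reg_offset, uint32_t flags)
    {
        return reg_offset | flags;
    }

    int main(void)
    {
        /* read-only access over a run of 4 consecutive registers */
        uint32_t v = nonpriv_entry(0x2348, RING_FORCE_TO_NONPRIV_RD |
                                           RING_FORCE_TO_NONPRIV_RANGE_4);
        printf("slot value: 0x%08x\n", v);
        return 0;
    }
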
+diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c
+index ae40a8679314..fd5236da039f 100644
+--- a/drivers/gpu/drm/i915/intel_cdclk.c
++++ b/drivers/gpu/drm/i915/intel_cdclk.c
+@@ -2269,6 +2269,17 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
+       if (crtc_state->has_audio && INTEL_GEN(dev_priv) >= 9)
+               min_cdclk = max(2 * 96000, min_cdclk);
+ 
++      /*
++       * "For DP audio configuration, cdclk frequency shall be set to
++       *  meet the following requirements:
++       *  DP Link Frequency(MHz) | Cdclk frequency(MHz)
++       *  270                    | 320 or higher
++       *  162                    | 200 or higher"
++       */
++      if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
++          intel_crtc_has_dp_encoder(crtc_state) && crtc_state->has_audio)
++              min_cdclk = max(crtc_state->port_clock, min_cdclk);
++
+       /*
+        * On Valleyview some DSI panels lose (v|h)sync when the clock is lower
+        * than 320000KHz.
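The clamp added above implements the quoted table indirectly: with DP audio on VLV/CHV, `min_cdclk` can never drop below the 270000/162000 kHz port clock, and the driver's later cdclk selection rounds up to the next supported step (320/200 MHz), which satisfies the table's rows. A tiny model of the clamp, with illustrative values in kHz:

    #include <stdio.h>

    #define max(a, b) ((a) > (b) ? (a) : (b))

    int main(void)
    {
        int min_cdclk = 200000;     /* whatever earlier rules computed */
        int port_clock = 270000;    /* DP HBR link, kHz */
        int has_audio = 1;

        if (has_audio)
            min_cdclk = max(port_clock, min_cdclk);
        printf("min_cdclk floor: %d kHz (later rounded up to a supported step)\n",
               min_cdclk);
        return 0;
    }
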
+diff --git a/drivers/gpu/drm/i915/intel_workarounds.c b/drivers/gpu/drm/i915/intel_workarounds.c
+index 841b8e515f4d..edd57a5e0495 100644
+--- a/drivers/gpu/drm/i915/intel_workarounds.c
++++ b/drivers/gpu/drm/i915/intel_workarounds.c
+@@ -981,7 +981,7 @@ bool intel_gt_verify_workarounds(struct drm_i915_private *i915,
+ }
+ 
+ static void
+-whitelist_reg(struct i915_wa_list *wal, i915_reg_t reg)
++whitelist_reg_ext(struct i915_wa_list *wal, i915_reg_t reg, u32 flags)
+ {
+       struct i915_wa wa = {
+               .reg = reg
+@@ -990,9 +990,16 @@ whitelist_reg(struct i915_wa_list *wal, i915_reg_t reg)
+       if (GEM_DEBUG_WARN_ON(wal->count >= RING_MAX_NONPRIV_SLOTS))
+               return;
+ 
++      wa.reg.reg |= flags;
+       _wa_add(wal, &wa);
+ }
+ 
++static void
++whitelist_reg(struct i915_wa_list *wal, i915_reg_t reg)
++{
++      whitelist_reg_ext(wal, reg, RING_FORCE_TO_NONPRIV_RW);
++}
++
+ static void gen9_whitelist_build(struct i915_wa_list *w)
+ {
+       /* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk,cfl */
+@@ -1005,56 +1012,131 @@ static void gen9_whitelist_build(struct i915_wa_list *w)
+       whitelist_reg(w, GEN8_HDC_CHICKEN1);
+ }
+ 
+-static void skl_whitelist_build(struct i915_wa_list *w)
++static void skl_whitelist_build(struct intel_engine_cs *engine)
+ {
++      struct i915_wa_list *w = &engine->whitelist;
++
++      if (engine->class != RENDER_CLASS)
++              return;
++
+       gen9_whitelist_build(w);
+ 
+       /* WaDisableLSQCROPERFforOCL:skl */
+       whitelist_reg(w, GEN8_L3SQCREG4);
+ }
+ 
+-static void bxt_whitelist_build(struct i915_wa_list *w)
++static void bxt_whitelist_build(struct intel_engine_cs *engine)
+ {
+-      gen9_whitelist_build(w);
++      if (engine->class != RENDER_CLASS)
++              return;
++
++      gen9_whitelist_build(&engine->whitelist);
+ }
+ 
+-static void kbl_whitelist_build(struct i915_wa_list *w)
++static void kbl_whitelist_build(struct intel_engine_cs *engine)
+ {
++      struct i915_wa_list *w = &engine->whitelist;
++
++      if (engine->class != RENDER_CLASS)
++              return;
++
+       gen9_whitelist_build(w);
+ 
+       /* WaDisableLSQCROPERFforOCL:kbl */
+       whitelist_reg(w, GEN8_L3SQCREG4);
+ }
+ 
+-static void glk_whitelist_build(struct i915_wa_list *w)
++static void glk_whitelist_build(struct intel_engine_cs *engine)
+ {
++      struct i915_wa_list *w = &engine->whitelist;
++
++      if (engine->class != RENDER_CLASS)
++              return;
++
+       gen9_whitelist_build(w);
+ 
+       /* WA #0862: Userspace has to set "Barrier Mode" to avoid hangs. */
+       whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1);
+ }
+ 
+-static void cfl_whitelist_build(struct i915_wa_list *w)
++static void cfl_whitelist_build(struct intel_engine_cs *engine)
+ {
++      struct i915_wa_list *w = &engine->whitelist;
++
++      if (engine->class != RENDER_CLASS)
++              return;
++
+       gen9_whitelist_build(w);
++
++      /*
++       * WaAllowPMDepthAndInvocationCountAccessFromUMD:cfl,whl,cml,aml
++       *
++       * This covers 4 register which are next to one another :
++       *   - PS_INVOCATION_COUNT
++       *   - PS_INVOCATION_COUNT_UDW
++       *   - PS_DEPTH_COUNT
++       *   - PS_DEPTH_COUNT_UDW
++       */
++      whitelist_reg_ext(w, PS_INVOCATION_COUNT,
++                        RING_FORCE_TO_NONPRIV_RD |
++                        RING_FORCE_TO_NONPRIV_RANGE_4);
+ }
+ 
+-static void cnl_whitelist_build(struct i915_wa_list *w)
++static void cnl_whitelist_build(struct intel_engine_cs *engine)
+ {
++      struct i915_wa_list *w = &engine->whitelist;
++
++      if (engine->class != RENDER_CLASS)
++              return;
++
+       /* WaEnablePreemptionGranularityControlByUMD:cnl */
+       whitelist_reg(w, GEN8_CS_CHICKEN1);
+ }
+ 
+-static void icl_whitelist_build(struct i915_wa_list *w)
++static void icl_whitelist_build(struct intel_engine_cs *engine)
+ {
+-      /* WaAllowUMDToModifyHalfSliceChicken7:icl */
+-      whitelist_reg(w, GEN9_HALF_SLICE_CHICKEN7);
++      struct i915_wa_list *w = &engine->whitelist;
+ 
+-      /* WaAllowUMDToModifySamplerMode:icl */
+-      whitelist_reg(w, GEN10_SAMPLER_MODE);
++      switch (engine->class) {
++      case RENDER_CLASS:
++              /* WaAllowUMDToModifyHalfSliceChicken7:icl */
++              whitelist_reg(w, GEN9_HALF_SLICE_CHICKEN7);
+ 
+-      /* WaEnableStateCacheRedirectToCS:icl */
+-      whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1);
++              /* WaAllowUMDToModifySamplerMode:icl */
++              whitelist_reg(w, GEN10_SAMPLER_MODE);
++
++              /* WaEnableStateCacheRedirectToCS:icl */
++              whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1);
++
++              /*
++               * WaAllowPMDepthAndInvocationCountAccessFromUMD:icl
++               *
++               * This covers 4 register which are next to one another :
++               *   - PS_INVOCATION_COUNT
++               *   - PS_INVOCATION_COUNT_UDW
++               *   - PS_DEPTH_COUNT
++               *   - PS_DEPTH_COUNT_UDW
++               */
++              whitelist_reg_ext(w, PS_INVOCATION_COUNT,
++                                RING_FORCE_TO_NONPRIV_RD |
++                                RING_FORCE_TO_NONPRIV_RANGE_4);
++              break;
++
++      case VIDEO_DECODE_CLASS:
++              /* hucStatusRegOffset */
++              whitelist_reg_ext(w, _MMIO(0x2000 + engine->mmio_base),
++                                RING_FORCE_TO_NONPRIV_RD);
++              /* hucUKernelHdrInfoRegOffset */
++              whitelist_reg_ext(w, _MMIO(0x2014 + engine->mmio_base),
++                                RING_FORCE_TO_NONPRIV_RD);
++              /* hucStatus2RegOffset */
++              whitelist_reg_ext(w, _MMIO(0x23B0 + engine->mmio_base),
++                                RING_FORCE_TO_NONPRIV_RD);
++              break;
++
++      default:
++              break;
++      }
+ }
+ 
+ void intel_engine_init_whitelist(struct intel_engine_cs *engine)
+@@ -1062,24 +1144,22 @@ void intel_engine_init_whitelist(struct intel_engine_cs *engine)
+       struct drm_i915_private *i915 = engine->i915;
+       struct i915_wa_list *w = &engine->whitelist;
+ 
+-      GEM_BUG_ON(engine->id != RCS0);
+-
+       wa_init_start(w, "whitelist");
+ 
+       if (IS_GEN(i915, 11))
+-              icl_whitelist_build(w);
++              icl_whitelist_build(engine);
+       else if (IS_CANNONLAKE(i915))
+-              cnl_whitelist_build(w);
++              cnl_whitelist_build(engine);
+       else if (IS_COFFEELAKE(i915))
+-              cfl_whitelist_build(w);
++              cfl_whitelist_build(engine);
+       else if (IS_GEMINILAKE(i915))
+-              glk_whitelist_build(w);
++              glk_whitelist_build(engine);
+       else if (IS_KABYLAKE(i915))
+-              kbl_whitelist_build(w);
++              kbl_whitelist_build(engine);
+       else if (IS_BROXTON(i915))
+-              bxt_whitelist_build(w);
++              bxt_whitelist_build(engine);
+       else if (IS_SKYLAKE(i915))
+-              skl_whitelist_build(w);
++              skl_whitelist_build(engine);
+       else if (INTEL_GEN(i915) <= 8)
+               return;
+       else
+@@ -1167,8 +1247,12 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
+               if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0))
+                       wa_write_or(wal,
+                                   GEN7_SARCHKMD,
+-                                  GEN7_DISABLE_DEMAND_PREFETCH |
+-                                  GEN7_DISABLE_SAMPLER_PREFETCH);
++                                  GEN7_DISABLE_DEMAND_PREFETCH);
++
++              /* Wa_1606682166:icl */
++              wa_write_or(wal,
++                          GEN7_SARCHKMD,
++                          GEN7_DISABLE_SAMPLER_PREFETCH);
+       }
+ 
+       if (IS_GEN_RANGE(i915, 9, 11)) {
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c
+index 84a2f243ed9b..4695f1c8e33f 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c
+@@ -190,6 +190,9 @@ MODULE_FIRMWARE("nvidia/gp102/nvdec/scrubber.bin");
+ MODULE_FIRMWARE("nvidia/gp102/sec2/desc.bin");
+ MODULE_FIRMWARE("nvidia/gp102/sec2/image.bin");
+ MODULE_FIRMWARE("nvidia/gp102/sec2/sig.bin");
++MODULE_FIRMWARE("nvidia/gp102/sec2/desc-1.bin");
++MODULE_FIRMWARE("nvidia/gp102/sec2/image-1.bin");
++MODULE_FIRMWARE("nvidia/gp102/sec2/sig-1.bin");
+ MODULE_FIRMWARE("nvidia/gp104/acr/bl.bin");
+ MODULE_FIRMWARE("nvidia/gp104/acr/unload_bl.bin");
+ MODULE_FIRMWARE("nvidia/gp104/acr/ucode_load.bin");
+@@ -210,6 +213,9 @@ MODULE_FIRMWARE("nvidia/gp104/nvdec/scrubber.bin");
+ MODULE_FIRMWARE("nvidia/gp104/sec2/desc.bin");
+ MODULE_FIRMWARE("nvidia/gp104/sec2/image.bin");
+ MODULE_FIRMWARE("nvidia/gp104/sec2/sig.bin");
++MODULE_FIRMWARE("nvidia/gp104/sec2/desc-1.bin");
++MODULE_FIRMWARE("nvidia/gp104/sec2/image-1.bin");
++MODULE_FIRMWARE("nvidia/gp104/sec2/sig-1.bin");
+ MODULE_FIRMWARE("nvidia/gp106/acr/bl.bin");
+ MODULE_FIRMWARE("nvidia/gp106/acr/unload_bl.bin");
+ MODULE_FIRMWARE("nvidia/gp106/acr/ucode_load.bin");
+@@ -230,6 +236,9 @@ MODULE_FIRMWARE("nvidia/gp106/nvdec/scrubber.bin");
+ MODULE_FIRMWARE("nvidia/gp106/sec2/desc.bin");
+ MODULE_FIRMWARE("nvidia/gp106/sec2/image.bin");
+ MODULE_FIRMWARE("nvidia/gp106/sec2/sig.bin");
++MODULE_FIRMWARE("nvidia/gp106/sec2/desc-1.bin");
++MODULE_FIRMWARE("nvidia/gp106/sec2/image-1.bin");
++MODULE_FIRMWARE("nvidia/gp106/sec2/sig-1.bin");
+ MODULE_FIRMWARE("nvidia/gp107/acr/bl.bin");
+ MODULE_FIRMWARE("nvidia/gp107/acr/unload_bl.bin");
+ MODULE_FIRMWARE("nvidia/gp107/acr/ucode_load.bin");
+@@ -250,3 +259,6 @@ MODULE_FIRMWARE("nvidia/gp107/nvdec/scrubber.bin");
+ MODULE_FIRMWARE("nvidia/gp107/sec2/desc.bin");
+ MODULE_FIRMWARE("nvidia/gp107/sec2/image.bin");
+ MODULE_FIRMWARE("nvidia/gp107/sec2/sig.bin");
++MODULE_FIRMWARE("nvidia/gp107/sec2/desc-1.bin");
++MODULE_FIRMWARE("nvidia/gp107/sec2/image-1.bin");
++MODULE_FIRMWARE("nvidia/gp107/sec2/sig-1.bin");
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
+index 59e9d05ab928..0af048d1a815 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
+@@ -353,7 +353,7 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
+                                    !!(HIGH_WORD(ecx) & MESSAGE_STATUS_HB));
+               if ((HIGH_WORD(ebx) & MESSAGE_STATUS_SUCCESS) == 0) {
+                       kfree(reply);
+-
++                      reply = NULL;
+                       if ((HIGH_WORD(ebx) & MESSAGE_STATUS_CPT) != 0) {
+                               /* A checkpoint occurred. Retry. */
+                               continue;
+@@ -377,7 +377,7 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
+ 
+               if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0) {
+                       kfree(reply);
+-
++                      reply = NULL;
+                       if ((HIGH_WORD(ecx) & MESSAGE_STATUS_CPT) != 0) {
+                               /* A checkpoint occurred. Retry. */
+                               continue;
+@@ -389,10 +389,8 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
+               break;
+       }
+ 
+-      if (retries == RETRIES) {
+-              kfree(reply);
++      if (!reply)
+               return -EINVAL;
+-      }
+ 
+       *msg_len = reply_len;
+       *msg     = reply;
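The vmwgfx hunks above fix a double-free: `reply` is now NULLed immediately after each `kfree()` inside the retry loop, and the exit test becomes `if (!reply)` rather than comparing the retry counter, so a stale pointer can never be freed again or handed back to the caller. A minimal userspace model of the pattern (function names are illustrative):

    #include <stdlib.h>
    #include <string.h>

    #define RETRIES 3

    /* Model: try to receive a message, retrying on transient failure.
     * Freeing and NULLing the buffer inside the loop means the exit
     * test (!reply) is always accurate, even if the last attempt failed. */
    static char *recv_with_retry(int (*try_recv)(char **buf))
    {
        char *reply = NULL;
        int retries;

        for (retries = 0; retries < RETRIES; retries++) {
            if (try_recv(&reply) == 0)
                break;          /* success, keep reply */
            free(reply);
            reply = NULL;       /* never leave a dangling pointer */
        }
        return reply;           /* NULL means all attempts failed */
    }

    static int attempts;
    static int fake_recv(char **buf)
    {
        *buf = strdup("data");
        return ++attempts < 3 ? -1 : 0; /* fail twice, then succeed */
    }

    int main(void)
    {
        char *msg = recv_with_retry(fake_recv);
        int ok = msg != NULL;

        free(msg);
        return ok ? 0 : 1;
    }
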
+diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
+index 7c8cfb149da0..5c0d90418e8c 100644
+--- a/drivers/infiniband/hw/hfi1/rc.c
++++ b/drivers/infiniband/hw/hfi1/rc.c
+@@ -1830,23 +1830,13 @@ void hfi1_rc_send_complete(struct rvt_qp *qp, struct hfi1_opa_header *opah)
+       }
+ 
+       while (qp->s_last != qp->s_acked) {
+-              u32 s_last;
+-
+               wqe = rvt_get_swqe_ptr(qp, qp->s_last);
+               if (cmp_psn(wqe->lpsn, qp->s_sending_psn) >= 0 &&
+                   cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)
+                       break;
+               trdma_clean_swqe(qp, wqe);
+-              rvt_qp_wqe_unreserve(qp, wqe);
+-              s_last = qp->s_last;
+-              trace_hfi1_qp_send_completion(qp, wqe, s_last);
+-              if (++s_last >= qp->s_size)
+-                      s_last = 0;
+-              qp->s_last = s_last;
+-              /* see post_send() */
+-              barrier();
+-              rvt_put_qp_swqe(qp, wqe);
+-              rvt_qp_swqe_complete(qp,
++              trace_hfi1_qp_send_completion(qp, wqe, qp->s_last);
++              rvt_qp_complete_swqe(qp,
+                                    wqe,
+                                    ib_hfi1_wc_opcode[wqe->wr.opcode],
+                                    IB_WC_SUCCESS);
+@@ -1890,19 +1880,9 @@ struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
+       trace_hfi1_rc_completion(qp, wqe->lpsn);
+       if (cmp_psn(wqe->lpsn, qp->s_sending_psn) < 0 ||
+           cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
+-              u32 s_last;
+-
+               trdma_clean_swqe(qp, wqe);
+-              rvt_put_qp_swqe(qp, wqe);
+-              rvt_qp_wqe_unreserve(qp, wqe);
+-              s_last = qp->s_last;
+-              trace_hfi1_qp_send_completion(qp, wqe, s_last);
+-              if (++s_last >= qp->s_size)
+-                      s_last = 0;
+-              qp->s_last = s_last;
+-              /* see post_send() */
+-              barrier();
+-              rvt_qp_swqe_complete(qp,
++              trace_hfi1_qp_send_completion(qp, wqe, qp->s_last);
++              rvt_qp_complete_swqe(qp,
+                                    wqe,
+                                    ib_hfi1_wc_opcode[wqe->wr.opcode],
+                                    IB_WC_SUCCESS);
+diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
+index 2ac4c67f5ba1..8d9a94d6f685 100644
+--- a/drivers/infiniband/hw/qib/qib_rc.c
++++ b/drivers/infiniband/hw/qib/qib_rc.c
+@@ -921,20 +921,11 @@ void qib_rc_send_complete(struct rvt_qp *qp, struct ib_header *hdr)
+               rvt_add_retry_timer(qp);
+ 
+       while (qp->s_last != qp->s_acked) {
+-              u32 s_last;
+-
+               wqe = rvt_get_swqe_ptr(qp, qp->s_last);
+               if (qib_cmp24(wqe->lpsn, qp->s_sending_psn) >= 0 &&
+                   qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)
+                       break;
+-              s_last = qp->s_last;
+-              if (++s_last >= qp->s_size)
+-                      s_last = 0;
+-              qp->s_last = s_last;
+-              /* see post_send() */
+-              barrier();
+-              rvt_put_qp_swqe(qp, wqe);
+-              rvt_qp_swqe_complete(qp,
++              rvt_qp_complete_swqe(qp,
+                                    wqe,
+                                    ib_qib_wc_opcode[wqe->wr.opcode],
+                                    IB_WC_SUCCESS);
+@@ -972,21 +963,12 @@ static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
+        * is finished.
+        */
+       if (qib_cmp24(wqe->lpsn, qp->s_sending_psn) < 0 ||
+-          qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
+-              u32 s_last;
+-
+-              rvt_put_qp_swqe(qp, wqe);
+-              s_last = qp->s_last;
+-              if (++s_last >= qp->s_size)
+-                      s_last = 0;
+-              qp->s_last = s_last;
+-              /* see post_send() */
+-              barrier();
+-              rvt_qp_swqe_complete(qp,
++          qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) > 0)
++              rvt_qp_complete_swqe(qp,
+                                    wqe,
+                                    ib_qib_wc_opcode[wqe->wr.opcode],
+                                    IB_WC_SUCCESS);
+-      } else
++      else
+               this_cpu_inc(*ibp->rvp.rc_delayed_comp);
+ 
+       qp->s_retry = qp->s_retry_cnt;
+diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
+index c5a50614a6c6..cb9e171d7e7b 100644
+--- a/drivers/infiniband/sw/rdmavt/qp.c
++++ b/drivers/infiniband/sw/rdmavt/qp.c
+@@ -1856,10 +1856,9 @@ static inline int rvt_qp_is_avail(
+ 
+       /* see rvt_qp_wqe_unreserve() */
+       smp_mb__before_atomic();
+-      reserved_used = atomic_read(&qp->s_reserved_used);
+       if (unlikely(reserved_op)) {
+               /* see rvt_qp_wqe_unreserve() */
+-              smp_mb__before_atomic();
++              reserved_used = atomic_read(&qp->s_reserved_used);
+               if (reserved_used >= rdi->dparms.reserved_operations)
+                       return -ENOMEM;
+               return 0;
+@@ -1867,14 +1866,13 @@ static inline int rvt_qp_is_avail(
+       /* non-reserved operations */
+       if (likely(qp->s_avail))
+               return 0;
+-      slast = READ_ONCE(qp->s_last);
++      /* See rvt_qp_complete_swqe() */
++      slast = smp_load_acquire(&qp->s_last);
+       if (qp->s_head >= slast)
+               avail = qp->s_size - (qp->s_head - slast);
+       else
+               avail = slast - qp->s_head;
+ 
+-      /* see rvt_qp_wqe_unreserve() */
+-      smp_mb__before_atomic();
+       reserved_used = atomic_read(&qp->s_reserved_used);
+       avail =  avail - 1 -
+               (rdi->dparms.reserved_operations - reserved_used);
+@@ -2667,27 +2665,16 @@ void rvt_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
+                      enum ib_wc_status status)
+ {
+       u32 old_last, last;
+-      struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
++      struct rvt_dev_info *rdi;
+ 
+       if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND))
+               return;
++      rdi = ib_to_rvt(qp->ibqp.device);
+ 
+-      last = qp->s_last;
+-      old_last = last;
+-      trace_rvt_qp_send_completion(qp, wqe, last);
+-      if (++last >= qp->s_size)
+-              last = 0;
+-      trace_rvt_qp_send_completion(qp, wqe, last);
+-      qp->s_last = last;
+-      /* See post_send() */
+-      barrier();
+-      rvt_put_qp_swqe(qp, wqe);
+-
+-      rvt_qp_swqe_complete(qp,
+-                           wqe,
+-                           rdi->wc_opcode[wqe->wr.opcode],
+-                           status);
+-
++      old_last = qp->s_last;
++      trace_rvt_qp_send_completion(qp, wqe, old_last);
++      last = rvt_qp_complete_swqe(qp, wqe, rdi->wc_opcode[wqe->wr.opcode],
++                                  status);
+       if (qp->s_acked == old_last)
+               qp->s_acked = last;
+       if (qp->s_cur == old_last)
+diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
+index 773f5fdad25f..5cf3247e8afb 100644
+--- a/drivers/md/bcache/btree.c
++++ b/drivers/md/bcache/btree.c
+@@ -35,7 +35,7 @@
+ #include <linux/rcupdate.h>
+ #include <linux/sched/clock.h>
+ #include <linux/rculist.h>
+-
++#include <linux/delay.h>
+ #include <trace/events/bcache.h>
+ 
+ /*
+@@ -655,7 +655,25 @@ static int mca_reap(struct btree *b, unsigned int min_order, bool flush)
+               up(&b->io_mutex);
+       }
+ 
++retry:
++      /*
++       * BTREE_NODE_dirty might be cleared in btree_flush_btree() by
++       * __bch_btree_node_write(). To avoid an extra flush, acquire
++       * b->write_lock before checking BTREE_NODE_dirty bit.
++       */
+       mutex_lock(&b->write_lock);
++      /*
++       * If this btree node is selected in btree_flush_write() by journal
++       * code, delay and retry until the node is flushed by journal code
++       * and BTREE_NODE_journal_flush bit cleared by btree_flush_write().
++       */
++      if (btree_node_journal_flush(b)) {
++              pr_debug("bnode %p is flushing by journal, retry", b);
++              mutex_unlock(&b->write_lock);
++              udelay(1);
++              goto retry;
++      }
++
+       if (btree_node_dirty(b))
+               __bch_btree_node_write(b, &cl);
+       mutex_unlock(&b->write_lock);
+@@ -778,10 +796,15 @@ void bch_btree_cache_free(struct cache_set *c)
+       while (!list_empty(&c->btree_cache)) {
+               b = list_first_entry(&c->btree_cache, struct btree, list);
+ 
+-              if (btree_node_dirty(b))
++              /*
++               * This function is called by cache_set_free(), no I/O
++               * request on cache now, it is unnecessary to acquire
++               * b->write_lock before clearing BTREE_NODE_dirty anymore.
++               */
++              if (btree_node_dirty(b)) {
+                       btree_complete_write(b, btree_current_write(b));
+-              clear_bit(BTREE_NODE_dirty, &b->flags);
+-
++                      clear_bit(BTREE_NODE_dirty, &b->flags);
++              }
+               mca_data_free(b);
+       }
+ 
+@@ -1067,11 +1090,25 @@ static void btree_node_free(struct btree *b)
+ 
+       BUG_ON(b == b->c->root);
+ 
++retry:
+       mutex_lock(&b->write_lock);
++      /*
++       * If the btree node is selected and flushing in btree_flush_write(),
++       * delay and retry until the BTREE_NODE_journal_flush bit cleared,
++       * then it is safe to free the btree node here. Otherwise this btree
++       * node will be in race condition.
++       */
++      if (btree_node_journal_flush(b)) {
++              mutex_unlock(&b->write_lock);
++              pr_debug("bnode %p journal_flush set, retry", b);
++              udelay(1);
++              goto retry;
++      }
+ 
+-      if (btree_node_dirty(b))
++      if (btree_node_dirty(b)) {
+               btree_complete_write(b, btree_current_write(b));
+-      clear_bit(BTREE_NODE_dirty, &b->flags);
++              clear_bit(BTREE_NODE_dirty, &b->flags);
++      }
+ 
+       mutex_unlock(&b->write_lock);
+ 
+diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h
+index d1c72ef64edf..76cfd121a486 100644
+--- a/drivers/md/bcache/btree.h
++++ b/drivers/md/bcache/btree.h
+@@ -158,11 +158,13 @@ enum btree_flags {
+       BTREE_NODE_io_error,
+       BTREE_NODE_dirty,
+       BTREE_NODE_write_idx,
++      BTREE_NODE_journal_flush,
+ };
+ 
+ BTREE_FLAG(io_error);
+ BTREE_FLAG(dirty);
+ BTREE_FLAG(write_idx);
++BTREE_FLAG(journal_flush);
+ 
+ static inline struct btree_write *btree_current_write(struct btree *b)
+ {
+diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
+index cae2aff5e27a..33556acdcf9c 100644
+--- a/drivers/md/bcache/journal.c
++++ b/drivers/md/bcache/journal.c
+@@ -405,6 +405,7 @@ static void btree_flush_write(struct cache_set *c)
+ retry:
+       best = NULL;
+ 
++      mutex_lock(&c->bucket_lock);
+       for_each_cached_btree(b, c, i)
+               if (btree_current_write(b)->journal) {
+                       if (!best)
+@@ -417,9 +418,14 @@ retry:
+               }
+ 
+       b = best;
++      if (b)
++              set_btree_node_journal_flush(b);
++      mutex_unlock(&c->bucket_lock);
++
+       if (b) {
+               mutex_lock(&b->write_lock);
+               if (!btree_current_write(b)->journal) {
++                      clear_bit(BTREE_NODE_journal_flush, &b->flags);
+                       mutex_unlock(&b->write_lock);
+                       /* We raced */
+                       atomic_long_inc(&c->retry_flush_write);
+@@ -427,6 +433,7 @@ retry:
+               }
+ 
+               __bch_btree_node_write(b, NULL);
++              clear_bit(BTREE_NODE_journal_flush, &b->flags);
+               mutex_unlock(&b->write_lock);
+       }
+ }
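The bcache hunks above close a race between journal-triggered flushes and node teardown: `btree_flush_write()` marks its victim with the new `BTREE_NODE_journal_flush` bit under `bucket_lock`, and `mca_reap()`/`btree_node_free()` drop `write_lock`, `udelay(1)` and retry until the bit clears. A compact pthread model of that flag-and-retry handshake (names and the `usleep(1)` stand-in are illustrative):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <unistd.h>

    struct node {
        pthread_mutex_t write_lock;
        atomic_bool journal_flush;  /* models BTREE_NODE_journal_flush */
    };

    /* Flusher: claim the node, write it out, release the claim. */
    static void flush_node(struct node *n)
    {
        atomic_store(&n->journal_flush, true);
        pthread_mutex_lock(&n->write_lock);
        /* ... __bch_btree_node_write() equivalent ... */
        atomic_store(&n->journal_flush, false);
        pthread_mutex_unlock(&n->write_lock);
    }

    /* Reaper: must not reap or free the node while a flush has claimed it. */
    static void reap_node(struct node *n)
    {
    retry:
        pthread_mutex_lock(&n->write_lock);
        if (atomic_load(&n->journal_flush)) {
            pthread_mutex_unlock(&n->write_lock);
            usleep(1);              /* models udelay(1) */
            goto retry;
        }
        /* ... safe to write back / free the node here ... */
        pthread_mutex_unlock(&n->write_lock);
    }

    int main(void)
    {
        struct node n = { PTHREAD_MUTEX_INITIALIZER, false };

        flush_node(&n);
        reap_node(&n);
        return 0;
    }
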
+diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
+index b3a130a9ee23..1604f512c7bd 100644
+--- a/drivers/mmc/host/sdhci-acpi.c
++++ b/drivers/mmc/host/sdhci-acpi.c
+@@ -883,7 +883,7 @@ static int sdhci_acpi_runtime_resume(struct device *dev)
+ 
+       sdhci_acpi_byt_setting(&c->pdev->dev);
+ 
+-      return sdhci_runtime_resume_host(c->host);
++      return sdhci_runtime_resume_host(c->host, 0);
+ }
+ 
+ #endif
+diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
+index c391510e9ef4..776a94216248 100644
+--- a/drivers/mmc/host/sdhci-esdhc-imx.c
++++ b/drivers/mmc/host/sdhci-esdhc-imx.c
+@@ -1705,7 +1705,7 @@ static int sdhci_esdhc_runtime_resume(struct device *dev)
+               esdhc_pltfm_set_clock(host, imx_data->actual_clock);
+       }
+ 
+-      err = sdhci_runtime_resume_host(host);
++      err = sdhci_runtime_resume_host(host, 0);
+       if (err)
+               goto disable_ipg_clk;
+ 
+diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c
+index d4993582f0f6..e7d1920729fb 100644
+--- a/drivers/mmc/host/sdhci-of-at91.c
++++ b/drivers/mmc/host/sdhci-of-at91.c
+@@ -289,7 +289,7 @@ static int sdhci_at91_runtime_resume(struct device *dev)
+       }
+ 
+ out:
+-      return sdhci_runtime_resume_host(host);
++      return sdhci_runtime_resume_host(host, 0);
+ }
+ #endif /* CONFIG_PM */
+ 
+diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
+index 4154ee11b47d..267b90374fa4 100644
+--- a/drivers/mmc/host/sdhci-pci-core.c
++++ b/drivers/mmc/host/sdhci-pci-core.c
+@@ -167,7 +167,7 @@ static int sdhci_pci_runtime_suspend_host(struct sdhci_pci_chip *chip)
+ 
+ err_pci_runtime_suspend:
+       while (--i >= 0)
+-              sdhci_runtime_resume_host(chip->slots[i]->host);
++              sdhci_runtime_resume_host(chip->slots[i]->host, 0);
+       return ret;
+ }
+ 
+@@ -181,7 +181,7 @@ static int sdhci_pci_runtime_resume_host(struct sdhci_pci_chip *chip)
+               if (!slot)
+                       continue;
+ 
+-              ret = sdhci_runtime_resume_host(slot->host);
++              ret = sdhci_runtime_resume_host(slot->host, 0);
+               if (ret)
+                       return ret;
+       }
+diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
+index 3ddecf479295..e55037ceda73 100644
+--- a/drivers/mmc/host/sdhci-pxav3.c
++++ b/drivers/mmc/host/sdhci-pxav3.c
+@@ -554,7 +554,7 @@ static int sdhci_pxav3_runtime_resume(struct device *dev)
+       if (!IS_ERR(pxa->clk_core))
+               clk_prepare_enable(pxa->clk_core);
+ 
+-      return sdhci_runtime_resume_host(host);
++      return sdhci_runtime_resume_host(host, 0);
+ }
+ #endif
+ 
+diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
+index 8e4a8ba33f05..f5753aef7151 100644
+--- a/drivers/mmc/host/sdhci-s3c.c
++++ b/drivers/mmc/host/sdhci-s3c.c
+@@ -745,7 +745,7 @@ static int sdhci_s3c_runtime_resume(struct device *dev)
+       clk_prepare_enable(busclk);
+       if (ourhost->cur_clk >= 0)
+               clk_prepare_enable(ourhost->clk_bus[ourhost->cur_clk]);
+-      ret = sdhci_runtime_resume_host(host);
++      ret = sdhci_runtime_resume_host(host, 0);
+       return ret;
+ }
+ #endif
+diff --git a/drivers/mmc/host/sdhci-sprd.c b/drivers/mmc/host/sdhci-sprd.c
+index fc892a8d882f..53f3af53b3fb 100644
+--- a/drivers/mmc/host/sdhci-sprd.c
++++ b/drivers/mmc/host/sdhci-sprd.c
+@@ -497,7 +497,7 @@ static int sdhci_sprd_runtime_resume(struct device *dev)
+               return ret;
+       }
+ 
+-      sdhci_runtime_resume_host(host);
++      sdhci_runtime_resume_host(host, 1);
+ 
+       return 0;
+ }
+diff --git a/drivers/mmc/host/sdhci-xenon.c b/drivers/mmc/host/sdhci-xenon.c
+index 8a18f14cf842..1dea1ba66f7b 100644
+--- a/drivers/mmc/host/sdhci-xenon.c
++++ b/drivers/mmc/host/sdhci-xenon.c
+@@ -638,7 +638,7 @@ static int xenon_runtime_resume(struct device *dev)
+               priv->restore_needed = false;
+       }
+ 
+-      ret = sdhci_runtime_resume_host(host);
++      ret = sdhci_runtime_resume_host(host, 0);
+       if (ret)
+               goto out;
+       return 0;
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+index 59acf8e3331e..a5dc5aae973e 100644
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -3320,7 +3320,7 @@ int sdhci_runtime_suspend_host(struct sdhci_host *host)
+ }
+ EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host);
+ 
+-int sdhci_runtime_resume_host(struct sdhci_host *host)
++int sdhci_runtime_resume_host(struct sdhci_host *host, int soft_reset)
+ {
+       struct mmc_host *mmc = host->mmc;
+       unsigned long flags;
+@@ -3331,7 +3331,7 @@ int sdhci_runtime_resume_host(struct sdhci_host *host)
+                       host->ops->enable_dma(host);
+       }
+ 
+-      sdhci_init(host, 0);
++      sdhci_init(host, soft_reset);
+ 
+       if (mmc->ios.power_mode != MMC_POWER_UNDEFINED &&
+           mmc->ios.power_mode != MMC_POWER_OFF) {
+diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
+index 199712e7adbb..d2c7c9c436c9 100644
+--- a/drivers/mmc/host/sdhci.h
++++ b/drivers/mmc/host/sdhci.h
+@@ -781,7 +781,7 @@ void sdhci_adma_write_desc(struct sdhci_host *host, void **desc,
+ int sdhci_suspend_host(struct sdhci_host *host);
+ int sdhci_resume_host(struct sdhci_host *host);
+ int sdhci_runtime_suspend_host(struct sdhci_host *host);
+-int sdhci_runtime_resume_host(struct sdhci_host *host);
++int sdhci_runtime_resume_host(struct sdhci_host *host, int soft_reset);
+ #endif
+ 
+ void sdhci_cqe_enable(struct mmc_host *mmc);
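The sdhci hunks above thread a new `soft_reset` argument from `sdhci_runtime_resume_host()` into `sdhci_init()`: every caller passes 0, preserving the old full-reinit behaviour, except sdhci-sprd, which passes 1. A stub sketch of the reworked call shape (simplified types, not the driver's real internals):

    #include <stdio.h>

    struct sdhci_host { const char *name; };

    /* Simplified stand-in for the driver-internal init routine. */
    static void sdhci_init(struct sdhci_host *host, int soft)
    {
        printf("%s: %s reset\n", host->name, soft ? "soft" : "full");
    }

    /* The reworked entry point: callers choose the reset depth. */
    static int sdhci_runtime_resume_host(struct sdhci_host *host, int soft_reset)
    {
        sdhci_init(host, soft_reset);
        return 0;
    }

    int main(void)
    {
        struct sdhci_host generic = { "sdhci-pci" }, sprd = { "sdhci-sprd" };

        sdhci_runtime_resume_host(&generic, 0); /* all other callers */
        sdhci_runtime_resume_host(&sprd, 1);    /* sprd requests soft reset */
        return 0;
    }
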
+diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
+index 6a3076881321..8d47ad61bac3 100644
+--- a/drivers/s390/virtio/virtio_ccw.c
++++ b/drivers/s390/virtio/virtio_ccw.c
+@@ -132,6 +132,7 @@ struct airq_info {
+       struct airq_iv *aiv;
+ };
+ static struct airq_info *airq_areas[MAX_AIRQ_AREAS];
++static DEFINE_MUTEX(airq_areas_lock);
+ 
+ #define CCW_CMD_SET_VQ 0x13
+ #define CCW_CMD_VDEV_RESET 0x33
+@@ -244,9 +245,11 @@ static unsigned long get_airq_indicator(struct virtqueue *vqs[], int nvqs,
+       unsigned long bit, flags;
+ 
+       for (i = 0; i < MAX_AIRQ_AREAS && !indicator_addr; i++) {
++              mutex_lock(&airq_areas_lock);
+               if (!airq_areas[i])
+                       airq_areas[i] = new_airq_info();
+               info = airq_areas[i];
++              mutex_unlock(&airq_areas_lock);
+               if (!info)
+                       return 0;
+               write_lock_irqsave(&info->lock, flags);
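The virtio_ccw hunks above serialize the lazy creation of `airq_areas[i]`: without `airq_areas_lock`, two CPUs probing devices concurrently could both observe a NULL slot, both call `new_airq_info()`, and one allocation would be leaked or overwritten. A minimal pthread model of check-allocate-publish under a mutex (names illustrative):

    #include <pthread.h>
    #include <stdlib.h>

    #define MAX_AREAS 8

    static void *areas[MAX_AREAS];
    static pthread_mutex_t areas_lock = PTHREAD_MUTEX_INITIALIZER;

    static void *new_area(void)
    {
        return calloc(1, 64);
    }

    /* Lazily create slot i exactly once, even under concurrency:
     * the test, the allocation, and the publish all happen under
     * the same lock. */
    static void *get_area(int i)
    {
        void *info;

        pthread_mutex_lock(&areas_lock);
        if (!areas[i])
            areas[i] = new_area();
        info = areas[i];
        pthread_mutex_unlock(&areas_lock);
        return info;
    }

    int main(void)
    {
        return get_area(0) ? 0 : 1;
    }
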
+diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
+index ceec8d5985d4..5faae96735e6 100644
+--- a/drivers/usb/chipidea/ci_hdrc_imx.c
++++ b/drivers/usb/chipidea/ci_hdrc_imx.c
+@@ -13,6 +13,7 @@
+ #include <linux/usb/of.h>
+ #include <linux/clk.h>
+ #include <linux/pinctrl/consumer.h>
++#include <linux/pm_qos.h>
+ 
+ #include "ci.h"
+ #include "ci_hdrc_imx.h"
+@@ -63,6 +64,11 @@ static const struct ci_hdrc_imx_platform_flag imx7d_usb_data = {
+       .flags = CI_HDRC_SUPPORTS_RUNTIME_PM,
+ };
+ 
++static const struct ci_hdrc_imx_platform_flag imx7ulp_usb_data = {
++      .flags = CI_HDRC_SUPPORTS_RUNTIME_PM |
++              CI_HDRC_PMQOS,
++};
++
+ static const struct of_device_id ci_hdrc_imx_dt_ids[] = {
+       { .compatible = "fsl,imx23-usb", .data = &imx23_usb_data},
+       { .compatible = "fsl,imx28-usb", .data = &imx28_usb_data},
+@@ -72,6 +78,7 @@ static const struct of_device_id ci_hdrc_imx_dt_ids[] = {
+       { .compatible = "fsl,imx6sx-usb", .data = &imx6sx_usb_data},
+       { .compatible = "fsl,imx6ul-usb", .data = &imx6ul_usb_data},
+       { .compatible = "fsl,imx7d-usb", .data = &imx7d_usb_data},
++      { .compatible = "fsl,imx7ulp-usb", .data = &imx7ulp_usb_data},
+       { /* sentinel */ }
+ };
+ MODULE_DEVICE_TABLE(of, ci_hdrc_imx_dt_ids);
+@@ -93,6 +100,8 @@ struct ci_hdrc_imx_data {
+       struct clk *clk_ahb;
+       struct clk *clk_per;
+       /* --------------------------------- */
++      struct pm_qos_request pm_qos_req;
++      const struct ci_hdrc_imx_platform_flag *plat_data;
+ };
+ 
+ /* Common functions shared by usbmisc drivers */
+@@ -309,6 +318,8 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
+       if (!data)
+               return -ENOMEM;
+ 
++      data->plat_data = imx_platform_flag;
++      pdata.flags |= imx_platform_flag->flags;
+       platform_set_drvdata(pdev, data);
+       data->usbmisc_data = usbmisc_get_init_data(dev);
+       if (IS_ERR(data->usbmisc_data))
+@@ -369,6 +380,11 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
+                       }
+               }
+       }
++
++      if (pdata.flags & CI_HDRC_PMQOS)
++              pm_qos_add_request(&data->pm_qos_req,
++                      PM_QOS_CPU_DMA_LATENCY, 0);
++
+       ret = imx_get_clks(dev);
+       if (ret)
+               goto disable_hsic_regulator;
+@@ -396,7 +412,6 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
+               usb_phy_init(pdata.usb_phy);
+       }
+ 
+-      pdata.flags |= imx_platform_flag->flags;
+       if (pdata.flags & CI_HDRC_SUPPORTS_RUNTIME_PM)
+               data->supports_runtime_pm = true;
+ 
+@@ -438,7 +453,11 @@ err_clk:
+       imx_disable_unprepare_clks(dev);
+ disable_hsic_regulator:
+       if (data->hsic_pad_regulator)
+-              ret = regulator_disable(data->hsic_pad_regulator);
++              /* don't overwrite original ret (cf. EPROBE_DEFER) */
++              regulator_disable(data->hsic_pad_regulator);
++      if (pdata.flags & CI_HDRC_PMQOS)
++              pm_qos_remove_request(&data->pm_qos_req);
++      data->ci_pdev = NULL;
+       return ret;
+ }
+ 
+@@ -451,12 +470,17 @@ static int ci_hdrc_imx_remove(struct platform_device *pdev)
+               pm_runtime_disable(&pdev->dev);
+               pm_runtime_put_noidle(&pdev->dev);
+       }
+-      ci_hdrc_remove_device(data->ci_pdev);
++      if (data->ci_pdev)
++              ci_hdrc_remove_device(data->ci_pdev);
+       if (data->override_phy_control)
+               usb_phy_shutdown(data->phy);
+-      imx_disable_unprepare_clks(&pdev->dev);
+-      if (data->hsic_pad_regulator)
+-              regulator_disable(data->hsic_pad_regulator);
++      if (data->ci_pdev) {
++              imx_disable_unprepare_clks(&pdev->dev);
++              if (data->plat_data->flags & CI_HDRC_PMQOS)
++                      pm_qos_remove_request(&data->pm_qos_req);
++              if (data->hsic_pad_regulator)
++                      regulator_disable(data->hsic_pad_regulator);
++      }
+ 
+       return 0;
+ }
+@@ -480,6 +504,9 @@ static int __maybe_unused imx_controller_suspend(struct device *dev)
+       }
+ 
+       imx_disable_unprepare_clks(dev);
++      if (data->plat_data->flags & CI_HDRC_PMQOS)
++              pm_qos_remove_request(&data->pm_qos_req);
++
+       data->in_lpm = true;
+ 
+       return 0;
+@@ -497,6 +524,10 @@ static int __maybe_unused imx_controller_resume(struct device *dev)
+               return 0;
+       }
+ 
++      if (data->plat_data->flags & CI_HDRC_PMQOS)
++              pm_qos_add_request(&data->pm_qos_req,
++                      PM_QOS_CPU_DMA_LATENCY, 0);
++
+       ret = imx_prepare_enable_clks(dev);
+       if (ret)
+               return ret;
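The chipidea hunks above add a `CI_HDRC_PMQOS` flag for i.MX7ULP: while the controller is active, the glue driver holds a `PM_QOS_CPU_DMA_LATENCY` request of 0 (via the pre-5.4 `pm_qos_add_request()` API shown in the hunks), dropping it after the clocks go off in `imx_controller_suspend()` and re-adding it before they come back in `imx_controller_resume()`. A loose userspace model of that lifecycle (the `qos_*` helpers are stand-ins, not kernel API):

    #include <stdbool.h>
    #include <stdio.h>

    /* Model of the PM QoS lifecycle: hold a zero-latency request
     * while active, drop it across runtime suspend. */
    struct qos_req { bool held; };

    static void qos_add(struct qos_req *r)
    {
        r->held = true;
        puts("qos: cpu dma latency pinned to 0");
    }

    static void qos_remove(struct qos_req *r)
    {
        r->held = false;
        puts("qos: released");
    }

    static void controller_suspend(struct qos_req *r, bool pmqos)
    {
        /* clocks off first, then drop the latency request */
        if (pmqos)
            qos_remove(r);
    }

    static void controller_resume(struct qos_req *r, bool pmqos)
    {
        /* re-pin latency before clocks/DMA come back */
        if (pmqos)
            qos_add(r);
    }

    int main(void)
    {
        struct qos_req req = { false };
        bool pmqos = true;          /* CI_HDRC_PMQOS set for imx7ulp */

        controller_resume(&req, pmqos);
        controller_suspend(&req, pmqos);
        return 0;
    }
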
+diff --git a/drivers/usb/chipidea/usbmisc_imx.c b/drivers/usb/chipidea/usbmisc_imx.c
+index d8b67e150b12..b7a5727d0c8a 100644
+--- a/drivers/usb/chipidea/usbmisc_imx.c
++++ b/drivers/usb/chipidea/usbmisc_imx.c
+@@ -763,6 +763,10 @@ static const struct of_device_id usbmisc_imx_dt_ids[] = {
+               .compatible = "fsl,imx7d-usbmisc",
+               .data = &imx7d_usbmisc_ops,
+       },
++      {
++              .compatible = "fsl,imx7ulp-usbmisc",
++              .data = &imx7d_usbmisc_ops,
++      },
+       { /* sentinel */ }
+ };
+ MODULE_DEVICE_TABLE(of, usbmisc_imx_dt_ids);
+diff --git a/drivers/vhost/test.c b/drivers/vhost/test.c
+index 9e90e969af55..7804869c6a31 100644
+--- a/drivers/vhost/test.c
++++ b/drivers/vhost/test.c
+@@ -22,6 +22,12 @@
+  * Using this limit prevents one virtqueue from starving others. */
+ #define VHOST_TEST_WEIGHT 0x80000
+ 
++/* Max number of packets transferred before requeueing the job.
++ * Using this limit prevents one virtqueue from starving others with
++ * pkts.
++ */
++#define VHOST_TEST_PKT_WEIGHT 256
++
+ enum {
+       VHOST_TEST_VQ = 0,
+       VHOST_TEST_VQ_MAX = 1,
+@@ -80,10 +86,8 @@ static void handle_vq(struct vhost_test *n)
+               }
+               vhost_add_used_and_signal(&n->dev, vq, head, 0);
+               total_len += len;
+-              if (unlikely(total_len >= VHOST_TEST_WEIGHT)) {
+-                      vhost_poll_queue(&vq->poll);
++              if (unlikely(vhost_exceeds_weight(vq, 0, total_len)))
+                       break;
+-              }
+       }
+ 
+       mutex_unlock(&vq->mutex);
+@@ -115,7 +119,8 @@ static int vhost_test_open(struct inode *inode, struct file *f)
+       dev = &n->dev;
+       vqs[VHOST_TEST_VQ] = &n->vqs[VHOST_TEST_VQ];
+       n->vqs[VHOST_TEST_VQ].handle_kick = handle_vq_kick;
+-      vhost_dev_init(dev, vqs, VHOST_TEST_VQ_MAX);
++      vhost_dev_init(dev, vqs, VHOST_TEST_VQ_MAX, UIO_MAXIOV,
++                     VHOST_TEST_PKT_WEIGHT, VHOST_TEST_WEIGHT);
+ 
+       f->private_data = n;
+ 
+diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
+index e995c12d8e24..fcd8bf2846fc 100644
+--- a/drivers/vhost/vhost.c
++++ b/drivers/vhost/vhost.c
+@@ -2072,7 +2072,7 @@ static int get_indirect(struct vhost_virtqueue *vq,
+               /* If this is an input descriptor, increment that count. */
+               if (access == VHOST_ACCESS_WO) {
+                       *in_num += ret;
+-                      if (unlikely(log)) {
++                      if (unlikely(log && ret)) {
+                              log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
+                              log[*log_num].len = vhost32_to_cpu(vq, desc.len);
+                               ++*log_num;
+@@ -2215,7 +2215,7 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq,
+                       /* If this is an input descriptor,
+                        * increment that count. */
+                       *in_num += ret;
+-                      if (unlikely(log)) {
++                      if (unlikely(log && ret)) {
+                              log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
+                              log[*log_num].len = vhost32_to_cpu(vq, desc.len);
+                               ++*log_num;
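The vhost.c hunks above change `if (unlikely(log))` to `if (unlikely(log && ret))`: a descriptor that contributed zero iovecs no longer produces a dirty-log entry, so `*log_num` can never run ahead of `*in_num`. A small model of the invariant (structure heavily simplified):

    #include <assert.h>

    #define MAXV 8

    struct entry { unsigned long addr, len; };

    /* Model: record a log entry only when the descriptor actually
     * contributed iovecs, so log_num stays in step with in_num. */
    static void account_desc(int ret, int log_on, struct entry *log,
                             unsigned *in_num, unsigned *log_num,
                             unsigned long addr, unsigned long len)
    {
        *in_num += ret;
        if (log_on && ret) {            /* the 5.2.15 fix: "&& ret" */
            log[*log_num].addr = addr;
            log[*log_num].len = len;
            ++*log_num;
        }
        assert(*log_num <= *in_num);    /* invariant the fix restores */
    }

    int main(void)
    {
        struct entry log[MAXV];
        unsigned in_num = 0, log_num = 0;

        account_desc(0, 1, log, &in_num, &log_num, 0x1000, 0);  /* empty desc */
        account_desc(2, 1, log, &in_num, &log_num, 0x2000, 64);
        return 0;
    }
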
+diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
+index db337e53aab3..93900ff87df7 100644
+--- a/fs/btrfs/extent_io.c
++++ b/fs/btrfs/extent_io.c
+@@ -3591,6 +3591,13 @@ void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
+                      TASK_UNINTERRUPTIBLE);
+ }
+ 
++static void end_extent_buffer_writeback(struct extent_buffer *eb)
++{
++      clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
++      smp_mb__after_atomic();
++      wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
++}
++
+ /*
+  * Lock eb pages and flush the bio if we can't the locks
+  *
+@@ -3662,8 +3669,11 @@ static noinline_for_stack int lock_extent_buffer_for_io(struct extent_buffer *eb
+ 
+               if (!trylock_page(p)) {
+                       if (!flush) {
+-                              ret = flush_write_bio(epd);
+-                              if (ret < 0) {
++                              int err;
++
++                              err = flush_write_bio(epd);
++                              if (err < 0) {
++                                      ret = err;
+                                       failed_page_nr = i;
+                                       goto err_unlock;
+                               }
+@@ -3678,16 +3688,23 @@ err_unlock:
+       /* Unlock already locked pages */
+       for (i = 0; i < failed_page_nr; i++)
+               unlock_page(eb->pages[i]);
++      /*
++       * Clear EXTENT_BUFFER_WRITEBACK and wake up anyone waiting on it.
++       * Also set back EXTENT_BUFFER_DIRTY so future attempts to this eb can
++       * be made and undo everything done before.
++       */
++      btrfs_tree_lock(eb);
++      spin_lock(&eb->refs_lock);
++      set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
++      end_extent_buffer_writeback(eb);
++      spin_unlock(&eb->refs_lock);
++      percpu_counter_add_batch(&fs_info->dirty_metadata_bytes, eb->len,
++                               fs_info->dirty_metadata_batch);
++      btrfs_clear_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
++      btrfs_tree_unlock(eb);
+       return ret;
+ }
+ 
+-static void end_extent_buffer_writeback(struct extent_buffer *eb)
+-{
+-      clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
+-      smp_mb__after_atomic();
+-      wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
+-}
+-
+ static void set_btree_ioerr(struct page *page)
+ {
+       struct extent_buffer *eb = (struct extent_buffer *)page->private;
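The extent_io hunks above make the error path of `lock_extent_buffer_for_io()` undo its earlier state transition: on a failed `flush_write_bio()`, the eb used to be left with `EXTENT_BUFFER_WRITEBACK` set, `EXTENT_BUFFER_DIRTY` cleared and the dirty-metadata accounting already decremented, hanging waiters and skewing the counters; the error path now re-dirties the eb, wakes writeback waiters and restores `dirty_metadata_bytes`. A generic model of the begin/unwind shape (fields illustrative):

    #include <stdbool.h>

    struct buf {
        bool dirty;
        bool writeback;
        long *dirty_bytes;  /* global accounting the transition moved */
        long len;
    };

    /* Begin writeback: consumes the "dirty" state and its accounting. */
    static void begin_writeback(struct buf *b)
    {
        b->dirty = false;
        b->writeback = true;
        *b->dirty_bytes -= b->len;
    }

    /* On a later failure, every effect of begin_writeback() must be
     * reverted, or waiters on "writeback" stall and accounting drifts. */
    static void unwind_writeback(struct buf *b)
    {
        b->dirty = true;
        b->writeback = false;       /* and wake anyone waiting on it */
        *b->dirty_bytes += b->len;
    }

    int main(void)
    {
        long dirty_bytes = 4096;
        struct buf b = { true, false, &dirty_bytes, 4096 };

        begin_writeback(&b);
        unwind_writeback(&b);       /* flush failed: roll back */
        return dirty_bytes == 4096 ? 0 : 1;
    }
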
+diff --git a/include/linux/usb/chipidea.h b/include/linux/usb/chipidea.h
+index 911e05af671e..edd89b7c8f18 100644
+--- a/include/linux/usb/chipidea.h
++++ b/include/linux/usb/chipidea.h
+@@ -61,6 +61,7 @@ struct ci_hdrc_platform_data {
+ #define CI_HDRC_OVERRIDE_PHY_CONTROL  BIT(12) /* Glue layer manages phy */
+ #define CI_HDRC_REQUIRES_ALIGNED_DMA  BIT(13)
+ #define CI_HDRC_IMX_IS_HSIC           BIT(14)
++#define CI_HDRC_PMQOS                 BIT(15)
+       enum usb_dr_mode        dr_mode;
+ #define CI_HDRC_CONTROLLER_RESET_EVENT                0
+ #define CI_HDRC_CONTROLLER_STOPPED_EVENT      1
+diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h
+index 68e38c20afc0..85544777587d 100644
+--- a/include/rdma/rdmavt_qp.h
++++ b/include/rdma/rdmavt_qp.h
+@@ -540,7 +540,7 @@ static inline void rvt_qp_wqe_reserve(
+ /**
+  * rvt_qp_wqe_unreserve - clean reserved operation
+  * @qp - the rvt qp
+- * @wqe - the send wqe
++ * @flags - send wqe flags
+  *
+  * This decrements the reserve use count.
+  *
+@@ -552,11 +552,9 @@ static inline void rvt_qp_wqe_reserve(
+  * the compiler does not juggle the order of the s_last
+  * ring index and the decrementing of s_reserved_used.
+  */
+-static inline void rvt_qp_wqe_unreserve(
+-      struct rvt_qp *qp,
+-      struct rvt_swqe *wqe)
++static inline void rvt_qp_wqe_unreserve(struct rvt_qp *qp, int flags)
+ {
+-      if (unlikely(wqe->wr.send_flags & RVT_SEND_RESERVE_USED)) {
++      if (unlikely(flags & RVT_SEND_RESERVE_USED)) {
+               atomic_dec(&qp->s_reserved_used);
+               /* insure no compiler re-order up to s_last change */
+               smp_mb__after_atomic();
+@@ -565,42 +563,6 @@ static inline void rvt_qp_wqe_unreserve(
+ 
+ extern const enum ib_wc_opcode ib_rvt_wc_opcode[];
+ 
+-/**
+- * rvt_qp_swqe_complete() - insert send completion
+- * @qp - the qp
+- * @wqe - the send wqe
+- * @status - completion status
+- *
+- * Insert a send completion into the completion
+- * queue if the qp indicates it should be done.
+- *
+- * See IBTA 10.7.3.1 for info on completion
+- * control.
+- */
+-static inline void rvt_qp_swqe_complete(
+-      struct rvt_qp *qp,
+-      struct rvt_swqe *wqe,
+-      enum ib_wc_opcode opcode,
+-      enum ib_wc_status status)
+-{
+-      if (unlikely(wqe->wr.send_flags & RVT_SEND_RESERVE_USED))
+-              return;
+-      if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
+-          (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
+-           status != IB_WC_SUCCESS) {
+-              struct ib_wc wc;
+-
+-              memset(&wc, 0, sizeof(wc));
+-              wc.wr_id = wqe->wr.wr_id;
+-              wc.status = status;
+-              wc.opcode = opcode;
+-              wc.qp = &qp->ibqp;
+-              wc.byte_len = wqe->length;
+-              rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc,
+-                           status != IB_WC_SUCCESS);
+-      }
+-}
+-
+ /*
+  * Compare the lower 24 bits of the msn values.
+  * Returns an integer <, ==, or > than zero.
+@@ -737,6 +699,79 @@ static inline void rvt_put_qp_swqe(struct rvt_qp *qp, struct rvt_swqe *wqe)
+               atomic_dec(&ibah_to_rvtah(wqe->ud_wr.ah)->refcount);
+ }
+ 
++/**
++ * rvt_qp_swqe_incr - increment ring index
++ * @qp: the qp
++ * @val: the starting value
++ *
++ * Return: the new value wrapping as appropriate
++ */
++static inline u32
++rvt_qp_swqe_incr(struct rvt_qp *qp, u32 val)
++{
++      if (++val >= qp->s_size)
++              val = 0;
++      return val;
++}
++
++/**
++ * rvt_qp_complete_swqe - insert send completion
++ * @qp - the qp
++ * @wqe - the send wqe
++ * @opcode - wc operation (driver dependent)
++ * @status - completion status
++ *
++ * Update the s_last information, and then insert a send
++ * completion into the completion
++ * queue if the qp indicates it should be done.
++ *
++ * See IBTA 10.7.3.1 for info on completion
++ * control.
++ *
++ * Return: new last
++ */
++static inline u32
++rvt_qp_complete_swqe(struct rvt_qp *qp,
++                   struct rvt_swqe *wqe,
++                   enum ib_wc_opcode opcode,
++                   enum ib_wc_status status)
++{
++      bool need_completion;
++      u64 wr_id;
++      u32 byte_len, last;
++      int flags = wqe->wr.send_flags;
++
++      rvt_qp_wqe_unreserve(qp, flags);
++      rvt_put_qp_swqe(qp, wqe);
++
++      need_completion =
++              !(flags & RVT_SEND_RESERVE_USED) &&
++              (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
++              (flags & IB_SEND_SIGNALED) ||
++              status != IB_WC_SUCCESS);
++      if (need_completion) {
++              wr_id = wqe->wr.wr_id;
++              byte_len = wqe->length;
++              /* above fields required before writing s_last */
++      }
++      last = rvt_qp_swqe_incr(qp, qp->s_last);
++      /* see rvt_qp_is_avail() */
++      smp_store_release(&qp->s_last, last);
++      if (need_completion) {
++              struct ib_wc w = {
++                      .wr_id = wr_id,
++                      .status = status,
++                      .opcode = opcode,
++                      .qp = &qp->ibqp,
++                      .byte_len = byte_len,
++              };
++
++              rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &w,
++                           status != IB_WC_SUCCESS);
++      }
++      return last;
++}
++
+ extern const int  ib_rvt_state_ops[];
+ 
+ struct rvt_dev_info;
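The completion refactor above replaces the old `barrier()` plus plain store of `qp->s_last` with `smp_store_release()` in `rvt_qp_complete_swqe()`, paired with the `smp_load_acquire()` added to `rvt_qp_is_avail()` in the qp.c hunk: the completer publishes its reads and puts of the wqe before moving `s_last`, and a submitter that observes the new `s_last` therefore sees the slot as safely reusable. A compact C11 model of that release/acquire pairing on a ring index (layout illustrative):

    #include <stdatomic.h>

    #define RING 16

    struct ring {
        unsigned long wr_id[RING];
        _Atomic unsigned s_last;    /* consumer index, release-published */
    };

    /* Completion side: read out the slot, then publish the new s_last with
     * release semantics so the slot accesses happen-before the index move. */
    static unsigned long complete_one(struct ring *r)
    {
        unsigned last = atomic_load_explicit(&r->s_last, memory_order_relaxed);
        unsigned long id = r->wr_id[last];

        atomic_store_explicit(&r->s_last, (last + 1) % RING,
                              memory_order_release);
        return id;
    }

    /* Submit side: an acquire load of s_last makes everything the completer
     * did before the release visible, so the freed slot is safe to reuse. */
    static unsigned slots_free(struct ring *r, unsigned s_head)
    {
        unsigned slast = atomic_load_explicit(&r->s_last,
                                              memory_order_acquire);
        return (slast + RING - s_head - 1) % RING;
    }

    int main(void)
    {
        struct ring r = { { 42 }, 0 };

        complete_one(&r);
        return slots_free(&r, 1) ? 0 : 1;
    }
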
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 9ecf1e4c624b..b07672e793a8 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -4449,6 +4449,8 @@ static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
+       if (likely(cfs_rq->runtime_remaining > 0))
+               return;
+ 
++      if (cfs_rq->throttled)
++              return;
+       /*
+        * if we're unable to extend our runtime we resched so that the active
+        * hierarchy can be throttled
+@@ -4652,6 +4654,9 @@ static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b,
+               if (!cfs_rq_throttled(cfs_rq))
+                       goto next;
+ 
++              /* By the above check, this should never be true */
++              SCHED_WARN_ON(cfs_rq->runtime_remaining > 0);
++
+               runtime = -cfs_rq->runtime_remaining + 1;
+               if (runtime > remaining)
+                       runtime = remaining;
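The fair.c hunks above stop a throttled cfs_rq from re-entering the runtime-extension path: `__account_cfs_rq_runtime()` now returns early when already throttled, which is exactly what makes the new `SCHED_WARN_ON(cfs_rq->runtime_remaining > 0)` in `distribute_cfs_runtime()` a valid invariant. A toy model of the guard and the invariant it restores (fields simplified):

    #include <assert.h>
    #include <stdbool.h>

    struct cfs_rq_model {
        bool throttled;
        long runtime_remaining;
    };

    /* Accounting path: never try to extend runtime once throttled. */
    static void account_runtime(struct cfs_rq_model *rq, long delta)
    {
        rq->runtime_remaining -= delta;
        if (rq->runtime_remaining > 0)
            return;
        if (rq->throttled)
            return;             /* the 5.2.15 fix: no re-extension */
        /* ... try to pull more quota, else throttle ... */
        rq->throttled = true;
    }

    /* Distribution path: the guard above makes this invariant hold. */
    static void distribute(struct cfs_rq_model *rq)
    {
        if (!rq->throttled)
            return;
        assert(rq->runtime_remaining <= 0); /* models SCHED_WARN_ON */
        rq->runtime_remaining = 1;
        rq->throttled = false;
    }

    int main(void)
    {
        struct cfs_rq_model rq = { false, 5 };

        account_runtime(&rq, 10);   /* exhausts quota, throttles */
        account_runtime(&rq, 1);    /* throttled: early return */
        distribute(&rq);
        return rq.throttled ? 1 : 0;
    }
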
+diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
+index 240ed70912d6..d78938e3e008 100644
+--- a/net/batman-adv/bat_iv_ogm.c
++++ b/net/batman-adv/bat_iv_ogm.c
+@@ -277,17 +277,23 @@ static u8 batadv_hop_penalty(u8 tq, const struct batadv_priv *bat_priv)
+  * batadv_iv_ogm_aggr_packet() - checks if there is another OGM attached
+  * @buff_pos: current position in the skb
+  * @packet_len: total length of the skb
+- * @tvlv_len: tvlv length of the previously considered OGM
++ * @ogm_packet: potential OGM in buffer
+  *
+  * Return: true if there is enough space for another OGM, false otherwise.
+  */
+-static bool batadv_iv_ogm_aggr_packet(int buff_pos, int packet_len,
+-                                    __be16 tvlv_len)
++static bool
++batadv_iv_ogm_aggr_packet(int buff_pos, int packet_len,
++                        const struct batadv_ogm_packet *ogm_packet)
+ {
+       int next_buff_pos = 0;
+ 
+-      next_buff_pos += buff_pos + BATADV_OGM_HLEN;
+-      next_buff_pos += ntohs(tvlv_len);
++      /* check if there is enough space for the header */
++      next_buff_pos += buff_pos + sizeof(*ogm_packet);
++      if (next_buff_pos > packet_len)
++              return false;
++
++      /* check if there is enough space for the optional TVLV */
++      next_buff_pos += ntohs(ogm_packet->tvlv_len);
+ 
+       return (next_buff_pos <= packet_len) &&
+              (next_buff_pos <= BATADV_MAX_AGGREGATION_BYTES);
+@@ -315,7 +321,7 @@ static void batadv_iv_ogm_send_to_if(struct batadv_forw_packet *forw_packet,
+ 
+       /* adjust all flags and log packets */
+       while (batadv_iv_ogm_aggr_packet(buff_pos, forw_packet->packet_len,
+-                                       batadv_ogm_packet->tvlv_len)) {
++                                       batadv_ogm_packet)) {
+               /* we might have aggregated direct link packets with an
+                * ordinary base packet
+                */
+@@ -1704,7 +1710,7 @@ static int batadv_iv_ogm_receive(struct sk_buff *skb,
+ 
+       /* unpack the aggregated packets and process them one by one */
+       while (batadv_iv_ogm_aggr_packet(ogm_offset, skb_headlen(skb),
+-                                       ogm_packet->tvlv_len)) {
++                                       ogm_packet)) {
+               batadv_iv_ogm_process(skb, ogm_offset, if_incoming);
+ 
+               ogm_offset += BATADV_OGM_HLEN;
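
The batman-adv change above is a classic two-step length validation: the old code trusted tvlv_len, a field read out of a header that might itself extend past the end of the buffer. The fixed helper first checks that the fixed-size header fits, and only then dereferences the variable-length field inside it. A self-contained sketch of the pattern with a made-up wire format (not batman-adv's actual OGM layout):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Hypothetical header: fixed part, then tlv_len_be extra bytes. */
    struct wire_hdr { uint8_t type; uint8_t flags; uint16_t tlv_len_be; };

    static uint16_t be16(const uint8_t *p)
    {
            return (uint16_t)((p[0] << 8) | p[1]);
    }

    static bool next_packet_fits(const uint8_t *buf, size_t pos, size_t len)
    {
            size_t next = pos + sizeof(struct wire_hdr);

            /* 1) the fixed header must lie fully inside the buffer */
            if (next > len)
                    return false;

            /* 2) only now is the embedded length field safe to read */
            next += be16(buf + pos + offsetof(struct wire_hdr, tlv_len_be));
            return next <= len;
    }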
+diff --git a/net/batman-adv/netlink.c b/net/batman-adv/netlink.c
+index a67720fad46c..fdbd9f4c976b 100644
+--- a/net/batman-adv/netlink.c
++++ b/net/batman-adv/netlink.c
+@@ -164,7 +164,7 @@ batadv_netlink_get_ifindex(const struct nlmsghdr *nlh, int attrtype)
+ {
+       struct nlattr *attr = nlmsg_find_attr(nlh, GENL_HDRLEN, attrtype);
+ 
+-      return attr ? nla_get_u32(attr) : 0;
++      return (attr && nla_len(attr) == sizeof(u32)) ? nla_get_u32(attr) : 0;
+ }
+ 
+ /**
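
The netlink hunk above hardens the attribute read the same way: nla_get_u32() on an attribute whose payload is shorter than four bytes would read past its end, so the fix checks nla_len() first and falls back to 0. A generic sketch of that guard over a simplified TLV attribute (the struct here is a stand-in, not struct nlattr):

    #include <stdint.h>
    #include <string.h>

    /* Simplified TLV attribute: len covers header plus payload. */
    struct attr { uint16_t len; uint16_t type; /* payload follows */ };

    /* Return the u32 payload, or fallback when the attribute is absent
     * or its payload is not exactly 4 bytes -- the guard the patch adds. */
    static uint32_t attr_get_u32(const struct attr *a, uint32_t fallback)
    {
            uint32_t v;

            if (!a || a->len < sizeof(*a) ||
                a->len - sizeof(*a) != sizeof(uint32_t))
                    return fallback;

            memcpy(&v, (const uint8_t *)a + sizeof(*a), sizeof(v));
            return v;
    }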
+diff --git a/sound/pci/hda/hda_auto_parser.c b/sound/pci/hda/hda_auto_parser.c
+index 92390d457567..18e6546b4467 100644
+--- a/sound/pci/hda/hda_auto_parser.c
++++ b/sound/pci/hda/hda_auto_parser.c
+@@ -824,6 +824,8 @@ static void apply_fixup(struct hda_codec *codec, int id, int action, int depth)
+       while (id >= 0) {
+               const struct hda_fixup *fix = codec->fixup_list + id;
+ 
++              if (++depth > 10)
++                      break;
+               if (fix->chained_before)
+                       apply_fixup(codec, fix->chain_id, action, depth + 1);
+ 
+@@ -863,8 +865,6 @@ static void apply_fixup(struct hda_codec *codec, int id, int action, int depth)
+               }
+               if (!fix->chained || fix->chained_before)
+                       break;
+-              if (++depth > 10)
+-                      break;
+               id = fix->chain_id;
+       }
+ }
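
The apply_fixup() change moves the ++depth guard from the bottom of the loop to the top, so it is now evaluated on every step, including iterations that recurse through a chained_before entry and then break before ever reaching the old bottom-of-loop check; a cyclic fixup chain can therefore no longer recurse without bound. A condensed sketch of the corrected shape (fields abbreviated from struct hda_fixup):

    struct fixup { int chain_id; int chained; int chained_before; };

    static void walk(const struct fixup *tbl, int id, int depth)
    {
            while (id >= 0) {
                    const struct fixup *fix = &tbl[id];

                    if (++depth > 10)   /* guard first: bounds the loop */
                            break;      /* and the recursion below      */

                    if (fix->chained_before)
                            walk(tbl, fix->chain_id, depth + 1);

                    /* ... apply this fixup's action here ... */

                    if (!fix->chained || fix->chained_before)
                            break;
                    id = fix->chain_id;
            }
    }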
+diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
+index 5bf24fb819d2..10d502328b76 100644
+--- a/sound/pci/hda/hda_generic.c
++++ b/sound/pci/hda/hda_generic.c
+@@ -6009,7 +6009,8 @@ int snd_hda_gen_init(struct hda_codec *codec)
+       if (spec->init_hook)
+               spec->init_hook(codec);
+ 
+-      snd_hda_apply_verbs(codec);
++      if (!spec->skip_verbs)
++              snd_hda_apply_verbs(codec);
+ 
+       init_multi_out(codec);
+       init_extra_out(codec);
+diff --git a/sound/pci/hda/hda_generic.h b/sound/pci/hda/hda_generic.h
+index 5f199dcb0d18..fb9f1a90238b 100644
+--- a/sound/pci/hda/hda_generic.h
++++ b/sound/pci/hda/hda_generic.h
+@@ -243,6 +243,7 @@ struct hda_gen_spec {
+       unsigned int indep_hp_enabled:1; /* independent HP enabled */
+       unsigned int have_aamix_ctl:1;
+       unsigned int hp_mic_jack_modes:1;
++      unsigned int skip_verbs:1; /* don't apply verbs at snd_hda_gen_init() */
+ 
+       /* additional mute flags (only effective with auto_mute_via_amp=1) */
+       u64 mute_bits;
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index e333b3e30e31..c1ddfd2fac52 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -837,9 +837,11 @@ static int alc_init(struct hda_codec *codec)
+       if (spec->init_hook)
+               spec->init_hook(codec);
+ 
++      spec->gen.skip_verbs = 1; /* applied below */
+       snd_hda_gen_init(codec);
+       alc_fix_pll(codec);
+       alc_auto_init_amp(codec, spec->init_amp);
++      snd_hda_apply_verbs(codec); /* apply verbs here after own init */
+ 
+       snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_INIT);
+ 
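
Taken together, the skip_verbs bit added to hda_generic.h and the alc_init() change above reorder initialization: the generic code is told not to apply the verb tables, the Realtek driver runs its own PLL and amp setup, and only then are the verbs applied. A toy model of that defer-then-apply handshake (names hypothetical, not the HDA driver's API):

    #include <stdio.h>

    struct spec { unsigned int skip_verbs:1; };

    static void apply_verbs(void) { puts("apply verbs"); }

    static void generic_init(struct spec *s)
    {
            if (!s->skip_verbs)
                    apply_verbs();  /* default behaviour, now skippable */
            puts("generic init");
    }

    static void codec_init(struct spec *s)
    {
            s->skip_verbs = 1;      /* defer: verbs must not run yet */
            generic_init(s);
            puts("codec-specific PLL/amp init");
            apply_verbs();          /* now safe to apply */
    }

    int main(void)
    {
            struct spec s = { 0 };
            codec_init(&s);
            return 0;
    }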
+@@ -5797,6 +5799,7 @@ enum {
+       ALC286_FIXUP_ACER_AIO_HEADSET_MIC,
+       ALC256_FIXUP_ASUS_MIC_NO_PRESENCE,
+       ALC299_FIXUP_PREDATOR_SPK,
++      ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC,
+ };
+ 
+ static const struct hda_fixup alc269_fixups[] = {
+@@ -6837,6 +6840,16 @@ static const struct hda_fixup alc269_fixups[] = {
+                       { }
+               }
+       },
++      [ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC] = {
++              .type = HDA_FIXUP_PINS,
++              .v.pins = (const struct hda_pintbl[]) {
++                      { 0x14, 0x411111f0 }, /* disable confusing internal speaker */
++                      { 0x19, 0x04a11150 }, /* use as headset mic, without its own jack detect */
++                      { }
++              },
++              .chained = true,
++              .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
++      },
+ };
+ 
+ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
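
The two pin overrides in the new ALC294 fixup are raw HD Audio pin-default-config dwords; per the HD Audio specification's encoding, 0x411111f0 marks a pin as having no physical connection, while 0x04a11150 describes an external mic-style jack. A small decoder for the most telling fields (bit offsets per the spec; the helper itself is illustrative):

    #include <stdint.h>
    #include <stdio.h>

    /* Bits 31:30 connectivity, 29:24 location, 23:20 default device,
     * 19:16 connection type (per the HD Audio pin-config encoding). */
    static void decode_pincfg(uint32_t cfg)
    {
            printf("conn=%u loc=0x%02x dev=0x%x type=0x%x\n",
                   cfg >> 30, (cfg >> 24) & 0x3f,
                   (cfg >> 20) & 0xf, (cfg >> 16) & 0xf);
    }

    int main(void)
    {
            decode_pincfg(0x411111f0);  /* conn=1: not connected */
            decode_pincfg(0x04a11150);  /* dev=0xa: mic in, 1/8" jack */
            return 0;
    }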
+@@ -6979,6 +6992,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+       SND_PCI_QUIRK(0x103c, 0x82c0, "HP G3 mini premium", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x103c, 0x83b9, "HP Spectre x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
+       SND_PCI_QUIRK(0x103c, 0x8497, "HP Envy x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
++      SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
+       SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
+       SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
+       SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+@@ -6995,6 +7009,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+       SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK),
+       SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
+       SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
++      SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC),
+       SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
+       SND_PCI_QUIRK(0x1043, 0x1a30, "ASUS X705UD", ALC256_FIXUP_ASUS_MIC),
+       SND_PCI_QUIRK(0x1043, 0x1b13, "Asus U41SV", ALC269_FIXUP_INV_DMIC),
+@@ -7072,6 +7087,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+       SND_PCI_QUIRK(0x17aa, 0x312a, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
+       SND_PCI_QUIRK(0x17aa, 0x312f, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
+       SND_PCI_QUIRK(0x17aa, 0x313c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
++      SND_PCI_QUIRK(0x17aa, 0x3151, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
+       SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
+       SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
+       SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo B50-70", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
+@@ -8946,6 +8962,7 @@ static int patch_alc680(struct hda_codec *codec)
+ static const struct hda_device_id snd_hda_id_realtek[] = {
+       HDA_CODEC_ENTRY(0x10ec0215, "ALC215", patch_alc269),
+       HDA_CODEC_ENTRY(0x10ec0221, "ALC221", patch_alc269),
++      HDA_CODEC_ENTRY(0x10ec0222, "ALC222", patch_alc269),
+       HDA_CODEC_ENTRY(0x10ec0225, "ALC225", patch_alc269),
+       HDA_CODEC_ENTRY(0x10ec0231, "ALC231", patch_alc269),
+       HDA_CODEC_ENTRY(0x10ec0233, "ALC233", patch_alc269),
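
The remaining hunks extend lookup tables: SND_PCI_QUIRK() entries match on the PCI subsystem vendor/device pair to select a fixup, and HDA_CODEC_ENTRY() binds a codec ID to its patch function. A toy version of the quirk lookup, just to show the matching shape (IDs copied from the entries above; the fixup values are placeholders):

    #include <stdint.h>
    #include <stdio.h>

    struct quirk { uint16_t subvendor, subdevice; const char *name; int fixup; };

    static const struct quirk tbl[] = {
            { 0x1043, 0x17d1, "ASUS UX431FL", 1 },        /* fixup ids are */
            { 0x17aa, 0x3151, "ThinkCentre Station", 2 }, /* placeholders  */
            { 0, 0, NULL, 0 }
    };

    static const struct quirk *lookup(uint16_t sv, uint16_t sd)
    {
            for (const struct quirk *q = tbl; q->name; q++)
                    if (q->subvendor == sv && q->subdevice == sd)
                            return q;
            return NULL;
    }

    int main(void)
    {
            const struct quirk *q = lookup(0x1043, 0x17d1);
            printf("%s\n", q ? q->name : "no quirk");
            return 0;
    }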
