commit:     55cf95df828afff204ed90379d33ae8b7c0a7a64
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Mon Oct  7 17:39:28 2019 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Mon Oct  7 17:39:28 2019 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=55cf95df

Linux patch 4.14.148

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README               |    4 +
 1147_linux-4.14.148.patch | 2595 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 2599 insertions(+)

diff --git a/0000_README b/0000_README
index 300d3fc..140c250 100644
--- a/0000_README
+++ b/0000_README
@@ -631,6 +631,10 @@ Patch:  1146_linux-4.14.147.patch
 From:   https://www.kernel.org
 Desc:   Linux 4.14.147
 
+Patch:  1147_linux-4.14.148.patch
+From:   https://www.kernel.org
+Desc:   Linux 4.14.148
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1147_linux-4.14.148.patch b/1147_linux-4.14.148.patch
new file mode 100644
index 0000000..e0d3bfb
--- /dev/null
+++ b/1147_linux-4.14.148.patch
@@ -0,0 +1,2595 @@
+diff --git a/Makefile b/Makefile
+index d6f1a056b233..feecefa13ca6 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 14
+-SUBLEVEL = 147
++SUBLEVEL = 148
+ EXTRAVERSION =
+ NAME = Petit Gorille
+ 
+diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
+index 49b1b8048635..9bb446cc135d 100644
+--- a/arch/arm/mm/fault.c
++++ b/arch/arm/mm/fault.c
+@@ -215,7 +215,7 @@ static inline bool access_error(unsigned int fsr, struct vm_area_struct *vma)
+ {
+       unsigned int mask = VM_READ | VM_WRITE | VM_EXEC;
+ 
+-      if (fsr & FSR_WRITE)
++      if ((fsr & FSR_WRITE) && !(fsr & FSR_CM))
+               mask = VM_WRITE;
+       if (fsr & FSR_LNX_PF)
+               mask = VM_EXEC;
+@@ -285,7 +285,7 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
+ 
+       if (user_mode(regs))
+               flags |= FAULT_FLAG_USER;
+-      if (fsr & FSR_WRITE)
++      if ((fsr & FSR_WRITE) && !(fsr & FSR_CM))
+               flags |= FAULT_FLAG_WRITE;
+ 
+       /*
+diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h
+index c063708fa503..9ecc2097a87a 100644
+--- a/arch/arm/mm/fault.h
++++ b/arch/arm/mm/fault.h
+@@ -6,6 +6,7 @@
+  * Fault status register encodings.  We steal bit 31 for our own purposes.
+  */
+ #define FSR_LNX_PF            (1 << 31)
++#define FSR_CM                        (1 << 13)
+ #define FSR_WRITE             (1 << 11)
+ #define FSR_FS4                       (1 << 10)
+ #define FSR_FS3_0             (15)
+diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
+index e46a6a446cdd..70e560cf8ca0 100644
+--- a/arch/arm/mm/mmu.c
++++ b/arch/arm/mm/mmu.c
+@@ -1175,6 +1175,22 @@ void __init adjust_lowmem_bounds(void)
+        */
+       vmalloc_limit = (u64)(uintptr_t)vmalloc_min - PAGE_OFFSET + PHYS_OFFSET;
+ 
++      /*
++       * The first usable region must be PMD aligned. Mark its start
++       * as MEMBLOCK_NOMAP if it isn't
++       */
++      for_each_memblock(memory, reg) {
++              if (!memblock_is_nomap(reg)) {
++                      if (!IS_ALIGNED(reg->base, PMD_SIZE)) {
++                              phys_addr_t len;
++
++                              len = round_up(reg->base, PMD_SIZE) - reg->base;
++                              memblock_mark_nomap(reg->base, len);
++                      }
++                      break;
++              }
++      }
++
+       for_each_memblock(memory, reg) {
+               phys_addr_t block_start = reg->base;
+               phys_addr_t block_end = reg->base + reg->size;
+diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
+index 0f2e1ab5e166..9b2e2e2e728a 100644
+--- a/arch/arm64/include/asm/cmpxchg.h
++++ b/arch/arm64/include/asm/cmpxchg.h
+@@ -73,7 +73,7 @@ __XCHG_CASE( ,  ,  mb_8, dmb ish, nop,  , a, l, "memory")
+ #undef __XCHG_CASE
+ 
+ #define __XCHG_GEN(sfx)                                                       \
+-static inline unsigned long __xchg##sfx(unsigned long x,              \
++static __always_inline  unsigned long __xchg##sfx(unsigned long x,    \
+                                       volatile void *ptr,             \
+                                       int size)                       \
+ {                                                                     \
+@@ -115,7 +115,7 @@ __XCHG_GEN(_mb)
+ #define xchg(...)             __xchg_wrapper( _mb, __VA_ARGS__)
+ 
+ #define __CMPXCHG_GEN(sfx)                                            \
+-static inline unsigned long __cmpxchg##sfx(volatile void *ptr,                \
++static __always_inline unsigned long __cmpxchg##sfx(volatile void *ptr,       \
+                                          unsigned long old,           \
+                                          unsigned long new,           \
+                                          int size)                    \
+@@ -248,7 +248,7 @@ __CMPWAIT_CASE( ,  , 8);
+ #undef __CMPWAIT_CASE
+ 
+ #define __CMPWAIT_GEN(sfx)                                            \
+-static inline void __cmpwait##sfx(volatile void *ptr,                 \
++static __always_inline void __cmpwait##sfx(volatile void *ptr,                \
+                                 unsigned long val,                    \
+                                 int size)                             \
+ {                                                                     \
+diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
+index c2a6869418f7..dc495578d44d 100644
+--- a/arch/mips/mm/tlbex.c
++++ b/arch/mips/mm/tlbex.c
+@@ -634,7 +634,7 @@ static __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
+               return;
+       }
+ 
+-      if (cpu_has_rixi && _PAGE_NO_EXEC) {
++      if (cpu_has_rixi && !!_PAGE_NO_EXEC) {
+               if (fill_includes_sw_bits) {
+                       UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL));
+               } else {
+diff --git a/arch/powerpc/include/asm/futex.h b/arch/powerpc/include/asm/futex.h
+index 1a944c18c539..3c7d85945229 100644
+--- a/arch/powerpc/include/asm/futex.h
++++ b/arch/powerpc/include/asm/futex.h
+@@ -59,8 +59,7 @@ static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
+ 
+       pagefault_enable();
+ 
+-      if (!ret)
+-              *oval = oldval;
++      *oval = oldval;
+ 
+       return ret;
+ }
+diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
+index 43cde6c60279..cdc53fd90597 100644
+--- a/arch/powerpc/kernel/exceptions-64s.S
++++ b/arch/powerpc/kernel/exceptions-64s.S
+@@ -464,6 +464,10 @@ EXC_COMMON_BEGIN(machine_check_handle_early)
+       RFI_TO_USER_OR_KERNEL
+ 9:
+       /* Deliver the machine check to host kernel in V mode. */
++BEGIN_FTR_SECTION
++      ld      r10,ORIG_GPR3(r1)
++      mtspr   SPRN_CFAR,r10
++END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
+       MACHINE_CHECK_HANDLER_WINDUP
+       b       machine_check_pSeries
+ 
+diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
+index 1643e9e53655..141d192c6953 100644
+--- a/arch/powerpc/kernel/rtas.c
++++ b/arch/powerpc/kernel/rtas.c
+@@ -874,15 +874,17 @@ static int rtas_cpu_state_change_mask(enum rtas_cpu_state state,
+               return 0;
+ 
+       for_each_cpu(cpu, cpus) {
++              struct device *dev = get_cpu_device(cpu);
++
+               switch (state) {
+               case DOWN:
+-                      cpuret = cpu_down(cpu);
++                      cpuret = device_offline(dev);
+                       break;
+               case UP:
+-                      cpuret = cpu_up(cpu);
++                      cpuret = device_online(dev);
+                       break;
+               }
+-              if (cpuret) {
++              if (cpuret < 0) {
+                       pr_debug("%s: cpu_%s for cpu#%d returned %d.\n",
+                                       __func__,
+                                       ((state == UP) ? "up" : "down"),
+@@ -971,6 +973,8 @@ int rtas_ibm_suspend_me(u64 handle)
+       data.token = rtas_token("ibm,suspend-me");
+       data.complete = &done;
+ 
++      lock_device_hotplug();
++
+       /* All present CPUs must be online */
+       cpumask_andnot(offline_mask, cpu_present_mask, cpu_online_mask);
+       cpuret = rtas_online_cpus_mask(offline_mask);
+@@ -1002,6 +1006,7 @@ int rtas_ibm_suspend_me(u64 handle)
+                               __func__);
+ 
+ out:
++      unlock_device_hotplug();
+       free_cpumask_var(offline_mask);
+       return atomic_read(&data.error);
+ }
+diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c
+index 4addc552eb33..9739a055e5f7 100644
+--- a/arch/powerpc/platforms/pseries/mobility.c
++++ b/arch/powerpc/platforms/pseries/mobility.c
+@@ -12,6 +12,7 @@
+ #include <linux/cpu.h>
+ #include <linux/kernel.h>
+ #include <linux/kobject.h>
++#include <linux/sched.h>
+ #include <linux/smp.h>
+ #include <linux/stat.h>
+ #include <linux/completion.h>
+@@ -208,7 +209,11 @@ static int update_dt_node(__be32 phandle, s32 scope)
+ 
+                               prop_data += vd;
+                       }
++
++                      cond_resched();
+               }
++
++              cond_resched();
+       } while (rtas_rc == 1);
+ 
+       of_node_put(dn);
+@@ -317,8 +322,12 @@ int pseries_devicetree_update(s32 scope)
+                                       add_dt_node(phandle, drc_index);
+                                       break;
+                               }
++
++                              cond_resched();
+                       }
+               }
++
++              cond_resched();
+       } while (rc == 1);
+ 
+       kfree(rtas_buf);
+diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
+index 6a0ad56e89b9..7a9945b35053 100644
+--- a/arch/powerpc/platforms/pseries/setup.c
++++ b/arch/powerpc/platforms/pseries/setup.c
+@@ -307,6 +307,9 @@ static void pseries_lpar_idle(void)
+        * low power mode by ceding processor to hypervisor
+        */
+ 
++      if (!prep_irq_for_idle())
++              return;
++
+       /* Indicate to hypervisor that we are idle. */
+       get_lppaca()->idle = 1;
+ 
+diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
+index 6b9038a3e79f..5a739588aa50 100644
+--- a/arch/powerpc/xmon/xmon.c
++++ b/arch/powerpc/xmon/xmon.c
+@@ -2438,13 +2438,16 @@ static void dump_pacas(void)
+ static void dump_one_xive(int cpu)
+ {
+       unsigned int hwid = get_hard_smp_processor_id(cpu);
++      bool hv = cpu_has_feature(CPU_FTR_HVMODE);
+ 
+-      opal_xive_dump(XIVE_DUMP_TM_HYP, hwid);
+-      opal_xive_dump(XIVE_DUMP_TM_POOL, hwid);
+-      opal_xive_dump(XIVE_DUMP_TM_OS, hwid);
+-      opal_xive_dump(XIVE_DUMP_TM_USER, hwid);
+-      opal_xive_dump(XIVE_DUMP_VP, hwid);
+-      opal_xive_dump(XIVE_DUMP_EMU_STATE, hwid);
++      if (hv) {
++              opal_xive_dump(XIVE_DUMP_TM_HYP, hwid);
++              opal_xive_dump(XIVE_DUMP_TM_POOL, hwid);
++              opal_xive_dump(XIVE_DUMP_TM_OS, hwid);
++              opal_xive_dump(XIVE_DUMP_TM_USER, hwid);
++              opal_xive_dump(XIVE_DUMP_VP, hwid);
++              opal_xive_dump(XIVE_DUMP_EMU_STATE, hwid);
++      }
+ 
+       if (setjmp(bus_error_jmp) != 0) {
+               catch_memory_errors = 0;
+diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
+index 45eb5999110b..32f5b3fb069f 100644
+--- a/arch/s390/hypfs/inode.c
++++ b/arch/s390/hypfs/inode.c
+@@ -269,7 +269,7 @@ static int hypfs_show_options(struct seq_file *s, struct dentry *root)
+ static int hypfs_fill_super(struct super_block *sb, void *data, int silent)
+ {
+       struct inode *root_inode;
+-      struct dentry *root_dentry;
++      struct dentry *root_dentry, *update_file;
+       int rc = 0;
+       struct hypfs_sb_info *sbi;
+ 
+@@ -300,9 +300,10 @@ static int hypfs_fill_super(struct super_block *sb, void *data, int silent)
+               rc = hypfs_diag_create_files(root_dentry);
+       if (rc)
+               return rc;
+-      sbi->update_file = hypfs_create_update_file(root_dentry);
+-      if (IS_ERR(sbi->update_file))
+-              return PTR_ERR(sbi->update_file);
++      update_file = hypfs_create_update_file(root_dentry);
++      if (IS_ERR(update_file))
++              return PTR_ERR(update_file);
++      sbi->update_file = update_file;
+       hypfs_update_update(sb);
+       pr_info("Hypervisor filesystem mounted\n");
+       return 0;
+diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
+index 11ec92e47455..94944d063b37 100644
+--- a/drivers/block/pktcdvd.c
++++ b/drivers/block/pktcdvd.c
+@@ -2585,7 +2585,6 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
+       if (ret)
+               return ret;
+       if (!blk_queue_scsi_passthrough(bdev_get_queue(bdev))) {
+-              WARN_ONCE(true, "Attempt to register a non-SCSI queue\n");
+               blkdev_put(bdev, FMODE_READ | FMODE_NDELAY);
+               return -EINVAL;
+       }
+diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
+index a106cf7b5ee0..f6ba90b90503 100644
+--- a/drivers/char/ipmi/ipmi_si_intf.c
++++ b/drivers/char/ipmi/ipmi_si_intf.c
+@@ -284,6 +284,9 @@ struct smi_info {
+        */
+       bool irq_enable_broken;
+ 
++      /* Is the driver in maintenance mode? */
++      bool in_maintenance_mode;
++
+       /*
+        * Did we get an attention that we did not handle?
+        */
+@@ -1094,11 +1097,20 @@ static int ipmi_thread(void *data)
+               spin_unlock_irqrestore(&(smi_info->si_lock), flags);
+               busy_wait = ipmi_thread_busy_wait(smi_result, smi_info,
+                                                 &busy_until);
+-              if (smi_result == SI_SM_CALL_WITHOUT_DELAY)
++              if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
+                       ; /* do nothing */
+-              else if (smi_result == SI_SM_CALL_WITH_DELAY && busy_wait)
+-                      schedule();
+-              else if (smi_result == SI_SM_IDLE) {
++              } else if (smi_result == SI_SM_CALL_WITH_DELAY && busy_wait) {
++                      /*
++                       * In maintenance mode we run as fast as
++                       * possible to allow firmware updates to
++                       * complete as fast as possible, but normally
++                       * don't bang on the scheduler.
++                       */
++                      if (smi_info->in_maintenance_mode)
++                              schedule();
++                      else
++                              usleep_range(100, 200);
++              } else if (smi_result == SI_SM_IDLE) {
+                       if (atomic_read(&smi_info->need_watch)) {
+                               schedule_timeout_interruptible(100);
+                       } else {
+@@ -1106,8 +1118,9 @@ static int ipmi_thread(void *data)
+                               __set_current_state(TASK_INTERRUPTIBLE);
+                               schedule();
+                       }
+-              } else
++              } else {
+                       schedule_timeout_interruptible(1);
++              }
+       }
+       return 0;
+ }
+@@ -1286,6 +1299,7 @@ static void set_maintenance_mode(void *send_info, bool enable)
+ 
+       if (!enable)
+               atomic_set(&smi_info->req_events, 0);
++      smi_info->in_maintenance_mode = enable;
+ }
+ 
+ static const struct ipmi_smi_handlers handlers = {
+diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
+index 0eca20c5a80c..dcf5bb153495 100644
+--- a/drivers/char/tpm/tpm-chip.c
++++ b/drivers/char/tpm/tpm-chip.c
+@@ -158,12 +158,13 @@ static int tpm_class_shutdown(struct device *dev)
+ {
+       struct tpm_chip *chip = container_of(dev, struct tpm_chip, dev);
+ 
++      down_write(&chip->ops_sem);
+       if (chip->flags & TPM_CHIP_FLAG_TPM2) {
+-              down_write(&chip->ops_sem);
+               tpm2_shutdown(chip, TPM2_SU_CLEAR);
+               chip->ops = NULL;
+-              up_write(&chip->ops_sem);
+       }
++      chip->ops = NULL;
++      up_write(&chip->ops_sem);
+ 
+       return 0;
+ }
+diff --git a/drivers/char/tpm/tpm-sysfs.c b/drivers/char/tpm/tpm-sysfs.c
+index 86f38d239476..177a60e5c6ec 100644
+--- a/drivers/char/tpm/tpm-sysfs.c
++++ b/drivers/char/tpm/tpm-sysfs.c
+@@ -20,44 +20,46 @@
+ #include <linux/device.h>
+ #include "tpm.h"
+ 
+-#define READ_PUBEK_RESULT_SIZE 314
++struct tpm_readpubek_out {
++      u8 algorithm[4];
++      u8 encscheme[2];
++      u8 sigscheme[2];
++      __be32 paramsize;
++      u8 parameters[12];
++      __be32 keysize;
++      u8 modulus[256];
++      u8 checksum[20];
++} __packed;
++
+ #define READ_PUBEK_RESULT_MIN_BODY_SIZE (28 + 256)
+ #define TPM_ORD_READPUBEK 124
+-static const struct tpm_input_header tpm_readpubek_header = {
+-      .tag = cpu_to_be16(TPM_TAG_RQU_COMMAND),
+-      .length = cpu_to_be32(30),
+-      .ordinal = cpu_to_be32(TPM_ORD_READPUBEK)
+-};
++
+ static ssize_t pubek_show(struct device *dev, struct device_attribute *attr,
+                         char *buf)
+ {
+-      u8 *data;
+-      struct tpm_cmd_t tpm_cmd;
+-      ssize_t err;
+-      int i, rc;
++      struct tpm_buf tpm_buf;
++      struct tpm_readpubek_out *out;
++      int i;
+       char *str = buf;
+       struct tpm_chip *chip = to_tpm_chip(dev);
++      char anti_replay[20];
+ 
+-      memset(&tpm_cmd, 0, sizeof(tpm_cmd));
+-
+-      tpm_cmd.header.in = tpm_readpubek_header;
+-      err = tpm_transmit_cmd(chip, NULL, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
+-                             READ_PUBEK_RESULT_MIN_BODY_SIZE, 0,
+-                             "attempting to read the PUBEK");
+-      if (err)
+-              goto out;
+-
+-      /*
+-         ignore header 10 bytes
+-         algorithm 32 bits (1 == RSA )
+-         encscheme 16 bits
+-         sigscheme 16 bits
+-         parameters (RSA 12->bytes: keybit, #primes, expbit)
+-         keylenbytes 32 bits
+-         256 byte modulus
+-         ignore checksum 20 bytes
+-       */
+-      data = tpm_cmd.params.readpubek_out_buffer;
++      memset(&anti_replay, 0, sizeof(anti_replay));
++
++      if (tpm_try_get_ops(chip))
++              return 0;
++
++      if (tpm_buf_init(&tpm_buf, TPM_TAG_RQU_COMMAND, TPM_ORD_READPUBEK))
++              goto out_ops;
++
++      tpm_buf_append(&tpm_buf, anti_replay, sizeof(anti_replay));
++
++      if (tpm_transmit_cmd(chip, NULL, tpm_buf.data, PAGE_SIZE,
++                            READ_PUBEK_RESULT_MIN_BODY_SIZE, 0,
++                            "attempting to read the PUBEK"))
++              goto out_buf;
++
++      out = (struct tpm_readpubek_out *)&tpm_buf.data[10];
+       str +=
+           sprintf(str,
+                   "Algorithm: %02X %02X %02X %02X\n"
+@@ -68,22 +70,29 @@ static ssize_t pubek_show(struct device *dev, struct device_attribute *attr,
+                   "%02X %02X %02X %02X\n"
+                   "Modulus length: %d\n"
+                   "Modulus:\n",
+-                  data[0], data[1], data[2], data[3],
+-                  data[4], data[5],
+-                  data[6], data[7],
+-                  data[12], data[13], data[14], data[15],
+-                  data[16], data[17], data[18], data[19],
+-                  data[20], data[21], data[22], data[23],
+-                  be32_to_cpu(*((__be32 *) (data + 24))));
++                  out->algorithm[0], out->algorithm[1], out->algorithm[2],
++                  out->algorithm[3],
++                  out->encscheme[0], out->encscheme[1],
++                  out->sigscheme[0], out->sigscheme[1],
++                  out->parameters[0], out->parameters[1],
++                  out->parameters[2], out->parameters[3],
++                  out->parameters[4], out->parameters[5],
++                  out->parameters[6], out->parameters[7],
++                  out->parameters[8], out->parameters[9],
++                  out->parameters[10], out->parameters[11],
++                  be32_to_cpu(out->keysize));
+ 
+       for (i = 0; i < 256; i++) {
+-              str += sprintf(str, "%02X ", data[i + 28]);
++              str += sprintf(str, "%02X ", out->modulus[i]);
+               if ((i + 1) % 16 == 0)
+                       str += sprintf(str, "\n");
+       }
+-out:
+-      rc = str - buf;
+-      return rc;
++
++out_buf:
++      tpm_buf_destroy(&tpm_buf);
++out_ops:
++      tpm_put_ops(chip);
++      return str - buf;
+ }
+ static DEVICE_ATTR_RO(pubek);
+ 
+@@ -97,12 +106,16 @@ static ssize_t pcrs_show(struct device *dev, struct device_attribute *attr,
+       char *str = buf;
+       struct tpm_chip *chip = to_tpm_chip(dev);
+ 
+-      rc = tpm_getcap(chip, TPM_CAP_PROP_PCR, &cap,
+-                      "attempting to determine the number of PCRS",
+-                      sizeof(cap.num_pcrs));
+-      if (rc)
++      if (tpm_try_get_ops(chip))
+               return 0;
+ 
++      if (tpm_getcap(chip, TPM_CAP_PROP_PCR, &cap,
++                     "attempting to determine the number of PCRS",
++                     sizeof(cap.num_pcrs))) {
++              tpm_put_ops(chip);
++              return 0;
++      }
++
+       num_pcrs = be32_to_cpu(cap.num_pcrs);
+       for (i = 0; i < num_pcrs; i++) {
+               rc = tpm_pcr_read_dev(chip, i, digest);
+@@ -113,6 +126,7 @@ static ssize_t pcrs_show(struct device *dev, struct device_attribute *attr,
+                       str += sprintf(str, "%02X ", digest[j]);
+               str += sprintf(str, "\n");
+       }
++      tpm_put_ops(chip);
+       return str - buf;
+ }
+ static DEVICE_ATTR_RO(pcrs);
+@@ -120,16 +134,21 @@ static DEVICE_ATTR_RO(pcrs);
+ static ssize_t enabled_show(struct device *dev, struct device_attribute *attr,
+                    char *buf)
+ {
++      struct tpm_chip *chip = to_tpm_chip(dev);
++      ssize_t rc = 0;
+       cap_t cap;
+-      ssize_t rc;
+ 
+-      rc = tpm_getcap(to_tpm_chip(dev), TPM_CAP_FLAG_PERM, &cap,
+-                      "attempting to determine the permanent enabled state",
+-                      sizeof(cap.perm_flags));
+-      if (rc)
++      if (tpm_try_get_ops(chip))
+               return 0;
+ 
++      if (tpm_getcap(chip, TPM_CAP_FLAG_PERM, &cap,
++                     "attempting to determine the permanent enabled state",
++                     sizeof(cap.perm_flags)))
++              goto out_ops;
++
+       rc = sprintf(buf, "%d\n", !cap.perm_flags.disable);
++out_ops:
++      tpm_put_ops(chip);
+       return rc;
+ }
+ static DEVICE_ATTR_RO(enabled);
+@@ -137,16 +156,21 @@ static DEVICE_ATTR_RO(enabled);
+ static ssize_t active_show(struct device *dev, struct device_attribute *attr,
+                   char *buf)
+ {
++      struct tpm_chip *chip = to_tpm_chip(dev);
++      ssize_t rc = 0;
+       cap_t cap;
+-      ssize_t rc;
+ 
+-      rc = tpm_getcap(to_tpm_chip(dev), TPM_CAP_FLAG_PERM, &cap,
+-                      "attempting to determine the permanent active state",
+-                      sizeof(cap.perm_flags));
+-      if (rc)
++      if (tpm_try_get_ops(chip))
+               return 0;
+ 
++      if (tpm_getcap(chip, TPM_CAP_FLAG_PERM, &cap,
++                     "attempting to determine the permanent active state",
++                     sizeof(cap.perm_flags)))
++              goto out_ops;
++
+       rc = sprintf(buf, "%d\n", !cap.perm_flags.deactivated);
++out_ops:
++      tpm_put_ops(chip);
+       return rc;
+ }
+ static DEVICE_ATTR_RO(active);
+@@ -154,16 +178,21 @@ static DEVICE_ATTR_RO(active);
+ static ssize_t owned_show(struct device *dev, struct device_attribute *attr,
+                         char *buf)
+ {
++      struct tpm_chip *chip = to_tpm_chip(dev);
++      ssize_t rc = 0;
+       cap_t cap;
+-      ssize_t rc;
+ 
+-      rc = tpm_getcap(to_tpm_chip(dev), TPM_CAP_PROP_OWNER, &cap,
+-                      "attempting to determine the owner state",
+-                      sizeof(cap.owned));
+-      if (rc)
++      if (tpm_try_get_ops(chip))
+               return 0;
+ 
++      if (tpm_getcap(to_tpm_chip(dev), TPM_CAP_PROP_OWNER, &cap,
++                     "attempting to determine the owner state",
++                     sizeof(cap.owned)))
++              goto out_ops;
++
+       rc = sprintf(buf, "%d\n", cap.owned);
++out_ops:
++      tpm_put_ops(chip);
+       return rc;
+ }
+ static DEVICE_ATTR_RO(owned);
+@@ -171,16 +200,21 @@ static DEVICE_ATTR_RO(owned);
+ static ssize_t temp_deactivated_show(struct device *dev,
+                                    struct device_attribute *attr, char *buf)
+ {
++      struct tpm_chip *chip = to_tpm_chip(dev);
++      ssize_t rc = 0;
+       cap_t cap;
+-      ssize_t rc;
+ 
+-      rc = tpm_getcap(to_tpm_chip(dev), TPM_CAP_FLAG_VOL, &cap,
+-                      "attempting to determine the temporary state",
+-                      sizeof(cap.stclear_flags));
+-      if (rc)
++      if (tpm_try_get_ops(chip))
+               return 0;
+ 
++      if (tpm_getcap(to_tpm_chip(dev), TPM_CAP_FLAG_VOL, &cap,
++                     "attempting to determine the temporary state",
++                     sizeof(cap.stclear_flags)))
++              goto out_ops;
++
+       rc = sprintf(buf, "%d\n", cap.stclear_flags.deactivated);
++out_ops:
++      tpm_put_ops(chip);
+       return rc;
+ }
+ static DEVICE_ATTR_RO(temp_deactivated);
+@@ -189,15 +223,18 @@ static ssize_t caps_show(struct device *dev, struct device_attribute *attr,
+                        char *buf)
+ {
+       struct tpm_chip *chip = to_tpm_chip(dev);
+-      cap_t cap;
+-      ssize_t rc;
++      ssize_t rc = 0;
+       char *str = buf;
++      cap_t cap;
+ 
+-      rc = tpm_getcap(chip, TPM_CAP_PROP_MANUFACTURER, &cap,
+-                      "attempting to determine the manufacturer",
+-                      sizeof(cap.manufacturer_id));
+-      if (rc)
++      if (tpm_try_get_ops(chip))
+               return 0;
++
++      if (tpm_getcap(chip, TPM_CAP_PROP_MANUFACTURER, &cap,
++                     "attempting to determine the manufacturer",
++                     sizeof(cap.manufacturer_id)))
++              goto out_ops;
++
+       str += sprintf(str, "Manufacturer: 0x%x\n",
+                      be32_to_cpu(cap.manufacturer_id));
+ 
+@@ -214,20 +251,22 @@ static ssize_t caps_show(struct device *dev, struct device_attribute *attr,
+                              cap.tpm_version_1_2.revMinor);
+       } else {
+               /* Otherwise just use TPM_STRUCT_VER */
+-              rc = tpm_getcap(chip, TPM_CAP_VERSION_1_1, &cap,
+-                              "attempting to determine the 1.1 version",
+-                              sizeof(cap.tpm_version));
+-              if (rc)
+-                      return 0;
++              if (tpm_getcap(chip, TPM_CAP_VERSION_1_1, &cap,
++                             "attempting to determine the 1.1 version",
++                             sizeof(cap.tpm_version)))
++                      goto out_ops;
++
+               str += sprintf(str,
+                              "TCG version: %d.%d\nFirmware version: %d.%d\n",
+                              cap.tpm_version.Major,
+                              cap.tpm_version.Minor,
+                              cap.tpm_version.revMajor,
+                              cap.tpm_version.revMinor);
+-      }
+-
+-      return str - buf;
++}
++      rc = str - buf;
++out_ops:
++      tpm_put_ops(chip);
++      return rc;
+ }
+ static DEVICE_ATTR_RO(caps);
+ 
+@@ -235,10 +274,12 @@ static ssize_t cancel_store(struct device *dev, struct device_attribute *attr,
+                           const char *buf, size_t count)
+ {
+       struct tpm_chip *chip = to_tpm_chip(dev);
+-      if (chip == NULL)
++
++      if (tpm_try_get_ops(chip))
+               return 0;
+ 
+       chip->ops->cancel(chip);
++      tpm_put_ops(chip);
+       return count;
+ }
+ static DEVICE_ATTR_WO(cancel);
+diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
+index 4bb9b4aa9b49..d53d12f3df6d 100644
+--- a/drivers/char/tpm/tpm.h
++++ b/drivers/char/tpm/tpm.h
+@@ -351,17 +351,6 @@ enum tpm_sub_capabilities {
+       TPM_CAP_PROP_TIS_DURATION = 0x120,
+ };
+ 
+-struct        tpm_readpubek_params_out {
+-      u8      algorithm[4];
+-      u8      encscheme[2];
+-      u8      sigscheme[2];
+-      __be32  paramsize;
+-      u8      parameters[12]; /*assuming RSA*/
+-      __be32  keysize;
+-      u8      modulus[256];
+-      u8      checksum[20];
+-} __packed;
+-
+ typedef union {
+       struct  tpm_input_header in;
+       struct  tpm_output_header out;
+@@ -391,8 +380,6 @@ struct tpm_getrandom_in {
+ } __packed;
+ 
+ typedef union {
+-      struct  tpm_readpubek_params_out readpubek_out;
+-      u8      readpubek_out_buffer[sizeof(struct tpm_readpubek_params_out)];
+       struct  tpm_pcrread_in  pcrread_in;
+       struct  tpm_pcrread_out pcrread_out;
+       struct  tpm_getrandom_in getrandom_in;
+diff --git a/drivers/clk/at91/clk-main.c b/drivers/clk/at91/clk-main.c
+index c813c27f2e58..2f97a843d6d6 100644
+--- a/drivers/clk/at91/clk-main.c
++++ b/drivers/clk/at91/clk-main.c
+@@ -27,6 +27,10 @@
+ 
+ #define MOR_KEY_MASK          (0xff << 16)
+ 
++#define clk_main_parent_select(s)     (((s) & \
++                                      (AT91_PMC_MOSCEN | \
++                                      AT91_PMC_OSCBYPASS)) ? 1 : 0)
++
+ struct clk_main_osc {
+       struct clk_hw hw;
+       struct regmap *regmap;
+@@ -119,7 +123,7 @@ static int clk_main_osc_is_prepared(struct clk_hw *hw)
+ 
+       regmap_read(regmap, AT91_PMC_SR, &status);
+ 
+-      return (status & AT91_PMC_MOSCS) && (tmp & AT91_PMC_MOSCEN);
++      return (status & AT91_PMC_MOSCS) && clk_main_parent_select(tmp);
+ }
+ 
+ static const struct clk_ops main_osc_ops = {
+@@ -530,7 +534,7 @@ static u8 clk_sam9x5_main_get_parent(struct clk_hw *hw)
+ 
+       regmap_read(clkmain->regmap, AT91_CKGR_MOR, &status);
+ 
+-      return status & AT91_PMC_MOSCEN ? 1 : 0;
++      return clk_main_parent_select(status);
+ }
+ 
+ static const struct clk_ops sam9x5_main_ops = {
+@@ -572,7 +576,7 @@ at91_clk_register_sam9x5_main(struct regmap *regmap,
+       clkmain->hw.init = &init;
+       clkmain->regmap = regmap;
+       regmap_read(clkmain->regmap, AT91_CKGR_MOR, &status);
+-      clkmain->parent = status & AT91_PMC_MOSCEN ? 1 : 0;
++      clkmain->parent = clk_main_parent_select(status);
+ 
+       hw = &clkmain->hw;
+       ret = clk_hw_register(NULL, &clkmain->hw);
+diff --git a/drivers/clk/clk-qoriq.c b/drivers/clk/clk-qoriq.c
+index b0ea753b8709..1a292519d84f 100644
+--- a/drivers/clk/clk-qoriq.c
++++ b/drivers/clk/clk-qoriq.c
+@@ -610,7 +610,7 @@ static const struct clockgen_chipinfo chipinfo[] = {
+               .guts_compat = "fsl,qoriq-device-config-1.0",
+               .init_periph = p5020_init_periph,
+               .cmux_groups = {
+-                      &p2041_cmux_grp1, &p2041_cmux_grp2
++                      &p5020_cmux_grp1, &p5020_cmux_grp2
+               },
+               .cmux_to_group = {
+                       0, 1, -1
+diff --git a/drivers/clk/sirf/clk-common.c b/drivers/clk/sirf/clk-common.c
+index 77e1e2491689..edb7197cc4b4 100644
+--- a/drivers/clk/sirf/clk-common.c
++++ b/drivers/clk/sirf/clk-common.c
+@@ -298,9 +298,10 @@ static u8 dmn_clk_get_parent(struct clk_hw *hw)
+ {
+       struct clk_dmn *clk = to_dmnclk(hw);
+       u32 cfg = clkc_readl(clk->regofs);
++      const char *name = clk_hw_get_name(hw);
+ 
+       /* parent of io domain can only be pll3 */
+-      if (strcmp(hw->init->name, "io") == 0)
++      if (strcmp(name, "io") == 0)
+               return 4;
+ 
+       WARN_ON((cfg & (BIT(3) - 1)) > 4);
+@@ -312,9 +313,10 @@ static int dmn_clk_set_parent(struct clk_hw *hw, u8 parent)
+ {
+       struct clk_dmn *clk = to_dmnclk(hw);
+       u32 cfg = clkc_readl(clk->regofs);
++      const char *name = clk_hw_get_name(hw);
+ 
+       /* parent of io domain can only be pll3 */
+-      if (strcmp(hw->init->name, "io") == 0)
++      if (strcmp(name, "io") == 0)
+               return -EINVAL;
+ 
+       cfg &= ~(BIT(3) - 1);
+@@ -354,7 +356,8 @@ static long dmn_clk_round_rate(struct clk_hw *hw, unsigned long rate,
+ {
+       unsigned long fin;
+       unsigned ratio, wait, hold;
+-      unsigned bits = (strcmp(hw->init->name, "mem") == 0) ? 3 : 4;
++      const char *name = clk_hw_get_name(hw);
++      unsigned bits = (strcmp(name, "mem") == 0) ? 3 : 4;
+ 
+       fin = *parent_rate;
+       ratio = fin / rate;
+@@ -376,7 +379,8 @@ static int dmn_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+       struct clk_dmn *clk = to_dmnclk(hw);
+       unsigned long fin;
+       unsigned ratio, wait, hold, reg;
+-      unsigned bits = (strcmp(hw->init->name, "mem") == 0) ? 3 : 4;
++      const char *name = clk_hw_get_name(hw);
++      unsigned bits = (strcmp(name, "mem") == 0) ? 3 : 4;
+ 
+       fin = parent_rate;
+       ratio = fin / rate;
+diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
+index ac12f261f8ca..9e3f4088724b 100644
+--- a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
++++ b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
+@@ -499,6 +499,9 @@ static struct clk_hw_onecell_data sun8i_v3s_hw_clks = {
+               [CLK_MMC1]              = &mmc1_clk.common.hw,
+               [CLK_MMC1_SAMPLE]       = &mmc1_sample_clk.common.hw,
+               [CLK_MMC1_OUTPUT]       = &mmc1_output_clk.common.hw,
++              [CLK_MMC2]              = &mmc2_clk.common.hw,
++              [CLK_MMC2_SAMPLE]       = &mmc2_sample_clk.common.hw,
++              [CLK_MMC2_OUTPUT]       = &mmc2_output_clk.common.hw,
+               [CLK_CE]                = &ce_clk.common.hw,
+               [CLK_SPI0]              = &spi0_clk.common.hw,
+               [CLK_USB_PHY0]          = &usb_phy0_clk.common.hw,
+diff --git a/drivers/clk/zte/clk-zx296718.c b/drivers/clk/zte/clk-zx296718.c
+index 354dd508c516..8dfb8523b79d 100644
+--- a/drivers/clk/zte/clk-zx296718.c
++++ b/drivers/clk/zte/clk-zx296718.c
+@@ -567,6 +567,7 @@ static int __init top_clocks_init(struct device_node *np)
+ {
+       void __iomem *reg_base;
+       int i, ret;
++      const char *name;
+ 
+       reg_base = of_iomap(np, 0);
+       if (!reg_base) {
+@@ -576,11 +577,10 @@ static int __init top_clocks_init(struct device_node *np)
+ 
+       for (i = 0; i < ARRAY_SIZE(zx296718_pll_clk); i++) {
+               zx296718_pll_clk[i].reg_base += (uintptr_t)reg_base;
++              name = zx296718_pll_clk[i].hw.init->name;
+               ret = clk_hw_register(NULL, &zx296718_pll_clk[i].hw);
+-              if (ret) {
+-                      pr_warn("top clk %s init error!\n",
+-                              zx296718_pll_clk[i].hw.init->name);
+-              }
++              if (ret)
++                      pr_warn("top clk %s init error!\n", name);
+       }
+ 
+       for (i = 0; i < ARRAY_SIZE(top_ffactor_clk); i++) {
+@@ -588,11 +588,10 @@ static int __init top_clocks_init(struct device_node *np)
+                       top_hw_onecell_data.hws[top_ffactor_clk[i].id] =
+                                       &top_ffactor_clk[i].factor.hw;
+ 
++              name = top_ffactor_clk[i].factor.hw.init->name;
+               ret = clk_hw_register(NULL, &top_ffactor_clk[i].factor.hw);
+-              if (ret) {
+-                      pr_warn("top clk %s init error!\n",
+-                              top_ffactor_clk[i].factor.hw.init->name);
+-              }
++              if (ret)
++                      pr_warn("top clk %s init error!\n", name);
+       }
+ 
+       for (i = 0; i < ARRAY_SIZE(top_mux_clk); i++) {
+@@ -601,11 +600,10 @@ static int __init top_clocks_init(struct device_node *np)
+                                       &top_mux_clk[i].mux.hw;
+ 
+               top_mux_clk[i].mux.reg += (uintptr_t)reg_base;
++              name = top_mux_clk[i].mux.hw.init->name;
+               ret = clk_hw_register(NULL, &top_mux_clk[i].mux.hw);
+-              if (ret) {
+-                      pr_warn("top clk %s init error!\n",
+-                              top_mux_clk[i].mux.hw.init->name);
+-              }
++              if (ret)
++                      pr_warn("top clk %s init error!\n", name);
+       }
+ 
+       for (i = 0; i < ARRAY_SIZE(top_gate_clk); i++) {
+@@ -614,11 +612,10 @@ static int __init top_clocks_init(struct device_node *np)
+                                       &top_gate_clk[i].gate.hw;
+ 
+               top_gate_clk[i].gate.reg += (uintptr_t)reg_base;
++              name = top_gate_clk[i].gate.hw.init->name;
+               ret = clk_hw_register(NULL, &top_gate_clk[i].gate.hw);
+-              if (ret) {
+-                      pr_warn("top clk %s init error!\n",
+-                              top_gate_clk[i].gate.hw.init->name);
+-              }
++              if (ret)
++                      pr_warn("top clk %s init error!\n", name);
+       }
+ 
+       for (i = 0; i < ARRAY_SIZE(top_div_clk); i++) {
+@@ -627,11 +624,10 @@ static int __init top_clocks_init(struct device_node *np)
+                                       &top_div_clk[i].div.hw;
+ 
+               top_div_clk[i].div.reg += (uintptr_t)reg_base;
++              name = top_div_clk[i].div.hw.init->name;
+               ret = clk_hw_register(NULL, &top_div_clk[i].div.hw);
+-              if (ret) {
+-                      pr_warn("top clk %s init error!\n",
+-                              top_div_clk[i].div.hw.init->name);
+-              }
++              if (ret)
++                      pr_warn("top clk %s init error!\n", name);
+       }
+ 
+       ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get,
+@@ -757,6 +753,7 @@ static int __init lsp0_clocks_init(struct device_node *np)
+ {
+       void __iomem *reg_base;
+       int i, ret;
++      const char *name;
+ 
+       reg_base = of_iomap(np, 0);
+       if (!reg_base) {
+@@ -770,11 +767,10 @@ static int __init lsp0_clocks_init(struct device_node *np)
+                                       &lsp0_mux_clk[i].mux.hw;
+ 
+               lsp0_mux_clk[i].mux.reg += (uintptr_t)reg_base;
++              name = lsp0_mux_clk[i].mux.hw.init->name;
+               ret = clk_hw_register(NULL, &lsp0_mux_clk[i].mux.hw);
+-              if (ret) {
+-                      pr_warn("lsp0 clk %s init error!\n",
+-                              lsp0_mux_clk[i].mux.hw.init->name);
+-              }
++              if (ret)
++                      pr_warn("lsp0 clk %s init error!\n", name);
+       }
+ 
+       for (i = 0; i < ARRAY_SIZE(lsp0_gate_clk); i++) {
+@@ -783,11 +779,10 @@ static int __init lsp0_clocks_init(struct device_node *np)
+                                       &lsp0_gate_clk[i].gate.hw;
+ 
+               lsp0_gate_clk[i].gate.reg += (uintptr_t)reg_base;
++              name = lsp0_gate_clk[i].gate.hw.init->name;
+               ret = clk_hw_register(NULL, &lsp0_gate_clk[i].gate.hw);
+-              if (ret) {
+-                      pr_warn("lsp0 clk %s init error!\n",
+-                              lsp0_gate_clk[i].gate.hw.init->name);
+-              }
++              if (ret)
++                      pr_warn("lsp0 clk %s init error!\n", name);
+       }
+ 
+       for (i = 0; i < ARRAY_SIZE(lsp0_div_clk); i++) {
+@@ -796,11 +791,10 @@ static int __init lsp0_clocks_init(struct device_node *np)
+                                       &lsp0_div_clk[i].div.hw;
+ 
+               lsp0_div_clk[i].div.reg += (uintptr_t)reg_base;
++              name = lsp0_div_clk[i].div.hw.init->name;
+               ret = clk_hw_register(NULL, &lsp0_div_clk[i].div.hw);
+-              if (ret) {
+-                      pr_warn("lsp0 clk %s init error!\n",
+-                              lsp0_div_clk[i].div.hw.init->name);
+-              }
++              if (ret)
++                      pr_warn("lsp0 clk %s init error!\n", name);
+       }
+ 
+       ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get,
+@@ -865,6 +859,7 @@ static int __init lsp1_clocks_init(struct device_node *np)
+ {
+       void __iomem *reg_base;
+       int i, ret;
++      const char *name;
+ 
+       reg_base = of_iomap(np, 0);
+       if (!reg_base) {
+@@ -878,11 +873,10 @@ static int __init lsp1_clocks_init(struct device_node *np)
+                                       &lsp0_mux_clk[i].mux.hw;
+ 
+               lsp1_mux_clk[i].mux.reg += (uintptr_t)reg_base;
++              name = lsp1_mux_clk[i].mux.hw.init->name;
+               ret = clk_hw_register(NULL, &lsp1_mux_clk[i].mux.hw);
+-              if (ret) {
+-                      pr_warn("lsp1 clk %s init error!\n",
+-                              lsp1_mux_clk[i].mux.hw.init->name);
+-              }
++              if (ret)
++                      pr_warn("lsp1 clk %s init error!\n", name);
+       }
+ 
+       for (i = 0; i < ARRAY_SIZE(lsp1_gate_clk); i++) {
+@@ -891,11 +885,10 @@ static int __init lsp1_clocks_init(struct device_node *np)
+                                       &lsp1_gate_clk[i].gate.hw;
+ 
+               lsp1_gate_clk[i].gate.reg += (uintptr_t)reg_base;
++              name = lsp1_gate_clk[i].gate.hw.init->name;
+               ret = clk_hw_register(NULL, &lsp1_gate_clk[i].gate.hw);
+-              if (ret) {
+-                      pr_warn("lsp1 clk %s init error!\n",
+-                              lsp1_gate_clk[i].gate.hw.init->name);
+-              }
++              if (ret)
++                      pr_warn("lsp1 clk %s init error!\n", name);
+       }
+ 
+       for (i = 0; i < ARRAY_SIZE(lsp1_div_clk); i++) {
+@@ -904,11 +897,10 @@ static int __init lsp1_clocks_init(struct device_node *np)
+                                       &lsp1_div_clk[i].div.hw;
+ 
+               lsp1_div_clk[i].div.reg += (uintptr_t)reg_base;
++              name = lsp1_div_clk[i].div.hw.init->name;
+               ret = clk_hw_register(NULL, &lsp1_div_clk[i].div.hw);
+-              if (ret) {
+-                      pr_warn("lsp1 clk %s init error!\n",
+-                              lsp1_div_clk[i].div.hw.init->name);
+-              }
++              if (ret)
++                      pr_warn("lsp1 clk %s init error!\n", name);
+       }
+ 
+       ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get,
+@@ -982,6 +974,7 @@ static int __init audio_clocks_init(struct device_node *np)
+ {
+       void __iomem *reg_base;
+       int i, ret;
++      const char *name;
+ 
+       reg_base = of_iomap(np, 0);
+       if (!reg_base) {
+@@ -995,11 +988,10 @@ static int __init audio_clocks_init(struct device_node *np)
+                                       &audio_mux_clk[i].mux.hw;
+ 
+               audio_mux_clk[i].mux.reg += (uintptr_t)reg_base;
++              name = audio_mux_clk[i].mux.hw.init->name;
+               ret = clk_hw_register(NULL, &audio_mux_clk[i].mux.hw);
+-              if (ret) {
+-                      pr_warn("audio clk %s init error!\n",
+-                              audio_mux_clk[i].mux.hw.init->name);
+-              }
++              if (ret)
++                      pr_warn("audio clk %s init error!\n", name);
+       }
+ 
+       for (i = 0; i < ARRAY_SIZE(audio_adiv_clk); i++) {
+@@ -1008,11 +1000,10 @@ static int __init audio_clocks_init(struct device_node *np)
+                                       &audio_adiv_clk[i].hw;
+ 
+               audio_adiv_clk[i].reg_base += (uintptr_t)reg_base;
++              name = audio_adiv_clk[i].hw.init->name;
+               ret = clk_hw_register(NULL, &audio_adiv_clk[i].hw);
+-              if (ret) {
+-                      pr_warn("audio clk %s init error!\n",
+-                              audio_adiv_clk[i].hw.init->name);
+-              }
++              if (ret)
++                      pr_warn("audio clk %s init error!\n", name);
+       }
+ 
+       for (i = 0; i < ARRAY_SIZE(audio_div_clk); i++) {
+@@ -1021,11 +1012,10 @@ static int __init audio_clocks_init(struct device_node *np)
+                                       &audio_div_clk[i].div.hw;
+ 
+               audio_div_clk[i].div.reg += (uintptr_t)reg_base;
++              name = audio_div_clk[i].div.hw.init->name;
+               ret = clk_hw_register(NULL, &audio_div_clk[i].div.hw);
+-              if (ret) {
+-                      pr_warn("audio clk %s init error!\n",
+-                              audio_div_clk[i].div.hw.init->name);
+-              }
++              if (ret)
++                      pr_warn("audio clk %s init error!\n", name);
+       }
+ 
+       for (i = 0; i < ARRAY_SIZE(audio_gate_clk); i++) {
+@@ -1034,11 +1024,10 @@ static int __init audio_clocks_init(struct device_node *np)
+                                       &audio_gate_clk[i].gate.hw;
+ 
+               audio_gate_clk[i].gate.reg += (uintptr_t)reg_base;
++              name = audio_gate_clk[i].gate.hw.init->name;
+               ret = clk_hw_register(NULL, &audio_gate_clk[i].gate.hw);
+-              if (ret) {
+-                      pr_warn("audio clk %s init error!\n",
+-                              audio_gate_clk[i].gate.hw.init->name);
+-              }
++              if (ret)
++                      pr_warn("audio clk %s init error!\n", name);
+       }
+ 
+       ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get,
+diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c
+index 24f83f9eeaed..114b36674af4 100644
+--- a/drivers/dma-buf/sw_sync.c
++++ b/drivers/dma-buf/sw_sync.c
+@@ -141,17 +141,14 @@ static void timeline_fence_release(struct dma_fence *fence)
+ {
+       struct sync_pt *pt = dma_fence_to_sync_pt(fence);
+       struct sync_timeline *parent = dma_fence_parent(fence);
++      unsigned long flags;
+ 
++      spin_lock_irqsave(fence->lock, flags);
+       if (!list_empty(&pt->link)) {
+-              unsigned long flags;
+-
+-              spin_lock_irqsave(fence->lock, flags);
+-              if (!list_empty(&pt->link)) {
+-                      list_del(&pt->link);
+-                      rb_erase(&pt->node, &parent->pt_tree);
+-              }
+-              spin_unlock_irqrestore(fence->lock, flags);
++              list_del(&pt->link);
++              rb_erase(&pt->node, &parent->pt_tree);
+       }
++      spin_unlock_irqrestore(fence->lock, flags);
+ 
+       sync_timeline_put(parent);
+       dma_fence_free(fence);
+@@ -275,7 +272,8 @@ static struct sync_pt *sync_pt_create(struct sync_timeline *obj,
+                               p = &parent->rb_left;
+                       } else {
+                               if (dma_fence_get_rcu(&other->base)) {
+-                                      dma_fence_put(&pt->base);
++                                      sync_timeline_put(obj);
++                                      kfree(pt);
+                                       pt = other;
+                                       goto unlock;
+                               }
+diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
+index 40520a968eac..28eea8317e87 100644
+--- a/drivers/gpu/drm/amd/amdgpu/si.c
++++ b/drivers/gpu/drm/amd/amdgpu/si.c
+@@ -1783,7 +1783,7 @@ static void si_program_aspm(struct amdgpu_device *adev)
+                       if (orig != data)
+                               si_pif_phy1_wreg(adev,PB1_PIF_PWRDOWN_1, data);
+ 
+-                      if ((adev->family != CHIP_OLAND) && (adev->family != CHIP_HAINAN)) {
++                      if ((adev->asic_type != CHIP_OLAND) && (adev->asic_type != CHIP_HAINAN)) {
+                               orig = data = si_pif_phy0_rreg(adev,PB0_PIF_PWRDOWN_0);
+                               data &= ~PLL_RAMP_UP_TIME_0_MASK;
+                               if (orig != data)
+@@ -1832,14 +1832,14 @@ static void si_program_aspm(struct amdgpu_device *adev)
+ 
+                       orig = data = si_pif_phy0_rreg(adev,PB0_PIF_CNTL);
+                       data &= ~LS2_EXIT_TIME_MASK;
+-                      if ((adev->family == CHIP_OLAND) || (adev->family == CHIP_HAINAN))
++                      if ((adev->asic_type == CHIP_OLAND) || (adev->asic_type == CHIP_HAINAN))
+                               data |= LS2_EXIT_TIME(5);
+                       if (orig != data)
+                               si_pif_phy0_wreg(adev,PB0_PIF_CNTL, data);
+ 
+                       orig = data = si_pif_phy1_rreg(adev,PB1_PIF_CNTL);
+                       data &= ~LS2_EXIT_TIME_MASK;
+-                      if ((adev->family == CHIP_OLAND) || (adev->family == CHIP_HAINAN))
++                      if ((adev->asic_type == CHIP_OLAND) || (adev->asic_type == CHIP_HAINAN))
+                               data |= LS2_EXIT_TIME(5);
+                       if (orig != data)
+                               si_pif_phy1_wreg(adev,PB1_PIF_CNTL, data);
+diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
+index 9705ca197b90..cefa2c1685ba 100644
+--- a/drivers/gpu/drm/bridge/tc358767.c
++++ b/drivers/gpu/drm/bridge/tc358767.c
+@@ -300,7 +300,7 @@ static ssize_t tc_aux_transfer(struct drm_dp_aux *aux,
+                              struct drm_dp_aux_msg *msg)
+ {
+       struct tc_data *tc = aux_to_tc(aux);
+-      size_t size = min_t(size_t, 8, msg->size);
++      size_t size = min_t(size_t, DP_AUX_MAX_PAYLOAD_BYTES - 1, msg->size);
+       u8 request = msg->request & ~DP_AUX_I2C_MOT;
+       u8 *buf = msg->buffer;
+       u32 tmp = 0;
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c
+index 7143ea4611aa..33a9fb5ac558 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c
+@@ -96,6 +96,8 @@ nvbios_volt_parse(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+               info->min     = min(info->base,
+                                   info->base + info->step * info->vidmask);
+               info->max     = nvbios_rd32(bios, volt + 0x0e);
++              if (!info->max)
++                      info->max = max(info->base, info->base + info->step * info->vidmask);
+               break;
+       case 0x50:
+               info->min     = nvbios_rd32(bios, volt + 0x0a);
+diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
+index 7a0fd4e4e78d..c1daed3fe842 100644
+--- a/drivers/gpu/drm/panel/panel-simple.c
++++ b/drivers/gpu/drm/panel/panel-simple.c
+@@ -614,9 +614,9 @@ static const struct panel_desc auo_g133han01 = {
+ static const struct display_timing auo_g185han01_timings = {
+       .pixelclock = { 120000000, 144000000, 175000000 },
+       .hactive = { 1920, 1920, 1920 },
+-      .hfront_porch = { 18, 60, 74 },
+-      .hback_porch = { 12, 44, 54 },
+-      .hsync_len = { 10, 24, 32 },
++      .hfront_porch = { 36, 120, 148 },
++      .hback_porch = { 24, 88, 108 },
++      .hsync_len = { 20, 48, 64 },
+       .vactive = { 1080, 1080, 1080 },
+       .vfront_porch = { 6, 10, 40 },
+       .vback_porch = { 2, 5, 20 },
+diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
+index 337d3a1c2a40..48f752cf7a92 100644
+--- a/drivers/gpu/drm/radeon/radeon_connectors.c
++++ b/drivers/gpu/drm/radeon/radeon_connectors.c
+@@ -764,7 +764,7 @@ static int radeon_connector_set_property(struct drm_connector *connector, struct
+ 
+               radeon_encoder->output_csc = val;
+ 
+-              if (connector->encoder->crtc) {
++              if (connector->encoder && connector->encoder->crtc) {
+                       struct drm_crtc *crtc  = connector->encoder->crtc;
+                       struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ 
+diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
+index f4becad0a78c..54d97dd5780a 100644
+--- a/drivers/gpu/drm/radeon/radeon_drv.c
++++ b/drivers/gpu/drm/radeon/radeon_drv.c
+@@ -368,11 +368,19 @@ radeon_pci_remove(struct pci_dev *pdev)
+ static void
+ radeon_pci_shutdown(struct pci_dev *pdev)
+ {
++      struct drm_device *ddev = pci_get_drvdata(pdev);
++
+       /* if we are running in a VM, make sure the device
+        * torn down properly on reboot/shutdown
+        */
+       if (radeon_device_is_virtual())
+               radeon_pci_remove(pdev);
++
++      /* Some adapters need to be suspended before a
++      * shutdown occurs in order to prevent an error
++      * during kexec.
++      */
++      radeon_suspend_kms(ddev, true, true, false);
+ }
+ 
+ static int radeon_pmops_suspend(struct device *dev)
+diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c
+index d394a03632c4..c3bd80b03f16 100644
+--- a/drivers/gpu/drm/stm/ltdc.c
++++ b/drivers/gpu/drm/stm/ltdc.c
+@@ -20,6 +20,7 @@
+ #include <drm/drm_crtc_helper.h>
+ #include <drm/drm_fb_cma_helper.h>
+ #include <drm/drm_gem_cma_helper.h>
++#include <drm/drm_gem_framebuffer_helper.h>
+ #include <drm/drm_of.h>
+ #include <drm/drm_bridge.h>
+ #include <drm/drm_plane_helper.h>
+@@ -691,6 +692,7 @@ static const struct drm_plane_funcs ltdc_plane_funcs = {
+ };
+ 
+ static const struct drm_plane_helper_funcs ltdc_plane_helper_funcs = {
++      .prepare_fb = drm_gem_fb_prepare_fb,
+       .atomic_check = ltdc_plane_atomic_check,
+       .atomic_update = ltdc_plane_atomic_update,
+       .atomic_disable = ltdc_plane_atomic_disable,
+diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
+index 1cb41992aaa1..d0a81a03ddbd 100644
+--- a/drivers/hid/hid-apple.c
++++ b/drivers/hid/hid-apple.c
+@@ -57,7 +57,6 @@ MODULE_PARM_DESC(swap_opt_cmd, "Swap the Option (\"Alt\") and Command (\"Flag\")
+ struct apple_sc {
+       unsigned long quirks;
+       unsigned int fn_on;
+-      DECLARE_BITMAP(pressed_fn, KEY_CNT);
+       DECLARE_BITMAP(pressed_numlock, KEY_CNT);
+ };
+ 
+@@ -184,6 +183,8 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input,
+ {
+       struct apple_sc *asc = hid_get_drvdata(hid);
+       const struct apple_key_translation *trans, *table;
++      bool do_translate;
++      u16 code = 0;
+ 
+       if (usage->code == KEY_FN) {
+               asc->fn_on = !!value;
+@@ -192,8 +193,6 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input,
+       }
+ 
+       if (fnmode) {
+-              int do_translate;
+-
+               if (hid->product >= USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI &&
+                               hid->product <= USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS)
+                       table = macbookair_fn_keys;
+@@ -205,25 +204,33 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input,
+               trans = apple_find_translation (table, usage->code);
+ 
+               if (trans) {
+-                      if (test_bit(usage->code, asc->pressed_fn))
+-                              do_translate = 1;
+-                      else if (trans->flags & APPLE_FLAG_FKEY)
+-                              do_translate = (fnmode == 2 && asc->fn_on) ||
+-                                      (fnmode == 1 && !asc->fn_on);
+-                      else
+-                              do_translate = asc->fn_on;
+-
+-                      if (do_translate) {
+-                              if (value)
+-                                      set_bit(usage->code, asc->pressed_fn);
+-                              else
+-                                      clear_bit(usage->code, asc->pressed_fn);
+-
+-                              input_event(input, usage->type, trans->to,
+-                                              value);
+-
+-                              return 1;
++                      if (test_bit(trans->from, input->key))
++                              code = trans->from;
++                      else if (test_bit(trans->to, input->key))
++                              code = trans->to;
++
++                      if (!code) {
++                              if (trans->flags & APPLE_FLAG_FKEY) {
++                                      switch (fnmode) {
++                                      case 1:
++                                              do_translate = !asc->fn_on;
++                                              break;
++                                      case 2:
++                                              do_translate = asc->fn_on;
++                                              break;
++                                      default:
++                                              /* should never happen */
++                                              do_translate = false;
++                                      }
++                              } else {
++                                      do_translate = asc->fn_on;
++                              }
++
++                              code = do_translate ? trans->to : trans->from;
+                       }
++
++                      input_event(input, usage->type, code, value);
++                      return 1;
+               }
+ 
+               if (asc->quirks & APPLE_NUMLOCK_EMULATION &&
+diff --git a/drivers/i2c/busses/i2c-cht-wc.c b/drivers/i2c/busses/i2c-cht-wc.c
+index 190bbbc7bfee..29456c8821e7 100644
+--- a/drivers/i2c/busses/i2c-cht-wc.c
++++ b/drivers/i2c/busses/i2c-cht-wc.c
+@@ -185,6 +185,51 @@ static const struct i2c_algorithm cht_wc_i2c_adap_algo = {
+       .smbus_xfer = cht_wc_i2c_adap_smbus_xfer,
+ };
+ 
++/*
++ * We are an i2c-adapter which itself is part of an i2c-client. This means that
++ * transfers done through us take adapter->bus_lock twice, once for our parent
++ * i2c-adapter and once to take our own bus_lock. Lockdep does not like this
++ * nested locking, to make lockdep happy in the case of busses with muxes, the
++ * i2c-core's i2c_adapter_lock_bus function calls:
++ * rt_mutex_lock_nested(&adapter->bus_lock, i2c_adapter_depth(adapter));
++ *
++ * But i2c_adapter_depth only works when the direct parent of the adapter is
++ * another adapter, as it is only meant for muxes. In our case there is an
++ * i2c-client and MFD instantiated platform_device in the parent->child chain
++ * between the 2 devices.
++ *
++ * So we override the default i2c_lock_operations and pass a hardcoded
++ * depth of 1 to rt_mutex_lock_nested, to make lockdep happy.
++ *
++ * Note that if there were to be a mux attached to our adapter, this would
++ * break things again since the i2c-mux code expects the root-adapter to have
++ * a locking depth of 0. But we always have only 1 client directly attached
++ * in the form of the Charger IC paired with the CHT Whiskey Cove PMIC.
++ */
++static void cht_wc_i2c_adap_lock_bus(struct i2c_adapter *adapter,
++                               unsigned int flags)
++{
++      rt_mutex_lock_nested(&adapter->bus_lock, 1);
++}
++
++static int cht_wc_i2c_adap_trylock_bus(struct i2c_adapter *adapter,
++                                 unsigned int flags)
++{
++      return rt_mutex_trylock(&adapter->bus_lock);
++}
++
++static void cht_wc_i2c_adap_unlock_bus(struct i2c_adapter *adapter,
++                                 unsigned int flags)
++{
++      rt_mutex_unlock(&adapter->bus_lock);
++}
++
++static const struct i2c_lock_operations cht_wc_i2c_adap_lock_ops = {
++      .lock_bus =    cht_wc_i2c_adap_lock_bus,
++      .trylock_bus = cht_wc_i2c_adap_trylock_bus,
++      .unlock_bus =  cht_wc_i2c_adap_unlock_bus,
++};
++
+ /**** irqchip for the client connected to the extchgr i2c adapter ****/
+ static void cht_wc_i2c_irq_lock(struct irq_data *data)
+ {
+@@ -268,6 +313,7 @@ static int cht_wc_i2c_adap_i2c_probe(struct platform_device *pdev)
+       adap->adapter.owner = THIS_MODULE;
+       adap->adapter.class = I2C_CLASS_HWMON;
+       adap->adapter.algo = &cht_wc_i2c_adap_algo;
++      adap->adapter.lock_ops = &cht_wc_i2c_adap_lock_ops;
+       strlcpy(adap->adapter.name, "PMIC I2C Adapter",
+               sizeof(adap->adapter.name));
+       adap->adapter.dev.parent = &pdev->dev;
+diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c
+index d9ae983095c5..2b7e8eeaa59e 100644
+--- a/drivers/mfd/intel-lpss-pci.c
++++ b/drivers/mfd/intel-lpss-pci.c
+@@ -39,6 +39,8 @@ static int intel_lpss_pci_probe(struct pci_dev *pdev,
+       info->mem = &pdev->resource[0];
+       info->irq = pdev->irq;
+ 
++      pdev->d3cold_delay = 0;
++
+       /* Probably it is enough to set this for iDMA capable devices only */
+       pci_set_master(pdev);
+       pci_try_set_mwi(pdev);
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
+index 99a9d5278369..8441ce3541af 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
+@@ -137,13 +137,12 @@ static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
+ static int alloc_uld_rxqs(struct adapter *adap,
+                         struct sge_uld_rxq_info *rxq_info, bool lro)
+ {
+-      struct sge *s = &adap->sge;
+       unsigned int nq = rxq_info->nrxq + rxq_info->nciq;
++      int i, err, msi_idx, que_idx = 0, bmap_idx = 0;
+       struct sge_ofld_rxq *q = rxq_info->uldrxq;
+       unsigned short *ids = rxq_info->rspq_id;
+-      unsigned int bmap_idx = 0;
++      struct sge *s = &adap->sge;
+       unsigned int per_chan;
+-      int i, err, msi_idx, que_idx = 0;
+ 
+       per_chan = rxq_info->nrxq / adap->params.nports;
+ 
+@@ -161,6 +160,10 @@ static int alloc_uld_rxqs(struct adapter *adap,
+ 
+               if (msi_idx >= 0) {
+                       bmap_idx = get_msix_idx_from_bmap(adap);
++                      if (bmap_idx < 0) {
++                              err = -ENOSPC;
++                              goto freeout;
++                      }
+                       msi_idx = adap->msix_info_ulds[bmap_idx].idx;
+               }
+               err = t4_sge_alloc_rxq(adap, &q->rspq, false,
+diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
+index 080d00520362..5fca9a75780c 100644
+--- a/drivers/net/ethernet/qlogic/qla3xxx.c
++++ b/drivers/net/ethernet/qlogic/qla3xxx.c
+@@ -2787,6 +2787,7 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
+                               netdev_err(qdev->ndev,
+                                          "PCI mapping failed with error: %d\n",
+                                          err);
++                              dev_kfree_skb_irq(skb);
+                               ql_free_large_buffers(qdev);
+                               return -ENOMEM;
+                       }
+diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
+index 18a0952f68a8..6597d1f8d68c 100644
+--- a/drivers/net/usb/hso.c
++++ b/drivers/net/usb/hso.c
+@@ -2633,14 +2633,18 @@ static struct hso_device *hso_create_bulk_serial_device(
+                */
+               if (serial->tiocmget) {
+                       tiocmget = serial->tiocmget;
++                      tiocmget->endp = hso_get_ep(interface,
++                                                  USB_ENDPOINT_XFER_INT,
++                                                  USB_DIR_IN);
++                      if (!tiocmget->endp) {
++                              dev_err(&interface->dev, "Failed to find INT IN ep\n");
++                              goto exit;
++                      }
++
+                       tiocmget->urb = usb_alloc_urb(0, GFP_KERNEL);
+                       if (tiocmget->urb) {
+                               mutex_init(&tiocmget->mutex);
+                               init_waitqueue_head(&tiocmget->waitq);
+-                              tiocmget->endp = hso_get_ep(
+-                                      interface,
+-                                      USB_ENDPOINT_XFER_INT,
+-                                      USB_DIR_IN);
+                       } else
+                               hso_free_tiomget(serial);
+               }
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index e2050afaab7a..e406a05e79dc 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -1275,6 +1275,7 @@ static const struct usb_device_id products[] = {
+       {QMI_FIXED_INTF(0x1e2d, 0x0082, 4)},    /* Cinterion PHxx,PXxx (2 RmNet) */
+       {QMI_FIXED_INTF(0x1e2d, 0x0082, 5)},    /* Cinterion PHxx,PXxx (2 RmNet) */
+       {QMI_FIXED_INTF(0x1e2d, 0x0083, 4)},    /* Cinterion PHxx,PXxx (1 RmNet + USB Audio)*/
++      {QMI_QUIRK_SET_DTR(0x1e2d, 0x00b0, 4)}, /* Cinterion CLS8 */
+       {QMI_FIXED_INTF(0x413c, 0x81a2, 8)},    /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */
+       {QMI_FIXED_INTF(0x413c, 0x81a3, 8)},    /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */
+       {QMI_FIXED_INTF(0x413c, 0x81a4, 8)},    /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
+diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
+index 5cb3edae586f..91bf86cee273 100644
+--- a/drivers/net/xen-netfront.c
++++ b/drivers/net/xen-netfront.c
+@@ -889,9 +889,9 @@ static int xennet_set_skb_gso(struct sk_buff *skb,
+       return 0;
+ }
+ 
+-static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
+-                                struct sk_buff *skb,
+-                                struct sk_buff_head *list)
++static int xennet_fill_frags(struct netfront_queue *queue,
++                           struct sk_buff *skb,
++                           struct sk_buff_head *list)
+ {
+       RING_IDX cons = queue->rx.rsp_cons;
+       struct sk_buff *nskb;
+@@ -910,7 +910,7 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
+               if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) {
+                       queue->rx.rsp_cons = ++cons + skb_queue_len(list);
+                       kfree_skb(nskb);
+-                      return ~0U;
++                      return -ENOENT;
+               }
+ 
+               skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+@@ -921,7 +921,9 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
+               kfree_skb(nskb);
+       }
+ 
+-      return cons;
++      queue->rx.rsp_cons = cons;
++
++      return 0;
+ }
+ 
+ static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
+@@ -1047,8 +1049,7 @@ err:
+               skb->data_len = rx->status;
+               skb->len += rx->status;
+ 
+-              i = xennet_fill_frags(queue, skb, &tmpq);
+-              if (unlikely(i == ~0U))
++              if (unlikely(xennet_fill_frags(queue, skb, &tmpq)))
+                       goto err;
+ 
+               if (rx->flags & XEN_NETRXF_csum_blank)
+@@ -1058,7 +1059,7 @@ err:
+ 
+               __skb_queue_tail(&rxq, skb);
+ 
+-              queue->rx.rsp_cons = ++i;
++              i = ++queue->rx.rsp_cons;
+               work_done++;
+       }
+ 
+diff --git a/drivers/pci/dwc/pci-exynos.c b/drivers/pci/dwc/pci-exynos.c
+index ea03f1ec12a4..01acb418d1fd 100644
+--- a/drivers/pci/dwc/pci-exynos.c
++++ b/drivers/pci/dwc/pci-exynos.c
+@@ -683,7 +683,7 @@ static int __init exynos_pcie_probe(struct platform_device *pdev)
+ 
+       ep->phy = devm_of_phy_get(dev, np, NULL);
+       if (IS_ERR(ep->phy)) {
+-              if (PTR_ERR(ep->phy) == -EPROBE_DEFER)
++              if (PTR_ERR(ep->phy) != -ENODEV)
+                       return PTR_ERR(ep->phy);
+               dev_warn(dev, "Use the 'phy' property. Current DT of pci-exynos was deprecated!!\n");
+       } else
+diff --git a/drivers/pci/dwc/pci-imx6.c b/drivers/pci/dwc/pci-imx6.c
+index 1f1069b70e45..5509b6e2de94 100644
+--- a/drivers/pci/dwc/pci-imx6.c
++++ b/drivers/pci/dwc/pci-imx6.c
+@@ -827,8 +827,8 @@ static int imx6_pcie_probe(struct platform_device *pdev)
+ 
+       imx6_pcie->vpcie = devm_regulator_get_optional(&pdev->dev, "vpcie");
+       if (IS_ERR(imx6_pcie->vpcie)) {
+-              if (PTR_ERR(imx6_pcie->vpcie) == -EPROBE_DEFER)
+-                      return -EPROBE_DEFER;
++              if (PTR_ERR(imx6_pcie->vpcie) != -ENODEV)
++                      return PTR_ERR(imx6_pcie->vpcie);
+               imx6_pcie->vpcie = NULL;
+       }
+ 
+diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c
+index 1987fec1f126..8efd086c57c9 100644
+--- a/drivers/pci/host/pci-tegra.c
++++ b/drivers/pci/host/pci-tegra.c
+@@ -1910,14 +1910,15 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
+               err = of_pci_get_devfn(port);
+               if (err < 0) {
+                       dev_err(dev, "failed to parse address: %d\n", err);
+-                      return err;
++                      goto err_node_put;
+               }
+ 
+               index = PCI_SLOT(err);
+ 
+               if (index < 1 || index > soc->num_ports) {
+                       dev_err(dev, "invalid port number: %d\n", index);
+-                      return -EINVAL;
++                      err = -EINVAL;
++                      goto err_node_put;
+               }
+ 
+               index--;
+@@ -1926,12 +1927,13 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
+               if (err < 0) {
+                       dev_err(dev, "failed to parse # of lanes: %d\n",
+                               err);
+-                      return err;
++                      goto err_node_put;
+               }
+ 
+               if (value > 16) {
+                       dev_err(dev, "invalid # of lanes: %u\n", value);
+-                      return -EINVAL;
++                      err = -EINVAL;
++                      goto err_node_put;
+               }
+ 
+               lanes |= value << (index << 3);
+@@ -1945,13 +1947,15 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
+               lane += value;
+ 
+               rp = devm_kzalloc(dev, sizeof(*rp), GFP_KERNEL);
+-              if (!rp)
+-                      return -ENOMEM;
++              if (!rp) {
++                      err = -ENOMEM;
++                      goto err_node_put;
++              }
+ 
+               err = of_address_to_resource(port, 0, &rp->regs);
+               if (err < 0) {
+                       dev_err(dev, "failed to parse address: %d\n", err);
+-                      return err;
++                      goto err_node_put;
+               }
+ 
+               INIT_LIST_HEAD(&rp->list);
+@@ -1978,6 +1982,10 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
+               return err;
+ 
+       return 0;
++
++err_node_put:
++      of_node_put(port);
++      return err;
+ }
+ 
+ /*
+diff --git a/drivers/pci/host/pcie-rockchip.c b/drivers/pci/host/pcie-rockchip.c
+index 9051c6c8fea4..d3f9e7d24727 100644
+--- a/drivers/pci/host/pcie-rockchip.c
++++ b/drivers/pci/host/pcie-rockchip.c
+@@ -1129,29 +1129,29 @@ static int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip)
+ 
+       rockchip->vpcie12v = devm_regulator_get_optional(dev, "vpcie12v");
+       if (IS_ERR(rockchip->vpcie12v)) {
+-              if (PTR_ERR(rockchip->vpcie12v) == -EPROBE_DEFER)
+-                      return -EPROBE_DEFER;
++              if (PTR_ERR(rockchip->vpcie12v) != -ENODEV)
++                      return PTR_ERR(rockchip->vpcie12v);
+               dev_info(dev, "no vpcie12v regulator found\n");
+       }
+ 
+       rockchip->vpcie3v3 = devm_regulator_get_optional(dev, "vpcie3v3");
+       if (IS_ERR(rockchip->vpcie3v3)) {
+-              if (PTR_ERR(rockchip->vpcie3v3) == -EPROBE_DEFER)
+-                      return -EPROBE_DEFER;
++              if (PTR_ERR(rockchip->vpcie3v3) != -ENODEV)
++                      return PTR_ERR(rockchip->vpcie3v3);
+               dev_info(dev, "no vpcie3v3 regulator found\n");
+       }
+ 
+       rockchip->vpcie1v8 = devm_regulator_get_optional(dev, "vpcie1v8");
+       if (IS_ERR(rockchip->vpcie1v8)) {
+-              if (PTR_ERR(rockchip->vpcie1v8) == -EPROBE_DEFER)
+-                      return -EPROBE_DEFER;
++              if (PTR_ERR(rockchip->vpcie1v8) != -ENODEV)
++                      return PTR_ERR(rockchip->vpcie1v8);
+               dev_info(dev, "no vpcie1v8 regulator found\n");
+       }
+ 
+       rockchip->vpcie0v9 = devm_regulator_get_optional(dev, "vpcie0v9");
+       if (IS_ERR(rockchip->vpcie0v9)) {
+-              if (PTR_ERR(rockchip->vpcie0v9) == -EPROBE_DEFER)
+-                      return -EPROBE_DEFER;
++              if (PTR_ERR(rockchip->vpcie0v9) != -ENODEV)
++                      return PTR_ERR(rockchip->vpcie0v9);
+               dev_info(dev, "no vpcie0v9 regulator found\n");
+       }
+ 
+diff --git a/drivers/pinctrl/tegra/pinctrl-tegra.c b/drivers/pinctrl/tegra/pinctrl-tegra.c
+index 51716819129d..e5c9b9c68428 100644
+--- a/drivers/pinctrl/tegra/pinctrl-tegra.c
++++ b/drivers/pinctrl/tegra/pinctrl-tegra.c
+@@ -51,7 +51,9 @@ static inline u32 pmx_readl(struct tegra_pmx *pmx, u32 bank, u32 reg)
+ 
+ static inline void pmx_writel(struct tegra_pmx *pmx, u32 val, u32 bank, u32 reg)
+ {
+-      writel(val, pmx->regs[bank] + reg);
++      writel_relaxed(val, pmx->regs[bank] + reg);
++      /* make sure pinmux register write completed */
++      pmx_readl(pmx, bank, reg);
+ }
+ 
+ static int tegra_pinctrl_get_groups_count(struct pinctrl_dev *pctldev)
+diff --git a/drivers/rtc/rtc-snvs.c b/drivers/rtc/rtc-snvs.c
+index 71eee39520f0..7aa2c5ea0de4 100644
+--- a/drivers/rtc/rtc-snvs.c
++++ b/drivers/rtc/rtc-snvs.c
+@@ -280,6 +280,10 @@ static int snvs_rtc_probe(struct platform_device *pdev)
+       if (!data)
+               return -ENOMEM;
+ 
++      data->rtc = devm_rtc_allocate_device(&pdev->dev);
++      if (IS_ERR(data->rtc))
++              return PTR_ERR(data->rtc);
++
+       data->regmap = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, "regmap");
+ 
+       if (IS_ERR(data->regmap)) {
+@@ -342,10 +346,9 @@ static int snvs_rtc_probe(struct platform_device *pdev)
+               goto error_rtc_device_register;
+       }
+ 
+-      data->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
+-                                      &snvs_rtc_ops, THIS_MODULE);
+-      if (IS_ERR(data->rtc)) {
+-              ret = PTR_ERR(data->rtc);
++      data->rtc->ops = &snvs_rtc_ops;
++      ret = rtc_register_device(data->rtc);
++      if (ret) {
+               dev_err(&pdev->dev, "failed to register rtc: %d\n", ret);
+               goto error_rtc_device_register;
+       }
+diff --git a/drivers/scsi/scsi_logging.c b/drivers/scsi/scsi_logging.c
+index bd70339c1242..03d9855a6afd 100644
+--- a/drivers/scsi/scsi_logging.c
++++ b/drivers/scsi/scsi_logging.c
+@@ -16,57 +16,15 @@
+ #include <scsi/scsi_eh.h>
+ #include <scsi/scsi_dbg.h>
+ 
+-#define SCSI_LOG_SPOOLSIZE 4096
+-
+-#if (SCSI_LOG_SPOOLSIZE / SCSI_LOG_BUFSIZE) > BITS_PER_LONG
+-#warning SCSI logging bitmask too large
+-#endif
+-
+-struct scsi_log_buf {
+-      char buffer[SCSI_LOG_SPOOLSIZE];
+-      unsigned long map;
+-};
+-
+-static DEFINE_PER_CPU(struct scsi_log_buf, scsi_format_log);
+-
+ static char *scsi_log_reserve_buffer(size_t *len)
+ {
+-      struct scsi_log_buf *buf;
+-      unsigned long map_bits = sizeof(buf->buffer) / SCSI_LOG_BUFSIZE;
+-      unsigned long idx = 0;
+-
+-      preempt_disable();
+-      buf = this_cpu_ptr(&scsi_format_log);
+-      idx = find_first_zero_bit(&buf->map, map_bits);
+-      if (likely(idx < map_bits)) {
+-              while (test_and_set_bit(idx, &buf->map)) {
+-                      idx = find_next_zero_bit(&buf->map, map_bits, idx);
+-                      if (idx >= map_bits)
+-                              break;
+-              }
+-      }
+-      if (WARN_ON(idx >= map_bits)) {
+-              preempt_enable();
+-              return NULL;
+-      }
+-      *len = SCSI_LOG_BUFSIZE;
+-      return buf->buffer + idx * SCSI_LOG_BUFSIZE;
++      *len = 128;
++      return kmalloc(*len, GFP_ATOMIC);
+ }
+ 
+ static void scsi_log_release_buffer(char *bufptr)
+ {
+-      struct scsi_log_buf *buf;
+-      unsigned long idx;
+-      int ret;
+-
+-      buf = this_cpu_ptr(&scsi_format_log);
+-      if (bufptr >= buf->buffer &&
+-          bufptr < buf->buffer + SCSI_LOG_SPOOLSIZE) {
+-              idx = (bufptr - buf->buffer) / SCSI_LOG_BUFSIZE;
+-              ret = test_and_clear_bit(idx, &buf->map);
+-              WARN_ON(!ret);
+-      }
+-      preempt_enable();
++      kfree(bufptr);
+ }
+ 
+ static inline const char *scmd_name(const struct scsi_cmnd *scmd)
+diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
+index 6f5cc67e343e..15b1cd4ef5a7 100644
+--- a/drivers/vfio/pci/vfio_pci.c
++++ b/drivers/vfio/pci/vfio_pci.c
+@@ -363,11 +363,20 @@ static void vfio_pci_disable(struct vfio_pci_device *vdev)
+       pci_write_config_word(pdev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
+ 
+       /*
+-       * Try to reset the device.  The success of this is dependent on
+-       * being able to lock the device, which is not always possible.
++       * Try to get the locks ourselves to prevent a deadlock. The
++       * success of this is dependent on being able to lock the device,
++       * which is not always possible.
++       * We can not use the "try" reset interface here, which will
++       * overwrite the previously restored configuration information.
+        */
+-      if (vdev->reset_works && !pci_try_reset_function(pdev))
+-              vdev->needs_reset = false;
++      if (vdev->reset_works && pci_cfg_access_trylock(pdev)) {
++              if (device_trylock(&pdev->dev)) {
++                      if (!__pci_reset_function_locked(pdev))
++                              vdev->needs_reset = false;
++                      device_unlock(&pdev->dev);
++              }
++              pci_cfg_access_unlock(pdev);
++      }
+ 
+       pci_restore_state(pdev);
+ out:
+diff --git a/drivers/video/fbdev/ssd1307fb.c b/drivers/video/fbdev/ssd1307fb.c
+index f599520374dd..5f7dbf1c4609 100644
+--- a/drivers/video/fbdev/ssd1307fb.c
++++ b/drivers/video/fbdev/ssd1307fb.c
+@@ -433,7 +433,7 @@ static int ssd1307fb_init(struct ssd1307fb_par *par)
+       if (ret < 0)
+               return ret;
+ 
+-      ret = ssd1307fb_write_cmd(par->client, 0x0);
++      ret = ssd1307fb_write_cmd(par->client, par->page_offset);
+       if (ret < 0)
+               return ret;
+ 
+diff --git a/fs/fat/dir.c b/fs/fat/dir.c
+index 81cecbe6d7cf..971e369517a7 100644
+--- a/fs/fat/dir.c
++++ b/fs/fat/dir.c
+@@ -1097,8 +1097,11 @@ static int fat_zeroed_cluster(struct inode *dir, sector_t blknr, int nr_used,
+                       err = -ENOMEM;
+                       goto error;
+               }
++              /* Avoid race with userspace read via bdev */
++              lock_buffer(bhs[n]);
+               memset(bhs[n]->b_data, 0, sb->s_blocksize);
+               set_buffer_uptodate(bhs[n]);
++              unlock_buffer(bhs[n]);
+               mark_buffer_dirty_inode(bhs[n], dir);
+ 
+               n++;
+@@ -1155,6 +1158,8 @@ int fat_alloc_new_dir(struct inode *dir, struct timespec *ts)
+       fat_time_unix2fat(sbi, ts, &time, &date, &time_cs);
+ 
+       de = (struct msdos_dir_entry *)bhs[0]->b_data;
++      /* Avoid race with userspace read via bdev */
++      lock_buffer(bhs[0]);
+       /* filling the new directory slots ("." and ".." entries) */
+       memcpy(de[0].name, MSDOS_DOT, MSDOS_NAME);
+       memcpy(de[1].name, MSDOS_DOTDOT, MSDOS_NAME);
+@@ -1177,6 +1182,7 @@ int fat_alloc_new_dir(struct inode *dir, struct timespec *ts)
+       de[0].size = de[1].size = 0;
+       memset(de + 2, 0, sb->s_blocksize - 2 * sizeof(*de));
+       set_buffer_uptodate(bhs[0]);
++      unlock_buffer(bhs[0]);
+       mark_buffer_dirty_inode(bhs[0], dir);
+ 
+       err = fat_zeroed_cluster(dir, blknr, 1, bhs, MAX_BUF_PER_PAGE);
+@@ -1234,11 +1240,14 @@ static int fat_add_new_entries(struct inode *dir, void *slots, int nr_slots,
+ 
+                       /* fill the directory entry */
+                       copy = min(size, sb->s_blocksize);
++                      /* Avoid race with userspace read via bdev */
++                      lock_buffer(bhs[n]);
+                       memcpy(bhs[n]->b_data, slots, copy);
+-                      slots += copy;
+-                      size -= copy;
+                       set_buffer_uptodate(bhs[n]);
++                      unlock_buffer(bhs[n]);
+                       mark_buffer_dirty_inode(bhs[n], dir);
++                      slots += copy;
++                      size -= copy;
+                       if (!size)
+                               break;
+                       n++;
+diff --git a/fs/fat/fatent.c b/fs/fat/fatent.c
+index 9635df94db7d..24ed1f4e48ae 100644
+--- a/fs/fat/fatent.c
++++ b/fs/fat/fatent.c
+@@ -389,8 +389,11 @@ static int fat_mirror_bhs(struct super_block *sb, struct buffer_head **bhs,
+                               err = -ENOMEM;
+                               goto error;
+                       }
++                      /* Avoid race with userspace read via bdev */
++                      lock_buffer(c_bh);
+                       memcpy(c_bh->b_data, bhs[n]->b_data, sb->s_blocksize);
+                       set_buffer_uptodate(c_bh);
++                      unlock_buffer(c_bh);
+                       mark_buffer_dirty_inode(c_bh, sbi->fat_inode);
+                       if (sb->s_flags & MS_SYNCHRONOUS)
+                               err = sync_dirty_buffer(c_bh);
+diff --git a/fs/ocfs2/dlm/dlmunlock.c b/fs/ocfs2/dlm/dlmunlock.c
+index 63d701cd1e2e..c8e9b7031d9a 100644
+--- a/fs/ocfs2/dlm/dlmunlock.c
++++ b/fs/ocfs2/dlm/dlmunlock.c
+@@ -105,7 +105,8 @@ static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm,
+       enum dlm_status status;
+       int actions = 0;
+       int in_use;
+-        u8 owner;
++      u8 owner;
++      int recovery_wait = 0;
+ 
+       mlog(0, "master_node = %d, valblk = %d\n", master_node,
+            flags & LKM_VALBLK);
+@@ -208,9 +209,12 @@ static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm,
+               }
+               if (flags & LKM_CANCEL)
+                       lock->cancel_pending = 0;
+-              else
+-                      lock->unlock_pending = 0;
+-
++              else {
++                      if (!lock->unlock_pending)
++                              recovery_wait = 1;
++                      else
++                              lock->unlock_pending = 0;
++              }
+       }
+ 
+       /* get an extra ref on lock.  if we are just switching
+@@ -244,6 +248,17 @@ leave:
+       spin_unlock(&res->spinlock);
+       wake_up(&res->wq);
+ 
++      if (recovery_wait) {
++              spin_lock(&res->spinlock);
++              /* Unlock request will directly succeed after owner dies,
++               * and the lock is already removed from grant list. We have to
++               * wait for RECOVERING done or we miss the chance to purge it
++               * since the removement is much faster than RECOVERING proc.
++               */
++              __dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_RECOVERING);
++              spin_unlock(&res->spinlock);
++      }
++
+       /* let the caller's final dlm_lock_put handle the actual kfree */
+       if (actions & DLM_UNLOCK_FREE_LOCK) {
+               /* this should always be coupled with list removal */
+diff --git a/include/scsi/scsi_dbg.h b/include/scsi/scsi_dbg.h
+index 04e0679767f6..2b5dfae78272 100644
+--- a/include/scsi/scsi_dbg.h
++++ b/include/scsi/scsi_dbg.h
+@@ -6,8 +6,6 @@ struct scsi_cmnd;
+ struct scsi_device;
+ struct scsi_sense_hdr;
+ 
+-#define SCSI_LOG_BUFSIZE 128
+-
+ extern void scsi_print_command(struct scsi_cmnd *);
+ extern size_t __scsi_format_command(char *, size_t,
+                                  const unsigned char *, size_t);
+diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
+index 2d828d346982..59d2e94ecb79 100644
+--- a/kernel/bpf/syscall.c
++++ b/kernel/bpf/syscall.c
+@@ -1067,20 +1067,26 @@ static int bpf_prog_load(union bpf_attr *attr)
+       if (err)
+               goto free_used_maps;
+ 
+-      err = bpf_prog_new_fd(prog);
+-      if (err < 0) {
+-              /* failed to allocate fd.
+-               * bpf_prog_put() is needed because the above
+-               * bpf_prog_alloc_id() has published the prog
+-               * to the userspace and the userspace may
+-               * have refcnt-ed it through BPF_PROG_GET_FD_BY_ID.
+-               */
+-              bpf_prog_put(prog);
+-              return err;
+-      }
+-
++      /* Upon success of bpf_prog_alloc_id(), the BPF prog is
++       * effectively publicly exposed. However, retrieving via
++       * bpf_prog_get_fd_by_id() will take another reference,
++       * therefore it cannot be gone underneath us.
++       *
++       * Only for the time /after/ successful bpf_prog_new_fd()
++       * and before returning to userspace, we might just hold
++       * one reference and any parallel close on that fd could
++       * rip everything out. Hence, below notifications must
++       * happen before bpf_prog_new_fd().
++       *
++       * Also, any failure handling from this point onwards must
++       * be using bpf_prog_put() given the program is exposed.
++       */
+       bpf_prog_kallsyms_add(prog);
+       trace_bpf_prog_load(prog, err);
++
++      err = bpf_prog_new_fd(prog);
++      if (err < 0)
++              bpf_prog_put(prog);
+       return err;
+ 
+ free_used_maps:
+diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c
+index 20fef1a38602..8f15665ab616 100644
+--- a/kernel/kexec_core.c
++++ b/kernel/kexec_core.c
+@@ -301,6 +301,8 @@ static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
+ {
+       struct page *pages;
+ 
++      if (fatal_signal_pending(current))
++              return NULL;
+       pages = alloc_pages(gfp_mask & ~__GFP_ZERO, order);
+       if (pages) {
+               unsigned int count, i;
+diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
+index 88754e9790f9..f8dc77b18962 100644
+--- a/kernel/livepatch/core.c
++++ b/kernel/livepatch/core.c
+@@ -941,6 +941,7 @@ err:
+       pr_warn("patch '%s' failed for module '%s', refusing to load module '%s'\n",
+               patch->mod->name, obj->mod->name, obj->mod->name);
+       mod->klp_alive = false;
++      obj->mod = NULL;
+       klp_cleanup_module_patches_limited(mod, patch);
+       mutex_unlock(&klp_mutex);
+ 
+diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
+index 131d5871f8c9..e1df563cdfe7 100644
+--- a/lib/Kconfig.debug
++++ b/lib/Kconfig.debug
+@@ -570,7 +570,7 @@ config DEBUG_KMEMLEAK_EARLY_LOG_SIZE
+       int "Maximum kmemleak early log entries"
+       depends on DEBUG_KMEMLEAK
+       range 200 40000
+-      default 400
++      default 16000
+       help
+         Kmemleak must track all the memory allocations to avoid
+         reporting false positives. Since memory may be allocated or
+diff --git a/net/core/sock.c b/net/core/sock.c
+index c8d39092e8bf..5f466db916ee 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -1561,8 +1561,6 @@ static void __sk_destruct(struct rcu_head *head)
+               sk_filter_uncharge(sk, filter);
+               RCU_INIT_POINTER(sk->sk_filter, NULL);
+       }
+-      if (rcu_access_pointer(sk->sk_reuseport_cb))
+-              reuseport_detach_sock(sk);
+ 
+       sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP);
+ 
+@@ -1585,7 +1583,14 @@ static void __sk_destruct(struct rcu_head *head)
+ 
+ void sk_destruct(struct sock *sk)
+ {
+-      if (sock_flag(sk, SOCK_RCU_FREE))
++      bool use_call_rcu = sock_flag(sk, SOCK_RCU_FREE);
++
++      if (rcu_access_pointer(sk->sk_reuseport_cb)) {
++              reuseport_detach_sock(sk);
++              use_call_rcu = true;
++      }
++
++      if (use_call_rcu)
+               call_rcu(&sk->sk_rcu, __sk_destruct);
+       else
+               __sk_destruct(&sk->sk_rcu);
+diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
+index 0fc499db6da2..347be2ea78d4 100644
+--- a/net/ipv4/ip_gre.c
++++ b/net/ipv4/ip_gre.c
+@@ -1424,6 +1424,7 @@ nla_put_failure:
+ static void erspan_setup(struct net_device *dev)
+ {
+       ether_setup(dev);
++      dev->max_mtu = 0;
+       dev->netdev_ops = &erspan_netdev_ops;
+       dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+       dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index 3b72990a8bb9..5a1cffb769fd 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -925,16 +925,15 @@ void ip_rt_send_redirect(struct sk_buff *skb)
+       if (peer->rate_tokens == 0 ||
+           time_after(jiffies,
+                      (peer->rate_last +
+-                      (ip_rt_redirect_load << peer->rate_tokens)))) {
++                      (ip_rt_redirect_load << peer->n_redirects)))) {
+               __be32 gw = rt_nexthop(rt, ip_hdr(skb)->daddr);
+ 
+               icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw);
+               peer->rate_last = jiffies;
+-              ++peer->rate_tokens;
+               ++peer->n_redirects;
+ #ifdef CONFIG_IP_ROUTE_VERBOSE
+               if (log_martians &&
+-                  peer->rate_tokens == ip_rt_redirect_number)
++                  peer->n_redirects == ip_rt_redirect_number)
+                       net_warn_ratelimited("host %pI4/if%d ignores redirects for %pI4 to %pI4\n",
+                                            &ip_hdr(skb)->saddr, inet_iif(skb),
+                                            &ip_hdr(skb)->daddr, &gw);
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index c47161e92407..a81201dd3a1a 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -5547,13 +5547,20 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
+       switch (event) {
+       case RTM_NEWADDR:
+               /*
+-               * If the address was optimistic
+-               * we inserted the route at the start of
+-               * our DAD process, so we don't need
+-               * to do it again
++               * If the address was optimistic we inserted the route at the
++               * start of our DAD process, so we don't need to do it again.
++               * If the device was taken down in the middle of the DAD
++               * cycle there is a race where we could get here without a
++               * host route, so nothing to insert. That will be fixed when
++               * the device is brought up.
+                */
+-              if (!rcu_access_pointer(ifp->rt->rt6i_node))
++              if (ifp->rt && !rcu_access_pointer(ifp->rt->rt6i_node)) {
+                       ip6_ins_rt(ifp->rt);
++              } else if (!ifp->rt && (ifp->idev->dev->flags & IFF_UP)) {
++                      pr_warn("BUG: Address %pI6c on device %s is missing its host route.\n",
++                              &ifp->addr, ifp->idev->dev->name);
++              }
++
+               if (ifp->idev->cnf.forwarding)
+                       addrconf_join_anycast(ifp);
+               if (!ipv6_addr_any(&ifp->peer_addr))
+diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
+index 9ee208a348f5..e41070fb4fc0 100644
+--- a/net/ipv6/ip6_input.c
++++ b/net/ipv6/ip6_input.c
+@@ -173,6 +173,16 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
+       if (ipv6_addr_is_multicast(&hdr->saddr))
+               goto err;
+ 
++      /* While RFC4291 is not explicit about v4mapped addresses
++       * in IPv6 headers, it seems clear linux dual-stack
++       * model can not deal properly with these.
++       * Security models could be fooled by ::ffff:127.0.0.1 for example.
++       *
++       * https://tools.ietf.org/html/draft-itojun-v6ops-v4mapped-harmful-02
++       */
++      if (ipv6_addr_v4mapped(&hdr->saddr))
++              goto err;
++
+       skb->transport_header = skb->network_header + sizeof(*hdr);
+       IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr);
+ 
+diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c
+index 33ad7e25a89d..a51bfba19b9e 100644
+--- a/net/nfc/llcp_sock.c
++++ b/net/nfc/llcp_sock.c
+@@ -119,9 +119,14 @@ static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
+       llcp_sock->service_name = kmemdup(llcp_addr.service_name,
+                                         llcp_sock->service_name_len,
+                                         GFP_KERNEL);
+-
++      if (!llcp_sock->service_name) {
++              ret = -ENOMEM;
++              goto put_dev;
++      }
+       llcp_sock->ssap = nfc_llcp_get_sdp_ssap(local, llcp_sock);
+       if (llcp_sock->ssap == LLCP_SAP_MAX) {
++              kfree(llcp_sock->service_name);
++              llcp_sock->service_name = NULL;
+               ret = -EADDRINUSE;
+               goto put_dev;
+       }
+diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
+index 08ed6abe4aae..5f2acd029da5 100644
+--- a/net/nfc/netlink.c
++++ b/net/nfc/netlink.c
+@@ -970,7 +970,8 @@ static int nfc_genl_dep_link_down(struct sk_buff *skb, struct genl_info *info)
+       int rc;
+       u32 idx;
+ 
+-      if (!info->attrs[NFC_ATTR_DEVICE_INDEX])
++      if (!info->attrs[NFC_ATTR_DEVICE_INDEX] ||
++          !info->attrs[NFC_ATTR_TARGET_INDEX])
+               return -EINVAL;
+ 
+       idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);
+@@ -1019,7 +1020,8 @@ static int nfc_genl_llc_get_params(struct sk_buff *skb, struct genl_info *info)
+       struct sk_buff *msg = NULL;
+       u32 idx;
+ 
+-      if (!info->attrs[NFC_ATTR_DEVICE_INDEX])
++      if (!info->attrs[NFC_ATTR_DEVICE_INDEX] ||
++          !info->attrs[NFC_ATTR_FIRMWARE_NAME])
+               return -EINVAL;
+ 
+       idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);
+diff --git a/net/rds/ib.c b/net/rds/ib.c
+index c21eb4850b9d..e723146cec29 100644
+--- a/net/rds/ib.c
++++ b/net/rds/ib.c
+@@ -140,6 +140,9 @@ static void rds_ib_add_one(struct ib_device *device)
+       refcount_set(&rds_ibdev->refcount, 1);
+       INIT_WORK(&rds_ibdev->free_work, rds_ib_dev_free);
+ 
++      INIT_LIST_HEAD(&rds_ibdev->ipaddr_list);
++      INIT_LIST_HEAD(&rds_ibdev->conn_list);
++
+       rds_ibdev->max_wrs = device->attrs.max_qp_wr;
+       rds_ibdev->max_sge = min(device->attrs.max_sge, RDS_IB_MAX_SGE);
+ 
+@@ -199,9 +202,6 @@ static void rds_ib_add_one(struct ib_device *device)
+               device->name,
+               rds_ibdev->use_fastreg ? "FRMR" : "FMR");
+ 
+-      INIT_LIST_HEAD(&rds_ibdev->ipaddr_list);
+-      INIT_LIST_HEAD(&rds_ibdev->conn_list);
+-
+       down_write(&rds_ib_devices_lock);
+       list_add_tail_rcu(&rds_ibdev->list, &rds_ib_devices);
+       up_write(&rds_ib_devices_lock);
+diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
+index aeffa320429d..40fd1ee0095c 100644
+--- a/net/sched/sch_cbq.c
++++ b/net/sched/sch_cbq.c
+@@ -1131,6 +1131,26 @@ static const struct nla_policy cbq_policy[TCA_CBQ_MAX + 1] = {
+       [TCA_CBQ_POLICE]        = { .len = sizeof(struct tc_cbq_police) },
+ };
+ 
++static int cbq_opt_parse(struct nlattr *tb[TCA_CBQ_MAX + 1], struct nlattr *opt)
++{
++      int err;
++
++      if (!opt)
++              return -EINVAL;
++
++      err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy, NULL);
++      if (err < 0)
++              return err;
++
++      if (tb[TCA_CBQ_WRROPT]) {
++              const struct tc_cbq_wrropt *wrr = nla_data(tb[TCA_CBQ_WRROPT]);
++
++              if (wrr->priority > TC_CBQ_MAXPRIO)
++                      err = -EINVAL;
++      }
++      return err;
++}
++
+ static int cbq_init(struct Qdisc *sch, struct nlattr *opt)
+ {
+       struct cbq_sched_data *q = qdisc_priv(sch);
+@@ -1142,10 +1162,7 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt)
+       hrtimer_init(&q->delay_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
+       q->delay_timer.function = cbq_undelay;
+ 
+-      if (!opt)
+-              return -EINVAL;
+-
+-      err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy, NULL);
++      err = cbq_opt_parse(tb, opt);
+       if (err < 0)
+               return err;
+ 
+@@ -1459,10 +1476,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
+       struct cbq_class *parent;
+       struct qdisc_rate_table *rtab = NULL;
+ 
+-      if (opt == NULL)
+-              return -EINVAL;
+-
+-      err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy, NULL);
++      err = cbq_opt_parse(tb, opt);
+       if (err < 0)
+               return err;
+ 
+diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
+index 2836c80c7aa5..b507a72d5813 100644
+--- a/net/sched/sch_dsmark.c
++++ b/net/sched/sch_dsmark.c
+@@ -353,6 +353,8 @@ static int dsmark_init(struct Qdisc *sch, struct nlattr *opt)
+               goto errout;
+ 
+       err = -EINVAL;
++      if (!tb[TCA_DSMARK_INDICES])
++              goto errout;
+       indices = nla_get_u16(tb[TCA_DSMARK_INDICES]);
+ 
+       if (hweight32(indices) != 1)
+diff --git a/net/tipc/link.c b/net/tipc/link.c
+index ac0144f532aa..631bfc7e9127 100644
+--- a/net/tipc/link.c
++++ b/net/tipc/link.c
+@@ -157,6 +157,7 @@ struct tipc_link {
+       struct {
+               u16 len;
+               u16 limit;
++              struct sk_buff *target_bskb;
+       } backlog[5];
+       u16 snd_nxt;
+       u16 last_retransm;
+@@ -826,6 +827,8 @@ void link_prepare_wakeup(struct tipc_link *l)
+ 
+ void tipc_link_reset(struct tipc_link *l)
+ {
++      u32 imp;
++
+       l->peer_session = ANY_SESSION;
+       l->session++;
+       l->mtu = l->advertised_mtu;
+@@ -833,11 +836,10 @@ void tipc_link_reset(struct tipc_link *l)
+       __skb_queue_purge(&l->deferdq);
+       skb_queue_splice_init(&l->wakeupq, l->inputq);
+       __skb_queue_purge(&l->backlogq);
+-      l->backlog[TIPC_LOW_IMPORTANCE].len = 0;
+-      l->backlog[TIPC_MEDIUM_IMPORTANCE].len = 0;
+-      l->backlog[TIPC_HIGH_IMPORTANCE].len = 0;
+-      l->backlog[TIPC_CRITICAL_IMPORTANCE].len = 0;
+-      l->backlog[TIPC_SYSTEM_IMPORTANCE].len = 0;
++      for (imp = 0; imp <= TIPC_SYSTEM_IMPORTANCE; imp++) {
++              l->backlog[imp].len = 0;
++              l->backlog[imp].target_bskb = NULL;
++      }
+       kfree_skb(l->reasm_buf);
+       kfree_skb(l->failover_reasm_skb);
+       l->reasm_buf = NULL;
+@@ -876,7 +878,7 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
+       u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
+       struct sk_buff_head *transmq = &l->transmq;
+       struct sk_buff_head *backlogq = &l->backlogq;
+-      struct sk_buff *skb, *_skb, *bskb;
++      struct sk_buff *skb, *_skb, **tskb;
+       int pkt_cnt = skb_queue_len(list);
+       int rc = 0;
+ 
+@@ -922,19 +924,21 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
+                       seqno++;
+                       continue;
+               }
+-              if (tipc_msg_bundle(skb_peek_tail(backlogq), hdr, mtu)) {
++              tskb = &l->backlog[imp].target_bskb;
++              if (tipc_msg_bundle(*tskb, hdr, mtu)) {
+                       kfree_skb(__skb_dequeue(list));
+                       l->stats.sent_bundled++;
+                       continue;
+               }
+-              if (tipc_msg_make_bundle(&bskb, hdr, mtu, l->addr)) {
++              if (tipc_msg_make_bundle(tskb, hdr, mtu, l->addr)) {
+                       kfree_skb(__skb_dequeue(list));
+-                      __skb_queue_tail(backlogq, bskb);
+-                      l->backlog[msg_importance(buf_msg(bskb))].len++;
++                      __skb_queue_tail(backlogq, *tskb);
++                      l->backlog[imp].len++;
+                       l->stats.sent_bundled++;
+                       l->stats.sent_bundles++;
+                       continue;
+               }
++              l->backlog[imp].target_bskb = NULL;
+               l->backlog[imp].len += skb_queue_len(list);
+               skb_queue_splice_tail_init(list, backlogq);
+       }
+@@ -949,6 +953,7 @@ void tipc_link_advance_backlog(struct tipc_link *l, struct sk_buff_head *xmitq)
+       u16 seqno = l->snd_nxt;
+       u16 ack = l->rcv_nxt - 1;
+       u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
++      u32 imp;
+ 
+       while (skb_queue_len(&l->transmq) < l->window) {
+               skb = skb_peek(&l->backlogq);
+@@ -959,7 +964,10 @@ void tipc_link_advance_backlog(struct tipc_link *l, struct sk_buff_head *xmitq)
+                       break;
+               __skb_dequeue(&l->backlogq);
+               hdr = buf_msg(skb);
+-              l->backlog[msg_importance(hdr)].len--;
++              imp = msg_importance(hdr);
++              l->backlog[imp].len--;
++              if (unlikely(skb == l->backlog[imp].target_bskb))
++                      l->backlog[imp].target_bskb = NULL;
+               __skb_queue_tail(&l->transmq, skb);
+               __skb_queue_tail(xmitq, _skb);
+               TIPC_SKB_CB(skb)->ackers = l->ackers;
+diff --git a/net/tipc/msg.c b/net/tipc/msg.c
+index 17146c16ee2d..e38396025874 100644
+--- a/net/tipc/msg.c
++++ b/net/tipc/msg.c
+@@ -456,10 +456,7 @@ bool tipc_msg_make_bundle(struct sk_buff **skb,  struct tipc_msg *msg,
+       bmsg = buf_msg(_skb);
+       tipc_msg_init(msg_prevnode(msg), bmsg, MSG_BUNDLER, 0,
+                     INT_H_SIZE, dnode);
+-      if (msg_isdata(msg))
+-              msg_set_importance(bmsg, TIPC_CRITICAL_IMPORTANCE);
+-      else
+-              msg_set_importance(bmsg, TIPC_SYSTEM_IMPORTANCE);
++      msg_set_importance(bmsg, msg_importance(msg));
+       msg_set_seqno(bmsg, msg_seqno(msg));
+       msg_set_ack(bmsg, msg_ack(msg));
+       msg_set_bcast_ack(bmsg, msg_bcast_ack(msg));
+diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
+index b41170417316..148c949cdfe7 100644
+--- a/net/vmw_vsock/af_vsock.c
++++ b/net/vmw_vsock/af_vsock.c
+@@ -648,7 +648,7 @@ struct sock *__vsock_create(struct net *net,
+ }
+ EXPORT_SYMBOL_GPL(__vsock_create);
+ 
+-static void __vsock_release(struct sock *sk)
++static void __vsock_release(struct sock *sk, int level)
+ {
+       if (sk) {
+               struct sk_buff *skb;
+@@ -658,9 +658,17 @@ static void __vsock_release(struct sock *sk)
+               vsk = vsock_sk(sk);
+               pending = NULL; /* Compiler warning. */
+ 
++              /* The release call is supposed to use lock_sock_nested()
++               * rather than lock_sock(), if a sock lock should be acquired.
++               */
+               transport->release(vsk);
+ 
+-              lock_sock(sk);
++              /* When "level" is SINGLE_DEPTH_NESTING, use the nested
++               * version to avoid the warning "possible recursive locking
++               * detected". When "level" is 0, lock_sock_nested(sk, level)
++               * is the same as lock_sock(sk).
++               */
++              lock_sock_nested(sk, level);
+               sock_orphan(sk);
+               sk->sk_shutdown = SHUTDOWN_MASK;
+ 
+@@ -669,7 +677,7 @@ static void __vsock_release(struct sock *sk)
+ 
+               /* Clean up any sockets that never were accepted. */
+               while ((pending = vsock_dequeue_accept(sk)) != NULL) {
+-                      __vsock_release(pending);
++                      __vsock_release(pending, SINGLE_DEPTH_NESTING);
+                       sock_put(pending);
+               }
+ 
+@@ -718,7 +726,7 @@ EXPORT_SYMBOL_GPL(vsock_stream_has_space);
+ 
+ static int vsock_release(struct socket *sock)
+ {
+-      __vsock_release(sock->sk);
++      __vsock_release(sock->sk, 0);
+       sock->sk = NULL;
+       sock->state = SS_FREE;
+ 
+diff --git a/net/vmw_vsock/hyperv_transport.c b/net/vmw_vsock/hyperv_transport.c
+index ec72a5edaa1b..6614512f8180 100644
+--- a/net/vmw_vsock/hyperv_transport.c
++++ b/net/vmw_vsock/hyperv_transport.c
+@@ -539,7 +539,7 @@ static void hvs_release(struct vsock_sock *vsk)
+       struct sock *sk = sk_vsock(vsk);
+       bool remove_sock;
+ 
+-      lock_sock(sk);
++      lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
+       remove_sock = hvs_close_lock_held(vsk);
+       release_sock(sk);
+       if (remove_sock)
+diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
+index f3f3d06cb6d8..a8eb0657c1e8 100644
+--- a/net/vmw_vsock/virtio_transport_common.c
++++ b/net/vmw_vsock/virtio_transport_common.c
+@@ -791,7 +791,7 @@ void virtio_transport_release(struct vsock_sock *vsk)
+       struct sock *sk = &vsk->sk;
+       bool remove_sock = true;
+ 
+-      lock_sock(sk);
++      lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
+       if (sk->sk_type == SOCK_STREAM)
+               remove_sock = virtio_transport_close(vsk);
+ 
+diff --git a/security/smack/smack_access.c b/security/smack/smack_access.c
+index 1a3004189447..a9c20821a726 100644
+--- a/security/smack/smack_access.c
++++ b/security/smack/smack_access.c
+@@ -469,7 +469,7 @@ char *smk_parse_smack(const char *string, int len)
+       if (i == 0 || i >= SMK_LONGLABEL)
+               return ERR_PTR(-EINVAL);
+ 
+-      smack = kzalloc(i + 1, GFP_KERNEL);
++      smack = kzalloc(i + 1, GFP_NOFS);
+       if (smack == NULL)
+               return ERR_PTR(-ENOMEM);
+ 
+@@ -504,7 +504,7 @@ int smk_netlbl_mls(int level, char *catset, struct netlbl_lsm_secattr *sap,
+                       if ((m & *cp) == 0)
+                               continue;
+                       rc = netlbl_catmap_setbit(&sap->attr.mls.cat,
+-                                                cat, GFP_KERNEL);
++                                                cat, GFP_NOFS);
+                       if (rc < 0) {
+                               netlbl_catmap_free(sap->attr.mls.cat);
+                               return rc;
+@@ -540,7 +540,7 @@ struct smack_known *smk_import_entry(const char *string, int len)
+       if (skp != NULL)
+               goto freeout;
+ 
+-      skp = kzalloc(sizeof(*skp), GFP_KERNEL);
++      skp = kzalloc(sizeof(*skp), GFP_NOFS);
+       if (skp == NULL) {
+               skp = ERR_PTR(-ENOMEM);
+               goto freeout;
+diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
+index 0d5ce7190b17..a0e1b99212b2 100644
+--- a/security/smack/smack_lsm.c
++++ b/security/smack/smack_lsm.c
+@@ -269,7 +269,7 @@ static struct smack_known *smk_fetch(const char *name, struct inode *ip,
+       if (!(ip->i_opflags & IOP_XATTR))
+               return ERR_PTR(-EOPNOTSUPP);
+ 
+-      buffer = kzalloc(SMK_LONGLABEL, GFP_KERNEL);
++      buffer = kzalloc(SMK_LONGLABEL, GFP_NOFS);
+       if (buffer == NULL)
+               return ERR_PTR(-ENOMEM);
+ 
+@@ -944,7 +944,8 @@ static int smack_bprm_set_creds(struct linux_binprm *bprm)
+ 
+               if (rc != 0)
+                       return rc;
+-      } else if (bprm->unsafe)
++      }
++      if (bprm->unsafe & ~LSM_UNSAFE_PTRACE)
+               return -EPERM;
+ 
+       bsp->smk_task = isp->smk_task;
+@@ -4031,6 +4032,8 @@ access_check:
+                       skp = smack_ipv6host_label(&sadd);
+               if (skp == NULL)
+                       skp = smack_net_ambient;
++              if (skb == NULL)
++                      break;
+ #ifdef CONFIG_AUDIT
+               smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net);
+               ad.a.u.net->family = family;
+diff --git a/usr/Makefile b/usr/Makefile
+index 237a028693ce..5f1bc5b23b14 100644
+--- a/usr/Makefile
++++ b/usr/Makefile
+@@ -11,6 +11,9 @@ datafile_y = initramfs_data.cpio$(suffix_y)
+ datafile_d_y = .$(datafile_y).d
+ AFLAGS_initramfs_data.o += -DINITRAMFS_IMAGE="usr/$(datafile_y)"
+ 
++# clean rules do not have CONFIG_INITRAMFS_COMPRESSION.  So clean up after all
++# possible compression formats.
++clean-files += initramfs_data.cpio*
+ 
+ # Generate builtin.o based on initramfs_data.o
+ obj-$(CONFIG_BLK_DEV_INITRD) := initramfs_data.o
