diff --git a/Makefile b/Makefile
index feecefa13ca6..93c3467eeb8c 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
 PATCHLEVEL = 14
-SUBLEVEL = 148
+SUBLEVEL = 149
 EXTRAVERSION =
 NAME = Petit Gorille
 
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 3b7488fce3db..7de26809340a 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -1356,7 +1356,14 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
                *val = get_reg_val(id, vcpu->arch.pspb);
                break;
        case KVM_REG_PPC_DPDES:
-               *val = get_reg_val(id, vcpu->arch.vcore->dpdes);
+               /*
+                * On POWER9, where we are emulating msgsndp etc.,
+                * we return 1 bit for each vcpu, which can come from
+                * either vcore->dpdes or doorbell_request.
+                * On POWER8, doorbell_request is 0.
+                */
+               *val = get_reg_val(id, vcpu->arch.vcore->dpdes |
+                                  vcpu->arch.doorbell_request);
                break;
        case KVM_REG_PPC_VTB:
                *val = get_reg_val(id, vcpu->arch.vcore->vtb);
diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c
index 3c75eee45edf..46f99fc1901c 100644
--- a/arch/powerpc/kvm/book3s_xive.c
+++ b/arch/powerpc/kvm/book3s_xive.c
@@ -1001,20 +1001,22 @@ void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu)
        /* Mask the VP IPI */
        xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_01);
 
-       /* Disable the VP */
-       xive_native_disable_vp(xc->vp_id);
-
-       /* Free the queues & associated interrupts */
+       /* Free escalations */
        for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
-               struct xive_q *q = &xc->queues[i];
-
-               /* Free the escalation irq */
                if (xc->esc_virq[i]) {
                        free_irq(xc->esc_virq[i], vcpu);
                        irq_dispose_mapping(xc->esc_virq[i]);
                        kfree(xc->esc_virq_names[i]);
                }
-               /* Free the queue */
+       }
+
+       /* Disable the VP */
+       xive_native_disable_vp(xc->vp_id);
+
+       /* Free the queues */
+       for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
+               struct xive_q *q = &xc->queues[i];
+
                xive_native_disable_queue(xc->vp_id, q, i);
                if (q->qpage) {
                        free_pages((unsigned long)q->qpage,
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 87687e46b48b..58c14749bb0c 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -35,6 +35,7 @@
 #include <linux/memblock.h>
 #include <linux/context_tracking.h>
 #include <linux/libfdt.h>
+#include <linux/cpu.h>
 
 #include <asm/debugfs.h>
 #include <asm/processor.h>
@@ -1852,10 +1853,16 @@ static int hpt_order_get(void *data, u64 *val)
 
 static int hpt_order_set(void *data, u64 val)
 {
+       int ret;
+
        if (!mmu_hash_ops.resize_hpt)
                return -ENODEV;
 
-       return mmu_hash_ops.resize_hpt(val);
+       cpus_read_lock();
+       ret = mmu_hash_ops.resize_hpt(val);
+       cpus_read_unlock();
+
+       return ret;
 }
 
 DEFINE_SIMPLE_ATTRIBUTE(fops_hpt_order, hpt_order_get, hpt_order_set, "%llu\n");
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index c8a743af6bf5..597fcbf7a39e 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -617,7 +617,10 @@ static ssize_t symbol_map_read(struct file *fp, struct kobject *kobj,
                                       bin_attr->size);
 }
 
-static BIN_ATTR_RO(symbol_map, 0);
+static struct bin_attribute symbol_map_attr = {
+       .attr = {.name = "symbol_map", .mode = 0400},
+       .read = symbol_map_read
+};
 
 static void opal_export_symmap(void)
 {
@@ -634,10 +637,10 @@ static void opal_export_symmap(void)
                return;
 
        /* Setup attributes */
-       bin_attr_symbol_map.private = __va(be64_to_cpu(syms[0]));
-       bin_attr_symbol_map.size = be64_to_cpu(syms[1]);
+       symbol_map_attr.private = __va(be64_to_cpu(syms[0]));
+       symbol_map_attr.size = be64_to_cpu(syms[1]);
 
-       rc = sysfs_create_bin_file(opal_kobj, &bin_attr_symbol_map);
+       rc = sysfs_create_bin_file(opal_kobj, &symbol_map_attr);
        if (rc)
                pr_warn("Error %d creating OPAL symbols file\n", rc);
 }
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 55e97565ed2d..eb738ef57792 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -643,7 +643,10 @@ static int pseries_lpar_resize_hpt_commit(void *data)
        return 0;
 }
 
-/* Must be called in user context */
+/*
+ * Must be called in process context. The caller must hold the
+ * cpus_lock.
+ */
 static int pseries_lpar_resize_hpt(unsigned long shift)
 {
        struct hpt_resize_state state = {
@@ -699,7 +702,8 @@ static int pseries_lpar_resize_hpt(unsigned long shift)
 
        t1 = ktime_get();
 
-       rc = stop_machine(pseries_lpar_resize_hpt_commit, &state, NULL);
+       rc = stop_machine_cpuslocked(pseries_lpar_resize_hpt_commit,
+                                    &state, NULL);
 
        t2 = ktime_get();
 
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 7d4c5500c6c2..1bb7ea6afcf9 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -185,20 +185,30 @@ unsigned long get_wchan(struct task_struct *p)
 
        if (!p || p == current || p->state == TASK_RUNNING || !task_stack_page(p))
                return 0;
+
+       if (!try_get_task_stack(p))
+               return 0;
+
        low = task_stack_page(p);
        high = (struct stack_frame *) task_pt_regs(p);
        sf = (struct stack_frame *) p->thread.ksp;
-       if (sf <= low || sf > high)
-               return 0;
+       if (sf <= low || sf > high) {
+               return_address = 0;
+               goto out;
+       }
        for (count = 0; count < 16; count++) {
                sf = (struct stack_frame *) sf->back_chain;
-               if (sf <= low || sf > high)
-                       return 0;
+               if (sf <= low || sf > high) {
+                       return_address = 0;
+                       goto out;
+               }
                return_address = sf->gprs[8];
                if (!in_sched_functions(return_address))
-                       return return_address;
+                       goto out;
        }
-       return 0;
+out:
+       put_task_stack(p);
+       return return_address;
 }
 
 unsigned long arch_align_stack(unsigned long sp)
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index ed0bdd220e1a..bd074acd0d1f 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -300,7 +300,8 @@ int arch_update_cpu_topology(void)
        rc = __arch_update_cpu_topology();
        for_each_online_cpu(cpu) {
                dev = get_cpu_device(cpu);
-               kobject_uevent(&dev->kobj, KOBJ_CHANGE);
+               if (dev)
+                       kobject_uevent(&dev->kobj, KOBJ_CHANGE);
        }
        return rc;
 }
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index a27fb640adbe..cab41bc2572f 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -3658,7 +3658,7 @@ static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
        const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
                                    | KVM_S390_MEMOP_F_CHECK_ONLY;
 
-       if (mop->flags & ~supported_flags)
+       if (mop->flags & ~supported_flags || mop->ar >= NUM_ACRS || !mop->size)
                return -EINVAL;
 
        if (mop->size > MEM_OP_MAX_SIZE)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index a5a77a19adf6..7784b02312ca 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -8026,7 +8026,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
                /* _system ok, nested_vmx_check_permission has verified cpl=0 */
                if (kvm_write_guest_virt_system(vcpu, gva, &field_value,
                                                (is_long_mode(vcpu) ? 8 : 4),
-                                               NULL))
+                                               &e))
                        kvm_inject_page_fault(vcpu, &e);
        }
 
diff --git a/crypto/skcipher.c b/crypto/skcipher.c
index c5501404f145..915bbae3d5bc 100644
--- a/crypto/skcipher.c
+++ b/crypto/skcipher.c
@@ -95,7 +95,7 @@ static inline u8 *skcipher_get_spot(u8 *start, unsigned int len)
        return max(start, end_page);
 }
 
-static void skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
+static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
 {
        u8 *addr;
 
@@ -103,19 +103,21 @@ static void skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
        addr = skcipher_get_spot(addr, bsize);
        scatterwalk_copychunks(addr, &walk->out, bsize,
                               (walk->flags & SKCIPHER_WALK_PHYS) ? 2 : 1);
+       return 0;
 }
 
 int skcipher_walk_done(struct skcipher_walk *walk, int err)
 {
-       unsigned int n; /* bytes processed */
-       bool more;
+       unsigned int n = walk->nbytes;
+       unsigned int nbytes = 0;
 
-       if (unlikely(err < 0))
+       if (!n)
                goto finish;
 
-       n = walk->nbytes - err;
-       walk->total -= n;
-       more = (walk->total != 0);
+       if (likely(err >= 0)) {
+               n -= err;
+               nbytes = walk->total - n;
+       }
 
        if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS |
                                    SKCIPHER_WALK_SLOW |
@@ -131,7 +133,7 @@ int skcipher_walk_done(struct skcipher_walk *walk, int err)
                memcpy(walk->dst.virt.addr, walk->page, n);
                skcipher_unmap_dst(walk);
        } else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) {
-               if (err) {
+               if (err > 0) {
                        /*
                         * Didn't process all bytes.  Either the algorithm is
                         * broken, or this was the last step and it turned out
@@ -139,27 +141,29 @@ int skcipher_walk_done(struct skcipher_walk *walk, int err)
                         * the algorithm requires it.
                         */
                        err = -EINVAL;
-                       goto finish;
-               }
-               skcipher_done_slow(walk, n);
-               goto already_advanced;
+                       nbytes = 0;
+               } else
+                       n = skcipher_done_slow(walk, n);
        }
 
+       if (err > 0)
+               err = 0;
+
+       walk->total = nbytes;
+       walk->nbytes = 0;
+
        scatterwalk_advance(&walk->in, n);
        scatterwalk_advance(&walk->out, n);
-already_advanced:
-       scatterwalk_done(&walk->in, 0, more);
-       scatterwalk_done(&walk->out, 1, more);
+       scatterwalk_done(&walk->in, 0, nbytes);
+       scatterwalk_done(&walk->out, 1, nbytes);
 
-       if (more) {
+       if (nbytes) {
                crypto_yield(walk->flags & SKCIPHER_WALK_SLEEP ?
                             CRYPTO_TFM_REQ_MAY_SLEEP : 0);
                return skcipher_walk_next(walk);
        }
-       err = 0;
-finish:
-       walk->nbytes = 0;
 
+finish:
        /* Short-circuit for the common/fast path. */
        if (!((unsigned long)walk->buffer | (unsigned long)walk->page))
                goto out;
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index e4b049f281f5..a23460084955 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -106,6 +106,7 @@ struct nbd_device {
        struct nbd_config *config;
        struct mutex config_lock;
        struct gendisk *disk;
+       struct workqueue_struct *recv_workq;
 
        struct list_head list;
        struct task_struct *task_recv;
@@ -132,9 +133,10 @@ static struct dentry *nbd_dbg_dir;
 
 #define NBD_MAGIC 0x68797548
 
+#define NBD_DEF_BLKSIZE 1024
+
 static unsigned int nbds_max = 16;
 static int max_part = 16;
-static struct workqueue_struct *recv_workqueue;
 static int part_shift;
 
 static int nbd_dev_dbg_init(struct nbd_device *nbd);
@@ -1013,7 +1015,7 @@ static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
                /* We take the tx_mutex in an error path in the recv_work, so we
                 * need to queue_work outside of the tx_mutex.
                 */
-               queue_work(recv_workqueue, &args->work);
+               queue_work(nbd->recv_workq, &args->work);
 
                atomic_inc(&config->live_connections);
                wake_up(&config->conn_wait);
@@ -1118,6 +1120,10 @@ static void nbd_config_put(struct nbd_device *nbd)
                kfree(nbd->config);
                nbd->config = NULL;
 
+               if (nbd->recv_workq)
+                       destroy_workqueue(nbd->recv_workq);
+               nbd->recv_workq = NULL;
+
                nbd->tag_set.timeout = 0;
                queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
 
@@ -1143,6 +1149,14 @@ static int nbd_start_device(struct nbd_device *nbd)
                return -EINVAL;
        }
 
+       nbd->recv_workq = alloc_workqueue("knbd%d-recv",
+                                         WQ_MEM_RECLAIM | WQ_HIGHPRI |
+                                         WQ_UNBOUND, 0, nbd->index);
+       if (!nbd->recv_workq) {
+               dev_err(disk_to_dev(nbd->disk), "Could not allocate knbd recv work queue.\n");
+               return -ENOMEM;
+       }
+
        blk_mq_update_nr_hw_queues(&nbd->tag_set, config->num_connections);
        nbd->task_recv = current;
 
@@ -1173,7 +1187,7 @@ static int nbd_start_device(struct nbd_device *nbd)
                INIT_WORK(&args->work, recv_work);
                args->nbd = nbd;
                args->index = i;
-               queue_work(recv_workqueue, &args->work);
+               queue_work(nbd->recv_workq, &args->work);
        }
        nbd_size_update(nbd);
        return error;
@@ -1193,8 +1207,10 @@ static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *b
        mutex_unlock(&nbd->config_lock);
        ret = wait_event_interruptible(config->recv_wq,
                                         atomic_read(&config->recv_threads) == 0);
-       if (ret)
+       if (ret) {
                sock_shutdown(nbd);
+               flush_workqueue(nbd->recv_workq);
+       }
        mutex_lock(&nbd->config_lock);
        bd_set_size(bdev, 0);
        /* user requested, ignore socket errors */
@@ -1216,6 +1232,14 @@ static void nbd_clear_sock_ioctl(struct nbd_device *nbd,
                nbd_config_put(nbd);
 }
 
+static bool nbd_is_valid_blksize(unsigned long blksize)
+{
+       if (!blksize || !is_power_of_2(blksize) || blksize < 512 ||
+           blksize > PAGE_SIZE)
+               return false;
+       return true;
+}
+
 /* Must be called with config_lock held */
 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
                       unsigned int cmd, unsigned long arg)
@@ -1231,8 +1255,9 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
        case NBD_SET_SOCK:
                return nbd_add_socket(nbd, arg, false);
        case NBD_SET_BLKSIZE:
-               if (!arg || !is_power_of_2(arg) || arg < 512 ||
-                   arg > PAGE_SIZE)
+               if (!arg)
+                       arg = NBD_DEF_BLKSIZE;
+               if (!nbd_is_valid_blksize(arg))
                        return -EINVAL;
                nbd_size_set(nbd, arg,
                             div_s64(config->bytesize, arg));
@@ -1312,7 +1337,7 @@ static struct nbd_config *nbd_alloc_config(void)
        atomic_set(&config->recv_threads, 0);
        init_waitqueue_head(&config->recv_wq);
        init_waitqueue_head(&config->conn_wait);
-       config->blksize = 1024;
+       config->blksize = NBD_DEF_BLKSIZE;
        atomic_set(&config->live_connections, 0);
        try_module_get(THIS_MODULE);
        return config;
@@ -1744,6 +1769,12 @@ static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info)
        if (info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]) {
                u64 bsize =
                        nla_get_u64(info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]);
+               if (!bsize)
+                       bsize = NBD_DEF_BLKSIZE;
+               if (!nbd_is_valid_blksize(bsize)) {
+                       ret = -EINVAL;
+                       goto out;
+               }
                nbd_size_set(nbd, bsize, div64_u64(config->bytesize, bsize));
        }
        if (info->attrs[NBD_ATTR_TIMEOUT]) {
@@ -1819,6 +1850,12 @@ static void nbd_disconnect_and_put(struct nbd_device *nbd)
        mutex_lock(&nbd->config_lock);
        nbd_disconnect(nbd);
        mutex_unlock(&nbd->config_lock);
+       /*
+        * Make sure recv thread has finished, so it does not drop the last
+        * config ref and try to destroy the workqueue from inside the work
+        * queue.
+        */
+       flush_workqueue(nbd->recv_workq);
        if (test_and_clear_bit(NBD_HAS_CONFIG_REF,
                               &nbd->config->runtime_flags))
                nbd_config_put(nbd);
@@ -2199,19 +2236,12 @@ static int __init nbd_init(void)
 
        if (nbds_max > 1UL << (MINORBITS - part_shift))
                return -EINVAL;
-       recv_workqueue = alloc_workqueue("knbd-recv",
-                                        WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
-       if (!recv_workqueue)
-               return -ENOMEM;
 
-       if (register_blkdev(NBD_MAJOR, "nbd")) {
-               destroy_workqueue(recv_workqueue);
+       if (register_blkdev(NBD_MAJOR, "nbd"))
                return -EIO;
-       }
 
        if (genl_register_family(&nbd_genl_family)) {
                unregister_blkdev(NBD_MAJOR, "nbd");
-               destroy_workqueue(recv_workqueue);
                return -EINVAL;
        }
        nbd_dbg_init();
@@ -2253,7 +2283,6 @@ static void __exit nbd_cleanup(void)
 
        idr_destroy(&nbd_index_idr);
        genl_unregister_family(&nbd_genl_family);
-       destroy_workqueue(recv_workqueue);
        unregister_blkdev(NBD_MAJOR, "nbd");
 }
 
diff --git a/drivers/crypto/caam/caamalg_desc.c b/drivers/crypto/caam/caamalg_desc.c
index 530c14ee32de..b23c7b72525c 100644
--- a/drivers/crypto/caam/caamalg_desc.c
+++ b/drivers/crypto/caam/caamalg_desc.c
@@ -476,6 +476,7 @@ void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
                               const bool is_qi)
 {
        u32 geniv, moveiv;
+       u32 *wait_cmd;
 
        /* Note: Context registers are saved. */
        init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
@@ -566,6 +567,14 @@ void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
 
        /* Will read cryptlen */
        append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+       /*
+        * Wait for IV transfer (ofifo -> class2) to finish before starting
+        * ciphertext transfer (ofifo -> external memory).
+        */
+       wait_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NIFP);
+       set_jump_tgt_here(desc, wait_cmd);
+
        append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | KEY_VLF |
                             FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LASTBOTH);
        append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
diff --git a/drivers/crypto/caam/caamalg_desc.h b/drivers/crypto/caam/caamalg_desc.h
index e412ec8f7005..9197b55d708a 100644
--- a/drivers/crypto/caam/caamalg_desc.h
+++ b/drivers/crypto/caam/caamalg_desc.h
@@ -12,7 +12,7 @@
 #define DESC_AEAD_BASE                 (4 * CAAM_CMD_SZ)
 #define DESC_AEAD_ENC_LEN              (DESC_AEAD_BASE + 11 * CAAM_CMD_SZ)
 #define DESC_AEAD_DEC_LEN              (DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
-#define DESC_AEAD_GIVENC_LEN           (DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)
+#define DESC_AEAD_GIVENC_LEN           (DESC_AEAD_ENC_LEN + 8 * CAAM_CMD_SZ)
 #define DESC_QI_AEAD_ENC_LEN           (DESC_AEAD_ENC_LEN + 3 * CAAM_CMD_SZ)
 #define DESC_QI_AEAD_DEC_LEN           (DESC_AEAD_DEC_LEN + 3 * CAAM_CMD_SZ)
 #define DESC_QI_AEAD_GIVENC_LEN                (DESC_AEAD_GIVENC_LEN + 3 * CAAM_CMD_SZ)
diff --git a/drivers/crypto/cavium/zip/zip_main.c b/drivers/crypto/cavium/zip/zip_main.c
index a6425a7afa7b..279ffdd8b53d 100644
--- a/drivers/crypto/cavium/zip/zip_main.c
+++ b/drivers/crypto/cavium/zip/zip_main.c
@@ -595,6 +595,7 @@ static const struct file_operations zip_stats_fops = {
        .owner = THIS_MODULE,
        .open  = zip_stats_open,
        .read  = seq_read,
+       .release = single_release,
 };
 
 static int zip_clear_open(struct inode *inode, struct file *file)
@@ -606,6 +607,7 @@ static const struct file_operations zip_clear_fops = {
        .owner = THIS_MODULE,
        .open  = zip_clear_open,
        .read  = seq_read,
+       .release = single_release,
 };
 
 static int zip_regs_open(struct inode *inode, struct file *file)
@@ -617,6 +619,7 @@ static const struct file_operations zip_regs_fops = {
        .owner = THIS_MODULE,
        .open  = zip_regs_open,
        .read  = seq_read,
+       .release = single_release,
 };
 
 /* Root directory for thunderx_zip debugfs entry */
diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h
index 5c4c0a253129..d78f8d5c89c3 100644
--- a/drivers/crypto/qat/qat_common/adf_common_drv.h
+++ b/drivers/crypto/qat/qat_common/adf_common_drv.h
@@ -95,7 +95,7 @@ struct service_hndl {
 
 static inline int get_current_node(void)
 {
-       return topology_physical_package_id(smp_processor_id());
+       return topology_physical_package_id(raw_smp_processor_id());
 }
 
 int adf_service_register(struct service_hndl *service);
diff --git a/drivers/devfreq/tegra-devfreq.c b/drivers/devfreq/tegra-devfreq.c
index ae712159246f..6627a7dce95c 100644
--- a/drivers/devfreq/tegra-devfreq.c
+++ b/drivers/devfreq/tegra-devfreq.c
@@ -485,11 +485,11 @@ static int tegra_devfreq_target(struct device *dev, unsigned long *freq,
 {
        struct tegra_devfreq *tegra = dev_get_drvdata(dev);
        struct dev_pm_opp *opp;
-       unsigned long rate = *freq * KHZ;
+       unsigned long rate;
 
-       opp = devfreq_recommended_opp(dev, &rate, flags);
+       opp = devfreq_recommended_opp(dev, freq, flags);
        if (IS_ERR(opp)) {
-               dev_err(dev, "Failed to find opp for %lu KHz\n", *freq);
+               dev_err(dev, "Failed to find opp for %lu Hz\n", *freq);
                return PTR_ERR(opp);
        }
        rate = dev_pm_opp_get_freq(opp);
@@ -498,8 +498,6 @@ static int tegra_devfreq_target(struct device *dev, unsigned long *freq,
        clk_set_min_rate(tegra->emc_clock, rate);
        clk_set_rate(tegra->emc_clock, 0);
 
-       *freq = rate;
-
        return 0;
 }
 
@@ -509,7 +507,7 @@ static int tegra_devfreq_get_dev_status(struct device *dev,
        struct tegra_devfreq *tegra = dev_get_drvdata(dev);
        struct tegra_devfreq_device *actmon_dev;
 
-       stat->current_frequency = tegra->cur_freq;
+       stat->current_frequency = tegra->cur_freq * KHZ;
 
        /* To be used by the tegra governor */
        stat->private_data = tegra;
@@ -564,7 +562,7 @@ static int tegra_governor_get_target(struct devfreq *devfreq,
                target_freq = max(target_freq, dev->target_freq);
        }
 
-       *freq = target_freq;
+       *freq = target_freq * KHZ;
 
        return 0;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index e16229000a98..884ed359f249 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -540,6 +540,9 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
                if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK)
                        sh_num = 0xffffffff;
 
+               if (info->read_mmr_reg.count > 128)
+                       return -EINVAL;
+
                regs = kmalloc_array(info->read_mmr_reg.count, sizeof(*regs), GFP_KERNEL);
                if (!regs)
                        return -ENOMEM;
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c
index 41ebb37aaa79..844e1142a122 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss.c
+++ b/drivers/gpu/drm/omapdrm/dss/dss.c
@@ -1100,7 +1100,7 @@ static const struct dss_features omap34xx_dss_feats = {
 
 static const struct dss_features omap3630_dss_feats = {
        .model                  =       DSS_MODEL_OMAP3,
-       .fck_div_max            =       32,
+       .fck_div_max            =       31,
        .fck_freq_max           =       173000000,
        .dss_fck_multiplier     =       1,
        .parent_clk_name        =       "dpll4_ck",
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c
index 78cb3b8881fa..b0141ba7b741 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x.c
@@ -181,6 +181,12 @@ static void etm4_enable_hw(void *info)
        if (coresight_timeout(drvdata->base, TRCSTATR, TRCSTATR_IDLE_BIT, 0))
                dev_err(drvdata->dev,
                        "timeout while waiting for Idle Trace Status\n");
+       /*
+        * As recommended by section 4.3.7 ("Synchronization when using the
+        * memory-mapped interface") of ARM IHI 0064D
+        */
+       dsb(sy);
+       isb();
 
        CS_LOCK(drvdata->base);
 
@@ -331,8 +337,12 @@ static void etm4_disable_hw(void *info)
        /* EN, bit[0] Trace unit enable bit */
        control &= ~0x1;
 
-       /* make sure everything completes before disabling */
-       mb();
+       /*
+        * Make sure everything completes before disabling, as recommended
+        * by section 7.3.77 ("TRCVICTLR, ViewInst Main Control Register,
+        * SSTATUS") of ARM IHI 0064D
+        */
+       dsb(sy);
        isb();
        writel_relaxed(control, drvdata->base + TRCPRGCTLR);
 
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index bcfa84aa2113..176cbc67d08a 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -435,7 +435,12 @@ static int esdhc_of_enable_dma(struct sdhci_host *host)
                dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
 
        value = sdhci_readl(host, ESDHC_DMA_SYSCTL);
-       value |= ESDHC_DMA_SNOOP;
+
+       if (of_dma_is_coherent(dev->of_node))
+               value |= ESDHC_DMA_SNOOP;
+       else
+               value &= ~ESDHC_DMA_SNOOP;
+
        sdhci_writel(host, value, ESDHC_DMA_SYSCTL);
        return 0;
 }
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 3741d4c846e5..5807028c8309 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -2638,6 +2638,7 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *intmask_p)
 static void sdhci_adma_show_error(struct sdhci_host *host)
 {
        void *desc = host->adma_table;
+       dma_addr_t dma = host->adma_addr;
 
        sdhci_dumpregs(host);
 
@@ -2645,18 +2646,21 @@ static void sdhci_adma_show_error(struct sdhci_host *host)
                struct sdhci_adma2_64_desc *dma_desc = desc;
 
                if (host->flags & SDHCI_USE_64_BIT_DMA)
-                       DBG("%p: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n",
-                           desc, le32_to_cpu(dma_desc->addr_hi),
+                       SDHCI_DUMP("%08llx: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n",
+                           (unsigned long long)dma,
+                           le32_to_cpu(dma_desc->addr_hi),
                            le32_to_cpu(dma_desc->addr_lo),
                            le16_to_cpu(dma_desc->len),
                            le16_to_cpu(dma_desc->cmd));
                else
-                       DBG("%p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
-                           desc, le32_to_cpu(dma_desc->addr_lo),
+                       SDHCI_DUMP("%08llx: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
+                           (unsigned long long)dma,
+                           le32_to_cpu(dma_desc->addr_lo),
                            le16_to_cpu(dma_desc->len),
                            le16_to_cpu(dma_desc->cmd));
 
                desc += host->desc_sz;
+               dma += host->desc_sz;
 
                if (dma_desc->cmd & cpu_to_le16(ADMA2_END))
                        break;
@@ -2732,7 +2736,8 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
                        != MMC_BUS_TEST_R)
                host->data->error = -EILSEQ;
        else if (intmask & SDHCI_INT_ADMA_ERROR) {
-               pr_err("%s: ADMA error\n", mmc_hostname(host->mmc));
+               pr_err("%s: ADMA error: 0x%08x\n", mmc_hostname(host->mmc),
+                      intmask);
                sdhci_adma_show_error(host);
                host->data->error = -EIO;
                if (host->ops->adma_workaround)
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index d8c448beab24..ec0b3d025867 100644
--- a/drivers/net/can/spi/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -627,7 +627,7 @@ static int mcp251x_setup(struct net_device *net, struct mcp251x_priv *priv,
 static int mcp251x_hw_reset(struct spi_device *spi)
 {
        struct mcp251x_priv *priv = spi_get_drvdata(spi);
-       u8 reg;
+       unsigned long timeout;
        int ret;
 
        /* Wait for oscillator startup timer after power up */
@@ -641,10 +641,19 @@ static int mcp251x_hw_reset(struct spi_device *spi)
        /* Wait for oscillator startup timer after reset */
        mdelay(MCP251X_OST_DELAY_MS);
 
-       reg = mcp251x_read_reg(spi, CANSTAT);
-       if ((reg & CANCTRL_REQOP_MASK) != CANCTRL_REQOP_CONF)
-               return -ENODEV;
-
+       /* Wait for reset to finish */
+       timeout = jiffies + HZ;
+       while ((mcp251x_read_reg(spi, CANSTAT) & CANCTRL_REQOP_MASK) !=
+              CANCTRL_REQOP_CONF) {
+               usleep_range(MCP251X_OST_DELAY_MS * 1000,
+                            MCP251X_OST_DELAY_MS * 1000 * 2);
+
+               if (time_after(jiffies, timeout)) {
+                       dev_err(&spi->dev,
+                               "MCP251x didn't enter in conf mode after reset\n");
+                       return -EBUSY;
+               }
+       }
        return 0;
 }
 
diff --git a/drivers/net/ieee802154/atusb.c b/drivers/net/ieee802154/atusb.c
index ef688518ad77..8e2cbc88df98 100644
--- a/drivers/net/ieee802154/atusb.c
+++ b/drivers/net/ieee802154/atusb.c
@@ -907,10 +907,11 @@ static void atusb_disconnect(struct usb_interface *interface)
 
        ieee802154_unregister_hw(atusb->hw);
 
+       usb_put_dev(atusb->usb_dev);
+
        ieee802154_free_hw(atusb->hw);
 
        usb_set_intfdata(interface, NULL);
-       usb_put_dev(atusb->usb_dev);
 
        pr_debug("atusb_disconnect done\n");
 }
diff --git a/drivers/pwm/pwm-stm32-lp.c b/drivers/pwm/pwm-stm32-lp.c
index 9793b296108f..3f2e4ef695d7 100644
--- a/drivers/pwm/pwm-stm32-lp.c
+++ b/drivers/pwm/pwm-stm32-lp.c
@@ -59,6 +59,12 @@ static int stm32_pwm_lp_apply(struct pwm_chip *chip, struct pwm_device *pwm,
        /* Calculate the period and prescaler value */
        div = (unsigned long long)clk_get_rate(priv->clk) * state->period;
        do_div(div, NSEC_PER_SEC);
+       if (!div) {
+               /* Clock is too slow to achieve requested period. */
+               dev_dbg(priv->chip.dev, "Can't reach %u ns\n",  state->period);
+               return -EINVAL;
+       }
+
        prd = div;
        while (div > STM32_LPTIM_MAX_ARR) {
                presc++;
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index 34b9ad6b3143..158b5ce45b3b 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -369,7 +369,7 @@ int ccwgroup_create_dev(struct device *parent, struct ccwgroup_driver *gdrv,
                goto error;
        }
        /* Check for trailing stuff. */
-       if (i == num_devices && strlen(buf) > 0) {
+       if (i == num_devices && buf && strlen(buf) > 0) {
                rc = -EINVAL;
                goto error;
        }
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index d3e504c3c362..dadff1838fec 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -1178,6 +1178,8 @@ device_initcall(cio_settle_init);
 
 int sch_is_pseudo_sch(struct subchannel *sch)
 {
+       if (!sch->dev.parent)
+               return 0;
        return sch == to_css(sch->dev.parent)->pseudo_subchannel;
 }
 
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 17d6079c7642..456ef213dc14 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -299,7 +299,7 @@ static void thermal_zone_device_set_polling(struct thermal_zone_device *tz,
                mod_delayed_work(system_freezable_wq, &tz->poll_queue,
                                 msecs_to_jiffies(delay));
        else
-               cancel_delayed_work(&tz->poll_queue);
+               cancel_delayed_work_sync(&tz->poll_queue);
 }
 
 static void monitor_thermal_zone(struct thermal_zone_device *tz)
diff --git a/drivers/watchdog/aspeed_wdt.c b/drivers/watchdog/aspeed_wdt.c
index fd91007b4e41..cee7334b2a00 100644
--- a/drivers/watchdog/aspeed_wdt.c
+++ b/drivers/watchdog/aspeed_wdt.c
@@ -38,6 +38,7 @@ static const struct aspeed_wdt_config ast2500_config = {
 static const struct of_device_id aspeed_wdt_of_table[] = {
        { .compatible = "aspeed,ast2400-wdt", .data = &ast2400_config },
        { .compatible = "aspeed,ast2500-wdt", .data = &ast2500_config },
+       { .compatible = "aspeed,ast2600-wdt", .data = &ast2500_config },
        { },
 };
 MODULE_DEVICE_TABLE(of, aspeed_wdt_of_table);
@@ -257,7 +258,8 @@ static int aspeed_wdt_probe(struct platform_device *pdev)
                set_bit(WDOG_HW_RUNNING, &wdt->wdd.status);
        }
 
-       if (of_device_is_compatible(np, "aspeed,ast2500-wdt")) {
+       if ((of_device_is_compatible(np, "aspeed,ast2500-wdt")) ||
+               (of_device_is_compatible(np, "aspeed,ast2600-wdt"))) {
                u32 reg = readl(wdt->base + WDT_RESET_WIDTH);
 
                reg &= config->ext_pulse_width_mask;
diff --git a/drivers/watchdog/imx2_wdt.c b/drivers/watchdog/imx2_wdt.c
index 5098982e1a58..e5c162b05376 100644
--- a/drivers/watchdog/imx2_wdt.c
+++ b/drivers/watchdog/imx2_wdt.c
@@ -58,7 +58,7 @@
 
 #define IMX2_WDT_WMCR          0x08            /* Misc Register */
 
-#define IMX2_WDT_MAX_TIME      128
+#define IMX2_WDT_MAX_TIME      128U
 #define IMX2_WDT_DEFAULT_TIME  60              /* in seconds */
 
 #define WDOG_SEC_TO_COUNT(s)   ((s * 2 - 1) << 8)
@@ -183,7 +183,7 @@ static int imx2_wdt_set_timeout(struct watchdog_device *wdog,
 {
        unsigned int actual;
 
-       actual = min(new_timeout, wdog->max_hw_heartbeat_ms * 1000);
+       actual = min(new_timeout, IMX2_WDT_MAX_TIME);
        __imx2_wdt_set_timeout(wdog, actual);
        wdog->timeout = new_timeout;
        return 0;
diff --git a/drivers/xen/pci.c b/drivers/xen/pci.c
index 7494dbeb4409..db58aaa4dc59 100644
--- a/drivers/xen/pci.c
+++ b/drivers/xen/pci.c
@@ -29,6 +29,8 @@
 #include "../pci/pci.h"
 #ifdef CONFIG_PCI_MMCONFIG
 #include <asm/pci_x86.h>
+
+static int xen_mcfg_late(void);
 #endif
 
 static bool __read_mostly pci_seg_supported = true;
@@ -40,7 +42,18 @@ static int xen_add_device(struct device *dev)
 #ifdef CONFIG_PCI_IOV
        struct pci_dev *physfn = pci_dev->physfn;
 #endif
-
+#ifdef CONFIG_PCI_MMCONFIG
+       static bool pci_mcfg_reserved = false;
+       /*
+        * Reserve MCFG areas in Xen on first invocation due to this being
+        * potentially called from inside of acpi_init immediately after
+        * MCFG table has been finally parsed.
+        */
+       if (!pci_mcfg_reserved) {
+               xen_mcfg_late();
+               pci_mcfg_reserved = true;
+       }
+#endif
        if (pci_seg_supported) {
                struct {
                        struct physdev_pci_device_add add;
@@ -213,7 +226,7 @@ static int __init register_xen_pci_notifier(void)
 arch_initcall(register_xen_pci_notifier);
 
 #ifdef CONFIG_PCI_MMCONFIG
-static int __init xen_mcfg_late(void)
+static int xen_mcfg_late(void)
 {
        struct pci_mmcfg_region *cfg;
        int rc;
@@ -252,8 +265,4 @@ static int __init xen_mcfg_late(void)
        }
        return 0;
 }
-/*
- * Needs to be done after acpi_init which are subsys_initcall.
- */
-subsys_initcall_sync(xen_mcfg_late);
 #endif
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
index 53877e078a7b..6f59bd875a22 100644
--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
+++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
@@ -55,6 +55,7 @@
 #include <linux/string.h>
 #include <linux/slab.h>
 #include <linux/miscdevice.h>
+#include <linux/workqueue.h>
 
 #include <xen/xenbus.h>
 #include <xen/xen.h>
@@ -113,6 +114,8 @@ struct xenbus_file_priv {
        wait_queue_head_t read_waitq;
 
        struct kref kref;
+
+       struct work_struct wq;
 };
 
 /* Read out any raw xenbus messages queued up. */
@@ -297,14 +300,14 @@ static void watch_fired(struct xenbus_watch *watch,
        mutex_unlock(&adap->dev_data->reply_mutex);
 }
 
-static void xenbus_file_free(struct kref *kref)
+static void xenbus_worker(struct work_struct *wq)
 {
        struct xenbus_file_priv *u;
        struct xenbus_transaction_holder *trans, *tmp;
        struct watch_adapter *watch, *tmp_watch;
        struct read_buffer *rb, *tmp_rb;
 
-       u = container_of(kref, struct xenbus_file_priv, kref);
+       u = container_of(wq, struct xenbus_file_priv, wq);
 
        /*
         * No need for locking here because there are no other users,
@@ -330,6 +333,18 @@ static void xenbus_file_free(struct kref *kref)
        kfree(u);
 }
 
+static void xenbus_file_free(struct kref *kref)
+{
+       struct xenbus_file_priv *u;
+
+       /*
+        * We might be called in xenbus_thread().
+        * Use workqueue to avoid deadlock.
+        */
+       u = container_of(kref, struct xenbus_file_priv, kref);
+       schedule_work(&u->wq);
+}
+
 static struct xenbus_transaction_holder *xenbus_get_transaction(
        struct xenbus_file_priv *u, uint32_t tx_id)
 {
@@ -626,6 +641,7 @@ static int xenbus_file_open(struct inode *inode, struct file *filp)
        INIT_LIST_HEAD(&u->watches);
        INIT_LIST_HEAD(&u->read_buffers);
        init_waitqueue_head(&u->read_waitq);
+       INIT_WORK(&u->wq, xenbus_worker);
 
        mutex_init(&u->reply_mutex);
        mutex_init(&u->msgbuffer_mutex);
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
index 89e69904976a..2651192f0166 100644
--- a/fs/9p/vfs_file.c
+++ b/fs/9p/vfs_file.c
@@ -528,6 +528,7 @@ v9fs_mmap_file_mmap(struct file *filp, struct vm_area_struct *vma)
        v9inode = V9FS_I(inode);
        mutex_lock(&v9inode->v_mutex);
        if (!v9inode->writeback_fid &&
+           (vma->vm_flags & VM_SHARED) &&
            (vma->vm_flags & VM_WRITE)) {
                /*
                 * clone a fid and add it to writeback_fid
@@ -629,6 +630,8 @@ static void v9fs_mmap_vm_close(struct vm_area_struct *vma)
                        (vma->vm_end - vma->vm_start - 1),
        };
 
+       if (!(vma->vm_flags & VM_SHARED))
+               return;
 
        p9_debug(P9_DEBUG_VFS, "9p VMA close, %p, flushing", vma);
 
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 9bda8c7a80a0..879bc0825093 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -789,7 +789,12 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
        ci->i_version = le64_to_cpu(info->version);
        inode->i_version++;
        inode->i_rdev = le32_to_cpu(info->rdev);
-       inode->i_blkbits = fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1;
+       /* directories have fl_stripe_unit set to zero */
+       if (le32_to_cpu(info->layout.fl_stripe_unit))
+               inode->i_blkbits =
+                       fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1;
+       else
+               inode->i_blkbits = CEPH_BLOCK_SHIFT;
 
        if ((new_version || (new_issued & CEPH_CAP_AUTH_SHARED)) &&
            (issued & CEPH_CAP_AUTH_EXCL) == 0) {
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index e1ded4bd6115..b968334f841e 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -3543,7 +3543,9 @@ static void delayed_work(struct work_struct *work)
                                pr_info("mds%d hung\n", s->s_mds);
                        }
                }
-               if (s->s_state < CEPH_MDS_SESSION_OPEN) {
+               if (s->s_state == CEPH_MDS_SESSION_NEW ||
+                   s->s_state == CEPH_MDS_SESSION_RESTARTING ||
+                   s->s_state == CEPH_MDS_SESSION_REJECTED) {
                        /* this mds is failed or recovering, just wait */
                        ceph_put_mds_session(s);
                        continue;
diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
index e9e97803442a..55db06c7c587 100644
--- a/fs/fuse/cuse.c
+++ b/fs/fuse/cuse.c
@@ -513,6 +513,7 @@ static int cuse_channel_open(struct inode *inode, struct file *file)
        rc = cuse_send_init(cc);
        if (rc) {
                fuse_dev_free(fud);
+               fuse_conn_put(&cc->fc);
                return rc;
        }
        file->private_data = fud;
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index 549c916d2859..525684b0056f 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -1132,7 +1132,7 @@ static void encode_attrs(struct xdr_stream *xdr, const struct iattr *iap,
                } else
                        *p++ = cpu_to_be32(NFS4_SET_TO_SERVER_TIME);
        }
-       if (bmval[2] & FATTR4_WORD2_SECURITY_LABEL) {
+       if (label && (bmval[2] & FATTR4_WORD2_SECURITY_LABEL)) {
                *p++ = cpu_to_be32(label->lfs);
                *p++ = cpu_to_be32(label->pi);
                *p++ = cpu_to_be32(label->len);
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 96867fb159bf..ec04cce31814 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -1319,10 +1319,15 @@ void pnfs_roc_release(struct nfs4_layoutreturn_args *args,
        const nfs4_stateid *res_stateid = NULL;
        struct nfs4_xdr_opaque_data *ld_private = args->ld_private;
 
-       if (ret == 0) {
-               arg_stateid = &args->stateid;
+       switch (ret) {
+       case -NFS4ERR_NOMATCHING_LAYOUT:
+               break;
+       case 0:
                if (res->lrs_present)
                        res_stateid = &res->stateid;
+               /* Fallthrough */
+       default:
+               arg_stateid = &args->stateid;
        }
        pnfs_layoutreturn_free_lsegs(lo, arg_stateid, &args->range,
                        res_stateid);
diff --git a/fs/statfs.c b/fs/statfs.c
index c25dd9a26cc1..ca1084cbe03c 100644
--- a/fs/statfs.c
+++ b/fs/statfs.c
@@ -304,19 +304,10 @@ COMPAT_SYSCALL_DEFINE2(fstatfs, unsigned int, fd, struct compat_statfs __user *,
 static int put_compat_statfs64(struct compat_statfs64 __user *ubuf, struct kstatfs *kbuf)
 {
        struct compat_statfs64 buf;
-       if (sizeof(ubuf->f_bsize) == 4) {
-               if ((kbuf->f_type | kbuf->f_bsize | kbuf->f_namelen |
-                    kbuf->f_frsize | kbuf->f_flags) & 0xffffffff00000000ULL)
-                       return -EOVERFLOW;
-               /* f_files and f_ffree may be -1; it's okay
-                * to stuff that into 32 bits */
-               if (kbuf->f_files != 0xffffffffffffffffULL
-                && (kbuf->f_files & 0xffffffff00000000ULL))
-                       return -EOVERFLOW;
-               if (kbuf->f_ffree != 0xffffffffffffffffULL
-                && (kbuf->f_ffree & 0xffffffff00000000ULL))
-                       return -EOVERFLOW;
-       }
+
+       if ((kbuf->f_bsize | kbuf->f_frsize) & 0xffffffff00000000ULL)
+               return -EOVERFLOW;
+
        memset(&buf, 0, sizeof(struct compat_statfs64));
        buf.f_type = kbuf->f_type;
        buf.f_bsize = kbuf->f_bsize;
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index 55a604ad459f..2e179778576c 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -2743,4 +2743,57 @@ static inline bool ieee80211_action_contains_tpc(struct sk_buff *skb)
        return true;
 }
 
+struct element {
+       u8 id;
+       u8 datalen;
+       u8 data[];
+} __packed;
+
+/* element iteration helpers */
+#define for_each_element(_elem, _data, _datalen)                       \
+       for (_elem = (const struct element *)(_data);                   \
+            (const u8 *)(_data) + (_datalen) - (const u8 *)_elem >=    \
+               (int)sizeof(*_elem) &&                                  \
+            (const u8 *)(_data) + (_datalen) - (const u8 *)_elem >=    \
+               (int)sizeof(*_elem) + _elem->datalen;                   \
+            _elem = (const struct element *)(_elem->data + _elem->datalen))
+
+#define for_each_element_id(element, _id, data, datalen)               \
+       for_each_element(element, data, datalen)                        \
+               if (element->id == (_id))
+
+#define for_each_element_extid(element, extid, data, datalen)          \
+       for_each_element(element, data, datalen)                        \
+               if (element->id == WLAN_EID_EXTENSION &&                \
+                   element->datalen > 0 &&                             \
+                   element->data[0] == (extid))
+
+#define for_each_subelement(sub, element)                              \
+       for_each_element(sub, (element)->data, (element)->datalen)
+
+#define for_each_subelement_id(sub, id, element)                       \
+       for_each_element_id(sub, id, (element)->data, (element)->datalen)
+
+#define for_each_subelement_extid(sub, extid, element)                 \
+       for_each_element_extid(sub, extid, (element)->data, (element)->datalen)
+
+/**
+ * for_each_element_completed - determine if element parsing consumed all data
+ * @element: element pointer after for_each_element() or friends
+ * @data: same data pointer as passed to for_each_element() or friends
+ * @datalen: same data length as passed to for_each_element() or friends
+ *
+ * This function returns %true if all the data was parsed or considered
+ * while walking the elements. Only use this if your for_each_element()
+ * loop cannot be broken out of, otherwise it always returns %false.
+ *
+ * If some data was malformed, this returns %false since the last parsed
+ * element will not fill the whole remaining data.
+ */
+static inline bool for_each_element_completed(const struct element *element,
+                                             const void *data, size_t datalen)
+{
+       return (const u8 *)element == (const u8 *)data + datalen;
+}
+
 #endif /* LINUX_IEEE80211_H */
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index 344b96c206a3..14a35befad6c 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -349,6 +349,8 @@ struct device;
 #define SND_SOC_DAPM_WILL_PMD   0x80    /* called at start of sequence */
 #define SND_SOC_DAPM_PRE_POST_PMD \
                                (SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD)
+#define SND_SOC_DAPM_PRE_POST_PMU \
+                               (SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU)
 
 /* convenience event type detection */
 #define SND_SOC_DAPM_EVENT_ON(e)       \
diff --git a/kernel/elfcore.c b/kernel/elfcore.c
index fc482c8e0bd8..57fb4dcff434 100644
--- a/kernel/elfcore.c
+++ b/kernel/elfcore.c
@@ -3,6 +3,7 @@
 #include <linux/fs.h>
 #include <linux/mm.h>
 #include <linux/binfmts.h>
+#include <linux/elfcore.h>
 
 Elf_Half __weak elf_core_extra_phdrs(void)
 {
diff --git a/kernel/locking/qspinlock_paravirt.h b/kernel/locking/qspinlock_paravirt.h
index 1e882dfc8b79..3a6aeae98a4a 100644
--- a/kernel/locking/qspinlock_paravirt.h
+++ b/kernel/locking/qspinlock_paravirt.h
@@ -247,7 +247,7 @@ pv_wait_early(struct pv_node *prev, int loop)
        if ((loop & PV_PREV_CHECK_MASK) != 0)
                return false;
 
-       return READ_ONCE(prev->state) != vcpu_running || vcpu_is_preempted(prev->cpu);
+       return READ_ONCE(prev->state) != vcpu_running;
 }
 
 /*
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 3d24d401b9d4..32ba789c544c 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1111,7 +1111,8 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
        if (cpumask_equal(&p->cpus_allowed, new_mask))
                goto out;
 
-       if (!cpumask_intersects(new_mask, cpu_valid_mask)) {
+       dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask);
+       if (dest_cpu >= nr_cpu_ids) {
                ret = -EINVAL;
                goto out;
        }
@@ -1132,7 +1133,6 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
        if (cpumask_test_cpu(task_cpu(p), new_mask))
                goto out;
 
-       dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask);
        if (task_running(rq, p) || p->state == TASK_WAKING) {
                struct migration_arg arg = { p, dest_cpu };
                /* Need help from migration thread: drop lock and wait. */
diff --git a/kernel/time/tick-broadcast-hrtimer.c b/kernel/time/tick-broadcast-hrtimer.c
index 58045eb976c3..c750c80570e8 100644
--- a/kernel/time/tick-broadcast-hrtimer.c
+++ b/kernel/time/tick-broadcast-hrtimer.c
@@ -44,34 +44,39 @@ static int bc_shutdown(struct clock_event_device *evt)
  */
 static int bc_set_next(ktime_t expires, struct clock_event_device *bc)
 {
-       int bc_moved;
        /*
-        * We try to cancel the timer first. If the callback is on
-        * flight on some other cpu then we let it handle it. If we
-        * were able to cancel the timer nothing can rearm it as we
-        * own broadcast_lock.
+        * This is called either from enter/exit idle code or from the
+        * broadcast handler. In all cases tick_broadcast_lock is held.
         *
-        * However we can also be called from the event handler of
-        * ce_broadcast_hrtimer itself when it expires. We cannot
-        * restart the timer because we are in the callback, but we
-        * can set the expiry time and let the callback return
-        * HRTIMER_RESTART.
+        * hrtimer_cancel() cannot be called here neither from the
+        * broadcast handler nor from the enter/exit idle code. The idle
+        * code can run into the problem described in bc_shutdown() and the
+        * broadcast handler cannot wait for itself to complete for obvious
+        * reasons.
         *
-        * Since we are in the idle loop at this point and because
-        * hrtimer_{start/cancel} functions call into tracing,
-        * calls to these functions must be bound within RCU_NONIDLE.
+        * Each caller tries to arm the hrtimer on its own CPU, but if the
+        * hrtimer callbback function is currently running, then
+        * hrtimer callback function is currently running, then
+        * which it is assigned at the moment.
+        *
+        * As this can be called from idle code, the hrtimer_start()
+        * invocation has to be wrapped with RCU_NONIDLE() as
+        * hrtimer_start() can call into tracing.
         */
-       RCU_NONIDLE({
-                       bc_moved = hrtimer_try_to_cancel(&bctimer) >= 0;
-                       if (bc_moved)
-                               hrtimer_start(&bctimer, expires,
-                                             HRTIMER_MODE_ABS_PINNED);});
-       if (bc_moved) {
-               /* Bind the "device" to the cpu */
-               bc->bound_on = smp_processor_id();
-       } else if (bc->bound_on == smp_processor_id()) {
-               hrtimer_set_expires(&bctimer, expires);
-       }
+       RCU_NONIDLE( {
+               hrtimer_start(&bctimer, expires, HRTIMER_MODE_ABS_PINNED);
+               /*
+                * The core tick broadcast mode expects bc->bound_on to be set
+                * correctly to prevent a CPU which has the broadcast hrtimer
+                * armed from going deep idle.
+                *
+                * As tick_broadcast_lock is held, nothing can change the cpu
+                * base which was just established in hrtimer_start() above. So
+                * the below access is safe even without holding the hrtimer
+                * base lock.
+                */
+               bc->bound_on = bctimer.base->cpu_base->cpu;
+       } );
        return 0;
 }
 
@@ -97,10 +102,6 @@ static enum hrtimer_restart bc_handler(struct hrtimer *t)
 {
        ce_broadcast_hrtimer.event_handler(&ce_broadcast_hrtimer);
 
-       if (clockevent_state_oneshot(&ce_broadcast_hrtimer))
-               if (ce_broadcast_hrtimer.next_event != KTIME_MAX)
-                       return HRTIMER_RESTART;
-
        return HRTIMER_NORESTART;
 }
 
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index f17c76a1a05f..9f8e8892e5b0 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -1545,21 +1545,23 @@ void timer_clear_idle(void)
 static int collect_expired_timers(struct timer_base *base,
                                  struct hlist_head *heads)
 {
+       unsigned long now = READ_ONCE(jiffies);
+
        /*
         * NOHZ optimization. After a long idle sleep we need to forward the
         * base to current jiffies. Avoid a loop by searching the bitfield for
         * the next expiring timer.
         */
-       if ((long)(jiffies - base->clk) > 2) {
+       if ((long)(now - base->clk) > 2) {
                unsigned long next = __next_timer_interrupt(base);
 
                /*
                 * If the next timer is ahead of time forward to current
                 * jiffies, otherwise forward to the next expiry time:
                 */
-               if (time_after(next, jiffies)) {
+               if (time_after(next, now)) {
                        /* The call site will increment clock! */
-                       base->clk = jiffies - 1;
+                       base->clk = now - 1;
                        return 0;
                }
                base->clk = next;
diff --git a/mm/usercopy.c b/mm/usercopy.c
index 975f7dff8059..f8d74e09f8e4 100644
--- a/mm/usercopy.c
+++ b/mm/usercopy.c
@@ -15,6 +15,7 @@
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #include <linux/mm.h>
+#include <linux/highmem.h>
 #include <linux/slab.h>
 #include <linux/sched.h>
 #include <linux/sched/task.h>
@@ -203,7 +204,12 @@ static inline const char *check_heap_object(const void *ptr, unsigned long n,
        if (!virt_addr_valid(ptr))
                return NULL;
 
-       page = virt_to_head_page(ptr);
+       /*
+        * When CONFIG_HIGHMEM=y, kmap_to_page() will give either the
+        * highmem page or fallback to virt_to_page(). The following
+        * is effectively a highmem-aware virt_to_head_page().
+        */
+       page = compound_head(kmap_to_page((void *)ptr));
 
        /* Check slab allocator for flags and size. */
        if (PageSlab(page))
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index b149a7219084..7ef126489d4e 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -3131,8 +3131,11 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,
                              NFT_SET_OBJECT))
                        return -EINVAL;
                /* Only one of these operations is supported */
-               if ((flags & (NFT_SET_MAP | NFT_SET_EVAL | NFT_SET_OBJECT)) ==
-                            (NFT_SET_MAP | NFT_SET_EVAL | NFT_SET_OBJECT))
+               if ((flags & (NFT_SET_MAP | NFT_SET_OBJECT)) ==
+                            (NFT_SET_MAP | NFT_SET_OBJECT))
+                       return -EOPNOTSUPP;
+               if ((flags & (NFT_SET_EVAL | NFT_SET_OBJECT)) ==
+                            (NFT_SET_EVAL | NFT_SET_OBJECT))
                        return -EOPNOTSUPP;
        }
 
diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c
index 475570e89ede..44015a151ad6 100644
--- a/net/netfilter/nft_lookup.c
+++ b/net/netfilter/nft_lookup.c
@@ -76,9 +76,6 @@ static int nft_lookup_init(const struct nft_ctx *ctx,
        if (IS_ERR(set))
                return PTR_ERR(set);
 
-       if (set->flags & NFT_SET_EVAL)
-               return -EOPNOTSUPP;
-
        priv->sreg = nft_parse_register(tb[NFTA_LOOKUP_SREG]);
        err = nft_validate_register_load(priv->sreg, set->klen);
        if (err < 0)
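
The two netfilter hunks above work together: nft_lookup no longer rejects NFT_SET_EVAL sets outright, and nf_tables_newset now rejects only the MAP+OBJECT and EVAL+OBJECT combinations instead of requiring all three flags to coincide. A small standalone sketch of that pairwise check, with illustrative flag values:

#include <stdio.h>

#define SET_MAP    0x1
#define SET_EVAL   0x2
#define SET_OBJECT 0x4

static int check_flags(unsigned int flags)
{
        if ((flags & (SET_MAP | SET_OBJECT)) == (SET_MAP | SET_OBJECT))
                return -1;                   /* -EOPNOTSUPP */
        if ((flags & (SET_EVAL | SET_OBJECT)) == (SET_EVAL | SET_OBJECT))
                return -1;                   /* -EOPNOTSUPP */
        return 0;
}

int main(void)
{
        printf("MAP|EVAL    -> %d\n", check_flags(SET_MAP | SET_EVAL));    /* now allowed */
        printf("EVAL|OBJECT -> %d\n", check_flags(SET_EVAL | SET_OBJECT)); /* rejected    */
        return 0;
}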
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index f19d5a55f09e..ec504c4a397b 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -199,6 +199,38 @@ cfg80211_get_dev_from_info(struct net *netns, struct genl_info *info)
        return __cfg80211_rdev_from_attrs(netns, info->attrs);
 }
 
+static int validate_beacon_head(const struct nlattr *attr,
+                               struct netlink_ext_ack *extack)
+{
+       const u8 *data = nla_data(attr);
+       unsigned int len = nla_len(attr);
+       const struct element *elem;
+       const struct ieee80211_mgmt *mgmt = (void *)data;
+       unsigned int fixedlen = offsetof(struct ieee80211_mgmt,
+                                        u.beacon.variable);
+
+       if (len < fixedlen)
+               goto err;
+
+       if (ieee80211_hdrlen(mgmt->frame_control) !=
+           offsetof(struct ieee80211_mgmt, u.beacon))
+               goto err;
+
+       data += fixedlen;
+       len -= fixedlen;
+
+       for_each_element(elem, data, len) {
+               /* nothing */
+       }
+
+       if (for_each_element_completed(elem, data, len))
+               return 0;
+
+err:
+       NL_SET_ERR_MSG_ATTR(extack, attr, "malformed beacon head");
+       return -EINVAL;
+}
+
 /* policy for the attributes */
 static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
        [NL80211_ATTR_WIPHY] = { .type = NLA_U32 },
@@ -2111,6 +2143,8 @@ static int nl80211_parse_chandef(struct cfg80211_registered_device *rdev,
 
        control_freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]);
 
+       memset(chandef, 0, sizeof(*chandef));
+
        chandef->chan = ieee80211_get_channel(&rdev->wiphy, control_freq);
        chandef->width = NL80211_CHAN_WIDTH_20_NOHT;
        chandef->center_freq1 = control_freq;
@@ -2580,7 +2614,7 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flag
 
        if (rdev->ops->get_channel) {
                int ret;
-               struct cfg80211_chan_def chandef;
+               struct cfg80211_chan_def chandef = {};
 
                ret = rdev_get_channel(rdev, wdev, &chandef);
                if (ret == 0) {
@@ -3736,6 +3770,12 @@ static int nl80211_parse_beacon(struct nlattr *attrs[],
        memset(bcn, 0, sizeof(*bcn));
 
        if (attrs[NL80211_ATTR_BEACON_HEAD]) {
+               int ret = validate_beacon_head(attrs[NL80211_ATTR_BEACON_HEAD],
+                                              NULL);
+
+               if (ret)
+                       return ret;
+
                bcn->head = nla_data(attrs[NL80211_ATTR_BEACON_HEAD]);
                bcn->head_len = nla_len(attrs[NL80211_ATTR_BEACON_HEAD]);
                if (!bcn->head_len)
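
The validate_beacon_head() addition above walks the variable part of the beacon head as ID/length/data elements and only accepts the buffer when the walk consumes it exactly, i.e. when for_each_element_completed() holds. A hedged userspace sketch of the same idea; elements_ok() only mimics the element iterators and is not the kernel code:

#include <stdio.h>

static int elements_ok(const unsigned char *data, unsigned int len)
{
        unsigned int off = 0;

        while (off + 2 <= len && off + 2 + data[off + 1] <= len)
                off += 2 + data[off + 1];     /* skip id, len, payload */

        return off == len;                    /* walk completed exactly? */
}

int main(void)
{
        const unsigned char good[] = { 0x00, 0x03, 'f', 'o', 'o' };
        const unsigned char bad[]  = { 0x00, 0x10, 'f', 'o', 'o' };  /* claims 16 bytes */

        printf("good: %d, bad: %d\n", elements_ok(good, sizeof(good)),
               elements_ok(bad, sizeof(bad)));
        return 0;
}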
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 376f08798169..b940d5c2003b 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -1567,7 +1567,7 @@ static void reg_call_notifier(struct wiphy *wiphy,
 
 static bool reg_wdev_chan_valid(struct wiphy *wiphy, struct wireless_dev *wdev)
 {
-       struct cfg80211_chan_def chandef;
+       struct cfg80211_chan_def chandef = {};
        struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
        enum nl80211_iftype iftype;
 
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 5ed0ed0559dc..ea47ef156e7d 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -484,6 +484,8 @@ const u8 *cfg80211_find_ie_match(u8 eid, const u8 *ies, int len,
                                 const u8 *match, int match_len,
                                 int match_offset)
 {
+       const struct element *elem;
+
        /* match_offset can't be smaller than 2, unless match_len is
         * zero, in which case match_offset must be zero as well.
         */
@@ -491,14 +493,10 @@ const u8 *cfg80211_find_ie_match(u8 eid, const u8 *ies, int len,
                    (!match_len && match_offset)))
                return NULL;
 
-       while (len >= 2 && len >= ies[1] + 2) {
-               if ((ies[0] == eid) &&
-                   (ies[1] + 2 >= match_offset + match_len) &&
-                   !memcmp(ies + match_offset, match, match_len))
-                       return ies;
-
-               len -= ies[1] + 2;
-               ies += ies[1] + 2;
+       for_each_element_id(elem, eid, ies, len) {
+               if (elem->datalen >= match_offset - 2 + match_len &&
+                   !memcmp(elem->data + match_offset - 2, match, match_len))
+                       return (void *)elem;
        }
 
        return NULL;
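
In the scan.c hunk above, the open-coded walk is replaced by the bounds-checked element iterator, and the match is done at match_offset bytes from the element start (the "- 2" converts that offset, which includes the id/len header, into an offset within elem->data). A hedged standalone sketch of the lookup; find_ie() here is illustrative, not the cfg80211 implementation:

#include <stdio.h>
#include <string.h>

static const unsigned char *find_ie(unsigned char eid,
                                    const unsigned char *ies, int len,
                                    const unsigned char *match, int match_len,
                                    int match_offset)
{
        int off = 0;

        while (off + 2 <= len && off + 2 + ies[off + 1] <= len) {
                const unsigned char *elem = ies + off;

                if (elem[0] == eid &&
                    elem[1] >= match_offset - 2 + match_len &&
                    !memcmp(elem + match_offset, match, match_len))
                        return elem;
                off += 2 + elem[1];
        }
        return NULL;
}

int main(void)
{
        /* one vendor-specific IE (id 0xdd) carrying OUI 00:50:f2 */
        const unsigned char ies[] = { 0xdd, 0x04, 0x00, 0x50, 0xf2, 0x01 };
        const unsigned char oui[] = { 0x00, 0x50, 0xf2 };

        printf("%s\n", find_ie(0xdd, ies, sizeof(ies), oui, sizeof(oui), 2) ?
               "found" : "not found");
        return 0;
}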
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
index 7ca04a7de85a..bf3bae4ac5f4 100644
--- a/net/wireless/wext-compat.c
+++ b/net/wireless/wext-compat.c
@@ -800,7 +800,7 @@ static int cfg80211_wext_giwfreq(struct net_device *dev,
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
        struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
-       struct cfg80211_chan_def chandef;
+       struct cfg80211_chan_def chandef = {};
        int ret;
 
        switch (wdev->iftype) {
diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c
index af680b5b678a..06b0ee75f34f 100644
--- a/security/integrity/ima/ima_crypto.c
+++ b/security/integrity/ima/ima_crypto.c
@@ -293,8 +293,11 @@ static int ima_calc_file_hash_atfm(struct file *file,
                rbuf_len = min_t(loff_t, i_size - offset, rbuf_size[active]);
                rc = integrity_kernel_read(file, offset, rbuf[active],
                                           rbuf_len);
-               if (rc != rbuf_len)
+               if (rc != rbuf_len) {
+                       if (rc >= 0)
+                               rc = -EINVAL;
                        goto out3;
+               }
 
                if (rbuf[1] && offset) {
                        /* Using two buffers, and it is not the first
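
The ima_crypto.c hunk above maps a short (but non-negative) read onto a real error code before taking the unwind path, since the caller only checks for a negative rc. A hedged sketch of that pattern; read_chunk_stub() and the sizes are illustrative:

#include <errno.h>
#include <stdio.h>

static long read_chunk_stub(long want)
{
        return want / 2;                 /* pretend we hit EOF early */
}

static long read_exact(long want)
{
        long rc = read_chunk_stub(want);

        if (rc != want) {
                if (rc >= 0)             /* short read, no error code yet */
                        rc = -EINVAL;
                return rc;               /* always negative on failure */
        }
        return rc;
}

int main(void)
{
        printf("rc = %ld\n", read_exact(4096));  /* prints -22 (-EINVAL) */
        return 0;
}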
diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
index b649675d190d..10764c1e854e 100644
--- a/sound/soc/codecs/sgtl5000.c
+++ b/sound/soc/codecs/sgtl5000.c
@@ -35,6 +35,13 @@
 #define SGTL5000_DAP_REG_OFFSET        0x0100
 #define SGTL5000_MAX_REG_OFFSET        0x013A
 
+/* Delay for the VAG ramp up */
+#define SGTL5000_VAG_POWERUP_DELAY 500 /* ms */
+/* Delay for the VAG ramp down */
+#define SGTL5000_VAG_POWERDOWN_DELAY 500 /* ms */
+
+#define SGTL5000_OUTPUTS_MUTE (SGTL5000_HP_MUTE | SGTL5000_LINE_OUT_MUTE)
+
 /* default value of sgtl5000 registers */
 static const struct reg_default sgtl5000_reg_defaults[] = {
        { SGTL5000_CHIP_DIG_POWER,              0x0000 },
@@ -120,6 +127,13 @@ enum  {
        I2S_LRCLK_STRENGTH_HIGH,
 };
 
+enum {
+       HP_POWER_EVENT,
+       DAC_POWER_EVENT,
+       ADC_POWER_EVENT,
+       LAST_POWER_EVENT = ADC_POWER_EVENT
+};
+
 /* sgtl5000 private structure in codec */
 struct sgtl5000_priv {
        int sysclk;     /* sysclk rate */
@@ -133,8 +147,117 @@ struct sgtl5000_priv {
        u8 micbias_resistor;
        u8 micbias_voltage;
        u8 lrclk_strength;
+       u16 mute_state[LAST_POWER_EVENT + 1];
 };
 
+static inline int hp_sel_input(struct snd_soc_component *component)
+{
+       unsigned int ana_reg = 0;
+
+       snd_soc_component_read(component, SGTL5000_CHIP_ANA_CTRL, &ana_reg);
+
+       return (ana_reg & SGTL5000_HP_SEL_MASK) >> SGTL5000_HP_SEL_SHIFT;
+}
+
+static inline u16 mute_output(struct snd_soc_component *component,
+                             u16 mute_mask)
+{
+       unsigned int mute_reg = 0;
+
+       snd_soc_component_read(component, SGTL5000_CHIP_ANA_CTRL, &mute_reg);
+
+       snd_soc_component_update_bits(component, SGTL5000_CHIP_ANA_CTRL,
+                           mute_mask, mute_mask);
+       return mute_reg;
+}
+
+static inline void restore_output(struct snd_soc_component *component,
+                                 u16 mute_mask, u16 mute_reg)
+{
+       snd_soc_component_update_bits(component, SGTL5000_CHIP_ANA_CTRL,
+               mute_mask, mute_reg);
+}
+
+static void vag_power_on(struct snd_soc_component *component, u32 source)
+{
+       unsigned int ana_reg = 0;
+
+       snd_soc_component_read(component, SGTL5000_CHIP_ANA_POWER, &ana_reg);
+
+       if (ana_reg & SGTL5000_VAG_POWERUP)
+               return;
+
+       snd_soc_component_update_bits(component, SGTL5000_CHIP_ANA_POWER,
+                           SGTL5000_VAG_POWERUP, SGTL5000_VAG_POWERUP);
+
+       /* When VAG powers up to feed the local loop from Line-In, the sleep
+        * is required to avoid a loud pop.
+        */
+       if (hp_sel_input(component) == SGTL5000_HP_SEL_LINE_IN &&
+           source == HP_POWER_EVENT)
+               msleep(SGTL5000_VAG_POWERUP_DELAY);
+}
+
+static int vag_power_consumers(struct snd_soc_component *component,
+                              u16 ana_pwr_reg, u32 source)
+{
+       int consumers = 0;
+
+       /* count dac/adc consumers unconditionally */
+       if (ana_pwr_reg & SGTL5000_DAC_POWERUP)
+               consumers++;
+       if (ana_pwr_reg & SGTL5000_ADC_POWERUP)
+               consumers++;
+
+       /*
+        * If the event comes from HP and Line-In is selected,
+        * current action is 'DAC to be powered down'.
+        * As HP_POWERUP is not set when HP muxed to line-in,
+        * we need to keep VAG power ON.
+        */
+       if (source == HP_POWER_EVENT) {
+               if (hp_sel_input(component) == SGTL5000_HP_SEL_LINE_IN)
+                       consumers++;
+       } else {
+               if (ana_pwr_reg & SGTL5000_HP_POWERUP)
+                       consumers++;
+       }
+
+       return consumers;
+}
+
+static void vag_power_off(struct snd_soc_component *component, u32 source)
+{
+       unsigned int ana_pwr = SGTL5000_VAG_POWERUP;
+
+       snd_soc_component_read(component, SGTL5000_CHIP_ANA_POWER, &ana_pwr);
+
+       if (!(ana_pwr & SGTL5000_VAG_POWERUP))
+               return;
+
+       /*
+        * This function is called when one of the VAG power consumers is
+        * going away. Thus, if more than one consumer is present at the
+        * moment, at least one consumer will remain after the current
+        * event completes.
+        * Don't clear VAG_POWERUP if 2 or more consumers of VAG present:
+        * - LINE_IN (for HP events) / HP (for DAC/ADC events)
+        * - DAC
+        * - ADC
+        * (the current consumer is disappearing right now)
+        */
+       if (vag_power_consumers(component, ana_pwr, source) >= 2)
+               return;
+
+       snd_soc_component_update_bits(component, SGTL5000_CHIP_ANA_POWER,
+               SGTL5000_VAG_POWERUP, 0);
+       /* In the power-down case we need to wait 400-1000 ms
+        * for VAG to fully ramp down.
+        * The longer we wait, the smaller the pop.
+        */
+       msleep(SGTL5000_VAG_POWERDOWN_DELAY);
+}
+
 /*
  * mic_bias power on/off share the same register bits with
  * output impedance of mic bias, when power on mic bias, we
@@ -166,36 +289,46 @@ static int mic_bias_event(struct snd_soc_dapm_widget *w,
        return 0;
 }
 
-/*
- * As manual described, ADC/DAC only works when VAG powerup,
- * So enabled VAG before ADC/DAC up.
- * In power down case, we need wait 400ms when vag fully ramped down.
- */
-static int power_vag_event(struct snd_soc_dapm_widget *w,
-       struct snd_kcontrol *kcontrol, int event)
+static int vag_and_mute_control(struct snd_soc_component *component,
+                                int event, int event_source)
 {
-       struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
-       const u32 mask = SGTL5000_DAC_POWERUP | SGTL5000_ADC_POWERUP;
+       static const u16 mute_mask[] = {
+               /*
+                * Mask for HP_POWER_EVENT.
+                * Muxing Headphones have to be wrapped with mute/unmute
+                * headphones only.
+                */
+               SGTL5000_HP_MUTE,
+               /*
+                * Masks for DAC_POWER_EVENT/ADC_POWER_EVENT.
+                * Muxing DAC or ADC block have to be wrapped with mute/unmute
+                * both headphones and line-out.
+                */
+               SGTL5000_OUTPUTS_MUTE,
+               SGTL5000_OUTPUTS_MUTE
+       };
+
+       struct sgtl5000_priv *sgtl5000 =
+               snd_soc_component_get_drvdata(component);
 
        switch (event) {
+       case SND_SOC_DAPM_PRE_PMU:
+               sgtl5000->mute_state[event_source] =
+                       mute_output(component, mute_mask[event_source]);
+               break;
        case SND_SOC_DAPM_POST_PMU:
-               snd_soc_update_bits(codec, SGTL5000_CHIP_ANA_POWER,
-                       SGTL5000_VAG_POWERUP, SGTL5000_VAG_POWERUP);
-               msleep(400);
+               vag_power_on(component, event_source);
+               restore_output(component, mute_mask[event_source],
+                              sgtl5000->mute_state[event_source]);
                break;
-
        case SND_SOC_DAPM_PRE_PMD:
-               /*
-                * Don't clear VAG_POWERUP, when both DAC and ADC are
-                * operational to prevent inadvertently starving the
-                * other one of them.
-                */
-               if ((snd_soc_read(codec, SGTL5000_CHIP_ANA_POWER) &
-                               mask) != mask) {
-                       snd_soc_update_bits(codec, SGTL5000_CHIP_ANA_POWER,
-                               SGTL5000_VAG_POWERUP, 0);
-                       msleep(400);
-               }
+               sgtl5000->mute_state[event_source] =
+                       mute_output(component, mute_mask[event_source]);
+               vag_power_off(component, event_source);
+               break;
+       case SND_SOC_DAPM_POST_PMD:
+               restore_output(component, mute_mask[event_source],
+                              sgtl5000->mute_state[event_source]);
                break;
        default:
                break;
@@ -204,6 +337,41 @@ static int power_vag_event(struct snd_soc_dapm_widget *w,
        return 0;
 }
 
+/*
+ * Mute the headphone when powering it up/down.
+ * Control VAG power on HP power path.
+ */
+static int headphone_pga_event(struct snd_soc_dapm_widget *w,
+       struct snd_kcontrol *kcontrol, int event)
+{
+       struct snd_soc_component *component =
+               snd_soc_dapm_to_component(w->dapm);
+
+       return vag_and_mute_control(component, event, HP_POWER_EVENT);
+}
+
+/* As the manual describes, powering the ADC/DAC up or down requires
+ * the outputs to be muted to avoid pops.
+ * Control VAG power on ADC/DAC power path.
+ */
+static int adc_updown_depop(struct snd_soc_dapm_widget *w,
+       struct snd_kcontrol *kcontrol, int event)
+{
+       struct snd_soc_component *component =
+               snd_soc_dapm_to_component(w->dapm);
+
+       return vag_and_mute_control(component, event, ADC_POWER_EVENT);
+}
+
+static int dac_updown_depop(struct snd_soc_dapm_widget *w,
+       struct snd_kcontrol *kcontrol, int event)
+{
+       struct snd_soc_component *component =
+               snd_soc_dapm_to_component(w->dapm);
+
+       return vag_and_mute_control(component, event, DAC_POWER_EVENT);
+}
+
 /* input sources for ADC */
 static const char *adc_mux_text[] = {
        "MIC_IN", "LINE_IN"
@@ -239,7 +407,10 @@ static const struct snd_soc_dapm_widget sgtl5000_dapm_widgets[] = {
                            mic_bias_event,
                            SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
 
-       SND_SOC_DAPM_PGA("HP", SGTL5000_CHIP_ANA_POWER, 4, 0, NULL, 0),
+       SND_SOC_DAPM_PGA_E("HP", SGTL5000_CHIP_ANA_POWER, 4, 0, NULL, 0,
+                          headphone_pga_event,
+                          SND_SOC_DAPM_PRE_POST_PMU |
+                          SND_SOC_DAPM_PRE_POST_PMD),
        SND_SOC_DAPM_PGA("LO", SGTL5000_CHIP_ANA_POWER, 0, 0, NULL, 0),
 
        SND_SOC_DAPM_MUX("Capture Mux", SND_SOC_NOPM, 0, 0, &adc_mux),
@@ -255,11 +426,12 @@ static const struct snd_soc_dapm_widget sgtl5000_dapm_widgets[] = {
                                0, SGTL5000_CHIP_DIG_POWER,
                                1, 0),
 
-       SND_SOC_DAPM_ADC("ADC", "Capture", SGTL5000_CHIP_ANA_POWER, 1, 0),
-       SND_SOC_DAPM_DAC("DAC", "Playback", SGTL5000_CHIP_ANA_POWER, 3, 0),
-
-       SND_SOC_DAPM_PRE("VAG_POWER_PRE", power_vag_event),
-       SND_SOC_DAPM_POST("VAG_POWER_POST", power_vag_event),
+       SND_SOC_DAPM_ADC_E("ADC", "Capture", SGTL5000_CHIP_ANA_POWER, 1, 0,
+                          adc_updown_depop, SND_SOC_DAPM_PRE_POST_PMU |
+                          SND_SOC_DAPM_PRE_POST_PMD),
+       SND_SOC_DAPM_DAC_E("DAC", "Playback", SGTL5000_CHIP_ANA_POWER, 3, 0,
+                          dac_updown_depop, SND_SOC_DAPM_PRE_POST_PMU |
+                          SND_SOC_DAPM_PRE_POST_PMD),
 };
 
 /* routes for sgtl5000 */
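
The core of the sgtl5000 change above is a consumer count: the caller of vag_power_off() is itself one of the VAG consumers and is about to go away, so VAG is only powered down when no other consumer (DAC, ADC, HP, or the HP-to-Line-In loop) remains. A hedged standalone sketch of that counting rule; the bit values and names are illustrative, not the driver's registers:

#include <stdio.h>

#define PWR_DAC 0x1
#define PWR_ADC 0x2
#define PWR_HP  0x4

enum source { HP_EVENT, DAC_EVENT, ADC_EVENT };

static int consumers(unsigned int pwr, enum source src, int hp_muxed_to_linein)
{
        int n = 0;

        if (pwr & PWR_DAC) n++;
        if (pwr & PWR_ADC) n++;
        if (src == HP_EVENT)
                n += hp_muxed_to_linein;     /* keep VAG for the line-in loop */
        else
                n += !!(pwr & PWR_HP);

        return n;
}

int main(void)
{
        /* DAC going down while HP is still powered: 2 consumers, keep VAG */
        printf("keep VAG: %d\n", consumers(PWR_DAC | PWR_HP, DAC_EVENT, 0) >= 2);
        /* DAC going down and nothing else powered: 1 consumer, drop VAG */
        printf("keep VAG: %d\n", consumers(PWR_DAC, DAC_EVENT, 0) >= 2);
        return 0;
}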
diff --git a/tools/lib/traceevent/Makefile b/tools/lib/traceevent/Makefile
index a26c44cf31aa..8107f060fa84 100644
--- a/tools/lib/traceevent/Makefile
+++ b/tools/lib/traceevent/Makefile
@@ -259,8 +259,8 @@ endef
 
 define do_generate_dynamic_list_file
        symbol_type=`$(NM) -u -D $1 | awk 'NF>1 {print $$1}' | \
-       xargs echo "U W w" | tr ' ' '\n' | sort -u | xargs echo`;\
-       if [ "$$symbol_type" = "U W w" ];then                           \
+       xargs echo "U w W" | tr 'w ' 'W\n' | sort -u | xargs echo`;\
+       if [ "$$symbol_type" = "U W" ];then                             \
                (echo '{';                                              \
                $(NM) -u -D $1 | awk 'NF>1 {print "\t"$$2";"}' | sort -u;\
                echo '};';                                              \
diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
index 7989dd6289e7..8211e8010e09 100644
--- a/tools/lib/traceevent/event-parse.c
+++ b/tools/lib/traceevent/event-parse.c
@@ -268,10 +268,10 @@ static int add_new_comm(struct pevent *pevent, const char *comm, int pid)
                errno = ENOMEM;
                return -1;
        }
+       pevent->cmdlines = cmdlines;
 
        cmdlines[pevent->cmdline_count].comm = strdup(comm);
        if (!cmdlines[pevent->cmdline_count].comm) {
-               free(cmdlines);
                errno = ENOMEM;
                return -1;
        }
@@ -282,7 +282,6 @@ static int add_new_comm(struct pevent *pevent, const char *comm, int pid)
                pevent->cmdline_count++;
 
        qsort(cmdlines, pevent->cmdline_count, sizeof(*cmdlines), cmdline_cmp);
-       pevent->cmdlines = cmdlines;
 
        return 0;
 }
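
The event-parse.c hunk above fixes a classic realloc hazard: once realloc() succeeds the old block may already be gone, so the owner pointer has to be updated immediately, and a later allocation failure must not free the grown block out from under it. A hedged sketch of the pattern; the struct and names are illustrative:

#include <stdlib.h>
#include <string.h>

struct table {
        char   **entries;
        size_t   count;
};

static int table_add(struct table *t, const char *name)
{
        char **grown = realloc(t->entries, (t->count + 1) * sizeof(*grown));

        if (!grown)
                return -1;
        t->entries = grown;              /* publish the new block first */

        grown[t->count] = strdup(name);
        if (!grown[t->count])
                return -1;               /* keep the grown array, just report failure */

        t->count++;
        return 0;
}

int main(void)
{
        struct table t = { NULL, 0 };

        return table_add(&t, "bash");
}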
diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
index f362ee46506a..b97e31498ff7 100644
--- a/tools/perf/Makefile.config
+++ b/tools/perf/Makefile.config
@@ -795,7 +795,7 @@ ifndef NO_JVMTI
     JDIR=$(shell /usr/sbin/update-java-alternatives -l | head -1 | awk '{print $$3}')
   else
     ifneq (,$(wildcard /usr/sbin/alternatives))
-      JDIR=$(shell /usr/sbin/alternatives --display java | tail -1 | cut -d' ' -f 5 | sed 's%/jre/bin/java.%%g')
+      JDIR=$(shell /usr/sbin/alternatives --display java | tail -1 | cut -d' ' -f 5 | sed -e 's%/jre/bin/java.%%g' -e 's%/bin/java.%%g')
     endif
   endif
   ifndef JDIR
diff --git a/tools/perf/arch/x86/util/unwind-libunwind.c b/tools/perf/arch/x86/util/unwind-libunwind.c
index 05920e3edf7a..47357973b55b 100644
--- a/tools/perf/arch/x86/util/unwind-libunwind.c
+++ b/tools/perf/arch/x86/util/unwind-libunwind.c
@@ -1,11 +1,11 @@
 // SPDX-License-Identifier: GPL-2.0
 
 #include <errno.h>
+#include "../../util/debug.h"
 #ifndef REMOTE_UNWIND_LIBUNWIND
 #include <libunwind.h>
 #include "perf_regs.h"
 #include "../../util/unwind.h"
-#include "../../util/debug.h"
 #endif
 
 #ifdef HAVE_ARCH_X86_64_SUPPORT
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 52486c90ab93..0801e0ffba4a 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -2769,8 +2769,11 @@ int cmd_stat(int argc, const char **argv)
                        fprintf(output, "[ perf stat: executing run #%d ... 
]\n",
                                run_idx + 1);
 
+               if (run_idx != 0)
+                       perf_evlist__reset_prev_raw_counts(evsel_list);
+
                status = run_perf_stat(argc, argv);
-               if (forever && status != -1) {
+               if (forever && status != -1 && !interval) {
                        print_counters(NULL, argc, argv);
                        perf_stat__reset_stats();
                }
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index e1fe446f65da..c892bb4f26c3 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -1063,7 +1063,7 @@ static int cpu_cache_level__read(struct cpu_cache_level *cache, u32 cpu, u16 lev
 
        scnprintf(file, PATH_MAX, "%s/shared_cpu_list", path);
        if (sysfs__read_str(file, &cache->map, &len)) {
-               free(cache->map);
+               free(cache->size);
                free(cache->type);
                return -1;
        }
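
The header.c hunk above corrects the unwind order on the failing read: when reading shared_cpu_list fails, cache->map was never populated, so the fields that were read earlier (size, type) are the ones that must be released. A hedged sketch of that error path; read_str_stub() and the field set are illustrative:

#include <stdlib.h>
#include <string.h>

struct level {
        char *type;
        char *size;
        char *map;
};

static int read_str_stub(const char *what, char **out)
{
        if (!strcmp(what, "shared_cpu_list"))
                return -1;               /* simulate the failing read */
        *out = strdup(what);
        return *out ? 0 : -1;
}

static int level_read(struct level *l)
{
        if (read_str_stub("type", &l->type))
                return -1;
        if (read_str_stub("size", &l->size)) {
                free(l->type);
                return -1;
        }
        if (read_str_stub("shared_cpu_list", &l->map)) {
                free(l->size);           /* was free(l->map): wrong field, size leaked */
                free(l->type);
                return -1;
        }
        return 0;
}

int main(void)
{
        struct level l = { 0 };

        return level_read(&l) ? 1 : 0;
}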
diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c
index c9bae5fb8b47..d028c2786802 100644
--- a/tools/perf/util/stat.c
+++ b/tools/perf/util/stat.c
@@ -154,6 +154,15 @@ static void perf_evsel__free_prev_raw_counts(struct perf_evsel *evsel)
        evsel->prev_raw_counts = NULL;
 }
 
+static void perf_evsel__reset_prev_raw_counts(struct perf_evsel *evsel)
+{
+       if (evsel->prev_raw_counts) {
+               evsel->prev_raw_counts->aggr.val = 0;
+               evsel->prev_raw_counts->aggr.ena = 0;
+               evsel->prev_raw_counts->aggr.run = 0;
+       }
+}
+
 static int perf_evsel__alloc_stats(struct perf_evsel *evsel, bool alloc_raw)
 {
        int ncpus = perf_evsel__nr_cpus(evsel);
@@ -204,6 +213,14 @@ void perf_evlist__reset_stats(struct perf_evlist *evlist)
        }
 }
 
+void perf_evlist__reset_prev_raw_counts(struct perf_evlist *evlist)
+{
+       struct perf_evsel *evsel;
+
+       evlist__for_each_entry(evlist, evsel)
+               perf_evsel__reset_prev_raw_counts(evsel);
+}
+
 static void zero_per_pkg(struct perf_evsel *counter)
 {
        if (counter->per_pkg_mask)
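
Together with the builtin-stat.c hunk, the stat.c/stat.h additions above make sure that with repeated runs (perf stat -r N) in interval mode the prev_raw_counts baseline from one run is zeroed before the next run starts. A hedged sketch of the reset, with simplified stand-in types rather than the real perf structures:

#include <stdio.h>

struct counts { unsigned long long val, ena, run; };

struct evsel {
        struct counts *prev_raw_counts;
};

static void reset_prev_raw_counts(struct evsel *e)
{
        if (e->prev_raw_counts) {
                e->prev_raw_counts->val = 0;
                e->prev_raw_counts->ena = 0;
                e->prev_raw_counts->run = 0;
        }
}

int main(void)
{
        struct counts prev = { 123, 456, 789 };
        struct evsel ev = { &prev };

        reset_prev_raw_counts(&ev);          /* before starting the next run */
        printf("%llu %llu %llu\n", prev.val, prev.ena, prev.run);
        return 0;
}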
diff --git a/tools/perf/util/stat.h b/tools/perf/util/stat.h
index 96326b1f9443..bdfbed8e2df2 100644
--- a/tools/perf/util/stat.h
+++ b/tools/perf/util/stat.h
@@ -100,6 +100,7 @@ void perf_stat__collect_metric_expr(struct perf_evlist *);
 int perf_evlist__alloc_stats(struct perf_evlist *evlist, bool alloc_raw);
 void perf_evlist__free_stats(struct perf_evlist *evlist);
 void perf_evlist__reset_stats(struct perf_evlist *evlist);
+void perf_evlist__reset_prev_raw_counts(struct perf_evlist *evlist);
 
 int perf_stat_process_counter(struct perf_stat_config *config,
                              struct perf_evsel *counter);
