commit:     6c68a8814b8fb45ad5ec871aeacc3377f1056ecd
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Jan 17 01:24:35 2015 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Jan 17 01:24:35 2015 +0000
URL:        
http://sources.gentoo.org/gitweb/?p=proj/linux-patches.git;a=commit;h=6c68a881
Linux patch 3.10.65

---
 0000_README              |    4 +
 1064_linux-3.10.65.patch | 1498 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1502 insertions(+)

diff --git a/0000_README b/0000_README
index 940fe8c..755963e 100644
--- a/0000_README
+++ b/0000_README
@@ -298,6 +298,10 @@ Patch:  1063_linux-3.10.64.patch
 From:   http://www.kernel.org
 Desc:   Linux 3.10.64
 
+Patch:  1064_linux-3.10.65.patch
+From:   http://www.kernel.org
+Desc:   Linux 3.10.65
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1064_linux-3.10.65.patch b/1064_linux-3.10.65.patch
new file mode 100644
index 0000000..983b755
--- /dev/null
+++ b/1064_linux-3.10.65.patch
@@ -0,0 +1,1498 @@
+diff --git a/Documentation/ramoops.txt b/Documentation/ramoops.txt
+index 69b3cac4749d..5d8675615e59 100644
+--- a/Documentation/ramoops.txt
++++ b/Documentation/ramoops.txt
+@@ -14,11 +14,19 @@ survive after a restart.
+ 
+ 1. Ramoops concepts
+ 
+-Ramoops uses a predefined memory area to store the dump. The start and size of
+-the memory area are set using two variables:
++Ramoops uses a predefined memory area to store the dump. The start and size
++and type of the memory area are set using three variables:
+   * "mem_address" for the start
+   * "mem_size" for the size. The memory size will be rounded down to a
+   power of two.
++  * "mem_type" to specify the memory type (default is 
pgprot_writecombine).
++
++Typically the default value of mem_type=0 should be used as that sets the 
pstore
++mapping to pgprot_writecombine. Setting mem_type=1 attempts to use
++pgprot_noncached, which only works on some platforms. This is because pstore
++depends on atomic operations. At least on ARM, pgprot_noncached causes the
++memory to be mapped strongly ordered, and atomic operations on strongly 
ordered
++memory are implementation defined, and won't work on many ARMs such as omaps.
+ 
+ The memory area is divided into "record_size" chunks (also rounded down to
+ power of two) and each oops/panic writes a "record_size" chunk of
+@@ -55,6 +63,7 @@ Setting the ramoops parameters can be done in 2 different 
manners:
+ static struct ramoops_platform_data ramoops_data = {
+         .mem_size               = <...>,
+         .mem_address            = <...>,
++        .mem_type               = <...>,
+         .record_size            = <...>,
+         .dump_oops              = <...>,
+         .ecc                    = <...>,
+diff --git a/Makefile b/Makefile
+index e5b63fb3d0e1..7889b38766db 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 10
+-SUBLEVEL = 64
++SUBLEVEL = 65
+ EXTRAVERSION =
+ NAME = TOSSUG Baby Fish
+ 
+diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c
+index 8278960066c3..3ee701f1d38e 100644
+--- a/arch/arm/mach-mvebu/coherency.c
++++ b/arch/arm/mach-mvebu/coherency.c
+@@ -141,6 +141,29 @@ int __init coherency_init(void)
+ {
+       struct device_node *np;
+ 
++      /*
++       * The coherency fabric is needed:
++       * - For coherency between processors on Armada XP, so only
++       *   when SMP is enabled.
++       * - For coherency between the processor and I/O devices, but
++       *   this coherency requires many pre-requisites (write
++       *   allocate cache policy, shareable pages, SMP bit set) that
++       *   are only meant in SMP situations.
++       *
++       * Note that this means that on Armada 370, there is currently
++       * no way to use hardware I/O coherency, because even when
++       * CONFIG_SMP is enabled, is_smp() returns false due to the
++       * Armada 370 being a single-core processor. To lift this
++       * limitation, we would have to find a way to make the cache
++       * policy set to write-allocate (on all Armada SoCs), and to
++       * set the shareable attribute in page tables (on all Armada
++       * SoCs except the Armada 370). Unfortunately, such decisions
++       * are taken very early in the kernel boot process, at a point
++       * where we don't know yet on which SoC we are running.
++       */
++      if (!is_smp())
++              return 0;
++
+       np = of_find_matching_node(NULL, of_coherency_table);
+       if (np) {
+               pr_info("Initializing Coherency fabric\n");
+diff --git a/arch/x86/include/asm/vsyscall.h b/arch/x86/include/asm/vsyscall.h
+index 2a46ca720afc..2874be9aef0a 100644
+--- a/arch/x86/include/asm/vsyscall.h
++++ b/arch/x86/include/asm/vsyscall.h
+@@ -34,7 +34,7 @@ static inline unsigned int __getcpu(void)
+               native_read_tscp(&p);
+       } else {
+               /* Load per CPU data from GDT */
+-              asm("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
++              asm volatile ("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
+       }
+ 
+       return p;
+diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c 
b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+index 8aac56bda7dc..7185af255fb5 100644
+--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
++++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+@@ -2657,6 +2657,17 @@ static struct intel_uncore_box 
*uncore_event_to_box(struct perf_event *event)
+       return uncore_pmu_to_box(uncore_event_to_pmu(event), 
smp_processor_id());
+ }
+ 
++/*
++ * Using uncore_pmu_event_init pmu event_init callback
++ * as a detection point for uncore events.
++ */
++static int uncore_pmu_event_init(struct perf_event *event);
++
++static bool is_uncore_event(struct perf_event *event)
++{
++      return event->pmu->event_init == uncore_pmu_event_init;
++}
++
+ static int
+ uncore_collect_events(struct intel_uncore_box *box, struct perf_event 
*leader, bool dogrp)
+ {
+@@ -2671,13 +2682,18 @@ uncore_collect_events(struct intel_uncore_box *box, 
struct perf_event *leader, b
+               return -EINVAL;
+ 
+       n = box->n_events;
+-      box->event_list[n] = leader;
+-      n++;
++
++      if (is_uncore_event(leader)) {
++              box->event_list[n] = leader;
++              n++;
++      }
++
+       if (!dogrp)
+               return n;
+ 
+       list_for_each_entry(event, &leader->sibling_list, group_entry) {
+-              if (event->state <= PERF_EVENT_STATE_OFF)
++              if (!is_uncore_event(event) ||
++                  event->state <= PERF_EVENT_STATE_OFF)
+                       continue;
+ 
+               if (n >= max_count)
+diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
+index 431e87544411..ab6ba35a9357 100644
+--- a/arch/x86/vdso/vma.c
++++ b/arch/x86/vdso/vma.c
+@@ -117,30 +117,45 @@ subsys_initcall(init_vdso);
+ 
+ struct linux_binprm;
+ 
+-/* Put the vdso above the (randomized) stack with another randomized offset.
+-   This way there is no hole in the middle of address space.
+-   To save memory make sure it is still in the same PTE as the stack top.
+-   This doesn't give that many random bits */
++/*
++ * Put the vdso above the (randomized) stack with another randomized
++ * offset.  This way there is no hole in the middle of address space.
++ * To save memory make sure it is still in the same PTE as the stack
++ * top.  This doesn't give that many random bits.
++ *
++ * Note that this algorithm is imperfect: the distribution of the vdso
++ * start address within a PMD is biased toward the end.
++ *
++ * Only used for the 64-bit and x32 vdsos.
++ */
+ static unsigned long vdso_addr(unsigned long start, unsigned len)
+ {
+       unsigned long addr, end;
+       unsigned offset;
+-      end = (start + PMD_SIZE - 1) & PMD_MASK;
++
++      /*
++       * Round up the start address.  It can start out unaligned as a result
++       * of stack start randomization.
++       */
++      start = PAGE_ALIGN(start);
++
++      /* Round the lowest possible end address up to a PMD boundary. */
++      end = (start + len + PMD_SIZE - 1) & PMD_MASK;
+       if (end >= TASK_SIZE_MAX)
+               end = TASK_SIZE_MAX;
+       end -= len;
+-      /* This loses some more bits than a modulo, but is cheaper */
+-      offset = get_random_int() & (PTRS_PER_PTE - 1);
+-      addr = start + (offset << PAGE_SHIFT);
+-      if (addr >= end)
+-              addr = end;
++
++      if (end > start) {
++              offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
++              addr = start + (offset << PAGE_SHIFT);
++      } else {
++              addr = start;
++      }
+ 
+       /*
+-       * page-align it here so that get_unmapped_area doesn't
+-       * align it wrongfully again to the next page. addr can come in 4K
+-       * unaligned here as a result of stack start randomization.
++       * Forcibly align the final address in case we have a hardware
++       * issue that requires alignment for performance reasons.
+        */
+-      addr = PAGE_ALIGN(addr);
+       addr = align_vdso_addr(addr);
+ 
+       return addr;
+diff --git a/block/genhd.c b/block/genhd.c
+index e670148c3773..7694dffe9f0e 100644
+--- a/block/genhd.c
++++ b/block/genhd.c
+@@ -1070,9 +1070,16 @@ int disk_expand_part_tbl(struct gendisk *disk, int 
partno)
+       struct disk_part_tbl *old_ptbl = disk->part_tbl;
+       struct disk_part_tbl *new_ptbl;
+       int len = old_ptbl ? old_ptbl->len : 0;
+-      int target = partno + 1;
++      int i, target;
+       size_t size;
+-      int i;
++
++      /*
++       * check for int overflow, since we can get here from blkpg_ioctl()
++       * with a user passed 'partno'.
++       */
++      target = partno + 1;
++      if (target < 0)
++              return -EINVAL;
+ 
+       /* disk_max_parts() is zero during initialization, ignore if so */
+       if (disk_max_parts(disk) && target > disk_max_parts(disk))
+diff --git a/drivers/base/bus.c b/drivers/base/bus.c
+index d414331b480e..558d562f4901 100644
+--- a/drivers/base/bus.c
++++ b/drivers/base/bus.c
+@@ -242,13 +242,15 @@ static ssize_t store_drivers_probe(struct bus_type *bus,
+                                  const char *buf, size_t count)
+ {
+       struct device *dev;
++      int err = -EINVAL;
+ 
+       dev = bus_find_device_by_name(bus, NULL, buf);
+       if (!dev)
+               return -ENODEV;
+-      if (bus_rescan_devices_helper(dev, NULL) != 0)
+-              return -EINVAL;
+-      return count;
++      if (bus_rescan_devices_helper(dev, NULL) == 0)
++              err = count;
++      put_device(dev);
++      return err;
+ }
+ 
+ static struct device *next_device(struct klist_iter *i)
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
+index 81d0e6e1f754..2bd798a7d9aa 100644
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -1687,6 +1687,7 @@ static const struct hid_device_id 
hid_have_special_driver[] = {
+       { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_ERGO_525V) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_I405X) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X) },
++      { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2) 
},
+       { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, 
USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_LCPOWER, USB_DEVICE_ID_LCPOWER_LC1000 ) 
},
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index a1e431f830e3..45c593dbf5cd 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -478,6 +478,7 @@
+ #define USB_DEVICE_ID_KYE_GPEN_560    0x5003
+ #define USB_DEVICE_ID_KYE_EASYPEN_I405X       0x5010
+ #define USB_DEVICE_ID_KYE_MOUSEPEN_I608X      0x5011
++#define USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2    0x501a
+ #define USB_DEVICE_ID_KYE_EASYPEN_M610X       0x5013
+ 
+ #define USB_VENDOR_ID_LABTEC          0x1020
+diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
+index 012880a2228c..03a6acffed5d 100644
+--- a/drivers/hid/hid-input.c
++++ b/drivers/hid/hid-input.c
+@@ -317,6 +317,9 @@ static const struct hid_device_id hid_battery_quirks[] = {
+                              USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI),
+         HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE },
+       { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
++                             USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO),
++        HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE },
++      { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
+               USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI),
+         HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE },
+       {}
+diff --git a/drivers/hid/hid-kye.c b/drivers/hid/hid-kye.c
+index 843f2dd55200..973eed788cc6 100644
+--- a/drivers/hid/hid-kye.c
++++ b/drivers/hid/hid-kye.c
+@@ -303,6 +303,7 @@ static __u8 *kye_report_fixup(struct hid_device *hdev, 
__u8 *rdesc,
+               }
+               break;
+       case USB_DEVICE_ID_KYE_MOUSEPEN_I608X:
++      case USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2:
+               if (*rsize == MOUSEPEN_I608X_RDESC_ORIG_SIZE) {
+                       rdesc = mousepen_i608x_rdesc_fixed;
+                       *rsize = sizeof(mousepen_i608x_rdesc_fixed);
+@@ -383,6 +384,7 @@ static int kye_probe(struct hid_device *hdev, const struct 
hid_device_id *id)
+       switch (id->product) {
+       case USB_DEVICE_ID_KYE_EASYPEN_I405X:
+       case USB_DEVICE_ID_KYE_MOUSEPEN_I608X:
++      case USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2:
+       case USB_DEVICE_ID_KYE_EASYPEN_M610X:
+               ret = kye_tablet_enable(hdev);
+               if (ret) {
+@@ -406,6 +408,8 @@ static const struct hid_device_id kye_devices[] = {
+       { HID_USB_DEVICE(USB_VENDOR_ID_KYE,
+                               USB_DEVICE_ID_KYE_MOUSEPEN_I608X) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_KYE,
++                              USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2) },
++      { HID_USB_DEVICE(USB_VENDOR_ID_KYE,
+                               USB_DEVICE_ID_KYE_EASYPEN_M610X) },
+       { }
+ };
+diff --git a/drivers/hid/hid-roccat-pyra.c b/drivers/hid/hid-roccat-pyra.c
+index d4f1e3bee590..264ddc4a0118 100644
+--- a/drivers/hid/hid-roccat-pyra.c
++++ b/drivers/hid/hid-roccat-pyra.c
+@@ -35,6 +35,8 @@ static struct class *pyra_class;
+ static void profile_activated(struct pyra_device *pyra,
+               unsigned int new_profile)
+ {
++      if (new_profile >= ARRAY_SIZE(pyra->profile_settings))
++              return;
+       pyra->actual_profile = new_profile;
+       pyra->actual_cpi = pyra->profile_settings[pyra->actual_profile].y_cpi;
+ }
+@@ -236,9 +238,11 @@ static ssize_t pyra_sysfs_write_settings(struct file *fp,
+       if (off != 0 || count != PYRA_SIZE_SETTINGS)
+               return -EINVAL;
+ 
+-      mutex_lock(&pyra->pyra_lock);
+-
+       settings = (struct pyra_settings const *)buf;
++      if (settings->startup_profile >= ARRAY_SIZE(pyra->profile_settings))
++              return -EINVAL;
++
++      mutex_lock(&pyra->pyra_lock);
+ 
+       retval = pyra_set_settings(usb_dev, settings);
+       if (retval) {
+diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
+index 2b1799a3b212..469daa04dadb 100644
+--- a/drivers/hid/i2c-hid/i2c-hid.c
++++ b/drivers/hid/i2c-hid/i2c-hid.c
+@@ -134,6 +134,7 @@ struct i2c_hid {
+                                                  * descriptor. */
+       unsigned int            bufsize;        /* i2c buffer size */
+       char                    *inbuf;         /* Input buffer */
++      char                    *rawbuf;        /* Raw Input buffer */
+       char                    *cmdbuf;        /* Command buffer */
+       char                    *argsbuf;       /* Command arguments buffer */
+ 
+@@ -340,7 +341,7 @@ static int i2c_hid_hwreset(struct i2c_client *client)
+ static void i2c_hid_get_input(struct i2c_hid *ihid)
+ {
+       int ret, ret_size;
+-      int size = le16_to_cpu(ihid->hdesc.wMaxInputLength);
++      int size = ihid->bufsize;
+ 
+       ret = i2c_master_recv(ihid->client, ihid->inbuf, size);
+       if (ret != size) {
+@@ -471,9 +472,11 @@ static void i2c_hid_find_max_report(struct hid_device 
*hid, unsigned int type,
+ static void i2c_hid_free_buffers(struct i2c_hid *ihid)
+ {
+       kfree(ihid->inbuf);
++      kfree(ihid->rawbuf);
+       kfree(ihid->argsbuf);
+       kfree(ihid->cmdbuf);
+       ihid->inbuf = NULL;
++      ihid->rawbuf = NULL;
+       ihid->cmdbuf = NULL;
+       ihid->argsbuf = NULL;
+       ihid->bufsize = 0;
+@@ -489,10 +492,11 @@ static int i2c_hid_alloc_buffers(struct i2c_hid *ihid, 
size_t report_size)
+                      report_size; /* report */
+ 
+       ihid->inbuf = kzalloc(report_size, GFP_KERNEL);
++      ihid->rawbuf = kzalloc(report_size, GFP_KERNEL);
+       ihid->argsbuf = kzalloc(args_len, GFP_KERNEL);
+       ihid->cmdbuf = kzalloc(sizeof(union command) + args_len, GFP_KERNEL);
+ 
+-      if (!ihid->inbuf || !ihid->argsbuf || !ihid->cmdbuf) {
++      if (!ihid->inbuf || !ihid->rawbuf || !ihid->argsbuf || !ihid->cmdbuf) {
+               i2c_hid_free_buffers(ihid);
+               return -ENOMEM;
+       }
+@@ -519,12 +523,12 @@ static int i2c_hid_get_raw_report(struct hid_device *hid,
+ 
+       ret = i2c_hid_get_report(client,
+                       report_type == HID_FEATURE_REPORT ? 0x03 : 0x01,
+-                      report_number, ihid->inbuf, ask_count);
++                      report_number, ihid->rawbuf, ask_count);
+ 
+       if (ret < 0)
+               return ret;
+ 
+-      ret_count = ihid->inbuf[0] | (ihid->inbuf[1] << 8);
++      ret_count = ihid->rawbuf[0] | (ihid->rawbuf[1] << 8);
+ 
+       if (ret_count <= 2)
+               return 0;
+@@ -533,7 +537,7 @@ static int i2c_hid_get_raw_report(struct hid_device *hid,
+ 
+       /* The query buffer contains the size, dropping it in the reply */
+       count = min(count, ret_count - 2);
+-      memcpy(buf, ihid->inbuf + 2, count);
++      memcpy(buf, ihid->rawbuf + 2, count);
+ 
+       return count;
+ }
+diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
+index 0db9a67278ba..5b46a79dcb1f 100644
+--- a/drivers/hid/usbhid/hid-quirks.c
++++ b/drivers/hid/usbhid/hid-quirks.c
+@@ -110,6 +110,7 @@ static const struct hid_blacklist {
+       { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS, 
HID_QUIRK_MULTI_INPUT },
+       { USB_VENDOR_ID_SIGMA_MICRO, USB_DEVICE_ID_SIGMA_MICRO_KEYBOARD, 
HID_QUIRK_NO_INIT_REPORTS },
+       { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X, 
HID_QUIRK_MULTI_INPUT },
++      { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2, 
HID_QUIRK_MULTI_INPUT },
+       { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X, 
HID_QUIRK_MULTI_INPUT },
+       { USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_DUOSENSE, 
HID_QUIRK_NO_INIT_REPORTS },
+       { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_LTS1, 
HID_QUIRK_NO_INIT_REPORTS },
+diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
+index 6771e3c94801..db4e10d4c7f5 100644
+--- a/drivers/iommu/intel-iommu.c
++++ b/drivers/iommu/intel-iommu.c
+@@ -1796,7 +1796,7 @@ static int __domain_mapping(struct dmar_domain *domain, 
unsigned long iov_pfn,
+       struct dma_pte *first_pte = NULL, *pte = NULL;
+       phys_addr_t uninitialized_var(pteval);
+       int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
+-      unsigned long sg_res;
++      unsigned long sg_res = 0;
+       unsigned int largepage_lvl = 0;
+       unsigned long lvl_pages = 0;
+ 
+@@ -1807,10 +1807,8 @@ static int __domain_mapping(struct dmar_domain *domain, 
unsigned long iov_pfn,
+ 
+       prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
+ 
+-      if (sg)
+-              sg_res = 0;
+-      else {
+-              sg_res = nr_pages + 1;
++      if (!sg) {
++              sg_res = nr_pages;
+               pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
+       }
+ 
+diff --git a/drivers/mtd/ubi/upd.c b/drivers/mtd/ubi/upd.c
+index ec2c2dc1c1ca..2a1b6e037e1a 100644
+--- a/drivers/mtd/ubi/upd.c
++++ b/drivers/mtd/ubi/upd.c
+@@ -133,6 +133,10 @@ int ubi_start_update(struct ubi_device *ubi, struct 
ubi_volume *vol,
+       ubi_assert(!vol->updating && !vol->changing_leb);
+       vol->updating = 1;
+ 
++      vol->upd_buf = vmalloc(ubi->leb_size);
++      if (!vol->upd_buf)
++              return -ENOMEM;
++
+       err = set_update_marker(ubi, vol);
+       if (err)
+               return err;
+@@ -152,14 +156,12 @@ int ubi_start_update(struct ubi_device *ubi, struct 
ubi_volume *vol,
+               err = clear_update_marker(ubi, vol, 0);
+               if (err)
+                       return err;
++
++              vfree(vol->upd_buf);
+               vol->updating = 0;
+               return 0;
+       }
+ 
+-      vol->upd_buf = vmalloc(ubi->leb_size);
+-      if (!vol->upd_buf)
+-              return -ENOMEM;
+-
+       vol->upd_ebs = div_u64(bytes + vol->usable_leb_size - 1,
+                              vol->usable_leb_size);
+       vol->upd_bytes = bytes;
+diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
+index c95bfb183c62..49e570abe58b 100644
+--- a/drivers/mtd/ubi/wl.c
++++ b/drivers/mtd/ubi/wl.c
+@@ -1209,7 +1209,6 @@ static int wear_leveling_worker(struct ubi_device *ubi, 
struct ubi_work *wrk,
+ 
+       err = do_sync_erase(ubi, e1, vol_id, lnum, 0);
+       if (err) {
+-              kmem_cache_free(ubi_wl_entry_slab, e1);
+               if (e2)
+                       kmem_cache_free(ubi_wl_entry_slab, e2);
+               goto out_ro;
+@@ -1223,10 +1222,8 @@ static int wear_leveling_worker(struct ubi_device *ubi, 
struct ubi_work *wrk,
+               dbg_wl("PEB %d (LEB %d:%d) was put meanwhile, erase",
+                      e2->pnum, vol_id, lnum);
+               err = do_sync_erase(ubi, e2, vol_id, lnum, 0);
+-              if (err) {
+-                      kmem_cache_free(ubi_wl_entry_slab, e2);
++              if (err)
+                       goto out_ro;
+-              }
+       }
+ 
+       dbg_wl("done");
+@@ -1262,10 +1259,9 @@ out_not_moved:
+ 
+       ubi_free_vid_hdr(ubi, vid_hdr);
+       err = do_sync_erase(ubi, e2, vol_id, lnum, torture);
+-      if (err) {
+-              kmem_cache_free(ubi_wl_entry_slab, e2);
++      if (err)
+               goto out_ro;
+-      }
++
+       mutex_unlock(&ubi->move_mutex);
+       return 0;
+ 
+diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c 
b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+index a0f647f92bf5..3a220d2f2ee1 100644
+--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
++++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+@@ -727,7 +727,7 @@ static int peak_usb_create_dev(struct peak_usb_adapter 
*peak_usb_adapter,
+       dev->cmd_buf = kmalloc(PCAN_USB_MAX_CMD_LEN, GFP_KERNEL);
+       if (!dev->cmd_buf) {
+               err = -ENOMEM;
+-              goto lbl_set_intf_data;
++              goto lbl_free_candev;
+       }
+ 
+       dev->udev = usb_dev;
+@@ -766,7 +766,7 @@ static int peak_usb_create_dev(struct peak_usb_adapter 
*peak_usb_adapter,
+       err = register_candev(netdev);
+       if (err) {
+               dev_err(&intf->dev, "couldn't register CAN device: %d\n", err);
+-              goto lbl_free_cmd_buf;
++              goto lbl_restore_intf_data;
+       }
+ 
+       if (dev->prev_siblings)
+@@ -779,14 +779,14 @@ static int peak_usb_create_dev(struct peak_usb_adapter 
*peak_usb_adapter,
+       if (dev->adapter->dev_init) {
+               err = dev->adapter->dev_init(dev);
+               if (err)
+-                      goto lbl_free_cmd_buf;
++                      goto lbl_unregister_candev;
+       }
+ 
+       /* set bus off */
+       if (dev->adapter->dev_set_bus) {
+               err = dev->adapter->dev_set_bus(dev, 0);
+               if (err)
+-                      goto lbl_free_cmd_buf;
++                      goto lbl_unregister_candev;
+       }
+ 
+       /* get device number early */
+@@ -798,11 +798,14 @@ static int peak_usb_create_dev(struct peak_usb_adapter 
*peak_usb_adapter,
+ 
+       return 0;
+ 
+-lbl_free_cmd_buf:
+-      kfree(dev->cmd_buf);
++lbl_unregister_candev:
++      unregister_candev(netdev);
+ 
+-lbl_set_intf_data:
++lbl_restore_intf_data:
+       usb_set_intfdata(intf, dev->prev_siblings);
++      kfree(dev->cmd_buf);
++
++lbl_free_candev:
+       free_candev(netdev);
+ 
+       return err;
+diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c 
b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
+index 263dd921edc4..f7f796a2c50b 100644
+--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
++++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
+@@ -333,8 +333,6 @@ static int pcan_usb_pro_send_req(struct peak_usb_device 
*dev, int req_id,
+       if (!(dev->state & PCAN_USB_STATE_CONNECTED))
+               return 0;
+ 
+-      memset(req_addr, '\0', req_size);
+-
+       req_type = USB_TYPE_VENDOR | USB_RECIP_OTHER;
+ 
+       switch (req_id) {
+@@ -345,6 +343,7 @@ static int pcan_usb_pro_send_req(struct peak_usb_device 
*dev, int req_id,
+       default:
+               p = usb_rcvctrlpipe(dev->udev, 0);
+               req_type |= USB_DIR_IN;
++              memset(req_addr, '\0', req_size);
+               break;
+       }
+ 
+diff --git a/drivers/net/wireless/ath/ath5k/qcu.c 
b/drivers/net/wireless/ath/ath5k/qcu.c
+index 65fe929529a8..3bfd0b88016e 100644
+--- a/drivers/net/wireless/ath/ath5k/qcu.c
++++ b/drivers/net/wireless/ath/ath5k/qcu.c
+@@ -225,13 +225,7 @@ ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum 
ath5k_tx_queue queue_type,
+       } else {
+               switch (queue_type) {
+               case AR5K_TX_QUEUE_DATA:
+-                      for (queue = AR5K_TX_QUEUE_ID_DATA_MIN;
+-                              ah->ah_txq[queue].tqi_type !=
+-                              AR5K_TX_QUEUE_INACTIVE; queue++) {
+-
+-                              if (queue > AR5K_TX_QUEUE_ID_DATA_MAX)
+-                                      return -EINVAL;
+-                      }
++                      queue = queue_info->tqi_subtype;
+                       break;
+               case AR5K_TX_QUEUE_UAPSD:
+                       queue = AR5K_TX_QUEUE_ID_UAPSD;
+diff --git a/drivers/net/wireless/ath/ath9k/hw.h 
b/drivers/net/wireless/ath/ath9k/hw.h
+index ae3034374bc4..d7d9e311089f 100644
+--- a/drivers/net/wireless/ath/ath9k/hw.h
++++ b/drivers/net/wireless/ath/ath9k/hw.h
+@@ -215,8 +215,8 @@
+ #define AH_WOW_BEACON_MISS            BIT(3)
+ 
+ enum ath_hw_txq_subtype {
+-      ATH_TXQ_AC_BE = 0,
+-      ATH_TXQ_AC_BK = 1,
++      ATH_TXQ_AC_BK = 0,
++      ATH_TXQ_AC_BE = 1,
+       ATH_TXQ_AC_VI = 2,
+       ATH_TXQ_AC_VO = 3,
+ };
+diff --git a/drivers/net/wireless/ath/ath9k/mac.c 
b/drivers/net/wireless/ath/ath9k/mac.c
+index 566109a40fb3..941b08b71308 100644
+--- a/drivers/net/wireless/ath/ath9k/mac.c
++++ b/drivers/net/wireless/ath/ath9k/mac.c
+@@ -311,14 +311,7 @@ int ath9k_hw_setuptxqueue(struct ath_hw *ah, enum 
ath9k_tx_queue type,
+               q = ATH9K_NUM_TX_QUEUES - 3;
+               break;
+       case ATH9K_TX_QUEUE_DATA:
+-              for (q = 0; q < ATH9K_NUM_TX_QUEUES; q++)
+-                      if (ah->txq[q].tqi_type ==
+-                          ATH9K_TX_QUEUE_INACTIVE)
+-                              break;
+-              if (q == ATH9K_NUM_TX_QUEUES) {
+-                      ath_err(common, "No available TX queue\n");
+-                      return -1;
+-              }
++              q = qinfo->tqi_subtype;
+               break;
+       default:
+               ath_err(common, "Invalid TX queue type: %u\n", type);
+diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
+index ea37072e8bf2..034a4d2964d6 100644
+--- a/drivers/pci/probe.c
++++ b/drivers/pci/probe.c
+@@ -210,14 +210,17 @@ int __pci_read_base(struct pci_dev *dev, enum 
pci_bar_type type,
+               res->flags |= IORESOURCE_SIZEALIGN;
+               if (res->flags & IORESOURCE_IO) {
+                       l &= PCI_BASE_ADDRESS_IO_MASK;
++                      sz &= PCI_BASE_ADDRESS_IO_MASK;
+                       mask = PCI_BASE_ADDRESS_IO_MASK & (u32) IO_SPACE_LIMIT;
+               } else {
+                       l &= PCI_BASE_ADDRESS_MEM_MASK;
++                      sz &= PCI_BASE_ADDRESS_MEM_MASK;
+                       mask = (u32)PCI_BASE_ADDRESS_MEM_MASK;
+               }
+       } else {
+               res->flags |= (l & IORESOURCE_ROM_ENABLE);
+               l &= PCI_ROM_ADDRESS_MASK;
++              sz &= PCI_ROM_ADDRESS_MASK;
+               mask = (u32)PCI_ROM_ADDRESS_MASK;
+       }
+ 
+diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
+index 0c8a9fa2be6c..b8366b154fb9 100644
+--- a/drivers/tty/serial/samsung.c
++++ b/drivers/tty/serial/samsung.c
+@@ -534,11 +534,15 @@ static void s3c24xx_serial_pm(struct uart_port *port, 
unsigned int level,
+                             unsigned int old)
+ {
+       struct s3c24xx_uart_port *ourport = to_ourport(port);
++      int timeout = 10000;
+ 
+       ourport->pm_level = level;
+ 
+       switch (level) {
+       case 3:
++              while (--timeout && !s3c24xx_serial_txempty_nofifo(port))
++                      udelay(100);
++
+               if (!IS_ERR(ourport->baudclk))
+                       clk_disable_unprepare(ourport->baudclk);
+ 
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index 1e71f918eb9f..2800776b2e91 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -1087,10 +1087,11 @@ next_desc:
+       } else {
+               control_interface = usb_ifnum_to_if(usb_dev, 
union_header->bMasterInterface0);
+               data_interface = usb_ifnum_to_if(usb_dev, (data_interface_num = 
union_header->bSlaveInterface0));
+-              if (!control_interface || !data_interface) {
+-                      dev_dbg(&intf->dev, "no interfaces\n");
+-                      return -ENODEV;
+-              }
++      }
++
++      if (!control_interface || !data_interface) {
++              dev_dbg(&intf->dev, "no interfaces\n");
++              return -ENODEV;
+       }
+ 
+       if (data_interface_num != call_interface_num)
+@@ -1365,6 +1366,7 @@ alloc_fail8:
+                               &dev_attr_wCountryCodes);
+               device_remove_file(&acm->control->dev,
+                               &dev_attr_iCountryCodeRelDate);
++              kfree(acm->country_codes);
+       }
+       device_remove_file(&acm->control->dev, &dev_attr_bmCapabilities);
+ alloc_fail7:
+diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
+index 1d94316f0ea4..301b08496478 100644
+--- a/drivers/xen/swiotlb-xen.c
++++ b/drivers/xen/swiotlb-xen.c
+@@ -390,7 +390,7 @@ static void xen_unmap_single(struct device *hwdev, 
dma_addr_t dev_addr,
+ 
+       /* NOTE: We use dev_addr here, not paddr! */
+       if (is_xen_swiotlb_buffer(dev_addr)) {
+-              swiotlb_tbl_unmap_single(hwdev, paddr, size, dir);
++              swiotlb_tbl_unmap_single(hwdev, dev_addr, size, dir);
+               return;
+       }
+ 
+diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
+index f26f38ccd194..019fc5a68a14 100644
+--- a/fs/btrfs/delayed-inode.c
++++ b/fs/btrfs/delayed-inode.c
+@@ -1843,6 +1843,14 @@ int btrfs_delayed_update_inode(struct 
btrfs_trans_handle *trans,
+       struct btrfs_delayed_node *delayed_node;
+       int ret = 0;
+ 
++      /*
++       * we don't do delayed inode updates during log recovery because it
++       * leads to enospc problems.  This means we also can't do
++       * delayed inode refs
++       */
++      if (BTRFS_I(inode)->root->fs_info->log_root_recovering)
++              return -EAGAIN;
++
+       delayed_node = btrfs_get_or_create_delayed_node(inode);
+       if (IS_ERR(delayed_node))
+               return PTR_ERR(delayed_node);
+diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
+index 387213ac2608..b44306378193 100644
+--- a/fs/fs-writeback.c
++++ b/fs/fs-writeback.c
+@@ -470,12 +470,28 @@ __writeback_single_inode(struct inode *inode, struct 
writeback_control *wbc)
+        * write_inode()
+        */
+       spin_lock(&inode->i_lock);
+-      /* Clear I_DIRTY_PAGES if we've written out all dirty pages */
+-      if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
+-              inode->i_state &= ~I_DIRTY_PAGES;
++
+       dirty = inode->i_state & I_DIRTY;
+-      inode->i_state &= ~(I_DIRTY_SYNC | I_DIRTY_DATASYNC);
++      inode->i_state &= ~I_DIRTY;
++
++      /*
++       * Paired with smp_mb() in __mark_inode_dirty().  This allows
++       * __mark_inode_dirty() to test i_state without grabbing i_lock -
++       * either they see the I_DIRTY bits cleared or we see the dirtied
++       * inode.
++       *
++       * I_DIRTY_PAGES is always cleared together above even if @mapping
++       * still has dirty pages.  The flag is reinstated after smp_mb() if
++       * necessary.  This guarantees that either __mark_inode_dirty()
++       * sees clear I_DIRTY_PAGES or we see PAGECACHE_TAG_DIRTY.
++       */
++      smp_mb();
++
++      if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
++              inode->i_state |= I_DIRTY_PAGES;
++
+       spin_unlock(&inode->i_lock);
++
+       /* Don't write the inode if only I_DIRTY_PAGES was set */
+       if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
+               int err = write_inode(inode, wbc);
+@@ -1146,12 +1162,11 @@ void __mark_inode_dirty(struct inode *inode, int flags)
+       }
+ 
+       /*
+-       * make sure that changes are seen by all cpus before we test i_state
+-       * -- mikulas
++       * Paired with smp_mb() in __writeback_single_inode() for the
++       * following lockless i_state test.  See there for details.
+        */
+       smp_mb();
+ 
+-      /* avoid the locking if we can */
+       if ((inode->i_state & flags) == flags)
+               return;
+ 
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index 836307ae1f08..4a58afa99654 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -1200,15 +1200,14 @@ static int copy_cred(struct svc_cred *target, struct 
svc_cred *source)
+       return 0;
+ }
+ 
+-static long long
++static int
+ compare_blob(const struct xdr_netobj *o1, const struct xdr_netobj *o2)
+ {
+-      long long res;
+-
+-      res = o1->len - o2->len;
+-      if (res)
+-              return res;
+-      return (long long)memcmp(o1->data, o2->data, o1->len);
++      if (o1->len < o2->len)
++              return -1;
++      if (o1->len > o2->len)
++              return 1;
++      return memcmp(o1->data, o2->data, o1->len);
+ }
+ 
+ static int same_name(const char *n1, const char *n2)
+@@ -1365,7 +1364,7 @@ add_clp_to_name_tree(struct nfs4_client *new_clp, struct 
rb_root *root)
+ static struct nfs4_client *
+ find_clp_in_name_tree(struct xdr_netobj *name, struct rb_root *root)
+ {
+-      long long cmp;
++      int cmp;
+       struct rb_node *node = root->rb_node;
+       struct nfs4_client *clp;
+ 
+diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
+index 9b45f0666cfc..acf179d7615f 100644
+--- a/fs/nfsd/nfs4xdr.c
++++ b/fs/nfsd/nfs4xdr.c
+@@ -1743,6 +1743,9 @@ static __be32 nfsd4_encode_components_esc(char sep, char 
*components,
+               }
+               else
+                       end++;
++              if (found_esc)
++                      end = next;
++
+               str = end;
+       }
+       *pp = p;
+diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
+index 2e1372efbb00..587d699bdc2c 100644
+--- a/fs/nilfs2/inode.c
++++ b/fs/nilfs2/inode.c
+@@ -49,6 +49,8 @@ struct nilfs_iget_args {
+       int for_gc;
+ };
+ 
++static int nilfs_iget_test(struct inode *inode, void *opaque);
++
+ void nilfs_inode_add_blocks(struct inode *inode, int n)
+ {
+       struct nilfs_root *root = NILFS_I(inode)->i_root;
+@@ -347,6 +349,17 @@ const struct address_space_operations nilfs_aops = {
+       .is_partially_uptodate  = block_is_partially_uptodate,
+ };
+ 
++static int nilfs_insert_inode_locked(struct inode *inode,
++                                   struct nilfs_root *root,
++                                   unsigned long ino)
++{
++      struct nilfs_iget_args args = {
++              .ino = ino, .root = root, .cno = 0, .for_gc = 0
++      };
++
++      return insert_inode_locked4(inode, ino, nilfs_iget_test, &args);
++}
++
+ struct inode *nilfs_new_inode(struct inode *dir, umode_t mode)
+ {
+       struct super_block *sb = dir->i_sb;
+@@ -382,7 +395,7 @@ struct inode *nilfs_new_inode(struct inode *dir, umode_t 
mode)
+       if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) {
+               err = nilfs_bmap_read(ii->i_bmap, NULL);
+               if (err < 0)
+-                      goto failed_bmap;
++                      goto failed_after_creation;
+ 
+               set_bit(NILFS_I_BMAP, &ii->i_state);
+               /* No lock is needed; iget() ensures it. */
+@@ -398,21 +411,24 @@ struct inode *nilfs_new_inode(struct inode *dir, umode_t 
mode)
+       spin_lock(&nilfs->ns_next_gen_lock);
+       inode->i_generation = nilfs->ns_next_generation++;
+       spin_unlock(&nilfs->ns_next_gen_lock);
+-      insert_inode_hash(inode);
++      if (nilfs_insert_inode_locked(inode, root, ino) < 0) {
++              err = -EIO;
++              goto failed_after_creation;
++      }
+ 
+       err = nilfs_init_acl(inode, dir);
+       if (unlikely(err))
+-              goto failed_acl; /* never occur. When supporting
++              goto failed_after_creation; /* never occur. When supporting
+                                   nilfs_init_acl(), proper cancellation of
+                                   above jobs should be considered */
+ 
+       return inode;
+ 
+- failed_acl:
+- failed_bmap:
++ failed_after_creation:
+       clear_nlink(inode);
++      unlock_new_inode(inode);
+       iput(inode);  /* raw_inode will be deleted through
+-                       generic_delete_inode() */
++                       nilfs_evict_inode() */
+       goto failed;
+ 
+  failed_ifile_create_inode:
+@@ -460,8 +476,8 @@ int nilfs_read_inode_common(struct inode *inode,
+       inode->i_atime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
+       inode->i_ctime.tv_nsec = le32_to_cpu(raw_inode->i_ctime_nsec);
+       inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
+-      if (inode->i_nlink == 0 && inode->i_mode == 0)
+-              return -EINVAL; /* this inode is deleted */
++      if (inode->i_nlink == 0)
++              return -ESTALE; /* this inode is deleted */
+ 
+       inode->i_blocks = le64_to_cpu(raw_inode->i_blocks);
+       ii->i_flags = le32_to_cpu(raw_inode->i_flags);
+diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c
+index 9de78f08989e..0f84b257932c 100644
+--- a/fs/nilfs2/namei.c
++++ b/fs/nilfs2/namei.c
+@@ -51,9 +51,11 @@ static inline int nilfs_add_nondir(struct dentry *dentry, 
struct inode *inode)
+       int err = nilfs_add_link(dentry, inode);
+       if (!err) {
+               d_instantiate(dentry, inode);
++              unlock_new_inode(inode);
+               return 0;
+       }
+       inode_dec_link_count(inode);
++      unlock_new_inode(inode);
+       iput(inode);
+       return err;
+ }
+@@ -182,6 +184,7 @@ out:
+ out_fail:
+       drop_nlink(inode);
+       nilfs_mark_inode_dirty(inode);
++      unlock_new_inode(inode);
+       iput(inode);
+       goto out;
+ }
+@@ -201,11 +204,15 @@ static int nilfs_link(struct dentry *old_dentry, struct 
inode *dir,
+       inode_inc_link_count(inode);
+       ihold(inode);
+ 
+-      err = nilfs_add_nondir(dentry, inode);
+-      if (!err)
++      err = nilfs_add_link(dentry, inode);
++      if (!err) {
++              d_instantiate(dentry, inode);
+               err = nilfs_transaction_commit(dir->i_sb);
+-      else
++      } else {
++              inode_dec_link_count(inode);
++              iput(inode);
+               nilfs_transaction_abort(dir->i_sb);
++      }
+ 
+       return err;
+ }
+@@ -243,6 +250,7 @@ static int nilfs_mkdir(struct inode *dir, struct dentry 
*dentry, umode_t mode)
+ 
+       nilfs_mark_inode_dirty(inode);
+       d_instantiate(dentry, inode);
++      unlock_new_inode(inode);
+ out:
+       if (!err)
+               err = nilfs_transaction_commit(dir->i_sb);
+@@ -255,6 +263,7 @@ out_fail:
+       drop_nlink(inode);
+       drop_nlink(inode);
+       nilfs_mark_inode_dirty(inode);
++      unlock_new_inode(inode);
+       iput(inode);
+ out_dir:
+       drop_nlink(dir);
+diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
+index 20dfec72e903..f998c6009ad4 100644
+--- a/fs/ocfs2/aops.c
++++ b/fs/ocfs2/aops.c
+@@ -917,7 +917,7 @@ void ocfs2_unlock_and_free_pages(struct page **pages, int 
num_pages)
+       }
+ }
+ 
+-static void ocfs2_free_write_ctxt(struct ocfs2_write_ctxt *wc)
++static void ocfs2_unlock_pages(struct ocfs2_write_ctxt *wc)
+ {
+       int i;
+ 
+@@ -938,7 +938,11 @@ static void ocfs2_free_write_ctxt(struct ocfs2_write_ctxt 
*wc)
+               page_cache_release(wc->w_target_page);
+       }
+       ocfs2_unlock_and_free_pages(wc->w_pages, wc->w_num_pages);
++}
+ 
++static void ocfs2_free_write_ctxt(struct ocfs2_write_ctxt *wc)
++{
++      ocfs2_unlock_pages(wc);
+       brelse(wc->w_di_bh);
+       kfree(wc);
+ }
+@@ -2060,11 +2064,19 @@ out_write_size:
+       di->i_mtime_nsec = di->i_ctime_nsec = 
cpu_to_le32(inode->i_mtime.tv_nsec);
+       ocfs2_journal_dirty(handle, wc->w_di_bh);
+ 
++      /* unlock pages before dealloc since it needs acquiring j_trans_barrier
++       * lock, or it will cause a deadlock since journal commit threads holds
++       * this lock and will ask for the page lock when flushing the data.
++       * put it here to preserve the unlock order.
++       */
++      ocfs2_unlock_pages(wc);
++
+       ocfs2_commit_trans(osb, handle);
+ 
+       ocfs2_run_deallocs(osb, &wc->w_dealloc);
+ 
+-      ocfs2_free_write_ctxt(wc);
++      brelse(wc->w_di_bh);
++      kfree(wc);
+ 
+       return copied;
+ }
+diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c
+index 1376e5a8f0d6..42d5911c7e29 100644
+--- a/fs/pstore/ram.c
++++ b/fs/pstore/ram.c
+@@ -61,6 +61,11 @@ module_param(mem_size, ulong, 0400);
+ MODULE_PARM_DESC(mem_size,
+               "size of reserved RAM used to store oops/panic logs");
+ 
++static unsigned int mem_type;
++module_param(mem_type, uint, 0600);
++MODULE_PARM_DESC(mem_type,
++              "set to 1 to try to use unbuffered memory (default 0)");
++
+ static int dump_oops = 1;
+ module_param(dump_oops, int, 0600);
+ MODULE_PARM_DESC(dump_oops,
+@@ -79,6 +84,7 @@ struct ramoops_context {
+       struct persistent_ram_zone *fprz;
+       phys_addr_t phys_addr;
+       unsigned long size;
++      unsigned int memtype;
+       size_t record_size;
+       size_t console_size;
+       size_t ftrace_size;
+@@ -331,7 +337,8 @@ static int ramoops_init_przs(struct device *dev, struct 
ramoops_context *cxt,
+               size_t sz = cxt->record_size;
+ 
+               cxt->przs[i] = persistent_ram_new(*paddr, sz, 0,
+-                                                &cxt->ecc_info);
++                                                &cxt->ecc_info,
++                                                cxt->memtype);
+               if (IS_ERR(cxt->przs[i])) {
+                       err = PTR_ERR(cxt->przs[i]);
+                       dev_err(dev, "failed to request mem region 
(0x%zx@0x%llx): %d\n",
+@@ -361,7 +368,7 @@ static int ramoops_init_prz(struct device *dev, struct 
ramoops_context *cxt,
+               return -ENOMEM;
+       }
+ 
+-      *prz = persistent_ram_new(*paddr, sz, sig, &cxt->ecc_info);
++      *prz = persistent_ram_new(*paddr, sz, sig, &cxt->ecc_info, 
cxt->memtype);
+       if (IS_ERR(*prz)) {
+               int err = PTR_ERR(*prz);
+ 
+@@ -411,6 +418,7 @@ static int ramoops_probe(struct platform_device *pdev)
+       cxt->dump_read_cnt = 0;
+       cxt->size = pdata->mem_size;
+       cxt->phys_addr = pdata->mem_address;
++      cxt->memtype = pdata->mem_type;
+       cxt->record_size = pdata->record_size;
+       cxt->console_size = pdata->console_size;
+       cxt->ftrace_size = pdata->ftrace_size;
+@@ -541,6 +549,7 @@ static void ramoops_register_dummy(void)
+ 
+       dummy_data->mem_size = mem_size;
+       dummy_data->mem_address = mem_address;
++      dummy_data->mem_type = 0;
+       dummy_data->record_size = record_size;
+       dummy_data->console_size = ramoops_console_size;
+       dummy_data->ftrace_size = ramoops_ftrace_size;
+diff --git a/fs/pstore/ram_core.c b/fs/pstore/ram_core.c
+index 59337326e288..6ff97553331b 100644
+--- a/fs/pstore/ram_core.c
++++ b/fs/pstore/ram_core.c
+@@ -333,7 +333,8 @@ void persistent_ram_zap(struct persistent_ram_zone *prz)
+       persistent_ram_update_header_ecc(prz);
+ }
+ 
+-static void *persistent_ram_vmap(phys_addr_t start, size_t size)
++static void *persistent_ram_vmap(phys_addr_t start, size_t size,
++              unsigned int memtype)
+ {
+       struct page **pages;
+       phys_addr_t page_start;
+@@ -345,7 +346,10 @@ static void *persistent_ram_vmap(phys_addr_t start, 
size_t size)
+       page_start = start - offset_in_page(start);
+       page_count = DIV_ROUND_UP(size + offset_in_page(start), PAGE_SIZE);
+ 
+-      prot = pgprot_noncached(PAGE_KERNEL);
++      if (memtype)
++              prot = pgprot_noncached(PAGE_KERNEL);
++      else
++              prot = pgprot_writecombine(PAGE_KERNEL);
+ 
+       pages = kmalloc(sizeof(struct page *) * page_count, GFP_KERNEL);
+       if (!pages) {
+@@ -364,27 +368,35 @@ static void *persistent_ram_vmap(phys_addr_t start, 
size_t size)
+       return vaddr;
+ }
+ 
+-static void *persistent_ram_iomap(phys_addr_t start, size_t size)
++static void *persistent_ram_iomap(phys_addr_t start, size_t size,
++              unsigned int memtype)
+ {
++      void *va;
++
+       if (!request_mem_region(start, size, "persistent_ram")) {
+               pr_err("request mem region (0x%llx@0x%llx) failed\n",
+                       (unsigned long long)size, (unsigned long long)start);
+               return NULL;
+       }
+ 
+-      return ioremap(start, size);
++      if (memtype)
++              va = ioremap(start, size);
++      else
++              va = ioremap_wc(start, size);
++
++      return va;
+ }
+ 
+ static int persistent_ram_buffer_map(phys_addr_t start, phys_addr_t size,
+-              struct persistent_ram_zone *prz)
++              struct persistent_ram_zone *prz, int memtype)
+ {
+       prz->paddr = start;
+       prz->size = size;
+ 
+       if (pfn_valid(start >> PAGE_SHIFT))
+-              prz->vaddr = persistent_ram_vmap(start, size);
++              prz->vaddr = persistent_ram_vmap(start, size, memtype);
+       else
+-              prz->vaddr = persistent_ram_iomap(start, size);
++              prz->vaddr = persistent_ram_iomap(start, size, memtype);
+ 
+       if (!prz->vaddr) {
+               pr_err("%s: Failed to map 0x%llx pages at 0x%llx\n", __func__,
+@@ -452,7 +464,8 @@ void persistent_ram_free(struct persistent_ram_zone *prz)
+ }
+ 
+ struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
+-                      u32 sig, struct persistent_ram_ecc_info *ecc_info)
++                      u32 sig, struct persistent_ram_ecc_info *ecc_info,
++                      unsigned int memtype)
+ {
+       struct persistent_ram_zone *prz;
+       int ret = -ENOMEM;
+@@ -463,7 +476,7 @@ struct persistent_ram_zone *persistent_ram_new(phys_addr_t 
start, size_t size,
+               goto err;
+       }
+ 
+-      ret = persistent_ram_buffer_map(start, size, prz);
++      ret = persistent_ram_buffer_map(start, size, prz, memtype);
+       if (ret)
+               goto err;
+ 
+diff --git a/include/linux/mm.h b/include/linux/mm.h
+index d4cdac903468..c4085192c2b6 100644
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -1630,7 +1630,7 @@ extern int expand_downwards(struct vm_area_struct *vma,
+ #if VM_GROWSUP
+ extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
+ #else
+-  #define expand_upwards(vma, address) do { } while (0)
++  #define expand_upwards(vma, address) (0)
+ #endif
+ 
+ /* Look up the first VMA which satisfies  addr < vm_end,  NULL if none. */
+diff --git a/include/linux/pstore_ram.h b/include/linux/pstore_ram.h
+index 9974975d40db..4af3fdc85b01 100644
+--- a/include/linux/pstore_ram.h
++++ b/include/linux/pstore_ram.h
+@@ -53,7 +53,8 @@ struct persistent_ram_zone {
+ };
+ 
+ struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
+-                      u32 sig, struct persistent_ram_ecc_info *ecc_info);
++                      u32 sig, struct persistent_ram_ecc_info *ecc_info,
++                      unsigned int memtype);
+ void persistent_ram_free(struct persistent_ram_zone *prz);
+ void persistent_ram_zap(struct persistent_ram_zone *prz);
+ 
+@@ -76,6 +77,7 @@ ssize_t persistent_ram_ecc_string(struct persistent_ram_zone 
*prz,
+ struct ramoops_platform_data {
+       unsigned long   mem_size;
+       unsigned long   mem_address;
++      unsigned int    mem_type;
+       unsigned long   record_size;
+       unsigned long   console_size;
+       unsigned long   ftrace_size;
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 3f63ea6464ca..7bf4d519c20f 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -6887,11 +6887,11 @@ SYSCALL_DEFINE5(perf_event_open,
+ 
+       if (move_group) {
+               synchronize_rcu();
+-              perf_install_in_context(ctx, group_leader, event->cpu);
++              perf_install_in_context(ctx, group_leader, group_leader->cpu);
+               get_ctx(ctx);
+               list_for_each_entry(sibling, &group_leader->sibling_list,
+                                   group_entry) {
+-                      perf_install_in_context(ctx, sibling, event->cpu);
++                      perf_install_in_context(ctx, sibling, sibling->cpu);
+                       get_ctx(ctx);
+               }
+       }
+diff --git a/mm/memory.c b/mm/memory.c
+index 0926ccd04d7a..8b2d75f61b32 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -3200,7 +3200,7 @@ static inline int check_stack_guard_page(struct 
vm_area_struct *vma, unsigned lo
+               if (prev && prev->vm_end == address)
+                       return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
+ 
+-              expand_downwards(vma, address - PAGE_SIZE);
++              return expand_downwards(vma, address - PAGE_SIZE);
+       }
+       if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) 
{
+               struct vm_area_struct *next = vma->vm_next;
+@@ -3209,7 +3209,7 @@ static inline int check_stack_guard_page(struct 
vm_area_struct *vma, unsigned lo
+               if (next && next->vm_start == address + PAGE_SIZE)
+                       return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
+ 
+-              expand_upwards(vma, address + PAGE_SIZE);
++              return expand_upwards(vma, address + PAGE_SIZE);
+       }
+       return 0;
+ }
+diff --git a/mm/mmap.c b/mm/mmap.c
+index 8f87b14c7968..43a7089c6a7c 100644
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -2056,14 +2056,17 @@ static int acct_stack_growth(struct vm_area_struct 
*vma, unsigned long size, uns
+ {
+       struct mm_struct *mm = vma->vm_mm;
+       struct rlimit *rlim = current->signal->rlim;
+-      unsigned long new_start;
++      unsigned long new_start, actual_size;
+ 
+       /* address space limit tests */
+       if (!may_expand_vm(mm, grow))
+               return -ENOMEM;
+ 
+       /* Stack limit test */
+-      if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
++      actual_size = size;
++      if (size && (vma->vm_flags & (VM_GROWSUP | VM_GROWSDOWN)))
++              actual_size -= PAGE_SIZE;
++      if (actual_size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
+               return -ENOMEM;
+ 
+       /* mlock limit tests */
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index 4e89500391dc..a2fd7e759cb7 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -2631,18 +2631,20 @@ static bool prepare_kswapd_sleep(pg_data_t *pgdat, int 
order, long remaining,
+               return false;
+ 
+       /*
+-       * There is a potential race between when kswapd checks its watermarks
+-       * and a process gets throttled. There is also a potential race if
+-       * processes get throttled, kswapd wakes, a large process exits therby
+-       * balancing the zones that causes kswapd to miss a wakeup. If kswapd
+-       * is going to sleep, no process should be sleeping on pfmemalloc_wait
+-       * so wake them now if necessary. If necessary, processes will wake
+-       * kswapd and get throttled again
++       * The throttled processes are normally woken up in balance_pgdat() as
++       * soon as pfmemalloc_watermark_ok() is true. But there is a potential
++       * race between when kswapd checks the watermarks and a process gets
++       * throttled. There is also a potential race if processes get
++       * throttled, kswapd wakes, a large process exits thereby balancing the
++       * zones, which causes kswapd to exit balance_pgdat() before reaching
++       * the wake up checks. If kswapd is going to sleep, no process should
++       * be sleeping on pfmemalloc_wait, so wake them now if necessary. If
++       * the wake up is premature, processes will wake kswapd and get
++       * throttled again. The difference from wake ups in balance_pgdat() is
++       * that here we are under prepare_to_wait().
+        */
+-      if (waitqueue_active(&pgdat->pfmemalloc_wait)) {
+-              wake_up(&pgdat->pfmemalloc_wait);
+-              return false;
+-      }
++      if (waitqueue_active(&pgdat->pfmemalloc_wait))
++              wake_up_all(&pgdat->pfmemalloc_wait);
+ 
+       return pgdat_balanced(pgdat, order, classzone_idx);
+ }
+diff --git a/scripts/kernel-doc b/scripts/kernel-doc
+index 4305b2f2ec5e..8c0e07b7a70b 100755
+--- a/scripts/kernel-doc
++++ b/scripts/kernel-doc
+@@ -1750,7 +1750,7 @@ sub dump_struct($$) {
+       # strip kmemcheck_bitfield_{begin,end}.*;
+       $members =~ s/kmemcheck_bitfield_.*?;//gos;
+       # strip attributes
+-      $members =~ s/__aligned\s*\(.+\)//gos;
++      $members =~ s/__aligned\s*\([^;]*\)//gos;
+ 
+       create_parameterlist($members, ';', $file);
+       check_sections($file, $declaration_name, "struct", $sectcheck, 
$struct_actual, $nested);
+diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
+index aeefec74a061..83a0f9b4452b 100644
+--- a/sound/pci/hda/hda_codec.c
++++ b/sound/pci/hda/hda_codec.c
+@@ -327,8 +327,10 @@ int snd_hda_get_sub_nodes(struct hda_codec *codec, 
hda_nid_t nid,
+       unsigned int parm;
+ 
+       parm = snd_hda_param_read(codec, nid, AC_PAR_NODE_COUNT);
+-      if (parm == -1)
++      if (parm == -1) {
++              *start_id = 0;
+               return 0;
++      }
+       *start_id = (parm >> 16) & 0x7fff;
+       return (int)(parm & 0x7fff);
+ }
+diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
+index 5dd4c4af9c9f..4ae5767a2cf5 100644
+--- a/sound/pci/hda/patch_sigmatel.c
++++ b/sound/pci/hda/patch_sigmatel.c
+@@ -573,9 +573,9 @@ static void stac_store_hints(struct hda_codec *codec)
+                       spec->gpio_mask;
+       }
+       if (get_int_hint(codec, "gpio_dir", &spec->gpio_dir))
+-              spec->gpio_mask &= spec->gpio_mask;
+-      if (get_int_hint(codec, "gpio_data", &spec->gpio_data))
+               spec->gpio_dir &= spec->gpio_mask;
++      if (get_int_hint(codec, "gpio_data", &spec->gpio_data))
++              spec->gpio_data &= spec->gpio_mask;
+       if (get_int_hint(codec, "eapd_mask", &spec->eapd_mask))
+               spec->eapd_mask &= spec->gpio_mask;
+       if (get_int_hint(codec, "gpio_mute", &spec->gpio_mute))
+diff --git a/sound/soc/codecs/max98090.c b/sound/soc/codecs/max98090.c
+index 76bfeb3c3e30..be8de7ce1cda 100644
+--- a/sound/soc/codecs/max98090.c
++++ b/sound/soc/codecs/max98090.c
+@@ -1364,8 +1364,8 @@ static const struct snd_soc_dapm_route 
max98090_dapm_routes[] = {
+       {"STENL Mux", "Sidetone Left", "DMICL"},
+       {"STENR Mux", "Sidetone Right", "ADCR"},
+       {"STENR Mux", "Sidetone Right", "DMICR"},
+-      {"DACL", "NULL", "STENL Mux"},
+-      {"DACR", "NULL", "STENL Mux"},
++      {"DACL", NULL, "STENL Mux"},
++      {"DACR", NULL, "STENL Mux"},
+ 
+       {"AIFINL", NULL, "SHDN"},
+       {"AIFINR", NULL, "SHDN"},
+diff --git a/sound/soc/codecs/sigmadsp.c b/sound/soc/codecs/sigmadsp.c
+index 4068f2491232..bb3878c9625f 100644
+--- a/sound/soc/codecs/sigmadsp.c
++++ b/sound/soc/codecs/sigmadsp.c
+@@ -176,6 +176,13 @@ static int _process_sigma_firmware(struct device *dev,
+               goto done;
+       }
+ 
++      if (ssfw_head->version != 1) {
++              dev_err(dev,
++                      "Failed to load firmware: Invalid version %d. Supported 
firmware versions: 1\n",
++                      ssfw_head->version);
++              goto done;
++      }
++
+       crc = crc32(0, fw->data + sizeof(*ssfw_head),
+                       fw->size - sizeof(*ssfw_head));
+       pr_debug("%s: crc=%x\n", __func__, crc);
+diff --git a/sound/soc/dwc/designware_i2s.c b/sound/soc/dwc/designware_i2s.c
+index 593a3ea12d4c..489a9abf112b 100644
+--- a/sound/soc/dwc/designware_i2s.c
++++ b/sound/soc/dwc/designware_i2s.c
+@@ -263,6 +263,19 @@ static void dw_i2s_shutdown(struct snd_pcm_substream 
*substream,
+       snd_soc_dai_set_dma_data(dai, substream, NULL);
+ }
+ 
++static int dw_i2s_prepare(struct snd_pcm_substream *substream,
++                        struct snd_soc_dai *dai)
++{
++      struct dw_i2s_dev *dev = snd_soc_dai_get_drvdata(dai);
++
++      if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
++              i2s_write_reg(dev->i2s_base, TXFFR, 1);
++      else
++              i2s_write_reg(dev->i2s_base, RXFFR, 1);
++
++      return 0;
++}
++
+ static int dw_i2s_trigger(struct snd_pcm_substream *substream,
+               int cmd, struct snd_soc_dai *dai)
+ {
+@@ -294,6 +307,7 @@ static struct snd_soc_dai_ops dw_i2s_dai_ops = {
+       .startup        = dw_i2s_startup,
+       .shutdown       = dw_i2s_shutdown,
+       .hw_params      = dw_i2s_hw_params,
++      .prepare        = dw_i2s_prepare,
+       .trigger        = dw_i2s_trigger,
+ };
+ 
+diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c
+index 0339d464791a..4df31b0f94a3 100644
+--- a/sound/usb/mixer_maps.c
++++ b/sound/usb/mixer_maps.c
+@@ -322,8 +322,11 @@ static struct usbmix_name_map hercules_usb51_map[] = {
+       { 0 }                           /* terminator */
+ };
+ 
+-static const struct usbmix_name_map kef_x300a_map[] = {
+-      { 10, NULL }, /* firmware locks up (?) when we try to access this FU */
++/* some (all?) SCMS USB3318 devices are affected by a firmware lock up
++ * when anything attempts to access FU 10 (control)
++ */
++static const struct usbmix_name_map scms_usb3318_map[] = {
++      { 10, NULL },
+       { 0 }
+ };
+ 
+@@ -415,8 +418,14 @@ static struct usbmix_ctl_map usbmix_ctl_maps[] = {
+               .map = ebox44_map,
+       },
+       {
++              /* KEF X300A */
+               .id = USB_ID(0x27ac, 0x1000),
+-              .map = kef_x300a_map,
++              .map = scms_usb3318_map,
++      },
++      {
++              /* Arcam rPAC */
++              .id = USB_ID(0x25c4, 0x0003),
++              .map = scms_usb3318_map,
+       },
+       { 0 } /* terminator */
+ };
+diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
+index 14c2fe20aa62..20764e01df16 100644
+--- a/tools/perf/util/hist.h
++++ b/tools/perf/util/hist.h
+@@ -34,6 +34,7 @@ struct events_stats {
+       u32 nr_invalid_chains;
+       u32 nr_unknown_id;
+       u32 nr_unprocessable_samples;
++      u32 nr_unordered_events;
+ };
+ 
+ enum hist_column {
+diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
+index e392202b96bc..6f593a704ea5 100644
+--- a/tools/perf/util/session.c
++++ b/tools/perf/util/session.c
+@@ -656,8 +656,7 @@ static int perf_session_queue_event(struct perf_session 
*s, union perf_event *ev
+               return -ETIME;
+ 
+       if (timestamp < s->ordered_samples.last_flush) {
+-              printf("Warning: Timestamp below last timeslice flush\n");
+-              return -EINVAL;
++              s->stats.nr_unordered_events++;
+       }
+ 
+       if (!list_empty(sc)) {
+@@ -1057,6 +1056,8 @@ static void perf_session__warn_about_errors(const struct 
perf_session *session,
+                           "Do you have a KVM guest running and not using 
'perf kvm'?\n",
+                           session->stats.nr_unprocessable_samples);
+       }
++      if (session->stats.nr_unordered_events != 0)
++              ui__warning("%u out of order events recorded.\n", 
session->stats.nr_unordered_events);
+ }
+ 
+ #define session_done()        (*(volatile int *)(&session_done))

Reply via email to