4.1.35-rt41-rc1 stable review patch.
If anyone has any objections, please let me know.

------------------

From: Sebastian Andrzej Siewior <bige...@linutronix.de>

We might get preempted in zs_map_object(): get_cpu_light() only disables
migration, not preemption, so another task on the same CPU can grab the
same per-CPU zs_map_area again and corrupt the memory. Serialize access
to the mapping area with a local lock instead.
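
For context, a minimal sketch of the local-lock pattern the patch adopts,
assuming the -rt locallock API from <linux/locallock.h>; the struct
fields and the map_example()/unmap_example() helpers are illustrative
stand-ins, not code from zsmalloc:

	#include <linux/percpu.h>
	#include <linux/locallock.h>

	/* Simplified stand-in for zsmalloc's per-CPU scratch area */
	struct mapping_area {
		void *vm_addr;
	};

	static DEFINE_PER_CPU(struct mapping_area, zs_map_area);
	static DEFINE_LOCAL_IRQ_LOCK(zs_map_area_lock);

	static void *map_example(void)
	{
		struct mapping_area *area;

		/*
		 * get_locked_var() locks zs_map_area_lock and hands back
		 * this CPU's zs_map_area. On -rt the lock is a sleeping
		 * spinlock, so a task that preempts us on this CPU blocks
		 * here instead of reusing the area under our feet.
		 */
		area = &get_locked_var(zs_map_area_lock, zs_map_area);
		/* ... set up the mapping in *area ... */
		return area->vm_addr;
	}

	static void unmap_example(void)
	{
		/* ... tear the mapping down ... */
		put_locked_var(zs_map_area_lock, zs_map_area);
	}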

Cc: stable...@vger.kernel.org
Signed-off-by: Sebastian Andrzej Siewior <bige...@linutronix.de>
Signed-off-by: Steven Rostedt <rost...@goodmis.org>
---
 mm/zsmalloc.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index e819dffd142c..873d9bdb94fd 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -68,6 +68,7 @@
 #include <linux/debugfs.h>
 #include <linux/zsmalloc.h>
 #include <linux/zpool.h>
+#include <linux/locallock.h>
 
 /*
  * This must be power of 2 and greater than of equal to sizeof(link_free).
@@ -398,6 +399,7 @@ static unsigned int get_maxobj_per_zspage(int size, int pages_per_zspage)
 
 /* per-cpu VM mapping areas for zspage accesses that cross page boundaries */
 static DEFINE_PER_CPU(struct mapping_area, zs_map_area);
+static DEFINE_LOCAL_IRQ_LOCK(zs_map_area_lock);
 
 static int is_first_page(struct page *page)
 {
@@ -1289,7 +1291,7 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
        class = pool->size_class[class_idx];
        off = obj_idx_to_offset(page, obj_idx, class->size);
 
-       area = per_cpu_ptr(&zs_map_area, get_cpu_light());
+       area = &get_locked_var(zs_map_area_lock, zs_map_area);
        area->vm_mm = mm;
        if (off + class->size <= PAGE_SIZE) {
                /* this object is contained entirely within a page */
@@ -1342,7 +1344,7 @@ void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
 
                __zs_unmap_object(area, pages, off, class->size);
        }
-       put_cpu_light();
+       put_locked_var(zs_map_area_lock, zs_map_area);
        unpin_tag(handle);
 }
 EXPORT_SYMBOL_GPL(zs_unmap_object);
-- 
2.9.3

