4.19.37-rt20-rc1 stable review patch.
If anyone has any objections, please let me know.

------------------

From: Sebastian Andrzej Siewior <bige...@linutronix.de>

A local lock now protects the per-CPU variable tce_page. Previously
tce_page was protected by disabling interrupts, and both
tce_buildmulti_pSeriesLP() and tce_setrange_multi_pSeriesLP() attempt a
GFP_ATOMIC page allocation inside that interrupts-off section.

Use local_lock_irqsave()/local_lock_irq() instead of
local_irq_save()/local_irq_disable(). On a non-RT kernel this behaves
as before; on PREEMPT_RT the local lock still protects the per-CPU
tce_page but does not hard-disable interrupts, so the allocation no
longer runs in an interrupts-off section.

Cc: stable...@vger.kernel.org
Signed-off-by: Sebastian Andrzej Siewior <bige...@linutronix.de>
Signed-off-by: Steven Rostedt (VMware) <rost...@goodmis.org>
---
 arch/powerpc/platforms/pseries/iommu.c | 16 ++++++++++------
 1 file changed, 10 insertions(+), 6 deletions(-)
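
For reference, below is a minimal sketch (not part of the patch) of the
locallock pattern the diff applies, written against the 4.19-rt API from
<linux/locallock.h>. The per-CPU variable, lock name and function used
here are made-up placeholders for illustration only:

#include <linux/locallock.h>
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <linux/errno.h>

static DEFINE_PER_CPU(void *, example_page);
static DEFINE_LOCAL_IRQ_LOCK(example_page_lock);

static int example_use_percpu_page(void)
{
	unsigned long flags;
	void *page;

	/*
	 * On a non-RT kernel this maps to local_irq_save(); on
	 * PREEMPT_RT it takes a per-CPU sleeping lock instead, so the
	 * GFP_ATOMIC allocation below does not happen with interrupts
	 * hard-disabled.
	 */
	local_lock_irqsave(example_page_lock, flags);

	page = __this_cpu_read(example_page);
	if (!page) {
		page = (void *)__get_free_page(GFP_ATOMIC);
		if (!page) {
			local_unlock_irqrestore(example_page_lock, flags);
			return -ENOMEM;
		}
		__this_cpu_write(example_page, page);
	}

	/* ... use the per-CPU page while the local lock is held ... */

	local_unlock_irqrestore(example_page_lock, flags);
	return 0;
}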

diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index 06f02960b439..d80d919c78d3 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -38,6 +38,7 @@
 #include <linux/of.h>
 #include <linux/iommu.h>
 #include <linux/rculist.h>
+#include <linux/locallock.h>
 #include <asm/io.h>
 #include <asm/prom.h>
 #include <asm/rtas.h>
@@ -212,6 +213,7 @@ static int tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum,
 }
 
 static DEFINE_PER_CPU(__be64 *, tce_page);
+static DEFINE_LOCAL_IRQ_LOCK(tcp_page_lock);
 
 static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
                                     long npages, unsigned long uaddr,
@@ -232,7 +234,8 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
                                           direction, attrs);
        }
 
-       local_irq_save(flags);  /* to protect tcep and the page behind it */
+       /* to protect tcep and the page behind it */
+       local_lock_irqsave(tcp_page_lock, flags);
 
        tcep = __this_cpu_read(tce_page);
 
@@ -243,7 +246,7 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
                tcep = (__be64 *)__get_free_page(GFP_ATOMIC);
                /* If allocation fails, fall back to the loop implementation */
                if (!tcep) {
-                       local_irq_restore(flags);
+                       local_unlock_irqrestore(tcp_page_lock, flags);
                        return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
                                            direction, attrs);
                }
@@ -277,7 +280,7 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
                tcenum += limit;
        } while (npages > 0 && !rc);
 
-       local_irq_restore(flags);
+       local_unlock_irqrestore(tcp_page_lock, flags);
 
        if (unlikely(rc == H_NOT_ENOUGH_RESOURCES)) {
                ret = (int)rc;
@@ -435,13 +438,14 @@ static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn,
        u64 rc = 0;
        long l, limit;
 
-       local_irq_disable();    /* to protect tcep and the page behind it */
+       /* to protect tcep and the page behind it */
+       local_lock_irq(tcp_page_lock);
        tcep = __this_cpu_read(tce_page);
 
        if (!tcep) {
                tcep = (__be64 *)__get_free_page(GFP_ATOMIC);
                if (!tcep) {
-                       local_irq_enable();
+                       local_unlock_irq(tcp_page_lock);
                        return -ENOMEM;
                }
                __this_cpu_write(tce_page, tcep);
@@ -487,7 +491,7 @@ static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn,
 
        /* error cleanup: caller will clear whole range */
 
-       local_irq_enable();
+       local_unlock_irq(tcp_page_lock);
        return rc;
 }
 
-- 
2.20.1

