From: Sebastian Andrzej Siewior <[email protected]>

v3.18.138-rt116-rc1 stable review patch.
If anyone has any objections, please let me know.

-----------


[ Upstream commit 5c1b4cd70e2ca0c81038b65babe6dc66086322e0 ]

The locallock protects the per-CPU variable tce_page. The function
attempts to allocate memory while tce_page is protected (by disabling
interrupts).

Use local_lock_irqsave() / local_lock_irq() on a local lock instead of
plain local_irq_save() / local_irq_disable(), so PREEMPT_RT gets a real
lock to protect tce_page.

Cc: [email protected]
Signed-off-by: Sebastian Andrzej Siewior <[email protected]>
Signed-off-by: Tom Zanussi <[email protected]>

 Conflicts:
        arch/powerpc/platforms/pseries/iommu.c
---
 arch/powerpc/platforms/pseries/iommu.c | 16 ++++++++++------
 1 file changed, 10 insertions(+), 6 deletions(-)

diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index 05a2c9eefc08..acc6f64f0cb8 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -36,6 +36,7 @@
 #include <linux/crash_dump.h>
 #include <linux/memory.h>
 #include <linux/of.h>
+#include <linux/locallock.h>
 #include <asm/io.h>
 #include <asm/prom.h>
 #include <asm/rtas.h>
@@ -177,6 +178,7 @@ static int tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum,
 }
 
 static DEFINE_PER_CPU(__be64 *, tce_page);
+static DEFINE_LOCAL_IRQ_LOCK(tcp_page_lock);
 
 static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
                                     long npages, unsigned long uaddr,
@@ -197,7 +199,8 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
                                           direction, attrs);
        }
 
-       local_irq_save(flags);  /* to protect tcep and the page behind it */
+       /* to protect tcep and the page behind it */
+       local_lock_irqsave(tcp_page_lock, flags);
 
        tcep = __get_cpu_var(tce_page);
 
@@ -208,7 +211,7 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
                tcep = (__be64 *)__get_free_page(GFP_ATOMIC);
                /* If allocation fails, fall back to the loop implementation */
                if (!tcep) {
-                       local_irq_restore(flags);
+                       local_unlock_irqrestore(tcp_page_lock, flags);
                        return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
                                            direction, attrs);
                }
@@ -242,7 +245,7 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
                tcenum += limit;
        } while (npages > 0 && !rc);
 
-       local_irq_restore(flags);
+       local_unlock_irqrestore(tcp_page_lock, flags);
 
        if (unlikely(rc == H_NOT_ENOUGH_RESOURCES)) {
                ret = (int)rc;
@@ -397,13 +400,14 @@ static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn,
        u64 rc = 0;
        long l, limit;
 
-       local_irq_disable();    /* to protect tcep and the page behind it */
+       /* to protect tcep and the page behind it */
+       local_lock_irq(tcp_page_lock);
        tcep = __get_cpu_var(tce_page);
 
        if (!tcep) {
                tcep = (__be64 *)__get_free_page(GFP_ATOMIC);
                if (!tcep) {
-                       local_irq_enable();
+                       local_unlock_irq(tcp_page_lock);
                        return -ENOMEM;
                }
                __get_cpu_var(tce_page) = tcep;
@@ -449,7 +453,7 @@ static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn,
 
        /* error cleanup: caller will clear whole range */
 
-       local_irq_enable();
+       local_unlock_irq(tcp_page_lock);
        return rc;
 }
 
-- 
2.14.1

Reply via email to