Force use of the soft_enabled_set() wrapper to update paca->soft_enabled
wherever possible. Also add a new wrapper, soft_enabled_set_return(), which
updates paca->soft_enabled and returns the previous value, for callers that
need to save and restore the state.

Signed-off-by: Madhavan Srinivasan <ma...@linux.vnet.ibm.com>
---
 arch/powerpc/include/asm/hw_irq.h  | 14 ++++++++++++++
 arch/powerpc/include/asm/kvm_ppc.h |  2 +-
 arch/powerpc/kernel/irq.c          |  2 +-
 arch/powerpc/kernel/setup_64.c     |  4 ++--
 arch/powerpc/kernel/time.c         |  6 +++---
 5 files changed, 21 insertions(+), 7 deletions(-)
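
For reference, a minimal sketch of the intended save/restore usage of the new
wrapper (illustrative only; it mirrors the accumulate_stolen_time() change
below):

	unsigned long flags;

	/* Save the previous paca->soft_enabled value and mark IRQs soft-disabled */
	flags = soft_enabled_set_return(IRQ_DISABLED);

	/* ... code that must see interrupts soft-disabled in the PACA ... */

	/* Restore the saved soft-enabled state */
	soft_enabled_set(flags);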

diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index 29f04e287845..6bbcacaa4bd4 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -62,6 +62,20 @@ static inline notrace void soft_enabled_set(unsigned long enable)
        : "memory");
 }
 
+static inline notrace unsigned long soft_enabled_set_return(unsigned long enable)
+{
+       unsigned long flags;
+
+       asm volatile(
+               "lbz %0,%1(13); stb %2,%1(13)"
+               : "=r" (flags)
+               : "i" (offsetof(struct paca_struct, soft_enabled)),\
+                 "r" (enable)
+               : "memory");
+
+       return flags;
+}
+
 static inline unsigned long arch_local_save_flags(void)
 {
        unsigned long flags;
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index d3608fe43245..76f82014cd80 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -869,7 +869,7 @@ static inline void kvmppc_fix_ee_before_entry(void)
 
        /* Only need to enable IRQs by hard enabling them after this */
        local_paca->irq_happened = 0;
-       local_paca->soft_enabled = IRQ_ENABLED;
+       soft_enabled_set(IRQ_ENABLED);
 #endif
 }
 
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index d5b90b8e13dc..d482f9a7e6a9 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -337,7 +337,7 @@ bool prep_irq_for_idle(void)
         * of entering the low power state.
         */
        local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
-       local_paca->soft_enabled = IRQ_ENABLED;
+       soft_enabled_set(IRQ_ENABLED);
 
        /* Tell the caller to enter the low power state */
        return true;
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 946e6131ff25..4e77972edfac 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -189,7 +189,7 @@ static void __init fixup_boot_paca(void)
        /* Allow percpu accesses to work until we setup percpu data */
        get_paca()->data_offset = 0;
        /* Mark interrupts disabled in PACA */
-       get_paca()->soft_enabled = IRQ_DISABLED;
+       soft_enabled_set(IRQ_DISABLED);
 }
 
 static void __init configure_exceptions(void)
@@ -345,7 +345,7 @@ void __init early_setup(unsigned long dt_ptr)
 void early_setup_secondary(void)
 {
        /* Mark interrupts disabled in PACA */
-       get_paca()->soft_enabled = 0;
+       soft_enabled_set(IRQ_DISABLED);
 
        /* Initialize the hash table or TLB handling */
        early_init_mmu_secondary();
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index d0d730c61758..f495956a3664 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -244,7 +244,7 @@ static u64 scan_dispatch_log(u64 stop_tb)
 void accumulate_stolen_time(void)
 {
        u64 sst, ust;
-       u8 save_soft_enabled = local_paca->soft_enabled;
+       unsigned long save_soft_enabled;
        struct cpu_accounting_data *acct = &local_paca->accounting;
 
        /* We are called early in the exception entry, before
@@ -253,7 +253,7 @@ void accumulate_stolen_time(void)
         * needs to reflect that so various debug stuff doesn't
         * complain
         */
-       local_paca->soft_enabled = IRQ_DISABLED;
+       save_soft_enabled = soft_enabled_set_return(IRQ_DISABLED);
 
        sst = scan_dispatch_log(acct->starttime_user);
        ust = scan_dispatch_log(acct->starttime);
@@ -261,7 +261,7 @@ void accumulate_stolen_time(void)
        acct->utime -= ust;
        acct->steal_time += ust + sst;
 
-       local_paca->soft_enabled = save_soft_enabled;
+       soft_enabled_set(save_soft_enabled);
 }
 
 static inline u64 calculate_stolen_time(u64 stop_tb)
-- 
2.7.4
