On 2025/12/29 12:54, Mukesh Kumar Chaurasiya wrote:
> From: Mukesh Kumar Chaurasiya <[email protected]>
> 
> Rename arch_irq_disabled_regs() to regs_irqs_disabled() to align with the
> naming used in the generic irqentry framework. This makes the function
> available under the name the common entry/exit code expects, so it can be
> used both by PowerPC architecture code and by the generic paths shared with
> other architectures.
> 
> This is a preparatory change for enabling the generic irqentry framework
> on PowerPC.
> 
> Signed-off-by: Mukesh Kumar Chaurasiya <[email protected]>
> Reviewed-by: Shrikanth Hegde <[email protected]>
> ---
>  arch/powerpc/include/asm/hw_irq.h    |  4 ++--
>  arch/powerpc/include/asm/interrupt.h | 16 ++++++++--------
>  arch/powerpc/kernel/interrupt.c      |  4 ++--
>  arch/powerpc/kernel/syscall.c        |  2 +-
>  arch/powerpc/kernel/traps.c          |  2 +-
>  arch/powerpc/kernel/watchdog.c       |  2 +-
>  arch/powerpc/perf/core-book3s.c      |  2 +-
>  7 files changed, 16 insertions(+), 16 deletions(-)

Reviewed-by: Jinjie Ruan <[email protected]>
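
For anyone following along: once the later patches switch PowerPC over, it is
the common irqentry exit path that ends up calling this helper, which is why
the name has to match exactly. A paraphrased sketch of that usage is below
(example_irqentry_exit is a made-up name for illustration, not the actual
kernel/entry/common.c function):

	/*
	 * Illustration only: the generic exit path keys its "did the
	 * interrupted context have IRQs enabled?" decision off
	 * regs_irqs_disabled(), so the arch must provide the helper
	 * under this exact name.
	 */
	static void example_irqentry_exit(struct pt_regs *regs)
	{
		if (user_mode(regs)) {
			/* Return to user: full exit-to-user work. */
		} else if (!regs_irqs_disabled(regs)) {
			/* Kernel context with IRQs enabled: tracing, preemption, etc. */
		} else {
			/* Kernel context with IRQs (soft-)disabled: minimal exit. */
		}
	}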

> 
> diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
> index 1078ba88efaf..8dfe36b442a5 100644
> --- a/arch/powerpc/include/asm/hw_irq.h
> +++ b/arch/powerpc/include/asm/hw_irq.h
> @@ -393,7 +393,7 @@ static inline void do_hard_irq_enable(void)
>       __hard_irq_enable();
>  }
>  
> -static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
> +static inline bool regs_irqs_disabled(struct pt_regs *regs)
>  {
>       return (regs->softe & IRQS_DISABLED);
>  }
> @@ -466,7 +466,7 @@ static inline bool arch_irqs_disabled(void)
>  
>  #define hard_irq_disable()           arch_local_irq_disable()
>  
> -static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
> +static inline bool regs_irqs_disabled(struct pt_regs *regs)
>  {
>       return !(regs->msr & MSR_EE);
>  }
> diff --git a/arch/powerpc/include/asm/interrupt.h b/arch/powerpc/include/asm/interrupt.h
> index eb0e4a20b818..0e2cddf8bd21 100644
> --- a/arch/powerpc/include/asm/interrupt.h
> +++ b/arch/powerpc/include/asm/interrupt.h
> @@ -172,7 +172,7 @@ static inline void interrupt_enter_prepare(struct pt_regs *regs)
>       /* Enable MSR[RI] early, to support kernel SLB and hash faults */
>  #endif
>  
> -     if (!arch_irq_disabled_regs(regs))
> +     if (!regs_irqs_disabled(regs))
>               trace_hardirqs_off();
>  
>       if (user_mode(regs)) {
> @@ -192,11 +192,11 @@ static inline void interrupt_enter_prepare(struct pt_regs *regs)
>                       CT_WARN_ON(ct_state() != CT_STATE_KERNEL &&
>                                  ct_state() != CT_STATE_IDLE);
>               INT_SOFT_MASK_BUG_ON(regs, is_implicit_soft_masked(regs));
> -             INT_SOFT_MASK_BUG_ON(regs, arch_irq_disabled_regs(regs) &&
> -                                        search_kernel_restart_table(regs->nip));
> +             INT_SOFT_MASK_BUG_ON(regs, regs_irqs_disabled(regs) &&
> +                                  search_kernel_restart_table(regs->nip));
>       }
> -     INT_SOFT_MASK_BUG_ON(regs, !arch_irq_disabled_regs(regs) &&
> -                                !(regs->msr & MSR_EE));
> +     INT_SOFT_MASK_BUG_ON(regs, !regs_irqs_disabled(regs) &&
> +                          !(regs->msr & MSR_EE));
>  
>       booke_restore_dbcr0();
>  }
> @@ -298,7 +298,7 @@ static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct inte
>                * Adjust regs->softe to be soft-masked if it had not been
>                * reconcied (e.g., interrupt entry with MSR[EE]=0 but softe
>                * not yet set disabled), or if it was in an implicit soft
> -              * masked state. This makes arch_irq_disabled_regs(regs)
> +              * masked state. This makes regs_irqs_disabled(regs)
>                * behave as expected.
>                */
>               regs->softe = IRQS_ALL_DISABLED;
> @@ -372,7 +372,7 @@ static inline void interrupt_nmi_exit_prepare(struct pt_regs *regs, struct inter
>  
>  #ifdef CONFIG_PPC64
>  #ifdef CONFIG_PPC_BOOK3S
> -     if (arch_irq_disabled_regs(regs)) {
> +     if (regs_irqs_disabled(regs)) {
>               unsigned long rst = search_kernel_restart_table(regs->nip);
>               if (rst)
>                       regs_set_return_ip(regs, rst);
> @@ -661,7 +661,7 @@ void replay_soft_interrupts(void);
>  
>  static inline void interrupt_cond_local_irq_enable(struct pt_regs *regs)
>  {
> -     if (!arch_irq_disabled_regs(regs))
> +     if (!regs_irqs_disabled(regs))
>               local_irq_enable();
>  }
>  
> diff --git a/arch/powerpc/kernel/interrupt.c b/arch/powerpc/kernel/interrupt.c
> index aea6f7e8e9c6..9ce2013e70ae 100644
> --- a/arch/powerpc/kernel/interrupt.c
> +++ b/arch/powerpc/kernel/interrupt.c
> @@ -347,7 +347,7 @@ notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs)
>       unsigned long ret;
>  
>       BUG_ON(regs_is_unrecoverable(regs));
> -     BUG_ON(arch_irq_disabled_regs(regs));
> +     BUG_ON(regs_irqs_disabled(regs));
>       CT_WARN_ON(ct_state() == CT_STATE_USER);
>  
>       /*
> @@ -396,7 +396,7 @@ notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs)
>  
>       local_irq_disable();
>  
> -     if (!arch_irq_disabled_regs(regs)) {
> +     if (!regs_irqs_disabled(regs)) {
>               /* Returning to a kernel context with local irqs enabled. */
>               WARN_ON_ONCE(!(regs->msr & MSR_EE));
>  again:
> diff --git a/arch/powerpc/kernel/syscall.c b/arch/powerpc/kernel/syscall.c
> index be159ad4b77b..9f03a6263fb4 100644
> --- a/arch/powerpc/kernel/syscall.c
> +++ b/arch/powerpc/kernel/syscall.c
> @@ -32,7 +32,7 @@ notrace long system_call_exception(struct pt_regs *regs, unsigned long r0)
>  
>       BUG_ON(regs_is_unrecoverable(regs));
>       BUG_ON(!user_mode(regs));
> -     BUG_ON(arch_irq_disabled_regs(regs));
> +     BUG_ON(regs_irqs_disabled(regs));
>  
>  #ifdef CONFIG_PPC_PKEY
>       if (mmu_has_feature(MMU_FTR_PKEY)) {
> diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
> index cb8e9357383e..629f2a2d4780 100644
> --- a/arch/powerpc/kernel/traps.c
> +++ b/arch/powerpc/kernel/traps.c
> @@ -1956,7 +1956,7 @@ DEFINE_INTERRUPT_HANDLER_RAW(performance_monitor_exception)
>        * prevent hash faults on user addresses when reading callchains (and
>        * looks better from an irq tracing perspective).
>        */
> -     if (IS_ENABLED(CONFIG_PPC64) && unlikely(arch_irq_disabled_regs(regs)))
> +     if (IS_ENABLED(CONFIG_PPC64) && unlikely(regs_irqs_disabled(regs)))
>               performance_monitor_exception_nmi(regs);
>       else
>               performance_monitor_exception_async(regs);
> diff --git a/arch/powerpc/kernel/watchdog.c b/arch/powerpc/kernel/watchdog.c
> index 2429cb1c7baa..6111cbbde069 100644
> --- a/arch/powerpc/kernel/watchdog.c
> +++ b/arch/powerpc/kernel/watchdog.c
> @@ -373,7 +373,7 @@ DEFINE_INTERRUPT_HANDLER_NMI(soft_nmi_interrupt)
>       u64 tb;
>  
>       /* should only arrive from kernel, with irqs disabled */
> -     WARN_ON_ONCE(!arch_irq_disabled_regs(regs));
> +     WARN_ON_ONCE(!regs_irqs_disabled(regs));
>  
>       if (!cpumask_test_cpu(cpu, &wd_cpus_enabled))
>               return 0;
> diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
> index 8b0081441f85..f7518b7e3055 100644
> --- a/arch/powerpc/perf/core-book3s.c
> +++ b/arch/powerpc/perf/core-book3s.c
> @@ -2482,7 +2482,7 @@ static void __perf_event_interrupt(struct pt_regs *regs)
>        * will trigger a PMI after waking up from idle. Since counter values are _not_
>        * saved/restored in idle path, can lead to below "Can't find PMC" message.
>        */
> -     if (unlikely(!found) && !arch_irq_disabled_regs(regs))
> +     if (unlikely(!found) && !regs_irqs_disabled(regs))
>               printk_ratelimited(KERN_WARNING "Can't find PMC that caused IRQ\n");
>  
>       /*
