By saving a pointer to thread_info.flags, gcc copies r2 into a
non-volatile register.

We know 'current' doesn't change, so avoid that intermediate pointer.

Reduces the null_syscall benchmark by 2 cycles (322 => 320 cycles).

On PPC64, gcc seems to know that 'current' does not change, and it keeps
it in a non-volatile register to avoid multiple reads of 'current' from
the paca.

Signed-off-by: Christophe Leroy <christophe.le...@csgroup.eu>
---
v5: Also in interrupt exit prepare
---
 arch/powerpc/kernel/interrupt.c | 28 +++++++++++-----------------
 1 file changed, 11 insertions(+), 17 deletions(-)

diff --git a/arch/powerpc/kernel/interrupt.c b/arch/powerpc/kernel/interrupt.c
index 8c38e8c95be2..c89a8eac3e24 100644
--- a/arch/powerpc/kernel/interrupt.c
+++ b/arch/powerpc/kernel/interrupt.c
@@ -223,7 +223,6 @@ notrace unsigned long syscall_exit_prepare(unsigned long r3,
                                           struct pt_regs *regs,
                                           long scv)
 {
-       unsigned long *ti_flagsp = &current_thread_info()->flags;
        unsigned long ti_flags;
        unsigned long ret = 0;
 
@@ -241,7 +240,7 @@ notrace unsigned long syscall_exit_prepare(unsigned long r3,
        /* Check whether the syscall is issued inside a restartable sequence */
        rseq_syscall(regs);
 
-       ti_flags = *ti_flagsp;
+       ti_flags = current_thread_info()->flags;
 
        if (unlikely(r3 >= (unsigned long)-MAX_ERRNO) && !scv) {
                if (likely(!(ti_flags & (_TIF_NOERROR | _TIF_RESTOREALL)))) {
@@ -255,7 +254,7 @@ notrace unsigned long syscall_exit_prepare(unsigned long r3,
                        ret = _TIF_RESTOREALL;
                else
                        regs->gpr[3] = r3;
-               clear_bits(_TIF_PERSYSCALL_MASK, ti_flagsp);
+               clear_bits(_TIF_PERSYSCALL_MASK, &current_thread_info()->flags);
        } else {
                regs->gpr[3] = r3;
        }
@@ -268,7 +267,7 @@ notrace unsigned long syscall_exit_prepare(unsigned long r3,
        local_irq_disable();
 
 again:
-       ti_flags = READ_ONCE(*ti_flagsp);
+       ti_flags = READ_ONCE(current_thread_info()->flags);
        while (unlikely(ti_flags & (_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM))) {
                local_irq_enable();
                if (ti_flags & _TIF_NEED_RESCHED) {
@@ -284,7 +283,7 @@ notrace unsigned long syscall_exit_prepare(unsigned long r3,
                        do_notify_resume(regs, ti_flags);
                }
                local_irq_disable();
-               ti_flags = READ_ONCE(*ti_flagsp);
+               ti_flags = READ_ONCE(current_thread_info()->flags);
        }
 
        if (IS_ENABLED(CONFIG_PPC_BOOK3S) && IS_ENABLED(CONFIG_PPC_FPU)) {
@@ -339,10 +338,6 @@ notrace unsigned long syscall_exit_prepare(unsigned long r3,
 #ifndef CONFIG_PPC_BOOK3E_64 /* BOOK3E not yet using this */
 notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs, unsigned long msr)
 {
-#ifdef CONFIG_PPC_BOOK3E
-       struct thread_struct *ts = &current->thread;
-#endif
-       unsigned long *ti_flagsp = &current_thread_info()->flags;
        unsigned long ti_flags;
        unsigned long flags;
        unsigned long ret = 0;
@@ -365,7 +360,7 @@ notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs, unsigned
        local_irq_save(flags);
 
 again:
-       ti_flags = READ_ONCE(*ti_flagsp);
+       ti_flags = READ_ONCE(current_thread_info()->flags);
        while (unlikely(ti_flags & (_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM))) {
                local_irq_enable(); /* returning to user: may enable */
                if (ti_flags & _TIF_NEED_RESCHED) {
@@ -376,7 +371,7 @@ notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs, unsigned
                        do_notify_resume(regs, ti_flags);
                }
                local_irq_disable();
-               ti_flags = READ_ONCE(*ti_flagsp);
+               ti_flags = READ_ONCE(current_thread_info()->flags);
        }
 
        if (IS_ENABLED(CONFIG_PPC_BOOK3S) && IS_ENABLED(CONFIG_PPC_FPU)) {
@@ -407,13 +402,13 @@ notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs, unsigned
        }
 
 #ifdef CONFIG_PPC_BOOK3E
-       if (unlikely(ts->debug.dbcr0 & DBCR0_IDM)) {
+       if (unlikely(current->thread.debug.dbcr0 & DBCR0_IDM)) {
                /*
                 * Check to see if the dbcr0 register is set up to debug.
                 * Use the internal debug mode bit to do this.
                 */
                mtmsr(mfmsr() & ~MSR_DE);
-               mtspr(SPRN_DBCR0, ts->debug.dbcr0);
+               mtspr(SPRN_DBCR0, current->thread.debug.dbcr0);
                mtspr(SPRN_DBSR, -1);
        }
 #endif
@@ -438,7 +433,6 @@ void preempt_schedule_irq(void);
 
 notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs, unsigned long msr)
 {
-       unsigned long *ti_flagsp = &current_thread_info()->flags;
        unsigned long flags;
        unsigned long ret = 0;
 #ifdef CONFIG_PPC64
@@ -461,8 +455,8 @@ notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs, unsign
        amr = kuap_get_and_check_amr();
 #endif
 
-       if (unlikely(*ti_flagsp & _TIF_EMULATE_STACK_STORE)) {
-               clear_bits(_TIF_EMULATE_STACK_STORE, ti_flagsp);
+       if (unlikely(current_thread_info()->flags & _TIF_EMULATE_STACK_STORE)) {
+               clear_bits(_TIF_EMULATE_STACK_STORE, &current_thread_info()->flags);
                ret = 1;
        }
 
@@ -474,7 +468,7 @@ notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs, unsign
 again:
                if (IS_ENABLED(CONFIG_PREEMPT)) {
                        /* Return to preemptible kernel context */
-                       if (unlikely(*ti_flagsp & _TIF_NEED_RESCHED)) {
+                       if (unlikely(current_thread_info()->flags & _TIF_NEED_RESCHED)) {
                                if (preempt_count() == 0)
                                        preempt_schedule_irq();
                        }
-- 
2.25.0
