[PATCH v5 3/9] powerpc: Prepare for moving thread_info into task_struct

2018-10-05 Thread Christophe Leroy
This patch cleans the powerpc kernel before activating
CONFIG_THREAD_INFO_IN_TASK:
- The purpose of the pointer given to call_do_softirq() and
call_do_irq() is to point to the new stack ==> change it to void* and
rename it 'sp'
- Don't use CURRENT_THREAD_INFO() to locate the stack.
- Fix a few comments.
- Replace current_thread_info()->task by current (illustrated in the
sketch below).
- Remove unnecessary casts to thread_info, as they'll become invalid
once thread_info is not on the stack anymore.
- Rename THREAD_INFO to TASK_STACK: as it is in fact the offset of the
pointer to the stack in task_struct, this pointer will not be impacted
by the move of thread_info.
- Make TASK_STACK available to PPC64. PPC64 will need it to get the
stack pointer from current once thread_info has been moved.
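
The current_thread_info()->task replacement is of this kind (a sketch
only; the helper names below are made up and not taken from the hunks
in this patch):

	/* before: reach the task through the on-stack thread_info
	 * (needs <asm/thread_info.h>) */
	static struct task_struct *stack_owner_before(void)
	{
		return current_thread_info()->task;
	}

	/* after: use 'current' (<asm/current.h>) directly, so nothing
	 * depends on thread_info still living at the base of the stack */
	static struct task_struct *stack_owner_after(void)
	{
		return current;
	}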

Signed-off-by: Christophe Leroy 
Reviewed-by: Nicholas Piggin 
---
 arch/powerpc/include/asm/irq.h       |  4 ++--
 arch/powerpc/include/asm/livepatch.h |  2 +-
 arch/powerpc/include/asm/processor.h |  4 ++--
 arch/powerpc/include/asm/reg.h       |  2 +-
 arch/powerpc/kernel/asm-offsets.c    |  2 +-
 arch/powerpc/kernel/entry_32.S       |  2 +-
 arch/powerpc/kernel/entry_64.S       |  2 +-
 arch/powerpc/kernel/head_32.S        |  4 ++--
 arch/powerpc/kernel/head_40x.S       |  4 ++--
 arch/powerpc/kernel/head_44x.S       |  2 +-
 arch/powerpc/kernel/head_8xx.S       |  2 +-
 arch/powerpc/kernel/head_booke.h     |  4 ++--
 arch/powerpc/kernel/head_fsl_booke.S |  4 ++--
 arch/powerpc/kernel/irq.c            |  2 +-
 arch/powerpc/kernel/misc_32.S        |  4 ++--
 arch/powerpc/kernel/process.c        |  6 +++---
 arch/powerpc/kernel/setup_32.c       | 15 +--
 arch/powerpc/kernel/smp.c            |  4 +++-
 18 files changed, 33 insertions(+), 36 deletions(-)

diff --git a/arch/powerpc/include/asm/irq.h b/arch/powerpc/include/asm/irq.h
index ee39ce56b2a2..2efbae8d93be 100644
--- a/arch/powerpc/include/asm/irq.h
+++ b/arch/powerpc/include/asm/irq.h
@@ -63,8 +63,8 @@ extern struct thread_info *hardirq_ctx[NR_CPUS];
 extern struct thread_info *softirq_ctx[NR_CPUS];
 
 extern void irq_ctx_init(void);
-extern void call_do_softirq(struct thread_info *tp);
-extern void call_do_irq(struct pt_regs *regs, struct thread_info *tp);
+void call_do_softirq(void *sp);
+void call_do_irq(struct pt_regs *regs, void *sp);
 extern void do_IRQ(struct pt_regs *regs);
 extern void __init init_IRQ(void);
 extern void __do_irq(struct pt_regs *regs);
diff --git a/arch/powerpc/include/asm/livepatch.h b/arch/powerpc/include/asm/livepatch.h
index 47a03b9b528b..818451bf629c 100644
--- a/arch/powerpc/include/asm/livepatch.h
+++ b/arch/powerpc/include/asm/livepatch.h
@@ -49,7 +49,7 @@ static inline void klp_init_thread_info(struct thread_info *ti)
ti->livepatch_sp = (unsigned long *)(ti + 1) + 1;
 }
 #else
-static void klp_init_thread_info(struct thread_info *ti) { }
+static inline void klp_init_thread_info(struct thread_info *ti) { }
 #endif /* CONFIG_LIVEPATCH */
 
 #endif /* _ASM_POWERPC_LIVEPATCH_H */
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index 13589274fe9b..b225c7f7c5a4 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -40,7 +40,7 @@
 
 #ifndef __ASSEMBLY__
 #include 
-#include 
+#include 
 #include 
 #include 
 
@@ -332,7 +332,7 @@ struct thread_struct {
 
 #define INIT_SP		(sizeof(init_stack) + (unsigned long) &init_stack)
 #define INIT_SP_LIMIT \
-   (_ALIGN_UP(sizeof(init_thread_info), 16) + (unsigned long) &init_stack)
+   (_ALIGN_UP(sizeof(struct thread_info), 16) + (unsigned long)&init_stack)
 
 #ifdef CONFIG_SPE
 #define SPEFSCR_INIT \
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 640a4d818772..d2528a0b2f5b 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -1058,7 +1058,7 @@
  * - SPRG9 debug exception scratch
  *
  * All 32-bit:
- * - SPRG3 current thread_info pointer
+ * - SPRG3 current thread_struct physical addr pointer
  *(virtual on BookE, physical on others)
  *
  * 32-bit classic:
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index a6d70fd2e499..c583a02e5a21 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -91,10 +91,10 @@ int main(void)
DEFINE(NMI_MASK, NMI_MASK);
OFFSET(TASKTHREADPPR, task_struct, thread.ppr);
 #else
-   OFFSET(THREAD_INFO, task_struct, stack);
DEFINE(THREAD_INFO_GAP, _ALIGN_UP(sizeof(struct thread_info), 16));
OFFSET(KSP_LIMIT, thread_struct, ksp_limit);
 #endif /* CONFIG_PPC64 */
+   OFFSET(TASK_STACK, task_struct, stack);
 
 #ifdef CONFIG_LIVEPATCH
OFFSET(TI_livepatch_sp, thread_info, livepatch_sp);
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 77decded1175..7ea1d71f4546 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -1166

Re: [PATCH v5 3/9] powerpc: Prepare for moving thread_info into task_struct

2018-10-06 Thread Michael Ellerman
Christophe Leroy  writes:

> diff --git a/arch/powerpc/include/asm/livepatch.h b/arch/powerpc/include/asm/livepatch.h
> index 47a03b9b528b..818451bf629c 100644
> --- a/arch/powerpc/include/asm/livepatch.h
> +++ b/arch/powerpc/include/asm/livepatch.h
> @@ -49,7 +49,7 @@ static inline void klp_init_thread_info(struct thread_info *ti)
>   ti->livepatch_sp = (unsigned long *)(ti + 1) + 1;

We need to do something about this.

Currently the thread_info sits at the low address of the stack, and we
use the space immediately above that as a miniature upward growing stack
for livepatching.
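
Roughly, the layout being described looks like this (a sketch of the
pre-series 32-bit stack page, not code from the patch):

	/*
	 * stack page base:  struct thread_info
	 *                   STACK_END_MAGIC word
	 *                   livepatch mini-stack, i.e. what
	 *                   (unsigned long *)(ti + 1) + 1 points at;
	 *                   grows upward
	 *                   ...
	 * stack page top:   normal kernel stack, grows downward
	 */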

If we keep the livepatch_sp in the thread_info then we need to
initialise it somewhere when the task starts running on a stack. And I
don't know how that works if we end up running on the emergency stack
for example.

So I'm not sure that makes much sense.

Instead we might need to keep the livepatch_sp on the stack page at the
base, where thread_info currently lives.

That obviously sucks because you can still overflow into it and trash
it, but it's no worse than what we have now for livepatching.

Need to think about it some more.

cheers


Re: [PATCH v5 3/9] powerpc: Prepare for moving thread_info into task_struct

2018-10-07 Thread Christophe Leroy




On 10/06/2018 12:40 PM, Michael Ellerman wrote:

> Christophe Leroy  writes:
>
>> diff --git a/arch/powerpc/include/asm/livepatch.h b/arch/powerpc/include/asm/livepatch.h
>> index 47a03b9b528b..818451bf629c 100644
>> --- a/arch/powerpc/include/asm/livepatch.h
>> +++ b/arch/powerpc/include/asm/livepatch.h
>> @@ -49,7 +49,7 @@ static inline void klp_init_thread_info(struct thread_info *ti)
>>  	ti->livepatch_sp = (unsigned long *)(ti + 1) + 1;
>
> We need to do something about this.

Oops I missed that one.

> Currently the thread_info sits at the low address of the stack, and we
> use the space immediately above that as a miniature upward growing stack
> for livepatching.
>
> If we keep the livepatch_sp in the thread_info then we need to
> initialise it somewhere when the task starts running on a stack. And I
> don't know how that works if we end up running on the emergency stack
> for example.
>
> So I'm not sure that makes much sense.
>
> Instead we might need to keep the livepatch_sp on the stack page at the
> base, where thread_info currently lives.
>
> That obviously sucks because you can still overflow into it and trash
> it, but it's no worse than what we have now for livepatching.
>
> Need to think about it some more.



I think for this series we can live with it in the stack, as it won't be
worse than before. Then in a future patch we can change it. I'll open an
issue for that on GitHub.


Then for now, I'll add the following change in this patch.

diff --git a/arch/powerpc/include/asm/livepatch.h b/arch/powerpc/include/asm/livepatch.h
index 47a03b9b528b..8a81d10ccc82 100644
--- a/arch/powerpc/include/asm/livepatch.h
+++ b/arch/powerpc/include/asm/livepatch.h
@@ -43,13 +43,14 @@ static inline unsigned long klp_get_ftrace_location(unsigned long faddr)
return ftrace_location_range(faddr, faddr + 16);
 }

-static inline void klp_init_thread_info(struct thread_info *ti)
+static inline void klp_init_thread_info(struct task_struct *p)
 {
+   struct thread_info *ti = task_thread_info(p);
/* + 1 to account for STACK_END_MAGIC */
-   ti->livepatch_sp = (unsigned long *)(ti + 1) + 1;
+   ti->livepatch_sp = end_of_stack(p) + 1;
 }
 #else
-static void klp_init_thread_info(struct thread_info *ti) { }
+static inline void klp_init_thread_info(struct task_struct *p) { }
 #endif /* CONFIG_LIVEPATCH */

 #endif /* _ASM_POWERPC_LIVEPATCH_H */
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index d9d4eb2ea6c9..a12307ebb7ef 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1632,7 +1632,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;
struct thread_info *ti = task_thread_info(p);

-   klp_init_thread_info(ti);
+   klp_init_thread_info(p);

/* Copy registers */
sp -= sizeof(struct pt_regs);
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 9ca9db707bcb..8054a7b9e026 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -940,7 +940,7 @@ void __init setup_arch(char **cmdline_p)
/* Reserve large chunks of memory for use by CMA for KVM. */
kvm_cma_reserve();

-   klp_init_thread_info(&init_thread_info);
+   klp_init_thread_info(&init_task);

init_mm.start_code = (unsigned long)_stext;
init_mm.end_code = (unsigned long) _etext;
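
For reference, without CONFIG_THREAD_INFO_IN_TASK the generic
end_of_stack() used above is roughly this (see
include/linux/sched/task_stack.h, downward-growing stacks):

	static inline unsigned long *end_of_stack(struct task_struct *p)
	{
		return (unsigned long *)(task_thread_info(p) + 1);
	}

so end_of_stack(p) + 1 lands on the same word as the old
(unsigned long *)(ti + 1) + 1, and it keeps working once thread_info
moves into task_struct (end_of_stack() then returns task->stack).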

Christophe