Michael Ellerman's on February 5, 2019 9:31 pm: > From: Christophe Leroy <christophe.le...@c-s.fr> > > Since only the virtual address of allocated blocks is used, > lets use functions returning directly virtual address. > > Those functions have the advantage of also zeroing the block. > > Suggested-by: Mike Rapoport <r...@linux.ibm.com> > Acked-by: Mike Rapoport <r...@linux.ibm.com> > Signed-off-by: Christophe Leroy <christophe.le...@c-s.fr> > Signed-off-by: Michael Ellerman <m...@ellerman.id.au> > --- > arch/powerpc/kernel/irq.c | 5 ----- > arch/powerpc/kernel/setup_32.c | 26 ++++++++++++++++---------- > arch/powerpc/kernel/setup_64.c | 19 +++++++------------ > 3 files changed, 23 insertions(+), 27 deletions(-) > > diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c > index bb299613a462..4a5dd8800946 100644 > --- a/arch/powerpc/kernel/irq.c > +++ b/arch/powerpc/kernel/irq.c > @@ -725,18 +725,15 @@ void exc_lvl_ctx_init(void) > #endif > #endif > > - memset((void *)critirq_ctx[cpu_nr], 0, THREAD_SIZE); > tp = critirq_ctx[cpu_nr]; > tp->cpu = cpu_nr; > tp->preempt_count = 0; > > #ifdef CONFIG_BOOKE > - memset((void *)dbgirq_ctx[cpu_nr], 0, THREAD_SIZE); > tp = dbgirq_ctx[cpu_nr]; > tp->cpu = cpu_nr; > tp->preempt_count = 0; > > - memset((void *)mcheckirq_ctx[cpu_nr], 0, THREAD_SIZE); > tp = mcheckirq_ctx[cpu_nr]; > tp->cpu = cpu_nr; > tp->preempt_count = HARDIRQ_OFFSET; > @@ -754,12 +751,10 @@ void irq_ctx_init(void) > int i; > > for_each_possible_cpu(i) { > - memset((void *)softirq_ctx[i], 0, THREAD_SIZE); > tp = softirq_ctx[i]; > tp->cpu = i; > klp_init_thread_info(tp); > > - memset((void *)hardirq_ctx[i], 0, THREAD_SIZE); > tp = hardirq_ctx[i]; > tp->cpu = i; > klp_init_thread_info(tp); > diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c > index 947f904688b0..1f0b7629c1a6 100644 > --- a/arch/powerpc/kernel/setup_32.c > +++ b/arch/powerpc/kernel/setup_32.c > @@ -196,6 +196,17 @@ static int __init ppc_init(void) > } > 
arch_initcall(ppc_init); > > +static void *__init alloc_stack(void) > +{ > + void *ptr = memblock_alloc(THREAD_SIZE, THREAD_SIZE); > + > + if (!ptr) > + panic("cannot allocate %d bytes for stack at %pS\n", > + THREAD_SIZE, (void *)_RET_IP_); > + > + return ptr; > +} > + > void __init irqstack_early_init(void) > { > unsigned int i; > @@ -203,10 +214,8 @@ void __init irqstack_early_init(void) > /* interrupt stacks must be in lowmem, we get that for free on ppc32 > * as the memblock is limited to lowmem by default */ > for_each_possible_cpu(i) { > - softirq_ctx[i] = (struct thread_info *) > - __va(memblock_phys_alloc(THREAD_SIZE, THREAD_SIZE)); > - hardirq_ctx[i] = (struct thread_info *) > - __va(memblock_phys_alloc(THREAD_SIZE, THREAD_SIZE)); > + softirq_ctx[i] = alloc_stack(); > + hardirq_ctx[i] = alloc_stack(); > } > } > > @@ -224,13 +233,10 @@ void __init exc_lvl_early_init(void) > hw_cpu = 0; > #endif > > - critirq_ctx[hw_cpu] = (struct thread_info *) > - __va(memblock_phys_alloc(THREAD_SIZE, THREAD_SIZE)); > + critirq_ctx[hw_cpu] = alloc_stack(); > #ifdef CONFIG_BOOKE > - dbgirq_ctx[hw_cpu] = (struct thread_info *) > - __va(memblock_phys_alloc(THREAD_SIZE, THREAD_SIZE)); > - mcheckirq_ctx[hw_cpu] = (struct thread_info *) > - __va(memblock_phys_alloc(THREAD_SIZE, THREAD_SIZE)); > + dbgirq_ctx[hw_cpu] = alloc_stack(); > + mcheckirq_ctx[hw_cpu] = alloc_stack(); > #endif > } > } > diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c > index 236c1151a3a7..080dd515d587 100644 > --- a/arch/powerpc/kernel/setup_64.c > +++ b/arch/powerpc/kernel/setup_64.c > @@ -634,19 +634,17 @@ __init u64 ppc64_bolted_size(void) > > static void *__init alloc_stack(unsigned long limit, int cpu) > { > - unsigned long pa; > + void *ptr; > > BUILD_BUG_ON(STACK_INT_FRAME_SIZE % 16); > > - pa = memblock_alloc_base_nid(THREAD_SIZE, THREAD_SIZE, limit, > - early_cpu_to_node(cpu), MEMBLOCK_NONE); > - if (!pa) { > - pa = memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, 
limit); > - if (!pa) > - panic("cannot allocate stacks"); > - } > + ptr = memblock_alloc_try_nid(THREAD_SIZE, THREAD_SIZE, > + MEMBLOCK_LOW_LIMIT, limit, > + early_cpu_to_node(cpu));
This is much nicer. Looks like removing MEMBLOCK_LOW_LIMIT would be a cleanup for generic kernel code. Actually, some of these calls (e.g., memblock_alloc_base_nid) are used only by powerpc, so they could probably be tidied up as well. Something for a rainy day or a beginner. Reviewed-by: Nicholas Piggin <npig...@gmail.com>