On Fri, 23 Jun 2017 18:31:18 +1000 Oliver O'Halloran <ooh...@gmail.com> wrote:
> Removes an indentation level and shuffles some code around to make the
> following patch cleaner. No functional changes.
>
> Signed-off-by: Oliver O'Halloran <ooh...@gmail.com>
> ---
> v1 -> v2: Remove broken initialiser
> ---
>  arch/powerpc/mm/init_64.c | 48 ++++++++++++++++++++++++-----------------------
>  1 file changed, 25 insertions(+), 23 deletions(-)
>
> diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
> index ec84b31c6c86..8851e4f5dbab 100644
> --- a/arch/powerpc/mm/init_64.c
> +++ b/arch/powerpc/mm/init_64.c
> @@ -234,13 +234,15 @@ static unsigned long vmemmap_list_free(unsigned long start)
>  void __ref vmemmap_free(unsigned long start, unsigned long end)
>  {
>  	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
> +	unsigned long page_order = get_order(page_size);
>
>  	start = _ALIGN_DOWN(start, page_size);
>
>  	pr_debug("vmemmap_free %lx...%lx\n", start, end);
>
>  	for (; start < end; start += page_size) {
> -		unsigned long addr;
> +		unsigned long nr_pages, addr;
> +		struct page *page;
>
>  		/*
>  		 * the section has already be marked as invalid, so
> @@ -251,29 +253,29 @@ void __ref vmemmap_free(unsigned long start, unsigned long end)
>  			continue;
>
>  		addr = vmemmap_list_free(start);
> -		if (addr) {
> -			struct page *page = pfn_to_page(addr >> PAGE_SHIFT);
> -
> -			if (PageReserved(page)) {
> -				/* allocated from bootmem */
> -				if (page_size < PAGE_SIZE) {
> -					/*
> -					 * this shouldn't happen, but if it is
> -					 * the case, leave the memory there
> -					 */
> -					WARN_ON_ONCE(1);
> -				} else {
> -					unsigned int nr_pages =
> -						1 << get_order(page_size);
> -					while (nr_pages--)
> -						free_reserved_page(page++);
> -				}
> -			} else
> -				free_pages((unsigned long)(__va(addr)),
> -					get_order(page_size));
> -
> -			vmemmap_remove_mapping(start, page_size);
> +		if (!addr)
> +			continue;
> +
> +		page = pfn_to_page(addr >> PAGE_SHIFT);
> +		nr_pages = 1 << page_order;
> +
> +		if (PageReserved(page)) {
> +			/* allocated from bootmem */
> +			if (page_size < PAGE_SIZE) {
> +				/*
> +				 * this shouldn't happen, but if it is
> +				 * the case, leave the memory there
> +				 */
> +				WARN_ON_ONCE(1);
> +			} else {
> +				while (nr_pages--)
> +					free_reserved_page(page++);
> +			}
> +		} else {
> +			free_pages((unsigned long)(__va(addr)), page_order);
>  		}
> +
> +		vmemmap_remove_mapping(start, page_size);
>  	}
>  }
>  #endif

Reviewed-by: Balbir Singh <bsinghar...@gmail.com>

Balbir Singh.