On Mon, 12 Nov 2012, Yinghai Lu wrote:
> Now all page table buffers are pre-mapped, so we can use their virtual
> addresses directly and no longer need to remember the physical address.
> 
> Remove the phys output pointer from alloc_low_page(); this will allow us to
> merge the 64-bit and 32-bit versions of alloc_low_page().
> 
> Signed-off-by: Yinghai Lu <ying...@kernel.org>

Acked-by: Stefano Stabellini <stefano.stabell...@eu.citrix.com>


>  arch/x86/mm/init_64.c |   19 +++++++------------
>  1 files changed, 7 insertions(+), 12 deletions(-)
> 
> diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
> index 5ee9242..1960820 100644
> --- a/arch/x86/mm/init_64.c
> +++ b/arch/x86/mm/init_64.c
> @@ -314,14 +314,13 @@ void __init cleanup_highmap(void)
>       }
>  }
>  
> -static __ref void *alloc_low_page(unsigned long *phys)
> +static __ref void *alloc_low_page(void)
>  {
>       unsigned long pfn;
>       void *adr;
>  
>       if (after_bootmem) {
>               adr = (void *)get_zeroed_page(GFP_ATOMIC | __GFP_NOTRACK);
> -             *phys = __pa(adr);
>  
>               return adr;
>       }
> @@ -342,7 +341,6 @@ static __ref void *alloc_low_page(unsigned long *phys)
>  
>       adr = __va(pfn * PAGE_SIZE);
>       clear_page(adr);
> -     *phys  = pfn * PAGE_SIZE;
>       return adr;
>  }
>  
> @@ -401,7 +399,6 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, 
> unsigned long end,
>       int i = pmd_index(address);
>  
>       for (; i < PTRS_PER_PMD; i++, address = next) {
> -             unsigned long pte_phys;
>               pmd_t *pmd = pmd_page + pmd_index(address);
>               pte_t *pte;
>               pgprot_t new_prot = prot;
> @@ -456,11 +453,11 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, 
> unsigned long end,
>                       continue;
>               }
>  
> -             pte = alloc_low_page(&pte_phys);
> +             pte = alloc_low_page();
>               last_map_addr = phys_pte_init(pte, address, end, new_prot);
>  
>               spin_lock(&init_mm.page_table_lock);
> -             pmd_populate_kernel(&init_mm, pmd, __va(pte_phys));
> +             pmd_populate_kernel(&init_mm, pmd, pte);
>               spin_unlock(&init_mm.page_table_lock);
>       }
>       update_page_count(PG_LEVEL_2M, pages);
> @@ -476,7 +473,6 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, 
> unsigned long end,
>       int i = pud_index(addr);
>  
>       for (; i < PTRS_PER_PUD; i++, addr = next) {
> -             unsigned long pmd_phys;
>               pud_t *pud = pud_page + pud_index(addr);
>               pmd_t *pmd;
>               pgprot_t prot = PAGE_KERNEL;
> @@ -530,12 +526,12 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, 
> unsigned long end,
>                       continue;
>               }
>  
> -             pmd = alloc_low_page(&pmd_phys);
> +             pmd = alloc_low_page();
>               last_map_addr = phys_pmd_init(pmd, addr, end, page_size_mask,
>                                             prot);
>  
>               spin_lock(&init_mm.page_table_lock);
> -             pud_populate(&init_mm, pud, __va(pmd_phys));
> +             pud_populate(&init_mm, pud, pmd);
>               spin_unlock(&init_mm.page_table_lock);
>       }
>       __flush_tlb_all();
> @@ -560,7 +556,6 @@ kernel_physical_mapping_init(unsigned long start,
>  
>       for (; start < end; start = next) {
>               pgd_t *pgd = pgd_offset_k(start);
> -             unsigned long pud_phys;
>               pud_t *pud;
>  
>               next = (start + PGDIR_SIZE) & PGDIR_MASK;
> @@ -574,12 +569,12 @@ kernel_physical_mapping_init(unsigned long start,
>                       continue;
>               }
>  
> -             pud = alloc_low_page(&pud_phys);
> +             pud = alloc_low_page();
>               last_map_addr = phys_pud_init(pud, __pa(start), __pa(next),
>                                                page_size_mask);
>  
>               spin_lock(&init_mm.page_table_lock);
> -             pgd_populate(&init_mm, pgd, __va(pud_phys));
> +             pgd_populate(&init_mm, pgd, pud);
>               spin_unlock(&init_mm.page_table_lock);
>               pgd_changed = true;
>       }
> -- 
> 1.7.7
> 
--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Reply via email to