could save some bit shifting operations.

Signed-off-by: Yinghai Lu <ying...@kernel.org>
---
 arch/x86/mm/init.c |   29 ++++++++++++++---------------
 1 files changed, 14 insertions(+), 15 deletions(-)

diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index a4fdf31..e430f1e 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -200,12 +200,11 @@ static int __meminit split_mem_range(struct map_range *mr, int nr_range,
                                     unsigned long end)
 {
        unsigned long start_pfn, end_pfn;
-       unsigned long pos;
+       unsigned long pfn;
        int i;
 
        /* head if not big page alignment ? */
-       start_pfn = PFN_DOWN(start);
-       pos = PFN_PHYS(start_pfn);
+       pfn = start_pfn = PFN_DOWN(start);
 #ifdef CONFIG_X86_32
        /*
         * Don't use a large page for the first 2/4MB of memory
@@ -213,26 +212,26 @@ static int __meminit split_mem_range(struct map_range *mr, int nr_range,
         * and overlapping MTRRs into large pages can cause
         * slowdowns.
         */
-       if (pos == 0)
+       if (pfn == 0)
                end_pfn = PFN_DOWN(PMD_SIZE);
        else
-               end_pfn = PFN_DOWN(round_up(pos, PMD_SIZE));
+               end_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
 #else /* CONFIG_X86_64 */
-       end_pfn = PFN_DOWN(round_up(pos, PMD_SIZE));
+       end_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
 #endif
        if (end_pfn > PFN_DOWN(end))
                end_pfn = PFN_DOWN(end);
        if (start_pfn < end_pfn) {
                nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
-               pos = PFN_PHYS(end_pfn);
+               pfn = end_pfn;
        }
 
        /* big page (2M) range */
-       start_pfn = PFN_DOWN(round_up(pos, PMD_SIZE));
+       start_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
 #ifdef CONFIG_X86_32
        end_pfn = PFN_DOWN(round_down(end, PMD_SIZE));
 #else /* CONFIG_X86_64 */
-       end_pfn = PFN_DOWN(round_up(pos, PUD_SIZE));
+       end_pfn = round_up(pfn, PFN_DOWN(PUD_SIZE));
        if (end_pfn > PFN_DOWN(round_down(end, PMD_SIZE)))
                end_pfn = PFN_DOWN(round_down(end, PMD_SIZE));
 #endif
@@ -240,32 +239,32 @@ static int __meminit split_mem_range(struct map_range *mr, int nr_range,
        if (start_pfn < end_pfn) {
                nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
                                page_size_mask & (1<<PG_LEVEL_2M));
-               pos = PFN_PHYS(end_pfn);
+               pfn = end_pfn;
        }
 
 #ifdef CONFIG_X86_64
        /* big page (1G) range */
-       start_pfn = PFN_DOWN(round_up(pos, PUD_SIZE));
+       start_pfn = round_up(pfn, PFN_DOWN(PUD_SIZE));
        end_pfn = PFN_DOWN(round_down(end, PUD_SIZE));
        if (start_pfn < end_pfn) {
                nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
                                page_size_mask &
                                 ((1<<PG_LEVEL_2M)|(1<<PG_LEVEL_1G)));
-               pos = PFN_PHYS(end_pfn);
+               pfn = end_pfn;
        }
 
        /* tail is not big page (1G) alignment */
-       start_pfn = PFN_DOWN(round_up(pos, PMD_SIZE));
+       start_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
        end_pfn = PFN_DOWN(round_down(end, PMD_SIZE));
        if (start_pfn < end_pfn) {
                nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
                                page_size_mask & (1<<PG_LEVEL_2M));
-               pos = PFN_PHYS(end_pfn);
+               pfn = end_pfn;
        }
 #endif
 
        /* tail is not big page (2M) alignment */
-       start_pfn = PFN_DOWN(pos);
+       start_pfn = pfn;
        end_pfn = PFN_DOWN(end);
        nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
 
-- 
1.7.7

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Reply via email to