The get_padding() helper is used only twice, and its caller does bit
shifts anyway, so open coding the shift does not bury any detail worth
abstracting away.
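
For reference, here is a minimal stand-alone sketch (not kernel code,
purely illustrative) of the arithmetic being open coded, assuming
TB_SHIFT is 40, i.e. 1 TB == 2^40 bytes:

/*
 * Hypothetical illustration only: the removed get_padding() merely
 * converted a region size in terabytes to bytes by shifting.
 */
#include <stdio.h>

#define TB_SHIFT 40

int main(void)
{
	unsigned long long size_tb = 1;			/* e.g. one 1 TB region */
	unsigned long long padding = size_tb << TB_SHIFT;	/* bytes */

	printf("%llu TB = %llu bytes\n", size_tb, padding);
	return 0;
}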

Signed-off-by: Baoquan He <b...@redhat.com>
Acked-by: Kees Cook <keesc...@chromium.org>
---
 arch/x86/mm/kaslr.c | 10 ++--------
 1 file changed, 2 insertions(+), 8 deletions(-)

diff --git a/arch/x86/mm/kaslr.c b/arch/x86/mm/kaslr.c
index 5debf82ab06a..d7ea6b252594 100644
--- a/arch/x86/mm/kaslr.c
+++ b/arch/x86/mm/kaslr.c
@@ -92,12 +92,6 @@ static __initdata struct kaslr_memory_region {
        { &vmemmap_base, 1 },
 };
 
-/* Get size in bytes used by the memory region */
-static inline unsigned long get_padding(struct kaslr_memory_region *region)
-{
-       return (region->size_tb << TB_SHIFT);
-}
-
 /*
  * Apply no randomization if KASLR was disabled at boot or if KASAN
  * is enabled. KASAN shadow mappings rely on regions being PGD aligned.
@@ -155,7 +149,7 @@ void __init kernel_randomize_memory(void)
        /* Calculate entropy available between regions */
        remain_entropy = vaddr_end - vaddr_start;
        for (i = 0; i < ARRAY_SIZE(kaslr_regions); i++)
-               remain_entropy -= get_padding(&kaslr_regions[i]);
+               remain_entropy -= kaslr_regions[i].size_tb << TB_SHIFT;
 
        prandom_seed_state(&rand_state, kaslr_get_random_long("Memory"));
 
@@ -176,7 +170,7 @@ void __init kernel_randomize_memory(void)
                 * Jump the region and add a minimum padding based on
                 * randomization alignment.
                 */
-               vaddr += get_padding(&kaslr_regions[i]);
+               vaddr += kaslr_regions[i].size_tb << TB_SHIFT;
                vaddr = round_up(vaddr + 1, PUD_SIZE);
                remain_entropy -= entropy;
        }
-- 
2.17.2
