Switch to memblock interfaces for early memory allocator
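
Convert the early memory allocations in mm/percpu.c from the bootmem
interfaces (alloc_bootmem(), alloc_bootmem_nopanic(), __alloc_bootmem(),
__alloc_bootmem_nopanic() and free_bootmem()) to the corresponding
memblock interfaces, so that the percpu first-chunk setup code no longer
depends on the bootmem allocator.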

Cc: Yinghai Lu <ying...@kernel.org>
Cc: Tejun Heo <t...@kernel.org>
Cc: Andrew Morton <a...@linux-foundation.org>

Signed-off-by: Santosh Shilimkar <santosh.shilim...@ti.com>
---
 mm/percpu.c |   39 +++++++++++++++++++++++----------------
 1 file changed, 23 insertions(+), 16 deletions(-)
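
Note for reviewers: the memblock_early_alloc*() and memblock_free_early()
wrappers used below are assumed to be introduced earlier in this series;
they are not defined by this patch. A minimal sketch of the assumed
semantics, expressed in terms of the existing memblock core (hypothetical,
for review convenience only):

	/* Hypothetical sketch -- not part of this patch. */
	static inline void * __init memblock_early_alloc(size_t size)
	{
		/* like alloc_bootmem(): cache-aligned, panics on failure */
		return __va(memblock_alloc(size, SMP_CACHE_BYTES));
	}

	static inline void * __init memblock_early_alloc_pages(size_t size)
	{
		/* like alloc_bootmem_pages(): page-aligned */
		return __va(memblock_alloc(size, PAGE_SIZE));
	}

	static inline void __init memblock_free_early(phys_addr_t base,
						      phys_addr_t size)
	{
		/* like free_bootmem(): return the range to memblock */
		memblock_free(base, size);
	}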

diff --git a/mm/percpu.c b/mm/percpu.c
index 8c8e08f..0b2117f 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1063,7 +1063,7 @@ struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
                          __alignof__(ai->groups[0].cpu_map[0]));
        ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);
 
-       ptr = alloc_bootmem_nopanic(PFN_ALIGN(ai_size));
+       ptr = memblock_early_alloc_pages_nopanic(PFN_ALIGN(ai_size));
        if (!ptr)
                return NULL;
        ai = ptr;
@@ -1088,7 +1088,7 @@ struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
  */
 void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai)
 {
-       free_bootmem(__pa(ai), ai->__ai_size);
+       memblock_free_early(__pa(ai), ai->__ai_size);
 }
 
 /**
@@ -1246,10 +1246,12 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
        PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0);
 
        /* process group information and build config tables accordingly */
-       group_offsets = alloc_bootmem(ai->nr_groups * sizeof(group_offsets[0]));
-       group_sizes = alloc_bootmem(ai->nr_groups * sizeof(group_sizes[0]));
-       unit_map = alloc_bootmem(nr_cpu_ids * sizeof(unit_map[0]));
-       unit_off = alloc_bootmem(nr_cpu_ids * sizeof(unit_off[0]));
+       group_offsets = memblock_early_alloc(ai->nr_groups *
+                                            sizeof(group_offsets[0]));
+       group_sizes = memblock_early_alloc(ai->nr_groups *
+                                          sizeof(group_sizes[0]));
+       unit_map = memblock_early_alloc(nr_cpu_ids * sizeof(unit_map[0]));
+       unit_off = memblock_early_alloc(nr_cpu_ids * sizeof(unit_off[0]));
 
        for (cpu = 0; cpu < nr_cpu_ids; cpu++)
                unit_map[cpu] = UINT_MAX;
@@ -1311,7 +1313,7 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
         * empty chunks.
         */
        pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2;
-       pcpu_slot = alloc_bootmem(pcpu_nr_slots * sizeof(pcpu_slot[0]));
+       pcpu_slot = memblock_early_alloc(pcpu_nr_slots * sizeof(pcpu_slot[0]));
        for (i = 0; i < pcpu_nr_slots; i++)
                INIT_LIST_HEAD(&pcpu_slot[i]);
 
@@ -1322,7 +1324,7 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
         * covers static area + reserved area (mostly used for module
         * static percpu allocation).
         */
-       schunk = alloc_bootmem(pcpu_chunk_struct_size);
+       schunk = memblock_early_alloc(pcpu_chunk_struct_size);
        INIT_LIST_HEAD(&schunk->list);
        schunk->base_addr = base_addr;
        schunk->map = smap;
@@ -1346,7 +1348,7 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
 
        /* init dynamic chunk if necessary */
        if (dyn_size) {
-               dchunk = alloc_bootmem(pcpu_chunk_struct_size);
+               dchunk = memblock_early_alloc(pcpu_chunk_struct_size);
                INIT_LIST_HEAD(&dchunk->list);
                dchunk->base_addr = base_addr;
                dchunk->map = dmap;
@@ -1626,7 +1628,7 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
        size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
        areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *));
 
-       areas = alloc_bootmem_nopanic(areas_size);
+       areas = memblock_early_alloc_pages_nopanic(areas_size);
        if (!areas) {
                rc = -ENOMEM;
                goto out_free;
@@ -1711,7 +1713,7 @@ out_free_areas:
 out_free:
        pcpu_free_alloc_info(ai);
        if (areas)
-               free_bootmem(__pa(areas), areas_size);
+               memblock_free_early(__pa(areas), areas_size);
        return rc;
 }
 #endif /* BUILD_EMBED_FIRST_CHUNK */
@@ -1759,7 +1761,7 @@ int __init pcpu_page_first_chunk(size_t reserved_size,
        /* unaligned allocations can't be freed, round up to page size */
        pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() *
                               sizeof(pages[0]));
-       pages = alloc_bootmem(pages_size);
+       pages = memblock_early_alloc_pages(pages_size);
 
        /* allocate pages */
        j = 0;
@@ -1822,7 +1824,7 @@ enomem:
                free_fn(page_address(pages[j]), PAGE_SIZE);
        rc = -ENOMEM;
 out_free_ar:
-       free_bootmem(__pa(pages), pages_size);
+       memblock_free_early(__pa(pages), pages_size);
        pcpu_free_alloc_info(ai);
        return rc;
 }
@@ -1847,12 +1849,14 @@ EXPORT_SYMBOL(__per_cpu_offset);
 static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size,
                                       size_t align)
 {
-       return __alloc_bootmem_nopanic(size, align, __pa(MAX_DMA_ADDRESS));
+       return memblock_early_alloc_try_nid_nopanic(MAX_NUMNODES, size, align,
+                                                   __pa(MAX_DMA_ADDRESS),
+                                                   BOOTMEM_ALLOC_ACCESSIBLE);
 }
 
 static void __init pcpu_dfl_fc_free(void *ptr, size_t size)
 {
-       free_bootmem(__pa(ptr), size);
+       memblock_free_early(__pa(ptr), size);
 }
 
 void __init setup_per_cpu_areas(void)
@@ -1895,7 +1899,10 @@ void __init setup_per_cpu_areas(void)
        void *fc;
 
        ai = pcpu_alloc_alloc_info(1, 1);
-       fc = __alloc_bootmem(unit_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
+       fc = memblock_early_alloc_try_nid_nopanic(MAX_NUMNODES, unit_size,
+                                                 PAGE_SIZE,
+                                                 __pa(MAX_DMA_ADDRESS),
+                                                 BOOTMEM_ALLOC_ACCESSIBLE);
        if (!ai || !fc)
                panic("Failed to allocate memory for percpu areas.");
        /* kmemleak tracks the percpu allocations separately */
-- 
1.7.9.5
