Hi Pavel,

On 06/29/18 at 11:09pm, Pavel Tatashin wrote:
> Change sparse_init() to only find the pnum ranges that belong to a specific
> node and call sparse_init_nid() for each such range.
> 
> Delete all the code that became obsolete with this change.
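
For anyone skimming the thread: the new sparse_init() below run-length-groups
consecutive present sections by node and hands each group to
sparse_init_nid(). A minimal standalone sketch of the same pattern; the
section-to-node data is made up for illustration, and the hypothetical
init_range() stands in for sparse_init_nid():

    #include <stdio.h>

    /* hypothetical stand-in for sparse_init_nid(): handles sections
     * [pnum_begin, pnum_end), which all belong to node nid */
    static void init_range(int nid, int pnum_begin, int pnum_end,
                           int map_count)
    {
            printf("node %d: sections [%d, %d), %d maps\n",
                   nid, pnum_begin, pnum_end, map_count);
    }

    int main(void)
    {
            /* made-up node id of each present section */
            int section_nid[] = { 0, 0, 0, 1, 1, 2 };
            int nr = sizeof(section_nid) / sizeof(section_nid[0]);
            int pnum_begin = 0, nid_begin = section_nid[0], map_count = 1;

            for (int pnum_end = 1; pnum_end < nr; pnum_end++) {
                    if (section_nid[pnum_end] == nid_begin) {
                            map_count++;
                            continue;
                    }
                    init_range(nid_begin, pnum_begin, pnum_end, map_count);
                    nid_begin = section_nid[pnum_end];
                    pnum_begin = pnum_end;
                    map_count = 1;
            }
            /* cover the last node's range */
            init_range(nid_begin, pnum_begin, nr, map_count);
            return 0;
    }

This prints one line per run, e.g. "node 0: sections [0, 3), 3 maps",
matching how the reworked sparse_init() hands each node's contiguous run
of present sections to sparse_init_nid() in a single call.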

> @@ -617,87 +491,24 @@ void __init sparse_init_nid(int nid, unsigned long pnum_begin,
>   */
>  void __init sparse_init(void)
>  {
> -     unsigned long pnum;
> -     struct page *map;
> -     struct page **map_map;
> -     unsigned long *usemap;
> -     unsigned long **usemap_map;
> -     int size, size2;
> -     int nr_consumed_maps = 0;
> -
> -     /* see include/linux/mmzone.h 'struct mem_section' definition */
> -     BUILD_BUG_ON(!is_power_of_2(sizeof(struct mem_section)));
> +     unsigned long pnum_begin = first_present_section_nr();
> +     int nid_begin = sparse_early_nid(__nr_to_section(pnum_begin));
> +     unsigned long pnum_end, map_count = 1;
>  
> -     /* Setup pageblock_order for HUGETLB_PAGE_SIZE_VARIABLE */
> -     set_pageblock_order();

I am not sure it is OK to remove the set_pageblock_order() call here. What
happens if CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is enabled? usemap_size()
depends on the value of 'pageblock_order', which set_pageblock_order()
computes at boot.
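
To illustrate the dependency, roughly the relevant pieces, trimmed from
include/linux/mmzone.h and mm/sparse.c (not the complete definitions):

    /* include/linux/mmzone.h */
    #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
    /* runtime value, assigned by set_pageblock_order(); zero until then */
    extern unsigned int pageblock_order;
    #endif

    /* pageblock flag bits per section shrink as pageblock_order grows */
    #define SECTION_BLOCKFLAGS_BITS \
            ((1UL << (PFN_SECTION_SHIFT - pageblock_order)) * NR_PAGEBLOCK_BITS)

    /* mm/sparse.c */
    unsigned long usemap_size(void)
    {
            return BITS_TO_LONGS(SECTION_BLOCKFLAGS_BITS) * sizeof(unsigned long);
    }

So if usemap_size() is called before set_pageblock_order(), it sizes the
usemap with pageblock_order still 0, which will not match the pageblock
geometry the rest of mm uses after boot.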

Thanks
Baoquan

> +     for_each_present_section_nr(pnum_begin + 1, pnum_end) {
> +             int nid = sparse_early_nid(__nr_to_section(pnum_end));
>  
> -     /*
> -      * map is using big page (aka 2M in x86 64 bit)
> -      * usemap is less one page (aka 24 bytes)
> -      * so alloc 2M (with 2M align) and 24 bytes in turn will
> -      * make next 2M slip to one more 2M later.
> -      * then in big system, the memory will have a lot of holes...
> -      * here try to allocate 2M pages continuously.
> -      *
> -      * powerpc need to call sparse_init_one_section right after each
> -      * sparse_early_mem_map_alloc, so allocate usemap_map at first.
> -      */
> -     size = sizeof(unsigned long *) * nr_present_sections;
> -     usemap_map = memblock_virt_alloc(size, 0);
> -     if (!usemap_map)
> -             panic("can not allocate usemap_map\n");
> -     alloc_usemap_and_memmap(sparse_early_usemaps_alloc_node,
> -                             (void *)usemap_map,
> -                             sizeof(usemap_map[0]));
> -
> -     size2 = sizeof(struct page *) * nr_present_sections;
> -     map_map = memblock_virt_alloc(size2, 0);
> -     if (!map_map)
> -             panic("can not allocate map_map\n");
> -     alloc_usemap_and_memmap(sparse_early_mem_maps_alloc_node,
> -                             (void *)map_map,
> -                             sizeof(map_map[0]));
> -
> -     /* The numner of present sections stored in nr_present_sections
> -      * are kept the same since mem sections are marked as present in
> -      * memory_present(). In this for loop, we need check which sections
> -      * failed to allocate memmap or usemap, then clear its
> -      * ->section_mem_map accordingly. During this process, we need
> -      * increase 'nr_consumed_maps' whether its allocation of memmap
> -      * or usemap failed or not, so that after we handle the i-th
> -      * memory section, can get memmap and usemap of (i+1)-th section
> -      * correctly. */
> -     for_each_present_section_nr(0, pnum) {
> -             struct mem_section *ms;
> -
> -             if (nr_consumed_maps >= nr_present_sections) {
> -                     pr_err("nr_consumed_maps goes beyond nr_present_sections\n");
> -                     break;
> -             }
> -             ms = __nr_to_section(pnum);
> -             usemap = usemap_map[nr_consumed_maps];
> -             if (!usemap) {
> -                     ms->section_mem_map = 0;
> -                     nr_consumed_maps++;
> -                     continue;
> -             }
> -
> -             map = map_map[nr_consumed_maps];
> -             if (!map) {
> -                     ms->section_mem_map = 0;
> -                     nr_consumed_maps++;
> +             if (nid == nid_begin) {
> +                     map_count++;
>                       continue;
>               }
> -
> -             sparse_init_one_section(__nr_to_section(pnum), pnum, map,
> -                                                             usemap);
> -             nr_consumed_maps++;
> +             sparse_init_nid(nid_begin, pnum_begin, pnum_end, map_count);
> +             nid_begin = nid;
> +             pnum_begin = pnum_end;
> +             map_count = 1;
>       }
> -
> +     sparse_init_nid(nid_begin, pnum_begin, pnum_end, map_count);
>       vmemmap_populate_print_last();
> -
> -     memblock_free_early(__pa(map_map), size2);
> -     memblock_free_early(__pa(usemap_map), size);
>  }
>  
>  #ifdef CONFIG_MEMORY_HOTPLUG
> -- 
> 2.18.0
> 
