On 12/9/20 6:16 AM, John Hubbard wrote:
> On 12/8/20 9:28 AM, Joao Martins wrote:
>> Replace vmem_altmap with an vmem_context argument. That let us
>> express how the vmemmap is gonna be initialized e.g. passing
>> flags and a page size for reusing pages upon initializing the
>> vmemmap.
>
> How about this instead:
>
> Replace the vmem_altmap argument with a vmem_context argument that
> contains vmem_altmap for now. Subsequent patches will add additional
> member elements to vmem_context, such as flags and page size.
>
> No behavior changes are intended.
>
> ?
>
Yep, it's better that way. Thanks.
>>
>> Signed-off-by: Joao Martins <joao.m.mart...@oracle.com>
>> ---
>> include/linux/memory_hotplug.h | 6 +++++-
>> include/linux/mm.h | 2 +-
>> mm/memory_hotplug.c | 3 ++-
>> mm/sparse-vmemmap.c | 6 +++++-
>> mm/sparse.c | 16 ++++++++--------
>> 5 files changed, 21 insertions(+), 12 deletions(-)
>>
>> diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
>> index 551093b74596..73f8bcbb58a4 100644
>> --- a/include/linux/memory_hotplug.h
>> +++ b/include/linux/memory_hotplug.h
>> @@ -81,6 +81,10 @@ struct mhp_params {
>> pgprot_t pgprot;
>> };
>>
>> +struct vmem_context {
>> + struct vmem_altmap *altmap;
>> +};
>> +
>> /*
>> * Zone resizing functions
>> *
>> @@ -353,7 +357,7 @@ extern void remove_pfn_range_from_zone(struct zone *zone,
>> unsigned long nr_pages);
>> extern bool is_memblock_offlined(struct memory_block *mem);
>> extern int sparse_add_section(int nid, unsigned long pfn,
>> - unsigned long nr_pages, struct vmem_altmap *altmap);
>> + unsigned long nr_pages, struct vmem_context *ctx);
>> extern void sparse_remove_section(struct mem_section *ms,
>> unsigned long pfn, unsigned long nr_pages,
>> unsigned long map_offset, struct vmem_altmap *altmap);
>> diff --git a/include/linux/mm.h b/include/linux/mm.h
>> index db6ae4d3fb4e..2eb44318bb2d 100644
>> --- a/include/linux/mm.h
>> +++ b/include/linux/mm.h
>> @@ -3000,7 +3000,7 @@ static inline void print_vma_addr(char *prefix,
>> unsigned long rip)
>>
>> void *sparse_buffer_alloc(unsigned long size);
>> struct page * __populate_section_memmap(unsigned long pfn,
>> - unsigned long nr_pages, int nid, struct vmem_altmap *altmap);
>> + unsigned long nr_pages, int nid, struct vmem_context *ctx);
>> pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
>> p4d_t *vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node);
>> pud_t *vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node);
>> diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
>> index 63b2e46b6555..f8870c53fe5e 100644
>> --- a/mm/memory_hotplug.c
>> +++ b/mm/memory_hotplug.c
>> @@ -313,6 +313,7 @@ int __ref __add_pages(int nid, unsigned long pfn,
>> unsigned long nr_pages,
>> unsigned long cur_nr_pages;
>> int err;
>> struct vmem_altmap *altmap = params->altmap;
>> + struct vmem_context ctx = { .altmap = params->altmap };
>
> OK, so this is the one place I can see where ctx is set up. And it's never
> null.
> Let's remember that point...
>
(...)
>>
>> if (WARN_ON_ONCE(!params->pgprot.pgprot))
>> return -EINVAL;
>> @@ -341,7 +342,7 @@ int __ref __add_pages(int nid, unsigned long pfn,
>> unsigned long nr_pages,
>> /* Select all remaining pages up to the next section boundary */
>> cur_nr_pages = min(end_pfn - pfn,
>> SECTION_ALIGN_UP(pfn + 1) - pfn);
>> - err = sparse_add_section(nid, pfn, cur_nr_pages, altmap);
>> + err = sparse_add_section(nid, pfn, cur_nr_pages, &ctx);
>> if (err)
>> break;
>> cond_resched();
>> diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
>> index 16183d85a7d5..bcda68ba1381 100644
>> --- a/mm/sparse-vmemmap.c
>> +++ b/mm/sparse-vmemmap.c
>> @@ -249,15 +249,19 @@ int __meminit vmemmap_populate_basepages(unsigned long
>> start, unsigned long end,
>> }
>>
>> struct page * __meminit __populate_section_memmap(unsigned long pfn,
>> - unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
>> + unsigned long nr_pages, int nid, struct vmem_context *ctx)
>> {
>> unsigned long start = (unsigned long) pfn_to_page(pfn);
>> unsigned long end = start + nr_pages * sizeof(struct page);
>> + struct vmem_altmap *altmap = NULL;
>>
>> if (WARN_ON_ONCE(!IS_ALIGNED(pfn, PAGES_PER_SUBSECTION) ||
>> !IS_ALIGNED(nr_pages, PAGES_PER_SUBSECTION)))
>> return NULL;
>>
>> + if (ctx)
>
> But...ctx can never be null, right?
>
Indeed.
This is an artifact of an older version of this patch, in which the passed
parameter could be null.
> I didn't spot any other issues, though.
>
> thanks,
>
_______________________________________________
Linux-nvdimm mailing list -- linux-nvdimm@lists.01.org
To unsubscribe send an email to linux-nvdimm-le...@lists.01.org