On 1/19/21 5:51 PM, David Hildenbrand wrote:
> On 18.01.21 14:12, Anshuman Khandual wrote:
>> This introduces memhp_range_allowed() which can be called from various
>> memory hotplug paths to prevalidate, with the platform, the address range
>> being added. memhp_range_allowed() in turn calls memhp_get_pluggable_range()
>> which provides the applicable address range depending on whether a linear
>> mapping is required or not. For ranges that do require a linear mapping, it
>> calls a new arch callback arch_get_mappable_range() which the platform can
>> override. This new callback gives the platform an opportunity to configure
>> acceptable memory hotplug address ranges in case there are constraints.
>>
>> This mechanism will help prevent platform-specific errors deep down during
>> hotplug calls. It drops the now redundant check_hotplug_memory_addressable()
>> check in __add_pages() but instead adds a VM_BUG_ON() check which would
> 
> In this patch, you keep the __add_pages() checks. But as discussed, we
> could perform that check in mm/memremap.c:pagemap_range() instead and
> convert it to a VM_BUG_ON().

Just to be sure, will the following change achieve what you are
suggesting here? After this change, pagemap_range() will again be
the same as in the V1 series.
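For context, the prevalidation this relies on boils down to roughly the
following (a simplified sketch of the helpers proposed in this series, not
the exact code; error reporting is elided, struct range comes from
include/linux/range.h):

/* Simplified sketch of the series' helpers, not the exact code */
struct range memhp_get_pluggable_range(bool need_mapping)
{
	const u64 max_phys = (1ULL << MAX_PHYSMEM_BITS) - 1;
	struct range range;

	if (need_mapping) {
		/* Platforms with constraints override this callback */
		range = arch_get_mappable_range();
		range.end = min_t(u64, range.end, max_phys);
	} else {
		/* No linear mapping needed, anything addressable is fine */
		range.start = 0;
		range.end = max_phys;
	}
	return range;
}

bool memhp_range_allowed(u64 start, u64 size, bool need_mapping)
{
	struct range range = memhp_get_pluggable_range(need_mapping);
	u64 end = start + size;

	return start < end && start >= range.start && (end - 1) <= range.end;
}

So in pagemap_range() below, !is_private selects the linear-mapping
constrained range, because MEMORY_DEVICE_PRIVATE memory is the one case
that never gets mapped into the kernel's linear map.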

---
 mm/memory_hotplug.c |  3 +--
 mm/memremap.c       | 14 +++++++-------
 2 files changed, 8 insertions(+), 9 deletions(-)

diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 46faa914aa25..10d4ec8f349c 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -304,8 +304,7 @@ int __ref __add_pages(int nid, unsigned long pfn, unsigned long nr_pages,
        if (WARN_ON_ONCE(!params->pgprot.pgprot))
                return -EINVAL;
 
-       if (!memhp_range_allowed(PFN_PHYS(pfn), nr_pages * PAGE_SIZE, false))
-               return -E2BIG;
+       VM_BUG_ON(!memhp_range_allowed(PFN_PHYS(pfn), nr_pages * PAGE_SIZE, false));
 
        if (altmap) {
                /*
diff --git a/mm/memremap.c b/mm/memremap.c
index e15b13736f6a..26c1825756cc 100644
--- a/mm/memremap.c
+++ b/mm/memremap.c
@@ -185,6 +185,7 @@ static void dev_pagemap_percpu_release(struct percpu_ref *ref)
 static int pagemap_range(struct dev_pagemap *pgmap, struct mhp_params *params,
                int range_id, int nid)
 {
+       const bool is_private = pgmap->type == MEMORY_DEVICE_PRIVATE;
        struct range *range = &pgmap->ranges[range_id];
        struct dev_pagemap *conflict_pgmap;
        int error, is_ram;
@@ -230,6 +231,11 @@ static int pagemap_range(struct dev_pagemap *pgmap, struct mhp_params *params,
        if (error)
                goto err_pfn_remap;
 
+       if (!memhp_range_allowed(range->start, range_len(range), !is_private)) {
+               error = -ERANGE;
+               goto err_pfn_remap;
+       }
+
        mem_hotplug_begin();
 
        /*
@@ -243,7 +249,7 @@ static int pagemap_range(struct dev_pagemap *pgmap, struct mhp_params *params,
         * the CPU, we do want the linear mapping and thus use
         * arch_add_memory().
         */
-       if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
+       if (is_private) {
                error = add_pages(nid, PHYS_PFN(range->start),
                                PHYS_PFN(range_len(range)), params);
        } else {
@@ -253,12 +259,6 @@ static int pagemap_range(struct dev_pagemap *pgmap, struct mhp_params *params,
                        goto err_kasan;
                }
 
-               if (!memhp_range_allowed(range->start, range_len(range), true)) {
-                       error = -ERANGE;
-                       mem_hotplug_done();
-                       goto err_add_memory;
-               }
-
                error = arch_add_memory(nid, range->start, range_len(range),
                                        params);
        }
-- 
2.20.1
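
With the above in place, a platform with hotplug address constraints only
needs to override the arch callback. A minimal sketch of such an override
(hypothetical, not taken from this series; ARCH_MAX_MAPPABLE_PHYS stands in
for whatever limit the platform actually has, e.g. the end of its linear
mapping area):

/* Hypothetical arch override, for illustration only. The generic
 * __weak default would allow the entire addressable range, while
 * a constrained platform clamps it. ARCH_MAX_MAPPABLE_PHYS is a
 * stand-in for the platform's real limit, not an existing macro.
 */
struct range arch_get_mappable_range(void)
{
	struct range range = {
		.start = 0,
		.end   = ARCH_MAX_MAPPABLE_PHYS - 1,
	};

	return range;
}

Ranges that require the linear mapping would then be rejected up front by
memhp_range_allowed() instead of failing deep inside arch_add_memory().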
