This overrides arch_get_mappable_range() on the s390 platform and drops the
now-redundant similar check in vmem_add_mapping(). This compensates by adding
a new check in __segment_load() to preserve the existing functionality.

Cc: Heiko Carstens <h...@linux.ibm.com>
Cc: Vasily Gorbik <g...@linux.ibm.com>
Cc: David Hildenbrand <da...@redhat.com>
Cc: linux-s...@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Anshuman Khandual <anshuman.khand...@arm.com>
---
 arch/s390/mm/extmem.c |  5 +++++
 arch/s390/mm/vmem.c   | 13 +++++++++----
 2 files changed, 14 insertions(+), 4 deletions(-)

diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c
index 5060956b8e7d..cc055a78f7b6 100644
--- a/arch/s390/mm/extmem.c
+++ b/arch/s390/mm/extmem.c
@@ -337,6 +337,11 @@ __segment_load (char *name, int do_nonshared, unsigned 
long *addr, unsigned long
                goto out_free_resource;
        }
 
+       if (seg->end + 1 > VMEM_MAX_PHYS || seg->end + 1 < seg->start_addr) {
+               rc = -ERANGE;
+               goto out_resource;
+       }
+
        rc = vmem_add_mapping(seg->start_addr, seg->end - seg->start_addr + 1);
        if (rc)
                goto out_resource;
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index b239f2ba93b0..06dddcc0ce06 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -532,14 +532,19 @@ void vmem_remove_mapping(unsigned long start, unsigned 
long size)
        mutex_unlock(&vmem_mutex);
 }
 
+struct range arch_get_mappable_range(void)
+{
+       struct range memhp_range;
+
+       memhp_range.start = 0;
+       memhp_range.end =  VMEM_MAX_PHYS;
+       return memhp_range;
+}
+
 int vmem_add_mapping(unsigned long start, unsigned long size)
 {
        int ret;
 
-       if (start + size > VMEM_MAX_PHYS ||
-           start + size < start)
-               return -ERANGE;
-
        mutex_lock(&vmem_mutex);
        ret = vmem_add_range(start, size);
        if (ret)
-- 
2.20.1

Reply via email to