On 2021-08-02 09:37, Mike Rapoport wrote:
From: Mike Rapoport <r...@linux.ibm.com>

There are a lot of uses of memblock_find_in_range() along with
memblock_reserve() that date from the times when the memblock allocation APIs
did not exist.

memblock_find_in_range() is the very core of memblock allocations, so any
future changes to its internal behaviour would require updating all of its
users outside memblock.

Replace the calls to memblock_find_in_range() with equivalent calls to
memblock_phys_alloc() and memblock_phys_alloc_range(), and make
memblock_find_in_range() a private method of memblock.

This simplifies the callers, ensures that (unlikely) errors in
memblock_reserve() are handled and improves maintainability of
memblock_find_in_range().
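
For reference, here is a minimal sketch of the conversion pattern (the
base/size/align/start/end variables are illustrative, not taken from any
particular caller):

	/* before: find a free range, then reserve it by hand */
	base = memblock_find_in_range(start, end, size, align);
	if (!base)
		return;
	memblock_reserve(base, size);	/* error return was often ignored */

	/* after: a single call both finds and reserves the range */
	base = memblock_phys_alloc_range(size, align, start, end);
	if (!base)
		return;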

Signed-off-by: Mike Rapoport <r...@linux.ibm.com>
---
v2: don't change error message in arm::reserve_crashkernel(), per Russell
v1: https://lore.kernel.org/lkml/20210730104039.7047-1-r...@kernel.org

 arch/arm/kernel/setup.c           | 18 +++++--------
 arch/arm64/kvm/hyp/reserved_mem.c |  9 +++----
 arch/arm64/mm/init.c              | 36 ++++++++-----------------
 arch/mips/kernel/setup.c          | 14 +++++-----
 arch/riscv/mm/init.c              | 44 ++++++++++---------------------
 arch/s390/kernel/setup.c          | 10 ++++---
 arch/x86/kernel/aperture_64.c     |  5 ++--
 arch/x86/mm/init.c                | 21 +++++++++------
 arch/x86/mm/numa.c                |  5 ++--
 arch/x86/mm/numa_emulation.c      |  5 ++--
 arch/x86/realmode/init.c          |  2 +-
 drivers/acpi/tables.c             |  5 ++--
 drivers/base/arch_numa.c          |  5 +---
 drivers/of/of_reserved_mem.c      | 12 ++++++---
 include/linux/memblock.h          |  2 --
 mm/memblock.c                     |  2 +-
 16 files changed, 78 insertions(+), 117 deletions(-)

diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index f97eb2371672..67f5421b2af7 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -1012,31 +1012,25 @@ static void __init reserve_crashkernel(void)
                unsigned long long lowmem_max = __pa(high_memory - 1) + 1;
                if (crash_max > lowmem_max)
                        crash_max = lowmem_max;
-               crash_base = memblock_find_in_range(CRASH_ALIGN, crash_max,
-                                                   crash_size, CRASH_ALIGN);
+
+               crash_base = memblock_phys_alloc_range(crash_size, CRASH_ALIGN,
+                                                      CRASH_ALIGN, crash_max);
                if (!crash_base) {
pr_err("crashkernel reservation failed - No suitable area found.\n");
                        return;
                }
        } else {
+               unsigned long long crash_max = crash_base + crash_size;
                unsigned long long start;

-               start = memblock_find_in_range(crash_base,
-                                              crash_base + crash_size,
-                                              crash_size, SECTION_SIZE);
+               start = memblock_phys_alloc_range(crash_size, SECTION_SIZE,
+                                                 crash_base, crash_max);
                if (start != crash_base) {
                        pr_err("crashkernel reservation failed - memory is in 
use.\n");
                        return;
                }
        }

-       ret = memblock_reserve(crash_base, crash_size);
-       if (ret < 0) {
- pr_warn("crashkernel reservation failed - memory is in use (0x%lx)\n",
-                       (unsigned long)crash_base);
-               return;
-       }
-
        pr_info("Reserving %ldMB of memory at %ldMB for crashkernel (System
RAM: %ldMB)\n",
                (unsigned long)(crash_size >> 20),
                (unsigned long)(crash_base >> 20),
diff --git a/arch/arm64/kvm/hyp/reserved_mem.c b/arch/arm64/kvm/hyp/reserved_mem.c
index d654921dd09b..578670e3f608 100644
--- a/arch/arm64/kvm/hyp/reserved_mem.c
+++ b/arch/arm64/kvm/hyp/reserved_mem.c
@@ -92,12 +92,10 @@ void __init kvm_hyp_reserve(void)
         * this is unmapped from the host stage-2, and fallback to PAGE_SIZE.
         */
        hyp_mem_size = hyp_mem_pages << PAGE_SHIFT;
-       hyp_mem_base = memblock_find_in_range(0, memblock_end_of_DRAM(),
-                                             ALIGN(hyp_mem_size, PMD_SIZE),
-                                             PMD_SIZE);
+       hyp_mem_base = memblock_phys_alloc(ALIGN(hyp_mem_size, PMD_SIZE),
+                                          PMD_SIZE);
        if (!hyp_mem_base)
-               hyp_mem_base = memblock_find_in_range(0, memblock_end_of_DRAM(),
-                                                     hyp_mem_size, PAGE_SIZE);
+               hyp_mem_base = memblock_phys_alloc(hyp_mem_size, PAGE_SIZE);
        else
                hyp_mem_size = ALIGN(hyp_mem_size, PMD_SIZE);

@@ -105,7 +103,6 @@ void __init kvm_hyp_reserve(void)
                kvm_err("Failed to reserve hyp memory\n");
                return;
        }
-       memblock_reserve(hyp_mem_base, hyp_mem_size);

        kvm_info("Reserved %lld MiB at 0x%llx\n", hyp_mem_size >> 20,
                 hyp_mem_base);
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 8490ed2917ff..d566478a06dd 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -74,6 +74,7 @@ phys_addr_t arm64_dma_phys_limit __ro_after_init;
 static void __init reserve_crashkernel(void)
 {
        unsigned long long crash_base, crash_size;
+       unsigned long crash_max = arm64_dma_phys_limit;
        int ret;

        ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
@@ -84,33 +85,18 @@ static void __init reserve_crashkernel(void)

        crash_size = PAGE_ALIGN(crash_size);

-       if (crash_base == 0) {
-               /* Current arm64 boot protocol requires 2MB alignment */
-               crash_base = memblock_find_in_range(0, arm64_dma_phys_limit,
-                               crash_size, SZ_2M);
-               if (crash_base == 0) {
-                       pr_warn("cannot allocate crashkernel (size:0x%llx)\n",
-                               crash_size);
-                       return;
-               }
-       } else {
-               /* User specifies base address explicitly. */
-               if (!memblock_is_region_memory(crash_base, crash_size)) {
-                       pr_warn("cannot reserve crashkernel: region is not memory\n");
-                       return;
-               }
+       /* User specifies base address explicitly. */
+       if (crash_base)
+               crash_max = crash_base + crash_size;

-               if (memblock_is_region_reserved(crash_base, crash_size)) {
- pr_warn("cannot reserve crashkernel: region overlaps reserved memory\n");
-                       return;
-               }
-
-               if (!IS_ALIGNED(crash_base, SZ_2M)) {
- pr_warn("cannot reserve crashkernel: base address is not 2MB aligned\n");
-                       return;
-               }
+       /* Current arm64 boot protocol requires 2MB alignment */
+       crash_base = memblock_phys_alloc_range(crash_size, SZ_2M,
+                                              crash_base, crash_max);
+       if (!crash_base) {
+               pr_warn("cannot allocate crashkernel (size:0x%llx)\n",
+                       crash_size);
+               return;
        }
-       memblock_reserve(crash_base, crash_size);

        pr_info("crashkernel reserved: 0x%016llx - 0x%016llx (%lld MB)\n",
                crash_base, crash_base + crash_size, crash_size >> 20);
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 23a140327a0b..f979adfd4fc2 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -452,8 +452,9 @@ static void __init mips_parse_crashkernel(void)
                return;

        if (crash_base <= 0) {
-               crash_base = memblock_find_in_range(CRASH_ALIGN, CRASH_ADDR_MAX,
-                                                       crash_size, CRASH_ALIGN);
+               crash_base = memblock_phys_alloc_range(crash_size, CRASH_ALIGN,
+                                                      CRASH_ALIGN,
+                                                      CRASH_ADDR_MAX);
                if (!crash_base) {
pr_warn("crashkernel reservation failed - No suitable area found.\n");
                        return;
@@ -461,8 +462,9 @@ static void __init mips_parse_crashkernel(void)
        } else {
                unsigned long long start;

-               start = memblock_find_in_range(crash_base, crash_base + crash_size,
-                                               crash_size, 1);
+               start = memblock_phys_alloc_range(crash_size, 1,
+                                                 crash_base,
+                                                 crash_base + crash_size);
                if (start != crash_base) {
                        pr_warn("Invalid memory region reserved for crash 
kernel\n");
                        return;
@@ -656,10 +658,6 @@ static void __init arch_mem_init(char **cmdline_p)
        mips_reserve_vmcore();

        mips_parse_crashkernel();
-#ifdef CONFIG_KEXEC
-       if (crashk_res.start != crashk_res.end)
-               memblock_reserve(crashk_res.start, resource_size(&crashk_res));
-#endif
        device_tree_init();

        /*
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index a14bf3910eec..88649337c568 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -812,38 +812,22 @@ static void __init reserve_crashkernel(void)

        crash_size = PAGE_ALIGN(crash_size);

-       if (crash_base == 0) {
-               /*
-                * Current riscv boot protocol requires 2MB alignment for
-                * RV64 and 4MB alignment for RV32 (hugepage size)
-                */
-               crash_base = memblock_find_in_range(search_start, search_end,
-                                                   crash_size, PMD_SIZE);
-
-               if (crash_base == 0) {
-                       pr_warn("crashkernel: couldn't allocate %lldKB\n",
-                               crash_size >> 10);
-                       return;
-               }
-       } else {
-               /* User specifies base address explicitly. */
-               if (!memblock_is_region_memory(crash_base, crash_size)) {
-                       pr_warn("crashkernel: requested region is not memory\n");
-                       return;
-               }
-
-               if (memblock_is_region_reserved(crash_base, crash_size)) {
-                       pr_warn("crashkernel: requested region is reserved\n");
-                       return;
-               }
-
+       if (crash_base) {
+               search_start = crash_base;
+               search_end = crash_base + crash_size;
+       }

-               if (!IS_ALIGNED(crash_base, PMD_SIZE)) {
-                       pr_warn("crashkernel: requested region is misaligned\n");
-                       return;
-               }
+       /*
+        * Current riscv boot protocol requires 2MB alignment for
+        * RV64 and 4MB alignment for RV32 (hugepage size)
+        */
+       crash_base = memblock_phys_alloc_range(crash_size, PMD_SIZE,
+                                              search_start, search_end);
+       if (crash_base == 0) {
+               pr_warn("crashkernel: couldn't allocate %lldKB\n",
+                       crash_size >> 10);
+               return;
        }
-       memblock_reserve(crash_base, crash_size);

        pr_info("crashkernel: reserved 0x%016llx - 0x%016llx (%lld MB)\n",
                crash_base, crash_base + crash_size, crash_size >> 20);

For the riscv part:

Acked-by: Nick Kossifidis <m...@ics.forth.gr>