From: Mike Rapoport <r...@linux.ibm.com>

for_each_memblock() is used exclusively to iterate over memblock.memory in
a few places that use data from memblock_region rather than the memory
ranges.

Remove type parameter from the for_each_memblock() iterator to improve
encapsulation of memblock internals from its users.

Signed-off-by: Mike Rapoport <r...@linux.ibm.com>
---
 arch/arm64/kernel/setup.c      |  2 +-
 arch/arm64/mm/numa.c           |  2 +-
 arch/mips/netlogic/xlp/setup.c |  2 +-
 include/linux/memblock.h       | 10 +++++++---
 mm/memblock.c                  |  4 ++--
 mm/page_alloc.c                |  8 ++++----
 6 files changed, 16 insertions(+), 12 deletions(-)

diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 93b3844cf442..23da7908cbed 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -217,7 +217,7 @@ static void __init request_standard_resources(void)
        if (!standard_resources)
                panic("%s: Failed to allocate %zu bytes\n", __func__, res_size);
 
-       for_each_memblock(memory, region) {
+       for_each_memblock(region) {
                res = &standard_resources[i++];
                if (memblock_is_nomap(region)) {
                        res->name  = "reserved";
diff --git a/arch/arm64/mm/numa.c b/arch/arm64/mm/numa.c
index 0cbdbcc885fb..08721d2c0b79 100644
--- a/arch/arm64/mm/numa.c
+++ b/arch/arm64/mm/numa.c
@@ -350,7 +350,7 @@ static int __init numa_register_nodes(void)
        struct memblock_region *mblk;
 
        /* Check that valid nid is set to memblks */
-       for_each_memblock(memory, mblk) {
+       for_each_memblock(mblk) {
                int mblk_nid = memblock_get_region_node(mblk);
 
                if (mblk_nid == NUMA_NO_NODE || mblk_nid >= MAX_NUMNODES) {
diff --git a/arch/mips/netlogic/xlp/setup.c b/arch/mips/netlogic/xlp/setup.c
index 1a0fc5b62ba4..e69d9fc468cf 100644
--- a/arch/mips/netlogic/xlp/setup.c
+++ b/arch/mips/netlogic/xlp/setup.c
@@ -70,7 +70,7 @@ static void nlm_fixup_mem(void)
        const int pref_backup = 512;
        struct memblock_region *mem;
 
-       for_each_memblock(memory, mem) {
+       for_each_memblock(mem) {
                memblock_remove(mem->base + mem->size - pref_backup,
                        pref_backup);
        }
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index d70c2835e913..c901cb8ecf92 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -527,9 +527,13 @@ static inline unsigned long memblock_region_reserved_end_pfn(const struct memblo
        return PFN_UP(reg->base + reg->size);
 }
 
-#define for_each_memblock(memblock_type, region)				\
-	for (region = memblock.memblock_type.regions;				\
-	     region < (memblock.memblock_type.regions + memblock.memblock_type.cnt);	\
+/**
+ * for_each_memblock - iterate over registered memory regions
+ * @region: loop variable
+ */
+#define for_each_memblock(region)                                      \
+       for (region = memblock.memory.regions;                          \
+            region < (memblock.memory.regions + memblock.memory.cnt);  \
             region++)
 
 extern void *alloc_large_system_hash(const char *tablename,
diff --git a/mm/memblock.c b/mm/memblock.c
index 2ad5e6e47215..550bb72cf6cb 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -1694,7 +1694,7 @@ static phys_addr_t __init_memblock __find_max_addr(phys_addr_t limit)
         * the memory memblock regions, if the @limit exceeds the total size
         * of those regions, max_addr will keep original value PHYS_ADDR_MAX
         */
-       for_each_memblock(memory, r) {
+       for_each_memblock(r) {
                if (limit <= r->size) {
                        max_addr = r->base + limit;
                        break;
@@ -1864,7 +1864,7 @@ void __init_memblock memblock_trim_memory(phys_addr_t align)
        phys_addr_t start, end, orig_start, orig_end;
        struct memblock_region *r;
 
-       for_each_memblock(memory, r) {
+       for_each_memblock(r) {
                orig_start = r->base;
                orig_end = r->base + r->size;
                start = round_up(orig_start, align);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 95af111d69d3..8a19f46dc86e 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5927,7 +5927,7 @@ overlap_memmap_init(unsigned long zone, unsigned long *pfn)
 
        if (mirrored_kernelcore && zone == ZONE_MOVABLE) {
                if (!r || *pfn >= memblock_region_memory_end_pfn(r)) {
-                       for_each_memblock(memory, r) {
+                       for_each_memblock(r) {
                                if (*pfn < memblock_region_memory_end_pfn(r))
                                        break;
                        }
@@ -6528,7 +6528,7 @@ static unsigned long __init zone_absent_pages_in_node(int nid,
                unsigned long start_pfn, end_pfn;
                struct memblock_region *r;
 
-               for_each_memblock(memory, r) {
+               for_each_memblock(r) {
                        start_pfn = clamp(memblock_region_memory_base_pfn(r),
                                          zone_start_pfn, zone_end_pfn);
                        end_pfn = clamp(memblock_region_memory_end_pfn(r),
@@ -7122,7 +7122,7 @@ static void __init find_zone_movable_pfns_for_nodes(void)
         * options.
         */
        if (movable_node_is_enabled()) {
-               for_each_memblock(memory, r) {
+               for_each_memblock(r) {
                        if (!memblock_is_hotpluggable(r))
                                continue;
 
@@ -7143,7 +7143,7 @@ static void __init find_zone_movable_pfns_for_nodes(void)
        if (mirrored_kernelcore) {
                bool mem_below_4gb_not_mirrored = false;
 
-               for_each_memblock(memory, r) {
+               for_each_memblock(r) {
                        if (memblock_is_mirror(r))
                                continue;
 
-- 
2.26.2

Reply via email to