Let's factor out determining the minimum alignment requirement for CMA
and add a helpful comment.

No functional change intended.

Signed-off-by: David Hildenbrand <david@redhat.com>
---
 arch/powerpc/include/asm/fadump-internal.h |  5 -----
 arch/powerpc/kernel/fadump.c               |  2 +-
 drivers/of/of_reserved_mem.c               |  9 +++------
 include/linux/cma.h                        |  9 +++++++++
 kernel/dma/contiguous.c                    |  4 +---
 mm/cma.c                                   | 20 +++++---------------
 6 files changed, 19 insertions(+), 30 deletions(-)

diff --git a/arch/powerpc/include/asm/fadump-internal.h b/arch/powerpc/include/asm/fadump-internal.h
index 52189928ec08..81bcb9abb371 100644
--- a/arch/powerpc/include/asm/fadump-internal.h
+++ b/arch/powerpc/include/asm/fadump-internal.h
@@ -19,11 +19,6 @@
 
 #define memblock_num_regions(memblock_type)    (memblock.memblock_type.cnt)
 
-/* Alignment per CMA requirement. */
-#define FADUMP_CMA_ALIGNMENT   (PAGE_SIZE <<                           \
-                                max_t(unsigned long, MAX_ORDER - 1,    \
-                                pageblock_order))
-
 /* FAD commands */
 #define FADUMP_REGISTER                        1
 #define FADUMP_UNREGISTER              2
diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
index d03e488cfe9c..7eb67201ea41 100644
--- a/arch/powerpc/kernel/fadump.c
+++ b/arch/powerpc/kernel/fadump.c
@@ -544,7 +544,7 @@ int __init fadump_reserve_mem(void)
                if (!fw_dump.nocma) {
                        fw_dump.boot_memory_size =
                                ALIGN(fw_dump.boot_memory_size,
-                                     FADUMP_CMA_ALIGNMENT);
+                                     CMA_MIN_ALIGNMENT_BYTES);
                }
 #endif
 
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
index 9c0fb962c22b..75caa6f5d36f 100644
--- a/drivers/of/of_reserved_mem.c
+++ b/drivers/of/of_reserved_mem.c
@@ -22,6 +22,7 @@
 #include <linux/slab.h>
 #include <linux/memblock.h>
 #include <linux/kmemleak.h>
+#include <linux/cma.h>
 
 #include "of_private.h"
 
@@ -116,12 +117,8 @@ static int __init __reserved_mem_alloc_size(unsigned long node,
        if (IS_ENABLED(CONFIG_CMA)
            && of_flat_dt_is_compatible(node, "shared-dma-pool")
            && of_get_flat_dt_prop(node, "reusable", NULL)
-           && !nomap) {
-               unsigned long order =
-                       max_t(unsigned long, MAX_ORDER - 1, pageblock_order);
-
-               align = max(align, (phys_addr_t)PAGE_SIZE << order);
-       }
+           && !nomap)
+               align = max_t(phys_addr_t, align, CMA_MIN_ALIGNMENT_BYTES);
 
        prop = of_get_flat_dt_prop(node, "alloc-ranges", &len);
        if (prop) {
diff --git a/include/linux/cma.h b/include/linux/cma.h
index bd801023504b..75fe188ec4a1 100644
--- a/include/linux/cma.h
+++ b/include/linux/cma.h
@@ -20,6 +20,15 @@
 
 #define CMA_MAX_NAME 64
 
+/*
+ * TODO: once the buddy -- especially pageblock merging and alloc_contig_range()
+ * -- can deal with only some pageblocks of a higher-order page being
+ *  MIGRATE_CMA, we can use pageblock_nr_pages.
+ */
+#define CMA_MIN_ALIGNMENT_PAGES max_t(phys_addr_t, MAX_ORDER_NR_PAGES, \
+                                     pageblock_nr_pages)
+#define CMA_MIN_ALIGNMENT_BYTES (PAGE_SIZE * CMA_MIN_ALIGNMENT_PAGES)
+
 struct cma;
 
 extern unsigned long totalcma_pages;
diff --git a/kernel/dma/contiguous.c b/kernel/dma/contiguous.c
index 3d63d91cba5c..6ea80ae42622 100644
--- a/kernel/dma/contiguous.c
+++ b/kernel/dma/contiguous.c
@@ -399,8 +399,6 @@ static const struct reserved_mem_ops rmem_cma_ops = {
 
 static int __init rmem_cma_setup(struct reserved_mem *rmem)
 {
-       phys_addr_t align = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);
-       phys_addr_t mask = align - 1;
        unsigned long node = rmem->fdt_node;
        bool default_cma = of_get_flat_dt_prop(node, "linux,cma-default", NULL);
        struct cma *cma;
@@ -416,7 +414,7 @@ static int __init rmem_cma_setup(struct reserved_mem *rmem)
            of_get_flat_dt_prop(node, "no-map", NULL))
                return -EINVAL;
 
-       if ((rmem->base & mask) || (rmem->size & mask)) {
+       if (!IS_ALIGNED(rmem->base | rmem->size, CMA_MIN_ALIGNMENT_BYTES)) {
                pr_err("Reserved memory: incorrect alignment of CMA region\n");
                return -EINVAL;
        }
diff --git a/mm/cma.c b/mm/cma.c
index bc9ca8f3c487..5a2cd5851658 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -168,7 +168,6 @@ int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
                                 struct cma **res_cma)
 {
        struct cma *cma;
-       phys_addr_t alignment;
 
        /* Sanity checks */
        if (cma_area_count == ARRAY_SIZE(cma_areas)) {
@@ -179,15 +178,12 @@ int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
        if (!size || !memblock_is_region_reserved(base, size))
                return -EINVAL;
 
-       /* ensure minimal alignment required by mm core */
-       alignment = PAGE_SIZE <<
-                       max_t(unsigned long, MAX_ORDER - 1, pageblock_order);
-
        /* alignment should be aligned with order_per_bit */
-       if (!IS_ALIGNED(alignment >> PAGE_SHIFT, 1 << order_per_bit))
+       if (!IS_ALIGNED(CMA_MIN_ALIGNMENT_PAGES, 1 << order_per_bit))
                return -EINVAL;
 
-       if (ALIGN(base, alignment) != base || ALIGN(size, alignment) != size)
+       /* ensure minimal alignment required by mm core */
+       if (!IS_ALIGNED(base | size, CMA_MIN_ALIGNMENT_BYTES))
                return -EINVAL;
 
        /*
@@ -262,14 +258,8 @@ int __init cma_declare_contiguous_nid(phys_addr_t base,
        if (alignment && !is_power_of_2(alignment))
                return -EINVAL;
 
-       /*
-        * Sanitise input arguments.
-        * Pages both ends in CMA area could be merged into adjacent unmovable
-        * migratetype page by page allocator's buddy algorithm. In the case,
-        * you couldn't get a contiguous memory, which is not what we want.
-        */
-       alignment = max(alignment,  (phys_addr_t)PAGE_SIZE <<
-                         max_t(unsigned long, MAX_ORDER - 1, pageblock_order));
+       /* Sanitise input arguments. */
+       alignment = max_t(phys_addr_t, alignment, CMA_MIN_ALIGNMENT_BYTES);
        if (fixed && base & (alignment - 1)) {
                ret = -EINVAL;
                pr_err("Region at %pa must be aligned to %pa bytes\n",
-- 
2.34.1

Reply via email to