The logic that reserves space for BL31 and adds memory holes for the reserved regions in mem_map only makes sense on ARM64. Wrap all of this logic in an IS_ENABLED(CONFIG_ARM64) check so that it only runs when CONFIG_ARM64 is enabled. On non-ARM64, fall back to accepting the memory information from the TPL unmodified.
This is useful on e.g. RK3506, which is not ARM64, but which does receive memory information from the TPL.

Signed-off-by: Aaron Griffith <[email protected]>
---
 arch/arm/mach-rockchip/sdram.c | 154 +++++++++++++++++++++--------------------
 1 file changed, 78 insertions(+), 76 deletions(-)

diff --git a/arch/arm/mach-rockchip/sdram.c b/arch/arm/mach-rockchip/sdram.c
index 287073c0e50839cc9366baacbc47512dbe2b4476..82e8e2876f6faca71ff0a8622f08ab3c10a7390a 100644
--- a/arch/arm/mach-rockchip/sdram.c
+++ b/arch/arm/mach-rockchip/sdram.c
@@ -115,103 +115,105 @@ static u32 js_hash(const void *buf, u32 len)
 __weak int rockchip_dram_init_banksize_add_bank(u8 *next_bank,
 						phys_addr_t start_addr,
 						phys_size_t size)
 {
-	struct mm_region *tmp_mem_map = mem_map;
-	phys_addr_t end_addr;
+	if (IS_ENABLED(CONFIG_ARM64)) {
+		struct mm_region *tmp_mem_map = mem_map;
+		phys_addr_t end_addr;
 
-	/*
-	 * BL31 (TF-A) reserves the first 2MB but DDR_MEM tag may not
-	 * have it, so force this space as reserved.
-	 */
-	if (start_addr < CFG_SYS_SDRAM_BASE + SZ_2M) {
-		size -= CFG_SYS_SDRAM_BASE + SZ_2M - start_addr;
-		start_addr = CFG_SYS_SDRAM_BASE + SZ_2M;
-	}
-
-	/*
-	 * Put holes for reserved memory areas from mem_map.
-	 *
-	 * Only check for at most one overlap with one reserved memory
-	 * area.
-	 */
-	while (tmp_mem_map->size) {
-		const phys_addr_t rsrv_start = tmp_mem_map->phys;
-		const phys_size_t rsrv_size = tmp_mem_map->size;
-		const phys_addr_t rsrv_end = rsrv_start + rsrv_size;
-
-		/*
-		 * DRAM memories are expected by Arm to be marked as
-		 * Normal Write-back cacheable, Inner shareable[1], so
-		 * let's filter on that to put holes in non-DRAM areas.
-		 *
-		 * [1] https://developer.arm.com/documentation/102376/0200/Cacheability-and-shareability-attributes
-		 */
-		const u64 dram_attrs = PTE_BLOCK_MEMTYPE(MT_NORMAL) |
-				       PTE_BLOCK_INNER_SHARE;
 		/*
-		 * (AttrIndx | SH) in Lower Attributes of Block
-		 * Descriptor[2].
-		 * [2] https://developer.arm.com/documentation/102376/0200/Describing-memory-in-AArch64
+		 * BL31 (TF-A) reserves the first 2MB but DDR_MEM tag may not
+		 * have it, so force this space as reserved.
 		 */
-		const u64 attrs_mask = PMD_ATTRINDX_MASK | GENMASK(9, 8);
-
-		if ((tmp_mem_map->attrs & attrs_mask) == dram_attrs) {
-			tmp_mem_map++;
-			continue;
+		if (start_addr < CFG_SYS_SDRAM_BASE + SZ_2M) {
+			size -= CFG_SYS_SDRAM_BASE + SZ_2M - start_addr;
+			start_addr = CFG_SYS_SDRAM_BASE + SZ_2M;
 		}
 
 		/*
-		 * If the start of the DDR_MEM tag is in a reserved
-		 * memory area, move start address and resize.
+		 * Put holes for reserved memory areas from mem_map.
+		 *
+		 * Only check for at most one overlap with one reserved memory
+		 * area.
 		 */
-		if (start_addr >= rsrv_start && start_addr < rsrv_end) {
-			if (rsrv_end - start_addr > size) {
-				debug("Would be negative memory size\n");
-				return -EINVAL;
-			}
-
-			size -= rsrv_end - (start_addr - CFG_SYS_SDRAM_BASE);
-			start_addr = rsrv_end;
-			break;
-		}
+		while (tmp_mem_map->size) {
+			const phys_addr_t rsrv_start = tmp_mem_map->phys;
+			const phys_size_t rsrv_size = tmp_mem_map->size;
+			const phys_addr_t rsrv_end = rsrv_start + rsrv_size;
 
-		if (start_addr < rsrv_start) {
-			end_addr = start_addr + size;
+			/*
+			 * DRAM memories are expected by Arm to be marked as
+			 * Normal Write-back cacheable, Inner shareable[1], so
+			 * let's filter on that to put holes in non-DRAM areas.
+			 *
+			 * [1] https://developer.arm.com/documentation/102376/0200/Cacheability-and-shareability-attributes
+			 */
+			const u64 dram_attrs = PTE_BLOCK_MEMTYPE(MT_NORMAL) |
+					       PTE_BLOCK_INNER_SHARE;
+			/*
+			 * (AttrIndx | SH) in Lower Attributes of Block
+			 * Descriptor[2].
+			 * [2] https://developer.arm.com/documentation/102376/0200/Describing-memory-in-AArch64
+			 */
+			const u64 attrs_mask = PMD_ATTRINDX_MASK | GENMASK(9, 8);
 
-			if (end_addr <= rsrv_start) {
+			if ((tmp_mem_map->attrs & attrs_mask) == dram_attrs) {
 				tmp_mem_map++;
 				continue;
 			}
 
 			/*
-			 * If the memory area overlaps a reserved memory
-			 * area with start address outside of reserved
-			 * memory area and...
-			 *
-			 * ... ends in the middle of reserved memory
-			 * area, resize.
+			 * If the start of the DDR_MEM tag is in a reserved
+			 * memory area, move start address and resize.
 			 */
-			if (end_addr <= rsrv_end) {
-				size = rsrv_start - start_addr;
+			if (start_addr >= rsrv_start && start_addr < rsrv_end) {
+				if (rsrv_end - start_addr > size) {
+					debug("Would be negative memory size\n");
+					return -EINVAL;
+				}
+
+				size -= rsrv_end - (start_addr - CFG_SYS_SDRAM_BASE);
+				start_addr = rsrv_end;
 				break;
 			}
 
-			/*
-			 * ... ends after the reserved memory area,
-			 * split the region in two, one for before the
-			 * reserved memory area and one for after.
-			 */
-			gd->bd->bi_dram[*next_bank].start = start_addr;
-			gd->bd->bi_dram[*next_bank].size = rsrv_start - start_addr;
+			if (start_addr < rsrv_start) {
+				end_addr = start_addr + size;
+
+				if (end_addr <= rsrv_start) {
+					tmp_mem_map++;
+					continue;
+				}
+
+				/*
+				 * If the memory area overlaps a reserved memory
+				 * area with start address outside of reserved
+				 * memory area and...
+				 *
+				 * ... ends in the middle of reserved memory
+				 * area, resize.
+				 */
+				if (end_addr <= rsrv_end) {
+					size = rsrv_start - start_addr;
+					break;
+				}
+
+				/*
+				 * ... ends after the reserved memory area,
+				 * split the region in two, one for before the
+				 * reserved memory area and one for after.
+				 */
+				gd->bd->bi_dram[*next_bank].start = start_addr;
+				gd->bd->bi_dram[*next_bank].size = rsrv_start - start_addr;
+
+				*next_bank += 1;
+
+				size = end_addr - rsrv_end;
+				start_addr = rsrv_end;
 
-			*next_bank += 1;
-
-			size = end_addr - rsrv_end;
-			start_addr = rsrv_end;
+				break;
+			}
 
-			break;
+			tmp_mem_map++;
 		}
-
-		tmp_mem_map++;
 	}
 
 	if (*next_bank > CONFIG_NR_DRAM_BANKS) {
-- 
2.47.3
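Editor's note, not part of the patch: the change relies on the usual U-Boot/Linux IS_ENABLED() pattern, where the condition collapses to a compile-time constant 0 or 1, so the ARM64-only block is still parsed and type-checked on every architecture but is removed by dead-code elimination when CONFIG_ARM64 is not set. A minimal sketch of that pattern follows; the function and parameter names are made up for illustration and are not from the patch, and it assumes linux/kconfig.h provides IS_ENABLED() as it does in U-Boot and Linux.

#include <linux/kconfig.h>	/* IS_ENABLED() */

/* Hypothetical helper, for illustration only. */
void example_fixup_bank(unsigned long *start, unsigned long *size)
{
	if (IS_ENABLED(CONFIG_ARM64)) {
		/*
		 * ARM64-only fixups (BL31 carve-out, holes for reserved
		 * regions from mem_map) would go here. The block is
		 * compiled everywhere but discarded when CONFIG_ARM64=n.
		 */
	}

	/* On non-ARM64, *start and *size from the TPL are left untouched. */
}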

