From: Julien Grall <jgr...@amazon.com>

At the moment, xen_second is used to cover the first 2GB of the virtual
address space. With the recent rework of the page-tables, only the first
1GB region (where Xen resides) is effectively used.
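(As a back-of-the-envelope check of the 1GB figure: with the 4KB LPAE
granule a second-level table holds 512 entries and each entry maps a 2MB
block, so a single table page spans exactly 1GB. Below is a minimal
standalone sketch of that arithmetic; the constants are stand-ins
mirroring XEN_PT_LPAE_ENTRIES and the 2MB second-level block size, not
the real Xen definitions.)

    #include <stdio.h>

    int main(void)
    {
        /* Stand-in values: 512 entries per table (cf. XEN_PT_LPAE_ENTRIES),
         * each second-level entry mapping a 2MB block under the 4KB granule. */
        unsigned long entries_per_table = 512;
        unsigned long second_block_size = 2UL << 20;

        /* 512 * 2MB = 1GB: one xen_second page suffices for Xen's 1GB region. */
        printf("one second-level table covers %lu MB\n",
               (entries_per_table * second_block_size) >> 20);
        return 0;
    }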
In addition to that, I would like to reshuffle the memory layout, so the
Xen mappings may no longer be in the first 2GB of the virtual address
space. Therefore, rework xen_second so it only covers the 1GB region
where Xen will reside.

With this change, xen_second no longer covers the xenheap area on arm32.
So we first need to add memory to the boot allocator before setting up
the xenheap mappings.

Take the opportunity to update the comments on top of xen_fixmap and
xen_xenmap.

Signed-off-by: Julien Grall <jgr...@amazon.com>
Reviewed-by: Michal Orzel <michal.or...@arm.com>

---
    Changes in v2:
        - Add Michal's reviewed-by
---
 xen/arch/arm/mm.c    | 32 +++++++++++---------------------
 xen/arch/arm/setup.c | 13 +++++++++++--
 2 files changed, 22 insertions(+), 23 deletions(-)

diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
index ad26ad740308..3d2c046bbb5c 100644
--- a/xen/arch/arm/mm.c
+++ b/xen/arch/arm/mm.c
@@ -116,17 +116,14 @@ static DEFINE_PAGE_TABLE(cpu0_pgtable);
 #endif
 
 /* Common pagetable leaves */
-/* Second level page tables.
- *
- * The second-level table is 2 contiguous pages long, and covers all
- * addresses from 0 to 0x7fffffff. Offsets into it are calculated
- * with second_linear_offset(), not second_table_offset().
- */
-static DEFINE_PAGE_TABLES(xen_second, 2);
-/* First level page table used for fixmap */
+/* Second level page table used to cover Xen virtual address space */
+static DEFINE_PAGE_TABLE(xen_second);
+/* Third level page table used for fixmap */
 DEFINE_BOOT_PAGE_TABLE(xen_fixmap);
-/* First level page table used to map Xen itself with the XN bit set
- * as appropriate. */
+/*
+ * Third level page table used to map Xen itself with the XN bit set
+ * as appropriate.
+ */
 static DEFINE_PAGE_TABLE(xen_xenmap);
 
 /* Non-boot CPUs use this to find the correct pagetables. */
@@ -168,7 +165,6 @@ static void __init __maybe_unused build_assertions(void)
     BUILD_BUG_ON(zeroeth_table_offset(XEN_VIRT_START));
 #endif
     BUILD_BUG_ON(first_table_offset(XEN_VIRT_START));
-    BUILD_BUG_ON(second_linear_offset(XEN_VIRT_START) >= XEN_PT_LPAE_ENTRIES);
 #ifdef CONFIG_DOMAIN_PAGE
     BUILD_BUG_ON(DOMHEAP_VIRT_START & ~FIRST_MASK);
 #endif
@@ -482,14 +478,10 @@ void __init setup_pagetables(unsigned long boot_phys_offset)
     p = (void *) cpu0_pgtable;
 #endif
 
-    /* Initialise first level entries, to point to second level entries */
-    for ( i = 0; i < 2; i++ )
-    {
-        p[i] = pte_of_xenaddr((uintptr_t)(xen_second +
-                                          i * XEN_PT_LPAE_ENTRIES));
-        p[i].pt.table = 1;
-        p[i].pt.xn = 0;
-    }
+    /* Map xen second level page-table */
+    p[0] = pte_of_xenaddr((uintptr_t)(xen_second));
+    p[0].pt.table = 1;
+    p[0].pt.xn = 0;
 
     /* Break up the Xen mapping into 4k pages and protect them separately. */
     for ( i = 0; i < XEN_PT_LPAE_ENTRIES; i++ )
@@ -618,8 +610,6 @@ void __init setup_xenheap_mappings(unsigned long base_mfn,
 
     /* Record where the xenheap is, for translation routines. */
     xenheap_virt_end = XENHEAP_VIRT_START + nr_mfns * PAGE_SIZE;
-    xenheap_mfn_start = _mfn(base_mfn);
-    xenheap_mfn_end = _mfn(base_mfn + nr_mfns);
 }
 #else /* CONFIG_ARM_64 */
 void __init setup_xenheap_mappings(unsigned long base_mfn,
diff --git a/xen/arch/arm/setup.c b/xen/arch/arm/setup.c
index 068e84b10335..500307edc08d 100644
--- a/xen/arch/arm/setup.c
+++ b/xen/arch/arm/setup.c
@@ -774,11 +774,20 @@ static void __init setup_mm(void)
", from command-line" : ""); printk("Dom heap: %lu pages\n", domheap_pages); - setup_xenheap_mappings((e >> PAGE_SHIFT) - xenheap_pages, xenheap_pages); + /* + * We need some memory to allocate the page-tables used for the + * xenheap mappings. So populate the boot allocator first. + * + * This requires us to set xenheap_mfn_{start, end} first so the Xenheap + * region can be avoided. + */ + xenheap_mfn_start = _mfn((e >> PAGE_SHIFT) - xenheap_pages); + xenheap_mfn_end = mfn_add(xenheap_mfn_start, xenheap_pages); - /* Add non-xenheap memory */ populate_boot_allocator(); + setup_xenheap_mappings(mfn_x(xenheap_mfn_start), xenheap_pages); + /* Frame table covers all of RAM region, including holes */ setup_frametable_mappings(ram_start, ram_end); max_page = PFN_DOWN(ram_end); -- 2.32.0