Later, we want to use acquire_domstatic_pages() to populate memory for static domains at runtime. However, that would involve a lot of pointless work (checking mfn_valid(), scrubbing the free part, cleaning the cache, ...), considering we already know the page is valid and belongs to the guest.

This commit splits acquire_staticmem_pages() into two parts and introduces prepare_staticmem_pages() to bypass all the "pointless work".

Signed-off-by: Penny Zheng <penny.zh...@arm.com>
Acked-by: Jan Beulich <jbeul...@suse.com>
Acked-by: Julien Grall <jgr...@amazon.com>
---
v8 changes:
- no change
---
v7 changes:
- no change
---
v6 changes:
- adapt to PGC_static
---
v5 changes:
- new commit
---
 xen/common/page_alloc.c | 61 ++++++++++++++++++++++++-----------------
 1 file changed, 36 insertions(+), 25 deletions(-)

diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index a568be55e3..9e150946f9 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -2686,26 +2686,13 @@ void free_domstatic_page(struct page_info *page)
     put_domain(d);
 }
 
-/*
- * Acquire nr_mfns contiguous reserved pages, starting at #smfn, of
- * static memory.
- * This function needs to be reworked if used outside of boot.
- */
-static struct page_info * __init acquire_staticmem_pages(mfn_t smfn,
-                                                         unsigned long nr_mfns,
-                                                         unsigned int memflags)
+static bool __init prepare_staticmem_pages(struct page_info *pg,
+                                           unsigned long nr_mfns,
+                                           unsigned int memflags)
 {
     bool need_tlbflush = false;
     uint32_t tlbflush_timestamp = 0;
     unsigned long i;
-    struct page_info *pg;
-
-    ASSERT(nr_mfns);
-    for ( i = 0; i < nr_mfns; i++ )
-        if ( !mfn_valid(mfn_add(smfn, i)) )
-            return NULL;
-
-    pg = mfn_to_page(smfn);
 
     spin_lock(&heap_lock);
 
@@ -2716,7 +2703,7 @@ static struct page_info * __init acquire_staticmem_pages(mfn_t smfn,
         {
             printk(XENLOG_ERR
                    "pg[%lu] Static MFN %"PRI_mfn" c=%#lx t=%#x\n",
-                   i, mfn_x(smfn) + i,
+                   i, mfn_x(page_to_mfn(pg)) + i,
                    pg[i].count_info, pg[i].tlbflush_timestamp);
             goto out_err;
         }
@@ -2740,6 +2727,38 @@ static struct page_info * __init acquire_staticmem_pages(mfn_t smfn,
     if ( need_tlbflush )
         filtered_flush_tlb_mask(tlbflush_timestamp);
 
+    return true;
+
+ out_err:
+    while ( i-- )
+        pg[i].count_info = PGC_static | PGC_state_free;
+
+    spin_unlock(&heap_lock);
+
+    return false;
+}
+
+/*
+ * Acquire nr_mfns contiguous reserved pages, starting at #smfn, of
+ * static memory.
+ * This function needs to be reworked if used outside of boot.
+ */
+static struct page_info * __init acquire_staticmem_pages(mfn_t smfn,
+                                                         unsigned long nr_mfns,
+                                                         unsigned int memflags)
+{
+    unsigned long i;
+    struct page_info *pg;
+
+    ASSERT(nr_mfns);
+    for ( i = 0; i < nr_mfns; i++ )
+        if ( !mfn_valid(mfn_add(smfn, i)) )
+            return NULL;
+
+    pg = mfn_to_page(smfn);
+    if ( !prepare_staticmem_pages(pg, nr_mfns, memflags) )
+        return NULL;
+
     /*
      * Ensure cache and RAM are consistent for platforms where the guest
      * can control its own visibility of/through the cache.
@@ -2748,14 +2767,6 @@ static struct page_info * __init acquire_staticmem_pages(mfn_t smfn,
         flush_page_to_ram(mfn_x(smfn) + i, !(memflags & MEMF_no_icache_flush));
 
     return pg;
-
- out_err:
-    while ( i-- )
-        pg[i].count_info = PGC_static | PGC_state_free;
-
-    spin_unlock(&heap_lock);
-
-    return NULL;
 }
 
 /*
-- 
2.25.1
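
[Not part of the patch, for illustration only.] With the split in place, a later runtime path that already knows its MFN range is valid and belongs to the guest could call the new helper directly and skip the boot-time checks and cache maintenance. A minimal sketch, assuming a hypothetical wrapper: acquire_staticmem_pages_runtime() is made up here, only prepare_staticmem_pages() and mfn_to_page() come from the patch above.

/*
 * Hypothetical sketch, not part of this series: a runtime variant that
 * relies on the caller having validated the MFN range, so it skips the
 * per-MFN mfn_valid() loop and the flush_page_to_ram() pass, and only
 * does the work under the heap lock via prepare_staticmem_pages().
 */
static struct page_info *acquire_staticmem_pages_runtime(mfn_t smfn,
                                                         unsigned long nr_mfns,
                                                         unsigned int memflags)
{
    struct page_info *pg = mfn_to_page(smfn);

    return prepare_staticmem_pages(pg, nr_mfns, memflags) ? pg : NULL;
}

Whether a follow-up actually wants such a wrapper, or calls prepare_staticmem_pages() straight from the domstatic populate path, is left to the later series mentioned above.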