Later, we want to use acquire_domstatic_pages() to populate memory
for static domains at runtime. However, it performs a lot of pointless
work (checking mfn_valid(), scrubbing the free part, cleaning the
cache, ...) given that we already know the pages are valid and belong
to the guest.
This commit splits acquire_staticmem_pages() into two parts, and
introduces prepare_staticmem_pages() to bypass all the "pointless work".
Signed-off-by: Penny Zheng
---
v5 changes:
- new commit
---
 xen/common/page_alloc.c | 61 ++++++++++++++++++++++++++++++++++-------------------------
1 file changed, 36 insertions(+), 25 deletions(-)
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index b1350fc238..bdd2e62865 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -2661,26 +2661,13 @@ void free_staticmem_pages(struct page_info *pg,
unsigned long nr_mfns,
}
}
-/*
- * Acquire nr_mfns contiguous reserved pages, starting at #smfn, of
- * static memory.
- * This function needs to be reworked if used outside of boot.
- */
-static struct page_info * __init acquire_staticmem_pages(mfn_t smfn,
- unsigned long nr_mfns,
- unsigned int memflags)
+static bool __init prepare_staticmem_pages(struct page_info *pg,
+ unsigned long nr_mfns,
+ unsigned int memflags)
{
bool need_tlbflush = false;
uint32_t tlbflush_timestamp = 0;
unsigned long i;
-struct page_info *pg;
-
-ASSERT(nr_mfns);
-for ( i = 0; i < nr_mfns; i++ )
-if ( !mfn_valid(mfn_add(smfn, i)) )
-return NULL;
-
-pg = mfn_to_page(smfn);
spin_lock(&heap_lock);
@@ -2691,7 +2678,7 @@ static struct page_info * __init
acquire_staticmem_pages(mfn_t smfn,
{
printk(XENLOG_ERR
"pg[%lu] Static MFN %"PRI_mfn" c=%#lx t=%#x\n",
- i, mfn_x(smfn) + i,
+ i, mfn_x(page_to_mfn(pg)) + i,
pg[i].count_info, pg[i].tlbflush_timestamp);
goto out_err;
}
@@ -2715,6 +2702,38 @@ static struct page_info * __init
acquire_staticmem_pages(mfn_t smfn,
if ( need_tlbflush )
filtered_flush_tlb_mask(tlbflush_timestamp);
+return true;
+
+ out_err:
+while ( i-- )
+pg[i].count_info = PGC_staticmem | PGC_state_free;
+
+spin_unlock(&heap_lock);
+
+return false;
+}
+
+/*
+ * Acquire nr_mfns contiguous reserved pages, starting at #smfn, of
+ * static memory.
+ * This function needs to be reworked if used outside of boot.
+ */
+static struct page_info * __init acquire_staticmem_pages(mfn_t smfn,
+ unsigned long nr_mfns,
+ unsigned int memflags)
+{
+unsigned long i;
+struct page_info *pg;
+
+ASSERT(nr_mfns);
+for ( i = 0; i < nr_mfns; i++ )
+if ( !mfn_valid(mfn_add(smfn, i)) )
+return NULL;
+
+pg = mfn_to_page(smfn);
+if ( !prepare_staticmem_pages(pg, nr_mfns, memflags) )
+return NULL;
+
/*
* Ensure cache and RAM are consistent for platforms where the guest
* can control its own visibility of/through the cache.
@@ -2723,14 +2742,6 @@ static struct page_info * __init
acquire_staticmem_pages(mfn_t smfn,
flush_page_to_ram(mfn_x(smfn) + i, !(memflags & MEMF_no_icache_flush));
return pg;
-
- out_err:
-while ( i-- )
-pg[i].count_info = PGC_staticmem | PGC_state_free;
-
-spin_unlock(&heap_lock);
-
-return NULL;
}
/*
--
2.25.1