To avoid the trouble of converting a page count to an order when the number
of pages is not a power of two, this commit redefines assign_pages() to take
a number of pages (nr), and introduces assign_page() to preserve the original
interface taking a single order.
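
For illustration only (not part of the patch; d, pg, nr, order and
memflags stand in for a caller's existing variables), the two entry
points are then used as:

    /* arbitrary, possibly non-power-of-two, number of pages */
    rc = assign_pages(d, pg, nr, memflags);

    /* exactly 1UL << order pages, matching the old interface */
    rc = assign_page(d, pg, order, memflags);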

Signed-off-by: Penny Zheng <penny.zh...@arm.com>
Acked-by: Jan Beulich <jbeul...@suse.com>
---
v5 change:
- Provide both functions, assign_pages() and assign_page(), with a similar
parameter arrangement
---
 xen/arch/x86/pv/dom0_build.c |  2 +-
 xen/common/grant_table.c     |  2 +-
 xen/common/memory.c          |  4 ++--
 xen/common/page_alloc.c      | 21 +++++++++++++--------
 xen/include/xen/mm.h         |  6 ++++++
 5 files changed, 23 insertions(+), 12 deletions(-)

diff --git a/xen/arch/x86/pv/dom0_build.c b/xen/arch/x86/pv/dom0_build.c
index d5a1a6a4e2..ebbafe48f3 100644
--- a/xen/arch/x86/pv/dom0_build.c
+++ b/xen/arch/x86/pv/dom0_build.c
@@ -557,7 +557,7 @@ int __init dom0_construct_pv(struct domain *d,
         else
         {
             while ( count-- )
-                if ( assign_pages(d, mfn_to_page(_mfn(mfn++)), 0, 0) )
+                if ( assign_pages(d, mfn_to_page(_mfn(mfn++)), 1, 0) )
                     BUG();
         }
         initrd->mod_end = 0;
diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c
index fab77ab9cc..1e138201a5 100644
--- a/xen/common/grant_table.c
+++ b/xen/common/grant_table.c
@@ -2342,7 +2342,7 @@ gnttab_transfer(
          * is respected and speculative execution is blocked accordingly
          */
         if ( unlikely(!evaluate_nospec(okay)) ||
-            unlikely(assign_pages(e, page, 0, MEMF_no_refcount)) )
+            unlikely(assign_pages(e, page, 1, MEMF_no_refcount)) )
         {
             bool drop_dom_ref;
 
diff --git a/xen/common/memory.c b/xen/common/memory.c
index e07bd9a5ea..e22dad2c37 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -728,7 +728,7 @@ static long memory_exchange(XEN_GUEST_HANDLE_PARAM(xen_memory_exchange_t) arg)
         /* Assign each output page to the domain. */
         for ( j = 0; (page = page_list_remove_head(&out_chunk_list)); ++j )
         {
-            if ( assign_pages(d, page, exch.out.extent_order,
+            if ( assign_page(d, page, exch.out.extent_order,
                               MEMF_no_refcount) )
             {
                 unsigned long dec_count;
@@ -797,7 +797,7 @@ static long memory_exchange(XEN_GUEST_HANDLE_PARAM(xen_memory_exchange_t) arg)
      * cleared PGC_allocated.
      */
     while ( (page = page_list_remove_head(&in_chunk_list)) )
-        if ( assign_pages(d, page, 0, MEMF_no_refcount) )
+        if ( assign_pages(d, page, 1, MEMF_no_refcount) )
         {
             BUG_ON(!d->is_dying);
             free_domheap_page(page);
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 2b4591bc56..05c9834dc2 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -2260,7 +2260,7 @@ void init_domheap_pages(paddr_t ps, paddr_t pe)
 int assign_pages(
     struct domain *d,
     struct page_info *pg,
-    unsigned int order,
+    unsigned long nr,
     unsigned int memflags)
 {
     int rc = 0;
@@ -2280,7 +2280,7 @@ int assign_pages(
     {
         unsigned int extra_pages = 0;
 
-        for ( i = 0; i < (1ul << order); i++ )
+        for ( i = 0; i < nr; i++ )
         {
             ASSERT(!(pg[i].count_info & ~PGC_extra));
             if ( pg[i].count_info & PGC_extra )
@@ -2289,18 +2289,18 @@ int assign_pages(
 
         ASSERT(!extra_pages ||
                ((memflags & MEMF_no_refcount) &&
-                extra_pages == 1u << order));
+                extra_pages == nr));
     }
 #endif
 
     if ( pg[0].count_info & PGC_extra )
     {
-        d->extra_pages += 1u << order;
+        d->extra_pages += nr;
         memflags &= ~MEMF_no_refcount;
     }
     else if ( !(memflags & MEMF_no_refcount) )
     {
-        unsigned int tot_pages = domain_tot_pages(d) + (1 << order);
+        unsigned int tot_pages = domain_tot_pages(d) + nr;
 
         if ( unlikely(tot_pages > d->max_pages) )
         {
@@ -2312,10 +2312,10 @@ int assign_pages(
     }
 
     if ( !(memflags & MEMF_no_refcount) &&
-         unlikely(domain_adjust_tot_pages(d, 1 << order) == (1 << order)) )
+         unlikely(domain_adjust_tot_pages(d, nr) == nr) )
         get_knownalive_domain(d);
 
-    for ( i = 0; i < (1 << order); i++ )
+    for ( i = 0; i < nr; i++ )
     {
         ASSERT(page_get_owner(&pg[i]) == NULL);
         page_set_owner(&pg[i], d);
@@ -2330,6 +2330,11 @@ int assign_pages(
     return rc;
 }
 
+int assign_page(struct domain *d, struct page_info *pg, unsigned int order,
+                unsigned int memflags)
+{
+    return assign_pages(d, pg, 1UL << order, memflags);
+}
 
 struct page_info *alloc_domheap_pages(
     struct domain *d, unsigned int order, unsigned int memflags)
@@ -2372,7 +2377,7 @@ struct page_info *alloc_domheap_pages(
                 pg[i].count_info = PGC_extra;
             }
         }
-        if ( assign_pages(d, pg, order, memflags) )
+        if ( assign_page(d, pg, order, memflags) )
         {
             free_heap_pages(pg, order, memflags & MEMF_no_scrub);
             return NULL;
diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h
index 8e8fb5a615..f243ff88d7 100644
--- a/xen/include/xen/mm.h
+++ b/xen/include/xen/mm.h
@@ -132,6 +132,12 @@ int query_page_offline(mfn_t mfn, uint32_t *status);
 void heap_init_late(void);
 
 int assign_pages(
+    struct domain *d,
+    struct page_info *pg,
+    unsigned long nr,
+    unsigned int memflags);
+
+int assign_page(
     struct domain *d,
     struct page_info *pg,
     unsigned int order,
-- 
2.25.1

