Re: [PATCH 13/16] shr_pages field is MEM_SHARING-only

2022-02-14 Thread George Dunlap


> On Jul 5, 2021, at 5:13 PM, Jan Beulich  wrote:
> 
> Conditionalize it and its uses accordingly. The main goal though is to
> demonstrate that x86's p2m_teardown() is now empty when !HVM, which in
> particular means the last remaining use of p2m_lock() in this case goes
> away.
> 
> Signed-off-by: Jan Beulich 

Reviewed-by: George Dunlap 



signature.asc
Description: Message signed with OpenPGP


Re: [PATCH 13/16] shr_pages field is MEM_SHARING-only

2021-07-06 Thread Tamas K Lengyel
On Mon, Jul 5, 2021 at 12:13 PM Jan Beulich  wrote:
>
> Conditionalize it and its uses accordingly. The main goal though is to
> demonstrate that x86's p2m_teardown() is now empty when !HVM, which in
> particular means the last remaining use of p2m_lock() in this case goes
> away.
>
> Signed-off-by: Jan Beulich 

Reviewed-by: Tamas K Lengyel 



[PATCH 13/16] shr_pages field is MEM_SHARING-only

2021-07-05 Thread Jan Beulich
Conditionalize it and its uses accordingly. The main goal though is to
demonstrate that x86's p2m_teardown() is now empty when !HVM, which in
particular means the last remaining use of p2m_lock() in this case goes
away.

Signed-off-by: Jan Beulich 
---
I was on the edge of introducing a helper for atomic_read(&d->shr_pages)
but decided against because of dump_domains() not being able to use it
sensibly (I really want to omit the output field altogether there when
!MEM_SHARING).

--- a/xen/arch/x86/mm/p2m-basic.c
+++ b/xen/arch/x86/mm/p2m-basic.c
@@ -159,7 +159,6 @@ void p2m_teardown(struct p2m_domain *p2m
 {
 #ifdef CONFIG_HVM
 struct page_info *pg;
-#endif
 struct domain *d;
 
 if ( !p2m )
@@ -169,16 +168,17 @@ void p2m_teardown(struct p2m_domain *p2m
 
 p2m_lock(p2m);
 
+#ifdef CONFIG_MEM_SHARING
 ASSERT(atomic_read(&d->shr_pages) == 0);
+#endif
 
-#ifdef CONFIG_HVM
 p2m->phys_table = pagetable_null();
 
 while ( (pg = page_list_remove_head(>pages)) )
 d->arch.paging.free_page(d, pg);
-#endif
 
 p2m_unlock(p2m);
+#endif
 }
 
 void p2m_final_teardown(struct domain *d)
--- a/xen/common/domctl.c
+++ b/xen/common/domctl.c
@@ -109,7 +109,11 @@ void getdomaininfo(struct domain *d, str
 info->tot_pages = domain_tot_pages(d);
 info->max_pages = d->max_pages;
 info->outstanding_pages = d->outstanding_pages;
+#ifdef CONFIG_MEM_SHARING
 info->shr_pages = atomic_read(&d->shr_pages);
+#else
+info->shr_pages = 0;
+#endif
 info->paged_pages   = atomic_read(&d->paged_pages);
 info->shared_info_frame =
 gfn_x(mfn_to_gfn(d, _mfn(virt_to_mfn(d->shared_info;
--- a/xen/common/keyhandler.c
+++ b/xen/common/keyhandler.c
@@ -274,9 +274,16 @@ static void dump_domains(unsigned char k
 printk("refcnt=%d dying=%d pause_count=%d\n",
atomic_read(&d->refcnt), d->is_dying,
atomic_read(&d->pause_count));
-printk("nr_pages=%d xenheap_pages=%d shared_pages=%u 
paged_pages=%u "
-   "dirty_cpus={%*pbl} max_pages=%u\n",
-   domain_tot_pages(d), d->xenheap_pages, 
atomic_read(&d->shr_pages),
+printk("nr_pages=%u xenheap_pages=%u"
+#ifdef CONFIG_MEM_SHARING
+   " shared_pages=%u"
+#endif
+   " paged_pages=%u"
+   " dirty_cpus={%*pbl} max_pages=%u\n",
+   domain_tot_pages(d), d->xenheap_pages,
+#ifdef CONFIG_MEM_SHARING
+   atomic_read(&d->shr_pages),
+#endif
atomic_read(&d->paged_pages), CPUMASK_PR(d->dirty_cpumask),
d->max_pages);
 printk("handle=%02x%02x%02x%02x-%02x%02x-%02x%02x-"
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -385,7 +385,11 @@ struct domain
 unsigned int outstanding_pages; /* pages claimed but not possessed */
 unsigned int max_pages; /* maximum value for 
domain_tot_pages() */
 unsigned int extra_pages;   /* pages not included in 
domain_tot_pages() */
+
+#ifdef CONFIG_MEM_SHARING
 atomic_t shr_pages; /* shared pages */
+#endif
+
 atomic_t paged_pages;   /* paged-out pages */
 
 /* Scheduling. */