Drop the ap2m_active boolean, and consistently use the unlocking form:
if ( p2m != hostp2m )
__put_gfn(p2m, gfn);
__put_gfn(hostp2m, gfn);
which makes it clear that we always unlock the altp2m's gfn if it is in use,
and always unlock the hostp2m's gfn. This also drops the ternary expression
in the logdirty case.
Extend the logdirty comment to identify where the locking violation is liable
to occur.
No (intended) overall change in behaviour.
Signed-off-by: Andrew Cooper
---
CC: Jan Beulich
CC: Wei Liu
CC: Roger Pau Monné
CC: Razvan Cojocaru
CC: Tamas K Lengyel
CC: George Dunlap
---
xen/arch/x86/hvm/hvm.c | 21 ++---
1 file changed, 10 insertions(+), 11 deletions(-)
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 94fe441..db60f23 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -1689,7 +1689,7 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
int rc, fall_through = 0, paged = 0;
int sharing_enomem = 0;
vm_event_request_t *req_ptr = NULL;
-bool_t ap2m_active, sync = 0;
+bool sync = false;
/* On Nested Virtualization, walk the guest page table.
* If this succeeds, all is fine.
@@ -1747,8 +1747,6 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
goto out;
}
-ap2m_active = altp2m_active(currd);
-
/*
* Take a lock on the host p2m speculatively, to avoid potential
* locking order problems later and to handle unshare etc.
@@ -1758,7 +1756,7 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
P2M_ALLOC | (npfec.write_access ? P2M_UNSHARE : 0),
NULL);
-if ( ap2m_active )
+if ( altp2m_active(currd) )
{
p2m = p2m_get_altp2m(curr);
@@ -1882,13 +1880,14 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
{
paging_mark_pfn_dirty(currd, _pfn(gfn));
/*
- * If p2m is really an altp2m, unlock here to avoid lock ordering
- * violation when the change below is propagated from host p2m.
+ * If p2m is really an altp2m, unlock it before changing the type,
+ * as p2m_altp2m_propagate_change() needs to acquire the
+ * altp2m_list lock.
*/
-if ( ap2m_active )
+if ( p2m != hostp2m )
__put_gfn(p2m, gfn);
p2m_change_type_one(currd, gfn, p2m_ram_logdirty, p2m_ram_rw);
-__put_gfn(ap2m_active ? hostp2m : p2m, gfn);
+__put_gfn(hostp2m, gfn);
goto out;
}
@@ -1909,9 +1908,9 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
rc = fall_through;
out_put_gfn:
-__put_gfn(p2m, gfn);
-if ( ap2m_active )
-__put_gfn(hostp2m, gfn);
+if ( p2m != hostp2m )
+__put_gfn(p2m, gfn);
+__put_gfn(hostp2m, gfn);
out:
/* All of these are delayed until we exit, since we might
* sleep on event ring wait queues, and we must not hold
--
2.1.4
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel