*_mapped_gfn are currently read before acquiring the p2m lock. However, they may be modified by the p2m code before the lock is acquired. This means we would use stale, and therefore wrong, values.
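To illustrate the race, here is a minimal sketch of the pattern being fixed, reduced from the two functions touched below (the p2m field and lock names are the real ones; the loop bodies are elided):

    /* Racy: the bounds are sampled before the lock is taken, so a
     * concurrent p2m operation may update them in between. */
    gfn_t start = p2m->lowest_mapped_gfn;
    gfn_t end = p2m->max_mapped_gfn;

    p2m_write_lock(p2m);
    /* ... walk [start, end) using possibly stale bounds ... */
    p2m_write_unlock(p2m);

    /* Fixed: sample the bounds only once the lock is held, so they
     * cannot change behind our back while we walk the range. */
    p2m_write_lock(p2m);
    start = p2m->lowest_mapped_gfn;
    end = p2m->max_mapped_gfn;
    /* ... walk [start, end) ... */
    p2m_write_unlock(p2m);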
Fix it by moving the read inside the section protected by the p2m lock.

Signed-off-by: Julien Grall <julien.gr...@arm.com>

---
    This patch should be backported to Xen 4.9 and Xen 4.8
---
 xen/arch/arm/p2m.c | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
index c484469e6c..d1260d3b4e 100644
--- a/xen/arch/arm/p2m.c
+++ b/xen/arch/arm/p2m.c
@@ -1292,13 +1292,13 @@ int relinquish_p2m_mapping(struct domain *d)
     p2m_type_t t;
     int rc = 0;
     unsigned int order;
-
-    /* Convenience alias */
-    gfn_t start = p2m->lowest_mapped_gfn;
-    gfn_t end = p2m->max_mapped_gfn;
+    gfn_t start, end;
 
     p2m_write_lock(p2m);
 
+    start = p2m->lowest_mapped_gfn;
+    end = p2m->max_mapped_gfn;
+
     for ( ; gfn_x(start) < gfn_x(end);
           start = gfn_next_boundary(start, order) )
     {
@@ -1353,9 +1353,6 @@ int p2m_cache_flush(struct domain *d, gfn_t start, unsigned long nr)
     p2m_type_t t;
     unsigned int order;
 
-    start = gfn_max(start, p2m->lowest_mapped_gfn);
-    end = gfn_min(end, p2m->max_mapped_gfn);
-
     /*
      * The operation cache flush will invalidate the RAM assigned to the
      * guest in a given range. It will not modify the page table and
@@ -1364,6 +1361,9 @@ int p2m_cache_flush(struct domain *d, gfn_t start, unsigned long nr)
      */
     p2m_read_lock(p2m);
 
+    start = gfn_max(start, p2m->lowest_mapped_gfn);
+    end = gfn_min(end, p2m->max_mapped_gfn);
+
     for ( ; gfn_x(start) < gfn_x(end); start = next_gfn )
     {
         mfn_t mfn = p2m_get_entry(p2m, start, &t, NULL, &order);
-- 
2.11.0

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel