For domain heap pages assigned to a domain, dropping the page reference tied to PGC_allocated must not drop the last reference, as otherwise the test_and_clear_bit() might already act on an unowned page.
Work around this where possible, but the need to acquire extra page references is a fair hint that references should have been acquired in other places instead.

Signed-off-by: Jan Beulich <jbeul...@suse.com>
---
Compile tested only, as I have neither a mem-sharing nor a mem-paging environment set up ready to be used for such testing.

--- a/xen/arch/x86/mm/mem_sharing.c
+++ b/xen/arch/x86/mm/mem_sharing.c
@@ -964,6 +964,15 @@ static int share_pages(struct domain *sd
         goto err_out;
     }
 
+    /* Acquire an extra reference, for the freeing below to be safe. */
+    if ( !get_page(cpage, cd) )
+    {
+        ret = -EOVERFLOW;
+        mem_sharing_page_unlock(secondpg);
+        mem_sharing_page_unlock(firstpg);
+        goto err_out;
+    }
+
     /* Merge the lists together */
     rmap_seed_iterator(cpage, &ri);
     while ( (gfn = rmap_iterate(cpage, &ri)) != NULL)
@@ -993,6 +1002,7 @@ static int share_pages(struct domain *sd
     /* Free the client page */
     if(test_and_clear_bit(_PGC_allocated, &cpage->count_info))
         put_page(cpage);
+    put_page(cpage);
 
     /* We managed to free a domain page. */
     atomic_dec(&nr_shared_mfns);
@@ -1066,9 +1076,16 @@ int mem_sharing_add_to_physmap(struct do
             if ( mfn_valid(cmfn) )
             {
                 struct page_info *cpage = mfn_to_page(cmfn);
-                ASSERT(cpage != NULL);
+
+                if ( !get_page(cpage, cd) )
+                {
+                    domain_crash(cd);
+                    ret = -EOVERFLOW;
+                    goto err_unlock;
+                }
                 if ( test_and_clear_bit(_PGC_allocated, &cpage->count_info) )
                     put_page(cpage);
+                put_page(cpage);
             }
         }
     }
@@ -1153,9 +1170,18 @@ int __mem_sharing_unshare_page(struct do
         mem_sharing_gfn_destroy(page, d, gfn_info);
         put_page_and_type(page);
         mem_sharing_page_unlock(page);
-        if ( last_gfn &&
-             test_and_clear_bit(_PGC_allocated, &page->count_info) )
+        if ( last_gfn )
+        {
+            if ( !get_page(page, d) )
+            {
+                put_gfn(d, gfn);
+                domain_crash(d);
+                return -EOVERFLOW;
+            }
+            if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
+                put_page(page);
             put_page(page);
+        }
         put_gfn(d, gfn);
 
         return 0;
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -313,20 +313,36 @@ int guest_remove_page(struct domain *d,
 
     if ( unlikely(p2m_is_paging(p2mt)) )
     {
+        /*
+         * If the page hasn't yet been paged out, there is an
+         * actual page that needs to be released.
+         */
+        if ( p2mt == p2m_ram_paging_out )
+        {
+            ASSERT(mfn_valid(mfn));
+            page = mfn_to_page(mfn);
+            rc = -ENXIO;
+            if ( !get_page(page, d) )
+                goto out_put_gfn;
+        }
+        else
+            page = NULL;
+
         rc = guest_physmap_remove_page(d, _gfn(gmfn), mfn, 0);
         if ( rc )
+        {
+            if ( page )
+                put_page(page);
             goto out_put_gfn;
+        }
 
         put_gfn(d, gmfn);
 
-        /* If the page hasn't yet been paged out, there is an
-         * actual page that needs to be released. */
-        if ( p2mt == p2m_ram_paging_out )
+        if ( page )
         {
-            ASSERT(mfn_valid(mfn));
-            page = mfn_to_page(mfn);
             if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
                 put_page(page);
+            put_page(page);
         }
 
         p2m_mem_paging_drop_page(d, gmfn, p2mt);
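For reference, all of the adjusted paths follow the same pattern, sketched below as a hypothetical helper (release_allocated_page() is a made-up name, not something this patch introduces): take a reference of our own first, so that the reference tied to PGC_allocated can never be the last one while we are still looking at the page.

/* Illustrative sketch only; not part of the patch. */
static int release_allocated_page(struct domain *d, struct page_info *pg)
{
    /* Hold an extra reference so the page can't be freed under our feet. */
    if ( !get_page(pg, d) )
        return -EOVERFLOW;

    /* Drop the reference tied to PGC_allocated, if the flag was still set. */
    if ( test_and_clear_bit(_PGC_allocated, &pg->count_info) )
        put_page(pg);

    /* Drop our own reference; only now may the page actually get freed. */
    put_page(pg);

    return 0;
}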