x86/P2M: consolidate handling of types not requiring a valid MFN

As noted regarding the mixture of checks in p2m_pt_set_entry(),
introduce a new P2M type group to be used wherever we only care about
accepting operations coming either with a valid MFN or with a type
permitted to be used without a (valid) MFN.

Note that p2m_mmio_dm is not included in P2M_INVALID_MFN_TYPES, as for
the intended purpose that one ought to be treated similarly to
p2m_invalid (perhaps the two should ultimately get folded anyway).

Signed-off-by: Jan Beulich <jbeul...@suse.com>

--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -780,8 +780,7 @@ ept_set_entry(struct p2m_domain *p2m, un
         ept_entry = table + (gfn_remainder >> (i * EPT_TABLE_ORDER));
     }
 
-    if ( mfn_valid(mfn_x(mfn)) || direct_mmio || p2m_is_paged(p2mt) ||
-         (p2mt == p2m_ram_paging_in) )
+    if ( mfn_valid(mfn_x(mfn)) || p2m_allows_invalid_mfn(p2mt) )
     {
         int emt = epte_get_entry_emt(p2m->domain, gfn, mfn,
                                      i * EPT_TABLE_ORDER, &ipat, direct_mmio);
--- a/xen/arch/x86/mm/p2m-pod.c
+++ b/xen/arch/x86/mm/p2m-pod.c
@@ -753,7 +753,7 @@ p2m_pod_zero_check_superpage(struct p2m_
     }
 
     /* Try to remove the page, restoring old mapping if it fails. */
-    p2m_set_entry(p2m, gfn, _mfn(0), PAGE_ORDER_2M,
+    p2m_set_entry(p2m, gfn, _mfn(INVALID_MFN), PAGE_ORDER_2M,
                   p2m_populate_on_demand, p2m->default_access);
 
     /* Make none of the MFNs are used elsewhere... for example, mapped
@@ -869,7 +869,7 @@ p2m_pod_zero_check(struct p2m_domain *p2
     }
 
     /* Try to remove the page, restoring old mapping if it fails. */
-    p2m_set_entry(p2m, gfns[i], _mfn(0), PAGE_ORDER_4K,
+    p2m_set_entry(p2m, gfns[i], _mfn(INVALID_MFN), PAGE_ORDER_4K,
                   p2m_populate_on_demand, p2m->default_access);
 
     /* See if the page was successfully unmapped.  (Allow one refcount
@@ -1070,7 +1070,7 @@ p2m_pod_demand_populate(struct p2m_domai
          * NOTE: In a fine-grained p2m locking scenario this operation
          * may need to promote its locking from gfn->1g superpage
          */
-        p2m_set_entry(p2m, gfn_aligned, _mfn(0), PAGE_ORDER_2M,
+        p2m_set_entry(p2m, gfn_aligned, _mfn(INVALID_MFN), PAGE_ORDER_2M,
                       p2m_populate_on_demand, p2m->default_access);
         return 0;
     }
@@ -1152,7 +1152,7 @@ remap_and_retry:
      * need promoting the gfn lock from gfn->2M superpage */
     gfn_aligned = (gfn>>order)<<order;
     for(i=0; i<(1<<order); i++)
-        p2m_set_entry(p2m, gfn_aligned+i, _mfn(0), PAGE_ORDER_4K,
+        p2m_set_entry(p2m, gfn_aligned + i, _mfn(INVALID_MFN), PAGE_ORDER_4K,
                       p2m_populate_on_demand, p2m->default_access);
     if ( tb_init_done )
     {
@@ -1210,8 +1210,8 @@ guest_physmap_mark_populate_on_demand(st
     }
 
     /* Now, actually do the two-way mapping */
-    rc = p2m_set_entry(p2m, gfn, _mfn(0), order, p2m_populate_on_demand,
-                       p2m->default_access);
+    rc = p2m_set_entry(p2m, gfn, _mfn(INVALID_MFN), order,
+                       p2m_populate_on_demand, p2m->default_access);
     if ( rc == 0 )
     {
         pod_lock(p2m);
--- a/xen/arch/x86/mm/p2m-pt.c
+++ b/xen/arch/x86/mm/p2m-pt.c
@@ -571,7 +571,7 @@ p2m_pt_set_entry(struct p2m_domain *p2m,
         }
 
         ASSERT(!mfn_valid(mfn) || p2mt != p2m_mmio_direct);
-        l3e_content = mfn_valid(mfn)
+        l3e_content = mfn_valid(mfn) || p2m_allows_invalid_mfn(p2mt)
             ? l3e_from_pfn(mfn_x(mfn),
                            p2m_type_to_flags(p2mt, mfn) | _PAGE_PSE)
             : l3e_empty();
@@ -607,8 +607,7 @@ p2m_pt_set_entry(struct p2m_domain *p2m,
             p2m_get_iommu_flags(p2m_flags_to_type(l1e_get_flags(*p2m_entry)));
         old_mfn = l1e_get_pfn(*p2m_entry);
 
-        if ( mfn_valid(mfn) || (p2mt == p2m_mmio_direct)
-             || p2m_is_paging(p2mt) )
+        if ( mfn_valid(mfn) || p2m_allows_invalid_mfn(p2mt) )
             entry_content = p2m_l1e_from_pfn(mfn_x(mfn),
                                              p2m_type_to_flags(p2mt, mfn));
         else
@@ -644,7 +643,7 @@ p2m_pt_set_entry(struct p2m_domain *p2m,
         }
 
        ASSERT(!mfn_valid(mfn) || p2mt != p2m_mmio_direct);
-        if ( mfn_valid(mfn) || p2m_is_pod(p2mt) )
+        if ( mfn_valid(mfn) || p2m_allows_invalid_mfn(p2mt) )
            l2e_content = l2e_from_pfn(mfn_x(mfn),
                                       p2m_type_to_flags(p2mt, mfn) |
                                       _PAGE_PSE);
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -141,6 +141,11 @@ typedef unsigned int p2m_query_t;
                               | p2m_to_mask(p2m_ram_logdirty) )
 #define P2M_SHARED_TYPES   (p2m_to_mask(p2m_ram_shared))
 
+/* Valid types not necessarily associated with a (valid) MFN. */
+#define P2M_INVALID_MFN_TYPES (P2M_POD_TYPES                  \
+                               | p2m_to_mask(p2m_mmio_direct) \
+                               | P2M_PAGING_TYPES)
+
 /* Broken type: the frame backing this pfn has failed in hardware
  * and must not be touched. */
 #define P2M_BROKEN_TYPES (p2m_to_mask(p2m_ram_broken))
@@ -171,6 +176,8 @@ typedef unsigned int p2m_query_t;
                             (P2M_RAM_TYPES | P2M_GRANT_TYPES |  \
                              p2m_to_mask(p2m_map_foreign)))
 
+#define p2m_allows_invalid_mfn(t) (p2m_to_mask(t) & P2M_INVALID_MFN_TYPES)
+
 typedef enum {
     p2m_host,
     p2m_nested,
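For reference, the consolidation works because each p2m type occupies one
bit in a mask, so group membership is a single AND. Below is a minimal,
self-contained sketch of the technique; the enum values and the simplified
p2m_to_mask() are illustrative stand-ins rather than the real Xen
definitions, and the real P2M_POD_TYPES / P2M_PAGING_TYPES groups cover
more types than shown here:

/* Standalone sketch of the mask-based type-group test (not Xen code). */
#include <stdio.h>

typedef enum {
    p2m_invalid,            /* illustrative values only */
    p2m_ram_rw,
    p2m_mmio_direct,
    p2m_populate_on_demand,
    p2m_ram_paging_out,
    p2m_ram_paged,
    p2m_ram_paging_in
} p2m_type_t;

/* One bit per type; group tests reduce to a mask intersection. */
#define p2m_to_mask(t)   (1U << (t))

#define P2M_POD_TYPES    p2m_to_mask(p2m_populate_on_demand)
#define P2M_PAGING_TYPES (p2m_to_mask(p2m_ram_paging_out) | \
                          p2m_to_mask(p2m_ram_paged) |      \
                          p2m_to_mask(p2m_ram_paging_in))

/* Valid types not necessarily associated with a (valid) MFN. */
#define P2M_INVALID_MFN_TYPES (P2M_POD_TYPES                  \
                               | p2m_to_mask(p2m_mmio_direct) \
                               | P2M_PAGING_TYPES)

#define p2m_allows_invalid_mfn(t) (p2m_to_mask(t) & P2M_INVALID_MFN_TYPES)

int main(void)
{
    /* MMIO-direct needs no backing RAM page, so the predicate fires... */
    printf("mmio_direct: %u\n", !!p2m_allows_invalid_mfn(p2m_mmio_direct));
    /* ...while ordinary RAM still relies on mfn_valid() passing. */
    printf("ram_rw:      %u\n", !!p2m_allows_invalid_mfn(p2m_ram_rw));
    return 0;
}

Compiled with e.g. "cc -Wall", this prints 1 for p2m_mmio_direct and 0 for
p2m_ram_rw, mirroring how the open-coded chains of per-type comparisons
above collapse into mfn_valid(mfn) || p2m_allows_invalid_mfn(p2mt).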