Introduce an inline wrapper dealing with the non-translated-domain case,
while stripping that logic from the main function, which gets renamed to
p2m_get_gfn_type_access(). HVM-only callers can then directly use the
main function.
Along with renaming the main function also make its and the new inline
helper's GFN parameters type-safe.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -1711,7 +1711,7 @@ static void svm_do_nested_pgfault(struct
} _d;
p2m = p2m_get_p2m(v);
-        mfn = __get_gfn_type_access(p2m, gfn, &p2mt, &p2ma, 0, NULL, 0);
+        mfn = p2m_get_gfn_type_access(p2m, _gfn(gfn), &p2mt, &p2ma, 0, NULL,
+                                      0);
_d.gpa = gpa;
_d.qualification = 0;
@@ -1736,7 +1736,7 @@ static void svm_do_nested_pgfault(struct
if ( p2m == NULL )
{
p2m = p2m_get_p2m(v);
-        mfn = __get_gfn_type_access(p2m, gfn, &p2mt, &p2ma, 0, NULL, 0);
+        mfn = p2m_get_gfn_type_access(p2m, _gfn(gfn), &p2mt, &p2ma, 0, NULL,
+                                      0);
}
gdprintk(XENLOG_ERR,
"SVM violation gpa %#"PRIpaddr", mfn %#lx, type %i\n",
--- a/xen/arch/x86/mm/mem_access.c
+++ b/xen/arch/x86/mm/mem_access.c
@@ -299,8 +299,9 @@ static int set_mem_access(struct domain
{
p2m_access_t _a;
p2m_type_t t;
-        mfn_t mfn = __get_gfn_type_access(p2m, gfn_x(gfn), &t, &_a,
-                                          P2M_ALLOC, NULL, false);
+        mfn_t mfn = p2m_get_gfn_type_access(p2m, gfn, &t, &_a,
+                                            P2M_ALLOC, NULL, false);
+
rc = p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, t, a, -1);
}
--- a/xen/arch/x86/mm/mem_sharing.c
+++ b/xen/arch/x86/mm/mem_sharing.c
@@ -478,12 +478,12 @@ do {
#undef assign_pointers
/* Now do the gets. */
-*first_mfn = __get_gfn_type_access(p2m_get_hostp2m(rval->first_domain),
-gfn_x(rval->first_gfn), first_t,
-first_a, q, NULL, lock);
-*second_mfn = __get_gfn_type_access(p2m_get_hostp2m(rval->second_domain),
-gfn_x(rval->second_gfn), second_t,
-second_a, q, NULL, lock);
+*first_mfn = p2m_get_gfn_type_access(p2m_get_hostp2m(rval->first_domain),
+ rval->first_gfn, first_t,
+ first_a, q, NULL, lock);
+*second_mfn = p2m_get_gfn_type_access(p2m_get_hostp2m(rval->second_domain),
+ rval->second_gfn, second_t,
+ second_a, q, NULL, lock);
}
static void put_two_gfns(const struct two_gfns *arg)
@@ -936,8 +936,8 @@ static int nominate_page(struct domain *
if ( !ap2m )
continue;
-        amfn = __get_gfn_type_access(ap2m, gfn_x(gfn), &ap2mt, &ap2ma,
-                                     0, NULL, false);
+        amfn = p2m_get_gfn_type_access(ap2m, gfn, &ap2mt, &ap2ma,
+                                       0, NULL, false);
if ( mfn_valid(amfn) && (!mfn_eq(amfn, mfn) || ap2ma != p2ma) )
{
altp2m_list_unlock(d);
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -286,25 +286,13 @@ void p2m_unlock_and_tlb_flush(struct p2m
    mm_write_unlock(&p2m->lock);
}
-mfn_t __get_gfn_type_access(struct p2m_domain *p2m, unsigned long gfn_l,
-p2m_type_t *t, p2m_access_t *a, p2m_query_t q,
-unsigned int *page_order, bool_t locked)
-{
#ifdef CONFIG_HVM
-mfn_t mfn;
-gfn_t gfn = _gfn(gfn_l);
-if ( !p2m || !paging_mode_translate(p2m->domain) )
-{
-#endif
-/*
- * Not necessarily true, but for non-translated guests we claim
- * it's the most generic kind of memory.
- */
-*t = p2m_ram_rw;
-return _mfn(gfn_l);
-#ifdef CONFIG_HVM
-}
+mfn_t p2m_get_gfn_type_access(struct p2m_domain *p2m, gfn_t gfn,
+ p2m_type_t *t, p2m_access_t *a, p2m_query_t q,
+ unsigned int *page_order, bool_t locked)
+{
+mfn_t mfn;
/* Unshare makes no sense without populate. */
if ( q & P2M_UNSHARE )
@@ -329,8 +317,8 @@ mfn_t __get_gfn_type_access(struct p2m_d
* Try to unshare. If we fail, communicate ENOMEM without
* sleeping.
*/
-if ( mem_sharing_unshare_page(p2m->domain, gfn_l) < 0 )
-mem_sharing_notify_enomem(p2m->domain, gfn_l, false);
+if ( mem_sharing_unshare_page(p2m->domain, gfn_x(gfn)) < 0 )
+mem_sharing_notify_enomem(p2m->domain, gfn_x(gfn), false);
mfn = p2m->get_entry(p2m, gfn, t, a, q, page_order, NULL);
}
@@ -343,9 +331,10 @@ mfn_t __get_gfn_type_access(struct p2m_d
}
return mfn;
-#endif
}
+#endif /* CONFIG_HVM */
+
void __put_gfn(struct p2m_domain *p2m, unsigned long gfn)
{
if ( !p2m ||