This is a preparation for the proposal "allow setting up shared memory areas between VMs from xl config file". See: V2: https://lists.xen.org/archives/html/xen-devel/2017-06/msg02256.html V1: https://lists.xen.org/archives/html/xen-devel/2017-05/msg01288.html
The plan is to use XENMEM_add_to_physmap_batch in xl to map foreign pages from one DomU to another so that the page could be shared. But currently there is no wrapper for XENMEM_add_to_physmap_batch in libxc, so we just add a wrapper for it. Signed-off-by: Zhongze Liu <blacksk...@gmail.com> --- Changed Since v2: * fix coding style issue * let rc = -1 on buffer bouncing failures Changed Since v1: * explain why such a sudden wrapper * change the parameters' types Cc: Ian Jackson <ian.jack...@eu.citrix.com>, Cc: Wei Liu <wei.l...@citrix.com>, Cc: Stefano Stabellini <sstabell...@kernel.org> Cc: Julien Grall <julien.gr...@arm.com> Cc: Jan Beulich <jbeul...@suse.com> --- tools/libxc/include/xenctrl.h | 9 +++++++++ tools/libxc/xc_domain.c | 45 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 54 insertions(+) diff --git a/tools/libxc/include/xenctrl.h b/tools/libxc/include/xenctrl.h index 1629f412dd..9501818558 100644 --- a/tools/libxc/include/xenctrl.h +++ b/tools/libxc/include/xenctrl.h @@ -1372,6 +1372,15 @@ int xc_domain_add_to_physmap(xc_interface *xch, unsigned long idx, xen_pfn_t gpfn); +int xc_domain_add_to_physmap_batch(xc_interface *xch, + domid_t domid, + domid_t foreign_domid, + unsigned int space, + unsigned int size, + xen_ulong_t *idxs, + xen_pfn_t *gpfns, + int *errs); + int xc_domain_populate_physmap(xc_interface *xch, uint32_t domid, unsigned long nr_extents, diff --git a/tools/libxc/xc_domain.c b/tools/libxc/xc_domain.c index 5d192ea0e4..3bab4e8bab 100644 --- a/tools/libxc/xc_domain.c +++ b/tools/libxc/xc_domain.c @@ -1032,6 +1032,51 @@ int xc_domain_add_to_physmap(xc_interface *xch, return do_memory_op(xch, XENMEM_add_to_physmap, &xatp, sizeof(xatp)); } +int xc_domain_add_to_physmap_batch(xc_interface *xch, + domid_t domid, + domid_t foreign_domid, + unsigned int space, + unsigned int size, + xen_ulong_t *idxs, + xen_pfn_t *gpfns, + int *errs) +{ + int rc; + DECLARE_HYPERCALL_BOUNCE(idxs, size * sizeof(*idxs), XC_HYPERCALL_BUFFER_BOUNCE_IN); + 
DECLARE_HYPERCALL_BOUNCE(gpfns, size * sizeof(*gpfns), XC_HYPERCALL_BUFFER_BOUNCE_IN); + DECLARE_HYPERCALL_BOUNCE(errs, size * sizeof(*errs), XC_HYPERCALL_BUFFER_BOUNCE_OUT); + + struct xen_add_to_physmap_batch xatp_batch = { + .domid = domid, + .space = space, + .size = size, + .u = { .foreign_domid = foreign_domid } + }; + + if ( xc_hypercall_bounce_pre(xch, idxs) || + xc_hypercall_bounce_pre(xch, gpfns) || + xc_hypercall_bounce_pre(xch, errs) ) + { + PERROR("Could not bounce memory for XENMEM_add_to_physmap_batch"); + rc = -1; + goto out; + } + + set_xen_guest_handle(xatp_batch.idxs, idxs); + set_xen_guest_handle(xatp_batch.gpfns, gpfns); + set_xen_guest_handle(xatp_batch.errs, errs); + + rc = do_memory_op(xch, XENMEM_add_to_physmap_batch, + &xatp_batch, sizeof(xatp_batch)); + +out: + xc_hypercall_bounce_post(xch, idxs); + xc_hypercall_bounce_post(xch, gpfns); + xc_hypercall_bounce_post(xch, errs); + + return rc; +} + int xc_domain_claim_pages(xc_interface *xch, uint32_t domid, unsigned long nr_pages) -- 2.13.1 _______________________________________________ Xen-devel mailing list Xen-devel@lists.xen.org https://lists.xen.org/xen-devel