Re: [Xen-devel] [PATCH v3 6/8] dm_op: convert HVMOP_set_mem_type

2017-01-13 Thread Jan Beulich
>>> On 12.01.17 at 15:58, Paul Durrant wrote:
> This patch removes the need for handling HVMOP restarts, so that
> infrastructure is removed.
> 
> NOTE: This patch also modifies the type of the 'nr' argument of
>   xc_hvm_set_mem_type() from uint64_t to uint32_t. In practice the
>   value passed was always truncated to 32 bits.
> 
> Suggested-by: Jan Beulich 
> Signed-off-by: Paul Durrant 

Hypervisor parts
Reviewed-by: Jan Beulich 
with the same remark as on the previous patch.

Jan


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel


[Xen-devel] [PATCH v3 6/8] dm_op: convert HVMOP_set_mem_type

2017-01-12 Thread Paul Durrant
This patch removes the need for handling HVMOP restarts, so that
infrastructure is removed.

NOTE: This patch also modifies the type of the 'nr' argument of
  xc_hvm_set_mem_type() from uint64_t to uint32_t. In practice the
  value passed was always truncated to 32 bits.

Suggested-by: Jan Beulich 
Signed-off-by: Paul Durrant 
---
Cc: Jan Beulich 
Cc: Ian Jackson 
Acked-by: Wei Liu 
Cc: Andrew Cooper 
Cc: Daniel De Graaf 

v3:
- Addressed more comments from Jan.

v2:
- Addressed several comments from Jan.
---
 tools/libxc/include/xenctrl.h   |   2 +-
 tools/libxc/xc_misc.c   |  29 +++-
 xen/arch/x86/hvm/dm.c   |  92 
 xen/arch/x86/hvm/hvm.c  | 136 +---
 xen/include/public/hvm/dm_op.h  |  22 ++
 xen/include/public/hvm/hvm_op.h |  20 --
 xen/xsm/flask/policy/access_vectors |   2 +-
 7 files changed, 127 insertions(+), 176 deletions(-)

diff --git a/tools/libxc/include/xenctrl.h b/tools/libxc/include/xenctrl.h
index a5c234f..13431bb 100644
--- a/tools/libxc/include/xenctrl.h
+++ b/tools/libxc/include/xenctrl.h
@@ -1634,7 +1634,7 @@ int xc_hvm_modified_memory(
  * Allowed types are HVMMEM_ram_rw, HVMMEM_ram_ro, HVMMEM_mmio_dm
  */
 int xc_hvm_set_mem_type(
-xc_interface *xch, domid_t dom, hvmmem_type_t memtype, uint64_t first_pfn, uint64_t nr);
+xc_interface *xch, domid_t dom, hvmmem_type_t memtype, uint64_t first_pfn, uint32_t nr);
 
 /*
  * Injects a hardware/software CPU trap, to take effect the next time the HVM 
diff --git a/tools/libxc/xc_misc.c b/tools/libxc/xc_misc.c
index 597df99..5b06d6b 100644
--- a/tools/libxc/xc_misc.c
+++ b/tools/libxc/xc_misc.c
@@ -590,30 +590,21 @@ int xc_hvm_modified_memory(
 }
 
 int xc_hvm_set_mem_type(
-xc_interface *xch, domid_t dom, hvmmem_type_t mem_type, uint64_t first_pfn, uint64_t nr)
+xc_interface *xch, domid_t dom, hvmmem_type_t mem_type, uint64_t first_pfn, uint32_t nr)
 {
-DECLARE_HYPERCALL_BUFFER(struct xen_hvm_set_mem_type, arg);
-int rc;
-
-arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
-if ( arg == NULL )
-{
-PERROR("Could not allocate memory for xc_hvm_set_mem_type hypercall");
-return -1;
-}
+struct xen_dm_op op;
+struct xen_dm_op_set_mem_type *data;
 
-arg->domid= dom;
-arg->hvmmem_type  = mem_type;
-arg->first_pfn= first_pfn;
-arg->nr   = nr;
+memset(&op, 0, sizeof(op));
 
-rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
-  HVMOP_set_mem_type,
-  HYPERCALL_BUFFER_AS_ARG(arg));
+op.op = XEN_DMOP_set_mem_type;
+data = &op.u.set_mem_type;
 
-xc_hypercall_buffer_free(xch, arg);
+data->mem_type = mem_type;
+data->first_pfn = first_pfn;
+data->nr = nr;
 
-return rc;
+return do_dm_op(xch, dom, 1, &op, sizeof(op));
 }
 
 int xc_hvm_inject_trap(
diff --git a/xen/arch/x86/hvm/dm.c b/xen/arch/x86/hvm/dm.c
index 1a7c913..97c1f07 100644
--- a/xen/arch/x86/hvm/dm.c
+++ b/xen/arch/x86/hvm/dm.c
@@ -198,6 +198,84 @@ static int modified_memory(struct domain *d, xen_pfn_t *first_pfn,
 return rc;
 }
 
+static bool allow_p2m_type_change(p2m_type_t old, p2m_type_t new)
+{
+return p2m_is_ram(old) ||
+   (p2m_is_hole(old) && new == p2m_mmio_dm) ||
+   (old == p2m_ioreq_server && new == p2m_ram_rw);
+}
+
+static int set_mem_type(struct domain *d, hvmmem_type_t mem_type,
+xen_pfn_t *first_pfn, unsigned int *nr)
+{
+xen_pfn_t last_pfn = *first_pfn + *nr - 1;
+unsigned int iter;
+int rc;
+
+/* Interface types to internal p2m types */
+static const p2m_type_t memtype[] = {
+[HVMMEM_ram_rw]  = p2m_ram_rw,
+[HVMMEM_ram_ro]  = p2m_ram_ro,
+[HVMMEM_mmio_dm] = p2m_mmio_dm,
+[HVMMEM_unused] = p2m_invalid,
+[HVMMEM_ioreq_server] = p2m_ioreq_server
+};
+
+if ( (*first_pfn > last_pfn) ||
+ (last_pfn > domain_get_maximum_gpfn(d)) )
+return -EINVAL;
+
+if ( mem_type >= ARRAY_SIZE(memtype) ||
+ unlikely(mem_type == HVMMEM_unused) )
+return -EINVAL;
+
+iter = 0;
+rc = 0;
+while ( iter < *nr )
+{
+unsigned long pfn = *first_pfn + iter;
+p2m_type_t t;
+
+get_gfn_unshare(d, pfn, &t);
+if ( p2m_is_paging(t) )
+{
+put_gfn(d, pfn);
+p2m_mem_paging_populate(d, pfn);
+return -EAGAIN;
+}
+
+if ( p2m_is_shared(t) )
+rc = -EAGAIN;
+else if ( !allow_p2m_type_change(t, memtype[mem_type]) )
+rc = -EINVAL;
+else
+rc = p2m_change_type_one(d, pfn, t, memtype[mem_type]);
+
+put_gfn(d, pfn);
+
+if ( rc )
+break;
+
+iter++;
+
+