Re: [Xen-devel] [PATCH v8 for-4.9 1/5] hvm/dmop: Box dmop_args rather than passing multiple parameters around
On 21/04/17 15:44, Jennifer Herbert wrote: Hi Julien, Hello Jenny, This is extending an existing feature. Once 4.9 is released, the existing feature will be frozen, and the only way to later get the extra functionality would be to create a completely new dm_op, which does something very similar to an existing one. Although not the end of the world, this wouldn't look so nice. The benefits of the feature are that a VM can request multiple extents to be marked as modified at once, without having to loop through them, calling the existing call many, many times. This will be more efficient and faster. As an extra, additional accessors have been created for dm_op operations, which new dm_ops can take advantage of. The benefit of introducing the feature for 4.9, as opposed to later, is that we won't have to support the same feature with multiple dm_ops with varying parameters - which, as well as looking less good, also unnecessarily bloats the code. I think the risks are low and minor, affecting dm_op operations only. The core change, in 5/5, will only affect the modified memory call, which has been tested. The remaining patches are to tidy up and fix existing behaviour. It would have been useful to have a cover letter explaining that. Anyway, I think I agree it would be better to get the DM OP ABI in shape for continuability before it gets stable. Although, it would be nice if we could get that done in early RCs. Release-acked-by: Julien Grall Cheers, -- Julien Grall ___ Xen-devel mailing list Xen-devel@lists.xen.org https://lists.xen.org/xen-devel
Re: [Xen-devel] [PATCH v8 for-4.9 1/5] hvm/dmop: Box dmop_args rather than passing multiple parameters around
Hi Julien, This is extending an existing feature. Once 4.9 is released, the existing feature will be frozen, and the only way to later get the extra functionality would be to create a completely new dm_op, which does something very similar to an existing one. Although not the end of the world, this wouldn't look so nice. The benefits of the feature are that a VM can request multiple extents to be marked as modified at once, without having to loop through them, calling the existing call many, many times. This will be more efficient and faster. As an extra, additional accessors have been created for dm_op operations, which new dm_ops can take advantage of. The benefit of introducing the feature for 4.9, as opposed to later, is that we won't have to support the same feature with multiple dm_ops with varying parameters - which, as well as looking less good, also unnecessarily bloats the code. I think the risks are low and minor, affecting dm_op operations only. The core change, in 5/5, will only affect the modified memory call, which has been tested. The remaining patches are to tidy up and fix existing behaviour. -jenny On 21/04/17 15:17, Julien Grall wrote: Hi Jennifer, I don't see any cover letter for this series, so I will answer here. Looking at the code, it looks like a new feature rather than a bug fix. Am I right? Could you explain what would be the benefits and risks of getting this code into the release? I would also like to hear the opinion of the x86 maintainers about getting this code in Xen 4.9. Cheers, On 21/04/17 15:05, jennifer.herb...@citrix.com wrote: From: Jennifer Herbert No functional change. Signed-off-by: Jennifer Herbert Signed-off-by: Andrew Cooper Reviewed-by: Jan Beulich Reviewed-by: Paul Durrant -- CC: Paul Durrant CC: Andrew Cooper CC: Jan Beulich CC: Julien Grall --- No change. 
--- xen/arch/x86/hvm/dm.c | 49 + 1 file changed, 29 insertions(+), 20 deletions(-) diff --git a/xen/arch/x86/hvm/dm.c b/xen/arch/x86/hvm/dm.c index d72b7bd..e583e41 100644 --- a/xen/arch/x86/hvm/dm.c +++ b/xen/arch/x86/hvm/dm.c @@ -25,6 +25,13 @@ #include +struct dmop_args { +domid_t domid; +unsigned int nr_bufs; +/* Reserve enough buf elements for all current hypercalls. */ +struct xen_dm_op_buf buf[2]; +}; + static bool copy_buf_from_guest(const xen_dm_op_buf_t bufs[], unsigned int nr_bufs, void *dst, unsigned int idx, size_t dst_size) @@ -56,7 +63,7 @@ static bool copy_buf_to_guest(const xen_dm_op_buf_t bufs[], } static int track_dirty_vram(struct domain *d, xen_pfn_t first_pfn, -unsigned int nr, struct xen_dm_op_buf *buf) +unsigned int nr, const struct xen_dm_op_buf *buf) { if ( nr > (GB(1) >> PAGE_SHIFT) ) return -EINVAL; @@ -287,16 +294,14 @@ static int inject_event(struct domain *d, return 0; } -static int dm_op(domid_t domid, - unsigned int nr_bufs, - xen_dm_op_buf_t bufs[]) +static int dm_op(const struct dmop_args *op_args) { struct domain *d; struct xen_dm_op op; bool const_op = true; long rc; -rc = rcu_lock_remote_domain_by_id(domid, &d); +rc = rcu_lock_remote_domain_by_id(op_args->domid, &d); if ( rc ) return rc; @@ -307,7 +312,7 @@ static int dm_op(domid_t domid, if ( rc ) goto out; -if ( !copy_buf_from_guest(bufs, nr_bufs, &op, 0, sizeof(op)) ) +if ( !copy_buf_from_guest(&op_args->buf[0], op_args->nr_bufs, &op, 0, sizeof(op)) ) { rc = -EFAULT; goto out; @@ -466,10 +471,10 @@ static int dm_op(domid_t domid, if ( data->pad ) break; -if ( nr_bufs < 2 ) +if ( op_args->nr_bufs < 2 ) break; -rc = track_dirty_vram(d, data->first_pfn, data->nr, &bufs[1]); +rc = track_dirty_vram(d, data->first_pfn, data->nr, &op_args->buf[1]); break; } @@ -564,7 +569,7 @@ static int dm_op(domid_t domid, if ( (!rc || rc == -ERESTART) && !const_op && - !copy_buf_to_guest(bufs, nr_bufs, 0, &op, sizeof(op)) ) + !copy_buf_to_guest(&op_args->buf[0], op_args->nr_bufs, 0, &op, 
sizeof(op)) ) rc = -EFAULT; out: @@ -587,20 +592,21 @@ CHECK_dm_op_set_mem_type; CHECK_dm_op_inject_event; CHECK_dm_op_inject_msi; -#define MAX_NR_BUFS 2 - int compat_dm_op(domid_t domid, unsigned int nr_bufs, XEN_GUEST_HANDLE_PARAM(void) bufs) { -struct xen_dm_op_buf nat[MAX_NR_BUFS]; +struct dmop_args args; unsigned int i; int rc; -if ( nr_bufs > MAX_NR_BUFS ) +if ( nr_bufs > ARRAY_SIZE(args.buf) ) return -E2BIG; -for ( i = 0; i < nr_bufs; i++ ) +args.domid = domid; +args.nr_bufs = nr_bufs; + +for ( i = 0; i < args.nr_bufs; i++ ) { str
Re: [Xen-devel] [PATCH v8 for-4.9 1/5] hvm/dmop: Box dmop_args rather than passing multiple parameters around
On 21/04/17 15:17, Julien Grall wrote: > Hi Jennifer, > > I don't see any cover letter for this series, so I will answer here. > > Looking at the code, it looks like a new feature rather than a bug > fix. Am I right? > > Could you explain what would be the benefits and risks to get this > code in the release? > > I also like to hear the opinion of the x86 maintainers about getting > this code in Xen 4.9. Patch 1 fixes a bug in the existing implementation, which absolutely needs fixing. Patch 4 is a correction to the DM OP ABI (which is still modifiable, before becoming properly stable when 4.9 releases). If that were postponed to 4.10, we'd have to burn the existing modified_memory subop and introduce a new corrected one. The intermediate patches are fallout from previous rounds of review. One item on my TODO list is to re-review all dmops for proper continuability before the 4.9 release, so I make no promises that there won't be further bugfixes needing to get into 4.9. ~Andrew ___ Xen-devel mailing list Xen-devel@lists.xen.org https://lists.xen.org/xen-devel
Re: [Xen-devel] [PATCH v8 for-4.9 1/5] hvm/dmop: Box dmop_args rather than passing multiple parameters around
Hi Jennifer, I don't see any cover letter for this series, so I will answer here. Looking at the code, it looks like a new feature rather than a bug fix. Am I right? Could you explain what would be the benefits and risks to get this code in the release? I also like to hear the opinion of the x86 maintainers about getting this code in Xen 4.9. Cheers, On 21/04/17 15:05, jennifer.herb...@citrix.com wrote: From: Jennifer Herbert No functional change. Signed-off-by: Jennifer Herbert Signed-off-by: Andrew Cooper Reviewed-by: Jan Beulich Reviewed-by: Paul Durrant -- CC: Paul Durrant CC: Andrew Cooper CC: Jan Beulich CC: Julien Grall --- No change. --- xen/arch/x86/hvm/dm.c | 49 + 1 file changed, 29 insertions(+), 20 deletions(-) diff --git a/xen/arch/x86/hvm/dm.c b/xen/arch/x86/hvm/dm.c index d72b7bd..e583e41 100644 --- a/xen/arch/x86/hvm/dm.c +++ b/xen/arch/x86/hvm/dm.c @@ -25,6 +25,13 @@ #include +struct dmop_args { +domid_t domid; +unsigned int nr_bufs; +/* Reserve enough buf elements for all current hypercalls. 
*/ +struct xen_dm_op_buf buf[2]; +}; + static bool copy_buf_from_guest(const xen_dm_op_buf_t bufs[], unsigned int nr_bufs, void *dst, unsigned int idx, size_t dst_size) @@ -56,7 +63,7 @@ static bool copy_buf_to_guest(const xen_dm_op_buf_t bufs[], } static int track_dirty_vram(struct domain *d, xen_pfn_t first_pfn, -unsigned int nr, struct xen_dm_op_buf *buf) +unsigned int nr, const struct xen_dm_op_buf *buf) { if ( nr > (GB(1) >> PAGE_SHIFT) ) return -EINVAL; @@ -287,16 +294,14 @@ static int inject_event(struct domain *d, return 0; } -static int dm_op(domid_t domid, - unsigned int nr_bufs, - xen_dm_op_buf_t bufs[]) +static int dm_op(const struct dmop_args *op_args) { struct domain *d; struct xen_dm_op op; bool const_op = true; long rc; -rc = rcu_lock_remote_domain_by_id(domid, &d); +rc = rcu_lock_remote_domain_by_id(op_args->domid, &d); if ( rc ) return rc; @@ -307,7 +312,7 @@ static int dm_op(domid_t domid, if ( rc ) goto out; -if ( !copy_buf_from_guest(bufs, nr_bufs, &op, 0, sizeof(op)) ) +if ( !copy_buf_from_guest(&op_args->buf[0], op_args->nr_bufs, &op, 0, sizeof(op)) ) { rc = -EFAULT; goto out; @@ -466,10 +471,10 @@ static int dm_op(domid_t domid, if ( data->pad ) break; -if ( nr_bufs < 2 ) +if ( op_args->nr_bufs < 2 ) break; -rc = track_dirty_vram(d, data->first_pfn, data->nr, &bufs[1]); +rc = track_dirty_vram(d, data->first_pfn, data->nr, &op_args->buf[1]); break; } @@ -564,7 +569,7 @@ static int dm_op(domid_t domid, if ( (!rc || rc == -ERESTART) && !const_op && - !copy_buf_to_guest(bufs, nr_bufs, 0, &op, sizeof(op)) ) + !copy_buf_to_guest(&op_args->buf[0], op_args->nr_bufs, 0, &op, sizeof(op)) ) rc = -EFAULT; out: @@ -587,20 +592,21 @@ CHECK_dm_op_set_mem_type; CHECK_dm_op_inject_event; CHECK_dm_op_inject_msi; -#define MAX_NR_BUFS 2 - int compat_dm_op(domid_t domid, unsigned int nr_bufs, XEN_GUEST_HANDLE_PARAM(void) bufs) { -struct xen_dm_op_buf nat[MAX_NR_BUFS]; +struct dmop_args args; unsigned int i; int rc; -if ( nr_bufs > MAX_NR_BUFS ) +if ( nr_bufs > 
ARRAY_SIZE(args.buf) ) return -E2BIG; -for ( i = 0; i < nr_bufs; i++ ) +args.domid = domid; +args.nr_bufs = nr_bufs; + +for ( i = 0; i < args.nr_bufs; i++ ) { struct compat_dm_op_buf cmp; @@ -610,12 +616,12 @@ int compat_dm_op(domid_t domid, #define XLAT_dm_op_buf_HNDL_h(_d_, _s_) \ guest_from_compat_handle((_d_)->h, (_s_)->h) -XLAT_dm_op_buf(&nat[i], &cmp); +XLAT_dm_op_buf(&args.buf[i], &cmp); #undef XLAT_dm_op_buf_HNDL_h } -rc = dm_op(domid, nr_bufs, nat); +rc = dm_op(&args); if ( rc == -ERESTART ) rc = hypercall_create_continuation(__HYPERVISOR_dm_op, "iih", @@ -628,16 +634,19 @@ long do_dm_op(domid_t domid, unsigned int nr_bufs, XEN_GUEST_HANDLE_PARAM(xen_dm_op_buf_t) bufs) { -struct xen_dm_op_buf nat[MAX_NR_BUFS]; +struct dmop_args args; int rc; -if ( nr_bufs > MAX_NR_BUFS ) +if ( nr_bufs > ARRAY_SIZE(args.buf) ) return -E2BIG; -if ( copy_from_guest_offset(nat, bufs, 0, nr_bufs) ) +args.domid = domid; +args.nr_bufs = nr_bufs; + +if ( copy_from_guest_offset(&args.buf[0], bufs, 0, args.nr_bufs) ) return -EFAULT; -rc = dm_op(domid, nr_bufs, nat); +rc = dm_op(&args); if ( rc == -ERESTART ) rc = hypercall_create_continuation(__HYPERVISOR_dm_op, "iih", -- Julien Grall _
[Xen-devel] [PATCH v8 for-4.9 1/5] hvm/dmop: Box dmop_args rather than passing multiple parameters around
From: Jennifer Herbert No functional change. Signed-off-by: Jennifer Herbert Signed-off-by: Andrew Cooper Reviewed-by: Jan Beulich Reviewed-by: Paul Durrant -- CC: Paul Durrant CC: Andrew Cooper CC: Jan Beulich CC: Julien Grall --- No change. --- xen/arch/x86/hvm/dm.c | 49 + 1 file changed, 29 insertions(+), 20 deletions(-) diff --git a/xen/arch/x86/hvm/dm.c b/xen/arch/x86/hvm/dm.c index d72b7bd..e583e41 100644 --- a/xen/arch/x86/hvm/dm.c +++ b/xen/arch/x86/hvm/dm.c @@ -25,6 +25,13 @@ #include +struct dmop_args { +domid_t domid; +unsigned int nr_bufs; +/* Reserve enough buf elements for all current hypercalls. */ +struct xen_dm_op_buf buf[2]; +}; + static bool copy_buf_from_guest(const xen_dm_op_buf_t bufs[], unsigned int nr_bufs, void *dst, unsigned int idx, size_t dst_size) @@ -56,7 +63,7 @@ static bool copy_buf_to_guest(const xen_dm_op_buf_t bufs[], } static int track_dirty_vram(struct domain *d, xen_pfn_t first_pfn, -unsigned int nr, struct xen_dm_op_buf *buf) +unsigned int nr, const struct xen_dm_op_buf *buf) { if ( nr > (GB(1) >> PAGE_SHIFT) ) return -EINVAL; @@ -287,16 +294,14 @@ static int inject_event(struct domain *d, return 0; } -static int dm_op(domid_t domid, - unsigned int nr_bufs, - xen_dm_op_buf_t bufs[]) +static int dm_op(const struct dmop_args *op_args) { struct domain *d; struct xen_dm_op op; bool const_op = true; long rc; -rc = rcu_lock_remote_domain_by_id(domid, &d); +rc = rcu_lock_remote_domain_by_id(op_args->domid, &d); if ( rc ) return rc; @@ -307,7 +312,7 @@ static int dm_op(domid_t domid, if ( rc ) goto out; -if ( !copy_buf_from_guest(bufs, nr_bufs, &op, 0, sizeof(op)) ) +if ( !copy_buf_from_guest(&op_args->buf[0], op_args->nr_bufs, &op, 0, sizeof(op)) ) { rc = -EFAULT; goto out; @@ -466,10 +471,10 @@ static int dm_op(domid_t domid, if ( data->pad ) break; -if ( nr_bufs < 2 ) +if ( op_args->nr_bufs < 2 ) break; -rc = track_dirty_vram(d, data->first_pfn, data->nr, &bufs[1]); +rc = track_dirty_vram(d, data->first_pfn, data->nr, 
&op_args->buf[1]); break; } @@ -564,7 +569,7 @@ static int dm_op(domid_t domid, if ( (!rc || rc == -ERESTART) && !const_op && - !copy_buf_to_guest(bufs, nr_bufs, 0, &op, sizeof(op)) ) + !copy_buf_to_guest(&op_args->buf[0], op_args->nr_bufs, 0, &op, sizeof(op)) ) rc = -EFAULT; out: @@ -587,20 +592,21 @@ CHECK_dm_op_set_mem_type; CHECK_dm_op_inject_event; CHECK_dm_op_inject_msi; -#define MAX_NR_BUFS 2 - int compat_dm_op(domid_t domid, unsigned int nr_bufs, XEN_GUEST_HANDLE_PARAM(void) bufs) { -struct xen_dm_op_buf nat[MAX_NR_BUFS]; +struct dmop_args args; unsigned int i; int rc; -if ( nr_bufs > MAX_NR_BUFS ) +if ( nr_bufs > ARRAY_SIZE(args.buf) ) return -E2BIG; -for ( i = 0; i < nr_bufs; i++ ) +args.domid = domid; +args.nr_bufs = nr_bufs; + +for ( i = 0; i < args.nr_bufs; i++ ) { struct compat_dm_op_buf cmp; @@ -610,12 +616,12 @@ int compat_dm_op(domid_t domid, #define XLAT_dm_op_buf_HNDL_h(_d_, _s_) \ guest_from_compat_handle((_d_)->h, (_s_)->h) -XLAT_dm_op_buf(&nat[i], &cmp); +XLAT_dm_op_buf(&args.buf[i], &cmp); #undef XLAT_dm_op_buf_HNDL_h } -rc = dm_op(domid, nr_bufs, nat); +rc = dm_op(&args); if ( rc == -ERESTART ) rc = hypercall_create_continuation(__HYPERVISOR_dm_op, "iih", @@ -628,16 +634,19 @@ long do_dm_op(domid_t domid, unsigned int nr_bufs, XEN_GUEST_HANDLE_PARAM(xen_dm_op_buf_t) bufs) { -struct xen_dm_op_buf nat[MAX_NR_BUFS]; +struct dmop_args args; int rc; -if ( nr_bufs > MAX_NR_BUFS ) +if ( nr_bufs > ARRAY_SIZE(args.buf) ) return -E2BIG; -if ( copy_from_guest_offset(nat, bufs, 0, nr_bufs) ) +args.domid = domid; +args.nr_bufs = nr_bufs; + +if ( copy_from_guest_offset(&args.buf[0], bufs, 0, args.nr_bufs) ) return -EFAULT; -rc = dm_op(domid, nr_bufs, nat); +rc = dm_op(&args); if ( rc == -ERESTART ) rc = hypercall_create_continuation(__HYPERVISOR_dm_op, "iih", -- 2.1.4 ___ Xen-devel mailing list Xen-devel@lists.xen.org https://lists.xen.org/xen-devel