Re: [Xen-devel] [PATCH v2] x86/hvm: add support for broadcast of buffered ioreqs...

2015-07-13 Thread Paul Durrant
> -----Original Message-----
> From: Jan Beulich [mailto:jbeul...@suse.com]
> Sent: 13 July 2015 09:44
> To: Paul Durrant
> Cc: Andrew Cooper; xen-devel@lists.xen.org; Keir (Xen.org)
> Subject: Re: [PATCH v2] x86/hvm: add support for broadcast of buffered
> ioreqs...
> 
> >>> On 10.07.15 at 18:07,  wrote:
> > @@ -2710,17 +2711,21 @@ int hvm_send_assist_req(struct hvm_ioreq_server *s, ioreq_t *proto_p)
> >      return X86EMUL_UNHANDLEABLE;
> >  }
> >
> > -void hvm_broadcast_assist_req(ioreq_t *p)
> > +int hvm_broadcast_ioreq(ioreq_t *p, bool_t buffered)
> >  {
> >      struct domain *d = current->domain;
> >      struct hvm_ioreq_server *s;
> > +    unsigned int failed = 0;
> >
> >      ASSERT(p->type == IOREQ_TYPE_INVALIDATE);
> >
> >      list_for_each_entry ( s,
> >                            &d->arch.hvm_domain.ioreq_server.list,
> >                            list_entry )
> > -        (void) hvm_send_assist_req(s, p);
> > +        if ( hvm_send_ioreq(s, p, buffered) == X86EMUL_UNHANDLEABLE )
> > +            failed++;
> > +
> > +    return failed;
> 
> I'll try to remember to fix up the mismatch between the function's return
> type and its return expression upon commit. Looks good beyond that.
> 

Ok. Thanks,

  Paul

> Thanks, Jan




Re: [Xen-devel] [PATCH v2] x86/hvm: add support for broadcast of buffered ioreqs...

2015-07-13 Thread Jan Beulich
>>> On 10.07.15 at 18:07,  wrote:
> @@ -2710,17 +2711,21 @@ int hvm_send_assist_req(struct hvm_ioreq_server *s, ioreq_t *proto_p)
>      return X86EMUL_UNHANDLEABLE;
>  }
>  
> -void hvm_broadcast_assist_req(ioreq_t *p)
> +int hvm_broadcast_ioreq(ioreq_t *p, bool_t buffered)
>  {
>      struct domain *d = current->domain;
>      struct hvm_ioreq_server *s;
> +    unsigned int failed = 0;
>  
>      ASSERT(p->type == IOREQ_TYPE_INVALIDATE);
>  
>      list_for_each_entry ( s,
>                            &d->arch.hvm_domain.ioreq_server.list,
>                            list_entry )
> -        (void) hvm_send_assist_req(s, p);
> +        if ( hvm_send_ioreq(s, p, buffered) == X86EMUL_UNHANDLEABLE )
> +            failed++;
> +
> +    return failed;

I'll try to remember to fix up the mismatch between the function's return
type and its return expression upon commit. Looks good beyond that.

Thanks, Jan
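
[A sketch of the fixup Jan mentions -- an assumption about what "fixing up
the mismatch" means, not text from the eventual commit. Since `failed' is an
unsigned int, the declared return type would simply be widened to match, both
here and in the hvm_broadcast_ioreq prototype the patch touches in
xen/include/asm-x86/hvm/hvm.h:

-int hvm_broadcast_ioreq(ioreq_t *p, bool_t buffered)
+unsigned int hvm_broadcast_ioreq(ioreq_t *p, bool_t buffered)

Alternatively, `failed' could be declared as a plain int; either change
resolves the signed/unsigned mismatch between return type and return
expression.]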




[Xen-devel] [PATCH v2] x86/hvm: add support for broadcast of buffered ioreqs...

2015-07-10 Thread Paul Durrant
...and make RTC timeoffset ioreqs use it.

Without this patch, RTC timeoffset updates go nowhere and Xen complains
with a (non-rate-limited) printk.

Signed-off-by: Paul Durrant 
Cc: Keir Fraser 
Cc: Jan Beulich 
Cc: Andrew Cooper 
---

v2:
- Add (g)printk back in as requested by Jan. It will be emitted if
  one or more time-offset ioreqs fail.
- Also add code to flag up a map-cache invalidation ioreq failure
  in a similar fashion.
---
 xen/arch/x86/hvm/emulate.c    |    2 +-
 xen/arch/x86/hvm/hvm.c        |   29 +
 xen/arch/x86/hvm/io.c         |    7 ---
 xen/arch/x86/hvm/stdvga.c     |    8 +---
 xen/include/asm-x86/hvm/hvm.h |    4 ++--
 xen/include/asm-x86/hvm/io.h  |    1 -
 6 files changed, 29 insertions(+), 22 deletions(-)

diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 01ee972..795321c 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -161,7 +161,7 @@ static int hvmemul_do_io(
         }
         else
         {
-            rc = hvm_send_assist_req(s, &p);
+            rc = hvm_send_ioreq(s, &p, 0);
             if ( rc != X86EMUL_RETRY || curr->domain->is_shutting_down )
                 vio->io_req.state = STATE_IOREQ_NONE;
             else if ( data_is_addr )
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index ebcf7a9..36b7408 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -2561,10 +2561,9 @@ struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
     return d->arch.hvm_domain.default_ioreq_server;
 }
 
-int hvm_buffered_io_send(ioreq_t *p)
+static int hvm_send_buffered_ioreq(struct hvm_ioreq_server *s, ioreq_t *p)
 {
     struct domain *d = current->domain;
-    struct hvm_ioreq_server *s = hvm_select_ioreq_server(d, p);
     struct hvm_ioreq_page *iorp;
     buffered_iopage_t *pg;
     buf_ioreq_t bp = { .data = p->data,
@@ -2577,14 +2576,11 @@ int hvm_buffered_io_send(ioreq_t *p)
     /* Ensure buffered_iopage fits in a page */
     BUILD_BUG_ON(sizeof(buffered_iopage_t) > PAGE_SIZE);
 
-    if ( !s )
-        return 0;
-
     iorp = &s->bufioreq;
     pg = iorp->va;
 
     if ( !pg )
-        return 0;
+        return X86EMUL_UNHANDLEABLE;
 
     /*
      * Return 0 for the cases we can't deal with:
@@ -2614,7 +2610,7 @@ int hvm_buffered_io_send(ioreq_t *p)
         break;
     default:
         gdprintk(XENLOG_WARNING, "unexpected ioreq size: %u\n", p->size);
-        return 0;
+        return X86EMUL_UNHANDLEABLE;
     }
 
     spin_lock(&s->bufioreq_lock);
@@ -2624,7 +2620,7 @@ int hvm_buffered_io_send(ioreq_t *p)
     {
         /* The queue is full: send the iopacket through the normal path. */
         spin_unlock(&s->bufioreq_lock);
-        return 0;
+        return X86EMUL_UNHANDLEABLE;
     }
 
     pg->buf_ioreq[pg->ptrs.write_pointer % IOREQ_BUFFER_SLOT_NUM] = bp;
@@ -2654,16 +2650,21 @@ int hvm_buffered_io_send(ioreq_t *p)
     notify_via_xen_event_channel(d, s->bufioreq_evtchn);
     spin_unlock(&s->bufioreq_lock);
 
-    return 1;
+    return X86EMUL_OKAY;
 }
 
-int hvm_send_assist_req(struct hvm_ioreq_server *s, ioreq_t *proto_p)
+int hvm_send_ioreq(struct hvm_ioreq_server *s, ioreq_t *proto_p,
+                   bool_t buffered)
 {
     struct vcpu *curr = current;
     struct domain *d = curr->domain;
     struct hvm_ioreq_vcpu *sv;
 
     ASSERT(s);
+
+    if ( buffered )
+        return hvm_send_buffered_ioreq(s, proto_p);
+
     if ( unlikely(!vcpu_start_shutdown_deferral(curr)) )
         return X86EMUL_RETRY;
 
@@ -2710,17 +2711,21 @@ int hvm_send_assist_req(struct hvm_ioreq_server *s, ioreq_t *proto_p)
     return X86EMUL_UNHANDLEABLE;
 }
 
-void hvm_broadcast_assist_req(ioreq_t *p)
+int hvm_broadcast_ioreq(ioreq_t *p, bool_t buffered)
 {
     struct domain *d = current->domain;
     struct hvm_ioreq_server *s;
+    unsigned int failed = 0;
 
     ASSERT(p->type == IOREQ_TYPE_INVALIDATE);
 
     list_for_each_entry ( s,
                           &d->arch.hvm_domain.ioreq_server.list,
                           list_entry )
-        (void) hvm_send_assist_req(s, p);
+        if ( hvm_send_ioreq(s, p, buffered) == X86EMUL_UNHANDLEABLE )
+            failed++;
+
+    return failed;
 }
 
 void hvm_hlt(unsigned long rflags)
diff --git a/xen/arch/x86/hvm/io.c b/xen/arch/x86/hvm/io.c
index 3b51d59..9dc6c71 100644
--- a/xen/arch/x86/hvm/io.c
+++ b/xen/arch/x86/hvm/io.c
@@ -60,8 +60,8 @@ void send_timeoffset_req(unsigned long timeoff)
     if ( timeoff == 0 )
         return;
 
-    if ( !hvm_buffered_io_send(&p) )
-        printk("Unsuccessful timeoffset update\n");
+    if ( hvm_broadcast_ioreq(&p, 1) != 0 )
+        gprintk(XENLOG_ERR, "Unsuccessful timeoffset update\n");
 }
 
 /* Ask ioemu mapcache to invalidate mappings. */
@@ -74,7 +74,8 @@ void send_invalidate_req(void)
         .data = ~0UL, /* flush all */
     };
 
-    hvm_broadcast_assist_req(&p);
+    if ( hvm_broadcast_ioreq(&p, 0) != 0 )
+        gprintk(XENLOG_ERR,
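
[The hunk above is truncated in this archive copy. A sketch of how the call
site plausibly continues, mirroring the time-offset hunk earlier in this
patch -- the message string here is an assumption, not recovered text:

+                "Unsuccessful map-cache invalidate\n");
 }
]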