From: David Woodhouse <d...@amazon.co.uk>

Extract requests, return ENOSYS to all of them. This is enough to allow
older Linux guests to boot, as they need *something* back but it doesn't
matter much what.
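(For reference only, not part of the patch: the wire format being answered
here is the public one from xen/io/xs_wire.h. Every request gets back an
XS_ERROR message whose payload is the string "ENOSYS", i.e. the 16-byte
header below followed by 7 payload bytes.)

    #include <stdint.h>

    /* Public XenStore wire header, as defined in xen/io/xs_wire.h. */
    struct xsd_sockmsg {
        uint32_t type;    /* XS_ERROR in every reply from this stub */
        uint32_t req_id;  /* echoed back from the request */
        uint32_t tx_id;   /* echoed back from the request */
        uint32_t len;     /* payload length: sizeof("ENOSYS") == 7 */
    };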
In the first instance we're likely to wire this up over a UNIX socket to
an actual xenstored implementation, but in the fullness of time it would
be nice to have a fully single-tenant "virtual" xenstore within qemu.

Signed-off-by: David Woodhouse <d...@amazon.co.uk>
---
 hw/i386/kvm/xen_xenstore.c | 227 ++++++++++++++++++++++++++++++++++++-
 1 file changed, 224 insertions(+), 3 deletions(-)

diff --git a/hw/i386/kvm/xen_xenstore.c b/hw/i386/kvm/xen_xenstore.c
index 63530059fa..219291866f 100644
--- a/hw/i386/kvm/xen_xenstore.c
+++ b/hw/i386/kvm/xen_xenstore.c
@@ -188,18 +188,239 @@ uint16_t xen_xenstore_get_port(void)
     return s->guest_port;
 }
 
+static bool req_pending(XenXenstoreState *s)
+{
+    struct xsd_sockmsg *req = (struct xsd_sockmsg *)s->req_data;
+
+    return s->req_offset == XENSTORE_HEADER_SIZE + req->len;
+}
+
+static void reset_req(XenXenstoreState *s)
+{
+    memset(s->req_data, 0, sizeof(s->req_data));
+    s->req_offset = 0;
+}
+
+static void reset_rsp(XenXenstoreState *s)
+{
+    s->rsp_pending = false;
+
+    memset(s->rsp_data, 0, sizeof(s->rsp_data));
+    s->rsp_offset = 0;
+}
+
+static void process_req(XenXenstoreState *s)
+{
+    struct xsd_sockmsg *req = (struct xsd_sockmsg *)s->req_data;
+    struct xsd_sockmsg *rsp = (struct xsd_sockmsg *)s->rsp_data;
+    const char enosys[] = "ENOSYS";
+
+    assert(req_pending(s));
+    assert(!s->rsp_pending);
+
+    qemu_hexdump(stdout, "Xenstore req:", req, sizeof(*req) + req->len);
+
+    rsp->type = XS_ERROR;
+    rsp->req_id = req->req_id;
+    rsp->tx_id = req->tx_id;
+    rsp->len = sizeof(enosys);
+    memcpy((void *)&rsp[1], enosys, sizeof(enosys));
+
+    qemu_hexdump(stdout, "XenStore rsp:", rsp, sizeof(*rsp) + rsp->len);
+
+    s->rsp_pending = true;
+    reset_req(s);
+}
+
+static unsigned int copy_from_ring(XenXenstoreState *s, uint8_t *ptr, unsigned int len)
+{
+    if (!len)
+        return 0;
+
+    XENSTORE_RING_IDX prod = qatomic_read(&s->xs->req_prod);
+    XENSTORE_RING_IDX cons = qatomic_read(&s->xs->req_cons);
+    unsigned int copied = 0;
+
+    smp_mb();
+
+    while (len) {
+        unsigned int avail = prod - cons;
+        unsigned int offset = MASK_XENSTORE_IDX(cons);
+        unsigned int copylen = avail;
+
+        if (avail > XENSTORE_RING_SIZE) {
+            error_report("XenStore ring handling error");
+            s->fatal_error = true;
+            break;
+        } else if (avail == 0)
+            break;
+
+        if (copylen > len) {
+            copylen = len;
+        }
+        if (copylen > XENSTORE_RING_SIZE - offset) {
+            copylen = XENSTORE_RING_SIZE - offset;
+        }
+
+        memcpy(ptr, &s->xs->req[offset], copylen);
+        copied += copylen;
+
+        ptr += copylen;
+        len -= copylen;
+
+        cons += copylen;
+    }
+
+    smp_mb();
+
+    qatomic_set(&s->xs->req_cons, cons);
+
+    return copied;
+}
+
+static unsigned int copy_to_ring(XenXenstoreState *s, uint8_t *ptr, unsigned int len)
+{
+    if (!len)
+        return 0;
+
+    XENSTORE_RING_IDX cons = qatomic_read(&s->xs->rsp_cons);
+    XENSTORE_RING_IDX prod = qatomic_read(&s->xs->rsp_prod);
+    unsigned int copied = 0;
+
+    smp_mb();
+
+    while (len) {
+        unsigned int avail = cons + XENSTORE_RING_SIZE - prod;
+        unsigned int offset = MASK_XENSTORE_IDX(prod);
+        unsigned int copylen = len;
+
+        if (avail > XENSTORE_RING_SIZE) {
+            error_report("XenStore ring handling error");
+            s->fatal_error = true;
+            break;
+        } else if (avail == 0)
+            break;
+
+        if (copylen > avail) {
+            copylen = avail;
+        }
+        if (copylen > XENSTORE_RING_SIZE - offset) {
+            copylen = XENSTORE_RING_SIZE - offset;
+        }
+
+
+        memcpy(&s->xs->rsp[offset], ptr, copylen);
+        copied += copylen;
+
+        ptr += copylen;
+        len -= copylen;
+
+        prod += copylen;
+    }
+
+    smp_mb();
+
+    qatomic_set(&s->xs->rsp_prod, prod);
+
+    return copied;
+}
+
+static unsigned int get_req(XenXenstoreState *s)
+{
+    unsigned int copied = 0;
+
+    if (s->fatal_error)
+        return 0;
+
+    assert(!req_pending(s));
+
+    if (s->req_offset < XENSTORE_HEADER_SIZE) {
+        void *ptr = s->req_data + s->req_offset;
+        unsigned int len = XENSTORE_HEADER_SIZE;
+        unsigned int copylen = copy_from_ring(s, ptr, len);
+
+        copied += copylen;
+        s->req_offset += copylen;
+    }
+
+    if (s->req_offset >= XENSTORE_HEADER_SIZE) {
+        struct xsd_sockmsg *req = (struct xsd_sockmsg *)s->req_data;
+
+        if (req->len > (uint32_t)XENSTORE_PAYLOAD_MAX) {
+            error_report("Illegal XenStore request");
+            s->fatal_error = true;
+            return 0;
+        }
+
+        void *ptr = s->req_data + s->req_offset;
+        unsigned int len = XENSTORE_HEADER_SIZE + req->len - s->req_offset;
+        unsigned int copylen = copy_from_ring(s, ptr, len);
+
+        copied += copylen;
+        s->req_offset += copylen;
+    }
+
+    return copied;
+}
+
+static unsigned int put_rsp(XenXenstoreState *s)
+{
+    if (s->fatal_error)
+        return 0;
+
+    assert(s->rsp_pending);
+
+    struct xsd_sockmsg *rsp = (struct xsd_sockmsg *)s->rsp_data;
+    assert(s->rsp_offset < XENSTORE_HEADER_SIZE + rsp->len);
+
+    void *ptr = s->rsp_data + s->rsp_offset;
+    unsigned int len = XENSTORE_HEADER_SIZE + rsp->len - s->rsp_offset;
+    unsigned int copylen = copy_to_ring(s, ptr, len);
+
+    s->rsp_offset += copylen;
+
+    /* Have we produced a complete response? */
+    if (s->rsp_offset == XENSTORE_HEADER_SIZE + rsp->len)
+        reset_rsp(s);
+
+    return copylen;
+}
+
 static void xen_xenstore_event(void *opaque)
 {
     XenXenstoreState *s = opaque;
     evtchn_port_t port = xen_be_evtchn_pending(s->eh);
+    unsigned int copied_to, copied_from;
+    bool processed, notify = false;
+
     if (port != s->be_port) {
         return;
     }
-    printf("xenstore event\n");
+
     /* We know this is a no-op. */
     xen_be_evtchn_unmask(s->eh, port);
-    qemu_hexdump(stdout, "", s->xs, sizeof(*s->xs));
-    xen_be_evtchn_notify(s->eh, s->be_port);
+
+    do {
+        copied_to = copied_from = 0;
+        processed = false;
+
+        if (s->rsp_pending)
+            copied_to = put_rsp(s);
+
+        if (!req_pending(s))
+            copied_from = get_req(s);
+
+        if (req_pending(s) && !s->rsp_pending) {
+            process_req(s);
+            processed = true;
+        }
+
+        notify |= copied_to || copied_from;
+    } while (copied_to || copied_from || processed);
+
+    if (notify) {
+        xen_be_evtchn_notify(s->eh, s->be_port);
+    }
 }
 
 static void alloc_guest_port(XenXenstoreState *s)
-- 
2.35.3
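
(Also for reference, not part of the patch: the shared-page layout that
s->xs points to, and that copy_from_ring()/copy_to_ring() index into, is
the public one from xen/io/xs_wire.h, abridged below. The ring indices are
free-running counters, which is why "prod - cons" gives the number of
bytes available and MASK_XENSTORE_IDX() is applied only when indexing into
the req[]/rsp[] arrays.)

    #include <stdint.h>

    #define XENSTORE_RING_SIZE 1024
    typedef uint32_t XENSTORE_RING_IDX;
    #define MASK_XENSTORE_IDX(idx) ((idx) & (XENSTORE_RING_SIZE - 1))

    /* Abridged from xen/io/xs_wire.h; later fields omitted. */
    struct xenstore_domain_interface {
        char req[XENSTORE_RING_SIZE]; /* Requests to the xenstore daemon */
        char rsp[XENSTORE_RING_SIZE]; /* Replies and async watch events */
        XENSTORE_RING_IDX req_cons, req_prod;
        XENSTORE_RING_IDX rsp_cons, rsp_prod;
    };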