Add a "data-plane" thread structure and its callback functions, inspired by the virtio-blk data-plane.
The following commits will use and start the data-plane. Signed-off-by: Marc-André Lureau <marcandre.lur...@redhat.com> --- hw/display/virtio-gpu-3d.c | 103 +++++++++++++++++++++++++++++++++++++++++ hw/display/virtio-gpu.c | 13 ++++++ include/hw/virtio/virtio-gpu.h | 23 +++++++++ 3 files changed, 139 insertions(+) diff --git a/hw/display/virtio-gpu-3d.c b/hw/display/virtio-gpu-3d.c index e7115bf..6dd44d6 100644 --- a/hw/display/virtio-gpu-3d.c +++ b/hw/display/virtio-gpu-3d.c @@ -16,6 +16,7 @@ #include "qemu/iov.h" #include "trace.h" #include "hw/virtio/virtio.h" +#include "hw/virtio/virtio-bus.h" #include "hw/virtio/virtio-gpu.h" #include "qapi/error.h" @@ -632,6 +633,108 @@ void virtio_gpu_virgl_reset(VirtIOGPU *g) } } +static void virtio_gpu_from_thread_read(EventNotifier *n) +{ + VirtIOGPUDataPlane *dp = container_of(n, VirtIOGPUDataPlane, + thread_to_qemu); + VirtIOGPU *g = dp->gpu; + struct virtio_gpu_thread_msg *qmsg; + + event_notifier_test_and_clear(n); + + qemu_mutex_lock(&dp->thread_msg_lock); + qmsg = &dp->thread_msg; + qemu_mutex_unlock(&dp->thread_msg_lock); + + switch (qmsg->id) { + case VIRTIO_GPU_CMD_SET_SCANOUT: + virtio_gpu_do_set_scanout(g, &qmsg->u.ss); + break; + case VIRTIO_GPU_CMD_RESOURCE_FLUSH: + virtio_gpu_do_resource_flush(g, &qmsg->u.fl); + break; + default: + fprintf(stderr, "unknown msg received %d\n", qmsg->id); + } + event_notifier_set(&dp->qemu_to_thread_ack); +} + +static void notify_guest_vq(int i, void *opaque) +{ + VirtIOGPU *g = opaque; + VirtIODevice *vdev = VIRTIO_DEVICE(g); + VirtQueue *vq = virtio_get_queue(vdev, i); + + if (virtio_should_notify(vdev, vq)) { + event_notifier_set(virtio_queue_get_guest_notifier(vq)); + } +} + +static void notify_guest_bh(void *opaque) +{ + VirtIOGPUDataPlane *dp = opaque; + VirtIOGPU *g = dp->gpu; + + bitmap_foreach(dp->batch_notify_vqs, 2, notify_guest_vq, g); +} + +static void thread_process_bh(void *opaque) +{ + VirtIOGPU *g = opaque; + + virtio_gpu_process_cmdq(g); +} + +int 
virtio_gpu_virgl_dp_create(VirtIOGPU *g, Error **errp) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(g); + BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev))); + VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); + VirtIOGPUDataPlane *dp; + AioContext *ctx; + + /* Don't try if transport does not support notifiers. */ + if (!k->set_guest_notifiers || !k->ioeventfd_started) { + error_setg(errp, + "device is incompatible with dataplane " + "(transport does not support notifiers)"); + return -1; + } + + dp = g_new0(VirtIOGPUDataPlane, 1); + qemu_mutex_init(&dp->thread_msg_lock); + ctx = iothread_get_aio_context(g->iothread); + dp->thread_process_bh = aio_bh_new(ctx, thread_process_bh, g); + dp->notify_guest_bh = aio_bh_new(ctx, notify_guest_bh, dp); + dp->batch_notify_vqs = bitmap_new(2); + + event_notifier_init(&dp->thread_to_qemu, 0); + event_notifier_init(&dp->qemu_to_thread_ack, 0); + + event_notifier_set_handler(&dp->thread_to_qemu, true, + virtio_gpu_from_thread_read); + dp->gpu = g; + g->dp = dp; + + return 0; +} + +void virtio_gpu_virgl_dp_destroy(VirtIOGPU *g) +{ + VirtIOGPUDataPlane *dp = g->dp; + + if (!dp) { + return; + } + + g_free(dp->batch_notify_vqs); + qemu_bh_delete(dp->notify_guest_bh); + qemu_bh_delete(dp->thread_process_bh); + event_notifier_cleanup(&dp->thread_to_qemu); + event_notifier_cleanup(&dp->qemu_to_thread_ack); + g_free(dp); +} + int virtio_gpu_virgl_init(VirtIOGPU *g) { int ret; diff --git a/hw/display/virtio-gpu.c b/hw/display/virtio-gpu.c index 5fca1e7..816f37c 100644 --- a/hw/display/virtio-gpu.c +++ b/hw/display/virtio-gpu.c @@ -1200,6 +1200,16 @@ static void virtio_gpu_device_realize(DeviceState *qdev, Error **errp) g->ctrl_vq = virtio_add_queue(vdev, 256, virtio_gpu_handle_ctrl_cb); g->cursor_vq = virtio_add_queue(vdev, 16, virtio_gpu_handle_cursor_cb); g->virtio_config.num_capsets = 1; +#if defined(CONFIG_VIRGL) + { + Error *err = NULL; + if (g->iothread && virtio_gpu_virgl_dp_create(g, &err) != 0) { + error_propagate(errp, err); + 
virtio_cleanup(vdev); + return; + } + } +#endif } else { g->ctrl_vq = virtio_add_queue(vdev, 64, virtio_gpu_handle_ctrl_cb); g->cursor_vq = virtio_add_queue(vdev, 16, virtio_gpu_handle_cursor_cb); @@ -1237,6 +1247,9 @@ static void virtio_gpu_device_unrealize(DeviceState *qdev, Error **errp) { VirtIOGPU *g = VIRTIO_GPU(qdev); +#if defined(CONFIG_VIRGL) + virtio_gpu_virgl_dp_destroy(g); +#endif qemu_bh_delete(g->update_cursor_bh); g_free(g->set_cursor_bitmap); g_free(g->define_cursor_bitmap); diff --git a/include/hw/virtio/virtio-gpu.h b/include/hw/virtio/virtio-gpu.h index d8f9b12..82d1e45 100644 --- a/include/hw/virtio/virtio-gpu.h +++ b/include/hw/virtio/virtio-gpu.h @@ -102,6 +102,26 @@ struct virtio_gpu_thread_msg { } u; }; +typedef struct VirtIOGPU VirtIOGPU; + +typedef struct VirtIOGPUDataPlane { + VirtIOGPU *gpu; + bool starting; + bool started; + bool disabled; + bool stopping; + QEMUBH *notify_guest_bh; + unsigned long *batch_notify_vqs; + QEMUBH *thread_process_bh; + + EventNotifier thread_to_qemu; + EventNotifier qemu_to_thread_ack; + struct virtio_gpu_thread_msg thread_msg; + QemuMutex thread_msg_lock; +} VirtIOGPUDataPlane; + +#define VIRTIO_GPU_DATA_PLANE_OK(dp) ((dp) && (dp)->started && !(dp)->disabled) + typedef struct VirtIOGPU { VirtIODevice parent_obj; @@ -137,6 +157,7 @@ typedef struct VirtIOGPU { IOThread *iothread; QEMUGLContext thread_ctx; + VirtIOGPUDataPlane *dp; QEMUTimer *fence_poll; QEMUTimer *print_stats; @@ -194,5 +215,7 @@ void virtio_gpu_virgl_process_cmd(VirtIOGPU *g, void virtio_gpu_virgl_fence_poll(VirtIOGPU *g); void virtio_gpu_virgl_reset(VirtIOGPU *g); int virtio_gpu_virgl_init(VirtIOGPU *g); +int virtio_gpu_virgl_dp_create(VirtIOGPU *g, Error **errp); +void virtio_gpu_virgl_dp_destroy(VirtIOGPU *g); #endif -- 2.9.0