Properly destroy virgl resources on virtio-gpu reset so that resources are not leaked across a hot reboot of a VM.
Suggested-by: Akihiko Odaki <[email protected]> Signed-off-by: Dmitry Osipenko <[email protected]> --- hw/display/virtio-gpu-gl.c | 18 +---- hw/display/virtio-gpu-virgl.c | 117 ++++++++++++++++++++++++++------- include/hw/virtio/virtio-gpu.h | 6 +- 3 files changed, 101 insertions(+), 40 deletions(-) diff --git a/hw/display/virtio-gpu-gl.c b/hw/display/virtio-gpu-gl.c index b941e9a4b789..8b71dd6fc26f 100644 --- a/hw/display/virtio-gpu-gl.c +++ b/hw/display/virtio-gpu-gl.c @@ -63,29 +63,14 @@ static void virtio_gpu_gl_flushed(VirtIOGPUBase *b) static void virtio_gpu_gl_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq) { VirtIOGPU *g = VIRTIO_GPU(vdev); - VirtIOGPUGL *gl = VIRTIO_GPU_GL(vdev); struct virtio_gpu_ctrl_command *cmd; if (!virtio_queue_ready(vq)) { return; } - switch (gl->renderer_state) { - case RS_RESET: - virtio_gpu_virgl_reset(g); - /* fallthrough */ - case RS_START: - if (virtio_gpu_virgl_init(g)) { - gl->renderer_state = RS_INIT_FAILED; - return; - } - - gl->renderer_state = RS_INITED; - break; - case RS_INIT_FAILED: + if (!virtio_gpu_virgl_update_render_state(g)) { return; - case RS_INITED: - break; } cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command)); @@ -201,6 +186,7 @@ static void virtio_gpu_gl_class_init(ObjectClass *klass, const void *data) vgc->process_cmd = virtio_gpu_virgl_process_cmd; vgc->update_cursor_data = virtio_gpu_gl_update_cursor_data; + vgc->resource_destroy = virtio_gpu_virgl_resource_destroy; vdc->realize = virtio_gpu_gl_device_realize; vdc->unrealize = virtio_gpu_gl_device_unrealize; vdc->reset = virtio_gpu_gl_reset; diff --git a/hw/display/virtio-gpu-virgl.c b/hw/display/virtio-gpu-virgl.c index 342e93728df0..15a98336969b 100644 --- a/hw/display/virtio-gpu-virgl.c +++ b/hw/display/virtio-gpu-virgl.c @@ -90,6 +90,10 @@ static void virtio_gpu_virgl_resume_cmdq_bh(void *opaque) { VirtIOGPU *g = opaque; + if (!virtio_gpu_virgl_update_render_state(g)) { + return; + } + virtio_gpu_process_cmdq(g); } @@ -322,14 +326,46 @@ 
static void virgl_cmd_create_resource_3d(VirtIOGPU *g, virgl_renderer_resource_create(&args, NULL, 0); } +static int +virtio_gpu_virgl_resource_unref(VirtIOGPU *g, + struct virtio_gpu_virgl_resource *res, + bool *suspended) +{ + struct iovec *res_iovs = NULL; + int num_iovs = 0; +#if VIRGL_VERSION_MAJOR >= 1 + int ret; + + ret = virtio_gpu_virgl_unmap_resource_blob(g, res, suspended); + if (ret) { + return ret; + } + if (*suspended) { + return 0; + } +#endif + + virgl_renderer_resource_detach_iov(res->base.resource_id, + &res_iovs, + &num_iovs); + if (res_iovs != NULL && num_iovs != 0) { + virtio_gpu_cleanup_mapping_iov(g, res_iovs, num_iovs); + } + virgl_renderer_resource_unref(res->base.resource_id); + + QTAILQ_REMOVE(&g->reslist, &res->base, next); + + g_free(res); + + return 0; +} + static void virgl_cmd_resource_unref(VirtIOGPU *g, struct virtio_gpu_ctrl_command *cmd, bool *cmd_suspended) { struct virtio_gpu_resource_unref unref; struct virtio_gpu_virgl_resource *res; - struct iovec *res_iovs = NULL; - int num_iovs = 0; VIRTIO_GPU_FILL_CMD(unref); trace_virtio_gpu_cmd_res_unref(unref.resource_id); @@ -342,27 +378,21 @@ static void virgl_cmd_resource_unref(VirtIOGPU *g, return; } -#if VIRGL_VERSION_MAJOR >= 1 - if (virtio_gpu_virgl_unmap_resource_blob(g, res, cmd_suspended)) { - cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC; - return; - } - if (*cmd_suspended) { - return; - } -#endif + virtio_gpu_virgl_resource_unref(g, res, cmd_suspended); +} - virgl_renderer_resource_detach_iov(unref.resource_id, - &res_iovs, - &num_iovs); - if (res_iovs != NULL && num_iovs != 0) { - virtio_gpu_cleanup_mapping_iov(g, res_iovs, num_iovs); - } - virgl_renderer_resource_unref(unref.resource_id); +void virtio_gpu_virgl_resource_destroy(VirtIOGPU *g, + struct virtio_gpu_simple_resource *base, + Error **errp) +{ + struct virtio_gpu_virgl_resource *res; + bool suspended = false; - QTAILQ_REMOVE(&g->reslist, &res->base, next); + res = container_of(base, struct virtio_gpu_virgl_resource, 
base); - g_free(res); + if (virtio_gpu_virgl_resource_unref(g, res, &suspended)) { + error_setg(errp, "failed to destroy virgl resource"); + } } static void virgl_cmd_context_create(VirtIOGPU *g, @@ -1291,14 +1321,30 @@ void virtio_gpu_virgl_reset_scanout(VirtIOGPU *g) } } -void virtio_gpu_virgl_reset(VirtIOGPU *g) +static bool virtio_gpu_virgl_reset(VirtIOGPU *g) { + struct virtio_gpu_simple_resource *res, *tmp; + + /* + * Virgl blob resource unmapping can be suspended and + * deferred on unref, ensure that destruction is completed. + */ + QTAILQ_FOREACH_SAFE(res, &g->reslist, next, tmp) { + virtio_gpu_virgl_resource_destroy(g, res, NULL); + } + + if (!QTAILQ_EMPTY(&g->reslist)) { + return false; + } + virgl_renderer_reset(); virtio_gpu_virgl_reset_async_fences(g); + + return true; } -int virtio_gpu_virgl_init(VirtIOGPU *g) +static int virtio_gpu_virgl_init(VirtIOGPU *g) { int ret; uint32_t flags = 0; @@ -1376,6 +1422,33 @@ int virtio_gpu_virgl_init(VirtIOGPU *g) return 0; } +bool virtio_gpu_virgl_update_render_state(VirtIOGPU *g) +{ + VirtIOGPUGL *gl = VIRTIO_GPU_GL(g); + + switch (gl->renderer_state) { + case RS_RESET: + if (!virtio_gpu_virgl_reset(g)) { + return false; + } + /* fallthrough */ + case RS_START: + if (virtio_gpu_virgl_init(g)) { + gl->renderer_state = RS_INIT_FAILED; + return false; + } + + gl->renderer_state = RS_INITED; + break; + case RS_INIT_FAILED: + return false; + case RS_INITED: + break; + } + + return true; +} + static void virtio_gpu_virgl_add_capset(GArray *capset_ids, uint32_t capset_id) { g_array_append_val(capset_ids, capset_id); diff --git a/include/hw/virtio/virtio-gpu.h b/include/hw/virtio/virtio-gpu.h index 718332284305..1f509d0d5beb 100644 --- a/include/hw/virtio/virtio-gpu.h +++ b/include/hw/virtio/virtio-gpu.h @@ -389,9 +389,11 @@ void virtio_gpu_virgl_process_cmd(VirtIOGPU *g, struct virtio_gpu_ctrl_command *cmd); void virtio_gpu_virgl_fence_poll(VirtIOGPU *g); void virtio_gpu_virgl_reset_scanout(VirtIOGPU *g); -void 
virtio_gpu_virgl_reset(VirtIOGPU *g); -int virtio_gpu_virgl_init(VirtIOGPU *g); GArray *virtio_gpu_virgl_get_capsets(VirtIOGPU *g); void virtio_gpu_virgl_reset_async_fences(VirtIOGPU *g); +void virtio_gpu_virgl_resource_destroy(VirtIOGPU *g, + struct virtio_gpu_simple_resource *res, + Error **errp); +bool virtio_gpu_virgl_update_render_state(VirtIOGPU *g); #endif -- 2.52.0
