From: Robert Beckett <bob.beck...@collabora.com>

Support BLOB resource creation, mapping, unmapping and set-scanout by
calling the new stable virglrenderer 0.10 interface. Only enabled when
the interface is available (the code requires VIRGL_VERSION_MAJOR >= 1)
and the blob config is set, e.g. -device virtio-vga-gl,blob=true
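Mapping blobs into the guest-visible hostmem region additionally
requires a hostmem reservation on the device (hostmem is the
pre-existing virtio-gpu-base property; the 4G size below is only an
illustrative choice):

  -device virtio-vga-gl,blob=true,hostmem=4G

Unmapping is asynchronous: the hostmem memory region is deleted and
command processing is suspended until the region is fully unreferenced
and freed, since a reference to the region may be held outside of
virtio-gpu while virglrenderer may only unmap on the main-loop thread.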
Signed-off-by: Antonio Caggiano <antonio.caggi...@collabora.com>
Signed-off-by: Robert Beckett <bob.beck...@collabora.com> # added set_scanout_blob
Signed-off-by: Xenia Ragiadakou <xenia.ragiada...@amd.com>
Signed-off-by: Huang Rui <ray.hu...@amd.com>
Signed-off-by: Dmitry Osipenko <dmitry.osipe...@collabora.com>
---
 hw/display/virtio-gpu-gl.c     |   3 +
 hw/display/virtio-gpu-virgl.c  | 414 ++++++++++++++++++++++++++++++++-
 hw/display/virtio-gpu.c        |  18 +-
 include/hw/virtio/virtio-gpu.h |   9 +
 4 files changed, 434 insertions(+), 10 deletions(-)

diff --git a/hw/display/virtio-gpu-gl.c b/hw/display/virtio-gpu-gl.c
index 4fe9e6a0c21c..5f27568d3ec8 100644
--- a/hw/display/virtio-gpu-gl.c
+++ b/hw/display/virtio-gpu-gl.c
@@ -160,6 +160,9 @@ static void virtio_gpu_gl_device_unrealize(DeviceState *qdev)
     VirtIOGPUGL *gl = VIRTIO_GPU_GL(qdev);
 
     if (gl->renderer_state >= RS_INITED) {
+#if VIRGL_VERSION_MAJOR >= 1
+        qemu_bh_delete(gl->cmdq_resume_bh);
+#endif
         if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
             timer_free(gl->print_stats);
         }
diff --git a/hw/display/virtio-gpu-virgl.c b/hw/display/virtio-gpu-virgl.c
index 3ffea478e723..b2f4e215a7ad 100644
--- a/hw/display/virtio-gpu-virgl.c
+++ b/hw/display/virtio-gpu-virgl.c
@@ -17,6 +17,8 @@
 #include "trace.h"
 #include "hw/virtio/virtio.h"
 #include "hw/virtio/virtio-gpu.h"
+#include "hw/virtio/virtio-gpu-bswap.h"
+#include "hw/virtio/virtio-gpu-pixman.h"
 
 #include "ui/egl-helpers.h"
 
@@ -24,6 +26,7 @@
 
 struct virtio_gpu_virgl_resource {
     struct virtio_gpu_simple_resource base;
+    MemoryRegion *mr;
 };
 
 static struct virtio_gpu_virgl_resource *
@@ -47,6 +50,145 @@ virgl_get_egl_display(G_GNUC_UNUSED void *cookie)
 }
 #endif
 
+#if VIRGL_VERSION_MAJOR >= 1
+struct virtio_gpu_virgl_hostmem_region {
+    MemoryRegion mr;
+    struct VirtIOGPU *g;
+    bool finish_unmapping;
+};
+
+static struct virtio_gpu_virgl_hostmem_region *
+to_hostmem_region(MemoryRegion *mr)
+{
+    return container_of(mr, struct virtio_gpu_virgl_hostmem_region, mr);
+}
+
+static void virtio_gpu_virgl_resume_cmdq_bh(void *opaque)
+{
+    VirtIOGPU *g = opaque;
+
+    virtio_gpu_process_cmdq(g);
+}
+
+static void virtio_gpu_virgl_hostmem_region_free(void *obj)
+{
+    MemoryRegion *mr = MEMORY_REGION(obj);
+    struct virtio_gpu_virgl_hostmem_region *vmr;
+    VirtIOGPUBase *b;
+    VirtIOGPUGL *gl;
+
+    vmr = to_hostmem_region(mr);
+    vmr->finish_unmapping = true;
+
+    b = VIRTIO_GPU_BASE(vmr->g);
+    b->renderer_blocked--;
+
+    /*
+     * memory_region_unref() is executed from RCU thread context, while
+     * virglrenderer works only on the main-loop thread that's holding GL
+     * context.
+     */
+    gl = VIRTIO_GPU_GL(vmr->g);
+    qemu_bh_schedule(gl->cmdq_resume_bh);
+}
+
+static int
+virtio_gpu_virgl_map_resource_blob(VirtIOGPU *g,
+                                   struct virtio_gpu_virgl_resource *res,
+                                   uint64_t offset)
+{
+    struct virtio_gpu_virgl_hostmem_region *vmr;
+    VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);
+    MemoryRegion *mr;
+    uint64_t size;
+    void *data;
+    int ret;
+
+    if (!virtio_gpu_hostmem_enabled(b->conf)) {
+        qemu_log_mask(LOG_GUEST_ERROR, "%s: hostmem disabled\n", __func__);
+        return -EOPNOTSUPP;
+    }
+
+    ret = virgl_renderer_resource_map(res->base.resource_id, &data, &size);
+    if (ret) {
+        qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map virgl resource: %s\n",
+                      __func__, strerror(-ret));
+        return ret;
+    }
+
+    vmr = g_new0(struct virtio_gpu_virgl_hostmem_region, 1);
+    vmr->g = g;
+
+    mr = &vmr->mr;
+    memory_region_init_ram_ptr(mr, OBJECT(mr), "blob", size, data);
+    memory_region_add_subregion(&b->hostmem, offset, mr);
+    memory_region_set_enabled(mr, true);
+
+    /*
+     * The MR could outlive the resource if the MR's reference is held
+     * outside of virtio-gpu. To prevent the resource from being unmapped
+     * while the MR is alive, and its data pointer from thus becoming
+     * invalid, block virtio-gpu command processing until the MR is fully
+     * unreferenced and freed.
+     */
+    OBJECT(mr)->free = virtio_gpu_virgl_hostmem_region_free;
+
+    res->mr = mr;
+
+    return 0;
+}
+
+static int
+virtio_gpu_virgl_unmap_resource_blob(VirtIOGPU *g,
+                                     struct virtio_gpu_virgl_resource *res,
+                                     bool *cmd_suspended)
+{
+    struct virtio_gpu_virgl_hostmem_region *vmr;
+    VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);
+    MemoryRegion *mr = res->mr;
+    int ret;
+
+    if (!mr) {
+        return 0;
+    }
+
+    vmr = to_hostmem_region(res->mr);
+
+    /*
+     * Perform async unmapping in 3 steps:
+     *
+     * 1. Begin async unmapping with memory_region_del_subregion()
+     *    and suspend/block cmd processing.
+     * 2. Wait for res->mr to be freed and cmd processing resumed
+     *    asynchronously by virtio_gpu_virgl_hostmem_region_free().
+     * 3. Finish the unmapping with a final virgl_renderer_resource_unmap().
+     */
+    if (vmr->finish_unmapping) {
+        res->mr = NULL;
+        g_free(vmr);
+
+        ret = virgl_renderer_resource_unmap(res->base.resource_id);
+        if (ret) {
+            qemu_log_mask(LOG_GUEST_ERROR,
+                          "%s: failed to unmap virgl resource: %s\n",
+                          __func__, strerror(-ret));
+            return ret;
+        }
+    } else {
+        *cmd_suspended = true;
+
+        /* the renderer will be unblocked once the MR is freed */
+        b->renderer_blocked++;
+
+        /* the memory region owns the res->mr object and frees it by itself */
+        memory_region_set_enabled(mr, false);
+        memory_region_del_subregion(&b->hostmem, mr);
+        object_unparent(OBJECT(mr));
+    }
+
+    return 0;
+}
+#endif
+
 static void virgl_cmd_create_resource_2d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
 {
@@ -78,6 +220,7 @@ static void virgl_cmd_create_resource_2d(VirtIOGPU *g,
     res->base.height = c2d.height;
     res->base.format = c2d.format;
     res->base.resource_id = c2d.resource_id;
+    res->base.dmabuf_fd = -1;
     QTAILQ_INSERT_HEAD(&g->reslist, &res->base, next);
 
     args.handle = c2d.resource_id;
@@ -125,6 +268,7 @@ static void virgl_cmd_create_resource_3d(VirtIOGPU *g,
     res->base.height = c3d.height;
     res->base.format = c3d.format;
     res->base.resource_id = c3d.resource_id;
+    res->base.dmabuf_fd = -1;
     QTAILQ_INSERT_HEAD(&g->reslist, &res->base, next);
 
     args.handle = c3d.resource_id;
@@ -142,7 +286,8 @@ static void virgl_cmd_create_resource_3d(VirtIOGPU *g,
 }
 
 static void virgl_cmd_resource_unref(VirtIOGPU *g,
-                                     struct virtio_gpu_ctrl_command *cmd)
+                                     struct virtio_gpu_ctrl_command *cmd,
+                                     bool *cmd_suspended)
 {
     struct virtio_gpu_resource_unref unref;
     struct virtio_gpu_virgl_resource *res;
@@ -160,6 +305,16 @@ static void virgl_cmd_resource_unref(VirtIOGPU *g,
         return;
     }
 
+#if VIRGL_VERSION_MAJOR >= 1
+    if (virtio_gpu_virgl_unmap_resource_blob(g, res, cmd_suspended)) {
+        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
+        return;
+    }
+    if (*cmd_suspended) {
+        return;
+    }
+#endif
+
     virgl_renderer_resource_detach_iov(unref.resource_id,
                                        &res_iovs,
                                        &num_iovs);
@@ -509,9 +664,241 @@ static void virgl_cmd_get_capset(VirtIOGPU *g,
     g_free(resp);
 }
 
+#if VIRGL_VERSION_MAJOR >= 1
+static void virgl_cmd_resource_create_blob(VirtIOGPU *g,
+                                           struct virtio_gpu_ctrl_command *cmd)
+{
+    struct virgl_renderer_resource_create_blob_args virgl_args = { 0 };
+    g_autofree struct virtio_gpu_virgl_resource *res = NULL;
+    struct virtio_gpu_resource_create_blob cblob;
+    struct virgl_renderer_resource_info info;
+    int ret;
+
+    if (!virtio_gpu_blob_enabled(g->parent_obj.conf)) {
+        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
+        return;
+    }
+
+    VIRTIO_GPU_FILL_CMD(cblob);
+    virtio_gpu_create_blob_bswap(&cblob);
+    trace_virtio_gpu_cmd_res_create_blob(cblob.resource_id, cblob.size);
+
+    if (cblob.resource_id == 0) {
+        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
+                      __func__);
+        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
+        return;
+    }
+
+    res = virtio_gpu_virgl_find_resource(g, cblob.resource_id);
+    if (res) {
+        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
+                      __func__, cblob.resource_id);
+        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
+        /* we don't own the existing resource; don't let g_autofree free it */
+        res = NULL;
+        return;
+    }
+
+    res = g_new0(struct virtio_gpu_virgl_resource, 1);
+    res->base.resource_id = cblob.resource_id;
+    res->base.blob_size = cblob.size;
+    res->base.dmabuf_fd = -1;
+
+    if (cblob.blob_mem != VIRTIO_GPU_BLOB_MEM_HOST3D) {
+        ret = virtio_gpu_create_mapping_iov(g, cblob.nr_entries, sizeof(cblob),
+                                            cmd, &res->base.addrs,
+                                            &res->base.iov, &res->base.iov_cnt);
+        /* virtio_gpu_create_mapping_iov() returns 0 on success */
+        if (ret) {
+            cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
+            return;
+        }
+    }
+
+    virgl_args.res_handle = cblob.resource_id;
+    virgl_args.ctx_id = cblob.hdr.ctx_id;
+    virgl_args.blob_mem = cblob.blob_mem;
+    virgl_args.blob_id = cblob.blob_id;
+    virgl_args.blob_flags = cblob.blob_flags;
+    virgl_args.size = cblob.size;
+    virgl_args.iovecs = res->base.iov;
+    virgl_args.num_iovs = res->base.iov_cnt;
+
+    ret = virgl_renderer_resource_create_blob(&virgl_args);
+    if (ret) {
+        qemu_log_mask(LOG_GUEST_ERROR, "%s: virgl blob create error: %s\n",
+                      __func__, strerror(-ret));
+        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
+        virtio_gpu_cleanup_mapping(g, &res->base);
+        return;
+    }
+
+    ret = virgl_renderer_resource_get_info(cblob.resource_id, &info);
+    if (ret) {
+        qemu_log_mask(LOG_GUEST_ERROR,
+                      "%s: resource does not have info %d: %s\n",
+                      __func__, cblob.resource_id, strerror(-ret));
+        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
+        virtio_gpu_cleanup_mapping(g, &res->base);
+        virgl_renderer_resource_unref(cblob.resource_id);
+        return;
+    }
+
+    res->base.dmabuf_fd = info.fd;
+
+    QTAILQ_INSERT_HEAD(&g->reslist, &res->base, next);
+    res = NULL;
+}
+
+static void virgl_cmd_resource_map_blob(VirtIOGPU *g,
+                                        struct virtio_gpu_ctrl_command *cmd)
+{
+    struct virtio_gpu_resource_map_blob mblob;
+    struct virtio_gpu_virgl_resource *res;
+    struct virtio_gpu_resp_map_info resp;
+    int ret;
+
+    VIRTIO_GPU_FILL_CMD(mblob);
+    virtio_gpu_map_blob_bswap(&mblob);
+
+    res = virtio_gpu_virgl_find_resource(g, mblob.resource_id);
+    if (!res) {
+        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource does not exist %d\n",
+                      __func__, mblob.resource_id);
+        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
+        return;
+    }
+
+    ret = virtio_gpu_virgl_map_resource_blob(g, res, mblob.offset);
+    if (ret) {
+        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
+        return;
+    }
+
+    memset(&resp, 0, sizeof(resp));
+    resp.hdr.type = VIRTIO_GPU_RESP_OK_MAP_INFO;
+    virgl_renderer_resource_get_map_info(mblob.resource_id, &resp.map_info);
+    virtio_gpu_ctrl_response(g, cmd, &resp.hdr, sizeof(resp));
+}
+
+static void virgl_cmd_resource_unmap_blob(VirtIOGPU *g,
+                                          struct virtio_gpu_ctrl_command *cmd,
+                                          bool *cmd_suspended)
+{
+    struct virtio_gpu_resource_unmap_blob ublob;
+    struct virtio_gpu_virgl_resource *res;
+    int ret;
+
+    VIRTIO_GPU_FILL_CMD(ublob);
+    virtio_gpu_unmap_blob_bswap(&ublob);
+
+    res = virtio_gpu_virgl_find_resource(g, ublob.resource_id);
+    if (!res) {
+        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource does not exist %d\n",
+                      __func__, ublob.resource_id);
+        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
+        return;
+    }
+
+    ret = virtio_gpu_virgl_unmap_resource_blob(g, res, cmd_suspended);
+    if (ret) {
+        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
+        return;
+    }
+}
+
+static void virgl_cmd_set_scanout_blob(VirtIOGPU *g,
+                                       struct virtio_gpu_ctrl_command *cmd)
+{
+    struct virtio_gpu_framebuffer fb = { 0 };
+    struct virtio_gpu_virgl_resource *res;
+    struct virtio_gpu_set_scanout_blob ss;
+    uint64_t fbend;
+
+    VIRTIO_GPU_FILL_CMD(ss);
+    virtio_gpu_scanout_blob_bswap(&ss);
+    trace_virtio_gpu_cmd_set_scanout_blob(ss.scanout_id, ss.resource_id,
+                                          ss.r.width, ss.r.height, ss.r.x,
+                                          ss.r.y);
+
+    if (ss.scanout_id >= g->parent_obj.conf.max_outputs) {
+        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
+                      __func__, ss.scanout_id);
+        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
+        return;
+    }
+
+    if (ss.resource_id == 0) {
+        virtio_gpu_disable_scanout(g, ss.scanout_id);
+        return;
+    }
+
+    if (ss.width < 16 ||
+        ss.height < 16 ||
+        ss.r.x + ss.r.width > ss.width ||
+        ss.r.y + ss.r.height > ss.height) {
+        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout %d bounds for"
+                      " resource %d, rect (%d,%d)+%d,%d, fb %d %d\n",
+                      __func__, ss.scanout_id, ss.resource_id,
+                      ss.r.x, ss.r.y, ss.r.width, ss.r.height,
+                      ss.width, ss.height);
+        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
+        return;
+    }
+
+    res = virtio_gpu_virgl_find_resource(g, ss.resource_id);
+    if (!res) {
+        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource does not exist %d\n",
+                      __func__, ss.resource_id);
+        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
+        return;
+    }
+
+    if (res->base.dmabuf_fd < 0) {
+        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource not backed by dmabuf %d\n",
+                      __func__, ss.resource_id);
+        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
+        return;
+    }
+
+    fb.format = virtio_gpu_get_pixman_format(ss.format);
+    if (!fb.format) {
+        qemu_log_mask(LOG_GUEST_ERROR, "%s: pixel format not supported %d\n",
+                      __func__, ss.format);
+        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
+        return;
+    }
+
+    fb.bytes_pp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(fb.format), 8);
+    fb.width = ss.width;
+    fb.height = ss.height;
+    fb.stride = ss.strides[0];
+    fb.offset = ss.offsets[0] + ss.r.x * fb.bytes_pp + ss.r.y * fb.stride;
+
+    fbend = fb.offset;
+    fbend += fb.stride * (ss.r.height - 1);
+    fbend += fb.bytes_pp * ss.r.width;
+    if (fbend > res->base.blob_size) {
+        qemu_log_mask(LOG_GUEST_ERROR, "%s: fb end out of range\n",
+                      __func__);
+        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
+        return;
+    }
+
+    g->parent_obj.enable = 1;
+    if (virtio_gpu_update_dmabuf(g, ss.scanout_id, &res->base, &fb, &ss.r)) {
+        qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to update dmabuf\n",
+                      __func__);
+        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
+        return;
+    }
+
+    virtio_gpu_update_scanout(g, ss.scanout_id, &res->base, &fb, &ss.r);
+}
+#endif
+
 void virtio_gpu_virgl_process_cmd(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
 {
+    bool cmd_suspended = false;
+
     VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);
 
     virgl_renderer_force_ctx_0();
@@ -553,7 +940,7 @@ void virtio_gpu_virgl_process_cmd(VirtIOGPU *g,
         virgl_cmd_resource_flush(g, cmd);
         break;
     case VIRTIO_GPU_CMD_RESOURCE_UNREF:
-        virgl_cmd_resource_unref(g, cmd);
+        virgl_cmd_resource_unref(g, cmd, &cmd_suspended);
         break;
    case VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE:
         /* TODO add security */
@@ -575,12 +962,26 @@ void virtio_gpu_virgl_process_cmd(VirtIOGPU *g,
     case VIRTIO_GPU_CMD_GET_EDID:
         virtio_gpu_get_edid(g, cmd);
         break;
+#if VIRGL_VERSION_MAJOR >= 1
+    case VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB:
+        virgl_cmd_resource_create_blob(g, cmd);
+        break;
+    case VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB:
+        virgl_cmd_resource_map_blob(g, cmd);
+        break;
+    case VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB:
+        virgl_cmd_resource_unmap_blob(g, cmd, &cmd_suspended);
+        break;
+    case VIRTIO_GPU_CMD_SET_SCANOUT_BLOB:
+        virgl_cmd_set_scanout_blob(g, cmd);
+        break;
+#endif
     default:
         cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
         break;
     }
 
-    if (cmd->finished) {
+    if (cmd_suspended || cmd->finished) {
         return;
     }
     if (cmd->error) {
@@ -749,6 +1150,13 @@ int virtio_gpu_virgl_init(VirtIOGPU *g)
         timer_mod(gl->print_stats,
                   qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 1000);
     }
+
+#if VIRGL_VERSION_MAJOR >= 1
+    gl->cmdq_resume_bh = aio_bh_new(qemu_get_aio_context(),
+                                    virtio_gpu_virgl_resume_cmdq_bh,
+                                    g);
+#endif
+
     return 0;
 }
diff --git a/hw/display/virtio-gpu.c b/hw/display/virtio-gpu.c
index b348805fbaee..a5db2256a4bb 100644
--- a/hw/display/virtio-gpu.c
+++ b/hw/display/virtio-gpu.c
@@ -380,7 +380,7 @@ static void virtio_gpu_resource_create_blob(VirtIOGPU *g,
     QTAILQ_INSERT_HEAD(&g->reslist, res, next);
 }
 
-static void virtio_gpu_disable_scanout(VirtIOGPU *g, int scanout_id)
+void virtio_gpu_disable_scanout(VirtIOGPU *g, int scanout_id)
 {
     struct virtio_gpu_scanout *scanout = &g->parent_obj.scanout[scanout_id];
     struct virtio_gpu_simple_resource *res;
@@ -597,11 +597,11 @@ static void virtio_unref_resource(pixman_image_t *image, void *data)
 {
     pixman_image_unref(data);
 }
 
-static void virtio_gpu_update_scanout(VirtIOGPU *g,
-                                      uint32_t scanout_id,
-                                      struct virtio_gpu_simple_resource *res,
-                                      struct virtio_gpu_framebuffer *fb,
-                                      struct virtio_gpu_rect *r)
+void virtio_gpu_update_scanout(VirtIOGPU *g,
+                               uint32_t scanout_id,
+                               struct virtio_gpu_simple_resource *res,
+                               struct virtio_gpu_framebuffer *fb,
+                               struct virtio_gpu_rect *r)
 {
     struct virtio_gpu_simple_resource *ores;
     struct virtio_gpu_scanout *scanout;
@@ -1497,10 +1497,14 @@ void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
             return;
         }
 
+#ifdef VIRGL_VERSION_MAJOR
+    #if VIRGL_VERSION_MAJOR < 1
         if (virtio_gpu_virgl_enabled(g->parent_obj.conf)) {
-            error_setg(errp, "blobs and virgl are not compatible (yet)");
+            error_setg(errp, "old virglrenderer, blob resources unsupported");
             return;
         }
+    #endif
+#endif
     }
 
     if (!virtio_gpu_base_device_realize(qdev,
diff --git a/include/hw/virtio/virtio-gpu.h b/include/hw/virtio/virtio-gpu.h
index 6e71d799e5da..775005abb337 100644
--- a/include/hw/virtio/virtio-gpu.h
+++ b/include/hw/virtio/virtio-gpu.h
@@ -239,6 +239,8 @@ struct VirtIOGPUGL {
 
     QEMUTimer *fence_poll;
     QEMUTimer *print_stats;
+
+    QEMUBH *cmdq_resume_bh;
 };
 
 struct VhostUserGPU {
@@ -338,6 +340,13 @@ int virtio_gpu_update_dmabuf(VirtIOGPU *g,
                              struct virtio_gpu_framebuffer *fb,
                              struct virtio_gpu_rect *r);
 
+void virtio_gpu_update_scanout(VirtIOGPU *g,
+                               uint32_t scanout_id,
+                               struct virtio_gpu_simple_resource *res,
+                               struct virtio_gpu_framebuffer *fb,
+                               struct virtio_gpu_rect *r);
+void virtio_gpu_disable_scanout(VirtIOGPU *g, int scanout_id);
+
 /* virtio-gpu-3d.c */
 void virtio_gpu_virgl_process_cmd(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd);
-- 
2.45.2