Support mapping virgl blobs to a fixed location of a hostmem memory region using the new virglrenderer MAP_FIXED API.
This new feature solves multiple problems for virtio-gpu on QEMU: - Having a dedicated memory region for each mapped blob is notoriously slow due to QEMU's memory region software design built around RCU, which isn't optimized for frequent removal of regions - KVM isn't optimized for frequent slot changes either - QEMU/KVM has a limit on the total number of created memory regions, crashing QEMU when the limit is reached This patch makes virtio-gpu-gl pre-create a single anonymous memory region covering the whole hostmem area to which blobs will be mapped using the MAP_FIXED API. Not all virgl resources will support mapping at a fixed memory address. For them, we will continue to create individual nested memory sub-regions. In particular, vrend resources may not have MAP_FIXED capability. Venus and DRM native contexts will benefit greatly from the MAP_FIXED feature in terms of performance and stability. Signed-off-by: Dmitry Osipenko <[email protected]> --- hw/display/virtio-gpu-gl.c | 45 ++++++++++++++++++++- hw/display/virtio-gpu-virgl.c | 74 +++++++++++++++++++++++++++++++++- include/hw/virtio/virtio-gpu.h | 3 ++ 3 files changed, 119 insertions(+), 3 deletions(-) diff --git a/hw/display/virtio-gpu-gl.c b/hw/display/virtio-gpu-gl.c index b640900fc6f1..23e0dd26c67b 100644 --- a/hw/display/virtio-gpu-gl.c +++ b/hw/display/virtio-gpu-gl.c @@ -121,7 +121,12 @@ static void virtio_gpu_gl_reset(VirtIODevice *vdev) static void virtio_gpu_gl_device_realize(DeviceState *qdev, Error **errp) { ERRP_GUARD(); - VirtIOGPU *g = VIRTIO_GPU(qdev); + VirtIOGPUBase *b = VIRTIO_GPU_BASE(qdev); + VirtIOGPU *g = VIRTIO_GPU(b); +#if !defined(CONFIG_WIN32) + VirtIOGPUGL *gl = VIRTIO_GPU_GL(g); + void *map; +#endif #if HOST_BIG_ENDIAN error_setg(errp, "virgl is not supported on bigendian platforms"); @@ -152,6 +157,33 @@ static void virtio_gpu_gl_device_realize(DeviceState *qdev, Error **errp) #endif virtio_gpu_device_realize(qdev, errp); + + /* + * Check whether 
virtio_gpu_device_realize() failed. + */ + if (*errp) { + return; + } + +#if !defined(CONFIG_WIN32) + if (virtio_gpu_hostmem_enabled(b->conf)) { + map = mmap(NULL, b->conf.hostmem, PROT_READ | PROT_WRITE, + MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); + if (map == MAP_FAILED) { + error_setg(errp, + "virgl hostmem region could not be initialized: %s", + strerror(errno)); + return; + } + + gl->hostmem_mmap = map; + gl->hostmem_background = g_new0(MemoryRegion, 1); + memory_region_init_ram_ptr(gl->hostmem_background, NULL, + "hostmem-background", b->conf.hostmem, + gl->hostmem_mmap); + memory_region_add_subregion(&b->hostmem, 0, gl->hostmem_background); + } +#endif } static const Property virtio_gpu_gl_properties[] = { @@ -165,6 +197,7 @@ static const Property virtio_gpu_gl_properties[] = { static void virtio_gpu_gl_device_unrealize(DeviceState *qdev) { + VirtIOGPUBase *b = VIRTIO_GPU_BASE(qdev); VirtIOGPU *g = VIRTIO_GPU(qdev); VirtIOGPUGL *gl = VIRTIO_GPU_GL(qdev); @@ -187,6 +220,16 @@ static void virtio_gpu_gl_device_unrealize(DeviceState *qdev) gl->renderer_state = RS_START; g_array_unref(g->capset_ids); + + /* + * It is not guaranteed that the memory region will be finalized + * immediately with memory_region_del_subregion(), there can be + * a remaining reference to gl->hostmem_mmap. VirtIO-GPU is not + * hotpluggable, hence no need to worry about the leaked mapping. 
+ */ + if (gl->hostmem_background) { + memory_region_del_subregion(&b->hostmem, gl->hostmem_background); + } } static void virtio_gpu_gl_class_init(ObjectClass *klass, const void *data) diff --git a/hw/display/virtio-gpu-virgl.c b/hw/display/virtio-gpu-virgl.c index a6860f63b563..5a663ad2acfa 100644 --- a/hw/display/virtio-gpu-virgl.c +++ b/hw/display/virtio-gpu-virgl.c @@ -41,9 +41,14 @@ VIRGL_VERSION_MICRO >= (micro)) #endif +#if VIRGL_CHECK_VERSION(1, 2, 1) && !defined(CONFIG_WIN32) + #define VIRGL_HAS_MAP_FIXED +#endif + struct virtio_gpu_virgl_resource { struct virtio_gpu_simple_resource base; MemoryRegion *mr; + void *map_fixed; }; static struct virtio_gpu_virgl_resource * @@ -116,6 +121,7 @@ virtio_gpu_virgl_map_resource_blob(VirtIOGPU *g, { struct virtio_gpu_virgl_hostmem_region *vmr; VirtIOGPUBase *b = VIRTIO_GPU_BASE(g); + VirtIOGPUGL *gl = VIRTIO_GPU_GL(g); MemoryRegion *mr; uint64_t size; void *data; @@ -126,6 +132,41 @@ virtio_gpu_virgl_map_resource_blob(VirtIOGPU *g, return -EOPNOTSUPP; } +#ifdef VIRGL_HAS_MAP_FIXED + /* + * virgl_renderer_resource_map_fixed() allows to create multiple + * mappings of the same resource, while virgl_renderer_resource_map() + * not. Don't allow mapping same resource twice. + */ + if (res->map_fixed || res->mr) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: failed to map(fixed) virgl resource: already mapped\n", + __func__); + return -EBUSY; + } + + ret = virgl_renderer_resource_map_fixed(res->base.resource_id, + gl->hostmem_mmap + offset); + switch (ret) { + case 0: + res->map_fixed = gl->hostmem_mmap + offset; + return 0; + + case -EOPNOTSUPP: + /* + * -MAP_FIXED is unsupported by this resource. + * Mapping falls back to a blob subregion method in that case. 
+ */ + break; + + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: failed to map(fixed) virgl resource: %s\n", + __func__, strerror(-ret)); + return ret; + } +#endif + ret = virgl_renderer_resource_map(res->base.resource_id, &data, &size); if (ret) { qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map virgl resource: %s\n", @@ -138,7 +179,7 @@ virtio_gpu_virgl_map_resource_blob(VirtIOGPU *g, mr = &vmr->mr; memory_region_init_ram_ptr(mr, OBJECT(mr), "blob", size, data); - memory_region_add_subregion(&b->hostmem, offset, mr); + memory_region_add_subregion_overlap(gl->hostmem_background, offset, mr, 1); /* * MR could outlive the resource if MR's reference is held outside of @@ -162,9 +203,28 @@ virtio_gpu_virgl_unmap_resource_blob(VirtIOGPU *g, { struct virtio_gpu_virgl_hostmem_region *vmr; VirtIOGPUBase *b = VIRTIO_GPU_BASE(g); + VirtIOGPUGL *gl = VIRTIO_GPU_GL(g); MemoryRegion *mr = res->mr; int ret; +#ifdef VIRGL_HAS_MAP_FIXED + if (res->map_fixed) { + res->map_fixed = mmap(res->map_fixed, res->base.blob_size, + PROT_READ | PROT_WRITE, + MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, + -1, 0); + if (res->map_fixed == MAP_FAILED) { + ret = -errno; + qemu_log_mask(LOG_GUEST_ERROR, + "%s: failed to unmap(fixed) virgl resource: %s\n", + __func__, strerror(-ret)); + return ret; + } + + res->map_fixed = NULL; + } +#endif + if (!mr) { return 0; } @@ -200,7 +260,7 @@ virtio_gpu_virgl_unmap_resource_blob(VirtIOGPU *g, b->renderer_blocked++; /* memory region owns self res->mr object and frees it by itself */ - memory_region_del_subregion(&b->hostmem, mr); + memory_region_del_subregion(gl->hostmem_background, mr); object_unparent(OBJECT(mr)); } @@ -1265,6 +1325,16 @@ void virtio_gpu_virgl_reset_scanout(VirtIOGPU *g) void virtio_gpu_virgl_reset(VirtIOGPU *g) { +#ifdef VIRGL_HAS_MAP_FIXED + VirtIOGPUBase *b = VIRTIO_GPU_BASE(g); + VirtIOGPUGL *gl = VIRTIO_GPU_GL(g); + + if (gl->hostmem_mmap && + mmap(gl->hostmem_mmap, b->conf.hostmem, PROT_READ | PROT_WRITE, + MAP_PRIVATE | 
MAP_ANONYMOUS | MAP_FIXED, -1, 0) == MAP_FAILED) { + error_report("failed to reset virgl hostmem: %s", strerror(errno)); + } +#endif virgl_renderer_reset(); virtio_gpu_virgl_reset_async_fences(g); diff --git a/include/hw/virtio/virtio-gpu.h b/include/hw/virtio/virtio-gpu.h index 718332284305..bfe85a4e0449 100644 --- a/include/hw/virtio/virtio-gpu.h +++ b/include/hw/virtio/virtio-gpu.h @@ -263,6 +263,9 @@ struct VirtIOGPUGL { QEMUBH *async_fence_bh; QSLIST_HEAD(, virtio_gpu_virgl_context_fence) async_fenceq; + + MemoryRegion *hostmem_background; + void *hostmem_mmap; }; struct VhostUserGPU { -- 2.51.1
