[PATCH v2] drm/amdgpu: avoid using null object of framebuffer
Instead of using state->fb->obj[0] directly, get object from framebuffer by calling drm_gem_fb_get_obj() and return error code when object is null to avoid using null object of framebuffer. v2: Call drm_gem_fb_get_obj after check old_state->fb for NULL. Signed-off-by: Julia Zhang --- drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c | 18 -- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c index d60c4a2eeb0c..212f6522859d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c @@ -2,6 +2,7 @@ #include #include +#include #include #include "amdgpu.h" @@ -311,7 +312,13 @@ static int amdgpu_vkms_prepare_fb(struct drm_plane *plane, return 0; } afb = to_amdgpu_framebuffer(new_state->fb); - obj = new_state->fb->obj[0]; + + obj = drm_gem_fb_get_obj(new_state->fb, 0); + if (!obj) { + DRM_ERROR("Failed to get obj from framebuffer\n"); + return -EINVAL; + } + rbo = gem_to_amdgpu_bo(obj); adev = amdgpu_ttm_adev(rbo->tbo.bdev); @@ -365,12 +372,19 @@ static void amdgpu_vkms_cleanup_fb(struct drm_plane *plane, struct drm_plane_state *old_state) { struct amdgpu_bo *rbo; + struct drm_gem_object *obj; int r; if (!old_state->fb) return; - rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]); + obj = drm_gem_fb_get_obj(old_state->fb, 0); + if (!obj) { + DRM_ERROR("Failed to get obj from framebuffer\n"); + return; + } + + rbo = gem_to_amdgpu_bo(obj); r = amdgpu_bo_reserve(rbo, false); if (unlikely(r)) { DRM_ERROR("failed to reserve rbo before unpin\n"); -- 2.34.1
[PATCH] drm/amdgpu: avoid using null object of framebuffer
Instead of using state->fb->obj[0] directly, get object from framebuffer by calling drm_gem_fb_get_obj() and return error code when object is null to avoid using null object of framebuffer. Signed-off-by: Julia Zhang --- drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c | 17 +++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c index d60c4a2eeb0c..d50ee0828935 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c @@ -2,6 +2,7 @@ #include #include +#include #include #include "amdgpu.h" @@ -311,7 +312,13 @@ static int amdgpu_vkms_prepare_fb(struct drm_plane *plane, return 0; } afb = to_amdgpu_framebuffer(new_state->fb); - obj = new_state->fb->obj[0]; + + obj = drm_gem_fb_get_obj(new_state->fb, 0); + if (!obj) { + DRM_ERROR("Failed to get obj from framebuffer\n"); + return -EINVAL; + } + rbo = gem_to_amdgpu_bo(obj); adev = amdgpu_ttm_adev(rbo->tbo.bdev); @@ -365,12 +372,18 @@ static void amdgpu_vkms_cleanup_fb(struct drm_plane *plane, struct drm_plane_state *old_state) { struct amdgpu_bo *rbo; + struct drm_gem_object *obj = drm_gem_fb_get_obj(old_state->fb, 0); int r; if (!old_state->fb) return; - rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]); + if (!obj) { + DRM_ERROR("Failed to get obj from framebuffer\n"); + return; + } + + rbo = gem_to_amdgpu_bo(obj); r = amdgpu_bo_reserve(rbo, false); if (unlikely(r)) { DRM_ERROR("failed to reserve rbo before unpin\n"); -- 2.34.1
[PATCH v2 1/1] drm/virtio: Implement device_attach
As vram objects don't have backing pages and thus can't implement drm_gem_object_funcs.get_sg_table callback. This removes drm dma-buf callbacks in virtgpu_gem_map_dma_buf()/virtgpu_gem_unmap_dma_buf() and implement virtgpu specific map/unmap/attach callbacks to support both of shmem objects and vram objects. Signed-off-by: Julia Zhang --- drivers/gpu/drm/virtio/virtgpu_prime.c | 40 +++--- 1 file changed, 36 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/virtio/virtgpu_prime.c b/drivers/gpu/drm/virtio/virtgpu_prime.c index 44425f20d91a..b490a5343b06 100644 --- a/drivers/gpu/drm/virtio/virtgpu_prime.c +++ b/drivers/gpu/drm/virtio/virtgpu_prime.c @@ -49,11 +49,26 @@ virtgpu_gem_map_dma_buf(struct dma_buf_attachment *attach, { struct drm_gem_object *obj = attach->dmabuf->priv; struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj); + struct sg_table *sgt; + int ret; if (virtio_gpu_is_vram(bo)) return virtio_gpu_vram_map_dma_buf(bo, attach->dev, dir); - return drm_gem_map_dma_buf(attach, dir); + sgt = drm_prime_pages_to_sg(obj->dev, + to_drm_gem_shmem_obj(obj)->pages, + obj->size >> PAGE_SHIFT); + if (IS_ERR(sgt)) + return sgt; + + ret = dma_map_sgtable(attach->dev, sgt, dir, DMA_ATTR_SKIP_CPU_SYNC); + if (ret) { + sg_free_table(sgt); + kfree(sgt); + return ERR_PTR(ret); + } + + return sgt; } static void virtgpu_gem_unmap_dma_buf(struct dma_buf_attachment *attach, @@ -63,12 +78,29 @@ static void virtgpu_gem_unmap_dma_buf(struct dma_buf_attachment *attach, struct drm_gem_object *obj = attach->dmabuf->priv; struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj); + if (!sgt) + return; + if (virtio_gpu_is_vram(bo)) { virtio_gpu_vram_unmap_dma_buf(attach->dev, sgt, dir); - return; + } else { + dma_unmap_sgtable(attach->dev, sgt, dir, DMA_ATTR_SKIP_CPU_SYNC); + sg_free_table(sgt); + kfree(sgt); } +} + +static int virtgpu_gem_device_attach(struct dma_buf *dma_buf, +struct dma_buf_attachment *attach) +{ + struct drm_gem_object *obj = attach->dmabuf->priv; + 
struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj); + int ret = 0; + + if (!virtio_gpu_is_vram(bo) && obj->funcs->pin) + ret = obj->funcs->pin(obj); - drm_gem_unmap_dma_buf(attach, sgt, dir); + return ret; } static const struct virtio_dma_buf_ops virtgpu_dmabuf_ops = { @@ -83,7 +115,7 @@ static const struct virtio_dma_buf_ops virtgpu_dmabuf_ops = { .vmap = drm_gem_dmabuf_vmap, .vunmap = drm_gem_dmabuf_vunmap, }, - .device_attach = drm_gem_map_attach, + .device_attach = virtgpu_gem_device_attach, .get_uuid = virtgpu_virtio_get_uuid, }; -- 2.34.1
[PATCH v2 0/1] Implement device_attach for virtio gpu
To realize dGPU prime feature for virtio gpu, we are trying to let dGPU import vram object of virtio gpu. As vram objects don't have backing pages and thus can't implement the drm_gem_object_funcs.get_sg_table callback, this removes calling drm_gem_map_dma_buf in virtgpu_gem_map_dma_buf and implement virtgpu specific map/unmap/attach callbacks to support both of shmem objects and vram objects. Changes from v1 to v2: -Reimplement virtgpu_gem_device_attach() -Remove calling drm dma-buf funcs in virtgpu callbacks and reimplement virtgpu specific dma-buf callbacks. Julia Zhang (1): drm/virtio: Implement device_attach drivers/gpu/drm/virtio/virtgpu_prime.c | 40 +++--- 1 file changed, 36 insertions(+), 4 deletions(-) -- 2.34.1
[PATCH 1/1] drm/virtio: Implement device_attach
drm_gem_map_attach() requires drm_gem_object_funcs.get_sg_table to be implemented, or else return ENOSYS. Virtio has no get_sg_table implemented for vram object. To fix this, add a new device_attach to call drm_gem_map_attach() for shmem object and return 0 for vram object instead of calling drm_gem_map_attach for both of these two kinds of object. Signed-off-by: Julia Zhang --- drivers/gpu/drm/virtio/virtgpu_prime.c | 14 +- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/virtio/virtgpu_prime.c b/drivers/gpu/drm/virtio/virtgpu_prime.c index 44425f20d91a..f0b0ff6f3813 100644 --- a/drivers/gpu/drm/virtio/virtgpu_prime.c +++ b/drivers/gpu/drm/virtio/virtgpu_prime.c @@ -71,6 +71,18 @@ static void virtgpu_gem_unmap_dma_buf(struct dma_buf_attachment *attach, drm_gem_unmap_dma_buf(attach, sgt, dir); } +static int virtgpu_gem_device_attach(struct dma_buf *dma_buf, +struct dma_buf_attachment *attach) +{ + struct drm_gem_object *obj = attach->dmabuf->priv; + struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj); + + if (virtio_gpu_is_vram(bo)) + return 0; + + return drm_gem_map_attach(dma_buf, attach); +} + static const struct virtio_dma_buf_ops virtgpu_dmabuf_ops = { .ops = { .cache_sgt_mapping = true, @@ -83,7 +95,7 @@ static const struct virtio_dma_buf_ops virtgpu_dmabuf_ops = { .vmap = drm_gem_dmabuf_vmap, .vunmap = drm_gem_dmabuf_vunmap, }, - .device_attach = drm_gem_map_attach, + .device_attach = virtgpu_gem_device_attach, .get_uuid = virtgpu_virtio_get_uuid, }; -- 2.34.1
[PATCH 0/1] Implement device_attach for virtio gpu
To realize dGPU prime feature for virtio gpu, we are trying to let dGPU import vram object of virtio gpu. But this feature would finally call function virtio_dma_buf_ops.device_attach(), which was set as drm_gem_map_attach(). drm_gem_map_attach() requires drm_gem_object_funcs.get_sg_table to be implemented, or else return ENOSYS. But virtio gpu driver has not implemented it for vram object and actually vram object does not require it. So this adds a new implementation of device_attach() to call drm_gem_map_attach() for shmem object and return 0 for vram object as it actually did before the requirement was added. Julia Zhang (1): drm/virtio: Implement device_attach drivers/gpu/drm/virtio/virtgpu_prime.c | 14 +- 1 file changed, 13 insertions(+), 1 deletion(-) -- 2.34.1
[PATCH v2 0/1] Implementation of resource_query_layout
Hi all, Sorry for the late reply. This is v2 of the implementation of resource_query_layout. This adds a new ioctl to let guest query information of host resource, which is originally from Daniel Stone. We add some changes to support querying the correct stride of host resource before it's created, which is to support to blit data from dGPU to virtio iGPU for dGPU prime feature. Changes from v1 to v2: -Squash two patches to a single patch. -A small modification of VIRTIO_GPU_F_RESOURCE_QUERY_LAYOUT Below is description of v1: This add implementation of resource_query_layout to get the information of how the host has actually allocated the buffer. This function is now used to query the stride for guest linear resource for dGPU prime on guest VMs. v1 of kernel side: https://lore.kernel.org/xen-devel/20231110074027.24862-1-julia.zh...@amd.com/T/#t v1 of qemu side: https://lore.kernel.org/qemu-devel/20231110074027.24862-1-julia.zh...@amd.com/T/#t Daniel Stone (1): drm/virtio: Implement RESOURCE_GET_LAYOUT ioctl drivers/gpu/drm/virtio/virtgpu_drv.c | 1 + drivers/gpu/drm/virtio/virtgpu_drv.h | 22 - drivers/gpu/drm/virtio/virtgpu_ioctl.c | 66 ++ drivers/gpu/drm/virtio/virtgpu_kms.c | 8 +++- drivers/gpu/drm/virtio/virtgpu_vq.c| 63 include/uapi/drm/virtgpu_drm.h | 21 include/uapi/linux/virtio_gpu.h| 30 7 files changed, 208 insertions(+), 3 deletions(-) -- 2.34.1
[PATCH 1/1] drm/virtio: Implement RESOURCE_GET_LAYOUT ioctl
From: Daniel Stone Add a new ioctl to allow the guest VM to discover how the guest actually allocated the underlying buffer, which allows buffers to be used for GL<->Vulkan interop and through standard window systems. It's also a step towards properly supporting modifiers in the guest. Signed-off-by: Daniel Stone Co-developed-by: Julia Zhang # support query stride before it's created Signed-off-by: Julia Zhang --- drivers/gpu/drm/virtio/virtgpu_drv.c | 1 + drivers/gpu/drm/virtio/virtgpu_drv.h | 22 - drivers/gpu/drm/virtio/virtgpu_ioctl.c | 66 ++ drivers/gpu/drm/virtio/virtgpu_kms.c | 8 +++- drivers/gpu/drm/virtio/virtgpu_vq.c| 63 include/uapi/drm/virtgpu_drm.h | 21 include/uapi/linux/virtio_gpu.h| 30 7 files changed, 208 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c index 4334c7608408..98061b714b98 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.c +++ b/drivers/gpu/drm/virtio/virtgpu_drv.c @@ -148,6 +148,7 @@ static unsigned int features[] = { VIRTIO_GPU_F_RESOURCE_UUID, VIRTIO_GPU_F_RESOURCE_BLOB, VIRTIO_GPU_F_CONTEXT_INIT, + VIRTIO_GPU_F_RESOURCE_QUERY_LAYOUT, }; static struct virtio_driver virtio_gpu_driver = { .feature_table = features, diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h index 96365a772f77..bb5edcfeda54 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.h +++ b/drivers/gpu/drm/virtio/virtgpu_drv.h @@ -214,6 +214,16 @@ struct virtio_gpu_drv_cap_cache { atomic_t is_valid; }; +struct virtio_gpu_query_info { + uint32_t num_planes; + uint64_t modifier; + struct { + uint64_t offset; + uint32_t stride; + } planes[VIRTIO_GPU_MAX_RESOURCE_PLANES]; + atomic_t is_valid; +}; + struct virtio_gpu_device { struct drm_device *ddev; @@ -246,6 +256,7 @@ struct virtio_gpu_device { bool has_resource_blob; bool has_host_visible; bool has_context_init; + bool has_resource_query_layout; struct virtio_shm_region host_visible_region; struct drm_mm host_visible_mm; 
@@ -277,7 +288,7 @@ struct virtio_gpu_fpriv { }; /* virtgpu_ioctl.c */ -#define DRM_VIRTIO_NUM_IOCTLS 12 +#define DRM_VIRTIO_NUM_IOCTLS 13 extern struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS]; void virtio_gpu_create_context(struct drm_device *dev, struct drm_file *file); @@ -420,6 +431,15 @@ virtio_gpu_cmd_set_scanout_blob(struct virtio_gpu_device *vgdev, uint32_t width, uint32_t height, uint32_t x, uint32_t y); +int +virtio_gpu_cmd_get_resource_layout(struct virtio_gpu_device *vgdev, + struct virtio_gpu_query_info *bo_info, + uint32_t width, + uint32_t height, + uint32_t format, + uint32_t bind, + uint32_t hw_res_handle); + /* virtgpu_display.c */ int virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev); void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev); diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c index b24b11f25197..216c04314177 100644 --- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c +++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c @@ -107,6 +107,9 @@ static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data, case VIRTGPU_PARAM_SUPPORTED_CAPSET_IDs: value = vgdev->capset_id_mask; break; + case VIRTGPU_PARAM_RESOURCE_QUERY_LAYOUT: + value = vgdev->has_resource_query_layout ? 
1 : 0; + break; default: return -EINVAL; } @@ -668,6 +671,65 @@ static int virtio_gpu_context_init_ioctl(struct drm_device *dev, return ret; } +static int virtio_gpu_resource_query_layout_ioctl(struct drm_device *dev, + void *data, + struct drm_file *file) +{ + struct drm_virtgpu_resource_query_layout *args = data; + struct virtio_gpu_device *vgdev = dev->dev_private; + struct drm_gem_object *obj = NULL; + struct virtio_gpu_object *bo = NULL; + struct virtio_gpu_query_info bo_info = {0}; + int ret = 0; + int i; + + if (!vgdev->has_resource_query_layout) { + DRM_ERROR("failing: no RQL on host\n"); + return -EINVAL; + } + + if (args->handle > 0) { + obj = drm_gem_object_lookup(file, args->handle); + if (obj == NULL) { +
[PATCH 2/2] drm/virtio: Modify RESOURCE_GET_LAYOUT ioctl
Modify RESOURCE_GET_LAYOUT ioctl to handle the use case that query correct stride for guest linear resource before it is created. Signed-off-by: Julia Zhang --- drivers/gpu/drm/virtio/virtgpu_drv.h | 26 -- drivers/gpu/drm/virtio/virtgpu_ioctl.c | 47 -- drivers/gpu/drm/virtio/virtgpu_vq.c| 35 +++ include/uapi/drm/virtgpu_drm.h | 6 ++-- include/uapi/linux/virtio_gpu.h| 8 ++--- 5 files changed, 66 insertions(+), 56 deletions(-) diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h index d6fc0d4ecb7d..82dffb3e4c6b 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.h +++ b/drivers/gpu/drm/virtio/virtgpu_drv.h @@ -93,15 +93,6 @@ struct virtio_gpu_object { bool host3d_blob, guest_blob; uint32_t blob_mem, blob_flags; - atomic_t layout_state; - struct { - uint64_t offset; - uint64_t size; - uint32_t stride; - } planes[VIRTIO_GPU_RES_MAX_PLANES]; - uint64_t modifier; - uint32_t num_planes; - int uuid_state; uuid_t uuid; @@ -225,6 +216,16 @@ struct virtio_gpu_drv_cap_cache { atomic_t is_valid; }; +struct virtio_gpu_query_info { + uint32_t num_planes; + uint64_t modifier; + struct { + uint64_t offset; + uint32_t stride; + } planes [VIRTIO_GPU_MAX_RESOURCE_PLANES]; + atomic_t is_valid; +}; + struct virtio_gpu_device { struct drm_device *ddev; @@ -448,7 +449,12 @@ void virtio_gpu_cmd_host_wait(struct virtio_gpu_device *vgdev, int virtio_gpu_cmd_get_resource_layout(struct virtio_gpu_device *vgdev, - struct virtio_gpu_object *bo); + struct virtio_gpu_query_info *bo_info, + uint32_t width, + uint32_t height, + uint32_t format, + uint32_t bind, + uint32_t hw_res_handle); /* virtgpu_display.c */ int virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev); diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c index 51d04460d0d8..034a7c0927a5 100644 --- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c +++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c @@ -685,9 +685,9 @@ static int virtio_gpu_resource_query_layout_ioctl(struct 
drm_device *dev, { struct drm_virtgpu_resource_query_layout *args = data; struct virtio_gpu_device *vgdev = dev->dev_private; - struct drm_gem_object *obj; - struct virtio_gpu_object *bo; - int layout_state; + struct drm_gem_object *obj = NULL; + struct virtio_gpu_object *bo = NULL; + struct virtio_gpu_query_info bo_info = {0}; int ret = 0; int i; @@ -696,50 +696,45 @@ static int virtio_gpu_resource_query_layout_ioctl(struct drm_device *dev, return -EINVAL; } - obj = drm_gem_object_lookup(file, args->handle); - if (obj == NULL) { - DRM_ERROR("invalid handle 0x%x\n", args->handle); - return -ENOENT; - } - bo = gem_to_virtio_gpu_obj(obj); - - layout_state = atomic_read(&bo->layout_state); - if (layout_state == STATE_ERR) { - ret = -EINVAL; - goto out; - } else if (layout_state == STATE_OK) { - goto valid; + if (args->handle > 0) { + obj = drm_gem_object_lookup(file, args->handle); + if (obj == NULL) { + DRM_ERROR("invalid handle 0x%x\n", args->handle); + return -ENOENT; + } + bo = gem_to_virtio_gpu_obj(obj); } - ret = virtio_gpu_cmd_get_resource_layout(vgdev, bo); + ret = virtio_gpu_cmd_get_resource_layout(vgdev, &bo_info, args->width, +args->height, args->format, +args->bind, bo ? bo->hw_res_handle : 0); if (ret) goto out; ret = wait_event_timeout(vgdev->resp_wq, -atomic_read(&bo->layout_state) == STATE_OK, +atomic_read(&bo_info.is_valid), 5 * HZ); if (!ret) goto out; valid: smp_rmb(); - WARN_ON(atomic_read(&bo->layout_state) != STATE_OK); - args->num_planes = bo->num_planes; - args->modifier = bo->modifier; + WARN_ON(atomic_read(&bo_info.is_valid)); + args->num_planes = bo_info.num_planes; + args->modifier = bo_info.modifier; for (i = 0; i < args->num_planes; i++) { - args->p
[PATCH 1/2] drm/virtio: Implement RESOURCE_GET_LAYOUT ioctl
From: Daniel Stone This ioctl allows the guest to discover how the guest actually allocated the underlying buffer, which allows buffers to be used for GL<->Vulkan interop and through standard window systems. It's also a step towards properly supporting modifiers in the guest. --- drivers/gpu/drm/virtio/virtgpu_drv.c | 1 + drivers/gpu/drm/virtio/virtgpu_drv.h | 16 +- drivers/gpu/drm/virtio/virtgpu_ioctl.c | 71 ++ drivers/gpu/drm/virtio/virtgpu_kms.c | 8 ++- drivers/gpu/drm/virtio/virtgpu_vq.c| 56 include/uapi/drm/virtgpu_drm.h | 19 +++ include/uapi/linux/virtio_gpu.h| 30 +++ 7 files changed, 198 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c index 4f7140e27614..1ee09974d4b7 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.c +++ b/drivers/gpu/drm/virtio/virtgpu_drv.c @@ -190,6 +190,7 @@ static unsigned int features[] = { VIRTIO_GPU_F_RESOURCE_BLOB, VIRTIO_GPU_F_CONTEXT_INIT, VIRTIO_GPU_F_CONTEXT_FENCE_WAIT, + VIRTIO_GPU_F_RESOURCE_QUERY_LAYOUT, }; static struct virtio_driver virtio_gpu_driver = { .feature_table = features, diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h index 7ef4b3df0ada..d6fc0d4ecb7d 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.h +++ b/drivers/gpu/drm/virtio/virtgpu_drv.h @@ -93,6 +93,15 @@ struct virtio_gpu_object { bool host3d_blob, guest_blob; uint32_t blob_mem, blob_flags; + atomic_t layout_state; + struct { + uint64_t offset; + uint64_t size; + uint32_t stride; + } planes[VIRTIO_GPU_RES_MAX_PLANES]; + uint64_t modifier; + uint32_t num_planes; + int uuid_state; uuid_t uuid; @@ -249,6 +258,7 @@ struct virtio_gpu_device { bool has_host_visible; bool has_context_init; bool has_host_fence_wait; + bool has_resource_query_layout; struct virtio_shm_region host_visible_region; struct drm_mm host_visible_mm; @@ -281,7 +291,7 @@ struct virtio_gpu_fpriv { }; /* virtgpu_ioctl.c */ -#define DRM_VIRTIO_NUM_IOCTLS 12 +#define 
DRM_VIRTIO_NUM_IOCTLS 13 extern struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS]; void virtio_gpu_create_context(struct drm_device *dev, struct drm_file *file); @@ -436,6 +446,10 @@ int virtio_gpu_cmd_status_freezing(struct virtio_gpu_device *vgdev, void virtio_gpu_cmd_host_wait(struct virtio_gpu_device *vgdev, uint32_t ctx_id, uint64_t fence_id); +int +virtio_gpu_cmd_get_resource_layout(struct virtio_gpu_device *vgdev, + struct virtio_gpu_object *bo); + /* virtgpu_display.c */ int virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev); void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev); diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c index b6079d2bff69..51d04460d0d8 100644 --- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c +++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c @@ -107,6 +107,9 @@ static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data, case VIRTGPU_PARAM_SUPPORTED_CAPSET_IDs: value = vgdev->capset_id_mask; break; + case VIRTGPU_PARAM_RESOURCE_QUERY_LAYOUT: + value = vgdev->has_resource_query_layout ? 
1 : 0; + break; default: return -EINVAL; } @@ -676,6 +679,70 @@ static int virtio_gpu_context_init_ioctl(struct drm_device *dev, return ret; } +static int virtio_gpu_resource_query_layout_ioctl(struct drm_device *dev, + void *data, + struct drm_file *file) +{ + struct drm_virtgpu_resource_query_layout *args = data; + struct virtio_gpu_device *vgdev = dev->dev_private; + struct drm_gem_object *obj; + struct virtio_gpu_object *bo; + int layout_state; + int ret = 0; + int i; + + if (!vgdev->has_resource_query_layout) { + DRM_ERROR("failing: no RQL on host\n"); + return -EINVAL; + } + + obj = drm_gem_object_lookup(file, args->handle); + if (obj == NULL) { + DRM_ERROR("invalid handle 0x%x\n", args->handle); + return -ENOENT; + } + bo = gem_to_virtio_gpu_obj(obj); + + layout_state = atomic_read(&bo->layout_state); + if (layout_state == STATE_ERR) { + ret = -EINVAL; + goto out; + } else if (layout_state == STATE_OK) { + goto valid; + } + + ret = virtio_gpu_cmd_get_resource_layout(vgdev, bo); + if (ret) + goto out; + + ret = wait_event_timeout(vgdev->resp_wq, +
[PATCH 0/2] Add RESOURCE_GET_LAYOUT ioctl
This is to add a new ioctl RESOURCE_GET_LAYOUT to virtio-gpu to get the information about how the host has actually allocated the buffer. It is implemented to query the stride of linear buffer for dGPU prime on guest VM, related mesa mr: https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/23896 Daniel Stone (1): drm/virtio: Implement RESOURCE_GET_LAYOUT ioctl Julia Zhang (1): drm/virtio: Modify RESOURCE_GET_LAYOUT ioctl drivers/gpu/drm/virtio/virtgpu_drv.c | 1 + drivers/gpu/drm/virtio/virtgpu_drv.h | 22 - drivers/gpu/drm/virtio/virtgpu_ioctl.c | 66 ++ drivers/gpu/drm/virtio/virtgpu_kms.c | 8 +++- drivers/gpu/drm/virtio/virtgpu_vq.c| 63 include/uapi/drm/virtgpu_drm.h | 21 include/uapi/linux/virtio_gpu.h| 30 7 files changed, 208 insertions(+), 3 deletions(-) -- 2.34.1
[PATCH] virtio-gpu: Remove stride and layer_stride check for dGPU prime on VM
Remove stride and layer_stride check in virtio-gpu so that virgl can send transfer data command with non zero stride to sync up data from guest to host for dGPU prime on VM. Link: https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/23896 Signed-off-by: Julia Zhang --- drivers/gpu/drm/virtio/virtgpu_ioctl.c | 10 -- 1 file changed, 10 deletions(-) diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c index 5d05093014ac..4d3f8d36e3c7 100644 --- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c +++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c @@ -414,11 +414,6 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev, goto err_put_free; } - if (!bo->host3d_blob && (args->stride || args->layer_stride)) { - ret = -EINVAL; - goto err_put_free; - } - ret = virtio_gpu_array_lock_resv(objs); if (ret != 0) goto err_put_free; @@ -473,11 +468,6 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data, } else { virtio_gpu_create_context(dev, file); - if (!bo->host3d_blob && (args->stride || args->layer_stride)) { - ret = -EINVAL; - goto err_put_free; - } - ret = virtio_gpu_array_lock_resv(objs); if (ret != 0) goto err_put_free; -- 2.34.1