This patch adds the functions needed to map GEM backing memory into the kernel's virtual address space.
Signed-off-by: Haneen Mohammed <hamohammed...@gmail.com>
---
 drivers/gpu/drm/vkms/vkms_drv.c |  1 +
 drivers/gpu/drm/vkms/vkms_drv.h |  5 ++++
 drivers/gpu/drm/vkms/vkms_gem.c | 50 +++++++++++++++++++++++++++++++++
 3 files changed, 56 insertions(+)

diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c
index c56d66d9ec56..8ab53958e4b9 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.c
+++ b/drivers/gpu/drm/vkms/vkms_drv.c
@@ -55,6 +55,7 @@ static struct drm_driver vkms_driver = {
 	.dumb_create		= vkms_dumb_create,
 	.dumb_map_offset	= vkms_dumb_map,
 	.gem_vm_ops		= &vkms_gem_vm_ops,
+	.gem_free_object_unlocked = vkms_gem_free_object,
 
 	.name			= DRIVER_NAME,
 	.desc			= DRIVER_DESC,
diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h
index f115e7d1ae03..d6cb8824cee2 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.h
+++ b/drivers/gpu/drm/vkms/vkms_drv.h
@@ -41,6 +41,7 @@ struct vkms_gem_object {
 	struct drm_gem_object gem;
 	struct mutex pages_lock; /* Page lock used in page fault handler */
 	struct page **pages;
+	void *vaddr;
 };
 
 #define drm_crtc_to_vkms_output(target) \
@@ -67,4 +68,8 @@ int vkms_dumb_create(struct drm_file *file, struct drm_device *dev,
 int vkms_dumb_map(struct drm_file *file, struct drm_device *dev,
 		  u32 handle, u64 *offset);
 
+void vkms_gem_free_object(struct drm_gem_object *obj);
+
+void *vkms_gem_vmap(struct drm_gem_object *obj);
+
 #endif /* _VKMS_DRV_H_ */
diff --git a/drivers/gpu/drm/vkms/vkms_gem.c b/drivers/gpu/drm/vkms/vkms_gem.c
index 9f820f56b9e0..249855dded63 100644
--- a/drivers/gpu/drm/vkms/vkms_gem.c
+++ b/drivers/gpu/drm/vkms/vkms_gem.c
@@ -166,3 +166,53 @@ int vkms_dumb_map(struct drm_file *file, struct drm_device *dev,
 
 	return ret;
 }
+
+void vkms_gem_free_object(struct drm_gem_object *obj)
+{
+	struct vkms_gem_object *vkms_obj = container_of(obj,
+							struct vkms_gem_object,
+							gem);
+
+	kvfree(vkms_obj->pages);
+	mutex_destroy(&vkms_obj->pages_lock);
+	drm_gem_object_release(obj);
+	kfree(vkms_obj);
+}
+
+static struct page **get_pages(struct vkms_gem_object *vkms_obj)
+{
+	struct drm_gem_object *gem_obj = &vkms_obj->gem;
+	struct page **pages;
+
+	mutex_lock(&vkms_obj->pages_lock);
+	pages = vkms_obj->pages;
+	if (!pages) {
+		pages = drm_gem_get_pages(gem_obj);
+		if (IS_ERR(pages)) {
+			mutex_unlock(&vkms_obj->pages_lock);
+			return pages;
+		}
+		vkms_obj->pages = pages;
+	}
+	mutex_unlock(&vkms_obj->pages_lock);
+
+	return pages;
+}
+
+void *vkms_gem_vmap(struct drm_gem_object *obj)
+{
+	struct vkms_gem_object *vkms_obj = container_of(obj,
+							struct vkms_gem_object,
+							gem);
+	unsigned int n_pages = obj->size >> PAGE_SHIFT;
+	struct page **pages = get_pages(vkms_obj);
+
+	if (IS_ERR(pages)) {
+		DRM_INFO("pages allocation failed %ld\n", PTR_ERR(pages));
+		return NULL;
+	}
+
+	vkms_obj->vaddr = vmap(pages, n_pages, VM_MAP, PAGE_KERNEL);
+
+	return vkms_obj->vaddr;
+}
-- 
2.17.1
_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel