Stolen/kernel buffers should not be mmappable by userland.
Do not provide the mmap callbacks for these buffers.

Signed-off-by: Robert Beckett <bob.beck...@collabora.com>
---
 drivers/gpu/drm/i915/gem/i915_gem_ttm.c | 32 +++++++++++++++++++++----
 1 file changed, 27 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
index 4c25d9b2f138..d22a1af214ce 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -1120,8 +1120,8 @@ static void i915_ttm_unmap_virtual(struct drm_i915_gem_object *obj)
        ttm_bo_unmap_virtual(i915_gem_to_ttm(obj));
 }
 
-static const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops = {
-       .name = "i915_gem_object_ttm",
+static const struct drm_i915_gem_object_ops i915_gem_ttm_user_obj_ops = {
+       .name = "i915_gem_object_ttm_user",
        .flags = I915_GEM_OBJECT_IS_SHRINKABLE |
                 I915_GEM_OBJECT_SELF_MANAGED_SHRINK_LIST,
 
@@ -1139,6 +1139,21 @@ static const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops = {
        .mmap_ops = &vm_ops_ttm,
 };
 
+static const struct drm_i915_gem_object_ops i915_gem_ttm_kern_obj_ops = {
+       .name = "i915_gem_object_ttm_kern",
+       .flags = I915_GEM_OBJECT_IS_SHRINKABLE |
+                I915_GEM_OBJECT_SELF_MANAGED_SHRINK_LIST,
+
+       .get_pages = i915_ttm_get_pages,
+       .put_pages = i915_ttm_put_pages,
+       .truncate = i915_ttm_truncate,
+       .shrink = i915_ttm_shrink,
+
+       .adjust_lru = i915_ttm_adjust_lru,
+       .delayed_free = i915_ttm_delayed_free,
+       .migrate = i915_ttm_migrate,
+};
+
 void i915_ttm_bo_destroy(struct ttm_buffer_object *bo)
 {
        struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
@@ -1193,10 +1208,19 @@ int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
                .no_wait_gpu = false,
        };
        enum ttm_bo_type bo_type;
+       const struct drm_i915_gem_object_ops *ops;
        int ret;
 
        drm_gem_private_object_init(&i915->drm, &obj->base, size);
-       i915_gem_object_init(obj, &i915_gem_ttm_obj_ops, &lock_class, flags);
+
+       if (flags & I915_BO_ALLOC_USER && intel_region_to_ttm_type(mem) != I915_PL_STOLEN) {
+               bo_type = ttm_bo_type_device;
+               ops = &i915_gem_ttm_user_obj_ops;
+       } else {
+               bo_type = ttm_bo_type_kernel;
+               ops = &i915_gem_ttm_kern_obj_ops;
+       }
+       i915_gem_object_init(obj, ops, &lock_class, flags);
 
        obj->bo_offset = offset;
 
@@ -1206,8 +1230,6 @@ int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
 
        INIT_RADIX_TREE(&obj->ttm.get_io_page.radix, GFP_KERNEL | __GFP_NOWARN);
        mutex_init(&obj->ttm.get_io_page.lock);
-       bo_type = (obj->flags & I915_BO_ALLOC_USER) ? ttm_bo_type_device :
-               ttm_bo_type_kernel;
 
        obj->base.vma_node.driver_private = i915_gem_to_ttm(obj);
 
-- 
2.25.1

Reply via email to