Signed-off-by: Matthew Wilcox <wi...@infradead.org>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c       | 23 +++----
 drivers/gpu/drm/drm_gem.c                     | 67 +++++++------------
 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c  | 12 ++--
 drivers/gpu/drm/i915/i915_debugfs.c           | 20 +++---
 drivers/gpu/drm/i915/i915_gem_object.h        |  2 +-
 .../gpu/drm/i915/selftests/i915_gem_context.c |  7 +-
 drivers/gpu/drm/msm/msm_gem_submit.c          | 12 ++--
 drivers/gpu/drm/v3d/v3d_gem.c                 | 17 +++--
 drivers/gpu/drm/vc4/vc4_gem.c                 |  6 +-
 include/drm/drm_file.h                        |  9 +--
 10 files changed, 77 insertions(+), 98 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index f4f00217546e..8c415fdfd828 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -98,16 +98,14 @@ void amdgpu_gem_force_release(struct amdgpu_device *adev)
 
        list_for_each_entry(file, &ddev->filelist, lhead) {
                struct drm_gem_object *gobj;
-               int handle;
+               unsigned long handle;
 
                WARN_ONCE(1, "Still active user space clients!\n");
-               spin_lock(&file->table_lock);
-               idr_for_each_entry(&file->object_idr, gobj, handle) {
+               xa_for_each(&file->objects, handle, gobj) {
                        WARN_ONCE(1, "And also active allocations!\n");
                        drm_gem_object_put_unlocked(gobj);
                }
-               idr_destroy(&file->object_idr);
-               spin_unlock(&file->table_lock);
+               xa_destroy(&file->objects);
        }
 
        mutex_unlock(&ddev->filelist_mutex);
@@ -784,12 +782,10 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
                seq_printf((m), " " #flag);             \
        }
 
-static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data)
+static int amdgpu_debugfs_gem_bo_info(unsigned int id,
+               struct drm_gem_object *gobj, struct seq_file *m)
 {
-       struct drm_gem_object *gobj = ptr;
        struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
-       struct seq_file *m = data;
-
        struct dma_buf_attachment *attachment;
        struct dma_buf *dma_buf;
        unsigned domain;
@@ -851,6 +847,8 @@ static int amdgpu_debugfs_gem_info(struct seq_file *m, void *data)
 
        list_for_each_entry(file, &dev->filelist, lhead) {
                struct task_struct *task;
+               struct drm_gem_object *gobj;
+               unsigned long index;
 
                /*
                 * Although we have a valid reference on file->pid, that does
@@ -864,9 +862,10 @@ static int amdgpu_debugfs_gem_info(struct seq_file *m, void *data)
                           task ? task->comm : "<unknown>");
                rcu_read_unlock();
 
-               spin_lock(&file->table_lock);
-               idr_for_each(&file->object_idr, amdgpu_debugfs_gem_bo_info, m);
-               spin_unlock(&file->table_lock);
+               xa_lock(&file->objects);
+               xa_for_each(&file->objects, index, gobj)
+                       amdgpu_debugfs_gem_bo_info(index, gobj, m);
+               xa_unlock(&file->objects);
        }
 
        mutex_unlock(&dev->filelist_mutex);
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 0a52a958cffe..dc0d3cc3bb35 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -251,10 +251,9 @@ drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj)
  * handle references on objects.
  */
 static int
-drm_gem_object_release_handle(int id, void *ptr, void *data)
+drm_gem_object_release_handle(struct drm_gem_object *obj,
+               struct drm_file *file_priv)
 {
-       struct drm_file *file_priv = data;
-       struct drm_gem_object *obj = ptr;
        struct drm_device *dev = obj->dev;
 
        if (obj->funcs && obj->funcs->close)
@@ -285,23 +284,17 @@ drm_gem_handle_delete(struct drm_file *filp, u32 handle)
 {
        struct drm_gem_object *obj;
 
-       spin_lock(&filp->table_lock);
-
        /* Check if we currently have a reference on the object */
-       obj = idr_replace(&filp->object_idr, NULL, handle);
-       spin_unlock(&filp->table_lock);
-       if (IS_ERR_OR_NULL(obj))
-               return -EINVAL;
-
-       /* Release driver's reference and decrement refcount. */
-       drm_gem_object_release_handle(handle, obj, filp);
+       obj = xa_store(&filp->objects, handle, NULL, 0);
+       if (obj) {
+               /* Release driver's reference and decrement refcount. */
+               drm_gem_object_release_handle(obj, filp);
+       }
 
        /* And finally make the handle available for future allocations. */
-       spin_lock(&filp->table_lock);
-       idr_remove(&filp->object_idr, handle);
-       spin_unlock(&filp->table_lock);
+       xa_erase(&filp->objects, handle);
 
-       return 0;
+       return obj ? 0 : -EINVAL;
 }
 EXPORT_SYMBOL(drm_gem_handle_delete);
 
@@ -390,24 +383,14 @@ drm_gem_handle_create_tail(struct drm_file *file_priv,
        if (obj->handle_count++ == 0)
                drm_gem_object_get(obj);
 
-       /*
-        * Get the user-visible handle using idr.  Preload and perform
-        * allocation under our spinlock.
-        */
-       idr_preload(GFP_KERNEL);
-       spin_lock(&file_priv->table_lock);
-
-       ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);
-
-       spin_unlock(&file_priv->table_lock);
-       idr_preload_end();
-
+       /* Get the user-visible handle */
+       ret = xa_alloc(&file_priv->objects, &handle, obj, xa_limit_31b,
+                       GFP_KERNEL);
        mutex_unlock(&dev->object_name_lock);
+
        if (ret < 0)
                goto err_unref;
 
-       handle = ret;
-
        ret = drm_vma_node_allow(&obj->vma_node, file_priv);
        if (ret)
                goto err_remove;
@@ -428,9 +411,7 @@ drm_gem_handle_create_tail(struct drm_file *file_priv,
 err_revoke:
        drm_vma_node_revoke(&obj->vma_node, file_priv);
 err_remove:
-       spin_lock(&file_priv->table_lock);
-       idr_remove(&file_priv->object_idr, handle);
-       spin_unlock(&file_priv->table_lock);
+       xa_erase(&file_priv->objects, handle);
 err_unref:
        drm_gem_object_handle_put_unlocked(obj);
        return ret;
@@ -644,14 +625,12 @@ drm_gem_object_lookup(struct drm_file *filp, u32 handle)
 {
        struct drm_gem_object *obj;
 
-       spin_lock(&filp->table_lock);
-
+       xa_lock(&filp->objects);
        /* Check if we currently have a reference on the object */
-       obj = idr_find(&filp->object_idr, handle);
+       obj = xa_load(&filp->objects, handle);
        if (obj)
                drm_gem_object_get(obj);
-
-       spin_unlock(&filp->table_lock);
+       xa_unlock(&filp->objects);
 
        return obj;
 }
@@ -784,8 +763,7 @@ drm_gem_open_ioctl(struct drm_device *dev, void *data,
 void
 drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
 {
-       idr_init_base(&file_private->object_idr, 1);
-       spin_lock_init(&file_private->table_lock);
+       xa_init_flags(&file_private->objects, XA_FLAGS_ALLOC1);
 }
 
 /**
@@ -800,9 +778,12 @@ drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
 void
 drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
 {
-       idr_for_each(&file_private->object_idr,
-                    &drm_gem_object_release_handle, file_private);
-       idr_destroy(&file_private->object_idr);
+       unsigned long index;
+       struct drm_gem_object *obj;
+
+       xa_for_each(&file_private->objects, index, obj)
+               drm_gem_object_release_handle(obj, file_private);
+       xa_destroy(&file_private->objects);
 }
 
 /**
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index 30875f8f2933..98f803510e0a 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -65,7 +65,7 @@ static int submit_lookup_objects(struct etnaviv_gem_submit *submit,
        unsigned i;
        int ret = 0;
 
-       spin_lock(&file->table_lock);
+       xa_lock(&file->objects);
 
        for (i = 0, bo = submit_bos; i < nr_bos; i++, bo++) {
                struct drm_gem_object *obj;
@@ -79,9 +79,9 @@ static int submit_lookup_objects(struct etnaviv_gem_submit *submit,
                submit->bos[i].flags = bo->flags;
 
                /* normally use drm_gem_object_lookup(), but for bulk lookup
-                * all under single table_lock just hit object_idr directly:
+                * all under a single lock just hit the xarray directly:
                 */
-               obj = idr_find(&file->object_idr, bo->handle);
+               obj = xa_load(&file->objects, bo->handle);
                if (!obj) {
                        DRM_ERROR("invalid handle %u at index %u\n",
                                  bo->handle, i);
@@ -90,8 +90,8 @@ static int submit_lookup_objects(struct etnaviv_gem_submit *submit,
                }
 
                /*
-                * Take a refcount on the object. The file table lock
-                * prevents the object_idr's refcount on this being dropped.
+                * Take a refcount on the object. The xarray lock
+                * prevents the handle's reference on this being dropped.
                 */
                drm_gem_object_get(obj);
 
@@ -100,7 +100,7 @@ static int submit_lookup_objects(struct etnaviv_gem_submit *submit,
 
 out_unlock:
        submit->nr_bos = i;
-       spin_unlock(&file->table_lock);
+       xa_unlock(&file->objects);
 
        return ret;
 }
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 40a61ef9aac1..030263870ba6 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -304,10 +304,9 @@ struct file_stats {
        u64 active, inactive;
 };
 
-static int per_file_stats(int id, void *ptr, void *data)
+static
+int per_file_stats(struct drm_i915_gem_object *obj, struct file_stats *stats)
 {
-       struct drm_i915_gem_object *obj = ptr;
-       struct file_stats *stats = data;
        struct i915_vma *vma;
 
        lockdep_assert_held(&obj->base.dev->struct_mutex);
@@ -370,7 +369,7 @@ static void print_batch_pool_stats(struct seq_file *m,
                        list_for_each_entry(obj,
                                            &engine->batch_pool.cache_list[j],
                                            batch_pool_link)
-                               per_file_stats(0, obj, &stats);
+                               per_file_stats(obj, &stats);
                }
        }
 
@@ -387,9 +386,9 @@ static int per_file_ctx_stats(int idx, void *ptr, void *data)
                struct intel_context *ce = to_intel_context(ctx, engine);
 
                if (ce->state)
-                       per_file_stats(0, ce->state->obj, data);
+                       per_file_stats(ce->state->obj, data);
                if (ce->ring)
-                       per_file_stats(0, ce->ring->vma->obj, data);
+                       per_file_stats(ce->ring->vma->obj, data);
        }
 
        return 0;
@@ -521,17 +520,20 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
        print_context_stats(m, dev_priv);
        list_for_each_entry_reverse(file, &dev->filelist, lhead) {
                struct file_stats stats;
+               struct drm_i915_gem_object *obj;
                struct drm_i915_file_private *file_priv = file->driver_priv;
                struct i915_request *request;
                struct task_struct *task;
+               unsigned long index;
 
                mutex_lock(&dev->struct_mutex);
 
                memset(&stats, 0, sizeof(stats));
                stats.file_priv = file->driver_priv;
-               spin_lock(&file->table_lock);
-               idr_for_each(&file->object_idr, per_file_stats, &stats);
-               spin_unlock(&file->table_lock);
+               xa_lock(&file->objects);
+               xa_for_each(&file->objects, index, obj)
+                       per_file_stats(obj, &stats);
+               xa_unlock(&file->objects);
                /*
                 * Although we have a valid reference on file->pid, that does
                 * not guarantee that the task_struct who called get_pid() is
diff --git a/drivers/gpu/drm/i915/i915_gem_object.h b/drivers/gpu/drm/i915/i915_gem_object.h
index a6dd7c46de0d..7f6493229f50 100644
--- a/drivers/gpu/drm/i915/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/i915_gem_object.h
@@ -310,7 +310,7 @@ i915_gem_object_lookup_rcu(struct drm_file *file, u32 handle)
 #ifdef CONFIG_LOCKDEP
        WARN_ON(debug_locks && !lock_is_held(&rcu_lock_map));
 #endif
-       return idr_find(&file->object_idr, handle);
+       return xa_load(&file->objects, handle);
 }
 
 static inline struct drm_i915_gem_object *
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
index 7d82043aff10..5e30bef22a8c 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
@@ -492,14 +492,15 @@ static int cpu_check(struct drm_i915_gem_object *obj, unsigned int max)
 static int file_add_object(struct drm_file *file,
                            struct drm_i915_gem_object *obj)
 {
-       int err;
+       int err, id;
 
        GEM_BUG_ON(obj->base.handle_count);
 
        /* tie the object to the drm_file for easy reaping */
-       err = idr_alloc(&file->object_idr, &obj->base, 1, 0, GFP_KERNEL);
+       err = xa_alloc(&file->objects, &id, &obj->base, xa_limit_32b,
+                       GFP_KERNEL);
        if (err < 0)
-               return  err;
+               return err;
 
        i915_gem_object_get(obj);
        obj->base.handle_count++;
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 12b983fc0b56..707d16e27e13 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -88,7 +88,7 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
        unsigned i;
        int ret = 0;
 
-       spin_lock(&file->table_lock);
+       xa_lock(&file->objects);
        pagefault_disable();
 
        for (i = 0; i < args->nr_bos; i++) {
@@ -105,12 +105,12 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
 
                if (copy_from_user_inatomic(&submit_bo, userptr, sizeof(submit_bo))) {
                        pagefault_enable();
-                       spin_unlock(&file->table_lock);
+                       xa_unlock(&file->objects);
                        if (copy_from_user(&submit_bo, userptr, sizeof(submit_bo))) {
                                ret = -EFAULT;
                                goto out;
                        }
-                       spin_lock(&file->table_lock);
+                       xa_lock(&file->objects);
                        pagefault_disable();
                }
 
@@ -129,9 +129,9 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
                submit->bos[i].iova  = submit_bo.presumed;
 
                /* normally use drm_gem_object_lookup(), but for bulk lookup
-                * all under single table_lock just hit object_idr directly:
+                * all under a single lock just hit the xarray directly:
                 */
-               obj = idr_find(&file->object_idr, submit_bo.handle);
+               obj = xa_load(&file->objects, submit_bo.handle);
                if (!obj) {
                        DRM_ERROR("invalid handle %u at index %u\n", submit_bo.handle, i);
                        ret = -EINVAL;
@@ -156,7 +156,7 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
 
 out_unlock:
        pagefault_enable();
-       spin_unlock(&file->table_lock);
+       xa_unlock(&file->objects);
 
 out:
        submit->nr_bos = i;
diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
index 05ca6319065e..44eca1a99cc5 100644
--- a/drivers/gpu/drm/v3d/v3d_gem.c
+++ b/drivers/gpu/drm/v3d/v3d_gem.c
@@ -362,21 +362,21 @@ v3d_cl_lookup_bos(struct drm_device *dev,
                goto fail;
        }
 
-       spin_lock(&file_priv->table_lock);
+       xa_lock(&file_priv->objects);
        for (i = 0; i < exec->bo_count; i++) {
-               struct drm_gem_object *bo = idr_find(&file_priv->object_idr,
+               struct drm_gem_object *bo = xa_load(&file_priv->objects,
                                                     handles[i]);
                if (!bo) {
                        DRM_DEBUG("Failed to look up GEM BO %d: %d\n",
                                  i, handles[i]);
                        ret = -ENOENT;
-                       spin_unlock(&file_priv->table_lock);
+                       xa_unlock(&file_priv->objects);
                        goto fail;
                }
                drm_gem_object_get(bo);
                exec->bo[i] = to_v3d_bo(bo);
        }
-       spin_unlock(&file_priv->table_lock);
+       xa_unlock(&file_priv->objects);
 
 fail:
        kvfree(handles);
@@ -671,26 +671,25 @@ v3d_submit_tfu_ioctl(struct drm_device *dev, void *data,
        job->args = *args;
        job->v3d = v3d;
 
-       spin_lock(&file_priv->table_lock);
+       xa_lock(&file_priv->objects);
        for (bo_count = 0; bo_count < ARRAY_SIZE(job->bo); bo_count++) {
                struct drm_gem_object *bo;
 
                if (!args->bo_handles[bo_count])
                        break;
 
-               bo = idr_find(&file_priv->object_idr,
-                             args->bo_handles[bo_count]);
+               bo = xa_load(&file_priv->objects, args->bo_handles[bo_count]);
                if (!bo) {
                        DRM_DEBUG("Failed to look up GEM BO %d: %d\n",
                                  bo_count, args->bo_handles[bo_count]);
                        ret = -ENOENT;
-                       spin_unlock(&file_priv->table_lock);
+                       xa_unlock(&file_priv->objects);
                        goto fail;
                }
                drm_gem_object_get(bo);
                job->bo[bo_count] = to_v3d_bo(bo);
        }
-       spin_unlock(&file_priv->table_lock);
+       xa_unlock(&file_priv->objects);
 
        ret = v3d_lock_bo_reservations(job->bo, bo_count, &acquire_ctx);
        if (ret)
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index aea2b8dfec17..8715573957e9 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -759,9 +759,9 @@ vc4_cl_lookup_bos(struct drm_device *dev,
                goto fail;
        }
 
-       spin_lock(&file_priv->table_lock);
+       xa_lock(&file_priv->objects);
        for (i = 0; i < exec->bo_count; i++) {
-               struct drm_gem_object *bo = idr_find(&file_priv->object_idr,
+               struct drm_gem_object *bo = xa_load(&file_priv->objects,
                                                     handles[i]);
                if (!bo) {
                        DRM_DEBUG("Failed to look up GEM BO %d: %d\n",
@@ -773,7 +773,7 @@ vc4_cl_lookup_bos(struct drm_device *dev,
                drm_gem_object_get(bo);
                exec->bo[i] = (struct drm_gem_cma_object *)bo;
        }
-       spin_unlock(&file_priv->table_lock);
+       xa_unlock(&file_priv->objects);
 
        if (ret)
                goto fail_put_bo;
diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h
index 84ac79219e4c..685f3cd9d071 100644
--- a/include/drm/drm_file.h
+++ b/include/drm/drm_file.h
@@ -240,15 +240,12 @@ struct drm_file {
        struct drm_minor *minor;
 
        /**
-        * @object_idr:
+        * @objects:
         *
         * Mapping of mm object handles to object pointers. Used by the GEM
-        * subsystem. Protected by @table_lock.
+        * subsystem.
         */
-       struct idr object_idr;
-
-       /** @table_lock: Protects @object_idr. */
-       spinlock_t table_lock;
+       struct xarray objects;
 
        /** @syncobj_idr: Mapping of sync object handles to object pointers. */
        struct idr syncobj_idr;
-- 
2.20.1

_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel

Reply via email to