From: Jérôme Glisse <jgli...@redhat.com>

When a range of virtual addresses is updated to read-only and the
corresponding user ptr objects are already read-only, it is pointless to
do anything. Optimize this case out.

Signed-off-by: Jérôme Glisse <jgli...@redhat.com>
Cc: Christian König <christian.koe...@amd.com>
Cc: Jan Kara <j...@suse.cz>
Cc: Felix Kuehling <felix.kuehl...@amd.com>
Cc: Jason Gunthorpe <j...@mellanox.com>
Cc: Andrew Morton <a...@linux-foundation.org>
Cc: Matthew Wilcox <mawil...@microsoft.com>
Cc: Ross Zwisler <zwis...@kernel.org>
Cc: Dan Williams <dan.j.willi...@intel.com>
Cc: Paolo Bonzini <pbonz...@redhat.com>
Cc: Radim Krčmář <rkrc...@redhat.com>
Cc: Michal Hocko <mho...@kernel.org>
Cc: Ralph Campbell <rcampb...@nvidia.com>
Cc: John Hubbard <jhubb...@nvidia.com>
Cc: k...@vger.kernel.org
Cc: dri-de...@lists.freedesktop.org
Cc: linux-r...@vger.kernel.org
Cc: linux-fsde...@vger.kernel.org
Cc: Arnd Bergmann <a...@arndb.de>
---
 drivers/gpu/drm/i915/i915_gem_userptr.c | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)

diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c 
b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 9558582c105e..23330ac3d7ea 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -59,6 +59,7 @@ struct i915_mmu_object {
        struct interval_tree_node it;
        struct list_head link;
        struct work_struct work;
+       bool read_only;
        bool attached;
 };
 
@@ -119,6 +120,7 @@ static int 
i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
                container_of(_mn, struct i915_mmu_notifier, mn);
        struct i915_mmu_object *mo;
        struct interval_tree_node *it;
+       bool update_to_read_only;
        LIST_HEAD(cancelled);
        unsigned long end;
 
@@ -128,6 +130,8 @@ static int 
i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
        /* interval ranges are inclusive, but invalidate range is exclusive */
        end = range->end - 1;
 
+       update_to_read_only = mmu_notifier_range_update_to_read_only(range);
+
        spin_lock(&mn->lock);
        it = interval_tree_iter_first(&mn->objects, range->start, end);
        while (it) {
@@ -145,6 +149,17 @@ static int 
i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
                 * object if it is not in the process of being destroyed.
                 */
                mo = container_of(it, struct i915_mmu_object, it);
+
+               /*
+                * If it is already read only and we are updating to
+                * read only then we do not need to change anything.
+                * So save time and skip this one.
+                */
+               if (update_to_read_only && mo->read_only) {
+                       it = interval_tree_iter_next(it, range->start, end);
+                       continue;
+               }
+
                if (kref_get_unless_zero(&mo->obj->base.refcount))
                        queue_work(mn->wq, &mo->work);
 
@@ -270,6 +285,7 @@ i915_gem_userptr_init__mmu_notifier(struct 
drm_i915_gem_object *obj,
        mo->mn = mn;
        mo->obj = obj;
        mo->it.start = obj->userptr.ptr;
+       mo->read_only = i915_gem_object_is_readonly(obj);
        mo->it.last = obj->userptr.ptr + obj->base.size - 1;
        INIT_WORK(&mo->work, cancel_userptr);
 
-- 
2.17.2

Reply via email to