Now that we require the object lock for all ops, some code handling
race conditions can be removed.

This is a prerequisite for no longer taking short-term pins inside execbuf.

Signed-off-by: Maarten Lankhorst <maarten.lankho...@linux.intel.com>
Acked-by: Niranjana Vishwanathapura <niranjana.vishwanathap...@intel.com>
---
 drivers/gpu/drm/i915/i915_vma.c | 40 +++++----------------------------
 1 file changed, 5 insertions(+), 35 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 100739b3e084..3236cf1c1400 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -776,7 +776,6 @@ i915_vma_detach(struct i915_vma *vma)
 static bool try_qad_pin(struct i915_vma *vma, unsigned int flags)
 {
        unsigned int bound;
-       bool pinned = true;
 
        bound = atomic_read(&vma->flags);
        do {
@@ -786,34 +785,10 @@ static bool try_qad_pin(struct i915_vma *vma, unsigned int flags)
                if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR)))
                        return false;
 
-               if (!(bound & I915_VMA_PIN_MASK))
-                       goto unpinned;
-
                GEM_BUG_ON(((bound + 1) & I915_VMA_PIN_MASK) == 0);
        } while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));
 
        return true;
-
-unpinned:
-       /*
-        * If pin_count==0, but we are bound, check under the lock to avoid
-        * racing with a concurrent i915_vma_unbind().
-        */
-       mutex_lock(&vma->vm->mutex);
-       do {
-               if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR))) {
-                       pinned = false;
-                       break;
-               }
-
-               if (unlikely(flags & ~bound)) {
-                       pinned = false;
-                       break;
-               }
-       } while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));
-       mutex_unlock(&vma->vm->mutex);
-
-       return pinned;
 }
 
 static struct scatterlist *
@@ -1194,13 +1169,7 @@ __i915_vma_get_pages(struct i915_vma *vma)
                        vma->ggtt_view.type, ret);
        }
 
-       pages = xchg(&vma->pages, pages);
-
-       /* did we race against a put_pages? */
-       if (pages && pages != vma->obj->mm.pages) {
-               sg_free_table(vma->pages);
-               kfree(vma->pages);
-       }
+       vma->pages = pages;
 
        return ret;
 }
@@ -1234,13 +1203,14 @@ I915_SELFTEST_EXPORT int i915_vma_get_pages(struct i915_vma *vma)
 static void __vma_put_pages(struct i915_vma *vma, unsigned int count)
 {
        /* We allocate under vma_get_pages, so beware the shrinker */
-       struct sg_table *pages = READ_ONCE(vma->pages);
+       struct sg_table *pages = vma->pages;
 
        GEM_BUG_ON(atomic_read(&vma->pages_count) < count);
 
        if (atomic_sub_return(count, &vma->pages_count) == 0) {
-               if (pages == cmpxchg(&vma->pages, pages, NULL) &&
-                   pages != vma->obj->mm.pages) {
+               vma->pages = NULL;
+
+               if (pages != vma->obj->mm.pages) {
                        sg_free_table(pages);
                        kfree(pages);
                }
-- 
2.34.0

Reply via email to