In the next patch, we will allow obj->mm.__pages to be populated
asynchronously. This means that simply acquiring a pages_pin_count is no
longer sufficient to be sure the pages are there; we must first acquire
the pin (to prevent the pages from disappearing again) and then wait on
obj->mm.complete for the pages to arrive.
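
The consumer-side ordering is then pin first, wait second. A minimal
sketch (example_use_pages() is a hypothetical caller, shown only to
illustrate the protocol; the helpers it uses are the ones added by this
patch):

	static int example_use_pages(struct drm_i915_gem_object *obj)
	{
		struct sg_table *pages;
		int err;

		/* Take the pin first so the pages cannot be unset beneath us */
		err = i915_gem_object_pin_pages(obj);
		if (err)
			return err;

		/* Then wait for any asynchronous get_pages to complete */
		err = i915_gem_object_wait_for_pages(obj);
		if (err)
			goto out_unpin;

		/* Only now is the sg_table known to be valid */
		pages = i915_gem_object_pages(obj);
		/* ... use pages->sgl ... */

	out_unpin:
		i915_gem_object_unpin_pages(obj);
		return err;
	}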

Signed-off-by: Chris Wilson <ch...@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_gem.c                  | 26 +++++++---
 drivers/gpu/drm/i915/i915_gem_clflush.c          |  4 +-
 drivers/gpu/drm/i915/i915_gem_dmabuf.c           | 14 ++++--
 drivers/gpu/drm/i915/i915_gem_gtt.c              | 23 +++++++--
 drivers/gpu/drm/i915/i915_gem_object.c           | 61 ++++++++++++++++--------
 drivers/gpu/drm/i915/i915_gem_object.h           | 47 ++++++++++++++----
 drivers/gpu/drm/i915/i915_gem_render_state.c     |  2 +-
 drivers/gpu/drm/i915/i915_gem_shrinker.c         | 14 +++---
 drivers/gpu/drm/i915/i915_gem_stolen.c           |  2 +-
 drivers/gpu/drm/i915/i915_gem_tiling.c           |  2 +-
 drivers/gpu/drm/i915/i915_gem_userptr.c          | 48 +++++--------------
 drivers/gpu/drm/i915/i915_vma.c                  |  4 +-
 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c    |  4 +-
 drivers/gpu/drm/i915/selftests/i915_gem_object.c | 45 +++++++++++++++--
 drivers/gpu/drm/i915/selftests/i915_vma.c        |  6 +--
 drivers/gpu/drm/i915/selftests/mock_gtt.c        |  2 +-
 16 files changed, 203 insertions(+), 101 deletions(-)
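
The backend-facing half of the contract (mirrored by the map_get_pages()
selftest changes below): ->get_pages() may now return NULL to indicate
that the pages will arrive asynchronously, after which the backend must
call __i915_gem_object_set_pages() exactly once with either the sg_table
or an ERR_PTR; both paths signal obj->mm.complete for waiters. A
hypothetical backend sketch (the example_* names, including
example_build_sg(), are illustrative only):

	struct example_async {
		struct work_struct work;
		struct drm_i915_gem_object *obj;
	};

	static void example_get_pages_work(struct work_struct *wrk)
	{
		struct example_async *e = container_of(wrk, typeof(*e), work);

		/* Either a valid sg_table or an ERR_PTR; both signal
		 * obj->mm.complete (example_build_sg() is hypothetical) */
		__i915_gem_object_set_pages(e->obj, example_build_sg(e->obj));
		kfree(e);
	}

	static struct sg_table *example_get_pages(struct drm_i915_gem_object *obj)
	{
		struct example_async *e;

		e = kmalloc(sizeof(*e), GFP_KERNEL);
		if (!e)
			return ERR_PTR(-ENOMEM);

		INIT_WORK(&e->work, example_get_pages_work);
		e->obj = obj;
		schedule_work(&e->work);

		return NULL; /* async: the worker sets the pages later */
	}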

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 0316215221f8..ce3d83f924bf 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -596,7 +596,7 @@ i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
                return ret;
 
        __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
-       if (obj->mm.pages)
+       if (i915_gem_object_has_pages(obj))
                return -EBUSY;
 
        GEM_BUG_ON(obj->ops != &i915_gem_object_ops);
@@ -2144,7 +2144,7 @@ void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
        struct address_space *mapping;
 
        lockdep_assert_held(&obj->mm.lock);
-       GEM_BUG_ON(obj->mm.pages);
+       GEM_BUG_ON(i915_gem_object_has_pages(obj));
 
        switch (obj->mm.madv) {
        case I915_MADV_DONTNEED:
@@ -3867,7 +3867,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
        if (err)
                goto out;
 
-       if (obj->mm.pages &&
+       if (i915_gem_object_has_pages(obj) &&
            i915_gem_object_is_tiled(obj) &&
            dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
                if (obj->mm.madv == I915_MADV_WILLNEED) {
@@ -3886,7 +3886,8 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
                obj->mm.madv = args->madv;
 
        /* if the object is no longer attached, discard its backing storage */
-       if (obj->mm.madv == I915_MADV_DONTNEED && !obj->mm.pages)
+       if (obj->mm.madv == I915_MADV_DONTNEED &&
+           !i915_gem_object_has_pages(obj))
                i915_gem_object_truncate(obj);
 
        args->retained = obj->mm.madv != __I915_MADV_PURGED;
@@ -3928,6 +3929,8 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
        obj->mm.madv = I915_MADV_WILLNEED;
        INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
        mutex_init(&obj->mm.get_page.lock);
+       init_completion(&obj->mm.complete);
+       complete_all(&obj->mm.complete);
 
        i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size);
 }
@@ -4071,7 +4074,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
                if (WARN_ON(i915_gem_object_has_pinned_pages(obj)))
                        atomic_set(&obj->mm.pages_pin_count, 0);
                __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
-               GEM_BUG_ON(obj->mm.pages);
+               GEM_BUG_ON(i915_gem_object_has_pages(obj));
 
                if (obj->base.import_attach)
                        drm_prime_gem_destroy(&obj->base, NULL);
@@ -4772,7 +4775,16 @@ i915_gem_object_create_from_data(struct drm_i915_private *dev_priv,
        if (ret)
                goto fail;
 
-       sg = obj->mm.pages;
+       ret = i915_gem_object_wait_for_pages(obj);
+       if (ret)
+               goto fail_unpin;
+
+       sg = i915_gem_object_pages(obj);
+       if (IS_ERR(sg)) {
+               ret = PTR_ERR(sg);
+               goto fail_unpin;
+       }
+
        bytes = sg_copy_from_buffer(sg->sgl, sg->nents, (void *)data, size);
        obj->mm.dirty = true; /* Backing store is now out of date */
        i915_gem_object_unpin_pages(obj);
@@ -4785,6 +4797,8 @@ i915_gem_object_create_from_data(struct drm_i915_private *dev_priv,
 
        return obj;
 
+fail_unpin:
+       i915_gem_object_unpin_pages(obj);
 fail:
        i915_gem_object_put(obj);
        return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/i915/i915_gem_clflush.c b/drivers/gpu/drm/i915/i915_gem_clflush.c
index d925fb582ba7..d68817231e98 100644
--- a/drivers/gpu/drm/i915/i915_gem_clflush.c
+++ b/drivers/gpu/drm/i915/i915_gem_clflush.c
@@ -71,7 +71,7 @@ static const struct dma_fence_ops i915_clflush_ops = {
 
 static void __i915_do_clflush(struct drm_i915_gem_object *obj)
 {
-       drm_clflush_sg(obj->mm.pages);
+       drm_clflush_sg(i915_gem_object_pages(obj));
        obj->cache_dirty = false;
 
        intel_fb_obj_flush(obj, ORIGIN_CPU);
@@ -176,7 +176,7 @@ void i915_gem_clflush_object(struct drm_i915_gem_object *obj,
                reservation_object_unlock(obj->resv);
 
                i915_sw_fence_commit(&clflush->wait);
-       } else if (obj->mm.pages) {
+       } else if (i915_gem_object_has_pinned_pages(obj)) {
                __i915_do_clflush(obj);
        } else {
                GEM_BUG_ON(obj->base.write_domain != I915_GEM_DOMAIN_CPU);
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 74edd187d0aa..5eb8a81887b6 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -40,7 +40,7 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme
                                             enum dma_data_direction dir)
 {
        struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
-       struct sg_table *st;
+       struct sg_table *st, *pages;
        struct scatterlist *src, *dst;
        int ret, i;
 
@@ -48,6 +48,10 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme
        if (ret)
                goto err;
 
+       ret = i915_gem_object_wait_for_pages(obj);
+       if (ret)
+               goto err_unpin_pages;
+
        /* Copy sg so that we make an independent mapping */
        st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
        if (st == NULL) {
@@ -55,13 +59,15 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme
                goto err_unpin_pages;
        }
 
-       ret = sg_alloc_table(st, obj->mm.pages->nents, GFP_KERNEL);
+       pages = i915_gem_object_pages(obj);
+
+       ret = sg_alloc_table(st, pages->nents, GFP_KERNEL);
        if (ret)
                goto err_free;
 
-       src = obj->mm.pages->sgl;
+       src = pages->sgl;
        dst = st->sgl;
-       for (i = 0; i < obj->mm.pages->nents; i++) {
+       for (i = 0; i < pages->nents; i++) {
                sg_set_page(dst, sg_page(src), src->length, 0);
                dst = sg_next(dst);
                src = sg_next(src);
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index db335d3ba3ee..0454ade960e6 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -190,21 +190,30 @@ static int ppgtt_bind_vma(struct i915_vma *vma,
                          enum i915_cache_level cache_level,
                          u32 unused)
 {
+       struct sg_table *pages;
        u32 pte_flags;
        int ret;
 
+       ret = i915_gem_object_wait_for_pages(vma->obj);
+       if (ret)
+               return ret;
+
+       pages = i915_gem_object_pages(vma->obj);
+       if (IS_ERR(pages))
+               return PTR_ERR(pages);
+
        ret = vma->vm->allocate_va_range(vma->vm, vma->node.start, vma->size);
        if (ret)
                return ret;
 
-       vma->pages = vma->obj->mm.pages;
+       vma->pages = pages;
 
        /* Currently applicable only to VLV */
        pte_flags = 0;
        if (vma->obj->gt_ro)
                pte_flags |= PTE_READ_ONLY;
 
-       vma->vm->insert_entries(vma->vm, vma->pages, vma->node.start,
+       vma->vm->insert_entries(vma->vm, pages, vma->node.start,
                                cache_level, pte_flags);
 
        return 0;
@@ -2046,7 +2055,7 @@ int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
                 * try again - if there are no more pages to remove from
                 * the DMA remapper, i915_gem_shrink will return 0.
                 */
-               GEM_BUG_ON(obj->mm.pages == pages);
+               GEM_BUG_ON(obj->mm.__pages == pages);
        } while (i915_gem_shrink(to_i915(obj->base.dev),
                                 obj->base.size >> PAGE_SHIFT,
                                 I915_SHRINK_BOUND |
@@ -2315,7 +2324,7 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
 
 err_pages:
        if (!(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND))) {
-               if (vma->pages != vma->obj->mm.pages) {
+               if (vma->pages != vma->obj->mm.__pages) {
                        GEM_BUG_ON(!vma->pages);
                        sg_free_table(vma->pages);
                        kfree(vma->pages);
@@ -3170,9 +3179,13 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma)
         */
        GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj));
 
+       ret = i915_gem_object_wait_for_pages(vma->obj);
+       if (ret)
+               return ret;
+
        switch (vma->ggtt_view.type) {
        case I915_GGTT_VIEW_NORMAL:
-               vma->pages = vma->obj->mm.pages;
+               vma->pages = i915_gem_object_pages(vma->obj);
                return 0;
 
        case I915_GGTT_VIEW_ROTATED:
diff --git a/drivers/gpu/drm/i915/i915_gem_object.c b/drivers/gpu/drm/i915/i915_gem_object.c
index 30a704ea7e3b..9a07530ca2b3 100644
--- a/drivers/gpu/drm/i915/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/i915_gem_object.c
@@ -25,22 +25,34 @@
 #include "i915_drv.h"
 #include "i915_gem_object.h"
 
-void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
-                                struct sg_table *pages)
+int __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
+                               struct sg_table *pages)
 {
-       lockdep_assert_held(&obj->mm.lock);
+       int err = 0;
+
+       /* Serialized by obj->mm.lock + obj->mm.complete */
+       GEM_BUG_ON(completion_done(&obj->mm.complete));
+       GEM_BUG_ON(!pages);
+
+       obj->mm.__pages = pages;
+       if (unlikely(IS_ERR(pages))) {
+               err = PTR_ERR(pages);
+               goto out;
+       }
 
        obj->mm.get_page.sg_pos = pages->sgl;
        obj->mm.get_page.sg_idx = 0;
 
-       obj->mm.pages = pages;
-
        if (i915_gem_object_is_tiled(obj) &&
            to_i915(obj->base.dev)->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
                GEM_BUG_ON(obj->mm.quirked);
                __i915_gem_object_pin_pages(obj);
                obj->mm.quirked = true;
        }
+
+out:
+       complete_all(&obj->mm.complete);
+       return err;
 }
 
 static int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
@@ -54,12 +66,15 @@ static int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
                return -EFAULT;
        }
 
+       if (!completion_done(&obj->mm.complete))
+               return 0;
+
+       reinit_completion(&obj->mm.complete);
        pages = obj->ops->get_pages(obj);
-       if (unlikely(IS_ERR(pages)))
-               return PTR_ERR(pages);
+       if (!pages) /* async completion */
+               return 0;
 
-       __i915_gem_object_set_pages(obj, pages);
-       return 0;
+       return __i915_gem_object_set_pages(obj, pages);
 }
 
 /* Ensure that the associated pages are gathered from the backing storage
@@ -77,12 +92,10 @@ int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
        if (err)
                return err;
 
-       if (unlikely(!obj->mm.pages)) {
+       if (unlikely(!obj->mm.__pages)) {
                err = ____i915_gem_object_get_pages(obj);
                if (err)
                        goto unlock;
-
-               smp_mb__before_atomic();
        }
        atomic_inc(&obj->mm.pages_pin_count);
 
@@ -109,7 +122,9 @@ void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
                return;
 
        GEM_BUG_ON(obj->bind_count);
-       if (!READ_ONCE(obj->mm.pages))
+
+       wait_for_completion(&obj->mm.complete);
+       if (!i915_gem_object_has_pages(obj))
                return;
 
        /* May be called by shrinker from within get_pages() (on another bo) */
@@ -120,7 +135,7 @@ void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
        /* ->put_pages might need to allocate memory for the bit17 swizzle
         * array, hence protect them from being reaped by removing them from gtt
         * lists early. */
-       pages = fetch_and_zero(&obj->mm.pages);
+       pages = fetch_and_zero(&obj->mm.__pages);
        GEM_BUG_ON(!pages);
 
        if (obj->mm.mapping) {
@@ -136,8 +151,9 @@ void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
        }
 
        __i915_gem_object_reset_page_iter(obj);
+       if (!IS_ERR(pages))
+               obj->ops->put_pages(obj, pages);
 
-       obj->ops->put_pages(obj, pages);
 unlock:
        mutex_unlock(&obj->mm.lock);
 }
@@ -154,6 +170,8 @@ i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
        might_sleep();
        GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
        GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+       GEM_BUG_ON(!i915_gem_object_has_pages(obj));
+       GEM_BUG_ON(IS_ERR(obj->mm.__pages));
 
        /* As we iterate forward through the sg, we record each entry in a
         * radixtree for quick repeated (backwards) lookups. If we have seen
@@ -296,11 +314,11 @@ i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
 }
 
 /* The 'mapping' part of i915_gem_object_pin_map() below */
-static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
+static void *i915_gem_object_map(struct drm_i915_gem_object *obj,
                                 enum i915_map_type type)
 {
        unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
-       struct sg_table *sgt = obj->mm.pages;
+       const struct sg_table *sgt = i915_gem_object_pages(obj);
        struct sgt_iter sgt_iter;
        struct page *page;
        struct page *stack_pages[32];
@@ -359,17 +377,14 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
 
        pinned = true;
        if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
-               if (unlikely(!obj->mm.pages)) {
+               if (unlikely(!obj->mm.__pages)) {
                        ret = ____i915_gem_object_get_pages(obj);
                        if (ret)
                                goto err_unlock;
-
-                       smp_mb__before_atomic();
                }
                atomic_inc(&obj->mm.pages_pin_count);
                pinned = false;
        }
-       GEM_BUG_ON(!obj->mm.pages);
 
        ptr = ptr_unpack_bits(obj->mm.mapping, has_type);
        if (ptr && has_type != type) {
@@ -387,6 +402,10 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
        }
 
        if (!ptr) {
+               ret = i915_gem_object_wait_for_pages(obj);
+               if (ret)
+                       goto err_unpin;
+
                ptr = i915_gem_object_map(obj, type);
                if (!ptr) {
                        ret = -ENOMEM;
diff --git a/drivers/gpu/drm/i915/i915_gem_object.h b/drivers/gpu/drm/i915/i915_gem_object.h
index 1b0bd6576785..813d35b060be 100644
--- a/drivers/gpu/drm/i915/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/i915_gem_object.h
@@ -25,6 +25,7 @@
 #ifndef __I915_GEM_OBJECT_H__
 #define __I915_GEM_OBJECT_H__
 
+#include <linux/completion.h>
 #include <linux/reservation.h>
 
 #include <drm/drm_vma_manager.h>
@@ -124,8 +125,9 @@ struct drm_i915_gem_object {
        struct {
                struct mutex lock; /* protects the pages and their use */
                atomic_t pages_pin_count;
+               struct completion complete;
 
-               struct sg_table *pages;
+               struct sg_table *__pages;
                void *mapping;
 
                struct i915_gem_object_page_iter {
@@ -179,7 +181,6 @@ struct drm_i915_gem_object {
 
                        struct i915_mm_struct *mm;
                        struct i915_mmu_object *mmu_object;
-                       struct work_struct *work;
                } userptr;
 
                unsigned long scratch;
@@ -384,8 +385,8 @@ dma_addr_t
 i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
                                unsigned long n);
 
-void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
-                                struct sg_table *pages);
+int __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
+                               struct sg_table *pages);
 int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
 
 static inline int __must_check
@@ -402,22 +403,52 @@ i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
 static inline void
 __i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
 {
-       GEM_BUG_ON(!obj->mm.pages);
-
        atomic_inc(&obj->mm.pages_pin_count);
 }
 
 static inline bool
-i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj)
+i915_gem_object_has_pinned_pages(const struct drm_i915_gem_object *obj)
 {
        return atomic_read(&obj->mm.pages_pin_count);
 }
 
+static inline bool
+i915_gem_object_has_pages(const struct drm_i915_gem_object *obj)
+{
+       return READ_ONCE(obj->mm.__pages);
+}
+
+static inline int
+i915_gem_object_wait_for_pages(struct drm_i915_gem_object *obj)
+{
+       int err;
+
+       GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+       err = wait_for_completion_interruptible(&obj->mm.complete);
+       if (err)
+               return err;
+
+       GEM_BUG_ON(!i915_gem_object_has_pages(obj));
+       if (IS_ERR(obj->mm.__pages))
+               return PTR_ERR(obj->mm.__pages);
+
+       return 0;
+}
+
+static inline struct sg_table *
+i915_gem_object_pages(struct drm_i915_gem_object *obj)
+{
+       GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+       GEM_BUG_ON(!READ_ONCE(obj->mm.complete.done));
+       GEM_BUG_ON(!i915_gem_object_has_pages(obj));
+
+       return obj->mm.__pages;
+}
+
 static inline void
 __i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
 {
        GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
-       GEM_BUG_ON(!obj->mm.pages);
 
        atomic_dec(&obj->mm.pages_pin_count);
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index b42c81b42487..0096ca8b9c24 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -229,7 +229,7 @@ int i915_gem_render_state_emit(struct drm_i915_gem_request *req)
                return 0;
 
        /* Recreate the page after shrinking */
-       if (!so->vma->obj->mm.pages)
+       if (!i915_gem_object_has_pages(so->vma->obj))
                so->batch_offset = -1;
 
        ret = i915_vma_pin(so->vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index 3ba838f27292..7e3bb48e043e 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -72,9 +72,11 @@ static bool swap_available(void)
 
 static bool can_release_pages(struct drm_i915_gem_object *obj)
 {
-       if (!obj->mm.pages)
+       if (!i915_gem_object_has_pages(obj))
                return false;
 
+       GEM_BUG_ON(!completion_done(&obj->mm.complete));
+
        /* Consider only shrinkable ojects. */
        if (!i915_gem_object_is_shrinkable(obj))
                return false;
@@ -104,7 +106,7 @@ static bool unsafe_drop_pages(struct drm_i915_gem_object *obj)
 {
        if (i915_gem_object_unbind(obj) == 0)
                __i915_gem_object_put_pages(obj, I915_MM_SHRINKER);
-       return !READ_ONCE(obj->mm.pages);
+       return !i915_gem_object_has_pages(obj);
 }
 
 /**
@@ -193,7 +195,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
                                                       typeof(*obj),
                                                       global_link))) {
                        list_move_tail(&obj->global_link, &still_in_list);
-                       if (!obj->mm.pages) {
+                       if (!i915_gem_object_has_pages(obj)) {
                                list_del_init(&obj->global_link);
                                continue;
                        }
@@ -218,7 +220,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
                                /* May arrive from get_pages on another bo */
                                mutex_lock_nested(&obj->mm.lock,
                                                  I915_MM_SHRINKER);
-                               if (!obj->mm.pages) {
+                               if (!i915_gem_object_has_pages(obj)) {
                                        __i915_gem_object_invalidate(obj);
                                        list_del_init(&obj->global_link);
                                        count += obj->base.size >> PAGE_SHIFT;
@@ -392,7 +394,7 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
         */
        unbound = bound = unevictable = 0;
        list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link) {
-               if (!obj->mm.pages)
+               if (!i915_gem_object_has_pages(obj))
                        continue;
 
                if (!can_release_pages(obj))
@@ -401,7 +403,7 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
                        unbound += obj->base.size >> PAGE_SHIFT;
        }
        list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
-               if (!obj->mm.pages)
+               if (!i915_gem_object_has_pages(obj))
                        continue;
 
                if (!can_release_pages(obj))
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index f3abdc27c5dd..eb552c7e76e3 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -713,7 +713,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
 
        GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
 
-       vma->pages = obj->mm.pages;
+       vma->pages = i915_gem_object_pages(obj);
        vma->flags |= I915_VMA_GLOBAL_BIND;
        __i915_vma_set_map_and_fenceable(vma);
        list_move_tail(&vma->vm_link, &ggtt->base.inactive_list);
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 5128dac5ba3f..c1d669e32f41 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -263,7 +263,7 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
         * due to the change in swizzling.
         */
        mutex_lock(&obj->mm.lock);
-       if (obj->mm.pages &&
+       if (i915_gem_object_has_pages(obj) &&
            obj->mm.madv == I915_MADV_WILLNEED &&
            i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
                if (tiling == I915_TILING_NONE) {
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 120186122c82..609120b5ae5c 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -68,22 +68,20 @@ static void cancel_userptr(struct work_struct *work)
        struct drm_device *dev = obj->base.dev;
 
        i915_gem_object_wait(obj, I915_WAIT_ALL, MAX_SCHEDULE_TIMEOUT, NULL);
+       wait_for_completion(&obj->mm.complete);
 
        mutex_lock(&dev->struct_mutex);
-       /* Cancel any active worker and force us to re-evaluate gup */
-       obj->userptr.work = NULL;
-
        /* We are inside a kthread context and can't be interrupted */
        if (i915_gem_object_unbind(obj) == 0)
                __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
-       WARN_ONCE(obj->mm.pages,
+       WARN_ONCE(i915_gem_object_has_pages(obj),
                  "Failed to release pages: bind_count=%d, pages_pin_count=%d, 
pin_display=%d\n",
                  obj->bind_count,
                  atomic_read(&obj->mm.pages_pin_count),
                  obj->pin_display);
+       mutex_unlock(&dev->struct_mutex);
 
        i915_gem_object_put(obj);
-       mutex_unlock(&dev->struct_mutex);
 }
 
 static void add_object(struct i915_mmu_object *mo)
@@ -476,10 +474,8 @@ __i915_gem_userptr_set_active(struct drm_i915_gem_object *obj,
         */
        if (!value)
                del_object(obj->userptr.mmu_object);
-       else if (!work_pending(&obj->userptr.mmu_object->work))
-               add_object(obj->userptr.mmu_object);
        else
-               ret = -EAGAIN;
+               add_object(obj->userptr.mmu_object);
        spin_unlock(&obj->userptr.mmu_object->mn->lock);
 #endif
 
@@ -492,6 +488,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
        struct get_pages_work *work = container_of(_work, typeof(*work), work);
        struct drm_i915_gem_object *obj = work->obj;
        const int npages = obj->base.size >> PAGE_SHIFT;
+       struct sg_table *pages;
        struct page **pvec;
        int pinned, ret;
 
@@ -526,22 +523,14 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
                }
        }
 
-       mutex_lock(&obj->mm.lock);
-       if (obj->userptr.work == &work->work) {
-               struct sg_table *pages = ERR_PTR(ret);
-
-               if (pinned == npages) {
-                       pages = __i915_gem_userptr_set_pages(obj, pvec, npages);
-                       if (!IS_ERR(pages)) {
-                               __i915_gem_object_set_pages(obj, pages);
-                               pinned = 0;
-                               pages = NULL;
-                       }
-               }
-
-               obj->userptr.work = ERR_CAST(pages);
+       pages = ERR_PTR(ret);
+       if (pinned == npages)
+               pages = __i915_gem_userptr_set_pages(obj, pvec, npages);
+       __i915_gem_object_set_pages(obj, pages);
+       if (!IS_ERR(pages)) {
+               pinned = 0;
+               pages = NULL;
        }
-       mutex_unlock(&obj->mm.lock);
 
        release_pages(pvec, pinned, 0);
        drm_free_large(pvec);
@@ -580,8 +569,6 @@ __i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj,
        if (work == NULL)
                return ERR_PTR(-ENOMEM);
 
-       obj->userptr.work = &work->work;
-
        work->obj = i915_gem_object_get(obj);
 
        work->task = current;
@@ -591,7 +578,7 @@ __i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj,
        queue_work(to_i915(obj->base.dev)->mm.userptr_wq, &work->work);
 
        *active = true;
-       return ERR_PTR(-EAGAIN);
+       return NULL;
 }
 
 static struct sg_table *
@@ -620,14 +607,6 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
         * egregious cases from causing harm.
         */
 
-       if (obj->userptr.work) {
-               /* active flag should still be held for the pending work */
-               if (IS_ERR(obj->userptr.work))
-                       return ERR_CAST(obj->userptr.work);
-               else
-                       return ERR_PTR(-EAGAIN);
-       }
-
        /* Let the mmu-notifier know that we have begun and need cancellation */
        ret = __i915_gem_userptr_set_active(obj, true);
        if (ret)
@@ -669,7 +648,6 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
        struct sgt_iter sgt_iter;
        struct page *page;
 
-       BUG_ON(obj->userptr.work != NULL);
        __i915_gem_userptr_set_active(obj, false);
 
        if (obj->mm.madv != I915_MADV_WILLNEED)
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 9c1ef8d67ba7..cc29015837d5 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -691,6 +691,8 @@ int i915_vma_unbind(struct i915_vma *vma)
 
        GEM_BUG_ON(obj->bind_count == 0);
        GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+       GEM_BUG_ON(!i915_gem_object_has_pages(obj));
+       GEM_BUG_ON(!completion_done(&obj->mm.complete));
 
        if (i915_vma_is_map_and_fenceable(vma)) {
                /* release the fence reg _after_ flushing */
@@ -711,7 +713,7 @@ int i915_vma_unbind(struct i915_vma *vma)
        }
        vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
 
-       if (vma->pages != obj->mm.pages) {
+       if (vma->pages != obj->mm.__pages) {
                GEM_BUG_ON(!vma->pages);
                sg_free_table(vma->pages);
                kfree(vma->pages);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 0f3fa34377c6..dbcbae93f496 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -250,7 +250,7 @@ static int lowlevel_hole(struct drm_i915_private *i915,
                            vm->allocate_va_range(vm, addr, BIT_ULL(size)))
                                break;
 
-                       vm->insert_entries(vm, obj->mm.pages, addr,
+                       vm->insert_entries(vm, obj->mm.__pages, addr,
                                           I915_CACHE_NONE, 0);
                }
                count = n;
@@ -1089,7 +1089,7 @@ static void track_vma_bind(struct i915_vma *vma)
        obj->bind_count++; /* track for eviction later */
        __i915_gem_object_pin_pages(obj);
 
-       vma->pages = obj->mm.pages;
+       vma->pages = i915_gem_object_pages(obj);
        list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
 }
 
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_object.c b/drivers/gpu/drm/i915/selftests/i915_gem_object.c
index 1328332150f6..5902433d4e14 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_object.c
@@ -233,7 +233,7 @@ static void map_free_pages(struct sg_table *st)
 }
 
 static struct sg_table *
-map_get_pages(struct drm_i915_gem_object *obj)
+__map_get_pages(struct drm_i915_gem_object *obj)
 {
        struct sg_table *pages;
        struct scatterlist *sg;
@@ -281,6 +281,39 @@ map_get_pages(struct drm_i915_gem_object *obj)
 #undef GFP
 }
 
+struct map_work {
+       struct delayed_work work;
+       struct drm_i915_gem_object *obj;
+};
+
+static void map_get_pages_work(struct work_struct *work)
+{
+       struct map_work *data = container_of(work, typeof(*data), work.work);
+
+       __i915_gem_object_set_pages(data->obj, __map_get_pages(data->obj));
+       kfree(data);
+}
+
+static struct sg_table *
+map_get_pages(struct drm_i915_gem_object *obj)
+{
+       unsigned long delay = obj->scratch >> 8;
+       struct map_work *data;
+
+       if (!delay)
+               return __map_get_pages(obj);
+
+       data = kmalloc(sizeof(*data), GFP_KERNEL);
+       if (!data)
+               return ERR_PTR(-ENOMEM);
+
+       INIT_DELAYED_WORK(&data->work, map_get_pages_work);
+       data->obj = obj;
+
+       schedule_delayed_work(&data->work, delay);
+       return NULL;
+}
+
 static void map_put_pages(struct drm_i915_gem_object *obj,
                          struct sg_table *pages)
 {
@@ -297,6 +330,7 @@ static const struct drm_i915_gem_object_ops map_ops = {
 
 static struct drm_i915_gem_object *
 map_object(struct drm_i915_private *i915,
+          unsigned long delay,
           unsigned int flags)
 {
        struct drm_i915_gem_object *obj;
@@ -317,7 +351,7 @@ map_object(struct drm_i915_private *i915,
        obj->base.read_domains = I915_GEM_DOMAIN_CPU;
        obj->cache_level = I915_CACHE_NONE;
 
-       obj->scratch = flags;
+       obj->scratch = delay << 8 | flags;
 
        return obj;
 }
@@ -328,10 +362,13 @@ static int igt_gem_object_pin_map(void *arg)
        struct drm_i915_gem_object *obj;
        const struct {
                const char *name;
+               unsigned long delay;
                unsigned int flags;
        } phases[] = {
                { "sync" },
-               { "sync-fault", FAULT },
+               { "sync-fault", 0, FAULT },
+               { "async", msecs_to_jiffies(10) },
+               { "async-fault", msecs_to_jiffies(10), FAULT },
                { "sync-after-fault" },
                {},
        }, *p;
@@ -340,7 +377,7 @@ static int igt_gem_object_pin_map(void *arg)
        int err = 0;
 
        for (p = phases; p->name; p++) {
-               obj = map_object(i915, p->flags);
+               obj = map_object(i915, p->delay, p->flags);
                if (IS_ERR(obj))
                        return PTR_ERR(obj);
 
diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c
index fb9072d5877f..16e954b20a3e 100644
--- a/drivers/gpu/drm/i915/selftests/i915_vma.c
+++ b/drivers/gpu/drm/i915/selftests/i915_vma.c
@@ -484,7 +484,7 @@ static int igt_vma_rotate(void *arg)
                                                goto out_object;
                                        }
 
-                                       if (vma->pages == obj->mm.pages) {
+                                       if (vma->pages == obj->mm.__pages) {
                                                pr_err("VMA using unrotated 
object pages!\n");
                                                err = -EINVAL;
                                                goto out_object;
@@ -576,7 +576,7 @@ static bool assert_pin(struct i915_vma *vma,
                        ok = false;
                }
 
-               if (vma->pages == vma->obj->mm.pages) {
+               if (vma->pages == vma->obj->mm.__pages) {
                        pr_err("(%s) VMA using original object pages!\n",
                               name);
                        ok = false;
@@ -588,7 +588,7 @@ static bool assert_pin(struct i915_vma *vma,
                        ok = false;
                }
 
-               if (vma->pages != vma->obj->mm.pages) {
+               if (vma->pages != vma->obj->mm.__pages) {
                        pr_err("VMA not using object pages!\n");
                        ok = false;
                }
diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.c b/drivers/gpu/drm/i915/selftests/mock_gtt.c
index a61309c7cb3e..b321cd5dfa3f 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gtt.c
@@ -44,7 +44,7 @@ static int mock_bind_ppgtt(struct i915_vma *vma,
                           u32 flags)
 {
        GEM_BUG_ON(flags & I915_VMA_GLOBAL_BIND);
-       vma->pages = vma->obj->mm.pages;
+       vma->pages = i915_gem_object_pages(vma->obj);
        vma->flags |= I915_VMA_LOCAL_BIND;
        return 0;
 }
-- 
2.11.0
