From: CQ Tang <cq.t...@intel.com>

Function i915_gem_shrink_memory_region() is renamed to
intel_memory_region_evict() and moved from i915_gem_shrinker.c
to intel_memory_region.c. In addition to evicting purgeable
objects, this function now also handles local memory swapping.

When an object is selected from the list, i915_gem_object_unbind()
might fail if the object's vma is pinned, in which case -EBUSY is
returned from this function.

The new code follows logic similar to that of i915_gem_shrink();
a simplified sketch of the eviction walk is included after the
diffstat below.

Signed-off-by: CQ Tang <cq.t...@intel.com>
---
 .../gpu/drm/i915/gem/i915_gem_object_types.h  |  1 -
 drivers/gpu/drm/i915/gem/i915_gem_shrinker.c  | 58 -----------
 drivers/gpu/drm/i915/gem/i915_gem_shrinker.h  |  2 -
 drivers/gpu/drm/i915/i915_gem.c               |  8 +-
 drivers/gpu/drm/i915/intel_memory_region.c    | 95 +++++++++++++++++--
 .../drm/i915/selftests/intel_memory_region.c  |  3 +-
 6 files changed, 94 insertions(+), 73 deletions(-)
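
A minimal, illustrative userspace sketch of the two-phase eviction
walk that the new intel_memory_region_evict() performs (purgeable
objects first, then the remaining objects, until the byte target is
met). The structures and helpers here are invented stand-ins for this
note only; the real locking, refcounting and unbind handling are in
the intel_memory_region.c hunk below.

	/*
	 * Illustrative model only, not kernel code: walk the purgeable
	 * list first, then the general object list, stopping once
	 * "target" bytes have been released.
	 */
	#include <stddef.h>
	#include <stdbool.h>

	struct mock_object {
		size_t size;        /* backing store size in bytes */
		bool evictable;     /* pages can be dropped right now */
		struct mock_object *next;
	};

	struct mock_region {
		struct mock_object *purgeable; /* objects marked DONTNEED */
		struct mock_object *list;      /* everything else */
	};

	/* Pretend to release an object's backing pages; return bytes freed. */
	static size_t mock_evict_one(struct mock_object *obj)
	{
		return obj->evictable ? obj->size : 0;
	}

	/* Return 0 once "target" bytes were freed, -1 (ENOSPC-like) otherwise. */
	static int mock_region_evict(struct mock_region *mem, size_t target)
	{
		struct mock_object *phases[] = { mem->purgeable, mem->list, NULL };
		size_t found = 0;

		for (int i = 0; phases[i] && found < target; i++)
			for (struct mock_object *obj = phases[i];
			     obj && found < target; obj = obj->next)
				found += mock_evict_one(obj);

		return found < target ? -1 : 0;
	}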

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index 8d639509b78b..517a606ade8d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -237,7 +237,6 @@ struct drm_i915_gem_object {
                 * region->obj_lock.
                 */
                struct list_head region_link;
-               struct list_head tmp_link;
 
                struct sg_table *pages;
                void *mapping;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
index 4d346df8fd5b..27674048f17d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -272,64 +272,6 @@ unsigned long i915_gem_shrink_all(struct drm_i915_private *i915)
        return freed;
 }
 
-int i915_gem_shrink_memory_region(struct intel_memory_region *mem,
-                                 resource_size_t target)
-{
-       struct drm_i915_private *i915 = mem->i915;
-       struct drm_i915_gem_object *obj;
-       resource_size_t purged;
-       LIST_HEAD(purgeable);
-       int err = -ENOSPC;
-
-       intel_gt_retire_requests(&i915->gt);
-
-       purged = 0;
-
-       mutex_lock(&mem->objects.lock);
-
-       while ((obj = list_first_entry_or_null(&mem->objects.purgeable,
-                                              typeof(*obj),
-                                              mm.region_link))) {
-               list_move_tail(&obj->mm.region_link, &purgeable);
-
-               if (!i915_gem_object_has_pages(obj))
-                       continue;
-
-               if (i915_gem_object_is_framebuffer(obj))
-                       continue;
-
-               if (!kref_get_unless_zero(&obj->base.refcount))
-                       continue;
-
-               mutex_unlock(&mem->objects.lock);
-
-               if (!i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE)) {
-                       if (i915_gem_object_trylock(obj)) {
-                               __i915_gem_object_put_pages(obj);
-                               if (!i915_gem_object_has_pages(obj)) {
-                                       purged += obj->base.size;
-                                       if (!i915_gem_object_is_volatile(obj))
-                                               obj->mm.madv = __I915_MADV_PURGED;
-                               }
-                               i915_gem_object_unlock(obj);
-                       }
-               }
-
-               i915_gem_object_put(obj);
-
-               mutex_lock(&mem->objects.lock);
-
-               if (purged >= target) {
-                       err = 0;
-                       break;
-               }
-       }
-
-       list_splice_tail(&purgeable, &mem->objects.purgeable);
-       mutex_unlock(&mem->objects.lock);
-       return err;
-}
-
 static unsigned long
 i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
 {
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.h b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.h
index c945f3b587d6..7c1e648a8b44 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.h
@@ -31,7 +31,5 @@ void i915_gem_driver_register__shrinker(struct drm_i915_private *i915);
 void i915_gem_driver_unregister__shrinker(struct drm_i915_private *i915);
 void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915,
                                    struct mutex *mutex);
-int i915_gem_shrink_memory_region(struct intel_memory_region *mem,
-                                 resource_size_t target);
 
 #endif /* __I915_GEM_SHRINKER_H__ */
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index bf67f323a1ae..85cbdb8e2bb8 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1008,12 +1008,12 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
 
                switch (obj->mm.madv) {
                case I915_MADV_WILLNEED:
-                       list_move(&obj->mm.region_link,
-                                 &obj->mm.region->objects.list);
+                       list_move_tail(&obj->mm.region_link,
+                                      &obj->mm.region->objects.list);
                        break;
                default:
-                       list_move(&obj->mm.region_link,
-                                 &obj->mm.region->objects.purgeable);
+                       list_move_tail(&obj->mm.region_link,
+                                      &obj->mm.region->objects.purgeable);
                        break;
                }
 
diff --git a/drivers/gpu/drm/i915/intel_memory_region.c b/drivers/gpu/drm/i915/intel_memory_region.c
index 371cd88ff6d8..185eab497803 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/intel_memory_region.c
@@ -3,6 +3,7 @@
  * Copyright © 2019 Intel Corporation
  */
 
+#include "gt/intel_gt_requests.h"
 #include "intel_memory_region.h"
 #include "i915_drv.h"
 
@@ -94,6 +95,90 @@ __intel_memory_region_put_block_buddy(struct i915_buddy_block *block)
        __intel_memory_region_put_pages_buddy(block->private, &blocks);
 }
 
+static int intel_memory_region_evict(struct intel_memory_region *mem,
+                                    resource_size_t target)
+{
+       struct drm_i915_private *i915 = mem->i915;
+       struct list_head still_in_list;
+       struct drm_i915_gem_object *obj;
+       struct list_head *phases[] = {
+               &mem->objects.purgeable,
+               &mem->objects.list,
+               NULL,
+       };
+       struct list_head **phase;
+       resource_size_t found;
+       int pass;
+
+       intel_gt_retire_requests(&i915->gt);
+
+       found = 0;
+       pass = 0;
+       phase = phases;
+
+next:
+       INIT_LIST_HEAD(&still_in_list);
+       mutex_lock(&mem->objects.lock);
+
+       while (found < target &&
+               (obj = list_first_entry_or_null(*phase,
+                                               typeof(*obj),
+                                               mm.region_link))) {
+               list_move_tail(&obj->mm.region_link, &still_in_list);
+
+               if (!i915_gem_object_has_pages(obj))
+                       continue;
+
+               if (i915_gem_object_is_framebuffer(obj))
+                       continue;
+
+               /*
+                * For an IOMEM region, only swap userspace objects;
+                * kernel objects are bound and would cause a lot of
+                * unbind warnings from the driver.
+                * FIXME: swap kernel objects as well.
+                */
+               if (i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_IOMEM)
+                   && !obj->base.handle_count)
+                       continue;
+
+               if (!kref_get_unless_zero(&obj->base.refcount))
+                       continue;
+
+               mutex_unlock(&mem->objects.lock);
+
+               if (!i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE)) {
+                       if (i915_gem_object_trylock(obj)) {
+                               __i915_gem_object_put_pages(obj);
+                               /* May arrive from get_pages on another bo */
+                               if (!i915_gem_object_has_pages(obj)) {
+                                       found += obj->base.size;
+                                       if (obj->mm.madv == I915_MADV_DONTNEED)
+                                       obj->mm.madv = __I915_MADV_PURGED;
+                               }
+                               i915_gem_object_unlock(obj);
+                       }
+               }
+
+               i915_gem_object_put(obj);
+               mutex_lock(&mem->objects.lock);
+
+               if (found >= target)
+                       break;
+       }
+       list_splice_tail(&still_in_list, *phase);
+       mutex_unlock(&mem->objects.lock);
+
+       if (found < target) {
+               pass++;
+               phase++;
+               if (*phase)
+                       goto next;
+       }
+
+       return (found < target) ? -ENOSPC : 0;
+}
+
 int
 __intel_memory_region_get_pages_buddy(struct intel_memory_region *mem,
                                      resource_size_t size,
@@ -137,7 +222,7 @@ __intel_memory_region_get_pages_buddy(struct intel_memory_region *mem,
        do {
                struct i915_buddy_block *block;
                unsigned int order;
-               bool retry = true;
+
 retry:
                order = min_t(u32, (fls(n_pages) - 1), max_order);
                GEM_BUG_ON(order > mem->mm.max_order);
@@ -152,19 +237,15 @@ __intel_memory_region_get_pages_buddy(struct intel_memory_region *mem,
                                resource_size_t target;
                                int err;
 
-                               if (!retry)
-                                       goto err_free_blocks;
-
                                target = n_pages * mem->mm.chunk_size;
 
                                mutex_unlock(&mem->mm_lock);
-                               err = i915_gem_shrink_memory_region(mem,
-                                                                   target);
+                               err = intel_memory_region_evict(mem,
+                                                               target);
                                mutex_lock(&mem->mm_lock);
                                if (err)
                                        goto err_free_blocks;
 
-                               retry = false;
                                goto retry;
                        }
                } while (1);
diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
index 9df0a4f657c1..4b007ed48d2f 100644
--- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
@@ -1093,7 +1093,8 @@ static void igt_mark_evictable(struct drm_i915_gem_object *obj)
 {
        i915_gem_object_unpin_pages(obj);
        obj->mm.madv = I915_MADV_DONTNEED;
-       list_move(&obj->mm.region_link, &obj->mm.region->objects.purgeable);
+       list_move_tail(&obj->mm.region_link,
+                      &obj->mm.region->objects.purgeable);
 }
 
 static int igt_mock_shrink(void *arg)
-- 
2.26.2
