Re: [PATCH v10 08/11] drm/shmem-helper: Add memory shrinker

2023-02-26 Thread Dmitry Osipenko
On 2/17/23 16:19, Thomas Zimmermann wrote:
>> +/**
>> + * drm_gem_shmem_swap_in() - Moves shmem GEM back to memory and enables
>> + *   hardware access to the memory.
> 
> Do we have a better name than _swap_in()? I suggest
> drm_gem_shmem_unevict(), which suggests that it's the inverse of _evict().

The canonical naming scheme used by TTM and other DRM drivers is
_swapin(), without the second underscore. I'll use that variant in v11 for
naming consistency with the rest of the DRM code.

-- 
Best regards,
Dmitry



Re: [PATCH v10 08/11] drm/shmem-helper: Add memory shrinker

2023-02-17 Thread Thomas Zimmermann

Hi

On 1/8/23 22:04, Dmitry Osipenko wrote:

Introduce a common drm-shmem shrinker for DRM drivers.

To start using the drm-shmem shrinker, drivers should do the following:

1. Implement the evict() callback of the GEM object, in which the driver
checks whether the object is purgeable or evictable using the drm-shmem
helpers and performs the shrinking action (see the sketch after this list)

2. Initialize the drm-shmem internals using drmm_gem_shmem_init(drm_device),
which will register the drm-shmem shrinker

3. Implement a madvise IOCTL that uses drm_gem_shmem_madvise()
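
A minimal driver-side sketch of step 1, assuming an int-returning evict()
hook in drm_gem_object_funcs; the mydrv_* names are placeholders, and
drm_gem_shmem_purge()/drm_gem_shmem_evict() stand in for whatever reclaim
helpers this series ends up exporting:

static int mydrv_gem_evict(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	/* driver-specific teardown (GPU mappings, fences, ...) goes here */

	if (drm_gem_shmem_is_purgeable(shmem))
		drm_gem_shmem_purge(shmem);	/* assumed helper */
	else
		drm_gem_shmem_evict(shmem);	/* assumed helper */

	return 0;
}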


I left comments below, but it's complicated code and a fairly large
change. Is there any chance of splitting this up in a meaningful way?




Signed-off-by: Daniel Almeida 
Signed-off-by: Dmitry Osipenko 
---
  drivers/gpu/drm/drm_gem_shmem_helper.c| 460 --
  .../gpu/drm/panfrost/panfrost_gem_shrinker.c  |   9 +-
  include/drm/drm_device.h  |  10 +-
  include/drm/drm_gem_shmem_helper.h|  61 ++-
  4 files changed, 490 insertions(+), 50 deletions(-)

diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index a1f2f2158c50..3ab5ec325ddb 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -20,6 +20,7 @@
  #include 
  #include 
  #include 
+#include 
  #include 
  #include 
  
@@ -128,6 +129,57 @@ struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t
  }
  EXPORT_SYMBOL_GPL(drm_gem_shmem_create);
  
+static void drm_gem_shmem_resv_assert_held(struct drm_gem_shmem_object *shmem)
+{
+   /*
+* Destroying the object is a special case: drm_gem_shmem_free()
+* calls many things that WARN_ON if the obj lock is not held.  But
+* acquiring the obj lock in drm_gem_shmem_free() can cause a locking
+* order inversion between reservation_ww_class_mutex and fs_reclaim.
+*
+* This deadlock is not actually possible, because no one should
+* already be holding the lock when drm_gem_shmem_free() is called.
+* Unfortunately lockdep is not aware of this detail.  So when the
+* refcount drops to zero, we pretend it is already locked.
+*/
+   if (kref_read(&shmem->base.refcount))
+   dma_resv_assert_held(shmem->base.resv);
+}
+
+static bool drm_gem_shmem_is_evictable(struct drm_gem_shmem_object *shmem)
+{
+   dma_resv_assert_held(shmem->base.resv);
+
+   return (shmem->madv >= 0) && shmem->base.funcs->evict &&
+   shmem->pages_use_count && !shmem->pages_pin_count &&
+   !shmem->base.dma_buf && !shmem->base.import_attach &&
+   shmem->sgt && !shmem->evicted;
+}
+
+static void
+drm_gem_shmem_update_pages_state(struct drm_gem_shmem_object *shmem)
+{
+   struct drm_gem_object *obj = &shmem->base;
+   struct drm_gem_shmem *shmem_mm = obj->dev->shmem_mm;
+   struct drm_gem_shmem_shrinker *gem_shrinker = &shmem_mm->shrinker;
+
+   drm_gem_shmem_resv_assert_held(shmem);
+
+   if (!gem_shrinker || obj->import_attach)
+   return;
+
+   if (shmem->madv < 0)
+   drm_gem_lru_remove(&shmem->base);
+   else if (drm_gem_shmem_is_evictable(shmem) || drm_gem_shmem_is_purgeable(shmem))
+   drm_gem_lru_move_tail(&gem_shrinker->lru_evictable, &shmem->base);
+   else if (shmem->evicted)
+   drm_gem_lru_move_tail(&gem_shrinker->lru_evicted, &shmem->base);
+   else if (!shmem->pages)
+   drm_gem_lru_remove(&shmem->base);
+   else
+   drm_gem_lru_move_tail(&gem_shrinker->lru_pinned, &shmem->base);
+}
+
  /**
   * drm_gem_shmem_free - Free resources associated with a shmem GEM object
   * @shmem: shmem GEM object to free
@@ -142,7 +194,8 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
if (obj->import_attach) {
drm_prime_gem_destroy(obj, shmem->sgt);
} else {
-   dma_resv_lock(shmem->base.resv, NULL);
+   /* take the shmem GEM object out of the memory shrinker */
+   drm_gem_shmem_madvise(shmem, -1);
  
  		drm_WARN_ON(obj->dev, shmem->vmap_use_count);
  
@@ -152,12 +205,10 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)

sg_free_table(shmem->sgt);
kfree(shmem->sgt);
}
-   if (shmem->pages)
+   if (shmem->pages_use_count)
drm_gem_shmem_put_pages(shmem);
  
  		drm_WARN_ON(obj->dev, shmem->pages_use_count);

-
-   dma_resv_unlock(shmem->base.resv);
}
  
  	drm_gem_object_release(obj);

@@ -165,19 +216,31 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
  }
  EXPORT_SYMBOL_GPL(drm_gem_shmem_free);
  
-static int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
+static int
+drm_gem_shmem_acquire_pages(struct drm_gem_shmem_object *shmem)
  {
	struct drm_gem_object *obj = &shmem->base;

[PATCH v10 08/11] drm/shmem-helper: Add memory shrinker

2023-01-08 Thread Dmitry Osipenko
Introduce a common drm-shmem shrinker for DRM drivers.

To start using the drm-shmem shrinker, drivers should do the following:

1. Implement the evict() callback of the GEM object, in which the driver
   checks whether the object is purgeable or evictable using the drm-shmem
   helpers and performs the shrinking action

2. Initialize the drm-shmem internals using drmm_gem_shmem_init(drm_device),
   which will register the drm-shmem shrinker

3. Implement a madvise IOCTL that uses drm_gem_shmem_madvise() (a rough
   sketch of steps 2-3 follows below)
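
A rough sketch of steps 2-3 on the driver side (the mydrv_* names and the
madvise ioctl argument struct are hypothetical; the reservation lock is
taken around drm_gem_shmem_madvise(), matching the locking the helper
asserts in this series):

#include <drm/drm_gem.h>
#include <drm/drm_gem_shmem_helper.h>
#include <linux/dma-resv.h>

static int mydrv_init(struct drm_device *ddev)
{
	/* registers the common drm-shmem shrinker for this device */
	return drmm_gem_shmem_init(ddev);
}

static int mydrv_ioctl_madvise(struct drm_device *ddev, void *data,
			       struct drm_file *file)
{
	struct mydrv_madvise_args *args = data; /* hypothetical uAPI struct */
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = dma_resv_lock_interruptible(obj->resv, NULL);
	if (!ret) {
		/* returns whether the pages are still retained */
		args->retained = drm_gem_shmem_madvise(to_drm_gem_shmem_obj(obj),
						       args->madv);
		dma_resv_unlock(obj->resv);
	}

	drm_gem_object_put(obj);
	return ret;
}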

Signed-off-by: Daniel Almeida 
Signed-off-by: Dmitry Osipenko 
---
 drivers/gpu/drm/drm_gem_shmem_helper.c| 460 --
 .../gpu/drm/panfrost/panfrost_gem_shrinker.c  |   9 +-
 include/drm/drm_device.h  |  10 +-
 include/drm/drm_gem_shmem_helper.h|  61 ++-
 4 files changed, 490 insertions(+), 50 deletions(-)

diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index a1f2f2158c50..3ab5ec325ddb 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -20,6 +20,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 
@@ -128,6 +129,57 @@ struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t
 }
 EXPORT_SYMBOL_GPL(drm_gem_shmem_create);
 
+static void drm_gem_shmem_resv_assert_held(struct drm_gem_shmem_object *shmem)
+{
+   /*
+* Destroying the object is a special case: drm_gem_shmem_free()
+* calls many things that WARN_ON if the obj lock is not held.  But
+* acquiring the obj lock in drm_gem_shmem_free() can cause a locking
+* order inversion between reservation_ww_class_mutex and fs_reclaim.
+*
+* This deadlock is not actually possible, because no one should
+* already be holding the lock when drm_gem_shmem_free() is called.
+* Unfortunately lockdep is not aware of this detail.  So when the
+* refcount drops to zero, we pretend it is already locked.
+*/
+   if (kref_read(&shmem->base.refcount))
+   dma_resv_assert_held(shmem->base.resv);
+}
+
+static bool drm_gem_shmem_is_evictable(struct drm_gem_shmem_object *shmem)
+{
+   dma_resv_assert_held(shmem->base.resv);
+
+   return (shmem->madv >= 0) && shmem->base.funcs->evict &&
+   shmem->pages_use_count && !shmem->pages_pin_count &&
+   !shmem->base.dma_buf && !shmem->base.import_attach &&
+   shmem->sgt && !shmem->evicted;
+}
+
+static void
+drm_gem_shmem_update_pages_state(struct drm_gem_shmem_object *shmem)
+{
+   struct drm_gem_object *obj = &shmem->base;
+   struct drm_gem_shmem *shmem_mm = obj->dev->shmem_mm;
+   struct drm_gem_shmem_shrinker *gem_shrinker = &shmem_mm->shrinker;
+
+   drm_gem_shmem_resv_assert_held(shmem);
+
+   if (!gem_shrinker || obj->import_attach)
+   return;
+
+   if (shmem->madv < 0)
+   drm_gem_lru_remove(&shmem->base);
+   else if (drm_gem_shmem_is_evictable(shmem) || drm_gem_shmem_is_purgeable(shmem))
+   drm_gem_lru_move_tail(&gem_shrinker->lru_evictable, &shmem->base);
+   else if (shmem->evicted)
+   drm_gem_lru_move_tail(&gem_shrinker->lru_evicted, &shmem->base);
+   else if (!shmem->pages)
+   drm_gem_lru_remove(&shmem->base);
+   else
+   drm_gem_lru_move_tail(&gem_shrinker->lru_pinned, &shmem->base);
+}
+
 /**
  * drm_gem_shmem_free - Free resources associated with a shmem GEM object
  * @shmem: shmem GEM object to free
@@ -142,7 +194,8 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
if (obj->import_attach) {
drm_prime_gem_destroy(obj, shmem->sgt);
} else {
-   dma_resv_lock(shmem->base.resv, NULL);
+   /* take the shmem GEM object out of the memory shrinker */
+   drm_gem_shmem_madvise(shmem, -1);
 
drm_WARN_ON(obj->dev, shmem->vmap_use_count);
 
@@ -152,12 +205,10 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
sg_free_table(shmem->sgt);
kfree(shmem->sgt);
}
-   if (shmem->pages)
+   if (shmem->pages_use_count)
drm_gem_shmem_put_pages(shmem);
 
drm_WARN_ON(obj->dev, shmem->pages_use_count);
-
-   dma_resv_unlock(shmem->base.resv);
}
 
drm_gem_object_release(obj);
@@ -165,19 +216,31 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
 }
 EXPORT_SYMBOL_GPL(drm_gem_shmem_free);
 
-static int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
+static int
+drm_gem_shmem_acquire_pages(struct drm_gem_shmem_object *shmem)
 {
struct drm_gem_object *obj = &shmem->base;
struct page **pages;
 
-   if (shmem->pages_use_count++ > 0)
+   dma_resv_assert_held(shmem->base.resv);
+
+   if (shmem->madv < 0) {
+