On contiguous allocation, we round up the size to the *next* power
of 2. Implement a function to free the unused pages at the end of
the newly allocated block.
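
As an illustration of the arithmetic (not part of the patch), the small
userspace sketch below shows how much a contiguous request stands to get
back: the request is rounded up to the next power of two, and the
difference is what drm_buddy_block_trim() can hand back to the allocator.
The next_pow2() helper here is only a stand-in for the kernel's
roundup_pow_of_two().

    #include <stdint.h>
    #include <stdio.h>

    /* stand-in for the kernel's roundup_pow_of_two() */
    static uint64_t next_pow2(uint64_t x)
    {
            uint64_t p = 1;

            while (p < x)
                    p <<= 1;
            return p;
    }

    int main(void)
    {
            uint64_t requested = 260 * 1024;           /* bytes asked for */
            uint64_t allocated = next_pow2(requested); /* buddy rounds up */

            printf("requested %llu, allocated %llu, trimmable %llu bytes\n",
                   (unsigned long long)requested,
                   (unsigned long long)allocated,
                   (unsigned long long)(allocated - requested));
            return 0;
    }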

v2(Matthew Auld):
  - replace the function name 'drm_buddy_free_unused_pages' with
    'drm_buddy_block_trim'
  - replace input argument name 'actual_size' with 'new_size'
  - add more validation checks for input arguments
  - add overlaps check to avoid needless searching and splitting
  - merged the patch below so the feature can be seen in action
    - add free unused pages support to the i915 driver
  - hold the lock around drm_buddy_block_trim() since the mark_free/mark_split
    it calls are all globally visible

v3:
  - remove drm_buddy_block_trim() error handling and
    print a warn message if it fails

Signed-off-by: Arunpravin <arunpravin.paneersel...@amd.com>
---
 drivers/gpu/drm/drm_buddy.c                   | 72 ++++++++++++++++++-
 drivers/gpu/drm/i915/i915_ttm_buddy_manager.c | 10 +++
 include/drm/drm_buddy.h                       |  4 ++
 3 files changed, 83 insertions(+), 3 deletions(-)
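
For context, a minimal driver-side sketch (not part of the patch) of the
intended call pattern: after a contiguous allocation, trim the single
returned block back down to the size that was actually requested. This
assumes kernel context; the wrapper name, the lock argument and
requested_size are placeholders, and it simply mirrors the i915 hunk
below, where bman->lock plays the role of the lock.

    /* Hypothetical wrapper mirroring the i915 usage below. */
    static void trim_contiguous(struct drm_buddy_mm *mm, struct mutex *lock,
                                u64 requested_size, struct list_head *blocks)
    {
            int err;

            /* blocks holds the single block from the contiguous allocation */
            mutex_lock(lock);   /* trim touches globally visible buddy state */
            err = drm_buddy_block_trim(mm, requested_size, blocks);
            mutex_unlock(lock);

            if (unlikely(err))
                    pr_warn("drm_buddy_block_trim failed: %d\n", err);
    }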

diff --git a/drivers/gpu/drm/drm_buddy.c b/drivers/gpu/drm/drm_buddy.c
index eddc1eeda02e..707efc82216d 100644
--- a/drivers/gpu/drm/drm_buddy.c
+++ b/drivers/gpu/drm/drm_buddy.c
@@ -434,7 +434,8 @@ alloc_from_freelist(struct drm_buddy_mm *mm,
 static int __alloc_range(struct drm_buddy_mm *mm,
                         struct list_head *dfs,
                         u64 start, u64 size,
-                        struct list_head *blocks)
+                        struct list_head *blocks,
+                        bool trim_path)
 {
        struct drm_buddy_block *block;
        struct drm_buddy_block *buddy;
@@ -480,8 +481,20 @@ static int __alloc_range(struct drm_buddy_mm *mm,
 
                if (!drm_buddy_block_is_split(block)) {
                        err = split_block(mm, block);
-                       if (unlikely(err))
+                       if (unlikely(err)) {
+                               if (trim_path)
+                                       /*
+                                        * In the trim case, return instead of
+                                        * taking the split failure path, which
+                                        * removes the block from the original
+                                        * list and may also free it. At worst
+                                        * we get some internal fragmentation
+                                        * and leave the decision to the user.
+                                        */
+                                       return err;
+
                                goto err_undo;
+                       }
                }
 
                list_add(&block->right->tmp_link, dfs);
@@ -535,8 +548,61 @@ static int __drm_buddy_alloc_range(struct drm_buddy_mm *mm,
        for (i = 0; i < mm->n_roots; ++i)
                list_add_tail(&mm->roots[i]->tmp_link, &dfs);
 
-       return __alloc_range(mm, &dfs, start, size, blocks);
+       return __alloc_range(mm, &dfs, start, size, blocks, false);
+}
+
+/**
+ * drm_buddy_block_trim - free unused pages
+ *
+ * @mm: DRM buddy manager
+ * @new_size: original (actual) size requested, in bytes
+ * @blocks: input/output list; must hold exactly one allocated block
+ *
+ * For contiguous allocations the size is rounded up to the next
+ * power of two, while drivers consume only the actual size, so the
+ * remaining portion of the block is unused and can be freed.
+ *
+ * Returns:
+ * 0 on success, error code on failure.
+ */
+int drm_buddy_block_trim(struct drm_buddy_mm *mm,
+                        u64 new_size,
+                        struct list_head *blocks)
+{
+       struct drm_buddy_block *block;
+       u64 new_start;
+       LIST_HEAD(dfs);
+
+       if (!list_is_singular(blocks))
+               return -EINVAL;
+
+       block = list_first_entry(blocks,
+                                struct drm_buddy_block,
+                                link);
+
+       if (!drm_buddy_block_is_allocated(block))
+               return -EINVAL;
+
+       if (new_size > drm_buddy_block_size(mm, block))
+               return -EINVAL;
+
+       if (!new_size || !IS_ALIGNED(new_size, mm->chunk_size))
+               return -EINVAL;
+
+       if (new_size == drm_buddy_block_size(mm, block))
+               return 0;
+
+       list_del(&block->link);
+
+       new_start = drm_buddy_block_offset(block);
+
+       mark_free(mm, block);
+
+       list_add(&block->tmp_link, &dfs);
+
+       return __alloc_range(mm, &dfs, new_start, new_size, blocks, true);
 }
+EXPORT_SYMBOL(drm_buddy_block_trim);
 
 /**
  * drm_buddy_alloc - allocate power-of-two blocks
diff --git a/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c b/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
index 7c58efb60dba..c5831c27fe82 100644
--- a/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
+++ b/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
@@ -97,6 +97,16 @@ static int i915_ttm_buddy_man_alloc(struct ttm_resource_manager *man,
        if (unlikely(err))
                goto err_free_blocks;
 
+       if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
+               mutex_lock(&bman->lock);
+               err = drm_buddy_block_trim(mm,
+                               (u64)n_pages << PAGE_SHIFT,
+                               &bman_res->blocks);
+               mutex_unlock(&bman->lock);
+               if (unlikely(err))
+                       pr_warn("drm_buddy_block_trim failed for ttm_buffer_object(%p): %d\n",
+                               bo, err);
+       }
        *res = &bman_res->base;
        return 0;
 
diff --git a/include/drm/drm_buddy.h b/include/drm/drm_buddy.h
index 316ac0d25f08..90906d9dbbf0 100644
--- a/include/drm/drm_buddy.h
+++ b/include/drm/drm_buddy.h
@@ -146,6 +146,10 @@ int drm_buddy_alloc(struct drm_buddy_mm *mm,
                    struct list_head *blocks,
                    unsigned long flags);
 
+int drm_buddy_block_trim(struct drm_buddy_mm *mm,
+                        u64 new_size,
+                        struct list_head *blocks);
+
 void drm_buddy_free(struct drm_buddy_mm *mm, struct drm_buddy_block *block);
 
 void drm_buddy_free_list(struct drm_buddy_mm *mm, struct list_head *objects);
-- 
2.25.1
