Add infrastructure for deferring buffer frees.

This is a feature ION provided which, when used with some form of
page pool, provides a nice performance boost in an allocation
microbenchmark. It helps because it allows the page zeroing to be
done outside of the normal allocation/free path and pushed off to
a kthread.

As not all heaps will find this useful, it's implemented as an
optional helper library that heaps can utilize.
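
As a rough sketch of the intended usage (the heap and buffer names
below are made up for illustration and are not part of this patch):
a heap embeds a struct deferred_freelist_item in its buffer, and
instead of freeing inline it queues the buffer with deferred_free(),
passing a callback that does the actual free and can check the
df_reason to bypass any page pool when the shrinker is reclaiming:

  #include <linux/kernel.h>
  #include <linux/slab.h>

  #include "deferred-free-helper.h"

  /* Hypothetical heap buffer embedding the freelist item */
  struct example_heap_buffer {
          size_t len;
          struct deferred_freelist_item deferred_free;
          /* heap-specific fields (pages, sg_table, ...) */
  };

  static void example_heap_free(struct deferred_freelist_item *item,
                                enum df_reason reason)
  {
          struct example_heap_buffer *buffer =
                  container_of(item, struct example_heap_buffer,
                               deferred_free);

          /*
           * If reason is DF_UNDER_PRESSURE, return pages straight to
           * the system (bypassing any page pool) and avoid allocating
           * memory here, as allocations may fail.
           */
          kfree(buffer);
  }

  /* Called from the heap's release path instead of freeing inline */
  static void example_heap_release(struct example_heap_buffer *buffer)
  {
          deferred_free(&buffer->deferred_free, example_heap_free,
                        buffer->len);
  }

Note deferred_free() takes the size in bytes; the helper tracks and
reports it to the shrinker in pages.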

Cc: Daniel Vetter <dan...@ffwll.ch>
Cc: Sumit Semwal <sumit.sem...@linaro.org>
Cc: Liam Mark <lm...@codeaurora.org>
Cc: Chris Goldsworthy <cgold...@codeaurora.org>
Cc: Laura Abbott <labb...@kernel.org>
Cc: Brian Starkey <brian.star...@arm.com>
Cc: Hridya Valsaraju <hri...@google.com>
Cc: Suren Baghdasaryan <sur...@google.com>
Cc: Sandeep Patil <sspa...@google.com>
Cc: Daniel Mentz <danielme...@google.com>
Cc: Ørjan Eide <orjan.e...@arm.com>
Cc: Robin Murphy <robin.mur...@arm.com>
Cc: Ezequiel Garcia <ezequ...@collabora.com>
Cc: Simon Ser <cont...@emersion.fr>
Cc: James Jones <jajo...@nvidia.com>
Cc: linux-me...@vger.kernel.org
Cc: dri-de...@lists.freedesktop.org
Signed-off-by: John Stultz <john.stu...@linaro.org>
---
v2:
* Fix sleep-in-atomic issue from using a mutex by switching to a
  spinlock (Reported-by: kernel test robot <oliver.s...@intel.com>)
* Clean up the API to use a reason enum for clarity and add some
  documentation comments, as suggested by Suren Baghdasaryan.

v3:
* Minor tweaks so it can be built as a module
* A few small fixups suggested by Daniel Mentz

v4:
* Tweak from Daniel Mentz to make sure the shrinker
  count/freed values are tracked in pages not bytes
---
 drivers/dma-buf/heaps/Kconfig                |   3 +
 drivers/dma-buf/heaps/Makefile               |   1 +
 drivers/dma-buf/heaps/deferred-free-helper.c | 138 +++++++++++++++++++
 drivers/dma-buf/heaps/deferred-free-helper.h |  55 ++++++++
 4 files changed, 197 insertions(+)
 create mode 100644 drivers/dma-buf/heaps/deferred-free-helper.c
 create mode 100644 drivers/dma-buf/heaps/deferred-free-helper.h

diff --git a/drivers/dma-buf/heaps/Kconfig b/drivers/dma-buf/heaps/Kconfig
index a5eef06c4226..f7aef8bc7119 100644
--- a/drivers/dma-buf/heaps/Kconfig
+++ b/drivers/dma-buf/heaps/Kconfig
@@ -1,3 +1,6 @@
+config DMABUF_HEAPS_DEFERRED_FREE
+       tristate
+
 config DMABUF_HEAPS_SYSTEM
        bool "DMA-BUF System Heap"
        depends on DMABUF_HEAPS
diff --git a/drivers/dma-buf/heaps/Makefile b/drivers/dma-buf/heaps/Makefile
index 974467791032..4e7839875615 100644
--- a/drivers/dma-buf/heaps/Makefile
+++ b/drivers/dma-buf/heaps/Makefile
@@ -1,3 +1,4 @@
 # SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_DMABUF_HEAPS_DEFERRED_FREE) += deferred-free-helper.o
 obj-$(CONFIG_DMABUF_HEAPS_SYSTEM)      += system_heap.o
 obj-$(CONFIG_DMABUF_HEAPS_CMA)         += cma_heap.o
diff --git a/drivers/dma-buf/heaps/deferred-free-helper.c b/drivers/dma-buf/heaps/deferred-free-helper.c
new file mode 100644
index 000000000000..0ba02de9dc1c
--- /dev/null
+++ b/drivers/dma-buf/heaps/deferred-free-helper.c
@@ -0,0 +1,138 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Deferred dmabuf freeing helper
+ *
+ * Copyright (C) 2020 Linaro, Ltd.
+ *
+ * Based on the ION page pool code
+ * Copyright (C) 2011 Google, Inc.
+ */
+
+#include <linux/freezer.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/swap.h>
+#include <linux/sched/signal.h>
+
+#include "deferred-free-helper.h"
+
+static LIST_HEAD(free_list);
+static size_t list_size_pages;
+static wait_queue_head_t freelist_waitqueue;
+static struct task_struct *freelist_task;
+static DEFINE_SPINLOCK(free_list_lock);
+
+void deferred_free(struct deferred_freelist_item *item,
+                  void (*free)(struct deferred_freelist_item*,
+                               enum df_reason),
+                  size_t size)
+{
+       unsigned long flags;
+
+       INIT_LIST_HEAD(&item->list);
+       item->size = size;
+       item->free = free;
+
+       spin_lock_irqsave(&free_list_lock, flags);
+       list_add(&item->list, &free_list);
+       list_size_pages += size >> PAGE_SHIFT;
+       spin_unlock_irqrestore(&free_list_lock, flags);
+       wake_up(&freelist_waitqueue);
+}
+EXPORT_SYMBOL_GPL(deferred_free);
+
+static size_t free_one_item(enum df_reason reason)
+{
+       unsigned long flags;
+       size_t size = 0;
+       struct deferred_freelist_item *item;
+
+       spin_lock_irqsave(&free_list_lock, flags);
+       if (list_empty(&free_list)) {
+               spin_unlock_irqrestore(&free_list_lock, flags);
+               return 0;
+       }
+       item = list_first_entry(&free_list, struct deferred_freelist_item, list);
+       list_del(&item->list);
+       size = item->size;
+       list_size_pages -= size >> PAGE_SHIFT;
+       spin_unlock_irqrestore(&free_list_lock, flags);
+
+       item->free(item, reason);
+       return size >> PAGE_SHIFT;
+}
+
+static unsigned long get_freelist_size_pages(void)
+{
+       unsigned long size;
+       unsigned long flags;
+
+       spin_lock_irqsave(&free_list_lock, flags);
+       size = list_size_pages;
+       spin_unlock_irqrestore(&free_list_lock, flags);
+       return size;
+}
+
+static unsigned long freelist_shrink_count(struct shrinker *shrinker,
+                                          struct shrink_control *sc)
+{
+       return get_freelist_size_pages();
+}
+
+static unsigned long freelist_shrink_scan(struct shrinker *shrinker,
+                                         struct shrink_control *sc)
+{
+       unsigned long total_freed = 0;
+
+       if (sc->nr_to_scan == 0)
+               return 0;
+
+       while (total_freed < sc->nr_to_scan) {
+               size_t pages_freed = free_one_item(DF_UNDER_PRESSURE);
+
+               if (!pages_freed)
+                       break;
+
+               total_freed += pages_freed;
+       }
+
+       return total_freed;
+}
+
+static struct shrinker freelist_shrinker = {
+       .count_objects = freelist_shrink_count,
+       .scan_objects = freelist_shrink_scan,
+       .seeks = DEFAULT_SEEKS,
+       .batch = 0,
+};
+
+static int deferred_free_thread(void *data)
+{
+       while (true) {
+               wait_event_freezable(freelist_waitqueue,
+                                    get_freelist_size_pages() > 0);
+
+               free_one_item(DF_NORMAL);
+       }
+
+       return 0;
+}
+
+static int deferred_freelist_init(void)
+{
+       list_size_pages = 0;
+
+       init_waitqueue_head(&freelist_waitqueue);
+       freelist_task = kthread_run(deferred_free_thread, NULL,
+                                   "%s", "dmabuf-deferred-free-worker");
+       if (IS_ERR(freelist_task)) {
+               pr_err("Creating thread for deferred free failed\n");
+               return PTR_ERR(freelist_task);
+       }
+       sched_set_normal(freelist_task, 19);
+
+       return register_shrinker(&freelist_shrinker);
+}
+module_init(deferred_freelist_init);
+MODULE_LICENSE("GPL v2");
+
diff --git a/drivers/dma-buf/heaps/deferred-free-helper.h b/drivers/dma-buf/heaps/deferred-free-helper.h
new file mode 100644
index 000000000000..18b44ac86ef6
--- /dev/null
+++ b/drivers/dma-buf/heaps/deferred-free-helper.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef DEFERRED_FREE_HELPER_H
+#define DEFERRED_FREE_HELPER_H
+
+/**
+ * enum df_reason - enum for reason why item was freed
+ *
+ * This provides a reason for why the free function was called
+ * on the item. This is useful when deferred_free is used in
+ * combination with a pagepool, so under pressure the page can
+ * be immediately freed.
+ *
+ * @DF_NORMAL:         Normal deferred free
+ *
+ * @DF_UNDER_PRESSURE: Free was called because the system
+ *                     is under memory pressure. Usually
+ *                     from a shrinker. Avoid allocating
+ *                     memory in the free call, as it may
+ *                     fail.
+ */
+enum df_reason {
+       DF_NORMAL,
+       DF_UNDER_PRESSURE,
+};
+
+/**
+ * struct deferred_freelist_item - item structure for deferred freelist
+ *
+ * This is to be added to the structure for whatever you want to
+ * defer freeing on.
+ *
+ * @size: size of the item to be freed
+ * @free: function pointer to be called when freeing the item
+ * @list: list entry for the deferred list
+ */
+struct deferred_freelist_item {
+       size_t size;
+       void (*free)(struct deferred_freelist_item *i,
+                    enum df_reason reason);
+       struct list_head list;
+};
+
+/**
+ * deferred_free - call to add item to the deferred free list
+ *
+ * @item: Pointer to deferred_freelist_item field of a structure
+ * @free: Function pointer to the free call
+ * @size: Size of the item to be freed
+ */
+void deferred_free(struct deferred_freelist_item *item,
+                  void (*free)(struct deferred_freelist_item *i,
+                               enum df_reason reason),
+                  size_t size);
+#endif
-- 
2.25.1
