Commit-ID:  af52bf6b92f7d8783c1e712cad6ef7d37cd773b2
Gitweb:     https://git.kernel.org/tip/af52bf6b92f7d8783c1e712cad6ef7d37cd773b2
Author:     Thomas Gleixner <t...@linutronix.de>
AuthorDate: Thu, 25 Apr 2019 11:45:03 +0200
Committer:  Thomas Gleixner <t...@linutronix.de>
CommitDate: Mon, 29 Apr 2019 12:37:50 +0200

mm/page_owner: Simplify stack trace handling

Replace the indirection through struct stack_trace by using the storage
array based interfaces.

The original code in all printing functions is really wrong. It allocates a
storage array on stack which is unused because depot_fetch_stack() does not
store anything in it. It overwrites the entries pointer in the stack_trace
struct so it points to the depot storage.

Signed-off-by: Thomas Gleixner <t...@linutronix.de>
Reviewed-by: Josh Poimboeuf <jpoim...@redhat.com>
Cc: Andy Lutomirski <l...@kernel.org>
Cc: linux...@kvack.org
Cc: Mike Rapoport <r...@linux.vnet.ibm.com>
Cc: David Rientjes <rient...@google.com>
Cc: Andrew Morton <a...@linux-foundation.org>
Cc: Steven Rostedt <rost...@goodmis.org>
Cc: Alexander Potapenko <gli...@google.com>
Cc: Alexey Dobriyan <adobri...@gmail.com>
Cc: Christoph Lameter <c...@linux.com>
Cc: Pekka Enberg <penb...@kernel.org>
Cc: Catalin Marinas <catalin.mari...@arm.com>
Cc: Dmitry Vyukov <dvyu...@google.com>
Cc: Andrey Ryabinin <aryabi...@virtuozzo.com>
Cc: kasan-...@googlegroups.com
Cc: Akinobu Mita <akinobu.m...@gmail.com>
Cc: Christoph Hellwig <h...@lst.de>
Cc: io...@lists.linux-foundation.org
Cc: Robin Murphy <robin.mur...@arm.com>
Cc: Marek Szyprowski <m.szyprow...@samsung.com>
Cc: Johannes Thumshirn <jthumsh...@suse.de>
Cc: David Sterba <dste...@suse.com>
Cc: Chris Mason <c...@fb.com>
Cc: Josef Bacik <jo...@toxicpanda.com>
Cc: linux-bt...@vger.kernel.org
Cc: dm-de...@redhat.com
Cc: Mike Snitzer <snit...@redhat.com>
Cc: Alasdair Kergon <a...@redhat.com>
Cc: Daniel Vetter <dan...@ffwll.ch>
Cc: intel-...@lists.freedesktop.org
Cc: Joonas Lahtinen <joonas.lahti...@linux.intel.com>
Cc: Maarten Lankhorst <maarten.lankho...@linux.intel.com>
Cc: dri-de...@lists.freedesktop.org
Cc: David Airlie <airl...@linux.ie>
Cc: Jani Nikula <jani.nik...@linux.intel.com>
Cc: Rodrigo Vivi <rodrigo.v...@intel.com>
Cc: Tom Zanussi <tom.zanu...@linux.intel.com>
Cc: Miroslav Benes <mbe...@suse.cz>
Cc: linux-a...@vger.kernel.org
Link: https://lkml.kernel.org/r/20190425094802.067210...@linutronix.de

---
 mm/page_owner.c | 79 ++++++++++++++++++++-------------------------------------
 1 file changed, 28 insertions(+), 51 deletions(-)

diff --git a/mm/page_owner.c b/mm/page_owner.c
index df277e6bc3c6..addcbb2ae4e4 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -58,15 +58,10 @@ static bool need_page_owner(void)
 static __always_inline depot_stack_handle_t create_dummy_stack(void)
 {
        unsigned long entries[4];
-       struct stack_trace dummy;
+       unsigned int nr_entries;
 
-       dummy.nr_entries = 0;
-       dummy.max_entries = ARRAY_SIZE(entries);
-       dummy.entries = &entries[0];
-       dummy.skip = 0;
-
-       save_stack_trace(&dummy);
-       return depot_save_stack(&dummy, GFP_KERNEL);
+       nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
+       return stack_depot_save(entries, nr_entries, GFP_KERNEL);
 }
 
 static noinline void register_dummy_stack(void)
@@ -120,46 +115,39 @@ void __reset_page_owner(struct page *page, unsigned int order)
        }
 }
 
-static inline bool check_recursive_alloc(struct stack_trace *trace,
-                                       unsigned long ip)
+static inline bool check_recursive_alloc(unsigned long *entries,
+                                        unsigned int nr_entries,
+                                        unsigned long ip)
 {
-       int i;
-
-       if (!trace->nr_entries)
-               return false;
+       unsigned int i;
 
-       for (i = 0; i < trace->nr_entries; i++) {
-               if (trace->entries[i] == ip)
+       for (i = 0; i < nr_entries; i++) {
+               if (entries[i] == ip)
                        return true;
        }
-
        return false;
 }
 
 static noinline depot_stack_handle_t save_stack(gfp_t flags)
 {
        unsigned long entries[PAGE_OWNER_STACK_DEPTH];
-       struct stack_trace trace = {
-               .nr_entries = 0,
-               .entries = entries,
-               .max_entries = PAGE_OWNER_STACK_DEPTH,
-               .skip = 2
-       };
        depot_stack_handle_t handle;
+       unsigned int nr_entries;
 
-       save_stack_trace(&trace);
+       nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 2);
 
        /*
-        * We need to check recursion here because our request to stackdepot
-        * could trigger memory allocation to save new entry. New memory
-        * allocation would reach here and call depot_save_stack() again
-        * if we don't catch it. There is still not enough memory in stackdepot
-        * so it would try to allocate memory again and loop forever.
+        * We need to check recursion here because our request to
+        * stackdepot could trigger memory allocation to save new
+        * entry. New memory allocation would reach here and call
+        * stack_depot_save() again if we don't catch it. There is
+        * still not enough memory in stackdepot so it would try to
+        * allocate memory again and loop forever.
         */
-       if (check_recursive_alloc(&trace, _RET_IP_))
+       if (check_recursive_alloc(entries, nr_entries, _RET_IP_))
                return dummy_handle;
 
-       handle = depot_save_stack(&trace, flags);
+       handle = stack_depot_save(entries, nr_entries, flags);
        if (!handle)
                handle = failure_handle;
 
@@ -337,16 +325,10 @@ print_page_owner(char __user *buf, size_t count, unsigned long pfn,
                struct page *page, struct page_owner *page_owner,
                depot_stack_handle_t handle)
 {
-       int ret;
-       int pageblock_mt, page_mt;
+       int ret, pageblock_mt, page_mt;
+       unsigned long *entries;
+       unsigned int nr_entries;
        char *kbuf;
-       unsigned long entries[PAGE_OWNER_STACK_DEPTH];
-       struct stack_trace trace = {
-               .nr_entries = 0,
-               .entries = entries,
-               .max_entries = PAGE_OWNER_STACK_DEPTH,
-               .skip = 0
-       };
 
        count = min_t(size_t, count, PAGE_SIZE);
        kbuf = kmalloc(count, GFP_KERNEL);
@@ -375,8 +357,8 @@ print_page_owner(char __user *buf, size_t count, unsigned long pfn,
        if (ret >= count)
                goto err;
 
-       depot_fetch_stack(handle, &trace);
-       ret += snprint_stack_trace(kbuf + ret, count - ret, &trace, 0);
+       nr_entries = stack_depot_fetch(handle, &entries);
+       ret += stack_trace_snprint(kbuf + ret, count - ret, entries, nr_entries, 0);
        if (ret >= count)
                goto err;
 
@@ -407,14 +389,9 @@ void __dump_page_owner(struct page *page)
 {
        struct page_ext *page_ext = lookup_page_ext(page);
        struct page_owner *page_owner;
-       unsigned long entries[PAGE_OWNER_STACK_DEPTH];
-       struct stack_trace trace = {
-               .nr_entries = 0,
-               .entries = entries,
-               .max_entries = PAGE_OWNER_STACK_DEPTH,
-               .skip = 0
-       };
        depot_stack_handle_t handle;
+       unsigned long *entries;
+       unsigned int nr_entries;
        gfp_t gfp_mask;
        int mt;
 
@@ -438,10 +415,10 @@ void __dump_page_owner(struct page *page)
                return;
        }
 
-       depot_fetch_stack(handle, &trace);
+       nr_entries = stack_depot_fetch(handle, &entries);
        pr_alert("page allocated via order %u, migratetype %s, gfp_mask %#x(%pGg)\n",
                 page_owner->order, migratetype_names[mt], gfp_mask, &gfp_mask);
-       print_stack_trace(&trace, 0);
+       stack_trace_print(entries, nr_entries, 0);
 
        if (page_owner->last_migrate_reason != -1)
                pr_alert("page has been migrated, last migrate reason: %s\n",

Reply via email to