Before this change, page_owner recursion was detected by fetching a
backtrace and inspecting it for the current instruction pointer.
It has a few problems:
- it is slightly slow as it requires extra backtrace and a linear
  stack scan of the result
- the check happens too late to catch the case where fetching the
  backtrace itself requires memory allocation (as ia64's unwinder does).

To simplify recursion tracking, let's use a page_owner recursion flag
in 'struct task_struct'.

The change makes page_owner=on work on ia64 by avoiding infinite
recursion in:
  kmalloc()
  -> __set_page_owner()
  -> save_stack()
  -> unwind() [ia64-specific]
  -> build_script()
  -> kmalloc()
  -> __set_page_owner() [we short-circuit here]
  -> save_stack()
  -> unwind() [recursion]

CC: Ingo Molnar <[email protected]>
CC: Peter Zijlstra <[email protected]>
CC: Juri Lelli <[email protected]>
CC: Vincent Guittot <[email protected]>
CC: Dietmar Eggemann <[email protected]>
CC: Steven Rostedt <[email protected]>
CC: Ben Segall <[email protected]>
CC: Mel Gorman <[email protected]>
CC: Daniel Bristot de Oliveira <[email protected]>
CC: Andrew Morton <[email protected]>
CC: [email protected]
Signed-off-by: Sergei Trofimovich <[email protected]>
---
Change since v1:
- use bit from task_struct instead of a new field
- track only one recursion depth level so far

 include/linux/sched.h |  4 ++++
 mm/page_owner.c       | 32 ++++++++++----------------------
 2 files changed, 14 insertions(+), 22 deletions(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index ef00bb22164c..00986450677c 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -841,6 +841,10 @@ struct task_struct {
        /* Stalled due to lack of memory */
        unsigned                        in_memstall:1;
 #endif
+#ifdef CONFIG_PAGE_OWNER
+       /* Used by page_owner=on to detect recursion in page tracking. */
+       unsigned                        in_page_owner:1;
+#endif
 
        unsigned long                   atomic_flags; /* Flags requiring atomic access. */
 
diff --git a/mm/page_owner.c b/mm/page_owner.c
index 7147fd34a948..64b2e4c6afb7 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -97,42 +97,30 @@ static inline struct page_owner *get_page_owner(struct page_ext *page_ext)
        return (void *)page_ext + page_owner_ops.offset;
 }
 
-static inline bool check_recursive_alloc(unsigned long *entries,
-                                        unsigned int nr_entries,
-                                        unsigned long ip)
-{
-       unsigned int i;
-
-       for (i = 0; i < nr_entries; i++) {
-               if (entries[i] == ip)
-                       return true;
-       }
-       return false;
-}
-
 static noinline depot_stack_handle_t save_stack(gfp_t flags)
 {
        unsigned long entries[PAGE_OWNER_STACK_DEPTH];
        depot_stack_handle_t handle;
        unsigned int nr_entries;
 
-       nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 2);
-
        /*
-        * We need to check recursion here because our request to
-        * stackdepot could trigger memory allocation to save new
-        * entry. New memory allocation would reach here and call
-        * stack_depot_save_entries() again if we don't catch it. There is
-        * still not enough memory in stackdepot so it would try to
-        * allocate memory again and loop forever.
+        * Avoid recursion.
+        *
+        * Sometimes page metadata allocation tracking requires more
+        * memory to be allocated:
+        * - when new stack trace is saved to stack depot
+        * - when backtrace itself is calculated (ia64)
         */
-       if (check_recursive_alloc(entries, nr_entries, _RET_IP_))
+       if (current->in_page_owner)
                return dummy_handle;
+       current->in_page_owner = 1;
 
+       nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 2);
        handle = stack_depot_save(entries, nr_entries, flags);
        if (!handle)
                handle = failure_handle;
 
+       current->in_page_owner = 0;
        return handle;
 }
 
-- 
2.31.1

Reply via email to