From: Steven Rostedt <[email protected]>

The ret_stack (shadow stack used by the function graph infrastructure)
is created for every task on the system when function graph is enabled.
Give it its own kmem_cache. This will make it easier to see how much
memory is being used specifically for function graph shadow stacks.
In the future, this size may change and may not be a power of two.
Having its own cache can also keep it from fragmenting memory.

Signed-off-by: Steven Rostedt (Google) <[email protected]>
---
 kernel/trace/fgraph.c | 35 ++++++++++++++++++++++++++++++-----
 1 file changed, 30 insertions(+), 5 deletions(-)

diff --git a/kernel/trace/fgraph.c b/kernel/trace/fgraph.c
index 51e81b299a0d..51849bc42d3a 100644
--- a/kernel/trace/fgraph.c
+++ b/kernel/trace/fgraph.c
@@ -172,6 +172,8 @@ enum {
 DEFINE_STATIC_KEY_FALSE(kill_ftrace_graph);
 int ftrace_graph_active;
 
+static struct kmem_cache *fgraph_stack_cachep;
+
 static struct fgraph_ops *fgraph_array[FGRAPH_ARRAY_SIZE];
 
 static unsigned long fgraph_array_bitmask;
@@ -1022,8 +1024,11 @@ static int alloc_retstack_tasklist(unsigned long **ret_stack_list)
 	int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
 	struct task_struct *g, *t;
 
+	if (WARN_ON_ONCE(!fgraph_stack_cachep))
+		return -ENOMEM;
+
 	for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
-		ret_stack_list[i] = kmalloc(SHADOW_STACK_SIZE, GFP_KERNEL);
+		ret_stack_list[i] = kmem_cache_alloc(fgraph_stack_cachep, GFP_KERNEL);
 		if (!ret_stack_list[i]) {
 			start = 0;
 			end = i;
@@ -1054,7 +1059,7 @@ static int alloc_retstack_tasklist(unsigned long **ret_stack_list)
 	rcu_read_unlock();
 free:
 	for (i = start; i < end; i++)
-		kfree(ret_stack_list[i]);
+		kmem_cache_free(fgraph_stack_cachep, ret_stack_list[i]);
 
 	return ret;
 }
@@ -1117,9 +1122,12 @@ void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
 	if (ftrace_graph_active) {
 		unsigned long *ret_stack;
 
+		if (WARN_ON_ONCE(!fgraph_stack_cachep))
+			return;
+
 		ret_stack = per_cpu(idle_ret_stack, cpu);
 		if (!ret_stack) {
-			ret_stack = kmalloc(SHADOW_STACK_SIZE, GFP_KERNEL);
+			ret_stack = kmem_cache_alloc(fgraph_stack_cachep, GFP_KERNEL);
 			if (!ret_stack)
 				return;
 			per_cpu(idle_ret_stack, cpu) = ret_stack;
@@ -1139,7 +1147,10 @@ void ftrace_graph_init_task(struct task_struct *t)
 	if (ftrace_graph_active) {
 		unsigned long *ret_stack;
 
-		ret_stack = kmalloc(SHADOW_STACK_SIZE, GFP_KERNEL);
+		if (WARN_ON_ONCE(!fgraph_stack_cachep))
+			return;
+
+		ret_stack = kmem_cache_alloc(fgraph_stack_cachep, GFP_KERNEL);
 		if (!ret_stack)
 			return;
 		graph_init_task(t, ret_stack);
@@ -1154,7 +1165,11 @@ void ftrace_graph_exit_task(struct task_struct *t)
 	/* NULL must become visible to IRQs before we free it: */
 	barrier();
 
-	kfree(ret_stack);
+	if (ret_stack) {
+		if (WARN_ON_ONCE(!fgraph_stack_cachep))
+			return;
+		kmem_cache_free(fgraph_stack_cachep, ret_stack);
+	}
 }
 
 #ifdef CONFIG_DYNAMIC_FTRACE
@@ -1290,6 +1305,16 @@ int register_ftrace_graph(struct fgraph_ops *gops)
 
 	mutex_lock(&ftrace_lock);
 
+	if (!fgraph_stack_cachep) {
+		fgraph_stack_cachep = kmem_cache_create("fgraph_stack",
+							SHADOW_STACK_SIZE,
+							SHADOW_STACK_SIZE, 0, NULL);
+		if (!fgraph_stack_cachep) {
+			ret = -ENOMEM;
+			goto out;
+		}
+	}
+
 	if (!fgraph_array[0]) {
 		/* The array must always have real data on it */
 		for (i = 0; i < FGRAPH_ARRAY_SIZE; i++)
-- 
2.45.2
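For reference, below is a minimal, self-contained sketch (not part of the
patch) of the dedicated kmem_cache pattern the patch adopts. The names
"demo_stack", DEMO_STACK_SIZE, and the module hooks are hypothetical
stand-ins for "fgraph_stack" and SHADOW_STACK_SIZE, used purely for
illustration:

/*
 * Sketch of the dedicated-cache pattern used above. A named cache
 * makes these allocations show up as their own row in /proc/slabinfo
 * instead of being lumped into a generic kmalloc-<size> cache.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>

#define DEMO_STACK_SIZE 4096	/* hypothetical stand-in for SHADOW_STACK_SIZE */

static struct kmem_cache *demo_stack_cachep;

static int __init demo_init(void)
{
	unsigned long *stack;

	/* align == size, as in the patch, keeps each object naturally aligned */
	demo_stack_cachep = kmem_cache_create("demo_stack",
					      DEMO_STACK_SIZE,
					      DEMO_STACK_SIZE, 0, NULL);
	if (!demo_stack_cachep)
		return -ENOMEM;

	/* one alloc/free round trip, as fgraph does per task */
	stack = kmem_cache_alloc(demo_stack_cachep, GFP_KERNEL);
	if (!stack) {
		kmem_cache_destroy(demo_stack_cachep);
		return -ENOMEM;
	}
	kmem_cache_free(demo_stack_cachep, stack);
	return 0;
}

static void __exit demo_exit(void)
{
	kmem_cache_destroy(demo_stack_cachep);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("kmem_cache usage sketch");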
