[for-next][PATCH 24/33] ftrace: Have the function probes call their own function

2017-04-21 Thread Steven Rostedt
From: "Steven Rostedt (VMware)" <rostedt@goodmis.org>

Now that the function probes have their own ftrace_ops, there's no reason to
continue using the ftrace_func_hash to find which probe to call in the
function callback. The ops that is passed in to the function callback is
part of the probe_ops to call.

Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
---
 include/linux/ftrace.h |   4 +-
 kernel/trace/ftrace.c  | 225 +
 kernel/trace/trace.h   |   1 +
 3 files changed, 101 insertions(+), 129 deletions(-)

diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 774e7a95c201..6d2a63e4ea52 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -443,8 +443,8 @@ enum {
FTRACE_ITER_FILTER  = (1 << 0),
FTRACE_ITER_NOTRACE = (1 << 1),
FTRACE_ITER_PRINTALL= (1 << 2),
-   FTRACE_ITER_DO_HASH = (1 << 3),
-   FTRACE_ITER_HASH= (1 << 4),
+   FTRACE_ITER_DO_PROBES   = (1 << 3),
+   FTRACE_ITER_PROBE   = (1 << 4),
FTRACE_ITER_ENABLED = (1 << 5),
 };
 
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index cf6b7263199a..493c7ff7e860 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1096,14 +1096,7 @@ static bool update_all_ops;
 # error Dynamic ftrace depends on MCOUNT_RECORD
 #endif
 
-static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;
-
-struct ftrace_func_probe {
-   struct hlist_node   node;
-   struct ftrace_probe_ops *ops;
-   unsigned long   ip;
-   struct list_headfree_list;
-};
+static LIST_HEAD(ftrace_func_probes);
 
 struct ftrace_func_entry {
struct hlist_node hlist;
@@ -1270,7 +1263,7 @@ static void
 remove_hash_entry(struct ftrace_hash *hash,
  struct ftrace_func_entry *entry)
 {
-   hlist_del(&entry->hlist);
+   hlist_del_rcu(&entry->hlist);
hash->count--;
 }
 
@@ -3063,35 +3056,58 @@ struct ftrace_iterator {
loff_t  func_pos;
struct ftrace_page  *pg;
struct dyn_ftrace   *func;
-   struct ftrace_func_probe*probe;
+   struct ftrace_probe_ops *probe;
+   struct ftrace_func_entry*probe_entry;
struct trace_parser parser;
struct ftrace_hash  *hash;
struct ftrace_ops   *ops;
-   int hidx;
+   int pidx;
int idx;
unsignedflags;
 };
 
 static void *
-t_hash_next(struct seq_file *m, loff_t *pos)
+t_probe_next(struct seq_file *m, loff_t *pos)
 {
struct ftrace_iterator *iter = m->private;
+   struct ftrace_hash *hash;
+   struct list_head *next;
struct hlist_node *hnd = NULL;
struct hlist_head *hhd;
+   int size;
 
(*pos)++;
iter->pos = *pos;
 
-   if (iter->probe)
-   hnd = &iter->probe->node;
- retry:
-   if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
+   if (list_empty(&ftrace_func_probes))
return NULL;
 
-   hhd = &ftrace_func_hash[iter->hidx];
+   if (!iter->probe) {
+   next = ftrace_func_probes.next;
+   iter->probe = list_entry(next, struct ftrace_probe_ops, list);
+   }
+
+   if (iter->probe_entry)
+   hnd = &iter->probe_entry->hlist;
+
+   hash = iter->probe->ops.func_hash->filter_hash;
+   size = 1 << hash->size_bits;
+
+ retry:
+   if (iter->pidx >= size) {
+   if (iter->probe->list.next == &ftrace_func_probes)
+   return NULL;
+   next = iter->probe->list.next;
+   iter->probe = list_entry(next, struct ftrace_probe_ops, list);
+   hash = iter->probe->ops.func_hash->filter_hash;
+   size = 1 << hash->size_bits;
+   iter->pidx = 0;
+   }
+
+   hhd = &hash->buckets[iter->pidx];
 
if (hlist_empty(hhd)) {
-   iter->hidx++;
+   iter->pidx++;
hnd = NULL;
goto retry;
}
@@ -3101,7 +3117,7 @@ t_hash_next(struct seq_file *m, loff_t *pos)
else {
hnd = hnd->next;
if (!hnd) {
-   iter->hidx++;
+   iter->pidx++;
goto retry;
}
}
@@ -3109,26 +3125,28 @@ t_hash_next(struct seq_file *m, loff_t *pos)
if (WARN_ON_ONCE(!hnd))
return NULL;
 
-   iter->probe = hlist_entry(hnd, struct ftrace_func_probe, node);
+   iter->probe_entry = hlist_entry(hnd, struct ftrace_func_entry, hlist);
 
return iter;
 }
 
-static void *t_hash_start(struct seq_file *m, loff_t *pos)
+static void *t_probe_start(struct seq_file *m, loff_t *pos)
 {
struct ftrace_iterator *iter = m->private;
void *p = NULL;
loff_t l;
 

[for-next][PATCH 24/33] ftrace: Have the function probes call their own function

2017-04-21 Thread Steven Rostedt
From: "Steven Rostedt (VMware)" <rostedt@goodmis.org>

Now that the function probes have their own ftrace_ops, there's no reason to
continue using the ftrace_func_hash to find which probe to call in the
function callback. The ops that is passed in to the function callback is
part of the probe_ops to call.

Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
---
 include/linux/ftrace.h |   4 +-
 kernel/trace/ftrace.c  | 225 +
 kernel/trace/trace.h   |   1 +
 3 files changed, 101 insertions(+), 129 deletions(-)

diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 774e7a95c201..6d2a63e4ea52 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -443,8 +443,8 @@ enum {
FTRACE_ITER_FILTER  = (1 << 0),
FTRACE_ITER_NOTRACE = (1 << 1),
FTRACE_ITER_PRINTALL= (1 << 2),
-   FTRACE_ITER_DO_HASH = (1 << 3),
-   FTRACE_ITER_HASH= (1 << 4),
+   FTRACE_ITER_DO_PROBES   = (1 << 3),
+   FTRACE_ITER_PROBE   = (1 << 4),
FTRACE_ITER_ENABLED = (1 << 5),
 };
 
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index cf6b7263199a..493c7ff7e860 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1096,14 +1096,7 @@ static bool update_all_ops;
 # error Dynamic ftrace depends on MCOUNT_RECORD
 #endif
 
-static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;
-
-struct ftrace_func_probe {
-   struct hlist_node   node;
-   struct ftrace_probe_ops *ops;
-   unsigned long   ip;
-   struct list_headfree_list;
-};
+static LIST_HEAD(ftrace_func_probes);
 
 struct ftrace_func_entry {
struct hlist_node hlist;
@@ -1270,7 +1263,7 @@ static void
 remove_hash_entry(struct ftrace_hash *hash,
  struct ftrace_func_entry *entry)
 {
-   hlist_del(&entry->hlist);
+   hlist_del_rcu(&entry->hlist);
hash->count--;
 }
 
@@ -3063,35 +3056,58 @@ struct ftrace_iterator {
loff_t  func_pos;
struct ftrace_page  *pg;
struct dyn_ftrace   *func;
-   struct ftrace_func_probe*probe;
+   struct ftrace_probe_ops *probe;
+   struct ftrace_func_entry*probe_entry;
struct trace_parser parser;
struct ftrace_hash  *hash;
struct ftrace_ops   *ops;
-   int hidx;
+   int pidx;
int idx;
unsignedflags;
 };
 
 static void *
-t_hash_next(struct seq_file *m, loff_t *pos)
+t_probe_next(struct seq_file *m, loff_t *pos)
 {
struct ftrace_iterator *iter = m->private;
+   struct ftrace_hash *hash;
+   struct list_head *next;
struct hlist_node *hnd = NULL;
struct hlist_head *hhd;
+   int size;
 
(*pos)++;
iter->pos = *pos;
 
-   if (iter->probe)
-   hnd = &iter->probe->node;
- retry:
-   if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
+   if (list_empty(&ftrace_func_probes))
return NULL;
 
-   hhd = &ftrace_func_hash[iter->hidx];
+   if (!iter->probe) {
+   next = ftrace_func_probes.next;
+   iter->probe = list_entry(next, struct ftrace_probe_ops, list);
+   }
+
+   if (iter->probe_entry)
+   hnd = &iter->probe_entry->hlist;
+
+   hash = iter->probe->ops.func_hash->filter_hash;
+   size = 1 << hash->size_bits;
+
+ retry:
+   if (iter->pidx >= size) {
+   if (iter->probe->list.next == &ftrace_func_probes)
+   return NULL;
+   next = iter->probe->list.next;
+   iter->probe = list_entry(next, struct ftrace_probe_ops, list);
+   hash = iter->probe->ops.func_hash->filter_hash;
+   size = 1 << hash->size_bits;
+   iter->pidx = 0;
+   }
+
+   hhd = &hash->buckets[iter->pidx];
 
if (hlist_empty(hhd)) {
-   iter->hidx++;
+   iter->pidx++;
hnd = NULL;
goto retry;
}
@@ -3101,7 +3117,7 @@ t_hash_next(struct seq_file *m, loff_t *pos)
else {
hnd = hnd->next;
if (!hnd) {
-   iter->hidx++;
+   iter->pidx++;
goto retry;
}
}
@@ -3109,26 +3125,28 @@ t_hash_next(struct seq_file *m, loff_t *pos)
if (WARN_ON_ONCE(!hnd))
return NULL;
 
-   iter->probe = hlist_entry(hnd, struct ftrace_func_probe, node);
+   iter->probe_entry = hlist_entry(hnd, struct ftrace_func_entry, hlist);
 
return iter;
 }
 
-static void *t_hash_start(struct seq_file *m, loff_t *pos)
+static void *t_probe_start(struct seq_file *m, loff_t *pos)
 {
struct ftrace_iterator *iter = m->private;
void *p = NULL;
loff_t l;
 
-   if (!(iter->flags &