Re: [PATCH bpf-next v3 9/9] tools/bpf: add a test for bpf_get_stack with tracepoint prog

2018-04-22 Thread Yonghong Song



On 4/22/18 5:27 PM, Alexei Starovoitov wrote:

On Fri, Apr 20, 2018 at 03:18:42PM -0700, Yonghong Song wrote:

The test_stacktrace_map and test_stacktrace_build_id are
enhanced to call bpf_get_stack in the helper to get the
stack trace as well.  The stack traces from bpf_get_stack
and bpf_get_stackid are compared to ensure that for the
same stack as represented as the same hash, their ip addresses
or build id's must be the same.

Signed-off-by: Yonghong Song <yhs@fb.com>
---
  tools/testing/selftests/bpf/test_progs.c   | 63 +++---
  .../selftests/bpf/test_stacktrace_build_id.c   | 20 ++-
  tools/testing/selftests/bpf/test_stacktrace_map.c  | 20 +--
  3 files changed, 92 insertions(+), 11 deletions(-)

diff --git a/tools/testing/selftests/bpf/test_progs.c 
b/tools/testing/selftests/bpf/test_progs.c
index dad4c3f..06b922a 100644
--- a/tools/testing/selftests/bpf/test_progs.c
+++ b/tools/testing/selftests/bpf/test_progs.c
@@ -897,11 +897,40 @@ static int compare_map_keys(int map1_fd, int map2_fd)
return 0;
  }
  
+static int compare_stack_ips(int smap_fd, int amap_fd, int stack_trace_len)

+{
+   __u32 key, next_key, *cur_key_p, *next_key_p;
+   char val_buf1[stack_trace_len], val_buf2[stack_trace_len];


the kernel is trying to get rid of VLAs.
test_progs.c already uses them, but if possible let's not
add more uses of them.


okay, try to get rid of these two VLAs.


Other than that looks great.


Re: [PATCH bpf-next v3 9/9] tools/bpf: add a test for bpf_get_stack with tracepoint prog

2018-04-22 Thread Alexei Starovoitov
On Fri, Apr 20, 2018 at 03:18:42PM -0700, Yonghong Song wrote:
> The test_stacktrace_map and test_stacktrace_build_id are
> enhanced to call bpf_get_stack in the helper to get the
> stack trace as well.  The stack traces from bpf_get_stack
> and bpf_get_stackid are compared to ensure that for the
> same stack as represented as the same hash, their ip addresses
> or build id's must be the same.
> 
> Signed-off-by: Yonghong Song <yhs@fb.com>
> ---
>  tools/testing/selftests/bpf/test_progs.c   | 63 
> +++---
>  .../selftests/bpf/test_stacktrace_build_id.c   | 20 ++-
>  tools/testing/selftests/bpf/test_stacktrace_map.c  | 20 +--
>  3 files changed, 92 insertions(+), 11 deletions(-)
> 
> diff --git a/tools/testing/selftests/bpf/test_progs.c 
> b/tools/testing/selftests/bpf/test_progs.c
> index dad4c3f..06b922a 100644
> --- a/tools/testing/selftests/bpf/test_progs.c
> +++ b/tools/testing/selftests/bpf/test_progs.c
> @@ -897,11 +897,40 @@ static int compare_map_keys(int map1_fd, int map2_fd)
>   return 0;
>  }
>  
> +static int compare_stack_ips(int smap_fd, int amap_fd, int stack_trace_len)
> +{
> + __u32 key, next_key, *cur_key_p, *next_key_p;
> + char val_buf1[stack_trace_len], val_buf2[stack_trace_len];

the kernel is trying to get rid of VLAs.
test_progs.c already uses them, but if possible let's not
add more uses of them.
Other than that looks great.



[PATCH bpf-next v3 9/9] tools/bpf: add a test for bpf_get_stack with tracepoint prog

2018-04-20 Thread Yonghong Song
The test_stacktrace_map and test_stacktrace_build_id are
enhanced to call bpf_get_stack in the helper to get the
stack trace as well.  The stack traces from bpf_get_stack
and bpf_get_stackid are compared to ensure that for the
same stack as represented as the same hash, their ip addresses
or build id's must be the same.

Signed-off-by: Yonghong Song <yhs@fb.com>
---
 tools/testing/selftests/bpf/test_progs.c   | 63 +++---
 .../selftests/bpf/test_stacktrace_build_id.c   | 20 ++-
 tools/testing/selftests/bpf/test_stacktrace_map.c  | 20 +--
 3 files changed, 92 insertions(+), 11 deletions(-)

diff --git a/tools/testing/selftests/bpf/test_progs.c 
b/tools/testing/selftests/bpf/test_progs.c
index dad4c3f..06b922a 100644
--- a/tools/testing/selftests/bpf/test_progs.c
+++ b/tools/testing/selftests/bpf/test_progs.c
@@ -897,11 +897,40 @@ static int compare_map_keys(int map1_fd, int map2_fd)
return 0;
 }
 
+static int compare_stack_ips(int smap_fd, int amap_fd, int stack_trace_len)
+{
+   __u32 key, next_key, *cur_key_p, *next_key_p;
+   char val_buf1[stack_trace_len], val_buf2[stack_trace_len];
+   int i, err;
+
+   cur_key_p = NULL;
+   next_key_p = &key;
+   while (bpf_map_get_next_key(smap_fd, cur_key_p, next_key_p) == 0) {
+   err = bpf_map_lookup_elem(smap_fd, next_key_p, val_buf1);
+   if (err)
+   return err;
+   err = bpf_map_lookup_elem(amap_fd, next_key_p, val_buf2);
+   if (err)
+   return err;
+   for (i = 0; i < stack_trace_len; i++) {
+   if (val_buf1[i] != val_buf2[i])
+   return -1;
+   }
+   key = *next_key_p;
+   cur_key_p = &key;
+   next_key_p = &next_key;
+   }
+   if (errno != ENOENT)
+   return -1;
+
+   return 0;
+}
+
 static void test_stacktrace_map()
 {
-   int control_map_fd, stackid_hmap_fd, stackmap_fd;
+   int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd;
const char *file = "./test_stacktrace_map.o";
-   int bytes, efd, err, pmu_fd, prog_fd;
+   int bytes, efd, err, pmu_fd, prog_fd, stack_trace_len;
struct perf_event_attr attr = {};
__u32 key, val, duration = 0;
struct bpf_object *obj;
@@ -957,6 +986,10 @@ static void test_stacktrace_map()
if (stackmap_fd < 0)
goto disable_pmu;
 
+   stack_amap_fd = bpf_find_map(__func__, obj, "stack_amap");
+   if (stack_amap_fd < 0)
+   goto disable_pmu;
+
/* give some time for bpf program run */
sleep(1);
 
@@ -978,6 +1011,12 @@ static void test_stacktrace_map()
  "err %d errno %d\n", err, errno))
goto disable_pmu_noerr;
 
+   stack_trace_len = PERF_MAX_STACK_DEPTH * sizeof(__u64);
+   err = compare_stack_ips(stackmap_fd, stack_amap_fd, stack_trace_len);
+   if (CHECK(err, "compare_stack_ips stackmap vs. stack_amap",
+ "err %d errno %d\n", err, errno))
+   goto disable_pmu_noerr;
+
goto disable_pmu_noerr;
 disable_pmu:
error_cnt++;
@@ -1071,9 +1110,9 @@ static int extract_build_id(char *build_id, size_t size)
 
 static void test_stacktrace_build_id(void)
 {
-   int control_map_fd, stackid_hmap_fd, stackmap_fd;
+   int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd;
const char *file = "./test_stacktrace_build_id.o";
-   int bytes, efd, err, pmu_fd, prog_fd;
+   int bytes, efd, err, pmu_fd, prog_fd, stack_trace_len;
struct perf_event_attr attr = {};
__u32 key, previous_key, val, duration = 0;
struct bpf_object *obj;
@@ -1138,6 +1177,11 @@ static void test_stacktrace_build_id(void)
  err, errno))
goto disable_pmu;
 
+   stack_amap_fd = bpf_find_map(__func__, obj, "stack_amap");
+   if (CHECK(stack_amap_fd < 0, "bpf_find_map stack_amap",
+ "err %d errno %d\n", err, errno))
+   goto disable_pmu;
+
assert(system("dd if=/dev/urandom of=/dev/zero count=4 2> /dev/null")
   == 0);
assert(system("./urandom_read if=/dev/urandom of=/dev/zero count=4 2> 
/dev/null") == 0);
@@ -1189,8 +1233,15 @@ static void test_stacktrace_build_id(void)
previous_key = key;
	} while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0);
 
-   CHECK(build_id_matches < 1, "build id match",
- "Didn't find expected build ID from the map");
+   if (CHECK(build_id_matches < 1, "build id match",
+ "Didn't find expected build ID from the map"))
+   goto disable_pmu;
+
+   stack_trace_len = PERF_MAX_STACK_DEPTH
+   * sizeof(struct bpf_stack_build_id);
+   err = compare_stack_ips(stackmap_fd, stack_amap_fd, stack_trace_len);
+   CHECK(err, "compare_stack_ips stackmap vs.