[PATCH bpf-next v3 1/7] samples: bpf: refactor hbm program with libbpf

2020-11-24 Thread Daniel T. Lee
This commit refactors the existing cgroup programs with libbpf
bpf loader. Since bpf_program__attach doesn't support cgroup program
attachment, this explicitly attaches cgroup bpf program with
bpf_program__attach_cgroup(bpf_prog, cg1).

Also, to change attach_type of bpf program, this uses libbpf's
bpf_program__set_expected_attach_type helper to switch EGRESS to
INGRESS. To keep bpf program attached to the cgroup hierarchy even
after the exit, this commit uses the BPF_LINK_PINNING to pin the link
attachment even after it is closed.

Besides, this program was broken due to a typo in the BPF MAP definition.
This commit solves the problem by fixing the type of the 'queue_stats'
map from 'struct hvm_queue_stats' to 'struct hbm_queue_stats'.

Fixes: 36b5d471135c ("selftests/bpf: samples/bpf: Split off legacy stuff from 
bpf_helpers.h")
Signed-off-by: Daniel T. Lee 
---
Changes in v2:
 - restore read_trace_pipe2
 - remove unnecessary return code and cgroup fd compare
 - add static at global variable and remove unused variable
 - change cgroup path with unified controller (/unified/)
 - add link pinning to prevent cleaning up on process exit

Changes in v3:
 - cleanup bpf_link, bpf_object and cgroup fd both on success and error
 - remove link NULL cleanup since __destroy() can handle
 - fix cgroup test on cgroup fd cleanup
 
 samples/bpf/.gitignore |   3 +
 samples/bpf/Makefile   |   2 +-
 samples/bpf/do_hbm_test.sh |  32 +--
 samples/bpf/hbm.c  | 111 -
 samples/bpf/hbm_kern.h |   2 +-
 5 files changed, 78 insertions(+), 72 deletions(-)

diff --git a/samples/bpf/.gitignore b/samples/bpf/.gitignore
index b2f29bc8dc43..0b9548ea8477 100644
--- a/samples/bpf/.gitignore
+++ b/samples/bpf/.gitignore
@@ -52,3 +52,6 @@ xdp_tx_iptunnel
 xdpsock
 xsk_fwd
 testfile.img
+hbm_out.log
+iperf.*
+*.out
diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index aeebf5d12f32..7c61118525f7 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -110,7 +110,7 @@ xdp_fwd-objs := xdp_fwd_user.o
 task_fd_query-objs := bpf_load.o task_fd_query_user.o $(TRACE_HELPERS)
 xdp_sample_pkts-objs := xdp_sample_pkts_user.o $(TRACE_HELPERS)
 ibumad-objs := bpf_load.o ibumad_user.o $(TRACE_HELPERS)
-hbm-objs := bpf_load.o hbm.o $(CGROUP_HELPERS)
+hbm-objs := hbm.o $(CGROUP_HELPERS)
 
 # Tell kbuild to always build the programs
 always-y := $(tprogs-y)
diff --git a/samples/bpf/do_hbm_test.sh b/samples/bpf/do_hbm_test.sh
index ffe4c0607341..21790ea5c460 100755
--- a/samples/bpf/do_hbm_test.sh
+++ b/samples/bpf/do_hbm_test.sh
@@ -91,6 +91,16 @@ qdisc=""
 flags=""
 do_stats=0
 
+BPFFS=/sys/fs/bpf
+function config_bpffs () {
+   if mount | grep $BPFFS > /dev/null; then
+   echo "bpffs already mounted"
+   else
+   echo "bpffs not mounted. Mounting..."
+   mount -t bpf none $BPFFS
+   fi
+}
+
 function start_hbm () {
   rm -f hbm.out
   echo "./hbm $dir -n $id -r $rate -t $dur $flags $dbg $prog" > hbm.out
@@ -192,6 +202,7 @@ processArgs () {
 }
 
 processArgs
+config_bpffs
 
 if [ $debug_flag -eq 1 ] ; then
   rm -f hbm_out.log
@@ -201,7 +212,7 @@ hbm_pid=$(start_hbm)
 usleep 10
 
 host=`hostname`
-cg_base_dir=/sys/fs/cgroup
+cg_base_dir=/sys/fs/cgroup/unified
 cg_dir="$cg_base_dir/cgroup-test-work-dir/hbm$id"
 
 echo $$ >> $cg_dir/cgroup.procs
@@ -411,23 +422,8 @@ fi
 
 sleep 1
 
-# Detach any BPF programs that may have lingered
-ttx=`bpftool cgroup tree | grep hbm`
-v=2
-for x in $ttx ; do
-if [ "${x:0:36}" == "/sys/fs/cgroup/cgroup-test-work-dir/" ] ; then
-   cg=$x ; v=0
-else
-   if [ $v -eq 0 ] ; then
-   id=$x ; v=1
-   else
-   if [ $v -eq 1 ] ; then
-   type=$x ; bpftool cgroup detach $cg $type id $id
-   v=0
-   fi
-   fi
-fi
-done
+# Detach any pinned BPF programs that may have lingered
+rm -rf $BPFFS/hbm*
 
 if [ $use_netperf -ne 0 ] ; then
   if [ "$server" == "" ] ; then
diff --git a/samples/bpf/hbm.c b/samples/bpf/hbm.c
index 400e741a56eb..b0c18efe7928 100644
--- a/samples/bpf/hbm.c
+++ b/samples/bpf/hbm.c
@@ -46,7 +46,6 @@
 #include 
 #include 
 
-#include "bpf_load.h"
 #include "bpf_rlimit.h"
 #include "cgroup_helpers.h"
 #include "hbm.h"
@@ -70,9 +69,9 @@ static void do_error(char *msg, bool errno_flag);
 
 #define DEBUGFS "/sys/kernel/debug/tracing/"
 
-struct bpf_object *obj;
-int bpfprog_fd;
-int cgroup_storage_fd;
+static struct bpf_program *bpf_prog;
+static struct bpf_object *obj;
+static int queue_stats_fd;
 
 static void read_trace_pipe2(void)
 {
@@ -121,56 +120,50 @@ static void do_error(char *msg, bool errno_flag)
 
 static int prog_load(char *prog)
 {
-   struct bpf_prog_load_attr prog_load_attr = {
-   .prog_type = BPF_PROG_TYPE_CGROUP

[PATCH bpf-next v3 4/7] samples: bpf: refactor ibumad program with libbpf

2020-11-24 Thread Daniel T. Lee
This commit refactors the existing ibumad program with libbpf bpf
loader. Attach/detach of Tracepoint bpf programs has been managed
with the generic bpf_program__attach() and bpf_link__destroy() from
the libbpf.

Also, instead of using the previous BPF MAP definition, this commit
refactors ibumad MAP definition with the new BTF-defined MAP format.

To verify that this bpf program works without an infiniband device,
try loading ib_umad kernel module and test the program as follows:

# modprobe ib_umad
# ./ibumad

Moreover, TRACE_HELPERS has been removed from the Makefile since it is
not used on this program.

Signed-off-by: Daniel T. Lee 
---
Changes in v2:
 - add static at global variable and drop {}
 - fix return error code on exit
 
 samples/bpf/Makefile  |  2 +-
 samples/bpf/ibumad_kern.c | 26 +++---
 samples/bpf/ibumad_user.c | 71 +--
 3 files changed, 68 insertions(+), 31 deletions(-)

diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index 3bffd42e1482..09a249477554 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -109,7 +109,7 @@ xsk_fwd-objs := xsk_fwd.o
 xdp_fwd-objs := xdp_fwd_user.o
 task_fd_query-objs := task_fd_query_user.o $(TRACE_HELPERS)
 xdp_sample_pkts-objs := xdp_sample_pkts_user.o $(TRACE_HELPERS)
-ibumad-objs := bpf_load.o ibumad_user.o $(TRACE_HELPERS)
+ibumad-objs := ibumad_user.o
 hbm-objs := hbm.o $(CGROUP_HELPERS)
 
 # Tell kbuild to always build the programs
diff --git a/samples/bpf/ibumad_kern.c b/samples/bpf/ibumad_kern.c
index 3a91b4c1989a..26dcd4dde946 100644
--- a/samples/bpf/ibumad_kern.c
+++ b/samples/bpf/ibumad_kern.c
@@ -16,19 +16,19 @@
 #include 
 
 
-struct bpf_map_def SEC("maps") read_count = {
-   .type= BPF_MAP_TYPE_ARRAY,
-   .key_size= sizeof(u32), /* class; u32 required */
-   .value_size  = sizeof(u64), /* count of mads read */
-   .max_entries = 256, /* Room for all Classes */
-};
-
-struct bpf_map_def SEC("maps") write_count = {
-   .type= BPF_MAP_TYPE_ARRAY,
-   .key_size= sizeof(u32), /* class; u32 required */
-   .value_size  = sizeof(u64), /* count of mads written */
-   .max_entries = 256, /* Room for all Classes */
-};
+struct {
+   __uint(type, BPF_MAP_TYPE_ARRAY);
+   __type(key, u32); /* class; u32 required */
+   __type(value, u64); /* count of mads read */
+   __uint(max_entries, 256); /* Room for all Classes */
+} read_count SEC(".maps");
+
+struct {
+   __uint(type, BPF_MAP_TYPE_ARRAY);
+   __type(key, u32); /* class; u32 required */
+   __type(value, u64); /* count of mads written */
+   __uint(max_entries, 256); /* Room for all Classes */
+} write_count SEC(".maps");
 
 #undef DEBUG
 #ifndef DEBUG
diff --git a/samples/bpf/ibumad_user.c b/samples/bpf/ibumad_user.c
index fa06eef31a84..d83d8102f489 100644
--- a/samples/bpf/ibumad_user.c
+++ b/samples/bpf/ibumad_user.c
@@ -23,10 +23,15 @@
 #include 
 #include 
 
-#include "bpf_load.h"
+#include 
 #include "bpf_util.h"
 #include 
 
+static struct bpf_link *tp_links[3];
+static struct bpf_object *obj;
+static int map_fd[2];
+static int tp_cnt;
+
 static void dump_counts(int fd)
 {
__u32 key;
@@ -53,6 +58,11 @@ static void dump_all_counts(void)
 static void dump_exit(int sig)
 {
dump_all_counts();
+   /* Detach tracepoints */
+   while (tp_cnt)
+   bpf_link__destroy(tp_links[--tp_cnt]);
+
+   bpf_object__close(obj);
exit(0);
 }
 
@@ -73,19 +83,11 @@ static void usage(char *cmd)
 
 int main(int argc, char **argv)
 {
+   struct bpf_program *prog;
unsigned long delay = 5;
+   char filename[256];
int longindex = 0;
-   int opt;
-   char bpf_file[256];
-
-   /* Create the eBPF kernel code path name.
-* This follows the pattern of all of the other bpf samples
-*/
-   snprintf(bpf_file, sizeof(bpf_file), "%s_kern.o", argv[0]);
-
-   /* Do one final dump when exiting */
-   signal(SIGINT, dump_exit);
-   signal(SIGTERM, dump_exit);
+   int opt, err = -1;
 
while ((opt = getopt_long(argc, argv, "hd:rSw",
  long_options, &longindex)) != -1) {
@@ -107,16 +109,51 @@ int main(int argc, char **argv)
}
}
 
-   if (load_bpf_file(bpf_file)) {
-   fprintf(stderr, "ERROR: failed to load eBPF from file : %s\n",
-   bpf_file);
-   return 1;
+   /* Do one final dump when exiting */
+   signal(SIGINT, dump_exit);
+   signal(SIGTERM, dump_exit);
+
+   snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+   obj = bpf_object__open_file(filename, NULL);
+   if (libbpf_get_error(obj)) {
+   fprintf(stderr, "ERROR: opening BPF object file failed\n");
+   return err;
+   

[PATCH bpf-next v3 6/7] samples: bpf: fix lwt_len_hist reusing previous BPF map

2020-11-24 Thread Daniel T. Lee
Currently, lwt_len_hist's map lwt_len_hist_map uses pinning, and the
map isn't cleared on test end. This leads to reuse of that map for
each test, which prevents the results of the test from being accurate.

This commit fixes the problem by removing the pinned map from bpffs.
Also, this commit adds the executable permission to the shell script
files.

Fixes: f74599f7c5309 ("bpf: Add tests and samples for LWT-BPF")
Signed-off-by: Daniel T. Lee 
---
 samples/bpf/lwt_len_hist.sh | 2 ++
 samples/bpf/test_lwt_bpf.sh | 0
 2 files changed, 2 insertions(+)
 mode change 100644 => 100755 samples/bpf/lwt_len_hist.sh
 mode change 100644 => 100755 samples/bpf/test_lwt_bpf.sh

diff --git a/samples/bpf/lwt_len_hist.sh b/samples/bpf/lwt_len_hist.sh
old mode 100644
new mode 100755
index 090b96eaf7f7..0eda9754f50b
--- a/samples/bpf/lwt_len_hist.sh
+++ b/samples/bpf/lwt_len_hist.sh
@@ -8,6 +8,8 @@ VETH1=tst_lwt1b
 TRACE_ROOT=/sys/kernel/debug/tracing
 
 function cleanup {
+   # To reset saved histogram, remove pinned map
+   rm /sys/fs/bpf/tc/globals/lwt_len_hist_map
ip route del 192.168.253.2/32 dev $VETH0 2> /dev/null
ip link del $VETH0 2> /dev/null
ip link del $VETH1 2> /dev/null
diff --git a/samples/bpf/test_lwt_bpf.sh b/samples/bpf/test_lwt_bpf.sh
old mode 100644
new mode 100755
-- 
2.25.1



[PATCH bpf-next v3 7/7] samples: bpf: remove bpf_load loader completely

2020-11-24 Thread Daniel T. Lee
The numerous refactorings that rewrite BPF programs written with
bpf_load to use the libbpf loader have finally been completed. As a
result, no BPF programs within the kernel use bpf_load any longer.

This commit removes bpf_load, an outdated bpf loader that is difficult
to keep up with the latest kernel BPF and causes confusion.

Also, this commit removes the unused trace_helper and bpf_load from
samples/bpf target objects from Makefile.

Signed-off-by: Daniel T. Lee 
Acked-by: Jesper Dangaard Brouer 
Acked-by: Andrii Nakryiko 
---
Changes in v2:
 - merge commit with changing Makefile
 
 samples/bpf/Makefile|  10 +-
 samples/bpf/bpf_load.c  | 667 
 samples/bpf/bpf_load.h  |  57 ---
 samples/bpf/xdp2skb_meta_kern.c |   2 +-
 4 files changed, 5 insertions(+), 731 deletions(-)
 delete mode 100644 samples/bpf/bpf_load.c
 delete mode 100644 samples/bpf/bpf_load.h

diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index 25380e04897e..05db041f8b18 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -73,7 +73,7 @@ tracex5-objs := tracex5_user.o $(TRACE_HELPERS)
 tracex6-objs := tracex6_user.o
 tracex7-objs := tracex7_user.o
 test_probe_write_user-objs := test_probe_write_user_user.o
-trace_output-objs := trace_output_user.o $(TRACE_HELPERS)
+trace_output-objs := trace_output_user.o
 lathist-objs := lathist_user.o
 offwaketime-objs := offwaketime_user.o $(TRACE_HELPERS)
 spintest-objs := spintest_user.o $(TRACE_HELPERS)
@@ -91,8 +91,8 @@ test_current_task_under_cgroup-objs := $(CGROUP_HELPERS) \
   test_current_task_under_cgroup_user.o
 trace_event-objs := trace_event_user.o $(TRACE_HELPERS)
 sampleip-objs := sampleip_user.o $(TRACE_HELPERS)
-tc_l2_redirect-objs := bpf_load.o tc_l2_redirect_user.o
-lwt_len_hist-objs := bpf_load.o lwt_len_hist_user.o
+tc_l2_redirect-objs := tc_l2_redirect_user.o
+lwt_len_hist-objs := lwt_len_hist_user.o
 xdp_tx_iptunnel-objs := xdp_tx_iptunnel_user.o
 test_map_in_map-objs := test_map_in_map_user.o
 per_socket_stats_example-objs := cookie_uid_helper_example.o
@@ -108,7 +108,7 @@ xdpsock-objs := xdpsock_user.o
 xsk_fwd-objs := xsk_fwd.o
 xdp_fwd-objs := xdp_fwd_user.o
 task_fd_query-objs := task_fd_query_user.o $(TRACE_HELPERS)
-xdp_sample_pkts-objs := xdp_sample_pkts_user.o $(TRACE_HELPERS)
+xdp_sample_pkts-objs := xdp_sample_pkts_user.o
 ibumad-objs := ibumad_user.o
 hbm-objs := hbm.o $(CGROUP_HELPERS)
 
@@ -197,8 +197,6 @@ TPROGS_CFLAGS += --sysroot=$(SYSROOT)
 TPROGS_LDFLAGS := -L$(SYSROOT)/usr/lib
 endif
 
-TPROGCFLAGS_bpf_load.o += -Wno-unused-variable
-
 TPROGS_LDLIBS  += $(LIBBPF) -lelf -lz
 TPROGLDLIBS_tracex4+= -lrt
 TPROGLDLIBS_trace_output   += -lrt
diff --git a/samples/bpf/bpf_load.c b/samples/bpf/bpf_load.c
deleted file mode 100644
index c5ad528f046e..
--- a/samples/bpf/bpf_load.c
+++ /dev/null
@@ -1,667 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include "bpf_load.h"
-#include "perf-sys.h"
-
-#define DEBUGFS "/sys/kernel/debug/tracing/"
-
-static char license[128];
-static int kern_version;
-static bool processed_sec[128];
-char bpf_log_buf[BPF_LOG_BUF_SIZE];
-int map_fd[MAX_MAPS];
-int prog_fd[MAX_PROGS];
-int event_fd[MAX_PROGS];
-int prog_cnt;
-int prog_array_fd = -1;
-
-struct bpf_map_data map_data[MAX_MAPS];
-int map_data_count;
-
-static int populate_prog_array(const char *event, int prog_fd)
-{
-   int ind = atoi(event), err;
-
-   err = bpf_map_update_elem(prog_array_fd, &ind, &prog_fd, BPF_ANY);
-   if (err < 0) {
-   printf("failed to store prog_fd in prog_array\n");
-   return -1;
-   }
-   return 0;
-}
-
-static int write_kprobe_events(const char *val)
-{
-   int fd, ret, flags;
-
-   if (val == NULL)
-   return -1;
-   else if (val[0] == '\0')
-   flags = O_WRONLY | O_TRUNC;
-   else
-   flags = O_WRONLY | O_APPEND;
-
-   fd = open(DEBUGFS "kprobe_events", flags);
-
-   ret = write(fd, val, strlen(val));
-   close(fd);
-
-   return ret;
-}
-
-static int load_and_attach(const char *event, struct bpf_insn *prog, int size)
-{
-   bool is_socket = strncmp(event, "socket", 6) == 0;
-   bool is_kprobe = strncmp(event, "kprobe/", 7) == 0;
-   bool is_kretprobe = strncmp(event, "kretprobe/", 10) == 0;
-   bool is_tracepoint = strncmp(event, "tracepoint/", 11) == 0;
-   bool is_raw_tracepoint = strncmp(event, "raw_tracepoint/", 15) == 0;
-   bool is_xdp = strncmp(event,

[PATCH bpf-next v3 5/7] samples: bpf: refactor test_overhead program with libbpf

2020-11-24 Thread Daniel T. Lee
This commit refactors the existing program with libbpf bpf loader.
Since the kprobe, tracepoint and raw_tracepoint bpf program can be
attached with single bpf_program__attach() interface, so the
corresponding function of libbpf is used here.

Rather than specifying the number of cpus inside the code, this commit
uses the number of available cpus with _SC_NPROCESSORS_ONLN.

Signed-off-by: Daniel T. Lee 
Acked-by: Andrii Nakryiko 
---
Changes in v2:
 - add static at global variable and drop {}
 
 samples/bpf/Makefile |  2 +-
 samples/bpf/test_overhead_user.c | 82 +++-
 2 files changed, 60 insertions(+), 24 deletions(-)

diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index 09a249477554..25380e04897e 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -78,7 +78,7 @@ lathist-objs := lathist_user.o
 offwaketime-objs := offwaketime_user.o $(TRACE_HELPERS)
 spintest-objs := spintest_user.o $(TRACE_HELPERS)
 map_perf_test-objs := map_perf_test_user.o
-test_overhead-objs := bpf_load.o test_overhead_user.o
+test_overhead-objs := test_overhead_user.o
 test_cgrp2_array_pin-objs := test_cgrp2_array_pin.o
 test_cgrp2_attach-objs := test_cgrp2_attach.o
 test_cgrp2_sock-objs := test_cgrp2_sock.o
diff --git a/samples/bpf/test_overhead_user.c b/samples/bpf/test_overhead_user.c
index 94f74112a20e..819a6fe86f89 100644
--- a/samples/bpf/test_overhead_user.c
+++ b/samples/bpf/test_overhead_user.c
@@ -18,10 +18,14 @@
 #include 
 #include 
 #include 
-#include "bpf_load.h"
+#include 
 
 #define MAX_CNT 100
 
+static struct bpf_link *links[2];
+static struct bpf_object *obj;
+static int cnt;
+
 static __u64 time_get_ns(void)
 {
struct timespec ts;
@@ -115,20 +119,54 @@ static void run_perf_test(int tasks, int flags)
}
 }
 
+static int load_progs(char *filename)
+{
+   struct bpf_program *prog;
+   int err = 0;
+
+   obj = bpf_object__open_file(filename, NULL);
+   err = libbpf_get_error(obj);
+   if (err < 0) {
+   fprintf(stderr, "ERROR: opening BPF object file failed\n");
+   return err;
+   }
+
+   /* load BPF program */
+   err = bpf_object__load(obj);
+   if (err < 0) {
+   fprintf(stderr, "ERROR: loading BPF object file failed\n");
+   return err;
+   }
+
+   bpf_object__for_each_program(prog, obj) {
+   links[cnt] = bpf_program__attach(prog);
+   err = libbpf_get_error(links[cnt]);
+   if (err < 0) {
+   fprintf(stderr, "ERROR: bpf_program__attach failed\n");
+   links[cnt] = NULL;
+   return err;
+   }
+   cnt++;
+   }
+
+   return err;
+}
+
 static void unload_progs(void)
 {
-   close(prog_fd[0]);
-   close(prog_fd[1]);
-   close(event_fd[0]);
-   close(event_fd[1]);
+   while (cnt)
+   bpf_link__destroy(links[--cnt]);
+
+   bpf_object__close(obj);
 }
 
 int main(int argc, char **argv)
 {
struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
-   char filename[256];
-   int num_cpu = 8;
+   int num_cpu = sysconf(_SC_NPROCESSORS_ONLN);
int test_flags = ~0;
+   char filename[256];
+   int err = 0;
 
setrlimit(RLIMIT_MEMLOCK, &r);
 
@@ -145,38 +183,36 @@ int main(int argc, char **argv)
if (test_flags & 0xC) {
snprintf(filename, sizeof(filename),
 "%s_kprobe_kern.o", argv[0]);
-   if (load_bpf_file(filename)) {
-   printf("%s", bpf_log_buf);
-   return 1;
-   }
+
printf("w/KPROBE\n");
-   run_perf_test(num_cpu, test_flags >> 2);
+   err = load_progs(filename);
+   if (!err)
+   run_perf_test(num_cpu, test_flags >> 2);
+
unload_progs();
}
 
if (test_flags & 0x30) {
snprintf(filename, sizeof(filename),
 "%s_tp_kern.o", argv[0]);
-   if (load_bpf_file(filename)) {
-   printf("%s", bpf_log_buf);
-   return 1;
-   }
printf("w/TRACEPOINT\n");
-   run_perf_test(num_cpu, test_flags >> 4);
+   err = load_progs(filename);
+   if (!err)
+   run_perf_test(num_cpu, test_flags >> 4);
+
unload_progs();
}
 
if (test_flags & 0xC0) {
snprintf(filename, sizeof(filename),
 "%s_raw_tp_kern.o", argv[0]);
-   if (load_bpf_file(filename)) {
-   printf("%s", bpf_log_buf);
-   return 1;
-   }
  

[PATCH bpf-next v3 3/7] samples: bpf: refactor task_fd_query program with libbpf

2020-11-24 Thread Daniel T. Lee
This commit refactors the existing kprobe program with libbpf bpf
loader. To attach bpf program, this uses generic bpf_program__attach()
approach rather than using bpf_load's load_bpf_file().

To attach bpf to perf_event, instead of using previous ioctl method,
this commit uses bpf_program__attach_perf_event since it manages the
enable of perf_event and attach of BPF programs to it, which is much
more intuitive way to achieve.

Also, explicit close(fd) has been removed since event will be closed
inside bpf_link__destroy() automatically.

Furthermore, to prevent conflict of same named uprobe events, O_TRUNC
flag has been used to clear 'uprobe_events' interface.

Signed-off-by: Daniel T. Lee 
---
Changes in v2:
 - add static at global variable and drop {}
 - fix return error code on exit
 - restore DEBUGFS macro to absolute string path
 
 samples/bpf/Makefile |   2 +-
 samples/bpf/task_fd_query_user.c | 101 ++-
 2 files changed, 75 insertions(+), 28 deletions(-)

diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index d31e082c369e..3bffd42e1482 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -107,7 +107,7 @@ xdp_adjust_tail-objs := xdp_adjust_tail_user.o
 xdpsock-objs := xdpsock_user.o
 xsk_fwd-objs := xsk_fwd.o
 xdp_fwd-objs := xdp_fwd_user.o
-task_fd_query-objs := bpf_load.o task_fd_query_user.o $(TRACE_HELPERS)
+task_fd_query-objs := task_fd_query_user.o $(TRACE_HELPERS)
 xdp_sample_pkts-objs := xdp_sample_pkts_user.o $(TRACE_HELPERS)
 ibumad-objs := bpf_load.o ibumad_user.o $(TRACE_HELPERS)
 hbm-objs := hbm.o $(CGROUP_HELPERS)
diff --git a/samples/bpf/task_fd_query_user.c b/samples/bpf/task_fd_query_user.c
index b68bd2f8fdc9..f6b772faa348 100644
--- a/samples/bpf/task_fd_query_user.c
+++ b/samples/bpf/task_fd_query_user.c
@@ -15,12 +15,15 @@
 #include 
 #include 
 
+#include 
 #include 
-#include "bpf_load.h"
 #include "bpf_util.h"
 #include "perf-sys.h"
 #include "trace_helpers.h"
 
+static struct bpf_program *progs[2];
+static struct bpf_link *links[2];
+
 #define CHECK_PERROR_RET(condition) ({ \
int __ret = !!(condition);  \
if (__ret) {\
@@ -86,21 +89,22 @@ static int bpf_get_retprobe_bit(const char *event_type)
return ret;
 }
 
-static int test_debug_fs_kprobe(int prog_fd_idx, const char *fn_name,
+static int test_debug_fs_kprobe(int link_idx, const char *fn_name,
__u32 expected_fd_type)
 {
__u64 probe_offset, probe_addr;
__u32 len, prog_id, fd_type;
+   int err, event_fd;
char buf[256];
-   int err;
 
len = sizeof(buf);
-   err = bpf_task_fd_query(getpid(), event_fd[prog_fd_idx], 0, buf, &len,
+   event_fd = bpf_link__fd(links[link_idx]);
+   err = bpf_task_fd_query(getpid(), event_fd, 0, buf, &len,
&prog_id, &fd_type, &probe_offset,
&probe_addr);
if (err < 0) {
printf("FAIL: %s, for event_fd idx %d, fn_name %s\n",
-  __func__, prog_fd_idx, fn_name);
+  __func__, link_idx, fn_name);
perror(":");
return -1;
}
@@ -108,7 +112,7 @@ static int test_debug_fs_kprobe(int prog_fd_idx, const char 
*fn_name,
fd_type != expected_fd_type ||
probe_offset != 0x0 || probe_addr != 0x0) {
printf("FAIL: bpf_trace_event_query(event_fd[%d]):\n",
-  prog_fd_idx);
+  link_idx);
printf("buf: %s, fd_type: %u, probe_offset: 0x%llx,"
   " probe_addr: 0x%llx\n",
   buf, fd_type, probe_offset, probe_addr);
@@ -125,12 +129,13 @@ static int test_nondebug_fs_kuprobe_common(const char 
*event_type,
int is_return_bit = bpf_get_retprobe_bit(event_type);
int type = bpf_find_probe_type(event_type);
struct perf_event_attr attr = {};
-   int fd;
+   struct bpf_link *link;
+   int fd, err = -1;
 
if (type < 0 || is_return_bit < 0) {
printf("FAIL: %s incorrect type (%d) or is_return_bit (%d)\n",
__func__, type, is_return_bit);
-   return -1;
+   return err;
}
 
attr.sample_period = 1;
@@ -149,14 +154,21 @@ static int test_nondebug_fs_kuprobe_common(const char 
*event_type,
attr.type = type;
 
fd = sys_perf_event_open(&attr, -1, 0, -1, 0);
-   CHECK_PERROR_RET(fd < 0);
+   link = bpf_program__attach_perf_event(progs[0], fd);
+   if (libbpf_get_error(link)) {
+   printf("ERROR: bpf_program__attach_perf_event failed\n");
+   link = NULL;
+   close(fd);
+

[PATCH bpf-next v3 2/7] samples: bpf: refactor test_cgrp2_sock2 program with libbpf

2020-11-24 Thread Daniel T. Lee
This commit refactors the existing cgroup program with libbpf bpf
loader. The original test_cgrp2_sock2 has kept the bpf program
attached to the cgroup hierarchy even after the exit of the user program.
To implement the same functionality with libbpf, this commit uses the
BPF_LINK_PINNING to pin the link attachment even after it is closed.

Since this uses LINK instead of ATTACH, detach of bpf program from
cgroup with 'test_cgrp2_sock' is not used anymore.

The code to mount the bpffs was added to the .sh file in case it
was not mounted on /sys/fs/bpf. Additionally, to fix the problem that
the shell script cannot find the binary object from the current path,
the relative path './' has been added in front of the binary.

Fixes: 554ae6e792ef3 ("samples/bpf: add userspace example for prohibiting 
sockets")
Signed-off-by: Daniel T. Lee 
---
Changes in v2:
 - change to destroy link even after link__pin()
 - enhance error message
 
 samples/bpf/Makefile|  2 +-
 samples/bpf/test_cgrp2_sock2.c  | 61 -
 samples/bpf/test_cgrp2_sock2.sh | 21 +---
 3 files changed, 62 insertions(+), 22 deletions(-)

diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index 7c61118525f7..d31e082c369e 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -82,7 +82,7 @@ test_overhead-objs := bpf_load.o test_overhead_user.o
 test_cgrp2_array_pin-objs := test_cgrp2_array_pin.o
 test_cgrp2_attach-objs := test_cgrp2_attach.o
 test_cgrp2_sock-objs := test_cgrp2_sock.o
-test_cgrp2_sock2-objs := bpf_load.o test_cgrp2_sock2.o
+test_cgrp2_sock2-objs := test_cgrp2_sock2.o
 xdp1-objs := xdp1_user.o
 # reuse xdp1 source intentionally
 xdp2-objs := xdp1_user.o
diff --git a/samples/bpf/test_cgrp2_sock2.c b/samples/bpf/test_cgrp2_sock2.c
index a9277b118c33..e7060aaa2f5a 100644
--- a/samples/bpf/test_cgrp2_sock2.c
+++ b/samples/bpf/test_cgrp2_sock2.c
@@ -20,9 +20,9 @@
 #include 
 #include 
 #include 
+#include 
 
 #include "bpf_insn.h"
-#include "bpf_load.h"
 
 static int usage(const char *argv0)
 {
@@ -32,37 +32,64 @@ static int usage(const char *argv0)
 
 int main(int argc, char **argv)
 {
-   int cg_fd, ret, filter_id = 0;
+   int cg_fd, err, ret = EXIT_FAILURE, filter_id = 0, prog_cnt = 0;
+   const char *link_pin_path = "/sys/fs/bpf/test_cgrp2_sock2";
+   struct bpf_link *link = NULL;
+   struct bpf_program *progs[2];
+   struct bpf_program *prog;
+   struct bpf_object *obj;
 
if (argc < 3)
return usage(argv[0]);
 
+   if (argc > 3)
+   filter_id = atoi(argv[3]);
+
cg_fd = open(argv[1], O_DIRECTORY | O_RDONLY);
if (cg_fd < 0) {
printf("Failed to open cgroup path: '%s'\n", strerror(errno));
-   return EXIT_FAILURE;
+   return ret;
}
 
-   if (load_bpf_file(argv[2]))
-   return EXIT_FAILURE;
-
-   printf("Output from kernel verifier:\n%s\n---\n", bpf_log_buf);
+   obj = bpf_object__open_file(argv[2], NULL);
+   if (libbpf_get_error(obj)) {
+   printf("ERROR: opening BPF object file failed\n");
+   return ret;
+   }
 
-   if (argc > 3)
-   filter_id = atoi(argv[3]);
+   bpf_object__for_each_program(prog, obj) {
+   progs[prog_cnt] = prog;
+   prog_cnt++;
+   }
 
if (filter_id >= prog_cnt) {
printf("Invalid program id; program not found in file\n");
-   return EXIT_FAILURE;
+   goto cleanup;
+   }
+
+   /* load BPF program */
+   if (bpf_object__load(obj)) {
+   printf("ERROR: loading BPF object file failed\n");
+   goto cleanup;
}
 
-   ret = bpf_prog_attach(prog_fd[filter_id], cg_fd,
- BPF_CGROUP_INET_SOCK_CREATE, 0);
-   if (ret < 0) {
-   printf("Failed to attach prog to cgroup: '%s'\n",
-  strerror(errno));
-   return EXIT_FAILURE;
+   link = bpf_program__attach_cgroup(progs[filter_id], cg_fd);
+   if (libbpf_get_error(link)) {
+   printf("ERROR: bpf_program__attach failed\n");
+   link = NULL;
+   goto cleanup;
}
 
-   return EXIT_SUCCESS;
+   err = bpf_link__pin(link, link_pin_path);
+   if (err < 0) {
+   printf("ERROR: bpf_link__pin failed: %d\n", err);
+   goto cleanup;
+   }
+
+   ret = EXIT_SUCCESS;
+
+cleanup:
+   bpf_link__destroy(link);
+   bpf_object__close(obj);
+   return ret;
 }
diff --git a/samples/bpf/test_cgrp2_sock2.sh b/samples/bpf/test_cgrp2_sock2.sh
index 0f396a86e0cb..6a3dbe642b2b 100755
--- a/samples/bpf/test_cgrp2_sock2.sh
+++ b/samples/bpf/test_cgrp2_sock2.sh
@@ -1,6 +1,9 @

[PATCH bpf-next v3 0/7] bpf: remove bpf_load loader completely

2020-11-24 Thread Daniel T. Lee
The numerous refactorings that rewrite BPF programs written with
bpf_load to use the libbpf loader have finally been completed. As a
result, no BPF programs within the kernel use bpf_load any longer.

This patchset refactors remaining bpf programs with libbpf and
completely removes bpf_load, an outdated bpf loader that is difficult
to keep up with the latest kernel BPF and causes confusion.

Changes in v2:
 - drop 'move tracing helpers to trace_helper' patch
 - add link pinning to prevent cleaning up on process exit
 - add static at global variable and remove unused variable
 - change to destroy link even after link__pin()
 - fix return error code on exit
 - merge commit with changing Makefile

Changes in v3:
 - cleanup bpf_link, bpf_object and cgroup fd both on success and error

Daniel T. Lee (7):
  samples: bpf: refactor hbm program with libbpf
  samples: bpf: refactor test_cgrp2_sock2 program with libbpf
  samples: bpf: refactor task_fd_query program with libbpf
  samples: bpf: refactor ibumad program with libbpf
  samples: bpf: refactor test_overhead program with libbpf
  samples: bpf: fix lwt_len_hist reusing previous BPF map
  samples: bpf: remove bpf_load loader completely

 samples/bpf/.gitignore   |   3 +
 samples/bpf/Makefile |  20 +-
 samples/bpf/bpf_load.c   | 667 ---
 samples/bpf/bpf_load.h   |  57 ---
 samples/bpf/do_hbm_test.sh   |  32 +-
 samples/bpf/hbm.c| 111 ++---
 samples/bpf/hbm_kern.h   |   2 +-
 samples/bpf/ibumad_kern.c|  26 +-
 samples/bpf/ibumad_user.c|  71 +++-
 samples/bpf/lwt_len_hist.sh  |   2 +
 samples/bpf/task_fd_query_user.c | 101 +++--
 samples/bpf/test_cgrp2_sock2.c   |  61 ++-
 samples/bpf/test_cgrp2_sock2.sh  |  21 +-
 samples/bpf/test_lwt_bpf.sh  |   0
 samples/bpf/test_overhead_user.c |  82 ++--
 samples/bpf/xdp2skb_meta_kern.c  |   2 +-
 16 files changed, 350 insertions(+), 908 deletions(-)
 delete mode 100644 samples/bpf/bpf_load.c
 delete mode 100644 samples/bpf/bpf_load.h
 mode change 100644 => 100755 samples/bpf/lwt_len_hist.sh
 mode change 100644 => 100755 samples/bpf/test_lwt_bpf.sh

-- 
2.25.1



Re: [PATCH bpf-next v2 1/7] samples: bpf: refactor hbm program with libbpf

2020-11-24 Thread Daniel T. Lee
Sorry for the late reply.

On Sat, Nov 21, 2020 at 11:34 AM Martin KaFai Lau  wrote:
>
> On Thu, Nov 19, 2020 at 03:06:11PM +, Daniel T. Lee wrote:
> [ ... ]
>
> >  static int run_bpf_prog(char *prog, int cg_id)
> > [ ... ]
> >   if (!outFlag)
> > - type = BPF_CGROUP_INET_INGRESS;
> > - if (bpf_prog_attach(bpfprog_fd, cg1, type, 0)) {
> > - printf("ERROR: bpf_prog_attach fails!\n");
> > - log_err("Attaching prog");
> > + bpf_program__set_expected_attach_type(bpf_prog, 
> > BPF_CGROUP_INET_INGRESS);
> > +
> > + link = bpf_program__attach_cgroup(bpf_prog, cg1);
> > + if (libbpf_get_error(link)) {
> > + fprintf(stderr, "ERROR: bpf_program__attach_cgroup failed\n");
> > + link = NULL;
> Again, this is not needed.  bpf_link__destroy() can
> handle both NULL and error pointer.  Please take a look
> at the bpf_link__destroy() in libbpf.c
>
> > + goto err;
> > + }
> > [ ... ]

> > @@ -398,10 +400,10 @@ static int run_bpf_prog(char *prog, int cg_id)
> >  err:
> >   rc = 1;
> >
> > - if (cg1)
> > - close(cg1);
> > + bpf_link__destroy(link);
> > + close(cg1);
> >   cleanup_cgroup_environment();
> > -
> > + bpf_object__close(obj);
> The bpf_* cleanup condition still looks wrong.
>
> I can understand why it does not want to cleanup_cgroup_environment()
> on the success case because the sh script may want to run test under this
> cgroup.
>
> However, the bpf_link__destroy(), bpf_object__close(), and
> even close(cg1) should be done in both success and error
> cases.
>
> The cg1 test still looks wrong also.  The cg1 should
> be init to -1 and then test for "if (cg1 == -1)".

Thanks for pointing this out.
I'll remove NULL initialize and fix this on the next patch.


--
Best,
Daniel T. Lee


[PATCH bpf-next v2 6/7] samples: bpf: fix lwt_len_hist reusing previous BPF map

2020-11-19 Thread Daniel T. Lee
Currently, lwt_len_hist's map lwt_len_hist_map uses pinning, and the
map isn't cleared on test end. This leads to reuse of that map for
each test, which prevents the results of the test from being accurate.

This commit fixes the problem by removing the pinned map from bpffs.
Also, this commit adds the executable permission to the shell script
files.

Fixes: f74599f7c5309 ("bpf: Add tests and samples for LWT-BPF")
Signed-off-by: Daniel T. Lee 
---
 samples/bpf/lwt_len_hist.sh | 2 ++
 samples/bpf/test_lwt_bpf.sh | 0
 2 files changed, 2 insertions(+)
 mode change 100644 => 100755 samples/bpf/lwt_len_hist.sh
 mode change 100644 => 100755 samples/bpf/test_lwt_bpf.sh

diff --git a/samples/bpf/lwt_len_hist.sh b/samples/bpf/lwt_len_hist.sh
old mode 100644
new mode 100755
index 090b96eaf7f7..0eda9754f50b
--- a/samples/bpf/lwt_len_hist.sh
+++ b/samples/bpf/lwt_len_hist.sh
@@ -8,6 +8,8 @@ VETH1=tst_lwt1b
 TRACE_ROOT=/sys/kernel/debug/tracing
 
 function cleanup {
+   # To reset saved histogram, remove pinned map
+   rm /sys/fs/bpf/tc/globals/lwt_len_hist_map
ip route del 192.168.253.2/32 dev $VETH0 2> /dev/null
ip link del $VETH0 2> /dev/null
ip link del $VETH1 2> /dev/null
diff --git a/samples/bpf/test_lwt_bpf.sh b/samples/bpf/test_lwt_bpf.sh
old mode 100644
new mode 100755
-- 
2.25.1



[PATCH bpf-next v2 7/7] samples: bpf: remove bpf_load loader completely

2020-11-19 Thread Daniel T. Lee
The numerous refactorings that rewrite BPF programs written with
bpf_load to use the libbpf loader have finally been completed, with the
result that no BPF program within the kernel uses bpf_load any longer.

This commit removes bpf_load, an outdated bpf loader that is difficult
to keep up with the latest kernel BPF and causes confusion.

Also, this commit removes the unused trace_helper and bpf_load from
samples/bpf target objects from Makefile.

Signed-off-by: Daniel T. Lee 
Acked-by: Jesper Dangaard Brouer 
Acked-by: Andrii Nakryiko 
---
Changes in v2:
 - merge commit with changing Makefile

 samples/bpf/Makefile|  10 +-
 samples/bpf/bpf_load.c  | 667 
 samples/bpf/bpf_load.h  |  57 ---
 samples/bpf/xdp2skb_meta_kern.c |   2 +-
 4 files changed, 5 insertions(+), 731 deletions(-)
 delete mode 100644 samples/bpf/bpf_load.c
 delete mode 100644 samples/bpf/bpf_load.h

diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index 25380e04897e..05db041f8b18 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -73,7 +73,7 @@ tracex5-objs := tracex5_user.o $(TRACE_HELPERS)
 tracex6-objs := tracex6_user.o
 tracex7-objs := tracex7_user.o
 test_probe_write_user-objs := test_probe_write_user_user.o
-trace_output-objs := trace_output_user.o $(TRACE_HELPERS)
+trace_output-objs := trace_output_user.o
 lathist-objs := lathist_user.o
 offwaketime-objs := offwaketime_user.o $(TRACE_HELPERS)
 spintest-objs := spintest_user.o $(TRACE_HELPERS)
@@ -91,8 +91,8 @@ test_current_task_under_cgroup-objs := $(CGROUP_HELPERS) \
   test_current_task_under_cgroup_user.o
 trace_event-objs := trace_event_user.o $(TRACE_HELPERS)
 sampleip-objs := sampleip_user.o $(TRACE_HELPERS)
-tc_l2_redirect-objs := bpf_load.o tc_l2_redirect_user.o
-lwt_len_hist-objs := bpf_load.o lwt_len_hist_user.o
+tc_l2_redirect-objs := tc_l2_redirect_user.o
+lwt_len_hist-objs := lwt_len_hist_user.o
 xdp_tx_iptunnel-objs := xdp_tx_iptunnel_user.o
 test_map_in_map-objs := test_map_in_map_user.o
 per_socket_stats_example-objs := cookie_uid_helper_example.o
@@ -108,7 +108,7 @@ xdpsock-objs := xdpsock_user.o
 xsk_fwd-objs := xsk_fwd.o
 xdp_fwd-objs := xdp_fwd_user.o
 task_fd_query-objs := task_fd_query_user.o $(TRACE_HELPERS)
-xdp_sample_pkts-objs := xdp_sample_pkts_user.o $(TRACE_HELPERS)
+xdp_sample_pkts-objs := xdp_sample_pkts_user.o
 ibumad-objs := ibumad_user.o
 hbm-objs := hbm.o $(CGROUP_HELPERS)
 
@@ -197,8 +197,6 @@ TPROGS_CFLAGS += --sysroot=$(SYSROOT)
 TPROGS_LDFLAGS := -L$(SYSROOT)/usr/lib
 endif
 
-TPROGCFLAGS_bpf_load.o += -Wno-unused-variable
-
 TPROGS_LDLIBS  += $(LIBBPF) -lelf -lz
 TPROGLDLIBS_tracex4+= -lrt
 TPROGLDLIBS_trace_output   += -lrt
diff --git a/samples/bpf/bpf_load.c b/samples/bpf/bpf_load.c
deleted file mode 100644
index c5ad528f046e..
--- a/samples/bpf/bpf_load.c
+++ /dev/null
@@ -1,667 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include "bpf_load.h"
-#include "perf-sys.h"
-
-#define DEBUGFS "/sys/kernel/debug/tracing/"
-
-static char license[128];
-static int kern_version;
-static bool processed_sec[128];
-char bpf_log_buf[BPF_LOG_BUF_SIZE];
-int map_fd[MAX_MAPS];
-int prog_fd[MAX_PROGS];
-int event_fd[MAX_PROGS];
-int prog_cnt;
-int prog_array_fd = -1;
-
-struct bpf_map_data map_data[MAX_MAPS];
-int map_data_count;
-
-static int populate_prog_array(const char *event, int prog_fd)
-{
-   int ind = atoi(event), err;
-
-   err = bpf_map_update_elem(prog_array_fd, &ind, &prog_fd, BPF_ANY);
-   if (err < 0) {
-   printf("failed to store prog_fd in prog_array\n");
-   return -1;
-   }
-   return 0;
-}
-
-static int write_kprobe_events(const char *val)
-{
-   int fd, ret, flags;
-
-   if (val == NULL)
-   return -1;
-   else if (val[0] == '\0')
-   flags = O_WRONLY | O_TRUNC;
-   else
-   flags = O_WRONLY | O_APPEND;
-
-   fd = open(DEBUGFS "kprobe_events", flags);
-
-   ret = write(fd, val, strlen(val));
-   close(fd);
-
-   return ret;
-}
-
-static int load_and_attach(const char *event, struct bpf_insn *prog, int size)
-{
-   bool is_socket = strncmp(event, "socket", 6) == 0;
-   bool is_kprobe = strncmp(event, "kprobe/", 7) == 0;
-   bool is_kretprobe = strncmp(event, "kretprobe/", 10) == 0;
-   bool is_tracepoint = strncmp(event, "tracepoint/", 11) == 0;
-   bool is_raw_tracepoint = strncmp(event, "raw_tracepoint/", 15) == 0;
-   bool is_xdp = strncmp(event,

[PATCH bpf-next v2 5/7] samples: bpf: refactor test_overhead program with libbpf

2020-11-19 Thread Daniel T. Lee
This commit refactors the existing program with libbpf bpf loader.
Since the kprobe, tracepoint and raw_tracepoint bpf programs can all be
attached with the single bpf_program__attach() interface, the
corresponding function of libbpf is used here.

Rather than specifying the number of cpus inside the code, this commit
uses the number of available cpus with _SC_NPROCESSORS_ONLN.

Signed-off-by: Daniel T. Lee 
Acked-by: Andrii Nakryiko 
---
Changes in v2:
 - add static at global variable and drop {}

 samples/bpf/Makefile |  2 +-
 samples/bpf/test_overhead_user.c | 82 +++-
 2 files changed, 60 insertions(+), 24 deletions(-)

diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index 09a249477554..25380e04897e 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -78,7 +78,7 @@ lathist-objs := lathist_user.o
 offwaketime-objs := offwaketime_user.o $(TRACE_HELPERS)
 spintest-objs := spintest_user.o $(TRACE_HELPERS)
 map_perf_test-objs := map_perf_test_user.o
-test_overhead-objs := bpf_load.o test_overhead_user.o
+test_overhead-objs := test_overhead_user.o
 test_cgrp2_array_pin-objs := test_cgrp2_array_pin.o
 test_cgrp2_attach-objs := test_cgrp2_attach.o
 test_cgrp2_sock-objs := test_cgrp2_sock.o
diff --git a/samples/bpf/test_overhead_user.c b/samples/bpf/test_overhead_user.c
index 94f74112a20e..819a6fe86f89 100644
--- a/samples/bpf/test_overhead_user.c
+++ b/samples/bpf/test_overhead_user.c
@@ -18,10 +18,14 @@
 #include 
 #include 
 #include 
-#include "bpf_load.h"
+#include 
 
 #define MAX_CNT 100
 
+static struct bpf_link *links[2];
+static struct bpf_object *obj;
+static int cnt;
+
 static __u64 time_get_ns(void)
 {
struct timespec ts;
@@ -115,20 +119,54 @@ static void run_perf_test(int tasks, int flags)
}
 }
 
+static int load_progs(char *filename)
+{
+   struct bpf_program *prog;
+   int err = 0;
+
+   obj = bpf_object__open_file(filename, NULL);
+   err = libbpf_get_error(obj);
+   if (err < 0) {
+   fprintf(stderr, "ERROR: opening BPF object file failed\n");
+   return err;
+   }
+
+   /* load BPF program */
+   err = bpf_object__load(obj);
+   if (err < 0) {
+   fprintf(stderr, "ERROR: loading BPF object file failed\n");
+   return err;
+   }
+
+   bpf_object__for_each_program(prog, obj) {
+   links[cnt] = bpf_program__attach(prog);
+   err = libbpf_get_error(links[cnt]);
+   if (err < 0) {
+   fprintf(stderr, "ERROR: bpf_program__attach failed\n");
+   links[cnt] = NULL;
+   return err;
+   }
+   cnt++;
+   }
+
+   return err;
+}
+
 static void unload_progs(void)
 {
-   close(prog_fd[0]);
-   close(prog_fd[1]);
-   close(event_fd[0]);
-   close(event_fd[1]);
+   while (cnt)
+   bpf_link__destroy(links[--cnt]);
+
+   bpf_object__close(obj);
 }
 
 int main(int argc, char **argv)
 {
struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
-   char filename[256];
-   int num_cpu = 8;
+   int num_cpu = sysconf(_SC_NPROCESSORS_ONLN);
int test_flags = ~0;
+   char filename[256];
+   int err = 0;
 
setrlimit(RLIMIT_MEMLOCK, &r);
 
@@ -145,38 +183,36 @@ int main(int argc, char **argv)
if (test_flags & 0xC) {
snprintf(filename, sizeof(filename),
 "%s_kprobe_kern.o", argv[0]);
-   if (load_bpf_file(filename)) {
-   printf("%s", bpf_log_buf);
-   return 1;
-   }
+
printf("w/KPROBE\n");
-   run_perf_test(num_cpu, test_flags >> 2);
+   err = load_progs(filename);
+   if (!err)
+   run_perf_test(num_cpu, test_flags >> 2);
+
unload_progs();
}
 
if (test_flags & 0x30) {
snprintf(filename, sizeof(filename),
 "%s_tp_kern.o", argv[0]);
-   if (load_bpf_file(filename)) {
-   printf("%s", bpf_log_buf);
-   return 1;
-   }
printf("w/TRACEPOINT\n");
-   run_perf_test(num_cpu, test_flags >> 4);
+   err = load_progs(filename);
+   if (!err)
+   run_perf_test(num_cpu, test_flags >> 4);
+
unload_progs();
}
 
if (test_flags & 0xC0) {
snprintf(filename, sizeof(filename),
 "%s_raw_tp_kern.o", argv[0]);
-   if (load_bpf_file(filename)) {
-   printf("%s", bpf_log_buf);
-   return 1;
-   }
  

[PATCH bpf-next v2 3/7] samples: bpf: refactor task_fd_query program with libbpf

2020-11-19 Thread Daniel T. Lee
This commit refactors the existing kprobe program with libbpf bpf
loader. To attach bpf program, this uses generic bpf_program__attach()
approach rather than using bpf_load's load_bpf_file().

To attach bpf to a perf_event, instead of using the previous ioctl
method, this commit uses bpf_program__attach_perf_event since it
manages enabling the perf_event and attaching BPF programs to it, which
is a much more intuitive way to achieve this.

Also, explicit close(fd) has been removed since event will be closed
inside bpf_link__destroy() automatically.

Furthermore, to prevent conflict of same named uprobe events, O_TRUNC
flag has been used to clear 'uprobe_events' interface.

Signed-off-by: Daniel T. Lee 
---
Changes in v2:
 - add static at global variable and drop {}
 - fix return error code on exit
 - restore DEBUGFS macro to absolute string path

 samples/bpf/Makefile |   2 +-
 samples/bpf/task_fd_query_user.c | 101 ++-
 2 files changed, 75 insertions(+), 28 deletions(-)

diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index d31e082c369e..3bffd42e1482 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -107,7 +107,7 @@ xdp_adjust_tail-objs := xdp_adjust_tail_user.o
 xdpsock-objs := xdpsock_user.o
 xsk_fwd-objs := xsk_fwd.o
 xdp_fwd-objs := xdp_fwd_user.o
-task_fd_query-objs := bpf_load.o task_fd_query_user.o $(TRACE_HELPERS)
+task_fd_query-objs := task_fd_query_user.o $(TRACE_HELPERS)
 xdp_sample_pkts-objs := xdp_sample_pkts_user.o $(TRACE_HELPERS)
 ibumad-objs := bpf_load.o ibumad_user.o $(TRACE_HELPERS)
 hbm-objs := hbm.o $(CGROUP_HELPERS)
diff --git a/samples/bpf/task_fd_query_user.c b/samples/bpf/task_fd_query_user.c
index b68bd2f8fdc9..f6b772faa348 100644
--- a/samples/bpf/task_fd_query_user.c
+++ b/samples/bpf/task_fd_query_user.c
@@ -15,12 +15,15 @@
 #include 
 #include 
 
+#include 
 #include 
-#include "bpf_load.h"
 #include "bpf_util.h"
 #include "perf-sys.h"
 #include "trace_helpers.h"
 
+static struct bpf_program *progs[2];
+static struct bpf_link *links[2];
+
 #define CHECK_PERROR_RET(condition) ({ \
int __ret = !!(condition);  \
if (__ret) {\
@@ -86,21 +89,22 @@ static int bpf_get_retprobe_bit(const char *event_type)
return ret;
 }
 
-static int test_debug_fs_kprobe(int prog_fd_idx, const char *fn_name,
+static int test_debug_fs_kprobe(int link_idx, const char *fn_name,
__u32 expected_fd_type)
 {
__u64 probe_offset, probe_addr;
__u32 len, prog_id, fd_type;
+   int err, event_fd;
char buf[256];
-   int err;
 
len = sizeof(buf);
-   err = bpf_task_fd_query(getpid(), event_fd[prog_fd_idx], 0, buf, &len,
+   event_fd = bpf_link__fd(links[link_idx]);
+   err = bpf_task_fd_query(getpid(), event_fd, 0, buf, &len,
&prog_id, &fd_type, &probe_offset,
&probe_addr);
if (err < 0) {
printf("FAIL: %s, for event_fd idx %d, fn_name %s\n",
-  __func__, prog_fd_idx, fn_name);
+  __func__, link_idx, fn_name);
perror(":");
return -1;
}
@@ -108,7 +112,7 @@ static int test_debug_fs_kprobe(int prog_fd_idx, const char 
*fn_name,
fd_type != expected_fd_type ||
probe_offset != 0x0 || probe_addr != 0x0) {
printf("FAIL: bpf_trace_event_query(event_fd[%d]):\n",
-  prog_fd_idx);
+  link_idx);
printf("buf: %s, fd_type: %u, probe_offset: 0x%llx,"
   " probe_addr: 0x%llx\n",
   buf, fd_type, probe_offset, probe_addr);
@@ -125,12 +129,13 @@ static int test_nondebug_fs_kuprobe_common(const char 
*event_type,
int is_return_bit = bpf_get_retprobe_bit(event_type);
int type = bpf_find_probe_type(event_type);
struct perf_event_attr attr = {};
-   int fd;
+   struct bpf_link *link;
+   int fd, err = -1;
 
if (type < 0 || is_return_bit < 0) {
printf("FAIL: %s incorrect type (%d) or is_return_bit (%d)\n",
__func__, type, is_return_bit);
-   return -1;
+   return err;
}
 
attr.sample_period = 1;
@@ -149,14 +154,21 @@ static int test_nondebug_fs_kuprobe_common(const char 
*event_type,
attr.type = type;
 
fd = sys_perf_event_open(&attr, -1, 0, -1, 0);
-   CHECK_PERROR_RET(fd < 0);
+   link = bpf_program__attach_perf_event(progs[0], fd);
+   if (libbpf_get_error(link)) {
+   printf("ERROR: bpf_program__attach_perf_event failed\n");
+   link = NULL;
+   close(fd);
+

[PATCH bpf-next v2 4/7] samples: bpf: refactor ibumad program with libbpf

2020-11-19 Thread Daniel T. Lee
This commit refactors the existing ibumad program with libbpf bpf
loader. Attach/detach of Tracepoint bpf programs has been managed
with the generic bpf_program__attach() and bpf_link__destroy() from
the libbpf.

Also, instead of using the previous BPF MAP definition, this commit
refactors ibumad MAP definition with the new BTF-defined MAP format.

To verify that this bpf program works without an infiniband device,
try loading ib_umad kernel module and test the program as follows:

# modprobe ib_umad
# ./ibumad

Moreover, TRACE_HELPERS has been removed from the Makefile since it is
not used on this program.

Signed-off-by: Daniel T. Lee 
---
Changes in v2:
 - add static at global variable and drop {}
 - fix return error code on exit

 samples/bpf/Makefile  |  2 +-
 samples/bpf/ibumad_kern.c | 26 +++---
 samples/bpf/ibumad_user.c | 71 +--
 3 files changed, 68 insertions(+), 31 deletions(-)

diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index 3bffd42e1482..09a249477554 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -109,7 +109,7 @@ xsk_fwd-objs := xsk_fwd.o
 xdp_fwd-objs := xdp_fwd_user.o
 task_fd_query-objs := task_fd_query_user.o $(TRACE_HELPERS)
 xdp_sample_pkts-objs := xdp_sample_pkts_user.o $(TRACE_HELPERS)
-ibumad-objs := bpf_load.o ibumad_user.o $(TRACE_HELPERS)
+ibumad-objs := ibumad_user.o
 hbm-objs := hbm.o $(CGROUP_HELPERS)
 
 # Tell kbuild to always build the programs
diff --git a/samples/bpf/ibumad_kern.c b/samples/bpf/ibumad_kern.c
index 3a91b4c1989a..26dcd4dde946 100644
--- a/samples/bpf/ibumad_kern.c
+++ b/samples/bpf/ibumad_kern.c
@@ -16,19 +16,19 @@
 #include 
 
 
-struct bpf_map_def SEC("maps") read_count = {
-   .type= BPF_MAP_TYPE_ARRAY,
-   .key_size= sizeof(u32), /* class; u32 required */
-   .value_size  = sizeof(u64), /* count of mads read */
-   .max_entries = 256, /* Room for all Classes */
-};
-
-struct bpf_map_def SEC("maps") write_count = {
-   .type= BPF_MAP_TYPE_ARRAY,
-   .key_size= sizeof(u32), /* class; u32 required */
-   .value_size  = sizeof(u64), /* count of mads written */
-   .max_entries = 256, /* Room for all Classes */
-};
+struct {
+   __uint(type, BPF_MAP_TYPE_ARRAY);
+   __type(key, u32); /* class; u32 required */
+   __type(value, u64); /* count of mads read */
+   __uint(max_entries, 256); /* Room for all Classes */
+} read_count SEC(".maps");
+
+struct {
+   __uint(type, BPF_MAP_TYPE_ARRAY);
+   __type(key, u32); /* class; u32 required */
+   __type(value, u64); /* count of mads written */
+   __uint(max_entries, 256); /* Room for all Classes */
+} write_count SEC(".maps");
 
 #undef DEBUG
 #ifndef DEBUG
diff --git a/samples/bpf/ibumad_user.c b/samples/bpf/ibumad_user.c
index fa06eef31a84..d83d8102f489 100644
--- a/samples/bpf/ibumad_user.c
+++ b/samples/bpf/ibumad_user.c
@@ -23,10 +23,15 @@
 #include 
 #include 
 
-#include "bpf_load.h"
+#include 
 #include "bpf_util.h"
 #include 
 
+static struct bpf_link *tp_links[3];
+static struct bpf_object *obj;
+static int map_fd[2];
+static int tp_cnt;
+
 static void dump_counts(int fd)
 {
__u32 key;
@@ -53,6 +58,11 @@ static void dump_all_counts(void)
 static void dump_exit(int sig)
 {
dump_all_counts();
+   /* Detach tracepoints */
+   while (tp_cnt)
+   bpf_link__destroy(tp_links[--tp_cnt]);
+
+   bpf_object__close(obj);
exit(0);
 }
 
@@ -73,19 +83,11 @@ static void usage(char *cmd)
 
 int main(int argc, char **argv)
 {
+   struct bpf_program *prog;
unsigned long delay = 5;
+   char filename[256];
int longindex = 0;
-   int opt;
-   char bpf_file[256];
-
-   /* Create the eBPF kernel code path name.
-* This follows the pattern of all of the other bpf samples
-*/
-   snprintf(bpf_file, sizeof(bpf_file), "%s_kern.o", argv[0]);
-
-   /* Do one final dump when exiting */
-   signal(SIGINT, dump_exit);
-   signal(SIGTERM, dump_exit);
+   int opt, err = -1;
 
while ((opt = getopt_long(argc, argv, "hd:rSw",
  long_options, &longindex)) != -1) {
@@ -107,16 +109,51 @@ int main(int argc, char **argv)
}
}
 
-   if (load_bpf_file(bpf_file)) {
-   fprintf(stderr, "ERROR: failed to load eBPF from file : %s\n",
-   bpf_file);
-   return 1;
+   /* Do one final dump when exiting */
+   signal(SIGINT, dump_exit);
+   signal(SIGTERM, dump_exit);
+
+   snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+   obj = bpf_object__open_file(filename, NULL);
+   if (libbpf_get_error(obj)) {
+   fprintf(stderr, "ERROR: opening BPF object file failed\n");
+   return err;
+   

[PATCH bpf-next v2 0/7] bpf: remove bpf_load loader completely

2020-11-19 Thread Daniel T. Lee
Numerous refactoring that rewrites BPF programs written with bpf_load
to use the libbpf loader was finally completed, resulting in BPF
programs using bpf_load within the kernel being completely no longer
present.

This patchset refactors the remaining bpf programs with libbpf and
completely removes bpf_load, an outdated bpf loader that is difficult
to keep in sync with the latest kernel BPF and causes confusion.

Changes in v2:
 - drop 'move tracing helpers to trace_helper' patch
 - add link pinning to prevent cleaning up on process exit
 - add static at global variable and remove unused variable
 - change to destroy link even after link__pin()
 - fix return error code on exit
 - merge commit with changing Makefile

Daniel T. Lee (7):
  samples: bpf: refactor hbm program with libbpf
  samples: bpf: refactor test_cgrp2_sock2 program with libbpf
  samples: bpf: refactor task_fd_query program with libbpf
  samples: bpf: refactor ibumad program with libbpf
  samples: bpf: refactor test_overhead program with libbpf
  samples: bpf: fix lwt_len_hist reusing previous BPF map
  samples: bpf: remove bpf_load loader completely

 samples/bpf/.gitignore   |   3 +
 samples/bpf/Makefile |  20 +-
 samples/bpf/bpf_load.c   | 667 ---
 samples/bpf/bpf_load.h   |  57 ---
 samples/bpf/do_hbm_test.sh   |  32 +-
 samples/bpf/hbm.c| 106 ++---
 samples/bpf/hbm_kern.h   |   2 +-
 samples/bpf/ibumad_kern.c|  26 +-
 samples/bpf/ibumad_user.c|  71 +++-
 samples/bpf/lwt_len_hist.sh  |   2 +
 samples/bpf/task_fd_query_user.c | 101 +++--
 samples/bpf/test_cgrp2_sock2.c   |  61 ++-
 samples/bpf/test_cgrp2_sock2.sh  |  21 +-
 samples/bpf/test_lwt_bpf.sh  |   0
 samples/bpf/test_overhead_user.c |  82 ++--
 samples/bpf/xdp2skb_meta_kern.c  |   2 +-
 16 files changed, 345 insertions(+), 908 deletions(-)
 delete mode 100644 samples/bpf/bpf_load.c
 delete mode 100644 samples/bpf/bpf_load.h
 mode change 100644 => 100755 samples/bpf/lwt_len_hist.sh
 mode change 100644 => 100755 samples/bpf/test_lwt_bpf.sh

-- 
2.25.1



[PATCH bpf-next v2 1/7] samples: bpf: refactor hbm program with libbpf

2020-11-19 Thread Daniel T. Lee
This commit refactors the existing cgroup programs with libbpf
bpf loader. Since bpf_program__attach doesn't support cgroup program
attachment, this explicitly attaches cgroup bpf program with
bpf_program__attach_cgroup(bpf_prog, cg1).

Also, to change attach_type of bpf program, this uses libbpf's
bpf_program__set_expected_attach_type helper to switch EGRESS to
INGRESS. To keep bpf program attached to the cgroup hierarchy even
after the exit, this commit uses the BPF_LINK_PINNING to pin the link
attachment even after it is closed.

Besides, this program was broken due to a typo in the BPF MAP
definition. This commit solves the problem by fixing the 'queue_stats'
map struct from hvm_queue_stats -> hbm_queue_stats.

Fixes: 36b5d471135c ("selftests/bpf: samples/bpf: Split off legacy stuff from 
bpf_helpers.h")
Signed-off-by: Daniel T. Lee 
---
Changes in v2:
 - restore read_trace_pipe2
 - remove unnecessary return code and cgroup fd compare
 - add static at global variable and remove unused variable
 - change cgroup path with unified controller (/unified/)
 - add link pinning to prevent cleaning up on process exit

 samples/bpf/.gitignore |   3 ++
 samples/bpf/Makefile   |   2 +-
 samples/bpf/do_hbm_test.sh |  32 +--
 samples/bpf/hbm.c  | 106 +++--
 samples/bpf/hbm_kern.h |   2 +-
 5 files changed, 73 insertions(+), 72 deletions(-)

diff --git a/samples/bpf/.gitignore b/samples/bpf/.gitignore
index b2f29bc8dc43..0b9548ea8477 100644
--- a/samples/bpf/.gitignore
+++ b/samples/bpf/.gitignore
@@ -52,3 +52,6 @@ xdp_tx_iptunnel
 xdpsock
 xsk_fwd
 testfile.img
+hbm_out.log
+iperf.*
+*.out
diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index aeebf5d12f32..7c61118525f7 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -110,7 +110,7 @@ xdp_fwd-objs := xdp_fwd_user.o
 task_fd_query-objs := bpf_load.o task_fd_query_user.o $(TRACE_HELPERS)
 xdp_sample_pkts-objs := xdp_sample_pkts_user.o $(TRACE_HELPERS)
 ibumad-objs := bpf_load.o ibumad_user.o $(TRACE_HELPERS)
-hbm-objs := bpf_load.o hbm.o $(CGROUP_HELPERS)
+hbm-objs := hbm.o $(CGROUP_HELPERS)
 
 # Tell kbuild to always build the programs
 always-y := $(tprogs-y)
diff --git a/samples/bpf/do_hbm_test.sh b/samples/bpf/do_hbm_test.sh
index ffe4c0607341..21790ea5c460 100755
--- a/samples/bpf/do_hbm_test.sh
+++ b/samples/bpf/do_hbm_test.sh
@@ -91,6 +91,16 @@ qdisc=""
 flags=""
 do_stats=0
 
+BPFFS=/sys/fs/bpf
+function config_bpffs () {
+   if mount | grep $BPFFS > /dev/null; then
+   echo "bpffs already mounted"
+   else
+   echo "bpffs not mounted. Mounting..."
+   mount -t bpf none $BPFFS
+   fi
+}
+
 function start_hbm () {
   rm -f hbm.out
   echo "./hbm $dir -n $id -r $rate -t $dur $flags $dbg $prog" > hbm.out
@@ -192,6 +202,7 @@ processArgs () {
 }
 
 processArgs
+config_bpffs
 
 if [ $debug_flag -eq 1 ] ; then
   rm -f hbm_out.log
@@ -201,7 +212,7 @@ hbm_pid=$(start_hbm)
 usleep 10
 
 host=`hostname`
-cg_base_dir=/sys/fs/cgroup
+cg_base_dir=/sys/fs/cgroup/unified
 cg_dir="$cg_base_dir/cgroup-test-work-dir/hbm$id"
 
 echo $$ >> $cg_dir/cgroup.procs
@@ -411,23 +422,8 @@ fi
 
 sleep 1
 
-# Detach any BPF programs that may have lingered
-ttx=`bpftool cgroup tree | grep hbm`
-v=2
-for x in $ttx ; do
-if [ "${x:0:36}" == "/sys/fs/cgroup/cgroup-test-work-dir/" ] ; then
-   cg=$x ; v=0
-else
-   if [ $v -eq 0 ] ; then
-   id=$x ; v=1
-   else
-   if [ $v -eq 1 ] ; then
-   type=$x ; bpftool cgroup detach $cg $type id $id
-   v=0
-   fi
-   fi
-fi
-done
+# Detach any pinned BPF programs that may have lingered
+rm -rf $BPFFS/hbm*
 
 if [ $use_netperf -ne 0 ] ; then
   if [ "$server" == "" ] ; then
diff --git a/samples/bpf/hbm.c b/samples/bpf/hbm.c
index 400e741a56eb..bda7aa8a52b0 100644
--- a/samples/bpf/hbm.c
+++ b/samples/bpf/hbm.c
@@ -46,7 +46,6 @@
 #include 
 #include 
 
-#include "bpf_load.h"
 #include "bpf_rlimit.h"
 #include "cgroup_helpers.h"
 #include "hbm.h"
@@ -70,9 +69,9 @@ static void do_error(char *msg, bool errno_flag);
 
 #define DEBUGFS "/sys/kernel/debug/tracing/"
 
-struct bpf_object *obj;
-int bpfprog_fd;
-int cgroup_storage_fd;
+static struct bpf_program *bpf_prog;
+static struct bpf_object *obj;
+static int queue_stats_fd;
 
 static void read_trace_pipe2(void)
 {
@@ -121,56 +120,50 @@ static void do_error(char *msg, bool errno_flag)
 
 static int prog_load(char *prog)
 {
-   struct bpf_prog_load_attr prog_load_attr = {
-   .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
-   .file = prog,
-   .expected_attach_type = BPF_CGROUP_INET_EGRESS,
-   };
-   int map_fd;
-   struct bpf_map *map;
-
-   i

[PATCH bpf-next v2 2/7] samples: bpf: refactor test_cgrp2_sock2 program with libbpf

2020-11-19 Thread Daniel T. Lee
This commit refactors the existing cgroup program with libbpf bpf
loader. The original test_cgrp2_sock2 has kept the bpf program
attached to the cgroup hierarchy even after the exit of the user program.
To implement the same functionality with libbpf, this commit uses the
BPF_LINK_PINNING to pin the link attachment even after it is closed.

Since this uses LINK instead of ATTACH, detach of bpf program from
cgroup with 'test_cgrp2_sock' is not used anymore.

The code to mount the bpffs was added to the .sh file in case the bpffs
was not mounted on /sys/fs/bpf. Additionally, to fix the problem that
the shell script cannot find the binary object from the current path,
the relative path './' has been added in front of the binary.

Fixes: 554ae6e792ef3 ("samples/bpf: add userspace example for prohibiting 
sockets")
Signed-off-by: Daniel T. Lee 
---
Changes in v2:
 - change to destroy link even after link__pin()
 - enhance error message

 samples/bpf/Makefile|  2 +-
 samples/bpf/test_cgrp2_sock2.c  | 61 -
 samples/bpf/test_cgrp2_sock2.sh | 21 +---
 3 files changed, 62 insertions(+), 22 deletions(-)

diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index 7c61118525f7..d31e082c369e 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -82,7 +82,7 @@ test_overhead-objs := bpf_load.o test_overhead_user.o
 test_cgrp2_array_pin-objs := test_cgrp2_array_pin.o
 test_cgrp2_attach-objs := test_cgrp2_attach.o
 test_cgrp2_sock-objs := test_cgrp2_sock.o
-test_cgrp2_sock2-objs := bpf_load.o test_cgrp2_sock2.o
+test_cgrp2_sock2-objs := test_cgrp2_sock2.o
 xdp1-objs := xdp1_user.o
 # reuse xdp1 source intentionally
 xdp2-objs := xdp1_user.o
diff --git a/samples/bpf/test_cgrp2_sock2.c b/samples/bpf/test_cgrp2_sock2.c
index a9277b118c33..e7060aaa2f5a 100644
--- a/samples/bpf/test_cgrp2_sock2.c
+++ b/samples/bpf/test_cgrp2_sock2.c
@@ -20,9 +20,9 @@
 #include 
 #include 
 #include 
+#include 
 
 #include "bpf_insn.h"
-#include "bpf_load.h"
 
 static int usage(const char *argv0)
 {
@@ -32,37 +32,64 @@ static int usage(const char *argv0)
 
 int main(int argc, char **argv)
 {
-   int cg_fd, ret, filter_id = 0;
+   int cg_fd, err, ret = EXIT_FAILURE, filter_id = 0, prog_cnt = 0;
+   const char *link_pin_path = "/sys/fs/bpf/test_cgrp2_sock2";
+   struct bpf_link *link = NULL;
+   struct bpf_program *progs[2];
+   struct bpf_program *prog;
+   struct bpf_object *obj;
 
if (argc < 3)
return usage(argv[0]);
 
+   if (argc > 3)
+   filter_id = atoi(argv[3]);
+
cg_fd = open(argv[1], O_DIRECTORY | O_RDONLY);
if (cg_fd < 0) {
printf("Failed to open cgroup path: '%s'\n", strerror(errno));
-   return EXIT_FAILURE;
+   return ret;
}
 
-   if (load_bpf_file(argv[2]))
-   return EXIT_FAILURE;
-
-   printf("Output from kernel verifier:\n%s\n---\n", bpf_log_buf);
+   obj = bpf_object__open_file(argv[2], NULL);
+   if (libbpf_get_error(obj)) {
+   printf("ERROR: opening BPF object file failed\n");
+   return ret;
+   }
 
-   if (argc > 3)
-   filter_id = atoi(argv[3]);
+   bpf_object__for_each_program(prog, obj) {
+   progs[prog_cnt] = prog;
+   prog_cnt++;
+   }
 
if (filter_id >= prog_cnt) {
printf("Invalid program id; program not found in file\n");
-   return EXIT_FAILURE;
+   goto cleanup;
+   }
+
+   /* load BPF program */
+   if (bpf_object__load(obj)) {
+   printf("ERROR: loading BPF object file failed\n");
+   goto cleanup;
}
 
-   ret = bpf_prog_attach(prog_fd[filter_id], cg_fd,
- BPF_CGROUP_INET_SOCK_CREATE, 0);
-   if (ret < 0) {
-   printf("Failed to attach prog to cgroup: '%s'\n",
-  strerror(errno));
-   return EXIT_FAILURE;
+   link = bpf_program__attach_cgroup(progs[filter_id], cg_fd);
+   if (libbpf_get_error(link)) {
+   printf("ERROR: bpf_program__attach failed\n");
+   link = NULL;
+   goto cleanup;
}
 
-   return EXIT_SUCCESS;
+   err = bpf_link__pin(link, link_pin_path);
+   if (err < 0) {
+   printf("ERROR: bpf_link__pin failed: %d\n", err);
+   goto cleanup;
+   }
+
+   ret = EXIT_SUCCESS;
+
+cleanup:
+   bpf_link__destroy(link);
+   bpf_object__close(obj);
+   return ret;
 }
diff --git a/samples/bpf/test_cgrp2_sock2.sh b/samples/bpf/test_cgrp2_sock2.sh
index 0f396a86e0cb..6a3dbe642b2b 100755
--- a/samples/bpf/test_cgrp2_sock2.sh
+++ b/samples/bpf/test_cgrp2_sock2.sh
@@ -1,6 +1,9 @@
 #!/

Re: [PATCH bpf-next 2/9] samples: bpf: refactor hbm program with libbpf

2020-11-18 Thread Daniel T. Lee
On Wed, Nov 18, 2020 at 11:10 AM Martin KaFai Lau  wrote:
>
> On Tue, Nov 17, 2020 at 02:56:37PM +, Daniel T. Lee wrote:
> [ ... ]
>
> > +
> > +cleanup:
> > + if (rc != 0)
> so this test can be avoided.
>

Thanks for pointing me out! I will follow this approach.

> > + bpf_object__close(obj);
> > +
> > + return rc;
> >  }
> >
> > [...]
> >   if (!outFlag)
> > - type = BPF_CGROUP_INET_INGRESS;
> > - if (bpf_prog_attach(bpfprog_fd, cg1, type, 0)) {
> > - printf("ERROR: bpf_prog_attach fails!\n");
> > - log_err("Attaching prog");
> > + bpf_program__set_expected_attach_type(bpf_prog, 
> > BPF_CGROUP_INET_INGRESS);
> > +
> > + link = bpf_program__attach_cgroup(bpf_prog, cg1);
> There is a difference here.
> I think the bpf_prog will be detached when link is gone (e.g. process exit)
> I am not sure it is what hbm is expected considering
> cg is not clean-up on the success case.
>

I think you're right. As I did in the third patch, I will use the
link__pin approach to prevent the link from being cleaned up when the
process exit.

> > + if (libbpf_get_error(link)) {
> > + fprintf(stderr, "ERROR: bpf_program__attach_cgroup failed\n");
> > + link = NULL;
> not needed.  bpf_link__destroy() can handle err ptr.
>

Thank you for the detailed advice, but in order to make it more clear
that link is no longer used, how about keeping this approach?

> >   goto err;
> >   }
> > [...]
> > +
> >   if (cg1)
> This test looks wrong since cg1 is a fd.
>

I'll remove unnecessary fd compare.

-- 
Best,
Daniel T. Lee


Re: [PATCH bpf-next 3/9] samples: bpf: refactor test_cgrp2_sock2 program with libbpf

2020-11-18 Thread Daniel T. Lee
On Wed, Nov 18, 2020 at 2:58 PM Martin KaFai Lau  wrote:
>
> On Tue, Nov 17, 2020 at 02:56:38PM +, Daniel T. Lee wrote:
> [ ... ]
>
> > + err = bpf_link__pin(link, link_pin_path);
> > + if (err < 0) {
> > + printf("err : %d\n", err);
> > + goto cleanup;
> > + }
> > +
> > + ret = EXIT_SUCCESS;
> > +
> > +cleanup:
> > + if (ret != EXIT_SUCCESS)
> > + bpf_link__destroy(link);
> This looks wrong.  cleanup should be done regardless.
>

At first, I thought destroying the link after the link__pin might unpin
the link, but I just tested it and confirmed that it actually didn't,
and that the link stayed pinned.

Thanks for pointing it out! I will stick to this method.

> > +
> > + bpf_object__close(obj);
> > + return ret;
> >  }



-- 
Best,
Daniel T. Lee


Re: [PATCH bpf-next 5/9] samples: bpf: refactor ibumad program with libbpf

2020-11-17 Thread Daniel T. Lee
On Wed, Nov 18, 2020 at 12:10 PM Andrii Nakryiko
 wrote:
>
> On Tue, Nov 17, 2020 at 7:05 PM Daniel T. Lee  wrote:
> >
> > On Wed, Nov 18, 2020 at 11:52 AM Andrii Nakryiko
> >  wrote:
> > >
> > > On Tue, Nov 17, 2020 at 6:57 AM Daniel T. Lee  
> > > wrote:
> > > >
> > > > This commit refactors the existing ibumad program with libbpf bpf
> > > > loader. Attach/detach of Tracepoint bpf programs has been managed
> > > > with the generic bpf_program__attach() and bpf_link__destroy() from
> > > > the libbpf.
> > > >
> > > > Also, instead of using the previous BPF MAP definition, this commit
> > > > refactors ibumad MAP definition with the new BTF-defined MAP format.
> > > >
> > > > To verify that this bpf program works without an infiniband device,
> > > > try loading ib_umad kernel module and test the program as follows:
> > > >
> > > >     # modprobe ib_umad
> > > > # ./ibumad
> > > >
> > > > Moreover, TRACE_HELPERS has been removed from the Makefile since it is
> > > > not used on this program.
> > > >
> > > > Signed-off-by: Daniel T. Lee 
> > > > ---
> > > >  samples/bpf/Makefile  |  2 +-
> > > >  samples/bpf/ibumad_kern.c | 26 +++
> > > >  samples/bpf/ibumad_user.c | 66 ++-
> > > >  3 files changed, 65 insertions(+), 29 deletions(-)
> > > >
> > > > diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
> > > > index 36b261c7afc7..bfa595379493 100644
> > > > --- a/samples/bpf/Makefile
> > > > +++ b/samples/bpf/Makefile
> > > > @@ -109,7 +109,7 @@ xsk_fwd-objs := xsk_fwd.o
> > > >  xdp_fwd-objs := xdp_fwd_user.o
> > > >  task_fd_query-objs := task_fd_query_user.o $(TRACE_HELPERS)
> > > >  xdp_sample_pkts-objs := xdp_sample_pkts_user.o $(TRACE_HELPERS)
> > > > -ibumad-objs := bpf_load.o ibumad_user.o $(TRACE_HELPERS)
> > > > +ibumad-objs := ibumad_user.o
> > > >  hbm-objs := hbm.o $(CGROUP_HELPERS) $(TRACE_HELPERS)
> > > >
> > > >  # Tell kbuild to always build the programs
> > > > diff --git a/samples/bpf/ibumad_kern.c b/samples/bpf/ibumad_kern.c
> > > > index 3a91b4c1989a..26dcd4dde946 100644
> > > > --- a/samples/bpf/ibumad_kern.c
> > > > +++ b/samples/bpf/ibumad_kern.c
> > > > @@ -16,19 +16,19 @@
> > > >  #include 
> > > >
> > > >
> > > > -struct bpf_map_def SEC("maps") read_count = {
> > > > -   .type= BPF_MAP_TYPE_ARRAY,
> > > > -   .key_size= sizeof(u32), /* class; u32 required */
> > > > -   .value_size  = sizeof(u64), /* count of mads read */
> > > > -   .max_entries = 256, /* Room for all Classes */
> > > > -};
> > > > -
> > > > -struct bpf_map_def SEC("maps") write_count = {
> > > > -   .type= BPF_MAP_TYPE_ARRAY,
> > > > -   .key_size= sizeof(u32), /* class; u32 required */
> > > > -   .value_size  = sizeof(u64), /* count of mads written */
> > > > -   .max_entries = 256, /* Room for all Classes */
> > > > -};
> > > > +struct {
> > > > +   __uint(type, BPF_MAP_TYPE_ARRAY);
> > > > +   __type(key, u32); /* class; u32 required */
> > > > +   __type(value, u64); /* count of mads read */
> > > > +   __uint(max_entries, 256); /* Room for all Classes */
> > > > +} read_count SEC(".maps");
> > > > +
> > > > +struct {
> > > > +   __uint(type, BPF_MAP_TYPE_ARRAY);
> > > > +   __type(key, u32); /* class; u32 required */
> > > > +   __type(value, u64); /* count of mads written */
> > > > +   __uint(max_entries, 256); /* Room for all Classes */
> > > > +} write_count SEC(".maps");
> > > >
> > > >  #undef DEBUG
> > > >  #ifndef DEBUG
> > > > diff --git a/samples/bpf/ibumad_user.c b/samples/bpf/ibumad_user.c
> > > > index fa06eef31a84..66a06272f242 100644
> > > > --- a/samples/bpf/ibumad_user.c
> > > > +++ b/samples/bpf/ibumad_user.c
> > > > @@ -23,10 +23,15 @@
> > > >  #include 
> > > >  #include 
> > > >
> > > > -#include "bpf_load.h"
> > > > +

Re: [PATCH bpf-next 3/9] samples: bpf: refactor test_cgrp2_sock2 program with libbpf

2020-11-17 Thread Daniel T. Lee
On Wed, Nov 18, 2020 at 12:02 PM Andrii Nakryiko
 wrote:
>
> On Tue, Nov 17, 2020 at 6:57 AM Daniel T. Lee  wrote:
> >
> > This commit refactors the existing cgroup program with libbpf bpf
> > loader. The original test_cgrp2_sock2 has kept the bpf program
> > attached to the cgroup hierarchy even after the exit of the user program.
> > To implement the same functionality with libbpf, this commit uses the
> > BPF_LINK_PINNING to pin the link attachment even after it is closed.
> >
> > Since this uses LINK instead of ATTACH, detach of bpf program from
> > cgroup with 'test_cgrp2_sock' is not used anymore.
> >
> > The code to mount the bpf filesystem was added to the .sh file in case it
> > was not mounted on /sys/fs/bpf. Additionally, to fix the problem that
> > shell script cannot find the binary object from the current path,
> > relative path './' has been added in front of binary.
> >
> > Signed-off-by: Daniel T. Lee 
> > ---
> >  samples/bpf/Makefile|  2 +-
> >  samples/bpf/test_cgrp2_sock2.c  | 63 -
> >  samples/bpf/test_cgrp2_sock2.sh | 21 ---
> >  3 files changed, 64 insertions(+), 22 deletions(-)
> >
>
> [...]
>
> >
> > -   return EXIT_SUCCESS;
> > +   err = bpf_link__pin(link, link_pin_path);
> > +   if (err < 0) {
> > +   printf("err : %d\n", err);
>
> more meaningful error message would be helpful
>

Thanks for pointing out, I will fix it directly!

> > +   goto cleanup;
> > +   }
> > +
> > +   ret = EXIT_SUCCESS;
> > +
> > +cleanup:
> > +   if (ret != EXIT_SUCCESS)
> > +   bpf_link__destroy(link);
> > +
> > +   bpf_object__close(obj);
> > +   return ret;
> >  }
>
> [...]
>
> >
> >  function attach_bpf {
> > -   test_cgrp2_sock2 /tmp/cgroupv2/foo sock_flags_kern.o $1
> > +   ./test_cgrp2_sock2 /tmp/cgroupv2/foo sock_flags_kern.o $1
>
> Can you please add Fixes: tag for this?
>

Will add it in the next version of patch :)

Thanks for your time and effort for the review.

-- 
Best,
Daniel T. Lee


Re: [PATCH bpf-next 4/9] samples: bpf: refactor task_fd_query program with libbpf

2020-11-17 Thread Daniel T. Lee
On Wed, Nov 18, 2020 at 11:58 AM Andrii Nakryiko
 wrote:
>
> On Tue, Nov 17, 2020 at 6:57 AM Daniel T. Lee  wrote:
> >
> > This commit refactors the existing kprobe program with libbpf bpf
> > loader. To attach bpf program, this uses generic bpf_program__attach()
> > approach rather than using bpf_load's load_bpf_file().
> >
> > To attach bpf to perf_event, instead of using previous ioctl method,
> > this commit uses bpf_program__attach_perf_event since it manages the
> > enable of perf_event and attach of BPF programs to it, which is much
> > more intuitive way to achieve.
> >
> > Also, explicit close(fd) has been removed since event will be closed
> > inside bpf_link__destroy() automatically.
> >
> > DEBUGFS macro from trace_helpers has been used to control uprobe events.
> > Furthermore, to prevent conflict of same named uprobe events, O_TRUNC
> > flag has been used to clear 'uprobe_events' interface.
> >
> > Signed-off-by: Daniel T. Lee 
> > ---
> >  samples/bpf/Makefile |   2 +-
> >  samples/bpf/task_fd_query_user.c | 101 ++-
> >  2 files changed, 74 insertions(+), 29 deletions(-)
> >
>
> [...]
>
> >  static int test_debug_fs_uprobe(char *binary_path, long offset, bool 
> > is_return)
> >  {
> > +   char buf[256], event_alias[sizeof("test_1234567890")];
> > const char *event_type = "uprobe";
> > struct perf_event_attr attr = {};
> > -   char buf[256], event_alias[sizeof("test_1234567890")];
> > __u64 probe_offset, probe_addr;
> > __u32 len, prog_id, fd_type;
> > -   int err, res, kfd, efd;
> > +   int err = -1, res, kfd, efd;
> > +   struct bpf_link *link;
> > ssize_t bytes;
> >
> > -   snprintf(buf, sizeof(buf), "/sys/kernel/debug/tracing/%s_events",
> > -event_type);
> > -   kfd = open(buf, O_WRONLY | O_APPEND, 0);
> > +   snprintf(buf, sizeof(buf), DEBUGFS "%s_events", event_type);
> > +   kfd = open(buf, O_WRONLY | O_TRUNC, 0);
>
> O_TRUNC will also remove other events, created by users. Not a great
> experience. Let's leave the old behavior?
>

The reason why I used O_TRUNC is that it gets a conflict error during tests.
I'm not sure whether it is a bug of ftrace uprobe_events or not, but adding
a uprobe_event with the same name but a different type does not seem to work.
(adding uretprobes after uprobes returns an error)

samples/bpf # echo 'p:uprobes/test_500836 ./task_fd_query:0x3d80'
>> /sys/kernel/debug/tracing/uprobe_events
samples/bpf # cat /sys/kernel/debug/tracing/uprobe_events
 p:uprobes/test_500836 ./task_fd_query:0x3d80
samples/bpf# echo 'r:uprobes/test_500836 ./task_fd_query:0x3d80'
>> /sys/kernel/debug/tracing/uprobe_events
 bash: echo: write error: File exists

Since this gets error, I've just truncated on every open of this interface.

> > CHECK_PERROR_RET(kfd < 0);
> >
> > res = snprintf(event_alias, sizeof(event_alias), "test_%d", 
> > getpid());
> > @@ -240,8 +252,8 @@ static int test_debug_fs_uprobe(char *binary_path, long 
> > offset, bool is_return)
> > close(kfd);
> > kfd = -1;
> >
> > -   snprintf(buf, sizeof(buf), 
> > "/sys/kernel/debug/tracing/events/%ss/%s/id",
> > -            event_type, event_alias);
> > +   snprintf(buf, sizeof(buf), DEBUGFS "events/%ss/%s/id", event_type,
>
> I'd leave the string verbatim here (and above), I think it's better
> that way and easier to figure out what's written where. And then no
> need to expose DEBUGFS.
>

Sounds great. I'll keep the string path as it was.

> > +event_alias);
> > efd = open(buf, O_RDONLY, 0);
> > CHECK_PERROR_RET(efd < 0);
> >
>
> [...]



-- 
Best,
Daniel T. Lee


Re: [PATCH bpf-next 5/9] samples: bpf: refactor ibumad program with libbpf

2020-11-17 Thread Daniel T. Lee
On Wed, Nov 18, 2020 at 11:52 AM Andrii Nakryiko
 wrote:
>
> On Tue, Nov 17, 2020 at 6:57 AM Daniel T. Lee  wrote:
> >
> > This commit refactors the existing ibumad program with libbpf bpf
> > loader. Attach/detach of Tracepoint bpf programs has been managed
> > with the generic bpf_program__attach() and bpf_link__destroy() from
> > the libbpf.
> >
> > Also, instead of using the previous BPF MAP definition, this commit
> > refactors ibumad MAP definition with the new BTF-defined MAP format.
> >
> > To verify that this bpf program works without an infiniband device,
> > try loading ib_umad kernel module and test the program as follows:
> >
> > # modprobe ib_umad
> > # ./ibumad
> >
> > Moreover, TRACE_HELPERS has been removed from the Makefile since it is
> > not used on this program.
> >
> > Signed-off-by: Daniel T. Lee 
> > ---
> >  samples/bpf/Makefile  |  2 +-
> >  samples/bpf/ibumad_kern.c | 26 +++
> >  samples/bpf/ibumad_user.c | 66 ++-
> >  3 files changed, 65 insertions(+), 29 deletions(-)
> >
> > diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
> > index 36b261c7afc7..bfa595379493 100644
> > --- a/samples/bpf/Makefile
> > +++ b/samples/bpf/Makefile
> > @@ -109,7 +109,7 @@ xsk_fwd-objs := xsk_fwd.o
> >  xdp_fwd-objs := xdp_fwd_user.o
> >  task_fd_query-objs := task_fd_query_user.o $(TRACE_HELPERS)
> >  xdp_sample_pkts-objs := xdp_sample_pkts_user.o $(TRACE_HELPERS)
> > -ibumad-objs := bpf_load.o ibumad_user.o $(TRACE_HELPERS)
> > +ibumad-objs := ibumad_user.o
> >  hbm-objs := hbm.o $(CGROUP_HELPERS) $(TRACE_HELPERS)
> >
> >  # Tell kbuild to always build the programs
> > diff --git a/samples/bpf/ibumad_kern.c b/samples/bpf/ibumad_kern.c
> > index 3a91b4c1989a..26dcd4dde946 100644
> > --- a/samples/bpf/ibumad_kern.c
> > +++ b/samples/bpf/ibumad_kern.c
> > @@ -16,19 +16,19 @@
> >  #include 
> >
> >
> > -struct bpf_map_def SEC("maps") read_count = {
> > -   .type= BPF_MAP_TYPE_ARRAY,
> > -   .key_size= sizeof(u32), /* class; u32 required */
> > -   .value_size  = sizeof(u64), /* count of mads read */
> > -   .max_entries = 256, /* Room for all Classes */
> > -};
> > -
> > -struct bpf_map_def SEC("maps") write_count = {
> > -   .type= BPF_MAP_TYPE_ARRAY,
> > -   .key_size= sizeof(u32), /* class; u32 required */
> > -   .value_size  = sizeof(u64), /* count of mads written */
> > -   .max_entries = 256, /* Room for all Classes */
> > -};
> > +struct {
> > +   __uint(type, BPF_MAP_TYPE_ARRAY);
> > +   __type(key, u32); /* class; u32 required */
> > +   __type(value, u64); /* count of mads read */
> > +   __uint(max_entries, 256); /* Room for all Classes */
> > +} read_count SEC(".maps");
> > +
> > +struct {
> > +   __uint(type, BPF_MAP_TYPE_ARRAY);
> > +   __type(key, u32); /* class; u32 required */
> > +   __type(value, u64); /* count of mads written */
> > +   __uint(max_entries, 256); /* Room for all Classes */
> > +} write_count SEC(".maps");
> >
> >  #undef DEBUG
> >  #ifndef DEBUG
> > diff --git a/samples/bpf/ibumad_user.c b/samples/bpf/ibumad_user.c
> > index fa06eef31a84..66a06272f242 100644
> > --- a/samples/bpf/ibumad_user.c
> > +++ b/samples/bpf/ibumad_user.c
> > @@ -23,10 +23,15 @@
> >  #include 
> >  #include 
> >
> > -#include "bpf_load.h"
> > +#include 
> >  #include "bpf_util.h"
> >  #include 
> >
> > +struct bpf_link *tp_links[3] = {};
> > +struct bpf_object *obj;
>
> statics and you can drop = {} part.
>
> > +static int map_fd[2];
> > +static int tp_cnt;
> > +
> >  static void dump_counts(int fd)
> >  {
> > __u32 key;
> > @@ -53,6 +58,11 @@ static void dump_all_counts(void)
> >  static void dump_exit(int sig)
> >  {
> > dump_all_counts();
> > +   /* Detach tracepoints */
> > +   while (tp_cnt)
> > +   bpf_link__destroy(tp_links[--tp_cnt]);
> > +
> > +   bpf_object__close(obj);
> > exit(0);
> >  }
> >
> > @@ -73,19 +83,11 @@ static void usage(char *cmd)
> >
> >  int main(int argc, char **argv)
> >  {
> > +   struct bpf_program *prog;
> > unsigned long delay = 5;
> > +   char filename[256];
> > int lon

Re: [PATCH bpf-next 9/9] samples: bpf: remove bpf_load loader completely

2020-11-17 Thread Daniel T. Lee
On Wed, Nov 18, 2020 at 11:49 AM Andrii Nakryiko
 wrote:
>
> On Tue, Nov 17, 2020 at 6:58 AM Daniel T. Lee  wrote:
> >
> > Numerous refactoring that rewrites BPF programs written with bpf_load
> > to use the libbpf loader was finally completed, resulting in BPF
> > programs using bpf_load within the kernel being completely no longer
> > present.
> >
> > This commit removes bpf_load, an outdated bpf loader that is difficult
> > to keep up with the latest kernel BPF and causes confusion.
> >
> > Signed-off-by: Daniel T. Lee 
> > ---
>
> RIP, bpf_load().
>
> Probably makes more sense to combine this patch with the previous patch.
>
> Acked-by: Andrii Nakryiko 

Will merge and send the next version of patch!

Thanks for your time and effort for the review.

-- 
Best,
Daniel T. Lee


Re: [PATCH bpf-next 1/9] selftests: bpf: move tracing helpers to trace_helper

2020-11-17 Thread Daniel T. Lee
On Wed, Nov 18, 2020 at 10:58 AM Andrii Nakryiko
 wrote:
>
> On Tue, Nov 17, 2020 at 6:57 AM Daniel T. Lee  wrote:
> >
> > Under the samples/bpf directory, similar tracing helpers are
> > fragmented around. To keep consistent of tracing programs, this commit
> > moves the helper and define locations to increase the reuse of each
> > helper function.
> >
> > Signed-off-by: Daniel T. Lee 
> >
> > ---
> > [...]
> > -static void read_trace_pipe2(void)
>
> This is used only in hbm.c, why move it into trace_helpers.c?
>

I think this function can be made into a helper that can be used in
other programs. It is basically the same as 'read_trace_pipe', except that
it also writes the content of the pipe to a file. Well, it's not used
anywhere else, but I moved this function for its potential for reuse.

Since these 'read_trace_pipe's are helpers that are only used
under samples directory, what do you think about moving these
helpers to something like samples/bpf/trace_pipe.h?

Thanks for your time and effort for the review.

-- 
Best,
Daniel T. Lee


Re: [PATCH bpf-next 1/9] selftests: bpf: move tracing helpers to trace_helper

2020-11-17 Thread Daniel T. Lee
On Wed, Nov 18, 2020 at 10:19 AM Martin KaFai Lau  wrote:
>
> On Tue, Nov 17, 2020 at 02:56:36PM +, Daniel T. Lee wrote:
> > Under the samples/bpf directory, similar tracing helpers are
> > fragmented around. To keep consistent of tracing programs, this commit
> > moves the helper and define locations to increase the reuse of each
> > helper function.
> >
> > Signed-off-by: Daniel T. Lee 
> >
> > ---
> >  samples/bpf/Makefile|  2 +-
> >  samples/bpf/hbm.c   | 51 -
> >  tools/testing/selftests/bpf/trace_helpers.c | 33 -
> >  tools/testing/selftests/bpf/trace_helpers.h |  3 ++
> >  4 files changed, 45 insertions(+), 44 deletions(-)
> >
> > [...]
> >
> > -#define DEBUGFS "/sys/kernel/debug/tracing/"
> Is this change needed?

This macro can be used in other samples such as the 4th' patch of this
patchset, task_fd_query.

> > -
> >  #define MAX_SYMS 30
> >  static struct ksym syms[MAX_SYMS];
> >  static int sym_cnt;
> > @@ -136,3 +134,34 @@ void read_trace_pipe(void)
> >   }
> >   }
> >  }
> > +
> > +void read_trace_pipe2(char *filename)
> > +{
> > + int trace_fd;
> > + FILE *outf;
> > +
> > + trace_fd = open(DEBUGFS "trace_pipe", O_RDONLY, 0);
> > + if (trace_fd < 0) {
> > + printf("Error opening trace_pipe\n");
> > + return;
> > + }
> > +
> > + outf = fopen(filename, "w");
> > + if (!outf)
> > + printf("Error creating %s\n", filename);
> > +
> > + while (1) {
> > + static char buf[4096];
> > + ssize_t sz;
> > +
> > + sz = read(trace_fd, buf, sizeof(buf) - 1);
> > + if (sz > 0) {
> > + buf[sz] = 0;
> > + puts(buf);
> > + if (outf) {
> > + fprintf(outf, "%s\n", buf);
> > + fflush(outf);
> > + }
> > + }
> > + }
> It needs a fclose().
>
> IIUC, this function will never return.  I am not sure
> this is something that should be made available to selftests.

Actually, read_trace_pipe and read_trace_pipe2 are helpers that are
only used under samples directory. Since these helpers are not used
in selftests, What do you think about moving these helpers to
something like samples/bpf/trace_pipe.h?

Thanks for your time and effort for the review.

-- 
Best,
Daniel T. Lee


[PATCH bpf-next 1/9] selftests: bpf: move tracing helpers to trace_helper

2020-11-17 Thread Daniel T. Lee
Under the samples/bpf directory, similar tracing helpers are
fragmented across files. To keep tracing programs consistent, this commit
moves the helpers and their defines to a common location to increase the
reuse of each helper function.

Signed-off-by: Daniel T. Lee 

---
 samples/bpf/Makefile|  2 +-
 samples/bpf/hbm.c   | 51 -
 tools/testing/selftests/bpf/trace_helpers.c | 33 -
 tools/testing/selftests/bpf/trace_helpers.h |  3 ++
 4 files changed, 45 insertions(+), 44 deletions(-)

diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index aeebf5d12f32..3e83cd5ca1c2 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -110,7 +110,7 @@ xdp_fwd-objs := xdp_fwd_user.o
 task_fd_query-objs := bpf_load.o task_fd_query_user.o $(TRACE_HELPERS)
 xdp_sample_pkts-objs := xdp_sample_pkts_user.o $(TRACE_HELPERS)
 ibumad-objs := bpf_load.o ibumad_user.o $(TRACE_HELPERS)
-hbm-objs := bpf_load.o hbm.o $(CGROUP_HELPERS)
+hbm-objs := bpf_load.o hbm.o $(CGROUP_HELPERS) $(TRACE_HELPERS)
 
 # Tell kbuild to always build the programs
 always-y := $(tprogs-y)
diff --git a/samples/bpf/hbm.c b/samples/bpf/hbm.c
index 400e741a56eb..b9f9f771dd81 100644
--- a/samples/bpf/hbm.c
+++ b/samples/bpf/hbm.c
@@ -48,6 +48,7 @@
 
 #include "bpf_load.h"
 #include "bpf_rlimit.h"
+#include "trace_helpers.h"
 #include "cgroup_helpers.h"
 #include "hbm.h"
 #include "bpf_util.h"
@@ -65,51 +66,12 @@ bool no_cn_flag;
 bool edt_flag;
 
 static void Usage(void);
-static void read_trace_pipe2(void);
 static void do_error(char *msg, bool errno_flag);
 
-#define DEBUGFS "/sys/kernel/debug/tracing/"
-
 struct bpf_object *obj;
 int bpfprog_fd;
 int cgroup_storage_fd;
 
-static void read_trace_pipe2(void)
-{
-   int trace_fd;
-   FILE *outf;
-   char *outFname = "hbm_out.log";
-
-   trace_fd = open(DEBUGFS "trace_pipe", O_RDONLY, 0);
-   if (trace_fd < 0) {
-   printf("Error opening trace_pipe\n");
-   return;
-   }
-
-// Future support of ingress
-// if (!outFlag)
-// outFname = "hbm_in.log";
-   outf = fopen(outFname, "w");
-
-   if (outf == NULL)
-   printf("Error creating %s\n", outFname);
-
-   while (1) {
-   static char buf[4097];
-   ssize_t sz;
-
-   sz = read(trace_fd, buf, sizeof(buf) - 1);
-   if (sz > 0) {
-   buf[sz] = 0;
-   puts(buf);
-   if (outf != NULL) {
-   fprintf(outf, "%s\n", buf);
-   fflush(outf);
-   }
-   }
-   }
-}
-
 static void do_error(char *msg, bool errno_flag)
 {
if (errno_flag)
@@ -392,8 +354,15 @@ static int run_bpf_prog(char *prog, int cg_id)
fclose(fout);
}
 
-   if (debugFlag)
-   read_trace_pipe2();
+   if (debugFlag) {
+   char *out_fname = "hbm_out.log";
+   /* Future support of ingress */
+   // if (!outFlag)
+   //  out_fname = "hbm_in.log";
+
+   read_trace_pipe2(out_fname);
+   }
+
return rc;
 err:
rc = 1;
diff --git a/tools/testing/selftests/bpf/trace_helpers.c 
b/tools/testing/selftests/bpf/trace_helpers.c
index 1bbd1d9830c8..b7c184e109e8 100644
--- a/tools/testing/selftests/bpf/trace_helpers.c
+++ b/tools/testing/selftests/bpf/trace_helpers.c
@@ -11,8 +11,6 @@
 #include 
 #include "trace_helpers.h"
 
-#define DEBUGFS "/sys/kernel/debug/tracing/"
-
 #define MAX_SYMS 30
 static struct ksym syms[MAX_SYMS];
 static int sym_cnt;
@@ -136,3 +134,34 @@ void read_trace_pipe(void)
}
}
 }
+
+void read_trace_pipe2(char *filename)
+{
+   int trace_fd;
+   FILE *outf;
+
+   trace_fd = open(DEBUGFS "trace_pipe", O_RDONLY, 0);
+   if (trace_fd < 0) {
+   printf("Error opening trace_pipe\n");
+   return;
+   }
+
+   outf = fopen(filename, "w");
+   if (!outf)
+   printf("Error creating %s\n", filename);
+
+   while (1) {
+   static char buf[4096];
+   ssize_t sz;
+
+   sz = read(trace_fd, buf, sizeof(buf) - 1);
+   if (sz > 0) {
+   buf[sz] = 0;
+   puts(buf);
+   if (outf) {
+   fprintf(outf, "%s\n", buf);
+   fflush(outf);
+   }
+   }
+   }
+}
diff --git a/tools/testing/selftests/bpf/trace_helpers.h 
b/tools/testing/selftests/bpf/trace_helpers.h
index f62fdef9e589..68c23bf55897 100644
--- a/tools/

[PATCH bpf-next 4/9] samples: bpf: refactor task_fd_query program with libbpf

2020-11-17 Thread Daniel T. Lee
This commit refactors the existing kprobe program with libbpf bpf
loader. To attach bpf program, this uses generic bpf_program__attach()
approach rather than using bpf_load's load_bpf_file().

To attach bpf to perf_event, instead of using previous ioctl method,
this commit uses bpf_program__attach_perf_event since it manages the
enable of perf_event and attach of BPF programs to it, which is much
more intuitive way to achieve.

Also, explicit close(fd) has been removed since event will be closed
inside bpf_link__destroy() automatically.

DEBUGFS macro from trace_helpers has been used to control uprobe events.
Furthermore, to prevent conflict of same named uprobe events, O_TRUNC
flag has been used to clear 'uprobe_events' interface.

Signed-off-by: Daniel T. Lee 
---
 samples/bpf/Makefile |   2 +-
 samples/bpf/task_fd_query_user.c | 101 ++-
 2 files changed, 74 insertions(+), 29 deletions(-)

diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index 7a643595ac6c..36b261c7afc7 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -107,7 +107,7 @@ xdp_adjust_tail-objs := xdp_adjust_tail_user.o
 xdpsock-objs := xdpsock_user.o
 xsk_fwd-objs := xsk_fwd.o
 xdp_fwd-objs := xdp_fwd_user.o
-task_fd_query-objs := bpf_load.o task_fd_query_user.o $(TRACE_HELPERS)
+task_fd_query-objs := task_fd_query_user.o $(TRACE_HELPERS)
 xdp_sample_pkts-objs := xdp_sample_pkts_user.o $(TRACE_HELPERS)
 ibumad-objs := bpf_load.o ibumad_user.o $(TRACE_HELPERS)
 hbm-objs := hbm.o $(CGROUP_HELPERS) $(TRACE_HELPERS)
diff --git a/samples/bpf/task_fd_query_user.c b/samples/bpf/task_fd_query_user.c
index b68bd2f8fdc9..0891ef3a4779 100644
--- a/samples/bpf/task_fd_query_user.c
+++ b/samples/bpf/task_fd_query_user.c
@@ -15,12 +15,15 @@
 #include 
 #include 
 
+#include 
 #include 
-#include "bpf_load.h"
 #include "bpf_util.h"
 #include "perf-sys.h"
 #include "trace_helpers.h"
 
+struct bpf_program *progs[2];
+struct bpf_link *links[2];
+
 #define CHECK_PERROR_RET(condition) ({ \
int __ret = !!(condition);  \
if (__ret) {\
@@ -86,21 +89,22 @@ static int bpf_get_retprobe_bit(const char *event_type)
return ret;
 }
 
-static int test_debug_fs_kprobe(int prog_fd_idx, const char *fn_name,
+static int test_debug_fs_kprobe(int link_idx, const char *fn_name,
__u32 expected_fd_type)
 {
__u64 probe_offset, probe_addr;
__u32 len, prog_id, fd_type;
+   int err, event_fd;
char buf[256];
-   int err;
 
len = sizeof(buf);
-   err = bpf_task_fd_query(getpid(), event_fd[prog_fd_idx], 0, buf, &len,
+   event_fd = bpf_link__fd(links[link_idx]);
+   err = bpf_task_fd_query(getpid(), event_fd, 0, buf, &len,
&prog_id, &fd_type, &probe_offset,
&probe_addr);
if (err < 0) {
printf("FAIL: %s, for event_fd idx %d, fn_name %s\n",
-  __func__, prog_fd_idx, fn_name);
+  __func__, link_idx, fn_name);
perror(":");
return -1;
}
@@ -108,7 +112,7 @@ static int test_debug_fs_kprobe(int prog_fd_idx, const char 
*fn_name,
fd_type != expected_fd_type ||
probe_offset != 0x0 || probe_addr != 0x0) {
printf("FAIL: bpf_trace_event_query(event_fd[%d]):\n",
-  prog_fd_idx);
+  link_idx);
printf("buf: %s, fd_type: %u, probe_offset: 0x%llx,"
   " probe_addr: 0x%llx\n",
   buf, fd_type, probe_offset, probe_addr);
@@ -125,12 +129,13 @@ static int test_nondebug_fs_kuprobe_common(const char 
*event_type,
int is_return_bit = bpf_get_retprobe_bit(event_type);
int type = bpf_find_probe_type(event_type);
struct perf_event_attr attr = {};
-   int fd;
+   struct bpf_link *link;
+   int fd, err = -1;
 
if (type < 0 || is_return_bit < 0) {
printf("FAIL: %s incorrect type (%d) or is_return_bit (%d)\n",
__func__, type, is_return_bit);
-   return -1;
+   return err;
}
 
attr.sample_period = 1;
@@ -149,14 +154,21 @@ static int test_nondebug_fs_kuprobe_common(const char 
*event_type,
attr.type = type;
 
fd = sys_perf_event_open(&attr, -1, 0, -1, 0);
-   CHECK_PERROR_RET(fd < 0);
+   link = bpf_program__attach_perf_event(progs[0], fd);
+   if (libbpf_get_error(link)) {
+   printf("ERROR: bpf_program__attach_perf_event failed\n");
+   link = NULL;
+   close(fd);
+   goto cleanup;
+   }
 
-   CHECK_PERROR_

[PATCH bpf-next 9/9] samples: bpf: remove bpf_load loader completely

2020-11-17 Thread Daniel T. Lee
The numerous refactorings that rewrite BPF programs written with bpf_load
to use the libbpf loader have finally been completed, and as a result no
BPF programs within the kernel use bpf_load any longer.

This commit removes bpf_load, an outdated bpf loader that is difficult
to keep up with the latest kernel BPF and causes confusion.

Signed-off-by: Daniel T. Lee 
---
 samples/bpf/bpf_load.c  | 667 
 samples/bpf/bpf_load.h  |  57 ---
 samples/bpf/xdp2skb_meta_kern.c |   2 +-
 3 files changed, 1 insertion(+), 725 deletions(-)
 delete mode 100644 samples/bpf/bpf_load.c
 delete mode 100644 samples/bpf/bpf_load.h

diff --git a/samples/bpf/bpf_load.c b/samples/bpf/bpf_load.c
deleted file mode 100644
index c5ad528f046e..
--- a/samples/bpf/bpf_load.c
+++ /dev/null
@@ -1,667 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include "bpf_load.h"
-#include "perf-sys.h"
-
-#define DEBUGFS "/sys/kernel/debug/tracing/"
-
-static char license[128];
-static int kern_version;
-static bool processed_sec[128];
-char bpf_log_buf[BPF_LOG_BUF_SIZE];
-int map_fd[MAX_MAPS];
-int prog_fd[MAX_PROGS];
-int event_fd[MAX_PROGS];
-int prog_cnt;
-int prog_array_fd = -1;
-
-struct bpf_map_data map_data[MAX_MAPS];
-int map_data_count;
-
-static int populate_prog_array(const char *event, int prog_fd)
-{
-   int ind = atoi(event), err;
-
-   err = bpf_map_update_elem(prog_array_fd, &ind, &prog_fd, BPF_ANY);
-   if (err < 0) {
-   printf("failed to store prog_fd in prog_array\n");
-   return -1;
-   }
-   return 0;
-}
-
-static int write_kprobe_events(const char *val)
-{
-   int fd, ret, flags;
-
-   if (val == NULL)
-   return -1;
-   else if (val[0] == '\0')
-   flags = O_WRONLY | O_TRUNC;
-   else
-   flags = O_WRONLY | O_APPEND;
-
-   fd = open(DEBUGFS "kprobe_events", flags);
-
-   ret = write(fd, val, strlen(val));
-   close(fd);
-
-   return ret;
-}
-
-static int load_and_attach(const char *event, struct bpf_insn *prog, int size)
-{
-   bool is_socket = strncmp(event, "socket", 6) == 0;
-   bool is_kprobe = strncmp(event, "kprobe/", 7) == 0;
-   bool is_kretprobe = strncmp(event, "kretprobe/", 10) == 0;
-   bool is_tracepoint = strncmp(event, "tracepoint/", 11) == 0;
-   bool is_raw_tracepoint = strncmp(event, "raw_tracepoint/", 15) == 0;
-   bool is_xdp = strncmp(event, "xdp", 3) == 0;
-   bool is_perf_event = strncmp(event, "perf_event", 10) == 0;
-   bool is_cgroup_skb = strncmp(event, "cgroup/skb", 10) == 0;
-   bool is_cgroup_sk = strncmp(event, "cgroup/sock", 11) == 0;
-   bool is_sockops = strncmp(event, "sockops", 7) == 0;
-   bool is_sk_skb = strncmp(event, "sk_skb", 6) == 0;
-   bool is_sk_msg = strncmp(event, "sk_msg", 6) == 0;
-   size_t insns_cnt = size / sizeof(struct bpf_insn);
-   enum bpf_prog_type prog_type;
-   char buf[256];
-   int fd, efd, err, id;
-   struct perf_event_attr attr = {};
-
-   attr.type = PERF_TYPE_TRACEPOINT;
-   attr.sample_type = PERF_SAMPLE_RAW;
-   attr.sample_period = 1;
-   attr.wakeup_events = 1;
-
-   if (is_socket) {
-   prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
-   } else if (is_kprobe || is_kretprobe) {
-   prog_type = BPF_PROG_TYPE_KPROBE;
-   } else if (is_tracepoint) {
-   prog_type = BPF_PROG_TYPE_TRACEPOINT;
-   } else if (is_raw_tracepoint) {
-   prog_type = BPF_PROG_TYPE_RAW_TRACEPOINT;
-   } else if (is_xdp) {
-   prog_type = BPF_PROG_TYPE_XDP;
-   } else if (is_perf_event) {
-   prog_type = BPF_PROG_TYPE_PERF_EVENT;
-   } else if (is_cgroup_skb) {
-   prog_type = BPF_PROG_TYPE_CGROUP_SKB;
-   } else if (is_cgroup_sk) {
-   prog_type = BPF_PROG_TYPE_CGROUP_SOCK;
-   } else if (is_sockops) {
-   prog_type = BPF_PROG_TYPE_SOCK_OPS;
-   } else if (is_sk_skb) {
-   prog_type = BPF_PROG_TYPE_SK_SKB;
-   } else if (is_sk_msg) {
-   prog_type = BPF_PROG_TYPE_SK_MSG;
-   } else {
-   printf("Unknown event '%s'\n", event);
-   return -1;
-   }
-
-   if (prog_cnt == MAX_PROGS)
-   return -1;
-
-   fd = bpf_load_program(prog_type, prog, insns_cnt, license, kern_version,
- bpf_log_buf, BPF_LOG_BUF_SIZE)

[PATCH bpf-next 0/9] bpf: remove bpf_load loader completely

2020-11-17 Thread Daniel T. Lee
The numerous refactorings that rewrite BPF programs written with
bpf_load to use the libbpf loader have finally been completed, so no
BPF programs in the kernel tree use bpf_load any longer.

This patchset refactors the remaining bpf programs with libbpf and
completely removes bpf_load, an outdated bpf loader that struggles
to keep up with the latest kernel BPF features and causes confusion.

Daniel T. Lee (9):
  selftests: bpf: move tracing helpers to trace_helper.h
  samples: bpf: refactor hbm program with libbpf
  samples: bpf: refactor test_cgrp2_sock2 program with libbpf
  samples: bpf: refactor task_fd_query program with libbpf
  samples: bpf: refactor ibumad program with libbpf
  samples: bpf: refactor test_overhead program with libbpf
  samples: bpf: fix lwt_len_hist reusing previous BPF map
  samples: bpf: remove unused trace_helper and bpf_load from Makefile
  samples: bpf: remove bpf_load loader completely

 samples/bpf/.gitignore  |   3 +
 samples/bpf/Makefile|  20 +-
 samples/bpf/bpf_load.c  | 667 
 samples/bpf/bpf_load.h  |  57 --
 samples/bpf/hbm.c   | 147 ++---
 samples/bpf/hbm_kern.h  |   2 +-
 samples/bpf/ibumad_kern.c   |  26 +-
 samples/bpf/ibumad_user.c   |  66 +-
 samples/bpf/lwt_len_hist.sh |   2 +
 samples/bpf/task_fd_query_user.c| 101 ++-
 samples/bpf/test_cgrp2_sock2.c  |  63 +-
 samples/bpf/test_cgrp2_sock2.sh |  21 +-
 samples/bpf/test_lwt_bpf.sh |   0
 samples/bpf/test_overhead_user.c|  82 ++-
 samples/bpf/xdp2skb_meta_kern.c |   2 +-
 tools/testing/selftests/bpf/trace_helpers.c |  33 +-
 tools/testing/selftests/bpf/trace_helpers.h |   3 +
 17 files changed, 368 insertions(+), 927 deletions(-)
 delete mode 100644 samples/bpf/bpf_load.c
 delete mode 100644 samples/bpf/bpf_load.h
 mode change 100644 => 100755 samples/bpf/lwt_len_hist.sh
 mode change 100644 => 100755 samples/bpf/test_lwt_bpf.sh

-- 
2.25.1



[PATCH bpf-next 7/9] samples: bpf: fix lwt_len_hist reusing previous BPF map

2020-11-17 Thread Daniel T. Lee
Currently, lwt_len_hist's map lwt_len_hist_map uses pinning, and the
map isn't cleared on test end. This leads to reuse of that map for
each test, which prevents the results of the test from being accurate.

This commit fixes the problem by removing the pinned map from bpffs.
Also, this commit adds the executable permission to the shell script
files.

Fixes: f74599f7c5309 ("bpf: Add tests and samples for LWT-BPF")
Signed-off-by: Daniel T. Lee 
---
 samples/bpf/lwt_len_hist.sh | 2 ++
 samples/bpf/test_lwt_bpf.sh | 0
 2 files changed, 2 insertions(+)
 mode change 100644 => 100755 samples/bpf/lwt_len_hist.sh
 mode change 100644 => 100755 samples/bpf/test_lwt_bpf.sh

diff --git a/samples/bpf/lwt_len_hist.sh b/samples/bpf/lwt_len_hist.sh
old mode 100644
new mode 100755
index 090b96eaf7f7..0eda9754f50b
--- a/samples/bpf/lwt_len_hist.sh
+++ b/samples/bpf/lwt_len_hist.sh
@@ -8,6 +8,8 @@ VETH1=tst_lwt1b
 TRACE_ROOT=/sys/kernel/debug/tracing
 
 function cleanup {
+   # To reset saved histogram, remove pinned map
+   rm /sys/fs/bpf/tc/globals/lwt_len_hist_map
ip route del 192.168.253.2/32 dev $VETH0 2> /dev/null
ip link del $VETH0 2> /dev/null
ip link del $VETH1 2> /dev/null
diff --git a/samples/bpf/test_lwt_bpf.sh b/samples/bpf/test_lwt_bpf.sh
old mode 100644
new mode 100755
-- 
2.25.1



[PATCH bpf-next 5/9] samples: bpf: refactor ibumad program with libbpf

2020-11-17 Thread Daniel T. Lee
This commit refactors the existing ibumad program with libbpf bpf
loader. Attach/detach of Tracepoint bpf programs has been managed
with the generic bpf_program__attach() and bpf_link__destroy() from
the libbpf.

Also, instead of using the previous BPF MAP definition, this commit
refactors ibumad MAP definition with the new BTF-defined MAP format.

To verify that this bpf program works without an infiniband device,
try loading ib_umad kernel module and test the program as follows:

# modprobe ib_umad
# ./ibumad

Moreover, TRACE_HELPERS has been removed from the Makefile since it is
not used on this program.

Signed-off-by: Daniel T. Lee 
---
 samples/bpf/Makefile  |  2 +-
 samples/bpf/ibumad_kern.c | 26 +++
 samples/bpf/ibumad_user.c | 66 ++-
 3 files changed, 65 insertions(+), 29 deletions(-)

diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index 36b261c7afc7..bfa595379493 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -109,7 +109,7 @@ xsk_fwd-objs := xsk_fwd.o
 xdp_fwd-objs := xdp_fwd_user.o
 task_fd_query-objs := task_fd_query_user.o $(TRACE_HELPERS)
 xdp_sample_pkts-objs := xdp_sample_pkts_user.o $(TRACE_HELPERS)
-ibumad-objs := bpf_load.o ibumad_user.o $(TRACE_HELPERS)
+ibumad-objs := ibumad_user.o
 hbm-objs := hbm.o $(CGROUP_HELPERS) $(TRACE_HELPERS)
 
 # Tell kbuild to always build the programs
diff --git a/samples/bpf/ibumad_kern.c b/samples/bpf/ibumad_kern.c
index 3a91b4c1989a..26dcd4dde946 100644
--- a/samples/bpf/ibumad_kern.c
+++ b/samples/bpf/ibumad_kern.c
@@ -16,19 +16,19 @@
 #include 
 
 
-struct bpf_map_def SEC("maps") read_count = {
-   .type= BPF_MAP_TYPE_ARRAY,
-   .key_size= sizeof(u32), /* class; u32 required */
-   .value_size  = sizeof(u64), /* count of mads read */
-   .max_entries = 256, /* Room for all Classes */
-};
-
-struct bpf_map_def SEC("maps") write_count = {
-   .type= BPF_MAP_TYPE_ARRAY,
-   .key_size= sizeof(u32), /* class; u32 required */
-   .value_size  = sizeof(u64), /* count of mads written */
-   .max_entries = 256, /* Room for all Classes */
-};
+struct {
+   __uint(type, BPF_MAP_TYPE_ARRAY);
+   __type(key, u32); /* class; u32 required */
+   __type(value, u64); /* count of mads read */
+   __uint(max_entries, 256); /* Room for all Classes */
+} read_count SEC(".maps");
+
+struct {
+   __uint(type, BPF_MAP_TYPE_ARRAY);
+   __type(key, u32); /* class; u32 required */
+   __type(value, u64); /* count of mads written */
+   __uint(max_entries, 256); /* Room for all Classes */
+} write_count SEC(".maps");
 
 #undef DEBUG
 #ifndef DEBUG
diff --git a/samples/bpf/ibumad_user.c b/samples/bpf/ibumad_user.c
index fa06eef31a84..66a06272f242 100644
--- a/samples/bpf/ibumad_user.c
+++ b/samples/bpf/ibumad_user.c
@@ -23,10 +23,15 @@
 #include 
 #include 
 
-#include "bpf_load.h"
+#include 
 #include "bpf_util.h"
 #include 
 
+struct bpf_link *tp_links[3] = {};
+struct bpf_object *obj;
+static int map_fd[2];
+static int tp_cnt;
+
 static void dump_counts(int fd)
 {
__u32 key;
@@ -53,6 +58,11 @@ static void dump_all_counts(void)
 static void dump_exit(int sig)
 {
dump_all_counts();
+   /* Detach tracepoints */
+   while (tp_cnt)
+   bpf_link__destroy(tp_links[--tp_cnt]);
+
+   bpf_object__close(obj);
exit(0);
 }
 
@@ -73,19 +83,11 @@ static void usage(char *cmd)
 
 int main(int argc, char **argv)
 {
+   struct bpf_program *prog;
unsigned long delay = 5;
+   char filename[256];
int longindex = 0;
int opt;
-   char bpf_file[256];
-
-   /* Create the eBPF kernel code path name.
-* This follows the pattern of all of the other bpf samples
-*/
-   snprintf(bpf_file, sizeof(bpf_file), "%s_kern.o", argv[0]);
-
-   /* Do one final dump when exiting */
-   signal(SIGINT, dump_exit);
-   signal(SIGTERM, dump_exit);
 
while ((opt = getopt_long(argc, argv, "hd:rSw",
  long_options, &longindex)) != -1) {
@@ -107,10 +109,38 @@ int main(int argc, char **argv)
}
}
 
-   if (load_bpf_file(bpf_file)) {
-   fprintf(stderr, "ERROR: failed to load eBPF from file : %s\n",
-   bpf_file);
-   return 1;
+   /* Do one final dump when exiting */
+   signal(SIGINT, dump_exit);
+   signal(SIGTERM, dump_exit);
+
+   snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+   obj = bpf_object__open_file(filename, NULL);
+   if (libbpf_get_error(obj)) {
+   fprintf(stderr, "ERROR: opening BPF object file failed\n");
+   return 0;
+   }
+
+   /* load BPF program */
+   if (bpf_object__load(obj)) {
+   fpri

[PATCH bpf-next 6/9] samples: bpf: refactor test_overhead program with libbpf

2020-11-17 Thread Daniel T. Lee
This commit refactors the existing program with the libbpf bpf loader.
Since the kprobe, tracepoint and raw_tracepoint bpf programs can all
be attached with the single bpf_program__attach() interface, the
corresponding function of libbpf is used here.

Rather than specifying the number of cpus inside the code, this commit
uses the number of available cpus with _SC_NPROCESSORS_ONLN.

Signed-off-by: Daniel T. Lee 
---
 samples/bpf/Makefile |  2 +-
 samples/bpf/test_overhead_user.c | 82 +++-
 2 files changed, 60 insertions(+), 24 deletions(-)

diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index bfa595379493..16d9d68e1e01 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -78,7 +78,7 @@ lathist-objs := lathist_user.o
 offwaketime-objs := offwaketime_user.o $(TRACE_HELPERS)
 spintest-objs := spintest_user.o $(TRACE_HELPERS)
 map_perf_test-objs := map_perf_test_user.o
-test_overhead-objs := bpf_load.o test_overhead_user.o
+test_overhead-objs := test_overhead_user.o
 test_cgrp2_array_pin-objs := test_cgrp2_array_pin.o
 test_cgrp2_attach-objs := test_cgrp2_attach.o
 test_cgrp2_sock-objs := test_cgrp2_sock.o
diff --git a/samples/bpf/test_overhead_user.c b/samples/bpf/test_overhead_user.c
index 94f74112a20e..e4de268d5c9e 100644
--- a/samples/bpf/test_overhead_user.c
+++ b/samples/bpf/test_overhead_user.c
@@ -18,10 +18,14 @@
 #include 
 #include 
 #include 
-#include "bpf_load.h"
+#include 
 
 #define MAX_CNT 100
 
+struct bpf_link *links[2] = {};
+struct bpf_object *obj;
+static int cnt;
+
 static __u64 time_get_ns(void)
 {
struct timespec ts;
@@ -115,20 +119,54 @@ static void run_perf_test(int tasks, int flags)
}
 }
 
+static int load_progs(char *filename)
+{
+   struct bpf_program *prog;
+   int err = 0;
+
+   obj = bpf_object__open_file(filename, NULL);
+   err = libbpf_get_error(obj);
+   if (err < 0) {
+   fprintf(stderr, "ERROR: opening BPF object file failed\n");
+   return err;
+   }
+
+   /* load BPF program */
+   err = bpf_object__load(obj);
+   if (err < 0) {
+   fprintf(stderr, "ERROR: loading BPF object file failed\n");
+   return err;
+   }
+
+   bpf_object__for_each_program(prog, obj) {
+   links[cnt] = bpf_program__attach(prog);
+   err = libbpf_get_error(links[cnt]);
+   if (err < 0) {
+   fprintf(stderr, "ERROR: bpf_program__attach failed\n");
+   links[cnt] = NULL;
+   return err;
+   }
+   cnt++;
+   }
+
+   return err;
+}
+
 static void unload_progs(void)
 {
-   close(prog_fd[0]);
-   close(prog_fd[1]);
-   close(event_fd[0]);
-   close(event_fd[1]);
+   while (cnt)
+   bpf_link__destroy(links[--cnt]);
+
+   bpf_object__close(obj);
 }
 
 int main(int argc, char **argv)
 {
struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
-   char filename[256];
-   int num_cpu = 8;
+   int num_cpu = sysconf(_SC_NPROCESSORS_ONLN);
int test_flags = ~0;
+   char filename[256];
+   int err = 0;
 
setrlimit(RLIMIT_MEMLOCK, &r);
 
@@ -145,38 +183,36 @@ int main(int argc, char **argv)
if (test_flags & 0xC) {
snprintf(filename, sizeof(filename),
 "%s_kprobe_kern.o", argv[0]);
-   if (load_bpf_file(filename)) {
-   printf("%s", bpf_log_buf);
-   return 1;
-   }
+
printf("w/KPROBE\n");
-   run_perf_test(num_cpu, test_flags >> 2);
+   err = load_progs(filename);
+   if (!err)
+   run_perf_test(num_cpu, test_flags >> 2);
+
unload_progs();
}
 
if (test_flags & 0x30) {
snprintf(filename, sizeof(filename),
 "%s_tp_kern.o", argv[0]);
-   if (load_bpf_file(filename)) {
-   printf("%s", bpf_log_buf);
-   return 1;
-   }
printf("w/TRACEPOINT\n");
-   run_perf_test(num_cpu, test_flags >> 4);
+   err = load_progs(filename);
+   if (!err)
+   run_perf_test(num_cpu, test_flags >> 4);
+
unload_progs();
}
 
if (test_flags & 0xC0) {
snprintf(filename, sizeof(filename),
 "%s_raw_tp_kern.o", argv[0]);
-   if (load_bpf_file(filename)) {
-   printf("%s", bpf_log_buf);
-   return 1;
-   }
printf("w/RAW_TRACEPOINT\n");
-   run_perf_test(num_cpu, test

[PATCH bpf-next 2/9] samples: bpf: refactor hbm program with libbpf

2020-11-17 Thread Daniel T. Lee
This commit refactors the existing cgroup programs with libbpf
bpf loader. Since bpf_program__attach doesn't support cgroup program
attachment, this explicitly attaches cgroup bpf program with
bpf_program__attach_cgroup(bpf_prog, cg1).

Also, to change attach_type of bpf program, this uses libbpf's
bpf_program__set_expected_attach_type helper to switch EGRESS to
INGRESS.

Besides, this program was broken due to the typo of BPF MAP definition.
But this commit solves the problem by fixing this from 'queue_stats' map
struct hvm_queue_stats -> hbm_queue_stats.

Fixes: 36b5d471135c ("selftests/bpf: samples/bpf: Split off legacy stuff from 
bpf_helpers.h")
Signed-off-by: Daniel T. Lee 
---
 samples/bpf/.gitignore |  3 ++
 samples/bpf/Makefile   |  2 +-
 samples/bpf/hbm.c  | 96 +-
 samples/bpf/hbm_kern.h |  2 +-
 4 files changed, 54 insertions(+), 49 deletions(-)

diff --git a/samples/bpf/.gitignore b/samples/bpf/.gitignore
index b2f29bc8dc43..0b9548ea8477 100644
--- a/samples/bpf/.gitignore
+++ b/samples/bpf/.gitignore
@@ -52,3 +52,6 @@ xdp_tx_iptunnel
 xdpsock
 xsk_fwd
 testfile.img
+hbm_out.log
+iperf.*
+*.out
diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index 3e83cd5ca1c2..01449d767122 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -110,7 +110,7 @@ xdp_fwd-objs := xdp_fwd_user.o
 task_fd_query-objs := bpf_load.o task_fd_query_user.o $(TRACE_HELPERS)
 xdp_sample_pkts-objs := xdp_sample_pkts_user.o $(TRACE_HELPERS)
 ibumad-objs := bpf_load.o ibumad_user.o $(TRACE_HELPERS)
-hbm-objs := bpf_load.o hbm.o $(CGROUP_HELPERS) $(TRACE_HELPERS)
+hbm-objs := hbm.o $(CGROUP_HELPERS) $(TRACE_HELPERS)
 
 # Tell kbuild to always build the programs
 always-y := $(tprogs-y)
diff --git a/samples/bpf/hbm.c b/samples/bpf/hbm.c
index b9f9f771dd81..008bc635ad9b 100644
--- a/samples/bpf/hbm.c
+++ b/samples/bpf/hbm.c
@@ -46,7 +46,6 @@
 #include 
 #include 
 
-#include "bpf_load.h"
 #include "bpf_rlimit.h"
 #include "trace_helpers.h"
 #include "cgroup_helpers.h"
@@ -68,9 +67,10 @@ bool edt_flag;
 static void Usage(void);
 static void do_error(char *msg, bool errno_flag);
 
+struct bpf_program *bpf_prog;
 struct bpf_object *obj;
-int bpfprog_fd;
 int cgroup_storage_fd;
+int queue_stats_fd;
 
 static void do_error(char *msg, bool errno_flag)
 {
@@ -83,56 +83,54 @@ static void do_error(char *msg, bool errno_flag)
 
 static int prog_load(char *prog)
 {
-   struct bpf_prog_load_attr prog_load_attr = {
-   .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
-   .file = prog,
-   .expected_attach_type = BPF_CGROUP_INET_EGRESS,
-   };
-   int map_fd;
-   struct bpf_map *map;
+   int rc = 1;
 
-   int ret = 0;
+   obj = bpf_object__open_file(prog, NULL);
+   if (libbpf_get_error(obj)) {
+   printf("ERROR: opening BPF object file failed\n");
+   return rc;
+   }
 
-   if (access(prog, O_RDONLY) < 0) {
-   printf("Error accessing file %s: %s\n", prog, strerror(errno));
-   return 1;
+   /* load BPF program */
+   if (bpf_object__load(obj)) {
+   printf("ERROR: loading BPF object file failed\n");
+   goto cleanup;
}
-   if (bpf_prog_load_xattr(&prog_load_attr, &obj, &bpfprog_fd))
-   ret = 1;
-   if (!ret) {
-   map = bpf_object__find_map_by_name(obj, "queue_stats");
-   map_fd = bpf_map__fd(map);
-   if (map_fd < 0) {
-   printf("Map not found: %s\n", strerror(map_fd));
-   ret = 1;
-   }
+
+   bpf_prog = bpf_object__find_program_by_title(obj, "cgroup_skb/egress");
+   if (!bpf_prog) {
+   printf("ERROR: finding a prog in obj file failed\n");
+   goto cleanup;
}
 
-   if (ret) {
-   printf("ERROR: bpf_prog_load_xattr failed for: %s\n", prog);
-   printf("  Output from verifier:\n%s\n--\n", bpf_log_buf);
-   ret = -1;
-   } else {
-   ret = map_fd;
+   queue_stats_fd = bpf_object__find_map_fd_by_name(obj, "queue_stats");
+   if (queue_stats_fd < 0) {
+   printf("ERROR: finding a map in obj file failed\n");
+   goto cleanup;
}
 
-   return ret;
+   rc = 0;
+
+cleanup:
+   if (rc != 0)
+   bpf_object__close(obj);
+
+   return rc;
 }
 
 static int run_bpf_prog(char *prog, int cg_id)
 {
-   int map_fd;
-   int rc = 0;
+   struct hbm_queue_stats qstats = {0};
+   struct bpf_link *link = NULL;
+   char cg_dir[100];
int key = 0;
int cg1 = 0;
-   int type = BPF_CGROUP_INET_EGRESS;
-   char cg_dir[100];
-   struct

[PATCH bpf-next 3/9] samples: bpf: refactor test_cgrp2_sock2 program with libbpf

2020-11-17 Thread Daniel T. Lee
This commit refactors the existing cgroup program with the libbpf bpf
loader. The original test_cgrp2_sock2 kept the bpf program
attached to the cgroup hierarchy even after the exit of the user program.
To implement the same functionality with libbpf, this commit uses the
BPF_LINK_PINNING to pin the link attachment even after it is closed.

Since this uses LINK instead of ATTACH, detach of bpf program from
cgroup with 'test_cgrp2_sock' is not used anymore.

The code to mount the bpffs was added to the .sh file in case the bpffs
was not mounted on /sys/fs/bpf. Additionally, to fix the problem that the
shell script cannot find the binary object from the current path, the
relative path './' has been added in front of the binary name.

Signed-off-by: Daniel T. Lee 
---
 samples/bpf/Makefile|  2 +-
 samples/bpf/test_cgrp2_sock2.c  | 63 -
 samples/bpf/test_cgrp2_sock2.sh | 21 ---
 3 files changed, 64 insertions(+), 22 deletions(-)

diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index 01449d767122..7a643595ac6c 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -82,7 +82,7 @@ test_overhead-objs := bpf_load.o test_overhead_user.o
 test_cgrp2_array_pin-objs := test_cgrp2_array_pin.o
 test_cgrp2_attach-objs := test_cgrp2_attach.o
 test_cgrp2_sock-objs := test_cgrp2_sock.o
-test_cgrp2_sock2-objs := bpf_load.o test_cgrp2_sock2.o
+test_cgrp2_sock2-objs := test_cgrp2_sock2.o
 xdp1-objs := xdp1_user.o
 # reuse xdp1 source intentionally
 xdp2-objs := xdp1_user.o
diff --git a/samples/bpf/test_cgrp2_sock2.c b/samples/bpf/test_cgrp2_sock2.c
index a9277b118c33..518526c7ce16 100644
--- a/samples/bpf/test_cgrp2_sock2.c
+++ b/samples/bpf/test_cgrp2_sock2.c
@@ -20,9 +20,9 @@
 #include 
 #include 
 #include 
+#include 
 
 #include "bpf_insn.h"
-#include "bpf_load.h"
 
 static int usage(const char *argv0)
 {
@@ -32,37 +32,66 @@ static int usage(const char *argv0)
 
 int main(int argc, char **argv)
 {
-   int cg_fd, ret, filter_id = 0;
+   int cg_fd, err, ret = EXIT_FAILURE, filter_id = 0, prog_cnt = 0;
+   const char *link_pin_path = "/sys/fs/bpf/test_cgrp2_sock2";
+   struct bpf_link *link = NULL;
+   struct bpf_program *progs[2];
+   struct bpf_program *prog;
+   struct bpf_object *obj;
 
if (argc < 3)
return usage(argv[0]);
 
+   if (argc > 3)
+   filter_id = atoi(argv[3]);
+
cg_fd = open(argv[1], O_DIRECTORY | O_RDONLY);
if (cg_fd < 0) {
printf("Failed to open cgroup path: '%s'\n", strerror(errno));
-   return EXIT_FAILURE;
+   return ret;
}
 
-   if (load_bpf_file(argv[2]))
-   return EXIT_FAILURE;
-
-   printf("Output from kernel verifier:\n%s\n---\n", bpf_log_buf);
+   obj = bpf_object__open_file(argv[2], NULL);
+   if (libbpf_get_error(obj)) {
+   printf("ERROR: opening BPF object file failed\n");
+   return ret;
+   }
 
-   if (argc > 3)
-   filter_id = atoi(argv[3]);
+   bpf_object__for_each_program(prog, obj) {
+   progs[prog_cnt] = prog;
+   prog_cnt++;
+   }
 
if (filter_id >= prog_cnt) {
printf("Invalid program id; program not found in file\n");
-   return EXIT_FAILURE;
+   goto cleanup;
+   }
+
+   /* load BPF program */
+   if (bpf_object__load(obj)) {
+   printf("ERROR: loading BPF object file failed\n");
+   goto cleanup;
}
 
-   ret = bpf_prog_attach(prog_fd[filter_id], cg_fd,
- BPF_CGROUP_INET_SOCK_CREATE, 0);
-   if (ret < 0) {
-   printf("Failed to attach prog to cgroup: '%s'\n",
-  strerror(errno));
-   return EXIT_FAILURE;
+   link = bpf_program__attach_cgroup(progs[filter_id], cg_fd);
+   if (libbpf_get_error(link)) {
+   printf("ERROR: bpf_program__attach failed\n");
+   link = NULL;
+   goto cleanup;
}
 
-   return EXIT_SUCCESS;
+   err = bpf_link__pin(link, link_pin_path);
+   if (err < 0) {
+   printf("err : %d\n", err);
+   goto cleanup;
+   }
+
+   ret = EXIT_SUCCESS;
+
+cleanup:
+   if (ret != EXIT_SUCCESS)
+   bpf_link__destroy(link);
+
+   bpf_object__close(obj);
+   return ret;
 }
diff --git a/samples/bpf/test_cgrp2_sock2.sh b/samples/bpf/test_cgrp2_sock2.sh
index 0f396a86e0cb..6a3dbe642b2b 100755
--- a/samples/bpf/test_cgrp2_sock2.sh
+++ b/samples/bpf/test_cgrp2_sock2.sh
@@ -1,6 +1,9 @@
 #!/bin/bash
 # SPDX-License-Identifier: GPL-2.0
 
+BPFFS=/sys/fs/bpf
+LINK_PIN=$BPFFS/test_cgrp2_sock2
+
 function config_device {
ip netns add at_ns0

[PATCH bpf-next 8/9] samples: bpf: remove unused trace_helper and bpf_load from Makefile

2020-11-17 Thread Daniel T. Lee
This commit removes the unused trace_helper and bpf_load from
samples/bpf target objects from Makefile.

Signed-off-by: Daniel T. Lee 
---
 samples/bpf/Makefile | 10 --
 1 file changed, 4 insertions(+), 6 deletions(-)

diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index 16d9d68e1e01..a8b6fd943461 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -73,7 +73,7 @@ tracex5-objs := tracex5_user.o $(TRACE_HELPERS)
 tracex6-objs := tracex6_user.o
 tracex7-objs := tracex7_user.o
 test_probe_write_user-objs := test_probe_write_user_user.o
-trace_output-objs := trace_output_user.o $(TRACE_HELPERS)
+trace_output-objs := trace_output_user.o
 lathist-objs := lathist_user.o
 offwaketime-objs := offwaketime_user.o $(TRACE_HELPERS)
 spintest-objs := spintest_user.o $(TRACE_HELPERS)
@@ -91,8 +91,8 @@ test_current_task_under_cgroup-objs := $(CGROUP_HELPERS) \
   test_current_task_under_cgroup_user.o
 trace_event-objs := trace_event_user.o $(TRACE_HELPERS)
 sampleip-objs := sampleip_user.o $(TRACE_HELPERS)
-tc_l2_redirect-objs := bpf_load.o tc_l2_redirect_user.o
-lwt_len_hist-objs := bpf_load.o lwt_len_hist_user.o
+tc_l2_redirect-objs := tc_l2_redirect_user.o
+lwt_len_hist-objs := lwt_len_hist_user.o
 xdp_tx_iptunnel-objs := xdp_tx_iptunnel_user.o
 test_map_in_map-objs := test_map_in_map_user.o
 per_socket_stats_example-objs := cookie_uid_helper_example.o
@@ -108,7 +108,7 @@ xdpsock-objs := xdpsock_user.o
 xsk_fwd-objs := xsk_fwd.o
 xdp_fwd-objs := xdp_fwd_user.o
 task_fd_query-objs := task_fd_query_user.o $(TRACE_HELPERS)
-xdp_sample_pkts-objs := xdp_sample_pkts_user.o $(TRACE_HELPERS)
+xdp_sample_pkts-objs := xdp_sample_pkts_user.o
 ibumad-objs := ibumad_user.o
 hbm-objs := hbm.o $(CGROUP_HELPERS) $(TRACE_HELPERS)
 
@@ -197,8 +197,6 @@ TPROGS_CFLAGS += --sysroot=$(SYSROOT)
 TPROGS_LDFLAGS := -L$(SYSROOT)/usr/lib
 endif
 
-TPROGCFLAGS_bpf_load.o += -Wno-unused-variable
-
 TPROGS_LDLIBS  += $(LIBBPF) -lelf -lz
 TPROGLDLIBS_tracex4+= -lrt
 TPROGLDLIBS_trace_output   += -lrt
-- 
2.25.1



[PATCH bpf-next v2 1/3] samples: bpf: Refactor xdp_monitor with libbpf

2020-10-10 Thread Daniel T. Lee
To avoid confusion caused by the increasing fragmentation of the BPF
Loader program, this commit would like to change to the libbpf loader
instead of using the bpf_load.

Thanks to libbpf's bpf_link interface, managing the tracepoint BPF
program is much easier. bpf_program__attach_tracepoint manages the
enable of tracepoint event and attach of BPF programs to it with a
single interface bpf_link, so there is no need to manage event_fd and
prog_fd separately.

This commit refactors xdp_monitor with using this libbpf API, and the
bpf_load is removed and migrated to libbpf.

Signed-off-by: Daniel T. Lee 

---
Changes in v2:
 - added cleanup logic for bpf_link and bpf_object
 - split increment into separate statement
 - refactor pointer array initialization

 samples/bpf/Makefile   |   2 +-
 samples/bpf/xdp_monitor_user.c | 159 +
 2 files changed, 121 insertions(+), 40 deletions(-)

diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index 4f1ed0e3cf9f..0cee2aa8970f 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -99,7 +99,7 @@ per_socket_stats_example-objs := cookie_uid_helper_example.o
 xdp_redirect-objs := xdp_redirect_user.o
 xdp_redirect_map-objs := xdp_redirect_map_user.o
 xdp_redirect_cpu-objs := bpf_load.o xdp_redirect_cpu_user.o
-xdp_monitor-objs := bpf_load.o xdp_monitor_user.o
+xdp_monitor-objs := xdp_monitor_user.o
 xdp_rxq_info-objs := xdp_rxq_info_user.o
 syscall_tp-objs := syscall_tp_user.o
 cpustat-objs := cpustat_user.o
diff --git a/samples/bpf/xdp_monitor_user.c b/samples/bpf/xdp_monitor_user.c
index ef53b93db573..03d0a182913f 100644
--- a/samples/bpf/xdp_monitor_user.c
+++ b/samples/bpf/xdp_monitor_user.c
@@ -26,12 +26,37 @@ static const char *__doc_err_only__=
 #include 
 #include 
 
+#include 
 #include 
-#include "bpf_load.h"
+#include 
 #include "bpf_util.h"
 
+enum map_type {
+   REDIRECT_ERR_CNT,
+   EXCEPTION_CNT,
+   CPUMAP_ENQUEUE_CNT,
+   CPUMAP_KTHREAD_CNT,
+   DEVMAP_XMIT_CNT,
+};
+
+static const char *const map_type_strings[] = {
+   [REDIRECT_ERR_CNT] = "redirect_err_cnt",
+   [EXCEPTION_CNT] = "exception_cnt",
+   [CPUMAP_ENQUEUE_CNT] = "cpumap_enqueue_cnt",
+   [CPUMAP_KTHREAD_CNT] = "cpumap_kthread_cnt",
+   [DEVMAP_XMIT_CNT] = "devmap_xmit_cnt",
+};
+
+#define NUM_MAP 5
+#define NUM_TP 8
+
+static int tp_cnt;
+static int map_cnt;
 static int verbose = 1;
 static bool debug = false;
+struct bpf_map *map_data[NUM_MAP] = {};
+struct bpf_link *tp_links[NUM_TP] = {};
+struct bpf_object *obj;
 
 static const struct option long_options[] = {
{"help",no_argument,NULL, 'h' },
@@ -41,6 +66,16 @@ static const struct option long_options[] = {
{0, 0, NULL,  0 }
 };
 
+static void int_exit(int sig)
+{
+   /* Detach tracepoints */
+   while (tp_cnt)
+   bpf_link__destroy(tp_links[--tp_cnt]);
+
+   bpf_object__close(obj);
+   exit(0);
+}
+
 /* C standard specifies two constants, EXIT_SUCCESS(0) and EXIT_FAILURE(1) */
 #define EXIT_FAIL_MEM  5
 
@@ -483,23 +518,23 @@ static bool stats_collect(struct stats_record *rec)
 * this can happen by someone running perf-record -e
 */
 
-   fd = map_data[0].fd; /* map0: redirect_err_cnt */
+   fd = bpf_map__fd(map_data[REDIRECT_ERR_CNT]);
for (i = 0; i < REDIR_RES_MAX; i++)
map_collect_record_u64(fd, i, &rec->xdp_redirect[i]);
 
-   fd = map_data[1].fd; /* map1: exception_cnt */
+   fd = bpf_map__fd(map_data[EXCEPTION_CNT]);
for (i = 0; i < XDP_ACTION_MAX; i++) {
map_collect_record_u64(fd, i, &rec->xdp_exception[i]);
}
 
-   fd = map_data[2].fd; /* map2: cpumap_enqueue_cnt */
+   fd = bpf_map__fd(map_data[CPUMAP_ENQUEUE_CNT]);
for (i = 0; i < MAX_CPUS; i++)
map_collect_record(fd, i, &rec->xdp_cpumap_enqueue[i]);
 
-   fd = map_data[3].fd; /* map3: cpumap_kthread_cnt */
+   fd = bpf_map__fd(map_data[CPUMAP_KTHREAD_CNT]);
map_collect_record(fd, 0, &rec->xdp_cpumap_kthread);
 
-   fd = map_data[4].fd; /* map4: devmap_xmit_cnt */
+   fd = bpf_map__fd(map_data[DEVMAP_XMIT_CNT]);
map_collect_record(fd, 0, &rec->xdp_devmap_xmit);
 
return true;
@@ -598,8 +633,8 @@ static void stats_poll(int interval, bool err_only)
 
/* TODO Need more advanced stats on error types */
if (verbose) {
-   printf(" - Stats map0: %s\n", map_data[0].name);
-   printf(" - Stats map1: %s\n", map_data[1].name);
+   printf(" - Stats map0: %s\n", bpf_map__name(map_data[0]));
+   printf(" - Stats map1: %s\n", bpf_map__name(map_data[1]));
printf("\n");
}
fflush(stdout);
@@ -618,44 +653,51 @

[PATCH bpf-next v2 3/3] samples: bpf: refactor XDP kern program maps with BTF-defined map

2020-10-10 Thread Daniel T. Lee
Most of the samples were converted to use the new BTF-defined MAP as
they moved to libbpf, but some of the samples were missing.

Instead of using the previous BPF MAP definition, this commit refactors
xdp_monitor and xdp_sample_pkts_kern MAP definition with the new
BTF-defined MAP format.

Also, this commit removes the max_entries attribute at PERF_EVENT_ARRAY
map type. The libbpf's bpf_object__create_map() will automatically
set max_entries to the maximum configured number of CPUs on the host.

Signed-off-by: Daniel T. Lee 

---
Changes in v2:
 - revert BTF key/val type to default of BPF_MAP_TYPE_PERF_EVENT_ARRAY

 samples/bpf/xdp_monitor_kern.c | 60 +++---
 samples/bpf/xdp_sample_pkts_kern.c | 14 +++
 samples/bpf/xdp_sample_pkts_user.c |  1 -
 3 files changed, 36 insertions(+), 39 deletions(-)

diff --git a/samples/bpf/xdp_monitor_kern.c b/samples/bpf/xdp_monitor_kern.c
index 3d33cca2d48a..5c955b812c47 100644
--- a/samples/bpf/xdp_monitor_kern.c
+++ b/samples/bpf/xdp_monitor_kern.c
@@ -6,21 +6,21 @@
 #include 
 #include 
 
-struct bpf_map_def SEC("maps") redirect_err_cnt = {
-   .type = BPF_MAP_TYPE_PERCPU_ARRAY,
-   .key_size = sizeof(u32),
-   .value_size = sizeof(u64),
-   .max_entries = 2,
+struct {
+   __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+   __type(key, u32);
+   __type(value, u64);
+   __uint(max_entries, 2);
/* TODO: have entries for all possible errno's */
-};
+} redirect_err_cnt SEC(".maps");
 
 #define XDP_UNKNOWN	XDP_REDIRECT + 1
-struct bpf_map_def SEC("maps") exception_cnt = {
-   .type   = BPF_MAP_TYPE_PERCPU_ARRAY,
-   .key_size   = sizeof(u32),
-   .value_size = sizeof(u64),
-   .max_entries= XDP_UNKNOWN + 1,
-};
+struct {
+   __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+   __type(key, u32);
+   __type(value, u64);
+   __uint(max_entries, XDP_UNKNOWN + 1);
+} exception_cnt SEC(".maps");
 
 /* Tracepoint format: /sys/kernel/debug/tracing/events/xdp/xdp_redirect/format
  * Code in: kernel/include/trace/events/xdp.h
@@ -129,19 +129,19 @@ struct datarec {
 };
 #define MAX_CPUS 64
 
-struct bpf_map_def SEC("maps") cpumap_enqueue_cnt = {
-   .type   = BPF_MAP_TYPE_PERCPU_ARRAY,
-   .key_size   = sizeof(u32),
-   .value_size = sizeof(struct datarec),
-   .max_entries= MAX_CPUS,
-};
+struct {
+   __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+   __type(key, u32);
+   __type(value, struct datarec);
+   __uint(max_entries, MAX_CPUS);
+} cpumap_enqueue_cnt SEC(".maps");
 
-struct bpf_map_def SEC("maps") cpumap_kthread_cnt = {
-   .type   = BPF_MAP_TYPE_PERCPU_ARRAY,
-   .key_size   = sizeof(u32),
-   .value_size = sizeof(struct datarec),
-   .max_entries= 1,
-};
+struct {
+   __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+   __type(key, u32);
+   __type(value, struct datarec);
+   __uint(max_entries, 1);
+} cpumap_kthread_cnt SEC(".maps");
 
 /* Tracepoint: /sys/kernel/debug/tracing/events/xdp/xdp_cpumap_enqueue/format
  * Code in: kernel/include/trace/events/xdp.h
@@ -210,12 +210,12 @@ int trace_xdp_cpumap_kthread(struct cpumap_kthread_ctx 
*ctx)
return 0;
 }
 
-struct bpf_map_def SEC("maps") devmap_xmit_cnt = {
-   .type   = BPF_MAP_TYPE_PERCPU_ARRAY,
-   .key_size   = sizeof(u32),
-   .value_size = sizeof(struct datarec),
-   .max_entries= 1,
-};
+struct {
+   __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+   __type(key, u32);
+   __type(value, struct datarec);
+   __uint(max_entries, 1);
+} devmap_xmit_cnt SEC(".maps");
 
 /* Tracepoint: /sys/kernel/debug/tracing/events/xdp/xdp_devmap_xmit/format
  * Code in: kernel/include/trace/events/xdp.h
diff --git a/samples/bpf/xdp_sample_pkts_kern.c 
b/samples/bpf/xdp_sample_pkts_kern.c
index 33377289e2a8..9cf76b340dd7 100644
--- a/samples/bpf/xdp_sample_pkts_kern.c
+++ b/samples/bpf/xdp_sample_pkts_kern.c
@@ -5,14 +5,12 @@
 #include 
 
 #define SAMPLE_SIZE 64ul
-#define MAX_CPUS 128
-
-struct bpf_map_def SEC("maps") my_map = {
-   .type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
-   .key_size = sizeof(int),
-   .value_size = sizeof(u32),
-   .max_entries = MAX_CPUS,
-};
+
+struct {
+   __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
+   __uint(key_size, sizeof(int));
+   __uint(value_size, sizeof(u32));
+} my_map SEC(".maps");
 
 SEC("xdp_sample")
 int xdp_sample_prog(struct xdp_md *ctx)
diff --git a/samples/bpf/xdp_sample_pkts_user.c 
b/samples/bpf/xdp_sample_pkts_user.c
index 991ef6f0880b..4b2a300c750c 100644
--- a/samples/bpf/xdp_sample_pkts_user.c
+++ b/samples/bpf/xdp_sample_pkts_user.c
@@ -18,7 +18,6 @@
 
 #include "perf-sys.h"
 
-#define MAX_CPUS 128
 static int if_idx;
 static char *if_name;
 static __u32 xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST;
-- 
2.25.1



[PATCH bpf-next v2 0/3] samples: bpf: Refactor XDP programs with libbpf

2020-10-10 Thread Daniel T. Lee
To avoid confusion caused by the increasing fragmentation of the BPF
Loader program, this commit would like to convert the previous bpf_load
loader with the libbpf loader.

Thanks to libbpf's bpf_link interface, managing the tracepoint BPF
program is much easier. bpf_program__attach_tracepoint manages the
enable of tracepoint event and attach of BPF programs to it with a
single interface bpf_link, so there is no need to manage event_fd and
prog_fd separately.

And due to addition of generic bpf_program__attach() to libbpf, it is
now possible to attach BPF programs with __attach() instead of
explicitly calling __attach_().

This patchset refactors xdp_monitor with using this libbpf API, and the
bpf_load is removed and migrated to libbpf. Also, attach_tracepoint()
is replaced with the generic __attach() method in xdp_redirect_cpu.
Moreover, maps in kern program have been converted to BTF-defined map.

---
Changes in v2:
 - added cleanup logic for bpf_link and bpf_object in xdp_monitor
 - program section match with bpf_program__is_ instead of strncmp
 - revert BTF key/val type to default of BPF_MAP_TYPE_PERF_EVENT_ARRAY
 - split increment into separate statement
 - refactor pointer array initialization
 - error code cleanup

Daniel T. Lee (3):
  samples: bpf: Refactor xdp_monitor with libbpf
  samples: bpf: Replace attach_tracepoint() to attach() in
xdp_redirect_cpu
  samples: bpf: refactor XDP kern program maps with BTF-defined map

 samples/bpf/Makefile|   4 +-
 samples/bpf/xdp_monitor_kern.c  |  60 +--
 samples/bpf/xdp_monitor_user.c  | 159 +---
 samples/bpf/xdp_redirect_cpu_user.c | 153 +-
 samples/bpf/xdp_sample_pkts_kern.c  |  14 ++-
 samples/bpf/xdp_sample_pkts_user.c  |   1 -
 6 files changed, 230 insertions(+), 161 deletions(-)

-- 
2.25.1



[PATCH bpf-next v2 1/3] samples: bpf: Refactor xdp_monitor with libbpf

2020-10-10 Thread Daniel T. Lee
To avoid confusion caused by the increasing fragmentation of the BPF
Loader program, this commit would like to change to the libbpf loader
instead of using the bpf_load.

Thanks to libbpf's bpf_link interface, managing the tracepoint BPF
program is much easier. bpf_program__attach_tracepoint manages the
enable of tracepoint event and attach of BPF programs to it with a
single interface bpf_link, so there is no need to manage event_fd and
prog_fd separately.

This commit refactors xdp_monitor with using this libbpf API, and the
bpf_load is removed and migrated to libbpf.

Signed-off-by: Daniel T. Lee 

---
Changes in v2:
 - added cleanup logic for bpf_link and bpf_object
 - split increment into separate statement
 - refactor pointer array initialization

 samples/bpf/Makefile   |   2 +-
 samples/bpf/xdp_monitor_user.c | 159 +
 2 files changed, 121 insertions(+), 40 deletions(-)

diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index 4f1ed0e3cf9f..0cee2aa8970f 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -99,7 +99,7 @@ per_socket_stats_example-objs := cookie_uid_helper_example.o
 xdp_redirect-objs := xdp_redirect_user.o
 xdp_redirect_map-objs := xdp_redirect_map_user.o
 xdp_redirect_cpu-objs := bpf_load.o xdp_redirect_cpu_user.o
-xdp_monitor-objs := bpf_load.o xdp_monitor_user.o
+xdp_monitor-objs := xdp_monitor_user.o
 xdp_rxq_info-objs := xdp_rxq_info_user.o
 syscall_tp-objs := syscall_tp_user.o
 cpustat-objs := cpustat_user.o
diff --git a/samples/bpf/xdp_monitor_user.c b/samples/bpf/xdp_monitor_user.c
index ef53b93db573..03d0a182913f 100644
--- a/samples/bpf/xdp_monitor_user.c
+++ b/samples/bpf/xdp_monitor_user.c
@@ -26,12 +26,37 @@ static const char *__doc_err_only__=
 #include 
 #include 
 
+#include 
 #include 
-#include "bpf_load.h"
+#include 
 #include "bpf_util.h"
 
+enum map_type {
+   REDIRECT_ERR_CNT,
+   EXCEPTION_CNT,
+   CPUMAP_ENQUEUE_CNT,
+   CPUMAP_KTHREAD_CNT,
+   DEVMAP_XMIT_CNT,
+};
+
+static const char *const map_type_strings[] = {
+   [REDIRECT_ERR_CNT] = "redirect_err_cnt",
+   [EXCEPTION_CNT] = "exception_cnt",
+   [CPUMAP_ENQUEUE_CNT] = "cpumap_enqueue_cnt",
+   [CPUMAP_KTHREAD_CNT] = "cpumap_kthread_cnt",
+   [DEVMAP_XMIT_CNT] = "devmap_xmit_cnt",
+};
+
+#define NUM_MAP 5
+#define NUM_TP 8
+
+static int tp_cnt;
+static int map_cnt;
 static int verbose = 1;
 static bool debug = false;
+struct bpf_map *map_data[NUM_MAP] = {};
+struct bpf_link *tp_links[NUM_TP] = {};
+struct bpf_object *obj;
 
 static const struct option long_options[] = {
{"help",no_argument,NULL, 'h' },
@@ -41,6 +66,16 @@ static const struct option long_options[] = {
{0, 0, NULL,  0 }
 };
 
+static void int_exit(int sig)
+{
+   /* Detach tracepoints */
+   while (tp_cnt)
+   bpf_link__destroy(tp_links[--tp_cnt]);
+
+   bpf_object__close(obj);
+   exit(0);
+}
+
 /* C standard specifies two constants, EXIT_SUCCESS(0) and EXIT_FAILURE(1) */
 #define EXIT_FAIL_MEM  5
 
@@ -483,23 +518,23 @@ static bool stats_collect(struct stats_record *rec)
 * this can happen by someone running perf-record -e
 */
 
-   fd = map_data[0].fd; /* map0: redirect_err_cnt */
+   fd = bpf_map__fd(map_data[REDIRECT_ERR_CNT]);
for (i = 0; i < REDIR_RES_MAX; i++)
map_collect_record_u64(fd, i, &rec->xdp_redirect[i]);
 
-   fd = map_data[1].fd; /* map1: exception_cnt */
+   fd = bpf_map__fd(map_data[EXCEPTION_CNT]);
for (i = 0; i < XDP_ACTION_MAX; i++) {
map_collect_record_u64(fd, i, &rec->xdp_exception[i]);
}
 
-   fd = map_data[2].fd; /* map2: cpumap_enqueue_cnt */
+   fd = bpf_map__fd(map_data[CPUMAP_ENQUEUE_CNT]);
for (i = 0; i < MAX_CPUS; i++)
map_collect_record(fd, i, &rec->xdp_cpumap_enqueue[i]);
 
-   fd = map_data[3].fd; /* map3: cpumap_kthread_cnt */
+   fd = bpf_map__fd(map_data[CPUMAP_KTHREAD_CNT]);
map_collect_record(fd, 0, &rec->xdp_cpumap_kthread);
 
-   fd = map_data[4].fd; /* map4: devmap_xmit_cnt */
+   fd = bpf_map__fd(map_data[DEVMAP_XMIT_CNT]);
map_collect_record(fd, 0, &rec->xdp_devmap_xmit);
 
return true;
@@ -598,8 +633,8 @@ static void stats_poll(int interval, bool err_only)
 
/* TODO Need more advanced stats on error types */
if (verbose) {
-   printf(" - Stats map0: %s\n", map_data[0].name);
-   printf(" - Stats map1: %s\n", map_data[1].name);
+   printf(" - Stats map0: %s\n", bpf_map__name(map_data[0]));
+   printf(" - Stats map1: %s\n", bpf_map__name(map_data[1]));
printf("\n");
}
fflush(stdout);
@@ -618,44 +653,51 @

Re: [PATCH bpf-next 0/3] samples: bpf: Refactor XDP programs with libbpf

2020-10-10 Thread Daniel T. Lee
On Sat, Oct 10, 2020 at 3:30 AM Andrii Nakryiko
 wrote:
>
> On Fri, Oct 9, 2020 at 9:04 AM Daniel T. Lee  wrote:
> >
> > To avoid confusion caused by the increasing fragmentation of the BPF
> > Loader program, this commit would like to convert the previous bpf_load
> > loader with the libbpf loader.
> >
> > Thanks to libbpf's bpf_link interface, managing the tracepoint BPF
> > program is much easier. bpf_program__attach_tracepoint manages the
> > enable of tracepoint event and attach of BPF programs to it with a
> > single interface bpf_link, so there is no need to manage event_fd and
> > prog_fd separately.
> >
> > And due to addition of generic bpf_program__attach() to libbpf, it is
> > now possible to attach BPF programs with __attach() instead of
> > explicitly calling __attach_().
> >
> > This patchset refactors xdp_monitor with using this libbpf API, and the
> > bpf_load is removed and migrated to libbpf. Also, attach_tracepoint()
> > is replaced with the generic __attach() method in xdp_redirect_cpu.
> > Moreover, maps in kern program have been converted to BTF-defined map.
> >
> > Daniel T. Lee (3):
> >   samples: bpf: Refactor xdp_monitor with libbpf
> >   samples: bpf: Replace attach_tracepoint() to attach() in
> > xdp_redirect_cpu
> >   samples: bpf: refactor XDP kern program maps with BTF-defined map
> >
> >  samples/bpf/Makefile|   4 +-
> >  samples/bpf/xdp_monitor_kern.c  |  60 ++--
> >  samples/bpf/xdp_monitor_user.c  | 144 +---
> >  samples/bpf/xdp_redirect_cpu_user.c | 138 +-
> >  samples/bpf/xdp_sample_pkts_kern.c  |  14 ++-
> >  samples/bpf/xdp_sample_pkts_user.c  |   1 -
> >  6 files changed, 211 insertions(+), 150 deletions(-)
> >
> > --
> > 2.25.1
> >
>
> Thanks for this clean up, Daniel! It's great! I left a few nits here
> and there in the appropriate patches.
>
> There still seem to be a bunch of users of bpf_load.c, which would be
> nice to get rid of completely. But before you go do that, consider
> integrating BPF skeleton into samples/bpf Makefile. That way instead
> of all those look ups of maps/programs by name, you'd be writing a
> straightforward skel->maps.my_map and similar short and non-failing
> code. This should make the overall time spent on conversion much
> smaller (and more pleasant, IMO).
>
> You've dealt with a lot of samples/bpf reworking, so it shouldn't be too
> hard for you to figure out the best way to do this, but check
> selftests/bpf's Makefile, if you need some ideas. Or just ask for
> help. Thanks!

Thanks for the great feedback!

Thank you for letting me know about the BPF features that I can apply.
Currently, I'm not familiar with the BPF skeleton yet, but I'll take a good
look at the BPF skeleton to apply it in a more advanced form.

Thank you for your time and effort for the review.

-- 
Best,
Daniel T. Lee


[PATCH bpf-next v2 3/3] samples: bpf: refactor XDP kern program maps with BTF-defined map

2020-10-10 Thread Daniel T. Lee
Most of the samples were converted to use the new BTF-defined MAP as
they moved to libbpf, but some of the samples were missing.

Instead of using the previous BPF MAP definition, this commit refactors
xdp_monitor and xdp_sample_pkts_kern MAP definition with the new
BTF-defined MAP format.

Also, this commit removes the max_entries attribute at PERF_EVENT_ARRAY
map type. The libbpf's bpf_object__create_map() will automatically
set max_entries to the maximum configured number of CPUs on the host.

Signed-off-by: Daniel T. Lee 

---
Changes in v2:
 - revert BTF key/val type to default of BPF_MAP_TYPE_PERF_EVENT_ARRAY

 samples/bpf/xdp_monitor_kern.c | 60 +++---
 samples/bpf/xdp_sample_pkts_kern.c | 14 +++
 samples/bpf/xdp_sample_pkts_user.c |  1 -
 3 files changed, 36 insertions(+), 39 deletions(-)

diff --git a/samples/bpf/xdp_monitor_kern.c b/samples/bpf/xdp_monitor_kern.c
index 3d33cca2d48a..5c955b812c47 100644
--- a/samples/bpf/xdp_monitor_kern.c
+++ b/samples/bpf/xdp_monitor_kern.c
@@ -6,21 +6,21 @@
 #include 
 #include 
 
-struct bpf_map_def SEC("maps") redirect_err_cnt = {
-   .type = BPF_MAP_TYPE_PERCPU_ARRAY,
-   .key_size = sizeof(u32),
-   .value_size = sizeof(u64),
-   .max_entries = 2,
+struct {
+   __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+   __type(key, u32);
+   __type(value, u64);
+   __uint(max_entries, 2);
/* TODO: have entries for all possible errno's */
-};
+} redirect_err_cnt SEC(".maps");
 
 #define XDP_UNKNOWN	XDP_REDIRECT + 1
-struct bpf_map_def SEC("maps") exception_cnt = {
-   .type   = BPF_MAP_TYPE_PERCPU_ARRAY,
-   .key_size   = sizeof(u32),
-   .value_size = sizeof(u64),
-   .max_entries= XDP_UNKNOWN + 1,
-};
+struct {
+   __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+   __type(key, u32);
+   __type(value, u64);
+   __uint(max_entries, XDP_UNKNOWN + 1);
+} exception_cnt SEC(".maps");
 
 /* Tracepoint format: /sys/kernel/debug/tracing/events/xdp/xdp_redirect/format
  * Code in: kernel/include/trace/events/xdp.h
@@ -129,19 +129,19 @@ struct datarec {
 };
 #define MAX_CPUS 64
 
-struct bpf_map_def SEC("maps") cpumap_enqueue_cnt = {
-   .type   = BPF_MAP_TYPE_PERCPU_ARRAY,
-   .key_size   = sizeof(u32),
-   .value_size = sizeof(struct datarec),
-   .max_entries= MAX_CPUS,
-};
+struct {
+   __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+   __type(key, u32);
+   __type(value, struct datarec);
+   __uint(max_entries, MAX_CPUS);
+} cpumap_enqueue_cnt SEC(".maps");
 
-struct bpf_map_def SEC("maps") cpumap_kthread_cnt = {
-   .type   = BPF_MAP_TYPE_PERCPU_ARRAY,
-   .key_size   = sizeof(u32),
-   .value_size = sizeof(struct datarec),
-   .max_entries= 1,
-};
+struct {
+   __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+   __type(key, u32);
+   __type(value, struct datarec);
+   __uint(max_entries, 1);
+} cpumap_kthread_cnt SEC(".maps");
 
 /* Tracepoint: /sys/kernel/debug/tracing/events/xdp/xdp_cpumap_enqueue/format
  * Code in: kernel/include/trace/events/xdp.h
@@ -210,12 +210,12 @@ int trace_xdp_cpumap_kthread(struct cpumap_kthread_ctx 
*ctx)
return 0;
 }
 
-struct bpf_map_def SEC("maps") devmap_xmit_cnt = {
-   .type   = BPF_MAP_TYPE_PERCPU_ARRAY,
-   .key_size   = sizeof(u32),
-   .value_size = sizeof(struct datarec),
-   .max_entries= 1,
-};
+struct {
+   __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+   __type(key, u32);
+   __type(value, struct datarec);
+   __uint(max_entries, 1);
+} devmap_xmit_cnt SEC(".maps");
 
 /* Tracepoint: /sys/kernel/debug/tracing/events/xdp/xdp_devmap_xmit/format
  * Code in: kernel/include/trace/events/xdp.h
diff --git a/samples/bpf/xdp_sample_pkts_kern.c 
b/samples/bpf/xdp_sample_pkts_kern.c
index 33377289e2a8..9cf76b340dd7 100644
--- a/samples/bpf/xdp_sample_pkts_kern.c
+++ b/samples/bpf/xdp_sample_pkts_kern.c
@@ -5,14 +5,12 @@
 #include 
 
 #define SAMPLE_SIZE 64ul
-#define MAX_CPUS 128
-
-struct bpf_map_def SEC("maps") my_map = {
-   .type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
-   .key_size = sizeof(int),
-   .value_size = sizeof(u32),
-   .max_entries = MAX_CPUS,
-};
+
+struct {
+   __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
+   __uint(key_size, sizeof(int));
+   __uint(value_size, sizeof(u32));
+} my_map SEC(".maps");
 
 SEC("xdp_sample")
 int xdp_sample_prog(struct xdp_md *ctx)
diff --git a/samples/bpf/xdp_sample_pkts_user.c 
b/samples/bpf/xdp_sample_pkts_user.c
index 991ef6f0880b..4b2a300c750c 100644
--- a/samples/bpf/xdp_sample_pkts_user.c
+++ b/samples/bpf/xdp_sample_pkts_user.c
@@ -18,7 +18,6 @@
 
 #include "perf-sys.h"
 
-#define MAX_CPUS 128
 static int if_idx;
 static char *if_name;
 static __u32 xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST;
-- 
2.25.1



Re: [PATCH bpf-next 2/3] samples: bpf: Replace attach_tracepoint() to attach() in xdp_redirect_cpu

2020-10-10 Thread Daniel T. Lee
On Sat, Oct 10, 2020 at 3:23 AM Andrii Nakryiko
 wrote:
>
> On Fri, Oct 9, 2020 at 9:04 AM Daniel T. Lee  wrote:
> >
> > From commit d7a18ea7e8b6 ("libbpf: Add generic bpf_program__attach()"),
> > for some BPF programs, it is now possible to attach BPF programs
> > with __attach() instead of explicitly calling __attach_().
> >
> > This commit refactors the __attach_tracepoint() with libbpf's generic
> > __attach() method. In addition, this refactors the logic of setting
> > the map FD to simplify the code. Also, the missing removal of
> > bpf_load.o in Makefile has been fixed.
> >
> > Signed-off-by: Daniel T. Lee 
> > ---
> >  samples/bpf/Makefile|   2 +-
> >  samples/bpf/xdp_redirect_cpu_user.c | 138 +---
> >  2 files changed, 67 insertions(+), 73 deletions(-)
> >
>
> [...]
>
> >  #define NUM_TP 5
> > +#define NUM_MAP 9
> >  struct bpf_link *tp_links[NUM_TP] = { 0 };
>
> = {}
>
> > +static int map_fds[NUM_MAP];
> >  static int tp_cnt = 0;
> >
> >  /* Exit return codes */
>
> [...]
>
> > -static struct bpf_link * attach_tp(struct bpf_object *obj,
> > -  const char *tp_category,
> > -  const char* tp_name)
> > +static int init_tracepoints(struct bpf_object *obj)
> >  {
> > +   char *tp_section = "tracepoint/";
> > struct bpf_program *prog;
> > -   struct bpf_link *link;
> > -   char sec_name[PATH_MAX];
> > -   int len;
> > +   const char *section;
> >
> > -   len = snprintf(sec_name, PATH_MAX, "tracepoint/%s/%s",
> > -  tp_category, tp_name);
> > -   if (len < 0)
> > -   exit(EXIT_FAIL);
> > +   bpf_object__for_each_program(prog, obj) {
> > +   section = bpf_program__section_name(prog);
> > +   if (strncmp(section, tp_section, strlen(tp_section)) != 0)
> > +   continue;
>
> that's a convoluted and error-prone way (you can also use "tp/bla/bla"
> for tracepoint programs, for example). Use
> bpf_program__is_tracepoint() check.
>

Thanks for the review!
I think that's a much better way. I will send the next patch with applying
that method.

> >
> > -   prog = bpf_object__find_program_by_title(obj, sec_name);
> > -   if (!prog) {
> > -   fprintf(stderr, "ERR: finding progsec: %s\n", sec_name);
> > -   exit(EXIT_FAIL_BPF);
> > +   tp_links[tp_cnt] = bpf_program__attach(prog);
> > +   if (libbpf_get_error(tp_links[tp_cnt])) {
> > +   tp_links[tp_cnt] = NULL;
> > +   return -EINVAL;
> > +   }
> > +   tp_cnt++;
> > }
> >
>
> [...]


[PATCH bpf-next v2 2/3] samples: bpf: Replace attach_tracepoint() to attach() in xdp_redirect_cpu

2020-10-10 Thread Daniel T. Lee
>From commit d7a18ea7e8b6 ("libbpf: Add generic bpf_program__attach()"),
for some BPF programs, it is now possible to attach BPF programs
with __attach() instead of explicitly calling __attach_().

This commit refactors the __attach_tracepoint() with libbpf's generic
__attach() method. In addition, this refactors the logic of setting
the map FD to simplify the code. Also, the missing removal of
bpf_load.o in Makefile has been fixed.

Signed-off-by: Daniel T. Lee 

---
Changes in v2:
 - program section match with bpf_program__is_ instead of strncmp
 - refactor pointer array initialization
 - error code cleanup

 samples/bpf/Makefile|   2 +-
 samples/bpf/xdp_redirect_cpu_user.c | 153 +---
 2 files changed, 73 insertions(+), 82 deletions(-)

diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index 0cee2aa8970f..ac9175705b2f 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -98,7 +98,7 @@ test_map_in_map-objs := test_map_in_map_user.o
 per_socket_stats_example-objs := cookie_uid_helper_example.o
 xdp_redirect-objs := xdp_redirect_user.o
 xdp_redirect_map-objs := xdp_redirect_map_user.o
-xdp_redirect_cpu-objs := bpf_load.o xdp_redirect_cpu_user.o
+xdp_redirect_cpu-objs := xdp_redirect_cpu_user.o
 xdp_monitor-objs := xdp_monitor_user.o
 xdp_rxq_info-objs := xdp_rxq_info_user.o
 syscall_tp-objs := syscall_tp_user.o
diff --git a/samples/bpf/xdp_redirect_cpu_user.c 
b/samples/bpf/xdp_redirect_cpu_user.c
index 3dd366e9474d..6fb8dbde62c5 100644
--- a/samples/bpf/xdp_redirect_cpu_user.c
+++ b/samples/bpf/xdp_redirect_cpu_user.c
@@ -37,18 +37,35 @@ static __u32 prog_id;
 
 static __u32 xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST;
 static int n_cpus;
-static int cpu_map_fd;
-static int rx_cnt_map_fd;
-static int redirect_err_cnt_map_fd;
-static int cpumap_enqueue_cnt_map_fd;
-static int cpumap_kthread_cnt_map_fd;
-static int cpus_available_map_fd;
-static int cpus_count_map_fd;
-static int cpus_iterator_map_fd;
-static int exception_cnt_map_fd;
+
+enum map_type {
+   CPU_MAP,
+   RX_CNT,
+   REDIRECT_ERR_CNT,
+   CPUMAP_ENQUEUE_CNT,
+   CPUMAP_KTHREAD_CNT,
+   CPUS_AVAILABLE,
+   CPUS_COUNT,
+   CPUS_ITERATOR,
+   EXCEPTION_CNT,
+};
+
+static const char *const map_type_strings[] = {
+   [CPU_MAP] = "cpu_map",
+   [RX_CNT] = "rx_cnt",
+   [REDIRECT_ERR_CNT] = "redirect_err_cnt",
+   [CPUMAP_ENQUEUE_CNT] = "cpumap_enqueue_cnt",
+   [CPUMAP_KTHREAD_CNT] = "cpumap_kthread_cnt",
+   [CPUS_AVAILABLE] = "cpus_available",
+   [CPUS_COUNT] = "cpus_count",
+   [CPUS_ITERATOR] = "cpus_iterator",
+   [EXCEPTION_CNT] = "exception_cnt",
+};
 
 #define NUM_TP 5
-struct bpf_link *tp_links[NUM_TP] = { 0 };
+#define NUM_MAP 9
+struct bpf_link *tp_links[NUM_TP] = {};
+static int map_fds[NUM_MAP];
 static int tp_cnt = 0;
 
 /* Exit return codes */
@@ -527,20 +544,20 @@ static void stats_collect(struct stats_record *rec)
 {
int fd, i;
 
-   fd = rx_cnt_map_fd;
+   fd = map_fds[RX_CNT];
map_collect_percpu(fd, 0, &rec->rx_cnt);
 
-   fd = redirect_err_cnt_map_fd;
+   fd = map_fds[REDIRECT_ERR_CNT];
map_collect_percpu(fd, 1, &rec->redir_err);
 
-   fd = cpumap_enqueue_cnt_map_fd;
+   fd = map_fds[CPUMAP_ENQUEUE_CNT];
for (i = 0; i < n_cpus; i++)
map_collect_percpu(fd, i, &rec->enq[i]);
 
-   fd = cpumap_kthread_cnt_map_fd;
+   fd = map_fds[CPUMAP_KTHREAD_CNT];
map_collect_percpu(fd, 0, &rec->kthread);
 
-   fd = exception_cnt_map_fd;
+   fd = map_fds[EXCEPTION_CNT];
map_collect_percpu(fd, 0, &rec->exception);
 }
 
@@ -565,7 +582,7 @@ static int create_cpu_entry(__u32 cpu, struct 
bpf_cpumap_val *value,
/* Add a CPU entry to cpumap, as this allocate a cpu entry in
 * the kernel for the cpu.
 */
-   ret = bpf_map_update_elem(cpu_map_fd, &cpu, value, 0);
+   ret = bpf_map_update_elem(map_fds[CPU_MAP], &cpu, value, 0);
if (ret) {
fprintf(stderr, "Create CPU entry failed (err:%d)\n", ret);
exit(EXIT_FAIL_BPF);
@@ -574,21 +591,21 @@ static int create_cpu_entry(__u32 cpu, struct 
bpf_cpumap_val *value,
/* Inform bpf_prog's that a new CPU is available to select
 * from via some control maps.
 */
-   ret = bpf_map_update_elem(cpus_available_map_fd, &avail_idx, &cpu, 0);
+   ret = bpf_map_update_elem(map_fds[CPUS_AVAILABLE], &avail_idx, &cpu, 0);
if (ret) {
fprintf(stderr, "Add to avail CPUs failed\n");
exit(EXIT_FAIL_BPF);
}
 
/* When not replacing/updating existing entry, bump the count */
-   ret = bpf_map_lookup_elem(cpus_count_map_fd, &key, &curr_cpus_count);

[PATCH bpf-next v2 0/3] samples: bpf: Refactor XDP programs with libbpf

2020-10-10 Thread Daniel T. Lee
To avoid confusion caused by the increasing fragmentation of the BPF
Loader program, this commit would like to convert the previous bpf_load
loader with the libbpf loader.

Thanks to libbpf's bpf_link interface, managing the tracepoint BPF
program is much easier. bpf_program__attach_tracepoint manages the
enable of tracepoint event and attach of BPF programs to it with a
single interface bpf_link, so there is no need to manage event_fd and
prog_fd separately.

And due to addition of generic bpf_program__attach() to libbpf, it is
now possible to attach BPF programs with __attach() instead of
explicitly calling __attach_().

This patchset refactors xdp_monitor with using this libbpf API, and the
bpf_load is removed and migrated to libbpf. Also, attach_tracepoint()
is replaced with the generic __attach() method in xdp_redirect_cpu.
Moreover, maps in kern program have been converted to BTF-defined map.

---
Changes in v2:
 - added cleanup logic for bpf_link and bpf_object in xdp_monitor
 - program section match with bpf_program__is_ instead of strncmp
 - revert BTF key/val type to default of BPF_MAP_TYPE_PERF_EVENT_ARRAY
 - split increment into separate statement
 - refactor pointer array initialization
 - error code cleanup

Daniel T. Lee (3):
  samples: bpf: Refactor xdp_monitor with libbpf
  samples: bpf: Replace attach_tracepoint() to attach() in
xdp_redirect_cpu
  samples: bpf: refactor XDP kern program maps with BTF-defined map

 samples/bpf/Makefile|   4 +-
 samples/bpf/xdp_monitor_kern.c  |  60 +--
 samples/bpf/xdp_monitor_user.c  | 159 +---
 samples/bpf/xdp_redirect_cpu_user.c | 153 +-
 samples/bpf/xdp_sample_pkts_kern.c  |  14 ++-
 samples/bpf/xdp_sample_pkts_user.c  |   1 -
 6 files changed, 230 insertions(+), 161 deletions(-)

-- 
2.25.1



[PATCH bpf-next v2 2/3] samples: bpf: Replace attach_tracepoint() to attach() in xdp_redirect_cpu

2020-10-10 Thread Daniel T. Lee
>From commit d7a18ea7e8b6 ("libbpf: Add generic bpf_program__attach()"),
for some BPF programs, it is now possible to attach BPF programs
with __attach() instead of explicitly calling __attach_().

This commit refactors the __attach_tracepoint() with libbpf's generic
__attach() method. In addition, this refactors the logic of setting
the map FD to simplify the code. Also, the missing removal of
bpf_load.o in Makefile has been fixed.

Signed-off-by: Daniel T. Lee 

---
Changes in v2:
 - program section match with bpf_program__is_ instead of strncmp
 - refactor pointer array initialization
 - error code cleanup

 samples/bpf/Makefile|   2 +-
 samples/bpf/xdp_redirect_cpu_user.c | 153 +---
 2 files changed, 73 insertions(+), 82 deletions(-)

diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index 0cee2aa8970f..ac9175705b2f 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -98,7 +98,7 @@ test_map_in_map-objs := test_map_in_map_user.o
 per_socket_stats_example-objs := cookie_uid_helper_example.o
 xdp_redirect-objs := xdp_redirect_user.o
 xdp_redirect_map-objs := xdp_redirect_map_user.o
-xdp_redirect_cpu-objs := bpf_load.o xdp_redirect_cpu_user.o
+xdp_redirect_cpu-objs := xdp_redirect_cpu_user.o
 xdp_monitor-objs := xdp_monitor_user.o
 xdp_rxq_info-objs := xdp_rxq_info_user.o
 syscall_tp-objs := syscall_tp_user.o
diff --git a/samples/bpf/xdp_redirect_cpu_user.c 
b/samples/bpf/xdp_redirect_cpu_user.c
index 3dd366e9474d..6fb8dbde62c5 100644
--- a/samples/bpf/xdp_redirect_cpu_user.c
+++ b/samples/bpf/xdp_redirect_cpu_user.c
@@ -37,18 +37,35 @@ static __u32 prog_id;
 
 static __u32 xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST;
 static int n_cpus;
-static int cpu_map_fd;
-static int rx_cnt_map_fd;
-static int redirect_err_cnt_map_fd;
-static int cpumap_enqueue_cnt_map_fd;
-static int cpumap_kthread_cnt_map_fd;
-static int cpus_available_map_fd;
-static int cpus_count_map_fd;
-static int cpus_iterator_map_fd;
-static int exception_cnt_map_fd;
+
+enum map_type {
+   CPU_MAP,
+   RX_CNT,
+   REDIRECT_ERR_CNT,
+   CPUMAP_ENQUEUE_CNT,
+   CPUMAP_KTHREAD_CNT,
+   CPUS_AVAILABLE,
+   CPUS_COUNT,
+   CPUS_ITERATOR,
+   EXCEPTION_CNT,
+};
+
+static const char *const map_type_strings[] = {
+   [CPU_MAP] = "cpu_map",
+   [RX_CNT] = "rx_cnt",
+   [REDIRECT_ERR_CNT] = "redirect_err_cnt",
+   [CPUMAP_ENQUEUE_CNT] = "cpumap_enqueue_cnt",
+   [CPUMAP_KTHREAD_CNT] = "cpumap_kthread_cnt",
+   [CPUS_AVAILABLE] = "cpus_available",
+   [CPUS_COUNT] = "cpus_count",
+   [CPUS_ITERATOR] = "cpus_iterator",
+   [EXCEPTION_CNT] = "exception_cnt",
+};
 
 #define NUM_TP 5
-struct bpf_link *tp_links[NUM_TP] = { 0 };
+#define NUM_MAP 9
+struct bpf_link *tp_links[NUM_TP] = {};
+static int map_fds[NUM_MAP];
 static int tp_cnt = 0;
 
 /* Exit return codes */
@@ -527,20 +544,20 @@ static void stats_collect(struct stats_record *rec)
 {
int fd, i;
 
-   fd = rx_cnt_map_fd;
+   fd = map_fds[RX_CNT];
map_collect_percpu(fd, 0, &rec->rx_cnt);
 
-   fd = redirect_err_cnt_map_fd;
+   fd = map_fds[REDIRECT_ERR_CNT];
map_collect_percpu(fd, 1, &rec->redir_err);
 
-   fd = cpumap_enqueue_cnt_map_fd;
+   fd = map_fds[CPUMAP_ENQUEUE_CNT];
for (i = 0; i < n_cpus; i++)
map_collect_percpu(fd, i, &rec->enq[i]);
 
-   fd = cpumap_kthread_cnt_map_fd;
+   fd = map_fds[CPUMAP_KTHREAD_CNT];
map_collect_percpu(fd, 0, &rec->kthread);
 
-   fd = exception_cnt_map_fd;
+   fd = map_fds[EXCEPTION_CNT];
map_collect_percpu(fd, 0, &rec->exception);
 }
 
@@ -565,7 +582,7 @@ static int create_cpu_entry(__u32 cpu, struct 
bpf_cpumap_val *value,
/* Add a CPU entry to cpumap, as this allocate a cpu entry in
 * the kernel for the cpu.
 */
-   ret = bpf_map_update_elem(cpu_map_fd, &cpu, value, 0);
+   ret = bpf_map_update_elem(map_fds[CPU_MAP], &cpu, value, 0);
if (ret) {
fprintf(stderr, "Create CPU entry failed (err:%d)\n", ret);
exit(EXIT_FAIL_BPF);
@@ -574,21 +591,21 @@ static int create_cpu_entry(__u32 cpu, struct 
bpf_cpumap_val *value,
/* Inform bpf_prog's that a new CPU is available to select
 * from via some control maps.
 */
-   ret = bpf_map_update_elem(cpus_available_map_fd, &avail_idx, &cpu, 0);
+   ret = bpf_map_update_elem(map_fds[CPUS_AVAILABLE], &avail_idx, &cpu, 0);
if (ret) {
fprintf(stderr, "Add to avail CPUs failed\n");
exit(EXIT_FAIL_BPF);
}
 
/* When not replacing/updating existing entry, bump the count */
-   ret = bpf_map_lookup_elem(cpus_count_map_fd, &key, &curr_cpus_count);

Re: [PATCH bpf-next 3/3] samples: bpf: refactor XDP kern program maps with BTF-defined map

2020-10-10 Thread Daniel T. Lee
On Sat, Oct 10, 2020 at 3:25 AM Andrii Nakryiko
 wrote:
>
> On Fri, Oct 9, 2020 at 9:04 AM Daniel T. Lee  wrote:
> >
> > Most of the samples were converted to use the new BTF-defined MAP as
> > they moved to libbpf, but some of the samples were missing.
> >
> > Instead of using the previous BPF MAP definition, this commit refactors
> > xdp_monitor and xdp_sample_pkts_kern MAP definition with the new
> > BTF-defined MAP format.
> >
> > Also, this commit removes the max_entries attribute at PERF_EVENT_ARRAY
> > map type. The libbpf's bpf_object__create_map() will automatically
> > set max_entries to the maximum configured number of CPUs on the host.
> >
> > Signed-off-by: Daniel T. Lee 
> > ---
> >  samples/bpf/xdp_monitor_kern.c | 60 +++---
> >  samples/bpf/xdp_sample_pkts_kern.c | 14 +++
> >  samples/bpf/xdp_sample_pkts_user.c |  1 -
> >  3 files changed, 36 insertions(+), 39 deletions(-)
> >
>
> [...]
>
> > --- a/samples/bpf/xdp_sample_pkts_kern.c
> > +++ b/samples/bpf/xdp_sample_pkts_kern.c
> > @@ -5,14 +5,12 @@
> >  #include 
> >
> >  #define SAMPLE_SIZE 64ul
> > -#define MAX_CPUS 128
> > -
> > -struct bpf_map_def SEC("maps") my_map = {
> > -   .type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
> > -   .key_size = sizeof(int),
> > -   .value_size = sizeof(u32),
> > -   .max_entries = MAX_CPUS,
> > -};
> > +
> > +struct {
> > +   __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
> > +   __type(key, int);
> > +   __type(value, u32);
>
>
> this actually will generate unnecessary libbpf warnings, because
> PERF_EVENT_ARRAY doesn't support BTF types for key/value. So use
> __uint(key_size, sizeof(int)) and __uint(value_size, sizeof(u32))
> instead.
>

Thanks for the great review!
I'll fix it right away and send the next version of patch.


> > +} my_map SEC(".maps");
> >
> >  SEC("xdp_sample")
> >  int xdp_sample_prog(struct xdp_md *ctx)
> > diff --git a/samples/bpf/xdp_sample_pkts_user.c 
> > b/samples/bpf/xdp_sample_pkts_user.c
> > index 991ef6f0880b..4b2a300c750c 100644
> > --- a/samples/bpf/xdp_sample_pkts_user.c
> > +++ b/samples/bpf/xdp_sample_pkts_user.c
> > @@ -18,7 +18,6 @@
> >
> >  #include "perf-sys.h"
> >
> > -#define MAX_CPUS 128
> >  static int if_idx;
> >  static char *if_name;
> >  static __u32 xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST;
> > --
> > 2.25.1
> >


Re: [PATCH bpf-next 1/3] samples: bpf: Refactor xdp_monitor with libbpf

2020-10-10 Thread Daniel T. Lee
On Sat, Oct 10, 2020 at 3:17 AM Andrii Nakryiko
 wrote:
>
> On Fri, Oct 9, 2020 at 9:04 AM Daniel T. Lee  wrote:
> >
> > To avoid confusion caused by the increasing fragmentation of the BPF
> > Loader program, this commit would like to change to the libbpf loader
> > instead of using the bpf_load.
> >
> > Thanks to libbpf's bpf_link interface, managing the tracepoint BPF
> > program is much easier. bpf_program__attach_tracepoint manages the
> > enable of tracepoint event and attach of BPF programs to it with a
> > single interface bpf_link, so there is no need to manage event_fd and
> > prog_fd separately.
> >
> > This commit refactors xdp_monitor with using this libbpf API, and the
> > bpf_load is removed and migrated to libbpf.
> >
> > Signed-off-by: Daniel T. Lee 
> > ---
> >  samples/bpf/Makefile   |   2 +-
> >  samples/bpf/xdp_monitor_user.c | 144 -
> >  2 files changed, 108 insertions(+), 38 deletions(-)
> >
>
> [...]
>
> > +static int tp_cnt;
> > +static int map_cnt;
> >  static int verbose = 1;
> >  static bool debug = false;
> > +struct bpf_map *map_data[NUM_MAP] = { 0 };
> > +struct bpf_link *tp_links[NUM_TP] = { 0 };
>
> this syntax means "initialize *only the first element* to 0
> (explicitly) and the rest of elements to default (which is also 0)".
> So it's just misleading, use ` = {}`.
>

Thanks for the great review!

Come to think of it, it could be confusing as you mentioned. I will
remove the unnecessary initializer in the next patch and resend it.

> >
> >  static const struct option long_options[] = {
> > {"help",no_argument,NULL, 'h' },
> > @@ -41,6 +65,15 @@ static const struct option long_options[] = {
> > {0, 0, NULL,  0 }
> >  };
> >
> > +static void int_exit(int sig)
> > +{
> > +   /* Detach tracepoints */
> > +   while (tp_cnt)
> > +   bpf_link__destroy(tp_links[--tp_cnt]);
> > +
>
> see below about proper cleanup
>
> > +   exit(0);
> > +}
> > +
> >  /* C standard specifies two constants, EXIT_SUCCESS(0) and EXIT_FAILURE(1) 
> > */
> >  #define EXIT_FAIL_MEM  5
> >
>
> [...]
>
> >
> > -static void print_bpf_prog_info(void)
> > +static void print_bpf_prog_info(struct bpf_object *obj)
> >  {
> > -   int i;
> > +   struct bpf_program *prog;
> > +   struct bpf_map *map;
> > +   int i = 0;
> >
> > /* Prog info */
> > -   printf("Loaded BPF prog have %d bpf program(s)\n", prog_cnt);
> > -   for (i = 0; i < prog_cnt; i++) {
> > -   printf(" - prog_fd[%d] = fd(%d)\n", i, prog_fd[i]);
> > +   printf("Loaded BPF prog have %d bpf program(s)\n", tp_cnt);
> > +   bpf_object__for_each_program(prog, obj) {
> > +   printf(" - prog_fd[%d] = fd(%d)\n", i++, 
> > bpf_program__fd(prog));
> > }
> >
> > +   i = 0;
> > /* Maps info */
> > -   printf("Loaded BPF prog have %d map(s)\n", map_data_count);
> > -   for (i = 0; i < map_data_count; i++) {
> > -   char *name = map_data[i].name;
> > -   int fd = map_data[i].fd;
> > +   printf("Loaded BPF prog have %d map(s)\n", map_cnt);
> > +   bpf_object__for_each_map(map, obj) {
> > +   const char *name = bpf_map__name(map);
> > +   int fd   = bpf_map__fd(map);
> >
> > -   printf(" - map_data[%d] = fd(%d) name:%s\n", i, fd, name);
> > +   printf(" - map_data[%d] = fd(%d) name:%s\n", i++, fd, name);
>
> please move out increment into a separate statement, no need to
> confuse readers unnecessarily
>

I will fix it at the following patch.

> > }
> >
> > /* Event info */
> > -   printf("Searching for (max:%d) event file descriptor(s)\n", 
> > prog_cnt);
> > -   for (i = 0; i < prog_cnt; i++) {
> > -   if (event_fd[i] != -1)
> > -   printf(" - event_fd[%d] = fd(%d)\n", i, 
> > event_fd[i]);
> > +   printf("Searching for (max:%d) event file descriptor(s)\n", tp_cnt);
> > +   for (i = 0; i < tp_cnt; i++) {
> > +   int fd = bpf_link__fd(tp_links[i]);
> > +
> > +   if (fd != -1)
> > +   pri

[PATCH bpf-next 3/3] samples: bpf: refactor XDP kern program maps with BTF-defined map

2020-10-09 Thread Daniel T. Lee
Most of the samples were converted to use the new BTF-defined MAP as
they moved to libbpf, but some of the samples were missing.

Instead of using the previous BPF MAP definition, this commit refactors
xdp_monitor and xdp_sample_pkts_kern MAP definition with the new
BTF-defined MAP format.

Also, this commit removes the max_entries attribute at PERF_EVENT_ARRAY
map type. The libbpf's bpf_object__create_map() will automatically
set max_entries to the maximum configured number of CPUs on the host.

Signed-off-by: Daniel T. Lee 
---
 samples/bpf/xdp_monitor_kern.c | 60 +++---
 samples/bpf/xdp_sample_pkts_kern.c | 14 +++
 samples/bpf/xdp_sample_pkts_user.c |  1 -
 3 files changed, 36 insertions(+), 39 deletions(-)

diff --git a/samples/bpf/xdp_monitor_kern.c b/samples/bpf/xdp_monitor_kern.c
index 3d33cca2d48a..5c955b812c47 100644
--- a/samples/bpf/xdp_monitor_kern.c
+++ b/samples/bpf/xdp_monitor_kern.c
@@ -6,21 +6,21 @@
 #include 
 #include 
 
-struct bpf_map_def SEC("maps") redirect_err_cnt = {
-   .type = BPF_MAP_TYPE_PERCPU_ARRAY,
-   .key_size = sizeof(u32),
-   .value_size = sizeof(u64),
-   .max_entries = 2,
+struct {
+   __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+   __type(key, u32);
+   __type(value, u64);
+   __uint(max_entries, 2);
/* TODO: have entries for all possible errno's */
-};
+} redirect_err_cnt SEC(".maps");
 
 #define XDP_UNKNOWNXDP_REDIRECT + 1
-struct bpf_map_def SEC("maps") exception_cnt = {
-   .type   = BPF_MAP_TYPE_PERCPU_ARRAY,
-   .key_size   = sizeof(u32),
-   .value_size = sizeof(u64),
-   .max_entries= XDP_UNKNOWN + 1,
-};
+struct {
+   __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+   __type(key, u32);
+   __type(value, u64);
+   __uint(max_entries, XDP_UNKNOWN + 1);
+} exception_cnt SEC(".maps");
 
 /* Tracepoint format: /sys/kernel/debug/tracing/events/xdp/xdp_redirect/format
  * Code in:kernel/include/trace/events/xdp.h
@@ -129,19 +129,19 @@ struct datarec {
 };
 #define MAX_CPUS 64
 
-struct bpf_map_def SEC("maps") cpumap_enqueue_cnt = {
-   .type   = BPF_MAP_TYPE_PERCPU_ARRAY,
-   .key_size   = sizeof(u32),
-   .value_size = sizeof(struct datarec),
-   .max_entries= MAX_CPUS,
-};
+struct {
+   __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+   __type(key, u32);
+   __type(value, struct datarec);
+   __uint(max_entries, MAX_CPUS);
+} cpumap_enqueue_cnt SEC(".maps");
 
-struct bpf_map_def SEC("maps") cpumap_kthread_cnt = {
-   .type   = BPF_MAP_TYPE_PERCPU_ARRAY,
-   .key_size   = sizeof(u32),
-   .value_size = sizeof(struct datarec),
-   .max_entries= 1,
-};
+struct {
+   __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+   __type(key, u32);
+   __type(value, struct datarec);
+   __uint(max_entries, 1);
+} cpumap_kthread_cnt SEC(".maps");
 
 /* Tracepoint: /sys/kernel/debug/tracing/events/xdp/xdp_cpumap_enqueue/format
  * Code in: kernel/include/trace/events/xdp.h
@@ -210,12 +210,12 @@ int trace_xdp_cpumap_kthread(struct cpumap_kthread_ctx 
*ctx)
return 0;
 }
 
-struct bpf_map_def SEC("maps") devmap_xmit_cnt = {
-   .type   = BPF_MAP_TYPE_PERCPU_ARRAY,
-   .key_size   = sizeof(u32),
-   .value_size = sizeof(struct datarec),
-   .max_entries= 1,
-};
+struct {
+   __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+   __type(key, u32);
+   __type(value, struct datarec);
+   __uint(max_entries, 1);
+} devmap_xmit_cnt SEC(".maps");
 
 /* Tracepoint: /sys/kernel/debug/tracing/events/xdp/xdp_devmap_xmit/format
  * Code in: kernel/include/trace/events/xdp.h
diff --git a/samples/bpf/xdp_sample_pkts_kern.c 
b/samples/bpf/xdp_sample_pkts_kern.c
index 33377289e2a8..2fc3ecc9d9aa 100644
--- a/samples/bpf/xdp_sample_pkts_kern.c
+++ b/samples/bpf/xdp_sample_pkts_kern.c
@@ -5,14 +5,12 @@
 #include 
 
 #define SAMPLE_SIZE 64ul
-#define MAX_CPUS 128
-
-struct bpf_map_def SEC("maps") my_map = {
-   .type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
-   .key_size = sizeof(int),
-   .value_size = sizeof(u32),
-   .max_entries = MAX_CPUS,
-};
+
+struct {
+   __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
+   __type(key, int);
+   __type(value, u32);
+} my_map SEC(".maps");
 
 SEC("xdp_sample")
 int xdp_sample_prog(struct xdp_md *ctx)
diff --git a/samples/bpf/xdp_sample_pkts_user.c 
b/samples/bpf/xdp_sample_pkts_user.c
index 991ef6f0880b..4b2a300c750c 100644
--- a/samples/bpf/xdp_sample_pkts_user.c
+++ b/samples/bpf/xdp_sample_pkts_user.c
@@ -18,7 +18,6 @@
 
 #include "perf-sys.h"
 
-#define MAX_CPUS 128
 static int if_idx;
 static char *if_name;
 static __u32 xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST;
-- 
2.25.1



[PATCH bpf-next 0/3] samples: bpf: Refactor XDP programs with libbpf

2020-10-09 Thread Daniel T. Lee
To avoid confusion caused by the increasing fragmentation of the BPF
Loader program, this commit would like to convert the previous bpf_load
loader with the libbpf loader.

Thanks to libbpf's bpf_link interface, managing the tracepoint BPF
program is much easier. bpf_program__attach_tracepoint manages the
enable of tracepoint event and attach of BPF programs to it with a
single interface bpf_link, so there is no need to manage event_fd and
prog_fd separately.

And due to addition of generic bpf_program__attach() to libbpf, it is
now possible to attach BPF programs with __attach() instead of
explicitly calling __attach_().

This patchset refactors xdp_monitor with using this libbpf API, and the
bpf_load is removed and migrated to libbpf. Also, attach_tracepoint()
is replaced with the generic __attach() method in xdp_redirect_cpu.
Moreover, maps in kern program have been converted to BTF-defined map.

Daniel T. Lee (3):
  samples: bpf: Refactor xdp_monitor with libbpf
  samples: bpf: Replace attach_tracepoint() to attach() in
xdp_redirect_cpu
  samples: bpf: refactor XDP kern program maps with BTF-defined map

 samples/bpf/Makefile|   4 +-
 samples/bpf/xdp_monitor_kern.c  |  60 ++--
 samples/bpf/xdp_monitor_user.c  | 144 +---
 samples/bpf/xdp_redirect_cpu_user.c | 138 +-
 samples/bpf/xdp_sample_pkts_kern.c  |  14 ++-
 samples/bpf/xdp_sample_pkts_user.c  |   1 -
 6 files changed, 211 insertions(+), 150 deletions(-)

-- 
2.25.1



[PATCH bpf-next 1/3] samples: bpf: Refactor xdp_monitor with libbpf

2020-10-09 Thread Daniel T. Lee
To avoid confusion caused by the increasing fragmentation of the BPF
Loader program, this commit would like to change to the libbpf loader
instead of using the bpf_load.

Thanks to libbpf's bpf_link interface, managing the tracepoint BPF
program is much easier. bpf_program__attach_tracepoint manages the
enable of tracepoint event and attach of BPF programs to it with a
single interface bpf_link, so there is no need to manage event_fd and
prog_fd separately.

This commit refactors xdp_monitor with using this libbpf API, and the
bpf_load is removed and migrated to libbpf.

Signed-off-by: Daniel T. Lee 
---
 samples/bpf/Makefile   |   2 +-
 samples/bpf/xdp_monitor_user.c | 144 -
 2 files changed, 108 insertions(+), 38 deletions(-)

diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index 4f1ed0e3cf9f..0cee2aa8970f 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -99,7 +99,7 @@ per_socket_stats_example-objs := cookie_uid_helper_example.o
 xdp_redirect-objs := xdp_redirect_user.o
 xdp_redirect_map-objs := xdp_redirect_map_user.o
 xdp_redirect_cpu-objs := bpf_load.o xdp_redirect_cpu_user.o
-xdp_monitor-objs := bpf_load.o xdp_monitor_user.o
+xdp_monitor-objs := xdp_monitor_user.o
 xdp_rxq_info-objs := xdp_rxq_info_user.o
 syscall_tp-objs := syscall_tp_user.o
 cpustat-objs := cpustat_user.o
diff --git a/samples/bpf/xdp_monitor_user.c b/samples/bpf/xdp_monitor_user.c
index ef53b93db573..c627c53d6ada 100644
--- a/samples/bpf/xdp_monitor_user.c
+++ b/samples/bpf/xdp_monitor_user.c
@@ -26,12 +26,36 @@ static const char *__doc_err_only__=
 #include 
 #include 
 
+#include 
 #include 
-#include "bpf_load.h"
+#include 
 #include "bpf_util.h"
 
+enum map_type {
+   REDIRECT_ERR_CNT,
+   EXCEPTION_CNT,
+   CPUMAP_ENQUEUE_CNT,
+   CPUMAP_KTHREAD_CNT,
+   DEVMAP_XMIT_CNT,
+};
+
+static const char *const map_type_strings[] = {
+   [REDIRECT_ERR_CNT] = "redirect_err_cnt",
+   [EXCEPTION_CNT] = "exception_cnt",
+   [CPUMAP_ENQUEUE_CNT] = "cpumap_enqueue_cnt",
+   [CPUMAP_KTHREAD_CNT] = "cpumap_kthread_cnt",
+   [DEVMAP_XMIT_CNT] = "devmap_xmit_cnt",
+};
+
+#define NUM_MAP 5
+#define NUM_TP 8
+
+static int tp_cnt;
+static int map_cnt;
 static int verbose = 1;
 static bool debug = false;
+struct bpf_map *map_data[NUM_MAP] = { 0 };
+struct bpf_link *tp_links[NUM_TP] = { 0 };
 
 static const struct option long_options[] = {
{"help",no_argument,NULL, 'h' },
@@ -41,6 +65,15 @@ static const struct option long_options[] = {
{0, 0, NULL,  0 }
 };
 
+static void int_exit(int sig)
+{
+   /* Detach tracepoints */
+   while (tp_cnt)
+   bpf_link__destroy(tp_links[--tp_cnt]);
+
+   exit(0);
+}
+
 /* C standard specifies two constants, EXIT_SUCCESS(0) and EXIT_FAILURE(1) */
 #define EXIT_FAIL_MEM  5
 
@@ -483,23 +516,23 @@ static bool stats_collect(struct stats_record *rec)
 * this can happen by someone running perf-record -e
 */
 
-   fd = map_data[0].fd; /* map0: redirect_err_cnt */
+   fd = bpf_map__fd(map_data[REDIRECT_ERR_CNT]);
for (i = 0; i < REDIR_RES_MAX; i++)
map_collect_record_u64(fd, i, &rec->xdp_redirect[i]);
 
-   fd = map_data[1].fd; /* map1: exception_cnt */
+   fd = bpf_map__fd(map_data[EXCEPTION_CNT]);
for (i = 0; i < XDP_ACTION_MAX; i++) {
map_collect_record_u64(fd, i, &rec->xdp_exception[i]);
}
 
-   fd = map_data[2].fd; /* map2: cpumap_enqueue_cnt */
+   fd = bpf_map__fd(map_data[CPUMAP_ENQUEUE_CNT]);
for (i = 0; i < MAX_CPUS; i++)
map_collect_record(fd, i, &rec->xdp_cpumap_enqueue[i]);
 
-   fd = map_data[3].fd; /* map3: cpumap_kthread_cnt */
+   fd = bpf_map__fd(map_data[CPUMAP_KTHREAD_CNT]);
map_collect_record(fd, 0, &rec->xdp_cpumap_kthread);
 
-   fd = map_data[4].fd; /* map4: devmap_xmit_cnt */
+   fd = bpf_map__fd(map_data[DEVMAP_XMIT_CNT]);
map_collect_record(fd, 0, &rec->xdp_devmap_xmit);
 
return true;
@@ -598,8 +631,8 @@ static void stats_poll(int interval, bool err_only)
 
/* TODO Need more advanced stats on error types */
if (verbose) {
-   printf(" - Stats map0: %s\n", map_data[0].name);
-   printf(" - Stats map1: %s\n", map_data[1].name);
+   printf(" - Stats map0: %s\n", bpf_map__name(map_data[0]));
+   printf(" - Stats map1: %s\n", bpf_map__name(map_data[1]));
printf("\n");
}
fflush(stdout);
@@ -616,46 +649,52 @@ static void stats_poll(int interval, bool err_only)
free_stats_record(prev);
 }
 
-static void print_bpf_prog_info(void)
+static void print_bpf_prog_info(struct bpf_object *obj)
 {
-

[PATCH bpf-next 2/3] samples: bpf: Replace attach_tracepoint() to attach() in xdp_redirect_cpu

2020-10-09 Thread Daniel T. Lee
From commit d7a18ea7e8b6 ("libbpf: Add generic bpf_program__attach()"),
for some BPF programs, it is now possible to attach BPF programs
with __attach() instead of explicitly calling __attach_().

This commit refactors the __attach_tracepoint() with libbpf's generic
__attach() method. In addition, this refactors the logic of setting
the map FD to simplify the code. Also, the missing removal of
bpf_load.o in Makefile has been fixed.

Signed-off-by: Daniel T. Lee 
---
 samples/bpf/Makefile|   2 +-
 samples/bpf/xdp_redirect_cpu_user.c | 138 +---
 2 files changed, 67 insertions(+), 73 deletions(-)

diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index 0cee2aa8970f..ac9175705b2f 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -98,7 +98,7 @@ test_map_in_map-objs := test_map_in_map_user.o
 per_socket_stats_example-objs := cookie_uid_helper_example.o
 xdp_redirect-objs := xdp_redirect_user.o
 xdp_redirect_map-objs := xdp_redirect_map_user.o
-xdp_redirect_cpu-objs := bpf_load.o xdp_redirect_cpu_user.o
+xdp_redirect_cpu-objs := xdp_redirect_cpu_user.o
 xdp_monitor-objs := xdp_monitor_user.o
 xdp_rxq_info-objs := xdp_rxq_info_user.o
 syscall_tp-objs := syscall_tp_user.o
diff --git a/samples/bpf/xdp_redirect_cpu_user.c 
b/samples/bpf/xdp_redirect_cpu_user.c
index 3dd366e9474d..805b5df5e47b 100644
--- a/samples/bpf/xdp_redirect_cpu_user.c
+++ b/samples/bpf/xdp_redirect_cpu_user.c
@@ -37,18 +37,35 @@ static __u32 prog_id;
 
 static __u32 xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST;
 static int n_cpus;
-static int cpu_map_fd;
-static int rx_cnt_map_fd;
-static int redirect_err_cnt_map_fd;
-static int cpumap_enqueue_cnt_map_fd;
-static int cpumap_kthread_cnt_map_fd;
-static int cpus_available_map_fd;
-static int cpus_count_map_fd;
-static int cpus_iterator_map_fd;
-static int exception_cnt_map_fd;
+
+enum map_type {
+   CPU_MAP,
+   RX_CNT,
+   REDIRECT_ERR_CNT,
+   CPUMAP_ENQUEUE_CNT,
+   CPUMAP_KTHREAD_CNT,
+   CPUS_AVAILABLE,
+   CPUS_COUNT,
+   CPUS_ITERATOR,
+   EXCEPTION_CNT,
+};
+
+static const char *const map_type_strings[] = {
+   [CPU_MAP] = "cpu_map",
+   [RX_CNT] = "rx_cnt",
+   [REDIRECT_ERR_CNT] = "redirect_err_cnt",
+   [CPUMAP_ENQUEUE_CNT] = "cpumap_enqueue_cnt",
+   [CPUMAP_KTHREAD_CNT] = "cpumap_kthread_cnt",
+   [CPUS_AVAILABLE] = "cpus_available",
+   [CPUS_COUNT] = "cpus_count",
+   [CPUS_ITERATOR] = "cpus_iterator",
+   [EXCEPTION_CNT] = "exception_cnt",
+};
 
 #define NUM_TP 5
+#define NUM_MAP 9
 struct bpf_link *tp_links[NUM_TP] = { 0 };
+static int map_fds[NUM_MAP];
 static int tp_cnt = 0;
 
 /* Exit return codes */
@@ -527,20 +544,20 @@ static void stats_collect(struct stats_record *rec)
 {
int fd, i;
 
-   fd = rx_cnt_map_fd;
+   fd = map_fds[RX_CNT];
map_collect_percpu(fd, 0, &rec->rx_cnt);
 
-   fd = redirect_err_cnt_map_fd;
+   fd = map_fds[REDIRECT_ERR_CNT];
map_collect_percpu(fd, 1, &rec->redir_err);
 
-   fd = cpumap_enqueue_cnt_map_fd;
+   fd = map_fds[CPUMAP_ENQUEUE_CNT];
for (i = 0; i < n_cpus; i++)
map_collect_percpu(fd, i, &rec->enq[i]);
 
-   fd = cpumap_kthread_cnt_map_fd;
+   fd = map_fds[CPUMAP_KTHREAD_CNT];
map_collect_percpu(fd, 0, &rec->kthread);
 
-   fd = exception_cnt_map_fd;
+   fd = map_fds[EXCEPTION_CNT];
map_collect_percpu(fd, 0, &rec->exception);
 }
 
@@ -565,7 +582,7 @@ static int create_cpu_entry(__u32 cpu, struct 
bpf_cpumap_val *value,
/* Add a CPU entry to cpumap, as this allocate a cpu entry in
 * the kernel for the cpu.
 */
-   ret = bpf_map_update_elem(cpu_map_fd, &cpu, value, 0);
+   ret = bpf_map_update_elem(map_fds[CPU_MAP], &cpu, value, 0);
if (ret) {
fprintf(stderr, "Create CPU entry failed (err:%d)\n", ret);
exit(EXIT_FAIL_BPF);
@@ -574,21 +591,21 @@ static int create_cpu_entry(__u32 cpu, struct 
bpf_cpumap_val *value,
/* Inform bpf_prog's that a new CPU is available to select
 * from via some control maps.
 */
-   ret = bpf_map_update_elem(cpus_available_map_fd, &avail_idx, &cpu, 0);
+   ret = bpf_map_update_elem(map_fds[CPUS_AVAILABLE], &avail_idx, &cpu, 0);
if (ret) {
fprintf(stderr, "Add to avail CPUs failed\n");
exit(EXIT_FAIL_BPF);
}
 
/* When not replacing/updating existing entry, bump the count */
-   ret = bpf_map_lookup_elem(cpus_count_map_fd, &key, &curr_cpus_count);
+   ret = bpf_map_lookup_elem(map_fds[CPUS_COUNT], &key, &curr_cpus_count);
if (ret) {
fprintf(stderr, "Failed reading curr cpus_count\n"

Re: [PATCH bpf-next] samples: bpf: refactor xdp_sample_pkts_kern with BTF-defined map

2020-09-08 Thread Daniel T. Lee
On Wed, Sep 9, 2020 at 8:24 AM Andrii Nakryiko
 wrote:
>
> On Sat, Sep 5, 2020 at 8:41 AM Daniel T. Lee  wrote:
> >
> > Most of the samples were converted to use the new BTF-defined MAP as
> > they moved to libbpf, but some of the samples were missing.
> >
> > Instead of using the previous BPF MAP definition, this commit refactors
> > xdp_sample_pkts_kern MAP definition with the new BTF-defined MAP format.
> >
> > Signed-off-by: Daniel T. Lee 
> > ---
> >  samples/bpf/xdp_sample_pkts_kern.c | 12 ++--
> >  1 file changed, 6 insertions(+), 6 deletions(-)
> >
> > diff --git a/samples/bpf/xdp_sample_pkts_kern.c 
> > b/samples/bpf/xdp_sample_pkts_kern.c
> > index 33377289e2a8..b15172b7d455 100644
> > --- a/samples/bpf/xdp_sample_pkts_kern.c
> > +++ b/samples/bpf/xdp_sample_pkts_kern.c
> > @@ -7,12 +7,12 @@
> >  #define SAMPLE_SIZE 64ul
> >  #define MAX_CPUS 128
> >
> > -struct bpf_map_def SEC("maps") my_map = {
> > -   .type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
> > -   .key_size = sizeof(int),
> > -   .value_size = sizeof(u32),
> > -   .max_entries = MAX_CPUS,
> > -};
> > +struct {
> > +   __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
> > +   __uint(key_size, sizeof(int));
> > +   __uint(value_size, sizeof(u32));
> > +   __uint(max_entries, MAX_CPUS);
>
> if you drop max_entries property, libbpf will set it to the maximum
> configured number of CPUs on the host, which is what you probably
> want. Do you might sending v2 without MAX_CPUS (check if macro is
> still used anywhere else). Thanks!
>

Thanks for your time and effort for the review.

I'll check and send the next version of patch.


> > +} my_map SEC(".maps");
> >
> >  SEC("xdp_sample")
> >  int xdp_sample_prog(struct xdp_md *ctx)
> > --
> > 2.25.1
> >

-- 
Best,
Daniel T. Lee


Re: [PATCH bpf-next] samples: bpf: refactor xdp_sample_pkts_kern with BTF-defined map

2020-09-07 Thread Daniel T. Lee
On Mon, Sep 7, 2020 at 10:02 PM Michal Rostecki  wrote:
>
> Daniel T. Lee writes:
> > Most of the samples were converted to use the new BTF-defined MAP as
> > they moved to libbpf, but some of the samples were missing.
> >
> > Instead of using the previous BPF MAP definition, this commit refactors
> > xdp_sample_pkts_kern MAP definition with the new BTF-defined MAP format.
> >
> > Signed-off-by: Daniel T. Lee 
> > ---
> >  samples/bpf/xdp_sample_pkts_kern.c | 12 ++--
> >  1 file changed, 6 insertions(+), 6 deletions(-)
>
> I see that samples/bpf/ibumad_kern.c and samples/bpf/xdp_monitor_kern.c
> still have old style BPF map definitions. Maybe you could change them as
> well?


Thanks for the review!

Actually, I'm well aware that there are some samples left with old style map
definitions, but those examples should be transferred from bpf_load to
libbpf, not just bpf map changes.

I'm also planning to refactor those patches in the future.
For now I've just refactored this file, but if you think this patch
size is small,
I'll send it with other changes.


[PATCH bpf-next] samples: bpf: refactor xdp_sample_pkts_kern with BTF-defined map

2020-09-05 Thread Daniel T. Lee
Most of the samples were converted to use the new BTF-defined MAP as
they moved to libbpf, but some of the samples were missing.

Instead of using the previous BPF MAP definition, this commit refactors
xdp_sample_pkts_kern MAP definition with the new BTF-defined MAP format.

Signed-off-by: Daniel T. Lee 
---
 samples/bpf/xdp_sample_pkts_kern.c | 12 ++--
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/samples/bpf/xdp_sample_pkts_kern.c 
b/samples/bpf/xdp_sample_pkts_kern.c
index 33377289e2a8..b15172b7d455 100644
--- a/samples/bpf/xdp_sample_pkts_kern.c
+++ b/samples/bpf/xdp_sample_pkts_kern.c
@@ -7,12 +7,12 @@
 #define SAMPLE_SIZE 64ul
 #define MAX_CPUS 128
 
-struct bpf_map_def SEC("maps") my_map = {
-   .type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
-   .key_size = sizeof(int),
-   .value_size = sizeof(u32),
-   .max_entries = MAX_CPUS,
-};
+struct {
+   __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
+   __uint(key_size, sizeof(int));
+   __uint(value_size, sizeof(u32));
+   __uint(max_entries, MAX_CPUS);
+} my_map SEC(".maps");
 
 SEC("xdp_sample")
 int xdp_sample_prog(struct xdp_md *ctx)
-- 
2.25.1



[PATCH bpf-next 1/2] samples: bpf: Replace bpf_program__title() with bpf_program__section_name()

2020-09-03 Thread Daniel T. Lee
From commit 521095842027 ("libbpf: Deprecate notion of BPF program
"title" in favor of "section name""), the term title has been replaced
with section name in libbpf.

Since the bpf_program__title() has been deprecated, this commit
switches this function to bpf_program__section_name(). Due to
this commit, the compilation warning issue has also been resolved.

Fixes: 521095842027 ("libbpf: Deprecate notion of BPF program "title" in favor 
of "section name"")
Signed-off-by: Daniel T. Lee 
---
 samples/bpf/sockex3_user.c  | 6 +++---
 samples/bpf/spintest_user.c | 6 +++---
 samples/bpf/tracex5_user.c  | 6 +++---
 samples/bpf/xdp_redirect_cpu_user.c | 2 +-
 4 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/samples/bpf/sockex3_user.c b/samples/bpf/sockex3_user.c
index 4dbee7427d47..7793f6a6ae7e 100644
--- a/samples/bpf/sockex3_user.c
+++ b/samples/bpf/sockex3_user.c
@@ -29,8 +29,8 @@ int main(int argc, char **argv)
struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
struct bpf_program *prog;
struct bpf_object *obj;
+   const char *section;
char filename[256];
-   const char *title;
FILE *f;
 
snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
@@ -58,8 +58,8 @@ int main(int argc, char **argv)
bpf_object__for_each_program(prog, obj) {
fd = bpf_program__fd(prog);
 
-   title = bpf_program__title(prog, false);
-   if (sscanf(title, "socket/%d", &key) != 1) {
+   section = bpf_program__section_name(prog);
+   if (sscanf(section, "socket/%d", &key) != 1) {
fprintf(stderr, "ERROR: finding prog failed\n");
goto cleanup;
}
diff --git a/samples/bpf/spintest_user.c b/samples/bpf/spintest_user.c
index 847da9284fa8..f090d0dc60d6 100644
--- a/samples/bpf/spintest_user.c
+++ b/samples/bpf/spintest_user.c
@@ -17,7 +17,7 @@ int main(int ac, char **argv)
long key, next_key, value;
struct bpf_program *prog;
int map_fd, i, j = 0;
-   const char *title;
+   const char *section;
struct ksym *sym;
 
if (setrlimit(RLIMIT_MEMLOCK, &r)) {
@@ -51,8 +51,8 @@ int main(int ac, char **argv)
}
 
bpf_object__for_each_program(prog, obj) {
-   title = bpf_program__title(prog, false);
-   if (sscanf(title, "kprobe/%s", symbol) != 1)
+   section = bpf_program__section_name(prog);
+   if (sscanf(section, "kprobe/%s", symbol) != 1)
continue;
 
/* Attach prog only when symbol exists */
diff --git a/samples/bpf/tracex5_user.c b/samples/bpf/tracex5_user.c
index 98dad57a96c4..c17d3fb5fd64 100644
--- a/samples/bpf/tracex5_user.c
+++ b/samples/bpf/tracex5_user.c
@@ -39,8 +39,8 @@ int main(int ac, char **argv)
struct bpf_program *prog;
struct bpf_object *obj;
int key, fd, progs_fd;
+   const char *section;
char filename[256];
-   const char *title;
FILE *f;
 
setrlimit(RLIMIT_MEMLOCK, &r);
@@ -78,9 +78,9 @@ int main(int ac, char **argv)
}
 
bpf_object__for_each_program(prog, obj) {
-   title = bpf_program__title(prog, false);
+   section = bpf_program__section_name(prog);
/* register only syscalls to PROG_ARRAY */
-   if (sscanf(title, "kprobe/%d", &key) != 1)
+   if (sscanf(section, "kprobe/%d", &key) != 1)
continue;
 
fd = bpf_program__fd(prog);
diff --git a/samples/bpf/xdp_redirect_cpu_user.c 
b/samples/bpf/xdp_redirect_cpu_user.c
index 004c0622c913..3dd366e9474d 100644
--- a/samples/bpf/xdp_redirect_cpu_user.c
+++ b/samples/bpf/xdp_redirect_cpu_user.c
@@ -111,7 +111,7 @@ static void print_avail_progs(struct bpf_object *obj)
 
bpf_object__for_each_program(pos, obj) {
if (bpf_program__is_xdp(pos))
-   printf(" %s\n", bpf_program__title(pos, false));
+   printf(" %s\n", bpf_program__section_name(pos));
}
 }
 
-- 
2.25.1



[PATCH bpf-next 2/2] samples: bpf: add xsk_fwd test file to .gitignore

2020-09-03 Thread Daniel T. Lee
This commit adds xsk_fwd test file to .gitignore which is newly added
to samples/bpf.

Signed-off-by: Daniel T. Lee 
---
 samples/bpf/.gitignore | 1 +
 1 file changed, 1 insertion(+)

diff --git a/samples/bpf/.gitignore b/samples/bpf/.gitignore
index 034800c4d1e6..b2f29bc8dc43 100644
--- a/samples/bpf/.gitignore
+++ b/samples/bpf/.gitignore
@@ -50,4 +50,5 @@ xdp_rxq_info
 xdp_sample_pkts
 xdp_tx_iptunnel
 xdpsock
+xsk_fwd
 testfile.img
-- 
2.25.1



[PATCH 1/2] samples: bpf: Replace bpf_program__title() with bpf_program__section_name()

2020-09-03 Thread Daniel T. Lee
From commit 521095842027 ("libbpf: Deprecate notion of BPF program
"title" in favor of "section name""), the term title has been replaced
with section name in libbpf.

Since the bpf_program__title() has been deprecated, this commit
switches this function to bpf_program__section_name(). Due to
this commit, the compilation warning issue has also been resolved.

Fixes: 521095842027 ("libbpf: Deprecate notion of BPF program "title" in favor 
of "section name"")
Signed-off-by: Daniel T. Lee 
---
 samples/bpf/sockex3_user.c  | 6 +++---
 samples/bpf/spintest_user.c | 6 +++---
 samples/bpf/tracex5_user.c  | 6 +++---
 samples/bpf/xdp_redirect_cpu_user.c | 2 +-
 4 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/samples/bpf/sockex3_user.c b/samples/bpf/sockex3_user.c
index 4dbee7427d47..7793f6a6ae7e 100644
--- a/samples/bpf/sockex3_user.c
+++ b/samples/bpf/sockex3_user.c
@@ -29,8 +29,8 @@ int main(int argc, char **argv)
struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
struct bpf_program *prog;
struct bpf_object *obj;
+   const char *section;
char filename[256];
-   const char *title;
FILE *f;
 
snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
@@ -58,8 +58,8 @@ int main(int argc, char **argv)
bpf_object__for_each_program(prog, obj) {
fd = bpf_program__fd(prog);
 
-   title = bpf_program__title(prog, false);
-   if (sscanf(title, "socket/%d", &key) != 1) {
+   section = bpf_program__section_name(prog);
+   if (sscanf(section, "socket/%d", &key) != 1) {
fprintf(stderr, "ERROR: finding prog failed\n");
goto cleanup;
}
diff --git a/samples/bpf/spintest_user.c b/samples/bpf/spintest_user.c
index 847da9284fa8..f090d0dc60d6 100644
--- a/samples/bpf/spintest_user.c
+++ b/samples/bpf/spintest_user.c
@@ -17,7 +17,7 @@ int main(int ac, char **argv)
long key, next_key, value;
struct bpf_program *prog;
int map_fd, i, j = 0;
-   const char *title;
+   const char *section;
struct ksym *sym;
 
if (setrlimit(RLIMIT_MEMLOCK, &r)) {
@@ -51,8 +51,8 @@ int main(int ac, char **argv)
}
 
bpf_object__for_each_program(prog, obj) {
-   title = bpf_program__title(prog, false);
-   if (sscanf(title, "kprobe/%s", symbol) != 1)
+   section = bpf_program__section_name(prog);
+   if (sscanf(section, "kprobe/%s", symbol) != 1)
continue;
 
/* Attach prog only when symbol exists */
diff --git a/samples/bpf/tracex5_user.c b/samples/bpf/tracex5_user.c
index 98dad57a96c4..c17d3fb5fd64 100644
--- a/samples/bpf/tracex5_user.c
+++ b/samples/bpf/tracex5_user.c
@@ -39,8 +39,8 @@ int main(int ac, char **argv)
struct bpf_program *prog;
struct bpf_object *obj;
int key, fd, progs_fd;
+   const char *section;
char filename[256];
-   const char *title;
FILE *f;
 
setrlimit(RLIMIT_MEMLOCK, &r);
@@ -78,9 +78,9 @@ int main(int ac, char **argv)
}
 
bpf_object__for_each_program(prog, obj) {
-   title = bpf_program__title(prog, false);
+   section = bpf_program__section_name(prog);
/* register only syscalls to PROG_ARRAY */
-   if (sscanf(title, "kprobe/%d", &key) != 1)
+   if (sscanf(section, "kprobe/%d", &key) != 1)
continue;
 
fd = bpf_program__fd(prog);
diff --git a/samples/bpf/xdp_redirect_cpu_user.c 
b/samples/bpf/xdp_redirect_cpu_user.c
index 004c0622c913..3dd366e9474d 100644
--- a/samples/bpf/xdp_redirect_cpu_user.c
+++ b/samples/bpf/xdp_redirect_cpu_user.c
@@ -111,7 +111,7 @@ static void print_avail_progs(struct bpf_object *obj)
 
bpf_object__for_each_program(pos, obj) {
if (bpf_program__is_xdp(pos))
-   printf(" %s\n", bpf_program__title(pos, false));
+   printf(" %s\n", bpf_program__section_name(pos));
}
 }
 
-- 
2.25.1



[PATCH 2/2] samples: bpf: add xsk_fwd test file to .gitignore

2020-09-03 Thread Daniel T. Lee
This commit adds xsk_fwd test file to .gitignore which is newly added
to samples/bpf.

Signed-off-by: Daniel T. Lee 
---
 samples/bpf/.gitignore | 1 +
 1 file changed, 1 insertion(+)

diff --git a/samples/bpf/.gitignore b/samples/bpf/.gitignore
index 034800c4d1e6..b2f29bc8dc43 100644
--- a/samples/bpf/.gitignore
+++ b/samples/bpf/.gitignore
@@ -50,4 +50,5 @@ xdp_rxq_info
 xdp_sample_pkts
 xdp_tx_iptunnel
 xdpsock
+xsk_fwd
 testfile.img
-- 
2.25.1



[PATCH bpf-next 2/3] samples: bpf: Refactor kprobe tracing programs with libbpf

2020-08-23 Thread Daniel T. Lee
For the problem of increasing fragmentation of the bpf loader programs,
instead of using bpf_load.o, which is used in samples/bpf, this
commit refactors the existing kprobe tracing programs with the libbpf
bpf loader.

- For kprobe events pointing to system calls, the SYSCALL() macro in
trace_common.h was used.
- Adding a kprobe event and attaching a bpf program to it was done
through bpf_program__attach().
- Instead of using the existing BPF MAP definition, MAP definition
has been refactored with the new BTF-defined MAP format.

Signed-off-by: Daniel T. Lee 
---
 samples/bpf/Makefile  | 10 +--
 samples/bpf/lathist_kern.c| 24 +++
 samples/bpf/lathist_user.c| 42 ++--
 samples/bpf/spintest_kern.c   | 36 +-
 samples/bpf/spintest_user.c   | 68 +++
 .../bpf/test_current_task_under_cgroup_kern.c | 27 
 .../bpf/test_current_task_under_cgroup_user.c | 52 +++---
 samples/bpf/test_probe_write_user_kern.c  | 12 ++--
 samples/bpf/test_probe_write_user_user.c  | 49 ++---
 samples/bpf/trace_output_kern.c   | 15 ++--
 samples/bpf/trace_output_user.c   | 55 ++-
 11 files changed, 272 insertions(+), 118 deletions(-)

diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index 0cac89230c6d..c74d477474e2 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -71,11 +71,11 @@ tracex4-objs := tracex4_user.o
 tracex5-objs := tracex5_user.o $(TRACE_HELPERS)
 tracex6-objs := tracex6_user.o
 tracex7-objs := tracex7_user.o
-test_probe_write_user-objs := bpf_load.o test_probe_write_user_user.o
-trace_output-objs := bpf_load.o trace_output_user.o $(TRACE_HELPERS)
-lathist-objs := bpf_load.o lathist_user.o
+test_probe_write_user-objs := test_probe_write_user_user.o
+trace_output-objs := trace_output_user.o $(TRACE_HELPERS)
+lathist-objs := lathist_user.o
 offwaketime-objs := bpf_load.o offwaketime_user.o $(TRACE_HELPERS)
-spintest-objs := bpf_load.o spintest_user.o $(TRACE_HELPERS)
+spintest-objs := spintest_user.o $(TRACE_HELPERS)
 map_perf_test-objs := map_perf_test_user.o
 test_overhead-objs := bpf_load.o test_overhead_user.o
 test_cgrp2_array_pin-objs := test_cgrp2_array_pin.o
@@ -86,7 +86,7 @@ xdp1-objs := xdp1_user.o
 # reuse xdp1 source intentionally
 xdp2-objs := xdp1_user.o
 xdp_router_ipv4-objs := xdp_router_ipv4_user.o
-test_current_task_under_cgroup-objs := bpf_load.o $(CGROUP_HELPERS) \
+test_current_task_under_cgroup-objs := $(CGROUP_HELPERS) \
   test_current_task_under_cgroup_user.o
 trace_event-objs := trace_event_user.o $(TRACE_HELPERS)
 sampleip-objs := sampleip_user.o $(TRACE_HELPERS)
diff --git a/samples/bpf/lathist_kern.c b/samples/bpf/lathist_kern.c
index ca9c2e4e69aa..4adfcbbe6ef4 100644
--- a/samples/bpf/lathist_kern.c
+++ b/samples/bpf/lathist_kern.c
@@ -18,12 +18,12 @@
  * trace_preempt_[on|off] tracepoints hooks is not supported.
  */
 
-struct bpf_map_def SEC("maps") my_map = {
-   .type = BPF_MAP_TYPE_ARRAY,
-   .key_size = sizeof(int),
-   .value_size = sizeof(u64),
-   .max_entries = MAX_CPU,
-};
+struct {
+   __uint(type, BPF_MAP_TYPE_ARRAY);
+   __type(key, int);
+   __type(value, u64);
+   __uint(max_entries, MAX_CPU);
+} my_map SEC(".maps");
 
 SEC("kprobe/trace_preempt_off")
 int bpf_prog1(struct pt_regs *ctx)
@@ -61,12 +61,12 @@ static unsigned int log2l(unsigned long v)
return log2(v);
 }
 
-struct bpf_map_def SEC("maps") my_lat = {
-   .type = BPF_MAP_TYPE_ARRAY,
-   .key_size = sizeof(int),
-   .value_size = sizeof(long),
-   .max_entries = MAX_CPU * MAX_ENTRIES,
-};
+struct {
+   __uint(type, BPF_MAP_TYPE_ARRAY);
+   __type(key, int);
+   __type(value, long);
+   __uint(max_entries, MAX_CPU * MAX_ENTRIES);
+} my_lat SEC(".maps");
 
 SEC("kprobe/trace_preempt_on")
 int bpf_prog2(struct pt_regs *ctx)
diff --git a/samples/bpf/lathist_user.c b/samples/bpf/lathist_user.c
index 2ff2839a52d5..7d8ff2418303 100644
--- a/samples/bpf/lathist_user.c
+++ b/samples/bpf/lathist_user.c
@@ -6,9 +6,8 @@
 #include 
 #include 
 #include 
-#include 
+#include 
 #include 
-#include "bpf_load.h"
 
 #define MAX_ENTRIES20
 #define MAX_CPU4
@@ -81,20 +80,51 @@ static void get_data(int fd)
 
 int main(int argc, char **argv)
 {
+   struct bpf_link *links[2];
+   struct bpf_program *prog;
+   struct bpf_object *obj;
char filename[256];
+   int map_fd, i = 0;
 
snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+   obj = bpf_object__open_file(filename, NULL);
+   if (libbpf_get_error(obj)) {
+   fprintf(stderr, "ERROR: opening BPF object file failed\n");
+   return 0;
+   }
+
+   /* load 

[PATCH bpf-next 3/3] samples: bpf: Refactor tracepoint tracing programs with libbpf

2020-08-23 Thread Daniel T. Lee
For the problem of increasing fragmentation of the bpf loader programs,
instead of using bpf_load.o, which is used in samples/bpf, this
commit refactors the existing tracepoint tracing programs with the libbpf
bpf loader.

- Adding a tracepoint event and attaching a bpf program to it was done
through bpf_program__attach().
- Instead of using the existing BPF MAP definition, MAP definition
has been refactored with the new BTF-defined MAP format.

Signed-off-by: Daniel T. Lee 
---
 samples/bpf/Makefile   |  6 ++--
 samples/bpf/cpustat_kern.c | 36 +--
 samples/bpf/cpustat_user.c | 47 
 samples/bpf/offwaketime_kern.c | 52 +--
 samples/bpf/offwaketime_user.c | 66 ++
 samples/bpf/syscall_tp_kern.c  | 24 ++---
 samples/bpf/syscall_tp_user.c  | 54 +---
 7 files changed, 192 insertions(+), 93 deletions(-)

diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index c74d477474e2..a6d3646b3818 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -74,7 +74,7 @@ tracex7-objs := tracex7_user.o
 test_probe_write_user-objs := test_probe_write_user_user.o
 trace_output-objs := trace_output_user.o $(TRACE_HELPERS)
 lathist-objs := lathist_user.o
-offwaketime-objs := bpf_load.o offwaketime_user.o $(TRACE_HELPERS)
+offwaketime-objs := offwaketime_user.o $(TRACE_HELPERS)
 spintest-objs := spintest_user.o $(TRACE_HELPERS)
 map_perf_test-objs := map_perf_test_user.o
 test_overhead-objs := bpf_load.o test_overhead_user.o
@@ -100,8 +100,8 @@ xdp_redirect_map-objs := xdp_redirect_map_user.o
 xdp_redirect_cpu-objs := bpf_load.o xdp_redirect_cpu_user.o
 xdp_monitor-objs := bpf_load.o xdp_monitor_user.o
 xdp_rxq_info-objs := xdp_rxq_info_user.o
-syscall_tp-objs := bpf_load.o syscall_tp_user.o
-cpustat-objs := bpf_load.o cpustat_user.o
+syscall_tp-objs := syscall_tp_user.o
+cpustat-objs := cpustat_user.o
 xdp_adjust_tail-objs := xdp_adjust_tail_user.o
 xdpsock-objs := xdpsock_user.o
 xdp_fwd-objs := xdp_fwd_user.o
diff --git a/samples/bpf/cpustat_kern.c b/samples/bpf/cpustat_kern.c
index a86a19d5f033..5aefd19cdfa1 100644
--- a/samples/bpf/cpustat_kern.c
+++ b/samples/bpf/cpustat_kern.c
@@ -51,28 +51,28 @@ static int cpu_opps[] = { 208000, 432000, 729000, 96, 
120 };
 #define MAP_OFF_PSTATE_IDX 3
 #define MAP_OFF_NUM4
 
-struct bpf_map_def SEC("maps") my_map = {
-   .type = BPF_MAP_TYPE_ARRAY,
-   .key_size = sizeof(u32),
-   .value_size = sizeof(u64),
-   .max_entries = MAX_CPU * MAP_OFF_NUM,
-};
+struct {
+   __uint(type, BPF_MAP_TYPE_ARRAY);
+   __type(key, u32);
+   __type(value, u64);
+   __uint(max_entries, MAX_CPU * MAP_OFF_NUM);
+} my_map SEC(".maps");
 
 /* cstate_duration records duration time for every idle state per CPU */
-struct bpf_map_def SEC("maps") cstate_duration = {
-   .type = BPF_MAP_TYPE_ARRAY,
-   .key_size = sizeof(u32),
-   .value_size = sizeof(u64),
-   .max_entries = MAX_CPU * MAX_CSTATE_ENTRIES,
-};
+struct {
+   __uint(type, BPF_MAP_TYPE_ARRAY);
+   __type(key, u32);
+   __type(value, u64);
+   __uint(max_entries, MAX_CPU * MAX_CSTATE_ENTRIES);
+} cstate_duration SEC(".maps");
 
 /* pstate_duration records duration time for every operating point per CPU */
-struct bpf_map_def SEC("maps") pstate_duration = {
-   .type = BPF_MAP_TYPE_ARRAY,
-   .key_size = sizeof(u32),
-   .value_size = sizeof(u64),
-   .max_entries = MAX_CPU * MAX_PSTATE_ENTRIES,
-};
+struct {
+   __uint(type, BPF_MAP_TYPE_ARRAY);
+   __type(key, u32);
+   __type(value, u64);
+   __uint(max_entries, MAX_CPU * MAX_PSTATE_ENTRIES);
+} pstate_duration SEC(".maps");
 
 /*
  * The trace events for cpu_idle and cpu_frequency are taken from:
diff --git a/samples/bpf/cpustat_user.c b/samples/bpf/cpustat_user.c
index 869a99406dbf..96675985e9e0 100644
--- a/samples/bpf/cpustat_user.c
+++ b/samples/bpf/cpustat_user.c
@@ -9,7 +9,6 @@
 #include 
 #include 
 #include 
-#include 
 #include 
 #include 
 #include 
@@ -18,7 +17,9 @@
 #include 
 
 #include 
-#include "bpf_load.h"
+#include 
+
+static int cstate_map_fd, pstate_map_fd;
 
 #define MAX_CPU8
 #define MAX_PSTATE_ENTRIES 5
@@ -181,21 +182,50 @@ static void int_exit(int sig)
 {
cpu_stat_inject_cpu_idle_event();
cpu_stat_inject_cpu_frequency_event();
-   cpu_stat_update(map_fd[1], map_fd[2]);
+   cpu_stat_update(cstate_map_fd, pstate_map_fd);
cpu_stat_print();
exit(0);
 }
 
 int main(int argc, char **argv)
 {
+   struct bpf_link *link = NULL;
+   struct bpf_program *prog;
+   struct bpf_object *obj;
char filename[256];
int ret;
 
snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+   obj = bpf_object__open_file(file

[PATCH bpf-next 1/3] samples: bpf: cleanup bpf_load.o from Makefile

2020-08-23 Thread Daniel T. Lee
Since commit cc7f641d637b ("samples: bpf: Refactor BPF map performance
test with libbpf") omitted the removal of bpf_load.o from the Makefile,
this commit removes the bpf_load.o rule for targets where bpf_load.o is
not used.

Fixes: cc7f641d637b ("samples: bpf: Refactor BPF map performance test with 
libbpf")
Signed-off-by: Daniel T. Lee 
---
 samples/bpf/Makefile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index f87ee02073ba..0cac89230c6d 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -76,7 +76,7 @@ trace_output-objs := bpf_load.o trace_output_user.o 
$(TRACE_HELPERS)
 lathist-objs := bpf_load.o lathist_user.o
 offwaketime-objs := bpf_load.o offwaketime_user.o $(TRACE_HELPERS)
 spintest-objs := bpf_load.o spintest_user.o $(TRACE_HELPERS)
-map_perf_test-objs := bpf_load.o map_perf_test_user.o
+map_perf_test-objs := map_perf_test_user.o
 test_overhead-objs := bpf_load.o test_overhead_user.o
 test_cgrp2_array_pin-objs := test_cgrp2_array_pin.o
 test_cgrp2_attach-objs := test_cgrp2_attach.o
-- 
2.25.1



[PATCH bpf-next 0/3] samples: bpf: Refactor tracing programs with libbpf

2020-08-23 Thread Daniel T. Lee
For the problem of increasing fragmentation of the bpf loader programs,
instead of using bpf_load.o, which is used in samples/bpf, this
patch refactors the existing kprobe and tracepoint tracing programs with
the libbpf bpf loader.

- For kprobe events pointing to system calls, the SYSCALL() macro in
trace_common.h was used.
- Adding a kprobe/tracepoint event and attaching a bpf program to it
was done through bpf_program__attach().
- Instead of using the existing BPF MAP definition, MAP definition
has been refactored with the new BTF-defined MAP format.

Daniel T. Lee (3):
  samples: bpf: cleanup bpf_load.o from Makefile
  samples: bpf: Refactor kprobe tracing programs with libbpf
  samples: bpf: Refactor tracepoint tracing programs with libbpf

 samples/bpf/Makefile  | 18 ++---
 samples/bpf/cpustat_kern.c| 36 +-
 samples/bpf/cpustat_user.c| 47 +++--
 samples/bpf/lathist_kern.c| 24 +++
 samples/bpf/lathist_user.c| 42 ++--
 samples/bpf/offwaketime_kern.c| 52 +++---
 samples/bpf/offwaketime_user.c| 66 ++
 samples/bpf/spintest_kern.c   | 36 +-
 samples/bpf/spintest_user.c   | 68 +++
 samples/bpf/syscall_tp_kern.c | 24 +++
 samples/bpf/syscall_tp_user.c | 54 +++
 .../bpf/test_current_task_under_cgroup_kern.c | 27 
 .../bpf/test_current_task_under_cgroup_user.c | 52 +++---
 samples/bpf/test_probe_write_user_kern.c  | 12 ++--
 samples/bpf/test_probe_write_user_user.c  | 49 ++---
 samples/bpf/trace_output_kern.c   | 15 ++--
 samples/bpf/trace_output_user.c   | 55 ++-
 17 files changed, 465 insertions(+), 212 deletions(-)

-- 
2.25.1



[PATCH bpf-next] samples: bpf: Fix broken bpf programs due to removed symbol

2020-08-17 Thread Daniel T. Lee
From commit f1394b798814 ("block: mark blk_account_io_completion
static"), symbol blk_account_io_completion() has been marked as static,
which makes it no longer possible to attach a kprobe to this event.
Currently, there are broken samples due to this reason.

As a solution to this, attach kprobe events to blk_account_io_done()
to modify them to perform the same behavior as before.

Signed-off-by: Daniel T. Lee 
---
 samples/bpf/task_fd_query_kern.c | 2 +-
 samples/bpf/task_fd_query_user.c | 2 +-
 samples/bpf/tracex3_kern.c   | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/samples/bpf/task_fd_query_kern.c b/samples/bpf/task_fd_query_kern.c
index 278ade5427c8..c821294e1774 100644
--- a/samples/bpf/task_fd_query_kern.c
+++ b/samples/bpf/task_fd_query_kern.c
@@ -10,7 +10,7 @@ int bpf_prog1(struct pt_regs *ctx)
return 0;
 }
 
-SEC("kretprobe/blk_account_io_completion")
+SEC("kretprobe/blk_account_io_done")
 int bpf_prog2(struct pt_regs *ctx)
 {
return 0;
diff --git a/samples/bpf/task_fd_query_user.c b/samples/bpf/task_fd_query_user.c
index ff2e9c1c7266..4a74531dc403 100644
--- a/samples/bpf/task_fd_query_user.c
+++ b/samples/bpf/task_fd_query_user.c
@@ -314,7 +314,7 @@ int main(int argc, char **argv)
/* test two functions in the corresponding *_kern.c file */
CHECK_AND_RET(test_debug_fs_kprobe(0, "blk_mq_start_request",
   BPF_FD_TYPE_KPROBE));
-   CHECK_AND_RET(test_debug_fs_kprobe(1, "blk_account_io_completion",
+   CHECK_AND_RET(test_debug_fs_kprobe(1, "blk_account_io_done",
   BPF_FD_TYPE_KRETPROBE));
 
/* test nondebug fs kprobe */
diff --git a/samples/bpf/tracex3_kern.c b/samples/bpf/tracex3_kern.c
index 659613c19a82..710a4410b2fb 100644
--- a/samples/bpf/tracex3_kern.c
+++ b/samples/bpf/tracex3_kern.c
@@ -49,7 +49,7 @@ struct {
__uint(max_entries, SLOTS);
 } lat_map SEC(".maps");
 
-SEC("kprobe/blk_account_io_completion")
+SEC("kprobe/blk_account_io_done")
 int bpf_prog2(struct pt_regs *ctx)
 {
long rq = PT_REGS_PARM1(ctx);
-- 
2.25.1



[PATCH bpf-next] libbpf: fix uninitialized pointer at btf__parse_raw()

2020-08-05 Thread Daniel T. Lee
Recently, from commit 94a1fedd63ed ("libbpf: Add btf__parse_raw() and
generic btf__parse() APIs"), new API has been added to libbpf that
allows to parse BTF from raw data file (btf__parse_raw()).

The commit causes a build failure of samples/bpf due to improper access
of an uninitialized pointer at btf__parse_raw().

btf.c: In function btf__parse_raw:
btf.c:625:28: error: btf may be used uninitialized in this function
  625 |  return err ? ERR_PTR(err) : btf;
  | ~~~^

This commit fixes the build failure of samples/bpf by adding code of
initializing btf pointer as NULL.

Fixes: 94a1fedd63ed ("libbpf: Add btf__parse_raw() and generic btf__parse() 
APIs")
Signed-off-by: Daniel T. Lee 
---
 tools/lib/bpf/btf.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
index 856b09a04563..4843e44916f7 100644
--- a/tools/lib/bpf/btf.c
+++ b/tools/lib/bpf/btf.c
@@ -564,8 +564,8 @@ struct btf *btf__parse_elf(const char *path, struct btf_ext 
**btf_ext)
 
 struct btf *btf__parse_raw(const char *path)
 {
+   struct btf *btf = NULL;
void *data = NULL;
-   struct btf *btf;
FILE *f = NULL;
__u16 magic;
int err = 0;
-- 
2.25.1



Re: [PATCH bpf-next v2 4/4] selftests: bpf: remove unused bpf_map_def_legacy struct

2020-07-07 Thread Daniel T. Lee
On Wed, Jul 8, 2020 at 4:00 AM Andrii Nakryiko
 wrote:
>
> On Tue, Jul 7, 2020 at 11:49 AM Daniel T. Lee  wrote:
> >
> > samples/bpf no longer use bpf_map_def_legacy and instead use the
> > libbpf's bpf_map_def or new BTF-defined MAP format. This commit removes
> > unused bpf_map_def_legacy struct from selftests/bpf/bpf_legacy.h.
> >
> > Signed-off-by: Daniel T. Lee 
> > ---
>
> Next time please don't forget to keep Ack's you've received on
> previous revision.
>

I'll keep that in mind.

Thank you for your time and effort for the review.
Daniel.

> >  tools/testing/selftests/bpf/bpf_legacy.h | 14 --
> >  1 file changed, 14 deletions(-)
> >
> > diff --git a/tools/testing/selftests/bpf/bpf_legacy.h 
> > b/tools/testing/selftests/bpf/bpf_legacy.h
> > index 6f8988738bc1..719ab56cdb5d 100644
> > --- a/tools/testing/selftests/bpf/bpf_legacy.h
> > +++ b/tools/testing/selftests/bpf/bpf_legacy.h
> > @@ -2,20 +2,6 @@
> >  #ifndef __BPF_LEGACY__
> >  #define __BPF_LEGACY__
> >
> > -/*
> > - * legacy bpf_map_def with extra fields supported only by bpf_load(), do 
> > not
> > - * use outside of samples/bpf
> > - */
> > -struct bpf_map_def_legacy {
> > -   unsigned int type;
> > -   unsigned int key_size;
> > -   unsigned int value_size;
> > -   unsigned int max_entries;
> > -   unsigned int map_flags;
> > -   unsigned int inner_map_idx;
> > -   unsigned int numa_node;
> > -};
> > -
> >  #define BPF_ANNOTATE_KV_PAIR(name, type_key, type_val) \
> > struct btf_map_##name { \
> > type_key key;   \
> > --
> > 2.25.1
> >


[PATCH bpf-next v2 4/4] selftests: bpf: remove unused bpf_map_def_legacy struct

2020-07-07 Thread Daniel T. Lee
samples/bpf no longer use bpf_map_def_legacy and instead use the
libbpf's bpf_map_def or new BTF-defined MAP format. This commit removes
unused bpf_map_def_legacy struct from selftests/bpf/bpf_legacy.h.

Signed-off-by: Daniel T. Lee 
---
 tools/testing/selftests/bpf/bpf_legacy.h | 14 --
 1 file changed, 14 deletions(-)

diff --git a/tools/testing/selftests/bpf/bpf_legacy.h 
b/tools/testing/selftests/bpf/bpf_legacy.h
index 6f8988738bc1..719ab56cdb5d 100644
--- a/tools/testing/selftests/bpf/bpf_legacy.h
+++ b/tools/testing/selftests/bpf/bpf_legacy.h
@@ -2,20 +2,6 @@
 #ifndef __BPF_LEGACY__
 #define __BPF_LEGACY__
 
-/*
- * legacy bpf_map_def with extra fields supported only by bpf_load(), do not
- * use outside of samples/bpf
- */
-struct bpf_map_def_legacy {
-   unsigned int type;
-   unsigned int key_size;
-   unsigned int value_size;
-   unsigned int max_entries;
-   unsigned int map_flags;
-   unsigned int inner_map_idx;
-   unsigned int numa_node;
-};
-
 #define BPF_ANNOTATE_KV_PAIR(name, type_key, type_val) \
struct btf_map_##name { \
type_key key;   \
-- 
2.25.1



[PATCH bpf-next v2 2/4] samples: bpf: refactor BPF map in map test with libbpf

2020-07-07 Thread Daniel T. Lee
From commit 646f02ffdd49 ("libbpf: Add BTF-defined map-in-map
support"), a way to define an internal map in a BTF-defined map has been
added.

Instead of using previous 'inner_map_idx' definition, the structure to
be used for the inner map can be directly defined using array directive.

__array(values, struct inner_map)

This commit refactors map in map test program with libbpf by explicitly
defining inner map with BTF-defined format.

Signed-off-by: Daniel T. Lee 

---
Changes in V2:
 - fix wrong error check logic with bpf_program

 samples/bpf/Makefile   |  2 +-
 samples/bpf/test_map_in_map_kern.c | 85 +++---
 samples/bpf/test_map_in_map_user.c | 53 +--
 3 files changed, 91 insertions(+), 49 deletions(-)

diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index 8403e4762306..f87ee02073ba 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -93,7 +93,7 @@ sampleip-objs := sampleip_user.o $(TRACE_HELPERS)
 tc_l2_redirect-objs := bpf_load.o tc_l2_redirect_user.o
 lwt_len_hist-objs := bpf_load.o lwt_len_hist_user.o
 xdp_tx_iptunnel-objs := xdp_tx_iptunnel_user.o
-test_map_in_map-objs := bpf_load.o test_map_in_map_user.o
+test_map_in_map-objs := test_map_in_map_user.o
 per_socket_stats_example-objs := cookie_uid_helper_example.o
 xdp_redirect-objs := xdp_redirect_user.o
 xdp_redirect_map-objs := xdp_redirect_map_user.o
diff --git a/samples/bpf/test_map_in_map_kern.c 
b/samples/bpf/test_map_in_map_kern.c
index 36a203e69064..8def45c5b697 100644
--- a/samples/bpf/test_map_in_map_kern.c
+++ b/samples/bpf/test_map_in_map_kern.c
@@ -11,7 +11,6 @@
 #include 
 #include 
 #include 
-#include "bpf_legacy.h"
 #include 
 #include 
 #include "trace_common.h"
@@ -19,60 +18,60 @@
 #define MAX_NR_PORTS 65536
 
 /* map #0 */
-struct bpf_map_def_legacy SEC("maps") port_a = {
-   .type = BPF_MAP_TYPE_ARRAY,
-   .key_size = sizeof(u32),
-   .value_size = sizeof(int),
-   .max_entries = MAX_NR_PORTS,
-};
+struct inner_a {
+   __uint(type, BPF_MAP_TYPE_ARRAY);
+   __type(key, u32);
+   __type(value, int);
+   __uint(max_entries, MAX_NR_PORTS);
+} port_a SEC(".maps");
 
 /* map #1 */
-struct bpf_map_def_legacy SEC("maps") port_h = {
-   .type = BPF_MAP_TYPE_HASH,
-   .key_size = sizeof(u32),
-   .value_size = sizeof(int),
-   .max_entries = 1,
-};
+struct inner_h {
+   __uint(type, BPF_MAP_TYPE_HASH);
+   __type(key, u32);
+   __type(value, int);
+   __uint(max_entries, 1);
+} port_h SEC(".maps");
 
 /* map #2 */
-struct bpf_map_def_legacy SEC("maps") reg_result_h = {
-   .type = BPF_MAP_TYPE_HASH,
-   .key_size = sizeof(u32),
-   .value_size = sizeof(int),
-   .max_entries = 1,
-};
+struct {
+   __uint(type, BPF_MAP_TYPE_HASH);
+   __type(key, u32);
+   __type(value, int);
+   __uint(max_entries, 1);
+} reg_result_h SEC(".maps");
 
 /* map #3 */
-struct bpf_map_def_legacy SEC("maps") inline_result_h = {
-   .type = BPF_MAP_TYPE_HASH,
-   .key_size = sizeof(u32),
-   .value_size = sizeof(int),
-   .max_entries = 1,
-};
+struct {
+   __uint(type, BPF_MAP_TYPE_HASH);
+   __type(key, u32);
+   __type(value, int);
+   __uint(max_entries, 1);
+} inline_result_h SEC(".maps");
 
 /* map #4 */ /* Test case #0 */
-struct bpf_map_def_legacy SEC("maps") a_of_port_a = {
-   .type = BPF_MAP_TYPE_ARRAY_OF_MAPS,
-   .key_size = sizeof(u32),
-   .inner_map_idx = 0, /* map_fd[0] is port_a */
-   .max_entries = MAX_NR_PORTS,
-};
+struct {
+   __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
+   __uint(max_entries, MAX_NR_PORTS);
+   __uint(key_size, sizeof(u32));
+   __array(values, struct inner_a); /* use inner_a as inner map */
+} a_of_port_a SEC(".maps");
 
 /* map #5 */ /* Test case #1 */
-struct bpf_map_def_legacy SEC("maps") h_of_port_a = {
-   .type = BPF_MAP_TYPE_HASH_OF_MAPS,
-   .key_size = sizeof(u32),
-   .inner_map_idx = 0, /* map_fd[0] is port_a */
-   .max_entries = 1,
-};
+struct {
+   __uint(type, BPF_MAP_TYPE_HASH_OF_MAPS);
+   __uint(max_entries, 1);
+   __uint(key_size, sizeof(u32));
+   __array(values, struct inner_a); /* use inner_a as inner map */
+} h_of_port_a SEC(".maps");
 
 /* map #6 */ /* Test case #2 */
-struct bpf_map_def_legacy SEC("maps") h_of_port_h = {
-   .type = BPF_MAP_TYPE_HASH_OF_MAPS,
-   .key_size = sizeof(u32),
-   .inner_map_idx = 1, /* map_fd[1] is port_h */
-   .max_entries = 1,
-};
+struct {
+   __uint(type, BPF_MAP_TYPE_HASH_OF_MAPS);
+   __uint(max_entries, 1);
+   __uint(key_size, sizeof(u32));
+   __array(values, struct inner_h); /* use inner_h as inner map */
+} h_of_port_h SEC(".maps");
 
 static __always_inline int do_r

[PATCH bpf-next v2 3/4] samples: bpf: refactor BPF map performance test with libbpf

2020-07-07 Thread Daniel T. Lee
Previously, in order to set the numa_node attribute at the time of map
creation using "libbpf", it was necessary to call bpf_create_map_node()
directly (bpf_load approach), instead of calling bpf_object_load()
that handles everything on its own, including map creation. And because
of this problem, this sample had problems with refactoring from bpf_load
to libbpf.

However, by commit 1bdb6c9a1c43 ("libbpf: Add a bunch of attribute
getters/setters for map definitions") added the numa_node attribute and
allowed it to be set in the map.

By using libbpf instead of bpf_load, the inner map definition has
been explicitly declared with BTF-defined format. Also, the element of
ARRAY_OF_MAPS was also statically specified using the BTF format. And
for this reason some logic in fixup_map() was not needed and changed
or removed.

Signed-off-by: Daniel T. Lee 

---
Changes in V2:
 - set numa_node 0 declaratively at map definition instead of setting it
 from user-space
 - static initialization of ARRAY_OF_MAPS element with '.values'

 samples/bpf/map_perf_test_kern.c | 179 ---
 samples/bpf/map_perf_test_user.c | 164 ++--
 2 files changed, 196 insertions(+), 147 deletions(-)

diff --git a/samples/bpf/map_perf_test_kern.c b/samples/bpf/map_perf_test_kern.c
index c9b31193ca12..8773f22b6a98 100644
--- a/samples/bpf/map_perf_test_kern.c
+++ b/samples/bpf/map_perf_test_kern.c
@@ -9,7 +9,6 @@
 #include 
 #include 
 #include 
-#include "bpf_legacy.h"
 #include 
 #include 
 #include "trace_common.h"
@@ -17,89 +16,93 @@
 #define MAX_ENTRIES 1000
 #define MAX_NR_CPUS 1024
 
-struct bpf_map_def_legacy SEC("maps") hash_map = {
-   .type = BPF_MAP_TYPE_HASH,
-   .key_size = sizeof(u32),
-   .value_size = sizeof(long),
-   .max_entries = MAX_ENTRIES,
+struct {
+   __uint(type, BPF_MAP_TYPE_HASH);
+   __type(key, u32);
+   __type(value, long);
+   __uint(max_entries, MAX_ENTRIES);
+} hash_map SEC(".maps");
+
+struct {
+   __uint(type, BPF_MAP_TYPE_LRU_HASH);
+   __type(key, u32);
+   __type(value, long);
+   __uint(max_entries, 1);
+} lru_hash_map SEC(".maps");
+
+struct {
+   __uint(type, BPF_MAP_TYPE_LRU_HASH);
+   __type(key, u32);
+   __type(value, long);
+   __uint(max_entries, 1);
+   __uint(map_flags, BPF_F_NO_COMMON_LRU);
+} nocommon_lru_hash_map SEC(".maps");
+
+struct inner_lru {
+   __uint(type, BPF_MAP_TYPE_LRU_HASH);
+   __type(key, u32);
+   __type(value, long);
+   __uint(max_entries, MAX_ENTRIES);
+   __uint(map_flags, BPF_F_NUMA_NODE);
+   __uint(numa_node, 0);
+} inner_lru_hash_map SEC(".maps");
+
+struct {
+   __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
+   __uint(max_entries, MAX_NR_CPUS);
+   __uint(key_size, sizeof(u32));
+   __array(values, struct inner_lru); /* use inner_lru as inner map */
+} array_of_lru_hashs SEC(".maps") = {
+   /* statically initialize the first element */
+   .values = { &inner_lru_hash_map },
 };
 
-struct bpf_map_def_legacy SEC("maps") lru_hash_map = {
-   .type = BPF_MAP_TYPE_LRU_HASH,
-   .key_size = sizeof(u32),
-   .value_size = sizeof(long),
-   .max_entries = 1,
-};
-
-struct bpf_map_def_legacy SEC("maps") nocommon_lru_hash_map = {
-   .type = BPF_MAP_TYPE_LRU_HASH,
-   .key_size = sizeof(u32),
-   .value_size = sizeof(long),
-   .max_entries = 1,
-   .map_flags = BPF_F_NO_COMMON_LRU,
-};
-
-struct bpf_map_def_legacy SEC("maps") inner_lru_hash_map = {
-   .type = BPF_MAP_TYPE_LRU_HASH,
-   .key_size = sizeof(u32),
-   .value_size = sizeof(long),
-   .max_entries = MAX_ENTRIES,
-   .map_flags = BPF_F_NUMA_NODE,
-   .numa_node = 0,
-};
-
-struct bpf_map_def_legacy SEC("maps") array_of_lru_hashs = {
-   .type = BPF_MAP_TYPE_ARRAY_OF_MAPS,
-   .key_size = sizeof(u32),
-   .max_entries = MAX_NR_CPUS,
-};
-
-struct bpf_map_def_legacy SEC("maps") percpu_hash_map = {
-   .type = BPF_MAP_TYPE_PERCPU_HASH,
-   .key_size = sizeof(u32),
-   .value_size = sizeof(long),
-   .max_entries = MAX_ENTRIES,
-};
-
-struct bpf_map_def_legacy SEC("maps") hash_map_alloc = {
-   .type = BPF_MAP_TYPE_HASH,
-   .key_size = sizeof(u32),
-   .value_size = sizeof(long),
-   .max_entries = MAX_ENTRIES,
-   .map_flags = BPF_F_NO_PREALLOC,
-};
-
-struct bpf_map_def_legacy SEC("maps") percpu_hash_map_alloc = {
-   .type = BPF_MAP_TYPE_PERCPU_HASH,
-   .key_size = sizeof(u32),
-   .value_size = sizeof(long),
-   .max_entries = MAX_ENTRIES,
-   .map_flags = BPF_F_NO_PREALLOC,
-};
-
-struct bpf_map_def_legacy SEC("maps") lpm_trie_map_alloc = {
-   .type = BPF_MAP_TYPE_LPM_TRIE,
-   .key_size = 8,
-

[PATCH bpf-next v2 0/4] samples: bpf: refactor BPF map test with libbpf

2020-07-07 Thread Daniel T. Lee
There have been many changes in how the current bpf program defines a
map. The development of libbpf has led to the new method called
BTF-defined map, which is a new way of defining BPF maps, and thus has
a lot of differences from the existing MAP definition method.

Although bpf_load was also internally using libbpf, fragmentation in
its implementation began to occur, such as using its own structure,
bpf_load_map_def, to define the map.

Therefore, in this patch set, map test programs, which are closely
related to changes in the definition method of BPF map, were refactored
with libbpf.

---
Changes in V2:
 - instead of changing event from __x64_sys_connect to __sys_connect,
 fetch and set register values directly
 - fix wrong error check logic with bpf_program
 - set numa_node 0 declaratively at map definition instead of setting it
 from user-space
 - static initialization of ARRAY_OF_MAPS element with '.values'

Daniel T. Lee (4):
  samples: bpf: fix bpf programs with kprobe/sys_connect event
  samples: bpf: refactor BPF map in map test with libbpf
  samples: bpf: refactor BPF map performance test with libbpf
  selftests: bpf: remove unused bpf_map_def_legacy struct

 samples/bpf/Makefile |   2 +-
 samples/bpf/map_perf_test_kern.c | 188 ---
 samples/bpf/map_perf_test_user.c | 164 +---
 samples/bpf/test_map_in_map_kern.c   |  94 ++--
 samples/bpf/test_map_in_map_user.c   |  53 ++-
 samples/bpf/test_probe_write_user_kern.c |   9 +-
 tools/testing/selftests/bpf/bpf_legacy.h |  14 --
 7 files changed, 305 insertions(+), 219 deletions(-)

-- 
2.25.1



[PATCH bpf-next v2 1/4] samples: bpf: fix bpf programs with kprobe/sys_connect event

2020-07-07 Thread Daniel T. Lee
Currently, BPF programs with kprobe/sys_connect does not work properly.

Commit 34745aed515c ("samples/bpf: fix kprobe attachment issue on x64")
This commit modifies the bpf_load behavior of kprobe events in the x64
architecture. If the current kprobe event target starts with "sys_*",
add the prefix "__x64_" to the front of the event.

Appending "__x64_" prefix with kprobe/sys_* event was appropriate as a
solution to most of the problems caused by the commit below.

commit d5a00528b58c ("syscalls/core, syscalls/x86: Rename struct
pt_regs-based sys_*() to __x64_sys_*()")

However, there is a problem with the sys_connect kprobe event that does
not work properly. For __sys_connect event, parameters can be fetched
normally, but for __x64_sys_connect, parameters cannot be fetched.

818d3520 <__x64_sys_connect>:
818d3520: e8 fb df 32 00callq   0x81c01520
<__fentry__>
818d3525: 48 8b 57 60   movq96(%rdi), %rdx
818d3529: 48 8b 77 68   movq104(%rdi), %rsi
818d352d: 48 8b 7f 70   movq112(%rdi), %rdi
818d3531: e8 1a ff ff ffcallq   0x818d3450
<__sys_connect>
818d3536: 48 98 cltq
818d3538: c3retq
818d3539: 0f 1f 80 00 00 00 00  nopl(%rax)

As the assembly code for __x64_sys_connect shows, parameters should be
fetched and set into rdi, rsi, rdx registers prior to calling
__sys_connect.

Because of this problem, this commit fixes the sys_connect event by
first getting the value of the rdi register and then the value of the
rdi, rsi, and rdx register through an offset based on that value.

Fixes: 34745aed515c ("samples/bpf: fix kprobe attachment issue on x64")
Signed-off-by: Daniel T. Lee 

---
Changes in V2:
 - instead of changing event from __x64_sys_connect to __sys_connect,
 fetch and set register values directly

 samples/bpf/map_perf_test_kern.c | 9 ++---
 samples/bpf/test_map_in_map_kern.c   | 9 ++---
 samples/bpf/test_probe_write_user_kern.c | 9 ++---
 3 files changed, 18 insertions(+), 9 deletions(-)

diff --git a/samples/bpf/map_perf_test_kern.c b/samples/bpf/map_perf_test_kern.c
index 12e91ae64d4d..c9b31193ca12 100644
--- a/samples/bpf/map_perf_test_kern.c
+++ b/samples/bpf/map_perf_test_kern.c
@@ -11,6 +11,8 @@
 #include 
 #include "bpf_legacy.h"
 #include 
+#include 
+#include "trace_common.h"
 
 #define MAX_ENTRIES 1000
 #define MAX_NR_CPUS 1024
@@ -154,9 +156,10 @@ int stress_percpu_hmap_alloc(struct pt_regs *ctx)
return 0;
 }
 
-SEC("kprobe/sys_connect")
+SEC("kprobe/" SYSCALL(sys_connect))
 int stress_lru_hmap_alloc(struct pt_regs *ctx)
 {
+   struct pt_regs *real_regs = (struct pt_regs *)PT_REGS_PARM1_CORE(ctx);
	char fmt[] = "Failed at stress_lru_hmap_alloc. ret:%d\n";
union {
u16 dst6[8];
@@ -175,8 +178,8 @@ int stress_lru_hmap_alloc(struct pt_regs *ctx)
long val = 1;
u32 key = 0;
 
-   in6 = (struct sockaddr_in6 *)PT_REGS_PARM2(ctx);
-   addrlen = (int)PT_REGS_PARM3(ctx);
+   in6 = (struct sockaddr_in6 *)PT_REGS_PARM2_CORE(real_regs);
+   addrlen = (int)PT_REGS_PARM3_CORE(real_regs);
 
if (addrlen != sizeof(*in6))
return 0;
diff --git a/samples/bpf/test_map_in_map_kern.c 
b/samples/bpf/test_map_in_map_kern.c
index 6cee61e8ce9b..36a203e69064 100644
--- a/samples/bpf/test_map_in_map_kern.c
+++ b/samples/bpf/test_map_in_map_kern.c
@@ -13,6 +13,8 @@
 #include 
 #include "bpf_legacy.h"
 #include 
+#include 
+#include "trace_common.h"
 
 #define MAX_NR_PORTS 65536
 
@@ -102,9 +104,10 @@ static __always_inline int do_inline_hash_lookup(void 
*inner_map, u32 port)
return result ? *result : -ENOENT;
 }
 
-SEC("kprobe/sys_connect")
+SEC("kprobe/" SYSCALL(sys_connect))
 int trace_sys_connect(struct pt_regs *ctx)
 {
+   struct pt_regs *real_regs = (struct pt_regs *)PT_REGS_PARM1_CORE(ctx);
struct sockaddr_in6 *in6;
u16 test_case, port, dst6[8];
int addrlen, ret, inline_ret, ret_key = 0;
@@ -112,8 +115,8 @@ int trace_sys_connect(struct pt_regs *ctx)
void *outer_map, *inner_map;
bool inline_hash = false;
 
-   in6 = (struct sockaddr_in6 *)PT_REGS_PARM2(ctx);
-   addrlen = (int)PT_REGS_PARM3(ctx);
+   in6 = (struct sockaddr_in6 *)PT_REGS_PARM2_CORE(real_regs);
+   addrlen = (int)PT_REGS_PARM3_CORE(real_regs);
 
if (addrlen != sizeof(*in6))
return 0;
diff --git a/samples/bpf/test_probe_write_user_kern.c 
b/samples/bpf/test_probe_write_user_kern.c
index f033f36a13a3..fd651a65281e 100644
--- a/samples/bpf/test_probe_write_user_kern.c
+++ b/samples/bpf/test_probe_write_user_kern.c
@@ -10,6 +10,8 @@
 #include 

Re: [PATCH bpf-next 1/4] samples: bpf: fix bpf programs with kprobe/sys_connect event

2020-07-06 Thread Daniel T. Lee
On Tue, Jul 7, 2020 at 2:15 PM Andrii Nakryiko
 wrote:
>
> On Mon, Jul 6, 2020 at 7:33 PM Daniel T. Lee  wrote:
> >
> > On Tue, Jul 7, 2020 at 8:50 AM Andrii Nakryiko
> >  wrote:
> > >
> > > On Mon, Jul 6, 2020 at 3:28 AM Daniel T. Lee  
> > > wrote:
> > > >
> > > > On Fri, Jul 3, 2020 at 1:04 AM Yonghong Song  wrote:
> > > > >
> > > > >
> > > > >
> > > > > On 7/2/20 4:13 AM, Daniel T. Lee wrote:
> > > > > > On Thu, Jul 2, 2020 at 2:13 PM Yonghong Song  wrote:
> > > > > >>
> > > > > >>
> > > > > >>
> > > > > >> On 7/1/20 7:16 PM, Daniel T. Lee wrote:
> > > > > >>> Currently, BPF programs with kprobe/sys_connect does not work 
> > > > > >>> properly.
> > > > > >>>
> > > > > >>> Commit 34745aed515c ("samples/bpf: fix kprobe attachment issue on 
> > > > > >>> x64")
> > > > > >>> This commit modifies the bpf_load behavior of kprobe events in 
> > > > > >>> the x64
> > > > > >>> architecture. If the current kprobe event target starts with 
> > > > > >>> "sys_*",
> > > > > >>> add the prefix "__x64_" to the front of the event.
> > > > > >>>
> > > > > >>> Appending "__x64_" prefix with kprobe/sys_* event was appropriate 
> > > > > >>> as a
> > > > > >>> solution to most of the problems caused by the commit below.
> > > > > >>>
> > > > > >>>   commit d5a00528b58c ("syscalls/core, syscalls/x86: Rename 
> > > > > >>> struct
> > > > > >>>   pt_regs-based sys_*() to __x64_sys_*()")
> > > > > >>>
> > > > > >>> However, there is a problem with the sys_connect kprobe event 
> > > > > >>> that does
> > > > > >>> not work properly. For __sys_connect event, parameters can be 
> > > > > >>> fetched
> > > > > >>> normally, but for __x64_sys_connect, parameters cannot be fetched.
> > > > > >>>
> > > > > >>> Because of this problem, this commit fixes the sys_connect event 
> > > > > >>> by
> > > > > >>> specifying the __sys_connect directly and this will bypass the
> > > > > >>> "__x64_" appending rule of bpf_load.
> > > > > >>
> > > > > >> In the kernel code, we have
> > > > > >>
> > > > > >> SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, 
> > > > > >> uservaddr,
> > > > > >>   int, addrlen)
> > > > > >> {
> > > > > >>   return __sys_connect(fd, uservaddr, addrlen);
> > > > > >> }
> > > > > >>
> > > > > >> Depending on compiler, there is no guarantee that __sys_connect 
> > > > > >> will
> > > > > >> not be inlined. I would prefer to still use the entry point
> > > > > >> __x64_sys_* e.g.,
> > > > > >>  SEC("kprobe/" SYSCALL(sys_write))
> > > > > >>
> > > > > >
> > > > > > As you mentioned, there is clearly a possibility that problems may 
> > > > > > arise
> > > > > > because the symbol does not exist according to the compiler.
> > > > > >
> > > > > > However, in x64, when using Kprobe for __x64_sys_connect event, the
> > > > > > tests are not working properly because the parameters cannot be 
> > > > > > fetched,
> > > > > > and the test under selftests/bpf is using "kprobe/__sys_connect" 
> > > > > > directly.
> > > > >
> > > > > This is the assembly code for __x64_sys_connect.
> > > > >
> > > > > 818d3520 <__x64_sys_connect>:
> > > > > 818d3520: e8 fb df 32 00callq   0x81c01520
> > > > > <__fentry__>
> > > > > 818d3525: 48 8b 57 60   movq96(%rdi), %rdx
> > > > > 818d3529: 

Re: [PATCH bpf-next 1/4] samples: bpf: fix bpf programs with kprobe/sys_connect event

2020-07-06 Thread Daniel T. Lee
On Tue, Jul 7, 2020 at 8:50 AM Andrii Nakryiko
 wrote:
>
> On Mon, Jul 6, 2020 at 3:28 AM Daniel T. Lee  wrote:
> >
> > On Fri, Jul 3, 2020 at 1:04 AM Yonghong Song  wrote:
> > >
> > >
> > >
> > > On 7/2/20 4:13 AM, Daniel T. Lee wrote:
> > > > On Thu, Jul 2, 2020 at 2:13 PM Yonghong Song  wrote:
> > > >>
> > > >>
> > > >>
> > > >> On 7/1/20 7:16 PM, Daniel T. Lee wrote:
> > > >>> Currently, BPF programs with kprobe/sys_connect does not work 
> > > >>> properly.
> > > >>>
> > > >>> Commit 34745aed515c ("samples/bpf: fix kprobe attachment issue on 
> > > >>> x64")
> > > >>> This commit modifies the bpf_load behavior of kprobe events in the x64
> > > >>> architecture. If the current kprobe event target starts with "sys_*",
> > > >>> add the prefix "__x64_" to the front of the event.
> > > >>>
> > > >>> Appending "__x64_" prefix with kprobe/sys_* event was appropriate as a
> > > >>> solution to most of the problems caused by the commit below.
> > > >>>
> > > >>>   commit d5a00528b58c ("syscalls/core, syscalls/x86: Rename struct
> > > >>>   pt_regs-based sys_*() to __x64_sys_*()")
> > > >>>
> > > >>> However, there is a problem with the sys_connect kprobe event that 
> > > >>> does
> > > >>> not work properly. For __sys_connect event, parameters can be fetched
> > > >>> normally, but for __x64_sys_connect, parameters cannot be fetched.
> > > >>>
> > > >>> Because of this problem, this commit fixes the sys_connect event by
> > > >>> specifying the __sys_connect directly and this will bypass the
> > > >>> "__x64_" appending rule of bpf_load.
> > > >>
> > > >> In the kernel code, we have
> > > >>
> > > >> SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
> > > >>   int, addrlen)
> > > >> {
> > > >>   return __sys_connect(fd, uservaddr, addrlen);
> > > >> }
> > > >>
> > > >> Depending on compiler, there is no guarantee that __sys_connect will
> > > >> not be inlined. I would prefer to still use the entry point
> > > >> __x64_sys_* e.g.,
> > > >>  SEC("kprobe/" SYSCALL(sys_write))
> > > >>
> > > >
> > > > As you mentioned, there is clearly a possibility that problems may arise
> > > > because the symbol does not exist according to the compiler.
> > > >
> > > > However, in x64, when using Kprobe for __x64_sys_connect event, the
> > > > tests are not working properly because the parameters cannot be fetched,
> > > > and the test under selftests/bpf is using "kprobe/__sys_connect" 
> > > > directly.
> > >
> > > This is the assembly code for __x64_sys_connect.
> > >
> > > 818d3520 <__x64_sys_connect>:
> > > 818d3520: e8 fb df 32 00callq   0x81c01520
> > > <__fentry__>
> > > 818d3525: 48 8b 57 60   movq96(%rdi), %rdx
> > > 818d3529: 48 8b 77 68   movq104(%rdi), %rsi
> > > 818d352d: 48 8b 7f 70   movq112(%rdi), %rdi
> > > 818d3531: e8 1a ff ff ffcallq   0x818d3450
> > > <__sys_connect>
> > > 818d3536: 48 98 cltq
> > > 818d3538: c3retq
> > > 818d3539: 0f 1f 80 00 00 00 00  nopl(%rax)
> > >
> > > In bpf program, the step is:
> > >struct pt_regs *real_regs = PT_REGS_PARM1(pt_regs);
> > >param1 = PT_REGS_PARM1(real_regs);
> > >param2 = PT_REGS_PARM2(real_regs);
> > >param3 = PT_REGS_PARM3(real_regs);
> > > The same for s390.
> > >
> >
> > I'm sorry that I seem to get it wrong,
> > But is it available to access 'struct pt_regs *' recursively?
> >
> > It seems nested use of PT_REGS_PARM causes invalid memory access.
> >
> > $ sudo ./test_probe_write_user
> > libbpf: load bpf program failed: Permission denied
> > libbpf: -- BEGIN DUMP LOG ---
> > libbp

Re: [PATCH bpf-next 1/4] samples: bpf: fix bpf programs with kprobe/sys_connect event

2020-07-06 Thread Daniel T. Lee
On Fri, Jul 3, 2020 at 1:04 AM Yonghong Song  wrote:
>
>
>
> On 7/2/20 4:13 AM, Daniel T. Lee wrote:
> > On Thu, Jul 2, 2020 at 2:13 PM Yonghong Song  wrote:
> >>
> >>
> >>
> >> On 7/1/20 7:16 PM, Daniel T. Lee wrote:
> >>> Currently, BPF programs with kprobe/sys_connect does not work properly.
> >>>
> >>> Commit 34745aed515c ("samples/bpf: fix kprobe attachment issue on x64")
> >>> This commit modifies the bpf_load behavior of kprobe events in the x64
> >>> architecture. If the current kprobe event target starts with "sys_*",
> >>> add the prefix "__x64_" to the front of the event.
> >>>
> >>> Appending "__x64_" prefix with kprobe/sys_* event was appropriate as a
> >>> solution to most of the problems caused by the commit below.
> >>>
> >>>   commit d5a00528b58c ("syscalls/core, syscalls/x86: Rename struct
> >>>   pt_regs-based sys_*() to __x64_sys_*()")
> >>>
> >>> However, there is a problem with the sys_connect kprobe event that does
> >>> not work properly. For __sys_connect event, parameters can be fetched
> >>> normally, but for __x64_sys_connect, parameters cannot be fetched.
> >>>
> >>> Because of this problem, this commit fixes the sys_connect event by
> >>> specifying the __sys_connect directly and this will bypass the
> >>> "__x64_" appending rule of bpf_load.
> >>
> >> In the kernel code, we have
> >>
> >> SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
> >>   int, addrlen)
> >> {
> >>   return __sys_connect(fd, uservaddr, addrlen);
> >> }
> >>
> >> Depending on compiler, there is no guarantee that __sys_connect will
> >> not be inlined. I would prefer to still use the entry point
> >> __x64_sys_* e.g.,
> >>  SEC("kprobe/" SYSCALL(sys_write))
> >>
> >
> > As you mentioned, there is clearly a possibility that problems may arise
> > because the symbol does not exist according to the compiler.
> >
> > However, in x64, when using Kprobe for __x64_sys_connect event, the
> > tests are not working properly because the parameters cannot be fetched,
> > and the test under selftests/bpf is using "kprobe/__sys_connect" directly.
>
> This is the assembly code for __x64_sys_connect.
>
> 818d3520 <__x64_sys_connect>:
> 818d3520: e8 fb df 32 00callq   0x81c01520
> <__fentry__>
> 818d3525: 48 8b 57 60   movq96(%rdi), %rdx
> 818d3529: 48 8b 77 68   movq104(%rdi), %rsi
> 818d352d: 48 8b 7f 70   movq112(%rdi), %rdi
> 818d3531: e8 1a ff ff ffcallq   0x818d3450
> <__sys_connect>
> 818d3536: 48 98 cltq
> 818d3538: c3retq
> 818d3539: 0f 1f 80 00 00 00 00  nopl(%rax)
>
> In bpf program, the step is:
>struct pt_regs *real_regs = PT_REGS_PARM1(pt_regs);
>param1 = PT_REGS_PARM1(real_regs);
>param2 = PT_REGS_PARM2(real_regs);
>param3 = PT_REGS_PARM3(real_regs);
> The same for s390.
>

I'm sorry that I seem to get it wrong,
But is it available to access 'struct pt_regs *' recursively?

It seems nested use of PT_REGS_PARM causes invalid memory access.

$ sudo ./test_probe_write_user
libbpf: load bpf program failed: Permission denied
libbpf: -- BEGIN DUMP LOG ---
libbpf:
Unrecognized arg#0 type PTR
; struct pt_regs *real_regs = PT_REGS_PARM1(ctx);
0: (79) r1 = *(u64 *)(r1 +112)
; void *sockaddr_arg = (void *)PT_REGS_PARM2(real_regs);
1: (79) r6 = *(u64 *)(r1 +104)
R1 invalid mem access 'inv'
processed 2 insns (limit 100) max_states_per_insn 0
total_states 0 peak_states 0 mark_read 0

libbpf: -- END LOG --
libbpf: failed to load program 'kprobe/__x64_sys_connect'
libbpf: failed to load object './test_probe_write_user_kern.o'
ERROR: loading BPF object file failed

I'm not fully aware of the BPF verifier's internal structure.
Is there any workaround to solve this problem?

Thanks for your time and effort for the review.
Daniel.

>
> For other architectures, no above indirection is needed.
>
> I guess you can abstract the above into trace_common.h?
>
> >
> > I'm not sure how to deal with this problem. Any advice and suggestions
> > will be greatly appreciated.
> >
> > T

Re: [PATCH bpf-next 3/4] samples: bpf: refactor BPF map performance test with libbpf

2020-07-02 Thread Daniel T. Lee
On Thu, Jul 2, 2020 at 1:34 PM Andrii Nakryiko
 wrote:
>
> On Wed, Jul 1, 2020 at 7:17 PM Daniel T. Lee  wrote:
> >
> > Previously, in order to set the numa_node attribute at the time of map
> > creation using "libbpf", it was necessary to call bpf_create_map_node()
> > directly (bpf_load approach), instead of calling bpf_object__load()
> > that handles everything on its own, including map creation. And because
> > of this problem, this sample had problems with refactoring from bpf_load
> > to libbpf.
> >
> > However, by commit 1bdb6c9a1c43 ("libbpf: Add a bunch of attribute
> > getters/setters for map definitions"), a helper function which allows
> > the numa_node attribute to be set in the map prior to calling
> > bpf_object__load() has been added.
> >
> > By using libbpf instead of bpf_load, the inner map definition has
> > been explicitly declared with BTF-defined format. And for this reason
> > some logic in fixup_map() was not needed and changed or removed.
> >
> > Signed-off-by: Daniel T. Lee 
> > ---
> >  samples/bpf/Makefile |   2 +-
> >  samples/bpf/map_perf_test_kern.c | 180 +++
> >  samples/bpf/map_perf_test_user.c | 130 +++---
> >  3 files changed, 181 insertions(+), 131 deletions(-)
> >
>
> [...]
>
> > +struct inner_lru {
> > +   __uint(type, BPF_MAP_TYPE_LRU_HASH);
> > +   __type(key, u32);
> > +   __type(value, long);
> > +   __uint(max_entries, MAX_ENTRIES);
> > +   __uint(map_flags, BPF_F_NUMA_NODE); /* from _user.c, set numa_node 
> > to 0 */
> > +} inner_lru_hash_map SEC(".maps");
>
> you can declaratively set numa_node here with __uint(numa_node, 0),
> which is actually a default, but for explicitness it's better
>

It would make _user.c code cleaner, but as you said,
I'll keep with this implementation.

> > +
> > +struct {
> > +   __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
> > +   __uint(max_entries, MAX_NR_CPUS);
> > +   __uint(key_size, sizeof(u32));
> > +   __array(values, struct inner_lru); /* use inner_lru as inner map */
> > +} array_of_lru_hashs SEC(".maps");
> > +
>
> [...]
>
> > -static void fixup_map(struct bpf_map_data *map, int idx)
> > +static void fixup_map(struct bpf_object *obj)
> >  {
> > +   struct bpf_map *map;
> > int i;
> >
> > -   if (!strcmp("inner_lru_hash_map", map->name)) {
> > -   inner_lru_hash_idx = idx;
> > -   inner_lru_hash_size = map->def.max_entries;
> > -   }
> > +   bpf_object__for_each_map(map, obj) {
> > +   const char *name = bpf_map__name(map);
> >
> > -   if (!strcmp("array_of_lru_hashs", map->name)) {
>
> I'm a bit too lazy right now to figure out exact logic here, but just
> wanted to mention that it is possible to statically set inner map
> elements for array_of_maps and hash_of_maps. Please check
> tools/testing/selftests/bpf/progs/test_btf_map_in_map.c and see if you
> can use this feature to simplify this logic a bit.
>

Thanks for the feedback! But I'm not sure I'm following properly.

If what you are talking about is specifying the inner_map_idx of
array_of_lru_hashes, I've changed it by using the __array() directives
of the BTF-defined MAP.

Since inner_map_idx logic has been replaced with BTF-defined map
definition, the only thing left at here fixup_map() is just resizing map size
with bpf_map__resize.

Thanks for your time and effort for the review.
Daniel

> > -   if (inner_lru_hash_idx == -1) {
> > -   printf("inner_lru_hash_map must be defined before 
> > array_of_lru_hashs\n");
> > -   exit(1);
> > +   /* Only change the max_entries for the enabled test(s) */
> > +   for (i = 0; i < NR_TESTS; i++) {
> > +   if (!strcmp(test_map_names[i], name) &&
> > +   (check_test_flags(i))) {
> > +   bpf_map__resize(map, num_map_entries);
> > +   continue;
> > +   }
> > }
> > -   map->def.inner_map_idx = inner_lru_hash_idx;
> > -   array_of_lru_hashs_idx = idx;
> > }
> >
>
> [...]


Re: [PATCH bpf-next 1/4] samples: bpf: fix bpf programs with kprobe/sys_connect event

2020-07-02 Thread Daniel T. Lee
On Thu, Jul 2, 2020 at 2:13 PM Yonghong Song  wrote:
>
>
>
> On 7/1/20 7:16 PM, Daniel T. Lee wrote:
> > Currently, BPF programs with kprobe/sys_connect does not work properly.
> >
> > Commit 34745aed515c ("samples/bpf: fix kprobe attachment issue on x64")
> > This commit modifies the bpf_load behavior of kprobe events in the x64
> > architecture. If the current kprobe event target starts with "sys_*",
> > add the prefix "__x64_" to the front of the event.
> >
> > Appending "__x64_" prefix with kprobe/sys_* event was appropriate as a
> > solution to most of the problems caused by the commit below.
> >
> >  commit d5a00528b58c ("syscalls/core, syscalls/x86: Rename struct
> >  pt_regs-based sys_*() to __x64_sys_*()")
> >
> > However, there is a problem with the sys_connect kprobe event that does
> > not work properly. For __sys_connect event, parameters can be fetched
> > normally, but for __x64_sys_connect, parameters cannot be fetched.
> >
> > Because of this problem, this commit fixes the sys_connect event by
> > specifying the __sys_connect directly and this will bypass the
> > "__x64_" appending rule of bpf_load.
>
> In the kernel code, we have
>
> SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
>  int, addrlen)
> {
>  return __sys_connect(fd, uservaddr, addrlen);
> }
>
> Depending on compiler, there is no guarantee that __sys_connect will
> not be inlined. I would prefer to still use the entry point
> __x64_sys_* e.g.,
> SEC("kprobe/" SYSCALL(sys_write))
>

As you mentioned, there is clearly a possibility that problems may arise
because the symbol does not exist according to the compiler.

However, in x64, when using Kprobe for __x64_sys_connect event, the
tests are not working properly because the parameters cannot be fetched,
and the test under selftests/bpf is using "kprobe/__sys_connect" directly.

I'm not sure how to deal with this problem. Any advice and suggestions
will be greatly appreciated.

Thanks for your time and effort for the review.
Daniel

> >
> > Fixes: 34745aed515c ("samples/bpf: fix kprobe attachment issue on x64")
> > Signed-off-by: Daniel T. Lee 
> > ---
> >   samples/bpf/map_perf_test_kern.c | 2 +-
> >   samples/bpf/test_map_in_map_kern.c   | 2 +-
> >   samples/bpf/test_probe_write_user_kern.c | 2 +-
> >   3 files changed, 3 insertions(+), 3 deletions(-)
> >
> > diff --git a/samples/bpf/map_perf_test_kern.c 
> > b/samples/bpf/map_perf_test_kern.c
> > index 12e91ae64d4d..cebe2098bb24 100644
> > --- a/samples/bpf/map_perf_test_kern.c
> > +++ b/samples/bpf/map_perf_test_kern.c
> > @@ -154,7 +154,7 @@ int stress_percpu_hmap_alloc(struct pt_regs *ctx)
> >   return 0;
> >   }
> >
> > -SEC("kprobe/sys_connect")
> > +SEC("kprobe/__sys_connect")
> >   int stress_lru_hmap_alloc(struct pt_regs *ctx)
> >   {
> >   char fmt[] = "Failed at stress_lru_hmap_alloc. ret:%d\n";
> > diff --git a/samples/bpf/test_map_in_map_kern.c 
> > b/samples/bpf/test_map_in_map_kern.c
> > index 6cee61e8ce9b..b1562ba2f025 100644
> > --- a/samples/bpf/test_map_in_map_kern.c
> > +++ b/samples/bpf/test_map_in_map_kern.c
> > @@ -102,7 +102,7 @@ static __always_inline int do_inline_hash_lookup(void 
> > *inner_map, u32 port)
> >   return result ? *result : -ENOENT;
> >   }
> >
> > -SEC("kprobe/sys_connect")
> > +SEC("kprobe/__sys_connect")
> >   int trace_sys_connect(struct pt_regs *ctx)
> >   {
> >   struct sockaddr_in6 *in6;
> > diff --git a/samples/bpf/test_probe_write_user_kern.c 
> > b/samples/bpf/test_probe_write_user_kern.c
> > index 6579639a83b2..9b3c3918c37d 100644
> > --- a/samples/bpf/test_probe_write_user_kern.c
> > +++ b/samples/bpf/test_probe_write_user_kern.c
> > @@ -26,7 +26,7 @@ struct {
> >* This example sits on a syscall, and the syscall ABI is relatively 
> > stable
> >* of course, across platforms, and over time, the ABI may change.
> >*/
> > -SEC("kprobe/sys_connect")
> > +SEC("kprobe/__sys_connect")
> >   int bpf_prog1(struct pt_regs *ctx)
> >   {
> >   struct sockaddr_in new_addr, orig_addr = {};
> >


Re: [PATCH bpf-next 2/4] samples: bpf: refactor BPF map in map test with libbpf

2020-07-02 Thread Daniel T. Lee
On Thu, Jul 2, 2020 at 1:26 PM Andrii Nakryiko
 wrote:
>
> On Wed, Jul 1, 2020 at 7:17 PM Daniel T. Lee  wrote:
> >
> > From commit 646f02ffdd49 ("libbpf: Add BTF-defined map-in-map
> > support"), a way to define internal map in BTF-defined map has been
> > added.
> >
> > Instead of using previous 'inner_map_idx' definition, the structure to
> > be used for the inner map can be directly defined using array directive.
> >
> > __array(values, struct inner_map)
> >
> > This commit refactors map in map test program with libbpf by explicitly
> > defining inner map with BTF-defined format.
> >
> > Signed-off-by: Daniel T. Lee 
> > ---
> >  samples/bpf/Makefile   |  2 +-
> >  samples/bpf/test_map_in_map_kern.c | 85 +++---
> >  samples/bpf/test_map_in_map_user.c | 53 +--
> >  3 files changed, 91 insertions(+), 49 deletions(-)
> >
>
> [...]
>
> > -   if (load_bpf_file(filename)) {
> > -   printf("%s", bpf_log_buf);
> > -   return 1;
> > +   prog = bpf_object__find_program_by_name(obj, "trace_sys_connect");
> > +   if (libbpf_get_error(prog)) {
>
> still wrong, just `if (!prog)`
>

Oops, my bad.
Will fix right away.

Thanks for your time and effort for the review.
Daniel.

> > +   printf("finding a prog in obj file failed\n");
> > +   goto cleanup;
> > +   }
> > +
>
> [...]


[PATCH bpf-next 2/4] samples: bpf: refactor BPF map in map test with libbpf

2020-07-01 Thread Daniel T. Lee
>From commit 646f02ffdd49 ("libbpf: Add BTF-defined map-in-map
support"), a way to define internal map in BTF-defined map has been
added.

Instead of using previous 'inner_map_idx' definition, the structure to
be used for the inner map can be directly defined using array directive.

__array(values, struct inner_map)

This commit refactors map in map test program with libbpf by explicitly
defining inner map with BTF-defined format.

Signed-off-by: Daniel T. Lee 
---
 samples/bpf/Makefile   |  2 +-
 samples/bpf/test_map_in_map_kern.c | 85 +++---
 samples/bpf/test_map_in_map_user.c | 53 +--
 3 files changed, 91 insertions(+), 49 deletions(-)

diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index ffd0fda536da..78678d4e6842 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -93,7 +93,7 @@ sampleip-objs := sampleip_user.o $(TRACE_HELPERS)
 tc_l2_redirect-objs := bpf_load.o tc_l2_redirect_user.o
 lwt_len_hist-objs := bpf_load.o lwt_len_hist_user.o
 xdp_tx_iptunnel-objs := xdp_tx_iptunnel_user.o
-test_map_in_map-objs := bpf_load.o test_map_in_map_user.o
+test_map_in_map-objs := test_map_in_map_user.o
 per_socket_stats_example-objs := cookie_uid_helper_example.o
 xdp_redirect-objs := xdp_redirect_user.o
 xdp_redirect_map-objs := xdp_redirect_map_user.o
diff --git a/samples/bpf/test_map_in_map_kern.c 
b/samples/bpf/test_map_in_map_kern.c
index b1562ba2f025..d3f56ed78541 100644
--- a/samples/bpf/test_map_in_map_kern.c
+++ b/samples/bpf/test_map_in_map_kern.c
@@ -11,66 +11,65 @@
 #include 
 #include 
 #include 
-#include "bpf_legacy.h"
 #include 
 
 #define MAX_NR_PORTS 65536
 
 /* map #0 */
-struct bpf_map_def_legacy SEC("maps") port_a = {
-   .type = BPF_MAP_TYPE_ARRAY,
-   .key_size = sizeof(u32),
-   .value_size = sizeof(int),
-   .max_entries = MAX_NR_PORTS,
-};
+struct inner_a {
+   __uint(type, BPF_MAP_TYPE_ARRAY);
+   __type(key, u32);
+   __type(value, int);
+   __uint(max_entries, MAX_NR_PORTS);
+} port_a SEC(".maps");
 
 /* map #1 */
-struct bpf_map_def_legacy SEC("maps") port_h = {
-   .type = BPF_MAP_TYPE_HASH,
-   .key_size = sizeof(u32),
-   .value_size = sizeof(int),
-   .max_entries = 1,
-};
+struct inner_h {
+   __uint(type, BPF_MAP_TYPE_HASH);
+   __type(key, u32);
+   __type(value, int);
+   __uint(max_entries, 1);
+} port_h SEC(".maps");
 
 /* map #2 */
-struct bpf_map_def_legacy SEC("maps") reg_result_h = {
-   .type = BPF_MAP_TYPE_HASH,
-   .key_size = sizeof(u32),
-   .value_size = sizeof(int),
-   .max_entries = 1,
-};
+struct {
+   __uint(type, BPF_MAP_TYPE_HASH);
+   __type(key, u32);
+   __type(value, int);
+   __uint(max_entries, 1);
+} reg_result_h SEC(".maps");
 
 /* map #3 */
-struct bpf_map_def_legacy SEC("maps") inline_result_h = {
-   .type = BPF_MAP_TYPE_HASH,
-   .key_size = sizeof(u32),
-   .value_size = sizeof(int),
-   .max_entries = 1,
-};
+struct {
+   __uint(type, BPF_MAP_TYPE_HASH);
+   __type(key, u32);
+   __type(value, int);
+   __uint(max_entries, 1);
+} inline_result_h SEC(".maps");
 
 /* map #4 */ /* Test case #0 */
-struct bpf_map_def_legacy SEC("maps") a_of_port_a = {
-   .type = BPF_MAP_TYPE_ARRAY_OF_MAPS,
-   .key_size = sizeof(u32),
-   .inner_map_idx = 0, /* map_fd[0] is port_a */
-   .max_entries = MAX_NR_PORTS,
-};
+struct {
+   __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
+   __uint(max_entries, MAX_NR_PORTS);
+   __uint(key_size, sizeof(u32));
+   __array(values, struct inner_a); /* use inner_a as inner map */
+} a_of_port_a SEC(".maps");
 
 /* map #5 */ /* Test case #1 */
-struct bpf_map_def_legacy SEC("maps") h_of_port_a = {
-   .type = BPF_MAP_TYPE_HASH_OF_MAPS,
-   .key_size = sizeof(u32),
-   .inner_map_idx = 0, /* map_fd[0] is port_a */
-   .max_entries = 1,
-};
+struct {
+   __uint(type, BPF_MAP_TYPE_HASH_OF_MAPS);
+   __uint(max_entries, 1);
+   __uint(key_size, sizeof(u32));
+   __array(values, struct inner_a); /* use inner_a as inner map */
+} h_of_port_a SEC(".maps");
 
 /* map #6 */ /* Test case #2 */
-struct bpf_map_def_legacy SEC("maps") h_of_port_h = {
-   .type = BPF_MAP_TYPE_HASH_OF_MAPS,
-   .key_size = sizeof(u32),
-   .inner_map_idx = 1, /* map_fd[1] is port_h */
-   .max_entries = 1,
-};
+struct {
+   __uint(type, BPF_MAP_TYPE_HASH_OF_MAPS);
+   __uint(max_entries, 1);
+   __uint(key_size, sizeof(u32));
+   __array(values, struct inner_h); /* use inner_h as inner map */
+} h_of_port_h SEC(".maps");
 
 static __always_inline int do_reg_lookup(void *inner_map, u32 port)
 {
diff --git a/samples/bpf/test_map_in_map_user.c 
b/samples/bpf/test_map_in_map_user.c
index eb2

[PATCH bpf-next 4/4] selftests: bpf: remove unused bpf_map_def_legacy struct

2020-07-01 Thread Daniel T. Lee
samples/bpf no longer use bpf_map_def_legacy and instead use the
libbpf's bpf_map_def or new BTF-defined MAP format. This commit removes
unused bpf_map_def_legacy struct from selftests/bpf/bpf_legacy.h.

Signed-off-by: Daniel T. Lee 
---
 tools/testing/selftests/bpf/bpf_legacy.h | 14 --
 1 file changed, 14 deletions(-)

diff --git a/tools/testing/selftests/bpf/bpf_legacy.h 
b/tools/testing/selftests/bpf/bpf_legacy.h
index 6f8988738bc1..719ab56cdb5d 100644
--- a/tools/testing/selftests/bpf/bpf_legacy.h
+++ b/tools/testing/selftests/bpf/bpf_legacy.h
@@ -2,20 +2,6 @@
 #ifndef __BPF_LEGACY__
 #define __BPF_LEGACY__
 
-/*
- * legacy bpf_map_def with extra fields supported only by bpf_load(), do not
- * use outside of samples/bpf
- */
-struct bpf_map_def_legacy {
-   unsigned int type;
-   unsigned int key_size;
-   unsigned int value_size;
-   unsigned int max_entries;
-   unsigned int map_flags;
-   unsigned int inner_map_idx;
-   unsigned int numa_node;
-};
-
 #define BPF_ANNOTATE_KV_PAIR(name, type_key, type_val) \
struct btf_map_##name { \
type_key key;   \
-- 
2.25.1



[PATCH bpf-next 0/4] samples: bpf: refactor BPF map test with libbpf

2020-07-01 Thread Daniel T. Lee
There have been many changes in how the current bpf program defines
map. The development of libbpf has led to the new method called 
BTF-defined map, which is a new way of defining BPF maps, and thus has
a lot of differences from the existing MAP definition method.

Although bpf_load was also internally using libbpf, fragmentation in 
its implementation began to occur, such as using its own structure, 
bpf_load_map_def, to define the map.

Therefore, in this patch set, map test programs, which are closely
related to changes in the definition method of BPF map, were refactored
with libbpf.

Daniel T. Lee (4):
  samples: bpf: fix bpf programs with kprobe/sys_connect event
  samples: bpf: refactor BPF map in map test with libbpf
  samples: bpf: refactor BPF map performance test with libbpf
  selftests: bpf: remove unused bpf_map_def_legacy struct

 samples/bpf/Makefile |   4 +-
 samples/bpf/map_perf_test_kern.c | 182 +++
 samples/bpf/map_perf_test_user.c | 130 +++-
 samples/bpf/test_map_in_map_kern.c   |  87 ++-
 samples/bpf/test_map_in_map_user.c   |  53 ++-
 samples/bpf/test_probe_write_user_kern.c |   2 +-
 tools/testing/selftests/bpf/bpf_legacy.h |  14 --
 7 files changed, 275 insertions(+), 197 deletions(-)

-- 
2.25.1



[PATCH bpf-next 1/4] samples: bpf: fix bpf programs with kprobe/sys_connect event

2020-07-01 Thread Daniel T. Lee
Currently, BPF programs with kprobe/sys_connect does not work properly.

Commit 34745aed515c ("samples/bpf: fix kprobe attachment issue on x64")
This commit modifies the bpf_load behavior of kprobe events in the x64
architecture. If the current kprobe event target starts with "sys_*",
add the prefix "__x64_" to the front of the event.

Appending "__x64_" prefix with kprobe/sys_* event was appropriate as a
solution to most of the problems caused by the commit below.

commit d5a00528b58c ("syscalls/core, syscalls/x86: Rename struct
pt_regs-based sys_*() to __x64_sys_*()")

However, there is a problem with the sys_connect kprobe event that does
not work properly. For __sys_connect event, parameters can be fetched
normally, but for __x64_sys_connect, parameters cannot be fetched.

Because of this problem, this commit fixes the sys_connect event by
specifying the __sys_connect directly and this will bypass the
"__x64_" appending rule of bpf_load.

Fixes: 34745aed515c ("samples/bpf: fix kprobe attachment issue on x64")
Signed-off-by: Daniel T. Lee 
---
 samples/bpf/map_perf_test_kern.c | 2 +-
 samples/bpf/test_map_in_map_kern.c   | 2 +-
 samples/bpf/test_probe_write_user_kern.c | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/samples/bpf/map_perf_test_kern.c b/samples/bpf/map_perf_test_kern.c
index 12e91ae64d4d..cebe2098bb24 100644
--- a/samples/bpf/map_perf_test_kern.c
+++ b/samples/bpf/map_perf_test_kern.c
@@ -154,7 +154,7 @@ int stress_percpu_hmap_alloc(struct pt_regs *ctx)
return 0;
 }
 
-SEC("kprobe/sys_connect")
+SEC("kprobe/__sys_connect")
 int stress_lru_hmap_alloc(struct pt_regs *ctx)
 {
	char fmt[] = "Failed at stress_lru_hmap_alloc. ret:%d\n";
diff --git a/samples/bpf/test_map_in_map_kern.c 
b/samples/bpf/test_map_in_map_kern.c
index 6cee61e8ce9b..b1562ba2f025 100644
--- a/samples/bpf/test_map_in_map_kern.c
+++ b/samples/bpf/test_map_in_map_kern.c
@@ -102,7 +102,7 @@ static __always_inline int do_inline_hash_lookup(void 
*inner_map, u32 port)
return result ? *result : -ENOENT;
 }
 
-SEC("kprobe/sys_connect")
+SEC("kprobe/__sys_connect")
 int trace_sys_connect(struct pt_regs *ctx)
 {
struct sockaddr_in6 *in6;
diff --git a/samples/bpf/test_probe_write_user_kern.c 
b/samples/bpf/test_probe_write_user_kern.c
index 6579639a83b2..9b3c3918c37d 100644
--- a/samples/bpf/test_probe_write_user_kern.c
+++ b/samples/bpf/test_probe_write_user_kern.c
@@ -26,7 +26,7 @@ struct {
  * This example sits on a syscall, and the syscall ABI is relatively stable
  * of course, across platforms, and over time, the ABI may change.
  */
-SEC("kprobe/sys_connect")
+SEC("kprobe/__sys_connect")
 int bpf_prog1(struct pt_regs *ctx)
 {
struct sockaddr_in new_addr, orig_addr = {};
-- 
2.25.1



[PATCH bpf-next 3/4] samples: bpf: refactor BPF map performance test with libbpf

2020-07-01 Thread Daniel T. Lee
Previously, in order to set the numa_node attribute at the time of map
creation using "libbpf", it was necessary to call bpf_create_map_node()
directly (bpf_load approach), instead of calling bpf_object__load()
that handles everything on its own, including map creation. And because
of this problem, this sample had problems with refactoring from bpf_load
to libbpf.

However, by commit 1bdb6c9a1c43 ("libbpf: Add a bunch of attribute
getters/setters for map definitions"), a helper function which allows
the numa_node attribute to be set in the map prior to calling
bpf_object__load() has been added.

By using libbpf instead of bpf_load, the inner map definition has
been explicitly declared with BTF-defined format. And for this reason
some logic in fixup_map() was not needed and changed or removed.

Signed-off-by: Daniel T. Lee 
---
 samples/bpf/Makefile |   2 +-
 samples/bpf/map_perf_test_kern.c | 180 +++
 samples/bpf/map_perf_test_user.c | 130 +++---
 3 files changed, 181 insertions(+), 131 deletions(-)

diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index 78678d4e6842..0cc7f18370c6 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -76,7 +76,7 @@ trace_output-objs := trace_output_user.o $(TRACE_HELPERS)
 lathist-objs := lathist_user.o
 offwaketime-objs := offwaketime_user.o $(TRACE_HELPERS)
 spintest-objs := spintest_user.o $(TRACE_HELPERS)
-map_perf_test-objs := bpf_load.o map_perf_test_user.o
+map_perf_test-objs := map_perf_test_user.o
 test_overhead-objs := bpf_load.o test_overhead_user.o
 test_cgrp2_array_pin-objs := test_cgrp2_array_pin.o
 test_cgrp2_attach-objs := test_cgrp2_attach.o
diff --git a/samples/bpf/map_perf_test_kern.c b/samples/bpf/map_perf_test_kern.c
index cebe2098bb24..13ca14e34f66 100644
--- a/samples/bpf/map_perf_test_kern.c
+++ b/samples/bpf/map_perf_test_kern.c
@@ -9,95 +9,95 @@
 #include 
 #include 
 #include 
-#include "bpf_legacy.h"
 #include 
+#include "trace_common.h"
 
 #define MAX_ENTRIES 1000
 #define MAX_NR_CPUS 1024
 
-struct bpf_map_def_legacy SEC("maps") hash_map = {
-   .type = BPF_MAP_TYPE_HASH,
-   .key_size = sizeof(u32),
-   .value_size = sizeof(long),
-   .max_entries = MAX_ENTRIES,
-};
-
-struct bpf_map_def_legacy SEC("maps") lru_hash_map = {
-   .type = BPF_MAP_TYPE_LRU_HASH,
-   .key_size = sizeof(u32),
-   .value_size = sizeof(long),
-   .max_entries = 1,
-};
-
-struct bpf_map_def_legacy SEC("maps") nocommon_lru_hash_map = {
-   .type = BPF_MAP_TYPE_LRU_HASH,
-   .key_size = sizeof(u32),
-   .value_size = sizeof(long),
-   .max_entries = 1,
-   .map_flags = BPF_F_NO_COMMON_LRU,
-};
-
-struct bpf_map_def_legacy SEC("maps") inner_lru_hash_map = {
-   .type = BPF_MAP_TYPE_LRU_HASH,
-   .key_size = sizeof(u32),
-   .value_size = sizeof(long),
-   .max_entries = MAX_ENTRIES,
-   .map_flags = BPF_F_NUMA_NODE,
-   .numa_node = 0,
-};
-
-struct bpf_map_def_legacy SEC("maps") array_of_lru_hashs = {
-   .type = BPF_MAP_TYPE_ARRAY_OF_MAPS,
-   .key_size = sizeof(u32),
-   .max_entries = MAX_NR_CPUS,
-};
-
-struct bpf_map_def_legacy SEC("maps") percpu_hash_map = {
-   .type = BPF_MAP_TYPE_PERCPU_HASH,
-   .key_size = sizeof(u32),
-   .value_size = sizeof(long),
-   .max_entries = MAX_ENTRIES,
-};
-
-struct bpf_map_def_legacy SEC("maps") hash_map_alloc = {
-   .type = BPF_MAP_TYPE_HASH,
-   .key_size = sizeof(u32),
-   .value_size = sizeof(long),
-   .max_entries = MAX_ENTRIES,
-   .map_flags = BPF_F_NO_PREALLOC,
-};
-
-struct bpf_map_def_legacy SEC("maps") percpu_hash_map_alloc = {
-   .type = BPF_MAP_TYPE_PERCPU_HASH,
-   .key_size = sizeof(u32),
-   .value_size = sizeof(long),
-   .max_entries = MAX_ENTRIES,
-   .map_flags = BPF_F_NO_PREALLOC,
-};
-
-struct bpf_map_def_legacy SEC("maps") lpm_trie_map_alloc = {
-   .type = BPF_MAP_TYPE_LPM_TRIE,
-   .key_size = 8,
-   .value_size = sizeof(long),
-   .max_entries = 1,
-   .map_flags = BPF_F_NO_PREALLOC,
-};
-
-struct bpf_map_def_legacy SEC("maps") array_map = {
-   .type = BPF_MAP_TYPE_ARRAY,
-   .key_size = sizeof(u32),
-   .value_size = sizeof(long),
-   .max_entries = MAX_ENTRIES,
-};
-
-struct bpf_map_def_legacy SEC("maps") lru_hash_lookup_map = {
-   .type = BPF_MAP_TYPE_LRU_HASH,
-   .key_size = sizeof(u32),
-   .value_size = sizeof(long),
-   .max_entries = MAX_ENTRIES,
-};
-
-SEC("kprobe/sys_getuid")
+struct {
+   __uint(type, BPF_MAP_TYPE_HASH);
+   __type(key, u32);
+   __type(value, long);
+   __uint(max_entries, MAX_ENTRIES);
+} hash_map SEC(".maps");
+
+struct {
+   __uint(type, BPF_MAP_TYPE_LRU_HASH);
+   __type(key, u32);
+

Re: [PATCH 3/3] samples: bpf: refactor BPF map in map test with libbpf

2020-06-26 Thread Daniel T. Lee
On Sat, Jun 27, 2020 at 7:19 AM Andrii Nakryiko
 wrote:
>
> On Fri, Jun 26, 2020 at 3:14 PM Daniel T. Lee  wrote:
> >
> > On Sat, Jun 27, 2020 at 5:30 AM Andrii Nakryiko
> >  wrote:
> > >
> > > On Fri, Jun 26, 2020 at 1:18 AM Daniel T. Lee  
> > > wrote:
> > > >
> > > > From commit 646f02ffdd49 ("libbpf: Add BTF-defined map-in-map
> > > > support"), a way to define internal map in BTF-defined map has been
> > > > added.
> > > >
> > > > Instead of using previous 'inner_map_idx' definition, the structure to
> > > > be used for the inner map can be directly defined using array directive.
> > > >
> > > > __array(values, struct inner_map)
> > > >
> > > > This commit refactors map in map test program with libbpf by explicitly
> > > > defining inner map with BTF-defined format.
> > > >
> > > > Signed-off-by: Daniel T. Lee 
> > > > ---
> > >
> > > Thanks for the clean up, looks good except that prog NULL check.
> > >
> >
> > I'll fix this NULL check as well too.
> >
> > > It also seems like this is the last use of bpf_map_def_legacy, do you
> > > mind removing it as well?
> > >
> >
> > Actually, there is one more place that uses bpf_map_def_legacy.
> > map_perf_test_kern.c is the one, and I'm currently working on it, but
> > I'm having difficulty with refactoring this file at the moment.
> >
> > It has a hash_map map definition named inner_lru_hash_map with
> > BPF_F_NUMA_NODE flag and '.numa_node = 0'.
> >
> > The bpf_map_def in libbpf has the attribute name map_flags but
> > it does not have the numa_node attribute. Because the numa node
>
> It does since 1 or 2 days ago ([0])
>
>   [0] 
> https://patchwork.ozlabs.org/project/netdev/patch/20200621062112.3006313-1-andr...@fb.com/
>
>
> > for bpf_map_def cannot be explicitly specified, this means that there
> > is no way to set the numa node where the map will be placed at the
> > time of bpf_object__load.
> >
> > The only approach currently available is not to use libbpf to handle
> > everything (bpf_object__load), but instead to create a map directly with
> > specifying numa node (bpf_load approach).
> >
> > bpf_create_map_in_map_node
> > bpf_create_map_node
> >
> > I'm trying to stick with the libbpf implementation only, and I'm wondering
> > if I have to create BPF maps manually in the _user.c program.
> >
> > Any advice and suggestions will be greatly appreciated.
> >
>
> It should be super straightforward now with a BTF-defined map
> supporting numa_node attribute.
>

Awesome, thanks for letting me know!

I will use this new attribute for the map_perf_test refactoring.
Problem Solved!

Thanks.

> > Thanks for your time and effort for the review.
> > Daniel.
> >
> > >
> > > >  samples/bpf/Makefile   |  2 +-
> > > >  samples/bpf/test_map_in_map_kern.c | 85 +++---
> > > >  samples/bpf/test_map_in_map_user.c | 53 +--
> > > >  3 files changed, 91 insertions(+), 49 deletions(-)
> > > >
> > >
> > > [...]
> > >
> > > >
> > > > snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
> > > > +   obj = bpf_object__open_file(filename, NULL);
> > > > +   if (libbpf_get_error(obj)) {
> > >
> > > this is right, but...
> > >
> > > > +   fprintf(stderr, "ERROR: opening BPF object file 
> > > > failed\n");
> > > > +   return 0;
> > > > +   }
> > > >
> > > > -   if (load_bpf_file(filename)) {
> > > > -   printf("%s", bpf_log_buf);
> > > > -   return 1;
> > > > +   prog = bpf_object__find_program_by_name(obj, 
> > > > "trace_sys_connect");
> > > > +   if (libbpf_get_error(prog)) {
> > >
> > > this is wrong. Just NULL check. libbpf APIs are not very consistent
> > > with what they return, unfortunately.
> > >
> > > > +   printf("finding a prog in obj file failed\n");
> > > > +   goto cleanup;
> > > > +   }
> > > > +
> > >
> > > [...]


Re: [PATCH 3/3] samples: bpf: refactor BPF map in map test with libbpf

2020-06-26 Thread Daniel T. Lee
On Sat, Jun 27, 2020 at 5:30 AM Andrii Nakryiko
 wrote:
>
> On Fri, Jun 26, 2020 at 1:18 AM Daniel T. Lee  wrote:
> >
> > From commit 646f02ffdd49 ("libbpf: Add BTF-defined map-in-map
> > support"), a way to define internal map in BTF-defined map has been
> > added.
> >
> > Instead of using previous 'inner_map_idx' definition, the structure to
> > be used for the inner map can be directly defined using array directive.
> >
> > __array(values, struct inner_map)
> >
> > This commit refactors map in map test program with libbpf by explicitly
> > defining inner map with BTF-defined format.
> >
> > Signed-off-by: Daniel T. Lee 
> > ---
>
> Thanks for the clean up, looks good except that prog NULL check.
>

I'll fix this NULL check as well too.

> It also seems like this is the last use of bpf_map_def_legacy, do you
> mind removing it as well?
>

Actually, there is one more place that uses bpf_map_def_legacy.
map_perf_test_kern.c is the one, and I'm currently working on it, but
I'm having difficulty with refactoring this file at the moment.

It has a hash_map map definition named inner_lru_hash_map with
BPF_F_NUMA_NODE flag and '.numa_node = 0'.

The bpf_map_def in libbpf has the attribute name map_flags but
it does not have the numa_node attribute. Because the numa node
for bpf_map_def cannot be explicitly specified, this means that there
is no way to set the numa node where the map will be placed at the
time of bpf_object__load.

The only approach currently available is not to use libbpf to handle
everything (bpf_object__load), but instead to create a map directly with
specifying numa node (bpf_load approach).

bpf_create_map_in_map_node
bpf_create_map_node

I'm trying to stick with the libbpf implementation only, and I'm wondering
if I have to create BPF maps manually in the _user.c program.

Any advice and suggestions will be greatly appreciated.

Thanks for your time and effort for the review.
Daniel.

>
> >  samples/bpf/Makefile   |  2 +-
> >  samples/bpf/test_map_in_map_kern.c | 85 +++---
> >  samples/bpf/test_map_in_map_user.c | 53 +--
> >  3 files changed, 91 insertions(+), 49 deletions(-)
> >
>
> [...]
>
> >
> > snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
> > +   obj = bpf_object__open_file(filename, NULL);
> > +   if (libbpf_get_error(obj)) {
>
> this is right, but...
>
> > +   fprintf(stderr, "ERROR: opening BPF object file failed\n");
> > +   return 0;
> > +   }
> >
> > -   if (load_bpf_file(filename)) {
> > -   printf("%s", bpf_log_buf);
> > -   return 1;
> > +   prog = bpf_object__find_program_by_name(obj, "trace_sys_connect");
> > +   if (libbpf_get_error(prog)) {
>
> this is wrong. Just NULL check. libbpf APIs are not very consistent
> with what they return, unfortunately.
>
> > +   printf("finding a prog in obj file failed\n");
> > +   goto cleanup;
> > +   }
> > +
>
> [...]


Re: [PATCH 2/3] samples: bpf: cleanup pointer error check with libbpf

2020-06-26 Thread Daniel T. Lee
2020년 6월 27일 (토) 05:25, Andrii Nakryiko 님이 작성:
>
> On Fri, Jun 26, 2020 at 1:18 AM Daniel T. Lee  wrote:
> >
> > Libbpf has its own helper function to check for errors in the bpf
> > data structure (pointer). Some code does not use this libbpf
> > helper function and checks the pointer's error directly.
> >
> > This commit cleans up the existing pointer error check logic with
> > libbpf.
> >
> > Signed-off-by: Daniel T. Lee 
> > ---
>
> This entire patch is wrong. bpf_object__find_program_by_name() returns
> NULL if the program is not found, not an error code.
>

Oops, I'll drop the patch and resend with the next version.

Thanks for your time and effort for the review.
Daniel

> >  samples/bpf/sampleip_user.c| 2 +-
> >  samples/bpf/trace_event_user.c | 2 +-
> >  samples/bpf/tracex1_user.c | 2 +-
> >  samples/bpf/tracex5_user.c | 2 +-
> >  samples/bpf/tracex7_user.c | 2 +-
> >  5 files changed, 5 insertions(+), 5 deletions(-)
> >
>
> [...]


[PATCH 2/3] samples: bpf: cleanup pointer error check with libbpf

2020-06-26 Thread Daniel T. Lee
Libbpf has its own helper function to check for errors in the bpf
data structure (pointer). Some code does not use this libbpf
helper function and checks the pointer's error directly.

This commit cleans up the existing pointer error check logic with
libbpf.

Signed-off-by: Daniel T. Lee 
---
 samples/bpf/sampleip_user.c| 2 +-
 samples/bpf/trace_event_user.c | 2 +-
 samples/bpf/tracex1_user.c | 2 +-
 samples/bpf/tracex5_user.c | 2 +-
 samples/bpf/tracex7_user.c | 2 +-
 5 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/samples/bpf/sampleip_user.c b/samples/bpf/sampleip_user.c
index 921c505bb567..554dfa1cb34d 100644
--- a/samples/bpf/sampleip_user.c
+++ b/samples/bpf/sampleip_user.c
@@ -186,7 +186,7 @@ int main(int argc, char **argv)
}
 
prog = bpf_object__find_program_by_name(obj, "do_sample");
-   if (!prog) {
+   if (libbpf_get_error(prog)) {
fprintf(stderr, "ERROR: finding a prog in obj file failed\n");
goto cleanup;
}
diff --git a/samples/bpf/trace_event_user.c b/samples/bpf/trace_event_user.c
index ac1ba368195c..8bcb9b4bfbe6 100644
--- a/samples/bpf/trace_event_user.c
+++ b/samples/bpf/trace_event_user.c
@@ -318,7 +318,7 @@ int main(int argc, char **argv)
}
 
prog = bpf_object__find_program_by_name(obj, "bpf_prog1");
-   if (!prog) {
+   if (libbpf_get_error(prog)) {
printf("finding a prog in obj file failed\n");
goto cleanup;
}
diff --git a/samples/bpf/tracex1_user.c b/samples/bpf/tracex1_user.c
index 9d4adb7fd834..d3e01807dcd0 100644
--- a/samples/bpf/tracex1_user.c
+++ b/samples/bpf/tracex1_user.c
@@ -20,7 +20,7 @@ int main(int ac, char **argv)
}
 
prog = bpf_object__find_program_by_name(obj, "bpf_prog1");
-   if (!prog) {
+   if (libbpf_get_error(prog)) {
fprintf(stderr, "ERROR: finding a prog in obj file failed\n");
goto cleanup;
}
diff --git a/samples/bpf/tracex5_user.c b/samples/bpf/tracex5_user.c
index 98dad57a96c4..65c753b30121 100644
--- a/samples/bpf/tracex5_user.c
+++ b/samples/bpf/tracex5_user.c
@@ -53,7 +53,7 @@ int main(int ac, char **argv)
}
 
prog = bpf_object__find_program_by_name(obj, "bpf_prog1");
-   if (!prog) {
+   if (libbpf_get_error(prog)) {
printf("finding a prog in obj file failed\n");
goto cleanup;
}
diff --git a/samples/bpf/tracex7_user.c b/samples/bpf/tracex7_user.c
index fdcd6580dd73..10c896732139 100644
--- a/samples/bpf/tracex7_user.c
+++ b/samples/bpf/tracex7_user.c
@@ -22,7 +22,7 @@ int main(int argc, char **argv)
}
 
prog = bpf_object__find_program_by_name(obj, "bpf_prog1");
-   if (!prog) {
+   if (libbpf_get_error(prog)) {
fprintf(stderr, "ERROR: finding a prog in obj file failed\n");
goto cleanup;
}
-- 
2.25.1



[PATCH 3/3] samples: bpf: refactor BPF map in map test with libbpf

2020-06-26 Thread Daniel T. Lee
>From commit 646f02ffdd49 ("libbpf: Add BTF-defined map-in-map
support"), a way to define internal map in BTF-defined map has been
added.

Instead of using previous 'inner_map_idx' definition, the structure to
be used for the inner map can be directly defined using array directive.

__array(values, struct inner_map)

This commit refactors map in map test program with libbpf by explicitly
defining inner map with BTF-defined format.

Signed-off-by: Daniel T. Lee 
---
 samples/bpf/Makefile   |  2 +-
 samples/bpf/test_map_in_map_kern.c | 85 +++---
 samples/bpf/test_map_in_map_user.c | 53 +--
 3 files changed, 91 insertions(+), 49 deletions(-)

diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index ffd0fda536da..78678d4e6842 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -93,7 +93,7 @@ sampleip-objs := sampleip_user.o $(TRACE_HELPERS)
 tc_l2_redirect-objs := bpf_load.o tc_l2_redirect_user.o
 lwt_len_hist-objs := bpf_load.o lwt_len_hist_user.o
 xdp_tx_iptunnel-objs := xdp_tx_iptunnel_user.o
-test_map_in_map-objs := bpf_load.o test_map_in_map_user.o
+test_map_in_map-objs := test_map_in_map_user.o
 per_socket_stats_example-objs := cookie_uid_helper_example.o
 xdp_redirect-objs := xdp_redirect_user.o
 xdp_redirect_map-objs := xdp_redirect_map_user.o
diff --git a/samples/bpf/test_map_in_map_kern.c 
b/samples/bpf/test_map_in_map_kern.c
index b1562ba2f025..d3f56ed78541 100644
--- a/samples/bpf/test_map_in_map_kern.c
+++ b/samples/bpf/test_map_in_map_kern.c
@@ -11,66 +11,65 @@
 #include 
 #include 
 #include 
-#include "bpf_legacy.h"
 #include 
 
 #define MAX_NR_PORTS 65536
 
 /* map #0 */
-struct bpf_map_def_legacy SEC("maps") port_a = {
-   .type = BPF_MAP_TYPE_ARRAY,
-   .key_size = sizeof(u32),
-   .value_size = sizeof(int),
-   .max_entries = MAX_NR_PORTS,
-};
+struct inner_a {
+   __uint(type, BPF_MAP_TYPE_ARRAY);
+   __type(key, u32);
+   __type(value, int);
+   __uint(max_entries, MAX_NR_PORTS);
+} port_a SEC(".maps");
 
 /* map #1 */
-struct bpf_map_def_legacy SEC("maps") port_h = {
-   .type = BPF_MAP_TYPE_HASH,
-   .key_size = sizeof(u32),
-   .value_size = sizeof(int),
-   .max_entries = 1,
-};
+struct inner_h {
+   __uint(type, BPF_MAP_TYPE_HASH);
+   __type(key, u32);
+   __type(value, int);
+   __uint(max_entries, 1);
+} port_h SEC(".maps");
 
 /* map #2 */
-struct bpf_map_def_legacy SEC("maps") reg_result_h = {
-   .type = BPF_MAP_TYPE_HASH,
-   .key_size = sizeof(u32),
-   .value_size = sizeof(int),
-   .max_entries = 1,
-};
+struct {
+   __uint(type, BPF_MAP_TYPE_HASH);
+   __type(key, u32);
+   __type(value, int);
+   __uint(max_entries, 1);
+} reg_result_h SEC(".maps");
 
 /* map #3 */
-struct bpf_map_def_legacy SEC("maps") inline_result_h = {
-   .type = BPF_MAP_TYPE_HASH,
-   .key_size = sizeof(u32),
-   .value_size = sizeof(int),
-   .max_entries = 1,
-};
+struct {
+   __uint(type, BPF_MAP_TYPE_HASH);
+   __type(key, u32);
+   __type(value, int);
+   __uint(max_entries, 1);
+} inline_result_h SEC(".maps");
 
 /* map #4 */ /* Test case #0 */
-struct bpf_map_def_legacy SEC("maps") a_of_port_a = {
-   .type = BPF_MAP_TYPE_ARRAY_OF_MAPS,
-   .key_size = sizeof(u32),
-   .inner_map_idx = 0, /* map_fd[0] is port_a */
-   .max_entries = MAX_NR_PORTS,
-};
+struct {
+   __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
+   __uint(max_entries, MAX_NR_PORTS);
+   __uint(key_size, sizeof(u32));
+   __array(values, struct inner_a); /* use inner_a as inner map */
+} a_of_port_a SEC(".maps");
 
 /* map #5 */ /* Test case #1 */
-struct bpf_map_def_legacy SEC("maps") h_of_port_a = {
-   .type = BPF_MAP_TYPE_HASH_OF_MAPS,
-   .key_size = sizeof(u32),
-   .inner_map_idx = 0, /* map_fd[0] is port_a */
-   .max_entries = 1,
-};
+struct {
+   __uint(type, BPF_MAP_TYPE_HASH_OF_MAPS);
+   __uint(max_entries, 1);
+   __uint(key_size, sizeof(u32));
+   __array(values, struct inner_a); /* use inner_a as inner map */
+} h_of_port_a SEC(".maps");
 
 /* map #6 */ /* Test case #2 */
-struct bpf_map_def_legacy SEC("maps") h_of_port_h = {
-   .type = BPF_MAP_TYPE_HASH_OF_MAPS,
-   .key_size = sizeof(u32),
-   .inner_map_idx = 1, /* map_fd[1] is port_h */
-   .max_entries = 1,
-};
+struct {
+   __uint(type, BPF_MAP_TYPE_HASH_OF_MAPS);
+   __uint(max_entries, 1);
+   __uint(key_size, sizeof(u32));
+   __array(values, struct inner_h); /* use inner_h as inner map */
+} h_of_port_h SEC(".maps");
 
 static __always_inline int do_reg_lookup(void *inner_map, u32 port)
 {
diff --git a/samples/bpf/test_map_in_map_user.c 
b/samples/bpf/test_map_in_map_user.c
index eb2

[PATCH 1/3] samples: bpf: fix bpf programs with kprobe/sys_connect event

2020-06-26 Thread Daniel T. Lee
Currently, BPF programs with kprobe/sys_connect do not work properly.

Commit 34745aed515c ("samples/bpf: fix kprobe attachment issue on x64")
modified the bpf_load behavior for kprobe events on the x64
architecture: if the current kprobe event target starts with "sys_*",
the prefix "__x64_" is added to the front of the event.

Appending "__x64_" prefix with kprobe/sys_* event was appropriate as a
solution to most of the problems caused by the commit below.

commit d5a00528b58c ("syscalls/core, syscalls/x86: Rename struct
pt_regs-based sys_*() to __x64_sys_*()")

However, there is a problem with the sys_connect kprobe event that does
not work properly. For __sys_connect event, parameters can be fetched
normally, but for __x64_sys_connect, parameters cannot be fetched.

Because of this problem, this commit fixes the sys_connect event by
specifying the __sys_connect directly and this will bypass the
"__x64_" appending rule of bpf_load.

Fixes: 34745aed515c ("samples/bpf: fix kprobe attachment issue on x64")
Signed-off-by: Daniel T. Lee 
---
 samples/bpf/map_perf_test_kern.c | 2 +-
 samples/bpf/test_map_in_map_kern.c   | 2 +-
 samples/bpf/test_probe_write_user_kern.c | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/samples/bpf/map_perf_test_kern.c b/samples/bpf/map_perf_test_kern.c
index 1bb1fdb9cdf8..69ecd717d998 100644
--- a/samples/bpf/map_perf_test_kern.c
+++ b/samples/bpf/map_perf_test_kern.c
@@ -159,7 +159,7 @@ int stress_percpu_hmap_alloc(struct pt_regs *ctx)
return 0;
 }
 
-SEC("kprobe/sys_connect")
+SEC("kprobe/__sys_connect")
 int stress_lru_hmap_alloc(struct pt_regs *ctx)
 {
char fmt[] = "Failed at stress_lru_hmap_alloc. ret:%dn";
diff --git a/samples/bpf/test_map_in_map_kern.c 
b/samples/bpf/test_map_in_map_kern.c
index 6cee61e8ce9b..b1562ba2f025 100644
--- a/samples/bpf/test_map_in_map_kern.c
+++ b/samples/bpf/test_map_in_map_kern.c
@@ -102,7 +102,7 @@ static __always_inline int do_inline_hash_lookup(void 
*inner_map, u32 port)
return result ? *result : -ENOENT;
 }
 
-SEC("kprobe/sys_connect")
+SEC("kprobe/__sys_connect")
 int trace_sys_connect(struct pt_regs *ctx)
 {
struct sockaddr_in6 *in6;
diff --git a/samples/bpf/test_probe_write_user_kern.c 
b/samples/bpf/test_probe_write_user_kern.c
index 6579639a83b2..9b3c3918c37d 100644
--- a/samples/bpf/test_probe_write_user_kern.c
+++ b/samples/bpf/test_probe_write_user_kern.c
@@ -26,7 +26,7 @@ struct {
  * This example sits on a syscall, and the syscall ABI is relatively stable
  * of course, across platforms, and over time, the ABI may change.
  */
-SEC("kprobe/sys_connect")
+SEC("kprobe/__sys_connect")
 int bpf_prog1(struct pt_regs *ctx)
 {
struct sockaddr_in new_addr, orig_addr = {};
-- 
2.25.1



[PATCH bpf-next v2 3/5] samples: bpf: refactor tail call user progs with libbpf

2020-05-15 Thread Daniel T. Lee
BPF tail call uses the BPF_MAP_TYPE_PROG_ARRAY type map for calling
into other BPF programs and this PROG_ARRAY should be filled prior to
use. Currently, samples with the PROG_ARRAY type MAP fill this program
array with bpf_load. For bpf_load to fill this map, kernel BPF program
must specify the section with specific format of /
(e.g. SEC("socket/0"))

But by using libbpf instead of bpf_load, user program can specify which
programs should be added to PROG_ARRAY. The advantage of this approach
is that you can selectively add only the programs you want, rather than
adding all of them to PROG_ARRAY, and it's much more intuitive than the
traditional approach.

This commit refactors user programs with the PROG_ARRAY type MAP with
libbpf instead of using bpf_load.

Signed-off-by: Daniel T. Lee 

---
Changes in V2:
 - refactor pointer error check with libbpf_get_error
 - on bpf object open failure, return instead jump to cleanup

 samples/bpf/Makefile   |  4 +--
 samples/bpf/sockex3_user.c | 64 
 samples/bpf/tracex5_user.c | 66 +-
 3 files changed, 103 insertions(+), 31 deletions(-)

diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index 4c91e5914329..8403e4762306 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -63,12 +63,12 @@ TRACE_HELPERS := 
../../tools/testing/selftests/bpf/trace_helpers.o
 fds_example-objs := fds_example.o
 sockex1-objs := sockex1_user.o
 sockex2-objs := sockex2_user.o
-sockex3-objs := bpf_load.o sockex3_user.o
+sockex3-objs := sockex3_user.o
 tracex1-objs := tracex1_user.o $(TRACE_HELPERS)
 tracex2-objs := tracex2_user.o
 tracex3-objs := tracex3_user.o
 tracex4-objs := tracex4_user.o
-tracex5-objs := bpf_load.o tracex5_user.o $(TRACE_HELPERS)
+tracex5-objs := tracex5_user.o $(TRACE_HELPERS)
 tracex6-objs := tracex6_user.o
 tracex7-objs := tracex7_user.o
 test_probe_write_user-objs := bpf_load.o test_probe_write_user_user.o
diff --git a/samples/bpf/sockex3_user.c b/samples/bpf/sockex3_user.c
index bbb1cd0666a9..4dbee7427d47 100644
--- a/samples/bpf/sockex3_user.c
+++ b/samples/bpf/sockex3_user.c
@@ -1,18 +1,13 @@
 // SPDX-License-Identifier: GPL-2.0
 #include 
 #include 
-#include 
 #include 
-#include "bpf_load.h"
+#include 
 #include "sock_example.h"
 #include 
 #include 
 #include 
 
-#define PARSE_IP 3
-#define PARSE_IP_PROG_FD (prog_fd[0])
-#define PROG_ARRAY_FD (map_fd[0])
-
 struct flow_key_record {
__be32 src;
__be32 dst;
@@ -30,31 +25,55 @@ struct pair {
 
 int main(int argc, char **argv)
 {
+   int i, sock, key, fd, main_prog_fd, jmp_table_fd, hash_map_fd;
struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
+   struct bpf_program *prog;
+   struct bpf_object *obj;
char filename[256];
+   const char *title;
FILE *f;
-   int i, sock, err, id, key = PARSE_IP;
-   struct bpf_prog_info info = {};
-   uint32_t info_len = sizeof(info);
 
snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
setrlimit(RLIMIT_MEMLOCK, &r);
 
-   if (load_bpf_file(filename)) {
-   printf("%s", bpf_log_buf);
-   return 1;
+   obj = bpf_object__open_file(filename, NULL);
+   if (libbpf_get_error(obj)) {
+   fprintf(stderr, "ERROR: opening BPF object file failed\n");
+   return 0;
+   }
+
+   /* load BPF program */
+   if (bpf_object__load(obj)) {
+   fprintf(stderr, "ERROR: loading BPF object file failed\n");
+   goto cleanup;
+   }
+
+   jmp_table_fd = bpf_object__find_map_fd_by_name(obj, "jmp_table");
+   hash_map_fd = bpf_object__find_map_fd_by_name(obj, "hash_map");
+   if (jmp_table_fd < 0 || hash_map_fd < 0) {
+   fprintf(stderr, "ERROR: finding a map in obj file failed\n");
+   goto cleanup;
}
 
-   /* Test fd array lookup which returns the id of the bpf_prog */
-   err = bpf_obj_get_info_by_fd(PARSE_IP_PROG_FD, &info, &info_len);
-   assert(!err);
-   err = bpf_map_lookup_elem(PROG_ARRAY_FD, &key, &id);
-   assert(!err);
-   assert(id == info.id);
+   bpf_object__for_each_program(prog, obj) {
+   fd = bpf_program__fd(prog);
+
+   title = bpf_program__title(prog, false);
+   if (sscanf(title, "socket/%d", &key) != 1) {
+   fprintf(stderr, "ERROR: finding prog failed\n");
+   goto cleanup;
+   }
+
+   if (key == 0)
+   main_prog_fd = fd;
+   else
+   bpf_map_update_elem(jmp_table_fd, &key, &fd, BPF_ANY);
+   }
 
sock = open_raw_sock("lo");
 
-   assert(setsockopt(sock, SOL_SOCKET, SO_ATTACH_BPF, &prog_fd[4],

[PATCH bpf-next v2 4/5] samples: bpf: add tracex7 test file to .gitignore

2020-05-15 Thread Daniel T. Lee
This commit adds tracex7 test file (testfile.img) to .gitignore which
comes from test_override_return.sh.

Signed-off-by: Daniel T. Lee 
---
 samples/bpf/.gitignore | 1 +
 1 file changed, 1 insertion(+)

diff --git a/samples/bpf/.gitignore b/samples/bpf/.gitignore
index 23837f2ed458..034800c4d1e6 100644
--- a/samples/bpf/.gitignore
+++ b/samples/bpf/.gitignore
@@ -50,3 +50,4 @@ xdp_rxq_info
 xdp_sample_pkts
 xdp_tx_iptunnel
 xdpsock
+testfile.img
-- 
2.25.1



[PATCH bpf-next v2 5/5] samples: bpf: refactor kprobe, tail call kern progs map definition

2020-05-15 Thread Daniel T. Lee
Because the previous two commit replaced the bpf_load implementation of
the user program with libbpf, the corresponding kernel program's MAP
definition can be replaced with new BTF-defined map syntax.

This commit only updates the samples which uses libbpf API for loading
bpf program not with bpf_load.

Signed-off-by: Daniel T. Lee 
---
 samples/bpf/sampleip_kern.c| 12 +--
 samples/bpf/sockex3_kern.c | 36 
 samples/bpf/trace_event_kern.c | 24 ++---
 samples/bpf/tracex2_kern.c | 24 ++---
 samples/bpf/tracex3_kern.c | 24 ++---
 samples/bpf/tracex4_kern.c | 12 +--
 samples/bpf/tracex5_kern.c | 14 ++---
 samples/bpf/tracex6_kern.c | 38 ++
 8 files changed, 93 insertions(+), 91 deletions(-)

diff --git a/samples/bpf/sampleip_kern.c b/samples/bpf/sampleip_kern.c
index e504dc308371..f24806ac24e7 100644
--- a/samples/bpf/sampleip_kern.c
+++ b/samples/bpf/sampleip_kern.c
@@ -13,12 +13,12 @@
 
 #define MAX_IPS8192
 
-struct bpf_map_def SEC("maps") ip_map = {
-   .type = BPF_MAP_TYPE_HASH,
-   .key_size = sizeof(u64),
-   .value_size = sizeof(u32),
-   .max_entries = MAX_IPS,
-};
+struct {
+   __uint(type, BPF_MAP_TYPE_HASH);
+   __type(key, u64);
+   __type(value, u32);
+   __uint(max_entries, MAX_IPS);
+} ip_map SEC(".maps");
 
 SEC("perf_event")
 int do_sample(struct bpf_perf_event_data *ctx)
diff --git a/samples/bpf/sockex3_kern.c b/samples/bpf/sockex3_kern.c
index 779a5249c418..cab9cca0b8eb 100644
--- a/samples/bpf/sockex3_kern.c
+++ b/samples/bpf/sockex3_kern.c
@@ -19,12 +19,12 @@
 
 #define PROG(F) SEC("socket/"__stringify(F)) int bpf_func_##F
 
-struct bpf_map_def SEC("maps") jmp_table = {
-   .type = BPF_MAP_TYPE_PROG_ARRAY,
-   .key_size = sizeof(u32),
-   .value_size = sizeof(u32),
-   .max_entries = 8,
-};
+struct {
+   __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+   __uint(key_size, sizeof(u32));
+   __uint(value_size, sizeof(u32));
+   __uint(max_entries, 8);
+} jmp_table SEC(".maps");
 
 #define PARSE_VLAN 1
 #define PARSE_MPLS 2
@@ -92,12 +92,12 @@ struct globals {
struct flow_key_record flow;
 };
 
-struct bpf_map_def SEC("maps") percpu_map = {
-   .type = BPF_MAP_TYPE_ARRAY,
-   .key_size = sizeof(__u32),
-   .value_size = sizeof(struct globals),
-   .max_entries = 32,
-};
+struct {
+   __uint(type, BPF_MAP_TYPE_ARRAY);
+   __type(key, __u32);
+   __type(value, struct globals);
+   __uint(max_entries, 32);
+} percpu_map SEC(".maps");
 
 /* user poor man's per_cpu until native support is ready */
 static struct globals *this_cpu_globals(void)
@@ -113,12 +113,12 @@ struct pair {
__u64 bytes;
 };
 
-struct bpf_map_def SEC("maps") hash_map = {
-   .type = BPF_MAP_TYPE_HASH,
-   .key_size = sizeof(struct flow_key_record),
-   .value_size = sizeof(struct pair),
-   .max_entries = 1024,
-};
+struct {
+   __uint(type, BPF_MAP_TYPE_HASH);
+   __type(key, struct flow_key_record);
+   __type(value, struct pair);
+   __uint(max_entries, 1024);
+} hash_map SEC(".maps");
 
 static void update_stats(struct __sk_buff *skb, struct globals *g)
 {
diff --git a/samples/bpf/trace_event_kern.c b/samples/bpf/trace_event_kern.c
index da1d69e20645..7d3c66fb3f88 100644
--- a/samples/bpf/trace_event_kern.c
+++ b/samples/bpf/trace_event_kern.c
@@ -18,19 +18,19 @@ struct key_t {
u32 userstack;
 };
 
-struct bpf_map_def SEC("maps") counts = {
-   .type = BPF_MAP_TYPE_HASH,
-   .key_size = sizeof(struct key_t),
-   .value_size = sizeof(u64),
-   .max_entries = 1,
-};
+struct {
+   __uint(type, BPF_MAP_TYPE_HASH);
+   __type(key, struct key_t);
+   __type(value, u64);
+   __uint(max_entries, 1);
+} counts SEC(".maps");
 
-struct bpf_map_def SEC("maps") stackmap = {
-   .type = BPF_MAP_TYPE_STACK_TRACE,
-   .key_size = sizeof(u32),
-   .value_size = PERF_MAX_STACK_DEPTH * sizeof(u64),
-   .max_entries = 1,
-};
+struct {
+   __uint(type, BPF_MAP_TYPE_STACK_TRACE);
+   __uint(key_size, sizeof(u32));
+   __uint(value_size, PERF_MAX_STACK_DEPTH * sizeof(u64));
+   __uint(max_entries, 1);
+} stackmap SEC(".maps");
 
 #define KERN_STACKID_FLAGS (0 | BPF_F_FAST_STACK_CMP)
 #define USER_STACKID_FLAGS (0 | BPF_F_FAST_STACK_CMP | BPF_F_USER_STACK)
diff --git a/samples/bpf/tracex2_kern.c b/samples/bpf/tracex2_kern.c
index cc5f94c098f8..5bc696bac27d 100644
--- a/samples/bpf/tracex2_kern.c
+++ b/samples/bpf/tracex2_kern.c
@@ -12,12 +12,12 @@
 #include 
 #include "trace_common.h"
 
-struct bpf_map_def SEC("maps") my_map = {
-   .type = BPF_MAP_TYPE_HASH,
-   

[PATCH bpf-next v2 2/5] samples: bpf: refactor kprobe tracing user progs with libbpf

2020-05-15 Thread Daniel T. Lee
Currently, the kprobe BPF program attachment method for bpf_load is
quite old. The implementation of bpf_load "directly" controls and
manages(create, delete) the kprobe events of DEBUGFS. On the other hand,
using libbpf automatically manages the kprobe event.
(under bpf_link interface)

By calling bpf_program__attach(_kprobe) in libbpf, the corresponding
kprobe is created and the BPF program will be attached to this kprobe.
To remove this, simply invoking bpf_link__destroy will clean up the
event.

This commit refactors kprobe tracing programs (tracex{1~7}_user.c) with
libbpf using bpf_link interface and bpf_program__attach.

tracex2_kern.c, which tracks system calls (sys_*), has been modified to
append prefix depending on architecture.

Signed-off-by: Daniel T. Lee 

---
Changes in V2:
 - refactor pointer error check with libbpf_get_error
 - on bpf object open failure, return instead jump to cleanup
 - add macro for adding architecture prefix to system calls (sys_*)

 samples/bpf/Makefile   | 12 
 samples/bpf/trace_common.h | 13 
 samples/bpf/tracex1_user.c | 37 ++-
 samples/bpf/tracex2_kern.c |  3 +-
 samples/bpf/tracex2_user.c | 51 +--
 samples/bpf/tracex3_user.c | 61 --
 samples/bpf/tracex4_user.c | 51 ---
 samples/bpf/tracex6_user.c | 49 ++
 samples/bpf/tracex7_user.c | 39 +++-
 9 files changed, 252 insertions(+), 64 deletions(-)
 create mode 100644 samples/bpf/trace_common.h

diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index 424f6fe7ce38..4c91e5914329 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -64,13 +64,13 @@ fds_example-objs := fds_example.o
 sockex1-objs := sockex1_user.o
 sockex2-objs := sockex2_user.o
 sockex3-objs := bpf_load.o sockex3_user.o
-tracex1-objs := bpf_load.o tracex1_user.o $(TRACE_HELPERS)
-tracex2-objs := bpf_load.o tracex2_user.o
-tracex3-objs := bpf_load.o tracex3_user.o
-tracex4-objs := bpf_load.o tracex4_user.o
+tracex1-objs := tracex1_user.o $(TRACE_HELPERS)
+tracex2-objs := tracex2_user.o
+tracex3-objs := tracex3_user.o
+tracex4-objs := tracex4_user.o
 tracex5-objs := bpf_load.o tracex5_user.o $(TRACE_HELPERS)
-tracex6-objs := bpf_load.o tracex6_user.o
-tracex7-objs := bpf_load.o tracex7_user.o
+tracex6-objs := tracex6_user.o
+tracex7-objs := tracex7_user.o
 test_probe_write_user-objs := bpf_load.o test_probe_write_user_user.o
 trace_output-objs := bpf_load.o trace_output_user.o $(TRACE_HELPERS)
 lathist-objs := bpf_load.o lathist_user.o
diff --git a/samples/bpf/trace_common.h b/samples/bpf/trace_common.h
new file mode 100644
index ..8cb5400aed1f
--- /dev/null
+++ b/samples/bpf/trace_common.h
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0
+#ifndef __TRACE_COMMON_H
+#define __TRACE_COMMON_H
+
+#ifdef __x86_64__
+#define SYSCALL(SYS) "__x64_" __stringify(SYS)
+#elif defined(__s390x__)
+#define SYSCALL(SYS) "__s390x_" __stringify(SYS)
+#else
+#define SYSCALL(SYS)  __stringify(SYS)
+#endif
+
+#endif
diff --git a/samples/bpf/tracex1_user.c b/samples/bpf/tracex1_user.c
index 55fddbd08702..9d4adb7fd834 100644
--- a/samples/bpf/tracex1_user.c
+++ b/samples/bpf/tracex1_user.c
@@ -1,21 +1,41 @@
 // SPDX-License-Identifier: GPL-2.0
 #include 
-#include 
 #include 
-#include 
-#include "bpf_load.h"
+#include 
 #include "trace_helpers.h"
 
 int main(int ac, char **argv)
 {
-   FILE *f;
+   struct bpf_link *link = NULL;
+   struct bpf_program *prog;
+   struct bpf_object *obj;
char filename[256];
+   FILE *f;
 
snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+   obj = bpf_object__open_file(filename, NULL);
+   if (libbpf_get_error(obj)) {
+   fprintf(stderr, "ERROR: opening BPF object file failed\n");
+   return 0;
+   }
+
+   prog = bpf_object__find_program_by_name(obj, "bpf_prog1");
+   if (!prog) {
+   fprintf(stderr, "ERROR: finding a prog in obj file failed\n");
+   goto cleanup;
+   }
+
+   /* load BPF program */
+   if (bpf_object__load(obj)) {
+   fprintf(stderr, "ERROR: loading BPF object file failed\n");
+   goto cleanup;
+   }
 
-   if (load_bpf_file(filename)) {
-   printf("%s", bpf_log_buf);
-   return 1;
+   link = bpf_program__attach(prog);
+   if (libbpf_get_error(link)) {
+   fprintf(stderr, "ERROR: bpf_program__attach failed\n");
+   link = NULL;
+   goto cleanup;
}
 
f = popen("taskset 1 ping -c5 localhost", "r");
@@ -23,5 +43,8 @@ int main(int ac, char **argv)
 
read_trace_pipe();
 
+cleanup:
+   bpf_link__destroy(link);
+   bpf_ob

[PATCH bpf-next v2 1/5] samples: bpf: refactor pointer error check with libbpf

2020-05-15 Thread Daniel T. Lee
Current method of checking pointer error is not user friendly.
Especially the __must_check define makes this less intuitive.

Since, libbpf has an API libbpf_get_error() which checks pointer error,
this commit refactors existing pointer error check logic with libbpf.

Signed-off-by: Daniel T. Lee 
---
 samples/bpf/sampleip_user.c | 7 ++-
 samples/bpf/trace_event_user.c  | 9 +++--
 samples/bpf/xdp_redirect_cpu_user.c | 5 +
 3 files changed, 6 insertions(+), 15 deletions(-)

diff --git a/samples/bpf/sampleip_user.c b/samples/bpf/sampleip_user.c
index 4372d2da2f9e..921c505bb567 100644
--- a/samples/bpf/sampleip_user.c
+++ b/samples/bpf/sampleip_user.c
@@ -18,9 +18,6 @@
 #include "perf-sys.h"
 #include "trace_helpers.h"
 
-#define __must_check
-#include 
-
 #define DEFAULT_FREQ   99
 #define DEFAULT_SECS   5
 #define MAX_IPS8192
@@ -57,7 +54,7 @@ static int sampling_start(int freq, struct bpf_program *prog,
return 1;
}
links[i] = bpf_program__attach_perf_event(prog, pmu_fd);
-   if (IS_ERR(links[i])) {
+   if (libbpf_get_error(links[i])) {
fprintf(stderr, "ERROR: Attach perf event\n");
links[i] = NULL;
close(pmu_fd);
@@ -182,7 +179,7 @@ int main(int argc, char **argv)
 
snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
obj = bpf_object__open_file(filename, NULL);
-   if (IS_ERR(obj)) {
+   if (libbpf_get_error(obj)) {
fprintf(stderr, "ERROR: opening BPF object file failed\n");
obj = NULL;
goto cleanup;
diff --git a/samples/bpf/trace_event_user.c b/samples/bpf/trace_event_user.c
index b6cd358d0418..ac1ba368195c 100644
--- a/samples/bpf/trace_event_user.c
+++ b/samples/bpf/trace_event_user.c
@@ -16,9 +16,6 @@
 #include "perf-sys.h"
 #include "trace_helpers.h"
 
-#define __must_check
-#include 
-
 #define SAMPLE_FREQ 50
 
 static int pid;
@@ -159,7 +156,7 @@ static void test_perf_event_all_cpu(struct perf_event_attr 
*attr)
goto all_cpu_err;
}
links[i] = bpf_program__attach_perf_event(prog, pmu_fd);
-   if (IS_ERR(links[i])) {
+   if (libbpf_get_error(links[i])) {
printf("bpf_program__attach_perf_event failed\n");
links[i] = NULL;
close(pmu_fd);
@@ -198,7 +195,7 @@ static void test_perf_event_task(struct perf_event_attr 
*attr)
goto err;
}
link = bpf_program__attach_perf_event(prog, pmu_fd);
-   if (IS_ERR(link)) {
+   if (libbpf_get_error(link)) {
printf("bpf_program__attach_perf_event failed\n");
link = NULL;
close(pmu_fd);
@@ -314,7 +311,7 @@ int main(int argc, char **argv)
}
 
obj = bpf_object__open_file(filename, NULL);
-   if (IS_ERR(obj)) {
+   if (libbpf_get_error(obj)) {
printf("opening BPF object file failed\n");
obj = NULL;
goto cleanup;
diff --git a/samples/bpf/xdp_redirect_cpu_user.c 
b/samples/bpf/xdp_redirect_cpu_user.c
index 9b8f21abeac4..f3468168982e 100644
--- a/samples/bpf/xdp_redirect_cpu_user.c
+++ b/samples/bpf/xdp_redirect_cpu_user.c
@@ -19,9 +19,6 @@ static const char *__doc__ =
 #include 
 #include 
 
-#define __must_check
-#include 
-
 #include 
 #include 
 
@@ -622,7 +619,7 @@ static struct bpf_link * attach_tp(struct bpf_object *obj,
}
 
link = bpf_program__attach_tracepoint(prog, tp_category, tp_name);
-   if (IS_ERR(link))
+   if (libbpf_get_error(link))
exit(EXIT_FAIL_BPF);
 
return link;
-- 
2.25.1



[PATCH bpf-next v2 0/5] samples: bpf: refactor kprobe tracing progs with libbpf

2020-05-15 Thread Daniel T. Lee
Currently, the kprobe BPF program attachment method for bpf_load is
pretty outdated. The implementation of bpf_load "directly" controls and
manages(create, delete) the kprobe events of DEBUGFS. On the other hand,
using libbpf automatically manages the kprobe event.
(under bpf_link interface)

This patchset refactors kprobe tracing programs with using libbpf API
for loading bpf program instead of previous bpf_load implementation.

---
Changes in V2:
 - refactor pointer error check with libbpf_get_error
 - on bpf object open failure, return instead jump to cleanup
 - add macro for adding architecture prefix to system calls (sys_*)

Daniel T. Lee (5):
  samples: bpf: refactor pointer error check with libbpf
  samples: bpf: refactor kprobe tracing user progs with libbpf
  samples: bpf: refactor tail call user progs with libbpf
  samples: bpf: add tracex7 test file to .gitignore
  samples: bpf: refactor kprobe, tail call kern progs map definition

 samples/bpf/.gitignore  |  1 +
 samples/bpf/Makefile| 16 +++
 samples/bpf/sampleip_kern.c | 12 +++---
 samples/bpf/sampleip_user.c |  7 +--
 samples/bpf/sockex3_kern.c  | 36 
 samples/bpf/sockex3_user.c  | 64 +++-
 samples/bpf/trace_common.h  | 13 ++
 samples/bpf/trace_event_kern.c  | 24 +--
 samples/bpf/trace_event_user.c  |  9 ++--
 samples/bpf/tracex1_user.c  | 37 +---
 samples/bpf/tracex2_kern.c  | 27 ++--
 samples/bpf/tracex2_user.c  | 51 ++
 samples/bpf/tracex3_kern.c  | 24 +--
 samples/bpf/tracex3_user.c  | 61 +++---
 samples/bpf/tracex4_kern.c  | 12 +++---
 samples/bpf/tracex4_user.c  | 51 +-
 samples/bpf/tracex5_kern.c  | 14 +++---
 samples/bpf/tracex5_user.c  | 66 +
 samples/bpf/tracex6_kern.c  | 38 +
 samples/bpf/tracex6_user.c  | 49 ++---
 samples/bpf/tracex7_user.c  | 39 +
 samples/bpf/xdp_redirect_cpu_user.c |  5 +--
 22 files changed, 455 insertions(+), 201 deletions(-)
 create mode 100644 samples/bpf/trace_common.h

-- 
2.25.1



Re: [PATCH bpf-next 1/3] samples: bpf: refactor kprobe tracing user progs with libbpf

2020-05-15 Thread Daniel T. Lee
On Thu, May 14, 2020 at 12:29 AM Yonghong Song  wrote:
>
>
>
> On 5/12/20 11:51 PM, Daniel T. Lee wrote:
> > On Wed, May 13, 2020 at 10:40 AM Yonghong Song  wrote:
> >>
> >>
> >>
> >> On 5/12/20 7:43 AM, Daniel T. Lee wrote:
> >>> Currently, the kprobe BPF program attachment method for bpf_load is
> >>> quite old. The implementation of bpf_load "directly" controls and
> >>> manages(create, delete) the kprobe events of DEBUGFS. On the other hand,
> >>> using using the libbpf automatically manages the kprobe event.
> >>> (under bpf_link interface)
> >>>
> >>> By calling bpf_program__attach(_kprobe) in libbpf, the corresponding
> >>> kprobe is created and the BPF program will be attached to this kprobe.
> >>> To remove this, by simply invoking bpf_link__destroy will clean up the
> >>> event.
> >>>
> >>> This commit refactors kprobe tracing programs (tracex{1~7}_user.c) with
> >>> libbpf using bpf_link interface and bpf_program__attach.
> >>>
> >>> tracex2_kern.c, which tracks system calls (sys_*), has been modified to
> >>> append prefix depending on architecture.
> >>>
> >>> Signed-off-by: Daniel T. Lee 
> >>> ---
> >>>samples/bpf/Makefile   | 12 +++
> >>>samples/bpf/tracex1_user.c | 41 
> >>>samples/bpf/tracex2_kern.c |  8 -
> >>>samples/bpf/tracex2_user.c | 55 ++--
> >>>samples/bpf/tracex3_user.c | 65 --
> >>>samples/bpf/tracex4_user.c | 55 +---
> >>>samples/bpf/tracex6_user.c | 53 +++
> >>>samples/bpf/tracex7_user.c | 43 -
> >>>8 files changed, 268 insertions(+), 64 deletions(-)
> >>>
> >>> diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
> >>> index 424f6fe7ce38..4c91e5914329 100644
> >>> --- a/samples/bpf/Makefile
> >>> +++ b/samples/bpf/Makefile
> >>> @@ -64,13 +64,13 @@ fds_example-objs := fds_example.o
> >>>sockex1-objs := sockex1_user.o
> >>>sockex2-objs := sockex2_user.o
> >>>sockex3-objs := bpf_load.o sockex3_user.o
> >>> -tracex1-objs := bpf_load.o tracex1_user.o $(TRACE_HELPERS)
> >>> -tracex2-objs := bpf_load.o tracex2_user.o
> >>> -tracex3-objs := bpf_load.o tracex3_user.o
> >>> -tracex4-objs := bpf_load.o tracex4_user.o
> >>> +tracex1-objs := tracex1_user.o $(TRACE_HELPERS)
> >>> +tracex2-objs := tracex2_user.o
> >>> +tracex3-objs := tracex3_user.o
> >>> +tracex4-objs := tracex4_user.o
> >>>tracex5-objs := bpf_load.o tracex5_user.o $(TRACE_HELPERS)
> >>> -tracex6-objs := bpf_load.o tracex6_user.o
> >>> -tracex7-objs := bpf_load.o tracex7_user.o
> >>> +tracex6-objs := tracex6_user.o
> >>> +tracex7-objs := tracex7_user.o
> >>>test_probe_write_user-objs := bpf_load.o test_probe_write_user_user.o
> >>>trace_output-objs := bpf_load.o trace_output_user.o $(TRACE_HELPERS)
> >>>lathist-objs := bpf_load.o lathist_user.o
> >>> diff --git a/samples/bpf/tracex1_user.c b/samples/bpf/tracex1_user.c
> >>> index 55fddbd08702..1b15ab98f7d3 100644
> >>> --- a/samples/bpf/tracex1_user.c
> >>> +++ b/samples/bpf/tracex1_user.c
> >>> @@ -1,21 +1,45 @@
> >>>// SPDX-License-Identifier: GPL-2.0
> >>>#include 
> >>> -#include 
> >>>#include 
> >>> -#include 
> >>> -#include "bpf_load.h"
> >>> +#include 
> >>>#include "trace_helpers.h"
> >>>
> >>> +#define __must_check
> >>
> >> This is not very user friendly.
> >> Maybe not including linux/err.h and
> >> use libbpf API libbpf_get_error() instead?
> >>
> >
> > This approach looks more apparent and can stick with the libbpf API.
> > I'll update code using this way.
> >
> >>> +#include 
> >>> +
> >>>int main(int ac, char **argv)
> >>>{
> >>> - FILE *f;
> >>> + struct bpf_link *link = NULL;
> >>> + struct bpf_program *prog;
> >>> + struct bpf_object *obj;
> >>>char filename[256];
> >>> +   

Re: [PATCH bpf-next 1/3] samples: bpf: refactor kprobe tracing user progs with libbpf

2020-05-12 Thread Daniel T. Lee
On Wed, May 13, 2020 at 10:40 AM Yonghong Song  wrote:
>
>
>
> On 5/12/20 7:43 AM, Daniel T. Lee wrote:
> > Currently, the kprobe BPF program attachment method for bpf_load is
> > quite old. The implementation of bpf_load "directly" controls and
> > manages(create, delete) the kprobe events of DEBUGFS. On the other hand,
> > using using the libbpf automatically manages the kprobe event.
> > (under bpf_link interface)
> >
> > By calling bpf_program__attach(_kprobe) in libbpf, the corresponding
> > kprobe is created and the BPF program will be attached to this kprobe.
> > To remove this, by simply invoking bpf_link__destroy will clean up the
> > event.
> >
> > This commit refactors kprobe tracing programs (tracex{1~7}_user.c) with
> > libbpf using bpf_link interface and bpf_program__attach.
> >
> > tracex2_kern.c, which tracks system calls (sys_*), has been modified to
> > append prefix depending on architecture.
> >
> > Signed-off-by: Daniel T. Lee 
> > ---
> >   samples/bpf/Makefile   | 12 +++
> >   samples/bpf/tracex1_user.c | 41 
> >   samples/bpf/tracex2_kern.c |  8 -
> >   samples/bpf/tracex2_user.c | 55 ++--
> >   samples/bpf/tracex3_user.c | 65 --
> >   samples/bpf/tracex4_user.c | 55 +---
> >   samples/bpf/tracex6_user.c | 53 +++
> >   samples/bpf/tracex7_user.c | 43 -
> >   8 files changed, 268 insertions(+), 64 deletions(-)
> >
> > diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
> > index 424f6fe7ce38..4c91e5914329 100644
> > --- a/samples/bpf/Makefile
> > +++ b/samples/bpf/Makefile
> > @@ -64,13 +64,13 @@ fds_example-objs := fds_example.o
> >   sockex1-objs := sockex1_user.o
> >   sockex2-objs := sockex2_user.o
> >   sockex3-objs := bpf_load.o sockex3_user.o
> > -tracex1-objs := bpf_load.o tracex1_user.o $(TRACE_HELPERS)
> > -tracex2-objs := bpf_load.o tracex2_user.o
> > -tracex3-objs := bpf_load.o tracex3_user.o
> > -tracex4-objs := bpf_load.o tracex4_user.o
> > +tracex1-objs := tracex1_user.o $(TRACE_HELPERS)
> > +tracex2-objs := tracex2_user.o
> > +tracex3-objs := tracex3_user.o
> > +tracex4-objs := tracex4_user.o
> >   tracex5-objs := bpf_load.o tracex5_user.o $(TRACE_HELPERS)
> > -tracex6-objs := bpf_load.o tracex6_user.o
> > -tracex7-objs := bpf_load.o tracex7_user.o
> > +tracex6-objs := tracex6_user.o
> > +tracex7-objs := tracex7_user.o
> >   test_probe_write_user-objs := bpf_load.o test_probe_write_user_user.o
> >   trace_output-objs := bpf_load.o trace_output_user.o $(TRACE_HELPERS)
> >   lathist-objs := bpf_load.o lathist_user.o
> > diff --git a/samples/bpf/tracex1_user.c b/samples/bpf/tracex1_user.c
> > index 55fddbd08702..1b15ab98f7d3 100644
> > --- a/samples/bpf/tracex1_user.c
> > +++ b/samples/bpf/tracex1_user.c
> > @@ -1,21 +1,45 @@
> >   // SPDX-License-Identifier: GPL-2.0
> >   #include 
> > -#include 
> >   #include 
> > -#include 
> > -#include "bpf_load.h"
> > +#include 
> >   #include "trace_helpers.h"
> >
> > +#define __must_check
>
> This is not very user friendly.
> Maybe not including linux/err.h and
> use libbpf API libbpf_get_error() instead?
>

This approach looks more apparent and can stick with the libbpf API.
I'll update code using this way.

> > +#include 
> > +
> >   int main(int ac, char **argv)
> >   {
> > - FILE *f;
> > + struct bpf_link *link = NULL;
> > + struct bpf_program *prog;
> > + struct bpf_object *obj;
> >   char filename[256];
> > + FILE *f;
> >
> >   snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
> > + obj = bpf_object__open_file(filename, NULL);
> > + if (IS_ERR(obj)) {
> > + fprintf(stderr, "ERROR: opening BPF object file failed\n");
> > + obj = NULL;
> > + goto cleanup;
>
> You do not need to goto cleanup, directly return 0 is okay here.
> The same for other files in this patch.
>

As you said, it would be better to return right away than to proceed
any further. I'll apply the code at next patch.

> > + }
> > +
> > + prog = bpf_object__find_program_by_name(obj, "bpf_prog1");
> > + if (!prog) {
> > + fprintf(stderr, "ERROR: finding a prog in obj file failed\n");
> > + goto cleanup;

[PATCH bpf-next 2/3] samples: bpf: refactor tail call user progs with libbpf

2020-05-12 Thread Daniel T. Lee
BPF tail call uses the BPF_MAP_TYPE_PROG_ARRAY type map for calling
into other BPF programs and this PROG_ARRAY should be filled prior to
use. Currently, samples with the PROG_ARRAY type MAP fill this program
array with bpf_load. For bpf_load to fill this map, kernel BPF program
must specify the section with the specific format of &lt;prog type&gt;/&lt;array index&gt;
(e.g. SEC("socket/0"))

But by using libbpf instead of bpf_load, user program can specify which
programs should be added to PROG_ARRAY. The advantage of this approach
is that you can selectively add only the programs you want, rather than
adding all of them to PROG_ARRAY, and it's much more intuitive than the
traditional approach.

This commit refactors user programs with the PROG_ARRAY type MAP with
libbpf instead of using bpf_load.

Signed-off-by: Daniel T. Lee 
---
 samples/bpf/Makefile   |  4 +--
 samples/bpf/sockex3_user.c | 66 ---
 samples/bpf/tracex5_user.c | 70 +-
 3 files changed, 110 insertions(+), 30 deletions(-)

diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index 4c91e5914329..8403e4762306 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -63,12 +63,12 @@ TRACE_HELPERS := 
../../tools/testing/selftests/bpf/trace_helpers.o
 fds_example-objs := fds_example.o
 sockex1-objs := sockex1_user.o
 sockex2-objs := sockex2_user.o
-sockex3-objs := bpf_load.o sockex3_user.o
+sockex3-objs := sockex3_user.o
 tracex1-objs := tracex1_user.o $(TRACE_HELPERS)
 tracex2-objs := tracex2_user.o
 tracex3-objs := tracex3_user.o
 tracex4-objs := tracex4_user.o
-tracex5-objs := bpf_load.o tracex5_user.o $(TRACE_HELPERS)
+tracex5-objs := tracex5_user.o $(TRACE_HELPERS)
 tracex6-objs := tracex6_user.o
 tracex7-objs := tracex7_user.o
 test_probe_write_user-objs := bpf_load.o test_probe_write_user_user.o
diff --git a/samples/bpf/sockex3_user.c b/samples/bpf/sockex3_user.c
index bbb1cd0666a9..961ae9c7deb9 100644
--- a/samples/bpf/sockex3_user.c
+++ b/samples/bpf/sockex3_user.c
@@ -1,17 +1,15 @@
 // SPDX-License-Identifier: GPL-2.0
 #include 
 #include 
-#include 
 #include 
-#include "bpf_load.h"
+#include 
 #include "sock_example.h"
 #include 
 #include 
 #include 
 
-#define PARSE_IP 3
-#define PARSE_IP_PROG_FD (prog_fd[0])
-#define PROG_ARRAY_FD (map_fd[0])
+#define __must_check
+#include 
 
 struct flow_key_record {
__be32 src;
@@ -30,31 +28,56 @@ struct pair {
 
 int main(int argc, char **argv)
 {
+   int i, sock, key, fd, main_prog_fd, jmp_table_fd, hash_map_fd;
struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
+   struct bpf_program *prog;
+   struct bpf_object *obj;
char filename[256];
+   const char *title;
FILE *f;
-   int i, sock, err, id, key = PARSE_IP;
-   struct bpf_prog_info info = {};
-   uint32_t info_len = sizeof(info);
 
snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
setrlimit(RLIMIT_MEMLOCK, &r);
 
-   if (load_bpf_file(filename)) {
-   printf("%s", bpf_log_buf);
-   return 1;
+   obj = bpf_object__open_file(filename, NULL);
+   if (IS_ERR(obj)) {
+   fprintf(stderr, "ERROR: opening BPF object file failed\n");
+   obj = NULL;
+   goto cleanup;
}
 
-   /* Test fd array lookup which returns the id of the bpf_prog */
-   err = bpf_obj_get_info_by_fd(PARSE_IP_PROG_FD, &info, &info_len);
-   assert(!err);
-   err = bpf_map_lookup_elem(PROG_ARRAY_FD, &key, &id);
-   assert(!err);
-   assert(id == info.id);
+   /* load BPF program */
+   if (bpf_object__load(obj)) {
+   fprintf(stderr, "ERROR: loading BPF object file failed\n");
+   goto cleanup;
+   }
+
+   jmp_table_fd = bpf_object__find_map_fd_by_name(obj, "jmp_table");
+   hash_map_fd = bpf_object__find_map_fd_by_name(obj, "hash_map");
+   if (jmp_table_fd < 0 || hash_map_fd < 0) {
+   fprintf(stderr, "ERROR: finding a map in obj file failed\n");
+   goto cleanup;
+   }
+
+   bpf_object__for_each_program(prog, obj) {
+   fd = bpf_program__fd(prog);
+
+   title = bpf_program__title(prog, false);
+   if (sscanf(title, "socket/%d", &key) != 1) {
+   fprintf(stderr, "ERROR: finding prog failed\n");
+   goto cleanup;
+   }
+
+   if (key == 0)
+   main_prog_fd = fd;
+   else
+   bpf_map_update_elem(jmp_table_fd, &key, &fd, BPF_ANY);
+   }
 
sock = open_raw_sock("lo");
 
-   assert(setsockopt(sock, SOL_SOCKET, SO_ATTACH_BPF, &prog_fd[4],
+   /* attach BPF program to socket */
+   assert(setsockopt(sock, SOL_SOCKET, 

[PATCH bpf-next 1/3] samples: bpf: refactor kprobe tracing user progs with libbpf

2020-05-12 Thread Daniel T. Lee
Currently, the kprobe BPF program attachment method for bpf_load is
quite old. The implementation of bpf_load "directly" controls and
manages(create, delete) the kprobe events of DEBUGFS. On the other hand,
using libbpf automatically manages the kprobe event.
(under bpf_link interface)

By calling bpf_program__attach(_kprobe) in libbpf, the corresponding
kprobe is created and the BPF program will be attached to this kprobe.
To remove this, simply invoking bpf_link__destroy will clean up the
event.

This commit refactors kprobe tracing programs (tracex{1~7}_user.c) with
libbpf using bpf_link interface and bpf_program__attach.

tracex2_kern.c, which tracks system calls (sys_*), has been modified to
append prefix depending on architecture.

Signed-off-by: Daniel T. Lee 
---
 samples/bpf/Makefile   | 12 +++
 samples/bpf/tracex1_user.c | 41 
 samples/bpf/tracex2_kern.c |  8 -
 samples/bpf/tracex2_user.c | 55 ++--
 samples/bpf/tracex3_user.c | 65 --
 samples/bpf/tracex4_user.c | 55 +---
 samples/bpf/tracex6_user.c | 53 +++
 samples/bpf/tracex7_user.c | 43 -
 8 files changed, 268 insertions(+), 64 deletions(-)

diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index 424f6fe7ce38..4c91e5914329 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -64,13 +64,13 @@ fds_example-objs := fds_example.o
 sockex1-objs := sockex1_user.o
 sockex2-objs := sockex2_user.o
 sockex3-objs := bpf_load.o sockex3_user.o
-tracex1-objs := bpf_load.o tracex1_user.o $(TRACE_HELPERS)
-tracex2-objs := bpf_load.o tracex2_user.o
-tracex3-objs := bpf_load.o tracex3_user.o
-tracex4-objs := bpf_load.o tracex4_user.o
+tracex1-objs := tracex1_user.o $(TRACE_HELPERS)
+tracex2-objs := tracex2_user.o
+tracex3-objs := tracex3_user.o
+tracex4-objs := tracex4_user.o
 tracex5-objs := bpf_load.o tracex5_user.o $(TRACE_HELPERS)
-tracex6-objs := bpf_load.o tracex6_user.o
-tracex7-objs := bpf_load.o tracex7_user.o
+tracex6-objs := tracex6_user.o
+tracex7-objs := tracex7_user.o
 test_probe_write_user-objs := bpf_load.o test_probe_write_user_user.o
 trace_output-objs := bpf_load.o trace_output_user.o $(TRACE_HELPERS)
 lathist-objs := bpf_load.o lathist_user.o
diff --git a/samples/bpf/tracex1_user.c b/samples/bpf/tracex1_user.c
index 55fddbd08702..1b15ab98f7d3 100644
--- a/samples/bpf/tracex1_user.c
+++ b/samples/bpf/tracex1_user.c
@@ -1,21 +1,45 @@
 // SPDX-License-Identifier: GPL-2.0
 #include 
-#include 
 #include 
-#include 
-#include "bpf_load.h"
+#include 
 #include "trace_helpers.h"
 
+#define __must_check
+#include 
+
 int main(int ac, char **argv)
 {
-   FILE *f;
+   struct bpf_link *link = NULL;
+   struct bpf_program *prog;
+   struct bpf_object *obj;
char filename[256];
+   FILE *f;
 
snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+   obj = bpf_object__open_file(filename, NULL);
+   if (IS_ERR(obj)) {
+   fprintf(stderr, "ERROR: opening BPF object file failed\n");
+   obj = NULL;
+   goto cleanup;
+   }
+
+   prog = bpf_object__find_program_by_name(obj, "bpf_prog1");
+   if (!prog) {
+   fprintf(stderr, "ERROR: finding a prog in obj file failed\n");
+   goto cleanup;
+   }
+
+   /* load BPF program */
+   if (bpf_object__load(obj)) {
+   fprintf(stderr, "ERROR: loading BPF object file failed\n");
+   goto cleanup;
+   }
 
-   if (load_bpf_file(filename)) {
-   printf("%s", bpf_log_buf);
-   return 1;
+   link = bpf_program__attach(prog);
+   if (IS_ERR(link)) {
+   fprintf(stderr, "ERROR: bpf_program__attach failed\n");
+   link = NULL;
+   goto cleanup;
}
 
f = popen("taskset 1 ping -c5 localhost", "r");
@@ -23,5 +47,8 @@ int main(int ac, char **argv)
 
read_trace_pipe();
 
+cleanup:
+   bpf_link__destroy(link);
+   bpf_object__close(obj);
return 0;
 }
diff --git a/samples/bpf/tracex2_kern.c b/samples/bpf/tracex2_kern.c
index d865bb309bcb..ff5d00916733 100644
--- a/samples/bpf/tracex2_kern.c
+++ b/samples/bpf/tracex2_kern.c
@@ -11,6 +11,12 @@
 #include 
 #include 
 
+#ifdef __x86_64__
+#define SYSCALL "__x64_"
+#else
+#define SYSCALL
+#endif
+
 struct bpf_map_def SEC("maps") my_map = {
.type = BPF_MAP_TYPE_HASH,
.key_size = sizeof(long),
@@ -77,7 +83,7 @@ struct bpf_map_def SEC("maps") my_hist_map = {
.max_entries = 1024,
 };
 
-SEC("kprobe/sys_write")
+SEC("kprobe/" SYSCALL "sys_write")
 int bpf_prog3(struct pt_regs *ctx)
 {
long write_size =

[PATCH bpf-next 3/3] samples: bpf: refactor kprobe, tail call kern progs map definition

2020-05-12 Thread Daniel T. Lee
Because the previous two commit replaced the bpf_load implementation of
the user program with libbpf, the corresponding kernel program's MAP
definition can be replaced with new BTF-defined map syntax.

This commit only updates the samples which uses libbpf API for loading
bpf program not with bpf_load.

Signed-off-by: Daniel T. Lee 
---
 samples/bpf/sampleip_kern.c| 12 +--
 samples/bpf/sockex3_kern.c | 36 
 samples/bpf/trace_event_kern.c | 24 ++---
 samples/bpf/tracex2_kern.c | 24 ++---
 samples/bpf/tracex3_kern.c | 24 ++---
 samples/bpf/tracex4_kern.c | 12 +--
 samples/bpf/tracex5_kern.c | 14 ++---
 samples/bpf/tracex6_kern.c | 38 ++
 8 files changed, 93 insertions(+), 91 deletions(-)

diff --git a/samples/bpf/sampleip_kern.c b/samples/bpf/sampleip_kern.c
index e504dc308371..f24806ac24e7 100644
--- a/samples/bpf/sampleip_kern.c
+++ b/samples/bpf/sampleip_kern.c
@@ -13,12 +13,12 @@
 
 #define MAX_IPS8192
 
-struct bpf_map_def SEC("maps") ip_map = {
-   .type = BPF_MAP_TYPE_HASH,
-   .key_size = sizeof(u64),
-   .value_size = sizeof(u32),
-   .max_entries = MAX_IPS,
-};
+struct {
+   __uint(type, BPF_MAP_TYPE_HASH);
+   __type(key, u64);
+   __type(value, u32);
+   __uint(max_entries, MAX_IPS);
+} ip_map SEC(".maps");
 
 SEC("perf_event")
 int do_sample(struct bpf_perf_event_data *ctx)
diff --git a/samples/bpf/sockex3_kern.c b/samples/bpf/sockex3_kern.c
index 36d4dac23549..3304fabd636d 100644
--- a/samples/bpf/sockex3_kern.c
+++ b/samples/bpf/sockex3_kern.c
@@ -19,12 +19,12 @@
 
 #define PROG(F) SEC("socket/"__stringify(F)) int bpf_func_##F
 
-struct bpf_map_def SEC("maps") jmp_table = {
-   .type = BPF_MAP_TYPE_PROG_ARRAY,
-   .key_size = sizeof(u32),
-   .value_size = sizeof(u32),
-   .max_entries = 8,
-};
+struct {
+   __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+   __uint(key_size, sizeof(u32));
+   __uint(value_size, sizeof(u32));
+   __uint(max_entries, 8);
+} jmp_table SEC(".maps");
 
 #define PARSE_VLAN 1
 #define PARSE_MPLS 2
@@ -92,12 +92,12 @@ struct globals {
struct flow_key_record flow;
 };
 
-struct bpf_map_def SEC("maps") percpu_map = {
-   .type = BPF_MAP_TYPE_ARRAY,
-   .key_size = sizeof(__u32),
-   .value_size = sizeof(struct globals),
-   .max_entries = 32,
-};
+struct {
+   __uint(type, BPF_MAP_TYPE_ARRAY);
+   __type(key, __u32);
+   __type(value, struct globals);
+   __uint(max_entries, 32);
+} percpu_map SEC(".maps");
 
 /* user poor man's per_cpu until native support is ready */
 static struct globals *this_cpu_globals(void)
@@ -113,12 +113,12 @@ struct pair {
__u64 bytes;
 };
 
-struct bpf_map_def SEC("maps") hash_map = {
-   .type = BPF_MAP_TYPE_HASH,
-   .key_size = sizeof(struct flow_key_record),
-   .value_size = sizeof(struct pair),
-   .max_entries = 1024,
-};
+struct {
+   __uint(type, BPF_MAP_TYPE_HASH);
+   __type(key, struct flow_key_record);
+   __type(value, struct pair);
+   __uint(max_entries, 1024);
+} hash_map SEC(".maps");
 
 static void update_stats(struct __sk_buff *skb, struct globals *g)
 {
diff --git a/samples/bpf/trace_event_kern.c b/samples/bpf/trace_event_kern.c
index da1d69e20645..7d3c66fb3f88 100644
--- a/samples/bpf/trace_event_kern.c
+++ b/samples/bpf/trace_event_kern.c
@@ -18,19 +18,19 @@ struct key_t {
u32 userstack;
 };
 
-struct bpf_map_def SEC("maps") counts = {
-   .type = BPF_MAP_TYPE_HASH,
-   .key_size = sizeof(struct key_t),
-   .value_size = sizeof(u64),
-   .max_entries = 1,
-};
+struct {
+   __uint(type, BPF_MAP_TYPE_HASH);
+   __type(key, struct key_t);
+   __type(value, u64);
+   __uint(max_entries, 1);
+} counts SEC(".maps");
 
-struct bpf_map_def SEC("maps") stackmap = {
-   .type = BPF_MAP_TYPE_STACK_TRACE,
-   .key_size = sizeof(u32),
-   .value_size = PERF_MAX_STACK_DEPTH * sizeof(u64),
-   .max_entries = 1,
-};
+struct {
+   __uint(type, BPF_MAP_TYPE_STACK_TRACE);
+   __uint(key_size, sizeof(u32));
+   __uint(value_size, PERF_MAX_STACK_DEPTH * sizeof(u64));
+   __uint(max_entries, 1);
+} stackmap SEC(".maps");
 
 #define KERN_STACKID_FLAGS (0 | BPF_F_FAST_STACK_CMP)
 #define USER_STACKID_FLAGS (0 | BPF_F_FAST_STACK_CMP | BPF_F_USER_STACK)
diff --git a/samples/bpf/tracex2_kern.c b/samples/bpf/tracex2_kern.c
index ff5d00916733..a2409abd7b87 100644
--- a/samples/bpf/tracex2_kern.c
+++ b/samples/bpf/tracex2_kern.c
@@ -17,12 +17,12 @@
 #define SYSCALL
 #endif
 
-struct bpf_map_def SEC("maps") my_map = {
-   .type = BPF_MAP_TYPE_HASH,
-   .key_size = sizeof(long),
-  

[PATCH bpf-next 0/3] samples: bpf: refactor kprobe tracing progs with libbpf

2020-05-12 Thread Daniel T. Lee
Currently, the kprobe BPF program attachment method for bpf_load is
pretty outdated. The implementation of bpf_load "directly" controls and
manages(create, delete) the kprobe events of DEBUGFS. On the other hand,
using libbpf automatically manages the kprobe event.
(under bpf_link interface)

This patchset refactors kprobe tracing programs with using libbpf API
for loading bpf program instead of previous bpf_load implementation.

Daniel T. Lee (3):
  samples: bpf: refactor kprobe tracing user progs with libbpf
  samples: bpf: refactor tail call user progs with libbpf
  samples: bpf: refactor kprobe, tail call kern progs map definition

 samples/bpf/Makefile   | 16 
 samples/bpf/sampleip_kern.c| 12 +++---
 samples/bpf/sockex3_kern.c | 36 -
 samples/bpf/sockex3_user.c | 66 ++--
 samples/bpf/trace_event_kern.c | 24 ++--
 samples/bpf/tracex1_user.c | 41 
 samples/bpf/tracex2_kern.c | 32 +---
 samples/bpf/tracex2_user.c | 55 +-
 samples/bpf/tracex3_kern.c | 24 ++--
 samples/bpf/tracex3_user.c | 65 +++
 samples/bpf/tracex4_kern.c | 12 +++---
 samples/bpf/tracex4_user.c | 55 --
 samples/bpf/tracex5_kern.c | 14 +++
 samples/bpf/tracex5_user.c | 70 ++
 samples/bpf/tracex6_kern.c | 38 +-
 samples/bpf/tracex6_user.c | 53 ++---
 samples/bpf/tracex7_user.c | 43 +
 17 files changed, 471 insertions(+), 185 deletions(-)

-- 
2.25.1



  1   2   3   >