From: Zhenzhong Wu <[email protected]>
Date: Sun, 19 Apr 2026 02:13:33 +0800
> After migrate_dance() moves established children to the target
> listener, add it to an epoll set and verify that epoll_wait(..., 0)
> reports it ready before accept().
>
> This adds epoll coverage for the TCP_ESTABLISHED reuseport migration
> case in migrate_reuseport.
>
> Keep the check limited to TCP_ESTABLISHED cases. TCP_SYN_RECV and
> TCP_NEW_SYN_RECV still depend on asynchronous handshake completion,
> so a zero-timeout epoll_wait() would race there.
>
> Signed-off-by: Zhenzhong Wu <[email protected]>
> ---
> .../bpf/prog_tests/migrate_reuseport.c | 32 ++++++++++++++++++-
> 1 file changed, 31 insertions(+), 1 deletion(-)
>
> diff --git a/tools/testing/selftests/bpf/prog_tests/migrate_reuseport.c b/tools/testing/selftests/bpf/prog_tests/migrate_reuseport.c
> index 653b0a20f..580a53424 100644
> --- a/tools/testing/selftests/bpf/prog_tests/migrate_reuseport.c
> +++ b/tools/testing/selftests/bpf/prog_tests/migrate_reuseport.c
> @@ -18,13 +18,16 @@
> * 9. call shutdown() for the second server
> * and migrate the requests in the accept queue
> * to the last server socket.
> - * 10. call accept() for the last server socket.
> + * 10. for TCP_ESTABLISHED cases, call epoll_wait(..., 0)
> + * for the last server socket.
> + * 11. call accept() for the last server socket.
> *
> * Author: Kuniyuki Iwashima <[email protected]>
> */
>
> #include <bpf/bpf.h>
> #include <bpf/libbpf.h>
> +#include <sys/epoll.h>
>
> #include "test_progs.h"
> #include "test_migrate_reuseport.skel.h"
> @@ -522,6 +525,33 @@ static void run_test(struct migrate_reuseport_test_case *test_case,
> goto close_clients;
> }
>
> + /* Only TCP_ESTABLISHED has already-migrated accept-queue entries
> + * here. Later states still depend on follow-up handshake work.
> + */
> + if (test_case->state == BPF_TCP_ESTABLISHED) {
> + struct epoll_event ev = {
> + .events = EPOLLIN,
> + };
> + int epfd;
> + int nfds;
> +
> + epfd = epoll_create1(EPOLL_CLOEXEC);
> + if (!ASSERT_NEQ(epfd, -1, "epoll_create1"))
> + goto close_clients;
> +
> + ev.data.fd = test_case->servers[MIGRATED_TO];
> + if (!ASSERT_OK(epoll_ctl(epfd, EPOLL_CTL_ADD,
> + test_case->servers[MIGRATED_TO], &ev),
> + "epoll_ctl"))
> + goto close_epfd;
> +
> + nfds = epoll_wait(epfd, &ev, 1, 0);
> + ASSERT_EQ(nfds, 1, "epoll_wait");
Thanks for the update, but the test passes without patch 1.
I think it would be best to test just after shutdown()
where migration happens.
Also, TCP_SYN_RECV should be covered in the same way.
---8<---
diff --git a/tools/testing/selftests/bpf/prog_tests/migrate_reuseport.c b/tools/testing/selftests/bpf/prog_tests/migrate_reuseport.c
index 580a534249a7..66fea936649e 100644
--- a/tools/testing/selftests/bpf/prog_tests/migrate_reuseport.c
+++ b/tools/testing/selftests/bpf/prog_tests/migrate_reuseport.c
@@ -353,8 +353,29 @@ static int update_maps(struct migrate_reuseport_test_case *test_case,
static int migrate_dance(struct migrate_reuseport_test_case *test_case)
{
+ struct epoll_event ev = {
+ .events = EPOLLIN,
+ };
+ int epoll, nfds;
int i, err;
+ if (test_case->state != BPF_TCP_NEW_SYN_RECV) {
+ epoll = epoll_create1(0);
+ if (!ASSERT_NEQ(epoll, -1, "epoll_create1"))
+ return -1;
+
+ ev.data.fd = test_case->servers[MIGRATED_TO];
+ if (!ASSERT_OK(epoll_ctl(epoll, EPOLL_CTL_ADD,
+ test_case->servers[MIGRATED_TO], &ev),
+ "epoll_ctl")) {
+ goto close_epoll;
+ }
+
+ nfds = epoll_wait(epoll, &ev, 1, 0);
+ if (!ASSERT_EQ(nfds, 0, "epoll_wait 1"))
+ goto close_epoll;
+ }
+
/* Migrate TCP_ESTABLISHED and TCP_SYN_RECV requests
* to the last listener based on eBPF.
*/
@@ -368,6 +389,15 @@ static int migrate_dance(struct migrate_reuseport_test_case *test_case)
if (test_case->state == BPF_TCP_NEW_SYN_RECV)
return 0;
+ nfds = epoll_wait(epoll, &ev, 1, 0);
+ if (!ASSERT_EQ(nfds, 1, "epoll_wait 2")) {
+close_epoll:
+ close(epoll);
+ return -1;
+ }
+
+ close(epoll);
+
/* Note that we use the second listener instead of the
* first one here.
*
@@ -525,33 +555,6 @@ static void run_test(struct migrate_reuseport_test_case *test_case,
goto close_clients;
}
- /* Only TCP_ESTABLISHED has already-migrated accept-queue entries
- * here. Later states still depend on follow-up handshake work.
- */
- if (test_case->state == BPF_TCP_ESTABLISHED) {
- struct epoll_event ev = {
- .events = EPOLLIN,
- };
- int epfd;
- int nfds;
-
- epfd = epoll_create1(EPOLL_CLOEXEC);
- if (!ASSERT_NEQ(epfd, -1, "epoll_create1"))
- goto close_clients;
-
- ev.data.fd = test_case->servers[MIGRATED_TO];
- if (!ASSERT_OK(epoll_ctl(epfd, EPOLL_CTL_ADD,
- test_case->servers[MIGRATED_TO], &ev),
- "epoll_ctl"))
- goto close_epfd;
-
- nfds = epoll_wait(epfd, &ev, 1, 0);
- ASSERT_EQ(nfds, 1, "epoll_wait");
-
-close_epfd:
- close(epfd);
- }
-
count_requests(test_case, skel);
close_clients:
---8<---