From: Bobby Eshleman <[email protected]>

Add test case for autorelease.

The test case is the same as the RX test, but enables autorelease.  The
original RX test is changed to use the new -a 0 flag to explicitly
disable autorelease.
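
For example, the two listener modes differ only in the new flag (the
interface name and addresses below are placeholders, not values taken
from the test):

  ./ncdevmem -l -f eth0 -s 192.168.1.4 -p 5201 -c 192.168.1.5 -v 7 -a 0
  ./ncdevmem -l -f eth0 -s 192.168.1.4 -p 5201 -c 192.168.1.5 -v 7 -a 1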

TAP version 13
1..4
ok 1 devmem.check_rx
ok 2 devmem.check_rx_autorelease
ok 3 devmem.check_tx
ok 4 devmem.check_tx_chunks
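
The output above comes from running the selftest directly, e.g. (NETIF
and the remote-endpoint setup follow the existing drivers/net selftest
conventions; the interface name is a placeholder):

  NETIF=eth0 ./tools/testing/selftests/drivers/net/hw/devmem.py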

Signed-off-by: Bobby Eshleman <[email protected]>
---
Changes in v8:
- removed stale/missing tests

Changes in v7:
- use autorelease netlink
- remove sockopt tests
---
 tools/testing/selftests/drivers/net/hw/devmem.py  | 21 +++++++++++++++++++--
 tools/testing/selftests/drivers/net/hw/ncdevmem.c | 19 +++++++++++++------
 2 files changed, 32 insertions(+), 8 deletions(-)

diff --git a/tools/testing/selftests/drivers/net/hw/devmem.py b/tools/testing/selftests/drivers/net/hw/devmem.py
index 45c2d49d55b6..dbe696a445bd 100755
--- a/tools/testing/selftests/drivers/net/hw/devmem.py
+++ b/tools/testing/selftests/drivers/net/hw/devmem.py
@@ -25,7 +25,24 @@ def check_rx(cfg) -> None:
 
     port = rand_port()
     socat = f"socat -u - TCP{cfg.addr_ipver}:{cfg.baddr}:{port},bind={cfg.remote_baddr}:{port}"
-    listen_cmd = f"{cfg.bin_local} -l -f {cfg.ifname} -s {cfg.addr} -p {port} -c {cfg.remote_addr} -v 7"
+    listen_cmd = f"{cfg.bin_local} -l -f {cfg.ifname} -s {cfg.addr} -p {port} -c {cfg.remote_addr} -v 7 -a 0"
+
+    with bkg(listen_cmd, exit_wait=True) as ncdevmem:
+        wait_port_listen(port)
+        cmd(f"yes $(echo -e \x01\x02\x03\x04\x05\x06) | \
+            head -c 1K | {socat}", host=cfg.remote, shell=True)
+
+    ksft_eq(ncdevmem.ret, 0)
+
+
+@ksft_disruptive
+def check_rx_autorelease(cfg) -> None:
+    require_devmem(cfg)
+
+    port = rand_port()
+    socat = f"socat -u - TCP{cfg.addr_ipver}:{cfg.baddr}:{port},bind={cfg.remote_baddr}:{port}"
+    listen_cmd = f"{cfg.bin_local} -l -f {cfg.ifname} -s {cfg.addr} -p {port} \
+                  -c {cfg.remote_addr} -v 7 -a 1"
 
     with bkg(listen_cmd, exit_wait=True) as ncdevmem:
         wait_port_listen(port)
@@ -68,7 +85,7 @@ def main() -> None:
         cfg.bin_local = path.abspath(path.dirname(__file__) + "/ncdevmem")
         cfg.bin_remote = cfg.remote.deploy(cfg.bin_local)
 
-        ksft_run([check_rx, check_tx, check_tx_chunks],
+        ksft_run([check_rx, check_rx_autorelease, check_tx, check_tx_chunks],
                  args=(cfg, ))
     ksft_exit()
 
diff --git a/tools/testing/selftests/drivers/net/hw/ncdevmem.c b/tools/testing/selftests/drivers/net/hw/ncdevmem.c
index 3288ed04ce08..406f1771d9ec 100644
--- a/tools/testing/selftests/drivers/net/hw/ncdevmem.c
+++ b/tools/testing/selftests/drivers/net/hw/ncdevmem.c
@@ -92,6 +92,7 @@ static char *port;
 static size_t do_validation;
 static int start_queue = -1;
 static int num_queues = -1;
+static int devmem_autorelease;
 static char *ifname;
 static unsigned int ifindex;
 static unsigned int dmabuf_id;
@@ -679,7 +680,8 @@ static int configure_flow_steering(struct sockaddr_in6 *server_sin)
 
 static int bind_rx_queue(unsigned int ifindex, unsigned int dmabuf_fd,
                         struct netdev_queue_id *queues,
-                        unsigned int n_queue_index, struct ynl_sock **ys)
+                        unsigned int n_queue_index, struct ynl_sock **ys,
+                        int autorelease)
 {
        struct netdev_bind_rx_req *req = NULL;
        struct netdev_bind_rx_rsp *rsp = NULL;
@@ -695,6 +697,7 @@ static int bind_rx_queue(unsigned int ifindex, unsigned int dmabuf_fd,
        req = netdev_bind_rx_req_alloc();
        netdev_bind_rx_req_set_ifindex(req, ifindex);
        netdev_bind_rx_req_set_fd(req, dmabuf_fd);
+       netdev_bind_rx_req_set_autorelease(req, autorelease);
        __netdev_bind_rx_req_set_queues(req, queues, n_queue_index);
 
        rsp = netdev_bind_rx(*ys, req);
@@ -872,7 +875,8 @@ static int do_server(struct memory_buffer *mem)
                goto err_reset_rss;
        }
 
-       if (bind_rx_queue(ifindex, mem->fd, create_queues(), num_queues, &ys)) {
+       if (bind_rx_queue(ifindex, mem->fd, create_queues(), num_queues, &ys,
+                         devmem_autorelease)) {
                pr_err("Failed to bind");
                goto err_reset_flow_steering;
        }
@@ -1092,7 +1096,7 @@ int run_devmem_tests(void)
                goto err_reset_headersplit;
        }
 
-       if (!bind_rx_queue(ifindex, mem->fd, queues, num_queues, &ys)) {
+       if (!bind_rx_queue(ifindex, mem->fd, queues, num_queues, &ys, 0)) {
                pr_err("Binding empty queues array should have failed");
                goto err_unbind;
        }
@@ -1108,7 +1112,7 @@ int run_devmem_tests(void)
                goto err_reset_headersplit;
        }
 
-       if (!bind_rx_queue(ifindex, mem->fd, queues, num_queues, &ys)) {
+       if (!bind_rx_queue(ifindex, mem->fd, queues, num_queues, &ys, 0)) {
                pr_err("Configure dmabuf with header split off should have 
failed");
                goto err_unbind;
        }
@@ -1124,7 +1128,7 @@ int run_devmem_tests(void)
                goto err_reset_headersplit;
        }
 
-       if (bind_rx_queue(ifindex, mem->fd, queues, num_queues, &ys)) {
+       if (bind_rx_queue(ifindex, mem->fd, queues, num_queues, &ys, 0)) {
                pr_err("Failed to bind");
                goto err_reset_headersplit;
        }
@@ -1397,7 +1401,7 @@ int main(int argc, char *argv[])
        int is_server = 0, opt;
        int ret, err = 1;
 
-       while ((opt = getopt(argc, argv, "ls:c:p:v:q:t:f:z:")) != -1) {
+       while ((opt = getopt(argc, argv, "ls:c:p:v:q:t:f:z:a:")) != -1) {
                switch (opt) {
                case 'l':
                        is_server = 1;
@@ -1426,6 +1430,9 @@ int main(int argc, char *argv[])
                case 'z':
                        max_chunk = atoi(optarg);
                        break;
+               case 'a':
+                       devmem_autorelease = atoi(optarg);
+                       break;
                case '?':
                        fprintf(stderr, "unknown option: %c\n", optopt);
                        break;
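
For reviewers unfamiliar with the YNL plumbing, here is a minimal sketch
of the request construction that the new parameter threads into,
condensed from bind_rx_queue() above (the YNL socket creation, error
handling, and dmabuf_id bookkeeping of the real function are elided, and
the gloss on the autorelease values is my reading of this series, not
spelled out in this patch):

	/* Condensed sketch of the netdev bind-rx request as extended by
	 * this patch.  ys is an already-created YNL socket, unlike in
	 * bind_rx_queue(), which creates it internally.
	 */
	static int bind_rx_sketch(struct ynl_sock *ys, unsigned int ifindex,
				  unsigned int dmabuf_fd,
				  struct netdev_queue_id *queues,
				  unsigned int n_queues, int autorelease)
	{
		struct netdev_bind_rx_req *req;
		struct netdev_bind_rx_rsp *rsp;

		req = netdev_bind_rx_req_alloc();
		netdev_bind_rx_req_set_ifindex(req, ifindex);
		netdev_bind_rx_req_set_fd(req, dmabuf_fd);
		/* New attribute: -a 0 keeps explicit release (check_rx),
		 * -a 1 enables autorelease (check_rx_autorelease).
		 */
		netdev_bind_rx_req_set_autorelease(req, autorelease);
		__netdev_bind_rx_req_set_queues(req, queues, n_queues);

		rsp = netdev_bind_rx(ys, req);
		netdev_bind_rx_req_free(req);
		if (!rsp)
			return -1;
		netdev_bind_rx_rsp_free(rsp);
		return 0;
	}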

-- 
2.47.3

