On 8/25/23 18:50, Ihar Hrachyshka wrote:
> The daemon life cycle spans over the whole test case life time, which
> significantly speeds up test cases that rely on fmt_pkt for packet byte
> representations.
> 
> The speed-up comes from the fact that we no longer start a python
> environment with all scapy modules imported on any fmt_pkt invocation;
> but only on the first call to fmt_pkt.
> 
> The daemon is not started for test cases that don't trigger fmt_pkt.
> (The server is started lazily as part of fmt_pkt call.)
> 
> For example, without the daemon, all tests that use fmt_pkt took the
> following on my vagrant box:
> 
> real    15m27.117s
> user    23m55.023s
> sys     4m35.469s
> 
> With the daemon, the same set of tests run in:
> 
> real    4m34.092s
> user    3m20.231s
> sys     1m10.886s
> 
> We may want to make the daemon global, so that it's invoked once per
> test suite run. But I haven't figured out, yet, how to run a trap to
> clean up the daemon and its socket and pid files on suite exit (and not
> on test case exit.)

Hi, Ihar.  Not a full review, but a few comments inline.

> 
> Signed-off-by: Ihar Hrachyshka <ihrac...@redhat.com>
> ---
>  tests/automake.mk     |   1 +
>  tests/ovn-macros.at   |  27 ++++++++++--
>  tests/scapy_server.py | 100 ++++++++++++++++++++++++++++++++++++++++++
>  3 files changed, 124 insertions(+), 4 deletions(-)
>  create mode 100755 tests/scapy_server.py
> 
> diff --git a/tests/automake.mk b/tests/automake.mk
> index eea0d00f4..0f7f1bd9a 100644
> --- a/tests/automake.mk
> +++ b/tests/automake.mk
> @@ -309,6 +309,7 @@ tests_ovstest_LDADD = $(OVS_LIBDIR)/daemon.lo \
>  CHECK_PYFILES = \
>       tests/test-l7.py \
>       tests/uuidfilt.py \
> +     tests/scapy_server.py \
>       tests/test-tcp-rst.py \
>       tests/check_acl_log.py
>  
> diff --git a/tests/ovn-macros.at b/tests/ovn-macros.at
> index 13d5dc3d4..8b12f8e55 100644
> --- a/tests/ovn-macros.at
> +++ b/tests/ovn-macros.at
> @@ -816,6 +816,15 @@ ovn_trace_client() {
>      ovs-appctl -t $target trace "$@" | tee trace | sed '/^# /d'
>  }
>  
> +wait_unix_socket() {
> +    local socketfile=$1
> +    for i in `seq 100`; do
> +        nc -zU "$socketfile" && return 0
> +        sleep 0.1
> +    done
> +    return 1
> +}
> +
>  # Receives a string with scapy python code that represents a packet.
>  # Returns a hex-string that contains bytes that reflect the packet symbolic
>  # description.
> @@ -833,10 +842,20 @@ ovn_trace_client() {
>  # ovs-appctl netdev-dummy/receive $vif $packet
>  #
>  fmt_pkt() {
> -    echo "from scapy.all import *; \
> -          import binascii; \
> -          out = binascii.hexlify(raw($1)); \
> -          print(out.decode())" | $PYTHON3
> +    sockfile=$ovs_base/scapy.sock
> +    if [[ ! -e $sockfile ]]; then
> +        start_scapy_server > /dev/null
> +        wait_unix_socket $sockfile

This should probably be an OVS_WAIT_UNTIL macro that tests for
the socket file to exist.

> +    fi
> +    echo "$(echo $1 | nc -U $sockfile)"
> +}
> +
> +start_scapy_server() {
> +    pidfile=$ovs_base/scapy.pid
> +    sockfile=$ovs_base/scapy.sock
> +    "$top_srcdir"/tests/scapy_server.py --pidfile=$pidfile --sockfile=$sockfile
> +    on_exit "test -e \"$pidfile\" && kill \`cat \"$pidfile\"\`"
> +    on_exit "test -e \"$sockfile\" && rm \"$sockfile\""
>  }
>  
>  sleep_sb() {
> diff --git a/tests/scapy_server.py b/tests/scapy_server.py
> new file mode 100755
> index 000000000..a417d42c9
> --- /dev/null
> +++ b/tests/scapy_server.py
> @@ -0,0 +1,100 @@
> +#!/usr/bin/env python3
> +
> +import argparse
> +import atexit
> +import os
> +import socket
> +import sys
> +import threading
> +
> +import binascii
> +from scapy.all import *  # noqa: F401,F403
> +from scapy.all import raw
> +
> +
> +_MAX_PATTERN_LEN = 1024 * 32
> +
> +
> +def write_pidfile(pidfile):
> +    pid = str(os.getpid())
> +    with open(pidfile, 'w') as f:
> +        f.write(pid)
> +
> +
> +def remove_pidfile(pidfile):
> +    os.remove(pidfile)
> +
> +
> +def check_pidfile(pidfile):
> +    if os.path.exists(pidfile):
> +        with open(pidfile, 'r') as f:
> +            pid = f.read().strip()
> +            try:
> +                pid = int(pid)
> +                if os.path.exists(f"/proc/{pid}"):
> +                    print("Scapy server is already running:", pid)
> +                    sys.exit(1)
> +            except ValueError:
> +                sys.exit(1)
> +
> +
> +def fork():
> +    try:
> +        pid = os.fork()
> +        if pid > 0:
> +            sys.exit(0)
> +    except OSError as e:
> +        print("Fork failed:", e)
> +        sys.exit(1)
> +
> +
> +def daemonize(pidfile):
> +    fork()
> +    check_pidfile(pidfile)
> +    write_pidfile(pidfile)
> +    atexit.register(remove_pidfile, pidfile)
> +
> +
> +def process(data):
> +    try:
> +        return binascii.hexlify(raw(eval(data)))
> +    except Exception:
> +        return ""
> +
> +
> +def handle_client(client_socket):
> +    data = client_socket.recv(_MAX_PATTERN_LEN)
> +    data = data.strip()
> +    if data:
> +        client_socket.send(process(data))
> +    client_socket.close()
> +
> +
> +def main(pidfile, socket_path):
> +    daemonize(pidfile)
> +
> +    if os.path.exists(socket_path):
> +        os.remove(socket_path)
> +
> +    server_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
> +    server_socket.bind(socket_path)
> +    server_socket.listen()
> +
> +    while True:
> +        client_socket, _ = server_socket.accept()
> +        handler = threading.Thread(target=handle_client, args=(client_socket,))
> +        handler.start()
> +
> +
> +def parse_args():
> +    parser = argparse.ArgumentParser()
> +    parser.add_argument("--pidfile", help="Path to PID file",
> +                        default="/tmp/scapy.pid")
> +    parser.add_argument("--sockfile", help="Path to Unix socket file",
> +                        default="/tmp/scapy.sock")
> +    return parser.parse_args()
> +
> +
> +if __name__ == "__main__":
> +    args = parse_args()
> +    main(args.pidfile, args.sockfile)

A lot of infrastructure for daemons is already implemented in
OVS python library.  It may even be more robust than a manual
implementation.  I'd suggest to implement this daemon as unixctl
server and avoid all the manual daemonization, pidfile and other
management, netcat stuff.  You may find an example of a simple
unixctl daemon in tests/test-unixctl.py in OVS's repo.
You'll be able to use ovs/ovn-appctl to talk to it.

Best regards, Ilya Maximets.
_______________________________________________
dev mailing list
d...@openvswitch.org
https://mail.openvswitch.org/mailman/listinfo/ovs-dev

Reply via email to