This is an automated email from the ASF dual-hosted git repository.

gmurthy pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/qpid-dispatch.git
The following commit(s) were added to refs/heads/main by this push: new ec62cb2 DISPATCH-2326: Additional fix. Remove the nghttp2 library from CMakeLists.txt and other files ec62cb2 is described below commit ec62cb2e6f1cd1cdf367103a8ad7dc9e21b21cff Author: Ganesh Murthy <gmur...@apache.org> AuthorDate: Thu Feb 17 14:35:19 2022 -0500 DISPATCH-2326: Additional fix. Remove the nghttp2 library from CMakeLists.txt and other files --- .github/workflows/build.yaml | 12 +- .travis.yml | 32 +--- CMakeLists.txt | 5 - README.adoc | 42 ----- cmake/Findlibnghttp2.cmake | 47 ----- dockerfiles/Dockerfile-fedora | 4 +- dockerfiles/Dockerfile-ubuntu | 4 +- tests/CMakeLists.txt | 4 +- tests/TCP_echo_client.py | 320 ---------------------------------- tests/TCP_echo_server.py | 397 ------------------------------------------ tests/http2_slow_q2_server.py | 116 ------------ tests/tox.ini.in | 23 --- 12 files changed, 8 insertions(+), 998 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 2d1a1a2..85e58e8 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -109,7 +109,7 @@ jobs: - name: Install Linux build dependencies if: ${{ runner.os == 'Linux' }} run: | - sudo apt update; sudo apt install -y swig libpython3-dev libsasl2-dev libjsoncpp-dev libwebsockets-dev libnghttp2-dev ccache ninja-build pixz libbenchmark-dev + sudo apt update; sudo apt install -y swig libpython3-dev libsasl2-dev libjsoncpp-dev libwebsockets-dev ccache ninja-build pixz libbenchmark-dev - name: Zero ccache stats run: ccache -z @@ -207,7 +207,7 @@ jobs: architecture: x64 - name: Install Python runtime/test dependencies - run: python -m pip install tox quart selectors h2 grpcio protobuf websockets pytest + run: python -m pip install tox websockets pytest - name: Install Linux runtime/test dependencies if: ${{ runner.os == 'Linux' }} @@ -427,11 +427,7 @@ jobs: run: env -0 | sort -z | tr '\0' '\n' - name: Install Python runtime/test dependencies - run: python3 -m pip install tox quart selectors h2 protobuf websockets pytest - - - name: Install Python runtime/test dependencies (Fedora) - if: ${{ matrix.container == 'fedora' }} - run: python3 -m pip install grpcio + run: python3 -m pip install tox websockets pytest - name: Install Linux runtime/test dependencies if: ${{ runner.os == 'Linux' }} @@ -505,7 +501,7 @@ jobs: - name: Install Linux build dependencies if: ${{ runner.os == 'Linux' }} run: | - sudo apt update; sudo apt install -y libqpid-proton-proactor1-dev python3-qpid-proton libpython3-dev libwebsockets-dev libnghttp2-dev ninja-build + sudo apt update; sudo apt install -y libqpid-proton-proactor1-dev python3-qpid-proton libpython3-dev libwebsockets-dev ninja-build - name: Install Linux docs dependencies if: ${{ runner.os == 'Linux' }} diff --git a/.travis.yml b/.travis.yml index 08947b9..f204e61 100644 --- a/.travis.yml +++ b/.travis.yml @@ -79,16 +79,9 @@ jobs: - sudo apt-get install -y python3-pip - python3 -m pip install --user --upgrade pip - python3 -m pip install --user tox virtualenv - # Install quart, h2 to run the http2 tests. - - python3 -m pip install --user quart h2 - # DISPATCH-1883: Install selectors to run tcp echo server/client tools - - python3 -m pip install --user selectors - # Install grpcio and protobuf to run the grpc tests. 
- - python3 -m pip install --user grpcio protobuf env: - QPID_SYSTEM_TEST_TIMEOUT=300 - QPID_SYSTEM_TEST_SKIP_FALLBACK_SWITCHOVER_TEST=True - - QPID_SYSTEM_TEST_SKIP_HTTP2_LARGE_IMAGE_UPLOAD_TEST=True - CC=clang-13 - CXX=clang++-13 - PATH="/usr/bin:$PATH" PROTON_VERSION=0.36.0 BUILD_TYPE=RelWithDebInfo @@ -103,15 +96,7 @@ jobs: # Update pip, it may prevent issues later - python3 -m pip install --user --upgrade pip - python3 -m pip install --user tox virtualenv - # Install quart to run the http2 tests. - - python3 -m pip install --user quart - # DISPATCH-1883: Install selectors to run tcp echo server/client tools - - python3 -m pip install --user selectors - # Install grpcio and protobuf to run the grpc tests. - # Installation on s390x currently broken https://github.com/grpc/grpc/pull/25363 # Binary wheel is not available in PyPI for s390x and source install requires fetching git submodules first - - python3 -m pip install --user protobuf - - sudo apt install python3-grpcio env: - QPID_SYSTEM_TEST_TIMEOUT=300 - QPID_SYSTEM_TEST_SKIP_FALLBACK_SWITCHOVER_TEST=True @@ -129,12 +114,6 @@ jobs: # Update pip, it may prevent issues later - python3 -m pip install --user --upgrade pip - python3 -m pip install --user tox virtualenv - # Install quart to run the http2 tests. - - python3 -m pip install --user quart - # DISPATCH-1883: Install selectors to run tcp echo server/client tools - - python3 -m pip install --user selectors - # Install grpcio and protobuf to run the grpc tests. - - python3 -m pip install --user grpcio protobuf env: - QPID_SYSTEM_TEST_TIMEOUT=300 - QPID_SYSTEM_TEST_SKIP_FALLBACK_SWITCHOVER_TEST=True @@ -158,18 +137,11 @@ jobs: - sudo apt-get install -y python3-pip - python3 -m pip install --user --upgrade pip - python3 -m pip install --user tox virtualenv - # Install quart to run the http2 tests. - - python3 -m pip install --user quart - # DISPATCH-1883: Install selectors to run tcp echo server/client tools - - python3 -m pip install --user selectors - # Install grpcio and protobuf to run the grpc tests. 
- - python3 -m pip install --user grpcio protobuf env: - CC=gcc-11 - CXX=g++-11 - QPID_SYSTEM_TEST_TIMEOUT=300 - QPID_SYSTEM_TEST_SKIP_FALLBACK_SWITCHOVER_TEST=True - - QPID_SYSTEM_TEST_SKIP_HTTP2_LARGE_IMAGE_UPLOAD_TEST=True - PATH="/usr/bin:$PATH" PROTON_VERSION=0.36.0 - DISPATCH_CMAKE_ARGS='-DRUNTIME_CHECK=tsan' - name: "qdrouterd:RelWithDebInfo+MemoryDebug (clang on macOS)" @@ -181,11 +153,10 @@ jobs: - PATH="/opt/local/bin:/opt/local/sbin:/usr/local/bin:$PATH" PROTON_VERSION=main - DISPATCH_CMAKE_ARGS='-DRUNTIME_CHECK=asan -DCMAKE_C_FLAGS=-DQD_MEMORY_DEBUG -DQD_ENABLE_ASSERTIONS=ON -DDISPATCH_TEST_TIMEOUT=500' # exclude tests that require raw_connection functionality; not available in libuv proactor - - DISPATCH_CTEST_EXTRA='-E system_tests_tcp_adaptor|system_tests_http1_adaptor|system_tests_http2|system_tests_grpc|system_tests_http1_over_tcp' before_install: - bash ./macports.sh - export COLUMNS=80 - - yes | sudo port install cmake swig swig-python libuv jsoncpp libwebsockets nghttp2 cyrus-sasl2 pkgconfig python37 py37-pip + - yes | sudo port install cmake swig swig-python libuv jsoncpp libwebsockets cyrus-sasl2 pkgconfig python37 py37-pip # set aliases for CMake's PythonInterp and PythonLibs to find MacPort's `python` on the path first - sudo port select --set python python37 - sudo port select --set python3 python37 @@ -201,7 +172,6 @@ addons: - libssl-dev - sasl2-bin - swig - - libnghttp2-dev # documentation - asciidoc - asciidoctor diff --git a/CMakeLists.txt b/CMakeLists.txt index 914aaf6..e2e098e 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -118,11 +118,6 @@ message(STATUS "Found Proton: ${Proton_LIBRARIES} (found version \"${Proton_VERS # google benchmark tests are disabled by default OPTION(BUILD_BENCHMARKS "Enable building and running benchmarks with Google Benchmark" OFF) -# http2 support is optional -find_package(libnghttp2 1.33.0) -CMAKE_DEPENDENT_OPTION(USE_LIBNGHTTP2 "Use libnghttp2 for HTTP/2 support" ON - "libnghttp2_FOUND" OFF) - # Web Sockets find_package(LibWebSockets 3.0.1) CMAKE_DEPENDENT_OPTION(USE_LIBWEBSOCKETS "Use libwebsockets for WebSocket support" ON diff --git a/README.adoc b/README.adoc index 2e8e8f4..3a5be2b 100644 --- a/README.adoc +++ b/README.adoc @@ -48,7 +48,6 @@ packages installed: - python3-devel - cyrus-sasl-plain - cyrus-sasl-devel -- libnghttp2-devel - asciidoc (for building docs) - asciidoctor (for building docs) @@ -93,24 +92,6 @@ $ ./run.py === Test-only dependencies -The HTTP2 system tests (`tests/system_tests_http2.py`) use the Python Quart and hyper-h2 frameworks to start a HTTP2 server. -The HTTP2 system tests will run only if - -1. Python version >= 3.7 -2. Python Web Microframework Quart version >= 0.13 -3. curl is available -4. hyper-h2 is available (pure-Python implementation of a HTTP/2 protocol stack) - -The TCP system tests (tests/system_tests_tcp_adaptor.py) use the -Python selectors module when running echo clients and servers. -The TCP system tests run only if Python selectors is available. - -.Install quart, h2 and selectors -[source,shell script] ----- -pip3 install --user quart h2 selectors ----- - Websocket system tests use the Python websockets asyncio module. .Install websockets @@ -119,29 +100,6 @@ Websocket system tests use the Python websockets asyncio module. pip3 install --user websockets ---- -The gRPC system tests (tests/system_tests_grpc.py) use grpcio and protobuf modules. 
- -.Install gRPC libraries -[source,shell script] ----- -pip3 install --user grpcio protobuf ----- - -In order to regenerate the auto generated pb2 files used by system_tests_grpc.py, -you must also install the following dependency: - -[source,shell script] ----- -pip3 install --user grpcio-tools ----- - -And run the following command to generate grpc code: - -[source,shell script] ----- -python -m grpc_tools.protoc -I. --python_out=. --grpc_python_out=. ./friendship.proto ----- - The system tests are implemented using Python's unittest library. This library is used to run the tests by default. The tests can be also run using `xmlrunner` or `pytest`. Pytest can generate a JUnit-compatible XML report containing an entry for each Python test method. diff --git a/cmake/Findlibnghttp2.cmake b/cmake/Findlibnghttp2.cmake deleted file mode 100644 index f21ff39..0000000 --- a/cmake/Findlibnghttp2.cmake +++ /dev/null @@ -1,47 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - -# Sets LIBNGHTTP2_VERSION_STRING from nghttp2ver.h - -find_library(NGHTTP2_LIBRARIES - NAMES libnghttp2 nghttp2 -) - -find_path(NGHTTP2_INCLUDE_DIRS - NAMES "nghttp2/nghttp2.h" "nghttp2/nghttp2ver.h" - HINTS "${CMAKE_INSTALL_PREFIX}/include" - PATHS "/usr/include" -) - -if(NGHTTP2_INCLUDE_DIRS AND EXISTS "${NGHTTP2_INCLUDE_DIRS}/nghttp2/nghttp2ver.h") - # Extract the version info from nghttp2ver.h and set it in LIBNGHTTP2_VERSION_STRING - file(STRINGS "${NGHTTP2_INCLUDE_DIRS}/nghttp2/nghttp2ver.h" libnghttp2_version_str - REGEX "^#define[ \t]+NGHTTP2_VERSION[ \t]+\"[^\"]+\"") - string(REGEX REPLACE "^#define[ \t]+NGHTTP2_VERSION[ \t]+\"([^\"]+)\".*" "\\1" - LIBNGHTTP2_VERSION_STRING "${libnghttp2_version_str}") - unset(libnghttp2_version_str) -endif() - -if (LIBNGHTTP2_VERSION_STRING AND libnghttp2_FIND_VERSION AND (LIBNGHTTP2_VERSION_STRING VERSION_LESS libnghttp2_FIND_VERSION)) - message(STATUS "Found libnghttp2 version ${LIBNGHTTP2_VERSION_STRING} but at least ${libnghttp2_FIND_VERSION} is required. http2 support is disabled") -else() - include(FindPackageHandleStandardArgs) - find_package_handle_standard_args( - libnghttp2 DEFAULT_MSG LIBNGHTTP2_VERSION_STRING NGHTTP2_LIBRARIES NGHTTP2_INCLUDE_DIRS) -endif() diff --git a/dockerfiles/Dockerfile-fedora b/dockerfiles/Dockerfile-fedora index 6af7335..dd00631 100644 --- a/dockerfiles/Dockerfile-fedora +++ b/dockerfiles/Dockerfile-fedora @@ -30,9 +30,7 @@ FROM fedora:latest MAINTAINER "d...@qpid.apache.org" # Install required packages. 
Some in this list are from proton's INSTALL.md (https://github.com/apache/qpid-proton/blob/main/INSTALL.md) and the rest are from dispatch (https://github.com/apache/qpid-dispatch/blob/main/README) -RUN dnf -y install gcc gcc-c++ cmake openssl-devel cyrus-sasl-devel cyrus-sasl-plain cyrus-sasl-gssapi cyrus-sasl-md5 swig java-1.8.0-openjdk-devel git make valgrind emacs libwebsockets-devel python-devel libnghttp2-devel curl - -RUN pip3 install quart grpcio protobuf h2 selectors +RUN dnf -y install gcc gcc-c++ cmake openssl-devel cyrus-sasl-devel cyrus-sasl-plain cyrus-sasl-gssapi cyrus-sasl-md5 swig java-1.8.0-openjdk-devel git make valgrind emacs libwebsockets-devel python-devel curl # Create a main directory and clone the qpid-proton repo from github RUN mkdir /main && cd /main && git clone https://gitbox.apache.org/repos/asf/qpid-proton.git && cd /main/qpid-proton && mkdir /main/qpid-proton/build diff --git a/dockerfiles/Dockerfile-ubuntu b/dockerfiles/Dockerfile-ubuntu index 26eef78..72a461f 100644 --- a/dockerfiles/Dockerfile-ubuntu +++ b/dockerfiles/Dockerfile-ubuntu @@ -26,11 +26,9 @@ MAINTAINER "d...@qpid.apache.org" ARG DEBIAN_FRONTEND=noninteractive # Install all the required packages. Some in this list were picked off from proton's INSTALL.md (https://github.com/apache/qpid-proton/blob/main/INSTALL.md) and the rest are from dispatch (https://github.com/apache/qpid-dispatch/blob/main/README) RUN apt-get update && \ - apt-get install -y curl gcc g++ automake libwebsockets-dev libtool zlib1g-dev cmake libsasl2-dev libssl-dev libnghttp2-dev python3-dev libuv1-dev sasl2-bin swig maven git && \ + apt-get install -y curl gcc g++ automake libwebsockets-dev libtool zlib1g-dev cmake libsasl2-dev libssl-dev python3-dev libuv1-dev sasl2-bin swig maven git && \ apt-get -y clean -RUN pip3 install quart selectors grpcio protobuf h2 - RUN git clone https://gitbox.apache.org/repos/asf/qpid-dispatch.git && cd /qpid-dispatch && git submodule add https://gitbox.apache.org/repos/asf/qpid-proton.git && git submodule update --init WORKDIR /qpid-dispatch diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index a9908a7..7f243a9 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -245,8 +245,6 @@ file(COPY ${CMAKE_CURRENT_SOURCE_DIR}/policy-2/policy-photoserver-sasl.sasldb D file(COPY ${CMAKE_CURRENT_SOURCE_DIR}/policy-3/test-sender-receiver-limits.json DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/policy-3) file(COPY ${CMAKE_CURRENT_SOURCE_DIR}/policy-4/management-access.json DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/policy-4/) file(COPY ${CMAKE_CURRENT_SOURCE_DIR}/authservice.py DESTINATION ${CMAKE_CURRENT_BINARY_DIR}) -file(COPY ${CMAKE_CURRENT_SOURCE_DIR}/TCP_echo_server.py DESTINATION ${CMAKE_CURRENT_BINARY_DIR}) -file(COPY ${CMAKE_CURRENT_SOURCE_DIR}/TCP_echo_client.py DESTINATION ${CMAKE_CURRENT_BINARY_DIR}) # following install() functions will be called only if you do a make "install" install(FILES ${SYSTEM_TEST_FILES} @@ -274,4 +272,4 @@ endif() if(BUILD_BENCHMARKS) add_subdirectory(c_benchmarks) -endif() \ No newline at end of file +endif() diff --git a/tests/TCP_echo_client.py b/tests/TCP_echo_client.py deleted file mode 100755 index 05d58e1..0000000 --- a/tests/TCP_echo_client.py +++ /dev/null @@ -1,320 +0,0 @@ -#!/usr/bin/env python - -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. 
The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - -import argparse -import selectors -import signal -import socket -import sys -from threading import Thread -import time -import traceback - -from system_test import Logger -from system_test import TIMEOUT - - -class GracefulExitSignaler: - kill_now = False - - def __init__(self): - signal.signal(signal.SIGINT, self.exit_gracefully) - signal.signal(signal.SIGTERM, self.exit_gracefully) - - def exit_gracefully(self, signum, frame): - self.kill_now = True - - -def split_chunk_for_display(raw_bytes): - """ - Given some raw bytes, return a display string - Only show the beginning and end of largish (2xMAGIC_SIZE) arrays. - :param raw_bytes: - :return: display string - """ - MAGIC_SIZE = 50 # Content repeats after chunks this big - used by echo client, too - if len(raw_bytes) > 2 * MAGIC_SIZE: - result = repr(raw_bytes[:MAGIC_SIZE]) + " ... " + repr(raw_bytes[-MAGIC_SIZE:]) - else: - result = repr(raw_bytes) - return result - - -class TcpEchoClient: - - def __init__(self, prefix, host, port, size, count, timeout, logger): - """ - :param host: connect to this host - :param port: connect to this port - :param size: size of individual payload chunks in bytes - :param count: number of payload chunks - :param strategy: "1" Send one payload; # TODO more strategies - Recv one payload - :param logger: Logger() object - :return: - """ - # Start up - self.sock = None - self.prefix = prefix - self.host = host - self.port = int(port) - self.size = size - self.count = count - self.timeout = timeout - self.logger = logger - self.keep_running = True - self.is_running = False - self.exit_status = None - self.error = None - self._thread = Thread(target=self.run) - self._thread.daemon = True - self._thread.start() - - def run(self): - self.logger.log("%s Client is starting up" % self.prefix) - try: - start_time = time.time() - self.is_running = True - self.logger.log('%s Connecting to host:%s, port:%d, size:%d, count:%d' % - (self.prefix, self.host, self.port, self.size, self.count)) - total_sent = 0 - total_rcvd = 0 - - if self.count > 0 and self.size > 0: - # outbound payload only if count and size both greater than zero - payload_out = [] - out_list_idx = 0 # current _out array being sent - out_byte_idx = 0 # next-to-send in current array - out_ready_to_send = True - # Generate unique content for each message so you can tell where the message - # or fragment belongs in the whole stream. 
Chunks look like: - # b'[localhost:33333:6:0]ggggggggggggggggggggggggggggg' - # host: localhost - # port: 33333 - # index: 6 - # offset into message: 0 - CONTENT_CHUNK_SIZE = 50 # Content repeats after chunks this big - used by echo server, too - for idx in range(self.count): - body_msg = "" - padchar = "abcdefghijklmnopqrstuvwxyz@#$%"[idx % 30] - while len(body_msg) < self.size: - chunk = "[%s:%d:%d:%d]" % (self.host, self.port, idx, len(body_msg)) - padlen = CONTENT_CHUNK_SIZE - len(chunk) - chunk += padchar * padlen - body_msg += chunk - if len(body_msg) > self.size: - body_msg = body_msg[:self.size] - payload_out.append(bytearray(body_msg.encode())) - # incoming payloads - payload_in = [] - in_list_idx = 0 # current _in array being received - for i in range(self.count): - payload_in.append(bytearray()) - else: - # when count or size .LE. zero then just connect-disconnect - self.keep_running = False - - # set up connection - host_address = (self.host, self.port) - self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - self.sock.connect(host_address) - self.sock.setblocking(False) - - # set up selector - sel = selectors.DefaultSelector() - sel.register(self.sock, - selectors.EVENT_READ | selectors.EVENT_WRITE) - - # event loop - while self.keep_running: - if self.timeout > 0.0: - elapsed = time.time() - start_time - if elapsed > self.timeout: - self.exit_status = "%s Exiting due to timeout. Total sent= %d, total rcvd= %d" % \ - (self.prefix, total_sent, total_rcvd) - break - for key, mask in sel.select(timeout=0.1): - sock = key.fileobj - if mask & selectors.EVENT_READ: - recv_data = sock.recv(1024) - if recv_data: - total_rcvd = len(recv_data) - payload_in[in_list_idx].extend(recv_data) - if len(payload_in[in_list_idx]) == self.size: - self.logger.log("%s Rcvd message %d" % (self.prefix, in_list_idx)) - in_list_idx += 1 - if in_list_idx == self.count: - # Received all bytes of all chunks - done. - self.keep_running = False - # Verify the received data - if payload_in != payload_out: - for idxc in range(self.count): - if not payload_in[idxc] == payload_out[idxc]: - for idxs in range(self.size): - ob = payload_out[idxc][idxs] - ib = payload_in[idxc][idxs] - if ob != ib: - self.error = "%s ERROR Rcvd message verify fail. row:%d, col:%d, " \ - "expected:%s, actual:%s" \ - % (self.prefix, idxc, idxs, repr(ob), repr(ib)) - break - else: - out_ready_to_send = True - sel.modify(sock, selectors.EVENT_READ | selectors.EVENT_WRITE) - elif len(payload_in[in_list_idx]) > self.size: - self.error = "ERROR Received message too big. Expected:%d, actual:%d" % \ - (self.size, len(payload_in[in_list_idx])) - break - else: - pass # still accumulating a message - else: - # socket closed - self.keep_running = False - if not in_list_idx == self.count: - self.error = "ERROR server closed. Echoed %d of %d messages." 
% (in_list_idx, self.count) - if self.keep_running and mask & selectors.EVENT_WRITE: - if out_ready_to_send: - n_sent = self.sock.send(payload_out[out_list_idx][out_byte_idx:]) - total_sent += n_sent - out_byte_idx += n_sent - if out_byte_idx == self.size: - self.logger.log("%s Sent message %d" % (self.prefix, out_list_idx)) - out_byte_idx = 0 - out_list_idx += 1 - sel.modify(self.sock, selectors.EVENT_READ) # turn off write events - out_ready_to_send = False # turn on when rcvr receives - else: - pass # logger.log("DEBUG: ignoring EVENT_WRITE") - - # shut down - sel.unregister(self.sock) - self.sock.close() - - except Exception: - self.error = "ERROR: exception : '%s'" % traceback.format_exc() - self.sock.close() - - self.is_running = False - - def wait(self, timeout=TIMEOUT): - self.logger.log("%s Client is shutting down" % self.prefix) - self.keep_running = False - self._thread.join(timeout) - - -def main(argv): - retval = 0 - # parse args - p = argparse.ArgumentParser() - p.add_argument('--host', '-b', - help='Required target host') - p.add_argument('--port', '-p', type=int, - help='Required target port number') - p.add_argument('--size', '-s', type=int, default=100, const=1, nargs='?', - help='Size of payload in bytes must be >= 0. Size of zero connects and disconnects with no data traffic.') - p.add_argument('--count', '-c', type=int, default=1, const=1, nargs='?', - help='Number of payloads to process must be >= 0. Count of zero connects and disconnects with no data traffic.') - p.add_argument('--name', - help='Optional logger prefix') - p.add_argument('--timeout', '-t', type=float, default=0.0, const=1, nargs="?", - help='Timeout in seconds. Default value "0" disables timeouts') - p.add_argument('--log', '-l', - action='store_true', - help='Write activity log to console') - del argv[0] - args = p.parse_args(argv) - - # host - if args.host is None: - raise Exception("User must specify a host") - host = args.host - - # port - if args.port is None: - raise Exception("User must specify a port number") - port = args.port - - # size - if args.size < 0: - raise Exception("Size must be greater than or equal to zero") - size = args.size - - # count - if args.count < 0: - raise Exception("Count must be greater than or equal to zero") - count = args.count - - # name / prefix - prefix = args.name if args.name is not None else "ECHO_CLIENT (%d_%d_%d)" % \ - (port, size, count) - - # timeout - if args.timeout < 0.0: - raise Exception("Timeout must be greater than or equal to zero") - - signaller = GracefulExitSignaler() - logger = None - - try: - # logging - logger = Logger(title="%s host:%s port %d size:%d count:%d" % (prefix, host, port, size, count), - print_to_console=args.log, - save_for_dump=False) - - client = TcpEchoClient(prefix, host, port, size, count, args.timeout, logger) - - keep_running = True - while keep_running: - time.sleep(0.1) - if client.error is not None: - logger.log("%s Client stopped with error: %s" % (prefix, client.error)) - keep_running = False - retval = 1 - if client.exit_status is not None: - logger.log("%s Client stopped with status: %s" % (prefix, client.exit_status)) - keep_running = False - if signaller.kill_now: - logger.log("%s Process killed with signal" % prefix) - keep_running = False - if keep_running and not client.is_running: - logger.log("%s Client stopped with no error or status" % prefix) - keep_running = False - - except Exception: - client.error = "ERROR: exception : '%s'" % traceback.format_exc() - if logger is not None: - logger.log("%s 
Exception: %s" % (prefix, traceback.format_exc())) - retval = 1 - - if client.error is not None: - # write client errors to stderr - def eprint(*args, **kwargs): - print(*args, file=sys.stderr, **kwargs) - - elines = client.error.split("\n") - for line in elines: - eprint("ERROR:", prefix, line) - - return retval - - -if __name__ == "__main__": - sys.exit(main(sys.argv)) diff --git a/tests/TCP_echo_server.py b/tests/TCP_echo_server.py deleted file mode 100755 index 2da12d1..0000000 --- a/tests/TCP_echo_server.py +++ /dev/null @@ -1,397 +0,0 @@ -#!/usr/bin/env python - -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - -import argparse -import selectors -import signal -import socket -import sys -import time -import traceback -from threading import Condition, Thread -from typing import Union - -from system_test import Logger -from system_test import TIMEOUT - - -class ClientRecord: - """ - Object to register with the selector 'data' field - for incoming user connections. This is *not* used - for the listening socket. - This object holds the socketId in the address and - the inbound and outbound data list buffers for this - socket's payload. - """ - def __init__(self, address): - self.addr = address - self.inb = b'' - self.outb = b'' - - def __repr__(self): - return str(self.addr) + " len(in)=" + str(len(self.inb)) + " len(out)=" + str(len(self.outb)) - - def __str__(self): - return self.__repr__() - - -class GracefulExitSignaler: - kill_now = False - - def __init__(self): - signal.signal(signal.SIGINT, self.exit_gracefully) - signal.signal(signal.SIGTERM, self.exit_gracefully) - - def exit_gracefully(self, signum, frame): - self.kill_now = True - - -def split_chunk_for_display(raw_bytes): - """ - Given some raw bytes, return a display string - Only show the beginning and end of largish (2x CONTENT_CHUNK_SIZE) arrays. - :param raw_bytes: - :return: display string - """ - CONTENT_CHUNK_SIZE = 50 # Content repeats after chunks this big - used by echo client, too - if len(raw_bytes) > 2 * CONTENT_CHUNK_SIZE: - result = repr(raw_bytes[:CONTENT_CHUNK_SIZE]) + " ... 
" + repr(raw_bytes[-CONTENT_CHUNK_SIZE:]) - else: - result = repr(raw_bytes) - return result - - -class TcpEchoServer: - - def __init__(self, prefix="ECHO_SERVER", port: Union[str, int] = "0", echo_count=0, timeout=0.0, logger=None, - conn_stall=0.0, close_on_conn=False, close_on_data=False) -> None: - """ - Start echo server in separate thread - - :param prefix: log prefix - :param port: port to listen on - :param echo_count: exit after echoing this many bytes - :param timeout: exit after this many seconds - :param logger: Logger() object - """ - self.sock: socket.socket - self.prefix = prefix - self.port = int(port) - self.echo_count = echo_count - self.timeout = timeout - self.logger = logger - self.conn_stall = conn_stall - self.close_on_conn = close_on_conn - self.close_on_data = close_on_data - self.keep_running = True - self.HOST = '127.0.0.1' - self._cv = Condition() - self._is_running = None - self.exit_status = None - self.error = None - self._thread = Thread(target=self.run) - self._thread.daemon = True - self._thread.start() - - @property - def is_running(self): - with self._cv: - self._cv.wait_for(lambda: self._is_running is not None, timeout=10) - return self._is_running - - @is_running.setter - def is_running(self, value): - with self._cv: - self._is_running = value - self._cv.notify_all() - - def get_listening_port(self) -> int: - address, port, *_ = self.sock.getsockname() - return port - - def run(self): - """ - Run server in daemon thread. - A single thread runs multiple sockets through selectors. - Note that timeouts and such are done in line and processing stops for - all sockets when one socket is timing out. For the intended one-at-a-time - test cases this works but it is not general solution for all cases. - :return: - """ - try: - # set up spontaneous exit settings - start_time = time.time() - total_echoed = 0 - - # set up listening socket - try: - self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - self.sock.bind((self.HOST, self.port)) - self.sock.listen() - if self.port == 0: - self.port = self.get_listening_port() - self.sock.setblocking(False) - self.logger.log('%s Listening on host:%s, port:%s' % (self.prefix, self.HOST, self.port)) - except Exception: - self.error = ('%s Opening listen socket %s:%s exception: %s' % - (self.prefix, self.HOST, self.port, traceback.format_exc())) - self.logger.log(self.error) - return 1 - - # notify whoever is waiting on the condition variable for this - self.is_running = True - - # set up selector - sel = selectors.DefaultSelector() - sel.register(self.sock, selectors.EVENT_READ, data=None) - - # event loop - while True: - if not self.keep_running: - self.exit_status = "INFO: command shutdown:" - break - if self.timeout > 0.0: - elapsed = time.time() - start_time - if elapsed > self.timeout: - self.exit_status = "Exiting due to timeout. Total echoed = %d" % total_echoed - break - if self.echo_count > 0: - if total_echoed >= self.echo_count: - self.exit_status = "Exiting due to echo byte count. Total echoed = %d" % total_echoed - break - events = sel.select(timeout=0.1) - if events: - for key, mask in events: - if key.data is None: - if key.fileobj is self.sock: - self.do_accept(key.fileobj, sel, self.logger, self.conn_stall, self.close_on_conn) - else: - pass # Only listener 'sock' has None in opaque data field - else: - n_echoed = self.do_service(key, mask, sel, self.logger, self.close_on_data) - total_echoed += n_echoed if n_echoed > 0 else 0 - else: - pass # select timeout. probably. 
- - sel.unregister(self.sock) - self.sock.close() - - except Exception: - self.error = "ERROR: exception : '%s'" % traceback.format_exc() - - self.is_running = False - - def do_accept(self, sock, sel, logger, conn_stall, close_on_conn): - conn, addr = sock.accept() - logger.log('%s Accepted connection from %s:%d' % (self.prefix, addr[0], addr[1])) - if conn_stall > 0.0: - logger.log('%s Connection from %s:%d stall start' % (self.prefix, addr[0], addr[1])) - time.sleep(conn_stall) - logger.log('%s Connection from %s:%d stall end' % (self.prefix, addr[0], addr[1])) - if close_on_conn: - logger.log('%s Connection from %s:%d closing due to close_on_conn' % (self.prefix, addr[0], addr[1])) - conn.close() - return - conn.setblocking(False) - events = selectors.EVENT_READ | selectors.EVENT_WRITE - sel.register(conn, events, data=ClientRecord(addr)) - - def do_service(self, key, mask, sel, logger, close_on_data): - retval = 0 - sock = key.fileobj - data = key.data - if mask & selectors.EVENT_READ: - try: - recv_data = sock.recv(1024) - except IOError: - logger.log('%s Connection to %s:%d IOError: %s' % - (self.prefix, data.addr[0], data.addr[1], traceback.format_exc())) - sel.unregister(sock) - sock.close() - return 0 - except Exception: - self.error = ('%s Connection to %s:%d exception: %s' % - (self.prefix, data.addr[0], data.addr[1], traceback.format_exc())) - logger.log(self.error) - sel.unregister(sock) - sock.close() - return 1 - if recv_data: - data.outb += recv_data - if close_on_data: - logger.log('%s Connection to %s:%d closed due to close_on_data' % (self.prefix, data.addr[0], data.addr[1])) - sel.unregister(sock) - sock.close() - return 0 - logger.log('%s read from: %s:%d len:%d: %s' % (self.prefix, data.addr[0], data.addr[1], len(recv_data), - split_chunk_for_display(recv_data))) - sel.modify(sock, selectors.EVENT_READ | selectors.EVENT_WRITE, data=data) - else: - while data.outb: - logger.log('%s Client closed: flush client input to %s:%d' % (self.prefix, data.addr[0], data.addr[1])) - try: - sent = sock.send(data.outb) - data.outb = data.outb[sent:] - except IOError: - logger.log('%s Connection to %s:%d IOError: %s' % - (self.prefix, data.addr[0], data.addr[1], traceback.format_exc())) - sel.unregister(sock) - sock.close() - return 0 - except Exception: - self.error = ('%s Connection to %s:%d exception: %s' % - (self.prefix, data.addr[0], data.addr[1], traceback.format_exc())) - logger.log(self.error) - sel.unregister(sock) - sock.close() - return 1 - logger.log('%s Client closed: closing connection to %s:%d' % (self.prefix, data.addr[0], data.addr[1])) - sel.unregister(sock) - sock.close() - return 0 - if mask & selectors.EVENT_WRITE: - if data.outb: - try: - sent = sock.send(data.outb) - except IOError: - logger.log('%s Connection to %s:%d IOError: %s' % - (self.prefix, data.addr[0], data.addr[1], traceback.format_exc())) - sel.unregister(sock) - sock.close() - return 0 - except Exception: - self.error = ('%s Connection to %s:%d exception: %s' % - (self.prefix, data.addr[0], data.addr[1], traceback.format_exc())) - logger.log(self.error) - sel.unregister(sock) - sock.close() - return 1 - retval += sent - if sent > 0: - logger.log('%s write to : %s:%d len:%d: %s' % (self.prefix, data.addr[0], data.addr[1], sent, - split_chunk_for_display(data.outb[:sent]))) - else: - logger.log('%s write to : %s:%d len:0' % (self.prefix, data.addr[0], data.addr[1])) - data.outb = data.outb[sent:] - else: - sel.modify(sock, selectors.EVENT_READ, data=data) - return retval - - def wait(self, 
timeout=TIMEOUT): - self.logger.log("%s Server is shutting down" % self.prefix) - self.keep_running = False - self._thread.join(timeout) - - -def main(argv): - retval = 0 - logger = None - # parse args - p = argparse.ArgumentParser() - p.add_argument('--port', '-p', - help='Required listening port number') - p.add_argument('--name', - help='Optional logger prefix') - p.add_argument('--echo', '-e', type=int, default=0, const=1, nargs="?", - help='Exit after echoing this many bytes. Default value "0" disables exiting on byte count.') - p.add_argument('--timeout', '-t', type=float, default=0.0, const=1, nargs="?", - help='Timeout in seconds. Default value "0.0" disables timeouts') - p.add_argument('--log', '-l', - action='store_true', - help='Write activity log to console') - # Add controlled server misbehavior for testing conditions seen in the field - # Stall required to trigger Q2 testing for DISPATCH-1947 and improving test DISPATCH-1981 - p.add_argument('--connect-stall', type=float, default=0.0, const=1, nargs="?", - help='Accept connections but wait this many seconds before reading from socket. Default value "0.0" disables stall') - # Close on connect - exercises control paths scrutinized under DISPATCH-1968 - p.add_argument('--close-on-connect', - action='store_true', - help='Close client connection without reading from socket when listener connects. If stall is specified then stall before closing.') - # Close on data - exercises control paths scrutinized under DISPATCH-1968 - p.add_argument('--close-on-data', - action='store_true', - help='Close client connection as soon as data arrives.') - del argv[0] - args = p.parse_args(argv) - - # port - if args.port is None: - raise Exception("User must specify a port number") - port = args.port - - # name / prefix - prefix = args.name if args.name is not None else "ECHO_SERVER (%s)" % (str(port)) - - # echo - if args.echo < 0: - raise Exception("Echo count must be greater than zero") - - # timeout - if args.timeout < 0.0: - raise Exception("Timeout must be greater than or equal to zero") - - # timeout - if args.connect_stall < 0.0: - raise Exception("Connect-stall must be greater than or equal to zero") - - signaller = GracefulExitSignaler() - server = None - - try: - # logging - logger = Logger(title="%s port %s" % (prefix, port), - print_to_console=args.log, - save_for_dump=False) - - server = TcpEchoServer(prefix, port, args.echo, args.timeout, logger, - args.connect_stall, args.close_on_connect, args.close_on_data) - - keep_running = True - while keep_running: - time.sleep(0.1) - if server.error is not None: - logger.log("%s Server stopped with error: %s" % (prefix, server.error)) - keep_running = False - retval = 1 - if server.exit_status is not None: - logger.log("%s Server stopped with status: %s" % (prefix, server.exit_status)) - keep_running = False - if signaller.kill_now: - logger.log("%s Process killed with signal" % prefix) - keep_running = False - if keep_running and not server.is_running: - logger.log("%s Server stopped with no error or status" % prefix) - keep_running = False - - except Exception: - if logger is not None: - logger.log("%s Exception: %s" % (prefix, traceback.format_exc())) - retval = 1 - - if server is not None and server.sock is not None: - server.sock.close() - - return retval - - -if __name__ == "__main__": - sys.exit(main(sys.argv)) diff --git a/tests/http2_slow_q2_server.py b/tests/http2_slow_q2_server.py deleted file mode 100644 index e2c6a3e..0000000 --- a/tests/http2_slow_q2_server.py +++ /dev/null @@ 
-1,116 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - -import socket -import signal -import sys -import os -import h2.connection -import h2.events -import h2.config -import h2.errors - -BYTES = 16384 - - -def receive_signal(signalNumber, frame): - print('Received:', signalNumber) - sys.exit(0) - - -def send_response(event, conn): - """ - conn.close_connection() sends a goaway frame to the client - and closes the connection. - """ - stream_id = event.stream_id - conn.send_headers(stream_id=stream_id, - headers=[(':status', '200'), ('server', 'h2_slow_q2_server/0.1.0')]) - conn.send_data(stream_id=stream_id, - data=b'Success!', - end_stream=True) - - -def handle_events(conn, events): - for event in events: - if isinstance(event, h2.events.DataReceived): - # When the server receives a DATA frame from the router, we send back a WINDOW_UPDATE frame - # with a window size increment of only 1k (1024 bytes) - # This pushes the router into q2 since it is able to only send two qd_buffers at a time. - conn.increment_flow_control_window(1024, None) - conn.increment_flow_control_window(1024, event.stream_id) - elif isinstance(event, h2.events.StreamEnded): - send_response(event, conn) - - -def handle(sock): - config = h2.config.H2Configuration(client_side=False) - - # The default initial window per HTTP2 spec is 64K. - # That means that the router is allowed to send only 64k before it needs more WINDOW_UPDATE frames - # providing more credit for the router to send more data. 
- conn = h2.connection.H2Connection(config=config) - conn.initiate_connection() - sock.sendall(conn.data_to_send()) - - while True: - data = None - try: - data = sock.recv(BYTES) - except: - pass - if not data: - break - try: - events = conn.receive_data(data) - except Exception as e: - print(e) - break - handle_events(conn, events) - data_to_send = conn.data_to_send() - if data_to_send: - sock.sendall(data_to_send) - - -def main(): - signal.signal(signal.SIGHUP, receive_signal) - signal.signal(signal.SIGINT, receive_signal) - signal.signal(signal.SIGQUIT, receive_signal) - signal.signal(signal.SIGILL, receive_signal) - signal.signal(signal.SIGTERM, receive_signal) - - port = os.getenv('SERVER_LISTEN_PORT') - if port is None: - raise RuntimeError("Environment variable `SERVER_LISTEN_PORT` is not set.") - - sock = socket.socket() - sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) - sock.bind(('0.0.0.0', int(port))) - sock.listen(5) - - while True: - # The accept method blocks until someone attempts to connect to our TCP - # port: when they do, it returns a tuple: the first element is a new - # socket object, the second element is a tuple of the address the new - # connection is from - handle(sock.accept()[0]) - - -if __name__ == '__main__': - main() diff --git a/tests/tox.ini.in b/tests/tox.ini.in index 96786c2..237c9e5 100644 --- a/tests/tox.ini.in +++ b/tests/tox.ini.in @@ -139,11 +139,6 @@ ignore = W503, W504, -exclude = - # TODO(DISPATCH-1974) generated by gRPC tooling - friendship_pb2.py, - friendship_pb2_grpc.py, - [pytest] # https://github.com/pytest-dev/pytest/blob/main/src/_pytest/junitxml.py # do not filter test file names @@ -260,24 +255,6 @@ ignore_missing_imports = True [mypy-werkzeug.*] ignore_missing_imports = True -[mypy-selectors] -ignore_missing_imports = True - -[mypy-h2.*] -ignore_missing_imports = True - -[mypy-google.protobuf] -ignore_missing_imports = True - -[mypy-grpc] -ignore_missing_imports = True - -[mypy-grpcio] -ignore_missing_imports = True - -[mypy-protobuf] -ignore_missing_imports = True - [mypy-websockets] ignore_missing_imports = True --------------------------------------------------------------------- To unsubscribe, e-mail: commits-unsubscr...@qpid.apache.org For additional commands, e-mail: commits-h...@qpid.apache.org