Script 'mail_helper' called by obssrc Hello community, here is the log from the commit of package cpp-httplib for openSUSE:Factory checked in at 2026-05-06 19:17:51 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ Comparing /work/SRC/openSUSE:Factory/cpp-httplib (Old) and /work/SRC/openSUSE:Factory/.cpp-httplib.new.30200 (New) ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Package is "cpp-httplib" Wed May 6 19:17:51 2026 rev:19 rq:1351009 version:0.43.3 Changes: -------- --- /work/SRC/openSUSE:Factory/cpp-httplib/cpp-httplib.changes 2026-04-28 11:53:57.975514111 +0200 +++ /work/SRC/openSUSE:Factory/.cpp-httplib.new.30200/cpp-httplib.changes 2026-05-06 19:18:40.663014884 +0200 @@ -1,0 +2,19 @@ +Tue May 5 15:20:24 UTC 2026 - Marius Grossu <[email protected]> + +- Update to 0.43.3: + * Fix OSS-Fuzz #508342856: cap Content-Length reservation by payload_max_length_ to prevent excessive memory allocation (2d2efe4) + * Fix OSS-Fuzz #508087118: avoid stack overflow in str2tag (92aecf8) + * Fuzzing / tests + * Run all fuzzers via make fuzz_test (cae7534) + * Add OSS-Fuzz #508370122 reproducer to client_fuzzer corpus (b223e29) + * Make fuzz_test robust to missing corpus files (35c4026) + * Drop Str2tagTest unit test that broke split / -fno-exceptions builds (f6524c0) + * Document str2tag_core's compile-time-only role (40e1846) +- 0.43.2: + * Reproducer test for #2431 (getaddrinfo_a use-after-free) + * Fix #2431: drop getaddrinfo_a path (stack-use-after-free) + * Add client fuzzing harness + * Fix #2435: allow mmap to open files held open for writing + * Re-enable getaddrinfo_a with worker-completion wait (#2431) + +------------------------------------------------------------------- Old: ---- cpp-httplib-0.43.1.tar.gz New: ---- cpp-httplib-0.43.3.tar.gz ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ Other differences: ------------------ ++++++ cpp-httplib.spec ++++++ --- /var/tmp/diff_new_pack.ixw5KU/_old 2026-05-06 19:18:41.363043732 +0200 +++ /var/tmp/diff_new_pack.ixw5KU/_new 2026-05-06 19:18:41.367043897 +0200 @@ -20,7 +20,7 @@ %define sover 0.43 %define libver 0_43 Name: cpp-httplib -Version: 0.43.1 +Version: 0.43.3 Release: 0 Summary: A C++11 HTTP/HTTPS server and client library License: MIT ++++++ cpp-httplib-0.43.1.tar.gz -> cpp-httplib-0.43.3.tar.gz ++++++ diff -urN '--exclude=CVS' '--exclude=.cvsignore' 
'--exclude=.svn' '--exclude=.svnignore' old/cpp-httplib-0.43.1/.github/workflows/test.yaml new/cpp-httplib-0.43.3/.github/workflows/test.yaml --- old/cpp-httplib-0.43.1/.github/workflows/test.yaml 2026-04-20 07:48:27.000000000 +0200 +++ new/cpp-httplib-0.43.3/.github/workflows/test.yaml 2026-05-04 09:19:49.000000000 +0200 @@ -25,7 +25,9 @@ cancel-in-progress: true env: - GTEST_FILTER: ${{ github.event.inputs.gtest_filter || '*' }} + # Exclude *_Online tests by default — they hit external services and flake on + # CI runners. Run with workflow_dispatch + a custom filter to include them. + GTEST_FILTER: ${{ github.event.inputs.gtest_filter || '-*_Online' }} jobs: style-check: @@ -75,6 +77,7 @@ github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name) || (github.event_name == 'workflow_dispatch' && github.event.inputs.test_linux == 'true') strategy: + fail-fast: false matrix: tls_backend: [openssl, mbedtls, wolfssl] name: ubuntu (${{ matrix.tls_backend }}) @@ -101,7 +104,10 @@ LSAN_OPTIONS: suppressions=lsan_suppressions.txt - name: build and run tests (Mbed TLS) if: matrix.tls_backend == 'mbedtls' - run: cd test && make test_split_mbedtls && make test_mbedtls_parallel + # Run mbedTLS shards with reduced parallelism — under ASAN+mbedTLS the + # default 4 shards overload CI runners enough that timing-sensitive + # ServerTest cases flake on first-request keep-alive reuse. + run: cd test && make test_split_mbedtls && SHARDS=2 make test_mbedtls_parallel - name: build and run tests (wolfSSL) if: matrix.tls_backend == 'wolfssl' run: cd test && make test_split_wolfssl && make test_wolfssl_parallel @@ -114,6 +120,92 @@ - name: build and run ThreadPool test run: cd test && make test_thread_pool && ./test_thread_pool + # Reproducer for https://github.com/yhirose/cpp-httplib/issues/2431. + # On Linux/glibc, getaddrinfo_with_timeout() schedules an asynchronous + # DNS lookup with getaddrinfo_a(GAI_NOWAIT) using a stack-local gaicb. 
+ # When gai_suspend() hits the connection timeout, gai_cancel() is called + # but does not block; the resolver worker can later write back into the + # destroyed stack frame. To make the worker actually reach that write, + # the test job runs a loopback UDP responder (test/dns_test_fixture.py) + # that delays its reply past the test's 1s timeout, and uses an iptables + # NAT rule so glibc's lookups land on that fixture instead of a real + # nameserver. With ASAN's detect_stack_use_after_return enabled, the + # late write-back is reported as a stack-use-after-return. + issue-2431-repro: + runs-on: ubuntu-latest + if: > + (github.event_name == 'push') || + (github.event_name == 'pull_request' && + github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name) || + (github.event_name == 'workflow_dispatch' && github.event.inputs.test_linux == 'true') + name: issue-2431 repro (Linux + ASAN) + # Bound the whole job in case anything in the test harness hangs + # unexpectedly. With the fixture in place a normal run is well under + # a minute either way (ASAN abort on broken HEAD, clean pass on fix). + timeout-minutes: 5 + env: + DNS_FIXTURE_PORT: "15353" + DNS_FIXTURE_DELAY: "3" + steps: + - name: checkout + uses: actions/checkout@v4 + - name: install libraries + run: | + sudo apt-get update + sudo apt-get install -y libssl-dev zlib1g-dev libbrotli-dev \ + libzstd-dev libcurl4-openssl-dev iptables util-linux iproute2 + - name: start loopback DNS test fixture + run: | + # Force glibc through its DNS code path: Ubuntu's default + # nsswitch short-circuits to NOTFOUND through mdns4_minimal, + # which would skip the buggy code entirely. + sudo sed -i 's/^hosts:.*/hosts: dns/' /etc/nsswitch.conf + # Run the loopback fixture (delayed UDP responder). + python3 test/dns_test_fixture.py "$DNS_FIXTURE_PORT" "$DNS_FIXTURE_DELAY" \ + >/tmp/dns_fixture.log 2>&1 & + echo $! 
| sudo tee /tmp/dns_fixture.pid >/dev/null + # Wait for the fixture to start listening. + for _ in $(seq 1 50); do + if ss -lun "( sport = :$DNS_FIXTURE_PORT )" | grep -q ":$DNS_FIXTURE_PORT"; then + break + fi + sleep 0.1 + done + ss -lun "( sport = :$DNS_FIXTURE_PORT )" | grep -q ":$DNS_FIXTURE_PORT" \ + || { echo "fixture failed to start"; cat /tmp/dns_fixture.log; exit 1; } + # Send the test process's DNS lookups to the loopback fixture. + # NAT only the local OUTPUT chain; conntrack handles the reply path. + sudo iptables -t nat -I OUTPUT -p udp --dport 53 \ + -j REDIRECT --to-port "$DNS_FIXTURE_PORT" + # Sanity check: a query must take at least the fixture delay + # and resolve to NXDOMAIN (proving traffic reaches the fixture). + start=$(date +%s) + getent hosts unresolvable-host.invalid >/dev/null 2>&1 || true + elapsed=$(( $(date +%s) - start )) + if [ "$elapsed" -lt 2 ]; then + echo "ERROR: lookup returned in ${elapsed}s; fixture not in path" >&2 + exit 1 + fi + echo "[ok] DNS lookups are routed to the test fixture (took ${elapsed}s)" + - name: build test binary + run: cd test && make test + - name: run GetAddrInfoAsyncCancelTest + run: | + cd test + ARCH=$(uname -m) + CPPHTTPLIB_TEST_ISSUE_2431=1 \ + ASAN_OPTIONS=detect_stack_use_after_return=1 \ + LSAN_OPTIONS=suppressions=lsan_suppressions.txt \ + setarch "$ARCH" -R \ + ./test --gtest_filter='GetAddrInfoAsyncCancelTest.*' + - name: tear down test fixture + if: always() + run: | + sudo iptables -t nat -F OUTPUT || true + if [ -f /tmp/dns_fixture.pid ]; then + sudo kill "$(cat /tmp/dns_fixture.pid)" 2>/dev/null || true + fi + macos: runs-on: macos-latest if: > @@ -122,6 +214,7 @@ github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name) || (github.event_name == 'workflow_dispatch' && github.event.inputs.test_macos == 'true') strategy: + fail-fast: false matrix: tls_backend: [openssl, mbedtls, wolfssl] name: macos (${{ matrix.tls_backend }}) @@ -141,7 +234,10 @@ 
LSAN_OPTIONS: suppressions=lsan_suppressions.txt - name: build and run tests (Mbed TLS) if: matrix.tls_backend == 'mbedtls' - run: cd test && make test_split_mbedtls && make test_mbedtls_parallel + # macOS runners under ASAN+mbedTLS still flake at SHARDS=2 (rapid + # bind/connect on the fixture's fixed port races on the slower + # macos-latest runner). Serialize fully here; ubuntu stays at 2. + run: cd test && make test_split_mbedtls && SHARDS=1 make test_mbedtls_parallel - name: build and run tests (wolfSSL) if: matrix.tls_backend == 'wolfssl' run: cd test && make test_split_wolfssl && make test_wolfssl_parallel @@ -162,6 +258,7 @@ github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name) || (github.event_name == 'workflow_dispatch' && github.event.inputs.test_windows == 'true') strategy: + fail-fast: false matrix: config: - with_ssl: false @@ -228,7 +325,7 @@ for ($i = 0; $i -lt $shards; $i++) { $log = "shard_${i}.log" $procs += Start-Process -FilePath ./Release/httplib-test.exe ` - -ArgumentList "--gtest_color=yes","--gtest_filter=${{ github.event.inputs.gtest_filter || '*' }}" ` + -ArgumentList "--gtest_color=yes","--gtest_filter=${{ github.event.inputs.gtest_filter || '-*_Online' }}" ` -NoNewWindow -PassThru -RedirectStandardOutput $log -RedirectStandardError "${log}.err" ` -Environment @{ GTEST_TOTAL_SHARDS="$shards"; GTEST_SHARD_INDEX="$i" } } @@ -236,11 +333,14 @@ $failed = $false for ($i = 0; $i -lt $shards; $i++) { $log = "shard_${i}.log" - if (Select-String -Path $log -Pattern "\[ PASSED \]" -Quiet) { + $proc = $procs[$i] + $hasPassed = Select-String -Path $log -Pattern "\[ PASSED \]" -Quiet + $hasFailed = Select-String -Path $log -Pattern "\[ FAILED \]" -Quiet + if ($hasPassed -and -not $hasFailed -and $proc.ExitCode -eq 0) { $passed = (Select-String -Path $log -Pattern "\[ PASSED \]").Line Write-Host "Shard ${i}: $passed" } else { - Write-Host "=== Shard $i FAILED ===" + Write-Host "=== Shard $i FAILED 
(exit=$($proc.ExitCode)) ===" Get-Content $log if (Test-Path "${log}.err") { Get-Content "${log}.err" } $failed = $true diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/cpp-httplib-0.43.1/.gitignore new/cpp-httplib-0.43.3/.gitignore --- old/cpp-httplib-0.43.1/.gitignore 2026-04-20 07:48:27.000000000 +0200 +++ new/cpp-httplib-0.43.3/.gitignore 2026-05-04 09:19:49.000000000 +0200 @@ -43,6 +43,9 @@ test/test_wolfssl test/test_no_tls test/server_fuzzer +test/client_fuzzer +test/header_parser_fuzzer +test/url_parser_fuzzer test/test_proxy test/test_proxy_mbedtls test/test_proxy_wolfssl diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/cpp-httplib-0.43.1/docs-src/config.toml new/cpp-httplib-0.43.3/docs-src/config.toml --- old/cpp-httplib-0.43.1/docs-src/config.toml 2026-04-20 07:48:27.000000000 +0200 +++ new/cpp-httplib-0.43.3/docs-src/config.toml 2026-05-04 09:19:49.000000000 +0200 @@ -4,7 +4,7 @@ [site] title = "cpp-httplib" -version = "0.43.1" +version = "0.43.3" hostname = "https://yhirose.github.io" base_path = "/cpp-httplib" footer_message = "© 2026 Yuji Hirose. All rights reserved." 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/cpp-httplib-0.43.1/httplib.h new/cpp-httplib-0.43.3/httplib.h --- old/cpp-httplib-0.43.1/httplib.h 2026-04-20 07:48:27.000000000 +0200 +++ new/cpp-httplib-0.43.3/httplib.h 2026-05-04 09:19:49.000000000 +0200 @@ -8,8 +8,8 @@ #ifndef CPPHTTPLIB_HTTPLIB_H #define CPPHTTPLIB_HTTPLIB_H -#define CPPHTTPLIB_VERSION "0.43.1" -#define CPPHTTPLIB_VERSION_NUM "0x002b01" +#define CPPHTTPLIB_VERSION "0.43.3" +#define CPPHTTPLIB_VERSION_NUM "0x002b03" #ifdef _WIN32 #if defined(_WIN32_WINNT) && _WIN32_WINNT < 0x0A00 @@ -5319,8 +5319,9 @@ auto wpath = u8string_to_wstring(path); if (wpath.empty()) { return false; } - hFile_ = ::CreateFile2(wpath.c_str(), GENERIC_READ, FILE_SHARE_READ, - OPEN_EXISTING, NULL); + hFile_ = + ::CreateFile2(wpath.c_str(), GENERIC_READ, + FILE_SHARE_READ | FILE_SHARE_WRITE, OPEN_EXISTING, NULL); if (hFile_ == INVALID_HANDLE_VALUE) { return false; } @@ -5907,56 +5908,50 @@ return 0; #elif defined(_GNU_SOURCE) && defined(__GLIBC__) && \ (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 2)) - // Linux implementation using getaddrinfo_a for asynchronous DNS resolution - struct gaicb request; + // #2431: gai_cancel() is non-blocking and may return EAI_NOTCANCELED while + // the resolver worker still references the stack-local gaicb. The cancel + // path therefore waits (gai_suspend with no timeout) for the worker to + // actually finish before letting the stack frame go. The trade-off is that + // a wedged DNS server can hold this thread for the system resolver timeout + // (~30s by default) past the caller's connection timeout. 
+ struct gaicb request {}; struct gaicb *requests[1] = {&request}; - struct sigevent sevp; - struct timespec timeout; + struct sigevent sevp {}; + struct timespec timeout { + timeout_sec, 0 + }; - // Initialize the request structure - memset(&request, 0, sizeof(request)); request.ar_name = node; request.ar_service = service; request.ar_request = hints; - - // Set up timeout - timeout.tv_sec = timeout_sec; - timeout.tv_nsec = 0; - - // Initialize sigevent structure (not used, but required) - memset(&sevp, 0, sizeof(sevp)); sevp.sigev_notify = SIGEV_NONE; - // Start asynchronous resolution - int start_result = getaddrinfo_a(GAI_NOWAIT, requests, 1, &sevp); - if (start_result != 0) { return start_result; } + int rc = getaddrinfo_a(GAI_NOWAIT, requests, 1, &sevp); + if (rc != 0) { return rc; } - // Wait for completion with timeout - int wait_result = - gai_suspend((const struct gaicb *const *)requests, 1, &timeout); + auto cleanup = scope_exit([&] { + if (request.ar_result) { freeaddrinfo(request.ar_result); } + }); + + int wait_result = gai_suspend(requests, 1, &timeout); if (wait_result == 0 || wait_result == EAI_ALLDONE) { - // Completed successfully, get the result int gai_result = gai_error(&request); if (gai_result == 0) { *res = request.ar_result; + request.ar_result = nullptr; return 0; - } else { - // Clean up on error - if (request.ar_result) { freeaddrinfo(request.ar_result); } - return gai_result; - } - } else if (wait_result == EAI_AGAIN) { - // Timeout occurred, cancel the request - gai_cancel(&request); - return EAI_AGAIN; - } else { - // Other error occurred - gai_cancel(&request); - return wait_result; + } + return gai_result; + } + + gai_cancel(&request); + while (gai_error(&request) == EAI_INPROGRESS) { + gai_suspend(requests, 1, nullptr); } + return wait_result; #else - // Fallback implementation using thread-based timeout for other Unix systems + // Fallback implementation using thread-based timeout for other Unix systems. 
struct GetAddrInfoState { ~GetAddrInfoState() { @@ -6366,6 +6361,10 @@ } } +// Recursive form retained so operator""_t below can compute hashes for +// switch-case labels at compile time (C++11 constexpr forbids loops). Do not +// call from runtime paths with arbitrary-length inputs — use str2tag() +// instead, which is iterative and stack-safe. inline constexpr unsigned int str2tag_core(const char *s, size_t l, unsigned int h) { return (l == 0) @@ -6379,7 +6378,16 @@ } inline unsigned int str2tag(const std::string &s) { - return str2tag_core(s.data(), s.size(), 0); + // Iterative form of str2tag_core: the recursive constexpr version is kept + // for compile-time UDL evaluation of short string literals, but at runtime + // we may receive arbitrarily long inputs (e.g. fuzzed Content-Type) that + // would blow the stack with one frame per character. + unsigned int h = 0; + for (auto c : s) { + h = (((std::numeric_limits<unsigned int>::max)() >> 6) & h * 33) ^ + static_cast<unsigned char>(c); + } + return h; } namespace udl { @@ -13637,7 +13645,15 @@ output_error_log(error, &req); return false; } - res.body.reserve(static_cast<size_t>(len)); + // Cap the reservation by payload_max_length_ to avoid OOM when a + // hostile or malformed server sends an enormous Content-Length. + // The actual body read below is bounded by payload_max_length_, + // so reserving more than that is never useful. + auto reserve_len = static_cast<size_t>(len); + if (payload_max_length_ > 0 && reserve_len > payload_max_length_) { + reserve_len = payload_max_length_; + } + res.body.reserve(reserve_len); } } @@ -17997,6 +18013,9 @@ err.code = impl::map_mbedtls_error(ret, err.sys_errno); err.backend_code = static_cast<uint64_t>(-ret); impl::mbedtls_last_error() = ret; + // mbedTLS signals a clean close_notify via a negative error code rather + // than 0; surface it as a clean EOF the way OpenSSL/wolfSSL do. 
+ if (err.code == ErrorCode::PeerClosed) { return 0; } return -1; } diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/cpp-httplib-0.43.1/test/Makefile new/cpp-httplib-0.43.3/test/Makefile --- old/cpp-httplib-0.43.1/test/Makefile 2026-04-20 07:48:27.000000000 +0200 +++ new/cpp-httplib-0.43.3/test/Makefile 2026-05-04 09:19:49.000000000 +0200 @@ -67,22 +67,29 @@ define run_parallel @echo "Running $(1) with $(SHARDS) shards in parallel..." - @fail=0; \ + @fail=0; pids=""; \ for i in $$(seq 0 $$(($(SHARDS) - 1))); do \ GTEST_TOTAL_SHARDS=$(SHARDS) GTEST_SHARD_INDEX=$$i \ LSAN_OPTIONS=suppressions=lsan_suppressions.txt \ $(SETARCH) ./$(1) --gtest_color=yes > $(1)_shard_$$i.log 2>&1 & \ + pids="$$pids $$!"; \ done; \ - wait; \ - for i in $$(seq 0 $$(($(SHARDS) - 1))); do \ - if ! grep -q "\[ PASSED \]" $(1)_shard_$$i.log; then \ - echo "=== Shard $$i FAILED ==="; \ - cat $(1)_shard_$$i.log; \ - fail=1; \ - else \ - passed=$$(grep "\[ PASSED \]" $(1)_shard_$$i.log); \ + exits=""; \ + for pid in $$pids; do \ + wait $$pid; exits="$$exits $$?"; \ + done; \ + i=0; \ + for ec in $$exits; do \ + log=$(1)_shard_$$i.log; \ + if grep -q "\[ PASSED \]" $$log && ! grep -q "\[ FAILED \]" $$log && [ $$ec -eq 0 ]; then \ + passed=$$(grep "\[ PASSED \]" $$log); \ echo "Shard $$i: $$passed"; \ + else \ + echo "=== Shard $$i FAILED (exit=$$ec) ==="; \ + cat $$log; \ + fail=1; \ fi; \ + i=$$((i+1)); \ done; \ if [ $$fail -ne 0 ]; then exit 1; fi; \ echo "All shards passed." @@ -244,16 +251,38 @@ test_proxy_wolfssl : test_proxy.cc ../httplib.h Makefile cert.pem $(CXX) -o $@ -I.. $(CXXFLAGS) test_proxy.cc $(TEST_ARGS_WOLFSSL) -# Runs server_fuzzer.cc based on value of $(LIB_FUZZING_ENGINE). -# Usage: make fuzz_test LIB_FUZZING_ENGINE=/path/to/libFuzzer -fuzz_test: server_fuzzer - ./server_fuzzer fuzzing/corpus/* +# Runs all fuzz harnesses based on the value of $(LIB_FUZZING_ENGINE). 
+# By default LIB_FUZZING_ENGINE is standalone_fuzz_target_runner.o, so each +# fuzzer is replayed over its regression corpus. +# Override for actual fuzzing: +# make fuzz_test LIB_FUZZING_ENGINE=/path/to/libFuzzer +fuzz_test: server_fuzzer client_fuzzer header_parser_fuzzer url_parser_fuzzer + @m=""; for f in fuzzing/corpus/[0-9]* fuzzing/corpus/issue1264 fuzzing/corpus/clusterfuzz-testcase-minimized-server_fuzzer-*; do if [ -f "$$f" ]; then m="$$m $$f"; fi; done; \ + if [ -n "$$m" ]; then echo "./server_fuzzer$$m"; ./server_fuzzer $$m; else echo "(no server_fuzzer corpus)"; fi + @m=""; for f in fuzzing/corpus/clusterfuzz-testcase-minimized-client_fuzzer-*; do if [ -f "$$f" ]; then m="$$m $$f"; fi; done; \ + if [ -n "$$m" ]; then echo "./client_fuzzer$$m"; ./client_fuzzer $$m; else echo "(no client_fuzzer corpus)"; fi + @m=""; for f in fuzzing/corpus/clusterfuzz-testcase-minimized-header_parser_fuzzer-*; do if [ -f "$$f" ]; then m="$$m $$f"; fi; done; \ + if [ -n "$$m" ]; then echo "./header_parser_fuzzer$$m"; ./header_parser_fuzzer $$m; else echo "(no header_parser_fuzzer corpus)"; fi + @m=""; for f in fuzzing/corpus/clusterfuzz-testcase-minimized-url_parser_fuzzer-*; do if [ -f "$$f" ]; then m="$$m $$f"; fi; done; \ + if [ -n "$$m" ]; then echo "./url_parser_fuzzer$$m"; ./url_parser_fuzzer $$m; else echo "(no url_parser_fuzzer corpus)"; fi # Fuzz target, so that you can choose which $(LIB_FUZZING_ENGINE) to use. server_fuzzer : fuzzing/server_fuzzer.cc ../httplib.h standalone_fuzz_target_runner.o $(CXX) -o $@ -I.. $(CXXFLAGS) $< $(OPENSSL_SUPPORT) $(ZLIB_SUPPORT) $(BROTLI_SUPPORT) $(LIB_FUZZING_ENGINE) $(ZSTD_SUPPORT) $(LIBS) @file $@ +client_fuzzer : fuzzing/client_fuzzer.cc ../httplib.h standalone_fuzz_target_runner.o + $(CXX) -o $@ -I.. 
$(CXXFLAGS) $< $(OPENSSL_SUPPORT) $(ZLIB_SUPPORT) $(BROTLI_SUPPORT) $(LIB_FUZZING_ENGINE) $(ZSTD_SUPPORT) $(LIBS) + @file $@ + +header_parser_fuzzer : fuzzing/header_parser_fuzzer.cc ../httplib.h standalone_fuzz_target_runner.o + $(CXX) -o $@ -I.. $(CXXFLAGS) $< $(OPENSSL_SUPPORT) $(ZLIB_SUPPORT) $(BROTLI_SUPPORT) $(LIB_FUZZING_ENGINE) $(ZSTD_SUPPORT) $(LIBS) + @file $@ + +url_parser_fuzzer : fuzzing/url_parser_fuzzer.cc ../httplib.h standalone_fuzz_target_runner.o + $(CXX) -o $@ -I.. $(CXXFLAGS) $< $(OPENSSL_SUPPORT) $(ZLIB_SUPPORT) $(BROTLI_SUPPORT) $(LIB_FUZZING_ENGINE) $(ZSTD_SUPPORT) $(LIBS) + @file $@ + # Standalone fuzz runner, which just reads inputs from fuzzing/corpus/ dir and # feeds it to server_fuzzer. standalone_fuzz_target_runner.o : fuzzing/standalone_fuzz_target_runner.cpp @@ -266,5 +295,5 @@ ./gen-certs.sh clean: - rm -rf test test_split test_mbedtls test_split_mbedtls test_wolfssl test_split_wolfssl test_no_tls, test_split_no_tls test_proxy test_proxy_mbedtls test_proxy_wolfssl test_benchmark server_fuzzer *.pem *.0 *.o *.1 *.srl httplib.h httplib.cc _build* *.dSYM *_shard_*.log cpp-httplib + rm -rf test test_split test_mbedtls test_split_mbedtls test_wolfssl test_split_wolfssl test_no_tls, test_split_no_tls test_proxy test_proxy_mbedtls test_proxy_wolfssl test_benchmark server_fuzzer client_fuzzer header_parser_fuzzer url_parser_fuzzer *.pem *.0 *.o *.1 *.srl httplib.h httplib.cc _build* *.dSYM *_shard_*.log cpp-httplib diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/cpp-httplib-0.43.1/test/dns_test_fixture.py new/cpp-httplib-0.43.3/test/dns_test_fixture.py --- old/cpp-httplib-0.43.1/test/dns_test_fixture.py 1970-01-01 01:00:00.000000000 +0100 +++ new/cpp-httplib-0.43.3/test/dns_test_fixture.py 2026-05-04 09:19:49.000000000 +0200 @@ -0,0 +1,87 @@ +#!/usr/bin/env python3 +"""Delayed UDP responder used as a loopback test fixture. 
+ +This is a self-contained test fixture for the GetAddrInfoAsyncCancelTest +cases (reproducer for cpp-httplib issue #2431). It is NOT a general-purpose +nameserver and is only intended to run on 127.0.0.1 inside the test job's +own runner / container. + +What it does +------------ +Binds a UDP socket on 127.0.0.1:<port>, accepts well-formed DNS queries +from the test process, waits <delay_seconds>, then sends back a minimal +NXDOMAIN reply. The deliberate delay is what makes the bug reproducible: + + * The test calls getaddrinfo_with_timeout() with timeout_sec=1. + * gai_suspend() returns EAI_AGAIN after 1s; the function returns and + its stack frame is destroyed. + * The fixture replies after <delay_seconds> (= 3s by default), so the + glibc resolver worker thread receives the response *after* the + caller's frame is gone and writes back into freed stack memory. + * AddressSanitizer (with detect_stack_use_after_return=1) catches the + write and aborts with a stack-use-after-return diagnostic. + +Without this fixture the bug is hard to surface: dropping UDP/53 makes +the resolver hang forever, so the worker never receives anything and +never reaches the buggy write-back path. + +Usage +----- + python3 test/dns_test_fixture.py <port> [<delay_seconds>] + +Only standard library; no third-party dependencies. 
+""" + +import socket +import struct +import sys +import threading +import time + + +def serve(port: int, delay_sec: float) -> None: + sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + sock.bind(("127.0.0.1", port)) + print( + f"[dns_test_fixture] listening on 127.0.0.1:{port}, " + f"reply delay={delay_sec}s", + flush=True, + ) + while True: + try: + data, addr = sock.recvfrom(2048) + except OSError: + return + threading.Thread( + target=_reply_after_delay, + args=(sock, data, addr, delay_sec), + daemon=True, + ).start() + + +def _reply_after_delay(sock, query: bytes, addr, delay_sec: float) -> None: + time.sleep(delay_sec) + if len(query) < 12: + return + # Header: copy transaction id, set QR=1 RA=1 RCODE=3 (NXDOMAIN), + # preserve the requester's RD bit, then echo the question section so + # glibc's resolver accepts the reply as matching its outstanding query. + txid = query[:2] + rd_bit = query[2] & 0x01 + flags = struct.pack(">H", 0x8003 | (rd_bit << 8)) + counts = struct.pack(">HHHH", 1, 0, 0, 0) + question = query[12:] + reply = txid + flags + counts + question + try: + sock.sendto(reply, addr) + except OSError: + pass + + +if __name__ == "__main__": + if len(sys.argv) < 2: + print(__doc__, file=sys.stderr) + sys.exit(2) + port_arg = int(sys.argv[1]) + delay_arg = float(sys.argv[2]) if len(sys.argv) > 2 else 3.0 + serve(port_arg, delay_arg) diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/cpp-httplib-0.43.1/test/fuzzing/Makefile new/cpp-httplib-0.43.3/test/fuzzing/Makefile --- old/cpp-httplib-0.43.1/test/fuzzing/Makefile 2026-04-20 07:48:27.000000000 +0200 +++ new/cpp-httplib-0.43.3/test/fuzzing/Makefile 2026-05-04 09:19:49.000000000 +0200 @@ -13,7 +13,7 @@ BROTLI_DIR = /usr/local/opt/brotli # BROTLI_SUPPORT = -DCPPHTTPLIB_BROTLI_SUPPORT -I$(BROTLI_DIR)/include -L$(BROTLI_DIR)/lib -lbrotlicommon -lbrotlienc -lbrotlidec -FUZZERS = server_fuzzer url_parser_fuzzer header_parser_fuzzer +FUZZERS = server_fuzzer 
url_parser_fuzzer header_parser_fuzzer client_fuzzer # Runs all the tests and also fuzz tests against seed corpus. all : $(FUZZERS) @@ -25,6 +25,10 @@ $(CXX) $(CXXFLAGS) -o $@ $< $(ZLIB_SUPPORT) $(LIB_FUZZING_ENGINE) -pthread -lanl zip -q -r server_fuzzer_seed_corpus.zip corpus +client_fuzzer : client_fuzzer.cc ../../httplib.h + $(CXX) $(CXXFLAGS) -o $@ $< $(ZLIB_SUPPORT) $(LIB_FUZZING_ENGINE) -pthread -lanl + zip -q -r client_fuzzer_seed_corpus.zip corpus + header_parser_fuzzer : header_parser_fuzzer.cc ../../httplib.h $(CXX) $(CXXFLAGS) -o $@ $< $(ZLIB_SUPPORT) $(LIB_FUZZING_ENGINE) -pthread -lanl diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/cpp-httplib-0.43.1/test/fuzzing/client_fuzzer.cc new/cpp-httplib-0.43.3/test/fuzzing/client_fuzzer.cc --- old/cpp-httplib-0.43.1/test/fuzzing/client_fuzzer.cc 1970-01-01 01:00:00.000000000 +0100 +++ new/cpp-httplib-0.43.3/test/fuzzing/client_fuzzer.cc 2026-05-04 09:19:49.000000000 +0200 @@ -0,0 +1,88 @@ +#include <cstdint> +#include <cstring> +#include <httplib.h> + +class FuzzedStream : public httplib::Stream { +public: + FuzzedStream(const uint8_t *data, size_t size) + : data_(data), size_(size), read_pos_(0) {} + + ssize_t read(char *ptr, size_t size) override { + if (size + read_pos_ > size_) { size = size_ - read_pos_; } + memcpy(ptr, data_ + read_pos_, size); + read_pos_ += size; + return static_cast<ssize_t>(size); + } + + ssize_t write(const char *ptr, size_t size) override { + request_.append(ptr, size); + return static_cast<ssize_t>(size); + } + + ssize_t write(const char *ptr) { return write(ptr, strlen(ptr)); } + + ssize_t write(const std::string &s) { return write(s.data(), s.size()); } + + bool is_readable() const override { return true; } + + bool wait_readable() const override { return true; } + + bool wait_writable() const override { return true; } + + void get_remote_ip_and_port(std::string &ip, int &port) const override { + ip = "127.0.0.1"; + port = 8080; + } 
+ + void get_local_ip_and_port(std::string &ip, int &port) const override { + ip = "127.0.0.1"; + port = 8080; + } + + socket_t socket() const override { return 0; } + + time_t duration() const override { return 0; }; + +private: + const uint8_t *data_; + size_t size_; + size_t read_pos_; + std::string request_; +}; + +class FuzzableClient : public httplib::ClientImpl { +public: + FuzzableClient() : httplib::ClientImpl("localhost", 8080) {} + + void ProcessFuzzedResponse(FuzzedStream &stream, const std::string &method) { + httplib::Request req; + req.method = method; + req.path = "/"; + httplib::Response res; + bool close_connection = false; + httplib::Error error = httplib::Error::Success; + + process_request(stream, req, res, close_connection, error); + } +}; + +extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) { + if (size < 1) return 0; + + FuzzedStream stream{data + 1, size - 1}; + FuzzableClient client; + + // Use the first byte to select method + std::string method; + switch (data[0] % 6) { + case 0: method = "GET"; break; + case 1: method = "POST"; break; + case 2: method = "PUT"; break; + case 3: method = "PATCH"; break; + case 4: method = "DELETE"; break; + case 5: method = "OPTIONS"; break; + } + + client.ProcessFuzzedResponse(stream, method); + return 0; +} diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/cpp-httplib-0.43.1/test/fuzzing/corpus/clusterfuzz-testcase-minimized-client_fuzzer-5188033728282624 new/cpp-httplib-0.43.3/test/fuzzing/corpus/clusterfuzz-testcase-minimized-client_fuzzer-5188033728282624 --- old/cpp-httplib-0.43.1/test/fuzzing/corpus/clusterfuzz-testcase-minimized-client_fuzzer-5188033728282624 1970-01-01 01:00:00.000000000 +0100 +++ new/cpp-httplib-0.43.3/test/fuzzing/corpus/clusterfuzz-testcase-minimized-client_fuzzer-5188033728282624 2026-05-04 09:19:49.000000000 +0200 @@ -0,0 +1,3 @@ + HTTP/1.1 777 +Content-Length:20000000000 + diff -urN '--exclude=CVS' 
'--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/cpp-httplib-0.43.1/test/fuzzing/corpus/clusterfuzz-testcase-minimized-client_fuzzer-6561074643271680 new/cpp-httplib-0.43.3/test/fuzzing/corpus/clusterfuzz-testcase-minimized-client_fuzzer-6561074643271680 --- old/cpp-httplib-0.43.1/test/fuzzing/corpus/clusterfuzz-testcase-minimized-client_fuzzer-6561074643271680 1970-01-01 01:00:00.000000000 +0100 +++ new/cpp-httplib-0.43.3/test/fuzzing/corpus/clusterfuzz-testcase-minimized-client_fuzzer-6561074643271680 2026-05-04 09:19:49.000000000 +0200 @@ -0,0 +1,3 @@ + HTTP/1.1 777 +Content-Length:446744071854775 + Binary files old/cpp-httplib-0.43.1/test/fuzzing/corpus/clusterfuzz-testcase-minimized-header_parser_fuzzer-4645558454386688 and new/cpp-httplib-0.43.3/test/fuzzing/corpus/clusterfuzz-testcase-minimized-header_parser_fuzzer-4645558454386688 differ diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/cpp-httplib-0.43.1/test/lsan_suppressions.txt new/cpp-httplib-0.43.3/test/lsan_suppressions.txt --- old/cpp-httplib-0.43.1/test/lsan_suppressions.txt 2026-04-20 07:48:27.000000000 +0200 +++ new/cpp-httplib-0.43.3/test/lsan_suppressions.txt 2026-05-04 09:19:49.000000000 +0200 @@ -1,3 +1,7 @@ # OpenSSL 3.x internal caches (provider, cipher, keymgmt) are allocated # lazily and intentionally kept until process exit. These are not real leaks. leak:libcrypto + +# wolfSSL keeps ECC point/scratch buffers alive across handshakes; they are +# released only at library shutdown which the test binaries do not invoke. 
+leak:libwolfssl diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/cpp-httplib-0.43.1/test/run_issue_2431_repro.sh new/cpp-httplib-0.43.3/test/run_issue_2431_repro.sh --- old/cpp-httplib-0.43.1/test/run_issue_2431_repro.sh 1970-01-01 01:00:00.000000000 +0100 +++ new/cpp-httplib-0.43.3/test/run_issue_2431_repro.sh 2026-05-04 09:19:49.000000000 +0200 @@ -0,0 +1,102 @@ +#!/usr/bin/env bash +# Reproducer runner for Issue #2431 +# (https://github.com/yhirose/cpp-httplib/issues/2431). +# +# Spins up an Ubuntu container, runs the loopback DNS test fixture +# (test/dns_test_fixture.py), routes the container's DNS lookups to +# that fixture via an iptables NAT rule, builds the test suite with +# g++ + ASAN, and runs the GetAddrInfoAsyncCancelTest cases. +# +# Expected outcomes: +# - HEAD prior to the fix: ASAN reports stack-use-after-return inside +# getaddrinfo_with_timeout's getaddrinfo_a path during one of the +# GetAddrInfoAsyncCancelTest cases. +# - HEAD with the fix applied: all three cases PASS. +# +# Usage: +# bash test/run_issue_2431_repro.sh +# +# Requirements: Docker (Linux container support). The container needs +# --privileged because the test binary uses `setarch -R` to disable ASLR +# for ASAN compatibility, and because the test job manages iptables +# rules inside the container. + +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +REPO_ROOT="$(cd "$SCRIPT_DIR/.." 
&& pwd)" + +docker run --rm --privileged \ + -v "$REPO_ROOT:/work" \ + -w /work/test \ + ubuntu:24.04 bash -c ' +set -euo pipefail +export DEBIAN_FRONTEND=noninteractive + +apt-get update -qq +apt-get install -y -qq --no-install-recommends \ + ca-certificates g++ make pkg-config iptables iproute2 util-linux coreutils file \ + python3 \ + libssl-dev zlib1g-dev libbrotli-dev libzstd-dev libcurl4-openssl-dev \ + >/dev/null + +# Force DNS-only resolution: Ubuntu defaults nsswitch.conf to +# "hosts: files mdns4_minimal [NOTFOUND=return] dns ...", which +# short-circuits to NOTFOUND before reaching glibc DNS code, so the +# gai_cancel() branch never gets exercised. +sed -i "s/^hosts:.*/hosts: dns/" /etc/nsswitch.conf + +# Start the loopback DNS test fixture (delayed UDP responder). +DNS_FIXTURE_PORT=15353 +DNS_FIXTURE_DELAY=3 +python3 /work/test/dns_test_fixture.py "$DNS_FIXTURE_PORT" "$DNS_FIXTURE_DELAY" \ + >/tmp/dns_fixture.log 2>&1 & +FIXTURE_PID=$! + +# Route the container DNS lookups to the fixture; conntrack handles the +# reply path automatically. /etc/resolv.conf is left untouched. +iptables -t nat -I OUTPUT -p udp --dport 53 \ + -j REDIRECT --to-port "$DNS_FIXTURE_PORT" + +trap '"'"'iptables -t nat -F OUTPUT 2>/dev/null || true; kill "$FIXTURE_PID" 2>/dev/null || true'"'"' EXIT + +# Wait for the fixture to start listening. +for _ in $(seq 1 50); do + if ss -lun "( sport = :$DNS_FIXTURE_PORT )" | grep -q ":$DNS_FIXTURE_PORT"; then + break + fi + sleep 0.1 +done +ss -lun "( sport = :$DNS_FIXTURE_PORT )" | grep -q ":$DNS_FIXTURE_PORT" || { + echo "ERROR: dns_test_fixture failed to start" >&2 + cat /tmp/dns_fixture.log >&2 || true + exit 1 +} + +# Sanity check: a DNS lookup must take at least the fixture delay +# (proving the NAT rule routes the query to the fixture). 
+start=$(date +%s) +getent hosts unresolvable-host.invalid >/dev/null 2>&1 || true +elapsed=$(( $(date +%s) - start )) +if [ "$elapsed" -lt 2 ]; then + echo "ERROR: lookup returned in ${elapsed}s; fixture not in DNS path" >&2 + exit 1 +fi +echo "[ok] DNS lookups are routed to the test fixture (took ${elapsed}s)" + +cd /work/test +echo "=== building test binary (g++ + ASAN) ===" +make CXX=g++ test 2>&1 | tail -5 + +ARCH=$(uname -m) +echo "=== running GetAddrInfoAsyncCancelTest with CPPHTTPLIB_TEST_ISSUE_2431=1 ===" +set +e +CPPHTTPLIB_TEST_ISSUE_2431=1 \ +ASAN_OPTIONS=detect_stack_use_after_return=1 \ +setarch "$ARCH" -R \ + ./test --gtest_filter="GetAddrInfoAsyncCancelTest.*" 2>&1 +rc=$? +set -e +echo "=== test exit: $rc ===" +exit $rc +' diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/cpp-httplib-0.43.1/test/test.cc new/cpp-httplib-0.43.3/test/test.cc --- old/cpp-httplib-0.43.1/test/test.cc 2026-04-20 07:48:27.000000000 +0200 +++ new/cpp-httplib-0.43.3/test/test.cc 2026-05-04 09:19:49.000000000 +0200 @@ -1549,6 +1549,140 @@ std::this_thread::sleep_for(std::chrono::seconds(8)); } +#if defined(__linux__) && defined(__GLIBC__) && \ + defined(CPPHTTPLIB_USE_NON_BLOCKING_GETADDRINFO) + +// Forward declaration: in split builds split.py strips `inline` and moves the +// definition into httplib.cc, so detail::getaddrinfo_with_timeout is not +// visible from the public httplib.h. Re-declaring it here lets the tests link +// against the symbol in both header-only and split builds. +namespace httplib { +namespace detail { +int getaddrinfo_with_timeout(const char *node, const char *service, + const struct addrinfo *hints, + struct addrinfo **res, time_t timeout_sec); +} // namespace detail +} // namespace httplib + +// Reproducer for https://github.com/yhirose/cpp-httplib/issues/2431. +// +// On Linux/glibc, getaddrinfo_with_timeout() runs the lookup via +// getaddrinfo_a(GAI_NOWAIT) using a stack-local `struct gaicb`. 
When the +// gai_suspend() call hits the connection timeout the function calls +// gai_cancel() and returns immediately. gai_cancel() is non-blocking and +// can return EAI_NOTCANCELED, in which case the resolver worker thread is +// still alive and still references the now-destroyed stack frame. +// +// Triggering the bug requires DNS to actually hang (UDP/53 dropped, etc.), +// so these tests are gated on CPPHTTPLIB_TEST_ISSUE_2431=1 and are skipped +// during normal runs. test/run_issue_2431_repro.sh sets up the environment +// and runs them in a container. +namespace { +bool should_run_issue_2431_tests() { + const char *v = getenv("CPPHTTPLIB_TEST_ISSUE_2431"); + return v && *v && std::string(v) != "0"; +} + +std::string unique_unresolvable_host(int n) { + // .invalid is reserved (RFC 6761) and is never served by real DNS, but + // glibc still asks the configured nameserver — which is exactly the path + // we want to exercise. A unique label per call avoids the resolver cache. + auto t = std::chrono::steady_clock::now().time_since_epoch().count(); + return "h-" + std::to_string(::getpid()) + "-" + std::to_string(t) + "-" + + std::to_string(n) + ".invalid"; +} +} // namespace + +TEST(GetAddrInfoAsyncCancelTest, DirectCallSingleThread) { + if (!should_run_issue_2431_tests()) { + GTEST_SKIP() + << "Set CPPHTTPLIB_TEST_ISSUE_2431=1 (and sinkhole DNS) to run"; + } + + for (int i = 0; i < 8; ++i) { + struct addrinfo hints; + memset(&hints, 0, sizeof(hints)); + hints.ai_family = AF_UNSPEC; + hints.ai_socktype = SOCK_STREAM; + + auto host = unique_unresolvable_host(i); + struct addrinfo *result = nullptr; + int rc = detail::getaddrinfo_with_timeout(host.c_str(), "80", &hints, + &result, /*timeout_sec=*/1); + if (rc == 0 && result) { freeaddrinfo(result); } + } + + // Give orphaned getaddrinfo_a worker threads a chance to write into the + // stack region they still believe holds their gaicb. 
+ std::this_thread::sleep_for(std::chrono::seconds(3)); +} + +TEST(GetAddrInfoAsyncCancelTest, DirectCallMultiThread) { + if (!should_run_issue_2431_tests()) { + GTEST_SKIP() + << "Set CPPHTTPLIB_TEST_ISSUE_2431=1 (and sinkhole DNS) to run"; + } + + std::atomic<bool> stop{false}; + std::vector<std::thread> threads; + for (int t = 0; t < 8; ++t) { + threads.emplace_back([t, &stop] { + int i = 0; + while (!stop.load(std::memory_order_relaxed)) { + struct addrinfo hints; + memset(&hints, 0, sizeof(hints)); + hints.ai_family = AF_UNSPEC; + hints.ai_socktype = SOCK_STREAM; + + auto host = unique_unresolvable_host(t * 100000 + i++); + struct addrinfo *result = nullptr; + int rc = detail::getaddrinfo_with_timeout(host.c_str(), "80", &hints, + &result, /*timeout_sec=*/1); + if (rc == 0 && result) { freeaddrinfo(result); } + } + }); + } + + std::this_thread::sleep_for(std::chrono::seconds(8)); + stop.store(true, std::memory_order_relaxed); + for (auto &th : threads) { + th.join(); + } + std::this_thread::sleep_for(std::chrono::seconds(3)); +} + +TEST(GetAddrInfoAsyncCancelTest, ClientGetMultiThread) { + if (!should_run_issue_2431_tests()) { + GTEST_SKIP() + << "Set CPPHTTPLIB_TEST_ISSUE_2431=1 (and sinkhole DNS) to run"; + } + + std::atomic<bool> stop{false}; + std::vector<std::thread> threads; + for (int t = 0; t < 8; ++t) { + threads.emplace_back([t, &stop] { + int i = 0; + while (!stop.load(std::memory_order_relaxed)) { + auto host = unique_unresolvable_host(t * 100000 + i++); + Client cli(host, 80); + cli.set_connection_timeout(1, 0); + cli.set_read_timeout(1, 0); + cli.set_write_timeout(1, 0); + (void)cli.Get("/"); + } + }); + } + + std::this_thread::sleep_for(std::chrono::seconds(8)); + stop.store(true, std::memory_order_relaxed); + for (auto &th : threads) { + th.join(); + } + std::this_thread::sleep_for(std::chrono::seconds(3)); +} + +#endif // __linux__ && __GLIBC__ && CPPHTTPLIB_USE_NON_BLOCKING_GETADDRINFO + TEST(ConnectionErrorTest, InvalidHost) { auto host = 
"-abcde.com"; @@ -7805,6 +7939,31 @@ EXPECT_EQ(U8("日本語コンテンツ"), res->body); } +#ifdef _WIN32 +// Issue #2435: mmap::open() must succeed even when another handle holds +// the file open for writing (e.g. an active log file). +TEST(MmapTest, OpenWhileFileHeldForWriting) { + const char *path = "mmap_concurrent_writer_test.txt"; + const char *content = "hello"; + + { + std::ofstream f(path, std::ios::binary); + f.write(content, static_cast<std::streamsize>(strlen(content))); + } + auto file_cleanup = detail::scope_exit([&] { std::remove(path); }); + + HANDLE writer = ::CreateFileA(path, GENERIC_WRITE, FILE_SHARE_READ, NULL, + OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL); + ASSERT_NE(INVALID_HANDLE_VALUE, writer); + auto handle_cleanup = detail::scope_exit([&] { ::CloseHandle(writer); }); + + detail::mmap m(path); + ASSERT_TRUE(m.is_open()); + EXPECT_EQ(strlen(content), m.size()); + EXPECT_EQ(0, std::memcmp(content, m.data(), strlen(content))); +} +#endif + TEST(KeepAliveTest, ReadTimeout) { Server svr; @@ -9129,6 +9288,81 @@ << " bytes without truncation, but only read " << total_read << " bytes."; } +// Regression test for OSS-Fuzz issue 508342856: a malicious server sending an +// enormous Content-Length must not cause the client to pre-allocate a huge +// response body buffer. The reservation is capped at payload_max_length_, and +// the read itself fails when the body exceeds the limit. 
+TEST(ClientVulnerabilityTest, HugeContentLengthDoesNotPreallocate) { +#ifndef _WIN32 + signal(SIGPIPE, SIG_IGN); +#endif + + auto server_thread = std::thread([] { + auto srv = ::socket(AF_INET, SOCK_STREAM, 0); + default_socket_options(srv); + detail::set_socket_opt_time(srv, SOL_SOCKET, SO_RCVTIMEO, 5, 0); + detail::set_socket_opt_time(srv, SOL_SOCKET, SO_SNDTIMEO, 5, 0); + + sockaddr_in addr{}; + addr.sin_family = AF_INET; + addr.sin_port = htons(static_cast<uint16_t>(PORT + 2)); + ::inet_pton(AF_INET, "127.0.0.1", &addr.sin_addr); + + int opt = 1; + ::setsockopt(srv, SOL_SOCKET, SO_REUSEADDR, +#ifdef _WIN32 + reinterpret_cast<const char *>(&opt), +#else + &opt, +#endif + sizeof(opt)); + + ::bind(srv, reinterpret_cast<sockaddr *>(&addr), sizeof(addr)); + ::listen(srv, 1); + + sockaddr_in cli_addr{}; + socklen_t cli_len = sizeof(cli_addr); + auto cli = ::accept(srv, reinterpret_cast<sockaddr *>(&cli_addr), &cli_len); + + if (cli != INVALID_SOCKET) { + char buf[4096]; + ::recv(cli, buf, sizeof(buf), 0); + + // Malicious response: claim a 20GB body but send only a tiny payload. + std::string response = "HTTP/1.1 200 OK\r\n" + "Content-Length: 20000000000\r\n" + "\r\n" + "abc"; + ::send(cli, +#ifdef _WIN32 + static_cast<const char *>(response.c_str()), + static_cast<int>(response.size()), +#else + response.c_str(), response.size(), +#endif + 0); + + detail::close_socket(cli); + } + detail::close_socket(srv); + }); + + std::this_thread::sleep_for(std::chrono::milliseconds(200)); + + { + Client cli("127.0.0.1", PORT + 2); + cli.set_read_timeout(5, 0); + // Default payload_max_length_ is 100MB; a 20GB Content-Length must not + // result in a 20GB pre-allocation. The Get() call is expected to fail + // (server claims more bytes than payload_max_length permits), but it must + // not exhaust memory before getting there. 
+ auto res = cli.Get("/malicious"); + EXPECT_FALSE(res); // Read fails because body exceeds payload_max_length_ + } + + server_thread.join(); +} + // Verify that content_receiver bypasses the default payload_max_length, // allowing streaming downloads larger than 100MB without requiring an explicit // set_payload_max_length call. @@ -12142,7 +12376,7 @@ TEST(MultipartFormDataTest, MakeFileProvider) { // Verify make_file_provider sends a file's contents correctly. const std::string file_content(4096, 'Z'); - const std::string tmp_path = "/tmp/httplib_test_make_file_provider.bin"; + const std::string tmp_path = "./httplib_test_make_file_provider.bin"; { std::ofstream ofs(tmp_path, std::ios::binary); ofs.write(file_content.data(), @@ -12197,7 +12431,7 @@ TEST(MakeFileBodyTest, Basic) { const std::string file_content(4096, 'Z'); - const std::string tmp_path = "/tmp/httplib_test_make_file_body.bin"; + const std::string tmp_path = "./httplib_test_make_file_body.bin"; { std::ofstream ofs(tmp_path, std::ios::binary); ofs.write(file_content.data(),
