This is an automated email from the ASF dual-hosted git repository.
masaori pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/trafficserver.git
The following commit(s) were added to refs/heads/master by this push:
new 5b32738f23 fix(http): stale `skip_bytes` in cache-write consumer after
100 Continue with transform (#12906)
5b32738f23 is described below
commit 5b32738f2388090bdff3b1b6abd855efe63466ed
Author: Jake <[email protected]>
AuthorDate: Thu Mar 5 00:49:53 2026 +0000
fix(http): stale `skip_bytes` in cache-write consumer after 100 Continue
with transform (#12906)
When an origin sends 100 Continue before a compressible response,
`setup_100_continue_transfer()` sets `client_response_hdr_bytes` to the
intermediate header size.
`setup_server_transfer_to_transform()` then creates the tunnel buffer with
`hdr_size=0` - body only, but does not reset `client_response_hdr_bytes` for
non-chunked responses.
`perform_cache_write_action()` passes the stale value as `skip_bytes` to
the cache-write consumer, causing `ink_release_assert` in `producer_run` when
the response body is smaller than the 100 Continue headers.
Fix: Set `cache_write_skip` to `0` when a transform is present, since
`setup_server_transfer_to_transform()` never writes headers into the tunnel
buffer.
---
src/proxy/http/HttpSM.cc | 12 +-
.../compress/compress-cache-untransformed.test.py | 118 +++++++++++++++++++
.../compress/compress_100_continue_origin.py | 126 +++++++++++++++++++++
.../compress/etc/compress-cache-false.config | 5 +
4 files changed, 259 insertions(+), 2 deletions(-)
diff --git a/src/proxy/http/HttpSM.cc b/src/proxy/http/HttpSM.cc
index 1082b3a081..8fdbaed76c 100644
--- a/src/proxy/http/HttpSM.cc
+++ b/src/proxy/http/HttpSM.cc
@@ -6608,8 +6608,16 @@ HttpSM::perform_cache_write_action()
   if (transform_info.entry == nullptr || t_state.api_info.cache_untransformed == true) {
cache_sm.close_read();
     t_state.cache_info.write_status = HttpTransact::CacheWriteStatus_t::IN_PROGRESS;
-    setup_cache_write_transfer(&cache_sm, server_entry->vc, &t_state.cache_info.object_store, client_response_hdr_bytes,
-                               "cache write");
+
+ // Decide how many header bytes the cache-write consumer should skip.
+ // When a transform is present, setup_server_transfer_to_transform()
+ // creates the tunnel buffer with hdr_size=0, so the buffer contains
+ // only body data - no response headers to skip. When no transform is
+ // present, setup_server_transfer() writes the response header into the
+ // buffer, so skip_bytes must equal client_response_hdr_bytes.
+    int64_t cache_write_skip = (transform_info.entry == nullptr) ? client_response_hdr_bytes : 0;
+
+    setup_cache_write_transfer(&cache_sm, server_entry->vc, &t_state.cache_info.object_store, cache_write_skip, "cache write");
} else {
// We are not caching the untransformed. We might want to
// use the cache writevc to cache the transformed copy
diff --git a/tests/gold_tests/pluginTest/compress/compress-cache-untransformed.test.py b/tests/gold_tests/pluginTest/compress/compress-cache-untransformed.test.py
new file mode 100644
index 0000000000..bf5e0a85ca
--- /dev/null
+++ b/tests/gold_tests/pluginTest/compress/compress-cache-untransformed.test.py
@@ -0,0 +1,118 @@
+'''
+Regression test for https://github.com/apache/trafficserver/issues/12244
+
+The crash requires two conditions in the same transaction:
+1. An intermediate response (100 Continue) is forwarded to the client, which
+ sets client_response_hdr_bytes to the intermediate header size via
+ setup_100_continue_transfer().
+2. The final response goes through a compress transform with untransformed
+   cache writing (cache=true), which calls setup_server_transfer_to_transform().
+ For non-chunked responses, client_response_hdr_bytes is NOT reset to 0.
+
+perform_cache_write_action() then passes the stale client_response_hdr_bytes
+as skip_bytes to the cache-write consumer, but the server-to-transform tunnel
+buffer contains only body data (no headers). The assertion in
+HttpTunnel::producer_run fires:
+
+ c->skip_bytes <= c->buffer_reader->read_avail()
+
+This test uses a custom origin that sends "100 Continue" followed by a
+compressible, non-chunked 200 OK to trigger the exact crash path.
+'''
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+from ports import get_port
+
+Test.Summary = '''
+Regression test for compress plugin with cache=true causing assertion failure
+when origin sends 100 Continue before a compressible response (#12244)
+'''
+
+Test.SkipUnless(Condition.PluginExists('compress.so'))
+
+
+class CompressCacheUntransformedTest:
+
+ def __init__(self):
+ self.setupTS()
+ self.run()
+
+ def setupTS(self):
+ self.ts = Test.MakeATSProcess("ts", enable_cache=True)
+
+ def run(self):
+ tr = Test.AddTestRun()
+
+ # Copy scripts into the test run directory.
+ tr.Setup.CopyAs("compress_100_continue_origin.py")
+ tr.Setup.Copy("etc/compress-cache-false.config")
+
+ # Create and configure the custom origin server process.
+ origin = tr.Processes.Process("origin")
+ origin_port = get_port(origin, 'http_port')
+        origin.Command = (f'{sys.executable} compress_100_continue_origin.py' f' --port {origin_port}')
+ origin.Ready = When.PortOpenv4(origin_port)
+ origin.ReturnCode = 0
+
+ # Configure ATS.
+ self.ts.Disk.records_config.update(
+ {
+ "proxy.config.diags.debug.enabled": 1,
+ "proxy.config.diags.debug.tags": "http|compress|http_tunnel",
+ # Do NOT send 100 Continue from ATS - let the origin send it.
+ # This ensures ATS processes the origin's 100 via
+ # handle_100_continue_response -> setup_100_continue_transfer,
+ # which sets client_response_hdr_bytes.
+ "proxy.config.http.send_100_continue_response": 0,
+ # Enable POST caching so that the 200 OK is cached, triggering
+                # the cache write path where the stale client_response_hdr_bytes
+ # causes the crash.
+ "proxy.config.http.cache.post_method": 1,
+ })
+
+ self.ts.Disk.remap_config.AddLine(
+ f'map / http://127.0.0.1:{origin_port}/'
+ f' @plugin=compress.so'
+ f' @pparam={Test.RunDirectory}/compress-cache-false.config')
+
+ # Client sends a POST with Expect: 100-continue but does not wait for
+ # the 100 response before sending the body (--expect100-timeout 0).
+ # The crash is triggered by ATS processing the origin's 100 Continue,
+ # not by the client's behaviour during the handshake.
+ client = tr.Processes.Default
+ client.Command = (
+ f'curl --http1.1 -s -o /dev/null'
+ f' -X POST'
+ f' -H "Accept-Encoding: gzip"'
+ f' -H "Expect: 100-continue"'
+ f' --expect100-timeout 0'
+ f' --data "test body data"'
+ f' http://127.0.0.1:{self.ts.Variables.port}/test/resource.js')
+ client.ReturnCode = 0
+ client.StartBefore(origin)
+ client.StartBefore(self.ts)
+
+ # The key assertion: ATS must still be running after the test.
+ # Without the fix, ATS would have crashed with a failed assertion
+ # in HttpTunnel::producer_run.
+ tr.StillRunningAfter = self.ts
+
+
+CompressCacheUntransformedTest()
diff --git a/tests/gold_tests/pluginTest/compress/compress_100_continue_origin.py b/tests/gold_tests/pluginTest/compress/compress_100_continue_origin.py
new file mode 100644
index 0000000000..a3b0f29b01
--- /dev/null
+++ b/tests/gold_tests/pluginTest/compress/compress_100_continue_origin.py
@@ -0,0 +1,126 @@
+#!/usr/bin/env python3
+"""Origin server that sends 100 Continue then a compressible 200 OK.
+
+Used to reproduce the crash in HttpTunnel::producer_run when compress.so
+with cache=true is combined with a 100 Continue response from the origin.
+"""
+
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+import signal
+import socket
+import sys
+
+
+def parse_args():
+ parser = argparse.ArgumentParser()
+    parser.add_argument('--port', type=int, required=True, help='Port to listen on')
+ return parser.parse_args()
+
+
+def read_request(conn):
+ """Read an HTTP request from the connection, returning the raw bytes."""
+ data = b''
+ while b'\r\n\r\n' not in data:
+ chunk = conn.recv(4096)
+ if not chunk:
+ return None
+ data += chunk
+ return data
+
+
+def handle_connection(conn, addr):
+ """Handle a single client connection."""
+ try:
+ conn.settimeout(10)
+ request = read_request(conn)
+ if request is None:
+ return
+
+ # Send 100 Continue immediately.
+ conn.sendall(b'HTTP/1.1 100 Continue\r\n\r\n')
+
+ # Read the request body. The origin MUST wait for the body
+ # before sending the 200 OK so that ATS processes the 100
+ # Continue tunnel, the POST body tunnel, and THEN reads the
+ # 200 OK - reproducing the exact timing of the production crash.
+ request_str = request.decode('utf-8', errors='replace')
+ cl = 0
+ for line in request_str.split('\r\n'):
+ if line.lower().startswith('content-length:'):
+ cl = int(line.split(':', 1)[1].strip())
+ break
+
+ header_end = request.find(b'\r\n\r\n') + 4
+ body_received = len(request) - header_end
+ remaining = cl - body_received
+ while remaining > 0:
+ chunk = conn.recv(min(remaining, 4096))
+ if not chunk:
+ break
+ remaining -= len(chunk)
+
+        # Send a compressible 200 OK response with Content-Length (non-chunked).
+ # The body is JavaScript text so compress.so will add a transform.
+ # The body must be SMALLER than the 100 Continue response headers
+ # (~82 bytes) so that skip_bytes > read_avail() in producer_run.
+ body = b'var x=1;' # 8 bytes - smaller than 100 Continue headers
+ response = (
+ b'HTTP/1.1 200 OK\r\n'
+ b'Content-Type: text/javascript\r\n'
+ b'Cache-Control: public, max-age=3600\r\n'
+ b'Content-Length: ' + str(len(body)).encode() + b'\r\n'
+ b'\r\n' + body)
+ conn.sendall(response)
+ except Exception as e:
+ print(f'Error handling connection from {addr}: {e}', file=sys.stderr)
+ finally:
+ conn.close()
+
+
+def main():
+ # Exit cleanly when the test framework sends SIGINT to stop us.
+ signal.signal(signal.SIGINT, lambda *_: sys.exit(0))
+
+ args = parse_args()
+
+ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+ sock.bind(('127.0.0.1', args.port))
+ sock.listen(5)
+ sock.settimeout(5)
+
+ actual_port = sock.getsockname()[1]
+ print(f'Listening on port {actual_port}', flush=True)
+
+ # Accept connections until timeout. The first connection may be
+ # the readiness probe from the test framework; the real ATS
+ # connection arrives after that.
+ while True:
+ try:
+ conn, addr = sock.accept()
+ handle_connection(conn, addr)
+ except socket.timeout:
+ break
+
+ sock.close()
+ return 0
+
+
+if __name__ == '__main__':
+ sys.exit(main())
diff --git a/tests/gold_tests/pluginTest/compress/etc/compress-cache-false.config b/tests/gold_tests/pluginTest/compress/etc/compress-cache-false.config
new file mode 100644
index 0000000000..4fa9f523e8
--- /dev/null
+++ b/tests/gold_tests/pluginTest/compress/etc/compress-cache-false.config
@@ -0,0 +1,5 @@
+cache false
+compressible-content-type text/javascript
+compressible-content-type application/json
+supported-algorithms gzip
+minimum-content-length 0