On Thu, Apr 30, 2015 at 4:14 AM, Yann Ylavic <yla...@apache.org> wrote: > > I'm almost done with these changes and will propose a patch soon, > thoughts/showstoppers?
Here is the corresponding patch (attached). I chose to define a new proxy_http_req_t struct which is passed to each mod_proxy_http (static) function (to avoid yet more args), and then declared local vars with the same names as the old parameters (to limit code change and ease review). I would use (some of) the req fields directly in the commit (if acceptable), because it's not always necessary... Comments?
Index: modules/proxy/mod_proxy_http.c =================================================================== --- modules/proxy/mod_proxy_http.c (revision 1678213) +++ modules/proxy/mod_proxy_http.c (working copy) @@ -229,30 +229,65 @@ static void terminate_headers(apr_bucket_alloc_t * #define MAX_MEM_SPOOL 16384 -static int stream_reqbody_chunked(apr_pool_t *p, - request_rec *r, - proxy_conn_rec *p_conn, - conn_rec *origin, - apr_bucket_brigade *header_brigade, - apr_bucket_brigade *input_brigade, - int flushall) +typedef enum { + RB_INIT = 0, + RB_STREAM_CL, + RB_STREAM_CHUNKED, + RB_SPOOL_CL +} rb_methods; + +typedef struct { + apr_pool_t *p; + request_rec *r; + proxy_worker *worker; + proxy_server_conf *sconf; + + char server_portstr[32]; + proxy_conn_rec *backconn; + conn_rec *origin; + + rb_methods rb_method; + apr_bucket_alloc_t *bucket_alloc; + apr_bucket_brigade *header_brigade; + apr_bucket_brigade *input_brigade; + char *old_cl_val, *old_te_val; + apr_off_t cl_val; + + int do_100_continue; + int flushall; +} proxy_http_req_t; + +static int send_reqhdr_chunked(proxy_http_req_t *req) { + apr_bucket_alloc_t *bucket_alloc = req->bucket_alloc; + apr_bucket_brigade *header_brigade = req->header_brigade; + + add_te_chunked(req->p, bucket_alloc, header_brigade); + terminate_headers(bucket_alloc, header_brigade); + + return ap_proxy_pass_brigade(bucket_alloc, req->r, req->backconn, + req->origin, header_brigade, 1); +} + +static int stream_reqbody_chunked(proxy_http_req_t *req) +{ + request_rec *r = req->r; int seen_eos = 0, rv = OK; apr_size_t hdr_len; apr_off_t bytes; apr_status_t status; - apr_bucket_alloc_t *bucket_alloc = r->connection->bucket_alloc; + char chunk_hdr[20]; /* must be here due to transient bucket. 
*/ + proxy_conn_rec *p_conn = req->backconn; + apr_bucket_alloc_t *bucket_alloc = req->bucket_alloc; + apr_bucket_brigade *header_brigade = req->header_brigade; + apr_bucket_brigade *input_brigade = req->input_brigade; apr_bucket_brigade *bb; apr_bucket *e; - add_te_chunked(p, bucket_alloc, header_brigade); - terminate_headers(bucket_alloc, header_brigade); - while (APR_BRIGADE_EMPTY(input_brigade) || !APR_BUCKET_IS_EOS(APR_BRIGADE_FIRST(input_brigade))) { - char chunk_hdr[20]; /* must be here due to transient bucket. */ - int flush = flushall; + int flush = req->flushall; if (!APR_BRIGADE_EMPTY(input_brigade)) { /* If this brigade contains EOS, either stop or remove it. */ @@ -259,9 +294,6 @@ static void terminate_headers(apr_bucket_alloc_t * if (APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(input_brigade))) { seen_eos = 1; - /* The request is flushed below this loop with the EOS chunk */ - flush = 0; - /* We can't pass this EOS to the output_filters. */ e = APR_BRIGADE_LAST(input_brigade); apr_bucket_delete(e); @@ -269,6 +301,11 @@ static void terminate_headers(apr_bucket_alloc_t * apr_brigade_length(input_brigade, 1, &bytes); + /* Flush only if we did not get the requested #bytes. */ + if (bytes < HUGE_STRING_LEN) { + flush = 0; + } + hdr_len = apr_snprintf(chunk_hdr, sizeof(chunk_hdr), "%" APR_UINT64_T_HEX_FMT CRLF, (apr_uint64_t)bytes); @@ -285,39 +322,26 @@ static void terminate_headers(apr_bucket_alloc_t * APR_BRIGADE_INSERT_TAIL(input_brigade, e); } - if (header_brigade) { + if (!APR_BRIGADE_EMPTY(header_brigade)) { /* we never sent the header brigade, so go ahead and * take care of that now */ bb = header_brigade; + APR_BRIGADE_CONCAT(bb, input_brigade); - /* Flush now since we have the header and (enough of) the prefeched - * body already, unless we are EOS since everything is to be - * flushed below this loop with the EOS chunk. 
+ /* Flush now since we have the header and (enough of) the + * prefeched body, or racing KeepAliveTimeout on the backend + * side may kill our connection while we read more client data. */ - flush = !seen_eos; - - /* - * Save input_brigade in bb brigade. (At least) in the SSL case - * input_brigade contains transient buckets whose data would get - * overwritten during the next call of ap_get_brigade in the loop. - * ap_save_brigade ensures these buckets to be set aside. - * Calling ap_save_brigade with NULL as filter is OK, because - * bb brigade already has been created and does not need to get - * created by ap_save_brigade. - */ - status = ap_save_brigade(NULL, &bb, &input_brigade, p); - if (status != APR_SUCCESS) { - return HTTP_INTERNAL_SERVER_ERROR; - } - - header_brigade = NULL; + flush = 1; } else { bb = input_brigade; } - rv = ap_proxy_pass_brigade(bucket_alloc, r, p_conn, origin, bb, flush); + /* Once we hit EOS, flush below this loop with the EOS chunk. */ + rv = ap_proxy_pass_brigade(bucket_alloc, r, p_conn, req->origin, + bb, flush && !seen_eos); if (rv != OK) { return rv; } @@ -341,7 +365,7 @@ static void terminate_headers(apr_bucket_alloc_t * } } - if (header_brigade) { + if (!APR_BRIGADE_EMPTY(header_brigade)) { /* we never sent the header brigade because there was no request body; * send it now */ @@ -369,34 +393,24 @@ static void terminate_headers(apr_bucket_alloc_t * } /* Now we have headers-only, or the chunk EOS mark; flush it */ - rv = ap_proxy_pass_brigade(bucket_alloc, r, p_conn, origin, bb, 1); - return rv; + return ap_proxy_pass_brigade(bucket_alloc, r, p_conn, req->origin, bb, 1); } -static int stream_reqbody_cl(apr_pool_t *p, - request_rec *r, - proxy_conn_rec *p_conn, - conn_rec *origin, - apr_bucket_brigade *header_brigade, - apr_bucket_brigade *input_brigade, - char *old_cl_val, int flushall) +static int send_reqhdr_cl(proxy_http_req_t *req) { - int seen_eos = 0, rv = 0; - apr_status_t status = APR_SUCCESS; - apr_bucket_alloc_t 
*bucket_alloc = r->connection->bucket_alloc; - apr_bucket_brigade *bb; - apr_bucket *e; - apr_off_t cl_val = 0; - apr_off_t bytes; - apr_off_t bytes_streamed = 0; + request_rec *r = req->r; + apr_bucket_alloc_t *bucket_alloc = req->bucket_alloc; + apr_bucket_brigade *header_brigade = req->header_brigade; + const char *old_cl_val = req->old_cl_val; if (old_cl_val) { char *endstr; + apr_status_t status; - add_cl(p, bucket_alloc, header_brigade, old_cl_val); - status = apr_strtoff(&cl_val, old_cl_val, &endstr, 10); + add_cl(req->p, bucket_alloc, header_brigade, old_cl_val); + status = apr_strtoff(&req->cl_val, old_cl_val, &endstr, 10); - if (status || *endstr || endstr == old_cl_val || cl_val < 0) { + if (status || *endstr || endstr == old_cl_val || req->cl_val < 0) { ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(01085) "could not parse request Content-Length (%s)", old_cl_val); @@ -405,22 +419,42 @@ static void terminate_headers(apr_bucket_alloc_t * } terminate_headers(bucket_alloc, header_brigade); + return ap_proxy_pass_brigade(bucket_alloc, r, req->backconn, + req->origin, header_brigade, 1); +} + +static int stream_reqbody_cl(proxy_http_req_t *req) +{ + request_rec *r = req->r; + int seen_eos = 0, rv = 0; + apr_status_t status = APR_SUCCESS; + proxy_conn_rec *p_conn = req->backconn; + apr_bucket_alloc_t *bucket_alloc = req->bucket_alloc; + apr_bucket_brigade *header_brigade = req->header_brigade; + apr_bucket_brigade *input_brigade = req->input_brigade; + apr_bucket_brigade *bb; + apr_bucket *e; + apr_off_t bytes; + apr_off_t bytes_streamed = 0; + while (APR_BRIGADE_EMPTY(input_brigade) || !APR_BUCKET_IS_EOS(APR_BRIGADE_FIRST(input_brigade))) { - int flush = flushall; + int flush = req->flushall; if (!APR_BRIGADE_EMPTY(input_brigade)) { apr_brigade_length(input_brigade, 1, &bytes); bytes_streamed += bytes; + /* Flush only if we did not get the requested #bytes. 
*/ + if (bytes < HUGE_STRING_LEN) { + flush = 0; + } + /* If this brigade contains EOS, either stop or remove it. */ if (APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(input_brigade))) { seen_eos = 1; - /* Once we hit EOS, we are ready to flush. */ - flush = 1; - /* We can't pass this EOS to the output_filters. */ e = APR_BRIGADE_LAST(input_brigade); apr_bucket_delete(e); @@ -441,48 +475,36 @@ static void terminate_headers(apr_bucket_alloc_t * * * Prevents HTTP Response Splitting. */ - if (bytes_streamed > cl_val) { + if (bytes_streamed > req->cl_val) { ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01086) "read more bytes of request body than expected " "(got %" APR_OFF_T_FMT ", expected " "%" APR_OFF_T_FMT ")", - bytes_streamed, cl_val); + bytes_streamed, req->cl_val); return HTTP_INTERNAL_SERVER_ERROR; } } - if (header_brigade) { + if (!APR_BRIGADE_EMPTY(header_brigade)) { /* we never sent the header brigade, so go ahead and * take care of that now */ bb = header_brigade; + APR_BRIGADE_CONCAT(bb, input_brigade); - /* Flush now since we have the header and (enough of) the prefeched - * body already. + /* Flush now since we have the header and (enough of) the + * prefeched body, or racing KeepAliveTimeout on the backend + * side may kill our connection while we read more client data. */ flush = 1; - - /* - * Save input_brigade in bb brigade. (At least) in the SSL case - * input_brigade contains transient buckets whose data would get - * overwritten during the next call of ap_get_brigade in the loop. - * ap_save_brigade ensures these buckets to be set aside. - * Calling ap_save_brigade with NULL as filter is OK, because - * bb brigade already has been created and does not need to get - * created by ap_save_brigade. 
- */ - status = ap_save_brigade(NULL, &bb, &input_brigade, p); - if (status != APR_SUCCESS) { - return HTTP_INTERNAL_SERVER_ERROR; - } - - header_brigade = NULL; } else { bb = input_brigade; } - rv = ap_proxy_pass_brigade(bucket_alloc, r, p_conn, origin, bb, flush); + /* Once we hit EOS, we are ready to flush. */ + rv = ap_proxy_pass_brigade(bucket_alloc, r, p_conn, req->origin, + bb, flush || seen_eos); if (rv != OK) { return rv; } @@ -506,7 +528,7 @@ static void terminate_headers(apr_bucket_alloc_t * } } - if (bytes_streamed != cl_val) { + if (bytes_streamed != req->cl_val) { ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01087) "client %s given Content-Length did not match" " number of body bytes read", r->connection->client_ip); @@ -513,26 +535,26 @@ static void terminate_headers(apr_bucket_alloc_t * return HTTP_BAD_REQUEST; } - if (header_brigade) { + if (!APR_BRIGADE_EMPTY(header_brigade)) { /* we never sent the header brigade since there was no request * body; send it now with the flush flag */ - bb = header_brigade; - return(ap_proxy_pass_brigade(bucket_alloc, r, p_conn, origin, bb, 1)); + return ap_proxy_pass_brigade(bucket_alloc, r, p_conn, req->origin, + header_brigade, 1); } return OK; } -static int spool_reqbody_cl(apr_pool_t *p, - request_rec *r, - apr_bucket_brigade *header_brigade, - apr_bucket_brigade *input_brigade, - int force_cl) +static int spool_reqbody_cl(proxy_http_req_t *req, int force_cl) { + apr_pool_t *p = req->p; + request_rec *r = req->r; int seen_eos = 0; apr_status_t status = APR_SUCCESS; - apr_bucket_alloc_t *bucket_alloc = r->connection->bucket_alloc; + apr_bucket_alloc_t *bucket_alloc = req->bucket_alloc; + apr_bucket_brigade *header_brigade = req->header_brigade; + apr_bucket_brigade *input_brigade = req->input_brigade; apr_bucket_brigade *body_brigade; apr_bucket *e; apr_off_t bytes, bytes_spooled = 0, fsize = 0; @@ -657,16 +679,18 @@ static void terminate_headers(apr_bucket_alloc_t * } if (bytes_spooled || 
force_cl) { - add_cl(p, bucket_alloc, header_brigade, apr_off_t_toa(p, bytes_spooled)); + add_cl(p, bucket_alloc, header_brigade, + apr_off_t_toa(p, bytes_spooled)); } terminate_headers(bucket_alloc, header_brigade); - APR_BRIGADE_CONCAT(header_brigade, body_brigade); + + APR_BRIGADE_CONCAT(input_brigade, body_brigade); if (tmpfile) { - apr_brigade_insert_file(header_brigade, tmpfile, 0, fsize, p); + apr_brigade_insert_file(input_brigade, tmpfile, 0, fsize, p); } if (apr_table_get(r->subprocess_env, "proxy-sendextracrlf")) { e = apr_bucket_immortal_create(ASCII_CRLF, 2, bucket_alloc); - APR_BRIGADE_INSERT_TAIL(header_brigade, e); + APR_BRIGADE_INSERT_TAIL(input_brigade, e); } return OK; } @@ -720,25 +744,16 @@ static apr_status_t proxy_buckets_lifetime_transfo return rv; } -enum rb_methods { - RB_INIT = 0, - RB_STREAM_CL, - RB_STREAM_CHUNKED, - RB_SPOOL_CL -}; - -static int ap_proxy_http_prefetch(apr_pool_t *p, request_rec *r, - proxy_conn_rec *p_conn, proxy_worker *worker, - proxy_server_conf *conf, - apr_uri_t *uri, - char *url, char *server_portstr, - apr_bucket_brigade *header_brigade, - apr_bucket_brigade *input_brigade, - char **old_cl_val, char **old_te_val, - enum rb_methods *rb_method, int flushall) +static int ap_proxy_http_prefetch(proxy_http_req_t *req, + apr_uri_t *uri, char *url) { + apr_pool_t *p = req->p; + request_rec *r = req->r; conn_rec *c = r->connection; - apr_bucket_alloc_t *bucket_alloc = c->bucket_alloc; + proxy_conn_rec *p_conn = req->backconn; + apr_bucket_alloc_t *bucket_alloc = req->bucket_alloc; + apr_bucket_brigade *header_brigade = req->header_brigade; + apr_bucket_brigade *input_brigade = req->input_brigade; apr_bucket_brigade *temp_brigade; apr_bucket *e; char *buf; @@ -747,7 +762,6 @@ static apr_status_t proxy_buckets_lifetime_transfo apr_off_t bytes; int force10, rv; apr_read_type_e block; - conn_rec *origin = p_conn->connection; if (apr_table_get(r->subprocess_env, "force-proxy-request-1.0")) { if (r->expecting_100) { @@ -759,8 
+773,9 @@ static apr_status_t proxy_buckets_lifetime_transfo } rv = ap_proxy_create_hdrbrgd(p, header_brigade, r, p_conn, - worker, conf, uri, url, server_portstr, - old_cl_val, old_te_val); + req->worker, req->sconf, + uri, url, req->server_portstr, + &req->old_cl_val, &req->old_te_val); if (rv != OK) { return rv; } @@ -777,9 +792,9 @@ static apr_status_t proxy_buckets_lifetime_transfo if (!r->kept_body && r->main) { /* XXX: Why DON'T sub-requests use keepalives? */ p_conn->close = 1; - *old_cl_val = NULL; - *old_te_val = NULL; - *rb_method = RB_STREAM_CL; + req->old_te_val = NULL; + req->old_cl_val = NULL; + req->rb_method = RB_STREAM_CL; e = apr_bucket_eos_create(input_brigade->bucket_alloc); APR_BRIGADE_INSERT_TAIL(input_brigade, e); goto skip_body; @@ -793,19 +808,20 @@ static apr_status_t proxy_buckets_lifetime_transfo * encoding has been done by the extensions' handler, and * do not modify add_te_chunked's logic */ - if (*old_te_val && strcasecmp(*old_te_val, "chunked") != 0) { + if (req->old_te_val && strcasecmp(req->old_te_val, "chunked") != 0) { ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01093) - "%s Transfer-Encoding is not supported", *old_te_val); + "%s Transfer-Encoding is not supported", + req->old_te_val); return HTTP_INTERNAL_SERVER_ERROR; } - if (*old_cl_val && *old_te_val) { + if (req->old_te_val && req->old_cl_val) { ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01094) "client %s (%s) requested Transfer-Encoding " "chunked body with Content-Length (C-L ignored)", c->client_ip, c->remote_host ? c->remote_host: ""); - *old_cl_val = NULL; - origin->keepalive = AP_CONN_CLOSE; + req->old_cl_val = NULL; + req->origin->keepalive = AP_CONN_CLOSE; p_conn->close = 1; } @@ -818,7 +834,7 @@ static apr_status_t proxy_buckets_lifetime_transfo * reasonable size. */ temp_brigade = apr_brigade_create(p, bucket_alloc); - block = (flushall) ? APR_NONBLOCK_READ : APR_BLOCK_READ; + block = (req->flushall) ? 
APR_NONBLOCK_READ : APR_BLOCK_READ; do { status = ap_get_brigade(r->input_filters, temp_brigade, AP_MODE_READBYTES, block, @@ -915,52 +931,51 @@ static apr_status_t proxy_buckets_lifetime_transfo * If we expected no body, and read no body, do not set * the Content-Length. */ - if (*old_cl_val || *old_te_val || bytes_read) { - *old_cl_val = apr_off_t_toa(r->pool, bytes_read); + if (req->old_cl_val || req->old_te_val || bytes_read) { + req->old_cl_val = apr_off_t_toa(r->pool, bytes_read); } - *rb_method = RB_STREAM_CL; + req->rb_method = RB_STREAM_CL; } - else if (*old_te_val) { + else if (req->old_te_val) { if (force10 || (apr_table_get(r->subprocess_env, "proxy-sendcl") && !apr_table_get(r->subprocess_env, "proxy-sendchunks") && !apr_table_get(r->subprocess_env, "proxy-sendchunked"))) { - *rb_method = RB_SPOOL_CL; + req->rb_method = RB_SPOOL_CL; } else { - *rb_method = RB_STREAM_CHUNKED; + req->rb_method = RB_STREAM_CHUNKED; } } - else if (*old_cl_val) { + else if (req->old_cl_val) { if (r->input_filters == r->proto_input_filters) { - *rb_method = RB_STREAM_CL; + req->rb_method = RB_STREAM_CL; } else if (!force10 && (apr_table_get(r->subprocess_env, "proxy-sendchunks") || apr_table_get(r->subprocess_env, "proxy-sendchunked")) && !apr_table_get(r->subprocess_env, "proxy-sendcl")) { - *rb_method = RB_STREAM_CHUNKED; + req->rb_method = RB_STREAM_CHUNKED; } else { - *rb_method = RB_SPOOL_CL; + req->rb_method = RB_SPOOL_CL; } } else { /* This is an appropriate default; very efficient for no-body * requests, and has the behavior that it will not add any C-L - * when the *old_cl_val is NULL. + * when the old_cl_val is NULL. */ - *rb_method = RB_SPOOL_CL; + req->rb_method = RB_SPOOL_CL; } /* If we have to spool the body, do it now, before connecting or * reusing the backend connection. 
*/ - if (*rb_method == RB_SPOOL_CL) { - rv = spool_reqbody_cl(p, r, header_brigade, input_brigade, - (bytes_read > 0) - || (*old_cl_val != NULL) - || (*old_te_val != NULL)); + if (req->rb_method == RB_SPOOL_CL) { + rv = spool_reqbody_cl(req, (bytes_read > 0) + || (req->old_cl_val != NULL) + || (req->old_te_val != NULL)); if (rv != OK) { return rv; } @@ -988,33 +1003,42 @@ skip_body: return OK; } -static -int ap_proxy_http_request(apr_pool_t *p, request_rec *r, - proxy_conn_rec *p_conn, - apr_bucket_brigade *header_brigade, - apr_bucket_brigade *input_brigade, - char *old_cl_val, char *old_te_val, - enum rb_methods rb_method, int flushall) +static int ap_proxy_http_request(proxy_http_req_t *req) { int rv; - conn_rec *origin = p_conn->connection; + request_rec *r = req->r; + apr_bucket_alloc_t *bucket_alloc = req->bucket_alloc; + apr_bucket_brigade *header_brigade = req->header_brigade; + apr_bucket_brigade *input_brigade = req->input_brigade; - /* send the request body, if any. */ - switch(rb_method) { + /* send the request header/body, if any. */ + switch(req->rb_method) { case RB_STREAM_CHUNKED: - rv = stream_reqbody_chunked(p, r, p_conn, origin, header_brigade, - input_brigade, flushall); + if (req->do_100_continue) { + rv = send_reqhdr_chunked(req); + } + else { + rv = stream_reqbody_chunked(req); + } break; case RB_STREAM_CL: - rv = stream_reqbody_cl(p, r, p_conn, origin, header_brigade, - input_brigade, old_cl_val, flushall); + if (req->do_100_continue) { + rv = send_reqhdr_cl(req); + } + else { + rv = stream_reqbody_cl(req); + } break; case RB_SPOOL_CL: - /* Prefetch has spooled the whole body, simply forward it now. - * This is all a single brigade, pass with flush flagged. + /* Prefetch has built the header and spooled the whole body; + * if we don't expect 100-continue we can flush both all at once, + * otherwise flush the header only. 
*/ - rv = ap_proxy_pass_brigade(r->connection->bucket_alloc, - r, p_conn, origin, header_brigade, 1); + if (!req->do_100_continue) { + APR_BRIGADE_CONCAT(header_brigade, input_brigade); + } + rv = ap_proxy_pass_brigade(bucket_alloc, r, req->backconn, + req->origin, header_brigade, 1); break; default: /* shouldn't be possible */ @@ -1027,7 +1051,8 @@ skip_body: /* apr_status_t value has been logged in lower level method */ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01097) "pass request body failed to %pI (%s) from %s (%s)", - p_conn->addr, p_conn->hostname ? p_conn->hostname: "", + req->backconn->addr, + req->backconn->hostname ? req->backconn->hostname: "", c->client_ip, c->remote_host ? c->remote_host: ""); return rv; } @@ -1294,12 +1319,14 @@ static int add_trailers(void *data, const char *ke return 1; } -static -int ap_proxy_http_process_response(apr_pool_t * p, request_rec *r, - proxy_conn_rec **backend_ptr, proxy_worker *worker, - proxy_server_conf *conf, char *server_portstr) +static int ap_proxy_http_process_response(proxy_http_req_t *req) { + apr_pool_t *p = req->p; + request_rec *r = req->r; conn_rec *c = r->connection; + proxy_worker *worker = req->worker; + proxy_conn_rec *backend = req->backconn; + int do_100_continue = req->do_100_continue; char buffer[HUGE_STRING_LEN]; const char *buf; char keepchar; @@ -1320,25 +1347,18 @@ static int add_trailers(void *data, const char *ke int proxy_status = OK; const char *original_status_line = r->status_line; const char *proxy_status_line = NULL; - proxy_conn_rec *backend = *backend_ptr; - conn_rec *origin = backend->connection; apr_interval_time_t old_timeout = 0; proxy_dir_conf *dconf; - int do_100_continue; dconf = ap_get_module_config(r->per_dir_config, &proxy_module); - do_100_continue = (worker->s->ping_timeout_set - && (worker->s->ping_timeout >= 0) - && (PROXYREQ_REVERSE == r->proxyreq) - && !(apr_table_get(r->subprocess_env, "force-proxy-request-1.0")) - && ap_request_has_body(r)); - bb = 
apr_brigade_create(p, c->bucket_alloc); pass_bb = apr_brigade_create(p, c->bucket_alloc); /* Setup for 100-Continue timeout if appropriate */ - if (do_100_continue) { + if (do_100_continue + && worker->s->ping_timeout_set + && worker->s->ping_timeout >= 0) { apr_socket_timeout_get(backend->sock, &old_timeout); if (worker->s->ping_timeout != old_timeout) { apr_status_t rc; @@ -1354,16 +1374,18 @@ static int add_trailers(void *data, const char *ke * filter chain */ - backend->r = make_fake_req(origin, r); + backend->r = make_fake_req(req->origin, r); /* In case anyone needs to know, this is a fake request that is really a * response. */ backend->r->proxyreq = PROXYREQ_RESPONSE; apr_table_setn(r->notes, "proxy-source-port", apr_psprintf(r->pool, "%hu", - origin->local_addr->port)); + req->origin->local_addr->port)); tmp_bb = apr_brigade_create(p, c->bucket_alloc); do { apr_status_t rc; + int major = 0, minor = 0; + int toclose = 0; apr_brigade_cleanup(bb); @@ -1379,7 +1401,10 @@ static int add_trailers(void *data, const char *ke if (APR_STATUS_IS_TIMEUP(rc)) { apr_table_setn(r->notes, "proxy_timedout", "1"); ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01103) "read timeout"); - if (do_100_continue) { + /* Reset to old timeout iff we've adjusted it. 
*/ + if (do_100_continue + && worker->s->ping_timeout_set + && worker->s->ping_timeout >= 0) { proxy_run_detach_backend(r, backend); return ap_proxyerror(r, HTTP_SERVICE_UNAVAILABLE, "Timeout on 100-Continue"); } @@ -1434,9 +1459,6 @@ static int add_trailers(void *data, const char *ke * This is buggy if we ever see an HTTP/1.10 */ if (apr_date_checkmask(buffer, "HTTP/#.# ###*")) { - int major, minor; - int toclose; - major = buffer[5] - '0'; minor = buffer[7] - '0'; @@ -1487,8 +1509,8 @@ static int add_trailers(void *data, const char *ke "Set-Cookie", NULL); /* shove the headers direct into r->headers_out */ - ap_proxy_read_headers(r, backend->r, buffer, sizeof(buffer), origin, - &pread_len); + ap_proxy_read_headers(r, backend->r, buffer, sizeof(buffer), + req->origin, &pread_len); if (r->headers_out == NULL) { ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(01106) @@ -1559,7 +1581,7 @@ static int add_trailers(void *data, const char *ke ap_set_content_type(r, apr_pstrdup(p, buf)); } if (!ap_is_HTTP_INFO(proxy_status)) { - ap_proxy_pre_http_request(origin, backend->r); + ap_proxy_pre_http_request(req->origin, backend->r); } /* Clear hop-by-hop headers */ @@ -1571,7 +1593,8 @@ static int add_trailers(void *data, const char *ke r->headers_out = ap_proxy_clean_warnings(p, r->headers_out); /* handle Via header in response */ - if (conf->viaopt != via_off && conf->viaopt != via_block) { + if (req->sconf->viaopt != via_off + && req->sconf->viaopt != via_block) { const char *server_name = ap_get_server_name(r); /* If USE_CANONICAL_NAME_OFF was configured for the proxy virtual host, * then the server name returned by ap_get_server_name() is the @@ -1582,18 +1605,18 @@ static int add_trailers(void *data, const char *ke server_name = r->server->server_hostname; /* create a "Via:" response header entry and merge it */ apr_table_addn(r->headers_out, "Via", - (conf->viaopt == via_full) + (req->sconf->viaopt == via_full) ? 
apr_psprintf(p, "%d.%d %s%s (%s)", HTTP_VERSION_MAJOR(r->proto_num), HTTP_VERSION_MINOR(r->proto_num), server_name, - server_portstr, + req->server_portstr, AP_SERVER_BASEVERSION) : apr_psprintf(p, "%d.%d %s%s", HTTP_VERSION_MAJOR(r->proto_num), HTTP_VERSION_MINOR(r->proto_num), server_name, - server_portstr) + req->server_portstr) ); } @@ -1600,7 +1623,7 @@ static int add_trailers(void *data, const char *ke /* cancel keepalive if HTTP/1.0 or less */ if ((major < 1) || (minor < 1)) { backend->close = 1; - origin->keepalive = AP_CONN_CLOSE; + req->origin->keepalive = AP_CONN_CLOSE; } } else { /* an http/0.9 response */ @@ -1611,18 +1634,6 @@ static int add_trailers(void *data, const char *ke } if (ap_is_HTTP_INFO(proxy_status)) { - interim_response++; - /* Reset to old timeout iff we've adjusted it */ - if (do_100_continue - && (r->status == HTTP_CONTINUE) - && (worker->s->ping_timeout != old_timeout)) { - apr_socket_timeout_set(backend->sock, old_timeout); - } - } - else { - interim_response = 0; - } - if (interim_response) { /* RFC2616 tells us to forward this. 
* * OTOH, an interim response here may mean the backend @@ -1643,7 +1654,11 @@ static int add_trailers(void *data, const char *ke ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r, "HTTP: received interim %d response", r->status); if (!policy - || (!strcasecmp(policy, "RFC") && ((r->expecting_100 = 1)))) { + || (proxy_status == HTTP_CONTINUE + && r->expecting_100) + || (!strcasecmp(policy, "RFC") + && (proxy_status != HTTP_CONTINUE + || (r->expecting_100 = 1)))) { ap_send_interim_response(r, 1); } /* FIXME: refine this to be able to specify per-response-status @@ -1653,7 +1668,78 @@ static int add_trailers(void *data, const char *ke ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(01108) "undefined proxy interim response policy"); } + interim_response++; } + else { + interim_response = 0; + } + + /* If we still do 100-continue (end-to-end or ping), either the + * current response is the expected "100 Continue" and we are done + * with this mode, or this is another interim response and we'll wait + * for the next one, or this is a final response and hence the backend + * did not honor our expectation. + */ + if (do_100_continue + && (proxy_status == HTTP_CONTINUE || !interim_response)) { + /* Reset to old timeout iff we've adjusted it. */ + if (worker->s->ping_timeout_set && worker->s->ping_timeout >= 0) { + apr_socket_timeout_set(backend->sock, old_timeout); + } + /* RFC 7231 - Section 5.1.1 - Expect - Requirement for servers + * o A server that responds with a final status code before + * reading the entire message body SHOULD indicate in that + * response whether it intends to close the connection or + * continue reading and discarding the request message. 
+ * + * So, if this response is not an interim 100 Continue, we can + * avoid sending the request body if the backend responded with + * "Connection: close" or HTTP < 1.1, and either let the core + * discard it or the caller try another balancer member with the + * same body (given status 503, though not implemented yet). + */ + if (proxy_status == HTTP_CONTINUE + || (major > 0 && minor > 0 && !toclose)) { + int status; + + /* Send the request body (fully). */ + switch(req->rb_method) { + case RB_STREAM_CL: + status = stream_reqbody_cl(req); + break; + case RB_STREAM_CHUNKED: + status = stream_reqbody_chunked(req); + break; + case RB_SPOOL_CL: + /* Prefetch has spooled the whole body, flush it. */ + status = ap_proxy_pass_brigade(req->bucket_alloc, r, + backend, req->origin, + req->input_brigade, 1); + break; + default: + /* Shouldn't happen */ + status = HTTP_INTERNAL_SERVER_ERROR; + break; + } + if (status != OK) { + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, + APLOGNO() "pass request body failed " + "to %pI (%s) from %s (%s) with status %i", + backend->addr, + backend->hostname ? backend->hostname : "", + c->client_ip, + c->remote_host ? c->remote_host : "", + status); + backend->close = 1; + proxy_run_detach_backend(r, backend); + return status; + } + } + + /* Once only! 
*/ + do_100_continue = 0; + } + /* Moved the fixups of Date headers and those affected by * ProxyPassReverse/etc from here to ap_proxy_read_headers */ @@ -1775,7 +1861,7 @@ static int add_trailers(void *data, const char *ke rv = ap_get_brigade(backend->r->input_filters, bb, AP_MODE_READBYTES, mode, - conf->io_buffer_size); + req->sconf->io_buffer_size); /* ap_get_brigade will return success with an empty brigade * for a non-blocking read which would block: */ @@ -1885,7 +1971,7 @@ static int add_trailers(void *data, const char *ke ap_proxy_release_connection(backend->worker->s->scheme, backend, r->server); /* Ensure that the backend is not reused */ - *backend_ptr = NULL; + req->backconn = NULL; } @@ -1894,12 +1980,13 @@ static int add_trailers(void *data, const char *ke || c->aborted) { /* Ack! Phbtt! Die! User aborted! */ /* Only close backend if we haven't got all from the - * backend. Furthermore if *backend_ptr is NULL it is no + * backend. Furthermore if backconn is NULL it is no * longer safe to fiddle around with backend as it might * be already in use by another thread. */ - if (*backend_ptr) { - backend->close = 1; /* this causes socket close below */ + if (req->backconn) { + /* this causes socket close below */ + req->backconn->close = 1; } finish = TRUE; } @@ -1923,7 +2010,7 @@ static int add_trailers(void *data, const char *ke proxy_run_detach_backend(r, backend); ap_proxy_release_connection(backend->worker->s->scheme, backend, r->server); - *backend_ptr = NULL; + req->backconn = NULL; /* Pass EOS bucket down the filter chain. */ e = apr_bucket_eos_create(c->bucket_alloc); @@ -1938,8 +2025,8 @@ static int add_trailers(void *data, const char *ke * created from scpool and this pool can be freed before this brigade. 
*/ apr_brigade_cleanup(bb); - if (*backend_ptr) { - proxy_run_detach_backend(r, backend); + if (req->backconn) { + proxy_run_detach_backend(r, req->backconn); } /* See define of AP_MAX_INTERIM_RESPONSES for why */ @@ -1981,20 +2068,15 @@ static int proxy_http_handler(request_rec *r, prox apr_port_t proxyport) { int status; - char server_portstr[32]; char *scheme; const char *proxy_function; const char *u; - apr_bucket_brigade *header_brigade; - apr_bucket_brigade *input_brigade; + proxy_http_req_t *req; proxy_conn_rec *backend = NULL; int is_ssl = 0; conn_rec *c = r->connection; int retry = 0; - char *old_cl_val = NULL, *old_te_val = NULL; - enum rb_methods rb_method = RB_INIT; char *locurl = url; - int flushall = 0; int toclose = 0; /* * Use a shorter-lived pool to reduce memory usage @@ -2036,6 +2118,13 @@ static int proxy_http_handler(request_rec *r, prox } ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, "HTTP: serving URL %s", url); + req = apr_pcalloc(p, sizeof *req); + req->p = p; + req->r = r; + req->sconf = conf; + req->worker = worker; + req->bucket_alloc = c->bucket_alloc; + req->rb_method = RB_INIT; /* create space for state information */ if ((status = ap_proxy_acquire_connection(proxy_function, &backend, @@ -2042,7 +2131,7 @@ static int proxy_http_handler(request_rec *r, prox worker, r->server)) != OK) goto cleanup; - + req->backconn = backend; backend->is_ssl = is_ssl; if (is_ssl) { @@ -2063,14 +2152,14 @@ static int proxy_http_handler(request_rec *r, prox } if (apr_table_get(r->subprocess_env, "proxy-flushall")) { - flushall = 1; + req->flushall = 1; } /* Step One: Determine Who To Connect To */ if ((status = ap_proxy_determine_connection(p, r, conf, worker, backend, uri, &locurl, proxyname, - proxyport, server_portstr, - sizeof(server_portstr))) != OK) + proxyport, req->server_portstr, + sizeof req->server_portstr)) != OK) goto cleanup; /* Prefetch (nonlocking) the request body so to increase the chance to get @@ -2083,13 +2172,9 @@ static int 
proxy_http_handler(request_rec *r, prox * to reduce to the minimum the unavoidable local is_socket_connected() vs * remote keepalive race condition. */ - input_brigade = apr_brigade_create(p, c->bucket_alloc); - header_brigade = apr_brigade_create(p, c->bucket_alloc); - if ((status = ap_proxy_http_prefetch(p, r, backend, worker, conf, uri, - locurl, server_portstr, - header_brigade, input_brigade, - &old_cl_val, &old_te_val, &rb_method, - flushall)) != OK) + req->input_brigade = apr_brigade_create(p, req->bucket_alloc); + req->header_brigade = apr_brigade_create(p, req->bucket_alloc); + if ((status = ap_proxy_http_prefetch(req, uri, locurl)) != OK) goto cleanup; /* We need to reset backend->close now, since ap_proxy_http_prefetch() set @@ -2101,27 +2186,42 @@ static int proxy_http_handler(request_rec *r, prox toclose = backend->close; backend->close = 0; + req->do_100_continue = ((r->expecting_100 + || (worker->s->ping_timeout_set + && (worker->s->ping_timeout >= 0))) + && (PROXYREQ_REVERSE == r->proxyreq) + && !(apr_table_get(r->subprocess_env, + "force-proxy-request-1.0")) + && ap_request_has_body(r)); + while (retry < 2) { - conn_rec *backconn; - if (retry) { char *newurl = url; /* Step One (again): (Re)Determine Who To Connect To */ - if ((status = ap_proxy_determine_connection(p, r, conf, worker, - backend, uri, &newurl, proxyname, proxyport, - server_portstr, sizeof(server_portstr))) != OK) + status = ap_proxy_determine_connection(p, r, conf, worker, + backend, uri, &newurl, + proxyname, proxyport, + req->server_portstr, + sizeof req->server_portstr); + if (status != OK) break; - /* The code assumes locurl is not changed during the loop, or - * ap_proxy_http_prefetch() would have to be called every time, - * and header_brigade be changed accordingly... - */ - AP_DEBUG_ASSERT(strcmp(newurl, locurl) == 0); + /* Rebuild consumed header brigade. 
*/ + apr_brigade_cleanup(req->header_brigade); + req->old_cl_val = req->old_te_val = NULL; + status = ap_proxy_create_hdrbrgd(p, req->header_brigade, + r, backend, worker, conf, + uri, newurl, req->server_portstr, + &req->old_cl_val, + &req->old_te_val); + if (status != OK) + break; } /* Step Two: Make the Connection */ - if (ap_proxy_connect_backend(proxy_function, backend, worker, r->server)) { + if (ap_proxy_connect_backend(proxy_function, backend, + worker, r->server)) { ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01114) "HTTP: failed to make connection to backend: %s", backend->hostname); @@ -2130,12 +2230,12 @@ static int proxy_http_handler(request_rec *r, prox } /* Step Three: Create conn_rec */ - backconn = backend->connection; - if (!backconn) { + req->origin = backend->connection; + if (!req->origin) { if ((status = ap_proxy_connection_create(proxy_function, backend, c, r->server)) != OK) break; - backconn = backend->connection; + req->origin = backend->connection; /* * On SSL connections set a note on the connection what CN is @@ -2156,9 +2256,10 @@ static int proxy_http_handler(request_rec *r, prox if (worker->s->ping_timeout_set && worker->s->ping_timeout < 0 && !ap_proxy_is_socket_connected(backend->sock)) { backend->close = 1; - ap_log_rerror(APLOG_MARK, APLOG_INFO, status, r, APLOGNO(02535) + ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(02535) "socket check failed to %pI (%s)", worker->cp->addr, worker->s->hostname); + status = HTTP_SERVICE_UNAVAILABLE; retry++; continue; } @@ -2167,7 +2268,7 @@ static int proxy_http_handler(request_rec *r, prox /* Don't recycle the connection if prefetch (above) told not to do so */ if (toclose) { backend->close = 1; - backconn->keepalive = AP_CONN_CLOSE; + req->origin->keepalive = AP_CONN_CLOSE; } /* Step Four: Send the Request @@ -2174,10 +2275,8 @@ static int proxy_http_handler(request_rec *r, prox * On the off-chance that we forced a 100-Continue as a * kinda HTTP ping test, allow for retries */ - 
if ((status = ap_proxy_http_request(p, r, backend, - header_brigade, input_brigade, - old_cl_val, old_te_val, rb_method, - flushall)) != OK) { + status = ap_proxy_http_request(req); + if (status != OK) { proxy_run_detach_backend(r, backend); if ((status == HTTP_SERVICE_UNAVAILABLE) && worker->s->ping_timeout_set && @@ -2194,8 +2293,7 @@ static int proxy_http_handler(request_rec *r, prox } /* Step Five: Receive the Response... Fall thru to cleanup */ - status = ap_proxy_http_process_response(p, r, &backend, worker, - conf, server_portstr); + status = ap_proxy_http_process_response(req); break; } @@ -2202,10 +2300,10 @@ static int proxy_http_handler(request_rec *r, prox /* Step Six: Clean Up */ cleanup: - if (backend) { + if (req->backconn) { if (status != OK) - backend->close = 1; - ap_proxy_http_cleanup(proxy_function, r, backend); + req->backconn->close = 1; + ap_proxy_http_cleanup(proxy_function, r, req->backconn); } return status; } Index: server/protocol.c =================================================================== --- server/protocol.c (revision 1678215) +++ server/protocol.c (working copy) @@ -1906,23 +1906,25 @@ AP_DECLARE(void) ap_send_interim_response(request_ "Status is %d - not sending interim response", r->status); return; } - if ((r->status == HTTP_CONTINUE) && !r->expecting_100) { - /* - * Don't send 100-Continue when there was no Expect: 100-continue - * in the request headers. For origin servers this is a SHOULD NOT - * for proxies it is a MUST NOT according to RFC 2616 8.2.3 + if (r->status == HTTP_CONTINUE) { + if (!r->expecting_100) { + /* + * Don't send 100-Continue when there was no Expect: 100-continue + * in the request headers. For origin servers this is a SHOULD NOT + * for proxies it is a MUST NOT according to RFC 2616 8.2.3 + */ + return; + } + + /* if we send an interim response, we're no longer in a state of + * expecting one. Also, this could feasibly be in a subrequest, + * so we need to propagate the fact that we responded. 
*/ - return; + for (rr = r; rr != NULL; rr = rr->main) { + rr->expecting_100 = 0; + } } - /* if we send an interim response, we're no longer in a state of - * expecting one. Also, this could feasibly be in a subrequest, - * so we need to propagate the fact that we responded. - */ - for (rr = r; rr != NULL; rr = rr->main) { - rr->expecting_100 = 0; - } - status_line = apr_pstrcat(r->pool, AP_SERVER_PROTOCOL, " ", r->status_line, CRLF, NULL); ap_xlate_proto_to_ascii(status_line, strlen(status_line));