Hello!

On Sat, Oct 26, 2013 at 03:38:35PM -0700, Yichun Zhang (agentzh) wrote:
> Hello!
>
> Akos Gyimesi reported a request hang (downstream connections stuck in
> the CLOSE_WAIT state forever) regarding use of proxy_cache_lock in
> subrequests.
>
> The issue is that when proxy_cache_lock_timeout is reached,
> ngx_http_file_cache_lock_wait_handler calls
> r->connection->write->handler() directly, but
> r->connection->write->handler is (usually) just
> ngx_http_request_handler, which simply picks up r->connection->data,
> which is *not* necessarily the current (sub)request, so the current
> subrequest may never be continued nor finalized, leading to an
> infinite request hang.
>
> The following patch fixes this issue for me. Comments welcome!

Yichun, I've spent some time looking into this, and I don't see how it
can cause an infinite hang, at least with stock nginx modules.  It
certainly can cause suboptimal behaviour though, both with proxy cache
locks and with AIO.

Here are two patches to address this (and also logging issues with
subrequests):

# HG changeset patch
# User Maxim Dounin <mdou...@mdounin.ru>
# Date 1417096347 -10800
#      Thu Nov 27 16:52:27 2014 +0300
# Node ID 6182e4636b972aee8edfdfb70d8ccb45b5d9303a
# Parent  005b56eca92995fed63d5a5526db895c2402b96f
Upstream: improved subrequest logging.

To ensure proper logging, make sure to set current_request in all event
handlers, including resolve, ssl handshake, cache lock wait timer and
aio read handlers.  A macro, ngx_http_set_log_request(), is introduced
to simplify this.

diff --git a/src/http/ngx_http_file_cache.c b/src/http/ngx_http_file_cache.c
--- a/src/http/ngx_http_file_cache.c
+++ b/src/http/ngx_http_file_cache.c
@@ -14,6 +14,8 @@
 static ngx_int_t ngx_http_file_cache_lock(ngx_http_request_t *r,
     ngx_http_cache_t *c);
 static void ngx_http_file_cache_lock_wait_handler(ngx_event_t *ev);
+static void ngx_http_file_cache_lock_wait(ngx_http_request_t *r,
+    ngx_http_cache_t *c);
 static ngx_int_t ngx_http_file_cache_read(ngx_http_request_t *r,
     ngx_http_cache_t *c);
 static ssize_t ngx_http_file_cache_aio_read(ngx_http_request_t *r,
@@ -448,25 +450,35 @@ ngx_http_file_cache_lock(ngx_http_reques
 static void
 ngx_http_file_cache_lock_wait_handler(ngx_event_t *ev)
 {
-    ngx_uint_t                wait;
-    ngx_msec_t                now, timer;
-    ngx_http_cache_t         *c;
-    ngx_http_request_t       *r;
-    ngx_http_file_cache_t    *cache;
+    ngx_connection_t    *c;
+    ngx_http_request_t  *r;
 
     r = ev->data;
-    c = r->cache;
+    c = r->connection;
+
+    ngx_http_set_log_request(c->log, r);
+
+    ngx_log_debug2(NGX_LOG_DEBUG_HTTP, c->log, 0,
+                   "http file cache wait: \"%V?%V\"", &r->uri, &r->args);
+
+    ngx_http_file_cache_lock_wait(r, r->cache);
+}
+
+
+static void
+ngx_http_file_cache_lock_wait(ngx_http_request_t *r, ngx_http_cache_t *c)
+{
+    ngx_uint_t              wait;
+    ngx_msec_t              now, timer;
+    ngx_http_file_cache_t  *cache;
 
     now = ngx_current_msec;
 
-    ngx_log_debug2(NGX_LOG_DEBUG_HTTP, ev->log, 0,
-                   "http file cache wait handler wt:%M cur:%M",
-                   c->wait_time, now);
-
     timer = c->wait_time - now;
 
     if ((ngx_msec_int_t) timer <= 0) {
-        ngx_log_error(NGX_LOG_INFO, ev->log, 0, "cache lock timeout");
+        ngx_log_error(NGX_LOG_INFO, r->connection->log, 0,
+                      "cache lock timeout");
         c->lock_timeout = 0;
         goto wakeup;
     }
@@ -485,7 +497,7 @@ ngx_http_file_cache_lock_wait_handler(ng
     ngx_shmtx_unlock(&cache->shpool->mutex);
 
     if (wait) {
-        ngx_add_timer(ev, (timer > 500) ? 500 : timer);
+        ngx_add_timer(&c->wait_event, (timer > 500) ? 500 : timer);
         return;
     }
 
@@ -665,10 +677,17 @@
 static void
 ngx_http_cache_aio_event_handler(ngx_event_t *ev)
 {
     ngx_event_aio_t     *aio;
+    ngx_connection_t    *c;
     ngx_http_request_t  *r;
 
     aio = ev->data;
     r = aio->data;
+    c = r->connection;
+
+    ngx_http_set_log_request(c->log, r);
+
+    ngx_log_debug2(NGX_LOG_DEBUG_HTTP, c->log, 0,
+                   "http file cache aio: \"%V?%V\"", &r->uri, &r->args);
 
     r->main->blocked--;
     r->aio = 0;
diff --git a/src/http/ngx_http_request.c b/src/http/ngx_http_request.c
--- a/src/http/ngx_http_request.c
+++ b/src/http/ngx_http_request.c
@@ -2169,13 +2169,11 @@ ngx_http_request_handler(ngx_event_t *ev
 {
     ngx_connection_t    *c;
     ngx_http_request_t  *r;
-    ngx_http_log_ctx_t  *ctx;
 
     c = ev->data;
     r = c->data;
 
-    ctx = c->log->data;
-    ctx->current_request = r;
+    ngx_http_set_log_request(c->log, r);
 
     ngx_log_debug2(NGX_LOG_DEBUG_HTTP, c->log, 0,
                    "http run request: \"%V?%V\"", &r->uri, &r->args);
@@ -2195,7 +2193,6 @@ void
 ngx_http_run_posted_requests(ngx_connection_t *c)
 {
     ngx_http_request_t         *r;
-    ngx_http_log_ctx_t         *ctx;
     ngx_http_posted_request_t  *pr;
 
     for ( ;; ) {
@@ -2215,8 +2212,7 @@ ngx_http_run_posted_requests(ngx_connect
 
         r = pr->request;
 
-        ctx = c->log->data;
-        ctx->current_request = r;
+        ngx_http_set_log_request(c->log, r);
 
         ngx_log_debug2(NGX_LOG_DEBUG_HTTP, c->log, 0,
                        "http posted request: \"%V?%V\"", &r->uri, &r->args);
diff --git a/src/http/ngx_http_request.h b/src/http/ngx_http_request.h
--- a/src/http/ngx_http_request.h
+++ b/src/http/ngx_http_request.h
@@ -595,4 +595,8 @@ extern ngx_http_header_out_t       ngx_http_
 }
 
 
+#define ngx_http_set_log_request(log, r)                                     \
+    ((ngx_http_log_ctx_t *) log->data)->current_request = r
+
+
 #endif /* _NGX_HTTP_REQUEST_H_INCLUDED_ */
diff --git a/src/http/ngx_http_upstream.c b/src/http/ngx_http_upstream.c
--- a/src/http/ngx_http_upstream.c
+++ b/src/http/ngx_http_upstream.c
@@ -942,6 +942,11 @@ ngx_http_upstream_resolve_handler(ngx_re
     u = r->upstream;
     ur = u->resolved;
 
+    ngx_http_set_log_request(c->log, r);
+
+    ngx_log_debug2(NGX_LOG_DEBUG_HTTP, c->log, 0,
+                   "http upstream resolve: \"%V?%V\"", &r->uri, &r->args);
+
     if (ctx->state) {
         ngx_log_error(NGX_LOG_ERR, r->connection->log, 0,
                       "%V could not be resolved (%i: %s)",
@@ -1003,7 +1008,6 @@ ngx_http_upstream_handler(ngx_event_t *e
 {
     ngx_connection_t     *c;
     ngx_http_request_t   *r;
-    ngx_http_log_ctx_t   *ctx;
     ngx_http_upstream_t  *u;
 
     c = ev->data;
@@ -1012,8 +1016,7 @@ ngx_http_upstream_handler(ngx_event_t *e
     u = r->upstream;
     c = r->connection;
 
-    ctx = c->log->data;
-    ctx->current_request = r;
+    ngx_http_set_log_request(c->log, r);
 
     ngx_log_debug2(NGX_LOG_DEBUG_HTTP, c->log, 0,
                    "http upstream request: \"%V?%V\"", &r->uri, &r->args);
@@ -1447,6 +1450,8 @@ ngx_http_upstream_ssl_handshake(ngx_conn
     r = c->data;
     u = r->upstream;
 
+    ngx_http_set_log_request(c->log, r);
+
     if (c->ssl->handshaked) {
 
         if (u->conf->ssl_verify) {
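
For reference, the pattern this first patch applies in each event handler
looks roughly like this when written out as a standalone example.  This is
only an illustration, assuming the nginx HTTP headers and the
ngx_http_set_log_request() macro introduced above are available;
example_timer_handler() is a made-up name, not a function from the patch:

    #include <ngx_config.h>
    #include <ngx_core.h>
    #include <ngx_http.h>

    /* hypothetical handler for a per-request timer event (ev->data == r),
     * following the logging pattern from the patch above */

    static void
    example_timer_handler(ngx_event_t *ev)
    {
        ngx_connection_t    *c;
        ngx_http_request_t  *r;

        r = ev->data;
        c = r->connection;

        /* point the connection's log context at the (sub)request being
         * handled, so error_log/debug lines below name the right URI */
        ngx_http_set_log_request(c->log, r);

        ngx_log_debug2(NGX_LOG_DEBUG_HTTP, c->log, 0,
                       "example handler: \"%V?%V\"", &r->uri, &r->args);

        /* ... the handler's real work for r would go here ... */
    }

Without the ngx_http_set_log_request() call the log context keeps pointing
at whichever request was set last, so messages logged from such handlers
can be attributed to an unrelated (sub)request.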
# HG changeset patch
# User Maxim Dounin <mdou...@mdounin.ru>
# Date 1417096358 -10800
#      Thu Nov 27 16:52:38 2014 +0300
# Node ID a96fc771c7bf1343f7cb965d525a1479644f3325
# Parent  6182e4636b972aee8edfdfb70d8ccb45b5d9303a
Cache: proper wake up of subrequests.

In case of a cache lock timeout and in the aio handler we now call
r->write_event_handler() instead of a connection write handler, to make
sure to run the appropriate subrequest.  Previous code failed to run
inactive subrequests and hence resulted in suboptimal behaviour, see
report by Yichun Zhang:

http://mailman.nginx.org/pipermail/nginx-devel/2013-October/004435.html

(The infinite hang claimed in the report seems impossible without 3rd
party modules, as subrequests will be eventually woken up by the
postpone filter.)

diff --git a/src/http/ngx_http_file_cache.c b/src/http/ngx_http_file_cache.c
--- a/src/http/ngx_http_file_cache.c
+++ b/src/http/ngx_http_file_cache.c
@@ -462,6 +462,8 @@ ngx_http_file_cache_lock_wait_handler(ng
                    "http file cache wait: \"%V?%V\"", &r->uri, &r->args);
 
     ngx_http_file_cache_lock_wait(r, r->cache);
+
+    ngx_http_run_posted_requests(c);
 }
 
 
@@ -505,7 +507,7 @@ wakeup:
 
     c->waiting = 0;
     r->main->blocked--;
-    r->connection->write->handler(r->connection->write);
+    r->write_event_handler(r);
 }
 
 
@@ -692,7 +694,9 @@ ngx_http_cache_aio_event_handler(ngx_eve
     r->main->blocked--;
     r->aio = 0;
 
-    r->connection->write->handler(r->connection->write);
+    r->write_event_handler(r);
+
+    ngx_http_run_posted_requests(c);
 }
 
 #endif

-- 
Maxim Dounin
http://nginx.org/

_______________________________________________
nginx-devel mailing list
nginx-devel@nginx.org
http://mailman.nginx.org/mailman/listinfo/nginx-devel
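
To make the wake-up change in the second patch easier to see outside of the
diff context, here is a minimal standalone sketch contrasting the old and
new paths.  Again this is only an illustration assuming the nginx HTTP
headers; example_wakeup() is a made-up helper, not part of the patch:

    #include <ngx_config.h>
    #include <ngx_core.h>
    #include <ngx_http.h>

    /* hypothetical helper resuming a (sub)request r after a cache lock
     * timeout or an aio completion */

    static void
    example_wakeup(ngx_http_request_t *r)
    {
        ngx_connection_t  *c;

        c = r->connection;

        /* Old behaviour: waking the connection's write handler, i.e.
         *
         *     c->write->handler(c->write);
         *
         * runs ngx_http_request_handler(), which continues c->data, the
         * request currently active on the connection, and that is not
         * necessarily r when r is a subrequest. */

        /* New behaviour: run r's own write event handler, so exactly the
         * waiting (sub)request is resumed ... */
        r->write_event_handler(r);

        /* ... and then run any requests posted while handling r, e.g. a
         * parent request that should continue once the subrequest is
         * done. */
        ngx_http_run_posted_requests(c);
    }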