[nginx] Upstream: variables support in proxy_limit_rate and friends.
details:   https://hg.nginx.org/nginx/rev/2e9588d65dd9
branches:
changeset: 9249:2e9588d65dd9
user:      J Carter
date:      Sat Nov 25 21:57:09 2023 +0000
description:
Upstream: variables support in proxy_limit_rate and friends.

diffstat:

 src/http/modules/ngx_http_fastcgi_module.c |  8
 src/http/modules/ngx_http_proxy_module.c   |  8
 src/http/modules/ngx_http_scgi_module.c    |  8
 src/http/modules/ngx_http_uwsgi_module.c   |  8
 src/http/ngx_http_upstream.c               |  2 +-
 src/http/ngx_http_upstream.h               |  2 +-
 6 files changed, 18 insertions(+), 18 deletions(-)

diffs (152 lines):

diff -r f7d53c7f7014 -r 2e9588d65dd9 src/http/modules/ngx_http_fastcgi_module.c
--- a/src/http/modules/ngx_http_fastcgi_module.c	Thu May 23 19:15:38 2024 +0400
+++ b/src/http/modules/ngx_http_fastcgi_module.c	Sat Nov 25 21:57:09 2023 +0000
@@ -375,7 +375,7 @@ static ngx_command_t ngx_http_fastcgi_c
     { ngx_string("fastcgi_limit_rate"),
       NGX_HTTP_MAIN_CONF|NGX_HTTP_SRV_CONF|NGX_HTTP_LOC_CONF|NGX_CONF_TAKE1,
-      ngx_conf_set_size_slot,
+      ngx_http_set_complex_value_size_slot,
      NGX_HTTP_LOC_CONF_OFFSET,
      offsetof(ngx_http_fastcgi_loc_conf_t, upstream.limit_rate),
      NULL },
@@ -2898,7 +2898,7 @@ ngx_http_fastcgi_create_loc_conf(ngx_con
     conf->upstream.send_lowat = NGX_CONF_UNSET_SIZE;
     conf->upstream.buffer_size = NGX_CONF_UNSET_SIZE;
-    conf->upstream.limit_rate = NGX_CONF_UNSET_SIZE;
+    conf->upstream.limit_rate = NGX_CONF_UNSET_PTR;

     conf->upstream.busy_buffers_size_conf = NGX_CONF_UNSET_SIZE;
     conf->upstream.max_temp_file_size_conf = NGX_CONF_UNSET_SIZE;
@@ -3015,8 +3015,8 @@ ngx_http_fastcgi_merge_loc_conf(ngx_conf
                               prev->upstream.buffer_size,
                               (size_t) ngx_pagesize);

-    ngx_conf_merge_size_value(conf->upstream.limit_rate,
-                              prev->upstream.limit_rate, 0);
+    ngx_conf_merge_ptr_value(conf->upstream.limit_rate,
+                             prev->upstream.limit_rate, NULL);

     ngx_conf_merge_bufs_value(conf->upstream.bufs, prev->upstream.bufs,

diff -r f7d53c7f7014 -r 2e9588d65dd9 src/http/modules/ngx_http_proxy_module.c
--- a/src/http/modules/ngx_http_proxy_module.c	Thu May 23 19:15:38 2024 +0400
+++ b/src/http/modules/ngx_http_proxy_module.c	Sat Nov 25 21:57:09 2023 +0000
@@ -494,7 +494,7 @@ static ngx_command_t ngx_http_proxy_com
     { ngx_string("proxy_limit_rate"),
       NGX_HTTP_MAIN_CONF|NGX_HTTP_SRV_CONF|NGX_HTTP_LOC_CONF|NGX_CONF_TAKE1,
-      ngx_conf_set_size_slot,
+      ngx_http_set_complex_value_size_slot,
      NGX_HTTP_LOC_CONF_OFFSET,
      offsetof(ngx_http_proxy_loc_conf_t, upstream.limit_rate),
      NULL },
@@ -3371,7 +3371,7 @@ ngx_http_proxy_create_loc_conf(ngx_conf_
     conf->upstream.send_lowat = NGX_CONF_UNSET_SIZE;
     conf->upstream.buffer_size = NGX_CONF_UNSET_SIZE;
-    conf->upstream.limit_rate = NGX_CONF_UNSET_SIZE;
+    conf->upstream.limit_rate = NGX_CONF_UNSET_PTR;

     conf->upstream.busy_buffers_size_conf = NGX_CONF_UNSET_SIZE;
     conf->upstream.max_temp_file_size_conf = NGX_CONF_UNSET_SIZE;
@@ -3515,8 +3515,8 @@ ngx_http_proxy_merge_loc_conf(ngx_conf_t
                               prev->upstream.buffer_size,
                               (size_t) ngx_pagesize);

-    ngx_conf_merge_size_value(conf->upstream.limit_rate,
-                              prev->upstream.limit_rate, 0);
+    ngx_conf_merge_ptr_value(conf->upstream.limit_rate,
+                             prev->upstream.limit_rate, NULL);

     ngx_conf_merge_bufs_value(conf->upstream.bufs, prev->upstream.bufs,
                               8, ngx_pagesize);

diff -r f7d53c7f7014 -r 2e9588d65dd9 src/http/modules/ngx_http_scgi_module.c
--- a/src/http/modules/ngx_http_scgi_module.c	Thu May 23 19:15:38 2024 +0400
+++ b/src/http/modules/ngx_http_scgi_module.c	Sat Nov 25 21:57:09 2023 +0000
@@ -223,7 +223,7 @@ static ngx_command_t ngx_http_scgi_comma
     { ngx_string("scgi_limit_rate"),
       NGX_HTTP_MAIN_CONF|NGX_HTTP_SRV_CONF|NGX_HTTP_LOC_CONF|NGX_CONF_TAKE1,
-      ngx_conf_set_size_slot,
+      ngx_http_set_complex_value_size_slot,
      NGX_HTTP_LOC_CONF_OFFSET,
      offsetof(ngx_http_scgi_loc_conf_t, upstream.limit_rate),
      NULL },
@@ -1301,7 +1301,7 @@ ngx_http_scgi_create_loc_conf(ngx_conf_t
     conf->upstream.send_lowat = NGX_CONF_UNSET_SIZE;
     conf->upstream.buffer_size = NGX_CONF_UNSET_SIZE;
-    conf->upstream.limit_rate = NGX_CONF_UNSET_SIZE;
+    conf->upstream.limit_rate = NGX_CONF_UNSET_PTR;

     conf->upstream.busy_buffers_size_conf = NGX_CONF_UNSET_SIZE;
     conf->upstream.max_temp_file_size_conf = NGX_CONF_UNSET_SIZE;
@@ -1413,8 +1413,8 @@ ngx_http_scgi_merge_loc_conf(ngx_conf_t
                               prev->upstream.buffer_size,
                               (size_t)
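[Editorial note] To make the effect concrete, here is a minimal configuration
sketch of what this change enables: proxy_limit_rate (and the fastcgi/scgi/uwsgi
equivalents) can now take a variable, evaluated per request. The "backend"
upstream, the X-Plan header, the $rate variable and the values are all
hypothetical and purely illustrative, not part of the patch:

    map $http_x_plan $rate {
        default  102400;     # ~100 KiB/s for ordinary clients
        premium  0;          # 0 disables the limit
    }

    server {
        listen 8000;

        location / {
            proxy_pass http://backend;
            proxy_limit_rate $rate;
        }
    }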
Re: [PATCH] Proxy: altered limit_rate to support variables
Hi, On Tue, Dec 26, 2023 at 07:07:37PM +0400, Sergey Kandaurov wrote: > > > On 26 Nov 2023, at 03:37, J Carter wrote: > > > > # HG changeset patch > > # User J Carter > > # Date 1700949429 0 > > # Sat Nov 25 21:57:09 2023 + > > # Node ID 98306e705015758eab0a05103d90e6bdb1da2819 > > # Parent f366007dd23a6ce8e8427c1b3042781b618a2ade > > Proxy: altered limit_rate to support variables > > Since it changes the upstream interface, I'd rather say: > > Upstream: variables support in proxy_limit_rate and friends. > > The change looks good to me, also it compliments variables support > in the stream proxy module's directives. > Any particular use-cases you'd like to share for this functionality? > > > > > diff -r f366007dd23a -r 98306e705015 > > src/http/modules/ngx_http_fastcgi_module.c > > --- a/src/http/modules/ngx_http_fastcgi_module.cTue Nov 14 15:26:02 > > 2023 +0400 > > +++ b/src/http/modules/ngx_http_fastcgi_module.cSat Nov 25 21:57:09 > > 2023 + > > @@ -375,7 +375,7 @@ > > > > { ngx_string("fastcgi_limit_rate"), > > NGX_HTTP_MAIN_CONF|NGX_HTTP_SRV_CONF|NGX_HTTP_LOC_CONF|NGX_CONF_TAKE1, > > - ngx_conf_set_size_slot, > > + ngx_http_set_complex_value_size_slot, > > NGX_HTTP_LOC_CONF_OFFSET, > > offsetof(ngx_http_fastcgi_loc_conf_t, upstream.limit_rate), > > NULL }, > > @@ -2898,7 +2898,7 @@ > > > > conf->upstream.send_lowat = NGX_CONF_UNSET_SIZE; > > conf->upstream.buffer_size = NGX_CONF_UNSET_SIZE; > > -conf->upstream.limit_rate = NGX_CONF_UNSET_SIZE; > > +conf->upstream.limit_rate = NGX_CONF_UNSET_PTR; > > > > conf->upstream.busy_buffers_size_conf = NGX_CONF_UNSET_SIZE; > > conf->upstream.max_temp_file_size_conf = NGX_CONF_UNSET_SIZE; > > @@ -3015,8 +3015,8 @@ > > prev->upstream.buffer_size, > > (size_t) ngx_pagesize); > > > > -ngx_conf_merge_size_value(conf->upstream.limit_rate, > > - prev->upstream.limit_rate, 0); > > +ngx_conf_merge_ptr_value(conf->upstream.limit_rate, > > + prev->upstream.limit_rate, NULL); > > > > > > ngx_conf_merge_bufs_value(conf->upstream.bufs, prev->upstream.bufs, > > diff -r f366007dd23a -r 98306e705015 > > src/http/modules/ngx_http_proxy_module.c > > --- a/src/http/modules/ngx_http_proxy_module.c Tue Nov 14 15:26:02 > > 2023 +0400 > > +++ b/src/http/modules/ngx_http_proxy_module.c Sat Nov 25 21:57:09 > > 2023 + > > @@ -494,7 +494,7 @@ > > > > { ngx_string("proxy_limit_rate"), > > NGX_HTTP_MAIN_CONF|NGX_HTTP_SRV_CONF|NGX_HTTP_LOC_CONF|NGX_CONF_TAKE1, > > - ngx_conf_set_size_slot, > > + ngx_http_set_complex_value_size_slot, > > NGX_HTTP_LOC_CONF_OFFSET, > > offsetof(ngx_http_proxy_loc_conf_t, upstream.limit_rate), > > NULL }, > > @@ -3371,7 +3371,7 @@ > > > > conf->upstream.send_lowat = NGX_CONF_UNSET_SIZE; > > conf->upstream.buffer_size = NGX_CONF_UNSET_SIZE; > > -conf->upstream.limit_rate = NGX_CONF_UNSET_SIZE; > > +conf->upstream.limit_rate = NGX_CONF_UNSET_PTR; > > > > conf->upstream.busy_buffers_size_conf = NGX_CONF_UNSET_SIZE; > > conf->upstream.max_temp_file_size_conf = NGX_CONF_UNSET_SIZE; > > @@ -3515,8 +3515,8 @@ > > prev->upstream.buffer_size, > > (size_t) ngx_pagesize); > > > > -ngx_conf_merge_size_value(conf->upstream.limit_rate, > > - prev->upstream.limit_rate, 0); > > +ngx_conf_merge_ptr_value(conf->upstream.limit_rate, > > + prev->upstream.limit_rate, NULL); > > > > ngx_conf_merge_bufs_value(conf->upstream.bufs, prev->upstream.bufs, > > 8, ngx_pagesize); > > diff -r f366007dd23a -r 98306e705015 src/http/modules/ngx_http_scgi_module.c > > --- a/src/http/modules/ngx_http_scgi_module.c Tue Nov 14 15:26:02 > > 2023 +0400 > > +++ 
b/src/http/modules/ngx_http_scgi_module.c Sat Nov 25 21:57:09 > > 2023 + > > @@ -223,7 +223,7 @@ > > > > { ngx_string("scgi_limit_rate"), > > NGX_HTTP_MAIN_CONF|NGX_HTTP_SRV_CONF|NGX_HTTP_LOC_CONF|NGX_CONF_TAKE1, > > - ngx_conf_set_size_slot, > > + ngx_http_set_complex_value_size_slot, > > NGX_HTTP_LOC_CONF_OFFSET, > > offsetof(ngx_http_scgi_loc_conf_t, upstream.limit_rate), > > NULL }, > > @@ -1301,7 +1301,7 @@ > > > > conf->upstream.send_lowat = NGX_CONF_UNSET_SIZE; > > conf->upstream.buffer_size = NGX_CONF_UNSET_SIZE; > > -conf->upstream.limit_rate = NGX_CONF_UNSET_SIZE; > > +conf->upstream.limit_rate = NGX_CONF_UNSET_PTR; > > > > conf->upstream.busy_buffers_size_conf = NGX_CONF_UNSET_SIZE; > > conf->upstream.max_temp_file_size_conf = NGX_CONF_UNSET_SIZE; > > @@ -1413,8 +1413,8 @@ > > prev->upstream.buffer_size, > > (size_t)
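[Editorial note] The ngx_http_upstream.c/ngx_http_upstream.h part of the diff is
cut off above. Conceptually, with upstream.limit_rate stored as an
ngx_http_complex_value_t *, the upstream evaluates it per request; a rough
sketch of that runtime side, assuming the ngx_http_complex_value_size() helper
used elsewhere for size-valued complex values (this is not the literal
committed hunk):

    size_t  limit_rate;

    /* 0 is the default when no complex value was configured */
    limit_rate = ngx_http_complex_value_size(r, u->conf->limit_rate, 0);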
[nginx] Optimized chain link usage (ticket #2614).
details:   https://hg.nginx.org/nginx/rev/f7d53c7f7014
branches:
changeset: 9248:f7d53c7f7014
user:      Roman Arutyunyan
date:      Thu May 23 19:15:38 2024 +0400
description:
Optimized chain link usage (ticket #2614).

Previously chain links could sometimes be dropped instead of being reused,
which could result in increased memory consumption during long requests.

A similar chain link issue in ngx_http_gzip_filter_module was fixed in
da46bfc484ef (1.11.10).

Based on a patch by Sangmin Lee.

diffstat:

 src/core/ngx_output_chain.c                      | 10 --
 src/http/modules/ngx_http_grpc_module.c          |  5 -
 src/http/modules/ngx_http_gunzip_filter_module.c | 18 ++
 src/http/modules/ngx_http_gzip_filter_module.c   | 10 +++---
 src/http/modules/ngx_http_ssi_filter_module.c    |  8 ++--
 src/http/modules/ngx_http_sub_filter_module.c    |  8 ++--
 6 files changed, 45 insertions(+), 14 deletions(-)

diffs (158 lines):

diff -r f58b6f636238 -r f7d53c7f7014 src/core/ngx_output_chain.c
--- a/src/core/ngx_output_chain.c	Thu May 16 11:15:10 2024 +0200
+++ b/src/core/ngx_output_chain.c	Thu May 23 19:15:38 2024 +0400
@@ -117,7 +117,10 @@ ngx_output_chain(ngx_output_chain_ctx_t
                 ngx_debug_point();

-            ctx->in = ctx->in->next;
+            cl = ctx->in;
+            ctx->in = cl->next;
+
+            ngx_free_chain(ctx->pool, cl);

             continue;
         }
@@ -203,7 +206,10 @@ ngx_output_chain(ngx_output_chain_ctx_t
         /* delete the completed buf from the ctx->in chain */

         if (ngx_buf_size(ctx->in->buf) == 0) {
-            ctx->in = ctx->in->next;
+            cl = ctx->in;
+            ctx->in = cl->next;
+
+            ngx_free_chain(ctx->pool, cl);
         }

         cl = ngx_alloc_chain_link(ctx->pool);

diff -r f58b6f636238 -r f7d53c7f7014 src/http/modules/ngx_http_grpc_module.c
--- a/src/http/modules/ngx_http_grpc_module.c	Thu May 16 11:15:10 2024 +0200
+++ b/src/http/modules/ngx_http_grpc_module.c	Thu May 23 19:15:38 2024 +0400
@@ -1231,7 +1231,7 @@ ngx_http_grpc_body_output_filter(void *d
     ngx_buf_t              *b;
     ngx_int_t               rc;
     ngx_uint_t              next, last;
-    ngx_chain_t            *cl, *out, **ll;
+    ngx_chain_t            *cl, *out, *ln, **ll;
     ngx_http_upstream_t    *u;
     ngx_http_grpc_ctx_t    *ctx;
     ngx_http_grpc_frame_t  *f;
@@ -1459,7 +1459,10 @@ ngx_http_grpc_body_output_filter(void *d
             last = 1;
         }

+        ln = in;
         in = in->next;
+
+        ngx_free_chain(r->pool, ln);
     }

     ctx->in = in;

diff -r f58b6f636238 -r f7d53c7f7014 src/http/modules/ngx_http_gunzip_filter_module.c
--- a/src/http/modules/ngx_http_gunzip_filter_module.c	Thu May 16 11:15:10 2024 +0200
+++ b/src/http/modules/ngx_http_gunzip_filter_module.c	Thu May 23 19:15:38 2024 +0400
@@ -333,6 +333,8 @@ static ngx_int_t
 ngx_http_gunzip_filter_add_data(ngx_http_request_t *r,
     ngx_http_gunzip_ctx_t *ctx)
 {
+    ngx_chain_t  *cl;
+
     if (ctx->zstream.avail_in || ctx->flush != Z_NO_FLUSH || ctx->redo) {
         return NGX_OK;
     }
@@ -344,8 +346,11 @@ ngx_http_gunzip_filter_add_data(ngx_http
         return NGX_DECLINED;
     }

-    ctx->in_buf = ctx->in->buf;
-    ctx->in = ctx->in->next;
+    cl = ctx->in;
+    ctx->in_buf = cl->buf;
+    ctx->in = cl->next;
+
+    ngx_free_chain(r->pool, cl);

     ctx->zstream.next_in = ctx->in_buf->pos;
     ctx->zstream.avail_in = ctx->in_buf->last - ctx->in_buf->pos;
@@ -374,6 +379,7 @@ static ngx_int_t
 ngx_http_gunzip_filter_get_buf(ngx_http_request_t *r,
     ngx_http_gunzip_ctx_t *ctx)
 {
+    ngx_chain_t             *cl;
     ngx_http_gunzip_conf_t  *conf;

     if (ctx->zstream.avail_out) {
@@ -383,8 +389,12 @@ ngx_http_gunzip_filter_get_buf(ngx_http_
     conf = ngx_http_get_module_loc_conf(r, ngx_http_gunzip_filter_module);

     if (ctx->free) {
-        ctx->out_buf = ctx->free->buf;
-        ctx->free = ctx->free->next;
+
+        cl = ctx->free;
+        ctx->out_buf = cl->buf;
+        ctx->free = cl->next;
+
+        ngx_free_chain(r->pool, cl);

         ctx->out_buf->flush = 0;

diff -r f58b6f636238 -r f7d53c7f7014 src/http/modules/ngx_http_gzip_filter_module.c
--- a/src/http/modules/ngx_http_gzip_filter_module.c	Thu May 16 11:15:10 2024 +0200
+++ b/src/http/modules/ngx_http_gzip_filter_module.c	Thu May 23 19:15:38 2024 +0400
@@ -985,10 +985,14 @@ static void
 ngx_http_gzip_filter_free_copy_buf(ngx_http_request_t *r,
     ngx_http_gzip_ctx_t *ctx)
 {
-    ngx_chain_t  *cl;
+    ngx_chain_t  *cl, *ln;

-    for (cl = ctx->copied; cl; cl = cl->next) {
-        ngx_pfree(r->pool, cl->buf->start);
+    for (cl = ctx->copied; cl; /* void */) {
+        ln = cl;
+        cl = cl->next;
+
+        ngx_pfree(r->pool, ln->buf->start);
+        ngx_free_chain(r->pool, ln);
     }
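[Editorial note] The fix applies one idiom everywhere: when a link is consumed
from a chain, detach it and hand it back to the pool rather than just advancing
the head pointer. A minimal illustration of the pattern (ctx, pool and the
emptiness test are placeholders, not any particular module):

    ngx_chain_t  *cl;

    while (ctx->in && ngx_buf_size(ctx->in->buf) == 0) {

        /* detach the consumed link ... */
        cl = ctx->in;
        ctx->in = cl->next;

        /* ... and recycle it, so ngx_alloc_chain_link() can hand it out
           again instead of allocating a new link from the request pool */
        ngx_free_chain(pool, cl);
    }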
Re: I think I found a fix for the memory leak issue on gRPC module
On Mon, May 27, 2024 at 02:06:58PM +0400, Roman Arutyunyan wrote: > Hi, > # HG changeset patch > # User Roman Arutyunyan > # Date 1716477338 -14400 > # Thu May 23 19:15:38 2024 +0400 > # Node ID f7d53c7f70140b1cd1eaf51ce4346a873692f879 > # Parent f58b6f6362387eeace46043a6fc0bceb56a6786a > Optimized chain link usage (ticket #2614). > > Previously chain links could sometimes be dropped instead of being reused, > which could result in increased memory consumption during long requests. > > A similar chain link issue in ngx_http_gzip_filter_module was fixed in > da46bfc484ef (1.11.10). > > Based on a patch by Sangmin Lee. > > diff --git a/src/core/ngx_output_chain.c b/src/core/ngx_output_chain.c > --- a/src/core/ngx_output_chain.c > +++ b/src/core/ngx_output_chain.c > @@ -117,7 +117,10 @@ ngx_output_chain(ngx_output_chain_ctx_t > > ngx_debug_point(); > > -ctx->in = ctx->in->next; > +cl = ctx->in; > +ctx->in = cl->next; > + > +ngx_free_chain(ctx->pool, cl); > > continue; > } > @@ -203,7 +206,10 @@ ngx_output_chain(ngx_output_chain_ctx_t > /* delete the completed buf from the ctx->in chain */ > > if (ngx_buf_size(ctx->in->buf) == 0) { > -ctx->in = ctx->in->next; > +cl = ctx->in; > +ctx->in = cl->next; > + > +ngx_free_chain(ctx->pool, cl); > } > > cl = ngx_alloc_chain_link(ctx->pool); > diff --git a/src/http/modules/ngx_http_grpc_module.c > b/src/http/modules/ngx_http_grpc_module.c > --- a/src/http/modules/ngx_http_grpc_module.c > +++ b/src/http/modules/ngx_http_grpc_module.c > @@ -1231,7 +1231,7 @@ ngx_http_grpc_body_output_filter(void *d > ngx_buf_t *b; > ngx_int_t rc; > ngx_uint_t next, last; > -ngx_chain_t*cl, *out, **ll; > +ngx_chain_t*cl, *out, *ln, **ll; > ngx_http_upstream_t*u; > ngx_http_grpc_ctx_t*ctx; > ngx_http_grpc_frame_t *f; > @@ -1459,7 +1459,10 @@ ngx_http_grpc_body_output_filter(void *d > last = 1; > } > > +ln = in; > in = in->next; > + > +ngx_free_chain(r->pool, ln); > } Looks good now. 
> > ctx->in = in; > diff --git a/src/http/modules/ngx_http_gunzip_filter_module.c > b/src/http/modules/ngx_http_gunzip_filter_module.c > --- a/src/http/modules/ngx_http_gunzip_filter_module.c > +++ b/src/http/modules/ngx_http_gunzip_filter_module.c > @@ -333,6 +333,8 @@ static ngx_int_t > ngx_http_gunzip_filter_add_data(ngx_http_request_t *r, > ngx_http_gunzip_ctx_t *ctx) > { > +ngx_chain_t *cl; > + > if (ctx->zstream.avail_in || ctx->flush != Z_NO_FLUSH || ctx->redo) { > return NGX_OK; > } > @@ -344,8 +346,11 @@ ngx_http_gunzip_filter_add_data(ngx_http > return NGX_DECLINED; > } > > -ctx->in_buf = ctx->in->buf; > -ctx->in = ctx->in->next; > +cl = ctx->in; > +ctx->in_buf = cl->buf; > +ctx->in = cl->next; > + > +ngx_free_chain(r->pool, cl); > > ctx->zstream.next_in = ctx->in_buf->pos; > ctx->zstream.avail_in = ctx->in_buf->last - ctx->in_buf->pos; > @@ -374,6 +379,7 @@ static ngx_int_t > ngx_http_gunzip_filter_get_buf(ngx_http_request_t *r, > ngx_http_gunzip_ctx_t *ctx) > { > +ngx_chain_t *cl; > ngx_http_gunzip_conf_t *conf; > > if (ctx->zstream.avail_out) { > @@ -383,8 +389,12 @@ ngx_http_gunzip_filter_get_buf(ngx_http_ > conf = ngx_http_get_module_loc_conf(r, ngx_http_gunzip_filter_module); > > if (ctx->free) { > -ctx->out_buf = ctx->free->buf; > -ctx->free = ctx->free->next; > + > +cl = ctx->free; > +ctx->out_buf = cl->buf; > +ctx->free = cl->next; > + > +ngx_free_chain(r->pool, cl); > > ctx->out_buf->flush = 0; > > diff --git a/src/http/modules/ngx_http_gzip_filter_module.c > b/src/http/modules/ngx_http_gzip_filter_module.c > --- a/src/http/modules/ngx_http_gzip_filter_module.c > +++ b/src/http/modules/ngx_http_gzip_filter_module.c > @@ -985,10 +985,14 @@ static void > ngx_http_gzip_filter_free_copy_buf(ngx_http_request_t *r, > ngx_http_gzip_ctx_t *ctx) > { > -ngx_chain_t *cl; > +ngx_chain_t *cl, *ln; > > -for (cl = ctx->copied; cl; cl = cl->next) { > -ngx_pfree(r->pool, cl->buf->start); > +for (cl = ctx->copied; cl; /* void */) { > +ln = cl; > +cl = cl->next; > + > +ngx_pfree(r->pool, ln->buf->start); > +ngx_free_chain(r->pool, ln); > } > > ctx->copied = NULL; > diff --git a/src/http/modules/ngx_http_ssi_filter_module.c > b/src/http/modules/ngx_http_ssi_filter_module.c > --- a/src/http/modules/ngx_http_ssi_filter_module.c > +++ b/src/http/modules/ngx_http_ssi_filter_module.c > @@ -482,9 +482,13 @@
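[Editorial note] An aside on what ngx_free_chain() actually does, since the
whole fix hinges on it: chain links are allocated from the request pool and are
only reclaimed when the pool is destroyed, so the way to "free" one during a
long request is to push it onto the pool's free list, where
ngx_alloc_chain_link() will pick it up again. Roughly paraphrased from
src/core/ngx_buf.h and src/core/ngx_buf.c (not a verbatim copy):

    #define ngx_free_chain(pool, cl)                                          \
        cl->next = pool->chain;                                               \
        pool->chain = cl

    ngx_chain_t *
    ngx_alloc_chain_link(ngx_pool_t *pool)
    {
        ngx_chain_t  *cl;

        cl = pool->chain;

        if (cl) {
            pool->chain = cl->next;     /* reuse a previously freed link */
            return cl;
        }

        return ngx_palloc(pool, sizeof(ngx_chain_t));   /* otherwise allocate */
    }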
[PATCH 2 of 2] Stream: do not reallocate a parsed SNI host
# HG changeset patch
# User Sergey Kandaurov
# Date 1716805288 -14400
#      Mon May 27 14:21:28 2024 +0400
# Node ID 88fa18a0f05f7dead38a127bb24e5cf861f3d66d
# Parent  e82a7318ed48fdbc1273771bc96357e9dc232975
Stream: do not reallocate a parsed SNI host.

Unlike in http SNI callback, it doesn't need to be saved here.

diff --git a/src/stream/ngx_stream_ssl_module.c b/src/stream/ngx_stream_ssl_module.c
--- a/src/stream/ngx_stream_ssl_module.c
+++ b/src/stream/ngx_stream_ssl_module.c
@@ -501,7 +501,7 @@ ngx_stream_ssl_servername(ngx_ssl_conn_t
     host.data = (u_char *) servername;

-    rc = ngx_stream_validate_host(&host, c->pool, 1);
+    rc = ngx_stream_validate_host(&host, c->pool, 0);

     if (rc == NGX_ERROR) {
         goto error;
diff --git a/src/stream/ngx_stream_ssl_preread_module.c b/src/stream/ngx_stream_ssl_preread_module.c
--- a/src/stream/ngx_stream_ssl_preread_module.c
+++ b/src/stream/ngx_stream_ssl_preread_module.c
@@ -519,7 +519,7 @@ ngx_stream_ssl_preread_servername(ngx_st
     host = *servername;

-    rc = ngx_stream_validate_host(&host, c->pool, 1);
+    rc = ngx_stream_validate_host(&host, c->pool, 0);

     if (rc == NGX_ERROR) {
         return NGX_ERROR;
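[Editorial note] For readers unfamiliar with the third argument: it is the
alloc flag of ngx_stream_validate_host() / ngx_http_validate_host(). When it is
set (or when the host contains uppercase letters), the validated name is copied
into the given pool and lowercased, so it can outlive the caller's buffer; in
these two callbacks the parsed SNI name is only used transiently, hence passing
0. A rough paraphrase of the relevant tail of the validation function
(simplified, not the literal source):

    if (ch >= 'A' && ch <= 'Z') {
        alloc = 1;                          /* lowercasing needs a copy */
    }

    /* ... after the parsing loop ... */

    if (alloc) {
        host->data = ngx_pnalloc(pool, host_len);
        if (host->data == NULL) {
            return NGX_ERROR;
        }

        ngx_strlow(host->data, h, host_len);
    }

    host->len = host_len;

    return NGX_OK;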
[PATCH 1 of 2] Rewritten host header validation to follow generic parsing rules
# HG changeset patch # User Sergey Kandaurov # Date 1716805272 -14400 # Mon May 27 14:21:12 2024 +0400 # Node ID e82a7318ed48fdbc1273771bc96357e9dc232975 # Parent f58b6f6362387eeace46043a6fc0bceb56a6786a Rewritten host header validation to follow generic parsing rules. It now uses a generic model of state-based machine, with more strict parsing rules borrowed from ngx_http_validate_host(), with additional checks for double dots and stripping a port subcomponent. Notably, now a port subcomponent of the Host header is restricted to digits, using underscores in domain name labels is prohibited. diff --git a/src/http/ngx_http_request.c b/src/http/ngx_http_request.c --- a/src/http/ngx_http_request.c +++ b/src/http/ngx_http_request.c @@ -2148,9 +2148,11 @@ ngx_http_validate_host(ngx_str_t *host, size_t i, dot_pos, host_len; enum { -sw_usual = 0, -sw_literal, -sw_rest +sw_host_start = 0, +sw_host, +sw_host_end, +sw_host_ip_literal, +sw_port, } state; dot_pos = host->len; @@ -2158,55 +2160,122 @@ ngx_http_validate_host(ngx_str_t *host, h = host->data; -state = sw_usual; +state = sw_host_start; for (i = 0; i < host->len; i++) { ch = h[i]; -switch (ch) { - -case '.': -if (dot_pos == i - 1) { -return NGX_DECLINED; +switch (state) { + +case sw_host_start: + +if (ch == '[') { +state = sw_host_ip_literal; +break; +} + +state = sw_host; + +/* fall through */ + +case sw_host: + +if (ch >= 'A' && ch <= 'Z') { +alloc = 1; +break; +} + +if (ch >= 'a' && ch <= 'z') { +break; } -dot_pos = i; -break; - -case ':': -if (state == sw_usual) { + +if ((ch >= '0' && ch <= '9') || ch == '-') { +break; +} + +if (ch == '.') { +if (dot_pos == i - 1) { +return NGX_DECLINED; +} + +dot_pos = i; +break; +} + +/* fall through */ + +case sw_host_end: + +switch (ch) { +case ':': host_len = i; -state = sw_rest; +state = sw_port; +break; +default: +return NGX_DECLINED; } break; -case '[': -if (i == 0) { -state = sw_literal; -} -break; - -case ']': -if (state == sw_literal) { -host_len = i + 1; -state = sw_rest; -} -break; - -default: - -if (ngx_path_separator(ch)) { -return NGX_DECLINED; -} - -if (ch <= 0x20 || ch == 0x7f) { -return NGX_DECLINED; -} +case sw_host_ip_literal: if (ch >= 'A' && ch <= 'Z') { alloc = 1; +break; } +if (ch >= 'a' && ch <= 'z') { +break; +} + +if (ch >= '0' && ch <= '9') { +break; +} + +if (ch == '.') { +if (dot_pos == i - 1) { +return NGX_DECLINED; +} + +dot_pos = i; +break; +} + +switch (ch) { +case ':': +break; +case ']': +host_len = i + 1; +state = sw_host_end; +break; +case '-': +case '_': +case '~': +/* unreserved */ +break; +case '!': +case '$': +case '&': +case '\'': +case '(': +case ')': +case '*': +case '+': +case ',': +case ';': +case '=': +/* sub-delims */ +break; +default: +return NGX_DECLINED; +} break; + +case sw_port: +if (ch >= '0' && ch <= '9') { +break; +} + +return NGX_DECLINED; } } diff --git a/src/stream/ngx_stream_core_module.c b/src/stream/ngx_stream_core_module.c --- a/src/stream/ngx_stream_core_module.c +++ b/src/stream/ngx_stream_core_module.c @@ -471,9 +471,11 @@ ngx_stream_validate_host(ngx_str_t *host size_t i, dot_pos, host_len; enum { -sw_usual = 0, -sw_literal, -sw_rest +sw_host_start = 0, +sw_host, +sw_host_end, +sw_host_ip_literal, +sw_port, } state;
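[Editorial note] A standalone toy illustrating the stricter rules described in
the commit message (double dots rejected, digits-only port, underscores in
labels prohibited). This is a simplified sketch, NOT the nginx
ngx_http_validate_host() code: it ignores the [] IP-literal branch, the
lowercasing/allocation path, and port stripping.

/* toy_validate_host.c */

#include <stdio.h>
#include <string.h>
#include <ctype.h>

static int
toy_validate_host(const char *h)
{
    size_t  i, len;
    int     in_port = 0;

    len = strlen(h);

    if (len == 0) {
        return 0;
    }

    for (i = 0; i < len; i++) {
        unsigned char  ch = (unsigned char) h[i];

        if (in_port) {
            if (!isdigit(ch)) {
                return 0;               /* port must be digits only */
            }
            continue;
        }

        if (ch == ':') {
            in_port = 1;                /* rest is the port subcomponent */
            continue;
        }

        if (ch == '.') {
            if (i > 0 && h[i - 1] == '.') {
                return 0;               /* double dot */
            }
            continue;
        }

        if (!isalnum(ch) && ch != '-') {
            return 0;                   /* e.g. '_' is now rejected */
        }
    }

    return 1;
}

int
main(void)
{
    size_t       i;
    const char  *tests[] = { "example.com", "Example.COM:8080",
                             "foo_bar.example.com", "example..com",
                             "example.com:80a" };

    for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
        printf("%-22s %s\n", tests[i],
               toy_validate_host(tests[i]) ? "accepted" : "rejected");
    }

    return 0;
}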
Re: Inquiry about QUIC Congestion Control Algorithms Development
Hi,

> Hi,
>
> I've been following the development of the QUIC protocol within Nginx, and I
> am particularly interested in the implementation of advanced congestion
> control algorithms such as BBR and CUBIC.
>
> Considering the impact that these algorithms can have on network performance,
> especially in scenarios where QUIC is being utilized, I was hoping to get
> some insights into the current or planned support for these within the Nginx
> ecosystem.
>
> Are there any ongoing efforts or discussions regarding the adoption of BBR,
> CUBIC, or similar congestion control algorithms for QUIC in Nginx? If so,
> could you please share some details or point me towards the relevant
> discussions or documentation?
>
> Thank you for your continued work on Nginx and for any information you can
> provide on this topic.

We have CUBIC in our roadmap. BBR will be our next step after it, but there are
no particular plans for it so far.

--
Roman Arutyunyan
Re: I think I found a fix for the memory leak issue on gRPC module
On Wed, May 22, 2024 at 06:14:26PM +0400, Roman Arutyunyan wrote: > Hi, > > Indeed there's a problem there. We have similar problems in other places as > well. Attached is a patch that fixes all I could find. > > I did some testing for the sub_filter with the following config. Small > buffers > exaggerate the problem. > > http { > server { > listen 8000; > > location / { > root html; > > output_buffers 2 128; > > sub_filter 1 2; > sub_filter_types *; > sub_filter_once off; > } > } > } > > Retrieving a 10m file resulted in 7304 (patched) vs 1317704 (unpatched) bytes > allocated from the request pool. With the default output_buffers (2 32768), > it's 79880 vs 84040. Note that unlike in sub_filter and gzip modules, gRPC logic is more flawed as the buffer size in user controlled. > > > On Thu, May 02, 2024 at 07:59:44AM +, Pavel Pautov via nginx-devel wrote: > > Hi Sangmin, > > > > Your analysis looks correct. Client streaming RPCs can lead to unbound > > accumulation of ngx_chain_t objects (although, at very slow rate). Gzip > > module had a similar issue (https://trac.nginx.org/nginx/ticket/1046). > > Attaching somewhat simplified patch based on yours. I was able to reproduce > > the issue locally and the patch fixes it. > > > > From: nginx-devel On Behalf Of Sangmin Lee > > Sent: Thursday, April 4, 2024 19:29 > > To: nginx-devel@nginx.org > > Subject: I think I found a fix for the memory leak issue on gRPC module > > > > CAUTION: This email has been sent from an external source. Do not click > > links, open attachments, or provide sensitive business information unless > > you can verify the sender's legitimacy. > > > > I am sending this mail again because I did a mistake while I was writing a > > mail. I didn't know, in gmail, "Ctrl - Enter" would send a mail immediately > > even without a title! > > I am sorry for that, so I am sending again. > > > > Hello, > > > > I think I found the main cause of the memory leak issue when using gRPC > > stream so I made a patch for it. > > Please find the test scenario and details here -- This is what I wrote.: > > https://trac.nginx.org/nginx/ticket/2614 > > After I changed the memory pool totally on nginx and tested our workload -- > > long-lived gRPC streams with many connections -- using Valgrind and massif, > > I was able to find what brought up the memory leak issue. > > like the picture below. > > > > [cid:image001.png@01DA9C29.C792CD90] > > ( I am not sure whether this picture will be sent properly ) > > > > After I patched one part, it seems okay now I have tested it for 1 week > > with out workload. > > > > [cid:image002.png@01DA9C29.C792CD90] > > ( I am not sure whether this picture will be sent properly ) > > > > But because I am not familiar with Mercurial, I couldn't find a way to > > create PR like on github. I guess this mailing list is for this patch. > > From my point of view, it is more like a workaround and I think the way of > > using ngx_chain_add_copy() or itself needs to be changed because it > > allocates a ngx_chain_t structure using ngx_alloc_chain_link() but inside > > of that, it just copies pointer, like cl->buf = in->buf; > > so this ngx_chain_t instance should be dealt with differently unlike other > > ngx_chain_t instances. > > But I am quite new to nginx codes so my view might be wrong. > > Anyhow, please go over this patch and I would like to further talk here. 
> > > > > > > > diff --git a/src/http/modules/ngx_http_grpc_module.c > > b/src/http/modules/ngx_http_grpc_module.c > > index dfe49c586..1db67bd0a 100644 > > --- a/src/http/modules/ngx_http_grpc_module.c > > +++ b/src/http/modules/ngx_http_grpc_module.c > > @@ -1462,6 +1462,12 @@ ngx_http_grpc_body_output_filter(void *data, > > ngx_chain_t *in) > > in = in->next; > > } > > > > + ngx_chain_t *nl; > > + for (ngx_chain_t *dl = ctx->in; dl != in; dl = nl ) { > > + nl = dl->next; > > + ngx_free_chain(r->pool, dl); > > + } > > + > > ctx->in = in; > > > > if (last) { > > > > > > > > Best regards, > > Sangmin > > > > > > > ___ > > nginx-devel mailing list > > nginx-devel@nginx.org > > https://mailman.nginx.org/mailman/listinfo/nginx-devel > > > -- > Roman Arutyunyan > # HG changeset patch > # User Roman Arutyunyan > # Date 1716386385 -14400 > # Wed May 22 17:59:45 2024 +0400 > # Node ID 95af5fe4921294b48e634defc03b6b0f0201d6f7 > # Parent e60377bdee3d0f8dbd237b5ae8a5deab2af785ef > Optimized chain link usage (ticket #2614). > > Previously
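[Editorial note] For anyone who wants to reproduce the sub_filter measurement
quoted above, here is a complete minimal nginx.conf built around the quoted
snippet; the events/http wrappers and worker settings are filled in here as the
usual boilerplate and are not part of the original message:

    worker_processes  1;

    events {
        worker_connections  1024;
    }

    http {
        server {
            listen 8000;

            location / {
                root html;

                output_buffers 2 128;

                sub_filter 1 2;
                sub_filter_types *;
                sub_filter_once off;
            }
        }
    }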
Re: I think I found a fix for the memory leak issue on gRPC module
Hi, Following an internal discussion with Sergey, here's an updated version of the patch. On Thu, May 23, 2024 at 01:42:24PM +0400, Roman Arutyunyan wrote: > Hi, > > On Wed, May 22, 2024 at 06:14:26PM +0400, Roman Arutyunyan wrote: > > Hi, > > > > Indeed there's a problem there. We have similar problems in other places as > > well. Attached is a patch that fixes all I could find. > > > > I did some testing for the sub_filter with the following config. Small > > buffers > > exaggerate the problem. > > > > http { > > server { > > listen 8000; > > > > location / { > > root html; > > > > output_buffers 2 128; > > > > sub_filter 1 2; > > sub_filter_types *; > > sub_filter_once off; > > } > > } > > } > > > > Retrieving a 10m file resulted in 7304 (patched) vs 1317704 (unpatched) > > bytes > > allocated from the request pool. With the default output_buffers (2 32768), > > it's 79880 vs 84040. > > I tested ssi with the following config. > > server { > listen 8000; > > location / { > root html; > > output_buffers 2 128; > > ssi on; > ssi_types *; > } > } > > The result is similar: > > 6224 vs 1316912 with small buffers > 38864 vs 43952 with the default buffers > > > On Thu, May 02, 2024 at 07:59:44AM +, Pavel Pautov via nginx-devel > > wrote: > > > Hi Sangmin, > > > > > > Your analysis looks correct. Client streaming RPCs can lead to unbound > > > accumulation of ngx_chain_t objects (although, at very slow rate). Gzip > > > module had a similar issue (https://trac.nginx.org/nginx/ticket/1046). > > > Attaching somewhat simplified patch based on yours. I was able to > > > reproduce the issue locally and the patch fixes it. > > > > > > From: nginx-devel On Behalf Of Sangmin Lee > > > Sent: Thursday, April 4, 2024 19:29 > > > To: nginx-devel@nginx.org > > > Subject: I think I found a fix for the memory leak issue on gRPC module > > > > > > CAUTION: This email has been sent from an external source. Do not click > > > links, open attachments, or provide sensitive business information unless > > > you can verify the sender's legitimacy. > > > > > > I am sending this mail again because I did a mistake while I was writing > > > a mail. I didn't know, in gmail, "Ctrl - Enter" would send a mail > > > immediately even without a title! > > > I am sorry for that, so I am sending again. > > > > > > Hello, > > > > > > I think I found the main cause of the memory leak issue when using gRPC > > > stream so I made a patch for it. > > > Please find the test scenario and details here -- This is what I wrote.: > > > https://trac.nginx.org/nginx/ticket/2614 > > > After I changed the memory pool totally on nginx and tested our workload > > > -- long-lived gRPC streams with many connections -- using Valgrind and > > > massif, I was able to find what brought up the memory leak issue. > > > like the picture below. > > > > > > [cid:image001.png@01DA9C29.C792CD90] > > > ( I am not sure whether this picture will be sent properly ) > > > > > > After I patched one part, it seems okay now I have tested it for 1 week > > > with out workload. > > > > > > [cid:image002.png@01DA9C29.C792CD90] > > > ( I am not sure whether this picture will be sent properly ) > > > > > > But because I am not familiar with Mercurial, I couldn't find a way to > > > create PR like on github. I guess this mailing list is for this patch. 
> > > From my point of view, it is more like a workaround and I think the way > > > of using ngx_chain_add_copy() or itself needs to be changed because it > > > allocates a ngx_chain_t structure using ngx_alloc_chain_link() but inside > > > of that, it just copies pointer, like cl->buf = in->buf; > > > so this ngx_chain_t instance should be dealt with differently unlike > > > other ngx_chain_t instances. > > > But I am quite new to nginx codes so my view might be wrong. > > > Anyhow, please go over this patch and I would like to further talk here. > > > > > > > > > > > > diff --git a/src/http/modules/ngx_http_grpc_module.c > > > b/src/http/modules/ngx_http_grpc_module.c > > > index dfe49c586..1db67bd0a 100644 > > > --- a/src/http/modules/ngx_http_grpc_module.c > > > +++ b/src/http/modules/ngx_http_grpc_module.c > > > @@ -1462,6 +1462,12 @@ ngx_http_grpc_body_output_filter(void *data, > > > ngx_chain_t *in) > > > in = in->next; > > > } > > > > > > + ngx_chain_t *nl; > > > + for (ngx_chain_t *dl = ctx->in; dl != in; dl = nl ) { > > > + nl = dl->next; > > > + ngx_free_chain(r->pool, dl); > > > + } > > > + > > > ctx->in = in; > > > > > > if (last) { > > > > > >