Hi,

Context: consider nginx used as a cache with proxy_cache_use_stale set
to 'http_500' and the 'updating' parameter set, i.e. it caches errors
and serves the stale content while updating.  Suppose the upstream temporarily
responds with HTTP 504 and Cache-Control being max-age=3.  The error gets
cached, but after 3 seconds it expires.  At this point, let's say the
upstream server starts serving HTTP 200 responses, but with Cache-Control
set to 'no-cache'.

The cache manager will not evict (LRU) the expired content immediately;
it will stay in the EXPIRED state while subsequent requests result in 200s.
Problem: if there are multiple processes racing, the ones in the UPDATING
state will serve stale 504s.  That results in sporadic errors, e.g.:

200 EXPIRED
504 UPDATING
200 EXPIRED
...

At the very least, I think the stale cache content should be marked as
"invalid" after the no-cache response (with the possibility of becoming
valid again once the response becomes cacheable).  Whether the object
should be kept at all is something to debate.

Please find the preliminary patch attached.

-- 
Mindaugas
# HG changeset patch
# User Mindaugas Rasiukevicius <[email protected]>
# Date 1447780811 0
#      Tue Nov 17 17:20:11 2015 +0000
# Node ID d5ddde5620a04e9b27b277346e665c952e0cf374
# Parent  bec5b3093337708cbdb59f9fc253f8e1cd6d7848
Mark stale cache content as "invalid" on non-cacheable responses.

diff -r bec5b3093337 -r d5ddde5620a0 src/http/ngx_http_cache.h
--- a/src/http/ngx_http_cache.h Tue Nov 17 19:41:39 2015 +0300
+++ b/src/http/ngx_http_cache.h Tue Nov 17 17:20:11 2015 +0000
@@ -50,7 +50,8 @@ typedef struct {
     unsigned                         exists:1;
     unsigned                         updating:1;
     unsigned                         deleting:1;
-                                     /* 11 unused bits */
+    unsigned                         invalid:1;
+                                     /* 10 unused bits */
 
     ngx_file_uniq_t                  uniq;
     time_t                           expire;
@@ -163,6 +164,9 @@ struct ngx_http_file_cache_s {
 };
 
 
+#define NGX_HTTP_CACHE_INVALID       0x0001
+
+
 ngx_int_t ngx_http_file_cache_new(ngx_http_request_t *r);
 ngx_int_t ngx_http_file_cache_create(ngx_http_request_t *r);
 void ngx_http_file_cache_create_key(ngx_http_request_t *r);
@@ -171,7 +175,8 @@ ngx_int_t ngx_http_file_cache_set_header
 void ngx_http_file_cache_update(ngx_http_request_t *r, ngx_temp_file_t *tf);
 void ngx_http_file_cache_update_header(ngx_http_request_t *r);
 ngx_int_t ngx_http_cache_send(ngx_http_request_t *);
-void ngx_http_file_cache_free(ngx_http_cache_t *c, ngx_temp_file_t *tf);
+void ngx_http_file_cache_free(ngx_http_cache_t *c, ngx_temp_file_t *tf,
+    ngx_uint_t flags);
 time_t ngx_http_file_cache_valid(ngx_array_t *cache_valid, ngx_uint_t status);
 
 char *ngx_http_file_cache_set_slot(ngx_conf_t *cf, ngx_command_t *cmd,
diff -r bec5b3093337 -r d5ddde5620a0 src/http/ngx_http_file_cache.c
--- a/src/http/ngx_http_file_cache.c    Tue Nov 17 19:41:39 2015 +0300
+++ b/src/http/ngx_http_file_cache.c    Tue Nov 17 17:20:11 2015 +0000
@@ -824,6 +824,13 @@ ngx_http_file_cache_exists(ngx_http_file
     if (fcn) {
         ngx_queue_remove(&fcn->queue);
 
+        if (fcn->invalid) {
+
+            rc = NGX_DECLINED;
+
+            goto renew;
+        }
+
         if (c->node == NULL) {
             fcn->uses++;
             fcn->count++;
@@ -1406,6 +1413,7 @@ ngx_http_file_cache_update(ngx_http_requ
     }
 
     c->node->updating = 0;
+    c->node->invalid = 0;
 
     ngx_shmtx_unlock(&cache->shpool->mutex);
 }
@@ -1595,7 +1603,8 @@ ngx_http_cache_send(ngx_http_request_t *
 
 
 void
-ngx_http_file_cache_free(ngx_http_cache_t *c, ngx_temp_file_t *tf)
+ngx_http_file_cache_free(ngx_http_cache_t *c, ngx_temp_file_t *tf,
+    ngx_uint_t flags)
 {
     ngx_http_file_cache_t       *cache;
     ngx_http_file_cache_node_t  *fcn;
@@ -1633,6 +1642,10 @@ ngx_http_file_cache_free(ngx_http_cache_
         c->node = NULL;
     }
 
+    if (flags & NGX_HTTP_CACHE_INVALID) {
+        fcn->invalid = 1;
+    }
+
     ngx_shmtx_unlock(&cache->shpool->mutex);
 
     c->updated = 1;
@@ -1675,7 +1688,7 @@ ngx_http_file_cache_cleanup(void *data)
                       "stalled cache updating, error:%ui", c->error);
     }
 
-    ngx_http_file_cache_free(c, NULL);
+    ngx_http_file_cache_free(c, NULL, 0);
 }
 
 
diff -r bec5b3093337 -r d5ddde5620a0 src/http/ngx_http_upstream.c
--- a/src/http/ngx_http_upstream.c      Tue Nov 17 19:41:39 2015 +0300
+++ b/src/http/ngx_http_upstream.c      Tue Nov 17 17:20:11 2015 +0000
@@ -2357,7 +2357,7 @@ ngx_http_upstream_intercept_errors(ngx_h
                     r->cache->error = status;
                 }
 
-                ngx_http_file_cache_free(r->cache, u->pipe->temp_file);
+                ngx_http_file_cache_free(r->cache, u->pipe->temp_file, 0);
             }
 #endif
             ngx_http_upstream_finalize_request(r, u, status);
@@ -2839,7 +2839,8 @@ ngx_http_upstream_send_response(ngx_http
                    "http cacheable: %d", u->cacheable);
 
     if (u->cacheable == 0 && r->cache) {
-        ngx_http_file_cache_free(r->cache, u->pipe->temp_file);
+        ngx_http_file_cache_free(r->cache, u->pipe->temp_file,
+                                 NGX_HTTP_CACHE_INVALID);
     }
 
 #endif
@@ -3647,11 +3648,11 @@ ngx_http_upstream_process_request(ngx_ht
                     ngx_http_file_cache_update(r, tf);
 
                 } else {
-                    ngx_http_file_cache_free(r->cache, tf);
+                    ngx_http_file_cache_free(r->cache, tf, 0);
                 }
 
             } else if (p->upstream_error) {
-                ngx_http_file_cache_free(r->cache, p->temp_file);
+                ngx_http_file_cache_free(r->cache, p->temp_file, 0);
             }
         }
 
@@ -4030,7 +4031,7 @@ ngx_http_upstream_finalize_request(ngx_h
             }
         }
 
-        ngx_http_file_cache_free(r->cache, u->pipe->temp_file);
+        ngx_http_file_cache_free(r->cache, u->pipe->temp_file, 0);
     }
 
 #endif
_______________________________________________
nginx-devel mailing list
[email protected]
http://mailman.nginx.org/mailman/listinfo/nginx-devel

Reply via email to