On Thu, Oct 26, 2006 at 05:20:10PM +0200, Plüm, Rüdiger, VF EITO wrote:
> > Index: modules/cache/mod_disk_cache.c
> > ===================================================================
> > --- modules/cache/mod_disk_cache.c  (revision 450104)
> > +++ modules/cache/mod_disk_cache.c  (working copy)
> 
> > @@ -998,12 +998,16 @@
> >          dobj->file_size = 0;
> >      }
> >  
> > -    for (e = APR_BRIGADE_FIRST(bb);
> > -         e != APR_BRIGADE_SENTINEL(bb);
> > -         e = APR_BUCKET_NEXT(e))
> > -    {
> > +    e = APR_BRIGADE_FIRST(bb);
> > +    while (e != APR_BRIGADE_SENTINEL(bb) && !APR_BUCKET_IS_EOS(e)) {
> >          const char *str;
> >          apr_size_t length, written;
> > +
> > +        if (APR_BUCKET_IS_METADATA(e)) {
> > +            e = APR_BUCKET_NEXT(e);
> > +            continue;
> > +        }
> > +
> 
> Why ignore the metadata buckets? This means that flush buckets do not get
> passed up the chain via the temporary brigade tmpbb. This is bad IMHO.
> I guess the best thing we can do here is to add them to tmpbb before doing the
> continue. Of course this means that all additions need to be done to the tail
> of tmpbb.
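(For reference, that suggestion would amount to roughly the following inside
the loop; an untested sketch, using the same APR brigade macros as the patch:)

    if (APR_BUCKET_IS_METADATA(e)) {
        next = APR_BUCKET_NEXT(e);
        /* Keep metadata buckets (e.g. FLUSH) instead of dropping them:
         * move them to the tail of tmpbb so they still get passed down
         * the filter chain along with the next data bucket. */
        APR_BUCKET_REMOVE(e);
        APR_BRIGADE_INSERT_TAIL(tmpbb, e);
        e = next;
        continue;
    }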

Yeah, I noticed this too.  For FLUSH buckets, batching them up in tmpbb
is no good either; they need to be passed down the filter chain immediately.
I changed it like this:

(incremental patch with diff -wu to hide the re-indenting)

--- mod_disk_cache.c.prev       2006-10-26 16:39:17.000000000 +0100
+++ mod_disk_cache.c    2006-10-26 16:32:16.000000000 +0100
@@ -1003,11 +1003,8 @@
         const char *str;
         apr_size_t length, written;
 
-        if (APR_BUCKET_IS_METADATA(e)) {
-            e = APR_BUCKET_NEXT(e);
-            continue;
-        }
-
+        /* Store contents of non-metadata buckets to disk. */
+        if (!APR_BUCKET_IS_METADATA(e)) {
         rv = apr_bucket_read(e, &str, &length, APR_BLOCK_READ);
         if (rv != APR_SUCCESS) {
             ap_log_error(APLOG_MARK, APLOG_ERR, 0, r->server,
@@ -1036,21 +1033,23 @@
             file_cache_errorcleanup(dobj, r);
             return APR_EGENERAL;
         }
+        }
 
         next = APR_BUCKET_NEXT(e);
 
-        /* Move the bucket to the temp brigade and flush it up the
+        /* Move the bucket to the temp brigade and pass it down the
          * filter chain. */
         APR_BUCKET_REMOVE(e);
         APR_BRIGADE_INSERT_HEAD(tmpbb, e);
+
+        if (!r->connection->aborted) {
         rv = ap_pass_brigade(f_next, tmpbb);
         if (rv) {
+                r->connection->aborted = 1;
             ap_log_error(APLOG_MARK, APLOG_DEBUG, rv, r->server,
                          "disk_cache: failed to flush brigade for URL %s",
                          h->cache_obj->key);
-            /* Remove the intermediate cache file and return non-APR_SUCCESS */
-            file_cache_errorcleanup(dobj, r);
-            return rv;
+            }
         }
         
         /* Discard the bucket and move on. */
@@ -1059,19 +1058,29 @@
         e = next;
     }
 
-    /* Was this the final bucket? If yes, close the temp file and perform
-     * sanity checks.
-     */
-    if (e != APR_BRIGADE_SENTINEL(bb) && APR_BUCKET_IS_EOS(e)) {
-        if (r->connection->aborted || r->no_cache) {
+    if (r->no_cache) {
+        ap_log_error(APLOG_MARK, APLOG_INFO, 0, r->server,
+                     "disk_cache: Discarding body for URL %s, "
+                     "no-cache flag set", h->cache_obj->key);
+        /* Remove the intermediate cache file and return non-APR_SUCCESS */
+        file_cache_errorcleanup(dobj, r);
+        return APR_SUCCESS;
+    }
+
+    if (r->connection->aborted) {
             ap_log_error(APLOG_MARK, APLOG_INFO, 0, r->server,
                          "disk_cache: Discarding body for URL %s "
                          "because connection has been aborted.",
                          h->cache_obj->key);
             /* Remove the intermediate cache file and return non-APR_SUCCESS */
             file_cache_errorcleanup(dobj, r);
-            return APR_EGENERAL;
+        return APR_ECONNABORTED;
         }
+
+    /* Was this the final bucket? If yes, close the temp file and perform
+     * sanity checks.
+     */
+    if (e != APR_BRIGADE_SENTINEL(bb) && APR_BUCKET_IS_EOS(e)) {
         if (dobj->file_size < conf->minfs) {
             ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
                          "disk_cache: URL %s failed the size check "

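Putting that together with the earlier hunk, the write loop ends up shaped
roughly like this (a paraphrase rather than the literal patched code; the
logging, the temp-file write and the size bookkeeping are elided):

    e = APR_BRIGADE_FIRST(bb);
    while (e != APR_BRIGADE_SENTINEL(bb) && !APR_BUCKET_IS_EOS(e)) {
        const char *str;
        apr_size_t length;

        /* Store contents of non-metadata buckets to disk. */
        if (!APR_BUCKET_IS_METADATA(e)) {
            rv = apr_bucket_read(e, &str, &length, APR_BLOCK_READ);
            if (rv != APR_SUCCESS) {
                /* log, remove the intermediate cache file, bail out */
                file_cache_errorcleanup(dobj, r);
                return rv;
            }
            /* ... write str/length to the temp file and update
             * dobj->file_size, as before ... */
        }

        next = APR_BUCKET_NEXT(e);

        /* Data and metadata buckets (e.g. FLUSH) alike are moved to the
         * temp brigade and passed down the filter chain right away,
         * unless the client has already gone away. */
        APR_BUCKET_REMOVE(e);
        APR_BRIGADE_INSERT_HEAD(tmpbb, e);
        if (!r->connection->aborted) {
            rv = ap_pass_brigade(f_next, tmpbb);
            if (rv) {
                /* don't give up on caching; just stop sending to
                 * the client */
                r->connection->aborted = 1;
            }
        }

        /* Discard the bucket and move on. */
        apr_brigade_cleanup(tmpbb);
        e = next;
    }

So a failed ap_pass_brigade() no longer discards the cache file; it just
marks the connection aborted so we stop writing to the client, and the
no_cache/aborted checks after the loop decide whether the temp file is kept.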