This factors out the logic for correctly inserting a file into a
brigade, doing the special little dance for large files if necessary;
the code was already duplicated in two places and it's needed in other
places too. (pervasive largefile support being my top 2.1 goal :)
Any comments, naming preferences, "put it in apr-util you twit" flames?
Index: include/util_filter.h
===
RCS file: /home/cvs/httpd-2.0/include/util_filter.h,v
retrieving revision 1.92
diff -u -r1.92 util_filter.h
--- include/util_filter.h 31 Oct 2004 18:00:43 - 1.92
+++ include/util_filter.h 4 Nov 2004 21:53:57 -
@@ -489,6 +489,22 @@
apr_bucket_brigade **b, apr_pool_t *p);
/**
+ * Utility function to insert a file of given length onto the end of
+ * the brigade. The file is split into multiple buckets if it is
+ * larger than the maximum size which can be represented by a single bucket.
+ * @param bb the brigade to insert into
+ * @param f the file to insert
+ * @param start the offset into the file
+ * @param len the length of the file to insert
+ * @param p pool from which file buckets are allocated
+ * @return the last bucket inserted
+ */
+AP_DECLARE(apr_bucket *) ap_brigade_insert_file(apr_bucket_brigade *bb,
+apr_file_t *f, apr_off_t start,
+apr_off_t len, apr_pool_t *p);
+
+
+/**
* Flush function for apr_brigade_* calls. This calls ap_pass_brigade
* to flush the brigade if the brigade buffer overflows.
* @param bb The brigade to flush
Index: modules/experimental/mod_disk_cache.c
===
RCS file: /home/cvs/httpd-2.0/modules/experimental/mod_disk_cache.c,v
retrieving revision 1.68
diff -u -r1.68 mod_disk_cache.c
--- modules/experimental/mod_disk_cache.c 4 Nov 2004 21:51:12 - 1.68
+++ modules/experimental/mod_disk_cache.c 4 Nov 2004 21:53:59 -
@@ -502,14 +502,10 @@
static apr_status_t recall_body(cache_handle_t *h, apr_pool_t *p, apr_bucket_brigade
*bb)
{
-apr_bucket *e;
disk_cache_object_t *dobj = (disk_cache_object_t*) h->cache_obj->vobj;
-e = apr_bucket_file_create(dobj->fd, 0, (apr_size_t) dobj->file_size, p,
- bb->bucket_alloc);
-APR_BRIGADE_INSERT_HEAD(bb, e);
-e = apr_bucket_eos_create(bb->bucket_alloc);
-APR_BRIGADE_INSERT_TAIL(bb, e);
+ap_brigade_insert_file(bb, dobj->fd, 0, dobj->file_size, p);
+APR_BRIGADE_INSERT_TAIL(bb, apr_bucket_eos_create(bb->bucket_alloc));
return APR_SUCCESS;
}
Index: modules/generators/mod_asis.c
===
RCS file: /home/cvs/httpd-2.0/modules/generators/mod_asis.c,v
retrieving revision 1.51
diff -u -r1.51 mod_asis.c
--- modules/generators/mod_asis.c 9 Feb 2004 20:29:19 - 1.51
+++ modules/generators/mod_asis.c 4 Nov 2004 21:53:59 -
@@ -76,7 +76,6 @@
if (!r->header_only) {
apr_bucket_brigade *bb;
-apr_bucket *b;
apr_off_t pos = 0;
rv = apr_file_seek(f, APR_CUR, &pos);
@@ -89,30 +88,9 @@
}
bb = apr_brigade_create(r->pool, c->bucket_alloc);
-#if APR_HAS_LARGE_FILES
-if (r->finfo.size - pos > AP_MAX_SENDFILE) {
-/* APR_HAS_LARGE_FILES issue; must split into mutiple buckets,
- * no greater than MAX(apr_size_t), and more granular than that
- * in case the brigade code/filters attempt to read it directly.
- */
-apr_off_t fsize = r->finfo.size - pos;
-b = apr_bucket_file_create(f, pos, AP_MAX_SENDFILE,
- r->pool, c->bucket_alloc);
-while (fsize > AP_MAX_SENDFILE) {
-APR_BRIGADE_INSERT_TAIL(bb, b);
-apr_bucket_copy(b, &b);
-b->start += AP_MAX_SENDFILE;
-fsize -= AP_MAX_SENDFILE;
-}
-b->length = (apr_size_t)fsize; /* Resize just the last bucket */
-}
-else
-#endif
-b = apr_bucket_file_create(f, pos, (apr_size_t) (r->finfo.size - pos),
- r->pool, c->bucket_alloc);
-APR_BRIGADE_INSERT_TAIL(bb, b);
-b = apr_bucket_eos_create(c->bucket_alloc);
-APR_BRIGADE_INSERT_TAIL(bb, b);
+ap_brigade_insert_file(bb, f, pos, r->finfo.size - pos, r->pool);
+APR_BRIGADE_INSERT_TAIL(bb, apr_bucket_eos_create(c->bucket_alloc));
rv = ap_pass_brigade(r->output_filters, bb);
if (rv != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
Index: server/core.c
===
RCS file: /home/cvs/httpd-2.0/server/core.c,v
retrieving revision 1.292
diff -u -r1.292 core.c
--- server/core.c 4 Nov 2004 16:04:55 - 1.292