First fix :)

On Fri, Mar 3, 2017 at 6:41 PM, Yann Ylavic <ylavic....@gmail.com> wrote:
>
> With apr_allocator_bulk_alloc(), one can request several apr_memnode_t
> of a fixed (optional) or minimal given size, and in the worst case get
> a single one (allocated), or in the best case as many free ones as
> available (within a maximal size, also given).

The non-fixed version was buggy (it couldn't reuse lower indexes), so
"apr_allocators-bulk-v2.patch" is attached.
Will post some numbers (w.r.t. buffers' reuse and writev sizes) soon.
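
For illustration, here is roughly how a caller would drive the new
allocator API (an untested sketch; the sizes are arbitrary and error
handling is elided):

    apr_allocator_t *a = NULL;
    apr_memnode_t *nodes;
    apr_size_t total = 64 * 1024;   /* request at most 64K overall */
    apr_size_t num = 0;

    apr_allocator_create(&a);

    /* Ask for free nodes of at least 8000 bytes each (non-fixed mode);
     * worst case a single node is malloc()ed, best case several free
     * nodes are returned in a NULL-terminated list, largest first.
     */
    nodes = apr_allocator_bulk_alloc(a, 8000, &total, &num, 0);
    if (nodes) {
        /* 'total' now holds the usable bytes across the 'num' nodes
         * (chained through ->next); fill them, then give the whole
         * list back at once.
         */
        apr_allocator_free(a, nodes);
    }
    apr_allocator_destroy(a);
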
Index: srclib/apr/include/apr_allocator.h
===================================================================
--- srclib/apr/include/apr_allocator.h	(revision 1763844)
+++ srclib/apr/include/apr_allocator.h	(working copy)
@@ -93,6 +93,26 @@ APR_DECLARE(apr_memnode_t *) apr_allocator_alloc(a
                                                  apr_size_t size)
                              __attribute__((nonnull(1)));
 
+/** Get free nodes of [@a block_size:@a total_size] bytes, or allocate
+ * one of @a block_size bytes if none is found, putting the number of
+ * returned nodes in @a blocks_num and the number of bytes in @a total_size.
+ * @param a The allocator to allocate from
+ * @param block_size The minimum size of a mem block (excluding the memnode
+ *                   structure)
+ * @param total_size The maximum overall size to get on input, the obtained
+ *                   size on output (excluding the memnode structure for each
+ *                   block)
+ * @param blocks_num The number of nodes returned in the list (can be NULL)
+ * @param blocks_fixed Whether all blocks should have a fixed size (i.e.
+ *                     @a block_size rounded up to the boundary size)
+ */
+APR_DECLARE(apr_memnode_t *) apr_allocator_bulk_alloc(apr_allocator_t *a,
+                                                      apr_size_t block_size,
+                                                      apr_size_t *total_size,
+                                                      apr_size_t *blocks_num,
+                                                      int blocks_fixed)
+                             __attribute__((nonnull(1,3)));
+
 /**
  * Free a list of blocks of mem, giving them back to the allocator.
  * The list is typically terminated by a memnode with its next field
@@ -104,6 +124,13 @@ APR_DECLARE(void) apr_allocator_free(apr_allocator
                                      apr_memnode_t *memnode)
                   __attribute__((nonnull(1,2)));
 
+/**
+ * Get the aligned size corresponding to the requested size
+ * @param size The size to align
+ * @return The aligned size (including the memnode structure), or 0 on overflow
+ */
+APR_DECLARE(apr_size_t) apr_allocator_align(apr_size_t size);
+
 #include "apr_pools.h"
 
 /**
Index: srclib/apr/memory/unix/apr_pools.c
===================================================================
--- srclib/apr/memory/unix/apr_pools.c	(revision 1763844)
+++ srclib/apr/memory/unix/apr_pools.c	(working copy)
@@ -115,7 +115,10 @@ struct apr_allocator_t {
 
 #define SIZEOF_ALLOCATOR_T  APR_ALIGN_DEFAULT(sizeof(apr_allocator_t))
 
+/* Returns the amount of free space in the given node. */
+#define node_free_space(node_) ((apr_size_t)(node_->endp - node_->first_avail))
 
+
 /*
  * Allocator
  */
@@ -209,38 +212,21 @@ APR_DECLARE(void) apr_allocator_max_free_set(apr_a
 #endif
 }
 
-static APR_INLINE
-apr_memnode_t *allocator_alloc(apr_allocator_t *allocator, apr_size_t in_size)
+static
+apr_memnode_t *allocator_alloc_index(apr_allocator_t *allocator,
+                                     apr_uint32_t index, apr_size_t size,
+                                     int lock, int free_only, int fixed)
 {
     apr_memnode_t *node, **ref;
     apr_uint32_t max_index;
-    apr_size_t size, i, index;
+    apr_size_t i;
 
-    /* Round up the block size to the next boundary, but always
-     * allocate at least a certain size (MIN_ALLOC).
-     */
-    size = APR_ALIGN(in_size + APR_MEMNODE_T_SIZE, BOUNDARY_SIZE);
-    if (size < in_size) {
-        return NULL;
-    }
-    if (size < MIN_ALLOC)
-        size = MIN_ALLOC;
-
-    /* Find the index for this node size by
-     * dividing its size by the boundary size
-     */
-    index = (size >> BOUNDARY_INDEX) - 1;
-    
-    if (index > APR_UINT32_MAX) {
-        return NULL;
-    }
-
     /* First see if there are any nodes in the area we know
      * our node will fit into.
      */
     if (index <= allocator->max_index) {
 #if APR_HAS_THREADS
-        if (allocator->mutex)
+        if (lock)
             apr_thread_mutex_lock(allocator->mutex);
 #endif /* APR_HAS_THREADS */
 
@@ -257,9 +243,11 @@ APR_DECLARE(void) apr_allocator_max_free_set(apr_a
         max_index = allocator->max_index;
         ref = &allocator->free[index];
         i = index;
-        while (*ref == NULL && i < max_index) {
-           ref++;
-           i++;
+        if (!fixed) {
+            while (*ref == NULL && i < max_index) {
+               ref++;
+               i++;
+            }
         }
 
         if ((node = *ref) != NULL) {
@@ -283,7 +271,7 @@ APR_DECLARE(void) apr_allocator_max_free_set(apr_a
                 allocator->current_free_index = allocator->max_free_index;
 
 #if APR_HAS_THREADS
-            if (allocator->mutex)
+            if (lock)
                 apr_thread_mutex_unlock(allocator->mutex);
 #endif /* APR_HAS_THREADS */
 
@@ -294,7 +282,7 @@ APR_DECLARE(void) apr_allocator_max_free_set(apr_a
         }
 
 #if APR_HAS_THREADS
-        if (allocator->mutex)
+        if (lock)
             apr_thread_mutex_unlock(allocator->mutex);
 #endif /* APR_HAS_THREADS */
     }
@@ -304,7 +292,7 @@ APR_DECLARE(void) apr_allocator_max_free_set(apr_a
      */
     else if (allocator->free[0]) {
 #if APR_HAS_THREADS
-        if (allocator->mutex)
+        if (lock)
             apr_thread_mutex_lock(allocator->mutex);
 #endif /* APR_HAS_THREADS */
 
@@ -315,7 +303,7 @@ APR_DECLARE(void) apr_allocator_max_free_set(apr_a
         while ((node = *ref) != NULL && index > node->index)
             ref = &node->next;
 
-        if (node) {
+        if (node && (!fixed || index == node->index)) {
             *ref = node->next;
 
             allocator->current_free_index += node->index + 1;
@@ -323,7 +311,7 @@ APR_DECLARE(void) apr_allocator_max_free_set(apr_a
                 allocator->current_free_index = allocator->max_free_index;
 
 #if APR_HAS_THREADS
-            if (allocator->mutex)
+            if (lock)
                 apr_thread_mutex_unlock(allocator->mutex);
 #endif /* APR_HAS_THREADS */
 
@@ -334,11 +322,15 @@ APR_DECLARE(void) apr_allocator_max_free_set(apr_a
         }
 
 #if APR_HAS_THREADS
-        if (allocator->mutex)
+        if (lock)
             apr_thread_mutex_unlock(allocator->mutex);
 #endif /* APR_HAS_THREADS */
     }
 
+    if (free_only) {
+        return NULL;
+    }
+
     /* If we haven't got a suitable node, malloc a new one
      * and initialize it.
      */
@@ -359,6 +351,164 @@ APR_DECLARE(void) apr_allocator_max_free_set(apr_a
 }
 
 static APR_INLINE
+apr_size_t allocator_align(apr_size_t in_size)
+{
+    apr_size_t size = in_size;
+
+    /* Round up the block size to the next boundary, but always
+     * allocate at least a certain size (MIN_ALLOC).
+     */
+    size = APR_ALIGN(size + APR_MEMNODE_T_SIZE, BOUNDARY_SIZE);
+    if (size < in_size) {
+        return 0;
+    }
+    if (size < MIN_ALLOC) {
+        size = MIN_ALLOC;
+    }
+
+    return size;
+}
+
+static APR_INLINE
+apr_memnode_t *allocator_alloc(apr_allocator_t *allocator, apr_size_t in_size)
+{
+    apr_size_t size;
+    apr_size_t index;
+#if APR_HAS_THREADS
+    const int lock = !!allocator->mutex;
+#else
+    const int lock = 0;
+#endif
+
+
+    size = allocator_align(in_size);
+    if (!size) {
+        return NULL;
+    }
+
+    /* Find the index for this node size by
+     * dividing its size by the boundary size
+     */
+    index = (size >> BOUNDARY_INDEX) - 1;
+    if (index > APR_UINT32_MAX) {
+        return NULL;
+    }
+
+    return allocator_alloc_index(allocator, index, size, lock, 0, 0);
+}
+
+static APR_INLINE
+apr_memnode_t *allocator_bulk_alloc(apr_allocator_t *a,
+                                    apr_size_t block_size,
+                                    apr_size_t *total_size,
+                                    apr_size_t *blocks_num,
+                                    int blocks_fixed)
+{
+    apr_memnode_t *node, *nodes = NULL, *last = NULL;
+    apr_size_t size, len = *total_size, pos = 0, num = 0;
+    apr_size_t index;
+
+    *total_size = 0;
+    if (blocks_num) {
+        *blocks_num = 0;
+    }
+
+    size = allocator_align(block_size);
+    if (!size) {
+        return NULL;
+    }
+
+    /* Find the index for the block size by
+     * dividing its size by the boundary size
+     */
+    index = (size >> BOUNDARY_INDEX) - 1;
+    if (index > APR_UINT32_MAX) {
+        return NULL;
+    }
+
+    /* Sanity checks */
+    if (len < size) {
+        len = size;
+    }
+    else if (len > APR_SIZE_MAX - APR_MEMNODE_T_SIZE) {
+        len = APR_SIZE_MAX - APR_MEMNODE_T_SIZE;
+    }
+
+#if APR_HAS_THREADS
+    if (a->mutex)
+        apr_thread_mutex_lock(a->mutex);
+#endif /* APR_HAS_THREADS */
+
+    /* Acquire free blocks with an index equal to the block index
+     * (or greater than it, in !blocks_fixed mode).
+     */
+    if (blocks_fixed) {
+        for (; pos < len; pos += node_free_space(node), ++num) {
+            node = allocator_alloc_index(a, index, size, 0, 1, 1);
+            if (!node) {
+                break;
+            }
+            node->next = nodes;
+            nodes = node;
+        }
+    }
+    else {
+        /* Find the largest possible nodes based on the remaining size */
+        for (; pos < len; pos += node_free_space(node), ++num) {
+            apr_size_t n = allocator_align(len - pos);
+            apr_size_t i = (n >> BOUNDARY_INDEX) - 1;
+
+            /* Enough ? */
+            if (i < index) {
+                break;
+            }
+
+            /* Cap to a free index */
+            if (i > a->max_index) {
+                apr_memnode_t *oversize = a->free[0];
+                if (oversize && i >= oversize->index) {
+                    i = MAX_INDEX;
+                }
+                else if (a->max_index) {
+                    i = a->max_index;
+                }
+                else {
+                    break;
+                }
+            }
+
+            /* Can't fail now, queue last (i.e. largest first) */
+            node = allocator_alloc_index(a, i, n, 0, 1, 0);
+            if (last) {
+                last = last->next = node;
+            }
+            else {
+                nodes = last = node;
+            }
+        }
+    }
+
+    /* Alloc a single node when no (free) one is available above */
+    if (!nodes && (nodes = allocator_alloc_index(a, index, size, 0, 0,
+                                                 blocks_fixed))) {
+        pos += node_free_space(nodes);
+        num++;
+    }
+
+#if APR_HAS_THREADS
+    if (a->mutex)
+        apr_thread_mutex_unlock(a->mutex);
+#endif /* APR_HAS_THREADS */
+
+    *total_size = pos;
+    if (blocks_num) {
+        *blocks_num = num;
+    }
+
+    return nodes;
+}
+
+static APR_INLINE
 void allocator_free(apr_allocator_t *allocator, apr_memnode_t *node)
 {
     apr_memnode_t *next, *freelist = NULL;
@@ -402,10 +552,14 @@ void allocator_free(apr_allocator_t *allocator, ap
         }
         else {
             /* This node is too large to keep in a specific size bucket,
-             * just add it to the sink (at index 0).
+             * just add it to the sink (at index 0), smallest first.
              */
-            node->next = allocator->free[0];
-            allocator->free[0] = node;
+            apr_memnode_t *pos, **ref = &allocator->free[0];
+            while ((pos = *ref) != NULL && index > pos->index)
+                ref = &pos->next;
+            node->next = pos;
+            *ref = node;
+
             if (current_free_index >= index + 1)
                 current_free_index -= index + 1;
             else
@@ -438,6 +592,16 @@ APR_DECLARE(apr_memnode_t *) apr_allocator_alloc(a
     return allocator_alloc(allocator, size);
 }
 
+APR_DECLARE(apr_memnode_t *) apr_allocator_bulk_alloc(apr_allocator_t *a,
+                                                      apr_size_t block_size,
+                                                      apr_size_t *total_size,
+                                                      apr_size_t *blocks_num,
+                                                      int blocks_fixed)
+{
+    return allocator_bulk_alloc(a, block_size, total_size, blocks_num,
+                                blocks_fixed);
+}
+
 APR_DECLARE(void) apr_allocator_free(apr_allocator_t *allocator,
                                      apr_memnode_t *node)
 {
@@ -444,6 +608,10 @@ APR_DECLARE(void) apr_allocator_free(apr_allocator
     allocator_free(allocator, node);
 }
 
+APR_DECLARE(apr_size_t) apr_allocator_align(apr_size_t size)
+{
+    return allocator_align(size);
+}
 
 
 /*
@@ -658,9 +826,6 @@ APR_DECLARE(void) apr_pool_terminate(void)
     node->next->ref = node->ref;                \
 } while (0)
 
-/* Returns the amount of free space in the given node. */
-#define node_free_space(node_) ((apr_size_t)(node_->endp - node_->first_avail))
-
 /*
  * Memory allocation
  */
Index: srclib/apr-util/include/apr_buckets.h
===================================================================
--- srclib/apr-util/include/apr_buckets.h	(revision 1732829)
+++ srclib/apr-util/include/apr_buckets.h	(working copy)
@@ -606,6 +606,14 @@ struct apr_bucket_mmap {
 
 /** @see apr_bucket_file */
 typedef struct apr_bucket_file apr_bucket_file;
+/** Bucket file scatter read mode */
+typedef enum {
+    APR_BUCKET_FILE_SCATTER_OFF = 0,    /**< No scattering */
+    APR_BUCKET_FILE_SCATTER_ON,         /**< Scatter read in buffers of at
+                                         *   least apr_bucket_file#read_size */
+    APR_BUCKET_FILE_SCATTER_FIXED       /**< Scatter read in buffers of fixed
+                                         *   apr_bucket_file#read_size */
+} apr_bucket_file_scatter_e;
 /**
  * A bucket referring to an file
  */
@@ -622,6 +630,10 @@ struct apr_bucket_file {
      *  a caller tries to read from it */
     int can_mmap;
 #endif /* APR_HAS_MMAP */
+    /** @see ::apr_bucket_file_scatter_e */
+    int can_scatter;
+    /** File read block size */
+    apr_size_t read_size;
 };
 
 /** @see apr_bucket_structs */
@@ -969,12 +981,51 @@ APU_DECLARE_NONSTD(void) apr_bucket_alloc_destroy(
 APU_DECLARE_NONSTD(void *) apr_bucket_alloc(apr_size_t size, apr_bucket_alloc_t *list);
 
 /**
+ * Allocate a vector of memory blocks for use by the buckets.
+ * @param block_size The minimum size to allocate per block.
+ * @param total_size The maximum overall size to allocate (in), then the
+ *                   amount allocated (out).
+ * @param list The allocator from which to allocate the memory.
+ * @param blocks_num The number of blocks in the returned vector (can be NULL).
+ * @param blocks_fixed Whether all blocks should have a fixed size (i.e.
+ *                     @a block_size rounded up to the boundary size).
+ * @return The allocated vector and blocks, or NULL on failure.
+ */
+APU_DECLARE_NONSTD(struct iovec *) apr_bucket_bulk_alloc(apr_size_t block_size,
+                                                         apr_size_t *total_size,
+                                                         apr_bucket_alloc_t *list,
+                                                         apr_size_t *blocks_num,
+                                                         int blocks_fixed);
+
+/**
  * Free memory previously allocated with apr_bucket_alloc().
  * @param block The block of memory to be freed.
  */
 APU_DECLARE_NONSTD(void) apr_bucket_free(void *block);
 
+/**
+ * Free the vector of memory blocks previously allocated with
+ * apr_bucket_bulk_alloc().
+ * @param blocks_vec The vector of blocks to be freed.
+ * @param blocks_num The number of blocks in the vector.
+ * @param free_vec Whether or not to free the vector itself (in addition
+ *                 to the blocks).
+ * @remark For performance reasons, this function assumes that the vector
+ * and all its blocks come from the same allocator.
+ */
+APU_DECLARE_NONSTD(void) apr_bucket_bulk_free(struct iovec *blocks_vec,
+                                              apr_size_t blocks_num,
+                                              int free_vec);
 
+/**
+ * Get the largest usable size for the requested size, such that the
+ * (would-be) allocation still fits within the same allocator boundary.
+ * @param size The requested size.
+ * @return The corresponding size.
+ */
+APU_DECLARE(apr_size_t) apr_bucket_alloc_floor(apr_size_t size);
+
+
 /*  *****  Bucket Functions  *****  */
 /**
  * Free the resources used by a bucket. If multiple buckets refer to
@@ -1563,6 +1614,25 @@ APU_DECLARE(apr_bucket *) apr_bucket_file_make(apr
 APU_DECLARE(apr_status_t) apr_bucket_file_enable_mmap(apr_bucket *b,
                                                       int enabled);
 
+/**
+ * Enable or disable scatter mode for a FILE bucket (default is off)
+ * @param b The bucket
+ * @param mode One of ::apr_bucket_file_scatter_e
+ * @return APR_SUCCESS normally, or an error code if the operation fails
+ */
+APU_DECLARE(apr_status_t) apr_bucket_file_enable_scatter(apr_bucket *b,
+                                           apr_bucket_file_scatter_e mode);
+
+/**
+ * Set the size of the HEAP buckets' buffers allocated by a FILE bucket on
+ * read, when memory-mapping is disabled
+ * @param b The bucket
+ * @param size Size of the allocated buffers
+ * @return APR_SUCCESS normally, or an error code if the operation fails
+ */
+APU_DECLARE(apr_status_t) apr_bucket_file_read_size_set(apr_bucket *b,
+                                                        apr_size_t size);
+
 /** @} */
 #ifdef __cplusplus
 }
Index: srclib/apr-util/buckets/apr_buckets_alloc.c
===================================================================
--- srclib/apr-util/buckets/apr_buckets_alloc.c	(revision 1732829)
+++ srclib/apr-util/buckets/apr_buckets_alloc.c	(working copy)
@@ -121,14 +121,49 @@ APU_DECLARE_NONSTD(void) apr_bucket_alloc_destroy(
 #endif
 }
 
-APU_DECLARE_NONSTD(void *) apr_bucket_alloc(apr_size_t size, 
+APU_DECLARE(apr_size_t) apr_bucket_alloc_floor(apr_size_t size)
+{
+    if (size <= SMALL_NODE_SIZE) {
+        size = SMALL_NODE_SIZE;
+    }
+    else {
+        if (size < APR_MEMNODE_T_SIZE) {
+            size = apr_allocator_align(0);
+        }
+        else {
+            size = apr_allocator_align(size - APR_MEMNODE_T_SIZE);
+        }
+        size -= APR_MEMNODE_T_SIZE;
+    }
+    size -= SIZEOF_NODE_HEADER_T;
+    return size;
+}
+
+static APR_INLINE
+void apr_bucket_abort(apr_bucket_alloc_t *list)
+{
+    if (list->pool) {
+        apr_abortfunc_t fn = apr_pool_abort_get(list->pool);
+        if (fn) {
+            fn(APR_ENOMEM);
+        }
+    }
+}
+
+APU_DECLARE_NONSTD(void *) apr_bucket_alloc(apr_size_t in_size, 
                                             apr_bucket_alloc_t *list)
 {
     node_header_t *node;
     apr_memnode_t *active = list->blocks;
+    apr_size_t size = in_size;
     char *endp;
 
     size += SIZEOF_NODE_HEADER_T;
+    if (size < in_size) { /* too big? */
+        apr_bucket_abort(list);
+        return NULL;
+    }
+
     if (size <= SMALL_NODE_SIZE) {
         if (list->freelist) {
             node = list->freelist;
@@ -140,6 +175,7 @@ APU_DECLARE_NONSTD(void) apr_bucket_alloc_destroy(
                 list->blocks = apr_allocator_alloc(list->allocator, ALLOC_AMT);
                 if (!list->blocks) {
                     list->blocks = active;
+                    apr_bucket_abort(list);
                     return NULL;
                 }
                 list->blocks->next = active;
@@ -156,6 +192,7 @@ APU_DECLARE_NONSTD(void) apr_bucket_alloc_destroy(
     else {
         apr_memnode_t *memnode = apr_allocator_alloc(list->allocator, size);
         if (!memnode) {
+            apr_bucket_abort(list);
             return NULL;
         }
         node = (node_header_t *)memnode->first_avail;
@@ -166,6 +203,94 @@ APU_DECLARE_NONSTD(void) apr_bucket_alloc_destroy(
     return ((char *)node) + SIZEOF_NODE_HEADER_T;
 }
 
+APU_DECLARE_NONSTD(struct iovec *) apr_bucket_bulk_alloc(apr_size_t block_size,
+                                                         apr_size_t *total_size,
+                                                         apr_bucket_alloc_t *list,
+                                                         apr_size_t *blocks_num,
+                                                         int blocks_fixed)
+{
+    struct iovec *vec;
+    apr_size_t size;
+
+    if (block_size <= SMALL_NODE_SIZE - SIZEOF_NODE_HEADER_T) {
+        void *mem;
+
+        mem = apr_bucket_alloc(block_size, list);
+        if (!mem) {
+            return NULL;
+        }
+
+        vec = apr_bucket_alloc(sizeof(struct iovec), list);
+        if (!vec) {
+            apr_bucket_free(mem);
+            return NULL;
+        }
+        if (blocks_fixed) {
+            size = block_size;
+        }
+        else {
+            size = SMALL_NODE_SIZE - SIZEOF_NODE_HEADER_T;
+        }
+        vec->iov_base = mem;
+        vec->iov_len = size;
+
+        *total_size = size;
+        if (blocks_num) {
+            *blocks_num = 1;
+        }
+    }
+    else {
+        node_header_t *node;
+        apr_memnode_t *memnode;
+        apr_size_t i, n = 0;
+
+        size = block_size + SIZEOF_NODE_HEADER_T;
+        if (size < block_size) { /* too big? */
+            apr_bucket_abort(list);
+            return NULL;
+        }
+
+        memnode = apr_allocator_bulk_alloc(list->allocator, size, total_size,
+                                           &n, blocks_fixed);
+        if (!memnode) {
+            apr_bucket_abort(list);
+            return NULL;
+        }
+
+        vec = apr_bucket_alloc(n * sizeof(struct iovec), list);
+        if (!vec) {
+            apr_allocator_free(list->allocator, memnode);
+            return NULL;
+        }
+        for (size = i = 0; i < n; size += node->size, ++i) {
+            apr_memnode_t *next = memnode->next;
+
+            node = (node_header_t *)memnode->first_avail;
+            memnode->first_avail += SIZEOF_NODE_HEADER_T;
+            memnode->next = NULL;
+
+            node->alloc = list;
+            node->memnode = memnode;
+            if (blocks_fixed) {
+                node->size = block_size;
+            }
+            else {
+                node->size = memnode->endp - memnode->first_avail;
+            }
+            vec[i].iov_base = memnode->first_avail;
+            vec[i].iov_len = node->size;
+
+            memnode = next;
+        }
+        *total_size = size;
+        if (blocks_num) {
+            *blocks_num = n;
+        }
+    }
+
+    return vec;
+}
+
 #ifdef APR_BUCKET_DEBUG
 #if APR_HAVE_STDLIB_H
 #include <stdlib.h>
@@ -200,3 +325,44 @@ APU_DECLARE_NONSTD(void) apr_bucket_free(void *mem
         apr_allocator_free(list->allocator, node->memnode);
     }
 }
+
+APU_DECLARE_NONSTD(void) apr_bucket_bulk_free(struct iovec *blocks_vec,
+                                              apr_size_t blocks_num,
+                                              int free_vec)
+{
+    apr_bucket_alloc_t *list = NULL;
+    apr_memnode_t *memnode = NULL;
+    apr_size_t i;
+
+    for (i = 0; i < blocks_num; ++i) {
+        node_header_t *node;
+        char *mem = blocks_vec[i].iov_base;
+
+        node = (node_header_t *)(mem - SIZEOF_NODE_HEADER_T);
+        if (!list) {
+            list = node->alloc;
+        }
+#ifdef APR_BUCKET_DEBUG
+        else if (list != node->alloc) {
+            abort();
+        }
+#endif
+        if (node->size == SMALL_NODE_SIZE) {
+            check_not_already_free(node);
+            node->next = list->freelist;
+            list->freelist = node;
+        }
+        else {
+            apr_memnode_t *n = node->memnode;
+            n->next = memnode;
+            memnode = n;
+        }
+    }
+    if (memnode) {
+        apr_allocator_free(list->allocator, memnode);
+    }
+
+    if (free_vec) {
+        apr_bucket_free(blocks_vec);
+    }
+}
Index: srclib/apr-util/buckets/apr_buckets_file.c
===================================================================
--- srclib/apr-util/buckets/apr_buckets_file.c	(revision 1732829)
+++ srclib/apr-util/buckets/apr_buckets_file.c	(working copy)
@@ -78,7 +78,6 @@ static apr_status_t file_bucket_read(apr_bucket *e
     apr_bucket_file *a = e->data;
     apr_file_t *f = a->fd;
     apr_bucket *b = NULL;
-    char *buf;
     apr_status_t rv;
     apr_size_t filelength = e->length;  /* bytes remaining in file past offset */
     apr_off_t fileoffset = e->start;
@@ -85,6 +84,15 @@ static apr_status_t file_bucket_read(apr_bucket *e
 #if APR_HAS_THREADS && !APR_HAS_XTHREAD_FILES
     apr_int32_t flags;
 #endif
+    apr_size_t size;
+    int scattered = 0;
+    union {
+        char *buf;
+        struct {
+            struct iovec *v;
+            apr_size_t n;
+        } io;
+    } u = {0};
 
 #if APR_HAS_MMAP
     if (file_make_mmap(e, filelength, fileoffset, a->readpool)) {
@@ -108,36 +116,119 @@ static apr_status_t file_bucket_read(apr_bucket *e
     }
 #endif
 
-    *len = (filelength > APR_BUCKET_BUFF_SIZE)
-               ? APR_BUCKET_BUFF_SIZE
-               : filelength;
     *str = NULL;  /* in case we die prematurely */
-    buf = apr_bucket_alloc(*len, e->list);
+    *len = 0;
 
     /* Handle offset ... */
     rv = apr_file_seek(f, APR_SET, &fileoffset);
     if (rv != APR_SUCCESS) {
-        apr_bucket_free(buf);
         return rv;
     }
-    rv = apr_file_read(f, buf, len);
-    if (rv != APR_SUCCESS && rv != APR_EOF) {
-        apr_bucket_free(buf);
+
+    size = filelength;
+    if (size > a->read_size) {
+        if (a->can_scatter) {
+            scattered = 1;
+        }
+        else {
+            size = a->read_size;
+        }
+    }
+    if (scattered) {
+        int fixed = (a->can_scatter == APR_BUCKET_FILE_SCATTER_FIXED);
+        u.io.v = apr_bucket_bulk_alloc(a->read_size, &size, e->list,
+                                       &u.io.n, fixed);
+        rv = u.io.v ? apr_file_readv(f, u.io.v, u.io.n, &size) : APR_ENOMEM;
+#if 1
+        fprintf(stderr, "file_bucket_read_vec: %"APR_SIZE_T_FMT"/%"APR_SIZE_T_FMT" bytes: %i\n",
+                size, filelength, rv);
+        fflush(stderr);
+#endif
+    }
+    else {
+        u.buf = apr_bucket_alloc(size, e->list);
+        rv = apr_file_read(f, u.buf, &size);
+#if 1
+        fprintf(stderr, "file_bucket_read_buf: %"APR_SIZE_T_FMT"/%"APR_SIZE_T_FMT" bytes\n: %i",
+                size, filelength, rv);
+        fflush(stdout);
+#endif
+    }
+    if (rv != APR_SUCCESS) {
+        if (scattered) {
+            if (u.io.v) apr_bucket_bulk_free(u.io.v, u.io.n, 1);
+        }
+        else {
+            apr_bucket_free(u.buf);
+        }
+        if (rv == APR_EOF) {
+            /* Change the current bucket to an empty one. */
+            apr_bucket_immortal_make(e, *str = "", 0);
+            file_bucket_destroy(a);
+        }
         return rv;
     }
-    filelength -= *len;
+    filelength -= size;
+
     /*
-     * Change the current bucket to refer to what we read,
-     * even if we read nothing because we hit EOF.
+     * Change the current bucket to refer to what we read.
      */
-    apr_bucket_heap_make(e, buf, *len, apr_bucket_free);
+    if (scattered) {
+        apr_size_t avail = size, i;
 
+        *str = u.io.v[0].iov_base;
+        *len = u.io.v[0].iov_len;
+        if (*len < avail) {
+            avail -= *len;
+        }
+        else {
+            *len = avail;
+            avail = 0;
+        }
+#if 1
+        fprintf(stderr, "file_bucket_read[00]: "
+                "%"APR_SIZE_T_FMT" bytes (%pp)\n",
+                *len, u.io.v[0].iov_base);
+        fflush(stderr);
+#endif
+        apr_bucket_heap_make(e, *str, *len, apr_bucket_free);
+        for (i = 1; avail && i < u.io.n; ++i) {
+            apr_size_t n = u.io.v[i].iov_len;
+            if (n < avail) {
+                avail -= n;
+            }
+            else {
+                n = avail;
+                avail = 0;
+            }
+#if 1
+            fprintf(stderr, "file_bucket_read[%.2"APR_SIZE_T_FMT"]: "
+                    "%"APR_SIZE_T_FMT" bytes (%pp)\n",
+                    i, n, u.io.v[i].iov_base);
+            fflush(stderr);
+#endif
+            b = apr_bucket_heap_create(u.io.v[i].iov_base, n,
+                                       apr_bucket_free, e->list);
+            APR_BUCKET_INSERT_AFTER(e, b);
+            e = b;
+        }
+        if (i < u.io.n) {
+            apr_bucket_bulk_free(u.io.v + i, u.io.n - i, 0);
+        }
+        apr_bucket_free(u.io.v);
+    }
+    else {
+        *str = u.buf;
+        *len = size;
+        apr_bucket_heap_make(e, *str, *len, apr_bucket_free);
+    }
+
     /* If we have more to read from the file, then create another bucket */
-    if (filelength > 0 && rv != APR_EOF) {
+    if (filelength > 0) {
         /* for efficiency, we can just build a new apr_bucket struct
          * to wrap around the existing file bucket */
         b = apr_bucket_alloc(sizeof(*b), e->list);
-        b->start  = fileoffset + (*len);
+        b->start  = fileoffset + size;
         b->length = filelength;
         b->data   = a;
         b->type   = &apr_bucket_type_file;
@@ -149,7 +240,6 @@ static apr_status_t file_bucket_read(apr_bucket *e
         file_bucket_destroy(a);
     }
 
-    *str = buf;
     return rv;
 }
 
@@ -165,6 +255,8 @@ APU_DECLARE(apr_bucket *) apr_bucket_file_make(apr
 #if APR_HAS_MMAP
     f->can_mmap = 1;
 #endif
+    f->can_scatter = APR_BUCKET_FILE_SCATTER_OFF;
+    f->read_size = APR_BUCKET_BUFF_SIZE;
 
     b = apr_bucket_shared_make(b, f, offset, len);
     b->type = &apr_bucket_type_file;
@@ -197,7 +289,60 @@ APU_DECLARE(apr_status_t) apr_bucket_file_enable_m
 #endif /* APR_HAS_MMAP */
 }
 
+APU_DECLARE(apr_status_t) apr_bucket_file_enable_scatter(apr_bucket *e,
+                                           apr_bucket_file_scatter_e mode)
+{
+    apr_bucket_file *a = e->data;
+    switch (mode) {
+    case APR_BUCKET_FILE_SCATTER_ON:
+        a->can_scatter = APR_BUCKET_FILE_SCATTER_ON;
+        break;
+    case APR_BUCKET_FILE_SCATTER_FIXED:
+        a->can_scatter = APR_BUCKET_FILE_SCATTER_FIXED;
+        break;
+    case APR_BUCKET_FILE_SCATTER_OFF:
+        a->can_scatter = APR_BUCKET_FILE_SCATTER_OFF;
+        break;
+    default:
+        return APR_EINVAL;
+    }
+    return APR_SUCCESS;
+}
 
+/*
+ * The allocator will always align (round up) the requested size to its
+ * boundary size (a multiple of the system's page size).
+ *
+ * So we want to account for some potential "external" overhead, which for
+ * instance could happen when an aligned heap bucket's buffer is given to some
+ * application (e.g. a cryptographic library) which would then give back a
+ * transient buffer to set aside, with almost the same size (e.g. plus the
+ * header of a TLS record encapsulation, a MAC, tag, ...).
+ *
+ * By subtracting such a small overhead here, we allow for optimal recycling
+ * and reuse of the aligned heap buffers in this case; otherwise we'd end up
+ * with a different size for the original block and the one set aside, which
+ * would hurt (at least) the bulk fixed-size allocations.
+ *
+ * 32 bytes should be enough...
+ */
+#define EXTERNAL_OVERHEAD 32
+
+APU_DECLARE(apr_status_t) apr_bucket_file_read_size_set(apr_bucket *e,
+                                                        apr_size_t size)
+{
+    apr_bucket_file *a = e->data;
+
+    if (size < APR_BUCKET_BUFF_SIZE) {
+        a->read_size = APR_BUCKET_BUFF_SIZE;
+    }
+    else {
+        a->read_size = apr_bucket_alloc_floor(size) - EXTERNAL_OVERHEAD;
+    }
+
+    return APR_SUCCESS;
+}
+
 static apr_status_t file_bucket_setaside(apr_bucket *data, apr_pool_t *reqpool)
 {
     apr_bucket_file *a = data->data;
