Hello,
the attached patch modifies the allocator (and pool) to allow for
order-0 nodes/allocations (2^0 = one page, 4K on most CPUs).
Pools are still created with order-1 (two pages) by default, but one
can use the new apr_pool_order_set(order) function at any time
(preferably at init time) to default to a different order (max 9).
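
For example, a program that wants single-page pools could switch the
default right after initialization (just a usage sketch assuming the
patch is applied, not code from it):

    #include "apr_general.h"
    #include "apr_pools.h"

    int main(void)
    {
        apr_pool_t *p;

        apr_initialize();   /* runs apr_pool_initialize() as well */

        /* Default new pools to order-0, i.e. one page per underlying
         * allocation; anything above 9 is rejected with APR_EINVAL.
         */
        if (apr_pool_order_set(0) != APR_SUCCESS) {
            return 1;
        }

        apr_pool_create(&p, NULL);
        /* ... use the pool as usual ... */
        apr_pool_destroy(p);

        apr_terminate();
        return 0;
    }
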
This change requires recycling single pages in slot 0
(allocator->free[0]), and thus moving the sink slot (for non-fitting
nodes) to MAX_INDEX.
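
Concretely, a freed node now goes into the slot matching its index
(roughly its size in pages minus one), with the sink catching anything
at or above MAX_INDEX. An illustration only (free_slot() is not a real
function in the patch):

    #define MAX_INDEX 20

    /* Slot selection once the sink moves to MAX_INDEX:
     *   index  0  -> free[0]   (one page, now recyclable)
     *   index  1  -> free[1]   (two pages)
     *   ...
     *   index 19  -> free[19]
     *   index 20+ -> free[MAX_INDEX]  (the sink, oversized nodes)
     */
    static unsigned int free_slot(unsigned int node_index)
    {
        return node_index < MAX_INDEX ? node_index : MAX_INDEX;
    }
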
Also, since the page size is now relevant outside the
APR_ALLOCATOR_USES_MMAP scope, _SC_PAGESIZE is always determined at
init time and provides dynamic BOUNDARY_INDEX/BOUNDARY_SIZE values at
runtime (still only when _SC_PAGESIZE is defined; otherwise 4K remains
the default).
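
That is, init now always runs the page-size probe that the mmap case
already used; a sketch of the computation (boundary_init() is a made-up
name here, and the final power-of-two rounding mirrors the existing
code):

    #include <unistd.h>

    static unsigned int boundary_index;
    static unsigned int boundary_size;
    static unsigned int pool_order = 1;       /* default: order-1, two pages */

    static void boundary_init(void)
    {
    #if defined(_SC_PAGESIZE)
        boundary_size = (unsigned int)sysconf(_SC_PAGESIZE);
        boundary_index = 12;
        while ((1u << boundary_index) < boundary_size)
            boundary_index++;
        boundary_size = 1u << boundary_index; /* power-of-two boundary */
    #else
        boundary_index = 12;                  /* assume 4K pages */
        boundary_size = 1u << boundary_index;
    #endif
    }

    /* A pool's underlying node is then boundary_size << pool_order bytes
     * (8K by default with 4K pages, 4K with order 0), minus the header.
     */
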
Pros/cons?
Regards,
Yann.
Index: include/apr_pools.h
===================================================================
--- include/apr_pools.h (revision 1784277)
+++ include/apr_pools.h (working copy)
@@ -171,6 +171,13 @@ APR_DECLARE(apr_status_t) apr_pool_initialize(void
*/
APR_DECLARE(void) apr_pool_terminate(void);
+/**
+ * Set up the order (power-of-two number of pages to allocate) when creating
+ * a pool.
+ * @param order The order to set
+ * @return APR_SUCCESS, or APR_EINVAL if @a order is too high (greater than 9)
+ */
+APR_DECLARE(apr_status_t) apr_pool_order_set(unsigned int order);
/*
* Pool creation/destruction
Index: memory/unix/apr_pools.c
===================================================================
--- memory/unix/apr_pools.c (revision 1784277)
+++ memory/unix/apr_pools.c (working copy)
@@ -63,23 +63,30 @@ int apr_running_on_valgrind = 0;
*/
/*
- * XXX: This is not optimal when using --enable-allocator-uses-mmap on
- * XXX: machines with large pagesize, but currently the sink is assumed
- * XXX: to be index 0, so MIN_ALLOC must be at least two pages.
+ * Allocate at least MIN_ALLOC (one boundary/page) bytes.
+ * Recycle nodes in the sized slots up to index MAX_INDEX - 1; larger
+ * nodes go to the sink slot at MAX_INDEX.
*/
-#define MIN_ALLOC (2 * BOUNDARY_SIZE)
-#define MAX_INDEX 20
+#define MIN_ALLOC BOUNDARY_SIZE
+#define MAX_INDEX 20
-#if APR_ALLOCATOR_USES_MMAP && defined(_SC_PAGESIZE)
+/*
+ * Determines the boundary/page size.
+ */
+#if defined(_SC_PAGESIZE)
static unsigned int boundary_index;
static unsigned int boundary_size;
#define BOUNDARY_INDEX boundary_index
#define BOUNDARY_SIZE boundary_size
-#else
-#define BOUNDARY_INDEX 12
-#define BOUNDARY_SIZE (1 << BOUNDARY_INDEX)
+#else /* Assume 4K pages */
+#define BOUNDARY_INDEX 12
+#define BOUNDARY_SIZE (1 << BOUNDARY_INDEX)
#endif
+#define POOL_ORDER_MAX 9
+static unsigned int pool_order = 1;
+#define POOL_SIZE (BOUNDARY_SIZE << pool_order)
+
#if APR_ALLOCATOR_GUARD_PAGES
#if defined(_SC_PAGESIZE)
#define GUARDPAGE_SIZE boundary_size
@@ -126,16 +133,17 @@ struct apr_allocator_t {
#endif /* APR_HAS_THREADS */
apr_pool_t *owner;
/**
- * Lists of free nodes. Slot 0 is used for oversized nodes,
- * and the slots 1..MAX_INDEX-1 contain nodes of sizes
+ * Lists of free nodes. Slot MAX_INDEX is used for oversized nodes,
+ * and the slots 0..MAX_INDEX-1 contain nodes of sizes
* (i+1) * BOUNDARY_SIZE. Example for BOUNDARY_INDEX == 12:
- * slot 0: nodes larger than 81920
+ * slot 0: size 4096
* slot 1: size 8192
* slot 2: size 12288
* ...
* slot 19: size 81920
+ * slot 20: nodes larger than 81920
*/
- apr_memnode_t *free[MAX_INDEX];
+ apr_memnode_t *free[MAX_INDEX + 1];
};
#define SIZEOF_ALLOCATOR_T APR_ALIGN_DEFAULT(sizeof(apr_allocator_t))
@@ -167,7 +175,7 @@ APR_DECLARE(void) apr_allocator_destroy(apr_alloca
apr_uint32_t index;
apr_memnode_t *node, **ref;
- for (index = 0; index < MAX_INDEX; index++) {
+ for (index = 0; index <= MAX_INDEX; index++) {
ref = &allocator->free[index];
while ((node = *ref) != NULL) {
*ref = node->next;
@@ -328,10 +336,10 @@ apr_memnode_t *allocator_alloc(apr_allocator_t *al
#endif /* APR_HAS_THREADS */
}
- /* If we found nothing, seek the sink (at index 0), if
+ /* If we found nothing, seek the sink (at index MAX_INDEX), if
* it is not empty.
*/
- else if (allocator->free[0]) {
+ else if (allocator->free[MAX_INDEX]) {
#if APR_HAS_THREADS
if (allocator->mutex)
apr_thread_mutex_lock(allocator->mutex);
@@ -340,7 +348,7 @@ apr_memnode_t *allocator_alloc(apr_allocator_t *al
/* Walk the free list to see if there are
* any nodes on it of the requested size
*/
- ref = &allocator->free[0];
+ ref = &allocator->free[MAX_INDEX];
while ((node = *ref) != NULL && index > node->index)
ref = &node->next;
@@ -445,10 +453,10 @@ void allocator_free(apr_allocator_t *allocator, ap
}
else {
/* This node is too large to keep in a specific size bucket,
- * just add it to the sink (at index 0).
+ * just add it to the sink (at index MAX_INDEX).
*/
- node->next = allocator->free[0];
- allocator->free[0] = node;
+ node->next = allocator->free[MAX_INDEX];
+ allocator->free[MAX_INDEX] = node;
if (current_free_index >= index + 1)
current_free_index -= index + 1;
else
@@ -622,6 +630,15 @@ static void free_proc_chain(struct process_chain *
static void pool_destroy_debug(apr_pool_t *pool, const char *file_line);
#endif
+APR_DECLARE(apr_status_t) apr_pool_order_set(unsigned int order)
+{
+ if (order > POOL_ORDER_MAX) {
+ return APR_EINVAL;
+ }
+ pool_order = order;
+ return APR_SUCCESS;
+}
+
#if !APR_POOL_DEBUG
/*
* Initialization
@@ -638,7 +655,7 @@ APR_DECLARE(apr_status_t) apr_pool_initialize(void
apr_running_on_valgrind = RUNNING_ON_VALGRIND;
#endif
-#if APR_ALLOCATOR_USES_MMAP && defined(_SC_PAGESIZE)
+#if defined(_SC_PAGESIZE)
boundary_size = sysconf(_SC_PAGESIZE);
boundary_index = 12;
while ( (1 << boundary_index) < boundary_size)
@@ -1050,7 +1067,7 @@ APR_DECLARE(apr_status_t) apr_pool_create_ex(apr_p
allocator = parent->allocator;
if ((node = allocator_alloc(allocator,
- MIN_ALLOC - APR_MEMNODE_T_SIZE)) == NULL) {
+ POOL_SIZE - APR_MEMNODE_T_SIZE)) == NULL) {
if (abort_fn)
abort_fn(APR_ENOMEM);
@@ -1144,7 +1161,7 @@ APR_DECLARE(apr_status_t) apr_pool_create_unmanage
return APR_ENOMEM;
}
if ((node = allocator_alloc(pool_allocator,
- MIN_ALLOC - APR_MEMNODE_T_SIZE)) == NULL) {
+ POOL_SIZE - APR_MEMNODE_T_SIZE)) == NULL) {
if (abort_fn)
abort_fn(APR_ENOMEM);
@@ -1152,7 +1169,7 @@ APR_DECLARE(apr_status_t) apr_pool_create_unmanage
}
}
else if ((node = allocator_alloc(pool_allocator,
- MIN_ALLOC - APR_MEMNODE_T_SIZE)) == NULL) {
+ POOL_SIZE - APR_MEMNODE_T_SIZE)) == NULL) {
if (abort_fn)
abort_fn(APR_ENOMEM);
@@ -1634,7 +1651,7 @@ APR_DECLARE(apr_status_t) apr_pool_initialize(void
if (apr_pools_initialized++)
return APR_SUCCESS;
-#if APR_ALLOCATOR_USES_MMAP && defined(_SC_PAGESIZE)
+#if defined(_SC_PAGESIZE)
boundary_size = sysconf(_SC_PAGESIZE);
boundary_index = 12;
while ( (1 << boundary_index) < boundary_size)