Is anybody interested in evaluating this modification to the pools code to allow a pool to use a thread-private free list?
--Brian
Index: srclib/apr/include/apr_pools.h
===================================================================
RCS file: /home/cvspublic/apr/include/apr_pools.h,v
retrieving revision 1.59
diff -u -r1.59 apr_pools.h
--- srclib/apr/include/apr_pools.h	2001/08/15 21:02:53	1.59
+++ srclib/apr/include/apr_pools.h	2001/08/19 22:15:43
@@ -238,6 +238,15 @@
 APR_DECLARE(apr_status_t) apr_pool_create(apr_pool_t **newcont,
                                           apr_pool_t *cont);
+/**
+ * Create a new root pool that manages a private free block list.
+ * @param newpool The pool we have just created.
+ * @remark This pool and any subpools created from it must not be
+ * used from multiple threads simultaneously unless the caller
+ * does its own locking.
+ */
+APR_DECLARE(apr_status_t) apr_pool_create_for_thread(apr_pool_t **newpool);
+
#if !defined(APR_POOLS_ARE_SMS) || defined(DOXYGEN)
/**
* Set the function to be called when an allocation failure occurs.
Index: srclib/apr/memory/unix/apr_pools.c
===================================================================
RCS file: /home/cvspublic/apr/memory/unix/apr_pools.c,v
retrieving revision 1.108
diff -u -r1.108 apr_pools.c
--- srclib/apr/memory/unix/apr_pools.c 2001/08/13 04:20:19 1.108
+++ srclib/apr/memory/unix/apr_pools.c 2001/08/19 22:15:44
@@ -205,6 +205,12 @@
int (*apr_abort)(int retcode);
/** A place to hold user data associated with this pool */
struct apr_hash_t *prog_data;
+ /** Free list owned by this pool */
+ union block_hdr *private_free_list;
+ /** Handle to free list to use when allocating new blocks for this pool
+ (if non-null, points to private_free_list in this pool or
+ an ancestor of this pool) */
+ union block_hdr **free_list;
};
@@ -415,7 +421,8 @@
/* Free a chain of blocks --- must be called with alarms blocked. */
-static void free_blocks(union block_hdr *blok)
+static void free_blocks(union block_hdr **freelist,
+ union block_hdr *blok)
{
#ifdef ALLOC_USE_MALLOC
union block_hdr *next;
@@ -430,6 +437,10 @@
unsigned num_blocks;
#endif /* ALLOC_STATS */
+#if APR_HAS_THREADS
+ int lock_required;
+#endif /* APR_HAS_THREADS */
+
/*
* First, put new blocks at the head of the free list ---
* we'll eventually bash the 'next' pointer of the last block
@@ -442,13 +453,23 @@
return; /* Sanity check --- freeing empty pool? */
}
+ if (freelist) {
#if APR_HAS_THREADS
- if (alloc_mutex) {
- apr_lock_acquire(alloc_mutex);
+ lock_required = 0;
+#endif /* APR_HAS_THREADS */
}
-#endif
- old_free_list = block_freelist;
- block_freelist = blok;
+ else {
+#if APR_HAS_THREADS
+ lock_required = 1;
+ if (alloc_mutex) {
+ apr_lock_acquire(alloc_mutex);
+ }
+#endif /* APR_HAS_THREADS */
+ freelist = &block_freelist;
+ }
+
+ old_free_list = *freelist;
+ *freelist = blok;
 /*
* Next, adjust first_avail pointers of each block --- have to do it
@@ -495,7 +516,7 @@
#endif /* ALLOC_STATS */
#if APR_HAS_THREADS
- if (alloc_mutex) {
+ if (lock_required && alloc_mutex) {
apr_lock_release(alloc_mutex);
}
#endif /* APR_HAS_THREADS */
@@ -503,13 +524,40 @@
}
/*
- * Get a new block, from our own free list if possible, from the system
+ * Get a new block, from a free list if possible, from the system
* if necessary. Must be called with alarms blocked.
*/
-static union block_hdr *new_block(apr_size_t min_size, apr_abortfunc_t abortfunc)
+static union block_hdr *new_block(union block_hdr **freelist,
+ apr_size_t min_size,
+ apr_abortfunc_t abortfunc)
{
- union block_hdr **lastptr = &block_freelist;
- union block_hdr *blok = block_freelist;
+ union block_hdr **lastptr;
+ union block_hdr *blok;
+#if APR_HAS_THREADS
+ int lock_required;
+#endif /* APR_HAS_THREADS */
+
+ /* If the caller supplied a thread-specific free list,
+ * use it for allocation; otherwise, use the global
+ * free list.
+ */
+ if (freelist) {
+ lastptr = freelist;
+ blok = *freelist;
+#if APR_HAS_THREADS
+ lock_required = 0;
+#endif /* APR_HAS_THREADS */
+ }
+ else {
+ lastptr = &block_freelist;
+ blok = block_freelist;
+#if APR_HAS_THREADS
+ lock_required = 1;
+ if (alloc_mutex) {
+ apr_lock_acquire(alloc_mutex);
+ }
+#endif /* APR_HAS_THREADS */
+ }
/* First, see if we have anything of the required size
* on the free list...
@@ -522,6 +570,11 @@
debug_verify_filled(blok->h.first_avail, blok->h.endp,
"[new_block] Ouch! Someone trounced a block "
"on the free list!\n");
+#if APR_HAS_THREADS
+ if (lock_required && alloc_mutex) {
+ apr_lock_release(alloc_mutex);
+ }
+#endif /* APR_HAS_THREADS */
return blok;
}
else {
@@ -535,6 +588,11 @@
min_size += BLOCK_MINFREE;
blok = malloc_block((min_size > BLOCK_MINALLOC)
? min_size : BLOCK_MINALLOC, abortfunc);
+#if APR_HAS_THREADS
+ if (lock_required && alloc_mutex) {
+ apr_lock_release(alloc_mutex);
+ }
+#endif /* APR_HAS_THREADS */
return blok;
}
@@ -587,14 +645,8 @@
union block_hdr *blok;
apr_pool_t *new_pool;
-
-#if APR_HAS_THREADS
- if (alloc_mutex) {
- apr_lock_acquire(alloc_mutex);
- }
-#endif
-
- blok = new_block(POOL_HDR_BYTES, abortfunc);
+ blok = new_block((parent ? parent->free_list : NULL),
+ POOL_HDR_BYTES, abortfunc);
new_pool = (apr_pool_t *) blok->h.first_avail;
blok->h.first_avail += POOL_HDR_BYTES;
#ifdef APR_POOL_DEBUG
@@ -607,6 +659,7 @@
 if (parent) {
new_pool->parent = parent;
+ new_pool->free_list = parent->free_list;
new_pool->sub_next = parent->sub_pools;
if (new_pool->sub_next) {
new_pool->sub_next->sub_prev = new_pool;
@@ -614,12 +667,6 @@
parent->sub_pools = new_pool;
}-#if APR_HAS_THREADS
- if (alloc_mutex) {
- apr_lock_release(alloc_mutex);
- }
-#endif
-
*p = new_pool;
}@@ -674,6 +721,28 @@
return APR_SUCCESS;
}
+APR_DECLARE(apr_status_t) apr_pool_create_for_thread(apr_pool_t **newpool)
+{
+ union block_hdr *tmp_free_list = NULL;
+ union block_hdr *blok;
+
+ blok = new_block(&tmp_free_list, POOL_HDR_BYTES, NULL);
+ *newpool = (apr_pool_t *) blok->h.first_avail;
+ blok->h.first_avail += POOL_HDR_BYTES;
+#ifdef APR_POOL_DEBUG
+ blok->h.owning_pool = *newpool;
+#endif
+
+ memset((char *) *newpool, '\0', sizeof(struct apr_pool_t));
+ (*newpool)->free_first_avail = blok->h.first_avail;
+ (*newpool)->first = (*newpool)->last = blok;
+
+ (*newpool)->private_free_list = tmp_free_list;
+ (*newpool)->free_list = &((*newpool)->private_free_list);
+
+ return APR_SUCCESS;
+}
+
APR_DECLARE(void) apr_pool_set_abort(apr_abortfunc_t abortfunc,
apr_pool_t *pool)
{
@@ -879,7 +948,7 @@
/* free the pool's blocks, *except* for the first one. the actual pool
structure is contained in the first block. this also gives us some
ready memory for reallocating within this pool. */
- free_blocks(a->first->h.next);
+ free_blocks(a->free_list, a->first->h.next);
a->first->h.next = NULL;
/* this was allocated in self, or a subpool of self. it simply
@@ -919,12 +988,6 @@
/* toss everything in the pool. */
apr_pool_clear(a);
-#if APR_HAS_THREADS
- if (alloc_mutex) {
- apr_lock_acquire(alloc_mutex);
- }
-#endif
-
/* detach this pool from its parent. */
if (a->parent) {
if (a->parent->sub_pools == a) {
@@ -938,12 +1001,6 @@
}
}
-#if APR_HAS_THREADS
- if (alloc_mutex) {
- apr_lock_release(alloc_mutex);
- }
-#endif
-
/* freeing the first block will include the pool structure. to prevent
a double call to apr_pool_destroy, we want to fill a NULL into
a->first so that the second call (or any attempted usage of the
@@ -956,7 +1013,16 @@
*/
blok = a->first;
a->first = NULL;
- free_blocks(blok);
+ free_blocks(a->free_list, blok);
+
+ if (a->private_free_list) {
+ union block_hdr *blok = a->private_free_list;
+ union block_hdr *next;
+ for ( ; blok; blok = next) {
+ next = blok->h.next;
+ DO_FREE(blok);
+ }
+ }
}
@@ -1153,26 +1219,14 @@
}
/* Nope --- get a new one that's guaranteed to be big enough */
-
-#if APR_HAS_THREADS
- if (alloc_mutex) {
- apr_lock_acquire(alloc_mutex);
- }
-#endif
-
- blok = new_block(size, a->apr_abort);
+ blok = new_block(a->free_list, size, a->apr_abort);
a->last->h.next = blok;
a->last = blok;
#ifdef APR_POOL_DEBUG
blok->h.owning_pool = a;
#endif
-#if APR_HAS_THREADS
- if (alloc_mutex) {
- apr_lock_release(alloc_mutex);
- }
-#endif
-
first_avail = blok->h.first_avail;
blok->h.first_avail += size;
@@ -1246,6 +1300,7 @@
#ifdef ALLOC_USE_MALLOC
char *base;
#else
+ union block_hdr **free_list;
union block_hdr *blok;
int got_a_new_block;
#endif
@@ -1279,13 +1334,8 @@
cur_len = strp - blok->h.first_avail;
/* must try another blok */
-#if APR_HAS_THREADS
- apr_lock_acquire(alloc_mutex);
-#endif
- nblok = new_block(2 * cur_len, NULL);
-#if APR_HAS_THREADS
- apr_lock_release(alloc_mutex);
-#endif
+ nblok = new_block(ps->free_list, 2 * cur_len, NULL);
+
memcpy(nblok->h.first_avail, blok->h.first_avail, cur_len);
ps->vbuff.curpos = nblok->h.first_avail + cur_len;
/* save a byte for the NUL terminator */
@@ -1295,12 +1345,16 @@
if (ps->got_a_new_block) {
debug_fill(blok->h.first_avail, blok->h.endp - blok->h.first_avail);
#if APR_HAS_THREADS
- apr_lock_acquire(alloc_mutex);
+ if ((ps->free_list == &block_freelist) && alloc_mutex) {
+ apr_lock_acquire(alloc_mutex);
+ }
#endif
- blok->h.next = block_freelist;
- block_freelist = blok;
+ blok->h.next = *(ps->free_list);
+ *(ps->free_list) = blok;
#if APR_HAS_THREADS
- apr_lock_release(alloc_mutex);
+ if ((ps->free_list == &block_freelist) && alloc_mutex) {
+ apr_lock_release(alloc_mutex);
+ }
#endif
}
ps->blok = nblok;
@@ -1344,6 +1398,7 @@
char *strp;
apr_size_t size;
+ ps.free_list = p->free_list;
ps.blok = p->last;
ps.vbuff.curpos = ps.blok->h.first_avail;
ps.vbuff.endpos = ps.blok->h.endp - 1; /* save one for NUL */
Index: server/mpm/threaded/threaded.c
===================================================================
RCS file: /home/cvspublic/httpd-2.0/server/mpm/threaded/threaded.c,v
retrieving revision 1.61
diff -u -r1.61 threaded.c
--- server/mpm/threaded/threaded.c 2001/08/16 13:59:14 1.61
+++ server/mpm/threaded/threaded.c 2001/08/19 22:15:45
@@ -545,7 +545,7 @@
free(ti);
- apr_pool_create(&ptrans, tpool);
+ apr_pool_create_for_thread(&ptrans);
apr_lock_acquire(worker_thread_count_mutex);
worker_thread_count++;
@@ -654,6 +654,7 @@
apr_pool_clear(ptrans);
}
+ apr_pool_destroy(ptrans);
apr_pool_destroy(tpool);
ap_update_child_status(process_slot, thread_slot, (dying) ? SERVER_DEAD : SERVER_GRACEFUL,
(request_rec *) NULL);
Index: srclib/apr/include/apr_pools.h
===================================================================
RCS file: /home/cvspublic/apr/include/apr_pools.h,v
retrieving revision 1.59
diff -u -r1.59 apr_pools.h
--- srclib/apr/include/apr_pools.h 2001/08/15 21:02:53 1.59
+++ srclib/apr/include/apr_pools.h 2001/08/19 22:15:43
@@ -238,6 +238,15 @@
APR_DECLARE(apr_status_t) apr_pool_create(apr_pool_t **newcont,
apr_pool_t *cont);
+/**
+ * Create a new root pool that manages a private free block list.
+ * @param newpool The pool we have just created.
+ * @remark This pool and any subpools created from it must not be
+ * used from multiple threads simultaneously unless the caller
+ * does its own locking.
+ */
+APR_DECLARE(apr_status_t) apr_pool_create_for_thread(apr_pool_t **newpool);
+
#if !defined(APR_POOLS_ARE_SMS) || defined(DOXYGEN)
/**
* Set the function to be called when an allocation failure occurs.
Index: srclib/apr/memory/unix/apr_pools.c
===================================================================
RCS file: /home/cvspublic/apr/memory/unix/apr_pools.c,v
retrieving revision 1.108
diff -u -r1.108 apr_pools.c
--- srclib/apr/memory/unix/apr_pools.c 2001/08/13 04:20:19 1.108
+++ srclib/apr/memory/unix/apr_pools.c 2001/08/19 22:15:44
@@ -205,6 +205,12 @@
int (*apr_abort)(int retcode);
/** A place to hold user data associated with this pool */
struct apr_hash_t *prog_data;
+ /** Free list owned by this pool */
+ union block_hdr *private_free_list;
+ /** Handle to free list to use when allocating new blocks for this pool
+ (if non-null, points to private_free_list in this pool or
+ an ancestor of this pool) */
+ union block_hdr **free_list;
};
@@ -415,7 +421,8 @@
/* Free a chain of blocks --- must be called with alarms blocked. */
-static void free_blocks(union block_hdr *blok)
+static void free_blocks(union block_hdr **freelist,
+ union block_hdr *blok)
{
#ifdef ALLOC_USE_MALLOC
union block_hdr *next;
@@ -430,6 +437,10 @@
unsigned num_blocks;
#endif /* ALLOC_STATS */
+#if APR_HAS_THREADS
+ int lock_required;
+#endif /* APR_HAS_THREADS */
+
/*
* First, put new blocks at the head of the free list ---
* we'll eventually bash the 'next' pointer of the last block
@@ -442,13 +453,23 @@
return; /* Sanity check --- freeing empty pool? */
}
+ if (freelist) {
#if APR_HAS_THREADS
- if (alloc_mutex) {
- apr_lock_acquire(alloc_mutex);
+ lock_required = 0;
+#endif /* APR_HAS_THREADS */
}
-#endif
- old_free_list = block_freelist;
- block_freelist = blok;
+ else {
+#if APR_HAS_THREADS
+ lock_required = 1;
+ if (alloc_mutex) {
+ apr_lock_acquire(alloc_mutex);
+ }
+#endif /* APR_HAS_THREADS */
+ freelist = &block_freelist;
+ }
+
+ old_free_list = *freelist;
+ *freelist = blok;
/*
* Next, adjust first_avail pointers of each block --- have to do it
@@ -495,7 +516,7 @@
#endif /* ALLOC_STATS */
#if APR_HAS_THREADS
- if (alloc_mutex) {
+ if (lock_required && alloc_mutex) {
apr_lock_release(alloc_mutex);
}
#endif /* APR_HAS_THREADS */
@@ -503,13 +524,40 @@
}
/*
- * Get a new block, from our own free list if possible, from the system
+ * Get a new block, from a free list if possible, from the system
* if necessary. Must be called with alarms blocked.
*/
-static union block_hdr *new_block(apr_size_t min_size, apr_abortfunc_t
abortfunc)
+static union block_hdr *new_block(union block_hdr **freelist,
+ apr_size_t min_size,
+ apr_abortfunc_t abortfunc)
{
- union block_hdr **lastptr = &block_freelist;
- union block_hdr *blok = block_freelist;
+ union block_hdr **lastptr;
+ union block_hdr *blok;
+#if APR_HAS_THREADS
+ int lock_required;
+#endif /* APR_HAS_THREADS */
+
+ /* If the caller supplied a thread-specific free list,
+ * use it for allocation; otherwise, use the global
+ * free list.
+ */
+ if (freelist) {
+ lastptr = freelist;
+ blok = *freelist;
+#if APR_HAS_THREADS
+ lock_required = 0;
+#endif /* APR_HAS_THREADS */
+ }
+ else {
+ lastptr = &block_freelist;
+ blok = block_freelist;
+#if APR_HAS_THREADS
+ lock_required = 1;
+ if (alloc_mutex) {
+ apr_lock_acquire(alloc_mutex);
+ }
+#endif /* APR_HAS_THREADS */
+ }
/* First, see if we have anything of the required size
* on the free list...
@@ -522,6 +570,11 @@
debug_verify_filled(blok->h.first_avail, blok->h.endp,
"[new_block] Ouch! Someone trounced a block "
"on the free list!\n");
+#if APR_HAS_THREADS
+ if (lock_required && alloc_mutex) {
+ apr_lock_release(alloc_mutex);
+ }
+#endif /* APR_HAS_THREADS */
return blok;
}
else {
@@ -535,6 +588,11 @@
min_size += BLOCK_MINFREE;
blok = malloc_block((min_size > BLOCK_MINALLOC)
? min_size : BLOCK_MINALLOC, abortfunc);
+#if APR_HAS_THREADS
+ if (lock_required && alloc_mutex) {
+ apr_lock_release(alloc_mutex);
+ }
+#endif /* APR_HAS_THREADS */
return blok;
}
@@ -587,14 +645,8 @@
union block_hdr *blok;
apr_pool_t *new_pool;
-
-#if APR_HAS_THREADS
- if (alloc_mutex) {
- apr_lock_acquire(alloc_mutex);
- }
-#endif
-
- blok = new_block(POOL_HDR_BYTES, abortfunc);
+ blok = new_block((parent ? parent->free_list : NULL),
+ POOL_HDR_BYTES, abortfunc);
new_pool = (apr_pool_t *) blok->h.first_avail;
blok->h.first_avail += POOL_HDR_BYTES;
#ifdef APR_POOL_DEBUG
@@ -607,6 +659,7 @@
if (parent) {
new_pool->parent = parent;
+ new_pool->free_list = parent->free_list;
new_pool->sub_next = parent->sub_pools;
if (new_pool->sub_next) {
new_pool->sub_next->sub_prev = new_pool;
@@ -614,12 +667,6 @@
parent->sub_pools = new_pool;
}
-#if APR_HAS_THREADS
- if (alloc_mutex) {
- apr_lock_release(alloc_mutex);
- }
-#endif
-
*p = new_pool;
}
@@ -674,6 +721,28 @@
return APR_SUCCESS;
}
+APR_DECLARE(apr_status_t) apr_pool_create_for_thread(apr_pool_t **newpool)
+{
+ union block_hdr *tmp_free_list = NULL;
+ union block_hdr *blok;
+
+ blok = new_block(&tmp_free_list, POOL_HDR_BYTES, NULL);
+ *newpool = (apr_pool_t *) blok->h.first_avail;
+ blok->h.first_avail += POOL_HDR_BYTES;
+#ifdef APR_POOL_DEBUG
+ blok->h.owning_pool = *newpool;
+#endif
+
+ memset((char *) *newpool, '\0', sizeof(struct apr_pool_t));
+ (*newpool)->free_first_avail = blok->h.first_avail;
+ (*newpool)->first = (*newpool)->last = blok;
+
+ (*newpool)->private_free_list = tmp_free_list;
+ (*newpool)->free_list = &((*newpool)->private_free_list);
+
+ return APR_SUCCESS;
+}
+
APR_DECLARE(void) apr_pool_set_abort(apr_abortfunc_t abortfunc,
apr_pool_t *pool)
{
@@ -879,7 +948,7 @@
/* free the pool's blocks, *except* for the first one. the actual pool
structure is contained in the first block. this also gives us some
ready memory for reallocating within this pool. */
- free_blocks(a->first->h.next);
+ free_blocks(a->free_list, a->first->h.next);
a->first->h.next = NULL;
/* this was allocated in self, or a subpool of self. it simply
@@ -919,12 +988,6 @@
/* toss everything in the pool. */
apr_pool_clear(a);
-#if APR_HAS_THREADS
- if (alloc_mutex) {
- apr_lock_acquire(alloc_mutex);
- }
-#endif
-
/* detach this pool from its parent. */
if (a->parent) {
if (a->parent->sub_pools == a) {
@@ -938,12 +1001,6 @@
}
}
-#if APR_HAS_THREADS
- if (alloc_mutex) {
- apr_lock_release(alloc_mutex);
- }
-#endif
-
/* freeing the first block will include the pool structure. to prevent
a double call to apr_pool_destroy, we want to fill a NULL into
a->first so that the second call (or any attempted usage of the
@@ -956,7 +1013,16 @@
*/
blok = a->first;
a->first = NULL;
- free_blocks(blok);
+ free_blocks(a->free_list, blok);
+
+ if (a->private_free_list) {
+ union block_hdr *blok = a->private_free_list;
+ union block_hdr *next;
+ for ( ; blok; blok = next) {
+ next = blok->h.next;
+ DO_FREE(blok);
+ }
+ }
}
@@ -1153,26 +1219,14 @@
}
/* Nope --- get a new one that's guaranteed to be big enough */
-
-#if APR_HAS_THREADS
- if (alloc_mutex) {
- apr_lock_acquire(alloc_mutex);
- }
-#endif
- blok = new_block(size, a->apr_abort);
+ blok = new_block(a->free_list, size, a->apr_abort);
a->last->h.next = blok;
a->last = blok;
#ifdef APR_POOL_DEBUG
blok->h.owning_pool = a;
#endif
-#if APR_HAS_THREADS
- if (alloc_mutex) {
- apr_lock_release(alloc_mutex);
- }
-#endif
-
first_avail = blok->h.first_avail;
blok->h.first_avail += size;
@@ -1246,6 +1300,7 @@
#ifdef ALLOC_USE_MALLOC
char *base;
#else
+ union block_hdr **free_list;
union block_hdr *blok;
int got_a_new_block;
#endif
@@ -1279,13 +1334,8 @@
cur_len = strp - blok->h.first_avail;
/* must try another blok */
-#if APR_HAS_THREADS
- apr_lock_acquire(alloc_mutex);
-#endif
- nblok = new_block(2 * cur_len, NULL);
-#if APR_HAS_THREADS
- apr_lock_release(alloc_mutex);
-#endif
+ nblok = new_block(ps->free_list, 2 * cur_len, NULL);
+
memcpy(nblok->h.first_avail, blok->h.first_avail, cur_len);
ps->vbuff.curpos = nblok->h.first_avail + cur_len;
/* save a byte for the NUL terminator */
@@ -1295,12 +1345,16 @@
if (ps->got_a_new_block) {
debug_fill(blok->h.first_avail, blok->h.endp - blok->h.first_avail);
#if APR_HAS_THREADS
- apr_lock_acquire(alloc_mutex);
+ if ((ps->free_list == &block_freelist) && alloc_mutex) {
+ apr_lock_acquire(alloc_mutex);
+ }
#endif
- blok->h.next = block_freelist;
- block_freelist = blok;
+ blok->h.next = *(ps->free_list);
+ *(ps->free_list) = blok;
#if APR_HAS_THREADS
- apr_lock_release(alloc_mutex);
+ if ((ps->free_list == &block_freelist) && alloc_mutex) {
+ apr_lock_release(alloc_mutex);
+ }
#endif
}
ps->blok = nblok;
@@ -1344,6 +1398,7 @@
char *strp;
apr_size_t size;
+ ps.free_list = p->free_list;
ps.blok = p->last;
ps.vbuff.curpos = ps.blok->h.first_avail;
ps.vbuff.endpos = ps.blok->h.endp - 1; /* save one for NUL */
Index: server/mpm/threaded/threaded.c
===================================================================
RCS file: /home/cvspublic/httpd-2.0/server/mpm/threaded/threaded.c,v
retrieving revision 1.61
diff -u -r1.61 threaded.c
--- server/mpm/threaded/threaded.c 2001/08/16 13:59:14 1.61
+++ server/mpm/threaded/threaded.c 2001/08/19 22:15:45
@@ -545,7 +545,7 @@
free(ti);
- apr_pool_create(&ptrans, tpool);
+ apr_pool_create_for_thread(&ptrans);
apr_lock_acquire(worker_thread_count_mutex);
worker_thread_count++;
@@ -654,6 +654,7 @@
apr_pool_clear(ptrans);
}
+ apr_pool_destroy(ptrans);
apr_pool_destroy(tpool);
ap_update_child_status(process_slot, thread_slot, (dying) ? SERVER_DEAD :
SERVER_GRACEFUL,
(request_rec *) NULL);
