dgaudet     99/08/05 18:56:25

  Modified:    mpm/src/main alloc.c
  Log:
  Remove a difference with 1.3's alloc.c ... the root_pool stuff was an
  experiment I tried with apache-nspr, and it really didn't win much.
  The idea was to have a free list per thread.  But in practice that's
  too many free lists, too much memory sitting around doing squat.  If
  we have lock contention on the alloc mutex then we might consider a
  handful of free lists, say 8, and use tid % 8 to figure out which one
  to go after.
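
  For illustration, a rough sketch of that "tid % 8" idea -- none of these
  names (NUM_FREELISTS, block_freelists, freelist_mutex, free_list_for)
  exist in alloc.c; they're only here to show the shape of the scheme:

      #define NUM_FREELISTS 8

      /* a small fixed number of free lists, each guarded by its own
       * mutex, instead of one list per thread or one global list
       */
      static union block_hdr *block_freelists[NUM_FREELISTS];
      static ap_thread_mutex *freelist_mutex[NUM_FREELISTS];

      static union block_hdr **free_list_for(unsigned tid)
      {
          /* hash the thread id onto a bucket: contention on any one
           * mutex is spread out, while idle memory is bounded by the
           * number of buckets rather than the number of threads
           */
          return &block_freelists[tid % NUM_FREELISTS];
      }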
  
  Revision  Changes    Path
  1.7       +23 -50    apache-2.0/mpm/src/main/alloc.c
  
  Index: alloc.c
  ===================================================================
  RCS file: /home/cvs/apache-2.0/mpm/src/main/alloc.c,v
  retrieving revision 1.6
  retrieving revision 1.7
  diff -u -r1.6 -r1.7
  --- alloc.c   1999/07/06 21:32:09     1.6
  +++ alloc.c   1999/08/06 01:56:25     1.7
  @@ -179,7 +179,6 @@
   
   struct process_chain;
   struct cleanup;
  -struct root_pool;
   
   struct pool {
       union block_hdr *first;
  @@ -190,7 +189,6 @@
       struct pool *sub_next;
       struct pool *sub_prev;
       struct pool *parent;
  -    struct root_pool *thread_root;   /* the root pool for this thread */
       char *free_first_avail;
   #ifdef ALLOC_USE_MALLOC
       void *allocation_list;
  @@ -200,13 +198,6 @@
   #endif
   };
   
  -/* there's really no difference between a root_pool and a regular pool
  - * at the moment.
  - */
  -typedef struct root_pool {
  -    struct pool p;
  -} root_pool;
  -
   
   static union block_hdr *block_freelist = NULL;
   static ap_thread_mutex *alloc_mutex;
  @@ -320,7 +311,7 @@
   #endif
   
   
  -static void free_blocks(root_pool *root,union block_hdr *blok)
  +static void free_blocks(union block_hdr *blok)
   {
   #ifdef ALLOC_USE_MALLOC
       union block_hdr *next;
  @@ -422,7 +413,7 @@
       return blok;
   }
   
  -static ap_inline union block_hdr *new_local_block(root_pool *root, int min_size)
  +static ap_inline union block_hdr *new_local_block(int min_size)
   {
       union block_hdr *blok;
   
  @@ -465,32 +456,6 @@
    * gets taken off the parent's sub-pool list...
    */
   
  -#define ROOT_HDR_CLICKS (1 + ((sizeof(struct root_pool) - 1) / CLICK_SZ))
  -#define ROOT_HDR_BYTES (ROOT_HDR_CLICKS * CLICK_SZ)
  -
  -API_EXPORT(struct pool *) ap_make_root_pool(void)
  -{
  -    union block_hdr *blok;
  -    root_pool *new_pool;
  -
  -    (void) ap_thread_mutex_lock(alloc_mutex);
  -
  -    blok = new_block(ROOT_HDR_BYTES);
  -    new_pool = (root_pool *) blok->h.first_avail;
  -    blok->h.first_avail += ROOT_HDR_BYTES;
  -#ifdef POOL_DEBUG
  -    blok->h.owning_pool = &(new_pool->p);
  -#endif
  -
  -    memset(new_pool, 0, sizeof(*new_pool));
  -    new_pool->p.free_first_avail = blok->h.first_avail;
  -    new_pool->p.first = new_pool->p.last = blok;
  -    new_pool->p.thread_root = new_pool;
  -
  -    (void) ap_thread_mutex_unlock(alloc_mutex);
  -    return (pool *)new_pool;
  -}
  -
   static pool *permanent_pool;
   
   /* Each pool structure is allocated in the start of its own first block,
  @@ -508,7 +473,7 @@
       union block_hdr *blok;
       pool *new_pool;
   
  -    blok = new_local_block(p->thread_root, POOL_HDR_BYTES);
  +    blok = new_local_block(POOL_HDR_BYTES);
       new_pool = (pool *) blok->h.first_avail;
       blok->h.first_avail += POOL_HDR_BYTES;
   #ifdef POOL_DEBUG
  @@ -519,12 +484,13 @@
       new_pool->free_first_avail = blok->h.first_avail;
       new_pool->first = new_pool->last = blok;
   
  -    new_pool->thread_root = p->thread_root;
  -    new_pool->parent = p;
  -    new_pool->sub_next = p->sub_pools;
  -    if (new_pool->sub_next)
  -      new_pool->sub_next->sub_prev = new_pool;
  -    p->sub_pools = new_pool;
  +    if (p) {
  +     new_pool->parent = p;
  +     new_pool->sub_next = p->sub_pools;
  +     if (new_pool->sub_next)
  +         new_pool->sub_next->sub_prev = new_pool;
  +     p->sub_pools = new_pool;
  +    }
   
       return new_pool;
   }
  @@ -569,7 +535,7 @@
   #ifdef WIN32
       spawn_mutex = ap_thread_mutex_new();
   #endif
  -    permanent_pool = ap_make_root_pool();
  +    permanent_pool = ap_make_sub_pool(NULL);
   #ifdef ALLOC_STATS
       atexit(dump_stats);
   #endif
  @@ -591,7 +557,7 @@
       a->cleanups = NULL;
       free_proc_chain(a->subprocesses);
       a->subprocesses = NULL;
  -    free_blocks(a->thread_root, a->first->h.next);
  +    free_blocks(a->first->h.next);
       a->first->h.next = NULL;
   
       a->last = a->first;
  @@ -616,6 +582,15 @@
   {
       ap_clear_pool(a);
   
  +    /* XXX: I don't think this mutex is required here.  In theory,
  +     our plan is that upon thread creation, the creator thread
  +     will create a pool A which it will hand the created thread.
  +     The created thread then can create any subpools of A it
  +     wants, and it doesn't need any locks to do this... and it
  +     can destroy any subpool of A it desires -- it just can't
  +     destroy A itself.  When the thread dies, A can be
  +     destroyed by the thread creator...  -djg
  +    */
       (void) ap_thread_mutex_lock(alloc_mutex);
       if (a->parent) {
        if (a->parent->sub_pools == a)
  @@ -627,7 +602,7 @@
       }
       (void) ap_thread_mutex_unlock(alloc_mutex);
   
  -    free_blocks(a->thread_root, a->first);
  +    free_blocks(a->first);
   }
   
   /*****************************************************************
  @@ -786,7 +761,7 @@
   
       /* Nope --- get a new one that's guaranteed to be big enough */
   
  -    blok = new_local_block(a->thread_root, size);
  +    blok = new_local_block(size);
       a->last->h.next = blok;
       a->last = blok;
   #ifdef POOL_DEBUG
  @@ -890,7 +865,6 @@
   #else
       union block_hdr *blok;
       int got_a_new_block;
  -    root_pool *thread_root;
   #endif
   };
   
  @@ -979,7 +953,6 @@
       char *strp;
       int size;
   
  -    ps.thread_root = p->thread_root;
       ps.blok = p->last;
       ps.vbuff.curpos = ps.blok->h.first_avail;
       ps.vbuff.endpos = ps.blok->h.endp - 1;   /* save one for NUL */
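
  The XXX note added above spells out the intended ownership rule for
  per-thread pools: the creator makes a pool A and hands it to the new
  thread, which may create and destroy sub-pools of A without locking but
  never destroys A itself.  A minimal sketch of that hand-off, assuming a
  pthread-based worker; worker_main, spawn_worker, the parent pool p and
  the exact destroy entry point are illustrative assumptions, not part of
  this commit:

      #include <pthread.h>
      /* pool, ap_make_sub_pool() etc. are assumed to come from the
       * pool header that goes with this alloc.c
       */

      static void *worker_main(void *arg)
      {
          pool *A = arg;                  /* handed over by the creator */
          pool *scratch;

          /* no alloc_mutex needed: only this thread ever touches A's
           * sub-pool list
           */
          scratch = ap_make_sub_pool(A);

          /* ... do the thread's work out of scratch ... */

          ap_destroy_pool(scratch);   /* fine: scratch is a sub-pool of A */
          /* but never destroy A itself from this thread */
          return NULL;
      }

      /* creator side: p is some pool the creator already owns */
      static void spawn_worker(pool *p)
      {
          pool *A = ap_make_sub_pool(p);
          pthread_t tid;

          pthread_create(&tid, NULL, worker_main, A);
          /* ... */
          pthread_join(tid, NULL);
          ap_destroy_pool(A);         /* safe once the worker has exited */
      }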
  
  
  
