PatchSet 4071 
Date: 2003/09/28 19:47:40
Author: hkraemer
Branch: HEAD
Tag: (none) 
Log:
fixes and improvements for the garbage collector

* fixed the endless loop in startGC when running javalayer 0.3.0
* (hopefully) made the heap management more efficient

Members: 
        ChangeLog:1.1666->1.1667 
        kaffe/kaffevm/mem/gc-incremental.c:1.67->1.68 
        kaffe/kaffevm/mem/gc-mem.c:1.46->1.47 
        kaffe/kaffevm/mem/gc-mem.h:1.16->1.17 

Index: kaffe/ChangeLog
diff -u kaffe/ChangeLog:1.1666 kaffe/ChangeLog:1.1667
--- kaffe/ChangeLog:1.1666      Sat Sep 27 12:37:46 2003
+++ kaffe/ChangeLog     Sun Sep 28 19:47:40 2003
@@ -1,3 +1,25 @@
+2003-09-28  Helmer Kraemer  <[EMAIL PROTECTED]>
+
+       * kaffe/kaffevm/mem/gc-incremental.c:
+       (startGC, finaliserMan) properly deal with objects on the finaliser
+       list when starting a gc pass (fixes endless loop)
+       (createGC) initialise the heap and reserve primitive pages for OOM
+       handling
+       
+       * kaffe/kaffevm/mem/gc-mem.h: (struct gc_block) added pnext and pprev
+       fields for management of primitive blocks; removed inuse field
+       (GCBLOCKINUSE) new macro to test whether a gc_block is used
+
+       replaced all uses of the inuse field by calls to the GCBLOCKINUSE
+       macro
+
+       * kaffe/kaffevm/mem/gc-mem.c: (gc_get_prim_freelist, gc_add_to_prim_freelist,
+       gc_remove_from_prim_freelist, gc_merge_with_successor) new helper
+       functions for management of primitive blocks
+       (gc_primitive_alloc, gc_primitive_free) manage primitive blocks
+       using a best fit algorithm
+       (gc_heap_grow) don't forget to lock the gc_heap_lock 
+       
 2003-09-27  Guilhem Lavaux <[EMAIL PROTECTED]>
 
        * libraries/javalib/kjc.jar: Fix for path method invocation.
Index: kaffe/kaffe/kaffevm/mem/gc-incremental.c
diff -u kaffe/kaffe/kaffevm/mem/gc-incremental.c:1.67 kaffe/kaffe/kaffevm/mem/gc-incremental.c:1.68
--- kaffe/kaffe/kaffevm/mem/gc-incremental.c:1.67       Fri Aug 22 11:42:15 2003
+++ kaffe/kaffe/kaffevm/mem/gc-incremental.c    Sun Sep 28 19:47:41 2003
@@ -228,7 +228,7 @@
        uintp p = (uintp) UTOMEM(unit) - gc_heap_base;
        int idx;
 
-       if (!(p & (MEMALIGN - 1)) && p < gc_heap_range && info->inuse) {
+       if (!(p & (MEMALIGN - 1)) && p < gc_heap_range && GCBLOCKINUSE(info)) {
                /* Make sure 'unit' refers to the beginning of an
                 * object.  We do this by making sure it is correctly
                 * aligned within the block.
@@ -246,7 +246,7 @@
 static inline void
 markObjectDontCheck(gc_unit *unit, gc_block *info, int idx)
 {
-       /* If object's been traced before, don't do it again */
+       /* If the object has been traced before, don't do it again. */
        if (GC_GET_COLOUR(info, idx) != GC_COLOUR_WHITE) {
                return;
        }
@@ -382,15 +382,15 @@
        }
 
        info = GCMEM2BLOCK(mem);
-       if (!info->inuse) {
+       if (!GCBLOCKINUSE(info)) {
                /* go down block list to find out whether we were hitting
                 * in a large object
                 */
-               while (!info->inuse && (uintp)info > (uintp)gc_block_base) {
+               while (!GCBLOCKINUSE(info) && (uintp)info > (uintp)gc_block_base) {
                        info--;
                }
                /* must be a large block, hence nr must be 1 */
-               if (!info->inuse || info->nr != 1) {
+               if (!GCBLOCKINUSE(info) || info->nr != 1) {
                        return (0);
                }
        }
@@ -663,6 +663,8 @@
 startGC(Collector *gcif)
 {
        gc_unit* unit;
+       gc_block* info;
+       int idx;
 
        gcStats.freedmem = 0;
        gcStats.freedobj = 0;
@@ -685,10 +687,25 @@
        /* measure time */
        startTiming(&gc_time, "gctime-scan");
 
-       /* Walk all objects on the finalizer list */
+       /*
+        * Since objects whose finaliser has to be run need to
+        * be kept alive, we have to mark them here. They will
+        * be put back into the finalise list later on during
+        * the gc pass.
+        *
+        * Since these objects are treated like garbage, we have
+        * to set their colour to white before marking them.
+        */
        while (gclists[finalise].cnext != &gclists[finalise]) {
                unit = gclists[finalise].cnext;
-               gcMarkObject(gcif, UTOMEM(unit));
+               info = GCMEM2BLOCK(unit);
+               idx = GCMEM2IDX(info, unit);
+
+               GC_SET_COLOUR (info, idx, GC_COLOUR_WHITE);
+               gcStats.finalobj -= 1;
+               gcStats.finalmem -= GCBLOCKSIZE(info);
+
+               markObjectDontCheck(unit, info, idx); 
        }
 
        (*walkRootSet)(gcif);
@@ -851,27 +868,52 @@
                }
                assert(finalRunning == true);
 
+               /*
+                * Loop until the list of objects whose finaliser needs to be run is empty
+                * [ checking the condition without holding a lock is ok, since we're the only
+                * thread removing elements from the list (the list can never shrink during
+                * a gc pass) ].
+                *
+                * According to the spec, the finalisers have to be run without any user
+                * visible locks held. Therefore, we must temporarily release the finman
+                * lock and may not hold the gc_lock while running the finalisers as they
+                * are exposed to the user by java.lang.Runtime.
+                *
+                * In addition, we must prevent an object and everything it references from
+                * being collected while the finaliser is run (since we can't hold the gc_lock,
+                * there may be several gc passes in the meantime). To do so, we keep the
+                * object in the finalise list and only remove it from there when its
+                * finaliser is done (simply adding the object to the grey list while its
+                * finaliser is run only works as long as there's at most one gc pass).
+                *
+                * In order to determine the finaliser of an object, we have to access the
+                * gc_block that contains it and its index. Doing this without holding a
+                * lock only works as long as both the gc_blocks and the indices of the
+                * objects in a gc_block are constant.
+                */
                while (gclists[finalise].cnext != &gclists[finalise]) {
-                       lockStaticMutex(&gc_lock);
                        unit = gclists[finalise].cnext;
-                       UREMOVELIST(unit);
-                       UAPPENDLIST(gclists[grey], unit);
-
                        info = GCMEM2BLOCK(unit);
                        idx = GCMEM2IDX(info, unit);
+
+                       /* Call finaliser */
+                       unlockStaticMutex(&finman);
+                       (*gcFunctions[GC_GET_FUNCS(info,idx)].final)(gcif, UTOMEM(unit));
+                       lockStaticMutex(&finman);
+
+                       /* and remove unit from the finaliser list */
+                       lockStaticMutex(&gc_lock);
+                       UREMOVELIST(unit);
+                       UAPPENDLIST(gclists[nofin_white], unit);
+
                        gcStats.finalmem -= GCBLOCKSIZE(info);
                        gcStats.finalobj -= 1;
 
                        assert(GC_GET_STATE(info,idx) == GC_STATE_INFINALIZE);
                        /* Objects are only finalised once */
                        GC_SET_STATE(info, idx, GC_STATE_FINALIZED);
-                       GC_SET_COLOUR(info, idx, GC_COLOUR_GREY);
+                       GC_SET_COLOUR(info, idx, GC_COLOUR_WHITE);
                        unlockStaticMutex(&gc_lock);
-
-                       /* Call finaliser */
-                       unlockStaticMutex(&finman);
-                       (*gcFunctions[GC_GET_FUNCS(info,idx)].final)(gcif, UTOMEM(unit));
-                       lockStaticMutex(&finman);
                }
 
                /* Wake up anyone waiting for the finalizer to finish */
@@ -999,6 +1041,10 @@
 
                        case 2:
                                /* Grow the heap */
+                               DBG (GCSYSALLOC, dprintf ("growing heap by %u bytes of type %s (%2.1f%% free)\n",
+                                                         size, gcFunctions[fidx].description,
+                                                         (1.0 - (gcStats.totalmem / (double)gc_heap_total)) * 100.0); )
+                               
                                gc_heap_grow(size);
                                break;
 
@@ -1068,9 +1114,6 @@
                        UAPPENDLIST(gclists[nofin_white], unit);
                }
        }
-       if (!reserve) {
-               reserve = gc_primitive_reserve();
-       }
 
        /* It is not safe to allocate java objects the first time
         * gcMalloc is called, but it should be safe after gcEnable
@@ -1163,6 +1206,8 @@
        idx = GCMEM2IDX(info, unit);
        osize = GCBLOCKSIZE(info) - sizeof(gc_unit);
 
+       assert(GC_GET_FUNCS(info, idx) == fidx);
+
        /* Can only handled fixed objects at the moment */
        assert(GC_GET_COLOUR(info, idx) == GC_COLOUR_FIXED);
        info = 0;
@@ -1398,6 +1443,10 @@
        URESETLIST(gclists[fin_black]);
        URESETLIST(gclists[finalise]);
        gc_obj.collector.ops = &GC_Ops;
+
+       gc_heap_initialise ();
+       reserve = gc_primitive_reserve ();
+
        return (&gc_obj.collector);
 }
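
The comments added to startGC and finaliserMan above describe the new life cycle of objects awaiting finalisation: at the start of a gc pass they are coloured white again and immediately re-marked so that they (and everything they reference) survive the pass, and the finaliser thread removes an object from the finalise list only after its finaliser has returned. The following stand-alone sketch is not the kaffe code; the names (obj, colour_t, finalise_list, mark_object, run_finalisers) are invented purely to illustrate that ordering.

/*
 * Simplified model of the finalise-list handling described above.
 * All names are invented for illustration; this is not the kaffe
 * implementation.
 */
#include <stdio.h>
#include <stddef.h>

typedef enum { WHITE, GREY, BLACK } colour_t;

struct obj {
        struct obj *next;
        colour_t    colour;
        int         finalised;
};

static struct obj *finalise_list;       /* objects whose finaliser must run */

/* mark an object so that it survives the current gc pass */
static void mark_object(struct obj *o)
{
        if (o->colour != WHITE)         /* already traced, don't do it again */
                return;
        o->colour = GREY;               /* a real gc would now trace its children */
}

/* what startGC now does: re-whiten and re-mark finalisable objects,
 * but leave them on the finalise list */
static void start_gc(void)
{
        struct obj *o;

        for (o = finalise_list; o != NULL; o = o->next) {
                o->colour = WHITE;      /* treat it like garbage again ...     */
                mark_object(o);         /* ... then mark it, so it stays alive */
        }
}

/* what the finaliser thread now does: run the finaliser first and only
 * then remove the object from the finalise list */
static void run_finalisers(void)
{
        while (finalise_list != NULL) {
                struct obj *o = finalise_list;

                o->finalised = 1;        /* "call" the finaliser          */
                finalise_list = o->next; /* now it may become collectable */
                o->colour = WHITE;
        }
}

int main(void)
{
        struct obj a = { NULL, WHITE, 0 }, b = { &a, WHITE, 0 };
        finalise_list = &b;

        start_gc();                     /* objects survive the pass ...       */
        run_finalisers();               /* ... and are finalised exactly once */
        printf("a finalised: %d, b finalised: %d\n", a.finalised, b.finalised);
        return 0;
}

Compiled on its own, the sketch finalises both objects exactly once while keeping them reachable for the whole pass, which mirrors the behaviour described in the comments above.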
 
Index: kaffe/kaffe/kaffevm/mem/gc-mem.c
diff -u kaffe/kaffe/kaffevm/mem/gc-mem.c:1.46 kaffe/kaffe/kaffevm/mem/gc-mem.c:1.47
--- kaffe/kaffe/kaffevm/mem/gc-mem.c:1.46       Fri Aug 22 11:42:15 2003
+++ kaffe/kaffe/kaffevm/mem/gc-mem.c    Sun Sep 28 19:47:41 2003
@@ -42,10 +42,6 @@
 static gc_block* gc_primitive_alloc(size_t);
 void gc_primitive_free(gc_block*);
 
-uintp gc_heap_base;
-uintp gc_block_base;
-uintp gc_heap_range;
-
 /**
  * A preallocated block for small objects.
  *
@@ -98,13 +94,14 @@
 } sztable[MAX_SMALL_OBJECT_SIZE+1];
 static int max_freelist;
 
-static gc_block* gc_prim_freelist;
 static size_t max_small_object_size;
 
 size_t gc_heap_total;          /* current size of the heap */
 size_t gc_heap_allocation_size;        /* amount of memory by which to grow heap */
 size_t gc_heap_initial_size;   /* amount of memory to initially allocate */
 size_t gc_heap_limit;          /* maximum size to which heap should grow */
+uintp gc_heap_base;
+uintp gc_heap_range;
 
 #ifndef gc_pgsize
 size_t gc_pgsize;
@@ -133,6 +130,7 @@
                totalsmallobjs, totalslack, totalslack/(double)totalsmallobjs);
 }
 
+
 /*
  * check whether the heap is still in a consistent state
  */
@@ -148,7 +146,7 @@
                } else {
                        gc_freeobj* mem = blk->free;
 
-                       assert(blk->inuse);
+                       assert(GCBLOCKINUSE(blk));
                        assert(blk->avail < blk->nr);
                        assert(blk->funcs == (uint8*)GCBLOCK2BASE(blk));
                        assert(blk->state == (uint8*)(blk->funcs + blk->nr));
@@ -167,7 +165,6 @@
 /*
  * Initialise allocator.
  */
-static
 void
 gc_heap_initialise(void)
 {
@@ -288,7 +285,6 @@
 void*
 gc_heap_malloc(size_t sz)
 {
-       static int gc_heap_init = 0;
        size_t lnr;
        gc_freeobj* mem = 0;
        gc_block** mptr;
@@ -296,14 +292,6 @@
        size_t nsz;
        int iLockRoot;
 
-       /* Initialise GC heap first time in - we must assume single threaded
-        * operation here so we can do the lock initialising.
-        */
-       if (gc_heap_init == 0) {
-               gc_heap_initialise();
-               gc_heap_init = 1;
-       }
-
        lockStaticMutex(&gc_heap_lock);
 
 DBG(SLACKANAL,
@@ -466,7 +454,7 @@
        }
        else {
                /* Calculate true size of block */
-               msz = info->size + ROUNDUPALIGN(1);
+               msz = info->size + 2 + ROUNDUPALIGN(1);
                msz = ROUNDUPPAGESIZE(msz);
                info->size = msz;
                gc_primitive_free(info);
@@ -578,9 +566,30 @@
 /*
  * Primitive block management:  Allocating and freeing whole pages.
  *
- * Unused pages may be marked unreadable.  This is only done when
- * compiled with KAFFE_VMDEBUG.
- */ 
+ * Each primitive block of the heap consists of one or more contiguous
+ * pages. Pages of unused primitive blocks are marked unreadable when
+ * kaffe is compiled with debugging enabled. Whether a block is in use
+ * can be determined by its nr field: when it's in use, its nr field
+ * will be > 0.
+ *
+ * All primitive blocks are chained through their pnext / pprev fields,
+ * no matter whether or not they are in use. This makes the necessary
+ * check for mergeable blocks as cheap as possible. Merging small blocks
+ * is necessary so that single unused primitive blocks in the heap are
+ * always as large as possible. The first block in the list is stored
+ * in gc_block_base, the last block in the list is gc_last_block.
+ *
+ * In order to speed up the search for the primitive block that fits
+ * a given allocation request best, small primitive blocks are stored
+ * in several lists (one per size). If no primitive blocks of a given
+ * size are left, a larger one is split instead.
+ */
+#define GC_PRIM_LIST_COUNT 20
+
+uintp gc_block_base;
+static gc_block *gc_last_block;
+static gc_block *gc_prim_freelist[GC_PRIM_LIST_COUNT+1];
+
 
 #ifndef PROT_NONE
 #define PROT_NONE 0
@@ -596,22 +605,68 @@
 #define NO_PROT  PROT_NONE
 #endif
 
-/* Mark this block as in-use */
+/* Mark a primitive block as used */
 static inline void 
 gc_block_add(gc_block *b)
 {
-       b->inuse = 1;
+       b->nr = 1;
        mprotect(GCBLOCK2BASE(b), b->size, ALL_PROT);
 }
 
-/* Mark this block as free */
+/* Mark a primitive block as unused */
 static inline void 
 gc_block_rm(gc_block *b)
 {
-       b->inuse = 0;
+       b->nr = 0;
        mprotect(GCBLOCK2BASE(b), b->size, NO_PROT);
 }
 
+/* return the prim list blk belongs to */
+static inline gc_block **
+gc_get_prim_freelist (gc_block *blk)
+{
+       size_t sz = blk->size >> gc_pgbits;
+
+       if (sz <= GC_PRIM_LIST_COUNT)
+       {
+               return &gc_prim_freelist[sz-1];
+       }
+
+       return &gc_prim_freelist[GC_PRIM_LIST_COUNT];
+}
+
+/* add a primitive block to the correct freelist */
+static inline void
+gc_add_to_prim_freelist(gc_block *blk)
+{
+       gc_block **list = gc_get_prim_freelist (blk);
+
+       /* insert the block into the list, sorting by ascending addresses */
+       while (*list && blk > *list)
+       {
+               list = &(*list)->next;
+       }
+
+       if (*list) {
+               (*list)->free = (gc_freeobj *)&blk->next;
+       }
+
+       blk->next = *list;
+       *list = blk;
+       blk->free = (gc_freeobj *)list;
+}
+
+/* remove a primitive block from its freelist */
+static inline void
+gc_remove_from_prim_freelist(gc_block *blk)
+{
+       *( (gc_block **) blk->free ) = blk->next;
+
+       if (blk->next) {
+               blk->next->free = blk->free;
+       }
+}
+ 
 /*
  * Allocate a block of memory from the free list or, failing that, the
  * system pool.
@@ -620,127 +675,165 @@
 gc_block*
 gc_primitive_alloc(size_t sz)
 {
-       gc_block* ptr;
-       gc_block** pptr;
+       size_t diff = 0;
+       gc_block* best_fit = NULL;
+       size_t i = sz >> gc_pgbits;
 
        assert(sz % gc_pgsize == 0);
 
-       for (pptr = &gc_prim_freelist; *pptr != 0; pptr = &ptr->next) {
-               ptr = *pptr;
-               /* First fit */
-               if (sz <= ptr->size) {
-                       size_t left;
-                       /* If there's more than a page left, split it */
-                       left = ptr->size - sz;
-                       if (left >= gc_pgsize) {
-                               gc_block* nptr;
-
-                               ptr->size = sz;
-                               nptr = GCBLOCKEND(ptr);
-                               nptr->size = left;
+       DBG(GCPRIM, dprintf("\ngc_primitive_alloc: got to allocate 0x%x bytes\n", sz); )
 
-                               DBG(GCDIAG, nptr->magic = GC_MAGIC);
+       /* try freelists for small primitive blocks first */
+       if (i <= GC_PRIM_LIST_COUNT) {
+               for (i-=1; i<GC_PRIM_LIST_COUNT; i++) {
+                       if (gc_prim_freelist[i]) {
+                               best_fit = gc_prim_freelist[i]; 
+                               diff = gc_prim_freelist[i]->size - sz;
+                               break;
+                       }
+               }
+       }
 
-                               nptr->next = ptr->next;
-                               ptr->next = nptr;
+       /* if that fails, try the big remaining list */
+       if (!best_fit) {
+               gc_block *ptr;
+               for (ptr = gc_prim_freelist[GC_PRIM_LIST_COUNT]; ptr != 0; ptr=ptr->next) {
+
+                       /* Best fit */
+                       if (sz == ptr->size) {
+                               diff = 0;
+                               best_fit = ptr;
+                               break;
+                       } else if (sz < ptr->size) {
+                               size_t left = ptr->size - sz;
+               
+                               if (best_fit==NULL || left<diff) {
+                                       diff = left;
+                                       best_fit = ptr;
+                               }               
                        }
-                       *pptr = ptr->next;
-DBG(GCPRIM,            dprintf("gc_primitive_alloc: %d bytes from freelist @ %p\n", ptr->size, ptr); )
-                       gc_block_add(ptr);
-                       return (ptr);
                }
        }
 
+       /* if we found a block, remove it from the list and check if splitting is necessary */
+       if (best_fit) {
+               gc_remove_from_prim_freelist (best_fit);
+
+               DBG(GCPRIM, dprintf ("gc_primitive_alloc: found best_fit %p diff 0x%x (0x%x - 0x%x)\n",
+                                    best_fit, diff, best_fit->size, sz); )
+               assert ( diff % gc_pgsize == 0 );
+
+               if (diff > 0) {
+                       gc_block *nptr;
+
+                       best_fit->size = sz;
+               
+                       nptr = GCBLOCKEND(best_fit);
+                       nptr->size = diff;
+                       gc_block_rm (nptr);
+
+                       DBG(GCPRIM, dprintf ("gc_primitive_alloc: splitted remaining 0x%x bytes @ %p\n", diff, nptr); )
+
+                       DBG(GCDIAG, nptr->magic = GC_MAGIC);
+
+                       /* maintain list of primitive blocks */
+                       nptr->pnext = best_fit->pnext;
+                       nptr->pprev = best_fit;
+
+                       best_fit->pnext = nptr;
+
+                       if (nptr->pnext) {
+                               nptr->pnext->pprev = nptr;
+                       } else {
+                               gc_last_block = nptr;
+                       }
+
+                       /* and add nptr to one of the freelists */
+                       gc_add_to_prim_freelist (nptr);
+               }
+
+DBG(GCPRIM,    dprintf("gc_primitive_alloc: 0x%x bytes from freelist @ %p\n", best_fit->size, best_fit); )
+               gc_block_add(best_fit);
+               return (best_fit);
+       }
+DBG(GCPRIM,    dprintf("gc_primitive_alloc: no suitable block found!\n"); )
+
        /* Nothing found on free list */
        return (0);
 }
 
 /*
+ * merge a primitive block with its successor.
+ */
+static inline void
+gc_merge_with_successor (gc_block *blk)
+{
+       gc_block *next_blk = blk->pnext;
+
+       assert (next_blk);
+
+       blk->size += next_blk->size;
+       blk->pnext = next_blk->pnext;
+
+       /*
+        * if the merged block has a successor, update its pprev field.
+        * otherwise, the merged block is the last block in the primitive
+        * chain.
+        */
+       if (blk->pnext) {
+               blk->pnext->pprev = blk;
+       } else {
+               gc_last_block = blk;
+       }
+}
+
+
+/*
  * Return a block of memory to the free list.
  */
 void
 gc_primitive_free(gc_block* mem)
 {
-       gc_block* lptr;
-       gc_block* nptr;
+       gc_block *blk;
 
        assert(mem->size % gc_pgsize == 0);
 
        /* Remove from object hash */
        gc_block_rm(mem);
-       mem->next = 0;
 
-       if(mem < gc_prim_freelist || gc_prim_freelist == 0) {
-               /* If this block is directly before the first block on the
-                * freelist, merge it into that block.  Otherwise just
-                * attached it to the beginning.
-                */
-               if (GCBLOCKEND(mem) == gc_prim_freelist) {
-DBG(GCPRIM,    dprintf("gc_primitive_free: Merging (%d,%p) beginning of freelist\n", mem->size, mem); )
-                       mem->size += gc_prim_freelist->size;
-                       mem->next = gc_prim_freelist->next;
-               }
-               else {
-DBG(GCPRIM,    dprintf("gc_primitive_free: Prepending (%d,%p) beginning of freelist\n", mem->size, mem); )
-                       mem->next = gc_prim_freelist;
-               }
-               gc_prim_freelist = mem;
-               return;
-       }
+       DBG(GCPRIM, dprintf ("\ngc_primitive_free: freeing block %p (%x bytes, %x)\n", mem, mem->size, mem->size >> gc_pgbits); )
 
-       /* Search the freelist for the logical place to put this block */
-       lptr = gc_prim_freelist;
-       while (lptr->next != 0) {
-               nptr = lptr->next;
-               if (mem > lptr && mem < nptr) {
-                       /* Block goes here in the logical scheme of things.
-                        * Work out how to merge it with those which come
-                        * before and after.
-                        */
-                       if (GCBLOCKEND(lptr) == mem) {
-                               if (GCBLOCKEND(mem) == nptr) {
-                                       /* Merge with last and next */
-DBG(GCPRIM,                            dprintf("gc_primitive_free: Merging (%d,%p) into list\n", mem->size, mem); )
-                                       lptr->size += mem->size + nptr->size;
-                                       lptr->next = nptr->next;
-                               }
-                               else {
-                                       /* Merge with last but not next */
-DBG(GCPRIM,                            dprintf("gc_primitive_free: Merging (%d,%p) with last in list\n", mem->size, mem); )
-                                       lptr->size += mem->size;
-                               }
-                       }
-                       else {
-                               if (GCBLOCKEND(mem) == nptr) {
-                                       /* Merge with next but not last */
-DBG(GCPRIM,                            dprintf("gc_primitive_free: Merging (%d,%p) with next in list\n", mem->size, mem); )
-                                       mem->size += nptr->size;
-                                       mem->next = nptr->next;
-                                       lptr->next = mem;
-                               }
-                               else {
-                                       /* Wont merge with either */
-DBG(GCPRIM,                            dprintf("gc_primitive_free: Inserting (%d,%p) into list\n", mem->size, mem); )
-                                       mem->next = nptr;
-                                       lptr->next = mem;
-                               }
-                       }
-                       return;
-               }
-               lptr = nptr;
-       }
-
-       /* If 'mem' goes directly after the last block, merge it in.
-        * Otherwise, just add in onto the list at the end.
+       /*
+        * Test whether this block is mergeable with its successor.
+        * We need to do the GCBLOCKEND check, since the heap may not be a contiguous
+        * memory area and thus two consecutive blocks need not be mergeable.
         */
-       if (GCBLOCKEND(lptr) == mem) {
-DBG(GCPRIM,    dprintf("gc_primitive_free: Merge (%d,%p) onto last in list\n", mem->size, mem); )
-               lptr->size += mem->size;
+       if ((blk=mem->pnext) &&
+           !GCBLOCKINUSE(blk) &&
+           GCBLOCKEND(mem)==blk) {
+               DBG(GCPRIM, dprintf ("gc_primitive_free: merging %p with its successor (%p, %u)\n", mem, blk, blk->size);)
+
+               gc_remove_from_prim_freelist(blk);
+
+               gc_merge_with_successor (mem);
        }
-       else {
-DBG(GCPRIM,    dprintf("gc_primitive_free: Append (%d,%p) onto last in list\n", mem->size, mem); )
-               lptr->next = mem;
+
+       if ((blk=mem->pprev) &&
+           !GCBLOCKINUSE(blk) &&
+           GCBLOCKEND(blk)==mem) {
+               DBG(GCPRIM, dprintf ("gc_primitive_free: merging %p with its predecessor (%p, %u)\n", mem, blk, blk->size); )
+
+               gc_remove_from_prim_freelist(blk);
+
+               mem = blk;
+
+               gc_merge_with_successor (mem);
        }
+
+       gc_add_to_prim_freelist (mem);
+
+       DBG(GCPRIM, dprintf ("gc_primitive_free: added 0x%x bytes @ %p to freelist %u @ %p\n", mem->size, mem,
+                            gc_get_prim_freelist(mem)-&gc_prim_freelist[0], gc_get_prim_freelist(mem)); )
 }
 
 /*
@@ -851,9 +944,8 @@
        static uintp last_addr;
 
        if (!gc_block_base) {
-               nblocks = (gc_heap_limit>>gc_pgbits);
+               nblocks = (gc_heap_limit+gc_pgsize-1)>>gc_pgbits;
 
-               nblocks += nblocks/4;
                gc_block_base = (uintp) malloc(nblocks * sizeof(gc_block));
                if (!gc_block_base) return 0;
                memset((void *)gc_block_base, 0, nblocks * sizeof(gc_block));
@@ -920,7 +1012,6 @@
                   There should be no gc_block *'s on any stack
                   now. */ 
                if (gc_block_base != old_blocks) {
-                       extern gc_block *gc_prim_freelist;
                        int i;
                        gc_block *b = (void *) gc_block_base;
                        uintp delta = gc_block_base - old_blocks;
@@ -932,7 +1023,8 @@
                        memset(b + onb, 0,
                               (nblocks - onb) * sizeof(gc_block));
 
-                       R(gc_prim_freelist);
+                       for (i = 0; i<=GC_PRIM_LIST_COUNT; i++)
+                               R(gc_prim_freelist[i]);
 
                        for (i = 0; freelist[i].list != (void*)-1; i++) 
                                R(freelist[i].list);
@@ -960,6 +1052,7 @@
 gc_heap_grow(size_t sz)
 {
        gc_block* blk;
+       int iLockRoot;
 
        if (GC_SMALL_OBJECT(sz)) {
                sz = gc_pgsize;
@@ -974,6 +1067,8 @@
 
        assert(sz % gc_pgsize == 0);
 
+       lockStaticMutex(&gc_heap_lock);
+
        if (gc_heap_total == gc_heap_limit) {
                return (0);
        } else if (gc_heap_total + sz > gc_heap_limit) {
@@ -1002,11 +1097,18 @@
        DBG(GCDIAG, blk->magic = GC_MAGIC);
        blk->size = sz;
 
-       /* Attach block to object hash */
-       gc_block_add(blk);
+       /* maintain list of primitive blocks */
+       if (gc_last_block) {
+               gc_last_block->pnext = blk;
+               blk->pprev = gc_last_block;
+       } else {
+               gc_last_block = blk;
+       }
 
        /* Free block into the system */
        gc_primitive_free(blk);
+
+       unlockStaticMutex(&gc_heap_lock);
 
        return (blk);
 }
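
To summarise the allocator rework shown above: primitive blocks of up to GC_PRIM_LIST_COUNT pages sit on per-size freelists, larger blocks on one overflow list that is scanned for the best fit, and an allocation splits any left-over pages off and returns them to a freelist. The sketch below is a deliberately reduced, self-contained model of that lookup-and-split step; the names (prim_block, freelists, PAGE, N_LISTS, prim_alloc, prim_free) are invented, and it leaves out the address-sorted insertion, the merging of neighbouring blocks via pnext/pprev, the mprotect calls and the locking that the real gc-mem.c does.

/*
 * Toy model of the per-size primitive freelists with best-fit fallback.
 * All names are invented; this is not the kaffe implementation.
 */
#include <stdio.h>
#include <stdlib.h>

#define PAGE    4096
#define N_LISTS 20                      /* one list per size of 1..N_LISTS pages */

struct prim_block {
        struct prim_block *next;
        size_t             size;        /* in bytes, always a multiple of PAGE */
};

static struct prim_block *freelists[N_LISTS + 1];      /* [N_LISTS] holds the large blocks */

static struct prim_block **list_for(size_t size)
{
        size_t pages = size / PAGE;
        return pages <= N_LISTS ? &freelists[pages - 1] : &freelists[N_LISTS];
}

static void prim_free(struct prim_block *b)
{
        struct prim_block **l = list_for(b->size);      /* push onto the matching list */
        b->next = *l;
        *l = b;
}

static struct prim_block *prim_alloc(size_t size)
{
        size_t pages = size / PAGE;
        struct prim_block **best = NULL, **pp;

        /* exact-size list first, then the next larger small lists */
        if (pages <= N_LISTS) {
                for (size_t i = pages - 1; i < N_LISTS; i++) {
                        if (freelists[i]) { best = &freelists[i]; break; }
                }
        }
        /* otherwise scan the "large" list for the block with the least waste */
        if (!best) {
                for (pp = &freelists[N_LISTS]; *pp; pp = &(*pp)->next) {
                        if ((*pp)->size < size)
                                continue;
                        if (!best || (*pp)->size < (*best)->size)
                                best = pp;
                }
        }
        if (!best)
                return NULL;

        struct prim_block *b = *best;
        *best = b->next;                /* unlink the chosen block */
        if (b->size > size) {           /* split the unused tail back onto a list */
                struct prim_block *rest = (struct prim_block *)((char *)b + size);
                rest->size = b->size - size;
                b->size = size;
                prim_free(rest);
        }
        return b;
}

int main(void)
{
        struct prim_block *heap = malloc(8 * PAGE);     /* pretend this is the heap */
        heap->size = 8 * PAGE;
        prim_free(heap);

        struct prim_block *b = prim_alloc(3 * PAGE);
        printf("got %zu pages, %zu pages left on the freelists\n",
               b->size / PAGE, (8 * PAGE - b->size) / PAGE);
        free(heap);
        return 0;
}

Keeping one list per small block size makes the common case a constant-time pop from a list head instead of a linear scan of one global freelist, which is presumably where most of the intended efficiency gain in the log message comes from.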
Index: kaffe/kaffe/kaffevm/mem/gc-mem.h
diff -u kaffe/kaffe/kaffevm/mem/gc-mem.h:1.16 kaffe/kaffe/kaffevm/mem/gc-mem.h:1.17
--- kaffe/kaffe/kaffevm/mem/gc-mem.h:1.16       Fri Jun 20 13:01:08 2003
+++ kaffe/kaffe/kaffevm/mem/gc-mem.h    Sun Sep 28 19:47:41 2003
@@ -72,6 +72,7 @@
 
 /* ------------------------------------------------------------------------ */
 
+extern void    gc_heap_initialise (void);
 extern void*   gc_heap_malloc(size_t);    
 extern void    gc_heap_free(void*);
 
@@ -94,8 +95,8 @@
 #endif
        struct _gc_freeobj*     free;   /* Next free sub-block */
        struct _gc_block*       next;   /* Next block in prim/small freelist */
-       struct _gc_block*       nfree;  /* Next block on sub-freelist */
-       uint32                  inuse;  /* 1bit! Is block allocated? */
+       struct _gc_block*       pnext;  /* next primitive block */
+       struct _gc_block*       pprev;  /* previous primitive block */
        uint32                  size;   /* Size of objects in this block */
        uint16                  nr;     /* Nr of objects in block */
        uint16                  avail;  /* Nr of objects available in block */
@@ -111,9 +112,13 @@
 
 #define        GC_MAGIC                0xD0DECADE
 
-#define GCBLOCK_LIVE           ((gc_block *) -1) /* block->next when alloced*/
-
 #define GC_BLOCKS              ((gc_block *) gc_block_base)
+
+/**
+ * Tests whether a block is in use.
+ *
+ */
+#define GCBLOCKINUSE(B)                ((B)->nr > 0)
 
 /**
  * Evaluates to the array that contains the states of the objects contained in @B.
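
As a reading aid for the header change above: the removed inuse field is now implied by nr, so an unused primitive block simply keeps nr == 0 and GCBLOCKINUSE(B) reduces to (B)->nr > 0. The snippet below is a minimal, self-contained illustration; only the macro and the field names nr, size and pnext come from the patch, while the cut-down struct and the walk are invented.

/*
 * Minimal illustration of the GCBLOCKINUSE change: the nr field doubles
 * as the "in use" flag. The struct below only mirrors the three fields
 * needed here; it is not the full gc_block from gc-mem.h.
 */
#include <stdio.h>

typedef struct _gc_block {
        struct _gc_block *pnext;        /* next primitive block (new field)       */
        unsigned short    nr;           /* nr of objects in the block; 0 = unused */
        unsigned int      size;         /* size of the block in bytes             */
} gc_block;

#define GCBLOCKINUSE(B) ((B)->nr > 0)   /* as defined in gc-mem.h 1.17 */

int main(void)
{
        gc_block free_blk = { NULL, 0, 8192 };
        gc_block used_blk = { &free_blk, 3, 4096 };
        gc_block *b;

        for (b = &used_blk; b != NULL; b = b->pnext)
                printf("block %p: %s (%u bytes)\n", (void *)b,
                       GCBLOCKINUSE(b) ? "in use" : "free", b->size);
        return 0;
}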
