Hey folks,

The attached patch set implements block map support for sgen. It uses a
schema similar to boehm's, which is a 2 level sparse map.
Under 64 bits it uses hashing.

I benchmarked a modified binary-trees without valuetypes. The block map gives a
very modest speedup under major-copying (about 2%) and
nothing under major-marksweep. I've only used the block map for
major_copy_or_mark_object, though. There are probably other places it
could be used too.

The design is basically the same as boehm's except for a few things:

-It doesn't store list heads or addresses on each segment. This allows
the segment size to be a power of 2; and
-LOS is handled by filling all covering slots with its block instead of
using forwarding

Few notes:

Segments are not deallocated since this requires either scanning whole
segments on each deallocation or keeping block counts.
And it's probably not needed since Boehm doesn't do it. It's doable as long
as the block map is only read during GC and mutated
with the gc lock held.

64-bit support has not been committed since it is a minor change to the code
in sgen-gc.c and I want to have the current change set
validated first.

A small config option that uses either a 3 level map or just hashing under
32bits can be done with ease.

The embedding of Block in MSBlockInfo wastes a word of memory. This could be
worked around by either factoring Block::role into a separate
struct or by using Block::next in place of MSBlockInfo::next.

Cheers,
Rodrigo
diff --git a/mono/metadata/ChangeLog b/mono/metadata/ChangeLog
index 50972b9..8226c9a 100644
--- a/mono/metadata/ChangeLog
+++ b/mono/metadata/ChangeLog
@@ -1,3 +1,10 @@
+2010-05-24 Rodrigo Kumpera  <rkump...@novell.com>
+
+	* sgen-gc.c: Introduce two new kinds of memory holes for large objects.
+
+	* sgen-gc.c: Make LOSObject have a struct Block header. Change
+	code to use Block::next instead of LOSObject::next.
+
 2010-05-23  Zoltan Varga  <var...@gmail.com>
 
 	* marshal.c (free_wrapper): New helper function to free dynamic wrappers.
diff --git a/mono/metadata/sgen-gc.c b/mono/metadata/sgen-gc.c
index 1852431..48b8f10 100644
--- a/mono/metadata/sgen-gc.c
+++ b/mono/metadata/sgen-gc.c
@@ -366,7 +366,9 @@ mono_gc_flush_info (void)
 enum {
 	MEMORY_ROLE_GEN0,
 	MEMORY_ROLE_GEN1,
-	MEMORY_ROLE_PINNED
+	MEMORY_ROLE_PINNED,
+	MEMORY_ROLE_LOS_GEN1,
+	MEMORY_ROLE_SINGLE_LOS
 };
 
 typedef struct _Block Block;
@@ -413,9 +415,8 @@ struct _GCMemSection {
  */
 typedef struct _LOSObject LOSObject;
 struct _LOSObject {
-	LOSObject *next;
+	Block block;
 	mword size; /* this is the object size */
-	guint16 role;
 	int dummy; /* to have a sizeof (LOSObject) a multiple of ALLOC_ALIGN  and data starting at same alignment */
 	char data [MONO_ZERO_LEN_ARRAY];
 };
@@ -1718,7 +1719,7 @@ mono_gc_scan_for_specific_ref (MonoObject *key)
 
 	major_iterate_objects (TRUE, TRUE, (IterateObjectCallbackFunc)scan_object_for_specific_ref_callback, key);
 
-	for (bigobj = los_object_list; bigobj; bigobj = bigobj->next)
+	for (bigobj = los_object_list; bigobj; bigobj = bigobj->block.next)
 		scan_object_for_specific_ref (bigobj->data, key);
 
 	scan_roots_for_specific_ref (key, ROOT_TYPE_NORMAL);
@@ -1864,7 +1865,7 @@ check_for_xdomain_refs (void)
 
 	major_iterate_objects (TRUE, TRUE, scan_pinned_object_for_xdomain_refs_callback, NULL);
 
-	for (bigobj = los_object_list; bigobj; bigobj = bigobj->next)
+	for (bigobj = los_object_list; bigobj; bigobj = bigobj->block.next)
 		scan_object_for_xdomain_refs (bigobj->data);
 }
 
@@ -1955,7 +1956,7 @@ mono_gc_clear_domain (MonoDomain * domain)
 	   dereference a pointer from an object to another object if
 	   the first object is a proxy. */
 	major_iterate_objects (TRUE, TRUE, (IterateObjectCallbackFunc)clear_domain_process_major_object_callback, domain);
-	for (bigobj = los_object_list; bigobj; bigobj = bigobj->next)
+	for (bigobj = los_object_list; bigobj; bigobj = bigobj->block.next)
 		clear_domain_process_object (bigobj->data, domain);
 
 	prev = NULL;
@@ -1963,17 +1964,17 @@ mono_gc_clear_domain (MonoDomain * domain)
 		if (need_remove_object_for_domain (bigobj->data, domain)) {
 			LOSObject *to_free = bigobj;
 			if (prev)
-				prev->next = bigobj->next;
+				prev->block.next = bigobj->block.next;
 			else
-				los_object_list = bigobj->next;
-			bigobj = bigobj->next;
+				los_object_list = bigobj->block.next;
+			bigobj = bigobj->block.next;
 			DEBUG (4, fprintf (gc_debug_file, "Freeing large object %p\n",
 					bigobj->data));
 			free_large_object (to_free);
 			continue;
 		}
 		prev = bigobj;
-		bigobj = bigobj->next;
+		bigobj = bigobj->block.next;
 	}
 	major_iterate_objects (TRUE, FALSE, (IterateObjectCallbackFunc)clear_domain_free_major_non_pinned_object_callback, domain);
 	major_iterate_objects (FALSE, TRUE, (IterateObjectCallbackFunc)clear_domain_free_major_pinned_object_callback, domain);
@@ -3142,7 +3143,7 @@ dump_heap (const char *type, int num, const char *reason)
 	major_dump_heap ();
 
 	fprintf (heap_dump_file, "<los>\n");
-	for (bigobj = los_object_list; bigobj; bigobj = bigobj->next)
+	for (bigobj = los_object_list; bigobj; bigobj = bigobj->block.next)
 		dump_object ((MonoObject*)bigobj->data, FALSE);
 	fprintf (heap_dump_file, "</los>\n");
 
@@ -3429,7 +3430,7 @@ major_do_collection (const char *reason)
 	major_find_pin_queue_start_ends ();
 	/* identify possible pointers to the insize of large objects */
 	DEBUG (6, fprintf (gc_debug_file, "Pinning from large objects\n"));
-	for (bigobj = los_object_list; bigobj; bigobj = bigobj->next) {
+	for (bigobj = los_object_list; bigobj; bigobj = bigobj->block.next) {
 		int start, end;
 		find_optimized_pin_queue_area (bigobj->data, (char*)bigobj->data + bigobj->size, &start, &end);
 		if (start != end) {
@@ -3497,16 +3498,16 @@ major_do_collection (const char *reason)
 			LOSObject *to_free;
 			/* not referenced anywhere, so we can free it */
 			if (prevbo)
-				prevbo->next = bigobj->next;
+				prevbo->block.next = bigobj->block.next;
 			else
-				los_object_list = bigobj->next;
+				los_object_list = bigobj->block.next;
 			to_free = bigobj;
-			bigobj = bigobj->next;
+			bigobj = bigobj->block.next;
 			free_large_object (to_free);
 			continue;
 		}
 		prevbo = bigobj;
-		bigobj = bigobj->next;
+		bigobj = bigobj->block.next;
 	}
 
 	major_sweep ();
@@ -3965,7 +3966,9 @@ alloc_large_inner (MonoVTable *vtable, size_t size)
 	*vtslot = vtable;
 	total_alloc += alloc_size;
 	UPDATE_HEAP_BOUNDARIES (obj->data, (char*)obj->data + size);
-	obj->next = los_object_list;
+	obj->block.role = MEMORY_ROLE_SINGLE_LOS;
+	obj->block.next = los_object_list;
+
 	los_object_list = obj;
 	los_memory_usage += size;
 	los_num_objects++;
@@ -6397,7 +6400,7 @@ find_object_for_ptr (char *ptr)
 	if (ptr >= nursery_section->data && ptr < nursery_section->end_data)
 		return find_object_for_ptr_in_area (ptr, nursery_section->data, nursery_section->end_data);
 
-	for (bigobj = los_object_list; bigobj; bigobj = bigobj->next) {
+	for (bigobj = los_object_list; bigobj; bigobj = bigobj->block.next) {
 		if (ptr >= bigobj->data && ptr < bigobj->data + bigobj->size)
 			return bigobj->data;
 	}
diff --git a/mono/metadata/ChangeLog b/mono/metadata/ChangeLog
index 8226c9a..015b91f 100644
--- a/mono/metadata/ChangeLog
+++ b/mono/metadata/ChangeLog
@@ -1,5 +1,12 @@
 2010-05-24 Rodrigo Kumpera  <rkump...@novell.com>
 
+	* sgen-gc.c: New block map API. Maps addresses to Block structs. Use a two
+	level sparse array similar to boehm's block map.
+
+	* sgen-gc.c: Register common memory regions (nursery, pinned, LOS) to block map.
+
+2010-05-24 Rodrigo Kumpera  <rkump...@novell.com>
+
 	* sgen-gc.c: Introduce two new kinds of memory holes for large objects.
 
 	* sgen-gc.c: Make LOSObject have a struct Block header. Change
diff --git a/mono/metadata/sgen-gc.c b/mono/metadata/sgen-gc.c
index 48b8f10..15ed6cd 100644
--- a/mono/metadata/sgen-gc.c
+++ b/mono/metadata/sgen-gc.c
@@ -333,6 +333,7 @@ enum {
 	INTERNAL_MEM_MS_TABLES,
 	INTERNAL_MEM_MS_BLOCK_INFO,
 	INTERNAL_MEM_EPHEMERON_LINK,
+	INTERNAL_MEM_BLOCK_MAP_SEGMENT,
 	INTERNAL_MEM_MAX
 };
 
@@ -1045,6 +1046,113 @@ static int mark_ephemerons_in_range (CopyOrMarkObjectFunc copy_func, char *start
 static void clear_unreachable_ephemerons (CopyOrMarkObjectFunc copy_func, char *start, char *end);
 static void null_ephemerons_for_domain (MonoDomain *domain);
 
+
+
+/*
+ * ######################################################################
+ * ########  Block Map.
+ * ######################################################################
+ */
+
+/*
+ * Constraints on 32 bits systems:
+ *	level0 + level1 + smallest allocation unit >= 32
+ *
+ * The smallest alloc unit is usually pagesized due to LOS.
+ * Otherwise it would be 16k for Mark&Sweep, 128k for pinned/Copying
+ *
+ * TODO add support for 64 bits systems by using hashing and/or 3 level tables.
+ * TODO alloc segments on an outer loop.
+ * TODO figure out how to release level1 memory (or if it's even desired)
+ */
+#define BITS_PER_WORD (sizeof (mword) * 8)
+#define LEVEL0_BITS 10
+#define LEVEL1_BITS 10
+
+#define LEVEL0_SIZE (1 << LEVEL0_BITS)
+#define LEVEL1_SIZE (1 << LEVEL1_BITS)
+
+#define LEVEL0_SHIFT (BITS_PER_WORD - LEVEL0_BITS)
+#define LEVEL1_SHIFT (BITS_PER_WORD - LEVEL0_BITS - LEVEL1_BITS)
+
+#define LEVEL1_MASK ((1 << LEVEL1_BITS) - 1)
+
+#define SMALL_BITS_SIZE (1 << LEVEL1_SHIFT)
+
+typedef struct {
+	Block *blocks [LEVEL1_SIZE];
+} SgenSegmentInfo;
+
+static SgenSegmentInfo *all_zero_segment;
+static SgenSegmentInfo *block_map [LEVEL0_SIZE];
+
+static mword
+pointer_to_block_index (mword ptr)
+{
+	return (ptr >> LEVEL1_SHIFT) & LEVEL1_MASK;
+}
+
+static mword
+pointer_to_segment_index (mword ptr)
+{
+	return ptr >> LEVEL0_SHIFT;
+}
+
+static SgenSegmentInfo*
+block_map_get_segment (mword ptr)
+{
+	return block_map [pointer_to_segment_index (ptr)];
+}
+
+static Block*
+block_map_find_block (mword ptr)
+{
+	return block_map_get_segment (ptr)->blocks [pointer_to_block_index (ptr)];
+}
+
+/* LOCKING: requires that the GC lock is held */
+static SgenSegmentInfo	*
+block_map_get_or_alloc_segment (mword ptr)
+{
+	mword level0 = pointer_to_segment_index (ptr);
+	SgenSegmentInfo	*segment = block_map [level0];
+
+	if (segment == all_zero_segment)
+		block_map [level0] = segment = get_internal_mem (sizeof (SgenSegmentInfo), INTERNAL_MEM_BLOCK_MAP_SEGMENT);
+
+	return segment;
+}
+
+/* LOCKING: requires that the GC lock is held */
+static void
+block_map_register_block (Block *block, mword start, mword end)
+{
+	for (; start < end; start += SMALL_BITS_SIZE) {
+		SgenSegmentInfo	*segment = block_map_get_or_alloc_segment (start);
+		mword block_idx = pointer_to_block_index (start);
+		segment->blocks [block_idx] = block;
+	}
+}
+
+/* LOCKING: requires that the GC lock is held */
+static void
+block_map_deregister_block (mword start, mword end)
+{
+	for (; start < end; start += SMALL_BITS_SIZE) {
+		SgenSegmentInfo	*segment = block_map_get_segment (start);
+		segment->blocks [pointer_to_block_index (start)] = NULL;
+	}
+}
+
+static void
+block_map_init (void)
+{
+	int i;
+	all_zero_segment = get_internal_mem (sizeof (SgenSegmentInfo), INTERNAL_MEM_BLOCK_MAP_SEGMENT);
+	for (i = 0; i < LEVEL0_SIZE; ++i)
+		block_map [i] = all_zero_segment;
+}
+
 //#define BINARY_PROTOCOL
 #include "sgen-protocol.c"
 #include "sgen-pinning.c"
@@ -2760,6 +2868,8 @@ alloc_nursery (void)
 
 	nursery_section = section;
 
+	block_map_register_block (&section->block, (mword)section->data, (mword)section->end_data);
+
 	/* Setup the single first large fragment */
 	frag = alloc_fragment ();
 	frag->fragment_start = nursery_start;
@@ -3114,7 +3224,7 @@ dump_heap (const char *type, int num, const char *reason)
 						     "fin-table", "finalize-entry", "dislink-table",
 						     "dislink", "roots-table", "root-record", "statistics",
 						     "remset", "gray-queue", "store-remset", "marksweep-tables",
-						     "marksweep-block-info", "ephemeron-link" };
+						     "marksweep-block-info", "ephemeron-link", "block-map-segments" };
 
 	ObjectList *list;
 	LOSObject *bigobj;
@@ -3768,6 +3878,10 @@ alloc_pinned_chunk (void)
 	chunk->page_sizes [0] = PINNED_FIRST_SLOT_SIZE;
 	build_freelist (chunk, slot_for_size (PINNED_FIRST_SLOT_SIZE), PINNED_FIRST_SLOT_SIZE, chunk->start_data, ((char*)chunk + FREELIST_PAGESIZE));
 	DEBUG (4, fprintf (gc_debug_file, "Allocated pinned chunk %p, size: %d\n", chunk, size));
+
+	/*FIXME where are pinned chunks released? They are never deregistered from the block map. */
+	block_map_register_block (&chunk->block, (mword)chunk->start_data, (mword)((char*)chunk + size));
+
 	return chunk;
 }
 
@@ -3901,6 +4015,8 @@ free_large_object (LOSObject *obj)
 	size += sizeof (LOSObject);
 	size += pagesize - 1;
 	size &= ~(pagesize - 1);
+	block_map_deregister_block ((mword)obj->data, (mword)((char*)obj->data + size));
+
 	total_alloc -= size;
 	los_num_objects--;
 	free_os_memory (obj, size);
@@ -3969,6 +4085,7 @@ alloc_large_inner (MonoVTable *vtable, size_t size)
 	obj->block.role = MEMORY_ROLE_SINGLE_LOS;
 	obj->block.next = los_object_list;
 
+	block_map_register_block (&obj->block, (mword)obj->data, (mword)((char*)obj->data + size));
 	los_object_list = obj;
 	los_memory_usage += size;
 	los_num_objects++;
@@ -5840,7 +5957,7 @@ scan_from_remsets (void *start_nursery, void *end_nursery)
 		DEBUG (4, fprintf (gc_debug_file, "Scanning global remset range: %p-%p, size: %zd\n", remset->data, remset->store_next, remset->store_next - remset->data));
 		store_pos = remset->data;
 		for (p = remset->data; p < remset->store_next; p = next_p) {
-			void **ptr = p [0];
+			void **ptr = (void**)p [0];
 
 			/*Ignore previously processed remset.*/
 			if (!global_remset_location_was_not_added (ptr)) {
@@ -7222,6 +7339,8 @@ mono_gc_base_init (void)
 	pthread_key_create (&thread_info_key, NULL);
 #endif
 
+	block_map_init ();
+
 	gc_initialized = TRUE;
 	UNLOCK_GC;
 	mono_gc_register_thread (&sinfo);
diff --git a/mono/metadata/ChangeLog b/mono/metadata/ChangeLog
index 015b91f..0a64469 100644
--- a/mono/metadata/ChangeLog
+++ b/mono/metadata/ChangeLog
@@ -1,5 +1,16 @@
 2010-05-24 Rodrigo Kumpera  <rkump...@novell.com>
 
+	* sgen-major-copying.c (alloc_major_section): Register with block map.
+
+	* sgen-major-copying.c (major_copy_or_mark_object): Use block map to figure out
+	where the object lives.
+
+	* sgen-gc.c: Add some notes regarding major-copying and block map.
+
+	Gives a 2% reduction in execution time on modified binary-tree (no structs).
+
+2010-05-24 Rodrigo Kumpera  <rkump...@novell.com>
+
 	* sgen-gc.c: New block map API. Maps addresses to Block structs. Use a two
 	level sparse array similar to boehm's block map.
 
diff --git a/mono/metadata/sgen-gc.c b/mono/metadata/sgen-gc.c
index 15ed6cd..ceab703 100644
--- a/mono/metadata/sgen-gc.c
+++ b/mono/metadata/sgen-gc.c
@@ -364,6 +364,10 @@ mono_gc_flush_info (void)
 
 #define GC_BITS_PER_WORD (sizeof (mword) * 8)
 
+/*
+ * When adding or changing this enum, take care of major_copy_or_mark_object, it depends on
+ * a specific ordering.
+ */
 enum {
 	MEMORY_ROLE_GEN0,
 	MEMORY_ROLE_GEN1,
@@ -396,7 +400,7 @@ struct _GCMemSection {
 	int pin_queue_start;
 	int pin_queue_end;
 	unsigned short num_scan_start;
-	gboolean is_to_space;
+	gboolean is_to_space; /*XXX move me next to block to reduce cache misses since major-copying uses me in major_copy_or_mark_object*/
 };
 
 #define SIZEOF_GC_MEM_SECTION	((sizeof (GCMemSection) + 7) & ~7)
diff --git a/mono/metadata/sgen-major-copying.c b/mono/metadata/sgen-major-copying.c
index 4a6b623..617937b 100644
--- a/mono/metadata/sgen-major-copying.c
+++ b/mono/metadata/sgen-major-copying.c
@@ -116,6 +116,8 @@ alloc_major_section (void)
 	section->block.role = MEMORY_ROLE_GEN1;
 	section->is_to_space = TRUE;
 
+	block_map_register_block (&section->block, (mword)section->data, (mword)section->end_data);
+
 	/* add to the section list */
 	section->block.next = section_list;
 	section_list = section;
@@ -129,6 +131,7 @@ static void
 free_major_section (GCMemSection *section)
 {
 	DEBUG (3, fprintf (gc_debug_file, "Freed major section %p (%p-%p)\n", section, section->data, section->end_data));
+	block_map_deregister_block ((mword)section->data, (mword)section->end_data);
 	free_internal_mem (section->scan_starts, INTERNAL_MEM_SCAN_STARTS);
 	free_os_memory (section, MAJOR_SECTION_SIZE);
 	total_alloc -= MAJOR_SECTION_SIZE - SIZEOF_GC_MEM_SECTION;
@@ -283,9 +286,11 @@ alloc_degraded (MonoVTable *vtable, size_t size)
 static void
 major_copy_or_mark_object (void **obj_slot)
 {
+	unsigned char role;
 	char *forwarded;
 	char *obj = *obj_slot;
 	mword objsize;
+	Block *block;
 
 	DEBUG (9, g_assert (current_collection_generation == GENERATION_OLD));
 
@@ -332,32 +337,21 @@ major_copy_or_mark_object (void **obj_slot)
 		return;
 	}
 
-	if (ptr_in_nursery (obj))
-		goto copy;
 
 	/*
-	 * At this point we know obj is not pinned, not forwarded and
-	 * belongs to 2, 3, 4, or 5.
+	 * At this point we know obj is not pinned and not forwarded.
 	 *
-	 * LOS object (2) are simple, at least until we always follow
-	 * the rule: if objsize > MAX_SMALL_OBJ_SIZE, pin the object
-	 * and return it.  At the end of major collections, we walk
-	 * the los list and if the object is pinned, it is marked,
-	 * otherwise it can be freed.
+	 * If the object belongs to the pinned or LOS spaces, just pin it and return.
 	 *
-	 * Pinned chunks (3) and major heap sections (4, 5) both
-	 * reside in blocks, which are always aligned, so once we've
-	 * eliminated LOS objects, we can just access the block and
-	 * see whether it's a pinned chunk or a major heap section.
+	 * If the object belongs to gen1 and is in to_space, ignore it.
 	 */
 
-	objsize = safe_object_get_size ((MonoObject*)obj);
-	objsize += ALLOC_ALIGN - 1;
-	objsize &= ~(ALLOC_ALIGN - 1);
+	block = block_map_find_block ((mword)obj);
+	role = block->role;
+
+	if (role >= MEMORY_ROLE_PINNED) {
+		DEBUG (9, g_assert (!object_is_pinned (obj)));/*It can't be pinned since this was checked before*/
 
-	if (G_UNLIKELY (objsize > MAX_SMALL_OBJ_SIZE || obj_is_from_pinned_alloc (obj))) {
-		if (object_is_pinned (obj))
-			return;
 		DEBUG (9, fprintf (gc_debug_file, " (marked LOS/Pinned %p (%s), size: %zd)\n", obj, safe_name (obj), objsize));
 		binary_protocol_pin (obj, (gpointer)LOAD_VTABLE (obj), safe_object_get_size ((MonoObject*)obj));
 		pin_object (obj);
@@ -366,12 +360,7 @@ major_copy_or_mark_object (void **obj_slot)
 		return;
 	}
 
-	/*
-	 * Now we know the object is in a major heap section.  All we
-	 * need to do is check whether it's already in to-space (5) or
-	 * not (4).
-	 */
-	if (MAJOR_OBJ_IS_IN_TO_SPACE (obj)) {
+	if (((GCMemSection*)block)->is_to_space) {
 		DEBUG (9, g_assert (objsize <= MAX_SMALL_OBJ_SIZE));
 		DEBUG (9, fprintf (gc_debug_file, " (already copied)\n"));
 		HEAVY_STAT (++stat_major_copy_object_failed_to_space);
diff --git a/mono/metadata/ChangeLog b/mono/metadata/ChangeLog
index 0a64469..937d342 100644
--- a/mono/metadata/ChangeLog
+++ b/mono/metadata/ChangeLog
@@ -1,5 +1,16 @@
 2010-05-24 Rodrigo Kumpera  <rkump...@novell.com>
 
+	* sgen-marksweep.c (ms_alloc_block): Register with block map.
+
+	* sgen-marksweep.c (major_copy_or_mark_object): Use block map to figure out
+	where the object lives.
+
+	* sgen-marksweep.c (major_sweep): Deregister from the block map;
+
+	Performance is stable on modified binary-tree (no structs). 
+
+2010-05-24 Rodrigo Kumpera  <rkump...@novell.com>
+
 	* sgen-major-copying.c (alloc_major_section): Register with block map.
 
 	* sgen-major-copying.c (major_copy_or_mark_object): Use block map to figure out
diff --git a/mono/metadata/sgen-marksweep.c b/mono/metadata/sgen-marksweep.c
index 7f82131..809a2c7 100644
--- a/mono/metadata/sgen-marksweep.c
+++ b/mono/metadata/sgen-marksweep.c
@@ -25,6 +25,7 @@
 
 typedef struct _MSBlockInfo MSBlockInfo;
 struct _MSBlockInfo {
+	Block block_info; /* FIXME this wastes a full pointer since Block::next is unused */
 	int obj_size;
 	gboolean pinned;
 	gboolean has_references;
@@ -243,10 +244,13 @@ ms_alloc_block (int size_index, gboolean pinned, gboolean has_references)
 	info->pinned = pinned;
 	info->has_references = has_references;
 	info->block = ms_get_empty_block ();
+	info->block_info.role = MEMORY_ROLE_GEN1;
 
 	header = (MSBlockHeader*) info->block;
 	header->info = info;
 
+	block_map_register_block (&info->block_info, (mword)info->block, (mword)(info->block + MS_BLOCK_SIZE));
+
 	/* build free list */
 	obj_start = info->block + MS_BLOCK_SKIP;
 	info->free_list = (void**)obj_start;
@@ -493,13 +497,16 @@ major_copy_or_mark_object (void **ptr)
 	void *obj = *ptr;
 	mword objsize;
 	MSBlockInfo *block;
+	Block *mblock = block_map_find_block ((mword)obj);
+	unsigned char role = mblock->role;
+
 
 	HEAVY_STAT (++stat_copy_object_called_major);
 
 	DEBUG (9, g_assert (obj));
 	DEBUG (9, g_assert (current_collection_generation == GENERATION_OLD));
 
-	if (ptr_in_nursery (obj)) {
+	if (role == MEMORY_ROLE_GEN0) {
 		int word, bit;
 		char *forwarded;
 
@@ -528,11 +535,7 @@ major_copy_or_mark_object (void **ptr)
 		return;
 	}
 
-	objsize = safe_object_get_size ((MonoObject*)obj);
-	objsize += ALLOC_ALIGN - 1;
-	objsize &= ~(ALLOC_ALIGN - 1);
-
-	if (objsize > MAX_SMALL_OBJ_SIZE) {
+	if (role >= MEMORY_ROLE_LOS_GEN1) {
 		if (object_is_pinned (obj))
 			return;
 		binary_protocol_pin (obj, (gpointer)LOAD_VTABLE (obj), safe_object_get_size ((MonoObject*)obj));
@@ -542,7 +545,7 @@ major_copy_or_mark_object (void **ptr)
 		return;
 	}
 
-	block = MS_BLOCK_FOR_OBJ (obj);
+	block = (MSBlockInfo*)mblock;
 	MS_MARK_OBJECT_AND_ENQUEUE (obj, block);
 }
 
@@ -634,6 +637,7 @@ major_sweep (void)
 			 */
 			*iter = block->next;
 
+			block_map_deregister_block ((mword)block->block, (mword)(block->block + MS_BLOCK_SIZE));
 			ms_free_block (block->block);
 			free_internal_mem (block, INTERNAL_MEM_MS_BLOCK_INFO);
 
_______________________________________________
Mono-devel-list mailing list
Mono-devel-list@lists.ximian.com
http://lists.ximian.com/mailman/listinfo/mono-devel-list

Reply via email to