diff -BburN gc_gen_old/src/common/gc_for_vm.cpp gc_gen_new/src/common/gc_for_vm.cpp
--- gc_gen_old/src/common/gc_for_vm.cpp	2007-01-09 07:04:50.000000000 +0800
+++ gc_gen_new/src/common/gc_for_vm.cpp	2007-01-10 02:42:23.000000000 +0800
@@ -150,8 +150,8 @@
 unsigned int gc_time_since_last_gc()
 { assert(0); return 0; }
 
-//int32 gc_get_hashcode(Managed_Object_Handle p_object)
-//{ return 0; }
+int32 gc_get_hashcode(Managed_Object_Handle p_object)
+{ return 23; }
 
 void gc_finalize_on_exit()
diff -BburN gc_gen_old/src/common/fix_repointed_refs.h gc_gen_new/src/common/fix_repointed_refs.h
--- gc_gen_old/src/common/fix_repointed_refs.h	2007-01-09 07:04:50.000000000 +0800
+++ gc_gen_new/src/common/fix_repointed_refs.h	2007-01-10 02:42:23.000000000 +0800
@@ -53,13 +53,13 @@
   /* scan array object */
   if (object_is_array(p_obj)) {
-    Partial_Reveal_Object* array = p_obj;
-    assert(!obj_is_primitive_array(array));
+    Partial_Reveal_Array* array = (Partial_Reveal_Array*)p_obj;
+    assert(!obj_is_primitive_array(p_obj));
 
-    int32 array_length = vector_get_length((Vector_Handle) array);
+    int32 array_length = array->array_len;
+    Partial_Reveal_Object** p_refs = (Partial_Reveal_Object**)((int)array + (int)array_first_element_offset(array));
     for (int i = 0; i < array_length; i++) {
-      Partial_Reveal_Object** p_ref = (Partial_Reveal_Object**)vector_get_element_address_ref((Vector_Handle) array, i);
-      slot_fix(p_ref);
+      slot_fix(p_refs + i);
     }
     return;
   }
diff -BburN gc_gen_old/src/common/gc_common.cpp gc_gen_new/src/common/gc_common.cpp
--- gc_gen_old/src/common/gc_common.cpp	2007-01-09 07:04:50.000000000 +0800
+++ gc_gen_new/src/common/gc_common.cpp	2007-01-10 02:42:23.000000000 +0800
@@ -24,6 +24,7 @@
 #include "../finalizer_weakref/finalizer_weakref.h"
 #include "../gen/gen.h"
 #include "../common/space_tuner.h"
+#include "interior_pointer.h"
 
 unsigned int Cur_Mark_Bit = 0x1;
 unsigned int Cur_Forward_Bit = 0x2;
@@ -255,6 +256,7 @@
   gc_set_obj_with_fin(gc);
 
   gc_gen_reclaim_heap((GC_Gen*)gc);
+  gc_reset_interior_pointer_table();
 
   gc_metadata_verify(gc, FALSE);
diff -BburN gc_gen_old/src/common/gc_common.h gc_gen_new/src/common/gc_common.h
--- gc_gen_old/src/common/gc_common.h	2007-01-09 07:04:50.000000000 +0800
+++ gc_gen_new/src/common/gc_common.h	2007-01-10 02:42:23.000000000 +0800
@@ -69,6 +69,7 @@
   /* major collection */
   MAJOR_COMPACT_SLIDE,
+  MAJOR_COMPACT_MOVE
 };
diff -BburN gc_gen_old/src/common/gc_metadata.cpp gc_gen_new/src/common/gc_metadata.cpp
--- gc_gen_old/src/common/gc_metadata.cpp	2007-01-09 07:04:50.000000000 +0800
+++ gc_gen_new/src/common/gc_metadata.cpp	2007-01-10 02:42:23.000000000 +0800
@@ -196,7 +196,7 @@
   /* generational MINOR_COLLECTION doesn't need a rootset update, but it needs a reset */
   if( gc->collect_kind != MINOR_COLLECTION ) /* MINOR but not forwarding */
     gc_update_repointed_sets(gc, metadata->gc_rootset_pool);
-
+  else gc_set_pool_clear(metadata->gc_rootset_pool);
 
 #ifndef BUILD_IN_REFERENT
diff -BburN gc_gen_old/src/common/interior_pointer.cpp gc_gen_new/src/common/interior_pointer.cpp
--- gc_gen_old/src/common/interior_pointer.cpp	2007-01-09 07:04:50.000000000 +0800
+++ gc_gen_new/src/common/interior_pointer.cpp	2007-01-10 02:42:23.000000000 +0800
@@ -75,5 +75,12 @@
     void *new_slot_contents = (void *)((Byte*)root_base + root_offset);
     *root_slot = new_slot_contents;
   }
+  //cannot reset the table here: the root set may be updated multiple times
+}
+
+void gc_reset_interior_pointer_table()
+{ interior_pointer_num_count = 0;
+  //this function serves the out-of-space case, which needs to call
+  //update_rootset_interior_pointer multiple times
+}
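A note on the two interior_pointer changes above: an interior pointer is a root that points into the middle of an object (at an array element, say) rather than at its header, so the generic root-set update cannot handle it. The GC keeps a side table pairing each such slot with the enclosing object's base and the offset into it, and rebuilds the slot as new_base + offset once the base has moved. The sketch below illustrates that bookkeeping; the struct layout, the fixed-size array, and the helper names are illustrative assumptions, not the vector-block storage the real interior_pointer.cpp uses:

    typedef struct {
        void **slot;   /* root slot holding the interior pointer      */
        void  *base;   /* base of the object the pointer falls inside */
        int    offset; /* interior pointer minus base                 */
    } Slot_Entry;

    enum { MAX_INTERIOR_ENTRIES = 4096 };            /* illustrative bound */
    static Slot_Entry table[MAX_INTERIOR_ENTRIES];
    static int interior_pointer_num_count = 0;

    /* rebuild a slot after the enclosing object moved to new_base */
    static void entry_update(Slot_Entry *e, void *new_base)
    {
        *e->slot = (void *)((char *)new_base + e->offset);
        e->base = new_base;
    }

    /* resetting only the count keeps the backing storage alive for reuse,
       which matters because the out-of-space path runs the root-set update
       once per shrink/extend round within a single collection */
    static void table_reset(void) { interior_pointer_num_count = 0; }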
diff -BburN gc_gen_old/src/common/interior_pointer.h gc_gen_new/src/common/interior_pointer.h
--- gc_gen_old/src/common/interior_pointer.h	2007-01-09 07:04:50.000000000 +0800
+++ gc_gen_new/src/common/interior_pointer.h	2007-01-10 02:42:23.000000000 +0800
@@ -26,5 +26,6 @@
 void add_root_set_entry_interior_pointer(void **slot, int offset, Boolean is_pinned);
 void gc_copy_interior_pointer_table_to_rootset();
 void update_rootset_interior_pointer();
+void gc_reset_interior_pointer_table();
 
 #endif //INTERIOR_POINTER_H
diff -BburN gc_gen_old/src/gen/gen.cpp gc_gen_new/src/gen/gen.cpp
--- gc_gen_old/src/gen/gen.cpp	2007-01-09 07:04:50.000000000 +0800
+++ gc_gen_new/src/gen/gen.cpp	2007-01-10 02:42:23.000000000 +0800
@@ -267,6 +267,9 @@
   if(!strcmp(major_algo, "MAJOR_COMPACT_SLIDE")){
     MAJOR_ALGO= MAJOR_COMPACT_SLIDE;
 
+  }else if(!strcmp(major_algo, "MAJOR_COMPACT_MOVE")){
+    MAJOR_ALGO= MAJOR_COMPACT_MOVE;
+
   }else{
     printf("\nGC algorithm setting incorrect. Will use default algorithm.\n");
diff -BburN gc_gen_old/src/gen/gen_adapt.cpp gc_gen_new/src/gen/gen_adapt.cpp
--- gc_gen_old/src/gen/gen_adapt.cpp	2007-01-09 07:04:50.000000000 +0800
+++ gc_gen_new/src/gen/gen_adapt.cpp	2007-01-10 18:16:00.000000000 +0800
@@ -53,7 +53,7 @@
   return res;
 }
 
-#define GC_MOS_MIN_EXTRA_REMAIN_SIZE (36*1024*1024)
+#define GC_MOS_MIN_EXTRA_REMAIN_SIZE (4*1024*1024)
 
 static void gc_decide_next_collect(GC_Gen* gc, int64 pause_time)
 {
   Blocked_Space* fspace = (Blocked_Space*)gc->nos;
diff -BburN gc_gen_old/src/mark_compact/mspace_collect_compact.cpp gc_gen_new/src/mark_compact/mspace_collect_compact.cpp
--- gc_gen_old/src/mark_compact/mspace_collect_compact.cpp	2007-01-09 07:04:50.000000000 +0800
+++ gc_gen_new/src/mark_compact/mspace_collect_compact.cpp	2007-01-10 02:42:23.000000000 +0800
@@ -206,8 +206,7 @@
   */
 
   /* nos is higher than mos, so we can't use a nos block as compaction target */
-  Block_Header* mspace_heap_end = (Block_Header*)space_heap_end((Space*)mspace);
-  while( cur_target_block < mspace_heap_end ){
+  while( cur_target_block ){ //For_LOS_extend
     //assert( cur_target_block <= collector->cur_compact_block);
     Block_Header* next_target_block = cur_target_block->next;
@@ -266,6 +265,9 @@
   }else if (gc->collect_kind == FALLBACK_COLLECTION){
     // printf("for Fallback");
     collector_execute_task(gc, (TaskType)slide_compact_mspace, (Space*)mspace);
+    //IS_MOVE_COMPACT = TRUE;
+    //collector_execute_task(gc, (TaskType)move_compact_mspace, (Space*)mspace);
+    //IS_MOVE_COMPACT = FALSE;
 
   }else{
 
@@ -274,6 +276,12 @@
       collector_execute_task(gc, (TaskType)slide_compact_mspace, (Space*)mspace);
       break;
 
+    case MAJOR_COMPACT_MOVE:
+      IS_MOVE_COMPACT = TRUE;
+      collector_execute_task(gc, (TaskType)move_compact_mspace, (Space*)mspace);
+      IS_MOVE_COMPACT = FALSE;
+      break;
+
     default:
       printf("\nThe specified major collection algorithm doesn't exist!\n");
       exit(0);
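The first mspace_collect_compact.cpp hunk above is easy to misread: the termination test changes from an address comparison to a NULL test because, once LOS extension can splice blocks, the MOS target-block chain is reached through Block_Header::next and its order no longer has to match address order. A reduced sketch of the resulting traversal, with the type cut down to the one field that matters and visit standing in as a placeholder:

    /* sketch: iterate target blocks by following the next links until the
       chain ends, instead of comparing against an end address that is no
       longer meaningful after a LOS extension */
    struct Block_Header { struct Block_Header *next; };

    static void visit_target_blocks(struct Block_Header *cur_target_block,
                                    void (*visit)(struct Block_Header *))
    {
        while( cur_target_block ){           /* For_LOS_extend */
            struct Block_Header *next_target_block = cur_target_block->next;
            visit(cur_target_block);
            cur_target_block = next_target_block;
        }
    }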
diff -BburN gc_gen_old/src/mark_compact/mspace_extend_compact.cpp gc_gen_new/src/mark_compact/mspace_extend_compact.cpp
--- gc_gen_old/src/mark_compact/mspace_extend_compact.cpp	2007-01-09 07:04:50.000000000 +0800
+++ gc_gen_new/src/mark_compact/mspace_extend_compact.cpp	2007-01-10 03:02:36.000000000 +0800
@@ -15,7 +15,7 @@
  */
 
 /**
- * @author Xiao-Feng Li, 2006/10/05
+ * @author Chunrong Lai, 2006/12/25
  */
 
 #include "mspace_collect_compact.h"
@@ -25,30 +25,27 @@
 #include "../gen/gen.h"
 #include "../common/fix_repointed_refs.h"
 #include "../common/interior_pointer.h"
+#include "../verify/verify_live_heap.h"
 
-#define SPACE_ALLOC_UNIT ( (GC_BLOCK_SIZE_BYTES > SYSTEM_ALLOC_UNIT) ? GC_BLOCK_SIZE_BYTES : SYSTEM_ALLOC_UNIT)
-
-static volatile unsigned int mem_changed_size;
 static volatile Block *mos_first_new_block = NULL;
 static volatile Block *nos_first_free_block = NULL;
 static volatile Block *first_block_to_move = NULL;
 
-static void set_first_and_end_block_to_move(Collector *collector)
+static void set_first_and_end_block_to_move(Collector *collector, unsigned int mem_changed_size)
 {
   GC_Gen *gc_gen = (GC_Gen *)collector->gc;
   Mspace *mspace = gc_gen->mos;
   Fspace *fspace = gc_gen->nos;
 
-  assert(mem_changed_size % SPACE_ALLOC_UNIT);
+  assert(!(mem_changed_size % SPACE_ALLOC_UNIT));
   unsigned int mos_added_block_num = mem_changed_size >> GC_BLOCK_SHIFT_COUNT; // number of blocks that need moving
-  nos_first_free_block = &mspace->blocks[mspace->free_block_idx - mspace->first_block_idx];
   first_block_to_move = nos_first_free_block - mos_added_block_num;
   if(first_block_to_move < (Block *)space_heap_start((Space *)fspace))
     first_block_to_move = (Block *)space_heap_start((Space *)fspace);
 }
 
-static unsigned int fspace_shrink(Fspace *fspace, Block *nos_first_free_block)
+static unsigned int fspace_shrink(Fspace *fspace)
 {
   void *committed_nos_end = (void *)((unsigned int)space_heap_start((Space *)fspace) + fspace->committed_heap_size);
@@ -77,13 +74,22 @@
   return decommit_size;
 }
 
-static Block *mspace_extend(Mspace *mspace, Fspace *fspace, unsigned int commit_size)
+static void link_mspace_extended_blocks(Mspace *mspace, Fspace *fspace)
+{
+  Block_Header *old_last_mos_block = (Block_Header *)(mos_first_new_block -1);
+  old_last_mos_block->next = (Block_Header *)mos_first_new_block;
+  void *new_committed_mos_end = (void *)((unsigned int)space_heap_start((Space *)mspace) + mspace->committed_heap_size);
+  Block_Header *new_last_mos_block = (Block_Header *)((Block *)new_committed_mos_end -1);
+  new_last_mos_block->next = (Block_Header *)space_heap_start((Space *)fspace);
+}
+
+static Block *mspace_extend_without_link(Mspace *mspace, unsigned int commit_size)
 {
   assert(commit_size && !(commit_size % SPACE_ALLOC_UNIT));
 
   void *committed_mos_end = (void *)((unsigned int)space_heap_start((Space *)mspace) + mspace->committed_heap_size);
   void *commit_base = committed_mos_end;
-  assert((unsigned int)committed_mos_end % SPACE_ALLOC_UNIT);
+  assert(!((unsigned int)committed_mos_end % SPACE_ALLOC_UNIT));
 
   void *result = vm_commit_mem(commit_base, commit_size);
   assert(result == commit_base);
@@ -99,11 +105,11 @@
   for(i=0; block < (Block_Header *)new_end; i++){
     block_init(block);
     block->block_idx = start_idx + i;
-    last_block->next = block;
+    if(i != 0) last_block->next = block;
     last_block = block;
     block = (Block_Header *)((Block *)block + 1);
   }
-  last_block->next = (Block_Header *)space_heap_start((Space *)fspace);
+  last_block->next = NULL;
   mspace->ceiling_block_idx = last_block->block_idx;
   mspace->num_managed_blocks = mspace->committed_heap_size >> GC_BLOCK_SHIFT_COUNT;
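The two assert rewrites above are genuine bug fixes rather than cleanups: a size is a multiple of SPACE_ALLOC_UNIT exactly when the remainder is zero, so the assertion must negate the modulo. The pre-patch form fired on aligned values and accepted misaligned ones. A compilable illustration, with an illustrative unit value standing in for max(GC_BLOCK_SIZE_BYTES, SYSTEM_ALLOC_UNIT):

    #include <assert.h>

    #define SPACE_ALLOC_UNIT (64*1024)   /* illustrative value only */

    static void check_alignment(unsigned int mem_changed_size)
    {
        /* wrong (pre-patch): assert(mem_changed_size % SPACE_ALLOC_UNIT);
           that form fires precisely when the size IS a multiple of the unit */
        assert(!(mem_changed_size % SPACE_ALLOC_UNIT));  /* remainder must be 0 */
    }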
@@ -133,53 +139,43 @@
   return NULL;
 }
 
-static void mspace_recompute_object_target(Collector *collector)
+inline void object_refix_ref_slots(Partial_Reveal_Object* p_obj, void *start_address, void *end_address, unsigned int addr_diff)
 {
-  GC_Gen *gc_gen = (GC_Gen *)collector->gc;
-  Mspace *mspace = gc_gen->mos;
-  Fspace *fspace = gc_gen->nos;
+  if( !object_has_ref_field(p_obj) ) return;
 
-  unsigned int block_diff = first_block_to_move - mos_first_new_block;
-  unsigned int addr_diff = block_diff << GC_BLOCK_SHIFT_COUNT;
+  /* scan array object */
+  if (object_is_array(p_obj)) {
+    Partial_Reveal_Array* array = (Partial_Reveal_Array*)p_obj;
+    assert(!obj_is_primitive_array(p_obj));
 
-  assert(!collector->rem_set);
-  collector->rem_set = free_set_pool_get_entry(collector->gc->metadata);
-  Block_Header *dest_block;
-  while(Block_Header *block = mspace_block_iter_next_for_extension(mspace, (Block_Header *)nos_first_free_block)){
-    Partial_Reveal_Object *p_obj = (Partial_Reveal_Object *)block->base;
-    Partial_Reveal_Object *block_end = (Partial_Reveal_Object *)block->new_free; // new_free or free depends on whether reset is done or not
-    unsigned int dest_addr;
+    int32 array_length = array->array_len;
+    Partial_Reveal_Object** p_refs = (Partial_Reveal_Object**)((int)array + (int)array_first_element_offset(array));
 
-    while(p_obj < block_end){
-      dest_addr = (unsigned int)p_obj - addr_diff;
-      Obj_Info_Type obj_info = get_obj_info(p_obj);
-      if(obj_info != 0){
-        collector_remset_add_entry(collector, (Partial_Reveal_Object **)dest_addr);
-        collector_remset_add_entry(collector, (Partial_Reveal_Object **)obj_info);
-      }
-      obj_set_fw_in_oi(p_obj, (void *)dest_addr);
-      p_obj = obj_end(p_obj);
+    for (int i = 0; i < array_length; i++) {
+      Partial_Reveal_Object** p_ref = p_refs + i;
+      Partial_Reveal_Object* p_element = *p_ref;
+      if((p_element > start_address) && (p_element < end_address))
+        *p_ref = (Partial_Reveal_Object*)((unsigned int)p_element - addr_diff);
     }
-    dest_block = (Block_Header *)((Block *)block - block_diff);
-    dest_block->new_free = (void *)dest_addr;
+    return;
   }
-  pool_put_entry(collector->gc->metadata->collector_remset_pool, collector->rem_set);
-  collector->rem_set = NULL;
 
-  if(first_block_to_move == (Block *)space_heap_start((Space *)fspace)){
-    unsigned int free_block_idx = dest_block->block_idx + 1;
-    unsigned int cur_free_idx = (unsigned int)mspace->free_block_idx;
-    while(free_block_idx > cur_free_idx){
-      atomic_cas32(&mspace->free_block_idx, free_block_idx, cur_free_idx);
-      cur_free_idx = (unsigned int)mspace->free_block_idx;
-    }
-  } else {
-    mspace->free_block_idx = ((Block_Header *)first_block_to_move)->block_idx;
+  /* scan non-array object */
+  int *offset_scanner = init_object_scanner(p_obj);
+  while (true) {
+    Partial_Reveal_Object** p_ref = (Partial_Reveal_Object**)offset_get_ref(offset_scanner, p_obj);
+    if (p_ref == NULL) break; /* terminating ref slot */
+
+    Partial_Reveal_Object* p_element = *p_ref;
+    if((p_element > start_address) && (p_element < end_address))
+      *p_ref = (Partial_Reveal_Object*)((unsigned int)p_element - addr_diff);
+    offset_scanner = offset_next_ref(offset_scanner);
   }
-}
+  return;
+}
 
-static void mspace_refix_repointed_refs(Collector *collector, Mspace *mspace)
+static void mspace_refix_repointed_refs(Collector *collector, Mspace* mspace, void *start_address, void *end_address, unsigned int addr_diff)
 {
   Block_Header *mspace_first_free_block = (Block_Header *)&mspace->blocks[mspace->free_block_idx - mspace->first_block_idx];
@@ -187,13 +183,62 @@
     Partial_Reveal_Object *p_obj = (Partial_Reveal_Object *)block->base;
     Partial_Reveal_Object *block_end = (Partial_Reveal_Object *)block->new_free; // new_free or free depends on whether reset is done or not
     while(p_obj < block_end){
-      object_fix_ref_slots(p_obj);
+      object_refix_ref_slots(p_obj, start_address, end_address, addr_diff);
       p_obj = obj_end(p_obj);
     }
   }
 }
-static void move_compacted_blocks_to_mspace(Collector *collector)
+static void lspace_refix_repointed_refs(Collector* collector, Lspace* lspace, void *start_address, void *end_address, unsigned int addr_diff)
+{
+  unsigned int start_pos = 0;
+  Partial_Reveal_Object* p_obj = lspace_get_first_marked_object(lspace, &start_pos);
+  while( p_obj){
+    assert(obj_is_marked_in_vt(p_obj));
+    object_refix_ref_slots(p_obj, start_address, end_address, addr_diff);
+    p_obj = lspace_get_next_marked_object(lspace, &start_pos);
+  }
+}
+
+
+static void gc_reupdate_repointed_sets(GC* gc, Pool* pool, void *start_address, void *end_address, unsigned int addr_diff)
+{
+  GC_Metadata *metadata = gc->metadata;
+  assert(gc->collect_kind != MINOR_COLLECTION);
+
+  pool_iterator_init(pool);
+
+  while(Vector_Block *root_set = pool_iterator_next(pool)){
+    unsigned int *iter = vector_block_iterator_init(root_set);
+    while(!vector_block_iterator_end(root_set,iter)){
+      Partial_Reveal_Object **p_ref = (Partial_Reveal_Object **)*iter;
+      iter = vector_block_iterator_advance(root_set,iter);
+
+      Partial_Reveal_Object *p_obj = *p_ref;
+      if((p_obj > start_address) && (p_obj < end_address))
+        *p_ref = (Partial_Reveal_Object*)((unsigned int)p_obj - addr_diff);
+    }
+  }
+}
+
+static void gc_refix_rootset(Collector *collector, void *start_address, void *end_address, unsigned int addr_diff)
+{
+  GC *gc = collector->gc;
+  GC_Metadata *metadata = gc->metadata;
+
+  /* only for MAJOR_COLLECTION and FALLBACK_COLLECTION */
+  assert(gc->collect_kind != MINOR_COLLECTION);
+
+  gc_reupdate_repointed_sets(gc, metadata->gc_rootset_pool, start_address, end_address, addr_diff);
+
+#ifndef BUILD_IN_REFERENT
+  gc_update_finref_repointed_refs(gc);
+#endif
+
+  update_rootset_interior_pointer();
+}
+
+static void move_compacted_blocks_to_mspace(Collector *collector, unsigned int addr_diff)
 {
   GC_Gen *gc_gen = (GC_Gen *)collector->gc;
   Mspace *mspace = gc_gen->mos;
@@ -201,19 +246,61 @@
   while(Block_Header *block = mspace_block_iter_next_for_extension(mspace, (Block_Header *)nos_first_free_block)){
     Partial_Reveal_Object *p_obj = (Partial_Reveal_Object *)block->base;
-    Block_Header *dest_block = GC_BLOCK_HEADER(obj_get_fw_in_oi(p_obj));
-    void *src_base = (void *)p_obj;
+    void *src_base = (void *)block->base;
     void *block_end = block->new_free; // new_free or free depends on whether reset is done or not
     unsigned int size = (unsigned int)block_end - (unsigned int)src_base;
+    Block_Header *dest_block = GC_BLOCK_HEADER((void *)((unsigned int)src_base - addr_diff));
     memmove(dest_block->base, src_base, size);
+    dest_block->new_free = (void *)((unsigned int)block_end - addr_diff);
+    if(verify_live_heap)
+      while (p_obj < block_end) {
+        event_collector_doublemove_obj(p_obj, (Partial_Reveal_Object *)((unsigned int)p_obj - addr_diff), collector);
+        p_obj = obj_end(p_obj);
+      }
   }
 }
 
 static volatile unsigned int num_space_changing_collectors = 0;
-static volatile unsigned int num_recomputing_collectors = 0;
+
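Every phase in the extension code below synchronizes with the same counter-barrier idiom this patch uses throughout: the first thread to arrive CAS-resets the counter from the previous phase's final value, each thread increments it after finishing its share of the work, and the last one runs a single-threaded epilogue before bumping the counter once more to release the spinners. A condensed sketch of that shape, assuming the port-layer atomic_cas32(addr, new_val, comparand) and atomic_inc32 (returns the pre-increment value) seen elsewhere in this patch; the two work callbacks are placeholders:

    static volatile unsigned int num_phase_collectors = 0;

    static void phase_barrier(unsigned int num_active_collectors,
                              void (*parallel_work)(void),
                              void (*last_thread_epilogue)(void))
    {
        /* first arrival resets the counter from the previous phase's value */
        atomic_cas32(&num_phase_collectors, 0, num_active_collectors + 1);

        parallel_work();

        unsigned int old_num = atomic_inc32(&num_phase_collectors); /* pre-increment value */
        if( ++old_num == num_active_collectors ){
            /* last worker in: single-threaded epilogue, then open the barrier */
            last_thread_epilogue();
            num_phase_collectors++;   /* now num_active_collectors + 1 */
        }
        while(num_phase_collectors != num_active_collectors + 1);   /* spin until open */
    }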
+#ifndef STATIC_NOS_MAPPING
+void mspace_extend_compact(Collector *collector)
+{
+  GC_Gen *gc_gen = (GC_Gen *)collector->gc;
+  Mspace *mspace = gc_gen->mos;
+  Fspace *fspace = gc_gen->nos;
+  Lspace *lspace = gc_gen->los;
+
+  unsigned int num_active_collectors = gc_gen->num_active_collectors;
+  unsigned int old_num;
+  atomic_cas32( &num_space_changing_collectors, 0, num_active_collectors + 1);
+  old_num = atomic_inc32(&num_space_changing_collectors);
+  if( ++old_num == num_active_collectors ){
+    Block *old_nos_boundary = fspace->blocks;
+    nos_boundary = &mspace->blocks[mspace->free_block_idx - mspace->first_block_idx];
+    assert(nos_boundary > old_nos_boundary);
+    unsigned int mem_change_size = ((Block *)nos_boundary - old_nos_boundary) << GC_BLOCK_SHIFT_COUNT;
+    fspace->heap_start = nos_boundary;
+    fspace->blocks = (Block *)nos_boundary;
+    fspace->committed_heap_size -= mem_change_size;
+    fspace->num_managed_blocks = fspace->committed_heap_size >> GC_BLOCK_SHIFT_COUNT;
+    fspace->num_total_blocks = fspace->num_managed_blocks;
+    fspace->first_block_idx = ((Block_Header *)nos_boundary)->block_idx;
+    fspace->free_block_idx = fspace->first_block_idx;
+
+    mspace->heap_end = nos_boundary;
+    mspace->committed_heap_size += mem_change_size;
+    mspace->num_managed_blocks = mspace->committed_heap_size >> GC_BLOCK_SHIFT_COUNT;
+    mspace->num_total_blocks = mspace->num_managed_blocks;
+    mspace->ceiling_block_idx = ((Block_Header *)nos_boundary)->block_idx - 1;
+
+    num_space_changing_collectors ++;
+  }
+  while(num_space_changing_collectors != num_active_collectors + 1);
+}
+
+#else
 
 static volatile unsigned int num_refixing_collectors = 0;
 static volatile unsigned int num_moving_collectors = 0;
-static volatile unsigned int num_restoring_collectors = 0;
 
 void mspace_extend_compact(Collector *collector)
 {
@@ -225,10 +312,8 @@
   unsigned int num_active_collectors = gc_gen->num_active_collectors;
   unsigned int old_num;
 
-  assert(gc_gen->tuner->kind == TRANS_NOTHING);
-
   Block *nos_first_block = fspace->blocks;
-  Block *nos_first_free_block = &mspace->blocks[mspace->free_block_idx - mspace->first_block_idx];
+  nos_first_free_block = &mspace->blocks[mspace->free_block_idx - mspace->first_block_idx];
   assert(nos_first_free_block > nos_first_block);
 
   while(nos_first_free_block > nos_first_block){
@@ -236,42 +321,28 @@
     atomic_cas32( &num_space_changing_collectors, 0, num_active_collectors + 1);
     old_num = atomic_inc32(&num_space_changing_collectors);
     if( old_num == 0 ){
-      mem_changed_size = fspace_shrink(fspace, nos_first_free_block);
-      mos_first_new_block = mspace_extend(mspace, fspace, mem_changed_size);
-
-      set_first_and_end_block_to_move(collector);
-      mspace_block_iter_init_for_extension(mspace, (Block_Header *)first_block_to_move);
+      unsigned int mem_changed_size = fspace_shrink(fspace);
+      mos_first_new_block = mspace_extend_without_link(mspace, mem_changed_size);
 
-      num_restoring_collectors++;
-    }
-    while(num_restoring_collectors != num_active_collectors + 1);
-
-
-    atomic_cas32( &num_recomputing_collectors, 0, num_active_collectors+1);
-
-    mspace_recompute_object_target(collector);
-
-    old_num = atomic_inc32(&num_recomputing_collectors);
-    if( ++old_num == num_active_collectors ){
-      /* init the iterator: prepare for refixing */
+      set_first_and_end_block_to_move(collector, mem_changed_size);
+      //mspace_block_iter_init_for_extension(mspace, (Block_Header *)first_block_to_move);
       mspace_block_iter_init_for_extension(mspace, (Block_Header *)mspace->blocks);
-      num_recomputing_collectors++;
-    }
-    while(num_recomputing_collectors != num_active_collectors + 1);
+      num_space_changing_collectors++;
+    }
+    while(num_space_changing_collectors != num_active_collectors + 1);
 
     atomic_cas32( &num_refixing_collectors, 0, num_active_collectors+1);
 
-    mspace_refix_repointed_refs(collector, mspace);
+    mspace_refix_repointed_refs(collector, mspace, (void *)first_block_to_move, (void *)nos_first_free_block, (first_block_to_move - mos_first_new_block) << GC_BLOCK_SHIFT_COUNT);
 
     old_num = atomic_inc32(&num_refixing_collectors);
    if( ++old_num == num_active_collectors ){
      /* init the iterator: prepare for refixing */
-      lspace_fix_repointed_refs(collector, lspace);
-      gc_fix_rootset(collector);
-
+      lspace_refix_repointed_refs(collector, lspace, (void *)first_block_to_move, (void *)nos_first_free_block, (first_block_to_move - mos_first_new_block) << GC_BLOCK_SHIFT_COUNT);
+      gc_refix_rootset(collector, (void *)first_block_to_move, (void *)nos_first_free_block, (first_block_to_move - mos_first_new_block) << GC_BLOCK_SHIFT_COUNT);
+      link_mspace_extended_blocks(mspace, fspace);
       mspace_block_iter_init_for_extension(mspace, (Block_Header *)first_block_to_move);
-
       num_refixing_collectors++;
     }
     while(num_refixing_collectors != num_active_collectors + 1);
 
@@ -279,27 +351,21 @@
     atomic_cas32( &num_moving_collectors, 0, num_active_collectors+1);
 
-    move_compacted_blocks_to_mspace(collector);
+    move_compacted_blocks_to_mspace(collector, (first_block_to_move - mos_first_new_block) << GC_BLOCK_SHIFT_COUNT);
 
     old_num = atomic_inc32(&num_moving_collectors);
     if( ++old_num == num_active_collectors ){
-      /* init the iterator: prepare for refixing */
-      lspace_fix_repointed_refs(collector, lspace);
-      gc_fix_rootset(collector);
-
+      if(first_block_to_move == nos_first_block) {
+        void *new_committed_mos_end = (void *)((unsigned int)space_heap_start((Space *)mspace) + mspace->committed_heap_size);
+        Block_Header *new_last_mos_block = (Block_Header *)((Block *)new_committed_mos_end -1);
+        mspace->free_block_idx = new_last_mos_block->block_idx + 1;
+      }else{
+        mspace->free_block_idx = ((Block_Header*)first_block_to_move)->block_idx;
+      }
+      nos_first_free_block = first_block_to_move;
       num_moving_collectors++;
     }
     while(num_moving_collectors != num_active_collectors + 1);
-
-
-    atomic_cas32( &num_restoring_collectors, 0, num_active_collectors);
-
-    collector_restore_obj_info(collector);
-
-    atomic_inc32(&num_restoring_collectors);
-    while(num_restoring_collectors != num_active_collectors);
-
-
-    nos_first_free_block = &mspace->blocks[mspace->free_block_idx - mspace->first_block_idx];
   }
 }
+#endif
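Both variants of mspace_extend_compact above feed the same three values into every refix call: the boundaries of the block range being moved, [first_block_to_move, nos_first_free_block), and the byte distance it travels, (first_block_to_move - mos_first_new_block) << GC_BLOCK_SHIFT_COUNT. The per-slot rule they all implement is tiny; a self-contained restatement of it, using 32-bit pointer arithmetic as the surrounding code does:

    /* sketch of the refix rule: a reference is rewritten only if it points
       into the block range about to be moved, and the fix is a subtraction */
    static void refix_slot(void **p_ref, void *start_address, void *end_address,
                           unsigned int addr_diff)
    {
        void *p_obj = *p_ref;
        if((p_obj > start_address) && (p_obj < end_address))
            *p_ref = (void *)((unsigned int)p_obj - addr_diff);
    }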
diff -BburN gc_gen_old/src/mark_compact/mspace_move_compact.cpp gc_gen_new/src/mark_compact/mspace_move_compact.cpp
--- gc_gen_old/src/mark_compact/mspace_move_compact.cpp	1970-01-01 08:00:00.000000000 +0800
+++ gc_gen_new/src/mark_compact/mspace_move_compact.cpp	2007-01-10 02:42:23.000000000 +0800
@@ -0,0 +1,233 @@
+/*
+ * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @author Chunrong Lai, 2006/12/01
+ */
+
+#include "mspace_collect_compact.h"
+#include "../trace_forward/fspace.h"
+#include "../mark_sweep/lspace.h"
+#include "../finalizer_weakref/finalizer_weakref.h"
+
+struct GC_Gen;
+Space* gc_get_nos(GC_Gen* gc);
+Space* gc_get_mos(GC_Gen* gc);
+Space* gc_get_los(GC_Gen* gc);
+
+#include "../verify/verify_live_heap.h"
+
+static void mspace_move_objects(Collector* collector, Mspace* mspace)
+{
+  Block_Header* curr_block = collector->cur_compact_block;
+  Block_Header* dest_block = collector->cur_target_block;
+
+  void* dest_sector_addr = dest_block->base;
+  Boolean is_fallback = (collector->gc->collect_kind == FALLBACK_COLLECTION);
+
+  while( curr_block ){
+    void* start_pos;
+    Partial_Reveal_Object* p_obj = block_get_first_marked_object(curr_block, &start_pos);
+
+    if( !p_obj ){
+      curr_block = mspace_get_next_compact_block(collector, mspace);
+      continue;
+    }
+
+    int curr_sector = OBJECT_INDEX_TO_OFFSET_TABLE(p_obj);
+    void* src_sector_addr = p_obj;
+
+    while( p_obj ){
+      assert( obj_is_marked_in_vt(p_obj));
+      /* we don't check whether it's set, since only objects remaining from the last NOS partial collection need it */
+      obj_unmark_in_oi(p_obj);
+
+      unsigned int curr_sector_size = (unsigned int)start_pos - (unsigned int)src_sector_addr;
+
+      /* check whether the dest block cannot hold this sector; if so, grab the next one */
+      unsigned int block_end = (unsigned int)GC_BLOCK_END(dest_block);
+      if( ((unsigned int)dest_sector_addr + curr_sector_size) > block_end ){
+        dest_block->new_free = dest_sector_addr;
+        dest_block = mspace_get_next_target_block(collector, mspace);
+        if(dest_block == NULL){
+          collector->result = FALSE;
+          return;
+        }
+        block_end = (unsigned int)GC_BLOCK_END(dest_block);
+        dest_sector_addr = dest_block->base;
+      }
+
+      assert(((unsigned int)dest_sector_addr + curr_sector_size) <= block_end );
+
+      /* check whether the next marked object still belongs to the current sector; if so, loop back to grow the sector. FIXME:: we should add a condition for block check */
+      p_obj = block_get_next_marked_object(curr_block, &start_pos);
+      if ((p_obj != NULL) && (OBJECT_INDEX_TO_OFFSET_TABLE(p_obj) == curr_sector))
+        continue;
+      /* the current sector is done; let's move it */
+      unsigned int sector_distance = (unsigned int)src_sector_addr - (unsigned int)dest_sector_addr;
+      curr_block->table[curr_sector] = sector_distance;
+
+      if (verify_live_heap) {
+        Partial_Reveal_Object *rescan_obj = (Partial_Reveal_Object *)src_sector_addr;
+        void *rescan_pos = (Partial_Reveal_Object *)((unsigned int)rescan_obj + vm_object_size(rescan_obj));
+        while ((unsigned int)rescan_obj < (unsigned int)src_sector_addr + curr_sector_size) {
+          Partial_Reveal_Object* targ_obj = (Partial_Reveal_Object *)((unsigned int)rescan_obj - sector_distance);
+          if(is_fallback)
+            event_collector_doublemove_obj(rescan_obj, targ_obj, collector);
+          else
+            event_collector_move_obj(rescan_obj, targ_obj, collector);
+          rescan_obj = block_get_next_marked_object(curr_block, &rescan_pos);
+          if(rescan_obj == NULL) break;
+        }
+      }
+
+      memmove(dest_sector_addr, src_sector_addr, curr_sector_size);
+
+      dest_sector_addr = (void*)((unsigned int) dest_sector_addr + curr_sector_size);
+      src_sector_addr = p_obj;
+      curr_sector = OBJECT_INDEX_TO_OFFSET_TABLE(p_obj);
+    }
+    curr_block = mspace_get_next_compact_block(collector, mspace);
+  }
+  dest_block->new_free = dest_sector_addr;
+
+  return;
+}
+
+#include "../common/fix_repointed_refs.h"
+
+static void mspace_fix_repointed_refs(Collector *collector, Mspace *mspace)
+{
+  Block_Header* curr_block = mspace_block_iterator_next(mspace);
+
+  while( curr_block){
+    if(curr_block->block_idx >= mspace->free_block_idx) break;
+    curr_block->free = curr_block->new_free;
+    block_fix_ref_after_marking(curr_block);
+    curr_block = mspace_block_iterator_next(mspace);
+  }
+
+  return;
+}
+
+static volatile unsigned int num_marking_collectors = 0;
+static volatile unsigned int num_fixing_collectors = 0;
+static volatile unsigned int num_moving_collectors = 0;
+static volatile unsigned int num_extending_collectors = 0;
+
+void move_compact_mspace(Collector* collector)
+{
+  GC* gc = collector->gc;
+  Mspace* mspace = (Mspace*)gc_get_mos((GC_Gen*)gc);
+  Fspace* fspace = (Fspace*)gc_get_nos((GC_Gen*)gc);
+  Lspace* lspace = (Lspace*)gc_get_los((GC_Gen*)gc);
+
+  unsigned int num_active_collectors = gc->num_active_collectors;
+
+  /* Pass 1: **************************************************
+     mark all live objects in the heap, and save all the slots
+     that have references that are going to be repointed */
+  unsigned int old_num = atomic_cas32( &num_marking_collectors, 0, num_active_collectors+1);
+
+  if(gc->collect_kind != FALLBACK_COLLECTION)
+    mark_scan_heap(collector);
+  else
+    fallback_mark_scan_heap(collector);
+
+  old_num = atomic_inc32(&num_marking_collectors);
+  if( ++old_num == num_active_collectors ){
+    /* last collector's world here */
+    /* prepare for next phase */
+    gc_init_block_for_collectors(gc, mspace);
+
+    if(!IGNORE_FINREF )
+      collector_identify_finref(collector);
+#ifndef BUILD_IN_REFERENT
+    else {
+      gc_set_weakref_sets(gc);
+      update_ref_ignore_finref(collector);
+    }
+#endif
+
+    /* let other collectors go */
+    num_marking_collectors++;
+  }
+  while(num_marking_collectors != num_active_collectors + 1);
+
+  /* Pass 2: **************************************************
+     move objects and set the forwarding offset table */
+  atomic_cas32( &num_moving_collectors, 0, num_active_collectors+1);
+
+  mspace_move_objects(collector, mspace);
+
+  old_num = atomic_inc32(&num_moving_collectors);
+  if( ++old_num == num_active_collectors ){
+    /* single thread world */
+    gc->collect_result = gc_collection_result(gc);
+    if(!gc->collect_result){
+      num_moving_collectors++;
+      return;
+    }
+
+    gc_reset_block_for_collectors(gc, mspace);
+    mspace_block_iterator_init(mspace);
+    num_moving_collectors++;
+  }
+  while(num_moving_collectors != num_active_collectors + 1);
+  if(!gc->collect_result) return;
+
+  /* Pass 3: **************************************************
+     update all references whose pointed objects were moved */
+  old_num = atomic_cas32( &num_fixing_collectors, 0, num_active_collectors+1);
+
+  mspace_fix_repointed_refs(collector, mspace);
+
+  old_num = atomic_inc32(&num_fixing_collectors);
+  if( ++old_num == num_active_collectors ){
+    /* last collector's world here */
+    lspace_fix_repointed_refs(collector, lspace);
+    gc_fix_rootset(collector);
+    update_mspace_info_for_los_extension(mspace);
+    num_fixing_collectors++;
+  }
+  while(num_fixing_collectors != num_active_collectors + 1);
+
+  /* dealing with out of space in mspace */
+  if(mspace->free_block_idx > fspace->first_block_idx){
+    atomic_cas32( &num_extending_collectors, 0, num_active_collectors);
+    mspace_extend_compact(collector);
+    atomic_inc32(&num_extending_collectors);
+    while(num_extending_collectors != num_active_collectors);
+  }
+
+  /* Leftover: ************************************************** */
+  if( collector->thread_handle != 0 ) return;
+
+  if(!IGNORE_FINREF )
+    gc_put_finref_to_vm(gc);
+
+  mspace_reset_after_compaction(mspace);
+  fspace_reset_for_allocation(fspace);
+
+  gc_set_pool_clear(gc->metadata->gc_rootset_pool);
+
+  return;
+}
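The key data structure in the new file above is the per-block sector offset table: mspace_move_objects records one forwarding distance per sector in curr_block->table[curr_sector], so move compaction needs no per-object forwarding pointer and a moved object's new address can be recovered arithmetically during the fix-up pass. A sketch of that lookup, with the block shape reduced to the table alone and an illustrative sector count; OBJECT_INDEX_TO_OFFSET_TABLE is assumed, as in the file above, to map an object's address to its sector index within the block:

    #define SECTOR_COUNT 128                           /* illustrative */
    struct Block_Header { unsigned int table[SECTOR_COUNT]; };

    /* table[sector] holds (src_sector_addr - dest_sector_addr), so the new
       location is simply the old address minus the recorded distance */
    static void *forwarding_address(struct Block_Header *block,
                                    unsigned int sector_idx, void *p_obj)
    {
        return (void *)((unsigned int)p_obj - block->table[sector_idx]);
    }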
diff -BburN gc_gen_old/src/mark_compact/mspace_slide_compact.cpp gc_gen_new/src/mark_compact/mspace_slide_compact.cpp
--- gc_gen_old/src/mark_compact/mspace_slide_compact.cpp	2007-01-09 07:04:50.000000000 +0800
+++ gc_gen_new/src/mark_compact/mspace_slide_compact.cpp	2007-01-10 02:42:23.000000000 +0800
@@ -322,8 +322,8 @@
 
   if(verify_live_heap){
     /* we forwarded it, so we need to remember it for verification */
-    if(is_fallback && src_block>=nos_fw_start_block && obj_belongs_to_space(p_obj, (Space*)mspace))
-      event_collector_move_obj(p_obj, p_target_obj, collector);
+    if(is_fallback)
+      event_collector_doublemove_obj(p_obj, p_target_obj, collector);
     else
       event_collector_move_obj(p_obj, p_target_obj, collector);
   }
diff -BburN gc_gen_old/src/verify/verify_live_heap.cpp gc_gen_new/src/verify/verify_live_heap.cpp
--- gc_gen_old/src/verify/verify_live_heap.cpp	2007-01-09 07:04:50.000000000 +0800
+++ gc_gen_new/src/verify/verify_live_heap.cpp	2007-01-10 02:42:23.000000000 +0800
@@ -35,3 +35,6 @@
 void event_collector_move_obj(Partial_Reveal_Object *p_old, Partial_Reveal_Object *p_new, Collector* collector)
 { return; }
 
+void event_collector_doublemove_obj(Partial_Reveal_Object *p_old, Partial_Reveal_Object *p_new, Collector* collector)
+{ return; }
+
diff -BburN gc_gen_old/src/verify/verify_live_heap.h gc_gen_new/src/verify/verify_live_heap.h
--- gc_gen_old/src/verify/verify_live_heap.h	2007-01-09 07:04:50.000000000 +0800
+++ gc_gen_new/src/verify/verify_live_heap.h	2007-01-10 02:42:23.000000000 +0800
@@ -29,4 +29,7 @@
 void event_collector_move_obj(Partial_Reveal_Object *p_old, Partial_Reveal_Object *p_new, Collector* collector);
 void gc_verify_heap(GC* gc, Boolean is_before_gc);
 
+/* functions used in fallback compaction and the out-of-space cases */
+void event_collector_doublemove_obj(Partial_Reveal_Object *p_old, Partial_Reveal_Object *p_new, Collector* collector);
+
 #endif
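A closing note on event_collector_doublemove_obj, which several hunks above introduce: in a fallback collection, and in the out-of-space extension path, a live object can move twice within a single GC cycle, so a verifier that records only one (old, new) pair per object would chase a stale intermediate address. The shipped verify_live_heap.cpp stubs both events to no-ops; the sketch below shows one plausible way a verifier could record them, with the log storage and field names being assumptions of this illustration rather than the real verifier's design:

    /* sketch: a verifier log that distinguishes single moves from double moves */
    typedef struct { void *p_old; void *p_new; int is_double; } Move_Event;

    static Move_Event move_log[1 << 16];     /* illustrative capacity */
    static unsigned int move_log_count = 0;

    static void log_move(void *p_old, void *p_new, int is_double)
    {
        if(move_log_count < sizeof(move_log)/sizeof(move_log[0])){
            Move_Event e = { p_old, p_new, is_double };
            move_log[move_log_count++] = e;
        }
    }

    void event_collector_move_obj_sketch(void *p_old, void *p_new)
    { log_move(p_old, p_new, 0); }

    /* the object already moved once this cycle: p_old is an intermediate
       address, not the pre-GC one, so verification must chain the two hops */
    void event_collector_doublemove_obj_sketch(void *p_old, void *p_new)
    { log_move(p_old, p_new, 1); }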