Index: vm/gc_gen/src/common/gc_block.h =================================================================== --- vm/gc_gen/src/common/gc_block.h (revision 680279) +++ vm/gc_gen/src/common/gc_block.h (working copy) @@ -209,7 +209,10 @@ while(cur_obj < block_end){ if( obj_is_marked_in_vt(cur_obj)) return cur_obj; - cur_obj = obj_end(cur_obj); + if( obj_vt_is_to_next_obj(cur_obj)) + cur_obj = obj_get_next_obj_from_vt(cur_obj); + else + cur_obj = obj_end(cur_obj); PREFETCH( ((POINTER_SIZE_INT) cur_obj) + 64); } Index: vm/gc_gen/src/common/gc_common.h =================================================================== --- vm/gc_gen/src/common/gc_common.h (revision 680279) +++ vm/gc_gen/src/common/gc_common.h (working copy) @@ -296,6 +296,25 @@ #endif /* MARK_BIT_FLIPPING */ +inline Boolean obj_set_vt_to_next_obj(Partial_Reveal_Object* p_obj,Partial_Reveal_Object* next_obj) +{ + set_obj_info(p_obj, (Obj_Info_Type)-1); + obj_set_vt(p_obj,(VT)((VT_SIZE_INT)(POINTER_SIZE_INT)next_obj - (VT_SIZE_INT)(POINTER_SIZE_INT)p_obj)); + return TRUE; +} + +inline Boolean obj_vt_is_to_next_obj(Partial_Reveal_Object* p_obj) +{ + Obj_Info_Type info = get_obj_info_raw(p_obj); + info = ~info; + return (info == 0); +} + +inline Partial_Reveal_Object* obj_get_next_obj_from_vt(Partial_Reveal_Object* p_obj) +{ + return (Partial_Reveal_Object*)((VT_SIZE_INT)(POINTER_SIZE_INT)p_obj + (VT_SIZE_INT)obj_get_vt_raw(p_obj)); +} + /********************* for concurrent GC *******************************/ inline Boolean obj_is_dirty_in_oi(Partial_Reveal_Object* p_obj) { Index: vm/gc_gen/src/gen/gen.cpp =================================================================== --- vm/gc_gen/src/gen/gen.cpp (revision 680279) +++ vm/gc_gen/src/gen/gen.cpp (working copy) @@ -946,10 +946,14 @@ while(p_obj < block_end){ cont = vm_iterate_object((Managed_Object_Handle)p_obj); if (!cont) return; + if (obj_vt_is_to_next_obj((Partial_Reveal_Object *)p_obj)) + p_obj = (POINTER_SIZE_INT)obj_get_next_obj_from_vt((Partial_Reveal_Object *)p_obj); + else { #ifdef USE_32BITS_HASHCODE - hash_extend_size = (hashcode_is_attached((Partial_Reveal_Object*)p_obj))?GC_OBJECT_ALIGNMENT:0; + hash_extend_size = (hashcode_is_attached((Partial_Reveal_Object*)p_obj))?GC_OBJECT_ALIGNMENT:0; #endif - p_obj = p_obj + vm_object_size((Partial_Reveal_Object *)p_obj) + hash_extend_size; + p_obj = p_obj + vm_object_size((Partial_Reveal_Object *)p_obj) + hash_extend_size; + } } curr_block = curr_block->next; if(curr_block == NULL) break; Index: vm/gc_gen/src/mark_compact/mspace_extend_compact.cpp =================================================================== --- vm/gc_gen/src/mark_compact/mspace_extend_compact.cpp (revision 680279) +++ vm/gc_gen/src/mark_compact/mspace_extend_compact.cpp (working copy) @@ -182,6 +182,8 @@ while(p_obj < block_end){ object_refix_ref_slots(p_obj, start_address, end_address, addr_diff); p_obj = obj_end(p_obj); + if(obj_vt_is_to_next_obj(p_obj)) + p_obj = obj_get_next_obj_from_vt(p_obj); } #ifdef USE_32BITS_HASHCODE /*repoint the p_obj in hashcode_table in the moved block.*/ Index: vm/gc_gen/src/mark_compact/mspace_move_compact.cpp =================================================================== --- vm/gc_gen/src/mark_compact/mspace_move_compact.cpp (revision 680279) +++ vm/gc_gen/src/mark_compact/mspace_move_compact.cpp (working copy) @@ -124,10 +124,13 @@ assert(((POINTER_SIZE_INT)dest_sector_addr + curr_sector_size) <= block_end ); + Partial_Reveal_Object *last_obj_end = (Partial_Reveal_Object *)start_pos; /* check if next live object is out of current sector. If not, loop back to continue within this sector. FIXME:: we should add a condition for block check (?) */ p_obj = block_get_next_marked_object(curr_block, &start_pos); - if ((p_obj != NULL) && (OBJECT_INDEX_TO_OFFSET_TABLE(p_obj) == curr_sector)) + if ((p_obj != NULL) && (OBJECT_INDEX_TO_OFFSET_TABLE(p_obj) == curr_sector)) { + if(last_obj_end != p_obj) obj_set_vt_to_next_obj(last_obj_end, p_obj); continue; + } /* current sector is done, let's move it. */ POINTER_SIZE_INT sector_distance = (POINTER_SIZE_INT)src_sector_addr - (POINTER_SIZE_INT)dest_sector_addr; Index: vm/gc_gen/src/mark_compact/mspace_slide_compact.cpp =================================================================== --- vm/gc_gen/src/mark_compact/mspace_slide_compact.cpp (revision 680279) +++ vm/gc_gen/src/mark_compact/mspace_slide_compact.cpp (working copy) @@ -442,7 +442,7 @@ gc_identify_dead_weak_roots(gc); if( gc->tuner->kind != TRANS_NOTHING ) gc_compute_space_tune_size_after_marking(gc); - assert(!(gc->tuner->tuning_size % GC_BLOCK_SIZE_BYTES)); + //assert(!(gc->tuner->tuning_size % GC_BLOCK_SIZE_BYTES)); /* prepare for next phase */ gc_init_block_for_collectors(gc, mspace);