Index: src/common/fix_repointed_refs.h
===================================================================
--- src/common/fix_repointed_refs.h (revision 559012)
+++ src/common/fix_repointed_refs.h (working copy)
@@ -24,6 +24,7 @@
 #include "gc_common.h"
 #include "compressed_ref.h"
 extern Boolean IS_MOVE_COMPACT;
+extern void* los_boundary;
 inline void slot_fix(REF* p_ref)
 {
@@ -31,15 +32,21 @@
   if(!p_obj) return;
   if(IS_MOVE_COMPACT){
-    if(obj_is_moved(p_obj))
+    /* This condition is removed because we do los sliding compaction at every major compaction, now that the los minor sweep has been added. */
+    //if(obj_is_moved(p_obj))
+    /*Fixme: los_boundary ruined the modularity of gc_common.h*/
+    if(p_obj < los_boundary){
+      write_slot(p_ref, obj_get_fw_in_oi(p_obj));
+    }else{
       *p_ref = obj_get_fw_in_table(p_obj);
+    }
   }else{
-    if(obj_is_fw_in_oi(p_obj) && obj_is_moved(p_obj)){
+    if(obj_is_fw_in_oi(p_obj)){
       /* Condition obj_is_moved(p_obj) is for preventing mistaking previous mark bit of large obj as fw bit when fallback happens.
        * Because until fallback happens, perhaps the large obj hasn't been marked. So its mark bit remains as the last time.
+       * This condition is removed because we do los sliding compaction at every major compaction, now that the los minor sweep has been added.
        * In major collection condition obj_is_fw_in_oi(p_obj) can be omitted,
-       * since those which can be scanned in MOS & NOS must have been set fw bit in oi.
-       */
+       * since those which can be scanned in MOS & NOS must have been set fw bit in oi. */
       assert((POINTER_SIZE_INT)obj_get_fw_in_oi(p_obj) > DUAL_MARKBITS);
       write_slot(p_ref, obj_get_fw_in_oi(p_obj));
     }
Index: src/common/gc_block.h
===================================================================
--- src/common/gc_block.h (revision 559012)
+++ src/common/gc_block.h (working copy)
@@ -77,7 +77,7 @@
 #define OFFSET_TABLE_SIZE_WORDS (OFFSET_TABLE_SIZE_BYTES >> BIT_SHIFT_TO_BYTES_PER_WORD)
 #define OBJECT_INDEX_TO_OFFSET_TABLE(p_obj) (ADDRESS_OFFSET_IN_BLOCK_BODY(p_obj) >> SECTOR_SIZE_SHIFT_COUNT)
-#define GC_BLOCK_HEADER_SIZE_BYTES (OFFSET_TABLE_SIZE_BYTES + GC_BLOCK_HEADER_VARS_SIZE_BYTES)
+#define GC_BLOCK_HEADER_SIZE_BYTES ((OFFSET_TABLE_SIZE_BYTES + GC_BLOCK_HEADER_VARS_SIZE_BYTES + GC_OBJECT_ALIGN_MASK ) & (~GC_OBJECT_ALIGN_MASK))
 #define GC_BLOCK_BODY_SIZE_BYTES (GC_BLOCK_SIZE_BYTES - GC_BLOCK_HEADER_SIZE_BYTES)
 #define GC_BLOCK_BODY(block) ((void*)((POINTER_SIZE_INT)(block) + GC_BLOCK_HEADER_SIZE_BYTES))
 /*LOS_Shrink: We have some fake block headers when trying to compute mos object target,
Index: src/common/gc_common.cpp
===================================================================
--- src/common/gc_common.cpp (revision 559012)
+++ src/common/gc_common.cpp (working copy)
@@ -23,6 +23,7 @@
 #include "../thread/mutator.h"
 #include "../finalizer_weakref/finalizer_weakref.h"
 #include "../gen/gen.h"
+#include "../mark_sweep/gc_ms.h"
 #include "../common/space_tuner.h"
 #include "interior_pointer.h"
@@ -279,7 +280,11 @@
   gc->cause = gc_cause;
   gc_decide_collection_kind((GC_Gen*)gc, gc_cause);
+  gc_gen_update_space_before_gc((GC_Gen*)gc);
+
+#ifndef ONLY_SSPACE_IN_HEAP
   gc_compute_space_tune_size_before_marking(gc, gc_cause);
+#endif
 #ifdef MARK_BIT_FLIPPING
   if(gc_match_kind(gc, MINOR_COLLECTION)) mark_bit_flip();
@@ -301,7 +306,11 @@
   if(!IGNORE_FINREF )
     gc_set_obj_with_fin(gc);
+#ifndef ONLY_SSPACE_IN_HEAP
   gc_gen_reclaim_heap((GC_Gen*)gc);
+#else
+  gc_ms_reclaim_heap((GC_MS*)gc);
+#endif
   gc_reset_interior_pointer_table();
@@ -310,9 +319,11 @@
   int64 pause_time = time_now() - start_time;
   gc->time_collections += 
pause_time; +#ifndef ONLY_SSPACE_IN_HEAP gc_adjust_heap_size(gc, pause_time); gc_gen_adapt((GC_Gen*)gc, pause_time); +#endif if(gc_is_gen_mode()) gc_prepare_mutator_remset(gc); @@ -326,9 +337,11 @@ #endif } +#ifndef ONLY_SSPACE_IN_HEAP gc_space_tuner_reset(gc); - + gc_gen_update_space_after_gc((GC_Gen*)gc); gc_assign_free_area_to_mutators(gc); +#endif vm_reclaim_native_objs(); vm_resume_threads_after(); Index: src/common/gc_common.h =================================================================== --- src/common/gc_common.h (revision 559012) +++ src/common/gc_common.h (working copy) @@ -72,6 +72,8 @@ #define USE_32BITS_HASHCODE +//#define ONLY_SSPACE_IN_HEAP + typedef void (*TaskType)(void*); enum Collection_Algorithm{ @@ -100,7 +102,8 @@ MINOR_COLLECTION = 0x1, MAJOR_COLLECTION = 0x2, FALLBACK_COLLECTION = 0x4, - EXTEND_COLLECTION = 0x8 + EXTEND_COLLECTION = 0x8, + UNIQUE_SWEEP_COLLECTION = 0x10 }; extern Boolean IS_FALLBACK_COMPACTION; /* only for mark/fw bits debugging purpose */ Index: src/common/gc_for_vm.cpp =================================================================== --- src/common/gc_for_vm.cpp (revision 559012) +++ src/common/gc_for_vm.cpp (working copy) @@ -23,6 +23,7 @@ #include "compressed_ref.h" #include "../gen/gen.h" +#include "../mark_sweep/gc_ms.h" #include "interior_pointer.h" #include "../thread/collector.h" #include "../verify/verify_live_heap.h" @@ -42,7 +43,13 @@ int gc_init() { assert(p_global_gc == NULL); - GC* gc = (GC*)STD_MALLOC(sizeof(GC_Gen)); + +#ifndef ONLY_SSPACE_IN_HEAP + unsigned int gc_struct_size = sizeof(GC_Gen); +#else + unsigned int gc_struct_size = sizeof(GC_MS); +#endif + GC* gc = (GC*)STD_MALLOC(gc_struct_size); assert(gc); memset(gc, 0, sizeof(GC)); p_global_gc = gc; @@ -52,8 +59,15 @@ gc_tls_init(); gc_metadata_initialize(gc); /* root set and mark stack */ + +#ifndef ONLY_SSPACE_IN_HEAP + gc_gen_initialize((GC_Gen*)gc, min_heap_size_bytes, max_heap_size_bytes); +#else + gc_ms_initialize((GC_MS*)gc, min_heap_size_bytes, max_heap_size_bytes); +#endif - gc_gen_initialize((GC_Gen*)gc, min_heap_size_bytes, max_heap_size_bytes); + set_native_finalizer_thread_flag(!IGNORE_FINREF); + set_native_ref_enqueue_thread_flag(!IGNORE_FINREF); #ifndef BUILD_IN_REFERENT gc_finref_metadata_initialize(gc); @@ -69,7 +83,13 @@ void gc_wrapup() { GC* gc = p_global_gc; + +#ifndef ONLY_SSPACE_IN_HEAP gc_gen_destruct((GC_Gen*)gc); +#else + gc_ms_destruct((GC_MS*)gc); +#endif + gc_metadata_destruct(gc); /* root set and mark stack */ #ifndef BUILD_IN_REFERENT gc_finref_metadata_destruct(gc); @@ -154,20 +174,32 @@ void gc_thread_kill(void* gc_info) { mutator_destruct(p_global_gc, gc_info); } -int64 gc_free_memory() +int64 gc_free_memory() { +#ifndef ONLY_SSPACE_IN_HEAP return (int64)gc_gen_free_memory_size((GC_Gen*)p_global_gc); +#else + return (int64)gc_ms_free_memory_size((GC_MS*)p_global_gc); +#endif } /* java heap size.*/ int64 gc_total_memory() { - return (int64)((POINTER_SIZE_INT)gc_gen_total_memory_size((GC_Gen*)p_global_gc)); +#ifndef ONLY_SSPACE_IN_HEAP + return (int64)((POINTER_SIZE_INT)gc_gen_total_memory_size((GC_Gen*)p_global_gc)); +#else + return (int64)((POINTER_SIZE_INT)gc_ms_total_memory_size((GC_MS*)p_global_gc)); +#endif } int64 gc_max_memory() { - return (int64)((POINTER_SIZE_INT)gc_gen_total_memory_size((GC_Gen*)p_global_gc)); +#ifndef ONLY_SSPACE_IN_HEAP + return (int64)((POINTER_SIZE_INT)gc_gen_total_memory_size((GC_Gen*)p_global_gc)); +#else + return (int64)((POINTER_SIZE_INT)gc_ms_total_memory_size((GC_MS*)p_global_gc)); +#endif } int64 
gc_get_collection_count()
@@ -233,6 +265,10 @@
 #else //USE_32BITS_HASHCODE
 int32 gc_get_hashcode(Managed_Object_Handle p_object)
 {
+#ifdef ONLY_SSPACE_IN_HEAP
+  return (int32)p_object;
+#endif
+
 Partial_Reveal_Object* p_obj = (Partial_Reveal_Object*)p_object;
 if(!p_obj) return 0;
 assert(address_belongs_to_gc_heap(p_obj, p_global_gc));
@@ -288,7 +324,11 @@
 // data structures in not consistent for heap iteration
 if (!JVMTI_HEAP_ITERATION) return;
+#ifndef ONLY_SSPACE_IN_HEAP
 gc_gen_iterate_heap((GC_Gen *)p_global_gc);
+#else
+ gc_ms_iterate_heap((GC_MS*)p_global_gc);
+#endif
 }
 void gc_set_mutator_block_flag()
Index: src/common/gc_metadata.cpp
===================================================================
--- src/common/gc_metadata.cpp (revision 559012)
+++ src/common/gc_metadata.cpp (working copy)
@@ -177,21 +177,26 @@
       iter = vector_block_iterator_advance(root_set,iter);
       Partial_Reveal_Object* p_obj = read_slot(p_ref);
-      if(IS_MOVE_COMPACT){
-        if(obj_is_moved(p_obj))
-          *p_ref = obj_get_fw_in_table(p_obj);
-      } else {
-        if( // obj_is_fw_in_oi(p_obj) && //NOTE:: we removed the minor_copy algorithm at the moment, so we don't need this check
-            obj_is_moved(p_obj)){
-          /* Condition obj_is_moved(p_obj) is for preventing mistaking previous mark bit of large obj as fw bit when fallback happens.
-           * Because until fallback happens, perhaps the large obj hasn't been marked. So its mark bit remains as the last time.
-           * In major collection condition obj_is_fw_in_oi(p_obj) can be omitted,
-           * since those which can be scanned in MOS & NOS must have been set fw bit in oi.
-           */
-          assert(address_belongs_to_gc_heap(obj_get_fw_in_oi(p_obj), gc));
-          write_slot(p_ref , obj_get_fw_in_oi(p_obj));
+      if(IS_MOVE_COMPACT){
+        /*This condition is removed because we do los sliding compaction at every major compaction, now that the los minor sweep has been added.*/
+        //if(obj_is_moved(p_obj))
+        /*Fixme: los_boundary ruined the modularity of gc_common.h*/
+        if(p_obj < los_boundary){
+          write_slot(p_ref, obj_get_fw_in_oi(p_obj));
+        }else{
+          *p_ref = obj_get_fw_in_table(p_obj);
+        }
+      }else{
+        if(obj_is_fw_in_oi(p_obj)){
+          /* Condition obj_is_moved(p_obj) is for preventing mistaking previous mark bit of large obj as fw bit when fallback happens.
+           * Because until fallback happens, perhaps the large obj hasn't been marked. So its mark bit remains as the last time.
+           * This condition is removed because we do los sliding compaction at every major compaction, now that the los minor sweep has been added.
+           * In major collection condition obj_is_fw_in_oi(p_obj) can be omitted,
+           * since those which can be scanned in MOS & NOS must have been set fw bit in oi. */
+          assert((POINTER_SIZE_INT)obj_get_fw_in_oi(p_obj) > DUAL_MARKBITS);
+          write_slot(p_ref, obj_get_fw_in_oi(p_obj));
+        }
       }
-      }
     }
     root_set = pool_iterator_next(pool);
   }
Index: src/common/gc_space.h
===================================================================
--- src/common/gc_space.h (revision 559012)
+++ src/common/gc_space.h (working copy)
@@ -38,10 +38,18 @@
   unsigned int collect_algorithm;
   GC* gc;
   Boolean move_object;
-  /*Size allocted after last collection. */
-  POINTER_SIZE_INT alloced_size;
-  /*For_statistic*/
-  POINTER_SIZE_INT surviving_size;
+
+  /* Size allocated since last minor collection. */
+  volatile uint64 last_alloced_size;
+  /* Size allocated since last major collection. */
+  uint64 accumu_alloced_size;
+  /* Total size allocated since VM starts. */
+  uint64 total_alloced_size;
+
+  /* Size survived from last collection. 
*/ + uint64 last_surviving_size; + /* Size survived after a certain period. */ + uint64 period_surviving_size; }Space; inline POINTER_SIZE_INT space_committed_size(Space* space){ return space->committed_heap_size;} @@ -71,10 +79,19 @@ unsigned int collect_algorithm; GC* gc; Boolean move_object; - /*Size allocted after last collection. */ - POINTER_SIZE_INT alloced_size; - /*For_statistic*/ - POINTER_SIZE_INT surviving_size; + + /* Size allocted since last minor collection. */ + volatile uint64 last_alloced_size; + /* Size allocted since last major collection. */ + uint64 accumu_alloced_size; + /* Total size allocated since VM starts. */ + uint64 total_alloced_size; + + /* Size survived from last collection. */ + uint64 last_surviving_size; + /* Size survived after a certain period. */ + uint64 period_surviving_size; + /* END of Space --> */ Block* blocks; /* short-cut for mpsace blockheader access, not mandatory */ Index: src/common/hashcode.h =================================================================== --- src/common/hashcode.h (revision 559012) +++ src/common/hashcode.h (working copy) @@ -64,7 +64,7 @@ } inline int hashcode_gen(void* addr) -{ return (int)(POINTER_SIZE_INT)addr; } +{ return (int)(((POINTER_SIZE_INT)addr) >> 2); } typedef struct Hashcode_Buf{ Seq_List* list; Index: src/common/mark_scan_pool.cpp =================================================================== --- src/common/mark_scan_pool.cpp (revision 559012) +++ src/common/mark_scan_pool.cpp (working copy) @@ -37,6 +37,7 @@ static FORCE_INLINE void scan_object(Collector* collector, Partial_Reveal_Object *p_obj) { vm_notify_obj_alive( (void *)p_obj); + assert((((POINTER_SIZE_INT)p_obj) % GC_OBJECT_ALIGNMENT) == 0); if( !object_has_ref_field(p_obj) ) return; REF *p_ref; Index: src/common/space_tuner.cpp =================================================================== --- src/common/space_tuner.cpp (revision 559012) +++ src/common/space_tuner.cpp (working copy) @@ -29,39 +29,38 @@ Space* gc_get_los(GC_Gen* gc); float mspace_get_expected_threshold_ratio(Mspace* mspace); POINTER_SIZE_INT lspace_get_failure_size(Lspace* lspace); - + +/* Calculate speed of allocation and waste memory of specific space respectively, + * then decide whether to execute a space tuning according to the infomation.*/ void gc_decide_space_tune(GC* gc, unsigned int cause) { Blocked_Space* mspace = (Blocked_Space*)gc_get_mos((GC_Gen*)gc); Blocked_Space* fspace = (Blocked_Space*)gc_get_nos((GC_Gen*)gc); Space* lspace = (Space*)gc_get_los((GC_Gen*)gc); Space_Tuner* tuner = gc->tuner; - //debug_adjust - assert(fspace->free_block_idx >= fspace->first_block_idx); - unsigned int nos_alloc_size = (fspace->free_block_idx - fspace->first_block_idx) * GC_BLOCK_SIZE_BYTES; - fspace->alloced_size = nos_alloc_size; - /*Fixme: LOS_Adaptive: There should be a condition here, that fspace->collection_num != 0*/ - mspace->alloced_size += (unsigned int)((float)nos_alloc_size * fspace->survive_ratio); - /*For_statistic alloc speed: Speed could be represented by sum of alloced size. - *The right of this time los/mos alloc speed is the biggest. 
- */ - tuner->speed_los = lspace->alloced_size; - tuner->speed_los = (tuner->speed_los + tuner->old_speed_los) >> 1; - tuner->speed_mos = mspace->alloced_size; - tuner->speed_mos = (tuner->speed_mos + tuner->old_speed_mos) >> 1; + + tuner->speed_los = lspace->accumu_alloced_size; + tuner->speed_los = (tuner->speed_los + tuner->last_speed_los) >> 1; + /*The possible survivors from the newly allocated NOS should be counted into the speed of MOS*/ + tuner->speed_mos = mspace->accumu_alloced_size + (uint64)((float)fspace->last_alloced_size * fspace->survive_ratio);; + tuner->speed_mos = (tuner->speed_mos + tuner->last_speed_mos) >> 1; + tuner->speed_nos = fspace->accumu_alloced_size; + tuner->speed_nos = (tuner->speed_nos + tuner->last_speed_nos) >> 1; - /*For_statistic wasted memory*/ - POINTER_SIZE_INT curr_used_los = lspace->surviving_size + lspace->alloced_size; - POINTER_SIZE_INT curr_wast_los = 0; + /*Statistic wasted memory*/ + uint64 curr_used_los = lspace->last_surviving_size + lspace->last_alloced_size; + uint64 curr_wast_los = 0; if(gc->cause != GC_CAUSE_LOS_IS_FULL) curr_wast_los = lspace->committed_heap_size - curr_used_los; - tuner->wast_los += curr_wast_los; + tuner->wast_los += (POINTER_SIZE_INT)curr_wast_los; - POINTER_SIZE_INT curr_used_mos = mspace->surviving_size + mspace->alloced_size; + uint64 curr_used_mos = + mspace->period_surviving_size + mspace->accumu_alloced_size + (uint64)(fspace->last_alloced_size * fspace->survive_ratio); float expected_mos_ratio = mspace_get_expected_threshold_ratio((Mspace*)mspace); - POINTER_SIZE_INT expected_mos = (POINTER_SIZE_INT)((mspace->committed_heap_size + fspace->committed_heap_size) * expected_mos_ratio); - POINTER_SIZE_INT curr_wast_mos = 0; + uint64 expected_mos = (uint64)((mspace->committed_heap_size + fspace->committed_heap_size) * expected_mos_ratio); + uint64 curr_wast_mos = 0; if(expected_mos > curr_used_mos) curr_wast_mos = expected_mos - curr_used_mos; tuner->wast_mos += curr_wast_mos; + tuner->current_dw = ABS_DIFF(tuner->wast_mos, tuner->wast_los); /*For_statistic ds in heuristic*/ @@ -86,6 +85,14 @@ extern POINTER_SIZE_INT min_los_size_bytes; extern POINTER_SIZE_INT min_none_los_size_bytes; +/*Open this macro if we want to tune the space size according to allocation speed computed in major collection. + *By default, we will use allocation speed computed in minor collection. */ +//#define SPACE_TUNE_BY_MAJOR_SPEED + + +/* The tuning size computing before marking is not precise. We only estimate the probable direction of space tuning. + * If this function decide to set TRANS_NOTHING, then we just call the normal marking function. + * Else, we call the marking function for space tuning. 
*/ void gc_compute_space_tune_size_before_marking(GC* gc, unsigned int cause) { if(gc_match_kind(gc, MINOR_COLLECTION)) return; @@ -93,39 +100,50 @@ gc_decide_space_tune(gc, cause); Space_Tuner* tuner = gc->tuner; - if((tuner->speed_los == 0) && ( tuner->speed_mos == 0)) return; + assert((tuner->speed_los != 0) && ( tuner->speed_mos != 0)) ; if((!tuner->need_tune) && (!tuner->force_tune)) return; Blocked_Space* mspace = (Blocked_Space*)gc_get_mos((GC_Gen*)gc); Blocked_Space* fspace = (Blocked_Space*)gc_get_nos((GC_Gen*)gc); Space* lspace = (Space*)gc_get_los((GC_Gen*)gc); - POINTER_SIZE_INT los_expect_surviving_sz = (POINTER_SIZE_INT)((float)(lspace->surviving_size + lspace->alloced_size) * lspace->survive_ratio); + POINTER_SIZE_INT los_expect_surviving_sz = (POINTER_SIZE_INT)((float)(lspace->last_surviving_size + lspace->last_alloced_size) * lspace->survive_ratio); POINTER_SIZE_INT los_expect_free_sz = ((lspace->committed_heap_size > los_expect_surviving_sz) ? (lspace->committed_heap_size - los_expect_surviving_sz) : 0); - POINTER_SIZE_INT mos_expect_survive_sz = (POINTER_SIZE_INT)((float)(mspace->surviving_size + mspace->alloced_size) * mspace->survive_ratio); + + POINTER_SIZE_INT mos_expect_survive_sz = (POINTER_SIZE_INT)((float)(mspace->period_surviving_size + mspace->accumu_alloced_size) * mspace->survive_ratio); float mos_expect_threshold_ratio = mspace_get_expected_threshold_ratio((Mspace*)mspace); POINTER_SIZE_INT mos_expect_threshold = (POINTER_SIZE_INT)((mspace->committed_heap_size + fspace->committed_heap_size) * mos_expect_threshold_ratio); POINTER_SIZE_INT mos_expect_free_sz = ((mos_expect_threshold > mos_expect_survive_sz)? (mos_expect_threshold - mos_expect_survive_sz) : 0); + + POINTER_SIZE_INT non_los_expect_surviving_sz = (POINTER_SIZE_INT)(mos_expect_survive_sz + fspace->last_alloced_size * fspace->survive_ratio); + POINTER_SIZE_INT non_los_committed_size = mspace->committed_heap_size + fspace->committed_heap_size; + POINTER_SIZE_INT non_los_expect_free_sz = (non_los_committed_size > non_los_expect_surviving_sz) ? (non_los_committed_size - non_los_expect_surviving_sz):(0) ; + +#ifdef SPACE_TUNE_BY_MAJOR_SPEED + /*Fixme: tuner->speed_los here should be computed by sliding compact LOS, to be implemented!*/ POINTER_SIZE_INT total_expect_free_sz = los_expect_free_sz + mos_expect_free_sz; - float new_los_ratio = (float)tuner->speed_los / (float)(tuner->speed_los + tuner->speed_mos); POINTER_SIZE_INT new_free_los_sz = (POINTER_SIZE_INT)((float)total_expect_free_sz * new_los_ratio); +#else + POINTER_SIZE_INT total_expect_free_sz = los_expect_free_sz + non_los_expect_free_sz; + float new_los_ratio = (float)tuner->speed_los / (float)(tuner->speed_los + tuner->speed_nos); + POINTER_SIZE_INT new_free_los_sz = (POINTER_SIZE_INT)((float)total_expect_free_sz * new_los_ratio); +#endif + /*LOS_Extend:*/ if((new_free_los_sz > los_expect_free_sz) ) { tuner->kind = TRANS_FROM_MOS_TO_LOS; tuner->tuning_size = new_free_los_sz - los_expect_free_sz; - lspace->move_object = 0; } /*LOS_Shrink:*/ else if(new_free_los_sz < los_expect_free_sz) { tuner->kind = TRANS_FROM_LOS_TO_MOS; tuner->tuning_size = los_expect_free_sz - new_free_los_sz; - lspace->move_object = 1; } /*Nothing*/ else @@ -137,7 +155,6 @@ if( (!tuner->force_tune) && (tuner->tuning_size < tuner->min_tuning_size) ){ tuner->kind = TRANS_NOTHING; tuner->tuning_size = 0; - lspace->move_object = 0; } /*If los or non-los is already the smallest size, there is no need to tune anymore. 
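/* A minimal, self-contained sketch of the speed-ratio split that gc_compute_space_tune_size_before_marking()
 * performs above (the non-SPACE_TUNE_BY_MAJOR_SPEED path, using tuner->speed_los and tuner->speed_nos),
 * with the GC types replaced by plain integers. The names used here (split_free_space, Tune_Decision)
 * are illustrative only and are not part of the patch. */
#include <cstdint>
#include <cstdio>

enum Transform_Kind { TRANS_NOTHING, TRANS_FROM_MOS_TO_LOS, TRANS_FROM_LOS_TO_MOS };

struct Tune_Decision {
  Transform_Kind kind;
  uint64_t tuning_size;
};

/* Split the expected free space between LOS and non-LOS in proportion to their
 * (averaged) allocation speeds; the side that allocates faster than its current
 * share of free space gets extended by the difference. */
static Tune_Decision split_free_space(uint64_t speed_los, uint64_t speed_nos,
                                      uint64_t los_expect_free_sz,
                                      uint64_t non_los_expect_free_sz)
{
  Tune_Decision d = { TRANS_NOTHING, 0 };
  if(speed_los + speed_nos == 0) return d;         /* nothing allocated, nothing to tune */

  uint64_t total_expect_free_sz = los_expect_free_sz + non_los_expect_free_sz;
  float new_los_ratio = (float)speed_los / (float)(speed_los + speed_nos);
  uint64_t new_free_los_sz = (uint64_t)((float)total_expect_free_sz * new_los_ratio);

  if(new_free_los_sz > los_expect_free_sz){        /* LOS_Extend */
    d.kind = TRANS_FROM_MOS_TO_LOS;
    d.tuning_size = new_free_los_sz - los_expect_free_sz;
  }else if(new_free_los_sz < los_expect_free_sz){  /* LOS_Shrink */
    d.kind = TRANS_FROM_LOS_TO_MOS;
    d.tuning_size = los_expect_free_sz - new_free_los_sz;
  }
  return d;
}

int main()
{
  /* e.g. LOS allocates 16MB per major cycle vs 48MB for non-LOS, but currently
   * holds only 8MB of the 64MB expected free space: extend LOS by 8MB. */
  Tune_Decision d = split_free_space(16u<<20, 48u<<20, 8u<<20, 56u<<20);
  printf("kind=%d, tuning_size=%llu bytes\n", (int)d.kind, (unsigned long long)d.tuning_size);
  return 0;
}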
@@ -148,14 +165,13 @@ assert((lspace->committed_heap_size == min_los_size_bytes) || (fspace->committed_heap_size + mspace->committed_heap_size == min_none_los_size_bytes)); tuner->kind = TRANS_NOTHING; tuner->tuning_size = 0; - lspace->move_object = 0; } - + + /*If the strategy upward doesn't decide to extend los, but current GC is caused by los, force an extension here.*/ if(tuner->force_tune){ if(tuner->kind != TRANS_FROM_MOS_TO_LOS){ tuner->kind = TRANS_FROM_MOS_TO_LOS; tuner->tuning_size = 0; - tuner->reverse_1 = 1; } } @@ -167,6 +183,7 @@ static POINTER_SIZE_INT non_los_live_obj_size; static POINTER_SIZE_INT los_live_obj_size; +/* Only when we call the special marking function for space tuning, we can get the accumulation of the sizes. */ static void gc_compute_live_object_size_after_marking(GC* gc, POINTER_SIZE_INT non_los_size) { non_los_live_obj_size = 0; @@ -204,6 +221,12 @@ } +/* If this GC is caused by a LOS allocation failure, we set the "force_tune" flag. + * Attention1: The space tuning strategy will extend or shrink LOS according to the wasted memory size and allocation speed. + * If the strategy decide to shrink or the size extended is not large enough to hold the failed object, we set the "doforce" flag in + * function "gc_compute_space_tune_size_after_marking". And only if "force_tune" and "doforce" are both true, we decide the + * size of extention by this function. + * Attention2: The total heap size might extend in this function. */ static void compute_space_tune_size_for_force_tune(GC *gc, POINTER_SIZE_INT max_tune_for_min_non_los) { Space_Tuner* tuner = gc->tuner; @@ -219,7 +242,6 @@ if(lspace_free_size >= failure_size){ tuner->tuning_size = 0; tuner->kind = TRANS_NOTHING; - lspace->move_object = 1; }else{ tuner->tuning_size = failure_size -lspace_free_size; @@ -250,25 +272,24 @@ if(tuner->tuning_size > potential_max_tuning_size){ tuner->tuning_size = 0; tuner->kind = TRANS_NOTHING; - lspace->move_object = 0; }else{ /*We have tuner->tuning_size > max_tuning_size up there.*/ extend_heap_size = tuner->tuning_size - max_tuning_size; blocked_space_extend(fspace, (unsigned int)extend_heap_size); gc->committed_heap_size += extend_heap_size; tuner->kind = TRANS_FROM_MOS_TO_LOS; - lspace->move_object = 1; } - } else{ + } + else + { tuner->kind = TRANS_FROM_MOS_TO_LOS; - lspace->move_object = 1; } } return; } -static void make_sure_tuning_size(GC* gc) +static void check_tuning_size(GC* gc) { Space_Tuner* tuner = gc->tuner; Lspace *lspace = (Lspace*)gc_get_los((GC_Gen*)gc); @@ -277,41 +298,48 @@ POINTER_SIZE_INT los_free_sz = ((lspace->committed_heap_size > los_live_obj_size) ? (lspace->committed_heap_size - los_live_obj_size) : 0); + +#ifdef SPACE_TUNE_BY_MAJOR_SPEED float mos_expect_threshold_ratio = mspace_get_expected_threshold_ratio((Mspace*)mspace); POINTER_SIZE_INT mos_expect_threshold = (POINTER_SIZE_INT)((mspace->committed_heap_size + fspace->committed_heap_size) * mos_expect_threshold_ratio); POINTER_SIZE_INT mos_free_sz = ((mos_expect_threshold > non_los_live_obj_size)? (mos_expect_threshold - non_los_live_obj_size) : 0); POINTER_SIZE_INT total_free_sz = los_free_sz + mos_free_sz; - float new_los_ratio = (float)tuner->speed_los / (float)(tuner->speed_los + tuner->speed_mos); POINTER_SIZE_INT new_free_los_sz = (POINTER_SIZE_INT)((float)total_free_sz * new_los_ratio); +#else + POINTER_SIZE_INT non_los_committed_size = mspace->committed_heap_size + fspace->committed_heap_size; + POINTER_SIZE_INT non_los_free_sz = ((non_los_committed_size > non_los_live_obj_size)? 
+ (non_los_committed_size - non_los_live_obj_size):0); + POINTER_SIZE_INT total_free_sz = los_free_sz + non_los_free_sz; + float new_los_ratio = (float)tuner->speed_los / (float)(tuner->speed_los + tuner->speed_nos); + POINTER_SIZE_INT new_free_los_sz = (POINTER_SIZE_INT)((float)total_free_sz * new_los_ratio); +#endif /*LOS_Extend:*/ if((new_free_los_sz > los_free_sz) ) { tuner->kind = TRANS_FROM_MOS_TO_LOS; tuner->tuning_size = new_free_los_sz - los_free_sz; - lspace->move_object = 0; //This is necessary, because the flag might be set by gc_compute_space_tune_size_before_marking. } /*LOS_Shrink:*/ else if(new_free_los_sz < los_free_sz) { tuner->kind = TRANS_FROM_LOS_TO_MOS; tuner->tuning_size = los_free_sz - new_free_los_sz; - lspace->move_object = 1; } /*Nothing*/ else { tuner->tuning_size = 0; - tuner->kind = TRANS_NOTHING;//This is necessary, because the original value of kind might not be NOTHING. + /*This is necessary, because the original value of kind might not be NOTHING. */ + tuner->kind = TRANS_NOTHING; } /*If not force tune, and the tuning size is too small, tuner will not take effect.*/ if( (!tuner->force_tune) && (tuner->tuning_size < tuner->min_tuning_size) ){ tuner->kind = TRANS_NOTHING; tuner->tuning_size = 0; - lspace->move_object = 0; } /*If los or non-los is already the smallest size, there is no need to tune anymore. @@ -322,19 +350,19 @@ assert((lspace->committed_heap_size == min_los_size_bytes) || (fspace->committed_heap_size + mspace->committed_heap_size == min_none_los_size_bytes)); tuner->kind = TRANS_NOTHING; tuner->tuning_size = 0; - lspace->move_object = 0; } if(tuner->force_tune){ if(tuner->kind != TRANS_FROM_MOS_TO_LOS){ tuner->kind = TRANS_FROM_MOS_TO_LOS; - tuner->reverse_2 = 1; + tuner->reverse = 1; } } return; } +/* This is the real function that decide tuning_size, because we have know the total size of living objects after "mark_scan_heap_for_space_tune". */ void gc_compute_space_tune_size_after_marking(GC *gc) { Blocked_Space* mspace = (Blocked_Space*)gc_get_mos((GC_Gen*)gc); @@ -347,7 +375,7 @@ gc_compute_live_object_size_after_marking(gc, non_los_size); - make_sure_tuning_size(gc); + check_tuning_size(gc); /*We should assure that the non_los area is no less than min_none_los_size_bytes*/ POINTER_SIZE_INT max_tune_for_min_non_los = 0; @@ -369,15 +397,11 @@ tuner->tuning_size = max_tuning_size; /*Round down so as not to break max_tuning_size*/ tuner->tuning_size = round_down_to_size(tuner->tuning_size, GC_BLOCK_SIZE_BYTES); - if(tuner->tuning_size == 0){ - //If tuning size is zero, we should reset kind to NOTHING, in case that gc_init_block_for_collectors relink the block list. 
- tuner->kind = TRANS_NOTHING; - lspace->move_object = 0; - } + /*If tuning size is zero, we should reset kind to NOTHING, in case that gc_init_block_for_collectors relink the block list.*/ + if(tuner->tuning_size == 0) tuner->kind = TRANS_NOTHING; }else{ tuner->tuning_size = 0; tuner->kind = TRANS_NOTHING; - lspace->move_object = 0; } } /*Not force tune, LOS Shrink*/ @@ -391,28 +415,22 @@ tuner->tuning_size = max_tuning_size; /*Round down so as not to break max_tuning_size*/ tuner->tuning_size = round_down_to_size(tuner->tuning_size, GC_BLOCK_SIZE_BYTES); - if(tuner->tuning_size == 0){ - tuner->kind = TRANS_NOTHING; - lspace->move_object = 0; - } + if(tuner->tuning_size == 0) tuner->kind = TRANS_NOTHING; }else{ /* this is possible because of the reservation in gc_compute_live_object_size_after_marking*/ tuner->tuning_size = 0; tuner->kind = TRANS_NOTHING; - lspace->move_object = 0; } } /*If the tuning strategy give a bigger tuning_size than failure size, we just follow the strategy and set noforce.*/ Boolean doforce = TRUE; POINTER_SIZE_INT failure_size = lspace_get_failure_size((Lspace*)lspace); - if( (tuner->kind == TRANS_FROM_MOS_TO_LOS) && (!tuner->reverse_2) && (tuner->tuning_size > failure_size) ) + if( (tuner->kind == TRANS_FROM_MOS_TO_LOS) && (!tuner->reverse) && (tuner->tuning_size > failure_size) ) doforce = FALSE; - /*If force tune*/ - if( (tuner->force_tune) && (doforce) ){ + if( (tuner->force_tune) && (doforce) ) compute_space_tune_size_for_force_tune(gc, max_tune_for_min_non_los); - } return; @@ -428,10 +446,12 @@ tuner->need_tune = FALSE; tuner->force_tune = FALSE; - tuner->old_speed_los = tuner->speed_los; - tuner->old_speed_mos = tuner->speed_mos; + tuner->last_speed_los = tuner->speed_los; + tuner->last_speed_mos = tuner->speed_mos; + tuner->last_speed_nos = tuner->speed_nos; tuner->speed_los = 0; tuner->speed_mos = 0; + tuner->speed_nos = 0; tuner->current_dw = 0; tuner->current_ds = 0; @@ -444,8 +464,7 @@ tuner->wast_mos = 0; } tuner->kind = TRANS_NOTHING; - tuner->reverse_1 = 0; - tuner->reverse_2 = 0; + tuner->reverse = 0; } return; Index: src/common/space_tuner.h =================================================================== --- src/common/space_tuner.h (revision 559012) +++ src/common/space_tuner.h (working copy) @@ -38,33 +38,33 @@ typedef struct Space_Tuner{ Transform_Kind kind; - /*Fixme: These flags is set if the los tuning status changes in the process of tuning, remove one of them*/ - Boolean reverse_1; - Boolean reverse_2; + /*This flags is set if the tuning direction changes in the process of tuning*/ + Boolean reverse; POINTER_SIZE_INT tuning_size; /*Used for LOS_Shrink*/ Block_Header* interim_blocks; /*This flag is set when tuning strategy decide to tune los size. - *i.e. wasted memory is greater than wast_threshold. - */ + *i.e. wasted memory is greater than wast_threshold. 
*/ Boolean need_tune; /*This flag is set if gc is caused by los alloc failure.*/ Boolean force_tune; - /*LOS alloc speed since last major*/ - POINTER_SIZE_INT speed_los; - POINTER_SIZE_INT old_speed_los; - /*MOS alloc speed since last major*/ - POINTER_SIZE_INT speed_mos; - POINTER_SIZE_INT old_speed_mos; - + uint64 speed_los; + uint64 last_speed_los; + + uint64 speed_mos; + uint64 last_speed_mos; + + uint64 speed_nos; + uint64 last_speed_nos; + /*Total wasted memory of los science last los variation*/ - POINTER_SIZE_INT wast_los; + uint64 wast_los; /*Total wasted memory of mos science last los variation*/ - POINTER_SIZE_INT wast_mos; + uint64 wast_mos; - POINTER_SIZE_INT current_dw; + uint64 current_dw; /*NOS survive size of last minor, this could be the least meaningful space unit when talking about tuning.*/ POINTER_SIZE_INT current_ds; Index: src/finalizer_weakref/finalizer_weakref.cpp =================================================================== --- src/finalizer_weakref/finalizer_weakref.cpp (revision 559012) +++ src/finalizer_weakref/finalizer_weakref.cpp (working copy) @@ -26,6 +26,7 @@ #include "../trace_forward/fspace.h" #include "../los/lspace.h" #include "../gen/gen.h" +#include "../mark_sweep/gc_ms.h" #include "../common/space_tuner.h" Boolean IGNORE_FINREF = FALSE; @@ -35,8 +36,10 @@ static inline Boolean obj_is_dead_in_gen_minor_gc(Partial_Reveal_Object *p_obj) { /* - * The first condition is for supporting switch between nongen and gen minor collection - * With this kind of switch dead objects in MOS & LOS may be set the mark or fw bit in oi + * The first condition is for supporting switch between nongen and gen minor collection. + * With this kind of switch dead objects in MOS & LOS may be set the mark or fw bit in oi. + * The second condition is for supporting partially forwarding NOS. + * In partially forwarding situation live objects in the non-forwarding half NOS will only be marked but not forwarded. */ return obj_belongs_to_nos(p_obj) && !obj_is_marked_or_fw_in_oi(p_obj); } @@ -51,10 +54,23 @@ { return !obj_is_marked_in_vt(p_obj); } -// clear the two least significant bits of p_obj first + +#ifdef ONLY_SSPACE_IN_HEAP +extern Boolean obj_is_marked_in_table(Partial_Reveal_Object *obj); +static inline Boolean obj_is_dead_in_unique_sweep_gc(Partial_Reveal_Object * p_obj) +{ + return !obj_is_marked_in_table(p_obj); +} +#endif + static inline Boolean gc_obj_is_dead(GC *gc, Partial_Reveal_Object *p_obj) { assert(p_obj); + +#ifdef ONLY_SSPACE_IN_HEAP + return obj_is_dead_in_unique_sweep_gc(p_obj); +#endif + if(gc_match_kind(gc, MINOR_COLLECTION)){ if(gc_is_gen_mode()) return obj_is_dead_in_gen_minor_gc(p_obj); @@ -68,11 +84,16 @@ static inline Boolean fspace_obj_to_be_forwarded(Partial_Reveal_Object *p_obj) { if(!obj_belongs_to_nos(p_obj)) return FALSE; - return forward_first_half? (p_obj < object_forwarding_boundary):(p_obj>=object_forwarding_boundary); + return forward_first_half ? 
(p_obj < object_forwarding_boundary) : (p_obj>=object_forwarding_boundary); } static inline Boolean obj_need_move(GC *gc, Partial_Reveal_Object *p_obj) { assert(!gc_obj_is_dead(gc, p_obj)); + +#ifdef ONLY_SSPACE_IN_HEAP + Sspace *sspace = gc_ms_get_sspace((GC_MS*)gc); + return sspace->move_object; +#endif if(gc_is_gen_mode() && gc_match_kind(gc, MINOR_COLLECTION)) return fspace_obj_to_be_forwarded(p_obj); @@ -85,22 +106,23 @@ { finref_reset_repset(gc); pool_iterator_init(pool); - while(Vector_Block *block = pool_iterator_next(pool)){ + Vector_Block *block = pool_iterator_next(pool); + while(block){ POINTER_SIZE_INT *iter = vector_block_iterator_init(block); for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){ - REF* p_ref = (REF*)iter; + REF *p_ref = (REF*)iter; Partial_Reveal_Object* p_obj = read_slot(p_ref); if(*p_ref && obj_need_move(gc, p_obj)) finref_repset_add_entry(gc, p_ref); } + block = pool_iterator_next(pool); } finref_put_repset(gc); } -static inline void fallback_update_fw_ref(REF* p_ref) +static inline void fallback_update_fw_ref(REF *p_ref) { - if(!IS_FALLBACK_COMPACTION) - return; + assert(IS_FALLBACK_COMPACTION); Partial_Reveal_Object *p_obj = read_slot(p_ref); if(obj_belongs_to_nos(p_obj) && obj_is_fw_in_oi(p_obj)){ @@ -120,11 +142,12 @@ gc_reset_finalizable_objects(gc); pool_iterator_init(obj_with_fin_pool); - while(Vector_Block *block = pool_iterator_next(obj_with_fin_pool)){ + Vector_Block *block = pool_iterator_next(obj_with_fin_pool); + while(block){ unsigned int block_has_ref = 0; POINTER_SIZE_INT *iter = vector_block_iterator_init(block); for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){ - REF* p_ref = (REF *)iter; + REF *p_ref = (REF*)iter; if(IS_FALLBACK_COMPACTION) fallback_update_fw_ref(p_ref); // in case that this collection is FALLBACK_COLLECTION Partial_Reveal_Object *p_obj = read_slot(p_ref); @@ -143,10 +166,12 @@ } if(!block_has_ref) vector_block_clear(block); + + block = pool_iterator_next(obj_with_fin_pool); } gc_put_finalizable_objects(gc); - if(!gc_match_kind(gc, MINOR_COLLECTION)) + if(gc_match_kind(gc, MAJOR_COLLECTION|FALLBACK_COLLECTION)) finref_add_repset_from_pool(gc, obj_with_fin_pool); } @@ -155,15 +180,16 @@ extern void trace_obj_in_normal_marking(Collector *collector, void *p_obj); extern void trace_obj_in_fallback_marking(Collector *collector, void *p_ref); extern void trace_obj_in_space_tune_marking(Collector *collector, void *p_obj); +extern void trace_obj_in_ms_marking(Collector *collector, void *p_obj); typedef void (* Trace_Object_Func)(Collector *collector, void *p_ref_or_obj); -// clear the two least significant bits of p_obj first -// add p_ref to repset -static inline void resurrect_obj_tree(Collector *collector, REF* p_ref) + +// Resurrect the obj tree whose root is the obj which p_ref points to +static inline void resurrect_obj_tree(Collector *collector, REF *p_ref) { GC *gc = collector->gc; - GC_Metadata* metadata = gc->metadata; + GC_Metadata *metadata = gc->metadata; Partial_Reveal_Object *p_obj = read_slot(p_ref); assert(p_obj && gc_obj_is_dead(gc, p_obj)); @@ -182,7 +208,7 @@ trace_object = trace_obj_in_space_tune_marking; unsigned int obj_size = vm_object_size(p_obj); #ifdef USE_32BITS_HASHCODE - obj_size += (hashcode_is_set(p_obj))?GC_OBJECT_ALIGNMENT:0; + obj_size += hashcode_is_set(p_obj) ? 
GC_OBJECT_ALIGNMENT : 0; #endif if(!obj_belongs_to_space(p_obj, gc_get_los((GC_Gen*)gc))){ collector->non_los_live_obj_size += obj_size; @@ -194,9 +220,12 @@ trace_object = trace_obj_in_normal_marking; } obj_mark_in_vt(p_obj); + } else if(gc_match_kind(gc, FALLBACK_COLLECTION)){ + trace_object = trace_obj_in_fallback_marking; } else { - assert(gc_match_kind(gc, FALLBACK_COLLECTION)); - trace_object = trace_obj_in_fallback_marking; + assert(gc_match_kind(gc, UNIQUE_SWEEP_COLLECTION)); + p_ref_or_obj = p_obj; + trace_object = trace_obj_in_ms_marking; } collector->trace_stack = free_task_pool_get_entry(metadata); @@ -208,11 +237,12 @@ while(task_block){ POINTER_SIZE_INT *iter = vector_block_iterator_init(task_block); while(!vector_block_iterator_end(task_block, iter)){ - void* p_ref_or_obj = (void *)*iter; + void *p_ref_or_obj = (void*)*iter; assert((gc_match_kind(gc, MINOR_COLLECTION | FALLBACK_COLLECTION) && *(Partial_Reveal_Object **)p_ref_or_obj) - || (gc_match_kind(gc, MAJOR_COLLECTION) && p_ref_or_obj)); + || (gc_match_kind(gc, MAJOR_COLLECTION) && p_ref_or_obj) + || (gc_match_kind(gc, UNIQUE_SWEEP_COLLECTION) && p_ref_or_obj)); trace_object(collector, p_ref_or_obj); - if(collector->result == FALSE) break; /* force return */ + if(collector->result == FALSE) break; /* Resurrection fallback happens; force return */ iter = vector_block_iterator_advance(task_block, iter); } @@ -244,13 +274,15 @@ DURING_RESURRECTION = TRUE; - if(!gc_match_kind(gc, MINOR_COLLECTION)) + if(gc_match_kind(gc, MAJOR_COLLECTION|FALLBACK_COLLECTION)) finref_reset_repset(gc); + pool_iterator_init(finalizable_obj_pool); - while(Vector_Block *block = pool_iterator_next(finalizable_obj_pool)){ + Vector_Block *block = pool_iterator_next(finalizable_obj_pool); + while(block){ POINTER_SIZE_INT *iter = vector_block_iterator_init(block); for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){ - REF* p_ref = (REF *)iter; + REF *p_ref = (REF*)iter; Partial_Reveal_Object *p_obj = read_slot(p_ref); assert(p_obj); @@ -258,7 +290,7 @@ * Because it is outside heap, we can't update in ref fixing. * In minor collection p_ref of the root dead obj is automatically updated while tracing. 
*/ - if(!gc_match_kind(gc, MINOR_COLLECTION)) + if(gc_match_kind(gc, MAJOR_COLLECTION|FALLBACK_COLLECTION)) finref_repset_add_entry(gc, p_ref); /* Perhaps obj has been resurrected by previous resurrections */ @@ -275,9 +307,13 @@ return; /* force return */ } } + + block = pool_iterator_next(finalizable_obj_pool); } - if(!gc_match_kind(gc, MINOR_COLLECTION)) + + if(gc_match_kind(gc, MAJOR_COLLECTION|FALLBACK_COLLECTION)) finref_put_repset(gc); + metadata->pending_finalizers = TRUE; DURING_RESURRECTION = FALSE; @@ -287,16 +323,17 @@ static void identify_dead_refs(GC *gc, Pool *pool) { - if(!gc_match_kind(gc, MINOR_COLLECTION)) + if(gc_match_kind(gc, MAJOR_COLLECTION|FALLBACK_COLLECTION)) finref_reset_repset(gc); pool_iterator_init(pool); - while(Vector_Block *block = pool_iterator_next(pool)){ + Vector_Block *block = pool_iterator_next(pool); + while(block){ POINTER_SIZE_INT *iter = vector_block_iterator_init(block); for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){ - REF* p_ref = (REF*)iter; + REF *p_ref = (REF*)iter; Partial_Reveal_Object *p_obj = read_slot(p_ref); assert(p_obj); - REF* p_referent_field = obj_get_referent_field(p_obj); + REF *p_referent_field = obj_get_referent_field(p_obj); if(IS_FALLBACK_COMPACTION) fallback_update_fw_ref(p_referent_field); Partial_Reveal_Object *p_referent = read_slot(p_referent_field); @@ -306,20 +343,24 @@ continue; } if(!gc_obj_is_dead(gc, p_referent)){ // referent is alive - if(obj_need_move(gc, p_referent)) + if(obj_need_move(gc, p_referent)){ if(gc_match_kind(gc, MINOR_COLLECTION)){ assert(obj_is_fw_in_oi(p_referent)); write_slot(p_referent_field, (obj_get_fw_in_oi(p_referent))); } else { finref_repset_add_entry(gc, p_referent_field); } + } *p_ref = (REF)NULL; continue; } *p_referent_field = (REF)NULL; /* referent is weakly reachable: clear the referent field */ } + + block = pool_iterator_next(pool); } - if(!gc_match_kind(gc, MINOR_COLLECTION)){ + + if(gc_match_kind(gc, MAJOR_COLLECTION|FALLBACK_COLLECTION)){ finref_put_repset(gc); finref_add_repset_from_pool(gc, pool); } @@ -347,7 +388,7 @@ /* * The reason why we don't use identify_dead_refs() to implement this function is - * that we will differentiate phanref from softref & weakref in the future. + * that we will differentiate phanref from weakref in the future. 
*/ static void identify_dead_phanrefs(Collector *collector) { @@ -355,17 +396,18 @@ Finref_Metadata *metadata = gc->finref_metadata; Pool *phanref_pool = metadata->phanref_pool; - if(!gc_match_kind(gc, MINOR_COLLECTION)) + if(gc_match_kind(gc, MAJOR_COLLECTION|FALLBACK_COLLECTION)) finref_reset_repset(gc); // collector_reset_repset(collector); pool_iterator_init(phanref_pool); - while(Vector_Block *block = pool_iterator_next(phanref_pool)){ + Vector_Block *block = pool_iterator_next(phanref_pool); + while(block){ POINTER_SIZE_INT *iter = vector_block_iterator_init(block); for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){ Partial_Reveal_Object **p_ref = (Partial_Reveal_Object **)iter; Partial_Reveal_Object *p_obj = read_slot((REF*)p_ref); assert(p_obj); - REF* p_referent_field = obj_get_referent_field(p_obj); + REF *p_referent_field = obj_get_referent_field(p_obj); if(IS_FALLBACK_COMPACTION) fallback_update_fw_ref(p_referent_field); Partial_Reveal_Object *p_referent = read_slot(p_referent_field); @@ -376,7 +418,7 @@ } if(!gc_obj_is_dead(gc, p_referent)){ // referent is alive if(obj_need_move(gc, p_referent)) - if(gc_match_kind(gc, MINOR_COLLECTION)){ + if(gc_match_kind(gc, MINOR_COLLECTION)){ assert(obj_is_fw_in_oi(p_referent)); write_slot(p_referent_field, (obj_get_fw_in_oi(p_referent))); } else { @@ -394,9 +436,10 @@ * resurrect_obj_tree(collector, p_referent_field); */ } + block = pool_iterator_next(phanref_pool); } // collector_put_repset(collector); - if(!gc_match_kind(gc, MINOR_COLLECTION)){ + if(gc_match_kind(gc, MAJOR_COLLECTION|FALLBACK_COLLECTION)){ finref_put_repset(gc); finref_add_repset_from_pool(gc, phanref_pool); } @@ -407,7 +450,8 @@ Pool *finalizable_obj_pool = gc->finref_metadata->finalizable_obj_pool; Pool *free_pool = gc->finref_metadata->free_pool; - while(Vector_Block *block = pool_get_entry(finalizable_obj_pool)){ + Vector_Block *block = pool_get_entry(finalizable_obj_pool); + while(block){ POINTER_SIZE_INT *iter = vector_block_iterator_init(block); while(!vector_block_iterator_end(block, iter)){ assert(*iter); @@ -417,6 +461,7 @@ } vector_block_clear(block); pool_put_entry(free_pool, block); + block = pool_get_entry(finalizable_obj_pool); } } @@ -424,7 +469,8 @@ { Pool *free_pool = gc->finref_metadata->free_pool; - while(Vector_Block *block = pool_get_entry(ref_pool)){ + Vector_Block *block = pool_get_entry(ref_pool); + while(block){ POINTER_SIZE_INT *iter = vector_block_iterator_init(block); while(!vector_block_iterator_end(block, iter)){ Managed_Object_Handle p_obj = (Managed_Object_Handle)read_slot((REF*)iter); @@ -434,6 +480,7 @@ } vector_block_clear(block); pool_put_entry(free_pool, block); + block = pool_get_entry(ref_pool); } } @@ -453,8 +500,11 @@ put_dead_weak_refs_to_vm(gc, metadata->weakref_pool); put_dead_weak_refs_to_vm(gc, metadata->phanref_pool); - if(/*IS_FALLBACK_COMPACTION && */!pool_is_empty(metadata->fallback_ref_pool)) + /* This is a major collection after resurrection fallback */ + if(!pool_is_empty(metadata->fallback_ref_pool)){ put_dead_weak_refs_to_vm(gc, metadata->fallback_ref_pool); + } + metadata->pending_weakrefs = TRUE; } @@ -464,15 +514,13 @@ Finref_Metadata *metadata = gc->finref_metadata; Pool *finalizable_obj_pool = metadata->finalizable_obj_pool; Pool *obj_with_fin_pool = metadata->obj_with_fin_pool; - Vector_Block *obj_with_fin_block = pool_get_entry(obj_with_fin_pool); - assert(obj_with_fin_block); - - Boolean pending_finalizers = FALSE; - - while(Vector_Block *block = 
pool_get_entry(finalizable_obj_pool)){ + Vector_Block *obj_with_fin_block = finref_get_free_block(gc); + + Vector_Block *block = pool_get_entry(finalizable_obj_pool); + while(block){ POINTER_SIZE_INT *iter = vector_block_iterator_init(block); for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){ - REF* p_ref = (REF*)iter; + REF *p_ref = (REF*)iter; Partial_Reveal_Object *p_obj = read_slot(p_ref); assert(p_obj); /* Perhaps obj has been resurrected by previous resurrections */ @@ -482,12 +530,14 @@ p_obj = read_slot(p_ref); } } - gc_add_finalizer(gc, obj_with_fin_block, p_obj); // Perhaps p_obj has been forwarded, so we use *p_ref rather than p_obj + /* Perhaps obj_with_fin_block has been allocated with a new free block if it is full */ + obj_with_fin_block = gc_add_finalizer(gc, obj_with_fin_block, p_obj); } + block = pool_get_entry(finalizable_obj_pool); } pool_put_entry(obj_with_fin_pool, obj_with_fin_block); - metadata->pending_finalizers = pending_finalizers; + metadata->pending_finalizers = FALSE; } static void dead_weak_refs_fallback(GC *gc, Pool *ref_pool) @@ -497,22 +547,26 @@ Pool *fallback_ref_pool = metadata->fallback_ref_pool; Vector_Block *fallback_ref_block = finref_get_free_block(gc); - while(Vector_Block *block = pool_get_entry(ref_pool)){ + Vector_Block *block = pool_get_entry(ref_pool); + while(block){ POINTER_SIZE_INT *iter = vector_block_iterator_init(block); while(!vector_block_iterator_end(block, iter)){ Partial_Reveal_Object *p_obj = read_slot((REF*)iter); + /* Perhaps fallback_ref_block has been allocated with a new free block if it is full */ if(p_obj) - finref_add_fallback_ref(gc, fallback_ref_block, p_obj); + fallback_ref_block = finref_add_fallback_ref(gc, fallback_ref_block, p_obj); iter = vector_block_iterator_advance(block, iter); } vector_block_clear(block); pool_put_entry(free_pool, block); + block = pool_get_entry(ref_pool); } pool_put_entry(fallback_ref_pool, fallback_ref_block); } -/* Record softrefs and weakrefs whose referents are dead. +/* Record softrefs and weakrefs whose referents are dead + * so that we can update their addr and put them to VM. * In fallback collection these refs will not be considered for enqueueing again, * since their referent fields have been cleared by identify_dead_refs(). */ @@ -523,17 +577,21 @@ if(!softref_pool_is_empty(gc) || !weakref_pool_is_empty(gc)) metadata->pending_weakrefs = TRUE; + /* We only use fallback_ref_pool in resurrection fallback so it must be empty */ + assert(pool_is_empty(metadata->fallback_ref_pool)); + dead_weak_refs_fallback(gc, metadata->softref_pool); dead_weak_refs_fallback(gc, metadata->weakref_pool); gc_clear_weakref_pools(gc); } +/* Deal with resurrection fallback */ static void resurrection_fallback_handler(GC *gc) { Finref_Metadata *metadata = gc->finref_metadata; - /* Repset pool should be empty, because we don't add anthing to this pool in Minor Collection. */ + /* Repset pool should be empty, because we don't add anything to this pool in Minor Collection. 
*/ assert(pool_is_empty(metadata->repset_pool)); finalizable_objs_fallback(gc); @@ -571,6 +629,7 @@ gc_set_weakref_sets(gc); gc_clear_weakref_pools(gc); } + void gc_put_finref_to_vm(GC *gc) { put_dead_refs_to_vm(gc); @@ -582,6 +641,7 @@ Pool *obj_with_fin_pool = gc->finref_metadata->obj_with_fin_pool; Pool *free_pool = gc->finref_metadata->free_pool; + /* Because we are manipulating obj_with_fin_pool, GC lock must be hold in case that GC happens */ vm_gc_lock_enum(); /* FIXME: holding gc lock is not enough, perhaps there are mutators that are allocating objects with finalizer * could be fixed as this: @@ -591,7 +651,9 @@ lock(gc->mutator_list_lock); gc_set_obj_with_fin(gc); unlock(gc->mutator_list_lock); - while(Vector_Block *block = pool_get_entry(obj_with_fin_pool)){ + + Vector_Block *block = pool_get_entry(obj_with_fin_pool); + while(block){ POINTER_SIZE_INT *iter = vector_block_iterator_init(block); while(!vector_block_iterator_end(block, iter)){ Managed_Object_Handle p_obj = (Managed_Object_Handle)read_slot((REF*)iter); @@ -601,22 +663,25 @@ } vector_block_clear(block); pool_put_entry(free_pool, block); + block = pool_get_entry(obj_with_fin_pool); } + vm_gc_unlock_enum(); } static void update_referent_field_ignore_finref(GC *gc, Pool *pool) { - while(Vector_Block *block = pool_get_entry(pool)){ + Vector_Block *block = pool_get_entry(pool); + while(block){ POINTER_SIZE_INT *iter = vector_block_iterator_init(block); for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){ - REF* p_ref = (REF*)iter; + REF *p_ref = (REF*)iter; Partial_Reveal_Object *p_obj = read_slot(p_ref); assert(p_obj); - REF* p_referent_field = obj_get_referent_field(p_obj); + REF *p_referent_field = obj_get_referent_field(p_obj); if(IS_FALLBACK_COMPACTION) fallback_update_fw_ref(p_referent_field); - Partial_Reveal_Object* p_referent = read_slot(p_referent_field); + Partial_Reveal_Object *p_referent = read_slot(p_referent_field); if(!p_referent){ // referent field has been cleared *p_ref = (REF)NULL; @@ -635,6 +700,7 @@ } *p_referent_field = (REF)NULL; /* referent is weakly reachable: clear the referent field */ } + block = pool_get_entry(pool); } } @@ -642,27 +708,40 @@ { Finref_Metadata *metadata = gc->finref_metadata; - if(!gc_match_kind(gc, MINOR_COLLECTION)) + if(gc_match_kind(gc, MAJOR_COLLECTION|FALLBACK_COLLECTION)) finref_reset_repset(gc); update_referent_field_ignore_finref(gc, metadata->softref_pool); update_referent_field_ignore_finref(gc, metadata->weakref_pool); update_referent_field_ignore_finref(gc, metadata->phanref_pool); - if(!gc_match_kind(gc, MINOR_COLLECTION)) + if(gc_match_kind(gc, MAJOR_COLLECTION|FALLBACK_COLLECTION)) finref_put_repset(gc); } -static void move_compaction_update_ref(GC *gc, REF* p_ref) +extern void* los_boundary; +/* Move compaction needs special treament when updating referent field */ +static inline void move_compaction_update_ref(GC *gc, REF *p_ref) { - /* If p_ref belongs to heap, it must be a referent field pointer */ - if(address_belongs_to_gc_heap((void *)p_ref, gc) && (space_of_addr(gc, p_ref))->move_object){ + /* There are only two kinds of p_ref being added into finref_repset_pool: + * 1. p_ref is in a vector block from one finref pool; + * 2. p_ref is a referent field. + * So if p_ref belongs to heap, it must be a referent field pointer. + * Objects except a tree root which are resurrected need not be recorded in finref_repset_pool. 
+ */ +// if(address_belongs_to_gc_heap(p_ref, gc) && !address_belongs_to_space(p_ref, gc_get_los((GC_Gen*)gc))){ +// && space_of_addr(gc, p_ref)->move_object //comment this out because all spaces are movable in major collection. + if(address_belongs_to_gc_heap(p_ref, gc) && (p_ref >= los_boundary)){ unsigned int offset = get_gc_referent_offset(); - Partial_Reveal_Object *p_old_ref = (Partial_Reveal_Object *)((POINTER_SIZE_INT)p_ref - offset); + Partial_Reveal_Object *p_old_ref = (Partial_Reveal_Object*)((POINTER_SIZE_INT)p_ref - offset); Partial_Reveal_Object *p_new_ref = ref_to_obj_ptr(obj_get_fw_in_table(p_old_ref)); p_ref = (REF*)((POINTER_SIZE_INT)p_new_ref + offset); } - Partial_Reveal_Object* p_obj = read_slot(p_ref); + Partial_Reveal_Object *p_obj = read_slot(p_ref); assert(space_of_addr(gc, (void*)p_obj)->move_object); - *p_ref = obj_get_fw_in_table(p_obj); +// if(obj_belongs_to_space(p_obj, gc_get_los((GC_Gen*)gc))) + if(p_obj < los_boundary) + write_slot(p_ref , obj_get_fw_in_oi(p_obj)); + else + *p_ref = obj_get_fw_in_table(p_obj); } extern Boolean IS_MOVE_COMPACT; @@ -671,7 +750,7 @@ static void destructively_fix_finref_pool(GC *gc, Pool *pool, Boolean pointer_addr_in_pool) { Finref_Metadata *metadata = gc->finref_metadata; - REF* p_ref; + REF *p_ref; Partial_Reveal_Object *p_obj; /* NOTE:: this is destructive to the root sets. */ @@ -703,12 +782,13 @@ static void nondestructively_fix_finref_pool(GC *gc, Pool *pool, Boolean pointer_addr_in_pool) { Finref_Metadata *metadata = gc->finref_metadata; - REF* p_ref; + REF *p_ref; Partial_Reveal_Object *p_obj; /* NOTE:: this is nondestructive to the root sets. */ pool_iterator_init(pool); - while(Vector_Block *repset = pool_iterator_next(pool)){ + Vector_Block *repset = pool_iterator_next(pool); + while(repset){ POINTER_SIZE_INT *iter = vector_block_iterator_init(repset); for(; !vector_block_iterator_end(repset,iter); iter = vector_block_iterator_advance(repset,iter)){ if(pointer_addr_in_pool) @@ -725,6 +805,7 @@ move_compaction_update_ref(gc, p_ref); } } + repset = pool_iterator_next(pool); } } @@ -732,13 +813,15 @@ { assert(!gc_match_kind(gc, MINOR_COLLECTION)); - Finref_Metadata* metadata = gc->finref_metadata; + Finref_Metadata *metadata = gc->finref_metadata; Pool *repset_pool = metadata->repset_pool; Pool *fallback_ref_pool = metadata->fallback_ref_pool; destructively_fix_finref_pool(gc, repset_pool, TRUE); - if(IS_FALLBACK_COMPACTION && !pool_is_empty(fallback_ref_pool)) + if(!pool_is_empty(fallback_ref_pool)){ + assert(IS_FALLBACK_COMPACTION); nondestructively_fix_finref_pool(gc, fallback_ref_pool, FALSE); + } } void gc_activate_finref_threads(GC *gc) Index: src/finalizer_weakref/finalizer_weakref.h =================================================================== --- src/finalizer_weakref/finalizer_weakref.h (revision 559012) +++ src/finalizer_weakref/finalizer_weakref.h (working copy) @@ -47,7 +47,7 @@ * } */ -inline REF* obj_get_referent_field(Partial_Reveal_Object *p_obj) +inline REF *obj_get_referent_field(Partial_Reveal_Object *p_obj) { assert(p_obj); return (REF*)(( Byte*)p_obj+get_gc_referent_offset()); Index: src/finalizer_weakref/finalizer_weakref_metadata.cpp =================================================================== --- src/finalizer_weakref/finalizer_weakref_metadata.cpp (revision 559012) +++ src/finalizer_weakref/finalizer_weakref_metadata.cpp (working copy) @@ -118,8 +118,8 @@ } -/* called when there is no Vector_Block in finref_metadata->free_pool - * extend the pool by a segment +/* Extending the 
pool by a segment + * Called when there is no vector block in finref_metadata->free_pool */ Vector_Block *finref_metadata_extend(void) { @@ -157,14 +157,7 @@ return block; } -/* called when GC completes and there is no Vector_Block in the last five pools of gc->finref_metadata - * shrink the free pool by half - */ -static void finref_metadata_shrink(GC *gc) -{ -} - /* reset obj_with_fin vector block of each mutator */ static void gc_reset_obj_with_fin(GC *gc) { @@ -216,7 +209,7 @@ unsigned int num_active_collectors = gc->num_active_collectors; for(unsigned int i = 0; i < num_active_collectors; i++) { - Collector* collector = gc->collectors[i]; + Collector *collector = gc->collectors[i]; pool_put_entry(metadata->softref_pool, collector->softref_set); pool_put_entry(metadata->weakref_pool, collector->weakref_set); pool_put_entry(metadata->phanref_pool, collector->phanref_set); @@ -241,7 +234,9 @@ assert(metadata->finalizable_obj_set == NULL); assert(metadata->repset == NULL); - while(Vector_Block *block = pool_get_entry(obj_with_fin_pool)){ + /* Extract empty blocks in obj_with_fin_pool and put them into free_pool */ + Vector_Block *block = pool_get_entry(obj_with_fin_pool); + while(block){ POINTER_SIZE_INT *iter = vector_block_iterator_init(block); if(vector_block_iterator_end(block, iter)){ vector_block_clear(block); @@ -249,6 +244,7 @@ } else { pool_put_entry(finalizable_obj_pool, block); } + block = pool_get_entry(obj_with_fin_pool); } assert(pool_is_empty(obj_with_fin_pool)); metadata->obj_with_fin_pool = finalizable_obj_pool; @@ -258,59 +254,60 @@ } -static inline void finref_metadata_add_entry(GC *gc, Vector_Block* &vector_block_in_use, Pool *pool, POINTER_SIZE_INT value) +static inline Vector_Block *finref_metadata_add_entry(GC *gc, Vector_Block *vector_block_in_use, Pool *pool, POINTER_SIZE_INT value) { assert(vector_block_in_use); + assert(!vector_block_is_full(vector_block_in_use)); assert(value); - - Vector_Block* block = vector_block_in_use; - vector_block_add_entry(block, value); - if(!vector_block_is_full(block)) return; + vector_block_add_entry(vector_block_in_use, value); - pool_put_entry(pool, block); - vector_block_in_use = finref_get_free_block(gc); + if(!vector_block_is_full(vector_block_in_use)) + return vector_block_in_use; + + pool_put_entry(pool, vector_block_in_use); + return finref_get_free_block(gc); } void mutator_add_finalizer(Mutator *mutator, Partial_Reveal_Object *p_obj) { GC *gc = mutator->gc; Finref_Metadata *metadata = gc->finref_metadata; - finref_metadata_add_entry(gc, mutator->obj_with_fin, metadata->obj_with_fin_pool, (POINTER_SIZE_INT)obj_ptr_to_ref(p_obj)); + mutator->obj_with_fin = finref_metadata_add_entry(gc, mutator->obj_with_fin, metadata->obj_with_fin_pool, (POINTER_SIZE_INT)obj_ptr_to_ref(p_obj)); } /* This function is only used by resurrection fallback */ -void gc_add_finalizer(GC *gc, Vector_Block* &vector_block_in_use, Partial_Reveal_Object *p_obj) +Vector_Block *gc_add_finalizer(GC *gc, Vector_Block *vector_block_in_use, Partial_Reveal_Object *p_obj) { Finref_Metadata *metadata = gc->finref_metadata; - finref_metadata_add_entry(gc, vector_block_in_use, metadata->obj_with_fin_pool, (POINTER_SIZE_INT)obj_ptr_to_ref(p_obj)); + return finref_metadata_add_entry(gc, vector_block_in_use, metadata->obj_with_fin_pool, (POINTER_SIZE_INT)obj_ptr_to_ref(p_obj)); } void gc_add_finalizable_obj(GC *gc, Partial_Reveal_Object *p_obj) { Finref_Metadata *metadata = gc->finref_metadata; - finref_metadata_add_entry(gc, metadata->finalizable_obj_set, 
metadata->finalizable_obj_pool, (POINTER_SIZE_INT)obj_ptr_to_ref(p_obj)); + metadata->finalizable_obj_set = finref_metadata_add_entry(gc, metadata->finalizable_obj_set, metadata->finalizable_obj_pool, (POINTER_SIZE_INT)obj_ptr_to_ref(p_obj)); } void collector_add_softref(Collector *collector, Partial_Reveal_Object *ref) { GC *gc = collector->gc; Finref_Metadata *metadata = gc->finref_metadata; - finref_metadata_add_entry(gc, collector->softref_set, metadata->softref_pool, (POINTER_SIZE_INT)obj_ptr_to_ref(ref)); + collector->softref_set = finref_metadata_add_entry(gc, collector->softref_set, metadata->softref_pool, (POINTER_SIZE_INT)obj_ptr_to_ref(ref)); } void collector_add_weakref(Collector *collector, Partial_Reveal_Object *ref) { GC *gc = collector->gc; Finref_Metadata *metadata = gc->finref_metadata; - finref_metadata_add_entry(gc, collector->weakref_set, metadata->weakref_pool, (POINTER_SIZE_INT)obj_ptr_to_ref(ref)); + collector->weakref_set = finref_metadata_add_entry(gc, collector->weakref_set, metadata->weakref_pool, (POINTER_SIZE_INT)obj_ptr_to_ref(ref)); } void collector_add_phanref(Collector *collector, Partial_Reveal_Object *ref) { GC *gc = collector->gc; Finref_Metadata *metadata = gc->finref_metadata; - finref_metadata_add_entry(gc, collector->phanref_set, metadata->phanref_pool, (POINTER_SIZE_INT)obj_ptr_to_ref(ref)); + collector->phanref_set = finref_metadata_add_entry(gc, collector->phanref_set, metadata->phanref_pool, (POINTER_SIZE_INT)obj_ptr_to_ref(ref)); } void finref_repset_add_entry(GC *gc, REF* p_ref) @@ -318,29 +315,32 @@ assert(*p_ref); assert(read_slot(p_ref)); Finref_Metadata *metadata = gc->finref_metadata; - finref_metadata_add_entry(gc, metadata->repset, metadata->repset_pool, (POINTER_SIZE_INT)p_ref); + metadata->repset = finref_metadata_add_entry(gc, metadata->repset, metadata->repset_pool, (POINTER_SIZE_INT)p_ref); } /* This function is only used by resurrection fallback */ -void finref_add_fallback_ref(GC *gc, Vector_Block* &vector_block_in_use, Partial_Reveal_Object *p_obj) +Vector_Block *finref_add_fallback_ref(GC *gc, Vector_Block *vector_block_in_use, Partial_Reveal_Object *p_obj) { assert(p_obj); Finref_Metadata *metadata = gc->finref_metadata; - finref_metadata_add_entry(gc, vector_block_in_use, metadata->fallback_ref_pool, (POINTER_SIZE_INT)obj_ptr_to_ref(p_obj)); + return finref_metadata_add_entry(gc, vector_block_in_use, metadata->fallback_ref_pool, (POINTER_SIZE_INT)obj_ptr_to_ref(p_obj)); } -static inline Boolean pool_has_no_ref(Pool *pool) +static Boolean pool_has_no_ref(Pool *pool) { if(pool_is_empty(pool)) return TRUE; + pool_iterator_init(pool); - while(Vector_Block *block = pool_iterator_next(pool)){ + Vector_Block *block = pool_iterator_next(pool); + while(block){ POINTER_SIZE_INT *iter = vector_block_iterator_init(block); while(!vector_block_iterator_end(block, iter)){ if(*iter) return FALSE; iter = vector_block_iterator_advance(block, iter); } + block = pool_iterator_next(pool); } return TRUE; } @@ -375,7 +375,7 @@ return pool_has_no_ref(gc->finref_metadata->repset_pool); } -static inline void finref_metadata_clear_pool(Pool *pool) +static void finref_metadata_clear_pool(Pool *pool) { while(Vector_Block* block = pool_get_entry(pool)) { @@ -390,8 +390,3 @@ finref_metadata_clear_pool(gc->finref_metadata->weakref_pool); finref_metadata_clear_pool(gc->finref_metadata->phanref_pool); } - -void gc_clear_finref_repset_pool(GC *gc) -{ - finref_metadata_clear_pool(gc->finref_metadata->repset_pool); -} Index: 
src/finalizer_weakref/finalizer_weakref_metadata.h =================================================================== --- src/finalizer_weakref/finalizer_weakref_metadata.h (revision 559012) +++ src/finalizer_weakref/finalizer_weakref_metadata.h (working copy) @@ -34,8 +34,8 @@ Pool *free_pool; // list of free buffers for the five pools below - Pool *obj_with_fin_pool; // list of objects that have finalizer; - // these objects are added in when they are allocated + Pool *obj_with_fin_pool; // list of objects that have finalizers + // these objects are added in when they are being allocated Pool *finalizable_obj_pool; // temporary buffer for finalizable objects identified during one single GC Pool *softref_pool; // temporary buffer for soft references identified during one single GC @@ -46,13 +46,13 @@ Pool *fallback_ref_pool; // temporary buffer for weakref needing to be put to vm when resurrection fallback happens - Vector_Block *finalizable_obj_set; // buffer for finalizable_objects_pool + Vector_Block *finalizable_obj_set; // buffer for finalizable_obj_pool Vector_Block *repset; // buffer for repset_pool Boolean pending_finalizers; // there are objects waiting to be finalized Boolean pending_weakrefs; // there are weak references waiting to be enqueued - unsigned int gc_referent_offset; // the referent field's offset in Reference Class + unsigned int gc_referent_offset; // the referent field's offset in Reference Class; it is a constant during VM's liftime }Finref_Metadata; extern unsigned int get_gc_referent_offset(void); @@ -68,13 +68,13 @@ extern void gc_reset_finref_metadata(GC *gc); extern void mutator_add_finalizer(Mutator *mutator, Partial_Reveal_Object *ref); -extern void gc_add_finalizer(GC *gc, Vector_Block* &vector_block_in_use, Partial_Reveal_Object *ref); +extern Vector_Block *gc_add_finalizer(GC *gc, Vector_Block *vector_block_in_use, Partial_Reveal_Object *ref); extern void gc_add_finalizable_obj(GC *gc, Partial_Reveal_Object *ref); extern void collector_add_softref(Collector *collector, Partial_Reveal_Object *ref); extern void collector_add_weakref(Collector *collector, Partial_Reveal_Object *ref); extern void collector_add_phanref(Collector *collector, Partial_Reveal_Object *ref); extern void finref_repset_add_entry(GC *gc, REF* ref); -extern void finref_add_fallback_ref(GC *gc, Vector_Block* &vector_block_in_use, Partial_Reveal_Object *p_ref); +extern Vector_Block *finref_add_fallback_ref(GC *gc, Vector_Block *vector_block_in_use, Partial_Reveal_Object *p_ref); extern Boolean obj_with_fin_pool_is_empty(GC *gc); extern Boolean finalizable_obj_pool_is_empty(GC *gc); @@ -86,6 +86,7 @@ extern void gc_clear_weakref_pools(GC *gc); extern Vector_Block *finref_metadata_extend(void); +/* Every place requesting a free vector block in finref should call this function */ inline Vector_Block *finref_get_free_block(GC *gc) { Vector_Block *block = pool_get_entry(gc->finref_metadata->free_pool); @@ -104,7 +105,8 @@ Finref_Metadata *metadata = gc->finref_metadata; assert(!metadata->finalizable_obj_set); - metadata->finalizable_obj_set = pool_get_entry(metadata->free_pool); + metadata->finalizable_obj_set = finref_get_free_block(gc); + assert(metadata->finalizable_obj_set); } /* called after loop of recording finalizable objects */ inline void gc_put_finalizable_objects(GC *gc) @@ -121,7 +123,8 @@ Finref_Metadata *metadata = gc->finref_metadata; assert(!metadata->repset); - metadata->repset = pool_get_entry(metadata->free_pool); + metadata->repset = finref_get_free_block(gc); + 
assert(metadata->repset); } /* called after loop of recording repointed reference */ inline void finref_put_repset(GC *gc) Index: src/gen/gen.cpp =================================================================== --- src/gen/gen.cpp (revision 559012) +++ src/gen/gen.cpp (working copy) @@ -25,6 +25,7 @@ #include "../verify/verify_live_heap.h" #include "../common/space_tuner.h" #include "../common/compressed_ref.h" + #ifdef USE_32BITS_HASHCODE #include "../common/hashcode.h" #endif @@ -231,9 +232,6 @@ space_committed_size((Space*)gc_gen->mos) + space_committed_size((Space*)gc_gen->los); - set_native_finalizer_thread_flag(!IGNORE_FINREF); - set_native_ref_enqueue_thread_flag(!IGNORE_FINREF); - return; } @@ -244,8 +242,8 @@ Space* los = (Space*)gc_gen->los; POINTER_SIZE_INT nos_size = space_committed_size(nos); - POINTER_SIZE_INT mos_size = space_committed_size(nos); - POINTER_SIZE_INT los_size = space_committed_size(nos); + POINTER_SIZE_INT mos_size = space_committed_size(mos); + POINTER_SIZE_INT los_size = space_committed_size(los); void* nos_start = nos->heap_start; void* mos_start = mos->heap_start; @@ -270,11 +268,10 @@ Space* gc_get_nos(GC_Gen* gc){ return (Space*)gc->nos;} Space* gc_get_mos(GC_Gen* gc){ return (Space*)gc->mos;} Space* gc_get_los(GC_Gen* gc){ return (Space*)gc->los;} -Space* gc_get_pos(GC_Gen* gc) { return NULL; } + void gc_set_nos(GC_Gen* gc, Space* nos){ gc->nos = (Fspace*)nos;} void gc_set_mos(GC_Gen* gc, Space* mos){ gc->mos = (Mspace*)mos;} void gc_set_los(GC_Gen* gc, Space* los){ gc->los = (Lspace*)los;} -void gc_set_pos(GC_Gen* gc, Space* pos) {} void* mos_alloc(unsigned size, Allocator *allocator){return mspace_alloc(size, allocator);} void* nos_alloc(unsigned size, Allocator *allocator){return fspace_alloc(size, allocator);} @@ -296,6 +293,9 @@ else gc->collect_kind = MINOR_COLLECTION; +#ifdef ONLY_SSPACE_IN_HEAP + gc->collect_kind = UNIQUE_SWEEP_COLLECTION; +#endif return; } @@ -372,15 +372,14 @@ Fspace* nos = gc->nos; Lspace* los = gc->los; /*We can not tolerate gc->survive_ratio be greater than threshold twice continuously. - *Or, we must adjust heap size - */ + *Or, we must adjust heap size */ static unsigned int tolerate = 0; POINTER_SIZE_INT heap_total_size = los->committed_heap_size + mos->committed_heap_size + nos->committed_heap_size; assert(heap_total_size == gc->committed_heap_size); - assert(nos->surviving_size == 0); - POINTER_SIZE_INT heap_surviving_size = mos->surviving_size + los->surviving_size; + assert(nos->last_surviving_size == 0); + POINTER_SIZE_INT heap_surviving_size = (POINTER_SIZE_INT)(mos->period_surviving_size + los->period_surviving_size); assert(heap_total_size > heap_surviving_size); float heap_survive_ratio = (float)heap_surviving_size / (float)heap_total_size; @@ -428,7 +427,8 @@ } Boolean IS_FALLBACK_COMPACTION = FALSE; /* only for debugging, don't use it. 
*/ - +static unsigned int mspace_num_used_blocks_before_minor; +static unsigned int mspace_num_used_blocks_after_minor; void gc_gen_reclaim_heap(GC_Gen* gc) { if(verify_live_heap) gc_verify_heap((GC*)gc, TRUE); @@ -442,13 +442,28 @@ if(gc_match_kind((GC*)gc, MINOR_COLLECTION)){ /* FIXME:: move_object is only useful for nongen_slide_copy */ - gc->mos->move_object = FALSE; + gc->mos->move_object = 0; + /* This is for compute mspace->last_alloced_size */ + + mspace_num_used_blocks_before_minor = mspace->free_block_idx - mspace->first_block_idx; fspace_collection(gc->nos); - gc->mos->move_object = TRUE; + mspace_num_used_blocks_after_minor = mspace->free_block_idx - mspace->first_block_idx; + assert( mspace_num_used_blocks_before_minor <= mspace_num_used_blocks_after_minor ); + mspace->last_alloced_size = GC_BLOCK_SIZE_BYTES * ( mspace_num_used_blocks_after_minor - mspace_num_used_blocks_before_minor ); + + /*If the current minor collection failed, i.e. there happens a fallback, we should not do the minor sweep of LOS*/ + if(gc->collect_result != FALSE) + lspace_collection(gc->los); + + gc->mos->move_object = 1; }else{ /* process mos and nos together in one compaction */ + gc->los->move_object = 1; + mspace_collection(gc->mos); /* fspace collection is included */ lspace_collection(gc->los); + + gc->los->move_object = 0; } if(gc->collect_result == FALSE && gc_match_kind((GC*)gc, MINOR_COLLECTION)){ @@ -463,10 +478,12 @@ gc->collect_kind = FALLBACK_COLLECTION; if(verify_live_heap) event_gc_collect_kind_changed((GC*)gc); - + + gc->los->move_object = 1; mspace_collection(gc->mos); /* fspace collection is included */ lspace_collection(gc->los); - + gc->los->move_object = 0; + IS_FALLBACK_COMPACTION = FALSE; } @@ -478,15 +495,49 @@ if(verify_live_heap) gc_verify_heap((GC*)gc, FALSE); - /*Fixme: clear root set here to support verify.*/ + /* FIXME:: clear root set here to support verify. */ #ifdef COMPRESS_REFERENCE gc_set_pool_clear(gc->metadata->gc_uncompressed_rootset_pool); #endif - assert(!gc->los->move_object); return; } +void gc_gen_update_space_before_gc(GC_Gen *gc) +{ + /* Update before every GC to avoid the atomic operation in every fspace_alloc_block */ + assert( gc->nos->free_block_idx >= gc->nos->first_block_idx ); + gc->nos->last_alloced_size = GC_BLOCK_SIZE_BYTES * ( gc->nos->free_block_idx - gc->nos->first_block_idx ); + + gc->nos->accumu_alloced_size += gc->nos->last_alloced_size; + gc->los->accumu_alloced_size += gc->los->last_alloced_size; +} + +void gc_gen_update_space_after_gc(GC_Gen *gc) +{ + /* Minor collection, but also can be every n minor collections, use fspace->num_collections to identify. */ + if (gc_match_kind((GC*)gc, MINOR_COLLECTION)){ + gc->mos->accumu_alloced_size += gc->mos->last_alloced_size; + /* The alloced_size reset operation of mos and nos is not necessary, because they are not accumulated. + * But los->last_alloced_size must be reset, because it is accumulated. */ + gc->los->last_alloced_size = 0; + /* Major collection, but also can be every n major collections, use mspace->num_collections to identify. 
*/ + }else{ + gc->mos->total_alloced_size += gc->mos->accumu_alloced_size; + gc->mos->last_alloced_size = 0; + gc->mos->accumu_alloced_size = 0; + + gc->nos->total_alloced_size += gc->nos->accumu_alloced_size; + gc->nos->last_alloced_size = 0; + gc->nos->accumu_alloced_size = 0; + + gc->los->total_alloced_size += gc->los->accumu_alloced_size; + gc->los->last_alloced_size = 0; + gc->los->accumu_alloced_size = 0; + + } +} + void gc_gen_iterate_heap(GC_Gen *gc) { /** the function is called after stoped the world **/ Index: src/gen/gen.h =================================================================== --- src/gen/gen.h (revision 559012) +++ src/gen/gen.h (working copy) @@ -155,11 +155,10 @@ Space* gc_get_nos(GC_Gen* gc); Space* gc_get_mos(GC_Gen* gc); Space* gc_get_los(GC_Gen* gc); -Space* gc_get_pos(GC_Gen* gc); + void gc_set_nos(GC_Gen* gc, Space* nos); void gc_set_mos(GC_Gen* gc, Space* mos); void gc_set_los(GC_Gen* gc, Space* los); -void gc_set_pos(GC_Gen* gc, Space* pos); unsigned int gc_get_processor_num(GC_Gen* gc); @@ -173,7 +172,10 @@ void gc_gen_assign_free_area_to_mutators(GC_Gen* gc); void gc_gen_adjust_heap_size(GC_Gen* gc, int64 pause_time); - + +void gc_gen_update_space_before_gc(GC_Gen* gc); +void gc_gen_update_space_after_gc(GC_Gen* gc); + void gc_gen_mode_adapt_init(GC_Gen *gc); void gc_gen_iterate_heap(GC_Gen *gc); Index: src/gen/gen_adapt.cpp =================================================================== --- src/gen/gen_adapt.cpp (revision 559012) +++ src/gen/gen_adapt.cpp (working copy) @@ -196,10 +196,11 @@ POINTER_SIZE_INT mos_free_size = space_free_memory_size(mspace); POINTER_SIZE_INT nos_free_size = space_free_memory_size(fspace); + assert(nos_free_size == space_committed_size((Space*)fspace)); POINTER_SIZE_INT total_free_size = mos_free_size + nos_free_size; if(!gc_match_kind((GC*)gc, MINOR_COLLECTION)) gc->force_gen_mode = FALSE; if(!gc->force_gen_mode){ - /*For major collection:*/ + /*Major collection:*/ if(!gc_match_kind((GC*)gc, MINOR_COLLECTION)){ mspace->time_collections += pause_time; @@ -212,8 +213,7 @@ /*If major is caused by LOS, or collection kind is EXTEND_COLLECTION, all survive ratio is not updated.*/ if((gc->cause != GC_CAUSE_LOS_IS_FULL) && (!gc_match_kind((GC*)gc, EXTEND_COLLECTION))){ - POINTER_SIZE_INT major_surviving_size = space_committed_size((Space*)mspace) - mos_free_size; - survive_ratio = (float)major_surviving_size/(float)space_committed_size((Space*)mspace); + survive_ratio = (float)mspace->period_surviving_size/(float)mspace->committed_heap_size; mspace->survive_ratio = survive_ratio; } /*If there is no minor collection at all, we must give mspace expected threshold a reasonable value.*/ @@ -223,11 +223,14 @@ *a conservative and reasonable number to avoid next fall back. *In fallback compaction, the survive_ratio of mspace must be 1.*/ if(gc_match_kind((GC*)gc, FALLBACK_COLLECTION)) fspace->survive_ratio = 1; - /*For minor collection:*/ - }else{ + + } + /*Minor collection:*/ + else + { /*Give a hint to mini_free_ratio. 
*/ if(fspace->num_collections == 1){ - /*fixme: This is only set for tuning the first warehouse!*/ + /*Fixme: This is only set for tuning the first warehouse!*/ Tslow = pause_time / gc->survive_ratio; SMax = (POINTER_SIZE_INT)((float)(gc->committed_heap_size - gc->los->committed_heap_size) * ( 1 - gc->survive_ratio )); last_total_free_size = gc->committed_heap_size - gc->los->committed_heap_size; @@ -237,6 +240,9 @@ POINTER_SIZE_INT free_size_threshold; POINTER_SIZE_INT minor_surviving_size = last_total_free_size - total_free_size; + /*If the first GC is caused by LOS, mspace->last_alloced_size should be smaller than this minor_surviving_size + *Because the last_total_free_size is not accurate.*/ + if(fspace->num_collections != 1) assert(minor_surviving_size == mspace->last_alloced_size); float k = Tslow * fspace->num_collections/fspace->time_collections; float m = ((float)minor_surviving_size)*1.0f/((float)(SMax - GC_MOS_MIN_EXTRA_REMAIN_SIZE )); Index: src/los/lspace.cpp =================================================================== --- src/los/lspace.cpp (revision 559012) +++ src/los/lspace.cpp (working copy) @@ -49,7 +49,7 @@ lspace->gc = gc; /*LOS_Shrink:*/ - lspace->move_object = FALSE; + lspace->move_object = 0; /*Treat with free area buddies*/ lspace->free_pool = (Free_Area_Pool*)STD_MALLOC(sizeof(Free_Area_Pool)); @@ -61,7 +61,12 @@ lspace->num_collections = 0; lspace->time_collections = 0; lspace->survive_ratio = 0.5f; - + lspace->last_alloced_size = 0; + lspace->accumu_alloced_size = 0; + lspace->total_alloced_size = 0; + lspace->last_surviving_size = 0; + lspace->period_surviving_size = 0; + gc_set_los((GC_Gen*)gc, (Space*)lspace); p_global_lspace_move_obj = &(lspace->move_object); los_boundary = lspace->heap_end; @@ -104,15 +109,18 @@ void lspace_collection(Lspace* lspace) { - /* heap is marked already, we need only sweep here. */ lspace->num_collections ++; - lspace_reset_after_collection(lspace); - /*When sliding compacting lspace, we don't need to sweep it anymore. - What's more, the assumption that the first word of one KB must be zero when iterating - lspace in that function lspace_get_next_marked_object is not true*/ - if(!lspace->move_object) lspace_sweep(lspace); - else lspace->surviving_size = (POINTER_SIZE_INT)lspace->scompact_fa_start - (POINTER_SIZE_INT)lspace->heap_start; - lspace->move_object = FALSE; + + if(!lspace->move_object){ + lspace_reset_for_sweep(lspace); + lspace_sweep(lspace); + }else{ + /* The real action of LOS sliding compaction is done together with MOS compaction. */ + lspace_reset_for_slide(lspace); + /* When sliding compacting lspace, we don't need to sweep it anymore. + * What's more, the assumption that the first word of one KB must be zero when iterating + * lspace in that function lspace_get_next_marked_object is not true */ + } return; } Index: src/los/lspace.h =================================================================== --- src/los/lspace.h (revision 559012) +++ src/los/lspace.h (working copy) @@ -48,10 +48,18 @@ GC* gc; /*LOS_Shrink:This field stands for sliding compact to lspace */ Boolean move_object; - /*For_statistic: size allocated science last time collect los, ie. last major*/ - volatile POINTER_SIZE_INT alloced_size; - /*For_statistic: size survived after lspace_sweep*/ - POINTER_SIZE_INT surviving_size; + + /* Size allocted since last minor collection. */ + volatile uint64 last_alloced_size; + /* Size allocted since last major collection. */ + uint64 accumu_alloced_size; + /* Total size allocated since VM starts. 
*/ + uint64 total_alloced_size; + + /* Size survived from last collection. */ + uint64 last_surviving_size; + /* Size survived after a certain period. */ + uint64 period_surviving_size; /* END of Space --> */ Free_Area_Pool* free_pool; @@ -72,8 +80,7 @@ void* lspace_try_alloc(Lspace* lspace, POINTER_SIZE_INT alloc_size); void lspace_sliding_compact(Collector* collector, Lspace* lspace); void lspace_compute_object_target(Collector* collector, Lspace* lspace); -void lspace_sweep(Lspace* lspace); -void lspace_reset_after_collection(Lspace* lspace); +void lspace_reset_for_slide(Lspace* lspace); void lspace_collection(Lspace* lspace); inline POINTER_SIZE_INT lspace_free_memory_size(Lspace* lspace){ /* FIXME:: */ return 0; } @@ -126,4 +133,49 @@ POINTER_SIZE_INT lspace_get_failure_size(Lspace* lspace); +inline Partial_Reveal_Object* lspace_get_next_marked_object_by_oi( Lspace* lspace, unsigned int* iterate_index) +{ + POINTER_SIZE_INT next_area_start = (POINTER_SIZE_INT)lspace->heap_start + (*iterate_index) * KB; + BOOLEAN reach_heap_end = 0; + unsigned int hash_extend_size = 0; + + while(!reach_heap_end){ + //FIXME: This while shoudl be if, try it! + while(!*((POINTER_SIZE_INT*)next_area_start)){ + assert(((Free_Area*)next_area_start)->size); + next_area_start += ((Free_Area*)next_area_start)->size; + } + if(next_area_start < (POINTER_SIZE_INT)lspace->heap_end){ + //If there is a living object at this addr, return it, and update iterate_index + +#ifdef USE_32BITS_HASHCODE + hash_extend_size = (hashcode_is_attached((Partial_Reveal_Object*)next_area_start))?GC_OBJECT_ALIGNMENT:0; +#endif + + if(obj_is_marked_in_oi((Partial_Reveal_Object*)next_area_start)){ + POINTER_SIZE_INT obj_size = ALIGN_UP_TO_KILO(vm_object_size((Partial_Reveal_Object*)next_area_start) + hash_extend_size); + *iterate_index = (unsigned int)((next_area_start + obj_size - (POINTER_SIZE_INT)lspace->heap_start) >> BIT_SHIFT_TO_KILO); + return (Partial_Reveal_Object*)next_area_start; + //If this is a dead object, go on to find a living one. 
+ }else{ + POINTER_SIZE_INT obj_size = ALIGN_UP_TO_KILO(vm_object_size((Partial_Reveal_Object*)next_area_start)+ hash_extend_size); + next_area_start += obj_size; + } + }else{ + reach_heap_end = 1; + } + } + return NULL; + +} + +inline static Partial_Reveal_Object* lspace_get_first_marked_object_by_oi(Lspace* lspace, unsigned int* mark_bit_idx) +{ + return lspace_get_next_marked_object_by_oi(lspace, mark_bit_idx); +} + +void lspace_reset_for_sweep(Lspace* lspace); +void lspace_sweep(Lspace* lspace); + + #endif /*_LSPACE_H_ */ Index: src/los/lspace_alloc_collect.cpp =================================================================== --- src/los/lspace_alloc_collect.cpp (revision 559012) +++ src/los/lspace_alloc_collect.cpp (working copy) @@ -160,10 +160,10 @@ p_result = free_pool_former_lists_atomic_take_area_piece(pool, list_hint, alloc_size); if(p_result){ memset(p_result, 0, alloc_size); - POINTER_SIZE_INT vold = lspace->alloced_size; - POINTER_SIZE_INT vnew = vold + alloc_size; - while( vold != atomic_casptrsz(&lspace->alloced_size, vnew, vold) ){ - vold = lspace->alloced_size; + uint64 vold = lspace->last_alloced_size; + uint64 vnew = vold + alloc_size; + while( vold != port_atomic_cas64(&lspace->last_alloced_size, vnew, vold) ){ + vold = lspace->last_alloced_size; vnew = vold + alloc_size; } return p_result; @@ -179,10 +179,10 @@ p_result = free_pool_last_list_atomic_take_area_piece(pool, alloc_size); if(p_result){ memset(p_result, 0, alloc_size); - POINTER_SIZE_INT vold = lspace->alloced_size; - POINTER_SIZE_INT vnew = vold + alloc_size; - while( vold != atomic_casptrsz(&lspace->alloced_size, vnew, vold) ){ - vold = lspace->alloced_size; + uint64 vold = lspace->last_alloced_size; + uint64 vnew = vold + alloc_size; + while( vold != port_atomic_cas64(&lspace->last_alloced_size, vnew, vold) ){ + vold = lspace->last_alloced_size; vnew = vold + alloc_size; } return p_result; @@ -308,7 +308,7 @@ return; } -void lspace_reset_after_collection(Lspace* lspace) +void lspace_reset_for_slide(Lspace* lspace) { GC* gc = lspace->gc; Space_Tuner* tuner = gc->tuner; @@ -321,22 +321,15 @@ switch(tuner->kind){ case TRANS_FROM_MOS_TO_LOS:{ - if(lspace->move_object){ - assert(tuner->force_tune); - Block* mos_first_block = ((GC_Gen*)gc)->mos->blocks; - lspace->heap_end = (void*)mos_first_block; - assert(!(tuner->tuning_size % GC_BLOCK_SIZE_BYTES)); - new_fa_size = (POINTER_SIZE_INT)lspace->scompact_fa_end - (POINTER_SIZE_INT)lspace->scompact_fa_start + tuner->tuning_size; - Free_Area* fa = free_area_new(lspace->scompact_fa_start, new_fa_size); - if(new_fa_size >= GC_OBJ_SIZE_THRESHOLD) free_pool_add_area(lspace->free_pool, fa); - }else{ - void* origin_end = lspace->heap_end; - lspace->heap_end = (void*)(((GC_Gen*)gc)->mos->blocks); - /*The assumption that the first word of one KB must be zero when iterating lspace in - that function lspace_get_next_marked_object is not true*/ - Free_Area* trans_fa = free_area_new(origin_end, trans_size); - if(trans_size >= GC_OBJ_SIZE_THRESHOLD) free_pool_add_area(lspace->free_pool, trans_fa); - } + /*Lspace collection in major collection must move object*/ + assert(lspace->move_object); + //debug_minor_sweep + Block* mos_first_block = ((GC_Gen*)gc)->mos->blocks; + lspace->heap_end = (void*)mos_first_block; + assert(!(tuner->tuning_size % GC_BLOCK_SIZE_BYTES)); + new_fa_size = (POINTER_SIZE_INT)lspace->scompact_fa_end - (POINTER_SIZE_INT)lspace->scompact_fa_start + tuner->tuning_size; + Free_Area* fa = free_area_new(lspace->scompact_fa_start, new_fa_size); + if(new_fa_size 
>= GC_OBJ_SIZE_THRESHOLD) free_pool_add_area(lspace->free_pool, fa); lspace->committed_heap_size += trans_size; break; } @@ -355,47 +348,51 @@ break; } default:{ - if(lspace->move_object){ - assert(tuner->kind == TRANS_NOTHING); - assert(!tuner->tuning_size); - new_fa_size = (POINTER_SIZE_INT)lspace->scompact_fa_end - (POINTER_SIZE_INT)lspace->scompact_fa_start; - Free_Area* fa = free_area_new(lspace->scompact_fa_start, new_fa_size); - if(new_fa_size >= GC_OBJ_SIZE_THRESHOLD) free_pool_add_area(lspace->free_pool, fa); - } + assert(lspace->move_object); + assert(tuner->kind == TRANS_NOTHING); + assert(!tuner->tuning_size); + new_fa_size = (POINTER_SIZE_INT)lspace->scompact_fa_end - (POINTER_SIZE_INT)lspace->scompact_fa_start; + Free_Area* fa = free_area_new(lspace->scompact_fa_start, new_fa_size); + if(new_fa_size >= GC_OBJ_SIZE_THRESHOLD) free_pool_add_area(lspace->free_pool, fa); break; } } - /*For_statistic los information.*/ - lspace->alloced_size = 0; - lspace->surviving_size = 0; +// lspace->accumu_alloced_size = 0; +// lspace->last_alloced_size = 0; + lspace->period_surviving_size = (POINTER_SIZE_INT)lspace->scompact_fa_start - (POINTER_SIZE_INT)lspace->heap_start; + lspace->survive_ratio = (float)lspace->accumu_alloced_size / (float)lspace->committed_heap_size; los_boundary = lspace->heap_end; } + +void lspace_reset_for_sweep(Lspace* lspace) +{ +// lspace->last_alloced_size = 0; + lspace->last_surviving_size = 0; +} + void lspace_sweep(Lspace* lspace) { unsigned int mark_bit_idx = 0; POINTER_SIZE_INT cur_size = 0; void *cur_area_start, *cur_area_end; - /*If it is TRANS_FROM_MOS_TO_LOS now, we must clear the fa alread added in lspace_reset_after_collection*/ free_area_pool_reset(lspace->free_pool); Partial_Reveal_Object* p_prev_obj = (Partial_Reveal_Object *)lspace->heap_start; - Partial_Reveal_Object* p_next_obj = lspace_get_first_marked_object(lspace, &mark_bit_idx); + Partial_Reveal_Object* p_next_obj = lspace_get_first_marked_object_by_oi(lspace, &mark_bit_idx); if(p_next_obj){ - obj_unmark_in_vt(p_next_obj); - /* we need this because, in hybrid situation of gen_mode and non_gen_mode, LOS will only be marked - in non_gen_mode, and not reset in gen_mode. When it switches back from gen_mode to non_gen_mode, - the last time marked object is thought to be already marked and not scanned for this cycle. 
*/ +// obj_unmark_in_vt(p_next_obj); + /*Fixme: This might not be necessary, for there is a bit clearing operation in forward_object->obj_mark_in_oi*/ obj_clear_dual_bits_in_oi(p_next_obj); /*For_statistic: sum up the size of suvived large objects, useful to deciede los extention.*/ -unsigned int obj_size = vm_object_size(p_next_obj); + unsigned int obj_size = vm_object_size(p_next_obj); #ifdef USE_32BITS_HASHCODE obj_size += (hashcode_is_attached(p_next_obj))?GC_OBJECT_ALIGNMENT:0; #endif - lspace->surviving_size += ALIGN_UP_TO_KILO(obj_size); + lspace->last_surviving_size += ALIGN_UP_TO_KILO(obj_size); } cur_area_start = (void*)ALIGN_UP_TO_KILO(p_prev_obj); @@ -416,16 +413,16 @@ /* successfully create an area */ p_prev_obj = p_next_obj; - p_next_obj = lspace_get_next_marked_object(lspace, &mark_bit_idx); + p_next_obj = lspace_get_next_marked_object_by_oi(lspace, &mark_bit_idx); if(p_next_obj){ - obj_unmark_in_vt(p_next_obj); +// obj_unmark_in_vt(p_next_obj); obj_clear_dual_bits_in_oi(p_next_obj); /*For_statistic: sum up the size of suvived large objects, useful to deciede los extention.*/ unsigned int obj_size = vm_object_size(p_next_obj); #ifdef USE_32BITS_HASHCODE obj_size += (hashcode_is_attached(p_next_obj))?GC_OBJECT_ALIGNMENT:0; #endif - lspace->surviving_size += ALIGN_UP_TO_KILO(obj_size); + lspace->last_surviving_size += ALIGN_UP_TO_KILO(obj_size); } #ifdef USE_32BITS_HASHCODE @@ -449,10 +446,6 @@ mark_bit_idx = 0; assert(!lspace_get_first_marked_object(lspace, &mark_bit_idx)); - /*Update survive ratio here. If we tune LOS this time, the ratio is computed by the new committed size.*/ - /*Fixme: We should keep the surviving size of last time, and set denominator to last_survive + current_alloc*/ - lspace->survive_ratio = (float)lspace->surviving_size / (float)lspace->committed_heap_size; - return; - } + Index: src/mark_compact/fallback_mark_scan.cpp =================================================================== --- src/mark_compact/fallback_mark_scan.cpp (revision 559012) +++ src/mark_compact/fallback_mark_scan.cpp (working copy) @@ -35,6 +35,7 @@ { Partial_Reveal_Object *p_obj = read_slot(p_ref); assert(p_obj); + assert((((POINTER_SIZE_INT)p_obj) % GC_OBJECT_ALIGNMENT) == 0); if(obj_belongs_to_nos(p_obj) && obj_is_fw_in_oi(p_obj)){ assert(obj_get_vt(p_obj) == obj_get_vt(obj_get_fw_in_oi(p_obj))); Index: src/mark_compact/los_extention_mark_scan.cpp =================================================================== --- src/mark_compact/los_extention_mark_scan.cpp (revision 559012) +++ src/mark_compact/los_extention_mark_scan.cpp (working copy) @@ -1,212 +0,0 @@ -/* - * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "../common/gc_metadata.h" -#include "../thread/collector.h" -#include "../gen/gen.h" -#include "../finalizer_weakref/finalizer_weakref.h" - -static FORCE_INLINE void scan_slot(Collector* collector, REF *p_ref) -{ - Partial_Reveal_Object *p_obj = read_slot(p_ref); - if(p_obj == NULL) return; - - if(obj_mark_in_vt(p_obj)){ - collector_tracestack_push(collector, p_obj); - unsigned int obj_size = vm_object_size(p_obj); -#ifdef USE_32BITS_HASHCODE - obj_size += (hashcode_is_set(p_obj))?GC_OBJECT_ALIGNMENT:0; -#endif - if(!obj_belongs_to_space(p_obj, gc_get_los((GC_Gen*)collector->gc))){ - collector->non_los_live_obj_size += obj_size; - collector->segment_live_size[SIZE_TO_SEGMENT_INDEX(obj_size)] += obj_size; - } else { - collector->los_live_obj_size += round_up_to_size(obj_size, KB); - } - } - - return; -} - -static FORCE_INLINE void scan_object(Collector* collector, Partial_Reveal_Object *p_obj) -{ - vm_notify_obj_alive( (void *)p_obj); - if( !object_has_ref_field(p_obj) ) return; - - REF *p_ref; - - if (object_is_array(p_obj)) { /* scan array object */ - - Partial_Reveal_Array* array = (Partial_Reveal_Array*)p_obj; - unsigned int array_length = array->array_len; - - p_ref = (REF *)((POINTER_SIZE_INT)array + (int)array_first_element_offset(array)); - - for (unsigned int i = 0; i < array_length; i++) { - scan_slot(collector, p_ref+i); - } - - }else{ /* scan non-array object */ - - unsigned int num_refs = object_ref_field_num(p_obj); - - int* ref_iterator = object_ref_iterator_init(p_obj); - - for(unsigned int i=0; itrace_stack; - while( !vector_stack_is_empty(trace_stack)){ - p_obj = (Partial_Reveal_Object *)vector_stack_pop(trace_stack); - scan_object(collector, p_obj); - trace_stack = collector->trace_stack; - } - - return; -} - -/* for marking phase termination detection */ -static volatile unsigned int num_finished_collectors = 0; - -/* NOTE:: Only marking in object header is idempotent. - Originally, we have to mark the object before put it into markstack, to - guarantee there is only one occurrance of an object in markstack. This is to - guarantee there is only one occurrance of a repointed ref slot in repset (they - are put to the set when the object is scanned). If the same object is put to - markstack twice, they will be scanned twice and their ref slots will be recorded twice. - Problem occurs when the ref slot is updated first time with new position, - the second time the value in the ref slot is not the old position as expected. - It needs to read the original obj header for forwarding pointer. With the new value, - it will read something nonsense since the obj is not moved yet. - This can be worked around if we want. - To do this we have to use atomic instruction for marking, which is undesirable. - So we abondoned this design. We no longer use the repset to remember repointed slots -*/ - -void mark_scan_heap_for_space_tune(Collector *collector) -{ - GC* gc = collector->gc; - GC_Metadata* metadata = gc->metadata; - - /* reset the num_finished_collectors to be 0 by one collector. This is necessary for the barrier later. */ - unsigned int num_active_collectors = gc->num_active_collectors; - atomic_cas32( &num_finished_collectors, 0, num_active_collectors); - - collector->trace_stack = free_task_pool_get_entry(metadata); - - Vector_Block* root_set = pool_iterator_next(metadata->gc_rootset_pool); - - /* first step: copy all root objects to mark tasks. 
- FIXME:: can be done sequentially before coming here to eliminate atomic ops */ - while(root_set){ - POINTER_SIZE_INT* iter = vector_block_iterator_init(root_set); - while(!vector_block_iterator_end(root_set,iter)){ - REF *p_ref = (REF *)*iter; - iter = vector_block_iterator_advance(root_set,iter); - - Partial_Reveal_Object *p_obj = read_slot(p_ref); - /* root ref can't be NULL, (remset may have NULL ref entry, but this function is only for MAJOR_COLLECTION */ - assert(p_obj!=NULL); - /* we have to mark the object before put it into marktask, because - it is possible to have two slots containing a same object. They will - be scanned twice and their ref slots will be recorded twice. Problem - occurs after the ref slot is updated first time with new position - and the second time the value is the ref slot is the old position as expected. - This can be worked around if we want. - */ - if(obj_mark_in_vt(p_obj)){ - collector_tracestack_push(collector, p_obj); - unsigned int obj_size = vm_object_size(p_obj); -#ifdef USE_32BITS_HASHCODE - obj_size += (hashcode_is_set(p_obj))?GC_OBJECT_ALIGNMENT:0; -#endif - if(!obj_belongs_to_space(p_obj, gc_get_los((GC_Gen*)gc))){ - collector->non_los_live_obj_size += obj_size; - collector->segment_live_size[SIZE_TO_SEGMENT_INDEX(obj_size)] += obj_size; - } else { - collector->los_live_obj_size += round_up_to_size(obj_size, KB); - } - } - - } - root_set = pool_iterator_next(metadata->gc_rootset_pool); - } - /* put back the last trace_stack task */ - pool_put_entry(metadata->mark_task_pool, collector->trace_stack); - - /* second step: iterate over the mark tasks and scan objects */ - /* get a task buf for the mark stack */ - collector->trace_stack = free_task_pool_get_entry(metadata); - -retry: - Vector_Block* mark_task = pool_get_entry(metadata->mark_task_pool); - - while(mark_task){ - POINTER_SIZE_INT* iter = vector_block_iterator_init(mark_task); - while(!vector_block_iterator_end(mark_task,iter)){ - Partial_Reveal_Object* p_obj = (Partial_Reveal_Object *)*iter; - iter = vector_block_iterator_advance(mark_task,iter); - - /* FIXME:: we should not let mark_task empty during working, , other may want to steal it. - degenerate my stack into mark_task, and grab another mark_task */ - trace_object(collector, p_obj); - } - /* run out one task, put back to the pool and grab another task */ - vector_stack_clear(mark_task); - pool_put_entry(metadata->free_task_pool, mark_task); - mark_task = pool_get_entry(metadata->mark_task_pool); - } - - /* termination detection. This is also a barrier. - NOTE:: We can simply spin waiting for num_finished_collectors, because each - generated new task would surely be processed by its generating collector eventually. - So code below is only for load balance optimization. 
*/ - atomic_inc32(&num_finished_collectors); - while(num_finished_collectors != num_active_collectors){ - if( !pool_is_empty(metadata->mark_task_pool)){ - atomic_dec32(&num_finished_collectors); - goto retry; - } - } - - /* put back the last mark stack to the free pool */ - mark_task = (Vector_Block*)collector->trace_stack; - vector_stack_clear(mark_task); - pool_put_entry(metadata->free_task_pool, mark_task); - collector->trace_stack = NULL; - - return; -} - -void trace_obj_in_space_tune_marking(Collector *collector, void *p_obj) -{ - trace_object(collector, (Partial_Reveal_Object *)p_obj); -} Index: src/mark_compact/mspace.cpp =================================================================== --- src/mark_compact/mspace.cpp (revision 559012) +++ src/mark_compact/mspace.cpp (working copy) @@ -71,6 +71,12 @@ mspace->num_collections = 0; mspace->time_collections = 0; mspace->survive_ratio = 0.2f; + mspace->last_alloced_size = 0; + mspace->accumu_alloced_size = 0; + mspace->total_alloced_size = 0; + mspace->last_surviving_size = 0; + mspace->period_surviving_size = 0; + mspace->move_object = TRUE; mspace->gc = gc; Index: src/mark_compact/mspace.h =================================================================== --- src/mark_compact/mspace.h (revision 559012) +++ src/mark_compact/mspace.h (working copy) @@ -36,10 +36,18 @@ unsigned int collect_algorithm; GC* gc; Boolean move_object; - /*Size allocted after last collection.*/ - POINTER_SIZE_INT alloced_size; - /*For_statistic: size survived after major*/ - POINTER_SIZE_INT surviving_size; + + /* Size allocted since last minor collection. */ + volatile uint64 last_alloced_size; + /* Size allocted since last major collection. */ + uint64 accumu_alloced_size; + /* Total size allocated since VM starts. */ + uint64 total_alloced_size; + + /* Size survived from last collection. */ + uint64 last_surviving_size; + /* Size survived after a certain period. 
*/ + uint64 period_surviving_size; /* END of Space --> */ Block* blocks; /* short-cut for mpsace blockheader access, not mandatory */ Index: src/mark_compact/mspace_alloc.cpp =================================================================== --- src/mark_compact/mspace_alloc.cpp (revision 559012) +++ src/mark_compact/mspace_alloc.cpp (working copy) @@ -36,10 +36,9 @@ } /* ok, got one */ Block_Header* alloc_block = (Block_Header*)&(mspace->blocks[allocated_idx - mspace->first_block_idx]); - mspace->alloced_size += GC_BLOCK_SIZE_BYTES; allocator_init_free_block(allocator, alloc_block); - + return TRUE; } Index: src/mark_compact/mspace_collect_compact.cpp =================================================================== --- src/mark_compact/mspace_collect_compact.cpp (revision 559012) +++ src/mark_compact/mspace_collect_compact.cpp (working copy) @@ -54,6 +54,8 @@ } } + +Space* gc_get_nos(GC_Gen* gc); void mspace_reset_after_compaction(Mspace* mspace) { unsigned int old_num_used = mspace->num_used_blocks; @@ -79,9 +81,8 @@ } mspace->num_used_blocks = new_num_used; /*For_statistic mos infomation*/ - mspace->surviving_size = new_num_used * GC_BLOCK_SIZE_BYTES; - mspace->alloced_size = 0; - + mspace->period_surviving_size = new_num_used * GC_BLOCK_SIZE_BYTES; + /* we should clear the remaining blocks which are set to be BLOCK_COMPACTED or BLOCK_TARGET */ for(; i < mspace->num_managed_blocks; i++){ Block_Header* block = (Block_Header*)&(blocks[i]); Index: src/mark_compact/mspace_move_compact.cpp =================================================================== --- src/mark_compact/mspace_move_compact.cpp (revision 559012) +++ src/mark_compact/mspace_move_compact.cpp (working copy) @@ -105,6 +105,7 @@ /* current sector is done, let's move it. */ POINTER_SIZE_INT sector_distance = (POINTER_SIZE_INT)src_sector_addr - (POINTER_SIZE_INT)dest_sector_addr; + assert((sector_distance % GC_OBJECT_ALIGNMENT) == 0); curr_block->table[curr_sector] = sector_distance; memmove(dest_sector_addr, src_sector_addr, curr_sector_size); @@ -151,6 +152,7 @@ static volatile unsigned int num_marking_collectors = 0; static volatile unsigned int num_fixing_collectors = 0; static volatile unsigned int num_moving_collectors = 0; +static volatile unsigned int num_restoring_collectors = 0; static volatile unsigned int num_extending_collectors = 0; void move_compact_mspace(Collector* collector) @@ -201,6 +203,7 @@ old_num = atomic_inc32(&num_moving_collectors); if( ++old_num == num_active_collectors ){ /* single thread world */ + if(lspace->move_object) lspace_compute_object_target(collector, lspace); gc->collect_result = gc_collection_result(gc); if(!gc->collect_result){ num_moving_collectors++; @@ -225,10 +228,22 @@ /* last collector's world here */ lspace_fix_repointed_refs(collector, lspace); gc_fix_rootset(collector); + if(lspace->move_object) lspace_sliding_compact(collector, lspace); num_fixing_collectors++; } while(num_fixing_collectors != num_active_collectors + 1); + + /* Pass 4: ************************************************** + restore obj_info */ + atomic_cas32( &num_restoring_collectors, 0, num_active_collectors); + + collector_restore_obj_info(collector); + + atomic_inc32(&num_restoring_collectors); + + while(num_restoring_collectors != num_active_collectors); + /* Dealing with out of memory in mspace */ if(mspace->free_block_idx > fspace->first_block_idx){ atomic_cas32( &num_extending_collectors, 0, num_active_collectors); Index: src/mark_compact/mspace_slide_compact.cpp 
=================================================================== --- src/mark_compact/mspace_slide_compact.cpp (revision 559012) +++ src/mark_compact/mspace_slide_compact.cpp (working copy) @@ -368,6 +368,7 @@ unsigned int obj_size = (unsigned int)((POINTER_SIZE_INT)start_pos - (POINTER_SIZE_INT)p_obj); if(p_obj != p_target_obj){ + assert((((POINTER_SIZE_INT)p_target_obj) % GC_OBJECT_ALIGNMENT) == 0); memmove(p_target_obj, p_obj, obj_size); } set_obj_info(p_target_obj, 0); @@ -466,7 +467,6 @@ old_num = atomic_inc32(&num_repointing_collectors); /*last collector's world here*/ if( ++old_num == num_active_collectors ){ - /*LOS_Shrink: but lspace->move_object could be set individually without shrinking LOS.*/ if(lspace->move_object) lspace_compute_object_target(collector, lspace); gc->collect_result = gc_collection_result(gc); if(!gc->collect_result){ Index: src/mark_compact/space_tune_mark_scan.cpp =================================================================== --- src/mark_compact/space_tune_mark_scan.cpp (revision 0) +++ src/mark_compact/space_tune_mark_scan.cpp (revision 0) @@ -0,0 +1,213 @@ +/* + * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "../common/gc_metadata.h" +#include "../thread/collector.h" +#include "../gen/gen.h" +#include "../finalizer_weakref/finalizer_weakref.h" + +static FORCE_INLINE void scan_slot(Collector* collector, REF *p_ref) +{ + Partial_Reveal_Object *p_obj = read_slot(p_ref); + if(p_obj == NULL) return; + + if(obj_mark_in_vt(p_obj)){ + collector_tracestack_push(collector, p_obj); + unsigned int obj_size = vm_object_size(p_obj); +#ifdef USE_32BITS_HASHCODE + obj_size += (hashcode_is_set(p_obj))?GC_OBJECT_ALIGNMENT:0; +#endif + if(!obj_belongs_to_space(p_obj, gc_get_los((GC_Gen*)collector->gc))){ + collector->non_los_live_obj_size += obj_size; + collector->segment_live_size[SIZE_TO_SEGMENT_INDEX(obj_size)] += obj_size; + } else { + collector->los_live_obj_size += round_up_to_size(obj_size, KB); + } + } + + return; +} + +static FORCE_INLINE void scan_object(Collector* collector, Partial_Reveal_Object *p_obj) +{ + vm_notify_obj_alive( (void *)p_obj); + assert((((POINTER_SIZE_INT)p_obj) % GC_OBJECT_ALIGNMENT) == 0); + if( !object_has_ref_field(p_obj) ) return; + + REF *p_ref; + + if (object_is_array(p_obj)) { /* scan array object */ + + Partial_Reveal_Array* array = (Partial_Reveal_Array*)p_obj; + unsigned int array_length = array->array_len; + + p_ref = (REF *)((POINTER_SIZE_INT)array + (int)array_first_element_offset(array)); + + for (unsigned int i = 0; i < array_length; i++) { + scan_slot(collector, p_ref+i); + } + + }else{ /* scan non-array object */ + + unsigned int num_refs = object_ref_field_num(p_obj); + + int* ref_iterator = object_ref_iterator_init(p_obj); + + for(unsigned int i=0; itrace_stack; + while( !vector_stack_is_empty(trace_stack)){ + p_obj = (Partial_Reveal_Object *)vector_stack_pop(trace_stack); + scan_object(collector, p_obj); + 
trace_stack = collector->trace_stack; + } + + return; +} + +/* for marking phase termination detection */ +static volatile unsigned int num_finished_collectors = 0; + +/* NOTE:: Only marking in object header is idempotent. + Originally, we have to mark the object before put it into markstack, to + guarantee there is only one occurrance of an object in markstack. This is to + guarantee there is only one occurrance of a repointed ref slot in repset (they + are put to the set when the object is scanned). If the same object is put to + markstack twice, they will be scanned twice and their ref slots will be recorded twice. + Problem occurs when the ref slot is updated first time with new position, + the second time the value in the ref slot is not the old position as expected. + It needs to read the original obj header for forwarding pointer. With the new value, + it will read something nonsense since the obj is not moved yet. + This can be worked around if we want. + To do this we have to use atomic instruction for marking, which is undesirable. + So we abondoned this design. We no longer use the repset to remember repointed slots +*/ + +void mark_scan_heap_for_space_tune(Collector *collector) +{ + GC* gc = collector->gc; + GC_Metadata* metadata = gc->metadata; + + /* reset the num_finished_collectors to be 0 by one collector. This is necessary for the barrier later. */ + unsigned int num_active_collectors = gc->num_active_collectors; + atomic_cas32( &num_finished_collectors, 0, num_active_collectors); + + collector->trace_stack = free_task_pool_get_entry(metadata); + + Vector_Block* root_set = pool_iterator_next(metadata->gc_rootset_pool); + + /* first step: copy all root objects to mark tasks. + FIXME:: can be done sequentially before coming here to eliminate atomic ops */ + while(root_set){ + POINTER_SIZE_INT* iter = vector_block_iterator_init(root_set); + while(!vector_block_iterator_end(root_set,iter)){ + REF *p_ref = (REF *)*iter; + iter = vector_block_iterator_advance(root_set,iter); + + Partial_Reveal_Object *p_obj = read_slot(p_ref); + /* root ref can't be NULL, (remset may have NULL ref entry, but this function is only for MAJOR_COLLECTION */ + assert(p_obj!=NULL); + /* we have to mark the object before put it into marktask, because + it is possible to have two slots containing a same object. They will + be scanned twice and their ref slots will be recorded twice. Problem + occurs after the ref slot is updated first time with new position + and the second time the value is the ref slot is the old position as expected. + This can be worked around if we want. 
+ */ + if(obj_mark_in_vt(p_obj)){ + collector_tracestack_push(collector, p_obj); + unsigned int obj_size = vm_object_size(p_obj); +#ifdef USE_32BITS_HASHCODE + obj_size += (hashcode_is_set(p_obj))?GC_OBJECT_ALIGNMENT:0; +#endif + if(!obj_belongs_to_space(p_obj, gc_get_los((GC_Gen*)gc))){ + collector->non_los_live_obj_size += obj_size; + collector->segment_live_size[SIZE_TO_SEGMENT_INDEX(obj_size)] += obj_size; + } else { + collector->los_live_obj_size += round_up_to_size(obj_size, KB); + } + } + + } + root_set = pool_iterator_next(metadata->gc_rootset_pool); + } + /* put back the last trace_stack task */ + pool_put_entry(metadata->mark_task_pool, collector->trace_stack); + + /* second step: iterate over the mark tasks and scan objects */ + /* get a task buf for the mark stack */ + collector->trace_stack = free_task_pool_get_entry(metadata); + +retry: + Vector_Block* mark_task = pool_get_entry(metadata->mark_task_pool); + + while(mark_task){ + POINTER_SIZE_INT* iter = vector_block_iterator_init(mark_task); + while(!vector_block_iterator_end(mark_task,iter)){ + Partial_Reveal_Object* p_obj = (Partial_Reveal_Object *)*iter; + iter = vector_block_iterator_advance(mark_task,iter); + + /* FIXME:: we should not let mark_task empty during working, , other may want to steal it. + degenerate my stack into mark_task, and grab another mark_task */ + trace_object(collector, p_obj); + } + /* run out one task, put back to the pool and grab another task */ + vector_stack_clear(mark_task); + pool_put_entry(metadata->free_task_pool, mark_task); + mark_task = pool_get_entry(metadata->mark_task_pool); + } + + /* termination detection. This is also a barrier. + NOTE:: We can simply spin waiting for num_finished_collectors, because each + generated new task would surely be processed by its generating collector eventually. + So code below is only for load balance optimization. */ + atomic_inc32(&num_finished_collectors); + while(num_finished_collectors != num_active_collectors){ + if( !pool_is_empty(metadata->mark_task_pool)){ + atomic_dec32(&num_finished_collectors); + goto retry; + } + } + + /* put back the last mark stack to the free pool */ + mark_task = (Vector_Block*)collector->trace_stack; + vector_stack_clear(mark_task); + pool_put_entry(metadata->free_task_pool, mark_task); + collector->trace_stack = NULL; + + return; +} + +void trace_obj_in_space_tune_marking(Collector *collector, void *p_obj) +{ + trace_object(collector, (Partial_Reveal_Object *)p_obj); +} Index: src/mark_sweep/gc_ms.cpp =================================================================== --- src/mark_sweep/gc_ms.cpp (revision 0) +++ src/mark_sweep/gc_ms.cpp (revision 0) @@ -0,0 +1,88 @@ +/* + * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "../common/gc_common.h" + +#ifdef ONLY_SSPACE_IN_HEAP + +#include "gc_ms.h" +#include "port_sysinfo.h" + +#include "../finalizer_weakref/finalizer_weakref.h" +#include "../common/compressed_ref.h" +#ifdef USE_32BITS_HASHCODE +#include "../common/hashcode.h" +#endif + +static void gc_ms_get_system_info(GC_MS *gc_ms) +{ + gc_ms->_machine_page_size_bytes = (unsigned int)port_vmem_page_sizes()[0]; + gc_ms->_num_processors = port_CPUs_number(); + gc_ms->_system_alloc_unit = vm_get_system_alloc_unit(); + SPACE_ALLOC_UNIT = max(gc_ms->_system_alloc_unit, GC_BLOCK_SIZE_BYTES); +} + +void gc_ms_initialize(GC_MS *gc_ms, POINTER_SIZE_INT min_heap_size, POINTER_SIZE_INT max_heap_size) +{ + assert(gc_ms); + gc_ms_get_system_info(gc_ms); + + max_heap_size = round_down_to_size(max_heap_size, SPACE_ALLOC_UNIT); + min_heap_size = round_up_to_size(min_heap_size, SPACE_ALLOC_UNIT); + assert(max_heap_size <= max_heap_size_bytes); + assert(max_heap_size >= min_heap_size_bytes); + + void *sspace_base; + sspace_base = vm_reserve_mem(0, max_heap_size); + sspace_initialize((GC*)gc_ms, sspace_base, max_heap_size, max_heap_size); + + HEAP_NULL = (POINTER_SIZE_INT)sspace_base; + + gc_ms->heap_start = sspace_base; + gc_ms->heap_end = (void*)((POINTER_SIZE_INT)sspace_base + max_heap_size); + gc_ms->reserved_heap_size = max_heap_size; + gc_ms->committed_heap_size = max_heap_size; + gc_ms->num_collections = 0; + gc_ms->time_collections = 0; +} + +void gc_ms_destruct(GC_MS *gc_ms) +{ + Sspace *sspace = gc_ms->sspace; + void *sspace_start = sspace->heap_start; + sspace_destruct(sspace); + gc_ms->sspace = NULL; + vm_unmap_mem(sspace_start, space_committed_size((Space*)sspace)); +} + +void gc_ms_reclaim_heap(GC_MS *gc) +{ + sspace_collection(gc_ms_get_sspace(gc)); + + /* FIXME:: clear root set here to support verify */ +#ifdef COMPRESS_REFERENCE + gc_set_pool_clear(gc->metadata->gc_uncompressed_rootset_pool); +#endif +} + +void gc_ms_iterate_heap(GC_MS *gc) +{ +} + +unsigned int gc_ms_get_processor_num(GC_MS *gc) +{ return gc->_num_processors; } + +#endif // ONLY_SSPACE_IN_HEAP Index: src/mark_sweep/gc_ms.h =================================================================== --- src/mark_sweep/gc_ms.h (revision 0) +++ src/mark_sweep/gc_ms.h (revision 0) @@ -0,0 +1,108 @@ +/* + * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef _GC_MS_H_ +#define _GC_MS_H_ + +#ifdef ONLY_SSPACE_IN_HEAP + +#include "sspace.h" + + +/* heap size limit is not interesting. 
only for manual tuning purpose */ +extern POINTER_SIZE_INT min_heap_size_bytes; +extern POINTER_SIZE_INT max_heap_size_bytes; + +typedef struct GC_MS { + /* <-- First couple of fields overloaded as GC */ + void *heap_start; + void *heap_end; + POINTER_SIZE_INT reserved_heap_size; + POINTER_SIZE_INT committed_heap_size; + unsigned int num_collections; + int64 time_collections; + float survive_ratio; + + /* mutation related info */ + Mutator *mutator_list; + SpinLock mutator_list_lock; + unsigned int num_mutators; + + /* collection related info */ + Collector **collectors; + unsigned int num_collectors; + unsigned int num_active_collectors; /* not all collectors are working */ + + /* metadata is the pool for rootset, markstack, etc. */ + GC_Metadata *metadata; + Finref_Metadata *finref_metadata; + + unsigned int collect_kind; /* MAJOR or MINOR */ + unsigned int last_collect_kind; + unsigned int cause; /*GC_CAUSE_LOS_IS_FULL, GC_CAUSE_NOS_IS_FULL, or GC_CAUSE_RUNTIME_FORCE_GC*/ + Boolean collect_result; /* succeed or fail */ + + Boolean generate_barrier; + + /* FIXME:: this is wrong! root_set belongs to mutator */ + Vector_Block *root_set; + Vector_Block *uncompressed_root_set; + + //For_LOS_extend + Space_Tuner *tuner; + /* END of GC --> */ + + Sspace *sspace; + + /* system info */ + unsigned int _system_alloc_unit; + unsigned int _machine_page_size_bytes; + unsigned int _num_processors; + +} GC_MS; + +////////////////////////////////////////////////////////////////////////////////////////// + +inline void *gc_ms_fast_alloc(unsigned size, Allocator *allocator) +{ return sspace_thread_local_alloc(size, allocator); } + +inline void *gc_ms_alloc(unsigned size, Allocator *allocator) +{ return sspace_alloc(size, allocator); } + +inline Sspace *gc_ms_get_sspace(GC_MS *gc) +{ return gc->sspace; } + +inline void gc_ms_set_sspace(GC_MS *gc, Sspace *sspace) +{ gc->sspace = sspace; } + +inline POINTER_SIZE_INT gc_ms_free_memory_size(GC_MS *gc) +{ return sspace_free_memory_size(gc_ms_get_sspace(gc)); } + +inline POINTER_SIZE_INT gc_ms_total_memory_size(GC_MS *gc) +{ return space_committed_size((Space*)gc_ms_get_sspace(gc)); } + +///////////////////////////////////////////////////////////////////////////////////////// + +void gc_ms_initialize(GC_MS *gc, POINTER_SIZE_INT initial_heap_size, POINTER_SIZE_INT final_heap_size); +void gc_ms_destruct(GC_MS *gc); +void gc_ms_reclaim_heap(GC_MS *gc); +void gc_ms_iterate_heap(GC_MS *gc); +unsigned int gc_ms_get_processor_num(GC_MS *gc); + + +#endif // ONLY_SSPACE_IN_HEAP + +#endif // _GC_MS_H_ Index: src/mark_sweep/sspace.cpp =================================================================== --- src/mark_sweep/sspace.cpp (revision 559012) +++ src/mark_sweep/sspace.cpp (working copy) @@ -16,9 +16,9 @@ #include "sspace.h" #include "sspace_chunk.h" +#include "sspace_verify.h" +#include "gc_ms.h" #include "../gen/gen.h" -#include "../common/gc_space.h" -#include "sspace_verify.h" struct GC_Gen; @@ -56,8 +56,13 @@ sspace->gc = gc; sspace_init_chunks(sspace); - - gc_set_pos((GC_Gen*)gc, (Space*)sspace); + +#ifdef ONLY_SSPACE_IN_HEAP + gc_ms_set_sspace((GC_MS*)gc, sspace); +#else + gc_set_mos((GC_Gen*)gc, (Space*)sspace); +#endif + #ifdef SSPACE_VERIFY sspace_verify_init(gc); #endif @@ -73,15 +78,68 @@ STD_FREE(sspace); } -void mutator_init_small_chunks(Mutator *mutator) +void allocator_init_local_chunks(Allocator *allocator) { - unsigned int size = sizeof(Chunk_Header*) * (SMALL_LOCAL_CHUNK_NUM + MEDIUM_LOCAL_CHUNK_NUM); - Chunk_Header **chunks = 
(Chunk_Header**)STD_MALLOC(size); - memset(chunks, 0, size); - mutator->small_chunks = chunks; - mutator->medium_chunks = chunks + SMALL_LOCAL_CHUNK_NUM; + Sspace *sspace = gc_get_sspace(allocator->gc); + Size_Segment **size_segs = sspace->size_segments; + + /* Alloc mem for size segments (Chunk_Header**) */ + unsigned int seg_size = sizeof(Chunk_Header**) * SIZE_SEGMENT_NUM; + Chunk_Header ***local_chunks = (Chunk_Header***)STD_MALLOC(seg_size); + memset(local_chunks, 0, seg_size); + + /* Alloc mem for local chunk pointers */ + unsigned int chunk_ptr_size = 0; + for(unsigned int i = SIZE_SEGMENT_NUM; i--;){ + if(size_segs[i]->local_alloc){ + chunk_ptr_size += size_segs[i]->chunk_num; + } + } + chunk_ptr_size *= sizeof(Chunk_Header*); + Chunk_Header **chunk_ptrs = (Chunk_Header**)STD_MALLOC(chunk_ptr_size); + memset(chunk_ptrs, 0, chunk_ptr_size); + + for(unsigned int i = 0; i < SIZE_SEGMENT_NUM; ++i){ + if(size_segs[i]->local_alloc){ + local_chunks[i] = chunk_ptrs; + chunk_ptrs += size_segs[i]->chunk_num; + } + } + + allocator->local_chunks = local_chunks; } +void allocactor_destruct_local_chunks(Allocator *allocator) +{ + Sspace *sspace = gc_get_sspace(allocator->gc); + Size_Segment **size_segs = sspace->size_segments; + Chunk_Header ***local_chunks = allocator->local_chunks; + Chunk_Header **chunk_ptrs = NULL; + unsigned int chunk_ptr_num = 0; + + /* Find local chunk pointers' head and their number */ + for(unsigned int i = 0; i < SIZE_SEGMENT_NUM; ++i){ + if(size_segs[i]->local_alloc){ + chunk_ptr_num += size_segs[i]->chunk_num; + assert(local_chunks[i]); + if(!chunk_ptrs) + chunk_ptrs = local_chunks[i]; + } + } + + /* Put local pfc to the according pools */ + for(unsigned int i = 0; i < chunk_ptr_num; ++i){ + if(chunk_ptrs[i]) + sspace_put_pfc(sspace, chunk_ptrs[i]); + } + + /* Free mem for local chunk pointers */ + STD_FREE(chunk_ptrs); + + /* Free mem for size segments (Chunk_Header**) */ + STD_FREE(local_chunks); +} + extern void mark_sweep_sspace(Collector *collector); void sspace_collection(Sspace *sspace) Index: src/mark_sweep/sspace.h =================================================================== --- src/mark_sweep/sspace.h (revision 559012) +++ src/mark_sweep/sspace.h (working copy) @@ -26,8 +26,7 @@ * The sweep space accomodates objects collected by mark-sweep */ -#define ONLY_SSPACE_IN_HEAP - +struct Size_Segment; struct Free_Chunk_List; typedef struct Sspace { @@ -42,15 +41,23 @@ unsigned int collect_algorithm; GC *gc; Boolean move_object; - /* Size allocted after last collection. Not available in fspace now. */ - unsigned int alloced_size; - /* For_statistic: not available now for fspace */ - unsigned int surviving_size; + + /* Size allocted since last minor collection. */ + volatile POINTER_SIZE_INT last_alloced_size; + /* Size allocted since last major collection. */ + volatile POINTER_SIZE_INT accumu_alloced_size; + /* Total size allocated since VM starts. */ + volatile POINTER_SIZE_INT total_alloced_size; + + /* Size survived from last collection. */ + POINTER_SIZE_INT last_surviving_size; + /* Size survived after a certain period. 
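allocator_init_local_chunks() above builds a two-level cache: local_chunks has SIZE_SEGMENT_NUM slots, and only segments created with local_alloc set point into the shared Chunk_Header* array; the others stay NULL. An illustrative lookup helper (hypothetical, not in the patch) showing how a cached chunk is reached:

/* seg_index/index are the values NORMAL_SIZE_TO_INDEX() computes for the
 * rounded request size. */
static Chunk_Header *example_get_cached_local_chunk(Allocator *allocator,
                                                    unsigned int seg_index,
                                                    unsigned int index)
{
  Chunk_Header **seg_chunks = allocator->local_chunks[seg_index];
  /* segments with local_alloc == FALSE were left NULL by the init code above */
  return seg_chunks ? seg_chunks[index] : NULL;
}
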
*/ + POINTER_SIZE_INT period_surviving_size; + /* END of Space --> */ - Pool **small_pfc_pools; - Pool **medium_pfc_pools; - Pool **large_pfc_pools; + Size_Segment **size_segments; + Pool ***pfc_pools; Free_Chunk_List *aligned_free_chunk_lists; Free_Chunk_List *unaligned_free_chunk_lists; Free_Chunk_List *hyper_free_chunk_list; @@ -59,16 +66,24 @@ void sspace_initialize(GC *gc, void *start, unsigned int sspace_size, unsigned int commit_size); void sspace_destruct(Sspace *sspace); -void *sspace_fast_alloc(unsigned size, Allocator *allocator); +void *sspace_thread_local_alloc(unsigned size, Allocator *allocator); void *sspace_alloc(unsigned size, Allocator *allocator); void sspace_reset_for_allocation(Sspace *sspace); void sspace_collection(Sspace *sspace); -void mutator_init_small_chunks(Mutator *mutator); +void allocator_init_local_chunks(Allocator *allocator); +void allocactor_destruct_local_chunks(Allocator *allocator); void collector_init_free_chunk_list(Collector *collector); POINTER_SIZE_INT sspace_free_memory_size(Sspace *sspace); + +#ifndef ONLY_SSPACE_IN_HEAP +#define gc_get_sspace(gc) ((Sspace*)gc_get_mos((GC_Gen*)(gc))) +#else +#define gc_get_sspace(gc) (gc_ms_get_sspace((GC_MS*)(gc))); +#endif + #endif // _SWEEP_SPACE_H_ Index: src/mark_sweep/sspace_alloc.cpp =================================================================== --- src/mark_sweep/sspace_alloc.cpp (revision 559012) +++ src/mark_sweep/sspace_alloc.cpp (working copy) @@ -17,6 +17,7 @@ #include "sspace.h" #include "sspace_chunk.h" #include "sspace_mark_sweep.h" +#include "gc_ms.h" #include "../gen/gen.h" static Boolean slot_is_alloc_in_table(POINTER_SIZE_INT *table, unsigned int slot_index) @@ -105,9 +106,9 @@ } -/* 1. No need of synchronization. This is a mutator local chunk no matter it is a small or medium obj chunk. +/* 1. No need of synchronization. This is a allocator local chunk no matter it is a small or medium obj chunk. * 2. If this chunk runs out of space, clear the chunk pointer. - * So it is important to give an argument which is a local chunk pointer of a mutator while invoking this func. + * So it is important to give an argument which is a local chunk pointer of a allocator while invoking this func. 
*/ static void *alloc_in_chunk(Chunk_Header* &chunk) { @@ -132,143 +133,107 @@ } /* alloc small without-fin object in sspace without getting new free chunk */ -void *sspace_fast_alloc(unsigned size, Allocator *allocator) +void *sspace_thread_local_alloc(unsigned size, Allocator *allocator) { if(size > SUPER_OBJ_THRESHOLD) return NULL; - if(size <= MEDIUM_OBJ_THRESHOLD){ /* small object */ - size = SMALL_SIZE_ROUNDUP(size); - Chunk_Header **small_chunks = ((Mutator*)allocator)->small_chunks; - unsigned int index = SMALL_SIZE_TO_INDEX(size); - - if(!small_chunks[index]){ - Sspace *sspace = (Sspace*)gc_get_pos((GC_Gen*)allocator->gc); - Chunk_Header *chunk = sspace_get_small_pfc(sspace, index); - //if(!chunk) - //chunk = sspace_steal_small_pfc(sspace, index); + Sspace *sspace = gc_get_sspace(allocator->gc); + void *p_obj = NULL; + + unsigned int seg_index = 0; + Size_Segment *size_seg = sspace->size_segments[0]; + + for(; seg_index < SIZE_SEGMENT_NUM; ++seg_index, ++size_seg) + if(size <= size_seg->size_max) break; + assert(seg_index < SIZE_SEGMENT_NUM); + + size = NORMAL_SIZE_ROUNDUP(size, size_seg); + unsigned int index = NORMAL_SIZE_TO_INDEX(size, size_seg); + Boolean local_alloc = size_seg->local_alloc; + Chunk_Header *chunk = NULL; + + if(local_alloc){ + Chunk_Header **chunks = allocator->local_chunks[seg_index]; + chunk = chunks[index]; + if(!chunk){ + chunk = sspace_get_pfc(sspace, seg_index, index); + //if(!chunk) chunk = sspace_steal_pfc(sspace, seg_index, index); if(!chunk) return NULL; - small_chunks[index] = chunk; + chunk->status |= CHUNK_IN_USE; + chunks[index] = chunk; } - return alloc_in_chunk(small_chunks[index]); - } else if(size <= LARGE_OBJ_THRESHOLD){ /* medium object */ - size = MEDIUM_SIZE_ROUNDUP(size); - Chunk_Header **medium_chunks = ((Mutator*)allocator)->medium_chunks; - unsigned int index = MEDIUM_SIZE_TO_INDEX(size); - - if(!medium_chunks[index]){ - Sspace *sspace = (Sspace*)gc_get_pos((GC_Gen*)allocator->gc); - Chunk_Header *chunk = sspace_get_medium_pfc(sspace, index); - //if(!chunk) - //chunk = sspace_steal_medium_pfc(sspace, index); - if(!chunk) return NULL; - medium_chunks[index] = chunk; - } - return alloc_in_chunk(medium_chunks[index]); - } else { /* large object */ - assert(size <= SUPER_OBJ_THRESHOLD); - size = LARGE_SIZE_ROUNDUP(size); - unsigned int index = LARGE_SIZE_TO_INDEX(size); - Sspace *sspace = (Sspace*)gc_get_pos((GC_Gen*)allocator->gc); - Chunk_Header *chunk = sspace_get_large_pfc(sspace, index); - //if(!chunk) - //chunk = sspace_steal_large_pfc(sspace, index); + p_obj = alloc_in_chunk(chunks[index]); + } else { + chunk = sspace_get_pfc(sspace, seg_index, index); + //if(!chunk) chunk = sspace_steal_pfc(sspace, seg_index, index); if(!chunk) return NULL; - void *p_obj = alloc_in_chunk(chunk); + p_obj = alloc_in_chunk(chunk); if(chunk) - sspace_put_large_pfc(sspace, chunk, index); - return p_obj; + sspace_put_pfc(sspace, chunk); } -} + + assert(p_obj); -static void *alloc_small_obj(unsigned size, Allocator *allocator) +#ifdef SSPACE_ALLOC_INFO + sspace_alloc_info(size); +#endif +#ifdef SSPACE_VERIFY + sspace_verify_alloc(p_obj, size); +#endif + + return p_obj; +} +static void *sspace_alloc_normal_obj(Sspace *sspace, unsigned size, Allocator *allocator) { - assert(size <= MEDIUM_OBJ_THRESHOLD); - assert(!(size & SMALL_GRANULARITY_LOW_MASK)); + void *p_obj = NULL; - Chunk_Header **small_chunks = ((Mutator*)allocator)->small_chunks; - unsigned int index = SMALL_SIZE_TO_INDEX(size); - if(!small_chunks[index]){ - Sspace *sspace = 
(Sspace*)gc_get_pos((GC_Gen*)allocator->gc); - Chunk_Header *chunk = sspace_get_small_pfc(sspace, index); - //if(!chunk) - //chunk = sspace_steal_small_pfc(sspace, index); + unsigned int seg_index = 0; + Size_Segment *size_seg = sspace->size_segments[0]; + + for(; seg_index < SIZE_SEGMENT_NUM; ++seg_index, ++size_seg) + if(size <= size_seg->size_max) break; + assert(seg_index < SIZE_SEGMENT_NUM); + + size = NORMAL_SIZE_ROUNDUP(size, size_seg); + unsigned int index = NORMAL_SIZE_TO_INDEX(size, size_seg); + Boolean local_alloc = size_seg->local_alloc; + Chunk_Header *chunk = NULL; + + if(local_alloc){ + Chunk_Header **chunks = allocator->local_chunks[seg_index]; + chunk = chunks[index]; if(!chunk){ - chunk = (Chunk_Header*)sspace_get_normal_free_chunk(sspace); - if(chunk){ - normal_chunk_init(chunk, size); - } else { - /*chunk = sspace_steal_small_pfc(sspace, index); - if(!chunk)*/ return NULL; + chunk = sspace_get_pfc(sspace, seg_index, index); + if(!chunk){ + chunk = (Chunk_Header*)sspace_get_normal_free_chunk(sspace); + if(chunk) normal_chunk_init(chunk, size); } + //if(!chunk) chunk = sspace_steal_pfc(sspace, seg_index, index); + if(!chunk) return NULL; + chunk->status |= CHUNK_IN_USE; + chunks[index] = chunk; } - chunk->status |= CHUNK_IN_USE | CHUNK_NORMAL; - small_chunks[index] = chunk; - } - - return alloc_in_chunk(small_chunks[index]); -} - -static void *alloc_medium_obj(unsigned size, Allocator *allocator) -{ - assert((size > MEDIUM_OBJ_THRESHOLD) && (size <= LARGE_OBJ_THRESHOLD)); - assert(!(size & MEDIUM_GRANULARITY_LOW_MASK)); - - Chunk_Header **medium_chunks = ((Mutator*)allocator)->medium_chunks; - unsigned int index = MEDIUM_SIZE_TO_INDEX(size); - if(!medium_chunks[index]){ - Sspace *sspace = (Sspace*)gc_get_pos((GC_Gen*)allocator->gc); - Chunk_Header *chunk = sspace_get_medium_pfc(sspace, index); - //if(!chunk) - //chunk = sspace_steal_medium_pfc(sspace, index); + p_obj = alloc_in_chunk(chunks[index]); + } else { + chunk = sspace_get_pfc(sspace, seg_index, index); if(!chunk){ chunk = (Chunk_Header*)sspace_get_normal_free_chunk(sspace); - if(chunk){ - normal_chunk_init(chunk, size); - } else { - /*chunk = sspace_steal_medium_pfc(sspace, index); - if(!chunk) */return NULL; - } + if(chunk) normal_chunk_init(chunk, size); } - chunk->status |= CHUNK_IN_USE | CHUNK_NORMAL; - medium_chunks[index] = chunk; + //if(!chunk) chunk = sspace_steal_pfc(sspace, seg_index, index); + if(!chunk) return NULL; + p_obj = alloc_in_chunk(chunk); + if(chunk) + sspace_put_pfc(sspace, chunk); } - return alloc_in_chunk(medium_chunks[index]); -} - -/* FIXME:: this is a simple version. 
It may return NULL while there are still pfc in pool put by other mutators */ -static void *alloc_large_obj(unsigned size, Allocator *allocator) -{ - assert((size > LARGE_OBJ_THRESHOLD) && (size <= SUPER_OBJ_THRESHOLD)); - assert(!(size & LARGE_GRANULARITY_LOW_MASK)); - - Sspace *sspace = (Sspace*)gc_get_pos((GC_Gen*)allocator->gc); - unsigned int index = LARGE_SIZE_TO_INDEX(size); - Chunk_Header *chunk = sspace_get_large_pfc(sspace, index); - //if(!chunk) - //chunk = sspace_steal_large_pfc(sspace, index); - if(!chunk){ - chunk = (Chunk_Header*)sspace_get_normal_free_chunk(sspace); - if(chunk){ - normal_chunk_init(chunk, size); - } else { - /*chunk = sspace_steal_large_pfc(sspace, index); - if(!chunk)*/ return NULL; - } - } - chunk->status |= CHUNK_NORMAL; - - void *p_obj = alloc_in_chunk(chunk); - if(chunk) - sspace_put_large_pfc(sspace, chunk, index); return p_obj; } -static void *alloc_super_obj(unsigned size, Allocator *allocator) +static void *sspace_alloc_super_obj(Sspace *sspace, unsigned size, Allocator *allocator) { assert(size > SUPER_OBJ_THRESHOLD); - - Sspace *sspace = (Sspace*)gc_get_pos((GC_Gen*)allocator->gc); + unsigned int chunk_size = SUPER_SIZE_ROUNDUP(size); assert(chunk_size > SUPER_OBJ_THRESHOLD); assert(!(chunk_size & CHUNK_GRANULARITY_LOW_MASK)); @@ -281,7 +246,6 @@ if(!chunk) return NULL; abnormal_chunk_init(chunk, chunk_size, size); - chunk->status = CHUNK_IN_USE | CHUNK_ABNORMAL; chunk->table[0] = cur_alloc_color; set_super_obj_mask(chunk->base); assert(get_obj_info_raw((Partial_Reveal_Object*)chunk->base) & SUPER_OBJ_MASK); @@ -291,14 +255,22 @@ static void *sspace_try_alloc(unsigned size, Allocator *allocator) { - if(size <= MEDIUM_OBJ_THRESHOLD) - return alloc_small_obj(SMALL_SIZE_ROUNDUP(size), allocator); - else if(size <= LARGE_OBJ_THRESHOLD) - return alloc_medium_obj(MEDIUM_SIZE_ROUNDUP(size), allocator); - else if(size <= SUPER_OBJ_THRESHOLD) - return alloc_large_obj(LARGE_SIZE_ROUNDUP(size), allocator); + Sspace *sspace = gc_get_sspace(allocator->gc); + void *p_obj = NULL; + + if(size <= SUPER_OBJ_THRESHOLD) + p_obj = sspace_alloc_normal_obj(sspace, size, allocator); else - return alloc_super_obj(size, allocator); + p_obj = sspace_alloc_super_obj(sspace, size, allocator); + +#ifdef SSPACE_ALLOC_INFO + if(p_obj) sspace_alloc_info(size); +#endif +#ifdef SSPACE_VERIFY + if(p_obj) sspace_verify_alloc(p_obj, size); +#endif + + return p_obj; } /* FIXME:: the collection should be seperated from the alloation */ Index: src/mark_sweep/sspace_chunk.cpp =================================================================== --- src/mark_sweep/sspace_chunk.cpp (revision 559012) +++ src/mark_sweep/sspace_chunk.cpp (working copy) @@ -16,63 +16,68 @@ #include "sspace_chunk.h" -/* PFC stands for partially free chunk */ -#define SMALL_PFC_POOL_NUM SMALL_LOCAL_CHUNK_NUM -#define MEDIUM_PFC_POOL_NUM MEDIUM_LOCAL_CHUNK_NUM -#define LARGE_PFC_POOL_NUM ((SUPER_OBJ_THRESHOLD - LARGE_OBJ_THRESHOLD) >> LARGE_GRANULARITY_BITS) #define NUM_ALIGNED_FREE_CHUNK_BUCKET (HYPER_OBJ_THRESHOLD >> NORMAL_CHUNK_SHIFT_COUNT) #define NUM_UNALIGNED_FREE_CHUNK_BUCKET (HYPER_OBJ_THRESHOLD >> CHUNK_GRANULARITY_BITS) - /* PFC stands for partially free chunk */ -static Pool *small_pfc_pools[SMALL_PFC_POOL_NUM]; -static Pool *medium_pfc_pools[MEDIUM_PFC_POOL_NUM]; -static Pool *large_pfc_pools[LARGE_PFC_POOL_NUM]; +static Size_Segment *size_segments[SIZE_SEGMENT_NUM]; +static Pool **pfc_pools[SIZE_SEGMENT_NUM]; +static Boolean *pfc_steal_flags[SIZE_SEGMENT_NUM]; + static Free_Chunk_List 
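The reworked sspace_alloc.cpp paths above split allocation into a thread-local fast path and a shared slow path. A minimal sketch of the intended call order; the wrapper name is illustrative, while sspace_thread_local_alloc() and sspace_alloc() are the patch's real entry points:

/* Sketch, not part of the patch. */
static void *example_alloc_normal_sized(unsigned size, Allocator *allocator)
{
  /* Fast path: only consults the allocator's cached local chunks and the
     shared PFC pools; it never carves out a brand-new free chunk. */
  void *p_obj = sspace_thread_local_alloc(size, allocator);
  if(p_obj) return p_obj;

  /* Slow path: may take a fresh free chunk via sspace_get_normal_free_chunk()
     and, judging from the FIXME above, is also where a collection can be
     triggered when the sspace runs dry. */
  return sspace_alloc(size, allocator);
}
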
aligned_free_chunk_lists[NUM_ALIGNED_FREE_CHUNK_BUCKET]; static Free_Chunk_List unaligned_free_chunk_lists[NUM_UNALIGNED_FREE_CHUNK_BUCKET]; static Free_Chunk_List hyper_free_chunk_list; -static Boolean small_pfc_steal_flags[SMALL_PFC_POOL_NUM]; -static Boolean medium_pfc_steal_flags[MEDIUM_PFC_POOL_NUM]; -static Boolean large_pfc_steal_flags[LARGE_PFC_POOL_NUM]; +static void init_size_segment(Size_Segment *seg, unsigned int size_min, unsigned int size_max, unsigned int gran_shift_bits, Boolean local_alloc) +{ + seg->size_min = size_min; + seg->size_max = size_max; + seg->local_alloc = local_alloc; + seg->chunk_num = (seg->size_max - seg->size_min) >> gran_shift_bits; + seg->gran_shift_bits = gran_shift_bits; + seg->granularity = (POINTER_SIZE_INT)(1 << gran_shift_bits); + seg->gran_low_mask = seg->granularity - 1; + seg->gran_high_mask = ~seg->gran_low_mask; +} + void sspace_init_chunks(Sspace *sspace) { - unsigned int i; + unsigned int i, j; - /* Init small obj partially free chunk pools */ - for(i=SMALL_PFC_POOL_NUM; i--;){ - small_pfc_steal_flags[i] = FALSE; - small_pfc_pools[i] = sync_pool_create(); + /* Init size segments */ + Size_Segment *size_seg_start = (Size_Segment*)STD_MALLOC(sizeof(Size_Segment) * SIZE_SEGMENT_NUM); + for(i = SIZE_SEGMENT_NUM; i--;){ + size_segments[i] = size_seg_start + i; + size_segments[i]->seg_index = i; } + init_size_segment(size_segments[0], 0, MEDIUM_OBJ_THRESHOLD, SMALL_GRANULARITY_BITS, SMALL_IS_LOCAL_ALLOC); + init_size_segment(size_segments[1], MEDIUM_OBJ_THRESHOLD, LARGE_OBJ_THRESHOLD, MEDIUM_GRANULARITY_BITS, MEDIUM_IS_LOCAL_ALLOC); + init_size_segment(size_segments[2], LARGE_OBJ_THRESHOLD, SUPER_OBJ_THRESHOLD, LARGE_GRANULARITY_BITS, LARGE_IS_LOCAL_ALLOC); - /* Init medium obj partially free chunk pools */ - for(i=MEDIUM_PFC_POOL_NUM; i--;){ - medium_pfc_steal_flags[i] = FALSE; - medium_pfc_pools[i] = sync_pool_create(); + /* Init partially free chunk pools */ + for(i = SIZE_SEGMENT_NUM; i--;){ + pfc_pools[i] = (Pool**)STD_MALLOC(sizeof(Pool*) * size_segments[i]->chunk_num); + pfc_steal_flags[i] = (Boolean*)STD_MALLOC(sizeof(Boolean) * size_segments[i]->chunk_num); + for(j=size_segments[i]->chunk_num; j--;){ + pfc_pools[i][j] = sync_pool_create(); + pfc_steal_flags[i][j] = FALSE; + } } - /* Init large obj partially free chunk pools */ - for(i=LARGE_PFC_POOL_NUM; i--;){ - large_pfc_steal_flags[i] = FALSE; - large_pfc_pools[i] = sync_pool_create(); - } - /* Init aligned free chunk lists */ - for(i=NUM_ALIGNED_FREE_CHUNK_BUCKET; i--;) + for(i = NUM_ALIGNED_FREE_CHUNK_BUCKET; i--;) free_chunk_list_init(&aligned_free_chunk_lists[i]); /* Init nonaligned free chunk lists */ - for(i=NUM_UNALIGNED_FREE_CHUNK_BUCKET; i--;) + for(i = NUM_UNALIGNED_FREE_CHUNK_BUCKET; i--;) free_chunk_list_init(&unaligned_free_chunk_lists[i]); /* Init super free chunk lists */ free_chunk_list_init(&hyper_free_chunk_list); - - /* Init Sspace struct's chunk fields */ - sspace->small_pfc_pools = small_pfc_pools; - sspace->medium_pfc_pools = medium_pfc_pools; - sspace->large_pfc_pools = large_pfc_pools; + + sspace->size_segments = size_segments; + sspace->pfc_pools = pfc_pools; sspace->aligned_free_chunk_lists = aligned_free_chunk_lists; sspace->unaligned_free_chunk_lists = unaligned_free_chunk_lists; sspace->hyper_free_chunk_list = &hyper_free_chunk_list; @@ -85,7 +90,7 @@ sspace_put_free_chunk(sspace, free_chunk); } -static void pfc_pool_set_steal_flag(Pool *pool, unsigned int steal_threshold, unsigned int &steal_flag) +static void pfc_pool_set_steal_flag(Pool *pool, unsigned 
int steal_threshold, Boolean &steal_flag) { Chunk_Header *chunk = (Chunk_Header*)pool_get_entry(pool); while(chunk){ @@ -105,31 +110,18 @@ void sspace_clear_chunk_list(GC *gc) { - unsigned int i; + unsigned int i, j; unsigned int collector_num = gc->num_collectors; - unsigned int steal_threshold; + unsigned int steal_threshold = collector_num << PFC_STEAL_THRESHOLD; - steal_threshold = collector_num << SMALL_PFC_STEAL_THRESHOLD; - for(i=SMALL_PFC_POOL_NUM; i--;){ - Pool *pool = small_pfc_pools[i]; - pfc_pool_set_steal_flag(pool, steal_threshold, small_pfc_steal_flags[i]); - empty_pool(pool); + for(i = SIZE_SEGMENT_NUM; i--;){ + for(j = size_segments[i]->chunk_num; j--;){ + Pool *pool = pfc_pools[i][j]; + pfc_pool_set_steal_flag(pool, steal_threshold, pfc_steal_flags[i][j]); + empty_pool(pool); + } } - steal_threshold = collector_num << MEDIUM_PFC_STEAL_THRESHOLD; - for(i=MEDIUM_PFC_POOL_NUM; i--;){ - Pool *pool = medium_pfc_pools[i]; - pfc_pool_set_steal_flag(pool, steal_threshold, medium_pfc_steal_flags[i]); - empty_pool(pool); - } - - steal_threshold = collector_num << LARGE_PFC_STEAL_THRESHOLD; - for(i=LARGE_PFC_POOL_NUM; i--;){ - Pool *pool = large_pfc_pools[i]; - pfc_pool_set_steal_flag(pool, steal_threshold, large_pfc_steal_flags[i]); - empty_pool(pool); - } - for(i=NUM_ALIGNED_FREE_CHUNK_BUCKET; i--;) free_chunk_list_clear(&aligned_free_chunk_lists[i]); @@ -141,12 +133,17 @@ /* release small obj chunks of each mutator */ Mutator *mutator = gc->mutator_list; while(mutator){ - Chunk_Header **chunks = mutator->small_chunks; - for(i=SMALL_LOCAL_CHUNK_NUM; i--;) - chunks[i] = NULL; - chunks = mutator->medium_chunks; - for(i=MEDIUM_LOCAL_CHUNK_NUM; i--;) - chunks[i] = NULL; + Chunk_Header ***local_chunks = mutator->local_chunks; + for(i = SIZE_SEGMENT_NUM; i--;){ + if(!size_segments[i]->local_alloc){ + assert(!local_chunks[i]); + continue; + } + Chunk_Header **chunks = local_chunks[i]; + assert(chunks); + for(j = size_segments[i]->chunk_num; j--;) + chunks[j] = NULL; + } mutator = mutator->next; } } @@ -377,42 +374,19 @@ #define min_value(x, y) (((x) < (y)) ? 
(x) : (y)) -Chunk_Header *sspace_steal_small_pfc(Sspace *sspace, unsigned int index) +Chunk_Header *sspace_steal_pfc(Sspace *sspace, unsigned int seg_index, unsigned int index) { + Size_Segment *size_seg = sspace->size_segments[seg_index]; Chunk_Header *pfc = NULL; - unsigned int max_index = min_value(index + SMALL_PFC_STEAL_NUM + 1, SMALL_PFC_POOL_NUM); + unsigned int max_index = min_value(index + PFC_STEAL_NUM + 1, size_seg->chunk_num); ++index; for(; index < max_index; ++index){ - if(!small_pfc_steal_flags[index]) continue; - pfc = sspace_get_small_pfc(sspace, index); + if(!pfc_steal_flags[seg_index][index]) continue; + pfc = sspace_get_pfc(sspace, seg_index, index); if(pfc) return pfc; } return NULL; } -Chunk_Header *sspace_steal_medium_pfc(Sspace *sspace, unsigned int index) -{ - Chunk_Header *pfc = NULL; - unsigned int max_index = min_value(index + MEDIUM_PFC_STEAL_NUM + 1, MEDIUM_PFC_POOL_NUM); - ++index; - for(; index < max_index; ++index){ - if(!medium_pfc_steal_flags[index]) continue; - pfc = sspace_get_medium_pfc(sspace, index); - if(pfc) return pfc; - } - return NULL; -} -Chunk_Header *sspace_steal_large_pfc(Sspace *sspace, unsigned int index) -{ - Chunk_Header *pfc = NULL; - unsigned int max_index = min_value(index + LARGE_PFC_STEAL_NUM + 1, LARGE_PFC_POOL_NUM); - ++index; - for(; index < max_index; ++index){ - if(!large_pfc_steal_flags[index]) continue; - pfc = sspace_get_large_pfc(sspace, index); - if(pfc) return pfc; - } - return NULL; -} /* Because this computation doesn't use lock, its result is not accurate. And it is enough. */ POINTER_SIZE_INT sspace_free_memory_size(Sspace *sspace) @@ -479,43 +453,28 @@ return live_num; } -enum Obj_Type { - SMALL_OBJ, - MEDIUM_OBJ, - LARGE_OBJ -}; -static unsigned int index_to_size(unsigned int index, Obj_Type type) +static void pfc_pools_info(Sspace *sspace, Boolean before_gc) { - if(type == SMALL_OBJ) - return SMALL_INDEX_TO_SIZE(index); - if(type == MEDIUM_OBJ) - return MEDIUM_INDEX_TO_SIZE(index); - assert(type == LARGE_OBJ); - return LARGE_INDEX_TO_SIZE(index); -} - -static void pfc_pools_info(Sspace *sspace, Pool **pools, unsigned int pool_num, Obj_Type type, Boolean before_gc) -{ - unsigned int index; - - for(index = 0; index < pool_num; ++index){ - Pool *pool = pools[index]; - Chunk_Header *chunk = NULL; - unsigned int chunk_counter = 0; - unsigned int slot_num = 0; - unsigned int live_num = 0; - pool_iterator_init(pool); - while(chunk = (Chunk_Header*)pool_iterator_next(pool)){ - ++chunk_counter; - slot_num += chunk->slot_num; - live_num += pfc_info(chunk, before_gc); + for(unsigned int i = 0; i < SIZE_SEGMENT_NUM; ++i){ + for(unsigned int j = 0; j < size_segments[i]->chunk_num; ++j){ + Pool *pool = pfc_pools[i][j]; + Chunk_Header *chunk = NULL; + unsigned int chunk_counter = 0; + unsigned int slot_num = 0; + unsigned int live_num = 0; + pool_iterator_init(pool); + while(chunk = (Chunk_Header*)pool_iterator_next(pool)){ + ++chunk_counter; + slot_num += chunk->slot_num; + live_num += pfc_info(chunk, before_gc); + } + if(slot_num){ + printf("Size: %x\tchunk num: %d\tlive obj: %d\ttotal obj: %d\tLive Ratio: %f\n", NORMAL_INDEX_TO_SIZE(j, size_segments[i]), chunk_counter, live_num, slot_num, (float)live_num/slot_num); + assert(live_num < slot_num); + free_mem_size += NORMAL_INDEX_TO_SIZE(j, size_segments[i]) * (slot_num-live_num); + assert(free_mem_size < sspace->committed_heap_size); + } } - if(slot_num){ - printf("Size: %x\tchunk num: %d\tlive obj: %d\ttotal obj: %d\tLive Ratio: %f\n", index_to_size(index, type), chunk_counter, 
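sspace_steal_pfc() above now replaces the three former per-class steal routines: when the pool at (seg_index, index) is empty, it probes up to PFC_STEAL_NUM larger size classes of the same segment, but only pools whose pfc_steal_flags entry was set during sspace_clear_chunk_list(). Note that the allocation paths in sspace_alloc.cpp keep their sspace_steal_pfc() calls commented out, and the extern declaration near the end of sspace_chunk.h declares it with only two parameters; it should match the three-parameter definition here. A hypothetical caller, for illustration only:

static Chunk_Header *example_get_or_steal_pfc(Sspace *sspace,
                                              unsigned int seg_index,
                                              unsigned int index)
{
  Chunk_Header *pfc = sspace_get_pfc(sspace, seg_index, index);
  if(!pfc)
    pfc = sspace_steal_pfc(sspace, seg_index, index);  /* probes index+1 .. index+PFC_STEAL_NUM */
  return pfc;
}
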
live_num, slot_num, (float)live_num/slot_num); - assert(live_num < slot_num); - free_mem_size += index_to_size(index, type) * (slot_num-live_num); - assert(free_mem_size < sspace->committed_heap_size); - } } } @@ -554,15 +513,9 @@ { if(!before_gc) return; - printf("\n\nSMALL PFC INFO:\n\n"); - pfc_pools_info(sspace, small_pfc_pools, SMALL_PFC_POOL_NUM, SMALL_OBJ, before_gc); + printf("\n\nPFC INFO:\n\n"); + pfc_pools_info(sspace, before_gc); - printf("\n\nMEDIUM PFC INFO:\n\n"); - pfc_pools_info(sspace, medium_pfc_pools, MEDIUM_PFC_POOL_NUM, MEDIUM_OBJ, before_gc); - - printf("\n\nLARGE PFC INFO:\n\n"); - pfc_pools_info(sspace, large_pfc_pools, LARGE_PFC_POOL_NUM, LARGE_OBJ, before_gc); - printf("\n\nALIGNED FREE CHUNK INFO:\n\n"); free_lists_info(sspace, aligned_free_chunk_lists, NUM_ALIGNED_FREE_CHUNK_BUCKET, ALIGNED_CHUNK); Index: src/mark_sweep/sspace_chunk.h =================================================================== --- src/mark_sweep/sspace_chunk.h (revision 559012) +++ src/mark_sweep/sspace_chunk.h (working copy) @@ -154,7 +154,7 @@ assert((POINTER_SIZE_INT)chunk->adj_next == (POINTER_SIZE_INT)chunk + NORMAL_CHUNK_SIZE_BYTES); chunk->next = NULL; - chunk->status = CHUNK_NEED_ZEROING; + chunk->status = CHUNK_NORMAL | CHUNK_NEED_ZEROING; chunk->slot_size = slot_size; chunk->slot_num = NORMAL_CHUNK_SLOT_NUM(chunk); chunk->slot_index = 0; @@ -170,7 +170,7 @@ assert((POINTER_SIZE_INT)chunk->adj_next == (POINTER_SIZE_INT)chunk + chunk_size); chunk->next = NULL; - chunk->status = CHUNK_NIL; + chunk->status = CHUNK_IN_USE | CHUNK_ABNORMAL; chunk->slot_size = obj_size; chunk->slot_num = 1; chunk->slot_index = 0; @@ -194,130 +194,69 @@ #define LARGE_GRANULARITY_BITS 7 #define CHUNK_GRANULARITY_BITS 10 -#define SMALL_GRANULARITY (1 << SMALL_GRANULARITY_BITS) -#define MEDIUM_GRANULARITY (1 << MEDIUM_GRANULARITY_BITS) -#define LARGE_GRANULARITY (1 << LARGE_GRANULARITY_BITS) #define CHUNK_GRANULARITY (1 << CHUNK_GRANULARITY_BITS) - -#define SMALL_GRANULARITY_LOW_MASK ((POINTER_SIZE_INT)(SMALL_GRANULARITY-1)) -#define SMALL_GRANULARITY_HIGH_MASK (~SMALL_GRANULARITY_LOW_MASK) -#define MEDIUM_GRANULARITY_LOW_MASK ((POINTER_SIZE_INT)(MEDIUM_GRANULARITY-1)) -#define MEDIUM_GRANULARITY_HIGH_MASK (~MEDIUM_GRANULARITY_LOW_MASK) -#define LARGE_GRANULARITY_LOW_MASK ((POINTER_SIZE_INT)(LARGE_GRANULARITY-1)) -#define LARGE_GRANULARITY_HIGH_MASK (~LARGE_GRANULARITY_LOW_MASK) #define CHUNK_GRANULARITY_LOW_MASK ((POINTER_SIZE_INT)(CHUNK_GRANULARITY-1)) #define CHUNK_GRANULARITY_HIGH_MASK (~CHUNK_GRANULARITY_LOW_MASK) -#define SMALL_LOCAL_CHUNK_NUM (MEDIUM_OBJ_THRESHOLD >> SMALL_GRANULARITY_BITS) -#define MEDIUM_LOCAL_CHUNK_NUM ((LARGE_OBJ_THRESHOLD - MEDIUM_OBJ_THRESHOLD) >> MEDIUM_GRANULARITY_BITS) +#define SMALL_IS_LOCAL_ALLOC TRUE +#define MEDIUM_IS_LOCAL_ALLOC TRUE +#define LARGE_IS_LOCAL_ALLOC FALSE -#define SMALL_SIZE_ROUNDUP(size) (size) -#define MEDIUM_SIZE_ROUNDUP(size) (((size) + MEDIUM_GRANULARITY-1) & MEDIUM_GRANULARITY_HIGH_MASK) -#define LARGE_SIZE_ROUNDUP(size) (((size) + LARGE_GRANULARITY-1) & LARGE_GRANULARITY_HIGH_MASK) +#define NORMAL_SIZE_ROUNDUP(size, seg) (((size) + seg->granularity-1) & seg->gran_high_mask) #define SUPER_OBJ_TOTAL_SIZE(size) (sizeof(Chunk_Header) + (size)) #define SUPER_SIZE_ROUNDUP(size) ((SUPER_OBJ_TOTAL_SIZE(size) + CHUNK_GRANULARITY-1) & CHUNK_GRANULARITY_HIGH_MASK) -#define SMALL_SIZE_TO_INDEX(size) (((size) >> SMALL_GRANULARITY_BITS) - 1) -#define MEDIUM_SIZE_TO_INDEX(size) ((((size)-MEDIUM_OBJ_THRESHOLD) >> MEDIUM_GRANULARITY_BITS) - 1) -#define 
LARGE_SIZE_TO_INDEX(size) ((((size)-LARGE_OBJ_THRESHOLD) >> LARGE_GRANULARITY_BITS) - 1) +#define NORMAL_SIZE_TO_INDEX(size, seg) ((((size)-(seg)->size_min) >> (seg)->gran_shift_bits) - 1) #define ALIGNED_CHUNK_SIZE_TO_INDEX(size) (((size) >> NORMAL_CHUNK_SHIFT_COUNT) - 1) #define UNALIGNED_CHUNK_SIZE_TO_INDEX(size) (((size) >> CHUNK_GRANULARITY_BITS) - 1) -#define SMALL_INDEX_TO_SIZE(index) (((index) + 1) << SMALL_GRANULARITY_BITS) -#define MEDIUM_INDEX_TO_SIZE(index) ((((index) + 1) << MEDIUM_GRANULARITY_BITS) + MEDIUM_OBJ_THRESHOLD) -#define LARGE_INDEX_TO_SIZE(index) ((((index) + 1) << LARGE_GRANULARITY_BITS) + LARGE_OBJ_THRESHOLD) +#define NORMAL_INDEX_TO_SIZE(index, seg) ((((index) + 1) << (seg)->gran_shift_bits) + (seg)->size_min) #define ALIGNED_CHUNK_INDEX_TO_SIZE(index) (((index) + 1) << NORMAL_CHUNK_SHIFT_COUNT) #define UNALIGNED_CHUNK_INDEX_TO_SIZE(index) (((index) + 1) << CHUNK_GRANULARITY_BITS) -#define SMALL_PFC_STEAL_NUM 3 -#define MEDIUM_PFC_STEAL_NUM 3 -#define LARGE_PFC_STEAL_NUM 3 -#define SMALL_PFC_STEAL_THRESHOLD 3 -#define MEDIUM_PFC_STEAL_THRESHOLD 3 -#define LARGE_PFC_STEAL_THRESHOLD 3 +#define PFC_STEAL_NUM 3 +#define PFC_STEAL_THRESHOLD 3 +#define SIZE_SEGMENT_NUM 3 +typedef struct Size_Segment { + unsigned int size_min; + unsigned int size_max; + unsigned int seg_index; + Boolean local_alloc; + unsigned int chunk_num; + unsigned int gran_shift_bits; + POINTER_SIZE_INT granularity; + POINTER_SIZE_INT gran_low_mask; + POINTER_SIZE_INT gran_high_mask; +} Size_Segment; -inline Chunk_Header *sspace_get_small_pfc(Sspace *sspace, unsigned int index) -{ - Pool *pfc_pool = sspace->small_pfc_pools[index]; - Chunk_Header *chunk = (Chunk_Header*)pool_get_entry(pfc_pool); - assert(!chunk || chunk->status == (CHUNK_NORMAL | CHUNK_NEED_ZEROING)); - return chunk; -} -inline void sspace_put_small_pfc(Sspace *sspace, Chunk_Header *chunk, unsigned int index) -{ - assert(chunk); - - Pool *pfc_pool = sspace->small_pfc_pools[index]; - pool_put_entry(pfc_pool, chunk); -} -inline Chunk_Header *sspace_get_medium_pfc(Sspace *sspace, unsigned int index) +inline Chunk_Header *sspace_get_pfc(Sspace *sspace, unsigned int seg_index, unsigned int index) { - Pool *pfc_pool = sspace->medium_pfc_pools[index]; + Pool *pfc_pool = sspace->pfc_pools[seg_index][index]; Chunk_Header *chunk = (Chunk_Header*)pool_get_entry(pfc_pool); assert(!chunk || chunk->status == (CHUNK_NORMAL | CHUNK_NEED_ZEROING)); return chunk; } -inline void sspace_put_medium_pfc(Sspace *sspace, Chunk_Header *chunk, unsigned int index) -{ - assert(chunk); - - Pool *pfc_pool = sspace->medium_pfc_pools[index]; - pool_put_entry(pfc_pool, chunk); -} -inline Chunk_Header *sspace_get_large_pfc(Sspace *sspace, unsigned int index) +inline void sspace_put_pfc(Sspace *sspace, Chunk_Header *chunk) { - Pool *pfc_pool = sspace->large_pfc_pools[index]; - Chunk_Header *chunk = (Chunk_Header*)pool_get_entry(pfc_pool); - assert(!chunk || chunk->status == (CHUNK_NORMAL | CHUNK_NEED_ZEROING)); - return chunk; -} -inline void sspace_put_large_pfc(Sspace *sspace, Chunk_Header *chunk, unsigned int index) -{ - assert(chunk); + unsigned int size = chunk->slot_size; + assert(chunk && (size <= SUPER_OBJ_THRESHOLD)); - Pool *pfc_pool = sspace->large_pfc_pools[index]; - pool_put_entry(pfc_pool, chunk); -} - -/* -inline Chunk_Header *sspace_get_pfc(Sspace *sspace, unsigned int size) -{ - assert(size <= SUPER_OBJ_THRESHOLD); - - if(size > LARGE_OBJ_THRESHOLD) - return sspace_get_large_pfc(sspace, size); - else if(size > MEDIUM_OBJ_THRESHOLD) - return 
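The NORMAL_* macros above fold the per-class SMALL_/MEDIUM_/LARGE_ variants into one form parameterised by a Size_Segment. A worked example under assumed values (size_min == 0, gran_shift_bits == 3, i.e. an 8-byte granularity; the real values come from sspace_init_chunks() and the *_GRANULARITY_BITS macros): a 13-byte request rounds up to 16, maps to index (16 >> 3) - 1 == 1, and NORMAL_INDEX_TO_SIZE(1, seg) maps back to 16. A small self-check in the same spirit, sketch only:

/* Holds for any size that actually falls in the segment
 * (size_min < size <= size_max) and any segment whose size_min is a
 * multiple of its granularity. */
static void example_check_size_index_roundtrip(Size_Segment *seg, unsigned int size)
{
  unsigned int rounded = (unsigned int)NORMAL_SIZE_ROUNDUP(size, seg);
  unsigned int index   = (unsigned int)NORMAL_SIZE_TO_INDEX(rounded, seg);
  assert((unsigned int)NORMAL_INDEX_TO_SIZE(index, seg) == rounded);
}
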
sspace_get_medium_pfc(sspace, size); - return sspace_get_small_pfc(sspace, size); -} -*/ - -inline void sspace_put_pfc(Sspace *sspace, Chunk_Header *chunk, unsigned int size) -{ - assert(size <= SUPER_OBJ_THRESHOLD); - + Size_Segment **size_segs = sspace->size_segments; chunk->status = CHUNK_NORMAL | CHUNK_NEED_ZEROING; - unsigned int index; - if(size > LARGE_OBJ_THRESHOLD){ - assert(!(size & LARGE_GRANULARITY_LOW_MASK)); - assert((size > LARGE_OBJ_THRESHOLD) && (size <= SUPER_OBJ_THRESHOLD)); - index = LARGE_SIZE_TO_INDEX(size); - sspace_put_large_pfc(sspace, chunk, index); - } else if(size > MEDIUM_OBJ_THRESHOLD){ - assert(!(size & MEDIUM_GRANULARITY_LOW_MASK)); - assert((size > MEDIUM_OBJ_THRESHOLD) && (size <= LARGE_OBJ_THRESHOLD)); - index = MEDIUM_SIZE_TO_INDEX(size); - sspace_put_medium_pfc(sspace, chunk, index); - } else { - assert(!(size & SMALL_GRANULARITY_LOW_MASK)); - assert(size <= MEDIUM_OBJ_THRESHOLD); - index = SMALL_SIZE_TO_INDEX(size); - sspace_put_small_pfc(sspace, chunk, index); + for(unsigned int i = 0; i < SIZE_SEGMENT_NUM; ++i){ + if(size <= size_segs[i]->size_max){ + assert(!(size & size_segs[i]->gran_low_mask)); + assert(size > size_segs[i]->size_min); + unsigned int index = NORMAL_SIZE_TO_INDEX(size, size_segs[i]); + Pool *pfc_pool = sspace->pfc_pools[i][index]; + pool_put_entry(pfc_pool, chunk); + return; + } } } @@ -328,9 +267,7 @@ extern Free_Chunk *sspace_get_normal_free_chunk(Sspace *sspace); extern Free_Chunk *sspace_get_abnormal_free_chunk(Sspace *sspace, unsigned int chunk_size); extern Free_Chunk *sspace_get_hyper_free_chunk(Sspace *sspace, unsigned int chunk_size, Boolean is_normal_chunk); -extern Chunk_Header *sspace_steal_small_pfc(Sspace *sspace, unsigned int index); -extern Chunk_Header *sspace_steal_medium_pfc(Sspace *sspace, unsigned int index); -extern Chunk_Header *sspace_steal_large_pfc(Sspace *sspace, unsigned int index); +extern Chunk_Header *sspace_steal_pfc(Sspace *sspace, unsigned int index); extern void zeroing_free_chunk(Free_Chunk *chunk); Index: src/mark_sweep/sspace_mark.cpp =================================================================== --- src/mark_sweep/sspace_mark.cpp (revision 559012) +++ src/mark_sweep/sspace_mark.cpp (working copy) @@ -17,6 +17,18 @@ #include "sspace_mark_sweep.h" #include "../finalizer_weakref/finalizer_weakref.h" +Boolean obj_is_marked_in_table(Partial_Reveal_Object *obj) +{ + unsigned int index_in_word; + volatile POINTER_SIZE_INT *p_color_word = get_color_word_in_table(obj, index_in_word); + assert(p_color_word); + + POINTER_SIZE_INT color_word = *p_color_word; + POINTER_SIZE_INT mark_color = cur_mark_color << index_in_word; + + return color_word & mark_color; +} + static FORCE_INLINE void scan_slot(Collector *collector, REF *p_ref) { Partial_Reveal_Object *p_obj = read_slot(p_ref); @@ -31,6 +43,7 @@ static FORCE_INLINE void scan_object(Collector *collector, Partial_Reveal_Object *p_obj) { + assert((((POINTER_SIZE_INT)p_obj) % GC_OBJECT_ALIGNMENT) == 0); if(!object_has_ref_field(p_obj)) return; REF *p_ref; @@ -174,3 +187,8 @@ return; } + +void trace_obj_in_ms_marking(Collector *collector, void *p_obj) +{ + trace_object(collector, (Partial_Reveal_Object *)p_obj); +} Index: src/mark_sweep/sspace_mark_sweep.cpp =================================================================== --- src/mark_sweep/sspace_mark_sweep.cpp (revision 559012) +++ src/mark_sweep/sspace_mark_sweep.cpp (working copy) @@ -16,6 +16,7 @@ #include "sspace_mark_sweep.h" #include "sspace_verify.h" +#include "gc_ms.h" #include 
"../gen/gen.h" #include "../thread/collector.h" #include "../finalizer_weakref/finalizer_weakref.h" @@ -64,7 +65,7 @@ void mark_sweep_sspace(Collector *collector) { GC *gc = collector->gc; - Sspace *sspace = (Sspace*)gc_get_pos((GC_Gen*)gc); + Sspace *sspace = gc_get_sspace(gc); unsigned int num_active_collectors = gc->num_active_collectors; Index: src/mark_sweep/sspace_sweep.cpp =================================================================== --- src/mark_sweep/sspace_sweep.cpp (revision 559012) +++ src/mark_sweep/sspace_sweep.cpp (working copy) @@ -171,7 +171,7 @@ //zeroing_free_areas_in_pfc((Chunk_Header*)chunk, live_num); #endif chunk_pad_last_index_word((Chunk_Header*)chunk, mark_mask_in_table); - sspace_put_pfc(sspace, chunk, chunk->slot_size); + sspace_put_pfc(sspace, chunk); } /* the rest: chunks with free rate < 0.1. we don't use them */ #ifdef SSPACE_VERIFY Index: src/mark_sweep/sspace_verify.cpp =================================================================== --- src/mark_sweep/sspace_verify.cpp (revision 559012) +++ src/mark_sweep/sspace_verify.cpp (working copy) @@ -47,8 +47,12 @@ void sspace_verify_init(GC *gc) { gc_in_verify = gc; - + +#ifndef ONLY_SSPACE_IN_HEAP POINTER_SIZE_INT heap_size = gc_gen_total_memory_size((GC_Gen*)gc); +#else + POINTER_SIZE_INT heap_size = gc_ms_total_memory_size((GC_MS*)gc); +#endif card_num = heap_size >> VERIFY_CARD_SIZE_BYTES_SHIFT; POINTER_SIZE_INT cards_size = sizeof(Verify_Card) * card_num; @@ -268,7 +272,7 @@ clear_verify_cards(); - Sspace *sspace = (Sspace*)gc_get_pos((GC_Gen*)gc); + Sspace *sspace = gc_get_sspace(gc); Chunk_Header *chunk = (Chunk_Header*)space_heap_start((Space*)sspace); Chunk_Header *sspace_ceiling = (Chunk_Header*)space_heap_end((Space*)sspace); POINTER_SIZE_INT total_live_obj = 0; @@ -314,7 +318,7 @@ /* void sspace_verify_super_obj(GC *gc) { - Sspace *sspace = (Sspace*)gc_get_pos((GC_Gen*)gc); + Sspace *sspace = gc_get_sspace(gc); Chunk_Header *chunk = (Chunk_Header*)space_heap_start((Space*)sspace); Chunk_Header *sspace_ceiling = (Chunk_Header*)space_heap_end((Space*)sspace); Index: src/thread/collector.cpp =================================================================== --- src/thread/collector.cpp (revision 559012) +++ src/thread/collector.cpp (working copy) @@ -24,6 +24,7 @@ #include "../mark_compact/mspace.h" #include "../finalizer_weakref/finalizer_weakref.h" #include "../common/space_tuner.h" +#include "../mark_sweep/sspace.h" unsigned int MINOR_COLLECTORS = 0; unsigned int MAJOR_COLLECTORS = 0; @@ -99,11 +100,14 @@ collector_reset_weakref_sets(collector); #endif +#ifndef ONLY_SSPACE_IN_HEAP /*For LOS_Shrink and LOS_Extend*/ if(collector->gc->tuner->kind != TRANS_NOTHING){ collector->non_los_live_obj_size = 0; collector->los_live_obj_size = 0; } +#endif + collector->result = TRUE; return; } @@ -238,11 +242,19 @@ struct GC_Gen; unsigned int gc_get_processor_num(GC_Gen*); +#ifdef ONLY_SSPACE_IN_HEAP +struct GC_MS; +unsigned int gc_ms_get_processor_num(GC_MS *gc); +#endif void collector_initialize(GC* gc) { //FIXME:: +#ifndef ONLY_SSPACE_IN_HEAP unsigned int num_processors = gc_get_processor_num((GC_Gen*)gc); +#else + unsigned int num_processors = gc_ms_get_processor_num((GC_MS*)gc); +#endif unsigned int nthreads = max( max( MAJOR_COLLECTORS, MINOR_COLLECTORS), max(NUM_COLLECTORS, num_processors)); @@ -259,6 +271,10 @@ collector->thread_handle = (VmThreadHandle)(POINTER_SIZE_INT)i; collector->gc = gc; collector_init_thread(collector); + +#ifdef ONLY_SSPACE_IN_HEAP + 
collector_init_free_chunk_list(collector); +#endif gc->collectors[i] = collector; } Index: src/thread/collector.h =================================================================== --- src/thread/collector.h (revision 559012) +++ src/thread/collector.h (working copy) @@ -26,6 +26,7 @@ struct Block_Header; struct Stealable_Stack; +struct Chunk_Header; struct Free_Chunk_List; #define NORMAL_SIZE_SEGMENT_GRANULARITY_BITS 8 @@ -40,6 +41,7 @@ void *ceiling; void *end; void *alloc_block; + Chunk_Header ***local_chunks; Space* alloc_space; GC* gc; VmThreadHandle thread_handle; /* This thread; */ Index: src/thread/collector_alloc.h =================================================================== --- src/thread/collector_alloc.h (revision 559012) +++ src/thread/collector_alloc.h (working copy) @@ -68,6 +68,7 @@ return NULL; } +assert((((POINTER_SIZE_INT)p_targ_obj) % GC_OBJECT_ALIGNMENT) == 0); #ifdef USE_32BITS_HASHCODE if(obj_is_set_hashcode){ memcpy(p_targ_obj, p_obj, size-GC_OBJECT_ALIGNMENT); Index: src/thread/gc_thread.h =================================================================== --- src/thread/gc_thread.h (revision 559012) +++ src/thread/gc_thread.h (working copy) @@ -47,6 +47,7 @@ void *ceiling; void* end; Block *alloc_block; + Chunk_Header ***local_chunks; Space* alloc_space; GC *gc; VmThreadHandle thread_handle; /* This thread; */ Index: src/thread/mutator.cpp =================================================================== --- src/thread/mutator.cpp (revision 559012) +++ src/thread/mutator.cpp (working copy) @@ -20,6 +20,7 @@ #include "mutator.h" #include "../trace_forward/fspace.h" +#include "../mark_sweep/sspace.h" #include "../finalizer_weakref/finalizer_weakref.h" struct GC_Gen; @@ -41,7 +42,11 @@ mutator->obj_with_fin = finref_get_free_block(gc); else mutator->obj_with_fin = NULL; - + +#ifdef ONLY_SSPACE_IN_HEAP + allocator_init_local_chunks((Allocator*)mutator); +#endif + lock(gc->mutator_list_lock); // vvvvvvvvvvvvvvvvvvvvvvvvvvvvvv mutator->next = (Mutator *)gc->mutator_list; @@ -63,6 +68,10 @@ alloc_context_reset((Allocator*)mutator); +#ifdef ONLY_SSPACE_IN_HEAP + allocactor_destruct_local_chunks((Allocator*)mutator); +#endif + if(gc_is_gen_mode()){ /* put back the remset when a mutator exits */ pool_put_entry(gc->metadata->mutator_remset_pool, mutator->rem_set); mutator->rem_set = NULL; Index: src/thread/mutator.h =================================================================== --- src/thread/mutator.h (revision 559012) +++ src/thread/mutator.h (working copy) @@ -32,6 +32,7 @@ void* ceiling; void* end; void* alloc_block; + Chunk_Header ***local_chunks; Space* alloc_space; GC* gc; VmThreadHandle thread_handle; /* This thread; */ @@ -39,8 +40,6 @@ Vector_Block* rem_set; Vector_Block* obj_with_fin; - Chunk_Header **small_chunks; - Chunk_Header **medium_chunks; Mutator* next; /* The gc info area associated with the next active thread. 
*/ } Mutator; Index: src/thread/mutator_alloc.cpp =================================================================== --- src/thread/mutator_alloc.cpp (revision 559012) +++ src/thread/mutator_alloc.cpp (working copy) @@ -19,9 +19,8 @@ */ #include "gc_thread.h" - #include "../gen/gen.h" - +#include "../mark_sweep/gc_ms.h" #include "../finalizer_weakref/finalizer_weakref.h" //#define GC_OBJ_SIZE_STATISTIC @@ -73,14 +72,19 @@ gc_alloc_statistic_obj_distrubution(size); #endif +#ifndef ONLY_SSPACE_IN_HEAP if ( size > GC_OBJ_SIZE_THRESHOLD ) p_obj = (Managed_Object_Handle)los_alloc(size, allocator); - else{ + else p_obj = (Managed_Object_Handle)nos_alloc(size, allocator); - } - +#else + p_obj = (Managed_Object_Handle)gc_ms_alloc(size, allocator); +#endif + if( p_obj == NULL ) return NULL; + + assert((((POINTER_SIZE_INT)p_obj) % GC_OBJECT_ALIGNMENT) == 0); obj_set_vt((Partial_Reveal_Object*)p_obj, (VT)ah); @@ -111,9 +115,14 @@ /* Try to allocate an object from the current Thread Local Block */ Managed_Object_Handle p_obj; +#ifndef ONLY_SSPACE_IN_HEAP p_obj = (Managed_Object_Handle)thread_local_alloc(size, allocator); +#else + p_obj = (Managed_Object_Handle)gc_ms_fast_alloc(size, allocator); +#endif if(p_obj == NULL) return NULL; - + + assert((((POINTER_SIZE_INT)p_obj) % GC_OBJECT_ALIGNMENT) == 0); obj_set_vt((Partial_Reveal_Object*)p_obj, (VT)ah); return p_obj; Index: src/trace_forward/fspace.cpp =================================================================== --- src/trace_forward/fspace.cpp (revision 559012) +++ src/trace_forward/fspace.cpp (working copy) @@ -19,6 +19,7 @@ */ #include "fspace.h" +#include "../gen/gen.h" Boolean NOS_PARTIAL_FORWARD = FALSE; @@ -75,6 +76,11 @@ fspace->num_collections = 0; fspace->time_collections = 0; fspace->survive_ratio = 0.2f; + fspace->last_alloced_size = 0; + fspace->accumu_alloced_size = 0; + fspace->total_alloced_size = 0; + fspace->last_surviving_size = 0; + fspace->period_surviving_size = 0; fspace->gc = gc; gc_set_nos((GC_Gen*)gc, (Space*)fspace); @@ -98,7 +104,7 @@ fspace_destruct_blocks(fspace); STD_FREE(fspace); } - + void fspace_reset_for_allocation(Fspace* fspace) { unsigned int first_idx = fspace->first_block_idx; @@ -106,7 +112,7 @@ unsigned int marked_last_idx = 0; Boolean is_major_collection = !gc_match_kind(fspace->gc, MINOR_COLLECTION); Boolean gen_mode = gc_is_gen_mode(); - + if( is_major_collection || NOS_PARTIAL_FORWARD == FALSE || !gen_mode) { @@ -176,14 +182,13 @@ void collector_execute_task(GC* gc, TaskType task_func, Space* space); -#include "../gen/gen.h" unsigned int mspace_free_block_idx; /* world is stopped when starting fspace_collection */ void fspace_collection(Fspace *fspace) { fspace->num_collections++; - + GC* gc = fspace->gc; mspace_free_block_idx = ((GC_Gen*)gc)->mos->free_block_idx; Index: src/trace_forward/fspace_alloc.cpp =================================================================== --- src/trace_forward/fspace_alloc.cpp (revision 559012) +++ src/trace_forward/fspace_alloc.cpp (working copy) @@ -38,7 +38,7 @@ Block_Header* alloc_block = (Block_Header*)&(fspace->blocks[allocated_idx - fspace->first_block_idx]); allocator_init_free_block(allocator, alloc_block); - + return TRUE; } Index: src/trace_forward/fspace_gen_forward_pool.cpp =================================================================== --- src/trace_forward/fspace_gen_forward_pool.cpp (revision 559012) +++ src/trace_forward/fspace_gen_forward_pool.cpp (working copy) @@ -46,6 +46,7 @@ static FORCE_INLINE void scan_object(Collector* collector, 
Partial_Reveal_Object *p_obj) { + assert((((POINTER_SIZE_INT)p_obj) % GC_OBJECT_ALIGNMENT) == 0); if (!object_has_ref_field(p_obj)) return; REF *p_ref; Index: src/trace_forward/fspace_nongen_forward_pool.cpp =================================================================== --- src/trace_forward/fspace_nongen_forward_pool.cpp (revision 559012) +++ src/trace_forward/fspace_nongen_forward_pool.cpp (working copy) @@ -37,6 +37,7 @@ static FORCE_INLINE void scan_object(Collector* collector, Partial_Reveal_Object *p_obj) { + assert((((POINTER_SIZE_INT)p_obj) % GC_OBJECT_ALIGNMENT) == 0); if (!object_has_ref_field_before_scan(p_obj)) return; REF *p_ref; Index: src/verify/verifier_metadata.cpp =================================================================== --- src/verify/verifier_metadata.cpp (revision 559012) +++ src/verify/verifier_metadata.cpp (working copy) @@ -68,7 +68,9 @@ heap_verifier_metadata->new_objects_pool = sync_pool_create(); heap_verifier_metadata->hashcode_pool_before_gc = sync_pool_create(); heap_verifier_metadata->hashcode_pool_after_gc = sync_pool_create(); - + heap_verifier_metadata->obj_with_fin_pool= sync_pool_create(); + heap_verifier_metadata->finalizable_obj_pool= sync_pool_create(); + verifier_metadata = heap_verifier_metadata; heap_verifier->heap_verifier_metadata = heap_verifier_metadata; return; @@ -90,6 +92,9 @@ sync_pool_destruct(metadata->new_objects_pool); sync_pool_destruct(metadata->hashcode_pool_before_gc); sync_pool_destruct(metadata->hashcode_pool_after_gc); + + sync_pool_destruct(metadata->obj_with_fin_pool); + sync_pool_destruct(metadata->finalizable_obj_pool); for(unsigned int i=0; inum_alloc_segs; i++){ assert(metadata->segments[i]); @@ -160,9 +165,14 @@ } } -Pool* verifier_copy_pool_reverse_order(Pool* source_pool) +void verifier_remove_pool(Pool* working_pool, Pool* free_pool, Boolean is_vector_stack) { - Pool* dest_pool = sync_pool_create(); + verifier_clear_pool(working_pool, free_pool, is_vector_stack); + sync_pool_destruct(working_pool); +} + +void verifier_copy_pool_reverse_order(Pool* dest_pool, Pool* source_pool) +{ pool_iterator_init(source_pool); Vector_Block* dest_set = verifier_free_set_pool_get_entry(verifier_metadata->free_set_pool); @@ -176,5 +186,33 @@ pool_put_entry(dest_pool, dest_set); dest_set = verifier_free_set_pool_get_entry(verifier_metadata->free_set_pool); } - return dest_pool; + return ; } + +/*copy dest pool to source pool, ignore NULL slot*/ +void verifier_copy_pool(Pool* dest_pool, Pool* source_pool) +{ + Pool* temp_pool = sync_pool_create(); + + Vector_Block* dest_set = verifier_free_set_pool_get_entry(verifier_metadata->free_set_pool); + pool_iterator_init(source_pool); + while(Vector_Block *source_set = pool_iterator_next(source_pool)){ + POINTER_SIZE_INT *iter = vector_block_iterator_init(source_set); + while( !vector_block_iterator_end(source_set, iter)){ + assert(!vector_block_is_full(dest_set)); + if(*iter) vector_block_add_entry(dest_set, *iter); + iter = vector_block_iterator_advance(source_set, iter); + } + pool_put_entry(temp_pool, dest_set); + dest_set = verifier_free_set_pool_get_entry(verifier_metadata->free_set_pool); + } + + dest_set = NULL; + pool_iterator_init(temp_pool); + while(dest_set = pool_iterator_next(temp_pool)){ + pool_put_entry(dest_pool, dest_set); + } + + sync_pool_destruct(temp_pool); + return; +} Index: src/verify/verifier_metadata.h =================================================================== --- src/verify/verifier_metadata.h (revision 559012) +++ src/verify/verifier_metadata.h 
(working copy) @@ -46,6 +46,9 @@ Pool* hashcode_pool_after_gc; Pool* new_objects_pool; + + Pool* obj_with_fin_pool; + Pool* finalizable_obj_pool; } Heap_Verifier_Metadata; extern Heap_Verifier_Metadata* verifier_metadata; @@ -56,8 +59,11 @@ Vector_Block* gc_verifier_metadata_extend(Pool* pool, Boolean is_set_pool); void verifier_clear_pool(Pool* working_pool, Pool* free_pool, Boolean is_vector_stack); -Pool* verifier_copy_pool_reverse_order(Pool* source_pool); +void verifier_remove_pool(Pool* working_pool, Pool* free_pool, Boolean is_vector_stack); +void verifier_copy_pool_reverse_order(Pool* dest_pool, Pool* source_pool); +void verifier_copy_pool(Pool* dest_pool, Pool* source_pool); + inline Vector_Block* verifier_free_set_pool_get_entry(Pool* free_pool) { assert(free_pool); Index: src/verify/verifier_scanner.cpp =================================================================== --- src/verify/verifier_scanner.cpp (revision 559012) +++ src/verify/verifier_scanner.cpp (working copy) @@ -171,6 +171,7 @@ /*p_obj can be NULL , When GC happened, the obj in Finalize objs list will be clear.*/ //assert(p_obj != NULL); if(p_obj == NULL) continue; + if(heap_verifier->gc_is_gen_mode && heap_verifier->is_before_gc && !obj_belongs_to_nos(p_obj)) continue; verifier_tracestack_push(p_obj, gc_verifier->trace_stack); } obj_set = pool_iterator_next(obj_set_pool); @@ -203,21 +204,23 @@ void verifier_scan_resurrect_objects(Heap_Verifier* heap_verifier) { GC_Gen* gc = (GC_Gen*)heap_verifier->gc; + Heap_Verifier_Metadata* verifier_metadata = heap_verifier->heap_verifier_metadata; verifier_update_info_before_resurrect(heap_verifier); #ifndef BUILD_IN_REFERENT heap_verifier->gc_verifier->is_tracing_resurrect_obj = TRUE; if(heap_verifier->is_before_gc){ - verifier_trace_objsets(heap_verifier, gc->finref_metadata->obj_with_fin_pool); + verifier_copy_pool(verifier_metadata->obj_with_fin_pool, gc->finref_metadata->obj_with_fin_pool); + verifier_trace_objsets(heap_verifier, verifier_metadata->obj_with_fin_pool); }else{ if(!heap_verifier->gc_verifier->is_before_fallback_collection){ verify_live_finalizable_obj(heap_verifier, gc->finref_metadata->obj_with_fin_pool); - Pool* finalizable_obj_pool = verifier_copy_pool_reverse_order(gc->finref_metadata->finalizable_obj_pool); - verifier_trace_objsets(heap_verifier, finalizable_obj_pool); - verifier_clear_pool(finalizable_obj_pool, heap_verifier->heap_verifier_metadata->free_set_pool, FALSE); - sync_pool_destruct(finalizable_obj_pool); + verifier_copy_pool_reverse_order(verifier_metadata->finalizable_obj_pool, gc->finref_metadata->finalizable_obj_pool); + verifier_trace_objsets(heap_verifier, verifier_metadata->finalizable_obj_pool); + verifier_clear_pool(verifier_metadata->finalizable_obj_pool, heap_verifier->heap_verifier_metadata->free_set_pool, FALSE); }else{ - verifier_trace_objsets(heap_verifier, gc->finref_metadata->obj_with_fin_pool); + verifier_trace_objsets(heap_verifier, verifier_metadata->obj_with_fin_pool ); } + verifier_clear_pool(verifier_metadata->obj_with_fin_pool, heap_verifier->heap_verifier_metadata->free_set_pool, FALSE); } heap_verifier->gc_verifier->is_tracing_resurrect_obj = FALSE; verifier_update_info_after_resurrect(heap_verifier); @@ -225,10 +228,14 @@ } void verifier_scan_unreachable_objects(Heap_Verifier* heap_verifier); - +void verifier_scan_prepare() +{ + verifier_reset_hash_distance(); +} void verifier_scan_live_objects(Heap_Verifier* heap_verifier) { Heap_Verifier_Metadata* verifier_metadata = heap_verifier->heap_verifier_metadata; + 
verifier_scan_prepare(); verifier_trace_rootsets(heap_verifier, verifier_metadata->root_set_pool); verifier_scan_resurrect_objects(heap_verifier); verifier_scan_unreachable_objects(heap_verifier); Index: src/verify/verify_gc_effect.cpp =================================================================== --- src/verify/verify_gc_effect.cpp (revision 559012) +++ src/verify/verify_gc_effect.cpp (working copy) @@ -73,6 +73,7 @@ void verify_live_finalizable_obj(Heap_Verifier* heap_verifier, Pool* live_finalizable_objs_pool) { + if(heap_verifier->gc_is_gen_mode) return; pool_iterator_init(live_finalizable_objs_pool); Vector_Block* live_fin_objs = pool_iterator_next(live_finalizable_objs_pool); while(live_fin_objs){ @@ -255,19 +256,21 @@ return obj_hash_info; } #else +#define GCGEN_HASH_MASK 0x1fc inline Object_Hashcode_Inform* verifier_copy_hashcode(Partial_Reveal_Object* p_obj, Heap_Verifier* heap_verifier, Boolean is_before_gc) { hash_obj_distance ++; - if(!hashcode_is_set(p_obj)) return NULL; + Obj_Info_Type info = get_obj_info_raw(p_obj); + int hash = info & GCGEN_HASH_MASK; + + if(!hash) return NULL; + GC_Verifier* gc_verifier = heap_verifier->gc_verifier; if(is_before_gc) gc_verifier->num_hash_before_gc++; else gc_verifier->num_hash_after_gc++; - Obj_Info_Type info = get_obj_info_raw(p_obj); - - int hash = info & GCGEN_HASH_MASK; unsigned int size = sizeof(Object_Hashcode_Inform); Object_Hashcode_Inform* obj_hash_info = (Object_Hashcode_Inform*) STD_MALLOC(size); assert(obj_hash_info); @@ -526,5 +529,7 @@ verifier_set_fallback_collection(heap_verifier->gc_verifier, FALSE); } +void verifier_reset_hash_distance() +{ hash_obj_distance = 0;} Index: src/verify/verify_gc_effect.h =================================================================== --- src/verify/verify_gc_effect.h (revision 559012) +++ src/verify/verify_gc_effect.h (working copy) @@ -85,8 +85,8 @@ void verify_gc_effect(Heap_Verifier* heap_verifier); +void verifier_reset_hash_distance(); - inline unsigned int verifier_get_gc_collect_kind(GC_Verifier* gc_verifier) { return gc_verifier->gc_collect_kind; } inline void verifier_set_gc_collect_kind(GC_Verifier* gc_verifier, unsigned int collect_kind)