Index: gc_gen/src/common/compressed_ref.cpp
===================================================================
--- gc_gen/src/common/compressed_ref.cpp (revision 529215)
+++ gc_gen/src/common/compressed_ref.cpp (working copy)
@@ -69,4 +69,3 @@
   }
 }
 
-
Index: gc_gen/src/common/gc_block.h
===================================================================
--- gc_gen/src/common/gc_block.h (revision 529215)
+++ gc_gen/src/common/gc_block.h (working copy)
@@ -74,7 +74,10 @@
 #define GC_BLOCK_HEADER_SIZE_BYTES (OFFSET_TABLE_SIZE_BYTES + GC_BLOCK_HEADER_VARS_SIZE_BYTES)
 #define GC_BLOCK_BODY_SIZE_BYTES (GC_BLOCK_SIZE_BYTES - GC_BLOCK_HEADER_SIZE_BYTES)
 #define GC_BLOCK_BODY(block) ((void*)((POINTER_SIZE_INT)(block) + GC_BLOCK_HEADER_SIZE_BYTES))
-#define GC_BLOCK_END(block) ((void*)((POINTER_SIZE_INT)(block) + GC_BLOCK_SIZE_BYTES))
+/*LOS_Shrink: We use some fake block headers while computing the mos object targets,
+so we need another way to compute a block's end.*/
+//#define GC_BLOCK_END(block) ((void*)((POINTER_SIZE_INT)(block) + GC_BLOCK_SIZE_BYTES))
+#define GC_BLOCK_END(block) (((Block_Header*)(block))->ceiling)
 #define GC_BLOCK_LOW_MASK ((POINTER_SIZE_INT)(GC_BLOCK_SIZE_BYTES - 1))
 #define GC_BLOCK_HIGH_MASK (~GC_BLOCK_LOW_MASK)
@@ -267,4 +270,3 @@
 #endif //#ifndef _BLOCK_H_
 
-
Index: gc_gen/src/common/gc_common.cpp
===================================================================
--- gc_gen/src/common/gc_common.cpp (revision 529215)
+++ gc_gen/src/common/gc_common.cpp (working copy)
@@ -35,7 +35,7 @@
 extern POINTER_SIZE_INT NOS_SIZE;
 extern POINTER_SIZE_INT MIN_NOS_SIZE;
-extern POINTER_SIZE_INT MIN_LOS_SIZE;
+extern POINTER_SIZE_INT INIT_LOS_SIZE;
 extern Boolean FORCE_FULL_COMPACT;
 extern Boolean MINOR_ALGORITHM;
@@ -161,8 +161,8 @@
     MIN_NOS_SIZE = get_size_property("gc.min_nos_size");
   }
 
-  if (is_property_set("gc.min_los_size", VM_PROPERTIES) == 1) {
-    MIN_LOS_SIZE = get_size_property("gc.min_los_size");
+  if (is_property_set("gc.init_los_size", VM_PROPERTIES) == 1) {
+    INIT_LOS_SIZE = get_size_property("gc.init_los_size");
   }
 
   if (is_property_set("gc.num_collectors", VM_PROPERTIES) == 1) {
Index: gc_gen/src/common/gc_common.h
===================================================================
--- gc_gen/src/common/gc_common.h (revision 529215)
+++ gc_gen/src/common/gc_common.h (working copy)
@@ -436,7 +436,7 @@
 extern void* los_boundary;
 
+extern Boolean* p_global_lspace_move_obj;
 inline Boolean obj_is_moved(Partial_Reveal_Object* p_obj)
-{ return p_obj >= los_boundary; }
-
+{ return ((p_obj >= los_boundary) || (*p_global_lspace_move_obj)); }
 
 #endif //_GC_COMMON_H_
Index: gc_gen/src/common/gc_for_vm.cpp
===================================================================
--- gc_gen/src/common/gc_for_vm.cpp (revision 529215)
+++ gc_gen/src/common/gc_for_vm.cpp (working copy)
@@ -204,7 +204,7 @@
 unsigned int gc_time_since_last_gc()
 {  assert(0); return 0; }
 
-#define GCGEN_HASH_MASK 0x7c
+#define GCGEN_HASH_MASK 0x1fc
 int32 gc_get_hashcode(Managed_Object_Handle p_object)
 {
   Partial_Reveal_Object *obj = (Partial_Reveal_Object *)p_object;
@@ -214,7 +214,7 @@
   int hash = info & GCGEN_HASH_MASK;
   if (!hash) {
     hash = (int)((((POINTER_SIZE_INT)obj) >> 3) & GCGEN_HASH_MASK);
-    if(!hash)  hash = (23 & GCGEN_HASH_MASK);
+    if(!hash)  hash = (0x173 & GCGEN_HASH_MASK);
     unsigned int new_info = (unsigned int)(info | hash);
     while (true) {
       unsigned int temp = atomic_cas32(&obj->obj_info, new_info, info);
@@ -239,8 +239,6 @@
 *    Partial_Reveal_Object **p_referent_field = obj_get_referent_field(p_reference);
 *    *p_referent_field = (Partial_Reveal_Object *)((unsigned int)*p_referent_field | PHANTOM_REF_ENQUEUED_MASK | ~PHANTOM_REF_PENDING_MASK);
 *  }
-
- * }
 */
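The GC_BLOCK_END change in gc_block.h above is the key enabler for LOS shrinking: once the tuner starts handing out fake block headers, a block's end can no longer be derived from its address. A minimal sketch of the distinction, assuming a two-field header and an illustrative block size (the real Block_Header in gc_block.h has more fields):

typedef unsigned long POINTER_SIZE_INT;
#define GC_BLOCK_SIZE_BYTES (32 * 1024) /* illustrative value only */

/* A real header sits at a block-aligned heap address, so
 * "address + GC_BLOCK_SIZE_BYTES" happens to be its end. A fake header
 * (the tuner's interim_blocks array, added later in this patch) lives in
 * malloc'ed memory, so only its ceiling field is meaningful. */
typedef struct Block_Header_Sketch {
  void* base;
  void* ceiling;
} Block_Header_Sketch;

/* Old macro: correct only for real, aligned blocks. */
#define GC_BLOCK_END_BY_ADDR(block) \
  ((void*)((POINTER_SIZE_INT)(block) + GC_BLOCK_SIZE_BYTES))

/* New macro: reads the header, so it works for fake blocks too. */
#define GC_BLOCK_END_BY_CEILING(block) \
  (((Block_Header_Sketch*)(block))->ceiling)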
Index: gc_gen/src/common/gc_platform.h
===================================================================
--- gc_gen/src/common/gc_platform.h (revision 529215)
+++ gc_gen/src/common/gc_platform.h (working copy)
@@ -57,6 +57,7 @@
 #endif
 
+#define ABS_DIFF(x, y) (((x)>(y))?((x)-(y)):((y)-(x)))
 #define USEC_PER_SEC INT64_C(1000000)
 
 #define VmThreadHandle void*
Index: gc_gen/src/common/interior_pointer.cpp
===================================================================
--- gc_gen/src/common/interior_pointer.cpp (revision 529215)
+++ gc_gen/src/common/interior_pointer.cpp (working copy)
@@ -91,4 +91,3 @@
 }
 
-
Index: gc_gen/src/common/space_tuner.cpp
===================================================================
--- gc_gen/src/common/space_tuner.cpp (revision 529215)
+++ gc_gen/src/common/space_tuner.cpp (working copy)
@@ -19,7 +19,6 @@
 */
 
 #include "space_tuner.h"
-#include
 
 struct GC_Gen;
@@ -31,7 +30,7 @@
 POINTER_SIZE_INT mspace_get_expected_threshold(Mspace* mspace);
 POINTER_SIZE_INT lspace_get_failure_size(Lspace* lspace);
 
-/*Now just prepare the alloc_size field of mspace, used to compute new los size.*/
+/*Prepare the parameters which are to be used to compute the new los size.*/
 void gc_space_tune_prepare(GC* gc, unsigned int cause)
 {
   if(gc_match_kind(gc, MINOR_COLLECTION))
@@ -42,49 +41,92 @@
   Space* lspace = (Space*)gc_get_los((GC_Gen*)gc);
   Space_Tuner* tuner = gc->tuner;
 
-  assert(fspace->free_block_idx > fspace->first_block_idx);
+  assert(fspace->free_block_idx >= fspace->first_block_idx);
   unsigned int nos_alloc_size = (fspace->free_block_idx - fspace->first_block_idx) * GC_BLOCK_SIZE_BYTES;
   fspace->alloced_size = nos_alloc_size;
+  /*Fixme: LOS_Adaptive: There should be a condition here, that fspace->collection_num != 0*/
   mspace->alloced_size += (unsigned int)((float)nos_alloc_size * fspace->survive_ratio);
 
-  /*For_statistic alloc speed: Speed could be represented by sum of alloced size.*/
   tuner->speed_los += lspace->alloced_size;
+  tuner->speed_los = (tuner->speed_los + tuner->old_speed_los) >> 1;
   tuner->speed_mos += mspace->alloced_size;
-
+  tuner->speed_mos = (tuner->speed_mos + tuner->old_speed_mos) >> 1;
+
   /*For_statistic wasted memory*/
   POINTER_SIZE_INT curr_used_los = lspace->surviving_size + lspace->alloced_size;
-  assert(curr_used_los < lspace->committed_heap_size);
+  assert(curr_used_los <= lspace->committed_heap_size);
   POINTER_SIZE_INT curr_wast_los = lspace->committed_heap_size - curr_used_los;
   tuner->wast_los += curr_wast_los;
   POINTER_SIZE_INT curr_used_mos = mspace->surviving_size + mspace->alloced_size;
-  POINTER_SIZE_INT curr_wast_mos = mspace_get_expected_threshold((Mspace*)mspace) - curr_used_mos;
+  POINTER_SIZE_INT expected_mos = mspace_get_expected_threshold((Mspace*)mspace);
+  POINTER_SIZE_INT curr_wast_mos = 0;
+  if(expected_mos > curr_used_mos)
+    curr_wast_mos = expected_mos - curr_used_mos;
   tuner->wast_mos += curr_wast_mos;
-  tuner->current_dw = abs((int)tuner->wast_mos - (int)tuner->wast_los);
+  tuner->current_dw = ABS_DIFF(tuner->wast_mos, tuner->wast_los);
 
   /*For_statistic ds in heuristic*/
   tuner->current_ds = (unsigned int)((float)fspace->committed_heap_size * fspace->survive_ratio);
-  /*Fixme: Threshold should be computed by heuristic.
-    tslow, total recycled heap size shold be statistic.*/
   tuner->threshold = tuner->current_ds;
-  //For debug
   if(tuner->threshold > 8 * MB) tuner->threshold = 8 * MB;
-  tuner->min_tuning_size = tuner->current_ds;
-  //For debug
   if(tuner->min_tuning_size > 4 * MB) tuner->min_tuning_size = 4 * MB;
+
   return;
 }
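The two new speed updates above blend each space's accumulated allocation with the value carried over from the previous tuning epoch, a simple halving average so one bursty cycle cannot dominate the ratio. A self-contained sketch of the update rule, with Space_Tuner pared down to the relevant fields (field names mirror the patch; the values are illustrative):

#include <stdio.h>

typedef unsigned long POINTER_SIZE_INT;

/* Pared-down view of Space_Tuner: only the speed statistics. */
typedef struct {
  POINTER_SIZE_INT speed_los, old_speed_los;
  POINTER_SIZE_INT speed_mos, old_speed_mos;
} Tuner_Sketch;

/* One tuning epoch: accumulate this cycle's allocation, then average
 * with the previous epoch so the ratio adapts gradually. */
static void update_speeds(Tuner_Sketch* t,
                          POINTER_SIZE_INT los_alloced,
                          POINTER_SIZE_INT mos_alloced)
{
  t->speed_los += los_alloced;
  t->speed_los = (t->speed_los + t->old_speed_los) >> 1;
  t->speed_mos += mos_alloced;
  t->speed_mos = (t->speed_mos + t->old_speed_mos) >> 1;
}

int main(void)
{
  Tuner_Sketch t = {0, 8, 0, 32}; /* arbitrary units */
  update_speeds(&t, 4, 16);
  printf("los %lu mos %lu\n", t.speed_los, t.speed_mos); /* 6 and 24 */
  return 0;
}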
+/*Check the tuning size; if it is too small, cancel the tuning.*/
+void check_space_tuner(GC* gc)
+{
+  Space_Tuner* tuner = gc->tuner;
+  if((!tuner->need_tune) && (!tuner->force_tune)){
+    assert(tuner->kind == TRANS_NOTHING);
+    assert(tuner->tuning_size == 0);
+    return;
+  }
+  Space* lspace = (Space*)gc_get_los((GC_Gen*)gc);
+  if((!tuner->force_tune) && (tuner->tuning_size < tuner->min_tuning_size)){
+    tuner->tuning_size = 0;
+    goto check_size;
+  }
+  if((tuner->need_tune) && (!tuner->force_tune)) goto check_size;
+  /*tuner->force_tune must be true here!*/
+  POINTER_SIZE_INT los_fail_sz_uped = lspace_get_failure_size((Lspace*)lspace);
+  assert(!(los_fail_sz_uped % GC_BLOCK_SIZE_BYTES));
+
+  if(tuner->kind == TRANS_FROM_LOS_TO_MOS){
+    tuner->kind = TRANS_FROM_MOS_TO_LOS;
+    tuner->tuning_size = 0;
+    lspace->move_object = 0;
+  }
+  if(tuner->tuning_size < los_fail_sz_uped){
+    tuner->tuning_size = los_fail_sz_uped;
+  }
+
+check_size:
+  tuner->tuning_size = round_down_to_size(tuner->tuning_size, GC_BLOCK_SIZE_BYTES);
+  if(tuner->tuning_size == 0){
+    tuner->kind = TRANS_NOTHING;
+    lspace->move_object = 0;
+  }
+
+  return;
+}
+
+
+extern POINTER_SIZE_INT min_los_size_bytes;
+extern POINTER_SIZE_INT min_none_los_size_bytes;
+/*Give the tuning kind, and the tuning size hint*/
 void gc_space_tune_before_gc(GC* gc, unsigned int cause)
 {
   if(gc_match_kind(gc, MINOR_COLLECTION)) return;
-
   Space_Tuner* tuner = gc->tuner;
-  /*Only tune when LOS need extend*/
-  if( tuner->wast_los > tuner->wast_mos ) return;
-  /*Needn't tune if dw does not reach threshold.*/
-  if(tuner->current_dw < tuner->threshold) return;
+  if(tuner->current_dw > tuner->threshold) tuner->need_tune = 1;
+  /*If LOS is full, we should tune at least "tuner->least_tuning_size" size*/
+  if(gc->cause == GC_CAUSE_LOS_IS_FULL) tuner->force_tune = 1;
+  if((!tuner->need_tune) && (!tuner->force_tune)) return;
 
   Blocked_Space* mspace = (Blocked_Space*)gc_get_mos((GC_Gen*)gc);
   Blocked_Space* fspace = (Blocked_Space*)gc_get_nos((GC_Gen*)gc);
@@ -92,78 +134,63 @@
   POINTER_SIZE_INT los_expect_survive_sz = (POINTER_SIZE_INT)((float)(lspace->surviving_size + lspace->alloced_size) * lspace->survive_ratio);
   POINTER_SIZE_INT los_expect_free_sz = lspace->committed_heap_size - los_expect_survive_sz;
-
   POINTER_SIZE_INT mos_expect_survive_sz = (POINTER_SIZE_INT)((float)(mspace->surviving_size + mspace->alloced_size) * mspace->survive_ratio);
   POINTER_SIZE_INT mos_expect_free_sz = mspace_get_expected_threshold((Mspace*)mspace) - mos_expect_survive_sz;
-
   POINTER_SIZE_INT total_free = los_expect_free_sz + mos_expect_free_sz;
-
   float new_los_ratio = (float)tuner->speed_los / (float)(tuner->speed_los + tuner->speed_mos);
   POINTER_SIZE_INT new_free_los_sz = (POINTER_SIZE_INT)((float)total_free * new_los_ratio);
-
-  if(new_free_los_sz > los_expect_free_sz &&
-     (new_free_los_sz - los_expect_free_sz > tuner->min_tuning_size)){
+  POINTER_SIZE_INT max_tuning_size = 0;
+  /*LOS_Extend:*/
+  if((new_free_los_sz > los_expect_free_sz) )
+  {
+    if( (!tuner->force_tune) && (new_free_los_sz - los_expect_free_sz < tuner->min_tuning_size) ){
+      tuner->kind = TRANS_NOTHING;
+      tuner->tuning_size = 0;
+      return;
+    }
     tuner->kind = TRANS_FROM_MOS_TO_LOS;
-    tuner->tuning_size = round_up_to_size(new_free_los_sz - los_expect_free_sz, SPACE_ALLOC_UNIT);
-    tuner->least_tuning_size = round_up_to_size(lspace_get_failure_size((Lspace*)lspace), SPACE_ALLOC_UNIT);
-    tuner->conservative_tuning_size = round_up_to_size(((tuner->tuning_size + tuner->least_tuning_size) >> 1), SPACE_ALLOC_UNIT);
-
-    POINTER_SIZE_INT none_los_size;
-    #ifdef STATIC_NOS_MAPPING
-    none_los_size = mspace->committed_heap_size;
-    #else
-    /*Fixme: There should be a minimal remain size like heap_size >> 3.*/
-    none_los_size = mspace->committed_heap_size + fspace->committed_heap_size;
-    #endif
-
-    if(tuner->tuning_size < none_los_size) return;
-
-    tuner->tuning_size = tuner->conservative_tuning_size;
-
-    if(tuner->tuning_size < none_los_size) return;
-
-    tuner->tuning_size = tuner->least_tuning_size;
-
-    if((tuner->tuning_size + gc->num_active_collectors * GC_BLOCK_SIZE_BYTES) >= none_los_size){
-      tuner->tuning_size = 0;
-    }
-
-    if(tuner->tuning_size == 0) tuner->kind = TRANS_NOTHING;
+    tuner->tuning_size = round_down_to_size(new_free_los_sz - los_expect_free_sz, GC_BLOCK_SIZE_BYTES);
+    POINTER_SIZE_INT non_los_sz = mspace->committed_heap_size + fspace->committed_heap_size;
+    if(non_los_sz > min_none_los_size_bytes)
+      max_tuning_size = non_los_sz - min_none_los_size_bytes;
+    if(tuner->tuning_size > max_tuning_size) tuner->tuning_size = max_tuning_size;
   }
+  /*LOS_Shrink:*/
+  if((new_free_los_sz < los_expect_free_sz))
+  {
+    if( (!tuner->force_tune) && (los_expect_free_sz - new_free_los_sz < tuner->min_tuning_size) ){
+      tuner->kind = TRANS_NOTHING;
+      tuner->tuning_size = 0;
+      return;
+    }
+    tuner->kind = TRANS_FROM_LOS_TO_MOS;
+    lspace->move_object = 1;
+    assert(lspace->committed_heap_size >= min_los_size_bytes);
+    max_tuning_size = lspace->committed_heap_size - min_los_size_bytes;
+    POINTER_SIZE_INT tuning_size = los_expect_free_sz - new_free_los_sz;
+    if(tuning_size > max_tuning_size) tuning_size = max_tuning_size;
+    tuner->tuning_size = round_down_to_size(tuning_size, GC_BLOCK_SIZE_BYTES);
+  }
+  if(tuner->tuning_size == 0){
+    tuner->kind = TRANS_NOTHING;
+    lspace->move_object = 0;
+    return;
+  }
+  check_space_tuner(gc);
+  return;
 }
 
 void gc_space_tune_before_gc_fixed_size(GC* gc, unsigned int cause)
 {
-  if(gc_match_kind(gc, MINOR_COLLECTION) || (cause != GC_CAUSE_LOS_IS_FULL) )
-    return;
-
+  if(gc_match_kind(gc, MINOR_COLLECTION)) return;
   Space_Tuner* tuner = gc->tuner;
-  tuner->kind = TRANS_FROM_MOS_TO_LOS;
-
   Blocked_Space* mspace = (Blocked_Space*)gc_get_mos((GC_Gen*)gc);
   Blocked_Space* fspace = (Blocked_Space*)gc_get_nos((GC_Gen*)gc);
   Space* lspace = (Space*)gc_get_los((GC_Gen*)gc);
 
-  /*Fixme: this branch should be modified after the policy of gen major is decieded!*/
-  if(false){
-    unsigned int mos_free_sz = ((mspace->ceiling_block_idx - mspace->free_block_idx + 1) << GC_BLOCK_SHIFT_COUNT);
-    unsigned int nos_survive_sz =
-      (unsigned int)((float)((fspace->free_block_idx - fspace->first_block_idx) << GC_BLOCK_SHIFT_COUNT) * fspace->survive_ratio);
-    int mos_wast_sz = mos_free_sz - nos_survive_sz;
-
-    if( mos_wast_sz > GC_LOS_MIN_VARY_SIZE){
-      tuner->tuning_size = GC_LOS_MIN_VARY_SIZE;
-    }else if(mos_wast_sz > 0){
-      tuner->tuning_size = mos_wast_sz;
-    }else
-      tuner->tuning_size = 0;
-
-  }
-  /*For non gen virable sized NOS*/
-  else
-  {
+  if(cause == GC_CAUSE_LOS_IS_FULL){
+    tuner->kind = TRANS_FROM_MOS_TO_LOS;
     POINTER_SIZE_INT los_fail_sz = lspace_get_failure_size((Lspace*)lspace);
-
     if(los_fail_sz > GC_LOS_MIN_VARY_SIZE){
       /*Fixme: we should set the least_tuning_size after finding out the biggest free area in LOS,
         this number could be zero*/
       tuner->tuning_size = los_fail_sz;
@@ -174,14 +201,12 @@
       tuner->least_tuning_size = los_fail_sz;
       tuner->conservative_tuning_size = ((tuner->tuning_size + tuner->min_tuning_size) >> 1);
     }
-
     POINTER_SIZE_INT none_los_size;
 #ifdef STATIC_NOS_MAPPING
     none_los_size = mspace->committed_heap_size;
 #else
     none_los_size = mspace->committed_heap_size + fspace->committed_heap_size;
 #endif
-
     if(tuner->tuning_size > none_los_size){
       tuner->tuning_size = tuner->conservative_tuning_size;
     }
@@ -192,59 +217,110 @@
       tuner->tuning_size = 0;
     }
   }
-
+  else
+  /*LOS_Shrink: Fixme: Very simple strategy now. */
+  {
+    return;
+    tuner->kind = TRANS_FROM_LOS_TO_MOS;
+    lspace->move_object = TRUE;
+    tuner->tuning_size = GC_LOS_MIN_VARY_SIZE >> 1;
+  }
+
   /*Fixme: Should MOS heap_start must be 64k aligned?*/
-  tuner->tuning_size = round_up_to_size(tuner->tuning_size, SPACE_ALLOC_UNIT);
-  if(tuner->tuning_size == 0) tuner->kind = TRANS_NOTHING;
+  tuner->tuning_size = round_up_to_size(tuner->tuning_size, GC_BLOCK_SIZE_BYTES);
+  if(tuner->tuning_size == 0){
+    tuner->kind = TRANS_NOTHING;
+    lspace->move_object = 0;
+  }
 
   return;
 }
 
 #include "../thread/collector.h"
 #include "../mark_sweep/lspace.h"
-Boolean retune_los_size(GC *gc)
+Boolean gc_space_retune(GC *gc)
 {
-  POINTER_SIZE_INT non_los_live_obj_size = 0;
-  unsigned int collector_num = gc->num_active_collectors;
-
-  for(unsigned int i = collector_num; i--;){
-    Collector *collector = gc->collectors[i];
-    non_los_live_obj_size += collector->non_los_live_obj_size;
-  }
-  POINTER_SIZE_INT non_los_live_block_num = (non_los_live_obj_size + GC_BLOCK_SIZE_BYTES) >> GC_BLOCK_SHIFT_COUNT;
-  non_los_live_block_num += collector_num << 2;
-  Lspace *los = (Lspace*)gc_get_los((GC_Gen*)gc);
-  Space_Tuner *tuner = gc->tuner;
-  POINTER_SIZE_INT failure_size = los->failure_size;
-  POINTER_SIZE_INT min_tuning_block_num = round_up_to_size(failure_size, SPACE_ALLOC_UNIT) >> GC_BLOCK_SHIFT_COUNT;
-  POINTER_SIZE_INT tuning_block_num = tuner->tuning_size >> GC_BLOCK_SHIFT_COUNT;
-  POINTER_SIZE_INT heap_block_num = gc->committed_heap_size >> GC_BLOCK_SHIFT_COUNT;
-  POINTER_SIZE_INT los_block_num = los->committed_heap_size >> GC_BLOCK_SHIFT_COUNT;
-  POINTER_SIZE_INT live_block_num = los_block_num + non_los_live_block_num;
+  Space_Tuner* tuner = gc->tuner;
+  /*LOS_Extend:*/
+  if(tuner->kind == TRANS_FROM_MOS_TO_LOS){
+    POINTER_SIZE_INT non_los_live_obj_size = 0;
+    unsigned int collector_num = gc->num_active_collectors;
+    for(unsigned int i = collector_num; i--;){
+      Collector *collector = gc->collectors[i];
+      non_los_live_obj_size += collector->non_los_live_obj_size;
+    }
+    non_los_live_obj_size += GC_BLOCK_SIZE_BYTES * collector_num * 4;
+    non_los_live_obj_size = round_up_to_size(non_los_live_obj_size, GC_BLOCK_SIZE_BYTES);
+    POINTER_SIZE_INT max_free_for_tuning = 0;
+    if (gc->committed_heap_size > los->committed_heap_size + non_los_live_obj_size)
+      max_free_for_tuning = gc->committed_heap_size - los->committed_heap_size - non_los_live_obj_size;
 
-  while(live_block_num + tuning_block_num > heap_block_num){
-    if(tuning_block_num == min_tuning_block_num){ //has not enough space to extend los
+    if(!tuner->force_tune){
+      /*This should not happen!
+        If the GC was not triggered by LOS, there is no need to extend it*/
+      if(max_free_for_tuning < tuner->tuning_size)
+        tuner->tuning_size = max_free_for_tuning;
+      if(tuner->tuning_size == 0){
+        tuner->kind = TRANS_NOTHING;
+        los->move_object = 0;
+      }
+      return TRUE;
+    }
+    /*force tune here!*/
+    POINTER_SIZE_INT min_tuning_uped = los->failure_size;
+    if(min_tuning_uped > max_free_for_tuning){
       tuner->tuning_size = 0;
       tuner->kind = TRANS_NOTHING;
+      los->move_object = 0;
       return FALSE;
     }
-    tuning_block_num -= (SPACE_ALLOC_UNIT >> GC_BLOCK_SHIFT_COUNT) << 2;
-    if(tuning_block_num < min_tuning_block_num)
-      tuning_block_num = min_tuning_block_num;
+    if(tuner->tuning_size < min_tuning_uped){
+      assert(tuner->tuning_size < max_free_for_tuning);
+      tuner->tuning_size = min_tuning_uped;
+      return TRUE;
+    }else/*tuner->tuning_size >= min_tuning_uped*/{
+      if(tuner->tuning_size > max_free_for_tuning)
+        tuner->tuning_size = max_free_for_tuning;
+      return TRUE;
+    }
   }
-
-  POINTER_SIZE_INT tuning_size = tuning_block_num << GC_BLOCK_SHIFT_COUNT;
-  if(tuner->tuning_size != tuning_size) // retune los extension size
-    tuner->tuning_size = tuning_size;
-  return TRUE;
+  else// if(gc->tuner->kind == TRANS_FROM_LOS_TO_MOS)
+  {
+    POINTER_SIZE_INT los_live_obj_size = 0;
+    unsigned int collector_num = gc->num_active_collectors;
+    for(unsigned int i = collector_num; i--;){
+      Collector *collector = gc->collectors[i];
+      los_live_obj_size += collector->los_live_obj_size;
+    }
+    los_live_obj_size = round_up_to_size(los_live_obj_size, GC_BLOCK_SIZE_BYTES);
+    los_live_obj_size += (collector_num << 2 << GC_BLOCK_SHIFT_COUNT);
+
+    Lspace *los = (Lspace*)gc_get_los((GC_Gen*)gc);
+    Space_Tuner *tuner = gc->tuner;
+    POINTER_SIZE_INT los_max_shrink_size = 0;
+    if(los->committed_heap_size > los_live_obj_size)
+      los_max_shrink_size = los->committed_heap_size - los_live_obj_size;
+    if(tuner->tuning_size > los_max_shrink_size)
+      tuner->tuning_size = los_max_shrink_size;
+    assert(!(tuner->tuning_size % GC_BLOCK_SIZE_BYTES));
+    if(tuner->tuning_size == 0){
+      tuner->kind = TRANS_NOTHING;
+      los->move_object = 0;
+      return TRUE;
+    }else
+      return TRUE;
+  }
 }
 
 void gc_space_tuner_reset(GC* gc)
 {
-  if( !gc_match_kind(gc, MINOR_COLLECTION) && (gc->tuner->kind != TRANS_NOTHING)){
+  if( !gc_match_kind(gc, MINOR_COLLECTION)){
     Space_Tuner* tuner = gc->tuner;
+    POINTER_SIZE_INT old_slos = tuner->speed_los;
+    POINTER_SIZE_INT old_smos = tuner->speed_mos;
     memset(tuner, 0, sizeof(Space_Tuner));
+    tuner->old_speed_los = old_slos;
+    tuner->old_speed_mos = old_smos;
   }
 }
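gc_space_tuner_reset above wipes the tuner between major collections but deliberately carries the two speed fields across the memset, so the halving average in gc_space_tune_prepare survives the reset. A small self-contained sketch of that pattern, with Space_Tuner reduced to the fields involved:

#include <string.h>

typedef unsigned long POINTER_SIZE_INT;

typedef struct {
  POINTER_SIZE_INT tuning_size;
  POINTER_SIZE_INT speed_los, old_speed_los;
  POINTER_SIZE_INT speed_mos, old_speed_mos;
  /* ... the real struct carries more per-epoch state ... */
} Tuner_Sketch;

/* Clear all per-epoch state, but remember the last epoch's speeds so
 * the next prepare step can average against them. */
static void tuner_reset_sketch(Tuner_Sketch* tuner)
{
  POINTER_SIZE_INT old_slos = tuner->speed_los;
  POINTER_SIZE_INT old_smos = tuner->speed_mos;
  memset(tuner, 0, sizeof(Tuner_Sketch));
  tuner->old_speed_los = old_slos;
  tuner->old_speed_mos = old_smos;
}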
Index: gc_gen/src/common/space_tuner.h
===================================================================
--- gc_gen/src/common/space_tuner.h (revision 529215)
+++ gc_gen/src/common/space_tuner.h (working copy)
@@ -25,7 +25,7 @@
 #include "gc_space.h"
 
 #define GC_LOS_MIN_VARY_SIZE ( 2 * MB )
-#define GC_FIXED_SIZE_TUNER
+//#define GC_FIXED_SIZE_TUNER
 
 //For_LOS_extend
 enum Transform_Kind {
@@ -40,13 +40,18 @@
   POINTER_SIZE_INT tuning_size;
   POINTER_SIZE_INT conservative_tuning_size;
   POINTER_SIZE_INT least_tuning_size;
-  unsigned int force_tune;
+  /*Used for LOS_Shrink*/
+  Block_Header* interim_blocks;
+  Boolean need_tune;
+  Boolean force_tune;
 
   /*LOS alloc speed sciecne last los variation*/
   POINTER_SIZE_INT speed_los;
+  POINTER_SIZE_INT old_speed_los;
   /*MOS alloc speed sciecne last los variation*/
   POINTER_SIZE_INT speed_mos;
-
+  POINTER_SIZE_INT old_speed_mos;
+  /*Total wasted memory of los since last los variation*/
   POINTER_SIZE_INT wast_los;
   /*Total wasted memory of mos science last los variation*/
@@ -70,7 +75,7 @@
 void gc_space_tune_prepare(GC* gc, unsigned int cause);
 void gc_space_tune_before_gc(GC* gc, unsigned int cause);
 void gc_space_tune_before_gc_fixed_size(GC* gc, unsigned int cause);
-Boolean retune_los_size(GC *gc);
+Boolean gc_space_retune(GC *gc);
 void gc_space_tuner_reset(GC* gc);
 void gc_space_tuner_initialize(GC* gc);
Index: gc_gen/src/finalizer_weakref/finalizer_weakref.cpp
===================================================================
--- gc_gen/src/finalizer_weakref/finalizer_weakref.cpp (revision 529215)
+++ gc_gen/src/finalizer_weakref/finalizer_weakref.cpp (working copy)
@@ -28,6 +28,7 @@
 #include "../gen/gen.h"
 
 Boolean IGNORE_FINREF = FALSE;
+Boolean DURING_RESURRECTION = FALSE;
 
 static inline Boolean obj_is_dead_in_gen_minor_gc(Partial_Reveal_Object *p_obj)
@@ -224,6 +225,8 @@
   if(finalizable_obj_pool_is_empty(gc))
     return;
 
+  DURING_RESURRECTION = TRUE;
+
   if(!gc_match_kind(gc, MINOR_COLLECTION))
     finref_reset_repset(gc);
   pool_iterator_init(finalizable_obj_pool);
@@ -260,6 +263,8 @@
     finref_put_repset(gc);
   metadata->pending_finalizers = TRUE;
 
+  DURING_RESURRECTION = FALSE;
+
   /* fianlizable objs have been added to finref repset pool or updated by tracing */
 }
Index: gc_gen/src/finalizer_weakref/finalizer_weakref.h
===================================================================
--- gc_gen/src/finalizer_weakref/finalizer_weakref.h (revision 529215)
+++ gc_gen/src/finalizer_weakref/finalizer_weakref.h (working copy)
@@ -53,15 +53,20 @@
   return (REF*)(( Byte*)p_obj+get_gc_referent_offset());
 }
 
-typedef void (* Scan_Slot_Func)(Collector *collector, REF* p_ref);
+extern Boolean DURING_RESURRECTION;
+typedef void (* Scan_Slot_Func)(Collector *collector, REF *p_ref);
 inline void scan_weak_reference(Collector *collector, Partial_Reveal_Object *p_obj, Scan_Slot_Func scan_slot)
 {
   WeakReferenceType type = special_reference_type(p_obj);
   if(type == NOT_REFERENCE)
     return;
-  REF* p_referent_field = obj_get_referent_field(p_obj);
+  REF *p_referent_field = obj_get_referent_field(p_obj);
   REF p_referent = *p_referent_field;
   if (!p_referent) return;
+  if(DURING_RESURRECTION){
+    write_slot(p_referent_field, NULL);
+    return;
+  }
   switch(type){
     case SOFT_REFERENCE :
       if(gc_match_kind(collector->gc, MINOR_COLLECTION))
Index: gc_gen/src/gen/gen.cpp
===================================================================
--- gc_gen/src/gen/gen.cpp (revision 529215)
+++ gc_gen/src/gen/gen.cpp (working copy)
@@ -30,8 +30,9 @@
 POINTER_SIZE_INT min_nos_size_bytes = 16 * MB;
 POINTER_SIZE_INT max_nos_size_bytes = 256 * MB;
 POINTER_SIZE_INT min_los_size_bytes = 4*MB;
+POINTER_SIZE_INT min_none_los_size_bytes = 4*MB;
 POINTER_SIZE_INT NOS_SIZE = 0;
-POINTER_SIZE_INT MIN_LOS_SIZE = 0;
+POINTER_SIZE_INT INIT_LOS_SIZE = 0;
 POINTER_SIZE_INT MIN_NOS_SIZE = 0;
 POINTER_SIZE_INT MAX_NOS_SIZE = 0;
@@ -78,7 +79,7 @@
   if( MIN_NOS_SIZE ) min_nos_size_bytes = MIN_NOS_SIZE;
 
   POINTER_SIZE_INT los_size = max_heap_size >> 7;
-  if(MIN_LOS_SIZE) min_los_size_bytes = MIN_LOS_SIZE;
+  if(INIT_LOS_SIZE) los_size = INIT_LOS_SIZE;
   if(los_size < min_los_size_bytes )
     los_size = min_los_size_bytes ;
@@ -352,7 +353,6 @@
   }
 
   if(gc->collect_result == FALSE && gc_match_kind((GC*)gc, MINOR_COLLECTION)){
-
     if(gc_is_gen_mode())
       gc_clear_remset((GC*)gc);
@@ -388,6 +388,8 @@
   gc_set_pool_clear(gc->metadata->gc_uncompressed_rootset_pool);
 #endif
 
+  assert(!gc->los->move_object);
+
   return;
 }
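The DURING_RESURRECTION flag threaded through finalizer_weakref above short-circuits scan_weak_reference: while finalizable objects are being resurrected, referent slots are cleared rather than traced under the per-type policy. A condensed, self-contained sketch of that control flow; the types here are stand-ins for the real GC declarations:

#include <stddef.h>

/* Stand-ins for the real GC types; only the control flow matters. */
typedef int Boolean;
typedef void* REF;
typedef enum { NOT_REFERENCE, SOFT_REFERENCE, WEAK_REFERENCE, PHANTOM_REFERENCE } WeakReferenceType;

Boolean DURING_RESURRECTION = 0;

static void write_slot(REF* slot, void* value){ *slot = value; }

/* Condensed shape of the new early-out in scan_weak_reference. */
static void scan_weak_reference_sketch(WeakReferenceType type, REF* p_referent_field)
{
  if(type == NOT_REFERENCE) return;
  if(!*p_referent_field) return;
  if(DURING_RESURRECTION){
    /* resurrection phase: drop the referent instead of tracing it */
    write_slot(p_referent_field, NULL);
    return;
  }
  /* ... otherwise the SOFT/WEAK/PHANTOM handling from the header runs ... */
}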
Index: gc_gen/src/gen/gen_adapt.cpp
===================================================================
--- gc_gen/src/gen/gen_adapt.cpp (revision 529215)
+++ gc_gen/src/gen/gen_adapt.cpp (working copy)
@@ -217,7 +217,8 @@
     survive_ratio = (float)major_survive_size/(float)space_committed_size((Space*)mspace);
     mspace->survive_ratio = survive_ratio;
   }
-  if(gc->tuner->kind == TRANS_FROM_MOS_TO_LOS){
+  /*For LOS_Shrink:*/
+  if(gc->tuner->kind != TRANS_NOTHING){
     POINTER_SIZE_INT mspace_size_threshold = (space_committed_size((Space*)mspace) + space_committed_size((Space*)fspace)) >> 1;
     mspace_set_expected_threshold((Mspace *)mspace, mspace_size_threshold );
   }
@@ -251,9 +252,14 @@
     survive_ratio = (float)minor_survive_size/(float)space_committed_size((Space*)fspace);
     fspace->survive_ratio = survive_ratio;
 
-    /*For_LOS adaptive*/
-    POINTER_SIZE_INT mspace_size_threshold = space_committed_size((Space*)mspace) + space_committed_size((Space*)fspace) - free_size_threshold;
-    mspace_set_expected_threshold((Mspace *)mspace, mspace_size_threshold );
+    /*For LOS_Adaptive*/
+    POINTER_SIZE_INT mspace_committed_size = space_committed_size((Space*)mspace);
+    POINTER_SIZE_INT fspace_committed_size = space_committed_size((Space*)fspace);
+    if(mspace_committed_size + fspace_committed_size > free_size_threshold){
+      POINTER_SIZE_INT mspace_size_threshold;
+      mspace_size_threshold = mspace_committed_size + fspace_committed_size - free_size_threshold;
+      mspace_set_expected_threshold((Mspace *)mspace, mspace_size_threshold );
+    }
   }
 
   gc->survive_ratio = (gc->survive_ratio + survive_ratio)/2.0f;
@@ -262,7 +268,11 @@
   }
 
   gc_gen_mode_adapt(gc,pause_time);
-
+  /* A heuristic: when there is no free block at all after this collection, we cannot
+     allocate anything; the first allocation will then trigger a major collection */
+  if( fspace->num_managed_blocks == 0 )
+    gc->force_major_collect = TRUE;
+
   return;
 }
@@ -338,7 +348,7 @@
   POINTER_SIZE_INT curr_nos_size = space_committed_size((Space*)fspace);
 
-  if( abs((int)(new_nos_size - curr_nos_size)) < NOS_COPY_RESERVE_DELTA )
+  if( ABS_DIFF(new_nos_size, curr_nos_size) < NOS_COPY_RESERVE_DELTA )
     return;
 
   /* below are ajustment */
Index: gc_gen/src/mark_compact/los_extention_mark_scan.cpp
===================================================================
--- gc_gen/src/mark_compact/los_extention_mark_scan.cpp (revision 529215)
+++ gc_gen/src/mark_compact/los_extention_mark_scan.cpp (working copy)
@@ -28,6 +28,8 @@
     collector_tracestack_push(collector, p_obj);
     if(!obj_belongs_to_space(p_obj, gc_get_los((GC_Gen*)collector->gc)))
       collector->non_los_live_obj_size += vm_object_size(p_obj);
+    else
+      collector->los_live_obj_size += round_up_to_size(vm_object_size(p_obj), KB);
   }
 
   return;
@@ -102,7 +104,7 @@
    So we abondoned this design.
   We no longer use the repset to remember repointed slots */
-void los_extention_mark_scan_heap(Collector *collector)
+void los_adaptation_mark_scan_heap(Collector *collector)
 {
   GC* gc = collector->gc;
   GC_Metadata* metadata = gc->metadata;
@@ -137,6 +139,8 @@
       collector_tracestack_push(collector, p_obj);
       if(!obj_belongs_to_space(p_obj, gc_get_los((GC_Gen*)gc)))
         collector->non_los_live_obj_size += vm_object_size(p_obj);
+      else
+        collector->los_live_obj_size += round_up_to_size(vm_object_size(p_obj), KB);
     }
   }
@@ -189,4 +193,3 @@
   return;
 }
 
-
Index: gc_gen/src/mark_compact/mspace.cpp
===================================================================
--- gc_gen/src/mark_compact/mspace.cpp (revision 529215)
+++ gc_gen/src/mark_compact/mspace.cpp (working copy)
@@ -173,4 +173,3 @@
   return mspace->expected_threshold;
 }
 
-
Index: gc_gen/src/mark_compact/mspace_collect_compact.cpp
===================================================================
--- gc_gen/src/mark_compact/mspace_collect_compact.cpp (revision 529215)
+++ gc_gen/src/mark_compact/mspace_collect_compact.cpp (working copy)
@@ -29,12 +29,11 @@
 static volatile Block_Header* next_block_for_compact;
 static volatile Block_Header* next_block_for_target;
 
-void update_mspace_info_for_los_extension(Mspace *mspace)
+void mspace_update_info_for_los_extension(Mspace *mspace)
 {
   Space_Tuner *tuner = mspace->gc->tuner;
 
-  if(tuner->kind != TRANS_FROM_MOS_TO_LOS)
-    return;
+  if(tuner->kind != TRANS_FROM_MOS_TO_LOS) return;
 
   POINTER_SIZE_INT tune_size = tuner->tuning_size;
   unsigned int tune_blocks = (unsigned int)(tune_size >> GC_BLOCK_SHIFT_COUNT);
@@ -52,6 +51,44 @@
   mspace->num_used_blocks = 0;
 }
 
+void mspace_update_info_for_los_shrink(Mspace* mspace)
+{
+  Space_Tuner *tuner = mspace->gc->tuner;
+  if(tuner->kind != TRANS_FROM_LOS_TO_MOS) return;
+
+  POINTER_SIZE_INT tune_size = tuner->tuning_size;
+  unsigned int tune_blocks = (unsigned int)(tune_size >> GC_BLOCK_SHIFT_COUNT);
+
+  /*Update mspace information.*/
+  mspace->blocks = (Block*)((POINTER_SIZE_INT)mspace->blocks - tune_size);
+  mspace->heap_start = (void*)(mspace->blocks);
+  mspace->committed_heap_size += tune_size;
+  mspace->first_block_idx -= tune_blocks;
+  mspace->num_managed_blocks += tune_blocks;
+  mspace->num_total_blocks += tune_blocks;
+}
+
+/*Copy the fake blocks into real blocks, and reconnect these new blocks into the main list of mspace.*/
+void mspace_settle_fake_blocks_for_los_shrink(Mspace* mspace)
+{
+  Space_Tuner *tuner = mspace->gc->tuner;
+  if(tuner->kind != TRANS_FROM_LOS_TO_MOS) return;
+
+  POINTER_SIZE_INT tune_size = tuner->tuning_size;
+  unsigned int tune_blocks = (unsigned int)(tune_size >> GC_BLOCK_SHIFT_COUNT);
+
+  Block* blocks = (Block*)((POINTER_SIZE_INT)mspace->blocks - tune_size);
+  unsigned int i;
+  for(i=0; i < tune_blocks; i++){
+    Block_Header* real_block = (Block_Header*)&(blocks[i]);
+    Block_Header* fake_block = &tuner->interim_blocks[i];
+    memcpy((void*)real_block, (void*)fake_block, sizeof(Block_Header));
+    real_block->next = (Block_Header*)((POINTER_SIZE_INT)real_block + GC_BLOCK_SIZE_BYTES);
+  }
+
+  return;
+}
+
 void mspace_reset_after_compaction(Mspace* mspace)
 {
   unsigned int old_num_used = mspace->num_used_blocks;
@@ -62,6 +99,7 @@
   unsigned int i;
   for(i=0; i < num_used; i++){
     Block_Header* block = (Block_Header*)&(blocks[i]);
+    assert(!((POINTER_SIZE_INT)block % GC_BLOCK_SIZE_BYTES));
     block->status = BLOCK_USED;
     block->free = block->new_free;
     block->new_free = block->base;
@@ -148,6 +186,45 @@
     next_block_for_target = block;
     next_block_for_compact = block;
     return;
+  }else
+  {
+    Block_Header* mos_first_block = (Block_Header*)&mspace->blocks[0];
+    unsigned int trans_blocks = (unsigned int)(tuner->tuning_size >> GC_BLOCK_SHIFT_COUNT);
+    gc->tuner->interim_blocks = (Block_Header*)STD_MALLOC(trans_blocks * sizeof(Block_Header));
+    Block_Header* los_trans_fake_blocks = gc->tuner->interim_blocks;
+    memset(los_trans_fake_blocks, 0, trans_blocks * sizeof(Block_Header));
+    void* trans_base = (void*)((POINTER_SIZE_INT)mos_first_block - tuner->tuning_size);
+    unsigned int start_idx = GC_BLOCK_INDEX_FROM(gc->heap_start, trans_base);
+    Block_Header* last_block = los_trans_fake_blocks;
+
+    for(i = 0; i < trans_blocks; i ++){
+      Block_Header* curr_block = &los_trans_fake_blocks[i];
+      curr_block->block_idx = start_idx + i;
+      curr_block->base = (void*)((POINTER_SIZE_INT)trans_base + i * GC_BLOCK_SIZE_BYTES + GC_BLOCK_HEADER_SIZE_BYTES);
+      curr_block->free = curr_block->base ;
+      curr_block->new_free = curr_block->free;
+      curr_block->ceiling = (void*)((POINTER_SIZE_INT)curr_block->base + GC_BLOCK_BODY_SIZE_BYTES);
+      curr_block->status = BLOCK_COMPACTED;
+      last_block->next = curr_block;
+      last_block = curr_block;
+    }
+    last_block->next = mos_first_block;
+
+    Collector* collector = gc->collectors[0];
+    collector->cur_target_block = los_trans_fake_blocks;
+    collector->cur_target_block->status = BLOCK_TARGET;
+    collector->cur_compact_block = mos_first_block;
+    collector->cur_compact_block->status = BLOCK_IN_COMPACT;
+
+    for(i=1; i< gc->num_active_collectors; i++){
+      collector = gc->collectors[i];
+      collector->cur_target_block = gc->collectors[i - 1]->cur_target_block->next;
+      collector->cur_target_block->status = BLOCK_TARGET;
+      collector->cur_compact_block = gc->collectors[i - 1]->cur_compact_block->next;
+      collector->cur_compact_block->status = BLOCK_IN_COMPACT;
+    }
+    next_block_for_target = collector->cur_target_block->next;
+    next_block_for_compact = collector->cur_compact_block->next;
   }
 }
@@ -187,9 +264,6 @@
   return NULL;
 }
 
-#include "../trace_forward/fspace.h"
-#include "../gen/gen.h"
-
 Block_Header* mspace_get_next_target_block(Collector* collector, Mspace* mspace)
 {
   Block_Header* cur_target_block = (Block_Header*)next_block_for_target;
@@ -216,8 +290,14 @@
    * but we can't use the blocks which are given to los when los extension happens.
    * in this case, an out-of-mem should be given to user.
    */
-  Fspace *nos = ((GC_Gen*)collector->gc)->nos;
-  Block_Header *nos_end = ((Block_Header *)&nos->blocks[nos->num_managed_blocks-1])->next;
+  GC* gc = collector->gc;
+  Blocked_Space* nos = (Blocked_Space*)gc_get_nos((GC_Gen*)gc);
+  Block_Header *nos_end;
+  if( nos->num_managed_blocks != 0)
+    nos_end = ((Block_Header *)&nos->blocks[nos->num_managed_blocks-1])->next;
+  else
+    nos_end = ((Block_Header *)&mspace->blocks[mspace->num_managed_blocks-1])->next;
+
   while( cur_target_block != nos_end){
     //For_LOS_extend
     //assert( cur_target_block <= collector->cur_compact_block);
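The else branch above seeds compaction with "fake" block headers describing the memory LOS will surrender, before any real Block structures exist there; mspace_settle_fake_blocks_for_los_shrink later copies them onto the real blocks. A stripped-down, self-contained sketch of building such an interim header chain (the two size macros here are illustrative, and the header struct is reduced to the fields the loop touches):

#include <stdlib.h>
#include <string.h>

#define GC_BLOCK_SIZE_BYTES        (32 * 1024)  /* illustrative */
#define GC_BLOCK_HEADER_SIZE_BYTES 256          /* illustrative */

typedef struct Block_Header_Sketch {
  void* base;
  void* free;
  void* ceiling;
  struct Block_Header_Sketch* next;
} Block_Header_Sketch;

/* Build N interim headers describing yet-to-be-created blocks that sit
 * just below the current MOS start (trans_base). They are malloc'ed,
 * not block-aligned, which is why GC_BLOCK_END reads ceiling. */
static Block_Header_Sketch* build_interim_blocks(void* trans_base,
                                                 unsigned int trans_blocks)
{
  Block_Header_Sketch* fake =
      (Block_Header_Sketch*)malloc(trans_blocks * sizeof(Block_Header_Sketch));
  memset(fake, 0, trans_blocks * sizeof(Block_Header_Sketch));
  for(unsigned int i = 0; i < trans_blocks; i++){
    char* block_start = (char*)trans_base + i * GC_BLOCK_SIZE_BYTES;
    fake[i].base = fake[i].free = block_start + GC_BLOCK_HEADER_SIZE_BYTES;
    fake[i].ceiling = block_start + GC_BLOCK_SIZE_BYTES;
    fake[i].next = (i + 1 < trans_blocks) ? &fake[i + 1] : NULL;
  }
  return fake; /* consumed as initial compaction targets */
}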
Index: gc_gen/src/mark_compact/mspace_collect_compact.h
===================================================================
--- gc_gen/src/mark_compact/mspace_collect_compact.h (revision 529215)
+++ gc_gen/src/mark_compact/mspace_collect_compact.h (working copy)
@@ -28,7 +28,9 @@
 void gc_reset_block_for_collectors(GC* gc, Mspace* mspace);
 void gc_init_block_for_collectors(GC* gc, Mspace* mspace);
 
-void update_mspace_info_for_los_extension(Mspace* mspace);
+void mspace_update_info_for_los_extension(Mspace* mspace);
+void mspace_update_info_for_los_shrink(Mspace* mspace);
+void mspace_settle_fake_blocks_for_los_shrink(Mspace* mspace);
 void mspace_reset_after_compaction(Mspace* mspace);
 
 Block_Header* mspace_get_first_compact_block(Mspace* mspace);
@@ -40,7 +42,7 @@
 void move_compact_mspace(Collector* collector);
 void fallback_mark_scan_heap(Collector* collector);
 
-void los_extention_mark_scan_heap(Collector *collector);
+void los_adaptation_mark_scan_heap(Collector *collector);
 
 void mspace_extend_compact(Collector *collector);
 
@@ -48,4 +50,3 @@
 
 #endif /* _MSPACE_COLLECT_COMPACT_H_ */
 
-
Index: gc_gen/src/mark_compact/mspace_move_compact.cpp
===================================================================
--- gc_gen/src/mark_compact/mspace_move_compact.cpp (revision 529215)
+++ gc_gen/src/mark_compact/mspace_move_compact.cpp (working copy)
@@ -32,6 +32,7 @@
 {
   Block_Header* curr_block = collector->cur_compact_block;
   Block_Header* dest_block = collector->cur_target_block;
+  Block_Header *local_last_dest = dest_block;
   void* dest_sector_addr = dest_block->base;
   Boolean is_fallback = gc_match_kind(collector->gc, FALLBACK_COLLECTION);
@@ -65,6 +66,8 @@
         collector->result = FALSE;
         return;
       }
+      if(dest_block > local_last_dest)
+        local_last_dest = dest_block;
       block_end = (POINTER_SIZE_INT)GC_BLOCK_END(dest_block);
       dest_sector_addr = dest_block->base;
     }
@@ -89,6 +92,7 @@
     curr_block = mspace_get_next_compact_block(collector, mspace);
   }
   dest_block->new_free = dest_sector_addr;
+  collector->cur_target_block = local_last_dest;
 
   return;
 }
@@ -148,7 +152,6 @@
   }
 #endif
 
-
   /* let other collectors go */
   num_marking_collectors++;
 }
@@ -187,7 +190,7 @@
     /* last collector's world here */
     lspace_fix_repointed_refs(collector, lspace);
     gc_fix_rootset(collector);
-    update_mspace_info_for_los_extension(mspace);
+    mspace_update_info_for_los_extension(mspace);
     num_fixing_collectors++;
   }
   while(num_fixing_collectors != num_active_collectors + 1);
Index: gc_gen/src/mark_compact/mspace_slide_compact.cpp
===================================================================
--- gc_gen/src/mark_compact/mspace_slide_compact.cpp (revision 529215)
+++ gc_gen/src/mark_compact/mspace_slide_compact.cpp (working copy)
@@ -35,6 +35,7 @@
 {
   Block_Header *curr_block = collector->cur_compact_block;
   Block_Header *dest_block = collector->cur_target_block;
+  Block_Header *local_last_dest = dest_block;
   void *dest_addr = dest_block->base;
   Block_Header *last_src;
@@ -66,6 +67,8 @@
       collector->result = FALSE;
       return;
     }
+    if(dest_block > local_last_dest)
+      local_last_dest = dest_block;
     dest_addr = dest_block->base;
     dest_block->src = p_obj;
     last_src = curr_block;
@@ -97,8 +100,9 @@
   dest_block->new_free = dest_addr;
 
   Block_Header *cur_last_dest = (Block_Header *)last_block_for_dest;
-  while(dest_block > last_block_for_dest){
-    atomic_casptr((volatile void **)&last_block_for_dest, dest_block, cur_last_dest);
+  collector->cur_target_block = local_last_dest;
+  while(local_last_dest > cur_last_dest){
+    atomic_casptr((volatile void **)&last_block_for_dest, local_last_dest, cur_last_dest);
     cur_last_dest = (Block_Header *)last_block_for_dest;
   }
@@ -161,7 +165,9 @@
   }
 
   unsigned int total_dest_counter = 0;
-  Block_Header *last_dest_block = (Block_Header *)last_block_for_dest;
+  /*For LOS_Shrink: last_dest_block might point to a fake block*/
+  Block_Header *last_dest_block =
+    (Block_Header *)round_down_to_size((POINTER_SIZE_INT)(last_block_for_dest->base), GC_BLOCK_SIZE_BYTES);
   for(; cur_dest_block <= last_dest_block; cur_dest_block = cur_dest_block->next){
     if(!cur_dest_block) return NULL;
     if(cur_dest_block->status == BLOCK_DEST){
@@ -340,16 +346,27 @@
 
   if(gc_match_kind(gc, FALLBACK_COLLECTION))
     fallback_mark_scan_heap(collector);
-  else if(gc->cause == GC_CAUSE_LOS_IS_FULL)
-    los_extention_mark_scan_heap(collector);
+  else if(gc->tuner->kind != TRANS_NOTHING)
    los_adaptation_mark_scan_heap(collector);
   else
     mark_scan_heap(collector);
 
   old_num = atomic_inc32(&num_marking_collectors);
   if( ++old_num == num_active_collectors ){
     /* last collector's world here */
-    if(gc->cause == GC_CAUSE_LOS_IS_FULL)
-      retune_los_size(gc);
+    /*Retune the space tuner to ensure the tuning size is not too great*/
+//    Boolean retune_result;
+    if(gc->tuner->kind != TRANS_NOTHING) gc_space_retune(gc);
+//    if(gc->tuner->kind == TRANS_FROM_LOS_TO_MOS) printf("los shrink...\n");
+//    if(gc->tuner->kind == TRANS_FROM_MOS_TO_LOS) printf("los extend...\n");
+
+/*  if(!retune_result){
+      gc->collect_result = FALSE;
+      num_marking_collectors++;
+      return;
+    }*/
+
+    assert(!(gc->tuner->tuning_size % GC_BLOCK_SIZE_BYTES));
 
     /* prepare for next phase */
     gc_init_block_for_collectors(gc, mspace);
@@ -368,6 +385,7 @@
     num_marking_collectors++;
   }
   while(num_marking_collectors != num_active_collectors + 1);
+//  if(!gc->collect_result) return;
 
   /* Pass 2: **************************************************
      assign target addresses for all to-be-moved objects */
@@ -378,6 +396,8 @@
   old_num = atomic_inc32(&num_repointing_collectors);
   if( ++old_num == num_active_collectors ){
     /* single thread world */
+    /*LOS_Shrink: */
+    if(lspace->move_object) lspace_compute_object_target(collector, lspace);
     gc->collect_result = gc_collection_result(gc);
     if(!gc->collect_result){
       num_repointing_collectors++;
@@ -401,7 +421,13 @@
     lspace_fix_repointed_refs(collector, lspace);
     gc_fix_rootset(collector);
     gc_init_block_for_sliding_compact(gc, mspace);
-    num_fixing_collectors++;
+    num_fixing_collectors++;
+    /*LOS_Shrink: Fixme: This operation moves objects in LOS, and should be part of Pass 4*/
+    if(lspace->move_object) lspace_sliding_compact(collector, lspace);
+    mspace_settle_fake_blocks_for_los_shrink(mspace);
+    /*Fixme: LOS_Shrink: set the dest block for sliding compact*/
+    if(gc->tuner->kind == TRANS_FROM_LOS_TO_MOS)
+      mspace->block_iterator = (Block_Header*)((POINTER_SIZE_INT)mspace->blocks - (mspace->gc)->tuner->tuning_size);
   }
   while(num_fixing_collectors != num_active_collectors + 1);
@@ -423,7 +449,8 @@
   old_num = atomic_inc32(&num_restoring_collectors);
   if( ++old_num == num_active_collectors ){
-    update_mspace_info_for_los_extension(mspace);
+    mspace_update_info_for_los_extension(mspace);
+    mspace_update_info_for_los_shrink(mspace);
     num_restoring_collectors++;
   }
Index: gc_gen/src/mark_sweep/lspace.cpp
===================================================================
--- gc_gen/src/mark_sweep/lspace.cpp (revision 529215)
+++ gc_gen/src/mark_sweep/lspace.cpp (working copy)
@@ -21,16 +21,12 @@
 #include "lspace.h"
 
 void* los_boundary = NULL;
+Boolean* p_global_lspace_move_obj;
+
 struct GC_Gen;
 void gc_set_los(GC_Gen* gc, Space* lspace);
 
-/*Fixme: This macro is for handling HEAP_NULL issues caused by JIT OPT*/
-#ifdef COMPRESS_REFERENCE
-  #define LOS_HEAD_RESERVE_FOR_HEAP_NULL (4*KB)
-#else
-  #define LOS_HEAD_RESERVE_FOR_HEAP_NULL (0*KB)
-#endif
-
+extern POINTER_SIZE_INT min_los_size_bytes;
 void lspace_initialize(GC* gc, void* start, POINTER_SIZE_INT lspace_size)
 {
   Lspace* lspace = (Lspace*)STD_MALLOC( sizeof(Lspace));
@@ -44,13 +40,15 @@
   vm_commit_mem(reserved_base, lspace_size);
   memset(reserved_base, 0, lspace_size);
 
+  min_los_size_bytes -= LOS_HEAD_RESERVE_FOR_HEAP_NULL;
   lspace->committed_heap_size = committed_size - LOS_HEAD_RESERVE_FOR_HEAP_NULL;
   lspace->reserved_heap_size = committed_size - LOS_HEAD_RESERVE_FOR_HEAP_NULL;
   lspace->heap_start = (void*)((POINTER_SIZE_INT)reserved_base + LOS_HEAD_RESERVE_FOR_HEAP_NULL);
   lspace->heap_end = (void *)((POINTER_SIZE_INT)reserved_base + committed_size);
+  lspace->gc = gc;
+  /*LOS_Shrink:*/
   lspace->move_object = FALSE;
-  lspace->gc = gc;
 
   /*Treat with free area buddies*/
   lspace->free_pool = (Free_Area_Pool*)STD_MALLOC(sizeof(Free_Area_Pool));
@@ -64,6 +62,7 @@
   lspace->survive_ratio = 0.5f;
 
   gc_set_los((GC_Gen*)gc, (Space*)lspace);
+  p_global_lspace_move_obj = &(lspace->move_object);
   los_boundary = lspace->heap_end;
 
   return;
@@ -106,8 +105,13 @@
 {
   /* heap is marked already, we need only sweep here. */
   lspace->num_collections ++;
-  lspace_reset_after_collection(lspace);
-  lspace_sweep(lspace);
+  lspace_reset_after_collection(lspace);
+  /*When we slide-compact the lspace, we no longer need to sweep it.
+    Moreover, the assumption that the first word of each KB must be zero when
+    iterating the lspace in lspace_get_next_marked_object no longer holds.*/
+  if(!lspace->move_object) lspace_sweep(lspace);
+  lspace->move_object = FALSE;
+//  printf("lspace: %d MB \n", lspace->committed_heap_size / MB);
   return;
 }
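The reason lspace_collection above must skip the sweep after sliding compaction is the LOS walk invariant: lspace_get_next_marked_object assumes that a free area announces itself with a zero first word, and compaction has just rewritten that memory. A toy, self-contained sketch of that zero-word walk (one flat region, KB granularity; Free_Area reduced to the two words the loop reads):

typedef unsigned long POINTER_SIZE_INT;

/* A free area's first word reads zero; its size field says how far to
 * skip. Sliding compaction overwrites this layout, so the walk (and
 * hence the sweep that relies on it) must not run afterwards. */
typedef struct Free_Area_Sketch {
  POINTER_SIZE_INT zero;  /* 0 tags this spot as free space */
  POINTER_SIZE_INT size;  /* byte length of the free area */
} Free_Area_Sketch;

static char* skip_free_areas(char* scan, char* heap_end)
{
  /* advance across free areas until a nonzero word (an object) or the end */
  while(scan < heap_end && *(POINTER_SIZE_INT*)scan == 0)
    scan += ((Free_Area_Sketch*)scan)->size;
  return scan;
}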
Index: gc_gen/src/mark_sweep/lspace.h
===================================================================
--- gc_gen/src/mark_sweep/lspace.h (revision 529215)
+++ gc_gen/src/mark_sweep/lspace.h (working copy)
@@ -25,6 +25,13 @@
 #include "../thread/gc_thread.h"
 #include "free_area_pool.h"
 
+/*Fixme: This macro is for handling HEAP_NULL issues caused by JIT OPT*/
+#ifdef COMPRESS_REFERENCE
+  #define LOS_HEAD_RESERVE_FOR_HEAP_NULL (GC_BLOCK_SIZE_BYTES )
+#else
+  #define LOS_HEAD_RESERVE_FOR_HEAP_NULL (0*KB)
+#endif
+
 typedef struct Lspace{
   /* <-- first couple of fields are overloadded as Space */
   void* heap_start;
@@ -36,6 +43,7 @@
   float survive_ratio;
   unsigned int collect_algorithm;
   GC* gc;
+  /*LOS_Shrink: This field indicates sliding compaction of the lspace */
   Boolean move_object;
   /*For_statistic: size allocated science last time collect los, ie. last major*/
   POINTER_SIZE_INT alloced_size;
@@ -46,11 +54,15 @@
   Free_Area_Pool* free_pool;
   /*Size of allocation which caused lspace alloc failure.*/
   POINTER_SIZE_INT failure_size;
+  void* scompact_fa_start;
+  void* scompact_fa_end;
 }Lspace;
 
 void lspace_initialize(GC* gc, void* reserved_base, POINTER_SIZE_INT lspace_size);
 void lspace_destruct(Lspace* lspace);
 Managed_Object_Handle lspace_alloc(POINTER_SIZE_INT size, Allocator* allocator);
+void lspace_sliding_compact(Collector* collector, Lspace* lspace);
+void lspace_compute_object_target(Collector* collector, Lspace* lspace);
 void lspace_sweep(Lspace* lspace);
 void lspace_reset_after_collection(Lspace* lspace);
 void lspace_collection(Lspace* lspace);
@@ -65,8 +77,8 @@
   while(!reach_heap_end){
     //FIXME: This while shoudl be if, try it!
-    while(!*((unsigned int *)next_area_start)){
-      next_area_start += ((Free_Area*)next_area_start)->size;
+    while(!*((POINTER_SIZE_INT*)next_area_start)){
+      next_area_start += ((Free_Area*)next_area_start)->size;
     }
     if(next_area_start < (POINTER_SIZE_INT)lspace->heap_end){
       //If there is a living object at this addr, return it, and update iterate_index
Index: gc_gen/src/mark_sweep/lspace_alloc_collect.cpp
===================================================================
--- gc_gen/src/mark_sweep/lspace_alloc_collect.cpp (revision 529215)
+++ gc_gen/src/mark_sweep/lspace_alloc_collect.cpp (working copy)
@@ -199,7 +199,7 @@
   /*Failled, no adequate area found in all lists, so GC at first, then get another try.*/
   if(try_count == 0){
     vm_gc_lock_enum();
-    lspace->failure_size = alloc_size;
+    lspace->failure_size = round_up_to_size(alloc_size, GC_BLOCK_SIZE_BYTES);
     gc_reclaim_heap(allocator->gc, GC_CAUSE_LOS_IS_FULL);
     vm_gc_unlock_enum();
     try_count ++;
@@ -210,42 +210,138 @@
   return NULL;
 }
 
+void lspace_compute_object_target(Collector* collector, Lspace* lspace)
+{
+  void* dest_addr = lspace->heap_start;
+  unsigned int iterate_index = 0;
+  Partial_Reveal_Object* p_obj = lspace_get_first_marked_object(lspace, &iterate_index);
+
+  assert(!collector->rem_set);
+  collector->rem_set = free_set_pool_get_entry(collector->gc->metadata);
+
+  while( p_obj ){
+    assert( obj_is_marked_in_vt(p_obj));
+    unsigned int obj_size = vm_object_size(p_obj);
+    assert(((POINTER_SIZE_INT)dest_addr + obj_size) <= (POINTER_SIZE_INT)lspace->heap_end);
+    Obj_Info_Type obj_info = get_obj_info_raw(p_obj);
+    if( obj_info != 0 ) {
+      collector_remset_add_entry(collector, (Partial_Reveal_Object **)dest_addr);
+      collector_remset_add_entry(collector, (Partial_Reveal_Object **)obj_info);
+    }
+
+    obj_set_fw_in_oi(p_obj, dest_addr);
+    dest_addr = (void *)ALIGN_UP_TO_KILO(((POINTER_SIZE_INT) dest_addr + obj_size));
+    p_obj = lspace_get_next_marked_object(lspace, &iterate_index);
+  }
+
+  pool_put_entry(collector->gc->metadata->collector_remset_pool, collector->rem_set);
+  collector->rem_set = NULL;
+
+  lspace->scompact_fa_start = dest_addr;
+  lspace->scompact_fa_end= lspace->heap_end;
+  return;
+}
+
+void lspace_sliding_compact(Collector* collector, Lspace* lspace)
+{
+  unsigned int iterate_index = 0;
+  Partial_Reveal_Object* p_obj = lspace_get_first_marked_object(lspace, &iterate_index);
+
+  while( p_obj ){
+    assert( obj_is_marked_in_vt(p_obj));
+    obj_unmark_in_vt(p_obj);
+
+    unsigned int obj_size = vm_object_size(p_obj);
+    Partial_Reveal_Object *p_target_obj = obj_get_fw_in_oi(p_obj);
+    POINTER_SIZE_INT target_obj_end = (POINTER_SIZE_INT)p_target_obj + obj_size;
+    if( p_obj != p_target_obj){
+      memmove(p_target_obj, p_obj, obj_size);
+      /*Fixme: For LOS_Shrink debug*/
+//      unsigned int padding_length = ALIGN_UP_TO_KILO(target_obj_end) - target_obj_end;
+//      memset(p_target_obj, 0, padding_length);
+    }
+    set_obj_info(p_target_obj, 0);
+    p_obj = lspace_get_next_marked_object(lspace, &iterate_index);
+  }
+
+  return;
+}
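lspace_compute_object_target and lspace_sliding_compact above split LOS compaction into the classic two passes: first assign every marked object a forwarding address (recording where the free tail begins), then slide objects down to those addresses. A compressed, self-contained sketch of the pair, with the forwarding pointer held in an array instead of the patch's obj_info word:

#include <string.h>

#define KB 1024UL
#define ALIGN_UP_TO_KILO(x) (((x) + KB - 1) & ~(KB - 1))

/* Pass 1: assign target addresses bottom-up, in address order; the
 * return value marks the start of the free tail (scompact_fa_start). */
static char* compute_targets(char** live, unsigned long* size, int n,
                             char* heap_start, char** forwarding)
{
  char* dest = heap_start;
  for(int i = 0; i < n; i++){
    forwarding[i] = dest;
    dest = (char*)ALIGN_UP_TO_KILO((unsigned long)(dest + size[i]));
  }
  return dest;
}

/* Pass 2: slide each object to its target; memmove because source and
 * destination ranges can overlap. Moving in ascending address order
 * guarantees a target never overwrites a not-yet-moved object. */
static void sliding_compact(char** live, unsigned long* size, int n,
                            char** forwarding)
{
  for(int i = 0; i < n; i++)
    if(live[i] != forwarding[i])
      memmove(forwarding[i], live[i], size[i]);
}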
 void lspace_reset_after_collection(Lspace* lspace)
 {
   GC* gc = lspace->gc;
   Space_Tuner* tuner = gc->tuner;
   POINTER_SIZE_INT trans_size = tuner->tuning_size;
+  POINTER_SIZE_INT new_fa_size = 0;
   assert(!(trans_size%GC_BLOCK_SIZE_BYTES));
-  //For_LOS_extend
-  if(tuner->kind == TRANS_FROM_MOS_TO_LOS){
+
+  /* Reset the pool first because its info is useless now. */
+  free_area_pool_reset(lspace->free_pool);
+
+  switch(tuner->kind){
+    case TRANS_FROM_MOS_TO_LOS:{
+      assert(!lspace->move_object);
       void* origin_end = lspace->heap_end;
       lspace->heap_end = (void*)(((GC_Gen*)gc)->mos->blocks);
-
+      /*The assumption that the first word of each KB must be zero when iterating the lspace
+        in lspace_get_next_marked_object does not hold here*/
       Free_Area* trans_fa = free_area_new(origin_end, trans_size);
       free_pool_add_area(lspace->free_pool, trans_fa);
       lspace->committed_heap_size += trans_size;
       lspace->reserved_heap_size += trans_size;
+      if(lspace->move_object){
+        Block* mos_first_block = ((GC_Gen*)gc)->mos->blocks;
+        lspace->heap_end = (void*)mos_first_block;
+        new_fa_size = (POINTER_SIZE_INT)lspace->scompact_fa_end - (POINTER_SIZE_INT)lspace->scompact_fa_start;
+        Free_Area* fa = free_area_new(lspace->scompact_fa_start, new_fa_size);
+        if(new_fa_size >= GC_OBJ_SIZE_THRESHOLD) free_pool_add_area(lspace->free_pool, fa);
+      }
+      break;
+    }
+    case TRANS_FROM_LOS_TO_MOS:{
+      assert(lspace->move_object);
+      assert(tuner->tuning_size);
+      Block* mos_first_block = ((GC_Gen*)gc)->mos->blocks;
+      assert( (POINTER_SIZE_INT)lspace->heap_end - trans_size == (POINTER_SIZE_INT)mos_first_block );
+      lspace->heap_end = (void*)mos_first_block;
+      lspace->committed_heap_size -= trans_size;
+      lspace->reserved_heap_size -= trans_size;
+      /*LOS_Shrink: We don't have to scan the lspace to build the free pool when we slide-compact the LOS*/
+      assert((POINTER_SIZE_INT)lspace->scompact_fa_end > (POINTER_SIZE_INT)lspace->scompact_fa_start + tuner->tuning_size);
+      new_fa_size = (POINTER_SIZE_INT)lspace->scompact_fa_end - (POINTER_SIZE_INT)lspace->scompact_fa_start - tuner->tuning_size;
+      Free_Area* fa = free_area_new(lspace->scompact_fa_start, new_fa_size);
+      free_pool_add_area(lspace->free_pool, fa);
+      break;
+    }
+    default:{
+      if(lspace->move_object){
+        assert(tuner->kind == TRANS_NOTHING);
+        assert(!tuner->tuning_size);
+        new_fa_size = (POINTER_SIZE_INT)lspace->scompact_fa_end - (POINTER_SIZE_INT)lspace->scompact_fa_start;
+        Free_Area* fa = free_area_new(lspace->scompact_fa_start, new_fa_size);
+        free_pool_add_area(lspace->free_pool, fa);
+      }
+      break;
+    }
   }
 
+  /*For_statistic los information.*/
   lspace->alloced_size = 0;
-  lspace->failure_size = 0;
+  lspace->surviving_size = 0;
 
   los_boundary = lspace->heap_end;
 }
 
 void lspace_sweep(Lspace* lspace)
 {
-
-  lspace->surviving_size = 0;
-
-  /* reset the pool first because its info is useless now. */
-  free_area_pool_reset(lspace->free_pool);
-
   unsigned int mark_bit_idx = 0;
   POINTER_SIZE_INT cur_size = 0;
   void *cur_area_start, *cur_area_end;
 
+  /*If it is TRANS_FROM_MOS_TO_LOS now, we must clear the free areas already added in lspace_reset_after_collection*/
+  free_area_pool_reset(lspace->free_pool);
+
   Partial_Reveal_Object* p_prev_obj = (Partial_Reveal_Object *)lspace->heap_start;
   Partial_Reveal_Object* p_next_obj = lspace_get_first_marked_object(lspace, &mark_bit_idx);
   if(p_next_obj){
Index: gc_gen/src/thread/collector.cpp
===================================================================
--- gc_gen/src/thread/collector.cpp (revision 529215)
+++ gc_gen/src/thread/collector.cpp (working copy)
@@ -23,6 +23,7 @@
 #include "collector.h"
 #include "../mark_compact/mspace.h"
 #include "../finalizer_weakref/finalizer_weakref.h"
+#include "../common/space_tuner.h"
 
 unsigned int MINOR_COLLECTORS = 0;
 unsigned int MAJOR_COLLECTORS = 0;
@@ -76,9 +77,11 @@
   collector_reset_weakref_sets(collector);
 #endif
 
-  if(collector->gc->cause == GC_CAUSE_LOS_IS_FULL)
+  /*For LOS_Shrink and LOS_Extend*/
+  if(collector->gc->tuner->kind != TRANS_NOTHING){
     collector->non_los_live_obj_size = 0;
-
+    collector->los_live_obj_size = 0;
+  }
   collector->result = TRUE;
   return;
 }
Index: gc_gen/src/thread/collector.h
===================================================================
--- gc_gen/src/thread/collector.h (revision 529215)
+++ gc_gen/src/thread/collector.h (working copy)
@@ -57,6 +57,7 @@
   void(*task_func)(void*) ;   /* current task */
 
   POINTER_SIZE_INT non_los_live_obj_size;
+  POINTER_SIZE_INT los_live_obj_size;
   unsigned int result;
 }Collector;
Index: gc_gen/src/thread/mutator_alloc.cpp
===================================================================
--- gc_gen/src/thread/mutator_alloc.cpp (revision 529215)
+++ gc_gen/src/thread/mutator_alloc.cpp (working copy)
@@ -79,7 +79,8 @@
     p_obj = (Managed_Object_Handle)nos_alloc(size, allocator);
   }
 
-  if( p_obj == NULL ) return NULL;
+  if( p_obj == NULL )
+    return NULL;
 
   obj_set_vt((Partial_Reveal_Object*)p_obj, (VT)ah);
Index: gc_gen/src/trace_forward/fspace_alloc.cpp
===================================================================
--- gc_gen/src/trace_forward/fspace_alloc.cpp (revision 529215)
+++ gc_gen/src/trace_forward/fspace_alloc.cpp (working copy)
@@ -99,4 +99,3 @@
 }
 
-
Index: gc_gen/src/verify/verifier_common.cpp
===================================================================
--- gc_gen/src/verify/verifier_common.cpp (revision 529215)
+++ gc_gen/src/verify/verifier_common.cpp (working copy)
@@ -191,4 +191,3 @@
 }
 
-
Index: gc_gen/src/verify/verifier_scanner.cpp
===================================================================
--- gc_gen/src/verify/verifier_scanner.cpp (revision 529215)
+++ gc_gen/src/verify/verifier_scanner.cpp (working copy)
@@ -388,4 +388,3 @@
 
-
Index: gc_gen/src/verify/verify_mutator_effect.cpp
===================================================================
--- gc_gen/src/verify/verify_mutator_effect.cpp (revision 529215)
+++ gc_gen/src/verify/verify_mutator_effect.cpp (working copy)
@@ -395,4 +395,3 @@
 }
 
-
Index: vmcore/src/init/finalizer_thread.cpp
===================================================================
--- vmcore/src/init/finalizer_thread.cpp (revision 529215)
+++ vmcore/src/init/finalizer_thread.cpp (working copy)
@@ -155,7 +155,7 @@
 {
   hymutex_lock(&fin_thread_info->end_mutex);
   while(unsigned int fin_obj_num = vm_get_finalizable_objects_quantity()){
-    unsigned int wait_time = restrict_wait_time(fin_obj_num + 100, FIN_MAX_WAIT_TIME << 7);
+    unsigned int wait_time = restrict_wait_time(fin_obj_num + 1000, FIN_MAX_WAIT_TIME << 7);
     atomic_inc32(&fin_thread_info->end_waiting_num);
     IDATA status = hycond_wait_timed(&fin_thread_info->end_cond, &fin_thread_info->end_mutex, (I_64)wait_time, 0);
     atomic_dec32(&fin_thread_info->end_waiting_num);