Index: src/common/gc_common.cpp =================================================================== --- src/common/gc_common.cpp (revision 541183) +++ src/common/gc_common.cpp (working copy) @@ -132,23 +132,34 @@ if (is_property_set("gc.mx", VM_PROPERTIES) == 1) { max_heap_size = get_size_property("gc.mx"); - if (max_heap_size < min_heap_size) + if (max_heap_size < min_heap_size){ max_heap_size = min_heap_size; - if (0 == max_heap_size) + printf("Max heap size: too small, reset to %d MB!\n", max_heap_size/MB); + } + if (0 == max_heap_size){ max_heap_size = HEAP_SIZE_DEFAULT; + printf("Max heap size: zero, reset to %d MB! \n", max_heap_size/MB); + } min_heap_size = max_heap_size / 10; - if (min_heap_size < min_heap_size_bytes) min_heap_size = min_heap_size_bytes; + if (min_heap_size < min_heap_size_bytes){ + min_heap_size = min_heap_size_bytes; +// printf("Min heap size: too small, reset to %d MB! \n", min_heap_size/MB); + } } if (is_property_set("gc.ms", VM_PROPERTIES) == 1) { min_heap_size = get_size_property("gc.ms"); - if (min_heap_size < min_heap_size_bytes) + if (min_heap_size < min_heap_size_bytes){ min_heap_size = min_heap_size_bytes; + printf("Min heap size: too small, reset to %d MB! \n", min_heap_size/MB); + } } - if (min_heap_size > max_heap_size) + if (min_heap_size > max_heap_size){ max_heap_size = min_heap_size; + printf("Max heap size: too small, reset to %d MB\n", max_heap_size / MB); + } min_heap_size_bytes = min_heap_size; max_heap_size_bytes = max_heap_size; @@ -251,6 +262,11 @@ gc_gen_assign_free_area_to_mutators((GC_Gen*)gc); } +void gc_adjust_heap_size(GC* gc, int64 pause_time) +{ + gc_gen_adjust_heap_size((GC_Gen*)gc, pause_time); +} + void gc_copy_interior_pointer_table_to_rootset(); void gc_reclaim_heap(GC* gc, unsigned int gc_cause) @@ -263,18 +279,10 @@ gc->cause = gc_cause; gc_decide_collection_kind((GC_Gen*)gc, gc_cause); + gc_compute_space_tune_size_before_marking(gc, gc_cause); - //For_LOS_extend! -#ifdef GC_FIXED_SIZE_TUNER - gc_space_tune_before_gc_fixed_size(gc, gc_cause); -#else - gc_space_tune_prepare(gc, gc_cause); - gc_space_tune_before_gc(gc, gc_cause); -#endif - #ifdef MARK_BIT_FLIPPING - if(gc_match_kind(gc, MINOR_COLLECTION)) - mark_bit_flip(); + if(gc_match_kind(gc, MINOR_COLLECTION)) mark_bit_flip(); #endif gc_metadata_verify(gc, TRUE); @@ -291,21 +299,22 @@ /* this has to be done after all mutators are suspended */ gc_reset_mutator_context(gc); - if(!IGNORE_FINREF ) - gc_set_obj_with_fin(gc); + if(!IGNORE_FINREF ) gc_set_obj_with_fin(gc); gc_gen_reclaim_heap((GC_Gen*)gc); gc_reset_interior_pointer_table(); - + gc_metadata_verify(gc, FALSE); int64 pause_time = time_now() - start_time; gc->time_collections += pause_time; + + gc_adjust_heap_size(gc, pause_time); + gc_gen_adapt((GC_Gen*)gc, pause_time); - if(gc_is_gen_mode()) - gc_prepare_mutator_remset(gc); + if(gc_is_gen_mode()) gc_prepare_mutator_remset(gc); if(!IGNORE_FINREF ){ gc_put_finref_to_vm(gc); @@ -317,16 +326,11 @@ #endif } - //For_LOS_extend! 
gc_space_tuner_reset(gc); - + gc_assign_free_area_to_mutators(gc); - + vm_resume_threads_after(); return; } - - - - Index: src/common/gc_common.h =================================================================== --- src/common/gc_common.h (revision 541183) +++ src/common/gc_common.h (working copy) @@ -388,7 +388,6 @@ Vector_Block* root_set; Vector_Block* uncompressed_root_set; - //For_LOS_extend Space_Tuner* tuner; }GC; @@ -443,6 +442,9 @@ extern void* los_boundary; +/*This flag indicate whether lspace is using a sliding compaction + *Fixme: check if the performance is a problem with this global flag. + */ extern Boolean* p_global_lspace_move_obj; inline Boolean obj_is_moved(Partial_Reveal_Object* p_obj) { return ((p_obj >= los_boundary) || (*p_global_lspace_move_obj)); } Index: src/common/gc_for_vm.cpp =================================================================== --- src/common/gc_for_vm.cpp (revision 541183) +++ src/common/gc_for_vm.cpp (working copy) @@ -237,6 +237,7 @@ if(!p_obj) return 0; assert(address_belongs_to_gc_heap(p_obj, p_global_gc)); Obj_Info_Type info = get_obj_info_raw(p_obj); + unsigned int new_info = 0; int hash; switch(info & HASHCODE_MASK){ @@ -250,7 +251,13 @@ hash = hashcode_lookup(p_obj,info); break; case HASHCODE_UNSET: - set_obj_info(p_obj, info | HASHCODE_SET_BIT); + new_info = (unsigned int)(info | HASHCODE_SET_BIT); + while (true) { + unsigned int temp = atomic_cas32(&p_obj->obj_info, new_info, info); + if (temp == info) break; + info = get_obj_info_raw(p_obj); + new_info = (unsigned int)(info | HASHCODE_SET_BIT); + } hash = hashcode_gen((void*)p_obj); break; default: Index: src/common/gc_space.h =================================================================== --- src/common/gc_space.h (revision 541183) +++ src/common/gc_space.h (working copy) @@ -161,7 +161,8 @@ void* new_end = (void*)((POINTER_SIZE_INT)commit_base + commit_size); space->committed_heap_size = (POINTER_SIZE_INT)new_end - (POINTER_SIZE_INT)space->heap_start; - + /*Fixme: For_Heap_Adjust, but need fix if static mapping.*/ + space->heap_end = new_end; /* init the grown blocks */ Block_Header* block = (Block_Header*)commit_base; Block_Header* last_block = (Block_Header*)((Block*)block -1); Index: src/common/hashcode.h =================================================================== --- src/common/hashcode.h (revision 541183) +++ src/common/hashcode.h (working copy) @@ -53,6 +53,7 @@ typedef struct Hashcode_Buf{ Seq_List* list; POINTER_SIZE_INT* checkpoint; + SpinLock lock; }Hashcode_Buf; extern GC_Metadata gc_metadata; @@ -117,6 +118,7 @@ inline int hashcode_buf_lookup(Partial_Reveal_Object* p_obj,Hashcode_Buf* hashcode_buf) { POINTER_SIZE_INT obj_addr = (POINTER_SIZE_INT)p_obj; + lock(hashcode_buf->lock); Seq_List* list = hashcode_buf->list; seq_list_iterate_init(list); while(seq_list_has_next(list)){ @@ -132,11 +134,13 @@ iter = vector_block_iterator_advance(curr_block, iter); POINTER_SIZE_INT hashcode = (POINTER_SIZE_INT)*iter; iter = vector_block_iterator_advance(curr_block, iter); + unlock(hashcode_buf->lock); return *(int*)&hashcode; } } } assert(0); + unlock(hashcode_buf->lock); return 0; } @@ -366,371 +370,3 @@ return hash; } #endif //_HASHCODE_H_ -#ifndef _HASHCODE_H_ -#define _HASHCODE_H_ - -#include "gc_common.h" -#include "../utils/vector_block.h" -#include "../utils/seq_list.h" - -#define HASHCODE_MASK 0x1C - -#define HASHCODE_SET_BIT 0x04 -#define HASHCODE_ATTACHED_BIT 0x08 -#define HASHCODE_BUFFERED_BIT 0x10 - -#define HASHCODE_EXTENDED_VT_BIT 0x02 - -enum 
Hashcode_Kind{ - HASHCODE_UNSET = 0x0, - HASHCODE_SET_UNALLOCATED = HASHCODE_SET_BIT, - HASHCODE_SET_ATTACHED = HASHCODE_SET_BIT | HASHCODE_ATTACHED_BIT, - HASHCODE_SET_BUFFERED = HASHCODE_SET_BIT | HASHCODE_BUFFERED_BIT -}; - -inline Boolean obj_is_sethash_in_vt(Partial_Reveal_Object* p_obj){ - return (Boolean)((POINTER_SIZE_INT)obj_get_vt_raw(p_obj) & HASHCODE_EXTENDED_VT_BIT); -} - -inline void obj_sethash_in_vt(Partial_Reveal_Object* p_obj){ - VT vt = obj_get_vt_raw(p_obj); - obj_set_vt(p_obj,(VT)((POINTER_SIZE_INT)vt | HASHCODE_EXTENDED_VT_BIT)); -} - -inline Boolean hashcode_is_set(Partial_Reveal_Object* p_obj) -{ - Obj_Info_Type obj_info = get_obj_info_raw(p_obj); - return obj_info & HASHCODE_SET_BIT; -} - -inline Boolean hashcode_is_attached(Partial_Reveal_Object* p_obj) -{ - Obj_Info_Type obj_info = get_obj_info_raw(p_obj); - return obj_info & HASHCODE_ATTACHED_BIT; -} - -inline Boolean hashcode_is_buffered(Partial_Reveal_Object* p_obj) -{ - Obj_Info_Type obj_info = get_obj_info_raw(p_obj); - return obj_info & HASHCODE_BUFFERED_BIT; -} - -inline int hashcode_gen(void* addr) -{ return (int)(POINTER_SIZE_INT)addr; } - -typedef struct Hashcode_Buf{ - Seq_List* list; - POINTER_SIZE_INT* checkpoint; -}Hashcode_Buf; - -extern GC_Metadata gc_metadata; -Vector_Block* free_set_pool_get_entry(GC_Metadata *metadata); -void free_set_pool_put_entry(Vector_Block* block, GC_Metadata *metadata); - -inline void hashcode_buf_set_checkpoint(Hashcode_Buf* hashcode_buf) -{ hashcode_buf->checkpoint = vector_block_get_last_entry((Vector_Block*)hashcode_buf->list->end); } - -inline Hashcode_Buf* hashcode_buf_create() -{ - Hashcode_Buf* hashcode_buf = (Hashcode_Buf*) STD_MALLOC(sizeof(Hashcode_Buf)); - memset(hashcode_buf, 0, sizeof(Hashcode_Buf)); - hashcode_buf->list = seq_list_create(); - return hashcode_buf; -} - -inline void hashcode_buf_remove(Hashcode_Buf* hashcode_buf, Vector_Block* block) -{ - Seq_List* list = hashcode_buf->list; - seq_list_remove(list, (List_Node*) block); - vector_block_clear(block); - free_set_pool_put_entry(block, &gc_metadata); -} - -inline void hashcode_buf_clear(Hashcode_Buf* hashcode_buf) -{ - //push vector block back to free list - Seq_List* list = hashcode_buf->list; - seq_list_iterate_init(list); - - while(seq_list_has_next(list)){ - Vector_Block* curr_block = (Vector_Block*)seq_list_iterate_next(list);; - vector_block_clear(curr_block); - free_set_pool_put_entry(curr_block, &gc_metadata); - } - seq_list_clear(list); - return; -} - -inline void hashcode_buf_destory(Hashcode_Buf* hashcode_buf) -{ - Seq_List* list = hashcode_buf->list; - hashcode_buf_clear(hashcode_buf); - seq_list_destruct(list); - STD_FREE((void*)hashcode_buf); -} - -inline void hashcode_buf_init(Hashcode_Buf* hashcode_buf) -{ - Seq_List* list = hashcode_buf->list; -#ifdef _DEBUG - seq_list_iterate_init(list); - assert(!seq_list_has_next(list)); -#endif - Vector_Block* free_block = free_set_pool_get_entry(&gc_metadata); - seq_list_add(list, (List_Node*)free_block); - hashcode_buf_set_checkpoint(hashcode_buf); - return; -} - -inline int hashcode_buf_lookup(Partial_Reveal_Object* p_obj,Hashcode_Buf* hashcode_buf) -{ - POINTER_SIZE_INT obj_addr = (POINTER_SIZE_INT)p_obj; - Seq_List* list = hashcode_buf->list; - seq_list_iterate_init(list); - while(seq_list_has_next(list)){ - Vector_Block* curr_block = (Vector_Block*)seq_list_iterate_next(list); - POINTER_SIZE_INT *iter = vector_block_iterator_init(curr_block); - - while(!vector_block_iterator_end(curr_block, iter)){ - POINTER_SIZE_INT addr = 
(POINTER_SIZE_INT)*iter; - if(obj_addr != addr){ - iter = vector_block_iterator_advance(curr_block, iter); - iter = vector_block_iterator_advance(curr_block, iter); - }else{ - iter = vector_block_iterator_advance(curr_block, iter); - POINTER_SIZE_INT hashcode = (POINTER_SIZE_INT)*iter; - iter = vector_block_iterator_advance(curr_block, iter); - return *(int*)&hashcode; - } - } - } - assert(0); - return 0; -} - -inline void hashcode_buf_add(Partial_Reveal_Object* p_obj, int32 hashcode, Hashcode_Buf* hashcode_buf) -{ - Seq_List* list = hashcode_buf->list; - Vector_Block* tail_block = (Vector_Block*)seq_list_end_node(list); - vector_block_add_entry(tail_block, (POINTER_SIZE_INT) p_obj); - POINTER_SIZE_INT hashcode_var = 0; - *(int*) &hashcode_var = hashcode; - vector_block_add_entry(tail_block, hashcode_var); - - if(!vector_block_is_full(tail_block)) return; - - tail_block = free_set_pool_get_entry(&gc_metadata); - seq_list_add(list, (List_Node*)tail_block); - return; -} - -inline void hashcode_buf_refresh_all(Hashcode_Buf* hashcode_buf, POINTER_SIZE_INT dist) -{ - Seq_List* list = hashcode_buf->list; - seq_list_iterate_init(list); - while(seq_list_has_next(list)){ - Vector_Block* curr_block = (Vector_Block*)seq_list_iterate_next(list);; - POINTER_SIZE_INT *iter = vector_block_iterator_init(curr_block); - while(!vector_block_iterator_end(curr_block, iter)){ - POINTER_SIZE_INT addr = (POINTER_SIZE_INT)*iter; - *iter = addr - dist; - iter =vector_block_iterator_advance(curr_block, iter); - iter =vector_block_iterator_advance(curr_block, iter); - } - } - return; -} - -inline void hashcode_buf_rollback_new_entry(Hashcode_Buf* hashcode_buf) -{ - Vector_Block* first_block = VECTOR_BLOCK_HEADER(hashcode_buf->checkpoint); - POINTER_SIZE_INT* iter = hashcode_buf->checkpoint; - while(!vector_block_iterator_end(first_block, iter)){ - Partial_Reveal_Object* p_obj = (Partial_Reveal_Object*)*iter; - Obj_Info_Type oi = get_obj_info_raw(p_obj); - set_obj_info(p_obj, oi & ~HASHCODE_BUFFERED_BIT); - iter =vector_block_iterator_advance(first_block, iter); - iter =vector_block_iterator_advance(first_block, iter); - } - first_block->tail = hashcode_buf->checkpoint; - - Seq_List* list = hashcode_buf->list; - seq_list_iterate_init_after_node(list, (List_Node*)first_block); - while(seq_list_has_next(list)){ - Vector_Block* curr_block = (Vector_Block*)seq_list_iterate_next(list);; - iter = vector_block_iterator_init(curr_block); - while(!vector_block_iterator_end(curr_block, iter)){ - Partial_Reveal_Object* p_obj = (Partial_Reveal_Object*)*iter; - Obj_Info_Type oi = get_obj_info_raw(p_obj); - set_obj_info(p_obj, oi & ~HASHCODE_BUFFERED_BIT); - iter =vector_block_iterator_advance(curr_block, iter); - iter =vector_block_iterator_advance(curr_block, iter); - } - hashcode_buf_remove(hashcode_buf, curr_block); - } - return; -} - -inline void hashcode_buf_transfer_new_entry(Hashcode_Buf* old_buf, Hashcode_Buf* new_buf) -{ - hashcode_buf_set_checkpoint(new_buf); - - Vector_Block* first_block = VECTOR_BLOCK_HEADER(old_buf->checkpoint); - POINTER_SIZE_INT* iter = old_buf->checkpoint; - while(!vector_block_iterator_end(first_block, iter)){ - Partial_Reveal_Object* p_obj = (Partial_Reveal_Object*)*iter; - - iter =vector_block_iterator_advance(first_block, iter); - POINTER_SIZE_INT hashcode = (POINTER_SIZE_INT)*iter; - iter =vector_block_iterator_advance(first_block, iter); - hashcode_buf_add(p_obj, *(int*) &hashcode, new_buf); - } - first_block->tail = old_buf->checkpoint; - - Seq_List* list = old_buf->list; - 
seq_list_iterate_init_after_node(list, (List_Node*)first_block); - while(seq_list_has_next(list)){ - Vector_Block* curr_block = (Vector_Block*)seq_list_iterate_next(list);; - iter = vector_block_iterator_init(curr_block); - while(!vector_block_iterator_end(curr_block, iter)){ - Partial_Reveal_Object* p_obj = (Partial_Reveal_Object*)*iter; - iter =vector_block_iterator_advance(curr_block, iter); - POINTER_SIZE_INT hashcode = (POINTER_SIZE_INT)*iter; - iter =vector_block_iterator_advance(curr_block, iter); - - hashcode_buf_add(p_obj, *(int*) &hashcode, new_buf); - } - hashcode_buf_remove(old_buf, curr_block); - } - return; -} - -inline void hashcode_buf_refresh_new_entry(Hashcode_Buf* hashcode_buf, POINTER_SIZE_INT dist) -{ - Vector_Block* first_block = VECTOR_BLOCK_HEADER(hashcode_buf->checkpoint); - POINTER_SIZE_INT* iter = hashcode_buf->checkpoint; - while(!vector_block_iterator_end(first_block, iter)){ - POINTER_SIZE_INT addr = (POINTER_SIZE_INT)*iter; - *iter = addr - dist; - - iter =vector_block_iterator_advance(first_block, iter); - iter =vector_block_iterator_advance(first_block, iter); - } - - Seq_List* list = hashcode_buf->list; - seq_list_iterate_init_after_node(list, (List_Node*)first_block); - while(seq_list_has_next(list)){ - Vector_Block* curr_block = (Vector_Block*)seq_list_iterate_next(list);; - iter = vector_block_iterator_init(curr_block); - while(!vector_block_iterator_end(curr_block, iter)){ - POINTER_SIZE_INT addr = (POINTER_SIZE_INT)*iter; - *iter = addr - dist; - - iter =vector_block_iterator_advance(curr_block, iter); - iter =vector_block_iterator_advance(curr_block, iter); - } - } - hashcode_buf_set_checkpoint(hashcode_buf); - return; -} - -void collector_hashcodeset_add_entry(Collector* collector, Partial_Reveal_Object** p_ref); - -inline Obj_Info_Type slide_compact_process_hashcode(Partial_Reveal_Object* p_obj, void* dest_addr, - unsigned int* p_obj_size, Collector* collector, - Hashcode_Buf* old_buf, Hashcode_Buf* new_buf) -{ - Obj_Info_Type obj_info = get_obj_info(p_obj); - POINTER_SIZE_INT hashcode; - - switch(obj_info & HASHCODE_MASK){ - case HASHCODE_SET_UNALLOCATED: - if((POINTER_SIZE_INT)dest_addr != (POINTER_SIZE_INT)p_obj){ - *p_obj_size += GC_OBJECT_ALIGNMENT; - obj_info = obj_info | HASHCODE_ATTACHED_BIT; - *(int*) &hashcode = hashcode_gen(p_obj); - POINTER_SIZE_INT obj_end_pos = (POINTER_SIZE_INT)dest_addr + vm_object_size(p_obj); - collector_hashcodeset_add_entry(collector, (Partial_Reveal_Object**)obj_end_pos); - collector_hashcodeset_add_entry(collector, (Partial_Reveal_Object**)hashcode); - } - break; - - case HASHCODE_SET_ATTACHED: - obj_sethash_in_vt(p_obj); - break; - - case HASHCODE_SET_BUFFERED: - *(int*) &hashcode = hashcode_buf_lookup(p_obj, old_buf); - if((POINTER_SIZE_INT)dest_addr != (POINTER_SIZE_INT)p_obj){ - *p_obj_size += GC_OBJECT_ALIGNMENT; - obj_info = obj_info & ~HASHCODE_BUFFERED_BIT; - obj_info = obj_info | HASHCODE_ATTACHED_BIT; - POINTER_SIZE_INT obj_end_pos = (POINTER_SIZE_INT)dest_addr + vm_object_size(p_obj); - collector_hashcodeset_add_entry(collector, (Partial_Reveal_Object**)obj_end_pos); - collector_hashcodeset_add_entry(collector, (Partial_Reveal_Object**)hashcode); - }else{ - hashcode_buf_add((Partial_Reveal_Object*)dest_addr, *(int*) &hashcode, new_buf); - } - break; - - case HASHCODE_UNSET: - break; - - default: - assert(0); - - } - return obj_info; -} - -inline void move_compact_process_hashcode(Partial_Reveal_Object* p_obj, - Hashcode_Buf* old_buf, Hashcode_Buf* new_buf) -{ - if(hashcode_is_set(p_obj) && 
!hashcode_is_attached(p_obj)){ - int hashcode; - if(hashcode_is_buffered(p_obj)){ - /*already buffered objects;*/ - hashcode = hashcode_buf_lookup(p_obj, old_buf); - hashcode_buf_add(p_obj, hashcode, new_buf); - }else{ - /*objects need buffering.*/ - hashcode = hashcode_gen(p_obj); - hashcode_buf_add(p_obj, hashcode, new_buf); - Obj_Info_Type oi = get_obj_info_raw(p_obj); - set_obj_info(p_obj, oi | HASHCODE_BUFFERED_BIT); - } - } -} - -inline Obj_Info_Type trace_forward_process_hashcode(Partial_Reveal_Object* p_obj, - Obj_Info_Type oi, unsigned int p_obj_size) -{ - oi |= HASHCODE_ATTACHED_BIT; - *(int *)(((char*)p_obj) + p_obj_size - GC_OBJECT_ALIGNMENT) = hashcode_gen(p_obj); - assert(vm_object_size(p_obj) != 0); - return oi; -} - -inline void precompute_hashcode_extend_size(Partial_Reveal_Object* p_obj, - void* dest_addr, unsigned int * obj_size_precompute) -{ - if(hashcode_is_set(p_obj) && !hashcode_is_attached(p_obj)){ - if((POINTER_SIZE_INT)dest_addr != (POINTER_SIZE_INT)p_obj) - *obj_size_precompute += GC_OBJECT_ALIGNMENT; - } -} - -inline int obj_lookup_hashcode_in_buf(Partial_Reveal_Object *p_obj); -inline int hashcode_lookup(Partial_Reveal_Object* p_obj,Obj_Info_Type obj_info) -{ - int hash; - if(hashcode_is_attached(p_obj)){ - int offset = vm_object_size(p_obj); - unsigned char* pos = (unsigned char *)p_obj; - hash = *(int*) (pos + offset); - }else if(hashcode_is_buffered(p_obj)){ - hash = obj_lookup_hashcode_in_buf(p_obj); - } - return hash; -} -#endif //_HASHCODE_H_ Index: src/common/space_tuner.cpp =================================================================== --- src/common/space_tuner.cpp (revision 541183) +++ src/common/space_tuner.cpp (working copy) @@ -27,38 +27,36 @@ Space* gc_get_mos(GC_Gen* gc); Space* gc_get_nos(GC_Gen* gc); Space* gc_get_los(GC_Gen* gc); -POINTER_SIZE_INT mspace_get_expected_threshold(Mspace* mspace); +float mspace_get_expected_threshold_ratio(Mspace* mspace); POINTER_SIZE_INT lspace_get_failure_size(Lspace* lspace); -/*Prepare the paramenters which are to be used to compute new los size.*/ -void gc_space_tune_prepare(GC* gc, unsigned int cause) +void gc_decide_space_tune(GC* gc, unsigned int cause) { - if(gc_match_kind(gc, MINOR_COLLECTION)) - return; - Blocked_Space* mspace = (Blocked_Space*)gc_get_mos((GC_Gen*)gc); Blocked_Space* fspace = (Blocked_Space*)gc_get_nos((GC_Gen*)gc); Space* lspace = (Space*)gc_get_los((GC_Gen*)gc); Space_Tuner* tuner = gc->tuner; - + //debug_adjust assert(fspace->free_block_idx >= fspace->first_block_idx); unsigned int nos_alloc_size = (fspace->free_block_idx - fspace->first_block_idx) * GC_BLOCK_SIZE_BYTES; fspace->alloced_size = nos_alloc_size; /*Fixme: LOS_Adaptive: There should be a condition here, that fspace->collection_num != 0*/ mspace->alloced_size += (unsigned int)((float)nos_alloc_size * fspace->survive_ratio); - /*For_statistic alloc speed: Speed could be represented by sum of alloced size.*/ - tuner->speed_los += lspace->alloced_size; + /*For_statistic alloc speed: Speed could be represented by sum of alloced size. + *The right of this time los/mos alloc speed is the biggest. 
+ */ + tuner->speed_los = lspace->alloced_size; tuner->speed_los = (tuner->speed_los + tuner->old_speed_los) >> 1; - tuner->speed_mos += mspace->alloced_size; + tuner->speed_mos = mspace->alloced_size; tuner->speed_mos = (tuner->speed_mos + tuner->old_speed_mos) >> 1; /*For_statistic wasted memory*/ POINTER_SIZE_INT curr_used_los = lspace->surviving_size + lspace->alloced_size; - assert(curr_used_los <= lspace->committed_heap_size); POINTER_SIZE_INT curr_wast_los = lspace->committed_heap_size - curr_used_los; tuner->wast_los += curr_wast_los; POINTER_SIZE_INT curr_used_mos = mspace->surviving_size + mspace->alloced_size; - POINTER_SIZE_INT expected_mos = mspace_get_expected_threshold((Mspace*)mspace); + float expected_mos_ratio = mspace_get_expected_threshold_ratio((Mspace*)mspace); + POINTER_SIZE_INT expected_mos = (POINTER_SIZE_INT)((mspace->committed_heap_size + fspace->committed_heap_size) * expected_mos_ratio); POINTER_SIZE_INT curr_wast_mos = 0; if(expected_mos > curr_used_mos) curr_wast_mos = expected_mos - curr_used_mos; @@ -68,270 +66,263 @@ /*For_statistic ds in heuristic*/ tuner->current_ds = (unsigned int)((float)fspace->committed_heap_size * fspace->survive_ratio); /*Fixme: Threshold should be computed by heuristic. tslow, total recycled heap size shold be statistic.*/ - tuner->threshold = tuner->current_ds; - if(tuner->threshold > 8 * MB) tuner->threshold = 8 * MB; + tuner->threshold_waste = tuner->current_ds; + if(tuner->threshold_waste > 8 * MB) tuner->threshold_waste = 8 * MB; tuner->min_tuning_size = tuner->current_ds; if(tuner->min_tuning_size > 4 * MB) tuner->min_tuning_size = 4 * MB; - return; -} -/*Check the tuning size, if too small, cancle the tuning.*/ -void check_space_tuner(GC* gc) -{ - POINTER_SIZE_INT los_fail_sz_uped = 0; - - Space_Tuner* tuner = gc->tuner; - if((!tuner->need_tune) && (!tuner->force_tune)){ - assert(tuner->kind == TRANS_NOTHING); - assert(tuner->tuning_size == 0); - return; - } - Space* lspace = (Space*)gc_get_los((GC_Gen*)gc); - if((!tuner->force_tune) && (tuner->tuning_size < tuner->min_tuning_size)){ - tuner->tuning_size = 0; - goto check_size; - } - if((tuner->need_tune) && (!tuner->force_tune)) goto check_size; - /*tuner->force_tune must be true here!*/ - los_fail_sz_uped = lspace_get_failure_size((Lspace*)lspace); - assert(!(los_fail_sz_uped % KB)); + if(tuner->speed_los == 0) tuner->speed_los = 16; + if(tuner->speed_mos == 0) tuner->speed_mos = 16; - if(tuner->kind == TRANS_FROM_LOS_TO_MOS){ - tuner->kind = TRANS_FROM_MOS_TO_LOS; - tuner->tuning_size = 0; - lspace->move_object = 0; - } - if(tuner->tuning_size < los_fail_sz_uped){ - tuner->tuning_size = los_fail_sz_uped; - } - -check_size: - tuner->tuning_size = round_down_to_size(tuner->tuning_size, GC_BLOCK_SIZE_BYTES); - if(tuner->tuning_size == 0){ - tuner->kind = TRANS_NOTHING; - lspace->move_object = 0; - } - + /*Needn't tune if dw does not reach threshold.*/ + if(tuner->current_dw > tuner->threshold_waste) tuner->need_tune = 1; + /*If LOS is full, we should tune at lease "tuner->least_tuning_size" size*/ + if(gc->cause == GC_CAUSE_LOS_IS_FULL) tuner->force_tune = 1; + return; } - extern POINTER_SIZE_INT min_los_size_bytes; extern POINTER_SIZE_INT min_none_los_size_bytes; -/*Give the tuning kind, and tuning size hint*/ -void gc_space_tune_before_gc(GC* gc, unsigned int cause) + +void gc_compute_space_tune_size_before_marking(GC* gc, unsigned int cause) { - if(gc_match_kind(gc, MINOR_COLLECTION)) return; + if(gc_match_kind(gc, MINOR_COLLECTION)) return; + + gc_decide_space_tune(gc, 
cause); + Space_Tuner* tuner = gc->tuner; if((tuner->speed_los == 0) && ( tuner->speed_mos == 0)) return; - if(tuner->speed_los == 0) tuner->speed_los = 16; - if(tuner->speed_mos == 0) tuner->speed_mos = 16; - - /*Needn't tune if dw does not reach threshold.*/ - if(tuner->current_dw > tuner->threshold) tuner->need_tune = 1; - /*If LOS is full, we should tune at lease "tuner->least_tuning_size" size*/ - if(gc->cause == GC_CAUSE_LOS_IS_FULL) tuner->force_tune = 1; if((!tuner->need_tune) && (!tuner->force_tune)) return; - + Blocked_Space* mspace = (Blocked_Space*)gc_get_mos((GC_Gen*)gc); Blocked_Space* fspace = (Blocked_Space*)gc_get_nos((GC_Gen*)gc); Space* lspace = (Space*)gc_get_los((GC_Gen*)gc); - POINTER_SIZE_INT los_expect_survive_sz = (POINTER_SIZE_INT)((float)(lspace->surviving_size + lspace->alloced_size) * lspace->survive_ratio); - POINTER_SIZE_INT los_expect_free_sz = ((lspace->committed_heap_size > los_expect_survive_sz) ? - (lspace->committed_heap_size - los_expect_survive_sz) : 0); + POINTER_SIZE_INT los_expect_surviving_sz = (POINTER_SIZE_INT)((float)(lspace->surviving_size + lspace->alloced_size) * lspace->survive_ratio); + POINTER_SIZE_INT los_expect_free_sz = ((lspace->committed_heap_size > los_expect_surviving_sz) ? + (lspace->committed_heap_size - los_expect_surviving_sz) : 0); POINTER_SIZE_INT mos_expect_survive_sz = (POINTER_SIZE_INT)((float)(mspace->surviving_size + mspace->alloced_size) * mspace->survive_ratio); - POINTER_SIZE_INT mos_expect_threshold = mspace_get_expected_threshold((Mspace*)mspace); + float mos_expect_threshold_ratio = mspace_get_expected_threshold_ratio((Mspace*)mspace); + POINTER_SIZE_INT mos_expect_threshold = (POINTER_SIZE_INT)((mspace->committed_heap_size + fspace->committed_heap_size) * mos_expect_threshold_ratio); POINTER_SIZE_INT mos_expect_free_sz = ((mos_expect_threshold > mos_expect_survive_sz)? 
(mos_expect_threshold - mos_expect_survive_sz) : 0); - POINTER_SIZE_INT total_free = los_expect_free_sz + mos_expect_free_sz; - assert(total_free <= gc->committed_heap_size); + POINTER_SIZE_INT total_expect_free_sz = los_expect_free_sz + mos_expect_free_sz; + float new_los_ratio = (float)tuner->speed_los / (float)(tuner->speed_los + tuner->speed_mos); - POINTER_SIZE_INT new_free_los_sz = (POINTER_SIZE_INT)((float)total_free * new_los_ratio); - assert(new_free_los_sz <= gc->committed_heap_size); - POINTER_SIZE_INT max_tuning_size = 0; + POINTER_SIZE_INT new_free_los_sz = (POINTER_SIZE_INT)((float)total_expect_free_sz * new_los_ratio); + /*LOS_Extend:*/ if((new_free_los_sz > los_expect_free_sz) ) { - if ( (!tuner->force_tune) && (new_free_los_sz - los_expect_free_sz < tuner->min_tuning_size) ){ - tuner->kind = TRANS_NOTHING; - tuner->tuning_size = 0; - return; - } tuner->kind = TRANS_FROM_MOS_TO_LOS; - tuner->tuning_size = round_down_to_size(new_free_los_sz - los_expect_free_sz, GC_BLOCK_SIZE_BYTES); - POINTER_SIZE_INT non_los_sz = mspace->committed_heap_size + fspace->committed_heap_size; - if(non_los_sz > min_none_los_size_bytes) - max_tuning_size = non_los_sz - min_none_los_size_bytes; - if(tuner->tuning_size > max_tuning_size) tuner->tuning_size = max_tuning_size; + tuner->tuning_size = new_free_los_sz - los_expect_free_sz; } /*LOS_Shrink:*/ - if((new_free_los_sz < los_expect_free_sz)) + else if(new_free_los_sz < los_expect_free_sz) { - if ( (!tuner->force_tune) && (los_expect_free_sz - new_free_los_sz < tuner->min_tuning_size) ){ - tuner->kind = TRANS_NOTHING; - tuner->tuning_size = 0; - return; - } tuner->kind = TRANS_FROM_LOS_TO_MOS; + tuner->tuning_size = los_expect_free_sz - new_free_los_sz; lspace->move_object = 1; - assert(lspace->committed_heap_size >= min_los_size_bytes); - max_tuning_size = lspace->committed_heap_size - min_los_size_bytes; - POINTER_SIZE_INT tuning_size = los_expect_free_sz - new_free_los_sz; - if(tuning_size > max_tuning_size) tuning_size = max_tuning_size; - tuner->tuning_size = round_down_to_size(tuning_size, GC_BLOCK_SIZE_BYTES); } - if( (tuner->tuning_size == 0) && (!tuner->force_tune) ){ + /*Nothing*/ + else + { + tuner->tuning_size = 0; + } + + /*If not force tune, and the tuning size is too small, tuner will not take effect.*/ + if( (!tuner->force_tune) && (tuner->tuning_size < tuner->min_tuning_size) ){ tuner->kind = TRANS_NOTHING; + tuner->tuning_size = 0; lspace->move_object = 0; - return; } - check_space_tuner(gc); - return; -} -void gc_space_tune_before_gc_fixed_size(GC* gc, unsigned int cause) -{ - if(gc_match_kind(gc, MINOR_COLLECTION)) return; - Space_Tuner* tuner = gc->tuner; - Blocked_Space* mspace = (Blocked_Space*)gc_get_mos((GC_Gen*)gc); - Blocked_Space* fspace = (Blocked_Space*)gc_get_nos((GC_Gen*)gc); - Space* lspace = (Space*)gc_get_los((GC_Gen*)gc); - - if(cause == GC_CAUSE_LOS_IS_FULL){ - tuner->kind = TRANS_FROM_MOS_TO_LOS; - POINTER_SIZE_INT los_fail_sz = lspace_get_failure_size((Lspace*)lspace); - if(los_fail_sz > GC_LOS_MIN_VARY_SIZE){ - /*Fixme: we should set the least_tuning_size after finding out the biggest free area in LOS, this number could be zero*/ - tuner->tuning_size = los_fail_sz; - tuner->least_tuning_size = los_fail_sz; - tuner->conservative_tuning_size = los_fail_sz; - }else{ - tuner->tuning_size = GC_LOS_MIN_VARY_SIZE; - tuner->least_tuning_size = los_fail_sz; - tuner->conservative_tuning_size = ((tuner->tuning_size + tuner->min_tuning_size) >> 1); - } - POINTER_SIZE_INT none_los_size; -#ifdef STATIC_NOS_MAPPING - 
none_los_size = mspace->committed_heap_size; -#else - none_los_size = mspace->committed_heap_size + fspace->committed_heap_size; -#endif - if(tuner->tuning_size > none_los_size){ - tuner->tuning_size = tuner->conservative_tuning_size; - } - if(tuner->tuning_size > none_los_size){ - tuner->tuning_size = tuner->least_tuning_size; - } - if((tuner->tuning_size + gc->num_active_collectors * GC_BLOCK_SIZE_BYTES) >= none_los_size){ - tuner->tuning_size = 0; - } - } - else - /*LOS_Shrink: Fixme: Very simple strategy now. */ - { - return; - tuner->kind = TRANS_FROM_LOS_TO_MOS; - lspace->move_object = TRUE; - tuner->tuning_size = GC_LOS_MIN_VARY_SIZE >> 1; - } - - /*Fixme: Should MOS heap_start must be 64k aligned?*/ - tuner->tuning_size = round_up_to_size(tuner->tuning_size, GC_BLOCK_SIZE_BYTES); - if(tuner->tuning_size == 0){ + /*If los or non-los is already the smallest size, there is no need to tune anymore.*/ + if(((lspace->committed_heap_size <= min_los_size_bytes) && (tuner->kind == TRANS_FROM_LOS_TO_MOS)) || + ((fspace->committed_heap_size + mspace->committed_heap_size <= min_none_los_size_bytes) && (tuner->kind == TRANS_FROM_MOS_TO_LOS))){ + assert((lspace->committed_heap_size == min_los_size_bytes) || (fspace->committed_heap_size + mspace->committed_heap_size == min_none_los_size_bytes)); tuner->kind = TRANS_NOTHING; + tuner->tuning_size = 0; lspace->move_object = 0; } + + if(tuner->force_tune){ + if(tuner->kind == TRANS_FROM_LOS_TO_MOS){ + tuner->kind = TRANS_FROM_MOS_TO_LOS; + tuner->reverse = 1; + } + } - return; + return; } #include "../thread/collector.h" #include "../mark_sweep/lspace.h" -Boolean gc_space_retune(GC *gc) + +static POINTER_SIZE_INT non_los_live_obj_size; +static POINTER_SIZE_INT los_live_obj_size; +static void gc_compute_live_object_size_after_marking(GC* gc) { - Lspace *los = (Lspace*)gc_get_los((GC_Gen*)gc); + non_los_live_obj_size = 0; + los_live_obj_size = 0; + + unsigned int collector_num = gc->num_active_collectors; + for(unsigned int i = collector_num; i--;){ + Collector *collector = gc->collectors[i]; + non_los_live_obj_size += collector->non_los_live_obj_size; + los_live_obj_size += collector->los_live_obj_size; + } + + non_los_live_obj_size += ((collector_num << 2) << GC_BLOCK_SHIFT_COUNT); + non_los_live_obj_size = round_up_to_size(non_los_live_obj_size, GC_BLOCK_SIZE_BYTES); + + los_live_obj_size += ((collector_num << 2) << GC_BLOCK_SHIFT_COUNT); + los_live_obj_size = round_up_to_size(los_live_obj_size, GC_BLOCK_SIZE_BYTES); + +} + +void gc_compute_space_tune_size_after_marking(GC *gc) +{ + gc_compute_live_object_size_after_marking(gc); + + Blocked_Space* mspace = (Blocked_Space*)gc_get_mos((GC_Gen*)gc); + Blocked_Space* fspace = (Blocked_Space*)gc_get_nos((GC_Gen*)gc); + Lspace *lspace = (Lspace*)gc_get_los((GC_Gen*)gc); Space_Tuner* tuner = gc->tuner; - /*LOS_Extend:*/ - if(tuner->kind == TRANS_FROM_MOS_TO_LOS){ - POINTER_SIZE_INT non_los_live_obj_size = 0; - unsigned int collector_num = gc->num_active_collectors; - for(unsigned int i = collector_num; i--;){ - Collector *collector = gc->collectors[i]; - non_los_live_obj_size += collector->non_los_live_obj_size; - } - non_los_live_obj_size += GC_BLOCK_SIZE_BYTES * collector_num * 4; - non_los_live_obj_size = round_up_to_size(non_los_live_obj_size, GC_BLOCK_SIZE_BYTES); - POINTER_SIZE_INT max_free_for_tuning = 0; - if (gc->committed_heap_size > los->committed_heap_size + non_los_live_obj_size) - max_free_for_tuning = gc->committed_heap_size - los->committed_heap_size - non_los_live_obj_size; + + 
POINTER_SIZE_INT max_tuning_size = 0; + POINTER_SIZE_INT non_los_size = mspace->committed_heap_size + fspace->committed_heap_size; + /*We should assure that the non_los area is no less than min_none_los_size_bytes*/ + POINTER_SIZE_INT max_tune_for_min_non_los = 0; + if(non_los_size > min_none_los_size_bytes) + max_tune_for_min_non_los = non_los_size - min_none_los_size_bytes; + POINTER_SIZE_INT max_tune_for_min_los = 0; + //debug_adjust + assert(lspace->committed_heap_size >= min_los_size_bytes); + max_tune_for_min_los = lspace->committed_heap_size - min_los_size_bytes; - if(!tuner->force_tune){ - /*This should not happen! If GC is not issued by los, then it's not necessary to extend it*/ - if(max_free_for_tuning < tuner->tuning_size) - tuner->tuning_size = max_free_for_tuning; - if(tuner->tuning_size == 0){ + /*If the tuning strategy give a bigger tuning_size than failure size, we just follow the strategy and set noforce.*/ + Boolean doforce = TRUE; + POINTER_SIZE_INT failure_size = lspace_get_failure_size((Lspace*)lspace); + if( (tuner->kind == TRANS_FROM_MOS_TO_LOS) && (!tuner->reverse) && (tuner->tuning_size > failure_size) ) + doforce = FALSE; + + /*If force tune*/ + if( (tuner->force_tune) && (doforce) ){ + + tuner->tuning_size = failure_size; + + /*We should assure that the tuning size is no more than the free space of non_los area*/ + if( gc->committed_heap_size > lspace->committed_heap_size + non_los_live_obj_size ) + max_tuning_size = gc->committed_heap_size - lspace->committed_heap_size - non_los_live_obj_size; + + if(max_tuning_size > max_tune_for_min_non_los) + max_tuning_size = max_tune_for_min_non_los; + + /*Round up to satisfy LOS alloc demand.*/ + tuner->tuning_size = round_up_to_size(tuner->tuning_size, GC_BLOCK_SIZE_BYTES); + max_tuning_size = round_down_to_size(max_tuning_size, GC_BLOCK_SIZE_BYTES); + + /*If the tuning size is too large, we did nothing and wait for the OOM of JVM*/ + /*Fixme: if the heap size is not mx, we can extend the whole heap size*/ + if(tuner->tuning_size > max_tuning_size){ + tuner->tuning_size = round_up_to_size(tuner->tuning_size, SPACE_ALLOC_UNIT); + max_tuning_size = round_down_to_size(max_tuning_size, SPACE_ALLOC_UNIT); + //debug_adjust + assert(max_heap_size_bytes >= gc->committed_heap_size); + POINTER_SIZE_INT extend_heap_size = 0; + POINTER_SIZE_INT potential_max_heap_size = max_tuning_size + max_heap_size_bytes - gc->committed_heap_size; + + //debug_adjust + assert(!(potential_max_heap_size % SPACE_ALLOC_UNIT)); + if(tuner->tuning_size > potential_max_heap_size){ + tuner->tuning_size = 0; tuner->kind = TRANS_NOTHING; - los->move_object = 0; + lspace->move_object = 0; + }else{ + extend_heap_size = tuner->tuning_size - max_tuning_size; + blocked_space_extend(fspace, (unsigned int)extend_heap_size); + gc->committed_heap_size += extend_heap_size; + tuner->kind = TRANS_FROM_MOS_TO_LOS; + lspace->move_object = 0; } - return TRUE; } - /*force tune here!*/ - POINTER_SIZE_INT min_tuning_uped = round_up_to_size(los->failure_size, GC_BLOCK_SIZE_BYTES); - if(min_tuning_uped > max_free_for_tuning){ - tuner->tuning_size = 0; - tuner->kind = TRANS_NOTHING; - los->move_object = 0; - return FALSE; + else + { + tuner->kind = TRANS_FROM_MOS_TO_LOS; + lspace->move_object = 0; } - if(tuner->tuning_size < min_tuning_uped){ - assert(tuner->tuning_size < max_free_for_tuning); - tuner->tuning_size = min_tuning_uped; - return TRUE; - }else/*tuner->tuning_size >= min_tuning_uped*/{ - if(tuner->tuning_size > max_free_for_tuning) - tuner->tuning_size = 
max_free_for_tuning; - return TRUE; - } } - else// if(gc->tuner->kind == TRANS_FROM_LOS_TO_MOS) + /*No force tune, LOS_Extend:*/ + else if(tuner->kind == TRANS_FROM_MOS_TO_LOS) { - POINTER_SIZE_INT los_live_obj_size = 0; - unsigned int collector_num = gc->num_active_collectors; - for(unsigned int i = collector_num; i--;){ - Collector *collector = gc->collectors[i]; - los_live_obj_size += collector->los_live_obj_size; + if (gc->committed_heap_size > lspace->committed_heap_size + non_los_live_obj_size){ + max_tuning_size = gc->committed_heap_size - lspace->committed_heap_size - non_los_live_obj_size; + if(max_tuning_size > max_tune_for_min_non_los) + max_tuning_size = max_tune_for_min_non_los; + if( tuner->tuning_size > max_tuning_size) + tuner->tuning_size = max_tuning_size; + /*Round down so as not to break max_tuning_size*/ + tuner->tuning_size = round_down_to_size(tuner->tuning_size, GC_BLOCK_SIZE_BYTES); + }else{ + tuner->tuning_size = 0; } - los_live_obj_size = round_up_to_size(los_live_obj_size, GC_BLOCK_SIZE_BYTES); - los_live_obj_size += (collector_num << 2 << GC_BLOCK_SHIFT_COUNT); - - Lspace *los = (Lspace*)gc_get_los((GC_Gen*)gc); - Space_Tuner *tuner = gc->tuner; - POINTER_SIZE_INT los_max_shrink_size = 0; - if(los->committed_heap_size > los_live_obj_size) - los_max_shrink_size = los->committed_heap_size - los_live_obj_size; - if(tuner->tuning_size > los_max_shrink_size) - tuner->tuning_size = los_max_shrink_size; - assert(!(tuner->tuning_size % GC_BLOCK_SIZE_BYTES)); - if(tuner->tuning_size == 0){ - tuner->kind = TRANS_NOTHING; - los->move_object = 0; - return TRUE; - }else - return TRUE; } + /*No force tune, LOS Shrink*/ + else + { + if(lspace->committed_heap_size > los_live_obj_size){ + max_tuning_size = lspace->committed_heap_size - los_live_obj_size; + if(max_tuning_size > max_tune_for_min_los) + max_tuning_size = max_tune_for_min_los; + if(tuner->tuning_size > max_tuning_size) + tuner->tuning_size = max_tuning_size; + /*Round down so as not to break max_tuning_size*/ + tuner->tuning_size = round_down_to_size(tuner->tuning_size, GC_BLOCK_SIZE_BYTES); + }else{ + /* this is possible because of the reservation in gc_compute_live_object_size_after_marking*/ + tuner->tuning_size = 0; + } + } + if(tuner->tuning_size == 0){ + tuner->kind = TRANS_NOTHING; + lspace->move_object = 0; + } + return; + } void gc_space_tuner_reset(GC* gc) { + Space_Tuner* tuner = gc->tuner; if( !gc_match_kind(gc, MINOR_COLLECTION)){ - Space_Tuner* tuner = gc->tuner; - POINTER_SIZE_INT old_slos = tuner->speed_los; - POINTER_SIZE_INT old_smos = tuner->speed_mos; - memset(tuner, 0, sizeof(Space_Tuner)); - tuner->old_speed_los = old_slos; - tuner->old_speed_mos = old_smos; + /*Clear the fields every major collection except the wast area statistic.*/ + tuner->tuning_size = 0; + tuner->interim_blocks = NULL; + tuner->need_tune = FALSE; + tuner->force_tune = FALSE; + + tuner->old_speed_los = tuner->speed_los; + tuner->old_speed_mos = tuner->speed_mos; + tuner->speed_los = 0; + tuner->speed_mos = 0; + + tuner->current_dw = 0; + tuner->current_ds = 0; + + tuner->threshold_waste = 0; + tuner->min_tuning_size = 0; + /*Reset the sum up of wast area size only if los is changed.*/ + if(tuner->kind != TRANS_NOTHING){ + tuner->wast_los = 0; + tuner->wast_mos = 0; + } + tuner->kind = TRANS_NOTHING; } + + return; + } void gc_space_tuner_initialize(GC* gc) @@ -343,3 +334,61 @@ tuner->tuning_size = 0; gc->tuner = tuner; } + +/*Malloc and initialize fake blocks for LOS_Shrink*/ +void 
gc_space_tuner_init_fake_blocks_for_los_shrink(GC* gc)
+{
+  Blocked_Space* mspace = (Blocked_Space*)gc_get_mos((GC_Gen*)gc);
+  Space_Tuner* tuner = gc->tuner;
+  Block_Header* mos_first_block = (Block_Header*)&mspace->blocks[0];
+  unsigned int trans_blocks = (unsigned int)(tuner->tuning_size >> GC_BLOCK_SHIFT_COUNT);
+  tuner->interim_blocks = (Block_Header*)STD_MALLOC(trans_blocks * sizeof(Block_Header));
+  Block_Header* los_trans_fake_blocks = tuner->interim_blocks;
+  memset(los_trans_fake_blocks, 0, trans_blocks * sizeof(Block_Header));
+  void* trans_base = (void*)((POINTER_SIZE_INT)mos_first_block - tuner->tuning_size);
+  unsigned int start_idx = GC_BLOCK_INDEX_FROM(gc->heap_start, trans_base);
+  Block_Header* last_block = los_trans_fake_blocks;
+
+  for(unsigned int i = 0; i < trans_blocks; i ++){
+    Block_Header* curr_block = &los_trans_fake_blocks[i];
+    curr_block->block_idx = start_idx + i;
+    curr_block->base = (void*)((POINTER_SIZE_INT)trans_base + i * GC_BLOCK_SIZE_BYTES + GC_BLOCK_HEADER_SIZE_BYTES);
+    curr_block->free = curr_block->base ;
+    curr_block->new_free = curr_block->free;
+    curr_block->ceiling = (void*)((POINTER_SIZE_INT)curr_block->base + GC_BLOCK_BODY_SIZE_BYTES);
+    curr_block->status = BLOCK_COMPACTED;
+#ifdef USE_32BITS_HASHCODE
+    curr_block->hashcode_buf = hashcode_buf_create();
+#endif
+    last_block->next = curr_block;
+    last_block = curr_block;
+  }
+  last_block->next = mos_first_block;
+}
+
+/*Copy the fake blocks into real blocks and reconnect these new blocks into the main list of mspace.
+ *Free the fake blocks. The information of mspace is not updated yet.
+ */
+void gc_space_tuner_release_fake_blocks_for_los_shrink(GC* gc)
+{
+  Space_Tuner *tuner = gc->tuner;
+  Blocked_Space* mspace = (Blocked_Space*)gc_get_mos((GC_Gen*)gc);
+
+  POINTER_SIZE_INT tune_size = tuner->tuning_size;
+  unsigned int tune_blocks = (unsigned int)(tune_size >> GC_BLOCK_SHIFT_COUNT);
+
+  Block* blocks = (Block*)((POINTER_SIZE_INT)mspace->blocks - tune_size);
+  Block_Header* last_real_block = (Block_Header*)blocks;
+  unsigned int i;
+  for(i=0; i < tune_blocks; i++){
+    Block_Header* real_block = (Block_Header*)&(blocks[i]);
+    Block_Header* fake_block = &tuner->interim_blocks[i];
+    memcpy((void*)real_block, (void*)fake_block, sizeof(Block_Header));
+    last_real_block->next = real_block;
+    last_real_block = real_block;
+  }
+  last_real_block->next = (Block_Header*)mspace->blocks;
+  STD_FREE(tuner->interim_blocks);
+  return;
+}
+
Index: src/common/space_tuner.h
===================================================================
--- src/common/space_tuner.h (revision 541183)
+++ src/common/space_tuner.h (working copy)
@@ -27,6 +27,8 @@
 #define GC_LOS_MIN_VARY_SIZE ( 2 * MB )
 //#define GC_FIXED_SIZE_TUNER
 
+extern POINTER_SIZE_INT max_heap_size_bytes;
+
 //For_LOS_extend
 enum Transform_Kind {
   TRANS_NOTHING = 0,
@@ -36,19 +38,22 @@
 typedef struct Space_Tuner{
   Transform_Kind kind;
-
+  /*This flag is set if the los tuning status changes in the process of tuning*/
+  Boolean reverse;
   POINTER_SIZE_INT tuning_size;
-  POINTER_SIZE_INT conservative_tuning_size;
-  POINTER_SIZE_INT least_tuning_size;
   /*Used for LOS_Shrink*/
   Block_Header* interim_blocks;
+  /*This flag is set when the tuning strategy decides to tune the los size,
+   *i.e. wasted memory is greater than threshold_waste.
+ */ Boolean need_tune; + /*This flag is set if gc is caused by los alloc failure.*/ Boolean force_tune; - /*LOS alloc speed sciecne last los variation*/ + /*LOS alloc speed since last major*/ POINTER_SIZE_INT speed_los; POINTER_SIZE_INT old_speed_los; - /*MOS alloc speed sciecne last los variation*/ + /*MOS alloc speed since last major*/ POINTER_SIZE_INT speed_mos; POINTER_SIZE_INT old_speed_mos; @@ -61,22 +66,17 @@ /*NOS survive size of last minor, this could be the least meaningful space unit when talking about tuning.*/ POINTER_SIZE_INT current_ds; - /*Threshold for deta wast*/ - POINTER_SIZE_INT threshold; + /*Threshold for deta waste*/ + POINTER_SIZE_INT threshold_waste; /*Minimun tuning size for los variation*/ POINTER_SIZE_INT min_tuning_size; - - /*Cost of normal major compaction*/ - unsigned int fast_cost; - /*Cost of major compaction when changing LOS size*/ - unsigned int slow_cost; }Space_Tuner; -void gc_space_tune_prepare(GC* gc, unsigned int cause); -void gc_space_tune_before_gc(GC* gc, unsigned int cause); -void gc_space_tune_before_gc_fixed_size(GC* gc, unsigned int cause); -Boolean gc_space_retune(GC *gc); +void gc_compute_space_tune_size_before_marking(GC* gc, unsigned int cause); +void gc_compute_space_tune_size_after_marking(GC *gc); void gc_space_tuner_reset(GC* gc); void gc_space_tuner_initialize(GC* gc); +void gc_space_tuner_init_fake_blocks_for_los_shrink(GC* gc); +void gc_space_tuner_release_fake_blocks_for_los_shrink(GC* gc); #endif /* _SPACE_TUNER_H_ */ Index: src/finalizer_weakref/finalizer_weakref.cpp =================================================================== --- src/finalizer_weakref/finalizer_weakref.cpp (revision 541183) +++ src/finalizer_weakref/finalizer_weakref.cpp (working copy) @@ -125,7 +125,7 @@ for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){ REF* p_ref = (REF *)iter; if(IS_FALLBACK_COMPACTION) - fallback_update_fw_ref(p_ref); // in case that this collection is FALLBACK_COLLECTION + fallback_update_fw_ref(p_ref); // in case that this collection is FALLBACK_COLLECTION Partial_Reveal_Object *p_obj = read_slot(p_ref); if(!p_obj) continue; @@ -281,7 +281,7 @@ assert(p_obj); REF* p_referent_field = obj_get_referent_field(p_obj); if(IS_FALLBACK_COMPACTION) - fallback_update_fw_ref(p_referent_field); + fallback_update_fw_ref(p_referent_field); Partial_Reveal_Object *p_referent = read_slot(p_referent_field); if(!p_referent){ // referent field has been cleared @@ -598,7 +598,7 @@ assert(p_obj); REF* p_referent_field = obj_get_referent_field(p_obj); if(IS_FALLBACK_COMPACTION) - fallback_update_fw_ref(p_referent_field); + fallback_update_fw_ref(p_referent_field); Partial_Reveal_Object* p_referent = read_slot(p_referent_field); if(!p_referent){ // referent field has been cleared Index: src/gen/gen.cpp =================================================================== --- src/gen/gen.cpp (revision 541183) +++ src/gen/gen.cpp (working copy) @@ -74,14 +74,14 @@ min_nos_size_bytes *= gc_gen->_num_processors; - POINTER_SIZE_INT min_nos_size_threshold = max_heap_size>>5; + POINTER_SIZE_INT min_nos_size_threshold = min_heap_size>>5; if(min_nos_size_bytes > min_nos_size_threshold){ min_nos_size_bytes = round_down_to_size(min_nos_size_threshold,SPACE_ALLOC_UNIT); } if( MIN_NOS_SIZE ) min_nos_size_bytes = MIN_NOS_SIZE; - POINTER_SIZE_INT los_size = max_heap_size >> 7; + POINTER_SIZE_INT los_size = min_heap_size >> 7; if(INIT_LOS_SIZE) los_size = INIT_LOS_SIZE; if(los_size < min_los_size_bytes ) los_size = 
min_los_size_bytes ;
 
@@ -95,25 +95,25 @@
   POINTER_SIZE_INT mos_reserve_size, mos_commit_size;
   POINTER_SIZE_INT los_mos_size;
 
-  /*Give GC a hint of gc survive ratio.*/
+  /*Give GC a hint of gc survive ratio. And the last_survive_ratio field is used in heap size adjustment*/
   gc_gen->survive_ratio = 0.2f;
 
   if(NOS_SIZE){
-    los_mos_size = max_heap_size - NOS_SIZE;
+    los_mos_size = min_heap_size - NOS_SIZE;
     mos_reserve_size = los_mos_size - los_size;
     nos_commit_size = NOS_SIZE;
     nos_reserve_size = NOS_SIZE;
   }else{
-    los_mos_size = max_heap_size;
+    los_mos_size = min_heap_size;
     mos_reserve_size = los_mos_size - los_size;
-    nos_commit_size = (POINTER_SIZE_INT)(((float)(max_heap_size - los_size))/(1.0f + gc_gen->survive_ratio));
+    nos_commit_size = (POINTER_SIZE_INT)(((float)(min_heap_size - los_size))/(1.0f + gc_gen->survive_ratio));
     nos_reserve_size = mos_reserve_size;
   }
 
   nos_commit_size = round_down_to_size(nos_commit_size, SPACE_ALLOC_UNIT);
-  mos_commit_size = max_heap_size - los_size - nos_commit_size;
+  mos_commit_size = min_heap_size - los_size - nos_commit_size;
 
   /* allocate memory for gc_gen */
   void* reserved_base;
@@ -145,8 +145,8 @@
     }
     reserved_base = vm_reserve_mem(los_mos_base, los_mos_size);
   }
-
-#else /* STATIC_NOS_MAPPING */
+/* NON_STATIC_NOS_MAPPING */
+#else
 
   reserved_base = NULL;
   if(large_page_hint){
@@ -160,18 +160,29 @@
     }
   }
 
-  if(reserved_base==NULL){
+  if(reserved_base == NULL){
+    Boolean max_size_reduced = 0;
     reserved_base = vm_reserve_mem((void*)0, max_heap_size + SPACE_ALLOC_UNIT);
+    while( !reserved_base ){
+      max_size_reduced = 1;
+      max_heap_size -= SPACE_ALLOC_UNIT;
+      reserved_base = vm_reserve_mem((void*)0, max_heap_size + SPACE_ALLOC_UNIT);
+    }
+
+    if(max_size_reduced){
+      printf("Max heap size: can't be reserved, reduced to %d MB according to virtual memory limitation.\n", max_heap_size/MB);
+    }
+
+    if(max_heap_size < min_heap_size){
+      printf("Heap size: invalid, please specify a smaller \"ms\" parameter!\n");
+      exit(0);
+    }
     reserved_base = (void*)round_up_to_size((POINTER_SIZE_INT)reserved_base, SPACE_ALLOC_UNIT);
     assert((POINTER_SIZE_INT)reserved_base%SPACE_ALLOC_UNIT == 0);
-
-    while( !reserved_base ){
-      printf("Non-static NOS mapping: Can't allocate memory at address %x for specified size %x", reserved_base, max_heap_size);
-      exit(0);
-    }
   }
 
   reserved_end = (void*)((POINTER_SIZE_INT)reserved_base + max_heap_size);
+
   /* compute first time nos_boundary */
   nos_base = (void*)((POINTER_SIZE_INT)reserved_base + mos_commit_size + los_size);
@@ -208,9 +219,6 @@
   nos->collect_algorithm = MINOR_ALGO;
   mos->collect_algorithm = MAJOR_ALGO;
 
-  /*Give GC a hint of space survive ratio.*/
-//  nos->survive_ratio = gc_gen->survive_ratio;
-//  mos->survive_ratio = gc_gen->survive_ratio;
   gc_space_tuner_initialize((GC*)gc_gen);
 
   gc_gen_mode_adapt_init(gc_gen);
@@ -219,7 +227,6 @@
                               space_committed_size((Space*)gc_gen->mos) +
                               space_committed_size((Space*)gc_gen->los);
-
   set_native_finalizer_thread_flag(!IGNORE_FINREF);
   set_native_ref_enqueue_thread_flag(!IGNORE_FINREF);
 
@@ -271,12 +278,6 @@
 unsigned int gc_get_processor_num(GC_Gen* gc){ return gc->_num_processors;}
 
-
-static Boolean major_collection_needed(GC_Gen* gc)
-{
-  return space_used_memory_size((Blocked_Space*)gc->nos)*gc->survive_ratio > (space_free_memory_size((Blocked_Space*)gc->mos));
-}
-
 Boolean FORCE_FULL_COMPACT = FALSE;
 
 void gc_decide_collection_kind(GC_Gen* gc, unsigned int cause)
@@ -356,6 +357,69 @@
   return;
 }
 
+void gc_gen_adjust_heap_size(GC_Gen* gc, int64 pause_time)
+{
+  if(gc_match_kind((GC*)gc, MINOR_COLLECTION)) return;
+  if(gc->committed_heap_size == max_heap_size_bytes) return;
+
+  Mspace* mos = gc->mos;
+  Fspace* nos = gc->nos;
+  Lspace* los = gc->los;
+  /*We cannot tolerate the survive ratio being greater than the threshold in two
+   *consecutive major collections; if that happens, we must adjust the heap size.
+   */
+  static unsigned int tolerate = 0;
+
+  POINTER_SIZE_INT heap_total_size = los->committed_heap_size + mos->committed_heap_size + nos->committed_heap_size;
+  assert(heap_total_size == gc->committed_heap_size);
+
+  assert(nos->surviving_size == 0);
+  POINTER_SIZE_INT heap_surviving_size = mos->surviving_size + los->surviving_size;
+  assert(heap_total_size > heap_surviving_size);
+
+  float heap_survive_ratio = (float)heap_surviving_size / (float)heap_total_size;
+  float threshold_survive_ratio = 0.3f;
+  float regular_survive_ratio = 0.125f;
+
+  POINTER_SIZE_INT new_heap_total_size = 0;
+  POINTER_SIZE_INT adjust_size = 0;
+
+  if(heap_survive_ratio < threshold_survive_ratio) return;
+
+  if(++tolerate < 2) return;
+  tolerate = 0;
+
+  new_heap_total_size = (POINTER_SIZE_INT)((float)heap_surviving_size / regular_survive_ratio);
+  new_heap_total_size = round_down_to_size(new_heap_total_size, SPACE_ALLOC_UNIT);
+
+  if(new_heap_total_size <= heap_total_size) return;
+  if(new_heap_total_size > max_heap_size_bytes - LOS_HEAD_RESERVE_FOR_HEAP_NULL)
+    new_heap_total_size = max_heap_size_bytes - LOS_HEAD_RESERVE_FOR_HEAP_NULL;
+
+  adjust_size = new_heap_total_size - heap_total_size;
+  assert( !(adjust_size % SPACE_ALLOC_UNIT) );
+  if(adjust_size == 0) return;
+
+#ifdef STATIC_NOS_MAPPING
+  /*Fixme: Static mapping has other bugs to be fixed first.*/
+  assert(!large_page_hint);
+  return;
+#else
+  assert(!large_page_hint);
+  POINTER_SIZE_INT old_nos_size = nos->committed_heap_size;
+  blocked_space_extend(nos, (unsigned int)adjust_size);
+  nos->survive_ratio = (float)old_nos_size * nos->survive_ratio / (float)nos->committed_heap_size;
+  /*Fixme: gc fields should be modified according to the nos extension*/
+  gc->committed_heap_size += adjust_size;
+  //debug_adjust
+  assert(gc->committed_heap_size == los->committed_heap_size + mos->committed_heap_size + nos->committed_heap_size);
+#endif
+
+//  printf("heap_size: %x MB , heap_survive_ratio: %f\n", gc->committed_heap_size/MB, heap_survive_ratio);
+
+}
+
 Boolean IS_FALLBACK_COMPACTION = FALSE; /* only for debugging, don't use it.
*/ void gc_gen_reclaim_heap(GC_Gen* gc) @@ -372,41 +436,31 @@ if(gc_match_kind((GC*)gc, MINOR_COLLECTION)){ /* FIXME:: move_object is only useful for nongen_slide_copy */ gc->mos->move_object = FALSE; - fspace_collection(gc->nos); - gc->mos->move_object = TRUE; - - }else{ - /* process mos and nos together in one compaction */ mspace_collection(gc->mos); /* fspace collection is included */ lspace_collection(gc->los); - } if(gc->collect_result == FALSE && gc_match_kind((GC*)gc, MINOR_COLLECTION)){ - if(gc_is_gen_mode()) - gc_clear_remset((GC*)gc); + if(gc_is_gen_mode()) gc_clear_remset((GC*)gc); /* runout mspace in minor collection */ assert(mspace->free_block_idx == mspace->ceiling_block_idx + 1); mspace->num_used_blocks = mspace->num_managed_blocks; IS_FALLBACK_COMPACTION = TRUE; - gc_reset_collect_result((GC*)gc); gc->collect_kind = FALLBACK_COLLECTION; - if(verify_live_heap) - event_gc_collect_kind_changed((GC*)gc); + if(verify_live_heap) event_gc_collect_kind_changed((GC*)gc); mspace_collection(gc->mos); /* fspace collection is included */ lspace_collection(gc->los); IS_FALLBACK_COMPACTION = FALSE; - } if( gc->collect_result == FALSE){ @@ -423,7 +477,6 @@ #endif assert(!gc->los->move_object); - return; } Index: src/gen/gen.h =================================================================== --- src/gen/gen.h (revision 541183) +++ src/gen/gen.h (working copy) @@ -169,6 +169,8 @@ void gc_gen_reclaim_heap(GC_Gen* gc); void gc_gen_assign_free_area_to_mutators(GC_Gen* gc); + +void gc_gen_adjust_heap_size(GC_Gen* gc, int64 pause_time); void gc_gen_mode_adapt_init(GC_Gen *gc); Index: src/gen/gen_adapt.cpp =================================================================== --- src/gen/gen_adapt.cpp (revision 541183) +++ src/gen/gen_adapt.cpp (working copy) @@ -25,11 +25,9 @@ #define NOS_COPY_RESERVE_DELTA (GC_BLOCK_SIZE_BYTES<<1) /*Tune this number in case that MOS could be too small, so as to avoid or put off fall back.*/ #define GC_MOS_MIN_EXTRA_REMAIN_SIZE (36*MB) -/*Switch on this MACRO when we want lspace->survive_ratio to be sensitive.*/ -//#define NOS_SURVIVE_RATIO_SENSITIVE struct Mspace; -void mspace_set_expected_threshold(Mspace* mspace, POINTER_SIZE_INT threshold); +void mspace_set_expected_threshold_ratio(Mspace* mspace, float threshold_ratio); static float Tslow = 0.0f; static POINTER_SIZE_INT SMax = 0; @@ -187,7 +185,7 @@ return; } -void mspace_set_expected_threshold(Mspace* mspace, POINTER_SIZE_INT threshold); +void mspace_set_expected_threshold_ratio(Mspace* mspace, float threshold_ratio); static void gc_decide_next_collect(GC_Gen* gc, int64 pause_time) { @@ -200,7 +198,8 @@ POINTER_SIZE_INT nos_free_size = space_free_memory_size(fspace); POINTER_SIZE_INT total_free_size = mos_free_size + nos_free_size; if(!gc_match_kind((GC*)gc, MINOR_COLLECTION)) gc->force_gen_mode = FALSE; - if(!gc->force_gen_mode){ + if(!gc->force_gen_mode){ + /*For major collection:*/ if(!gc_match_kind((GC*)gc, MINOR_COLLECTION)){ mspace->time_collections += pause_time; @@ -213,48 +212,44 @@ /*If major is caused by LOS, or collection kind is EXTEND_COLLECTION, all survive ratio is not updated.*/ if((gc->cause != GC_CAUSE_LOS_IS_FULL) && (!gc_match_kind((GC*)gc, EXTEND_COLLECTION))){ - POINTER_SIZE_INT major_survive_size = space_committed_size((Space*)mspace) - mos_free_size; - survive_ratio = (float)major_survive_size/(float)space_committed_size((Space*)mspace); + POINTER_SIZE_INT major_surviving_size = space_committed_size((Space*)mspace) - mos_free_size; + survive_ratio = 
(float)major_surviving_size/(float)space_committed_size((Space*)mspace); mspace->survive_ratio = survive_ratio; } - /*For LOS_Shrink:*/ - if(gc->tuner->kind != TRANS_NOTHING){ - POINTER_SIZE_INT mspace_size_threshold = (space_committed_size((Space*)mspace) + space_committed_size((Space*)fspace)) >> 1; - mspace_set_expected_threshold((Mspace *)mspace, mspace_size_threshold ); - } - #ifdef NOS_SURVIVE_RATIO_SENSITIVE - /*If this major is caused by fall back compaction, - we must give fspace->survive_ratio a conservative and reasonable number to avoid next fall back.*/ - //fspace->survive_ratio = mspace->survive_ratio; - /*In fallback compaction, the survive_ratio of mspace must be 1.*/ - if(gc_match_kind((GC*)gc, FALLBACK_COLLECTION)) - fspace->survive_ratio = 1; - - #endif + /*If there is no minor collection at all, we must give mspace expected threshold a reasonable value.*/ + if((gc->tuner->kind != TRANS_NOTHING) && (fspace->num_collections == 0)) + mspace_set_expected_threshold_ratio((Mspace *)mspace, 0.5f); + /*If this major is caused by fall back compaction, we must give fspace->survive_ratio + *a conservative and reasonable number to avoid next fall back. + *In fallback compaction, the survive_ratio of mspace must be 1.*/ + if(gc_match_kind((GC*)gc, FALLBACK_COLLECTION)) fspace->survive_ratio = 1; + /*For minor collection:*/ }else{ /*Give a hint to mini_free_ratio. */ if(fspace->num_collections == 1){ /*fixme: This is only set for tuning the first warehouse!*/ Tslow = pause_time / gc->survive_ratio; - SMax = (POINTER_SIZE_INT)((float)gc->committed_heap_size * ( 1 - gc->survive_ratio )); + SMax = (POINTER_SIZE_INT)((float)(gc->committed_heap_size - gc->los->committed_heap_size) * ( 1 - gc->survive_ratio )); last_total_free_size = gc->committed_heap_size - gc->los->committed_heap_size; } fspace->time_collections += pause_time; POINTER_SIZE_INT free_size_threshold; - - POINTER_SIZE_INT minor_survive_size = last_total_free_size - total_free_size; + + POINTER_SIZE_INT minor_surviving_size = last_total_free_size - total_free_size; float k = Tslow * fspace->num_collections/fspace->time_collections; - float m = ((float)minor_survive_size)*1.0f/((float)(SMax - GC_MOS_MIN_EXTRA_REMAIN_SIZE )); + float m = ((float)minor_surviving_size)*1.0f/((float)(SMax - GC_MOS_MIN_EXTRA_REMAIN_SIZE )); float free_ratio_threshold = mini_free_ratio(k, m); - free_size_threshold = (POINTER_SIZE_INT)(free_ratio_threshold * (SMax - GC_MOS_MIN_EXTRA_REMAIN_SIZE ) + GC_MOS_MIN_EXTRA_REMAIN_SIZE ); + + if(SMax > GC_MOS_MIN_EXTRA_REMAIN_SIZE) + free_size_threshold = (POINTER_SIZE_INT)(free_ratio_threshold * (SMax - GC_MOS_MIN_EXTRA_REMAIN_SIZE ) + GC_MOS_MIN_EXTRA_REMAIN_SIZE ); + else + free_size_threshold = (POINTER_SIZE_INT)(free_ratio_threshold * SMax); - if ((mos_free_size + nos_free_size)< free_size_threshold) { - gc->force_major_collect = TRUE; - } + if ((mos_free_size + nos_free_size)< free_size_threshold) gc->force_major_collect = TRUE; - survive_ratio = (float)minor_survive_size/(float)space_committed_size((Space*)fspace); + survive_ratio = (float)minor_surviving_size/(float)space_committed_size((Space*)fspace); fspace->survive_ratio = survive_ratio; /*For LOS_Adaptive*/ POINTER_SIZE_INT mspace_committed_size = space_committed_size((Space*)mspace); @@ -262,12 +257,12 @@ if(mspace_committed_size + fspace_committed_size > free_size_threshold){ POINTER_SIZE_INT mspace_size_threshold; mspace_size_threshold = mspace_committed_size + fspace_committed_size - free_size_threshold; - 
mspace_set_expected_threshold((Mspace *)mspace, mspace_size_threshold ); + float mspace_size_threshold_ratio = (float)mspace_size_threshold / (mspace_committed_size + fspace_committed_size); + mspace_set_expected_threshold_ratio((Mspace *)mspace, mspace_size_threshold_ratio); } } - - gc->survive_ratio = (gc->survive_ratio + survive_ratio)/2.0f; + gc->survive_ratio = (gc->survive_ratio + survive_ratio)/2.0f; last_total_free_size = total_free_size; } @@ -295,7 +290,9 @@ #ifdef STATIC_NOS_MAPPING total_size = max_heap_size_bytes - lspace->committed_heap_size; #else - total_size = (POINTER_SIZE_INT)gc->heap_end - (POINTER_SIZE_INT)mspace->heap_start; + POINTER_SIZE_INT curr_heap_commit_end = + (POINTER_SIZE_INT)gc->heap_start + LOS_HEAD_RESERVE_FOR_HEAP_NULL + gc->committed_heap_size; + total_size = curr_heap_commit_end - (POINTER_SIZE_INT)mspace->heap_start; #endif POINTER_SIZE_INT total_free = total_size - used_mos_size; @@ -306,16 +303,15 @@ POINTER_SIZE_INT nos_reserve_size; nos_reserve_size = (POINTER_SIZE_INT)(((float)total_free)/(1.0f + fspace->survive_ratio)); /*NOS should not be zero, if there is only one block in non-los, i.e. in the former if sentence, - if total_free = GC_BLOCK_SIZE_BYTES, then the computed nos_reserve_size is between zero - and GC_BLOCK_SIZE_BYTES. In this case, we assign this block to NOS*/ + *if total_free = GC_BLOCK_SIZE_BYTES, then the computed nos_reserve_size is between zero + *and GC_BLOCK_SIZE_BYTES. In this case, we assign this block to NOS*/ if(nos_reserve_size <= GC_BLOCK_SIZE_BYTES) nos_reserve_size = GC_BLOCK_SIZE_BYTES; #ifdef STATIC_NOS_MAPPING if(nos_reserve_size > fspace->reserved_heap_size) nos_reserve_size = fspace->reserved_heap_size; #endif - //To reserve some MOS space to avoid fallback situation. - //But we need ensure nos has at least one block - //if(new_nos_size > GC_MOS_MIN_EXTRA_REMAIN_SIZE) new_nos_size -= GC_MOS_MIN_EXTRA_REMAIN_SIZE ; + /*To reserve some MOS space to avoid fallback situation. 
+ *But we need ensure nos has at least one block */ POINTER_SIZE_INT reserve_in_mos = GC_MOS_MIN_EXTRA_REMAIN_SIZE; while (reserve_in_mos >= GC_BLOCK_SIZE_BYTES){ if(nos_reserve_size >= reserve_in_mos + GC_BLOCK_SIZE_BYTES){ @@ -328,7 +324,7 @@ new_nos_size = round_down_to_size((POINTER_SIZE_INT)nos_reserve_size, GC_BLOCK_SIZE_BYTES); if(gc->force_gen_mode){ - new_nos_size = min_nos_size_bytes;//round_down_to_size((unsigned int)(gc->gen_minor_adaptor->adapt_nos_size), SPACE_ALLOC_UNIT); + new_nos_size = min_nos_size_bytes; } new_mos_size = total_size - new_nos_size; @@ -342,7 +338,6 @@ } #ifndef STATIC_NOS_MAPPING - void gc_gen_adapt(GC_Gen* gc, int64 pause_time) { gc_decide_next_collect(gc, pause_time); @@ -366,9 +361,10 @@ return; /* below are ajustment */ + POINTER_SIZE_INT curr_heap_commit_end = + (POINTER_SIZE_INT)gc->heap_start + LOS_HEAD_RESERVE_FOR_HEAP_NULL + gc->committed_heap_size; + nos_boundary = (void*)(curr_heap_commit_end - new_nos_size); - nos_boundary = (void*)((POINTER_SIZE_INT)gc->heap_end - new_nos_size); - fspace->heap_start = nos_boundary; fspace->blocks = (Block*)nos_boundary; fspace->committed_heap_size = new_nos_size; @@ -394,8 +390,8 @@ return; } -#else /* ifndef STATIC_NOS_MAPPING */ - +/* ifdef STATIC_NOS_MAPPING */ +#else void gc_gen_adapt(GC_Gen* gc, int64 pause_time) { gc_decide_next_collect(gc, pause_time); Index: src/mark_compact/fallback_mark_scan.cpp =================================================================== --- src/mark_compact/fallback_mark_scan.cpp (revision 541183) +++ src/mark_compact/fallback_mark_scan.cpp (working copy) @@ -96,7 +96,7 @@ /* for marking phase termination detection */ static volatile unsigned int num_finished_collectors = 0; -void fallback_mark_scan_heap(Collector* collector) +void mark_scan_heap_for_fallback(Collector* collector) { GC* gc = collector->gc; GC_Metadata* metadata = gc->metadata; Index: src/mark_compact/los_extention_mark_scan.cpp =================================================================== --- src/mark_compact/los_extention_mark_scan.cpp (revision 541183) +++ src/mark_compact/los_extention_mark_scan.cpp (working copy) @@ -108,7 +108,7 @@ So we abondoned this design. 
We no longer use the repset to remember repointed slots */ -void los_adaptation_mark_scan_heap(Collector *collector) +void mark_scan_heap_for_space_tune(Collector *collector) { GC* gc = collector->gc; GC_Metadata* metadata = gc->metadata; @@ -202,4 +202,3 @@ } - Index: src/mark_compact/mspace.cpp =================================================================== --- src/mark_compact/mspace.cpp (revision 541183) +++ src/mark_compact/mspace.cpp (working copy) @@ -75,8 +75,7 @@ mspace->move_object = TRUE; mspace->gc = gc; - /*For_LOS adaptive: The threshold is initiated by half of MOS + NOS commit size.*/ - mspace->expected_threshold = (unsigned int)( ( (float)mspace->committed_heap_size * (1.f + 1.f / gc->survive_ratio) ) * 0.5f ); + mspace->expected_threshold_ratio = 0.5f; gc_set_mos((GC_Gen*)gc, (Space*)mspace); @@ -96,22 +95,13 @@ mspace->block_iterator = (Block_Header*)&mspace->blocks[mspace->free_block_idx - mspace->first_block_idx]; } -//For_LOS_extend #include "../common/space_tuner.h" void mspace_block_iterator_init(Mspace* mspace) { - GC* gc = mspace->gc; - if(gc->tuner->kind == TRANS_FROM_MOS_TO_LOS){ - unsigned int tuning_blocks = (unsigned int)((mspace->gc)->tuner->tuning_size >> GC_BLOCK_SHIFT_COUNT); - mspace->block_iterator = (Block_Header*)&(mspace->blocks[tuning_blocks]); - return; - } - mspace->block_iterator = (Block_Header*)mspace->blocks; return; } - Block_Header* mspace_block_iterator_get(Mspace* mspace) { return (Block_Header*)mspace->block_iterator; @@ -165,16 +155,15 @@ } /*For_LOS adaptive.*/ -void mspace_set_expected_threshold(Mspace* mspace, POINTER_SIZE_INT threshold) +void mspace_set_expected_threshold_ratio(Mspace* mspace, float threshold_ratio) { - mspace->expected_threshold = threshold; + mspace->expected_threshold_ratio = threshold_ratio; return; } -POINTER_SIZE_INT mspace_get_expected_threshold(Mspace* mspace) +float mspace_get_expected_threshold_ratio(Mspace* mspace) { - return mspace->expected_threshold; + return mspace->expected_threshold_ratio; } - Index: src/mark_compact/mspace.h =================================================================== --- src/mark_compact/mspace.h (revision 541183) +++ src/mark_compact/mspace.h (working copy) @@ -56,7 +56,7 @@ volatile Block_Header* block_iterator; /*Threshold computed by NOS adaptive*/ - POINTER_SIZE_INT expected_threshold; + float expected_threshold_ratio; }Mspace; void mspace_initialize(GC* gc, void* reserved_base, POINTER_SIZE_INT mspace_size, POINTER_SIZE_INT commit_size); @@ -72,6 +72,6 @@ void mspace_fix_after_copy_nursery(Collector* collector, Mspace* mspace); -void mspace_set_expected_threshold(Mspace* mspace, unsigned int threshold); +void mspace_set_expected_threshold_ratio(Mspace* mspace, float threshold_ratio); #endif //#ifdef _MSC_SPACE_H_ Index: src/mark_compact/mspace_collect_compact.cpp =================================================================== --- src/mark_compact/mspace_collect_compact.cpp (revision 541183) +++ src/mark_compact/mspace_collect_compact.cpp (working copy) @@ -29,72 +29,30 @@ static volatile Block_Header* next_block_for_compact; static volatile Block_Header* next_block_for_target; -void mspace_update_info_for_los_extension(Mspace *mspace) -{ - Space_Tuner *tuner = mspace->gc->tuner; - - if(tuner->kind != TRANS_FROM_MOS_TO_LOS) return; - - POINTER_SIZE_INT tune_size = tuner->tuning_size; - unsigned int tune_blocks = (unsigned int)(tune_size >> GC_BLOCK_SHIFT_COUNT); - -#ifdef USE_32BITS_HASHCODE - unsigned int index = 0; - for(; index < tune_blocks; index++){ - 
Block* curr_block = &mspace->blocks[index]; - hashcode_buf_destory(((Block_Header*)curr_block)->hashcode_buf); - } -#endif - - mspace->blocks = &mspace->blocks[tune_blocks]; - mspace->heap_start = mspace->blocks; - mspace->committed_heap_size -= tune_size; - mspace->reserved_heap_size -= tune_size; - mspace->first_block_idx += tune_blocks; - mspace->num_managed_blocks -= tune_blocks; - mspace->num_total_blocks -= tune_blocks; - if(mspace->num_used_blocks > tune_blocks) - mspace->num_used_blocks -= tune_blocks; - else - mspace->num_used_blocks = 0; -} - -void mspace_update_info_for_los_shrink(Mspace* mspace) +void mspace_update_info_after_space_tuning(Mspace* mspace) { Space_Tuner *tuner = mspace->gc->tuner; - if(tuner->kind != TRANS_FROM_LOS_TO_MOS) return; - POINTER_SIZE_INT tune_size = tuner->tuning_size; unsigned int tune_blocks = (unsigned int)(tune_size >> GC_BLOCK_SHIFT_COUNT); - - /*Update mspace infomation.*/ - mspace->blocks = (Block*)((POINTER_SIZE_INT)mspace->blocks - tune_size); - mspace->heap_start = (void*)(mspace->blocks); - mspace->committed_heap_size += tune_size; - mspace->first_block_idx -= tune_blocks; - mspace->num_managed_blocks += tune_blocks; - mspace->num_total_blocks += tune_blocks; -} - -/*Copy the fake blocks into real blocks, reconnect these new block into main list of mspace.*/ -void mspace_settle_fake_blocks_for_los_shrink(Mspace* mspace) -{ - Space_Tuner *tuner = mspace->gc->tuner; - if(tuner->kind != TRANS_FROM_LOS_TO_MOS) return; - - POINTER_SIZE_INT tune_size = tuner->tuning_size; - unsigned int tune_blocks = (unsigned int)(tune_size >> GC_BLOCK_SHIFT_COUNT); - - Block* blocks = (Block*)((POINTER_SIZE_INT)mspace->blocks - tune_size); - unsigned int i; - for(i=0; i < tune_blocks; i++){ - Block_Header* real_block = (Block_Header*)&(blocks[i]); - Block_Header* fake_block = &tuner->interim_blocks[i]; - memcpy((void*)real_block, (void*)fake_block, sizeof(Block_Header)); - real_block->next = (Block_Header*)((POINTER_SIZE_INT)real_block + GC_BLOCK_SIZE_BYTES); + + if(tuner->kind == TRANS_FROM_MOS_TO_LOS){ + mspace->blocks = &mspace->blocks[tune_blocks]; + mspace->heap_start = mspace->blocks; + mspace->committed_heap_size -= tune_size; + mspace->reserved_heap_size -= tune_size; + mspace->first_block_idx += tune_blocks; + mspace->num_managed_blocks -= tune_blocks; + mspace->num_total_blocks -= tune_blocks; + if(mspace->num_used_blocks > tune_blocks) mspace->num_used_blocks -= tune_blocks; + else mspace->num_used_blocks = 0; + }else if(tuner->kind == TRANS_FROM_LOS_TO_MOS){ + mspace->blocks = (Block*)((POINTER_SIZE_INT)mspace->blocks - tune_size); + mspace->heap_start = (void*)(mspace->blocks); + mspace->committed_heap_size += tune_size; + mspace->first_block_idx -= tune_blocks; + mspace->num_managed_blocks += tune_blocks; + mspace->num_total_blocks += tune_blocks; } - - return; } void mspace_reset_after_compaction(Mspace* mspace) @@ -158,6 +116,9 @@ Block_Header* block; Space_Tuner* tuner = gc->tuner; Block_Header* nos_last_block; + Block_Header* mos_first_block = (Block_Header*)&mspace->blocks[0]; + unsigned int trans_blocks = (unsigned int)(tuner->tuning_size >> GC_BLOCK_SHIFT_COUNT); + /*Needn't change LOS size.*/ if(tuner->kind == TRANS_NOTHING){ for(i=0; i < gc->num_active_collectors; i++){ @@ -182,8 +143,7 @@ else /*If nos->num_managed_blocks is zero, we take mos_last_block as nos_last_block instead.*/ nos_last_block = (Block_Header*)&mspace->blocks[mspace->num_managed_blocks - 1]; - Block_Header* mos_first_block = (Block_Header*)&mspace->blocks[0]; - unsigned int 
trans_blocks = (unsigned int)(tuner->tuning_size >> GC_BLOCK_SHIFT_COUNT); + nos_last_block->next = mos_first_block; ((Block_Header*)&(mspace->blocks[trans_blocks - 1]))->next = NULL; @@ -201,39 +161,17 @@ return; }else { - Block_Header* mos_first_block = (Block_Header*)&mspace->blocks[0]; - unsigned int trans_blocks = (unsigned int)(tuner->tuning_size >> GC_BLOCK_SHIFT_COUNT); - gc->tuner->interim_blocks = (Block_Header*)STD_MALLOC(trans_blocks * sizeof(Block_Header)); - Block_Header* los_trans_fake_blocks = gc->tuner->interim_blocks; - memset(los_trans_fake_blocks, 0, trans_blocks * sizeof(Block_Header)); - void* trans_base = (void*)((POINTER_SIZE_INT)mos_first_block - tuner->tuning_size); - unsigned int start_idx = GC_BLOCK_INDEX_FROM(gc->heap_start, trans_base); - Block_Header* last_block = los_trans_fake_blocks; + gc_space_tuner_init_fake_blocks_for_los_shrink(gc); - for(i = 0; i < trans_blocks; i ++){ - Block_Header* curr_block = &los_trans_fake_blocks[i]; - curr_block->block_idx = start_idx + i; - curr_block->base = (void*)((POINTER_SIZE_INT)trans_base + i * GC_BLOCK_SIZE_BYTES + GC_BLOCK_HEADER_SIZE_BYTES); - curr_block->free = curr_block->base ; - curr_block->new_free = curr_block->free; - curr_block->ceiling = (void*)((POINTER_SIZE_INT)curr_block->base + GC_BLOCK_BODY_SIZE_BYTES); - curr_block->status = BLOCK_COMPACTED; -#ifdef USE_32BITS_HASHCODE - curr_block->hashcode_buf = hashcode_buf_create(); -#endif - last_block->next = curr_block; - last_block = curr_block; - } - last_block->next = mos_first_block; - Collector* collector = gc->collectors[0]; - collector->cur_target_block = los_trans_fake_blocks; + collector->cur_target_block = tuner->interim_blocks; collector->cur_target_block->status = BLOCK_TARGET; + if(trans_blocks >= gc->num_active_collectors) collector->cur_compact_block = mos_first_block; else - collector->cur_compact_block = los_trans_fake_blocks; - + collector->cur_compact_block = gc->tuner->interim_blocks; + collector->cur_compact_block->status = BLOCK_IN_COMPACT; for(i=1; i< gc->num_active_collectors; i++){ @@ -402,4 +340,3 @@ - Index: src/mark_compact/mspace_collect_compact.h =================================================================== --- src/mark_compact/mspace_collect_compact.h (revision 541183) +++ src/mark_compact/mspace_collect_compact.h (working copy) @@ -28,9 +28,7 @@ void gc_reset_block_for_collectors(GC* gc, Mspace* mspace); void gc_init_block_for_collectors(GC* gc, Mspace* mspace); -void mspace_update_info_for_los_extension(Mspace* mspace); -void mspace_update_info_for_los_shrink(Mspace* mspace); -void mspace_settle_fake_blocks_for_los_shrink(Mspace* mspace); +void mspace_update_info_after_space_tuning(Mspace* mspace); void mspace_reset_after_compaction(Mspace* mspace); Block_Header* mspace_get_first_compact_block(Mspace* mspace); @@ -41,8 +39,8 @@ void slide_compact_mspace(Collector* collector); void move_compact_mspace(Collector* collector); -void fallback_mark_scan_heap(Collector* collector); -void los_adaptation_mark_scan_heap(Collector *collector); +void mark_scan_heap_for_fallback(Collector* collector); +void mark_scan_heap_for_space_tune(Collector *collector); void mspace_extend_compact(Collector *collector); @@ -56,4 +54,3 @@ #endif /* _MSPACE_COLLECT_COMPACT_H_ */ - Index: src/mark_compact/mspace_move_compact.cpp =================================================================== --- src/mark_compact/mspace_move_compact.cpp (revision 541183) +++ src/mark_compact/mspace_move_compact.cpp (working copy) @@ -170,7 +170,7 @@ 
if(!gc_match_kind(gc, FALLBACK_COLLECTION)) mark_scan_heap(collector); else - fallback_mark_scan_heap(collector); + mark_scan_heap_for_fallback(collector); old_num = atomic_inc32(&num_marking_collectors); if( ++old_num == num_active_collectors ){ @@ -225,7 +225,6 @@ /* last collector's world here */ lspace_fix_repointed_refs(collector, lspace); gc_fix_rootset(collector); - mspace_update_info_for_los_extension(mspace); num_fixing_collectors++; } while(num_fixing_collectors != num_active_collectors + 1); Index: src/mark_compact/mspace_slide_compact.cpp =================================================================== --- src/mark_compact/mspace_slide_compact.cpp (revision 541183) +++ src/mark_compact/mspace_slide_compact.cpp (working copy) @@ -292,14 +292,45 @@ } } +static inline void gc_init_block_for_fix_repointed_refs(GC* gc, Mspace* mspace) +{ + Space_Tuner* tuner = gc->tuner; + POINTER_SIZE_INT tuning_size = tuner->tuning_size; + /*If LOS_Shrink, we just fix the repointed refs from the start of old mspace.*/ + if((tuner->kind == TRANS_NOTHING) || (tuner->kind == TRANS_FROM_LOS_TO_MOS)){ + mspace_block_iterator_init(mspace); + return; + }else{ + /*If LOS_Extend, we fix from the new start of mspace, because the block list starts from there.*/ + mspace->block_iterator = (Block_Header*)((POINTER_SIZE_INT)mspace->blocks + tuning_size); + } + return; +} + static inline void gc_init_block_for_sliding_compact(GC *gc, Mspace *mspace) { /* initialize related static variables */ next_block_for_dest = NULL; current_dest_block.block = NULL; current_dest_block.lock = FREE_LOCK; - mspace_block_iterator_init(mspace); + Space_Tuner* tuner = gc->tuner; + POINTER_SIZE_INT tuning_size = tuner->tuning_size; + + if( tuner->kind == TRANS_NOTHING ){ + /*If space is not tuned, we just start from mspace->heap_start.*/ + mspace_block_iterator_init(mspace); + return; + }else if (tuner->kind == TRANS_FROM_MOS_TO_LOS){ + /*If LOS_Extend, we compact from the new start of mspace, because the block list starts from there.*/ + mspace->block_iterator = (Block_Header*)((POINTER_SIZE_INT)mspace->blocks + tuning_size); + }else{ + /*If LOS_Shrink, we compact from the new start of mspace too. + *This is different from the operations in function gc_init_block_for_fix_repointed_refs, + *because we want to compact mspace to the new start.*/ + mspace->block_iterator = (Block_Header*)((POINTER_SIZE_INT)mspace->blocks - tuning_size); + } + return; } @@ -353,8 +384,8 @@ } -//For_LOS_extend -void mspace_restore_block_chain(Mspace* mspace) +/*For LOS_Extend*/ +static void mspace_restore_block_chain(Mspace* mspace) { GC* gc = mspace->gc; Fspace* fspace = (Fspace*)gc_get_nos((GC_Gen*)gc); @@ -381,32 +412,22 @@ unsigned int num_active_collectors = gc->num_active_collectors; /* Pass 1: ************************************************** - mark all live objects in heap, and save all the slots that - have references that are going to be repointed */ + *mark all live objects in heap, and save all the slots that + *have references that are going to be repointed. 
+ */ unsigned int old_num = atomic_cas32( &num_marking_collectors, 0, num_active_collectors+1); if(gc_match_kind(gc, FALLBACK_COLLECTION)) - fallback_mark_scan_heap(collector); + mark_scan_heap_for_fallback(collector); else if(gc->tuner->kind != TRANS_NOTHING) - los_adaptation_mark_scan_heap(collector); + mark_scan_heap_for_space_tune(collector); else mark_scan_heap(collector); - old_num = atomic_inc32(&num_marking_collectors); + + /* last collector's world here */ if( ++old_num == num_active_collectors ){ - /* last collector's world here */ - /*Retune space tuner to insure the tuning size is not to great*/ -// Boolean retune_result; if(gc->tuner->kind != TRANS_NOTHING) gc_space_retune(gc); -// if(gc->tuner->kind == TRANS_FROM_LOS_TO_MOS) printf("los shrink...\n"); -// if(gc->tuner->kind == TRANS_FROM_MOS_TO_LOS) printf("los extend...\n"); -/* if(!retune_result){ - gc->collect_result = FALSE; - num_marking_collectors++; - return; - }*/ - + if( gc->tuner->kind != TRANS_NOTHING ) gc_compute_space_tune_size_after_marking(gc); assert(!(gc->tuner->tuning_size % GC_BLOCK_SIZE_BYTES)); /* prepare for next phase */ gc_init_block_for_collectors(gc, mspace); @@ -426,12 +447,10 @@ #endif last_block_for_dest = NULL; - /* let other collectors go */ num_marking_collectors++; } while(num_marking_collectors != num_active_collectors + 1); -// if(!gc->collect_result) return; /* Pass 2: ************************************************** assign target addresses for all to-be-moved objects */ @@ -445,40 +464,43 @@ mspace_compute_object_target(collector, mspace); old_num = atomic_inc32(&num_repointing_collectors); + /*last collector's world here*/ if( ++old_num == num_active_collectors ){ - /* single thread world */ - /*LOS_Shrink: */ + /*LOS_Shrink: but lspace->move_object could be set individually without shrinking LOS.*/ if(lspace->move_object) lspace_compute_object_target(collector, lspace); gc->collect_result = gc_collection_result(gc); if(!gc->collect_result){ num_repointing_collectors++; return; } - gc_reset_block_for_collectors(gc, mspace); - mspace_block_iterator_init(mspace); + gc_init_block_for_fix_repointed_refs(gc, mspace); num_repointing_collectors++; } while(num_repointing_collectors != num_active_collectors + 1); if(!gc->collect_result) return; /* Pass 3: ************************************************** - update all references whose objects are to be moved */ + *update all references whose objects are to be moved + */ old_num = atomic_cas32( &num_fixing_collectors, 0, num_active_collectors+1); mspace_fix_repointed_refs(collector, mspace); old_num = atomic_inc32(&num_fixing_collectors); + /*last collector's world here */ if( ++old_num == num_active_collectors ){ - /* last collector's world here */ lspace_fix_repointed_refs(collector, lspace); gc_fix_rootset(collector); gc_init_block_for_sliding_compact(gc, mspace); - /*LOS_Shrink: Fixme: This operation moves objects in LOS, and should be part of Pass 4*/ + /*LOS_Shrink: This operation moves objects in LOS, and should be part of Pass 4 + *lspace_sliding_compact is not bound to LOS shrink; we can slide-compact LOS individually. + *So we use a flag lspace->move_object here, not tuner->kind == TRANS_FROM_LOS_TO_MOS. 
+ */ if(lspace->move_object) lspace_sliding_compact(collector, lspace); - mspace_settle_fake_blocks_for_los_shrink(mspace); - /*Fixme: LOS_Shrink: set dest block for sliding compact*/ - if(gc->tuner->kind == TRANS_FROM_LOS_TO_MOS) - mspace->block_iterator = (Block_Header*)((POINTER_SIZE_INT)mspace->blocks - (mspace->gc)->tuner->tuning_size); - + /*The temp blocks storing interim information are copied to the real places they belong, + *and their space, allocated in gc_space_tuner_init_fake_blocks_for_los_shrink, is then freed. + */ + last_block_for_dest = (Block_Header *)round_down_to_size((POINTER_SIZE_INT)last_block_for_dest->base, GC_BLOCK_SIZE_BYTES); + if(gc->tuner->kind == TRANS_FROM_LOS_TO_MOS) gc_space_tuner_release_fake_blocks_for_los_shrink(gc); num_fixing_collectors++; } while(num_fixing_collectors != num_active_collectors + 1); @@ -502,11 +524,9 @@ #endif old_num = atomic_inc32(&num_restoring_collectors); + if( ++old_num == num_active_collectors ){ - - mspace_update_info_for_los_extension(mspace); - mspace_update_info_for_los_shrink(mspace); - + if(gc->tuner->kind != TRANS_NOTHING) mspace_update_info_after_space_tuning(mspace); num_restoring_collectors++; } while(num_restoring_collectors != num_active_collectors + 1); @@ -529,7 +549,7 @@ mspace_reset_after_compaction(mspace); fspace_reset_for_allocation(fspace); - //For_LOS_extend + /*For LOS_Extend*/ mspace_restore_block_chain(mspace); gc_set_pool_clear(gc->metadata->gc_rootset_pool); Index: src/mark_sweep/lspace.cpp =================================================================== --- src/mark_sweep/lspace.cpp (revision 541183) +++ src/mark_sweep/lspace.cpp (working copy) @@ -111,7 +111,6 @@ lspace in that function lspace_get_next_marked_object is not true*/ if(!lspace->move_object) lspace_sweep(lspace); lspace->move_object = FALSE; -// printf("lspace: %d MB \n", lspace->committed_heap_size / MB); return; } Index: src/mark_sweep/lspace_alloc_collect.cpp =================================================================== --- src/mark_sweep/lspace_alloc_collect.cpp (revision 541183) +++ src/mark_sweep/lspace_alloc_collect.cpp (working copy) @@ -300,9 +300,6 @@ POINTER_SIZE_INT target_obj_end = (POINTER_SIZE_INT)p_target_obj + obj_size; if( p_obj != p_target_obj){ memmove(p_target_obj, p_obj, obj_size); - /*Fixme: For LOS_Shrink debug*/ -// unsigned int padding_lenth = ALIGN_UP_TO_KILO(target_obj_end) - target_obj_end; -// memset(p_target_obj, 0, padding_lenth); } set_obj_info(p_target_obj, 0); p_obj = lspace_get_next_marked_object(lspace, &iterate_index); Index: src/utils/seq_list.h =================================================================== --- src/utils/seq_list.h (revision 541183) +++ src/utils/seq_list.h (working copy) @@ -118,123 +118,3 @@ } #endif #endif //_SEQ_LIST_H_ -#ifndef _SEQ_LIST_H_ -#define _SEQ_LIST_H_ - -#include "vector_block.h" - -typedef struct List_Node{ - List_Node* next; -}List_Node; - -typedef struct Seq_List{ - List_Node* head; - List_Node* end; - List_Node* curr; -#ifdef _DEBUG - unsigned int node_num; -#endif -}Seq_List; - -inline Seq_List* seq_list_create() -{ - unsigned int size = sizeof(Seq_List); - Seq_List* seq_list = (Seq_List*)STD_MALLOC(size); - memset(seq_list, 0, size); - - //List Head - size = sizeof(List_Node); - List_Node* lnode = (List_Node*)STD_MALLOC(size); - seq_list->head = seq_list->end = lnode; - lnode->next = lnode; - - return seq_list; -} - -inline void seq_list_destruct(Seq_List* seq_list) -{ - STD_FREE(seq_list->head); - STD_FREE(seq_list); -} - 
-inline Boolean seq_list_add(Seq_List* seq_list, List_Node* node) -{ -#ifdef _DEBUG - seq_list->node_num ++; -#endif - seq_list->end ->next = node; - seq_list->end = node; - node->next = seq_list->head; - return TRUE; -} - -inline void seq_list_iterate_init(Seq_List* seq_list) -{ - seq_list->curr = seq_list->head->next; -} - -inline void seq_list_iterate_init_after_node(Seq_List* seq_list, List_Node* begin) -{ - seq_list->curr = begin->next; -} - -inline List_Node* seq_list_iterate_next(Seq_List* seq_list) -{ - if(seq_list->curr != seq_list->head){ - List_Node* ret_node = seq_list->curr; - seq_list->curr =seq_list->curr->next; - return ret_node; - } - return NULL; -} - -inline Boolean seq_list_has_next(Seq_List* seq_list) -{ - return seq_list->curr != seq_list->head; -} - -inline List_Node* seq_list_end_node(Seq_List* seq_list) -{ return seq_list->end; } - -inline List_Node* seq_list_lookup_prev_node(Seq_List* seq_list, List_Node* node) -{ - List_Node* prev_node = seq_list->head; - seq_list_iterate_init(seq_list); - while(seq_list_has_next(seq_list)){ - List_Node* curr_node = seq_list_iterate_next(seq_list); - if( node == curr_node ) return prev_node; - prev_node = curr_node; - } - return NULL; -} - -inline Boolean seq_list_remove(Seq_List* seq_list, List_Node* node) -{ - List_Node* prev_node = seq_list_lookup_prev_node(seq_list, node); - if(prev_node==NULL) return FALSE; //need assertion here. - prev_node->next = node->next; -#ifdef _DEBUG - seq_list->node_num --; -#endif - if(seq_list->end == node) seq_list->end = prev_node; - return TRUE; -} - -inline void seq_list_clear(Seq_List* seq_list) -{ - seq_list->end = seq_list->head; - seq_list->curr = seq_list->head; - List_Node* head = seq_list->head; - head->next = seq_list->head; -#ifdef _DEBUG - seq_list->node_num = 0; -#endif -} - -#ifdef _DEBUG -inline unsigned int seq_list_size(Seq_List* seq_list) -{ - return seq_list->node_num; -} -#endif -#endif //_SEQ_LIST_H_
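
Editor's note (illustrative, not part of the patch): the gen_adapt.cpp hunks above size NOS from the observed survive ratio: nos_reserve_size = total_free / (1 + survive_ratio), clamped to at least one block, with GC_MOS_MIN_EXTRA_REMAIN_SIZE held back in MOS to put off a fallback collection, and the result rounded down to the block size. The sketch below is a hypothetical, self-contained rendering of that policy; compute_new_nos_size(), the 32 KB block size, and the halving of reserve_in_mos when it does not fit are assumptions made for illustration, not code from the tree.

#include <stdio.h>
#include <stddef.h>

#define KB ((size_t)1 << 10)
#define MB ((size_t)1 << 20)
#define GC_BLOCK_SIZE_BYTES          (32 * KB)   /* assumed block size                    */
#define GC_MOS_MIN_EXTRA_REMAIN_SIZE (36 * MB)   /* spare MOS space to put off fallback   */

static size_t round_down_to_size(size_t size, size_t unit){ return size - (size % unit); }

/* Split the free area so that survivors of the next minor collection (estimated with the
 * observed NOS survive ratio) still fit into MOS, keep at least one block for NOS, and
 * try to leave GC_MOS_MIN_EXTRA_REMAIN_SIZE unused in MOS to avoid a fallback collection. */
static size_t compute_new_nos_size(size_t total_free, float nos_survive_ratio)
{
  size_t nos_reserve = (size_t)((float)total_free / (1.0f + nos_survive_ratio));
  if(nos_reserve <= GC_BLOCK_SIZE_BYTES) nos_reserve = GC_BLOCK_SIZE_BYTES;

  size_t reserve_in_mos = GC_MOS_MIN_EXTRA_REMAIN_SIZE;
  while(reserve_in_mos >= GC_BLOCK_SIZE_BYTES){
    if(nos_reserve >= reserve_in_mos + GC_BLOCK_SIZE_BYTES){
      nos_reserve -= reserve_in_mos;
      break;
    }
    reserve_in_mos >>= 1;   /* assumed: shrink the MOS reserve until it fits */
  }
  return round_down_to_size(nos_reserve, GC_BLOCK_SIZE_BYTES);
}

int main(void)
{
  size_t total_free = 256 * MB;
  size_t new_nos = compute_new_nos_size(total_free, 0.10f);
  printf("new NOS size: %zu MB, remaining free MOS: %zu MB\n",
         new_nos / MB, (total_free - new_nos) / MB);
  return 0;
}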
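
Editor's note (illustrative, not part of the patch): mspace_update_info_after_space_tuning() above merges the old LOS-extend and LOS-shrink bookkeeping: once the tuner moves tuning_size bytes between LOS and MOS, the MOS block pointer, committed size, and block counters shift by the same amount in the corresponding direction. The cut-down sketch below uses a toy Mspace struct and an assumed 32 KB block size; it is not the patch's code.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define GC_BLOCK_SHIFT_COUNT 15                      /* assumed: 32 KB blocks */

typedef enum { TRANS_NOTHING, TRANS_FROM_MOS_TO_LOS, TRANS_FROM_LOS_TO_MOS } Transform_Kind;

typedef struct Toy_Mspace {
  uintptr_t    blocks;              /* address of the first MOS block */
  size_t       committed_heap_size;
  unsigned int first_block_idx;
  unsigned int num_managed_blocks;
} Toy_Mspace;

/* LOS extension takes blocks away from the MOS head; LOS shrink hands them back. */
static void toy_update_after_tuning(Toy_Mspace* mos, Transform_Kind kind, size_t tune_size)
{
  unsigned int tune_blocks = (unsigned int)(tune_size >> GC_BLOCK_SHIFT_COUNT);
  if(kind == TRANS_FROM_MOS_TO_LOS){
    mos->blocks              += tune_size;
    mos->committed_heap_size -= tune_size;
    mos->first_block_idx     += tune_blocks;
    mos->num_managed_blocks  -= tune_blocks;
  }else if(kind == TRANS_FROM_LOS_TO_MOS){
    mos->blocks              -= tune_size;
    mos->committed_heap_size += tune_size;
    mos->first_block_idx     -= tune_blocks;
    mos->num_managed_blocks  += tune_blocks;
  }
}

int main(void)
{
  Toy_Mspace mos = { 0x40000000u, (size_t)64 << 20, 1024, 2048 };
  toy_update_after_tuning(&mos, TRANS_FROM_MOS_TO_LOS, (size_t)4 << 20);  /* give 4 MB to LOS */
  printf("MOS now starts at %#lx with %u managed blocks\n",
         (unsigned long)mos.blocks, mos.num_managed_blocks);
  return 0;
}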
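
Editor's note (illustrative, not part of the patch): each pass of slide_compact_mspace() ends with the "last collector's world" rendezvous seen above: every collector bumps a shared counter, the one that brings it to num_active_collectors runs the single-threaded fix-up and bumps it once more, and the others spin until the counter reaches num_active_collectors + 1. The sketch below reproduces that idiom with std::atomic and std::thread in place of the GC's atomic_cas32/atomic_inc32 wrappers; the per-pass work is only a placeholder.

#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

static std::atomic<unsigned> phase_counter{0};

static void collector_pass(unsigned id, unsigned num_collectors)
{
  /* reset the counter left over from a previous pass (no-op on the first pass) */
  unsigned finished = num_collectors + 1;
  phase_counter.compare_exchange_strong(finished, 0);

  /* ... parallel per-collector work for this pass would go here ... */

  unsigned old_num = phase_counter.fetch_add(1) + 1;
  if(old_num == num_collectors){
    /* last collector's world: single-threaded fix-up between passes */
    std::printf("collector %u runs the single-threaded epilogue\n", id);
    phase_counter.fetch_add(1);                  /* let the other collectors go */
  }
  while(phase_counter.load() != num_collectors + 1)
    std::this_thread::yield();                   /* spin until the epilogue is done */
}

int main()
{
  const unsigned num_collectors = 4;
  std::vector<std::thread> collectors;
  for(unsigned i = 0; i < num_collectors; ++i)
    collectors.emplace_back(collector_pass, i, num_collectors);
  for(std::thread& t : collectors) t.join();
  return 0;
}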