Index: src/common/gc_for_vm.cpp
===================================================================
--- src/common/gc_for_vm.cpp	(revision 522899)
+++ src/common/gc_for_vm.cpp	(working copy)
@@ -193,7 +193,7 @@
   Obj_Info_Type info = get_obj_info_raw(obj);
   int hash = info & GCGEN_HASH_MASK;
   if (!hash) {
-    hash = (((unsigned int)obj) >> 3) & GCGEN_HASH_MASK;
+    hash = (((POINTER_SIZE_INT)obj) >> 3) & GCGEN_HASH_MASK;
     if(!hash) hash = (23 & GCGEN_HASH_MASK);
     unsigned int new_info = (unsigned int)(info | hash);
     while (true) {
@@ -239,6 +239,3 @@
   mutator_need_block = FALSE;
   return old_flag;
 }
-
-
-
Index: src/gen/gen.cpp
===================================================================
--- src/gen/gen.cpp	(revision 522899)
+++ src/gen/gen.cpp	(working copy)
@@ -268,7 +268,7 @@
   /* this is for debugging. */
   gc->last_collect_kind = gc->collect_kind;
 
-  if(gc->force_major_collect || cause== GC_CAUSE_LOS_IS_FULL || FORCE_FULL_COMPACT || (gc->nos->num_managed_blocks == 0))
+  if(gc->force_major_collect || cause== GC_CAUSE_LOS_IS_FULL || FORCE_FULL_COMPACT)
     gc->collect_kind = MAJOR_COLLECTION;
   else
     gc->collect_kind = MINOR_COLLECTION;
Index: src/gen/gen_adapt.cpp
===================================================================
--- src/gen/gen_adapt.cpp	(revision 522899)
+++ src/gen/gen_adapt.cpp	(working copy)
@@ -22,7 +22,7 @@
 #include "../common/space_tuner.h"
 #include <math.h>
 
-#define NOS_COPY_RESERVE_DELTA (GC_BLOCK_SIZE_BYTES<<5)
+#define NOS_COPY_RESERVE_DELTA (GC_BLOCK_SIZE_BYTES<<1)
 /*Tune this number in case that MOS could be too small, so as to avoid or put off fall back.*/
 #define GC_MOS_MIN_EXTRA_REMAIN_SIZE (36*MB)
 /*Switch on this MACRO when we want lspace->survive_ratio to be sensitive.*/
@@ -194,7 +194,7 @@
   Blocked_Space* fspace = (Blocked_Space*)gc->nos;
   Blocked_Space* mspace = (Blocked_Space*)gc->mos;
 
-  float survive_ratio = 0;
+  float survive_ratio = 0.2f;
 
   POINTER_SIZE_INT mos_free_size = space_free_memory_size(mspace);
   POINTER_SIZE_INT nos_free_size = space_free_memory_size(fspace);
@@ -206,11 +206,14 @@
 
     Tslow = (float)pause_time;
     SMax = total_free_size;
-    gc->force_major_collect = FALSE;
+    /*If fall back happens, and nos_boundary is up to heap_ceiling, then we force major.*/
+    if(gc->nos->num_managed_blocks == 0)
+      gc->force_major_collect = TRUE;
+    else gc->force_major_collect = FALSE;
 
-    POINTER_SIZE_INT major_survive_size = space_committed_size((Space*)mspace) - mos_free_size;
     /*If major is caused by LOS, or collection kind is EXTEND_COLLECTION, all survive ratio is not updated.*/
     if((gc->cause != GC_CAUSE_LOS_IS_FULL) && (!gc_match_kind((GC*)gc, EXTEND_COLLECTION))){
+      POINTER_SIZE_INT major_survive_size = space_committed_size((Space*)mspace) - mos_free_size;
       survive_ratio = (float)major_survive_size/(float)space_committed_size((Space*)mspace);
       mspace->survive_ratio = survive_ratio;
     }
@@ -285,20 +288,17 @@
   total_size = (POINTER_SIZE_INT)gc->heap_end - (POINTER_SIZE_INT)mspace->heap_start;
 #endif
 
-  /* check if curr nos size is too small to shrink */
-  /*
-  if(curr_nos_size <= min_nos_size_bytes){
-    //after major, should not allow this size
-    assert(gc_match_kind((GC*)gc, MINOR_COLLECTION));
-    return FALSE;
-  }
-  */
-
   POINTER_SIZE_INT total_free = total_size - used_mos_size;
+  /*If total free is smaller than one block, there is no room for us to adjust*/
+  if(total_free < GC_BLOCK_SIZE_BYTES) return FALSE;
+
   /* predict NOS + NOS*ratio = total_free_size */
   POINTER_SIZE_INT nos_reserve_size;
   nos_reserve_size = (POINTER_SIZE_INT)(((float)total_free)/(1.0f + fspace->survive_ratio));
-  new_nos_size = round_down_to_size((POINTER_SIZE_INT)nos_reserve_size, SPACE_ALLOC_UNIT);
+  /*Nos should not be too small*/
+  if(nos_reserve_size <= GC_BLOCK_SIZE_BYTES) nos_reserve_size = GC_BLOCK_SIZE_BYTES;
+  new_nos_size = round_down_to_size((POINTER_SIZE_INT)nos_reserve_size, GC_BLOCK_SIZE_BYTES);
+
 #ifdef STATIC_NOS_MAPPING
   if(new_nos_size > fspace->reserved_heap_size) new_nos_size = fspace->reserved_heap_size;
 #endif
Index: src/mark_compact/mspace_move_compact.cpp
===================================================================
--- src/mark_compact/mspace_move_compact.cpp	(revision 522899)
+++ src/mark_compact/mspace_move_compact.cpp	(working copy)
@@ -193,7 +193,7 @@
   while(num_fixing_collectors != num_active_collectors + 1);
 
   /* Dealing with out of memory in mspace */
-  if((mspace->free_block_idx > fspace->first_block_idx) || ((fspace->num_managed_blocks == 0) && (mspace->free_block_idx < fspace->first_block_idx))){
+  if(mspace->free_block_idx > fspace->first_block_idx){
     atomic_cas32( &num_extending_collectors, 0, num_active_collectors);
     mspace_extend_compact(collector);
     atomic_inc32(&num_extending_collectors);
Index: src/mark_compact/mspace_slide_compact.cpp
===================================================================
--- src/mark_compact/mspace_slide_compact.cpp	(revision 522899)
+++ src/mark_compact/mspace_slide_compact.cpp	(working copy)
@@ -430,7 +430,7 @@
   while(num_restoring_collectors != num_active_collectors + 1);
 
   /* Dealing with out of memory in mspace */
-  if((mspace->free_block_idx > fspace->first_block_idx) || ((fspace->num_managed_blocks == 0) && (mspace->free_block_idx < fspace->first_block_idx))){
+  if(mspace->free_block_idx > fspace->first_block_idx){
     atomic_cas32( &num_extending_collectors, 0, num_active_collectors);
 
     mspace_extend_compact(collector);
Index: src/verify/verifier_scanner.cpp
===================================================================
--- src/verify/verifier_scanner.cpp	(revision 522899)
+++ src/verify/verifier_scanner.cpp	(working copy)
@@ -257,9 +257,9 @@
       p_ref = obj_get_referent_field(p_obj);
       if(!is_unreachable_obj(p_obj)){
         verify_write_barrier(p_ref, heap_verifier);
-        if(*p_ref != NULL) verify_live_object_slot(p_ref, heap_verifier);
+        if(*p_ref != COMPRESSED_NULL) verify_live_object_slot(p_ref, heap_verifier);
       }else{
-        if(*p_ref != NULL) verify_all_object_slot(p_ref, heap_verifier);
+        if(*p_ref != COMPRESSED_NULL) verify_all_object_slot(p_ref, heap_verifier);
       }
     }
 #endif
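Note on the gc_for_vm.cpp hunk: widening the pointer cast from unsigned int to POINTER_SIZE_INT makes the pointer-to-integer conversion lossless on 64-bit targets. A minimal, self-contained sketch of the hash derivation follows; the GCGEN_HASH_MASK value used here is only illustrative (the real mask is defined in the GC headers), and hash_from_address is a hypothetical stand-in for the surrounding function in the patch.

#include <cstdio>
#include <cstdint>

typedef uintptr_t POINTER_SIZE_INT;

/* Illustrative stand-in; the real GCGEN_HASH_MASK lives in the GC headers. */
static const unsigned int GCGEN_HASH_MASK = 0x7c;

/* Derive a non-zero hash from an object's address. Going through
 * POINTER_SIZE_INT (uintptr_t) keeps the pointer-to-integer cast lossless
 * on 64-bit platforms, where the old (unsigned int) cast truncated. */
static unsigned int hash_from_address(const void* obj)
{
  unsigned int hash =
      (unsigned int)((((POINTER_SIZE_INT)obj) >> 3) & GCGEN_HASH_MASK);
  if(!hash) hash = (23 & GCGEN_HASH_MASK); /* same fallback as the patch */
  return hash;
}

int main()
{
  int probe;
  printf("hash = 0x%x\n", hash_from_address(&probe));
  return 0;
}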
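Note on the gen_adapt.cpp sizing hunk: the patch predicts the new NOS size from NOS + NOS*survive_ratio = total_free, i.e. NOS = total_free / (1 + survive_ratio), then rounds down to the block granularity and keeps at least one block. Below is a minimal, self-contained sketch of that arithmetic; the 32 KB value for GC_BLOCK_SIZE_BYTES and the round_down_to_size() helper are assumptions standing in for the real definitions in the GC headers, and predict_nos_size is a hypothetical name, not the patched function.

#include <cstdio>
#include <cstdint>

typedef uintptr_t POINTER_SIZE_INT;

static const POINTER_SIZE_INT GC_BLOCK_SIZE_BYTES = 32 * 1024; /* assumed */

/* Round value down to a multiple of unit; unit must be a power of two. */
static POINTER_SIZE_INT round_down_to_size(POINTER_SIZE_INT value,
                                           POINTER_SIZE_INT unit)
{
  return value & ~(unit - 1);
}

/* Solve NOS + NOS*survive_ratio = total_free for NOS, round down to the
 * block granularity, and keep at least one block, mirroring the patched
 * logic. Returns 0 when there is no room to adjust. */
static POINTER_SIZE_INT predict_nos_size(POINTER_SIZE_INT total_free,
                                         float survive_ratio)
{
  if(total_free < GC_BLOCK_SIZE_BYTES) return 0;

  POINTER_SIZE_INT nos_reserve_size =
      (POINTER_SIZE_INT)(((float)total_free)/(1.0f + survive_ratio));
  if(nos_reserve_size <= GC_BLOCK_SIZE_BYTES)
    nos_reserve_size = GC_BLOCK_SIZE_BYTES;
  return round_down_to_size(nos_reserve_size, GC_BLOCK_SIZE_BYTES);
}

int main()
{
  /* 16 MB free with a 0.2f survive ratio yields ~13.3 MB, block-aligned. */
  printf("%zu\n", (size_t)predict_nos_size(16 * 1024 * 1024, 0.2f));
  return 0;
}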