Index: build/make/components/vm/gc_gen.xml =================================================================== --- build/make/components/vm/gc_gen.xml (revision 499061) +++ build/make/components/vm/gc_gen.xml (working copy) @@ -52,7 +52,7 @@ - @@ -65,7 +65,7 @@ - Index: vm/gc_gen/javasrc/org/apache/harmony/drlvm/gc_gen/GCHelper.java =================================================================== --- vm/gc_gen/javasrc/org/apache/harmony/drlvm/gc_gen/GCHelper.java (revision 499692) +++ vm/gc_gen/javasrc/org/apache/harmony/drlvm/gc_gen/GCHelper.java (working copy) @@ -33,7 +33,7 @@ public static final int TLS_GC_OFFSET = TLSGCOffset(); - public static Address alloc(int objSize, int allocationHandle) throws InlinePragma { + public static Address alloc(int objSize, int allocationHandle) throws InlinePragma { Address TLS_BASE = VMHelper.getTlsBaseAddress(); Address allocator_addr = TLS_BASE.plus(TLS_GC_OFFSET); @@ -74,13 +74,13 @@ be a constant in future. */ - public static final int NOS_BOUNDARY = getNosBoundary(); + public static Address NOS_BOUNDARY = getNosBoundary(); public static boolean GEN_MODE = getGenMode(); public static void write_barrier_slot_rem(Address p_objBase, Address p_objSlot, Address p_target) throws InlinePragma { /* If the slot is in NOS or the target is not in NOS, we simply return*/ - if(p_objSlot.toInt() >= NOS_BOUNDARY || p_target.toInt() < NOS_BOUNDARY || !GEN_MODE) { + if(p_objSlot.GE(NOS_BOUNDARY) || p_target.LT(NOS_BOUNDARY) || !GEN_MODE) { p_objSlot.store(p_target); return; } @@ -90,7 +90,7 @@ private static native int helperCallback(); private static native boolean getGenMode(); - private static native int getNosBoundary(); + private static native Address getNosBoundary(); private static native int TLSGCOffset(); } Index: vm/gc_gen/src/common/fix_repointed_refs.h =================================================================== --- vm/gc_gen/src/common/fix_repointed_refs.h (revision 499692) +++ vm/gc_gen/src/common/fix_repointed_refs.h (working copy) @@ -37,9 +37,9 @@ /* Condition obj_is_moved(p_obj) is for preventing mistaking previous mark bit of large obj as fw bit when fallback happens. * Because until fallback happens, perhaps the large obj hasn't been marked. So its mark bit remains as the last time. * In major collection condition obj_is_fw_in_oi(p_obj) can be omitted, - * for whose which can be scanned in MOS & NOS must have been set fw bit in oi. + * since those which can be scanned in MOS & NOS must have been set fw bit in oi. 
*/ - assert((unsigned int)obj_get_fw_in_oi(p_obj) > DUAL_MARKBITS); + assert((POINTER_SIZE_INT)obj_get_fw_in_oi(p_obj) > DUAL_MARKBITS); *p_ref = obj_get_fw_in_oi(p_obj); } } @@ -57,7 +57,7 @@ assert(!obj_is_primitive_array(p_obj)); int32 array_length = array->array_len; - Partial_Reveal_Object** p_refs = (Partial_Reveal_Object**)((int)array + (int)array_first_element_offset(array)); + Partial_Reveal_Object** p_refs = (Partial_Reveal_Object**)((POINTER_SIZE_INT)array + (int)array_first_element_offset(array)); for (int i = 0; i < array_length; i++) { slot_fix(p_refs + i); } @@ -79,11 +79,11 @@ inline void block_fix_ref_after_copying(Block_Header* curr_block) { - unsigned int cur_obj = (unsigned int)curr_block->base; - unsigned int block_end = (unsigned int)curr_block->free; + POINTER_SIZE_INT cur_obj = (POINTER_SIZE_INT)curr_block->base; + POINTER_SIZE_INT block_end = (POINTER_SIZE_INT)curr_block->free; while(cur_obj < block_end){ object_fix_ref_slots((Partial_Reveal_Object*)cur_obj); - cur_obj = (unsigned int)cur_obj + vm_object_size((Partial_Reveal_Object*)cur_obj); + cur_obj = cur_obj + vm_object_size((Partial_Reveal_Object*)cur_obj); } return; } Index: vm/gc_gen/src/common/gc_block.h =================================================================== --- vm/gc_gen/src/common/gc_block.h (revision 499692) +++ vm/gc_gen/src/common/gc_block.h (working copy) @@ -23,8 +23,6 @@ #include "gc_common.h" -#define SYSTEM_ALLOC_UNIT 0x10000 - #define GC_BLOCK_SHIFT_COUNT 15 #define GC_BLOCK_SIZE_BYTES (1 << GC_BLOCK_SHIFT_COUNT) @@ -50,7 +48,7 @@ Partial_Reveal_Object* src; Partial_Reveal_Object* next_src; Block_Header* next; - unsigned int table[1]; /* entry num == OFFSET_TABLE_SIZE_WORDS */ + POINTER_SIZE_INT table[1]; /* entry num == OFFSET_TABLE_SIZE_WORDS */ }Block_Header; typedef union Block{ @@ -58,7 +56,7 @@ unsigned char raw_bytes[GC_BLOCK_SIZE_BYTES]; }Block; -#define GC_BLOCK_HEADER_VARS_SIZE_BYTES (unsigned int)&(((Block_Header*)0)->table) +#define GC_BLOCK_HEADER_VARS_SIZE_BYTES (POINTER_SIZE_INT)&(((Block_Header*)0)->table) #define SECTOR_SIZE_SHIFT_COUNT 8 #define SECTOR_SIZE_BYTES (1 << SECTOR_SIZE_SHIFT_COUNT) @@ -75,22 +73,22 @@ #define GC_BLOCK_HEADER_SIZE_BYTES (OFFSET_TABLE_SIZE_BYTES + GC_BLOCK_HEADER_VARS_SIZE_BYTES) #define GC_BLOCK_BODY_SIZE_BYTES (GC_BLOCK_SIZE_BYTES - GC_BLOCK_HEADER_SIZE_BYTES) -#define GC_BLOCK_BODY(block) ((void*)((unsigned int)(block) + GC_BLOCK_HEADER_SIZE_BYTES)) -#define GC_BLOCK_END(block) ((void*)((unsigned int)(block) + GC_BLOCK_SIZE_BYTES)) +#define GC_BLOCK_BODY(block) ((void*)((POINTER_SIZE_INT)(block) + GC_BLOCK_HEADER_SIZE_BYTES)) +#define GC_BLOCK_END(block) ((void*)((POINTER_SIZE_INT)(block) + GC_BLOCK_SIZE_BYTES)) -#define GC_BLOCK_LOW_MASK ((unsigned int)(GC_BLOCK_SIZE_BYTES - 1)) +#define GC_BLOCK_LOW_MASK ((POINTER_SIZE_INT)(GC_BLOCK_SIZE_BYTES - 1)) #define GC_BLOCK_HIGH_MASK (~GC_BLOCK_LOW_MASK) -#define GC_BLOCK_HEADER(addr) ((Block_Header *)((unsigned int)(addr) & GC_BLOCK_HIGH_MASK)) +#define GC_BLOCK_HEADER(addr) ((Block_Header *)((POINTER_SIZE_INT)(addr) & GC_BLOCK_HIGH_MASK)) #define GC_BLOCK_INDEX(addr) ((unsigned int)(GC_BLOCK_HEADER(addr)->block_idx)) -#define GC_BLOCK_INDEX_FROM(heap_start, addr) ((unsigned int)(((unsigned int)(addr)-(unsigned int)(heap_start)) >> GC_BLOCK_SHIFT_COUNT)) +#define GC_BLOCK_INDEX_FROM(heap_start, addr) ((unsigned int)(((POINTER_SIZE_INT)(addr)-(POINTER_SIZE_INT)(heap_start)) >> GC_BLOCK_SHIFT_COUNT)) -#define ADDRESS_OFFSET_TO_BLOCK_HEADER(addr) ((unsigned int)((unsigned 
int)addr&GC_BLOCK_LOW_MASK)) +#define ADDRESS_OFFSET_TO_BLOCK_HEADER(addr) ((unsigned int)((POINTER_SIZE_INT)addr&GC_BLOCK_LOW_MASK)) #define ADDRESS_OFFSET_IN_BLOCK_BODY(addr) ((unsigned int)(ADDRESS_OFFSET_TO_BLOCK_HEADER(addr)- GC_BLOCK_HEADER_SIZE_BYTES)) inline void block_init(Block_Header* block) { - block->free = (void*)((unsigned int)block + GC_BLOCK_HEADER_SIZE_BYTES); - block->ceiling = (void*)((unsigned int)block + GC_BLOCK_SIZE_BYTES); + block->free = (void*)((POINTER_SIZE_INT)block + GC_BLOCK_HEADER_SIZE_BYTES); + block->ceiling = (void*)((POINTER_SIZE_INT)block + GC_BLOCK_SIZE_BYTES); block->base = block->free; block->new_free = block->free; block->status = BLOCK_FREE; @@ -101,7 +99,7 @@ inline Partial_Reveal_Object *obj_end(Partial_Reveal_Object *obj) { - return (Partial_Reveal_Object *)((unsigned int)obj + vm_object_size(obj)); + return (Partial_Reveal_Object *)((POINTER_SIZE_INT)obj + vm_object_size(obj)); } inline Partial_Reveal_Object *next_marked_obj_in_block(Partial_Reveal_Object *cur_obj, Partial_Reveal_Object *block_end) @@ -240,12 +238,12 @@ /* only for inter-sector compaction */ unsigned int index = OBJECT_INDEX_TO_OFFSET_TABLE(p_obj); Block_Header *curr_block = GC_BLOCK_HEADER(p_obj); - return (Partial_Reveal_Object *)(((unsigned int)p_obj) - curr_block->table[index]); + return (Partial_Reveal_Object *)(((POINTER_SIZE_INT)p_obj) - curr_block->table[index]); } inline void block_clear_table(Block_Header* block) { - unsigned int* table = block->table; + POINTER_SIZE_INT* table = block->table; memset(table, 0, OFFSET_TABLE_SIZE_BYTES); return; } Index: vm/gc_gen/src/common/gc_common.cpp =================================================================== --- vm/gc_gen/src/common/gc_common.cpp (revision 499692) +++ vm/gc_gen/src/common/gc_common.cpp (working copy) @@ -29,10 +29,13 @@ unsigned int Cur_Mark_Bit = 0x1; unsigned int Cur_Forward_Bit = 0x2; +unsigned int SPACE_ALLOC_UNIT; + extern Boolean GC_VERIFY; extern unsigned int NOS_SIZE; extern unsigned int MIN_NOS_SIZE; +extern unsigned int MIN_LOS_SIZE; extern Boolean FORCE_FULL_COMPACT; extern Boolean MINOR_ALGORITHM; @@ -46,6 +49,8 @@ unsigned int min_heap_size_bytes = 32 * MB; unsigned int max_heap_size_bytes = 0; +extern Boolean JVMTI_HEAP_ITERATION ; + static int get_int_property(const char *property_name) { assert(property_name); @@ -154,6 +159,10 @@ MIN_NOS_SIZE = get_size_property("gc.min_nos_size"); } + if (is_property_set("gc.min_los_size", VM_PROPERTIES) == 1) { + MIN_LOS_SIZE = get_size_property("gc.min_los_size"); + } + if (is_property_set("gc.num_collectors", VM_PROPERTIES) == 1) { unsigned int num = get_int_property("gc.num_collectors"); NUM_COLLECTORS = (num==0)? 
NUM_COLLECTORS:num; @@ -214,6 +223,21 @@ if (is_property_set("gc.verify", VM_PROPERTIES) == 1) { GC_VERIFY = get_boolean_property("gc.verify"); } + + if (is_property_set("gc.gen_nongen_switch", VM_PROPERTIES) == 1){ + GEN_NONGEN_SWITCH= get_boolean_property("gc.gen_nongen_switch"); + gc->generate_barrier = TRUE; + } + + if (is_property_set("gc.heap_iteration", VM_PROPERTIES) == 1) { + JVMTI_HEAP_ITERATION = get_boolean_property("gc.heap_iteration"); + } + + if (is_property_set("gc.use_large_page", VM_PROPERTIES) == 1){ + char* value = get_property("gc.use_large_page", VM_PROPERTIES); + large_page_hint = strdup(value); + destroy_property_value(value); + } return; } @@ -222,17 +246,23 @@ void gc_reclaim_heap(GC* gc, unsigned int gc_cause) { + int64 start_time = time_now(); + /* FIXME:: before mutators suspended, the ops below should be very careful to avoid racing with mutators. */ gc->num_collections++; - + gc->cause = gc_cause; gc_decide_collection_kind((GC_Gen*)gc, gc_cause); //For_LOS_extend! - gc_space_tune(gc, gc_cause); +#ifdef GC_FIXED_SIZE_TUNER + gc_space_tune_before_gc_simplified(gc, gc_cause); +#else + gc_space_tune_prepare(gc, gc_cause); + gc_space_tune_before_gc(gc, gc_cause); +#endif - #ifdef MARK_BIT_FLIPPING if(gc->collect_kind == MINOR_COLLECTION) mark_bit_flip(); @@ -254,18 +284,28 @@ if(!IGNORE_FINREF ) gc_set_obj_with_fin(gc); + + gc_gen_reclaim_heap((GC_Gen*)gc); - gc_gen_reclaim_heap((GC_Gen*)gc); gc_reset_interior_pointer_table(); gc_metadata_verify(gc, FALSE); + int64 pause_time = time_now() - start_time; + gc->time_collections += pause_time; + gc_gen_adapt((GC_Gen*)gc, pause_time); + if(gc_is_gen_mode()) gc_prepare_mutator_remset(gc); if(!IGNORE_FINREF ){ + gc_put_finref_to_vm(gc); gc_reset_finref_metadata(gc); gc_activate_finref_threads((GC*)gc); +#ifndef BUILD_IN_REFERENT + } else { + gc_clear_weakref_pools(gc); +#endif } //For_LOS_extend! 
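The bulk of this patch replaces (unsigned int) casts on addresses with POINTER_SIZE_INT so that the block, offset-table and vtable-flag arithmetic keeps the full pointer width on 64-bit builds. POINTER_SIZE_INT itself is defined elsewhere in DRLVM and is not part of this diff; the minimal sketch below (names carrying an _sk suffix are hypothetical stand-ins) only illustrates the invariant the rewrite depends on, using the gc_block.h masks as the example.

#include <stdint.h>   /* uintptr_t */

/* Hypothetical stand-in for DRLVM's POINTER_SIZE_INT (defined outside this patch). */
typedef uintptr_t POINTER_SIZE_INT_sk;

/* The invariant the cast rewrite relies on: the integer type is as wide as a pointer. */
typedef char pointer_size_int_is_pointer_wide_sk
            [sizeof(POINTER_SIZE_INT_sk) == sizeof(void*) ? 1 : -1];

/* gc_block.h-style arithmetic written with the pointer-wide type; with a
 * 32-bit (unsigned int) cast the upper half of a 64-bit address would be
 * dropped before the mask is applied. */
#define GC_BLOCK_SHIFT_COUNT_SK 15
#define GC_BLOCK_SIZE_BYTES_SK  (1 << GC_BLOCK_SHIFT_COUNT_SK)
#define GC_BLOCK_LOW_MASK_SK    ((POINTER_SIZE_INT_sk)(GC_BLOCK_SIZE_BYTES_SK - 1))
#define GC_BLOCK_HIGH_MASK_SK   (~GC_BLOCK_LOW_MASK_SK)

inline void* gc_block_header_sk(void* addr)
{ return (void*)((POINTER_SIZE_INT_sk)addr & GC_BLOCK_HIGH_MASK_SK); }

On IA-32 both types are 32 bits wide, so the rewrite is behaviour-preserving there; the difference only shows up once heap addresses can sit above the 4GB boundary.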
Index: vm/gc_gen/src/common/gc_common.h =================================================================== --- vm/gc_gen/src/common/gc_common.h (revision 499692) +++ vm/gc_gen/src/common/gc_common.h (working copy) @@ -53,6 +53,9 @@ #define BIT_SHIFT_TO_KILO 10 #define BIT_MASK_TO_BITS_PER_WORD ((1<gc_clss = ch; @@ -181,13 +183,13 @@ /* these should be set last to use the gcvt pointer */ if(gcvt->gc_number_of_ref_fields) - gcvt = (GC_VTable_Info*)((unsigned int)gcvt | GC_CLASS_FLAG_REFS); + gcvt = (GC_VTable_Info*)((POINTER_SIZE_INT)gcvt | GC_CLASS_FLAG_REFS); if(class_is_array(ch)) - gcvt = (GC_VTable_Info*)((unsigned int)gcvt | GC_CLASS_FLAG_ARRAY); + gcvt = (GC_VTable_Info*)((POINTER_SIZE_INT)gcvt | GC_CLASS_FLAG_ARRAY); if(class_is_finalizable(ch)) - gcvt = (GC_VTable_Info*)((unsigned int)gcvt | GC_CLASS_FLAG_FINALIZER); + gcvt = (GC_VTable_Info*)((POINTER_SIZE_INT)gcvt | GC_CLASS_FLAG_FINALIZER); vtable_set_gcvt(vt, gcvt); Index: vm/gc_gen/src/common/gc_for_class.h =================================================================== --- vm/gc_gen/src/common/gc_for_class.h (revision 499692) +++ vm/gc_gen/src/common/gc_for_class.h (working copy) @@ -154,7 +154,7 @@ { assert(vt && vt->gcvt); return vt->gcvt; } inline GC_VTable_Info *vtable_get_gcvt(Partial_Reveal_VTable *vt) -{ assert(vt && vt->gcvt); return (GC_VTable_Info*)((unsigned int)vt->gcvt & GC_CLASS_FLAGS_MASK); } +{ assert(vt && vt->gcvt); return (GC_VTable_Info*)((POINTER_SIZE_INT)vt->gcvt & GC_CLASS_FLAGS_MASK); } inline void vtable_set_gcvt(Partial_Reveal_VTable *vt, GC_VTable_Info *new_gcvt) { assert(vt && new_gcvt); vt->gcvt = new_gcvt; } @@ -174,14 +174,14 @@ inline Boolean object_has_ref_field(Partial_Reveal_Object *obj) { GC_VTable_Info *gcvt = obj_get_gcvt_raw(obj); - return (unsigned int)gcvt & GC_CLASS_FLAG_REFS; + return (POINTER_SIZE_INT)gcvt & GC_CLASS_FLAG_REFS; } inline Boolean object_has_ref_field_before_scan(Partial_Reveal_Object *obj) { Partial_Reveal_VTable *vt = obj_get_vt_raw(obj); GC_VTable_Info *gcvt = vtable_get_gcvt_raw(vt); - return (unsigned int)gcvt & GC_CLASS_FLAG_REFS; + return (POINTER_SIZE_INT)gcvt & GC_CLASS_FLAG_REFS; } inline unsigned int object_ref_field_num(Partial_Reveal_Object *obj) @@ -193,7 +193,7 @@ inline Boolean object_is_array(Partial_Reveal_Object *obj) { GC_VTable_Info *gcvt = obj_get_gcvt_raw(obj); - return ((unsigned int)gcvt & GC_CLASS_FLAG_ARRAY); + return ((POINTER_SIZE_INT)gcvt & GC_CLASS_FLAG_ARRAY); } inline Boolean obj_is_primitive_array(Partial_Reveal_Object *obj) @@ -244,7 +244,7 @@ inline Boolean type_has_finalizer(Partial_Reveal_VTable *vt) { GC_VTable_Info *gcvt = vtable_get_gcvt_raw(vt); - return (unsigned int)gcvt & GC_CLASS_FLAG_FINALIZER; + return (POINTER_SIZE_INT)gcvt & GC_CLASS_FLAG_FINALIZER; } #endif //#ifndef _GC_TYPES_H_ Index: vm/gc_gen/src/common/gc_for_vm.cpp =================================================================== --- vm/gc_gen/src/common/gc_for_vm.cpp (revision 499692) +++ vm/gc_gen/src/common/gc_for_vm.cpp (working copy) @@ -82,7 +82,10 @@ { Partial_Reveal_Object** p_ref = (Partial_Reveal_Object**)ref; Partial_Reveal_Object* p_obj = *p_ref; - if (p_obj == NULL) return; + /* we don't enumerate NULL reference and nos_boundary + FIXME:: nos_boundary is a static field in GCHelper.java for fast write barrier, not a real object reference + this should be fixed that magic Address field should not be enumerated. 
*/ + if (p_obj == NULL || p_obj == nos_boundary ) return; assert( !obj_is_marked_in_vt(p_obj)); /* for Minor_collection, it's possible for p_obj be forwarded in non-gen mark-forward GC. The forward bit is actually last cycle's mark bit. @@ -153,7 +156,6 @@ int32 gc_get_hashcode(Managed_Object_Handle p_object) { return 23; } - void gc_finalize_on_exit() { if(!IGNORE_FINREF ) @@ -169,3 +171,14 @@ * } * } */ + +extern Boolean JVMTI_HEAP_ITERATION; +void gc_iterate_heap() { + // data structures in not consistent for heap iteration + if (!JVMTI_HEAP_ITERATION) return; + + gc_gen_iterate_heap((GC_Gen *)p_global_gc); +} + + + Index: vm/gc_gen/src/common/gc_metadata.cpp =================================================================== --- vm/gc_gen/src/common/gc_metadata.cpp (revision 499692) +++ vm/gc_gen/src/common/gc_metadata.cpp (working copy) @@ -40,13 +40,13 @@ void* metadata = STD_MALLOC(seg_size); memset(metadata, 0, seg_size); gc_metadata.segments[0] = metadata; - metadata = (void*)round_up_to_size((unsigned int)metadata, METADATA_BLOCK_SIZE_BYTES); + metadata = (void*)round_up_to_size((POINTER_SIZE_INT)metadata, METADATA_BLOCK_SIZE_BYTES); gc_metadata.num_alloc_segs = 1; unsigned int i=0; unsigned int num_blocks = GC_METADATA_SIZE_BYTES/METADATA_BLOCK_SIZE_BYTES; for(i=0; i> 1; gc_metadata.free_task_pool = sync_pool_create(); for(i=0; ialloc_lock); return block; } - + unsigned int num_alloced = metadata->num_alloc_segs; if(num_alloced == GC_METADATA_SEGMENT_NUM){ printf("Run out GC metadata, please give it more segments!\n"); @@ -116,36 +116,35 @@ void *new_segment = STD_MALLOC(seg_size); memset(new_segment, 0, seg_size); metadata->segments[num_alloced] = new_segment; - new_segment = (void*)round_up_to_size((unsigned int)new_segment, METADATA_BLOCK_SIZE_BYTES); + new_segment = (void*)round_up_to_size((POINTER_SIZE_INT)new_segment, METADATA_BLOCK_SIZE_BYTES); metadata->num_alloc_segs = num_alloced + 1; unsigned int num_blocks = GC_METADATA_EXTEND_SIZE_BYTES/METADATA_BLOCK_SIZE_BYTES; unsigned int i=0; for(i=0; ialloc_lock); - return block; } @@ -160,7 +159,7 @@ Vector_Block* root_set = pool_iterator_next(pool); while(root_set){ - unsigned int* iter = vector_block_iterator_init(root_set); + POINTER_SIZE_INT* iter = vector_block_iterator_init(root_set); while(!vector_block_iterator_end(root_set,iter)){ Partial_Reveal_Object** p_ref = (Partial_Reveal_Object** )*iter; iter = vector_block_iterator_advance(root_set,iter); @@ -175,7 +174,7 @@ /* Condition obj_is_moved(p_obj) is for preventing mistaking previous mark bit of large obj as fw bit when fallback happens. * Because until fallback happens, perhaps the large obj hasn't been marked. So its mark bit remains as the last time. * In major collection condition obj_is_fw_in_oi(p_obj) can be omitted, - * for whose which can be scanned in MOS & NOS must have been set fw bit in oi. + * since those which can be scanned in MOS & NOS must have been set fw bit in oi. 
*/ assert(address_belongs_to_gc_heap(obj_get_fw_in_oi(p_obj), gc)); *p_ref = obj_get_fw_in_oi(p_obj); @@ -197,7 +196,7 @@ if( gc->collect_kind != MINOR_COLLECTION ) /* MINOR but not forwarding */ gc_update_repointed_sets(gc, metadata->gc_rootset_pool); else - gc_set_pool_clear(metadata->gc_rootset_pool); + gc_set_pool_clear(metadata->gc_rootset_pool); #ifndef BUILD_IN_REFERENT gc_update_finref_repointed_refs(gc); @@ -337,7 +336,6 @@ if(verify_live_heap ){ unsigned int free_pool_size = pool_size(metadata->free_set_pool); - printf("===========%s, free_pool_size = %d =============\n", is_before_gc?"before GC":"after GC", free_pool_size); } return; Index: vm/gc_gen/src/common/gc_metadata.h =================================================================== --- vm/gc_gen/src/common/gc_metadata.h (revision 499692) +++ vm/gc_gen/src/common/gc_metadata.h (working copy) @@ -30,7 +30,7 @@ typedef struct GC_Metadata{ void *segments[GC_METADATA_SEGMENT_NUM]; /* address array of malloced segments for free pool */ - unsigned int num_alloc_segs; /* next available position in pool_segments array */ + unsigned int num_alloc_segs; /* allocated segment number */ SpinLock alloc_lock; Pool* free_task_pool; /* list of free buffers for mark tasks */ @@ -107,7 +107,7 @@ assert( p_ref >= gc_heap_base_address() && p_ref < gc_heap_ceiling_address()); Vector_Block* root_set = mutator->rem_set; - vector_block_add_entry(root_set, (unsigned int)p_ref); + vector_block_add_entry(root_set, (POINTER_SIZE_INT)p_ref); if( !vector_block_is_full(root_set)) return; @@ -121,7 +121,7 @@ // assert( p_ref >= gc_heap_base_address() && p_ref < gc_heap_ceiling_address()); Vector_Block* root_set = collector->rep_set; - vector_block_add_entry(root_set, (unsigned int)p_ref); + vector_block_add_entry(root_set, (POINTER_SIZE_INT)p_ref); if( !vector_block_is_full(root_set)) return; @@ -135,7 +135,7 @@ //assert( p_ref >= gc_heap_base_address() && p_ref < gc_heap_ceiling_address()); Vector_Block* root_set = collector->rem_set; - vector_block_add_entry(root_set, (unsigned int)p_ref); + vector_block_add_entry(root_set, (POINTER_SIZE_INT)p_ref); if( !vector_block_is_full(root_set)) return; @@ -149,7 +149,7 @@ /* we don't have assert as others because p_task is a p_obj for marking, or a p_ref for trace forwarding. 
The latter can be a root set pointer */ Vector_Block* trace_task = (Vector_Block*)collector->trace_stack; - vector_stack_push(trace_task, (unsigned int)p_task); + vector_stack_push(trace_task, (POINTER_SIZE_INT)p_task); if( !vector_stack_is_full(trace_task)) return; @@ -163,7 +163,7 @@ assert( p_ref < gc_heap_base_address() || p_ref >= gc_heap_ceiling_address()); Vector_Block* root_set = gc->root_set; - vector_block_add_entry(root_set, (unsigned int)p_ref); + vector_block_add_entry(root_set, (POINTER_SIZE_INT)p_ref); if( !vector_block_is_full(root_set)) return; Index: vm/gc_gen/src/common/gc_platform.h =================================================================== --- vm/gc_gen/src/common/gc_platform.h (revision 499692) +++ vm/gc_gen/src/common/gc_platform.h (working copy) @@ -21,19 +21,24 @@ #ifndef _GC_PLATFORM_H_ #define _GC_PLATFORM_H_ +#include "port_vmem.h" + #include + +#ifdef __linux__ #include +#endif #include #include #include +extern char* large_page_hint; #ifndef _DEBUG //#define RELEASE_DEBUG - #ifdef RELEASE_DEBUG #undef assert #define assert(x) do{ if(!(x)) __asm{int 3}}while(0) @@ -41,6 +46,16 @@ #endif //_DEBUG +#ifdef _WINDOWS_ +#define FORCE_INLINE __forceinline +#else + +#ifdef __linux__ +#define FORCE_INLINE inline __attribute__((always_inline)) +#endif + +#endif + #define USEC_PER_SEC INT64_C(1000000) #define VmThreadHandle void* @@ -106,20 +121,31 @@ inline void pool_destroy(apr_pool_t *p) { apr_pool_destroy(p); } -#ifndef _WIN32 +#ifndef _WINDOWS_ #include #endif +inline unsigned int vm_get_system_alloc_unit() +{ +#ifdef _WINDOWS_ + SYSTEM_INFO si; + GetSystemInfo(&si); + return si.dwAllocationGranularity; +#else + return port_vmem_page_sizes()[0]; +#endif +} + inline void *vm_map_mem(void* start, unsigned int size) { void* address; -#ifdef _WIN32 +#ifdef _WINDOWS_ address = VirtualAlloc(start, size, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE); -#else +#else address = mmap(start, size, PROT_READ|PROT_WRITE, MAP_FIXED|MAP_PRIVATE|MAP_ANONYMOUS, -1, 0); if(address == MAP_FAILED) address = NULL; -#endif /* ifdef _WIN32 else */ +#endif /* ifdef _WINDOWS_ else */ return address; } @@ -127,13 +153,13 @@ inline Boolean vm_unmap_mem(void* start, unsigned int size) { unsigned int result; -#ifdef _WIN32 +#ifdef _WINDOWS_ result = VirtualFree(start, 0, MEM_RELEASE); #else result = munmap(start, size); - if(result == -1) result = 0; - -#endif /* ifdef _WIN32 else */ + if(result == 0) result = TRUE; + else result = FALSE; +#endif /* ifdef _WINDOWS_ else */ return result; } @@ -141,13 +167,13 @@ inline void *vm_alloc_mem(void* start, unsigned int size) { void* address; -#ifdef _WIN32 +#ifdef _WINDOWS_ address = VirtualAlloc(start, size, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE); #else address = mmap(start, size, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0); if(address == MAP_FAILED) address = NULL; -#endif /* ifdef _WIN32 else */ +#endif /* ifdef _WINDOWS_ else */ return address; } @@ -160,25 +186,36 @@ inline void *vm_reserve_mem(void* start, unsigned int size) { void* address; -#ifdef _WIN32 +#ifdef _WINDOWS_ address = VirtualAlloc(start, size, MEM_RESERVE, PAGE_READWRITE); #else - address = mmap(start, size, PROT_READ|PROT_WRITE, MAP_FIXED|MAP_PRIVATE|MAP_ANONYMOUS, -1, 0); + if(start == 0) + address = mmap(0, size, PROT_NONE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0); + else + address = mmap(start, size, PROT_NONE, MAP_FIXED|MAP_PRIVATE|MAP_ANONYMOUS, -1, 0); + if(address == MAP_FAILED) address = NULL; -#endif /* ifdef _WIN32 else */ +#endif /* ifdef _WINDOWS_ else */ 
return address; } +inline Boolean vm_release_mem(void* start, unsigned int size) +{ + return vm_unmap_mem(start, size); +} + inline void *vm_commit_mem(void* start, unsigned int size) { void* address; -#ifdef _WIN32 +#ifdef _WINDOWS_ address = VirtualAlloc(start, size, MEM_COMMIT, PAGE_READWRITE); #else - -#endif /* ifdef _WIN32 else */ + int result = mprotect(start, size, PROT_READ|PROT_WRITE); + if(result == 0) address = start; + else address = NULL; +#endif /* ifdef _WINDOWS_ else */ return address; } @@ -186,11 +223,14 @@ inline Boolean vm_decommit_mem(void* start, unsigned int size) { unsigned int result; -#ifdef _WIN32 +#ifdef _WINDOWS_ result = VirtualFree(start, size, MEM_DECOMMIT); #else + result = mprotect(start, size, PROT_NONE); + if(result == 0) result = TRUE; + else result = FALSE; -#endif /* ifdef _WIN32 else */ +#endif /* ifdef _WINDOWS_ else */ return result; } @@ -207,7 +247,7 @@ } #ifdef PLATFORM_POSIX -#define max(x, y) ((x)>(y)?(x):(y)) +#define max(x, y) (((x)>(y))?(x):(y)) #endif typedef volatile unsigned int SpinLock; Index: vm/gc_gen/src/common/gc_space.h =================================================================== --- vm/gc_gen/src/common/gc_space.h (revision 499692) +++ vm/gc_gen/src/common/gc_space.h (working copy) @@ -23,6 +23,8 @@ #include "gc_block.h" +extern unsigned int SPACE_ALLOC_UNIT; + struct GC; /* all Spaces inherit this Space structure */ typedef struct Space{ @@ -36,6 +38,10 @@ unsigned int collect_algorithm; GC* gc; Boolean move_object; + /*Size allocted after last collection. */ + unsigned int alloced_size; + /*For_statistic*/ + unsigned int surviving_size; }Space; inline unsigned int space_committed_size(Space* space){ return space->committed_heap_size;} @@ -65,6 +71,10 @@ unsigned int collect_algorithm; GC* gc; Boolean move_object; + /*Size allocted after last collection. 
*/ + unsigned int alloced_size; + /*For_statistic*/ + unsigned int surviving_size; /* END of Space --> */ Block* blocks; /* short-cut for mpsace blockheader access, not mandatory */ @@ -108,18 +118,18 @@ unsigned int block_dec_count = changed_size >> GC_BLOCK_SHIFT_COUNT; void* new_base = (void*)&(space->blocks[space->num_managed_blocks - block_dec_count]); - void* decommit_base = (void*)round_down_to_size((unsigned int)new_base, SYSTEM_ALLOC_UNIT); + void* decommit_base = (void*)round_down_to_size((POINTER_SIZE_INT)new_base, SPACE_ALLOC_UNIT); assert( ((Block_Header*)decommit_base)->block_idx >= space->free_block_idx); void* old_end = (void*)&space->blocks[space->num_managed_blocks]; - unsigned int decommit_size = (unsigned int)old_end - (unsigned int)decommit_base; + unsigned int decommit_size = (POINTER_SIZE_INT)old_end - (POINTER_SIZE_INT)decommit_base; assert(decommit_size && !(decommit_size%GC_BLOCK_SIZE_BYTES)); Boolean result = vm_decommit_mem(decommit_base, decommit_size); assert(result == TRUE); - space->committed_heap_size = (unsigned int)decommit_base - (unsigned int)space->heap_start; + space->committed_heap_size = (POINTER_SIZE_INT)decommit_base - (POINTER_SIZE_INT)space->heap_start; space->num_managed_blocks = space->committed_heap_size >> GC_BLOCK_SHIFT_COUNT; Block_Header* new_last_block = (Block_Header*)&space->blocks[space->num_managed_blocks - 1]; @@ -132,16 +142,16 @@ unsigned int block_inc_count = changed_size >> GC_BLOCK_SHIFT_COUNT; void* old_base = (void*)&space->blocks[space->num_managed_blocks]; - void* commit_base = (void*)round_down_to_size((unsigned int)old_base, SYSTEM_ALLOC_UNIT); - unsigned int block_diff_count = ((unsigned int)old_base - (unsigned int)commit_base) >> GC_BLOCK_SHIFT_COUNT; + void* commit_base = (void*)round_down_to_size((POINTER_SIZE_INT)old_base, SPACE_ALLOC_UNIT); + unsigned int block_diff_count = ((POINTER_SIZE_INT)old_base - (POINTER_SIZE_INT)commit_base) >> GC_BLOCK_SHIFT_COUNT; block_inc_count += block_diff_count; unsigned int commit_size = block_inc_count << GC_BLOCK_SHIFT_COUNT; void* result = vm_commit_mem(commit_base, commit_size); assert(result == commit_base); - void* new_end = (void*)((unsigned int)commit_base + commit_size); - space->committed_heap_size = (unsigned int)new_end - (unsigned int)space->heap_start; + void* new_end = (void*)((POINTER_SIZE_INT)commit_base + commit_size); + space->committed_heap_size = (POINTER_SIZE_INT)new_end - (POINTER_SIZE_INT)space->heap_start; /* init the grown blocks */ Block_Header* block = (Block_Header*)commit_base; Index: vm/gc_gen/src/common/interior_pointer.cpp =================================================================== --- vm/gc_gen/src/common/interior_pointer.cpp (revision 499692) +++ vm/gc_gen/src/common/interior_pointer.cpp (working copy) @@ -25,9 +25,9 @@ void gc_add_root_set_entry(Managed_Object_Handle *ref, Boolean is_pinned); typedef struct slot_offset_entry_struct{ - void** slot; - unsigned int offset; - Partial_Reveal_Object *base; + void** slot; + unsigned int offset; + Partial_Reveal_Object *base; } slot_offset_entry; static std::vector interior_pointer_set; @@ -38,49 +38,49 @@ void add_root_set_entry_interior_pointer(void **slot, int offset, Boolean is_pinned) { - //check size; - if( interior_pointer_set.size() == interior_pointer_num_count ) - { - unsigned int size = interior_pointer_num_count == 0 ? 
initial_vector_size : interior_pointer_set.size()*2; - interior_pointer_set.resize(size); - } + //check size; + if( interior_pointer_set.size() == interior_pointer_num_count ) + { + unsigned int size = interior_pointer_num_count == 0 ? initial_vector_size : interior_pointer_set.size()*2; + interior_pointer_set.resize(size); + } - Partial_Reveal_Object* p_obj = (Partial_Reveal_Object*) ((Byte*)*slot - offset); - assert(p_obj->vt_raw); - slot_offset_entry* push_back_entry = (slot_offset_entry*)&interior_pointer_set[interior_pointer_num_count++]; - push_back_entry->offset = offset; - push_back_entry->slot = slot; - push_back_entry->base = p_obj; + Partial_Reveal_Object* p_obj = (Partial_Reveal_Object*) ((Byte*)*slot - offset); + assert(p_obj->vt_raw); + slot_offset_entry* push_back_entry = (slot_offset_entry*)&interior_pointer_set[interior_pointer_num_count++]; + push_back_entry->offset = offset; + push_back_entry->slot = slot; + push_back_entry->base = p_obj; } void gc_copy_interior_pointer_table_to_rootset() { - unsigned int i; - for( i = 0; ibase)), FALSE); - } + unsigned int i; + for( i = 0; ibase)), FALSE); + } } void update_rootset_interior_pointer() { - unsigned int i; - for( i = 0; islot; - Partial_Reveal_Object* root_base = (Partial_Reveal_Object*)entry_traverser->base; - unsigned int root_offset = entry_traverser->offset; - void *new_slot_contents = (void *)((Byte*)root_base + root_offset); - *root_slot = new_slot_contents; - } + unsigned int i; + for( i = 0; islot; + Partial_Reveal_Object* root_base = (Partial_Reveal_Object*)entry_traverser->base; + unsigned int root_offset = entry_traverser->offset; + void *new_slot_contents = (void *)((Byte*)root_base + root_offset); + *root_slot = new_slot_contents; + } //can not reset the table here, for the rootset may be updated multi times } void gc_reset_interior_pointer_table() { - interior_pointer_num_count = 0; - //this function is for the case of out of space which need to call update_rootset_interior_pointer multi-times + interior_pointer_num_count = 0; + //this function is for the case of out of memory which need to call update_rootset_interior_pointer multi-times } Index: vm/gc_gen/src/common/large_pages.cpp =================================================================== --- vm/gc_gen/src/common/large_pages.cpp (revision 0) +++ vm/gc_gen/src/common/large_pages.cpp (revision 0) @@ -0,0 +1,180 @@ +/* + * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * @author Yu-Nan He, 2007/01/18 + */ +#include "gc_common.h" +char* large_page_hint = NULL; + +#if defined (_WINDOWS_) +Boolean set_privilege(HANDLE process, LPCTSTR priv_name, Boolean is_enable) +{ + HANDLE token; + TOKEN_PRIVILEGES tp; + bool res = OpenProcessToken(process, TOKEN_ADJUST_PRIVILEGES, &token); + if(!res){ + return FALSE; + } + + tp.PrivilegeCount = 1; + tp.Privileges[0].Attributes = is_enable ? 
SE_PRIVILEGE_ENABLED : 0; + + res = LookupPrivilegeValue( NULL, priv_name, &tp.Privileges[0].Luid); + if(!res){ + CloseHandle(token); + return FALSE; + } + + if (AdjustTokenPrivileges( token, FALSE, &tp, 0, NULL, 0) == ERROR_NOT_ALL_ASSIGNED) { + CloseHandle(token); + return FALSE; + } + return TRUE; +} + +Boolean obtain_lock_memory_priv() +{ + HANDLE process = GetCurrentProcess(); + return set_privilege(process, SE_LOCK_MEMORY_NAME, TRUE); +} + +Boolean release_lock_memory_priv() +{ + HANDLE process = GetCurrentProcess(); + return set_privilege(process, SE_LOCK_MEMORY_NAME, TRUE); +} + +void* alloc_large_pages(size_t size, const char* hint) +{ + void* alloc_addr = NULL; + bool lock_memory_enable = obtain_lock_memory_priv(); + + if(lock_memory_enable){ + alloc_addr = VirtualAlloc(NULL, size, MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES, PAGE_READWRITE); + release_lock_memory_priv(); + if(alloc_addr == NULL){ + printf("GC large_page: No required number of large pages found. Please reboot.....\n"); + return NULL; + }else + return alloc_addr; + }else{ + printf("GC large_page: Check that you have permissions:\n"); + printf("GC large_page: Control Panel->Administrative Tools->Local Security Settings->->User Rights Assignment->Lock pages in memory;\n"); + printf("GC large_page: Start VM as soon after reboot as possible, because large pages become fragmented and unusable after a while;\n"); + printf("GC large_page: Heap size should be multiple of large page size.\n"); + return NULL; + } +} + +#elif defined (__linux__) +#include +#include + +static size_t proc_huge_page_size = 4 * MB; +static size_t proc_huge_pages_total = (size_t)-1; +static size_t proc_huge_pages_free = 0; +static const char* str_HugePages_Total = "HugePages_Total:"; +static const char* str_HugePages_Free = "HugePages_Free:"; +static const char* str_Hugepagesize = "Hugepagesize:"; + + +static const char* parse_value(const char* buf, int len, const char* name, int name_len, size_t* value){ + if (len < name_len) return NULL; + if (strncmp(buf, name, name_len)) return NULL; + buf += name_len; + char* endpos; + long int res = strtol(buf, &endpos, 10); + if (endpos == buf) return NULL; + *value = (size_t) res; + return endpos; +} + +static void parse_proc_meminfo(size_t required_size){ + FILE* f = fopen("/proc/meminfo", "r"); + if (f == NULL){ + printf("GC large_page: Can't open /proc/meminfo \n"); + return; + } + + size_t size = 128; + char* buf = (char*) malloc(size); + while (true){ + ssize_t len = getline(&buf, &size, f); + if (len == -1) break; + parse_value(buf, len, str_HugePages_Total, strlen(str_HugePages_Total), &proc_huge_pages_total); + parse_value(buf, len, str_HugePages_Free, strlen(str_HugePages_Free), &proc_huge_pages_free); + const char* end =parse_value(buf, len, str_Hugepagesize, strlen(str_Hugepagesize), &proc_huge_page_size); + if (end && !strncmp(end, " kB", 3)) proc_huge_page_size *= KB; + } + if (buf) free(buf); + + if (proc_huge_pages_total == (size_t)-1){ + printf("GC large_page: Large pages are not supported by kernel.\n"); + printf("GC large_page: CONFIG_HUGETLB_PAGE and CONFIG_HUGETLBFS needs to be enabled.\n"); + } else if (proc_huge_pages_total == 0){ + printf("GC large_page: No large pages reserved, Use following command: echo num> /proc/sys/vm/nr_hugepages.\n"); + printf("GC large_page: Do it just after kernel boot before huge pages become fragmented.\n"); + } else if (proc_huge_pages_free * proc_huge_page_size < required_size) { + if (proc_huge_pages_total * proc_huge_page_size >= required_size) { + 
printf("GC large_page: Not enough free large pages, some of reserved space is already busy.\n"); + } else { + printf("GC large_page: Not enough reserved large pages.\n"); + } + } +} + +void* mmap_large_pages(size_t size, const char* path) +{ + const char* postfix = "/vm_heap"; + char* buf = (char*) malloc(strlen(path) + strlen(postfix) + 1); + assert(buf); + + strcpy(buf, path); + strcat(buf, postfix); + + int fd = open(buf, O_CREAT | O_RDWR, 0700); + if (fd == -1){ + printf("GC large_page: Can't open Mount hugetlbfs with: mount none /mnt/huge -t hugetlbfs.\n"); + printf("GC large_page: Check you have appropriate permissions to /mnt/huge.\n"); + printf("GC large_page: Use command line switch -Dgc.use_large_page=/mnt/huge.\n"); + free(buf); + return NULL; + } + unlink(buf); + + void* addr = mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); + if (addr == MAP_FAILED){ + printf("GC large_page: Map failed.\n"); + close(fd); + free(buf); + return NULL; + } + close(fd); + free(buf); + return addr; +} + +void* alloc_large_pages(size_t size, const char* hint){ + parse_proc_meminfo(size); + void* alloc_addr = mmap_large_pages(size, hint); + if(alloc_addr == NULL || ((POINTER_SIZE_INT)alloc_addr%proc_huge_page_size!=0)){ + printf("GC large_page: Large pages allocation failed.\n"); + return NULL; + } + return alloc_addr; +} +#endif Index: vm/gc_gen/src/common/mark_scan_pool.cpp =================================================================== --- vm/gc_gen/src/common/mark_scan_pool.cpp (revision 499692) +++ vm/gc_gen/src/common/mark_scan_pool.cpp (working copy) @@ -23,7 +23,7 @@ #include "../gen/gen.h" #include "../finalizer_weakref/finalizer_weakref.h" -static void scan_slot(Collector* collector, Partial_Reveal_Object** p_ref) +static FORCE_INLINE void scan_slot(Collector* collector, Partial_Reveal_Object** p_ref) { Partial_Reveal_Object* p_obj = *p_ref; if(p_obj==NULL) return; @@ -35,7 +35,7 @@ } -static void scan_object(Collector* collector, Partial_Reveal_Object *p_obj) +static FORCE_INLINE void scan_object(Collector* collector, Partial_Reveal_Object *p_obj) { if( !object_has_ref_field(p_obj) ) return; @@ -46,7 +46,7 @@ Partial_Reveal_Array* array = (Partial_Reveal_Array*)p_obj; unsigned int array_length = array->array_len; - p_ref = (Partial_Reveal_Object**)((int)array + (int)array_first_element_offset(array)); + p_ref = (Partial_Reveal_Object**)((POINTER_SIZE_INT)array + (int)array_first_element_offset(array)); for (unsigned int i = 0; i < array_length; i++) { scan_slot(collector, p_ref+i); @@ -120,7 +120,7 @@ /* first step: copy all root objects to mark tasks. 
FIXME:: can be done sequentially before coming here to eliminate atomic ops */ while(root_set){ - unsigned int* iter = vector_block_iterator_init(root_set); + POINTER_SIZE_INT* iter = vector_block_iterator_init(root_set); while(!vector_block_iterator_end(root_set,iter)){ Partial_Reveal_Object** p_ref = (Partial_Reveal_Object** )*iter; iter = vector_block_iterator_advance(root_set,iter); @@ -152,7 +152,7 @@ Vector_Block* mark_task = pool_get_entry(metadata->mark_task_pool); while(mark_task){ - unsigned int* iter = vector_block_iterator_init(mark_task); + POINTER_SIZE_INT* iter = vector_block_iterator_init(mark_task); while(!vector_block_iterator_end(mark_task,iter)){ Partial_Reveal_Object* p_obj = (Partial_Reveal_Object *)*iter; iter = vector_block_iterator_advance(mark_task,iter); @@ -188,37 +188,7 @@ return; } -/* this is to resurrect p_obj and its decedants for some reason, here for finalizables */ -void resurrect_obj_tree_after_mark(Collector *collector, Partial_Reveal_Object *p_obj) +void trace_obj_in_marking(Collector *collector, void *p_obj) { - GC *gc = collector->gc; - GC_Metadata* metadata = gc->metadata; - - obj_mark_in_vt(p_obj); - collector->trace_stack = free_task_pool_get_entry(metadata); - collector_tracestack_push(collector, p_obj); - pool_put_entry(metadata->mark_task_pool, collector->trace_stack); - -//collector->rep_set = free_set_pool_get_entry(metadata); /* has got collector->rep_set in caller */ - collector->trace_stack = free_task_pool_get_entry(metadata); - Vector_Block* mark_task = pool_get_entry(metadata->mark_task_pool); - while(mark_task){ - unsigned int* iter = vector_block_iterator_init(mark_task); - while(!vector_block_iterator_end(mark_task,iter)){ - Partial_Reveal_Object* p_obj = (Partial_Reveal_Object *)*iter; - trace_object(collector, p_obj); - iter = vector_block_iterator_advance(mark_task, iter); - } - /* run out one task, put back to the pool and grab another task */ - vector_stack_clear(mark_task); - pool_put_entry(metadata->free_task_pool, mark_task); - mark_task = pool_get_entry(metadata->mark_task_pool); - } - - mark_task = (Vector_Block*)collector->trace_stack; - vector_stack_clear(mark_task); - pool_put_entry(metadata->free_task_pool, mark_task); - collector->trace_stack = NULL; -//pool_put_entry(metadata->collector_repset_pool, collector->rep_set); /* has got collector->rep_set in caller */ -//collector->rep_set = NULL; /* has got collector->rep_set in caller */ + trace_object(collector, (Partial_Reveal_Object *)p_obj); } Index: vm/gc_gen/src/common/space_tuner.cpp =================================================================== --- vm/gc_gen/src/common/space_tuner.cpp (revision 499692) +++ vm/gc_gen/src/common/space_tuner.cpp (working copy) @@ -20,42 +20,194 @@ #include "space_tuner.h" -#define GC_LOS_MIN_VARY_SIZE ( 2 * 1024 * 1024 ) +#include - struct GC_Gen; +struct Mspace; +struct Lspace; Space* gc_get_mos(GC_Gen* gc); Space* gc_get_nos(GC_Gen* gc); +Space* gc_get_los(GC_Gen* gc); +unsigned int mspace_get_expected_threshold(Mspace* mspace); +unsigned int lspace_get_failure_size(Lspace* lspace); + +/*Now just prepare the alloc_size field of mspace, used to compute new los size.*/ +void gc_space_tune_prepare(GC* gc, unsigned int cause) +{ + if(gc->collect_kind == MINOR_COLLECTION) + return; + + Blocked_Space* mspace = (Blocked_Space*)gc_get_mos((GC_Gen*)gc); + Blocked_Space* fspace = (Blocked_Space*)gc_get_nos((GC_Gen*)gc); + Space* lspace = (Space*)gc_get_los((GC_Gen*)gc); + Space_Tuner* tuner = gc->tuner; -void gc_space_tune(GC* gc, 
unsigned int cause) + assert(fspace->free_block_idx > fspace->first_block_idx); + unsigned int nos_alloc_size = (fspace->free_block_idx - fspace->first_block_idx) * GC_BLOCK_SIZE_BYTES; + fspace->alloced_size = nos_alloc_size; + mspace->alloced_size += (unsigned int)((float)nos_alloc_size * fspace->survive_ratio); + + /*For_statistic alloc speed: Speed could be represented by sum of alloced size.*/ + tuner->speed_los += lspace->alloced_size; + tuner->speed_mos += mspace->alloced_size; + + /*For_statistic wasted memory*/ + unsigned int curr_used_los = lspace->surviving_size + lspace->alloced_size; + assert(curr_used_los < lspace->committed_heap_size); + unsigned int curr_wast_los = lspace->committed_heap_size - curr_used_los; + tuner->wast_los += curr_wast_los; + unsigned int curr_used_mos = mspace->surviving_size + mspace->alloced_size; + unsigned int curr_wast_mos = mspace_get_expected_threshold((Mspace*)mspace) - curr_used_mos; + tuner->wast_mos += curr_wast_mos; + tuner->current_dw = abs((int)tuner->wast_mos - (int)tuner->wast_los); + + /*For_statistic ds in heuristic*/ + tuner->current_ds = (unsigned int)((float)fspace->committed_heap_size * fspace->survive_ratio); + + /*Fixme: Threshold should be computed by heuristic. tslow, total recycled heap size shold be statistic.*/ + tuner->threshold = tuner->current_ds; + //For debug + if(tuner->threshold > 8 * MB) tuner->threshold = 8 * MB; + + tuner->min_tuning_size = tuner->current_ds; + //For debug + if(tuner->min_tuning_size > 4 * MB) tuner->min_tuning_size = 4 * MB; +} + +void gc_space_tune_before_gc(GC* gc, unsigned int cause) { + if(gc->collect_kind == MINOR_COLLECTION) return; + + Space_Tuner* tuner = gc->tuner; + + /*Only tune when LOS need extend*/ + if( tuner->wast_los > tuner->wast_mos ) return; + + /*Needn't tune if dw does not reach threshold.*/ + if(tuner->current_dw < tuner->threshold) return; + + Blocked_Space* mspace = (Blocked_Space*)gc_get_mos((GC_Gen*)gc); + Blocked_Space* fspace = (Blocked_Space*)gc_get_nos((GC_Gen*)gc); + Space* lspace = (Space*)gc_get_los((GC_Gen*)gc); + + unsigned int los_expect_survive_sz = (unsigned int)((float)(lspace->surviving_size + lspace->alloced_size) * lspace->survive_ratio); + unsigned int los_expect_free_sz = lspace->committed_heap_size - los_expect_survive_sz; + + unsigned int mos_expect_survive_sz = (unsigned int)((float)(mspace->surviving_size + mspace->alloced_size) * mspace->survive_ratio); + unsigned int mos_expect_free_sz = mspace_get_expected_threshold((Mspace*)mspace) - mos_expect_survive_sz; + + unsigned int total_free = los_expect_free_sz + mos_expect_free_sz; + + float new_los_ratio = (float)tuner->speed_los / (float)(tuner->speed_los + tuner->speed_mos); + unsigned int new_free_los_sz = (unsigned int)((float)total_free * new_los_ratio); + + if((int)new_free_los_sz - (int)los_expect_free_sz > (int)tuner->min_tuning_size){ + tuner->kind = TRANS_FROM_MOS_TO_LOS; + tuner->tuning_size = round_up_to_size(new_free_los_sz - los_expect_free_sz, SPACE_ALLOC_UNIT); + tuner->least_tuning_size = round_up_to_size(lspace_get_failure_size((Lspace*)lspace), SPACE_ALLOC_UNIT); + tuner->conservative_tuning_size = round_up_to_size(((tuner->tuning_size + tuner->least_tuning_size) >> 1), SPACE_ALLOC_UNIT); + + unsigned int none_los_size; + #ifdef STATIC_NOS_MAPPING + none_los_size = mspace->committed_heap_size; + #else + /*Fixme: There should be a minimal remain size like heap_size >> 3.*/ + none_los_size = mspace->committed_heap_size + fspace->committed_heap_size; + #endif + + 
if(tuner->tuning_size < none_los_size) return; + + tuner->tuning_size = tuner->conservative_tuning_size; + + if(tuner->tuning_size < none_los_size) return; + + tuner->tuning_size = tuner->least_tuning_size; + + if(tuner->tuning_size == 0) tuner->kind = TRANS_NOTHING; + + if(tuner->tuning_size < none_los_size) return; + + printf("Out of Memory!\n"); + assert(0); + exit(0); + + } +} + +void gc_space_tune_before_gc_simplified(GC* gc, unsigned int cause) +{ if((gc->collect_kind == MINOR_COLLECTION) || (cause != GC_CAUSE_LOS_IS_FULL) ) - return; - + return; + Space_Tuner* tuner = gc->tuner; tuner->kind = TRANS_FROM_MOS_TO_LOS; Blocked_Space* mspace = (Blocked_Space*)gc_get_mos((GC_Gen*)gc); Blocked_Space* fspace = (Blocked_Space*)gc_get_nos((GC_Gen*)gc); + Space* lspace = (Space*)gc_get_los((GC_Gen*)gc); - unsigned int mos_free_block_nr = (mspace->ceiling_block_idx - mspace->free_block_idx + 1); - unsigned int nos_used_block_nr = fspace->free_block_idx - fspace->first_block_idx; - unsigned int mos_wast_block_nr = mos_free_block_nr - nos_used_block_nr; - unsigned int min_vary_block_nr = (GC_LOS_MIN_VARY_SIZE >> GC_BLOCK_SHIFT_COUNT); - if( mos_wast_block_nr > min_vary_block_nr ){ - tuner->tuning_size = min_vary_block_nr << GC_BLOCK_SHIFT_COUNT; - }else{ - tuner->tuning_size = mos_wast_block_nr << GC_BLOCK_SHIFT_COUNT; + /*Fixme: this branch should be modified after the policy of gen major is decieded!*/ + if(false){ + unsigned int mos_free_sz = ((mspace->ceiling_block_idx - mspace->free_block_idx + 1) << GC_BLOCK_SHIFT_COUNT); + unsigned int nos_survive_sz = + (unsigned int)((float)((fspace->free_block_idx - fspace->first_block_idx) << GC_BLOCK_SHIFT_COUNT) * fspace->survive_ratio); + int mos_wast_sz = mos_free_sz - nos_survive_sz; + + if( mos_wast_sz > GC_LOS_MIN_VARY_SIZE){ + tuner->tuning_size = GC_LOS_MIN_VARY_SIZE; + }else if(mos_wast_sz > 0){ + tuner->tuning_size = mos_wast_sz; + }else + tuner->tuning_size = 0; + } + /*For non gen virable sized NOS*/ + else + { + unsigned int los_fail_sz = lspace_get_failure_size((Lspace*)lspace); + + if(los_fail_sz > GC_LOS_MIN_VARY_SIZE){ + /*Fixme: we should set the least_tuning_size after finding out the biggest free area in LOS, this number could be zero*/ + tuner->tuning_size = los_fail_sz; + tuner->least_tuning_size = los_fail_sz; + tuner->conservative_tuning_size = los_fail_sz; + }else{ + tuner->tuning_size = GC_LOS_MIN_VARY_SIZE; + tuner->least_tuning_size = los_fail_sz; + tuner->conservative_tuning_size = ((tuner->tuning_size + tuner->min_tuning_size) >> 1); + } + + unsigned int none_los_size; +#ifdef STATIC_NOS_MAPPING + none_los_size = mspace->committed_heap_size; +#else + none_los_size = mspace->committed_heap_size + fspace->committed_heap_size; +#endif + if(tuner->tuning_size > none_los_size){ + tuner->tuning_size = tuner->conservative_tuning_size; + } + if(tuner->tuning_size > none_los_size){ + tuner->tuning_size = tuner->least_tuning_size; + } + if(tuner->tuning_size > none_los_size){ + printf("Out of Memory!\n"); + assert(0); + exit(0); + } + + } + + /*Fixme: Should MOS heap_start must be 64k aligned?*/ + tuner->tuning_size = round_down_to_size(tuner->tuning_size, SPACE_ALLOC_UNIT); if(tuner->tuning_size == 0) tuner->kind = TRANS_NOTHING; - return; + return; } void gc_space_tuner_reset(GC* gc) { - if(gc->collect_kind != MINOR_COLLECTION){ + if((gc->collect_kind != MINOR_COLLECTION) && (gc->tuner->kind != TRANS_NOTHING)){ Space_Tuner* tuner = gc->tuner; memset(tuner, 0, sizeof(Space_Tuner)); } Index: vm/gc_gen/src/common/space_tuner.h 
=================================================================== --- vm/gc_gen/src/common/space_tuner.h (revision 499692) +++ vm/gc_gen/src/common/space_tuner.h (working copy) @@ -24,6 +24,9 @@ #include "gc_common.h" #include "gc_space.h" +#define GC_LOS_MIN_VARY_SIZE ( 2 * MB ) +#define GC_FIXED_SIZE_TUNER + //For_LOS_extend enum Transform_Kind { TRANS_NOTHING = 0, @@ -32,13 +35,41 @@ }; typedef struct Space_Tuner{ - /*fixme: Now we use static value of GC_LOS_MIN_VARY_SIZE. */ - unsigned int tuning_threshold; Transform_Kind kind; + unsigned int tuning_size; + unsigned int conservative_tuning_size; + unsigned int least_tuning_size; + unsigned int force_tune; + + /*LOS alloc speed sciecne last los variation*/ + unsigned int speed_los; + /*MOS alloc speed sciecne last los variation*/ + unsigned int speed_mos; + + /*Total wasted memory of los science last los variation*/ + unsigned int wast_los; + /*Total wasted memory of mos science last los variation*/ + unsigned int wast_mos; + + unsigned int current_dw; + /*NOS survive size of last minor, this could be the least meaningful space unit when talking about tuning.*/ + unsigned int current_ds; + + /*Threshold for deta wast*/ + unsigned int threshold; + /*Minimun tuning size for los variation*/ + unsigned int min_tuning_size; + + /*Cost of normal major compaction*/ + unsigned int fast_cost; + /*Cost of major compaction when changing LOS size*/ + unsigned int slow_cost; }Space_Tuner; -void gc_space_tune(GC* gc, unsigned int cause); +void gc_space_tune_prepare(GC* gc, unsigned int cause); +void gc_space_tune_before_gc(GC* gc, unsigned int cause); +void gc_space_tune_before_gc_simplified(GC* gc, unsigned int cause); void gc_space_tuner_reset(GC* gc); void gc_space_tuner_initialize(GC* gc); Index: vm/gc_gen/src/finalizer_weakref/finalizer_weakref.cpp =================================================================== --- vm/gc_gen/src/finalizer_weakref/finalizer_weakref.cpp (revision 499692) +++ vm/gc_gen/src/finalizer_weakref/finalizer_weakref.cpp (working copy) @@ -29,198 +29,106 @@ Boolean IGNORE_FINREF = TRUE; -/* reset obj_with_fin vector block of each mutator */ -void mutator_reset_obj_with_fin(Mutator *mutator) -{ - mutator->obj_with_fin = finref_get_free_block(); -} -void gc_set_obj_with_fin(GC *gc) +static inline Boolean obj_is_dead_in_gen_minor_gc(Partial_Reveal_Object *p_obj) { - Finref_Metadata *metadata = gc->finref_metadata; - Pool *obj_with_fin_pool = metadata->obj_with_fin_pool; - - /* put back last obj_with_fin block of each mutator */ - Mutator *mutator = gc->mutator_list; - while(mutator){ - pool_put_entry(obj_with_fin_pool, mutator->obj_with_fin); - mutator->obj_with_fin = NULL; - mutator = mutator->next; - } - return; + /* + * The first condition is for supporting switch between nongen and gen minor collection + * With this kind of switch dead objects in MOS & LOS may be set the mark or fw bit in oi + */ + return obj_belongs_to_nos(p_obj) && !obj_is_marked_or_fw_in_oi(p_obj); } -/* reset weak references vetctor block of each collector */ -void collector_reset_weakref_sets(Collector *collector) +static inline Boolean obj_is_dead_in_nongen_minor_gc(Partial_Reveal_Object *p_obj) { - collector->softref_set = finref_get_free_block(); - collector->weakref_set = finref_get_free_block(); - collector->phanref_set= finref_get_free_block(); + return (obj_belongs_to_nos(p_obj) && !obj_is_fw_in_oi(p_obj)) + || (!obj_belongs_to_nos(p_obj) && !obj_is_marked_in_oi(p_obj)); } -void gc_set_weakref_sets(GC *gc) +static inline Boolean 
obj_is_dead_in_major_gc(Partial_Reveal_Object *p_obj) { - Finref_Metadata *metadata = gc->finref_metadata; - - /* put back last weak references block of each collector */ - unsigned int num_active_collectors = gc->num_active_collectors; - for(unsigned int i = 0; i < num_active_collectors; i++) - { - Collector* collector = gc->collectors[i]; - pool_put_entry(metadata->softref_pool, collector->softref_set); - pool_put_entry(metadata->weakref_pool, collector->weakref_set); - pool_put_entry(metadata->phanref_pool, collector->phanref_set); - collector->softref_set = NULL; - collector->weakref_set= NULL; - collector->phanref_set= NULL; - } - return; -} - - -extern Boolean obj_is_dead_in_minor_forward_gc(Collector *collector, Partial_Reveal_Object *p_obj); -static inline Boolean obj_is_dead_in_minor_copy_gc(Collector *collector, Partial_Reveal_Object *p_obj) -{ return !obj_is_marked_in_vt(p_obj); } -static inline Boolean obj_is_dead_in_major_gc(Collector *collector, Partial_Reveal_Object *p_obj) -{ - return !obj_is_marked_in_vt(p_obj); -} // clear the two least significant bits of p_obj first -static inline Boolean obj_is_dead(Collector *collector, Partial_Reveal_Object *p_obj) +static inline Boolean gc_obj_is_dead(GC *gc, Partial_Reveal_Object *p_obj) { - GC *gc = collector->gc; + unsigned int collect_kind = gc->collect_kind; assert(p_obj); - if(gc->collect_kind == MINOR_COLLECTION){ + if(collect_kind == MINOR_COLLECTION){ if( gc_is_gen_mode()) - return obj_is_dead_in_minor_forward_gc(collector, p_obj); + return obj_is_dead_in_gen_minor_gc(p_obj); else - return obj_is_dead_in_minor_copy_gc(collector, p_obj); + return obj_is_dead_in_nongen_minor_gc(p_obj); } else { - return obj_is_dead_in_major_gc(collector, p_obj); + return obj_is_dead_in_major_gc(p_obj); } } - -static inline Boolean fspace_obj_to_be_forwarded(Partial_Reveal_Object *p_obj, Space *space) +static inline Boolean fspace_obj_to_be_forwarded(Partial_Reveal_Object *p_obj) { - if(!obj_belongs_to_space(p_obj, (Space*)space)) return FALSE; + if(!obj_belongs_to_nos(p_obj)) return FALSE; return forward_first_half? 
(p_obj < object_forwarding_boundary):(p_obj>=object_forwarding_boundary); } -static inline Boolean obj_need_move(Collector *collector, Partial_Reveal_Object *p_obj) +static inline Boolean obj_need_move(GC *gc, Partial_Reveal_Object *p_obj) { - assert(!obj_is_dead(collector, p_obj)); - GC *gc = collector->gc; + assert(!gc_obj_is_dead(gc, p_obj)); if(gc_is_gen_mode() && gc->collect_kind == MINOR_COLLECTION) - return fspace_obj_to_be_forwarded(p_obj, collector->collect_space); + return fspace_obj_to_be_forwarded(p_obj); Space *space = space_of_addr(gc, p_obj); return space->move_object; } - -extern void resurrect_obj_tree_after_trace(Collector *collector, Partial_Reveal_Object **p_ref); -extern void resurrect_obj_tree_after_mark(Collector *collector, Partial_Reveal_Object *p_obj); -static inline void resurrect_obj_tree_in_minor_copy_gc(Collector *collector, Partial_Reveal_Object *p_obj) +static void finref_add_repset_from_pool(GC *gc, Pool *pool) { - resurrect_obj_tree_after_mark(collector, p_obj); -} -static inline void resurrect_obj_tree_in_major_gc(Collector *collector, Partial_Reveal_Object *p_obj) -{ - resurrect_obj_tree_after_mark(collector, p_obj); -} -// clear the two least significant bits of p_obj first -// add p_ref to repset -static inline void resurrect_obj_tree(Collector *collector, Partial_Reveal_Object **p_ref) -{ - GC *gc = collector->gc; - - if(!gc_is_gen_mode() || !(gc->collect_kind == MINOR_COLLECTION)) - collector_repset_add_entry(collector, p_ref); - if(!obj_is_dead(collector, *p_ref)){ - if(gc_is_gen_mode() && gc->collect_kind == MINOR_COLLECTION && obj_need_move(collector, *p_ref)) - *p_ref = obj_get_fw_in_oi(*p_ref); - return; - } - Partial_Reveal_Object* p_obj = *p_ref; - assert(p_obj); - - if(gc->collect_kind == MINOR_COLLECTION){ - if( gc_is_gen_mode()) - resurrect_obj_tree_after_trace(collector, p_ref); - else - resurrect_obj_tree_in_minor_copy_gc(collector, p_obj); - } else { - resurrect_obj_tree_in_major_gc(collector, p_obj); - } -} - - -/* called before loop of resurrect_obj_tree() */ -static inline void collector_reset_repset(Collector *collector) -{ - GC *gc = collector->gc; - - assert(!collector->rep_set); - if(gc_is_gen_mode() && gc->collect_kind == MINOR_COLLECTION) - return; - collector->rep_set = free_set_pool_get_entry(gc->metadata); -} -/* called after loop of resurrect_obj_tree() */ -static inline void collector_put_repset(Collector *collector) -{ - GC *gc = collector->gc; - - if(gc_is_gen_mode() && gc->collect_kind == MINOR_COLLECTION) - return; - pool_put_entry(gc->metadata->collector_repset_pool, collector->rep_set); - collector->rep_set = NULL; -} - - -static void finref_add_repset_from_pool(Collector *collector, Pool *pool) -{ - GC *gc = collector->gc; - finref_reset_repset(gc); - pool_iterator_init(pool); while(Vector_Block *block = pool_iterator_next(pool)){ - unsigned int *iter = vector_block_iterator_init(block); - - while(!vector_block_iterator_end(block, iter)){ + POINTER_SIZE_INT *iter = vector_block_iterator_init(block); + for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){ Partial_Reveal_Object **p_ref = (Partial_Reveal_Object **)iter; - iter = vector_block_iterator_advance(block, iter); - - if(*p_ref && obj_need_move(collector, *p_ref)) + if(*p_ref && obj_need_move(gc, *p_ref)) finref_repset_add_entry(gc, p_ref); } } finref_put_repset(gc); } +static inline void fallback_update_fw_ref(Partial_Reveal_Object **p_ref) +{ + if(!IS_FALLBACK_COMPACTION) + return; + + Partial_Reveal_Object *p_obj = 
*p_ref; + if(obj_belongs_to_nos(p_obj) && obj_is_fw_in_oi(p_obj)){ + assert(!obj_is_marked_in_vt(p_obj)); + assert(obj_get_vt(p_obj) == obj_get_vt(obj_get_fw_in_oi(p_obj))); + p_obj = obj_get_fw_in_oi(p_obj); + assert(p_obj); + *p_ref = p_obj; + } +} static void identify_finalizable_objects(Collector *collector) { GC *gc = collector->gc; Finref_Metadata *metadata = gc->finref_metadata; Pool *obj_with_fin_pool = metadata->obj_with_fin_pool; - Pool *finalizable_obj_pool = metadata->finalizable_obj_pool; gc_reset_finalizable_objects(gc); pool_iterator_init(obj_with_fin_pool); while(Vector_Block *block = pool_iterator_next(obj_with_fin_pool)){ unsigned int block_has_ref = 0; - unsigned int *iter = vector_block_iterator_init(block); + POINTER_SIZE_INT *iter = vector_block_iterator_init(block); for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){ Partial_Reveal_Object **p_ref = (Partial_Reveal_Object **)iter; + fallback_update_fw_ref(p_ref); Partial_Reveal_Object *p_obj = *p_ref; if(!p_obj) continue; - if(obj_is_dead(collector, p_obj)){ + if(gc_obj_is_dead(gc, p_obj)){ gc_add_finalizable_obj(gc, p_obj); *p_ref = NULL; } else { @@ -231,83 +139,143 @@ vector_block_clear(block); } gc_put_finalizable_objects(gc); +} + +extern void trace_obj_in_gen_fw(Collector *collector, void *p_ref); +extern void trace_obj_in_nongen_fw(Collector *collector, void *p_ref); +extern void trace_obj_in_marking(Collector *collector, void *p_obj); +extern void trace_obj_in_fallback_marking(Collector *collector, void *p_ref); + +typedef void (* Trace_Object_Func)(Collector *collector, void *p_ref_or_obj); +// clear the two least significant bits of p_obj first +// add p_ref to repset +static inline void resurrect_obj_tree(Collector *collector, Partial_Reveal_Object **p_ref) +{ + GC *gc = collector->gc; + GC_Metadata* metadata = gc->metadata; + unsigned int collect_kind = gc->collect_kind; + Partial_Reveal_Object *p_obj = *p_ref; + assert(p_obj && gc_obj_is_dead(gc, p_obj)); + void *p_ref_or_obj = p_ref; + Trace_Object_Func trace_object; + + /* set trace_object() function */ + if(collect_kind == MINOR_COLLECTION){ + if(gc_is_gen_mode()) + trace_object = trace_obj_in_gen_fw; + else + trace_object = trace_obj_in_nongen_fw; + } else if(collect_kind == MAJOR_COLLECTION){ + p_ref_or_obj = p_obj; + trace_object = trace_obj_in_marking; + obj_mark_in_vt(p_obj); + } else { + assert(collect_kind == FALLBACK_COLLECTION); + trace_object = trace_obj_in_fallback_marking; + } + + collector->trace_stack = free_task_pool_get_entry(metadata); + collector_tracestack_push(collector, p_ref_or_obj); + pool_put_entry(metadata->mark_task_pool, collector->trace_stack); + + collector->trace_stack = free_task_pool_get_entry(metadata); + Vector_Block *task_block = pool_get_entry(metadata->mark_task_pool); + while(task_block){ + POINTER_SIZE_INT *iter = vector_block_iterator_init(task_block); + while(!vector_block_iterator_end(task_block, iter)){ + void* p_ref_or_obj = (void *)*iter; + assert((collect_kind!=MAJOR_COLLECTION && *(Partial_Reveal_Object **)p_ref_or_obj) + || (collect_kind==MAJOR_COLLECTION && p_ref_or_obj)); + trace_object(collector, p_ref_or_obj); + iter = vector_block_iterator_advance(task_block, iter); + } + vector_stack_clear(task_block); + pool_put_entry(metadata->free_task_pool, task_block); + task_block = pool_get_entry(metadata->mark_task_pool); + } + + task_block = (Vector_Block*)collector->trace_stack; + vector_stack_clear(task_block); + pool_put_entry(metadata->free_task_pool, 
task_block); + collector->trace_stack = NULL; +} + +static void resurrect_finalizable_objects(Collector *collector) +{ + GC *gc = collector->gc; + Finref_Metadata *metadata = gc->finref_metadata; + Pool *obj_with_fin_pool = metadata->obj_with_fin_pool; + Pool *finalizable_obj_pool = metadata->finalizable_obj_pool; + unsigned int collect_kind = gc->collect_kind; + if(!finalizable_obj_pool_is_empty(gc)){ - collector_reset_repset(collector); + finref_reset_repset(gc); pool_iterator_init(finalizable_obj_pool); while(Vector_Block *block = pool_iterator_next(finalizable_obj_pool)){ - unsigned int *iter = vector_block_iterator_init(block); - while(!vector_block_iterator_end(block, iter)){ - assert(*iter); - resurrect_obj_tree(collector, (Partial_Reveal_Object **)iter); - iter = vector_block_iterator_advance(block, iter); + POINTER_SIZE_INT *iter = vector_block_iterator_init(block); + for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){ + Partial_Reveal_Object **p_ref = (Partial_Reveal_Object **)iter; + Partial_Reveal_Object *p_obj = *p_ref; + assert(p_obj); + + /* + * In major & fallback collection we need record p_ref of the root dead obj to update it later. + * Because it is outside heap, we can't update in ref fixing. + * In minor collection p_ref of the root dead obj is automatically updated while tracing. + */ + if(collect_kind != MINOR_COLLECTION) + finref_repset_add_entry(gc, p_ref); + + /* Perhaps obj has been resurrected by previous resurrections */ + if(!gc_obj_is_dead(gc, p_obj)){ + if(gc->collect_kind == MINOR_COLLECTION && obj_need_move(gc, p_obj)) + *p_ref = obj_get_fw_in_oi(p_obj); + continue; + } + + resurrect_obj_tree(collector, p_ref); } } metadata->pending_finalizers = TRUE; - collector_put_repset(collector); + finref_put_repset(gc); } - finref_add_repset_from_pool(collector, obj_with_fin_pool); + finref_add_repset_from_pool(gc, obj_with_fin_pool); /* fianlizable objects have been added to collector repset pool */ //finref_add_repset_from_pool(collector, finalizable_obj_pool); } -static void put_finalizable_obj_to_vm(GC *gc) +static void identify_dead_refs(GC *gc, Pool *pool) { - Pool *finalizable_obj_pool = gc->finref_metadata->finalizable_obj_pool; - Pool *free_pool = gc->finref_metadata->free_pool; - - while(Vector_Block *block = pool_get_entry(finalizable_obj_pool)){ - unsigned int *iter = vector_block_iterator_init(block); - while(!vector_block_iterator_end(block, iter)){ - assert(*iter); - Managed_Object_Handle p_obj = (Managed_Object_Handle)*iter; - vm_finalize_object(p_obj); - iter = vector_block_iterator_advance(block, iter); - } - vector_block_clear(block); - pool_put_entry(free_pool, block); - } -} - -static void update_referent_ignore_finref(Collector *collector, Pool *pool) -{ - GC *gc = collector->gc; - - while(Vector_Block *block = pool_get_entry(pool)){ - unsigned int *iter = vector_block_iterator_init(block); + finref_reset_repset(gc); + pool_iterator_init(pool); + while(Vector_Block *block = pool_iterator_next(pool)){ + POINTER_SIZE_INT *iter = vector_block_iterator_init(block); for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){ Partial_Reveal_Object **p_ref = (Partial_Reveal_Object **)iter; Partial_Reveal_Object *p_obj = *p_ref; assert(p_obj); Partial_Reveal_Object **p_referent_field = obj_get_referent_field(p_obj); + fallback_update_fw_ref(p_referent_field); Partial_Reveal_Object *p_referent = *p_referent_field; if(!p_referent){ // referent field has been cleared 
*p_ref = NULL; continue; } - if(!obj_is_dead(collector, p_referent)){ // referent is alive - if(obj_need_move(collector, p_referent)) + if(!gc_obj_is_dead(gc, p_referent)){ // referent is alive + if(obj_need_move(gc, p_referent)) finref_repset_add_entry(gc, p_referent_field); *p_ref = NULL; continue; } - *p_referent_field = NULL; /* referent is softly reachable: clear the referent field */ + *p_referent_field = NULL; /* referent is weakly reachable: clear the referent field */ } } -} - -void update_ref_ignore_finref(Collector *collector) -{ - GC *gc = collector->gc; - Finref_Metadata *metadata = gc->finref_metadata; - - finref_reset_repset(gc); - update_referent_ignore_finref(collector, metadata->softref_pool); - update_referent_ignore_finref(collector, metadata->weakref_pool); - update_referent_ignore_finref(collector, metadata->phanref_pool); finref_put_repset(gc); + + finref_add_repset_from_pool(gc, pool); } static void identify_dead_softrefs(Collector *collector) @@ -318,75 +286,22 @@ return; } - Finref_Metadata *metadata = gc->finref_metadata; - Pool *softref_pool = metadata->softref_pool; - - finref_reset_repset(gc); - pool_iterator_init(softref_pool); - while(Vector_Block *block = pool_iterator_next(softref_pool)){ - unsigned int *iter = vector_block_iterator_init(block); - for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){ - Partial_Reveal_Object **p_ref = (Partial_Reveal_Object **)iter; - Partial_Reveal_Object *p_obj = *p_ref; - assert(p_obj); - Partial_Reveal_Object **p_referent_field = obj_get_referent_field(p_obj); - Partial_Reveal_Object *p_referent = *p_referent_field; - - if(!p_referent){ // referent field has been cleared - *p_ref = NULL; - continue; - } - if(!obj_is_dead(collector, p_referent)){ // referent is alive - if(obj_need_move(collector, p_referent)) - finref_repset_add_entry(gc, p_referent_field); - *p_ref = NULL; - continue; - } - *p_referent_field = NULL; /* referent is softly reachable: clear the referent field */ - } - } - finref_put_repset(gc); - - finref_add_repset_from_pool(collector, softref_pool); - return; + Pool *softref_pool = gc->finref_metadata->softref_pool; + identify_dead_refs(gc, softref_pool); } static void identify_dead_weakrefs(Collector *collector) { GC *gc = collector->gc; - Finref_Metadata *metadata = gc->finref_metadata; - Pool *weakref_pool = metadata->weakref_pool; + Pool *weakref_pool = gc->finref_metadata->weakref_pool; - finref_reset_repset(gc); - pool_iterator_init(weakref_pool); - while(Vector_Block *block = pool_iterator_next(weakref_pool)){ - unsigned int *iter = vector_block_iterator_init(block); - for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){ - Partial_Reveal_Object **p_ref = (Partial_Reveal_Object **)iter; - Partial_Reveal_Object *p_obj = *p_ref; - assert(p_obj); - Partial_Reveal_Object **p_referent_field = obj_get_referent_field(p_obj); - Partial_Reveal_Object *p_referent = *p_referent_field; - - if(!p_referent){ // referent field has been cleared - *p_ref = NULL; - continue; - } - if(!obj_is_dead(collector, p_referent)){ // referent is alive - if(obj_need_move(collector, p_referent)) - finref_repset_add_entry(gc, p_referent_field); - *p_ref = NULL; - continue; - } - *p_referent_field = NULL; /* referent is weakly reachable: clear the referent field */ - } - } - finref_put_repset(gc); - - finref_add_repset_from_pool(collector, weakref_pool); - return; + identify_dead_refs(gc, weakref_pool); } +/* + * The reason why we don't use 
identify_dead_refs() to implement this function is + * that we will differentiate phanref from softref & weakref in the future. + */ static void identify_dead_phanrefs(Collector *collector) { GC *gc = collector->gc; @@ -397,20 +312,21 @@ // collector_reset_repset(collector); pool_iterator_init(phanref_pool); while(Vector_Block *block = pool_iterator_next(phanref_pool)){ - unsigned int *iter = vector_block_iterator_init(block); + POINTER_SIZE_INT *iter = vector_block_iterator_init(block); for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){ Partial_Reveal_Object **p_ref = (Partial_Reveal_Object **)iter; Partial_Reveal_Object *p_obj = *p_ref; assert(p_obj); Partial_Reveal_Object **p_referent_field = obj_get_referent_field(p_obj); + fallback_update_fw_ref(p_referent_field); Partial_Reveal_Object *p_referent = *p_referent_field; if(!p_referent){ // referent field has been cleared *p_ref = NULL; continue; } - if(!obj_is_dead(collector, p_referent)){ // referent is alive - if(obj_need_move(collector, p_referent)) + if(!gc_obj_is_dead(gc, p_referent)){ // referent is alive + if(obj_need_move(gc, p_referent)) finref_repset_add_entry(gc, p_referent_field); *p_ref = NULL; continue; @@ -428,16 +344,33 @@ // collector_put_repset(collector); finref_put_repset(gc); - finref_add_repset_from_pool(collector, phanref_pool); - return; + finref_add_repset_from_pool(gc, phanref_pool); } -static inline void put_dead_refs_to_vm(GC *gc, Pool *reference_pool) +static void put_finalizable_obj_to_vm(GC *gc) { + Pool *finalizable_obj_pool = gc->finref_metadata->finalizable_obj_pool; Pool *free_pool = gc->finref_metadata->free_pool; + while(Vector_Block *block = pool_get_entry(finalizable_obj_pool)){ + POINTER_SIZE_INT *iter = vector_block_iterator_init(block); + while(!vector_block_iterator_end(block, iter)){ + assert(*iter); + Managed_Object_Handle p_obj = (Managed_Object_Handle)*iter; + vm_finalize_object(p_obj); + iter = vector_block_iterator_advance(block, iter); + } + vector_block_clear(block); + pool_put_entry(free_pool, block); + } +} + +static inline void put_dead_weak_refs_to_vm(GC *gc, Pool *reference_pool) +{ + Pool *free_pool = gc->finref_metadata->free_pool; + while(Vector_Block *block = pool_get_entry(reference_pool)){ - unsigned int *iter = vector_block_iterator_init(block); + POINTER_SIZE_INT *iter = vector_block_iterator_init(block); while(!vector_block_iterator_end(block, iter)){ Managed_Object_Handle p_obj = (Managed_Object_Handle)*iter; if(p_obj) @@ -449,7 +382,7 @@ } } -static void put_dead_weak_refs_to_vm(GC *gc) +static void put_dead_refs_to_vm(GC *gc) { if(softref_pool_is_empty(gc) && weakref_pool_is_empty(gc) @@ -465,9 +398,9 @@ Pool *phanref_pool = gc->finref_metadata->phanref_pool; Pool *free_pool = gc->finref_metadata->free_pool; - put_dead_refs_to_vm(gc, softref_pool); - put_dead_refs_to_vm(gc, weakref_pool); - put_dead_refs_to_vm(gc, phanref_pool); + put_dead_weak_refs_to_vm(gc, softref_pool); + put_dead_weak_refs_to_vm(gc, weakref_pool); + put_dead_weak_refs_to_vm(gc, phanref_pool); } void collector_identify_finref(Collector *collector) @@ -478,12 +411,13 @@ identify_dead_softrefs(collector); identify_dead_weakrefs(collector); identify_finalizable_objects(collector); + resurrect_finalizable_objects(collector); identify_dead_phanrefs(collector); } void gc_put_finref_to_vm(GC *gc) { - put_dead_weak_refs_to_vm(gc); + put_dead_refs_to_vm(gc); put_finalizable_obj_to_vm(gc); } @@ -502,7 +436,7 @@ gc_set_obj_with_fin(gc); 
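/* Illustrative sketch only (not part of the patch): a simplified model of the
 * referent scan that identify_dead_refs() above and
 * update_referent_field_ignore_finref() further down both perform.
 * ToyObject, is_live() and will_move() are invented stand-ins for
 * Partial_Reveal_Object, gc_obj_is_dead() and obj_need_move(); a plain vector
 * stands in for finref_repset_add_entry()/the repset pool.
 */
#include <vector>
#include <cstddef>

struct ToyObject {
    ToyObject* referent;   /* the Reference object's referent field          */
    bool live;             /* would survive this collection                  */
    bool moving;           /* lives in a space whose objects will be moved   */
};

static bool is_live(const ToyObject* o)   { return o->live;   }
static bool will_move(const ToyObject* o) { return o->moving; }

/* Scan one pool entry. Returns true if the Reference must stay in the pool
 * (its referent was dead and has just been cleared, so the VM has to be told),
 * false if the entry can be dropped (the real code writes NULL into the slot). */
static bool scan_reference(ToyObject* ref, std::vector<ToyObject**>& repset)
{
    ToyObject** referent_field = &ref->referent;
    ToyObject* referent = *referent_field;

    if(!referent)                 /* referent field already cleared */
        return false;

    if(is_live(referent)){        /* referent survives this collection */
        if(will_move(referent))   /* remember the slot so it can be repointed later */
            repset.push_back(referent_field);
        return false;
    }

    *referent_field = NULL;       /* referent is only weakly reachable: clear it */
    return true;
}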
unlock(gc->mutator_list_lock); while(Vector_Block *block = pool_get_entry(obj_with_fin_pool)){ - unsigned int *iter = vector_block_iterator_init(block); + POINTER_SIZE_INT *iter = vector_block_iterator_init(block); while(!vector_block_iterator_end(block, iter)){ Managed_Object_Handle p_obj = (Managed_Object_Handle)*iter; if(p_obj) @@ -515,33 +449,90 @@ vm_gc_unlock_enum(); } -void gc_update_finref_repointed_refs(GC* gc) +static void update_referent_field_ignore_finref(GC *gc, Pool *pool) { + while(Vector_Block *block = pool_get_entry(pool)){ + POINTER_SIZE_INT *iter = vector_block_iterator_init(block); + for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){ + Partial_Reveal_Object **p_ref = (Partial_Reveal_Object **)iter; + Partial_Reveal_Object *p_obj = *p_ref; + assert(p_obj); + Partial_Reveal_Object **p_referent_field = obj_get_referent_field(p_obj); + fallback_update_fw_ref(p_referent_field); + Partial_Reveal_Object *p_referent = *p_referent_field; + + if(!p_referent){ // referent field has been cleared + *p_ref = NULL; + continue; + } + if(!gc_obj_is_dead(gc, p_referent)){ // referent is alive + if(obj_need_move(gc, p_referent)) + finref_repset_add_entry(gc, p_referent_field); + *p_ref = NULL; + continue; + } + *p_referent_field = NULL; /* referent is weakly reachable: clear the referent field */ + } + } +} + +void gc_update_weakref_ignore_finref(GC *gc) +{ + Finref_Metadata *metadata = gc->finref_metadata; + + finref_reset_repset(gc); + update_referent_field_ignore_finref(gc, metadata->softref_pool); + update_referent_field_ignore_finref(gc, metadata->weakref_pool); + update_referent_field_ignore_finref(gc, metadata->phanref_pool); + finref_put_repset(gc); +} + +static void move_compaction_update_referent_field(GC *gc, Partial_Reveal_Object **p_referent_field) +{ + if(!address_belongs_to_gc_heap((void *)p_referent_field, gc)){ + *p_referent_field = obj_get_fw_in_table(*p_referent_field); + return; + } + + Space *ref_space = space_of_addr(gc, p_referent_field); + if(ref_space->move_object){ + unsigned int offset = get_gc_referent_offset(); + Partial_Reveal_Object *p_old_ref = (Partial_Reveal_Object *)((POINTER_SIZE_INT)p_referent_field - offset); + Partial_Reveal_Object *p_new_ref = obj_get_fw_in_table(p_old_ref); + p_referent_field = (Partial_Reveal_Object **)((POINTER_SIZE_INT)p_new_ref + offset); + } + assert(space_of_addr(gc, *p_referent_field)->move_object); + *p_referent_field = obj_get_fw_in_table(*p_referent_field); +} + +extern Boolean IS_MOVE_COMPACT; + +void gc_update_finref_repointed_refs(GC *gc) +{ + unsigned int collect_kind = gc->collect_kind; Finref_Metadata* metadata = gc->finref_metadata; Pool *repset_pool = metadata->repset_pool; /* NOTE:: this is destructive to the root sets. */ - Vector_Block* root_set = pool_get_entry(repset_pool); + Vector_Block* repset = pool_get_entry(repset_pool); - while(root_set){ - unsigned int* iter = vector_block_iterator_init(root_set); - while(!vector_block_iterator_end(root_set,iter)){ - Partial_Reveal_Object** p_ref = (Partial_Reveal_Object** )*iter; - iter = vector_block_iterator_advance(root_set,iter); - - Partial_Reveal_Object* p_obj = *p_ref; - /* For repset, this check is unnecessary, since all slots are repointed; otherwise - they will not be recorded. For root set, it is possible to point to LOS or other - non-moved space. 
*/ - Partial_Reveal_Object* p_target_obj; - assert(obj_is_fw_in_oi(p_obj)); - p_target_obj = obj_get_fw_in_oi(p_obj); - - *p_ref = p_target_obj; + while(repset){ + POINTER_SIZE_INT *iter = vector_block_iterator_init(repset); + for(; !vector_block_iterator_end(repset,iter); iter = vector_block_iterator_advance(repset,iter)){ + Partial_Reveal_Object **p_ref = (Partial_Reveal_Object** )*iter; + Partial_Reveal_Object *p_obj = *p_ref; + + if(!IS_MOVE_COMPACT){ + assert(obj_is_fw_in_oi(p_obj)); + assert(collect_kind == MINOR_COLLECTION || obj_is_marked_in_vt(p_obj)); + *p_ref = obj_get_fw_in_oi(p_obj); + } else { + move_compaction_update_referent_field(gc, p_ref); + } } - vector_block_clear(root_set); - pool_put_entry(metadata->free_pool, root_set); - root_set = pool_get_entry(repset_pool); + vector_block_clear(repset); + pool_put_entry(metadata->free_pool, repset); + repset = pool_get_entry(repset_pool); } return; @@ -552,7 +543,7 @@ Finref_Metadata* metadata = gc->finref_metadata; if(metadata->pending_finalizers || metadata->pending_weakrefs){ - metadata->pending_finalizers = FALSE; + metadata->pending_finalizers = FALSE; metadata->pending_weakrefs = FALSE; vm_hint_finalize(); } Index: vm/gc_gen/src/finalizer_weakref/finalizer_weakref.h =================================================================== --- vm/gc_gen/src/finalizer_weakref/finalizer_weakref.h (revision 499692) +++ vm/gc_gen/src/finalizer_weakref/finalizer_weakref.h (working copy) @@ -28,7 +28,7 @@ extern Boolean IGNORE_FINREF; -/* Phantom status: for future use +/* Phanref status: for future use * #define PHANTOM_REF_ENQUEUE_STATUS_MASK 0x3 * #define PHANTOM_REF_ENQUEUED_MASK 0x1 * #define PHANTOM_REF_PENDING_MASK 0x2 @@ -82,13 +82,7 @@ } } - -extern void mutator_reset_obj_with_fin(Mutator *mutator); -extern void gc_set_obj_with_fin(GC *gc); -extern void collector_reset_weakref_sets(Collector *collector); - -extern void gc_set_weakref_sets(GC *gc); -extern void update_ref_ignore_finref(Collector *collector); +extern void gc_update_weakref_ignore_finref(GC *gc); extern void collector_identify_finref(Collector *collector); extern void gc_put_finref_to_vm(GC *gc); extern void put_all_fin_on_exit(GC *gc); Index: vm/gc_gen/src/finalizer_weakref/finalizer_weakref_metadata.cpp =================================================================== --- vm/gc_gen/src/finalizer_weakref/finalizer_weakref_metadata.cpp (revision 499692) +++ vm/gc_gen/src/finalizer_weakref/finalizer_weakref_metadata.cpp (working copy) @@ -22,14 +22,16 @@ #include "../thread/mutator.h" #include "../thread/collector.h" -#define POOL_SEGMENT_SIZE_BIT_SHIFT 20 -#define POOL_SEGMENT_SIZE_BYTES (1 << POOL_SEGMENT_SIZE_BIT_SHIFT) +#define FINREF_METADATA_SEG_SIZE_BIT_SHIFT 20 +#define FINREF_METADATA_SEG_SIZE_BYTES (1 << FINREF_METADATA_SEG_SIZE_BIT_SHIFT) -#define METADATA_BLOCK_SIZE_BIT_SHIFT 10 -#define METADATA_BLOCK_SIZE_BYTES (1<> METADATA_BLOCK_SIZE_BIT_SHIFT; + unsigned int num_blocks = FINREF_METADATA_SEG_SIZE_BYTES >> FINREF_METADATA_BLOCK_SIZE_BIT_SHIFT; for(unsigned int i=0; irepset = NULL; for(unsigned int i=0; inum_alloc_segs; i++){ - assert(metadata->pool_segments[i]); - STD_FREE(metadata->pool_segments[i]); + assert(metadata->segments[i]); + STD_FREE(metadata->segments[i]); } gc->finref_metadata = NULL; @@ -112,6 +117,107 @@ return; } + +/* called when there is no Vector_Block in finref_metadata->free_pool + * extend the pool by a segment + */ +Vector_Block *finref_metadata_extend(void) +{ + Finref_Metadata *metadata = &finref_metadata; + 
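/* Illustrative sketch only (not part of the patch): the extension pattern used by
 * finref_metadata_extend() here -- take the allocation lock, re-check the free pool
 * (another thread may have extended it while we waited), and only then allocate a
 * new segment and carve it into blocks. ToyPool, ToyBlock, SEG_BLOCKS and
 * std::mutex are invented stand-ins for the real SpinLock/lock()/unlock(),
 * Vector_Block and pool routines.
 */
#include <mutex>
#include <deque>
#include <cstdlib>

struct ToyBlock { /* stands in for Vector_Block */ };

struct ToyPool {
    std::mutex alloc_lock;
    std::deque<ToyBlock*> free_blocks;
};

static const unsigned int SEG_BLOCKS = 1024;   /* blocks carved from one segment */

ToyBlock* toy_pool_extend(ToyPool& pool)
{
    std::lock_guard<std::mutex> guard(pool.alloc_lock);

    /* double check: someone else may have refilled the pool already */
    if(!pool.free_blocks.empty()){
        ToyBlock* b = pool.free_blocks.front();
        pool.free_blocks.pop_front();
        return b;
    }

    /* allocate one segment and split it into blocks; the real code also records
     * the segment address so it can be freed at destruct time and aborts when the
     * segment table is exhausted */
    ToyBlock* segment = (ToyBlock*)std::calloc(SEG_BLOCKS, sizeof(ToyBlock));
    if(!segment) return NULL;

    for(unsigned int i = 0; i < SEG_BLOCKS; i++)
        pool.free_blocks.push_back(segment + i);

    ToyBlock* b = pool.free_blocks.front();
    pool.free_blocks.pop_front();
    return b;
}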
lock(metadata->alloc_lock); + Vector_Block* block = pool_get_entry(metadata->free_pool); + if( block ){ + unlock(metadata->alloc_lock); + return block; + } + + unsigned int num_alloced = metadata->num_alloc_segs; + if(num_alloced == FINREF_METADATA_SEGMENT_NUM){ + printf("Run out Finref metadata, please give it more segments!\n"); + exit(0); + } + + unsigned int seg_size = FINREF_METADATA_SEG_SIZE_BYTES + FINREF_METADATA_BLOCK_SIZE_BYTES; + void *new_segment = STD_MALLOC(seg_size); + memset(new_segment, 0, seg_size); + metadata->segments[num_alloced] = new_segment; + new_segment = (void*)round_up_to_size((POINTER_SIZE_INT)new_segment, FINREF_METADATA_BLOCK_SIZE_BYTES); + metadata->num_alloc_segs++; + + unsigned int num_blocks = FINREF_METADATA_SEG_SIZE_BYTES >> FINREF_METADATA_BLOCK_SIZE_BIT_SHIFT; + for(unsigned int i=0; ifree_pool, (void *)block); + } + + block = pool_get_entry(metadata->free_pool); + unlock(metadata->alloc_lock); + return block; +} + +/* called when GC completes and there is no Vector_Block in the last five pools of gc->finref_metadata + * shrink the free pool by half + */ +static void finref_metadata_shrink(GC *gc) +{ +} + + +/* reset obj_with_fin vector block of each mutator */ +static void gc_reset_obj_with_fin(GC *gc) +{ + Mutator *mutator = gc->mutator_list; + while(mutator){ + assert(!mutator->obj_with_fin); + mutator->obj_with_fin = finref_get_free_block(gc); + mutator = mutator->next; + } +} + +/* put back last obj_with_fin block of each mutator */ +void gc_set_obj_with_fin(GC *gc) +{ + Pool *obj_with_fin_pool = gc->finref_metadata->obj_with_fin_pool; + + Mutator *mutator = gc->mutator_list; + while(mutator){ + pool_put_entry(obj_with_fin_pool, mutator->obj_with_fin); + mutator->obj_with_fin = NULL; + mutator = mutator->next; + } +} + +/* reset weak references vetctor block of each collector */ +void collector_reset_weakref_sets(Collector *collector) +{ + GC *gc = collector->gc; + + collector->softref_set = finref_get_free_block(gc); + collector->weakref_set = finref_get_free_block(gc); + collector->phanref_set= finref_get_free_block(gc); +} + +/* put back last weak references block of each collector */ +void gc_set_weakref_sets(GC *gc) +{ + Finref_Metadata *metadata = gc->finref_metadata; + + unsigned int num_active_collectors = gc->num_active_collectors; + for(unsigned int i = 0; i < num_active_collectors; i++) + { + Collector* collector = gc->collectors[i]; + pool_put_entry(metadata->softref_pool, collector->softref_set); + pool_put_entry(metadata->weakref_pool, collector->weakref_set); + pool_put_entry(metadata->phanref_pool, collector->phanref_set); + collector->softref_set = NULL; + collector->weakref_set= NULL; + collector->phanref_set= NULL; + } + return; +} + void gc_reset_finref_metadata(GC *gc) { Finref_Metadata *metadata = gc->finref_metadata; @@ -127,7 +233,7 @@ assert(metadata->repset == NULL); while(Vector_Block *block = pool_get_entry(obj_with_fin_pool)){ - unsigned int *iter = vector_block_iterator_init(block); + POINTER_SIZE_INT *iter = vector_block_iterator_init(block); if(vector_block_iterator_end(block, iter)){ vector_block_clear(block); pool_put_entry(metadata->free_pool, block); @@ -138,100 +244,54 @@ assert(pool_is_empty(obj_with_fin_pool)); metadata->obj_with_fin_pool = finalizable_obj_pool; metadata->finalizable_obj_pool = obj_with_fin_pool; -} - -/* called when there is no Vector_Block in finref_metadata->free_pool - * extend the pool by a pool segment - */ -static void finref_metadata_extend(void) -{ - Finref_Metadata *metadata = 
&finref_metadata; - unsigned int pos = metadata->num_alloc_segs; - while(pos < POOL_SEGMENT_NUM){ - unsigned int next_pos = pos + 1; - unsigned int temp = (unsigned int)atomic_cas32((volatile unsigned int *)&metadata->num_alloc_segs, next_pos, pos); - if(temp == pos) - break; - pos = metadata->num_alloc_segs; - } - if(pos > POOL_SEGMENT_NUM) - return; - - void *pool_segment = STD_MALLOC(POOL_SEGMENT_SIZE_BYTES); - memset(pool_segment, 0, POOL_SEGMENT_SIZE_BYTES); - metadata->pool_segments[pos] = pool_segment; - - unsigned int num_blocks = POOL_SEGMENT_SIZE_BYTES >> METADATA_BLOCK_SIZE_BIT_SHIFT; - for(unsigned int i=0; ifree_pool, (void *)block); - } - - return; + gc_reset_obj_with_fin(gc); } -Vector_Block *finref_get_free_block(void) -{ - Vector_Block *block; - - while(!(block = pool_get_entry(finref_metadata.free_pool))) - finref_metadata_extend(); - return block; -} -/* called when GC completes and there is no Vector_Block in the last five pools of gc->finref_metadata - * shrink the free pool by half - */ -void finref_metadata_shrink(GC *gc) +static inline void finref_metadata_add_entry(GC *gc, Vector_Block* &vector_block_in_use, Pool *pool, Partial_Reveal_Object *ref) { -} - -static inline void finref_metadata_add_entry(Vector_Block* &vector_block_in_use, Pool *pool, Partial_Reveal_Object *ref) -{ assert(vector_block_in_use); assert(ref); Vector_Block* block = vector_block_in_use; - vector_block_add_entry(block, (unsigned int)ref); + vector_block_add_entry(block, (POINTER_SIZE_INT)ref); if(!vector_block_is_full(block)) return; pool_put_entry(pool, block); - vector_block_in_use = finref_get_free_block(); + vector_block_in_use = finref_get_free_block(gc); } void mutator_add_finalizer(Mutator *mutator, Partial_Reveal_Object *ref) { - finref_metadata_add_entry(mutator->obj_with_fin, finref_metadata.obj_with_fin_pool, ref); + finref_metadata_add_entry(mutator->gc, mutator->obj_with_fin, finref_metadata.obj_with_fin_pool, ref); } void gc_add_finalizable_obj(GC *gc, Partial_Reveal_Object *ref) { - finref_metadata_add_entry(finref_metadata.finalizable_obj_set, finref_metadata.finalizable_obj_pool, ref); + finref_metadata_add_entry(gc, finref_metadata.finalizable_obj_set, finref_metadata.finalizable_obj_pool, ref); } void collector_add_softref(Collector *collector, Partial_Reveal_Object *ref) { - finref_metadata_add_entry(collector->softref_set, finref_metadata.softref_pool, ref); + finref_metadata_add_entry(collector->gc, collector->softref_set, finref_metadata.softref_pool, ref); } void collector_add_weakref(Collector *collector, Partial_Reveal_Object *ref) { - finref_metadata_add_entry(collector->weakref_set, finref_metadata.weakref_pool, ref); + finref_metadata_add_entry(collector->gc, collector->weakref_set, finref_metadata.weakref_pool, ref); } void collector_add_phanref(Collector *collector, Partial_Reveal_Object *ref) { - finref_metadata_add_entry(collector->phanref_set, finref_metadata.phanref_pool, ref); + finref_metadata_add_entry(collector->gc, collector->phanref_set, finref_metadata.phanref_pool, ref); } void finref_repset_add_entry(GC *gc, Partial_Reveal_Object **p_ref) { assert(*p_ref); - finref_metadata_add_entry(finref_metadata.repset, finref_metadata.repset_pool, (Partial_Reveal_Object *)p_ref); + finref_metadata_add_entry(gc, finref_metadata.repset, finref_metadata.repset_pool, (Partial_Reveal_Object *)p_ref); } static inline Boolean pool_has_no_ref(Pool *pool) @@ -240,7 +300,7 @@ return TRUE; pool_iterator_init(pool); while(Vector_Block *block = pool_iterator_next(pool)){ - 
unsigned int *iter = vector_block_iterator_init(block); + POINTER_SIZE_INT *iter = vector_block_iterator_init(block); while(!vector_block_iterator_end(block, iter)){ if(*iter) return FALSE; Index: vm/gc_gen/src/finalizer_weakref/finalizer_weakref_metadata.h =================================================================== --- vm/gc_gen/src/finalizer_weakref/finalizer_weakref_metadata.h (revision 499692) +++ vm/gc_gen/src/finalizer_weakref/finalizer_weakref_metadata.h (working copy) @@ -25,31 +25,32 @@ #include "../utils/vector_block.h" #include "../utils/sync_pool.h" -#define POOL_SEGMENT_NUM 256 +#define FINREF_METADATA_SEGMENT_NUM 256 typedef struct Finref_Metadata{ - void *pool_segments[POOL_SEGMENT_NUM]; // malloced free pool segments' addresses array - unsigned int num_alloc_segs; // next available position in pool_segments array + void *segments[FINREF_METADATA_SEGMENT_NUM]; // malloced free pool segments' addresses array + unsigned int num_alloc_segs; // allocated segment number + SpinLock alloc_lock; // thread must hold this lock when allocating new segment - Pool *free_pool; // list of free buffers for the five pools below + Pool *free_pool; // list of free buffers for the five pools below - Pool *obj_with_fin_pool; // list of objects that have finalizer; - // these objects are added in when they are allocated - Pool *finalizable_obj_pool; // temporary buffer for finalizable objects identified during one single GC + Pool *obj_with_fin_pool; // list of objects that have finalizer; + // these objects are added in when they are allocated + Pool *finalizable_obj_pool; // temporary buffer for finalizable objects identified during one single GC - Pool *softref_pool; // temporary buffer for soft references identified during one single GC - Pool *weakref_pool; // temporary buffer for weak references identified during one single GC - Pool *phanref_pool; // temporary buffer for phantom references identified during one single GC + Pool *softref_pool; // temporary buffer for soft references identified during one single GC + Pool *weakref_pool; // temporary buffer for weak references identified during one single GC + Pool *phanref_pool; // temporary buffer for phantom references identified during one single GC - Pool *repset_pool; // repointed reference slot sets + Pool *repset_pool; // repointed reference slot sets - Vector_Block *finalizable_obj_set; // buffer for finalizable_objects_pool - Vector_Block *repset; // buffer for repset_pool + Vector_Block *finalizable_obj_set; // buffer for finalizable_objects_pool + Vector_Block *repset; // buffer for repset_pool - Boolean pending_finalizers; // there are objects waiting to be finalized - Boolean pending_weakrefs; // there are weak references waiting to be enqueued + Boolean pending_finalizers; // there are objects waiting to be finalized + Boolean pending_weakrefs; // there are weak references waiting to be enqueued - unsigned int gc_referent_offset; // the referent field's offset in Reference Class + unsigned int gc_referent_offset; // the referent field's offset in Reference Class }Finref_Metadata; extern unsigned int get_gc_referent_offset(void); @@ -58,8 +59,11 @@ extern void gc_finref_metadata_initialize(GC *gc); extern void gc_finref_metadata_destruct(GC *gc); extern void gc_finref_metadata_verify(GC *gc, Boolean is_before_gc); + +extern void gc_set_obj_with_fin(GC *gc); +extern void collector_reset_weakref_sets(Collector *collector); +extern void gc_set_weakref_sets(GC *gc); extern void gc_reset_finref_metadata(GC *gc); -extern 
Vector_Block *finref_get_free_block(void); extern void mutator_add_finalizer(Mutator *mutator, Partial_Reveal_Object *ref); extern void gc_add_finalizable_obj(GC *gc, Partial_Reveal_Object *ref); @@ -77,7 +81,19 @@ extern void gc_clear_weakref_pools(GC *gc); +extern Vector_Block *finref_metadata_extend(void); +inline Vector_Block *finref_get_free_block(GC *gc) +{ + Vector_Block *block = pool_get_entry(gc->finref_metadata->free_pool); + + while(!block) + block = finref_metadata_extend(); + + assert(vector_block_is_empty(block)); + return block; +} + /* called before loop of recording finalizable objects */ inline void gc_reset_finalizable_objects(GC *gc) { Index: vm/gc_gen/src/gen/gen.cpp =================================================================== --- vm/gc_gen/src/gen/gen.cpp (revision 499692) +++ vm/gc_gen/src/gen/gen.cpp (working copy) @@ -28,13 +28,19 @@ /* fspace size limit is not interesting. only for manual tuning purpose */ unsigned int min_nos_size_bytes = 16 * MB; unsigned int max_nos_size_bytes = 256 * MB; +unsigned int min_los_size_bytes = 4*MB; unsigned int NOS_SIZE = 0; +unsigned int MIN_LOS_SIZE = 0; unsigned int MIN_NOS_SIZE = 0; unsigned int MAX_NOS_SIZE = 0; static unsigned int MINOR_ALGO = 0; static unsigned int MAJOR_ALGO = 0; +Boolean GEN_NONGEN_SWITCH = FALSE; + +Boolean JVMTI_HEAP_ITERATION = false; + #ifndef STATIC_NOS_MAPPING void* nos_boundary; #endif @@ -45,29 +51,35 @@ { gc_gen->_machine_page_size_bytes = port_vmem_page_sizes()[0]; gc_gen->_num_processors = port_CPUs_number(); + gc_gen->_system_alloc_unit = vm_get_system_alloc_unit(); + SPACE_ALLOC_UNIT = max(gc_gen->_system_alloc_unit, GC_BLOCK_SIZE_BYTES); } +void* alloc_large_pages(size_t size, const char* hint); + void gc_gen_initialize(GC_Gen *gc_gen, unsigned int min_heap_size, unsigned int max_heap_size) { assert(gc_gen); + gc_gen_get_system_info(gc_gen); - /*Give GC a hint of gc survive ratio.*/ - gc_gen->survive_ratio = 0.2f; - - /*fixme: max_heap_size should not beyond 448 MB*/ max_heap_size = round_down_to_size(max_heap_size, SPACE_ALLOC_UNIT); min_heap_size = round_up_to_size(min_heap_size, SPACE_ALLOC_UNIT); assert(max_heap_size <= max_heap_size_bytes); - assert(max_heap_size > min_heap_size_bytes); + assert(max_heap_size >= min_heap_size_bytes); - gc_gen_get_system_info(gc_gen); min_nos_size_bytes *= gc_gen->_num_processors; + + unsigned int min_nos_size_threshold = max_heap_size>>5; + if(min_nos_size_bytes > min_nos_size_threshold){ + min_nos_size_bytes = round_down_to_size(min_nos_size_threshold,SPACE_ALLOC_UNIT); + } if( MIN_NOS_SIZE ) min_nos_size_bytes = MIN_NOS_SIZE; unsigned int los_size = max_heap_size >> 7; - if(los_size < GC_MIN_LOS_SIZE) - los_size = GC_MIN_LOS_SIZE; + if(MIN_LOS_SIZE) min_los_size_bytes = MIN_LOS_SIZE; + if(los_size < min_los_size_bytes ) + los_size = min_los_size_bytes ; los_size = round_down_to_size(los_size, SPACE_ALLOC_UNIT); @@ -78,6 +90,8 @@ unsigned int mos_reserve_size, mos_commit_size; unsigned int los_mos_size; + /*Give GC a hint of gc survive ratio.*/ + gc_gen->survive_ratio = 0.2f; if(NOS_SIZE){ los_mos_size = max_heap_size - NOS_SIZE; @@ -103,20 +117,23 @@ #ifdef STATIC_NOS_MAPPING - assert((unsigned int)nos_boundary%SPACE_ALLOC_UNIT == 0); + //FIXME: no large page support in static nos mapping + assert(large_page_hint==NULL); + + assert((POINTER_SIZE_INT)nos_boundary%SPACE_ALLOC_UNIT == 0); nos_base = vm_reserve_mem(nos_boundary, nos_reserve_size); if( nos_base != nos_boundary ){ printf("Static NOS mapping: Can't reserve memory at %x for size %x for 
NOS.\n", nos_boundary, nos_reserve_size); printf("Please not use static NOS mapping by undefining STATIC_NOS_MAPPING, or adjusting NOS_BOUNDARY value.\n"); exit(0); } - reserved_end = (void*)((unsigned int)nos_base + nos_reserve_size); + reserved_end = (void*)((POINTER_SIZE_INT)nos_base + nos_reserve_size); - void* los_mos_base = (void*)((unsigned int)nos_base - los_mos_size); - assert(!((unsigned int)los_mos_base%SPACE_ALLOC_UNIT)); + void* los_mos_base = (void*)((POINTER_SIZE_INT)nos_base - los_mos_size); + assert(!((POINTER_SIZE_INT)los_mos_base%SPACE_ALLOC_UNIT)); reserved_base = vm_reserve_mem(los_mos_base, los_mos_size); while( !reserved_base || reserved_base >= nos_base){ - los_mos_base = (void*)((unsigned int)los_mos_base - SPACE_ALLOC_UNIT); + los_mos_base = (void*)((POINTER_SIZE_INT)los_mos_base - SPACE_ALLOC_UNIT); if(los_mos_base < RESERVE_BOTTOM){ printf("Static NOS mapping: Can't allocate memory at address %x for specified size %x for MOS", reserved_base, los_mos_size); exit(0); @@ -126,15 +143,31 @@ #else /* STATIC_NOS_MAPPING */ - reserved_base = vm_reserve_mem(0, max_heap_size); - while( !reserved_base ){ - printf("Non-static NOS mapping: Can't allocate memory at address %x for specified size %x", reserved_base, max_heap_size); - exit(0); + reserved_base = NULL; + if(large_page_hint){ + reserved_base = alloc_large_pages(max_heap_size, large_page_hint); + if(reserved_base == NULL) { + free(large_page_hint); + large_page_hint = NULL; + printf("GC use small pages.\n"); + } } - reserved_end = (void*)((unsigned int)reserved_base + max_heap_size); + + if(reserved_base==NULL){ + reserved_base = vm_reserve_mem((void*)0, max_heap_size + SPACE_ALLOC_UNIT); + reserved_base = (void*)round_up_to_size((POINTER_SIZE_INT)reserved_base, SPACE_ALLOC_UNIT); + assert((POINTER_SIZE_INT)reserved_base%SPACE_ALLOC_UNIT == 0); + + while( !reserved_base ){ + printf("Non-static NOS mapping: Can't allocate memory at address %x for specified size %x", reserved_base, max_heap_size); + exit(0); + } + } + + reserved_end = (void*)((POINTER_SIZE_INT)reserved_base + max_heap_size); /* compute first time nos_boundary */ - nos_base = (void*)((unsigned int)reserved_base + mos_commit_size + los_size); + nos_base = (void*)((POINTER_SIZE_INT)reserved_base + mos_commit_size + los_size); /* init nos_boundary if NOS is not statically mapped */ nos_boundary = nos_base; @@ -147,10 +180,11 @@ gc_gen->num_collections = 0; gc_gen->time_collections = 0; gc_gen->force_major_collect = FALSE; + gc_gen->force_gen_mode = FALSE; gc_los_initialize(gc_gen, reserved_base, los_size); - reserved_base = (void*)((unsigned int)reserved_base + los_size); + reserved_base = (void*)((POINTER_SIZE_INT)reserved_base + los_size); gc_mos_initialize(gc_gen, reserved_base, mos_reserve_size, mos_commit_size); gc_nos_initialize(gc_gen, nos_base, nos_reserve_size, nos_commit_size); @@ -166,9 +200,11 @@ mos->collect_algorithm = MAJOR_ALGO; /*Give GC a hint of space survive ratio.*/ - nos->survive_ratio = gc_gen->survive_ratio; - mos->survive_ratio = gc_gen->survive_ratio; +// nos->survive_ratio = gc_gen->survive_ratio; +// mos->survive_ratio = gc_gen->survive_ratio; gc_space_tuner_initialize((GC*)gc_gen); + + gc_gen_mode_adapt_init(gc_gen); gc_gen->committed_heap_size = space_committed_size((Space*)gc_gen->nos) + space_committed_size((Space*)gc_gen->mos) + @@ -183,6 +219,14 @@ void gc_gen_destruct(GC_Gen *gc_gen) { + Space* nos = (Space*)gc_gen->nos; + Space* mos = (Space*)gc_gen->mos; + Space* los = (Space*)gc_gen->los; + + 
vm_unmap_mem(nos->heap_start, space_committed_size(nos)); + vm_unmap_mem(mos->heap_start, space_committed_size(mos)); + vm_unmap_mem(los->heap_start, space_committed_size(los)); + gc_nos_destruct(gc_gen); gc_gen->nos = NULL; @@ -192,14 +236,6 @@ gc_los_destruct(gc_gen); gc_gen->los = NULL; - Space* nos = (Space*)gc_gen->nos; - Space* mos = (Space*)gc_gen->mos; - Space* los = (Space*)gc_gen->los; - - vm_unmap_mem(nos->heap_start, space_committed_size(nos)); - vm_unmap_mem(mos->heap_start, space_committed_size(mos)); - vm_unmap_mem(los->heap_start, space_committed_size(los)); - return; } @@ -259,17 +295,17 @@ } if(!major_algo){ - MAJOR_ALGO= MAJOR_COMPACT_SLIDE; + MAJOR_ALGO= MAJOR_COMPACT_MOVE; }else{ string_to_upper(major_algo); if(!strcmp(major_algo, "MAJOR_COMPACT_SLIDE")){ MAJOR_ALGO= MAJOR_COMPACT_SLIDE; - + }else if(!strcmp(major_algo, "MAJOR_COMPACT_MOVE")){ MAJOR_ALGO= MAJOR_COMPACT_MOVE; - + }else{ printf("\nGC algorithm setting incorrect. Will use default algorithm.\n"); @@ -286,8 +322,6 @@ { if(verify_live_heap) gc_verify_heap((GC*)gc, TRUE); - int64 start_time = time_now(); - Blocked_Space* fspace = (Blocked_Space*)gc->nos; Blocked_Space* mspace = (Blocked_Space*)gc->mos; mspace->num_used_blocks = mspace->free_block_idx - mspace->first_block_idx; @@ -339,13 +373,63 @@ exit(0); } - int64 pause_time = time_now() - start_time; - - gc->time_collections += pause_time; - if(verify_live_heap) gc_verify_heap((GC*)gc, FALSE); - gc_gen_adapt(gc, pause_time); - return; } + +void gc_gen_iterate_heap(GC_Gen *gc) +{ + /** the function is called after stoped the world **/ + Mutator *mutator = gc->mutator_list; + bool cont = true; + while (mutator) { + Block_Header* block = (Block_Header*)mutator->alloc_block; + if(block != NULL) block->free = mutator->free; + mutator = mutator->next; + } + + Mspace* mspace = gc->mos; + Block_Header *curr_block = (Block_Header*)mspace->blocks; + Block_Header *space_end = (Block_Header*)&mspace->blocks[mspace->free_block_idx - mspace->first_block_idx]; + while(curr_block < space_end) { + POINTER_SIZE_INT p_obj = (POINTER_SIZE_INT)curr_block->base; + POINTER_SIZE_INT block_end = (POINTER_SIZE_INT)curr_block->free; + while(p_obj < block_end){ + cont = vm_iterate_object((Managed_Object_Handle)p_obj); + if (!cont) return; + p_obj = p_obj + vm_object_size((Partial_Reveal_Object *)p_obj); + } + curr_block = curr_block->next; + if(curr_block == NULL) break; + } + + Fspace* fspace = gc->nos; + curr_block = (Block_Header*)fspace->blocks; + space_end = (Block_Header*)&fspace->blocks[fspace->free_block_idx - fspace->first_block_idx]; + while(curr_block < space_end) { + POINTER_SIZE_INT p_obj = (POINTER_SIZE_INT)curr_block->base; + POINTER_SIZE_INT block_end = (POINTER_SIZE_INT)curr_block->free; + while(p_obj < block_end){ + cont = vm_iterate_object((Managed_Object_Handle)p_obj); + if (!cont) return; + p_obj = p_obj + vm_object_size((Partial_Reveal_Object *)p_obj); + } + curr_block = curr_block->next; + if(curr_block == NULL) break; + } + + Lspace* lspace = gc->los; + POINTER_SIZE_INT lspace_obj = (POINTER_SIZE_INT)lspace->heap_start; + POINTER_SIZE_INT lspace_end = (POINTER_SIZE_INT)lspace->heap_end; + while (lspace_obj < lspace_end) { + if(!*((unsigned int *)lspace_obj)){ + lspace_obj = lspace_obj + ((Free_Area*)lspace_obj)->size; + }else{ + cont = vm_iterate_object((Managed_Object_Handle)lspace_obj); + if (!cont) return; + unsigned int obj_size = ALIGN_UP_TO_KILO(vm_object_size((Partial_Reveal_Object *)lspace_obj)); + lspace_obj = lspace_obj + obj_size; + } + } +} Index: 
vm/gc_gen/src/gen/gen.h =================================================================== --- vm/gc_gen/src/gen/gen.h (revision 499692) +++ vm/gc_gen/src/gen/gen.h (working copy) @@ -30,8 +30,6 @@ #include "../mark_sweep/lspace.h" #include "../finalizer_weakref/finalizer_weakref_metadata.h" -#define SPACE_ALLOC_UNIT ( ( GC_BLOCK_SIZE_BYTES > SYSTEM_ALLOC_UNIT) ? GC_BLOCK_SIZE_BYTES : SYSTEM_ALLOC_UNIT) - enum Write_Barrier_Kind{ WRITE_BARRIER_NIL, WRITE_BARRIER_SLOT, @@ -49,6 +47,8 @@ extern unsigned int min_nos_size_bytes; extern unsigned int max_nos_size_bytes; +struct Gen_Mode_Adaptor; + typedef struct GC_Gen { /* <-- First couple of fields overloaded as GC */ void* heap_start; @@ -75,6 +75,7 @@ unsigned int collect_kind; /* MAJOR or MINOR */ unsigned int last_collect_kind; + unsigned int cause;/*GC_CAUSE_LOS_IS_FULL, GC_CAUSE_NOS_IS_FULL, or GC_CAUSE_RUNTIME_FORCE_GC*/ Boolean collect_result; /* succeed or fail */ Boolean generate_barrier; @@ -92,8 +93,11 @@ Lspace *los; Boolean force_major_collect; + Gen_Mode_Adaptor* gen_mode_adaptor; + Boolean force_gen_mode; /* system info */ + unsigned int _system_alloc_unit; unsigned int _machine_page_size_bytes; unsigned int _num_processors; @@ -160,5 +164,11 @@ void gc_gen_reclaim_heap(GC_Gen* gc); +void gc_gen_mode_adapt_init(GC_Gen *gc); + +void gc_gen_iterate_heap(GC_Gen *gc); + +extern Boolean GEN_NONGEN_SWITCH ; + #endif /* ifndef _GC_GEN_H_ */ Index: vm/gc_gen/src/gen/gen_adapt.cpp =================================================================== --- vm/gc_gen/src/gen/gen_adapt.cpp (revision 499692) +++ vm/gc_gen/src/gen/gen_adapt.cpp (working copy) @@ -19,15 +19,52 @@ */ #include "gen.h" +#include "../common/space_tuner.h" +#include #define NOS_COPY_RESERVE_DELTA (GC_BLOCK_SIZE_BYTES<<5) +/*Tune this number in case that MOS could be too small, so as to avoid or put off fall back.*/ +#define GC_MOS_MIN_EXTRA_REMAIN_SIZE (36*MB) +/*Switch on this MACRO when we want lspace->survive_ratio to be sensitive.*/ +//#define NOS_SURVIVE_RATIO_SENSITIVE -#include +struct Mspace; +void mspace_set_expected_threshold(Mspace* mspace, POINTER_SIZE_INT threshold); static float Tslow = 0.0f; -static unsigned int SMax = 0; -static unsigned int last_total_free_size = 0; +static POINTER_SIZE_INT SMax = 0; +static POINTER_SIZE_INT last_total_free_size = 0; +typedef struct Gen_Mode_Adaptor{ + float gen_minor_throughput; + float nongen_minor_throughput; + + /*for obtaining the gen minor collection throughput.*/ + int gen_mode_trial_count; + + float major_survive_ratio_threshold; + unsigned int major_repeat_count; + + POINTER_SIZE_INT adapt_nos_size; +}Gen_Mode_Adaptor; + +void gc_gen_mode_adapt_init(GC_Gen *gc) +{ + gc->gen_mode_adaptor = (Gen_Mode_Adaptor*)STD_MALLOC( sizeof(Gen_Mode_Adaptor)); + Gen_Mode_Adaptor* gen_mode_adaptor = gc->gen_mode_adaptor; + + gen_mode_adaptor->gen_minor_throughput = 0.0f; + /*reset the nongen_minor_throughput: the first default nongen minor (maybe testgc)may caused the result + calculated to be zero. so we initial the value to 1.0f here. 
*/ + gen_mode_adaptor->nongen_minor_throughput = 1.0f; + gen_mode_adaptor->gen_mode_trial_count = 0; + + gen_mode_adaptor->major_survive_ratio_threshold = 1.0f; + gen_mode_adaptor->major_repeat_count = 1; + + gen_mode_adaptor->adapt_nos_size = min_nos_size_bytes; +} + static float mini_free_ratio(float k, float m) { /*fixme: the check should be proved!*/ @@ -53,84 +90,199 @@ return res; } -#define GC_MOS_MIN_EXTRA_REMAIN_SIZE (4*1024*1024) -static void gc_decide_next_collect(GC_Gen* gc, int64 pause_time) +#define MAX_MAJOR_REPEAT_COUNT 3 +#define MAX_MINOR_TRIAL_COUNT 2 +#define MAX_INT32 0x7fffffff + +void gc_gen_mode_adapt(GC_Gen* gc, int64 pause_time) { + if(GEN_NONGEN_SWITCH == FALSE) return; + Blocked_Space* fspace = (Blocked_Space*)gc->nos; Blocked_Space* mspace = (Blocked_Space*)gc->mos; + Gen_Mode_Adaptor* gen_mode_adaptor = gc->gen_mode_adaptor; - float survive_ratio = 0; - - unsigned int mos_free_size = space_free_memory_size(mspace); - unsigned int nos_free_size = space_free_memory_size(fspace); - unsigned int total_free_size = mos_free_size + nos_free_size; + POINTER_SIZE_INT mos_free_size = space_free_memory_size(mspace); + POINTER_SIZE_INT nos_free_size = space_free_memory_size(fspace); + POINTER_SIZE_INT total_free_size = mos_free_size + nos_free_size; - if(gc->collect_kind != MINOR_COLLECTION) - { - mspace->time_collections += pause_time; - - Tslow = (float)pause_time; - SMax = total_free_size; - gc->force_major_collect = FALSE; + if(gc->collect_kind != MINOR_COLLECTION) { + assert(!gc_is_gen_mode()); - unsigned int major_survive_size = space_committed_size((Space*)mspace) - mos_free_size; - survive_ratio = (float)major_survive_size/(float)gc_gen_total_memory_size(gc); - mspace->survive_ratio = survive_ratio; - + if(gen_mode_adaptor->major_survive_ratio_threshold != 0 && mspace->survive_ratio > gen_mode_adaptor->major_survive_ratio_threshold){ + if(gen_mode_adaptor->major_repeat_count > MAX_MAJOR_REPEAT_COUNT ){ + gc->force_gen_mode = TRUE; + gc_enable_gen_mode(); + gc->force_major_collect = FALSE; + return; + }else{ + gen_mode_adaptor->major_repeat_count++; + } + }else{ + gen_mode_adaptor->major_repeat_count = 1; + } + }else{ - /*Give a hint to mini_free_ratio. 
*/ - if(gc->num_collections == 1){ - /*fixme: This is only set for tuning the first warehouse!*/ - Tslow = pause_time / gc->survive_ratio; - SMax = (unsigned int)((float)gc->committed_heap_size * ( 1 - gc->survive_ratio )); - last_total_free_size = gc->committed_heap_size - gc->los->committed_heap_size; + /*compute throughput*/ + if(gc->last_collect_kind != MINOR_COLLECTION){ + gen_mode_adaptor->nongen_minor_throughput = 1.0f; } + if(gc->force_gen_mode){ + if(pause_time!=0){ + if(gen_mode_adaptor->gen_minor_throughput != 0) + gen_mode_adaptor->gen_minor_throughput = (gen_mode_adaptor->gen_minor_throughput + (float) nos_free_size/(float)pause_time)/2.0f; + else + gen_mode_adaptor->gen_minor_throughput =(float) nos_free_size/(float)pause_time; + } + }else{ + if(pause_time!=0){ + if(gen_mode_adaptor->gen_minor_throughput != 1.0f) + gen_mode_adaptor->nongen_minor_throughput = (gen_mode_adaptor->nongen_minor_throughput + (float) nos_free_size/(float)pause_time)/2.0f; + else + gen_mode_adaptor->nongen_minor_throughput = (float) nos_free_size/(float)pause_time; + } + } - fspace->time_collections += pause_time; - unsigned int free_size_threshold; - - unsigned int minor_survive_size = last_total_free_size - total_free_size; + if(gen_mode_adaptor->nongen_minor_throughput <= gen_mode_adaptor->gen_minor_throughput ){ + if( gc->last_collect_kind != MINOR_COLLECTION ){ + gen_mode_adaptor->major_survive_ratio_threshold = mspace->survive_ratio; + }else if( !gc->force_gen_mode ){ + gc->force_gen_mode = TRUE; + gen_mode_adaptor->gen_mode_trial_count = MAX_INT32; + } + } - float k = Tslow * fspace->num_collections/fspace->time_collections; - float m = ((float)minor_survive_size)*1.0f/((float)(SMax - GC_MOS_MIN_EXTRA_REMAIN_SIZE )); - float free_ratio_threshold = mini_free_ratio(k, m); - free_size_threshold = (unsigned int)(free_ratio_threshold * (SMax - GC_MOS_MIN_EXTRA_REMAIN_SIZE ) + GC_MOS_MIN_EXTRA_REMAIN_SIZE ); + if(gc->force_major_collect && !gc->force_gen_mode){ + gc->force_major_collect = FALSE; + gc->force_gen_mode = TRUE; + gen_mode_adaptor->gen_mode_trial_count = 2; + }else if(gc->last_collect_kind != MINOR_COLLECTION && gc->force_gen_mode){ + gen_mode_adaptor->gen_mode_trial_count = MAX_INT32; + } - if ((mos_free_size + nos_free_size)< free_size_threshold) { - gc->force_major_collect = TRUE; + if(gc->force_gen_mode && (total_free_size <= ((float)min_nos_size_bytes) * 1.3 )){ + gc->force_gen_mode = FALSE; + gc_disable_gen_mode(); + gc->force_major_collect = TRUE; + gen_mode_adaptor->gen_mode_trial_count = 0; + return; } + + if( gc->force_gen_mode ){ + assert( gen_mode_adaptor->gen_mode_trial_count >= 0); - survive_ratio = (float)minor_survive_size/(float)space_committed_size((Space*)fspace); - fspace->survive_ratio = survive_ratio; + gen_mode_adaptor->gen_mode_trial_count --; + if( gen_mode_adaptor->gen_mode_trial_count >= 0){ + gc_enable_gen_mode(); + return; + } + + gc->force_gen_mode = FALSE; + gc->force_major_collect = TRUE; + gen_mode_adaptor->gen_mode_trial_count = 0; + } } - gc->survive_ratio = (gc->survive_ratio + survive_ratio)/2.0f; + gc_disable_gen_mode(); + return; +} - last_total_free_size = total_free_size; +void mspace_set_expected_threshold(Mspace* mspace, POINTER_SIZE_INT threshold); +static void gc_decide_next_collect(GC_Gen* gc, int64 pause_time) +{ + Blocked_Space* fspace = (Blocked_Space*)gc->nos; + Blocked_Space* mspace = (Blocked_Space*)gc->mos; + + float survive_ratio = 0; + + POINTER_SIZE_INT mos_free_size = space_free_memory_size(mspace); + POINTER_SIZE_INT nos_free_size 
= space_free_memory_size(fspace); + POINTER_SIZE_INT total_free_size = mos_free_size + nos_free_size; + if(gc->collect_kind != MINOR_COLLECTION) gc->force_gen_mode = FALSE; + if(!gc->force_gen_mode){ + if(gc->collect_kind != MINOR_COLLECTION){ + mspace->time_collections += pause_time; + + Tslow = (float)pause_time; + SMax = total_free_size; + gc->force_major_collect = FALSE; + + POINTER_SIZE_INT major_survive_size = space_committed_size((Space*)mspace) - mos_free_size; + /*If major is caused by LOS, or collection kind is EXTEND_COLLECTION, all survive ratio is not updated.*/ + if((gc->cause != GC_CAUSE_LOS_IS_FULL) && (gc->collect_kind != EXTEND_COLLECTION)){ + survive_ratio = (float)major_survive_size/(float)space_committed_size((Space*)mspace); + mspace->survive_ratio = survive_ratio; + } + if(gc->tuner->kind == TRANS_FROM_MOS_TO_LOS){ + POINTER_SIZE_INT mspace_size_threshold = (space_committed_size((Space*)mspace) + space_committed_size((Space*)fspace)) >> 1; + mspace_set_expected_threshold((Mspace *)mspace, mspace_size_threshold ); + } + #ifdef NOS_SURVIVE_RATIO_SENSITIVE + /*If this major is caused by fall back compaction, + we must give fspace->survive_ratio a conservative and reasonable number to avoid next fall back.*/ + fspace->survive_ratio = mspace->survive_ratio; + #endif + }else{ + /*Give a hint to mini_free_ratio. */ + if(fspace->num_collections == 1){ + /*fixme: This is only set for tuning the first warehouse!*/ + Tslow = pause_time / gc->survive_ratio; + SMax = (POINTER_SIZE_INT)((float)gc->committed_heap_size * ( 1 - gc->survive_ratio )); + last_total_free_size = gc->committed_heap_size - gc->los->committed_heap_size; + } + + fspace->time_collections += pause_time; + POINTER_SIZE_INT free_size_threshold; + + POINTER_SIZE_INT minor_survive_size = last_total_free_size - total_free_size; + + float k = Tslow * fspace->num_collections/fspace->time_collections; + float m = ((float)minor_survive_size)*1.0f/((float)(SMax - GC_MOS_MIN_EXTRA_REMAIN_SIZE )); + float free_ratio_threshold = mini_free_ratio(k, m); + free_size_threshold = (POINTER_SIZE_INT)(free_ratio_threshold * (SMax - GC_MOS_MIN_EXTRA_REMAIN_SIZE ) + GC_MOS_MIN_EXTRA_REMAIN_SIZE ); + + if ((mos_free_size + nos_free_size)< free_size_threshold) { + gc->force_major_collect = TRUE; + } + + survive_ratio = (float)minor_survive_size/(float)space_committed_size((Space*)fspace); + fspace->survive_ratio = survive_ratio; + /*For_LOS adaptive*/ + POINTER_SIZE_INT mspace_size_threshold = space_committed_size((Space*)mspace) + space_committed_size((Space*)fspace) - free_size_threshold; + mspace_set_expected_threshold((Mspace *)mspace, mspace_size_threshold ); + } + + gc->survive_ratio = (gc->survive_ratio + survive_ratio)/2.0f; + + last_total_free_size = total_free_size; + } + + gc_gen_mode_adapt(gc,pause_time); + return; } -Boolean gc_compute_new_space_size(GC_Gen* gc, unsigned int* mos_size, unsigned int* nos_size) +Boolean gc_compute_new_space_size(GC_Gen* gc, POINTER_SIZE_INT* mos_size, POINTER_SIZE_INT* nos_size) { Blocked_Space* fspace = (Blocked_Space*)gc->nos; Blocked_Space* mspace = (Blocked_Space*)gc->mos; Blocked_Space* lspace = (Blocked_Space*)gc->los; - unsigned int new_nos_size; - unsigned int new_mos_size; + POINTER_SIZE_INT new_nos_size; + POINTER_SIZE_INT new_mos_size; - unsigned int curr_nos_size = space_committed_size((Space*)fspace); - unsigned int used_mos_size = space_used_memory_size(mspace); - unsigned int free_mos_size = space_committed_size((Space*)mspace) - used_mos_size; + POINTER_SIZE_INT curr_nos_size 
= space_committed_size((Space*)fspace); + POINTER_SIZE_INT used_mos_size = space_used_memory_size(mspace); + POINTER_SIZE_INT free_mos_size = space_committed_size((Space*)mspace) - used_mos_size; - unsigned int total_size; + POINTER_SIZE_INT total_size; #ifdef STATIC_NOS_MAPPING total_size = max_heap_size_bytes - lspace->committed_heap_size; #else - total_size = (unsigned int)gc->heap_end - (unsigned int)mspace->heap_start; + total_size = (POINTER_SIZE_INT)gc->heap_end - (POINTER_SIZE_INT)mspace->heap_start; #endif /* check if curr nos size is too small to shrink */ @@ -142,16 +294,20 @@ } */ - unsigned int total_free = total_size - used_mos_size; + POINTER_SIZE_INT total_free = total_size - used_mos_size; /* predict NOS + NOS*ratio = total_free_size */ - int nos_reserve_size; - nos_reserve_size = (int)(((float)total_free)/(1.0f + fspace->survive_ratio)); - new_nos_size = round_down_to_size((unsigned int)nos_reserve_size, SPACE_ALLOC_UNIT); + POINTER_SIZE_INT nos_reserve_size; + nos_reserve_size = (POINTER_SIZE_INT)(((float)total_free)/(1.0f + fspace->survive_ratio)); + new_nos_size = round_down_to_size((POINTER_SIZE_INT)nos_reserve_size, SPACE_ALLOC_UNIT); #ifdef STATIC_NOS_MAPPING if(new_nos_size > fspace->reserved_heap_size) new_nos_size = fspace->reserved_heap_size; #endif if(new_nos_size > GC_MOS_MIN_EXTRA_REMAIN_SIZE) new_nos_size -= GC_MOS_MIN_EXTRA_REMAIN_SIZE ; + if(gc->force_gen_mode){ + new_nos_size = min_nos_size_bytes;//round_down_to_size((unsigned int)(gc->gen_minor_adaptor->adapt_nos_size), SPACE_ALLOC_UNIT); + } + new_mos_size = total_size - new_nos_size; #ifdef STATIC_NOS_MAPPING if(new_mos_size > mspace->reserved_heap_size) new_mos_size = mspace->reserved_heap_size; @@ -173,21 +329,21 @@ Blocked_Space* fspace = (Blocked_Space*)gc->nos; Blocked_Space* mspace = (Blocked_Space*)gc->mos; - unsigned int new_nos_size; - unsigned int new_mos_size; + POINTER_SIZE_INT new_nos_size; + POINTER_SIZE_INT new_mos_size; Boolean result = gc_compute_new_space_size(gc, &new_mos_size, &new_nos_size); if(!result) return; - unsigned int curr_nos_size = space_committed_size((Space*)fspace); + POINTER_SIZE_INT curr_nos_size = space_committed_size((Space*)fspace); - if( abs((int)new_nos_size - (int)curr_nos_size) < NOS_COPY_RESERVE_DELTA ) + if( abs((POINTER_SIZE_SINT)new_nos_size - (POINTER_SIZE_SINT)curr_nos_size) < NOS_COPY_RESERVE_DELTA ) return; /* below are ajustment */ - nos_boundary = (void*)((unsigned int)gc->heap_end - new_nos_size); + nos_boundary = (void*)((POINTER_SIZE_INT)gc->heap_end - new_nos_size); fspace->heap_start = nos_boundary; fspace->blocks = (Block*)nos_boundary; @@ -208,7 +364,8 @@ Block_Header* nos_first_block = (Block_Header*)&fspace->blocks[0]; /* this is redundant: mos_last_block->next = nos_first_block; */ - HelperClass_set_NosBoundary(nos_boundary); + if( gc_is_gen_mode()) + HelperClass_set_NosBoundary(nos_boundary); return; } @@ -221,8 +378,8 @@ if(NOS_SIZE) return; - unsigned int new_nos_size; - unsigned int new_mos_size; + POINTER_SIZE_INT new_nos_size; + POINTER_SIZE_INT new_mos_size; Boolean result = gc_compute_new_space_size(gc, &new_mos_size, &new_nos_size); @@ -231,18 +388,18 @@ Blocked_Space* fspace = (Blocked_Space*)gc->nos; Blocked_Space* mspace = (Blocked_Space*)gc->mos; - unsigned int curr_nos_size = space_committed_size((Space*)fspace); + POINTER_SIZE_INT curr_nos_size = space_committed_size((Space*)fspace); - if( abs((int)new_nos_size - (int)curr_nos_size) < NOS_COPY_RESERVE_DELTA ) + if( abs((POINTER_SIZE_SINT)new_nos_size - 
(POINTER_SIZE_SINT)curr_nos_size) < NOS_COPY_RESERVE_DELTA ) return; - unsigned int used_mos_size = space_used_memory_size((Blocked_Space*)mspace); - unsigned int free_mos_size = space_free_memory_size((Blocked_Space*)mspace); + POINTER_SIZE_INT used_mos_size = space_used_memory_size((Blocked_Space*)mspace); + POINTER_SIZE_INT free_mos_size = space_free_memory_size((Blocked_Space*)mspace); - unsigned int new_free_mos_size = new_mos_size - used_mos_size; + POINTER_SIZE_INT new_free_mos_size = new_mos_size - used_mos_size; - unsigned int curr_mos_end = (unsigned int)&mspace->blocks[mspace->free_block_idx - mspace->first_block_idx]; - unsigned int mos_border = (unsigned int)mspace->heap_end; + POINTER_SIZE_INT curr_mos_end = (POINTER_SIZE_INT)&mspace->blocks[mspace->free_block_idx - mspace->first_block_idx]; + POINTER_SIZE_INT mos_border = (POINTER_SIZE_INT)mspace->heap_end; if( curr_mos_end + new_free_mos_size > mos_border){ /* we can't let mos cross border */ new_free_mos_size = mos_border - curr_mos_end; Index: vm/gc_gen/src/jni/java_natives.cpp =================================================================== --- vm/gc_gen/src/jni/java_natives.cpp (revision 499692) +++ vm/gc_gen/src/jni/java_natives.cpp (working copy) @@ -20,9 +20,9 @@ return (jint)tls_gc_offset; } -JNIEXPORT jint JNICALL Java_org_apache_harmony_drlvm_gc_1gen_GCHelper_getNosBoundary(JNIEnv *e, jclass c) +JNIEXPORT jobject JNICALL Java_org_apache_harmony_drlvm_gc_1gen_GCHelper_getNosBoundary(JNIEnv *e, jclass c) { - return (jint)nos_boundary; + return (jobject)nos_boundary; } JNIEXPORT jboolean JNICALL Java_org_apache_harmony_drlvm_gc_1gen_GCHelper_getGenMode(JNIEnv *e, jclass c) Index: vm/gc_gen/src/jni/java_support.cpp =================================================================== --- vm/gc_gen/src/jni/java_support.cpp (revision 499692) +++ vm/gc_gen/src/jni/java_support.cpp (working copy) @@ -32,6 +32,7 @@ unsigned int nfields = class_number_fields(GCHelper_clss); unsigned int i; + for(i=0; imark_task_pool); while(mark_task){ - unsigned int* iter = vector_block_iterator_init(mark_task); + POINTER_SIZE_INT* iter = vector_block_iterator_init(mark_task); while(!vector_block_iterator_end(mark_task,iter)){ Partial_Reveal_Object** p_ref = (Partial_Reveal_Object **)*iter; iter = vector_block_iterator_advance(mark_task,iter); @@ -176,3 +176,8 @@ return; } + +void trace_obj_in_fallback_marking(Collector *collector, void *p_ref) +{ + trace_object(collector, (Partial_Reveal_Object **)p_ref); +} Index: vm/gc_gen/src/mark_compact/mspace.cpp =================================================================== --- vm/gc_gen/src/mark_compact/mspace.cpp (revision 499692) +++ vm/gc_gen/src/mark_compact/mspace.cpp (working copy) @@ -20,6 +20,8 @@ #include "mspace.h" +#include "../common/gc_space.h" + static void mspace_destruct_blocks(Mspace* mspace) { return; @@ -40,12 +42,19 @@ void* reserved_base = start; /* commit mspace mem */ - vm_commit_mem(reserved_base, commit_size); + if(!large_page_hint) + vm_commit_mem(reserved_base, commit_size); memset(reserved_base, 0, commit_size); mspace->committed_heap_size = commit_size; mspace->heap_start = reserved_base; - mspace->heap_end = (void *)((unsigned int)reserved_base + mspace_size); + +#ifdef STATIC_NOS_MAPPING + mspace->heap_end = (void *)((POINTER_SIZE_INT)reserved_base + mspace_size); +#else + mspace->heap_end = (void *)((POINTER_SIZE_INT)reserved_base + commit_size); +#endif + mspace->num_managed_blocks = commit_size >> GC_BLOCK_SHIFT_COUNT; mspace->first_block_idx = 
GC_BLOCK_INDEX_FROM(gc->heap_start, reserved_base); @@ -62,6 +71,10 @@ mspace->move_object = TRUE; mspace->gc = gc; + + /*For_LOS adaptive: The threshold is initialized to half of the MOS + NOS commit size.*/ + mspace->expected_threshold = (unsigned int)( ( (float)mspace->committed_heap_size * (1.f + 1.f / gc->survive_ratio) ) * 0.5f ); + gc_set_mos((GC_Gen*)gc, (Space*)mspace); return; @@ -147,3 +160,16 @@ return; } + +/*For_LOS adaptive.*/ +void mspace_set_expected_threshold(Mspace* mspace, POINTER_SIZE_INT threshold) +{ + mspace->expected_threshold = threshold; + return; +} + +unsigned int mspace_get_expected_threshold(Mspace* mspace) +{ + return mspace->expected_threshold; +} + Index: vm/gc_gen/src/mark_compact/mspace.h =================================================================== --- vm/gc_gen/src/mark_compact/mspace.h (revision 499692) +++ vm/gc_gen/src/mark_compact/mspace.h (working copy) @@ -36,6 +36,10 @@ unsigned int collect_algorithm; GC* gc; Boolean move_object; + /*Size allocated after last collection.*/ + unsigned int alloced_size; + /*For_statistic: size survived after major*/ + unsigned int surviving_size; /* END of Space --> */ Block* blocks; /* short-cut for mpsace blockheader access, not mandatory */ @@ -50,8 +54,9 @@ unsigned int num_total_blocks; /* END of Blocked_Space --> */ - volatile Block_Header* block_iterator; - + volatile Block_Header* block_iterator; + /*Threshold computed by NOS adaptive*/ + POINTER_SIZE_INT expected_threshold; }Mspace; void mspace_initialize(GC* gc, void* reserved_base, unsigned int mspace_size, unsigned int commit_size); @@ -67,4 +72,6 @@ void mspace_fix_after_copy_nursery(Collector* collector, Mspace* mspace); +void mspace_set_expected_threshold(Mspace* mspace, unsigned int threshold); + #endif //#ifdef _MSC_SPACE_H_ Index: vm/gc_gen/src/mark_compact/mspace_alloc.cpp =================================================================== --- vm/gc_gen/src/mark_compact/mspace_alloc.cpp (revision 499692) +++ vm/gc_gen/src/mark_compact/mspace_alloc.cpp (working copy) @@ -38,6 +38,8 @@ Block_Header* alloc_block = (Block_Header*)&(mspace->blocks[allocated_idx - mspace->first_block_idx]); assert(alloc_block->status == BLOCK_FREE); alloc_block->status = BLOCK_IN_USE; + /*For_statistic mos allocation information*/ + mspace->alloced_size += GC_BLOCK_SIZE_BYTES; /* set allocation context */ void* new_free = alloc_block->free; @@ -52,7 +54,7 @@ /* the first-time zeroing area includes block header, to make subsequent allocs page aligned */ unsigned int zeroing_size = ZEROING_SIZE - GC_BLOCK_HEADER_SIZE_BYTES; - allocator->ceiling = (void*)((unsigned int)new_free + zeroing_size); + allocator->ceiling = (void*)((POINTER_SIZE_INT)new_free + zeroing_size); memset(new_free, 0, zeroing_size); #endif /* #ifndef ALLOC_ZEROING */ Index: vm/gc_gen/src/mark_compact/mspace_collect_compact.cpp =================================================================== --- vm/gc_gen/src/mark_compact/mspace_collect_compact.cpp (revision 499692) +++ vm/gc_gen/src/mark_compact/mspace_collect_compact.cpp (working copy) @@ -74,6 +74,9 @@ } } mspace->num_used_blocks = new_num_used; + /*For_statistic mos information*/ + mspace->surviving_size = new_num_used * GC_BLOCK_SIZE_BYTES; + mspace->alloced_size = 0; /* we should clear the remaining blocks which are set to be BLOCK_COMPACTED or BLOCK_TARGET */ for(; i < mspace->num_managed_blocks; i++){ @@ -183,6 +186,9 @@ return NULL; } +#include "../trace_forward/fspace.h" +#include "../gen/gen.h" + Block_Header*
mspace_get_next_target_block(Collector* collector, Mspace* mspace) { Block_Header* cur_target_block = (Block_Header*)next_block_for_target; @@ -205,8 +211,13 @@ assert( cur_target_block->status & (BLOCK_IN_COMPACT|BLOCK_COMPACTED|BLOCK_TARGET)); */ - /* nos is higher than mos, we cant use nos block for compaction target */ - while( cur_target_block ){ + /* mos may be out of space, so we can use nos blocks as compaction targets. + * but we can't use the blocks which are given to los when los extension happens. + * in this case, an out-of-memory error should be reported to the user. + */ + Fspace *nos = ((GC_Gen*)collector->gc)->nos; + Block_Header *nos_end = ((Block_Header *)&nos->blocks[nos->num_managed_blocks-1])->next; + while( cur_target_block != nos_end){ //For_LOS_extend //assert( cur_target_block <= collector->cur_compact_block); Block_Header* next_target_block = cur_target_block->next; @@ -242,8 +253,6 @@ void mspace_collection(Mspace* mspace) { - // printf("Major Collection "); - mspace->num_collections++; GC* gc = mspace->gc; @@ -259,16 +268,13 @@ //For_LOS_extend if(gc->tuner->kind != TRANS_NOTHING){ - // printf("for LOS extention"); collector_execute_task(gc, (TaskType)slide_compact_mspace, (Space*)mspace); }else if (gc->collect_kind == FALLBACK_COLLECTION){ - // printf("for Fallback"); collector_execute_task(gc, (TaskType)slide_compact_mspace, (Space*)mspace); //IS_MOVE_COMPACT = TRUE; //collector_execute_task(gc, (TaskType)move_compact_mspace, (Space*)mspace); //IS_MOVE_COMPACT = FALSE; - }else{ switch(mspace->collect_algorithm){ @@ -281,7 +287,7 @@ collector_execute_task(gc, (TaskType)move_compact_mspace, (Space*)mspace); IS_MOVE_COMPACT = FALSE; break; - + default: printf("\nThe speficied major collection algorithm doesn't exist!\n"); exit(0); @@ -289,8 +295,6 @@ } } - - // printf("...end.\n"); return; } Index: vm/gc_gen/src/mark_compact/mspace_extend_compact.cpp =================================================================== --- vm/gc_gen/src/mark_compact/mspace_extend_compact.cpp (revision 499692) +++ vm/gc_gen/src/mark_compact/mspace_extend_compact.cpp (working copy) @@ -47,24 +47,24 @@ static unsigned int fspace_shrink(Fspace *fspace) { - void *committed_nos_end = (void *)((unsigned int)space_heap_start((Space *)fspace) + fspace->committed_heap_size); + void *committed_nos_end = (void *)((POINTER_SIZE_INT)space_heap_start((Space *)fspace) + fspace->committed_heap_size); - unsigned int nos_used_size = (unsigned int)nos_first_free_block - (unsigned int)fspace->heap_start; - unsigned int nos_free_size = (unsigned int)committed_nos_end - (unsigned int)nos_first_free_block; - unsigned int decommit_size = (nos_used_size <= nos_free_size) ?
nos_used_size : nos_free_size; assert(decommit_size); - void *decommit_base = (void *)((unsigned int)committed_nos_end - decommit_size); - decommit_base = (void *)round_down_to_size((unsigned int)decommit_base, SPACE_ALLOC_UNIT); + void *decommit_base = (void *)((POINTER_SIZE_INT)committed_nos_end - decommit_size); + decommit_base = (void *)round_down_to_size((POINTER_SIZE_INT)decommit_base, SPACE_ALLOC_UNIT); if(decommit_base < (void *)nos_first_free_block) - decommit_base = (void *)((unsigned int)decommit_base + SPACE_ALLOC_UNIT); - decommit_size = (unsigned int)committed_nos_end - (unsigned int)decommit_base; + decommit_base = (void *)((POINTER_SIZE_INT)decommit_base + SPACE_ALLOC_UNIT); + decommit_size = (POINTER_SIZE_INT)committed_nos_end - (POINTER_SIZE_INT)decommit_base; assert(decommit_size && !(decommit_size % SPACE_ALLOC_UNIT)); Boolean result = vm_decommit_mem(decommit_base, decommit_size); assert(result == TRUE); - fspace->committed_heap_size = (unsigned int)decommit_base - (unsigned int)fspace->heap_start; + fspace->committed_heap_size = (POINTER_SIZE_INT)decommit_base - (POINTER_SIZE_INT)fspace->heap_start; fspace->num_managed_blocks = fspace->committed_heap_size >> GC_BLOCK_SHIFT_COUNT; Block_Header *new_last_block = (Block_Header *)&fspace->blocks[fspace->num_managed_blocks - 1]; @@ -78,24 +78,24 @@ { Block_Header *old_last_mos_block = (Block_Header *)(mos_first_new_block -1); old_last_mos_block->next = (Block_Header *)mos_first_new_block; - void *new_committed_mos_end = (void *)((unsigned int)space_heap_start((Space *)mspace) + mspace->committed_heap_size); + void *new_committed_mos_end = (void *)((POINTER_SIZE_INT)space_heap_start((Space *)mspace) + mspace->committed_heap_size); Block_Header *new_last_mos_block = (Block_Header *)((Block *)new_committed_mos_end -1); new_last_mos_block->next = (Block_Header *)space_heap_start((Space *)fspace); } -static Block *mspace_extend_without_link(Mspace *mspace, unsigned int commit_size) +static Block *mspace_extend_without_link(Mspace *mspace, Fspace *fspace, unsigned int commit_size) { - assert(commit_size && !(commit_size % SPACE_ALLOC_UNIT)); + assert(commit_size && !(commit_size % GC_BLOCK_SIZE_BYTES)); - void *committed_mos_end = (void *)((unsigned int)space_heap_start((Space *)mspace) + mspace->committed_heap_size); + void *committed_mos_end = (void *)((POINTER_SIZE_INT)space_heap_start((Space *)mspace) + mspace->committed_heap_size); void *commit_base = committed_mos_end; - assert(!((unsigned int)committed_mos_end % SPACE_ALLOC_UNIT)); + assert(!((POINTER_SIZE_INT)committed_mos_end % SPACE_ALLOC_UNIT)); void *result = vm_commit_mem(commit_base, commit_size); assert(result == commit_base); - void *new_end = (void *)((unsigned int)commit_base + commit_size); - mspace->committed_heap_size = (unsigned int)new_end - (unsigned int)mspace->heap_start; + void *new_end = (void *)((POINTER_SIZE_INT)commit_base + commit_size); + mspace->committed_heap_size = (POINTER_SIZE_INT)new_end - (POINTER_SIZE_INT)mspace->heap_start; /* init the grown blocks */ Block_Header *block = (Block_Header *)commit_base; @@ -147,19 +147,19 @@ if (object_is_array(p_obj)) { Partial_Reveal_Array* array = (Partial_Reveal_Array*)p_obj; assert(!obj_is_primitive_array(p_obj)); - + int32 array_length = array->array_len; - Partial_Reveal_Object** p_refs = (Partial_Reveal_Object**)((int)array + (int)array_first_element_offset(array)); - + Partial_Reveal_Object** p_refs = (Partial_Reveal_Object**)((POINTER_SIZE_INT)array + (int)array_first_element_offset(array)); + 
for (int i = 0; i < array_length; i++) { Partial_Reveal_Object** p_ref = p_refs + i; Partial_Reveal_Object* p_element = *p_ref; if((p_element > start_address) && (p_element < end_address)) - *p_ref = (Partial_Reveal_Object*)((unsigned int)p_element - addr_diff); + *p_ref = (Partial_Reveal_Object*)((POINTER_SIZE_INT)p_element - addr_diff); } return; } - + /* scan non-array object */ int *offset_scanner = init_object_scanner(p_obj); while (true) { @@ -168,7 +168,7 @@ Partial_Reveal_Object* p_element = *p_ref; if((p_element > start_address) && (p_element < end_address)) - *p_ref = (Partial_Reveal_Object*)((unsigned int)p_element - addr_diff); + *p_ref = (Partial_Reveal_Object*)((POINTER_SIZE_INT)p_element - addr_diff); offset_scanner = offset_next_ref(offset_scanner); } @@ -209,14 +209,14 @@ pool_iterator_init(pool); while(Vector_Block *root_set = pool_iterator_next(pool)){ - unsigned int *iter = vector_block_iterator_init(root_set); + POINTER_SIZE_INT *iter = vector_block_iterator_init(root_set); while(!vector_block_iterator_end(root_set,iter)){ Partial_Reveal_Object **p_ref = (Partial_Reveal_Object **)*iter; iter = vector_block_iterator_advance(root_set,iter); Partial_Reveal_Object *p_obj = *p_ref; if((p_obj > start_address) && (p_obj < end_address)) - *p_ref = (Partial_Reveal_Object*)((unsigned int)p_obj - addr_diff); + *p_ref = (Partial_Reveal_Object*)((POINTER_SIZE_INT)p_obj - addr_diff); } } } @@ -248,14 +248,14 @@ Partial_Reveal_Object *p_obj = (Partial_Reveal_Object *)block->base; void *src_base = (void *)block->base; void *block_end = block->new_free; // new_free or free depends on whether reset is done or not - unsigned int size = (unsigned int)block_end - (unsigned int)src_base; - Block_Header *dest_block = GC_BLOCK_HEADER((void *)((unsigned int)src_base - addr_diff)); + POINTER_SIZE_INT size = (POINTER_SIZE_INT)block_end - (POINTER_SIZE_INT)src_base; + Block_Header *dest_block = GC_BLOCK_HEADER((void *)((POINTER_SIZE_INT)src_base - addr_diff)); memmove(dest_block->base, src_base, size); - dest_block->new_free = (void *)((unsigned int)block_end - addr_diff); + dest_block->new_free = (void *)((POINTER_SIZE_INT)block_end - addr_diff); if(verify_live_heap) while (p_obj < block_end) { - event_collector_doublemove_obj(p_obj, (Partial_Reveal_Object *)((unsigned int)p_obj - addr_diff), collector); - p_obj = obj_end(p_obj); + event_collector_doublemove_obj(p_obj, (Partial_Reveal_Object *)((POINTER_SIZE_INT)p_obj - addr_diff), collector); + p_obj = obj_end(p_obj); } } } @@ -269,6 +269,9 @@ Mspace *mspace = gc_gen->mos; Fspace *fspace = gc_gen->nos; Lspace *lspace = gc_gen->los; + + /*For_LOS adaptive: when doing EXTEND_COLLECTION, mspace->survive_ratio should not be updated in gc_decide_next_collect( )*/ + gc_gen->collect_kind = EXTEND_COLLECTION; unsigned int num_active_collectors = gc_gen->num_active_collectors; unsigned int old_num; @@ -299,8 +302,10 @@ } #else +static volatile unsigned int num_recomputing_collectors = 0; static volatile unsigned int num_refixing_collectors = 0; static volatile unsigned int num_moving_collectors = 0; +static volatile unsigned int num_restoring_collectors = 0; void mspace_extend_compact(Collector *collector) { @@ -308,6 +313,9 @@ Mspace *mspace = gc_gen->mos; Fspace *fspace = gc_gen->nos; Lspace *lspace = gc_gen->los; + + /*For_LOS adaptive: when doing EXTEND_COLLECTION, mspace->survive_ratio should not be updated in gc_decide_next_collect( )*/ + gc_gen->collect_kind = EXTEND_COLLECTION; unsigned int num_active_collectors = gc_gen->num_active_collectors; 
unsigned int old_num; @@ -322,16 +330,16 @@ old_num = atomic_inc32(&num_space_changing_collectors); if( old_num == 0 ){ unsigned int mem_changed_size = fspace_shrink(fspace); - mos_first_new_block = mspace_extend_without_link(mspace, mem_changed_size); + mos_first_new_block = mspace_extend_without_link(mspace, fspace, mem_changed_size); set_first_and_end_block_to_move(collector, mem_changed_size); //mspace_block_iter_init_for_extension(mspace, (Block_Header *)first_block_to_move); mspace_block_iter_init_for_extension(mspace, (Block_Header *)mspace->blocks); - + num_space_changing_collectors++; } while(num_space_changing_collectors != num_active_collectors + 1); - + atomic_cas32( &num_refixing_collectors, 0, num_active_collectors+1); mspace_refix_repointed_refs(collector, mspace, (void *)first_block_to_move, (void *)nos_first_free_block, (first_block_to_move - mos_first_new_block) << GC_BLOCK_SHIFT_COUNT); Index: vm/gc_gen/src/mark_compact/mspace_move_compact.cpp =================================================================== --- vm/gc_gen/src/mark_compact/mspace_move_compact.cpp (revision 499692) +++ vm/gc_gen/src/mark_compact/mspace_move_compact.cpp (working copy) @@ -15,7 +15,7 @@ */ /** - * @author Chunrong Lai, 2006/12/01 + * @author Chunrong Lai, 2006/12/25 */ #include "mspace_collect_compact.h" @@ -56,22 +56,22 @@ /* we don't check if it's set, since only remaining objs from last NOS partial collection need it. */ obj_unmark_in_oi(p_obj); - unsigned int curr_sector_size = (unsigned int)start_pos - (unsigned int)src_sector_addr; + POINTER_SIZE_INT curr_sector_size = (POINTER_SIZE_INT)start_pos - (POINTER_SIZE_INT)src_sector_addr; /* check if dest block is not enough to hold this sector. If yes, grab next one */ - unsigned int block_end = (unsigned int)GC_BLOCK_END(dest_block); - if( ((unsigned int)dest_sector_addr + curr_sector_size) > block_end ){ + POINTER_SIZE_INT block_end = (POINTER_SIZE_INT)GC_BLOCK_END(dest_block); + if( ((POINTER_SIZE_INT)dest_sector_addr + curr_sector_size) > block_end ){ dest_block->new_free = dest_sector_addr; dest_block = mspace_get_next_target_block(collector, mspace); if(dest_block == NULL){ collector->result = FALSE; return; } - block_end = (unsigned int)GC_BLOCK_END(dest_block); + block_end = (POINTER_SIZE_INT)GC_BLOCK_END(dest_block); dest_sector_addr = dest_block->base; } - assert(((unsigned int)dest_sector_addr + curr_sector_size) <= block_end ); + assert(((POINTER_SIZE_INT)dest_sector_addr + curr_sector_size) <= block_end ); /* check if current sector has no more sector. If not, loop back. FIXME:: we should add a condition for block check */ p_obj = block_get_next_marked_object(curr_block, &start_pos); @@ -79,26 +79,26 @@ continue; /* current sector is done, let's move it. 
*/ - unsigned int sector_distance = (unsigned int)src_sector_addr - (unsigned int)dest_sector_addr; + POINTER_SIZE_INT sector_distance = (POINTER_SIZE_INT)src_sector_addr - (POINTER_SIZE_INT)dest_sector_addr; curr_block->table[curr_sector] = sector_distance; if (verify_live_heap) { - Partial_Reveal_Object *rescan_obj = (Partial_Reveal_Object *)src_sector_addr; - void *rescan_pos = (Partial_Reveal_Object *)((unsigned int)rescan_obj + vm_object_size(rescan_obj)); - while ((unsigned int)rescan_obj < (unsigned int)src_sector_addr + curr_sector_size) { - Partial_Reveal_Object* targ_obj = (Partial_Reveal_Object *)((unsigned int)rescan_obj- sector_distance); + Partial_Reveal_Object *rescan_obj = (Partial_Reveal_Object *)src_sector_addr; + void *rescan_pos = (Partial_Reveal_Object *)((POINTER_SIZE_INT)rescan_obj + vm_object_size(rescan_obj)); + while ((POINTER_SIZE_INT)rescan_obj < (POINTER_SIZE_INT)src_sector_addr + curr_sector_size) { + Partial_Reveal_Object* targ_obj = (Partial_Reveal_Object *)((POINTER_SIZE_INT)rescan_obj- sector_distance); if(is_fallback) event_collector_doublemove_obj(rescan_obj, targ_obj, collector); else event_collector_move_obj(rescan_obj, targ_obj, collector); - rescan_obj = block_get_next_marked_object(curr_block, &rescan_pos); - if(rescan_obj == NULL) break; - } + rescan_obj = block_get_next_marked_object(curr_block, &rescan_pos); + if(rescan_obj == NULL) break; + } } memmove(dest_sector_addr, src_sector_addr, curr_sector_size); - dest_sector_addr = (void*)((unsigned int) dest_sector_addr + curr_sector_size); + dest_sector_addr = (void*)((POINTER_SIZE_INT)dest_sector_addr + curr_sector_size); src_sector_addr = p_obj; curr_sector = OBJECT_INDEX_TO_OFFSET_TABLE(p_obj); } @@ -147,7 +147,7 @@ if(gc->collect_kind != FALLBACK_COLLECTION) mark_scan_heap(collector); else - fallback_mark_scan_heap(collector); + fallback_mark_scan_heap(collector); old_num = atomic_inc32(&num_marking_collectors); if( ++old_num == num_active_collectors ){ @@ -160,10 +160,9 @@ #ifndef BUILD_IN_REFERENT else { gc_set_weakref_sets(gc); - update_ref_ignore_finref(collector); + gc_update_weakref_ignore_finref(gc); } #endif - /* let other collectors go */ num_marking_collectors++; @@ -208,7 +207,7 @@ } while(num_fixing_collectors != num_active_collectors + 1); - /* Dealing with out of space in mspace */ + /* Dealing with out of memory in mspace */ if(mspace->free_block_idx > fspace->first_block_idx){ atomic_cas32( &num_extending_collectors, 0, num_active_collectors); mspace_extend_compact(collector); @@ -220,10 +219,6 @@ */ if( collector->thread_handle != 0 ) return; - - if(!IGNORE_FINREF ) - gc_put_finref_to_vm(gc); - mspace_reset_after_compaction(mspace); fspace_reset_for_allocation(fspace); Index: vm/gc_gen/src/mark_compact/mspace_slide_compact.cpp =================================================================== --- vm/gc_gen/src/mark_compact/mspace_slide_compact.cpp (revision 499692) +++ vm/gc_gen/src/mark_compact/mspace_slide_compact.cpp (working copy) @@ -23,23 +23,12 @@ #include "../mark_sweep/lspace.h" #include "../finalizer_weakref/finalizer_weakref.h" -//#define VERIFY_SLIDING_COMPACT struct GC_Gen; Space* gc_get_nos(GC_Gen* gc); Space* gc_get_mos(GC_Gen* gc); Space* gc_get_los(GC_Gen* gc); -#ifdef VERIFY_SLIDING_COMPACT -typedef struct { - unsigned int addr; - unsigned int dest_counter; - unsigned int collector; - Block_Header *src_list[1021]; -} Block_Verify_Info; -static Block_Verify_Info block_info[32*1024][2]; -#endif - static volatile Block_Header *last_block_for_dest; static void 
mspace_compute_object_target(Collector* collector, Mspace* mspace) @@ -49,10 +38,6 @@ void *dest_addr = dest_block->base; Block_Header *last_src; -#ifdef VERIFY_SLIDING_COMPACT - block_info[(Block*)dest_block-mspace->blocks][0].collector = (unsigned int)collector->thread_handle + 1; -#endif - assert(!collector->rem_set); collector->rem_set = free_set_pool_get_entry(collector->gc->metadata); @@ -72,9 +57,9 @@ while( p_obj ){ assert( obj_is_marked_in_vt(p_obj)); - unsigned int obj_size = (unsigned int)start_pos - (unsigned int)p_obj; + unsigned int obj_size = (unsigned int)((POINTER_SIZE_INT)start_pos - (POINTER_SIZE_INT)p_obj); - if( ((unsigned int)dest_addr + obj_size) > (unsigned int)GC_BLOCK_END(dest_block)){ + if( ((POINTER_SIZE_INT)dest_addr + obj_size) > (POINTER_SIZE_INT)GC_BLOCK_END(dest_block)){ dest_block->new_free = dest_addr; dest_block = mspace_get_next_target_block(collector, mspace); if(dest_block == NULL){ @@ -86,12 +71,8 @@ last_src = curr_block; if(p_obj != first_obj) ++curr_block->dest_counter; - -#ifdef VERIFY_SLIDING_COMPACT - block_info[(Block*)dest_block-mspace->blocks][0].collector = (unsigned int)collector->thread_handle + 1; -#endif } - assert(((unsigned int)dest_addr + obj_size) <= (unsigned int)GC_BLOCK_END(dest_block)); + assert(((POINTER_SIZE_INT)dest_addr + obj_size) <= (POINTER_SIZE_INT)GC_BLOCK_END(dest_block)); Obj_Info_Type obj_info = get_obj_info(p_obj); @@ -103,7 +84,7 @@ obj_set_fw_in_oi(p_obj, dest_addr); /* FIXME: should use alloc to handle alignment requirement */ - dest_addr = (void *)((unsigned int) dest_addr + obj_size); + dest_addr = (void *)((POINTER_SIZE_INT) dest_addr + obj_size); p_obj = block_get_next_marked_obj_prefetch_next(curr_block, &start_pos); } @@ -177,37 +158,28 @@ } else { cur_dest_block = set_next_block_for_dest(mspace); } - -// printf("Getting next dest block:\n"); -// printf("next_block_for_dest: %d\n\n", next_block_for_dest ? 
next_block_for_dest->block_idx : 0); unsigned int total_dest_counter = 0; Block_Header *last_dest_block = (Block_Header *)last_block_for_dest; for(; cur_dest_block <= last_dest_block; cur_dest_block = cur_dest_block->next){ if(cur_dest_block->status == BLOCK_DEST){ -// printf("idx: %d DEST ", cur_dest_block->block_idx); continue; } if(cur_dest_block->dest_counter == 0 && cur_dest_block->src){ -// printf("idx: %d DEST FOUND!\n\n", cur_dest_block->block_idx); cur_dest_block->status = BLOCK_DEST; return cur_dest_block; } else if(cur_dest_block->dest_counter == 1 && GC_BLOCK_HEADER(cur_dest_block->src) == cur_dest_block){ -// printf("idx: %d NON_DEST FOUND!\n\n", cur_dest_block->block_idx); return cur_dest_block; } else if(cur_dest_block->dest_counter == 0 && !cur_dest_block->src){ -// printf("idx: %d NO_SRC ", cur_dest_block->block_idx); cur_dest_block->status = BLOCK_DEST; } else { -// printf("OTHER "); total_dest_counter += cur_dest_block->dest_counter; } } - if(total_dest_counter){ -// printf("\nNeed refind!\n\n"); + if(total_dest_counter) return DEST_NOT_EMPTY; - } + return NULL; } @@ -316,7 +288,7 @@ assert(obj_is_marked_in_vt(p_obj)); obj_unmark_in_vt(p_obj); - unsigned int obj_size = (unsigned int)start_pos - (unsigned int)p_obj; + unsigned int obj_size = (unsigned int)((POINTER_SIZE_INT)start_pos - (POINTER_SIZE_INT)p_obj); if(p_obj != p_target_obj){ memmove(p_target_obj, p_obj, obj_size); @@ -336,101 +308,12 @@ p_target_obj = obj_get_fw_in_oi(p_obj); } while(GC_BLOCK_HEADER(p_target_obj) == dest_block); - -#ifdef VERIFY_SLIDING_COMPACT - printf("dest_block: %x src_block: %x collector: %x\n", (unsigned int)dest_block, (unsigned int)src_block, (unsigned int)collector->thread_handle); -#endif - + atomic_dec32(&src_block->dest_counter); } -#ifdef VERIFY_SLIDING_COMPACT - static unsigned int fax = 0; - fax++; - printf("\n\n\nCollector %d Sliding compact ends! 
%d \n\n\n", (unsigned int)collector->thread_handle, fax); -#endif - } -#ifdef VERIFY_SLIDING_COMPACT - -static void verify_sliding_compact(Mspace *mspace, Boolean before) -{ - unsigned int i, j, k; - Block_Header *header; - - if(before) - j = 0; - else - j = 1; - - for(i = 0, header = (Block_Header *)mspace->blocks; - header; - header=header->next, ++i) - { - block_info[i][j].addr = (unsigned int)header; - block_info[i][j].dest_counter = header->dest_counter; - if(header->src){ - Partial_Reveal_Object *src_obj = header->src; - k = 0; - printf("\nHeader: %x %x Collector: %x ", (unsigned int)header, block_info[i][j].dest_counter, block_info[i][j].collector); - Block_Header *dest_header = GC_BLOCK_HEADER(obj_get_fw_in_oi(src_obj)); - while(dest_header == header){ - block_info[i][j].src_list[k] = dest_header; - Block_Header *src_header = GC_BLOCK_HEADER(src_obj); - printf("%x %x ", (unsigned int)src_header, src_header->dest_counter); - src_obj = src_header->next_src; - if(!src_obj) - break; - dest_header = GC_BLOCK_HEADER(obj_get_fw_in_oi(src_obj)); - if(++k >= 1021) - assert(0); - } - } - } - - if(!before){ - for(i = 0, header = (Block_Header *)mspace->blocks; - header; - header=header->next, ++i) - { - Boolean correct = TRUE; - if(block_info[i][0].addr != block_info[i][1].addr) - correct = FALSE; - if(block_info[i][0].dest_counter != block_info[i][1].dest_counter) - correct = FALSE; - for(k = 0; k < 1021; k++){ - if(block_info[i][0].src_list[k] != block_info[i][1].src_list[k]){ - correct = FALSE; - break; - } - } - if(!correct) - printf("header: %x %x dest_counter: %x %x src: %x %x", - block_info[i][0].addr, block_info[i][1].addr, - block_info[i][0].dest_counter, block_info[i][1].dest_counter, - block_info[i][0].src_list[k], block_info[i][1].src_list[k]); - } - - unsigned int *array = (unsigned int *)block_info; - memset(array, 0, 1024*32*1024*2); - } -} -#endif - -/* -#define OI_RESTORING_THRESHOLD 8 -static volatile Boolean parallel_oi_restoring; -unsigned int mspace_saved_obj_info_size(GC*gc){ return pool_size(gc->metadata->collector_remset_pool);} -*/ - -static volatile unsigned int num_marking_collectors = 0; -static volatile unsigned int num_repointing_collectors = 0; -static volatile unsigned int num_fixing_collectors = 0; -static volatile unsigned int num_moving_collectors = 0; -static volatile unsigned int num_restoring_collectors = 0; -static volatile unsigned int num_extending_collectors = 0; - //For_LOS_extend void mspace_restore_block_chain(Mspace* mspace) { @@ -442,6 +325,13 @@ } } +static volatile unsigned int num_marking_collectors = 0; +static volatile unsigned int num_repointing_collectors = 0; +static volatile unsigned int num_fixing_collectors = 0; +static volatile unsigned int num_moving_collectors = 0; +static volatile unsigned int num_restoring_collectors = 0; +static volatile unsigned int num_extending_collectors = 0; + void slide_compact_mspace(Collector* collector) { GC* gc = collector->gc; @@ -472,7 +362,7 @@ #ifndef BUILD_IN_REFERENT else { gc_set_weakref_sets(gc); - update_ref_ignore_finref(collector); + gc_update_weakref_ignore_finref(gc); } #endif @@ -494,8 +384,7 @@ /* single thread world */ gc->collect_result = gc_collection_result(gc); if(!gc->collect_result){ - num_repointing_collectors++; - assert(0); // Now we should not be out of mem here. mspace_extend_compact() is backing up for this case. 
+ num_repointing_collectors++; return; } @@ -517,15 +406,9 @@ /* last collector's world here */ lspace_fix_repointed_refs(collector, lspace); gc_fix_rootset(collector); - - if(!IGNORE_FINREF ) - gc_put_finref_to_vm(gc); - -#ifdef VERIFY_SLIDING_COMPACT - verify_sliding_compact(mspace, TRUE); -#endif gc_init_block_for_sliding_compact(gc, mspace); + num_fixing_collectors++; } while(num_fixing_collectors != num_active_collectors + 1); @@ -547,6 +430,7 @@ old_num = atomic_inc32(&num_restoring_collectors); if( ++old_num == num_active_collectors ){ + update_mspace_info_for_los_extension(mspace); num_restoring_collectors++; Index: vm/gc_gen/src/mark_sweep/free_area_pool.h =================================================================== --- vm/gc_gen/src/mark_sweep/free_area_pool.h (revision 499692) +++ vm/gc_gen/src/mark_sweep/free_area_pool.h (working copy) @@ -25,9 +25,9 @@ #include "../utils/bit_ops.h" #include "../utils/bidir_list.h" -#define ADDRESS_IS_KB_ALIGNED(addr) (!(((unsigned int)addr) & ((1 << BIT_SHIFT_TO_KILO)-1))) -#define ALIGN_UP_TO_KILO(addr) (((unsigned int)(addr) + (KB - 1)) & (~(KB- 1))) -#define ALIGN_DOWN_TO_KILO(addr) ((unsigned int)(addr) & (~(KB- 1))) +#define ADDRESS_IS_KB_ALIGNED(addr) (!(((POINTER_SIZE_INT)addr) & ((1 << BIT_SHIFT_TO_KILO)-1))) +#define ALIGN_UP_TO_KILO(addr) (((POINTER_SIZE_INT)(addr) + (KB - 1)) & (~(KB- 1))) +#define ALIGN_DOWN_TO_KILO(addr) ((POINTER_SIZE_INT)(addr) & (~(KB- 1))) #define NUM_FREE_LIST 128 @@ -37,7 +37,7 @@ Bidir_List* next; Bidir_List* prev; /* END of Bidir_List --> */ - unsigned int lock; + SpinLock lock; }Lockable_Bidir_List; typedef struct Free_Area{ @@ -120,7 +120,7 @@ /* set bit flag of the list */ Bidir_List* list = (Bidir_List*)&(pool->sized_area_list[index]); if(list->next == list){ - pool_list_clear_flag(pool, index); + pool_list_clear_flag(pool, index); } } Index: vm/gc_gen/src/mark_sweep/lspace.cpp =================================================================== --- vm/gc_gen/src/mark_sweep/lspace.cpp (revision 499692) +++ vm/gc_gen/src/mark_sweep/lspace.cpp (working copy) @@ -33,13 +33,14 @@ /* commit mspace mem */ void* reserved_base = start; unsigned int committed_size = lspace_size; - vm_commit_mem(reserved_base, lspace_size); + if(!large_page_hint) + vm_commit_mem(reserved_base, lspace_size); memset(reserved_base, 0, lspace_size); lspace->committed_heap_size = committed_size; lspace->reserved_heap_size = committed_size; lspace->heap_start = reserved_base; - lspace->heap_end = (void *)((unsigned int)reserved_base + committed_size); + lspace->heap_end = (void *)((POINTER_SIZE_INT)reserved_base + committed_size); lspace->move_object = FALSE; lspace->gc = gc; @@ -102,3 +103,8 @@ lspace_sweep(lspace); return; } + +unsigned int lspace_get_failure_size(Lspace* lspace) +{ + return lspace->failure_size; +} Index: vm/gc_gen/src/mark_sweep/lspace.h =================================================================== --- vm/gc_gen/src/mark_sweep/lspace.h (revision 499692) +++ vm/gc_gen/src/mark_sweep/lspace.h (working copy) @@ -25,9 +25,6 @@ #include "../thread/gc_thread.h" #include "free_area_pool.h" -#define GC_MIN_LOS_SIZE ( 4 * 1024 * 1024) - - typedef struct Lspace{ /* <-- first couple of fields are overloadded as Space */ void* heap_start; @@ -37,13 +34,18 @@ unsigned int num_collections; int64 time_collections; float survive_ratio; + unsigned int collect_algorithm; GC* gc; Boolean move_object; + /*For_statistic: size allocated since the last los collection, i.e. the
last major*/ + unsigned int alloced_size; + /*For_statistic: size survived after lspace_sweep*/ + unsigned int surviving_size; /* END of Space --> */ -// void* alloc_free; Free_Area_Pool* free_pool; - + /*Size of allocation which caused lspace alloc failure.*/ + unsigned int failure_size; }Lspace; void lspace_initialize(GC* gc, void* reserved_base, unsigned int lspace_size); @@ -58,7 +60,7 @@ inline Partial_Reveal_Object* lspace_get_next_marked_object( Lspace* lspace, unsigned int* iterate_index) { - unsigned int next_area_start = (unsigned int)lspace->heap_start + (*iterate_index) * KB; + POINTER_SIZE_INT next_area_start = (POINTER_SIZE_INT)lspace->heap_start + (*iterate_index) * KB; BOOLEAN reach_heap_end = 0; while(!reach_heap_end){ @@ -66,11 +68,11 @@ while(!*((unsigned int *)next_area_start)){ next_area_start += ((Free_Area*)next_area_start)->size; } - if(next_area_start < (unsigned int)lspace->heap_end){ + if(next_area_start < (POINTER_SIZE_INT)lspace->heap_end){ //If there is a living object at this addr, return it, and update iterate_index if(obj_is_marked_in_vt((Partial_Reveal_Object*)next_area_start)){ unsigned int obj_size = ALIGN_UP_TO_KILO(vm_object_size((Partial_Reveal_Object*)next_area_start)); - *iterate_index = (next_area_start + obj_size - (unsigned int)lspace->heap_start) >> BIT_SHIFT_TO_KILO; + *iterate_index = (unsigned int)((next_area_start + obj_size - (POINTER_SIZE_INT)lspace->heap_start) >> BIT_SHIFT_TO_KILO); return (Partial_Reveal_Object*)next_area_start; //If this is a dead object, go on to find a living one. }else{ @@ -94,4 +96,6 @@ void lspace_fix_repointed_refs(Collector* collector, Lspace* lspace); +unsigned int lspace_get_failure_size(Lspace* lspace); + #endif /*_LSPACE_H_ */ Index: vm/gc_gen/src/mark_sweep/lspace_alloc_collect.cpp =================================================================== --- vm/gc_gen/src/mark_sweep/lspace_alloc_collect.cpp (revision 499692) +++ vm/gc_gen/src/mark_sweep/lspace_alloc_collect.cpp (working copy) @@ -22,30 +22,28 @@ #include "../gen/gen.h" #include "../common/space_tuner.h" -inline void free_pool_lock_nr_list(Free_Area_Pool* pool, unsigned int list_index){ +static void free_pool_lock_nr_list(Free_Area_Pool* pool, unsigned int list_index) +{ Lockable_Bidir_List* list_head = &pool->sized_area_list[list_index]; - while (apr_atomic_casptr( - (volatile void **) &(list_head->lock), - (void *) 1, (void *) 0) - != (void *) 0) { - while (list_head->lock == 1) { - ; - } - } + lock(list_head->lock); +} +static void free_pool_unlock_nr_list(Free_Area_Pool* pool, unsigned int list_index) +{ + Lockable_Bidir_List* list_head = &pool->sized_area_list[list_index]; + unlock(list_head->lock); } -inline void free_pool_unlock_nr_list(Free_Area_Pool* pool, unsigned int list_index){ - ((Lockable_Bidir_List*)(&pool->sized_area_list[list_index]))->lock = 0; -} -inline unsigned int free_pool_nr_list_is_empty(Free_Area_Pool* pool, unsigned int list_index){ + +static unsigned int free_pool_nr_list_is_empty(Free_Area_Pool* pool, unsigned int list_index) +{ Bidir_List* head = (Bidir_List*)(&pool->sized_area_list[list_index]); return (head->next == head); } -inline void* free_pool_former_lists_atomic_take_area_piece(Free_Area_Pool* pool, unsigned int list_hint, unsigned int size) +static void* free_pool_former_lists_atomic_take_area_piece(Free_Area_Pool* pool, unsigned int list_hint, unsigned int size) { Free_Area* free_area; void* p_result; - unsigned int remain_size; + int remain_size; unsigned int alloc_size = ALIGN_UP_TO_KILO(size); unsigned 
int new_list_nr = 0; Lockable_Bidir_List* head = &pool->sized_area_list[list_hint]; @@ -60,10 +58,12 @@ } free_area = (Free_Area*)(head->next); + /*if the list head is not NULL, it definitely satisfies the request. */ remain_size = free_area->size - alloc_size; + assert(remain_size >= 0); if( remain_size >= GC_OBJ_SIZE_THRESHOLD){ new_list_nr = pool_list_index_with_size(remain_size); - p_result = (void*)((unsigned int)free_area + remain_size); + p_result = (void*)((POINTER_SIZE_INT)free_area + remain_size); if(new_list_nr == list_hint){ free_area->size = remain_size; free_pool_unlock_nr_list(pool, list_hint); @@ -78,23 +78,22 @@ return p_result; } } - else if(remain_size >= 0) + else { free_pool_remove_area(pool, free_area); free_pool_unlock_nr_list(pool, list_hint); - p_result = (void*)((unsigned int)free_area + remain_size); + p_result = (void*)((POINTER_SIZE_INT)free_area + remain_size); if(remain_size > 0){ assert((remain_size >= KB) && (remain_size < GC_OBJ_SIZE_THRESHOLD)); free_area->size = remain_size; } return p_result; } - /*We never get here, because if the list head is not NULL, it definitely satisfy the request. */ assert(0); return NULL; } -inline void* free_pool_last_list_atomic_take_area_piece(Free_Area_Pool* pool, unsigned int size) +static void* free_pool_last_list_atomic_take_area_piece(Free_Area_Pool* pool, unsigned int size) { void* p_result; int remain_size = 0; @@ -116,7 +115,7 @@ remain_size = free_area->size - alloc_size; if( remain_size >= GC_OBJ_SIZE_THRESHOLD){ new_list_nr = pool_list_index_with_size(remain_size); - p_result = (void*)((unsigned int)free_area + remain_size); + p_result = (void*)((POINTER_SIZE_INT)free_area + remain_size); if(new_list_nr == MAX_LIST_INDEX){ free_area->size = remain_size; free_pool_unlock_nr_list(pool, MAX_LIST_INDEX); @@ -135,7 +134,7 @@ { free_pool_remove_area(pool, free_area); free_pool_unlock_nr_list(pool, MAX_LIST_INDEX); - p_result = (void*)((unsigned int)free_area + remain_size); + p_result = (void*)((POINTER_SIZE_INT)free_area + remain_size); if(remain_size > 0){ assert((remain_size >= KB) && (remain_size < GC_OBJ_SIZE_THRESHOLD)); free_area->size = remain_size; @@ -167,6 +166,12 @@ p_result = free_pool_former_lists_atomic_take_area_piece(pool, list_hint, alloc_size); if(p_result){ memset(p_result, 0, size); + unsigned int vold = lspace->alloced_size; + unsigned int vnew = vold + alloc_size; + while( vold != atomic_cas32(&lspace->alloced_size, vnew, vold) ){ + vold = lspace->alloced_size; + vnew = vold + alloc_size; + } return p_result; }else{ list_hint ++; @@ -180,6 +185,12 @@ p_result = free_pool_last_list_atomic_take_area_piece(pool, alloc_size); if(p_result){ memset(p_result, 0, size); + unsigned int vold = lspace->alloced_size; + unsigned int vnew = vold + alloc_size; + while( vold != atomic_cas32(&lspace->alloced_size, vnew, vold) ){ + vold = lspace->alloced_size; + vnew = vold + alloc_size; + } return p_result; } else break; @@ -188,6 +199,7 @@ /*Failled, no adequate area found in all lists, so GC at first, then get another try.*/ if(try_count == 0){ vm_gc_lock_enum(); + lspace->failure_size = alloc_size; gc_reclaim_heap(allocator->gc, GC_CAUSE_LOS_IS_FULL); vm_gc_unlock_enum(); try_count ++; @@ -214,19 +226,25 @@ lspace->committed_heap_size += trans_size; lspace->reserved_heap_size += trans_size; } + /*For_statistic los information.*/ + lspace->alloced_size = 0; + + lspace->failure_size = 0; + los_boundary = lspace->heap_end; } void lspace_sweep(Lspace* lspace) { + + lspace->surviving_size = 0; + /* reset the pool 
first because its info is useless now. */ free_area_pool_reset(lspace->free_pool); unsigned int mark_bit_idx = 0, cur_size = 0; void *cur_area_start, *cur_area_end; - - Partial_Reveal_Object* p_prev_obj = (Partial_Reveal_Object *)lspace->heap_start; Partial_Reveal_Object* p_next_obj = lspace_get_first_marked_object(lspace, &mark_bit_idx); if(p_next_obj){ @@ -235,6 +253,8 @@ in non_gen_mode, and not reset in gen_mode. When it switches back from gen_mode to non_gen_mode, the last time marked object is thought to be already marked and not scanned for this cycle. */ obj_clear_dual_bits_in_oi(p_next_obj); + /*For_statistic: sum up the size of surviving large objects, useful to decide los extension.*/ + lspace->surviving_size += ALIGN_UP_TO_KILO(vm_object_size(p_next_obj)); } cur_area_start = (void*)ALIGN_UP_TO_KILO(p_prev_obj); @@ -242,7 +262,7 @@ while(cur_area_end){ - cur_size = (unsigned int)cur_area_end - (unsigned int)cur_area_start; + cur_size = (POINTER_SIZE_INT)cur_area_end - (POINTER_SIZE_INT)cur_area_start; Free_Area* cur_area = free_area_new(cur_area_start, cur_size); /* successfully create an area */ @@ -254,16 +274,18 @@ if(p_next_obj){ obj_unmark_in_vt(p_next_obj); obj_clear_dual_bits_in_oi(p_next_obj); + /*For_statistic: sum up the size of surviving large objects, useful to decide los extension.*/ + lspace->surviving_size += ALIGN_UP_TO_KILO(vm_object_size(p_next_obj)); } - cur_area_start = (void*)ALIGN_UP_TO_KILO((unsigned int)p_prev_obj + vm_object_size(p_prev_obj)); + cur_area_start = (void*)ALIGN_UP_TO_KILO((POINTER_SIZE_INT)p_prev_obj + vm_object_size(p_prev_obj)); cur_area_end = (void*)ALIGN_DOWN_TO_KILO(p_next_obj); } /* cur_area_end == NULL */ cur_area_end = (void*)ALIGN_DOWN_TO_KILO(lspace->heap_end); - cur_size = (unsigned int)cur_area_end - (unsigned int)cur_area_start; + cur_size = (POINTER_SIZE_INT)cur_area_end - (POINTER_SIZE_INT)cur_area_start; Free_Area* cur_area = free_area_new(cur_area_start, cur_size); /* successfully create an area */ if( cur_area ) @@ -271,6 +293,11 @@ mark_bit_idx = 0; assert(!lspace_get_first_marked_object(lspace, &mark_bit_idx)); - return; + /*Update survive ratio here.
If we tune LOS this time, the ratio is computed by the new committed size.*/ + /*Fixme: We should keep the surviving size of last time, and set denominator to last_survive + current_alloc*/ + lspace->survive_ratio = (float)lspace->surviving_size / (float)lspace->committed_heap_size; + + return; + } Index: vm/gc_gen/src/thread/collector.cpp =================================================================== --- vm/gc_gen/src/thread/collector.cpp (revision 499692) +++ vm/gc_gen/src/thread/collector.cpp (working copy) @@ -34,7 +34,7 @@ assert(!collector->rem_set); while(Vector_Block *oi_block = pool_get_entry(remset_pool)){ - unsigned int *iter = vector_block_iterator_init(oi_block); + POINTER_SIZE_INT *iter = vector_block_iterator_init(oi_block); while(!vector_block_iterator_end(oi_block, iter)){ Partial_Reveal_Object *p_target_obj = (Partial_Reveal_Object *)*iter; iter = vector_block_iterator_advance(oi_block, iter); Index: vm/gc_gen/src/thread/collector_alloc.h =================================================================== --- vm/gc_gen/src/thread/collector_alloc.h (revision 499692) +++ vm/gc_gen/src/thread/collector_alloc.h (working copy) @@ -26,12 +26,12 @@ void* mos_alloc(unsigned size, Allocator *allocator); /* NOS forward obj to MOS in MINOR_COLLECTION */ -inline Partial_Reveal_Object* collector_forward_object(Collector* collector, Partial_Reveal_Object* p_obj) +FORCE_INLINE Partial_Reveal_Object* collector_forward_object(Collector* collector, Partial_Reveal_Object* p_obj) { Obj_Info_Type oi = get_obj_info_raw(p_obj); /* forwarded by somebody else */ - if ((unsigned int)oi & FORWARD_BIT){ + if ((POINTER_SIZE_INT)oi & FORWARD_BIT){ return NULL; } @@ -50,7 +50,7 @@ /* else, take the obj by setting the forwarding flag atomically we don't put a simple bit in vt because we need compute obj size later. */ - if ((unsigned int)oi != atomic_cas32((unsigned int*)get_obj_info_addr(p_obj), ((unsigned int)p_targ_obj|FORWARD_BIT), (unsigned int)oi)) { + if ((void*)oi != atomic_casptr((volatile void**)get_obj_info_addr(p_obj), (void*)((POINTER_SIZE_INT)p_targ_obj|FORWARD_BIT), (void*)oi)) { /* forwarded by other, we need unalloc the allocated obj. We may waste some space if the allocation switched block. The remaining part of the switched block cannot be revivied for next allocation of object that has smaller size than this one. 
*/ Index: vm/gc_gen/src/thread/gc_thread.h =================================================================== --- vm/gc_gen/src/thread/gc_thread.h (revision 499692) +++ vm/gc_gen/src/thread/gc_thread.h (working copy) @@ -32,13 +32,13 @@ inline void* gc_get_tls() { void* tls_base = vm_thread_local(); - return (void*)*(unsigned int*)((char*)tls_base + tls_gc_offset); + return (void*)*(POINTER_SIZE_INT*)((char*)tls_base + tls_gc_offset); } inline void gc_set_tls(void* gc_tls_info) { void* tls_base = vm_thread_local(); - *(unsigned int*)((char*)tls_base + tls_gc_offset) = (unsigned int)gc_tls_info; + *(POINTER_SIZE_INT*)((char*)tls_base + tls_gc_offset) = (POINTER_SIZE_INT)gc_tls_info; } /* NOTE:: don't change the position of free/ceiling, because the offsets are constants for inlining */ @@ -55,7 +55,7 @@ inline void thread_local_unalloc(unsigned int size, Allocator* allocator) { void* free = allocator->free; - allocator->free = (void*)((unsigned int)free - size); + allocator->free = (void*)((POINTER_SIZE_INT)free - size); return; } @@ -63,16 +63,16 @@ inline Partial_Reveal_Object* thread_local_alloc_zeroing(unsigned int size, Allocator* allocator) { - unsigned int free = (unsigned int)allocator->free; - unsigned int ceiling = (unsigned int)allocator->ceiling; + POINTER_SIZE_INT free = (POINTER_SIZE_INT)allocator->free; + POINTER_SIZE_INT ceiling = (POINTER_SIZE_INT)allocator->ceiling; - unsigned int new_free = free + size; + POINTER_SIZE_INT new_free = free + size; - unsigned int block_ceiling = (unsigned int)allocator->end; + POINTER_SIZE_INT block_ceiling = (POINTER_SIZE_INT)allocator->end; if( new_free > block_ceiling) return NULL; - unsigned int new_ceiling; + POINTER_SIZE_INT new_ceiling; new_ceiling = new_free + ZEROING_SIZE; if( new_ceiling > block_ceiling ) new_ceiling = block_ceiling; @@ -88,10 +88,10 @@ inline Partial_Reveal_Object* thread_local_alloc(unsigned int size, Allocator* allocator) { - unsigned int free = (unsigned int)allocator->free; - unsigned int ceiling = (unsigned int)allocator->ceiling; + POINTER_SIZE_INT free = (POINTER_SIZE_INT)allocator->free; + POINTER_SIZE_INT ceiling = (POINTER_SIZE_INT)allocator->ceiling; - unsigned int new_free = free + size; + POINTER_SIZE_INT new_free = free + size; if (new_free <= ceiling){ allocator->free= (void*)new_free; Index: vm/gc_gen/src/thread/mutator.cpp =================================================================== --- vm/gc_gen/src/thread/mutator.cpp (revision 499692) +++ vm/gc_gen/src/thread/mutator.cpp (working copy) @@ -38,7 +38,7 @@ } if(!IGNORE_FINREF ) - mutator->obj_with_fin = finref_get_free_block(); + mutator->obj_with_fin = finref_get_free_block(gc); else mutator->obj_with_fin = NULL; @@ -110,8 +110,6 @@ Mutator *mutator = gc->mutator_list; while (mutator) { mutator->rem_set = free_set_pool_get_entry(gc->metadata); - if(!IGNORE_FINREF ) - mutator_reset_obj_with_fin(mutator); mutator = mutator->next; } return; Index: vm/gc_gen/src/thread/mutator_alloc.cpp =================================================================== --- vm/gc_gen/src/thread/mutator_alloc.cpp (revision 499692) +++ vm/gc_gen/src/thread/mutator_alloc.cpp (working copy) @@ -24,6 +24,33 @@ #include "../finalizer_weakref/finalizer_weakref.h" +//#define GC_OBJ_SIZE_STATISTIC + +#ifdef GC_OBJ_SIZE_STATISTIC +#define GC_OBJ_SIZE_STA_MAX 256*KB +unsigned int obj_size_distribution_map[GC_OBJ_SIZE_STA_MAX>>10]; +void gc_alloc_statistic_obj_distrubution(unsigned int size) +{ + unsigned int sta_precision = 16*KB; + unsigned int max_sta_size = 128*KB; + 
unsigned int sta_current = 0; + + assert(!(GC_OBJ_SIZE_STA_MAX % sta_precision)); + assert(!(max_sta_size % sta_precision)); + while( sta_current < max_sta_size ){ + if(size < sta_current){ + unsigned int index = sta_current >> 10; + obj_size_distribution_map[index] ++; + return; + } + sta_current += sta_precision; + } + unsigned int index = sta_current >> 10; + obj_size_distribution_map[index]++; + return; +} +#endif + Managed_Object_Handle gc_alloc(unsigned size, Allocation_Handle ah, void *unused_gc_tls) { Managed_Object_Handle p_obj = NULL; @@ -33,7 +60,11 @@ assert(ah); Allocator* allocator = (Allocator*)gc_get_tls(); - + +#ifdef GC_OBJ_SIZE_STATISTIC + gc_alloc_statistic_obj_distrubution(size); +#endif + if ( size > GC_OBJ_SIZE_THRESHOLD ) p_obj = (Managed_Object_Handle)los_alloc(size, allocator); else{ @@ -59,6 +90,10 @@ if(type_has_finalizer((Partial_Reveal_VTable *)ah)) return NULL; + +#ifdef GC_OBJ_SIZE_STATISTIC + gc_alloc_statistic_obj_distrubution(size); +#endif /* object should be handled specially */ if ( size > GC_OBJ_SIZE_THRESHOLD ) return NULL; Index: vm/gc_gen/src/trace_forward/fspace.cpp =================================================================== --- vm/gc_gen/src/trace_forward/fspace.cpp (revision 499692) +++ vm/gc_gen/src/trace_forward/fspace.cpp (working copy) @@ -47,16 +47,17 @@ void* reserved_base = start; /* commit fspace mem */ - vm_commit_mem(reserved_base, commit_size); + if(!large_page_hint) + vm_commit_mem(reserved_base, commit_size); memset(reserved_base, 0, commit_size); fspace->committed_heap_size = commit_size; fspace->heap_start = reserved_base; #ifdef STATIC_NOS_MAPPING - fspace->heap_end = (void *)((unsigned int)reserved_base + fspace->reserved_heap_size); + fspace->heap_end = (void *)((POINTER_SIZE_INT)reserved_base + fspace->reserved_heap_size); #else /* for dynamic mapping, nos->heap_end is gc->heap_end */ - fspace->heap_end = (void *)((unsigned int)reserved_base + fspace->committed_heap_size); + fspace->heap_end = (void *)((POINTER_SIZE_INT)reserved_base + fspace->committed_heap_size); #endif fspace->num_managed_blocks = commit_size >> GC_BLOCK_SHIFT_COUNT; @@ -111,6 +112,7 @@ fspace->free_block_idx = first_idx; fspace->ceiling_block_idx = first_idx + fspace->num_managed_blocks - 1; forward_first_half = TRUE; /* only useful for not-FORWARD_ALL*/ + fspace->num_used_blocks = 0; }else{ if(forward_first_half){ @@ -124,6 +126,7 @@ marked_start_idx = 0; marked_last_idx = ((Block_Header*)object_forwarding_boundary)->block_idx - 1 - first_idx; } + fspace->num_used_blocks = marked_last_idx - marked_start_idx + 1; forward_first_half = forward_first_half^1; } @@ -140,10 +143,8 @@ block->status = BLOCK_FREE; block->free = block->base; - num_freed ++; } - fspace->num_used_blocks = fspace->num_used_blocks - num_freed; return; } @@ -159,7 +160,13 @@ GC* gc = fspace->gc; mspace_free_block_idx = ((GC_Gen*)gc)->mos->free_block_idx; - + + if(gc_is_gen_mode()){ + fspace->collect_algorithm = MINOR_GEN_FORWARD_POOL; + }else{ + fspace->collect_algorithm = MINOR_NONGEN_FORWARD_POOL; + } + /* we should not destruct rootset structure in case we need fall back */ pool_iterator_init(gc->metadata->gc_rootset_pool); @@ -178,7 +185,7 @@ break; default: - printf("\nSpecified minor collection algorithm doesn't exist in built module!\n"); + printf("\nSpecified minor collection algorithm doesn't exist!\n"); exit(0); break; } Index: vm/gc_gen/src/trace_forward/fspace.h =================================================================== --- 
vm/gc_gen/src/trace_forward/fspace.h (revision 499692) +++ vm/gc_gen/src/trace_forward/fspace.h (working copy) @@ -45,6 +45,10 @@ unsigned int collect_algorithm; GC* gc; Boolean move_object; + /*Size allocated after last collection. Not available in fspace now.*/ + unsigned int alloced_size; + /*For_statistic: not available now for fspace*/ + unsigned int surviving_size; /* END of Space --> */ Block* blocks; /* short-cut for mpsace blockheader access, not mandatory */ @@ -64,11 +68,6 @@ void fspace_initialize(GC* gc, void* start, unsigned int fspace_size, unsigned int commit_size); void fspace_destruct(Fspace *fspace); -inline Boolean obj_is_dead_in_minor_forward_gc(Collector *collector, Partial_Reveal_Object *p_obj) -{ - return (!obj_is_marked_or_fw_in_oi(p_obj)) ; -} - void* fspace_alloc(unsigned size, Allocator *allocator); void fspace_reset_for_allocation(Fspace* fspace); @@ -88,5 +87,5 @@ void fspace_collection(Fspace* fspace); - + #endif // _FROM_SPACE_H_ Index: vm/gc_gen/src/trace_forward/fspace_alloc.cpp =================================================================== --- vm/gc_gen/src/trace_forward/fspace_alloc.cpp (revision 499692) +++ vm/gc_gen/src/trace_forward/fspace_alloc.cpp (working copy) @@ -51,7 +51,7 @@ #else /* the first-time zeroing area includes block header, to make subsequent allocs page aligned */ unsigned int zeroing_size = ZEROING_SIZE - GC_BLOCK_HEADER_SIZE_BYTES; - allocator->ceiling = (void*)((unsigned int)new_free + zeroing_size); + allocator->ceiling = (void*)((POINTER_SIZE_INT)new_free + zeroing_size); memset(new_free, 0, zeroing_size); #endif /* #ifndef ALLOC_ZEROING */ Index: vm/gc_gen/src/trace_forward/fspace_gen_forward_pool.cpp =================================================================== --- vm/gc_gen/src/trace_forward/fspace_gen_forward_pool.cpp (revision 499692) +++ vm/gc_gen/src/trace_forward/fspace_gen_forward_pool.cpp (working copy) @@ -24,13 +24,13 @@ #include "../common/gc_metadata.h" #include "../finalizer_weakref/finalizer_weakref.h" -static Boolean fspace_object_to_be_forwarded(Partial_Reveal_Object *p_obj, Fspace *fspace) +static FORCE_INLINE Boolean fspace_object_to_be_forwarded(Partial_Reveal_Object *p_obj, Fspace *fspace) { assert(obj_belongs_to_nos(p_obj)); return forward_first_half? (p_obj < object_forwarding_boundary):(p_obj>=object_forwarding_boundary); } -static void scan_slot(Collector* collector, Partial_Reveal_Object **p_ref) +static FORCE_INLINE void scan_slot(Collector* collector, Partial_Reveal_Object **p_ref) { Partial_Reveal_Object *p_obj = *p_ref; if (p_obj == NULL) return; @@ -43,7 +43,7 @@ return; } -static void scan_object(Collector* collector, Partial_Reveal_Object *p_obj) +static FORCE_INLINE void scan_object(Collector* collector, Partial_Reveal_Object *p_obj) { if (!object_has_ref_field(p_obj)) return; @@ -90,7 +90,7 @@ #include "../verify/verify_live_heap.h" -static void forward_object(Collector* collector, Partial_Reveal_Object **p_ref) +static FORCE_INLINE void forward_object(Collector* collector, Partial_Reveal_Object **p_ref) { Space* space = collector->collect_space; GC* gc = collector->gc; @@ -184,7 +184,7 @@ /* first step: copy all root objects to trace tasks.
*/ while(root_set){ - unsigned int* iter = vector_block_iterator_init(root_set); + POINTER_SIZE_INT* iter = vector_block_iterator_init(root_set); while(!vector_block_iterator_end(root_set,iter)){ Partial_Reveal_Object** p_ref = (Partial_Reveal_Object** )*iter; iter = vector_block_iterator_advance(root_set,iter); @@ -205,7 +205,7 @@ Vector_Block* trace_task = pool_get_entry(metadata->mark_task_pool); while(trace_task){ - unsigned int* iter = vector_block_iterator_init(trace_task); + POINTER_SIZE_INT* iter = vector_block_iterator_init(trace_task); while(!vector_block_iterator_end(trace_task,iter)){ Partial_Reveal_Object** p_ref = (Partial_Reveal_Object** )*iter; iter = vector_block_iterator_advance(trace_task,iter); @@ -266,48 +266,19 @@ #ifndef BUILD_IN_REFERENT else { gc_set_weakref_sets(gc); - update_ref_ignore_finref(collector); + gc_update_weakref_ignore_finref(gc); } #endif gc_fix_rootset(collector); - if(!IGNORE_FINREF ) - gc_put_finref_to_vm(gc); - fspace_reset_for_allocation(space); return; } -void resurrect_obj_tree_after_trace(Collector *collector, Partial_Reveal_Object **p_ref) +void trace_obj_in_gen_fw(Collector *collector, void *p_ref) { - GC *gc = collector->gc; - GC_Metadata* metadata = gc->metadata; - - collector->trace_stack = free_task_pool_get_entry(metadata); - collector_tracestack_push(collector, p_ref); - pool_put_entry(metadata->mark_task_pool, collector->trace_stack); - -//collector->rep_set = free_set_pool_get_entry(metadata); /* has got collector->rep_set in caller */ - collector->trace_stack = free_task_pool_get_entry(metadata); - Vector_Block* trace_task = pool_get_entry(metadata->mark_task_pool); - while(trace_task){ - unsigned int* iter = vector_block_iterator_init(trace_task); - while(!vector_block_iterator_end(trace_task,iter)){ - Partial_Reveal_Object** p_ref = (Partial_Reveal_Object** )*iter; - iter = vector_block_iterator_advance(trace_task,iter); - assert(*p_ref); - trace_object(collector, p_ref); - } - vector_stack_clear(trace_task); - pool_put_entry(metadata->free_task_pool, trace_task); - trace_task = pool_get_entry(metadata->mark_task_pool); - } - - trace_task = (Vector_Block*)collector->trace_stack; - vector_stack_clear(trace_task); - pool_put_entry(metadata->free_task_pool, trace_task); - collector->trace_stack = NULL; + trace_object(collector, (Partial_Reveal_Object **)p_ref); } Index: vm/gc_gen/src/trace_forward/fspace_nongen_forward_pool.cpp =================================================================== --- vm/gc_gen/src/trace_forward/fspace_nongen_forward_pool.cpp (revision 499692) +++ vm/gc_gen/src/trace_forward/fspace_nongen_forward_pool.cpp (working copy) @@ -27,7 +27,7 @@ #ifdef MARK_BIT_FLIPPING -static void scan_slot(Collector* collector, Partial_Reveal_Object **p_ref) +static FORCE_INLINE void scan_slot(Collector* collector, Partial_Reveal_Object **p_ref) { Partial_Reveal_Object *p_obj = *p_ref; if(p_obj == NULL) return; @@ -36,7 +36,7 @@ return; } -static void scan_object(Collector* collector, Partial_Reveal_Object *p_obj) +static FORCE_INLINE void scan_object(Collector* collector, Partial_Reveal_Object *p_obj) { if (!object_has_ref_field_before_scan(p_obj)) return; @@ -46,7 +46,7 @@ Partial_Reveal_Array* array = (Partial_Reveal_Array*)p_obj; unsigned int array_length = array->array_len; - p_ref = (Partial_Reveal_Object**)((int)array + (int)array_first_element_offset(array)); + p_ref = (Partial_Reveal_Object**)((POINTER_SIZE_INT)array + (int)array_first_element_offset(array)); for (unsigned int i = 0; i < array_length; i++) { 
scan_slot(collector, p_ref+i); @@ -81,7 +81,7 @@ */ #include "../verify/verify_live_heap.h" -static void forward_object(Collector* collector, Partial_Reveal_Object **p_ref) +static FORCE_INLINE void forward_object(Collector* collector, Partial_Reveal_Object **p_ref) { GC* gc = collector->gc; Partial_Reveal_Object *p_obj = *p_ref; @@ -157,7 +157,7 @@ /* first step: copy all root objects to trace tasks. */ while(root_set){ - unsigned int* iter = vector_block_iterator_init(root_set); + POINTER_SIZE_INT* iter = vector_block_iterator_init(root_set); while(!vector_block_iterator_end(root_set,iter)){ Partial_Reveal_Object** p_ref = (Partial_Reveal_Object** )*iter; iter = vector_block_iterator_advance(root_set,iter); @@ -179,7 +179,7 @@ Vector_Block* trace_task = pool_get_entry(metadata->mark_task_pool); while(trace_task){ - unsigned int* iter = vector_block_iterator_init(trace_task); + POINTER_SIZE_INT* iter = vector_block_iterator_init(trace_task); while(!vector_block_iterator_end(trace_task,iter)){ Partial_Reveal_Object** p_ref = (Partial_Reveal_Object** )*iter; iter = vector_block_iterator_advance(trace_task,iter); @@ -237,19 +237,21 @@ #ifndef BUILD_IN_REFERENT else { gc_set_weakref_sets(gc); - update_ref_ignore_finref(collector); + gc_update_weakref_ignore_finref(gc); } #endif gc_fix_rootset(collector); - if(!IGNORE_FINREF ) - gc_put_finref_to_vm(gc); - - fspace_reset_for_allocation(space); + fspace_reset_for_allocation(space); return; } +void trace_obj_in_nongen_fw(Collector *collector, void *p_ref) +{ + trace_object(collector, (Partial_Reveal_Object **)p_ref); +} + #endif /* MARK_BIT_FLIPPING */ Index: vm/gc_gen/src/utils/bit_ops.h =================================================================== --- vm/gc_gen/src/utils/bit_ops.h (revision 499692) +++ vm/gc_gen/src/utils/bit_ops.h (working copy) @@ -52,7 +52,7 @@ { unsigned int bit_offset; - assert((start_idx >= 0) && (start_idx < 128)); + assert(start_idx < 128); unsigned int start_word_index = start_idx >> BIT_SHIFT_TO_BITS_PER_WORD; unsigned int start_bit_offset = start_idx & BIT_MASK_TO_BITS_PER_WORD; @@ -79,9 +79,9 @@ inline void words_set_bit(unsigned int* words, unsigned int count, unsigned int start_idx) { - assert((start_idx >= 0) && (start_idx < 128)); + assert(start_idx < 128); - unsigned int word_index = start_idx >> BIT_SHIFT_TO_BITS_PER_WORD; + unsigned int word_index = start_idx >> BIT_SHIFT_TO_BITS_PER_WORD; unsigned int bit_offset = start_idx & BIT_MASK_TO_BITS_PER_WORD; if(word_index >= count) return; @@ -98,7 +98,7 @@ inline void words_clear_bit(unsigned int* words, unsigned int count, unsigned int start_idx) { - assert((start_idx >= 0) && (start_idx < 128)); + assert(start_idx < 128); unsigned int word_index = start_idx >> BIT_SHIFT_TO_BITS_PER_WORD; unsigned int bit_offset = start_idx & BIT_MASK_TO_BITS_PER_WORD; Index: vm/gc_gen/src/utils/sync_queue.h =================================================================== --- vm/gc_gen/src/utils/sync_queue.h (revision 499692) +++ vm/gc_gen/src/utils/sync_queue.h (working copy) @@ -72,24 +72,24 @@ node->value = value; node->next.ptr = NULL; while(TRUE){ - QLINK_VAL(tail) = QLINK_VAL(queue->tail); - QLINK_VAL(next) = QLINK_VAL(tail.ptr->next); - if( QLINK_VAL(tail) == QLINK_VAL(queue->tail)){ - if( next.ptr==NULL ){ - tmp1.ptr = node; - tmp1.count = next.count + 1; - node->next.count = tmp1.count; - QLINK_VAL(tmp2) = atomic_cas64(QLINK_PTR(tail.ptr->next), QLINK_VAL(next), QLINK_VAL(tmp1)) - if( QLINK_VAL(tmp1) == QLINK_VAL(tmp2)) - break; - - }else{ - tmp1.ptr = 
next.ptr; - tmp1.count = tail.count + 1; - atomic_cas64(QLINK_PTR(queue->tail), QLINK_VAL(tail), QLINK_VAL(tmp1)); - } - } - } + QLINK_VAL(tail) = QLINK_VAL(queue->tail); + QLINK_VAL(next) = QLINK_VAL(tail.ptr->next); + if( QLINK_VAL(tail) == QLINK_VAL(queue->tail)){ + if( next.ptr==NULL ){ + tmp1.ptr = node; + tmp1.count = next.count + 1; + node->next.count = tmp1.count; + QLINK_VAL(tmp2) = atomic_cas64(QLINK_PTR(tail.ptr->next), QLINK_VAL(next), QLINK_VAL(tmp1)) + if( QLINK_VAL(tmp1) == QLINK_VAL(tmp2)) + break; + + }else{ + tmp1.ptr = next.ptr; + tmp1.count = tail.count + 1; + atomic_cas64(QLINK_PTR(queue->tail), QLINK_VAL(tail), QLINK_VAL(tmp1)); + } + } + } tmp1.ptr = node; tmp1.count = tail.count + 1; atomic_cas64(QLINK_PTR(queue->tail), QLINK_VAL(tail), QLINK_VAL(tmp1)); @@ -100,31 +100,31 @@ { Queue_Link head, tail, next, tmp1, tmp2; while(TRUE){ - QLINK_VAL(head) = QLINK_VAL(queue->head); - QLINK_VAL(tail) = QLINK_VAL(queue->tail); - QLINK_VAL(next) = QLINK_VAL(head.ptr->next); - - if( QLINK_VAL(head) == QLINK_VAL(queue->head)){ - if( head.ptr== tail.ptr ) - if( next.ptr == NULL ) - return FALSE; - else{ - tmp1.ptr = next.ptr; - tmp1.count = tail.count+1; + QLINK_VAL(head) = QLINK_VAL(queue->head); + QLINK_VAL(tail) = QLINK_VAL(queue->tail); + QLINK_VAL(next) = QLINK_VAL(head.ptr->next); + + if( QLINK_VAL(head) == QLINK_VAL(queue->head)){ + if( head.ptr== tail.ptr ) + if( next.ptr == NULL ) + return FALSE; + else{ + tmp1.ptr = next.ptr; + tmp1.count = tail.count+1; atomic_cas64(QLINK_PTR(queue->tail), QLINK_VAL(tail), QLINK_VAL(tmp1)); - } - else{ - *pvalue = next.ptr->value; - tmp1.ptr = next.ptr; - tmp1.count = head.count+1; - QLINK_VAL(tmp2) = atomic_cas64(QLINK_PTR(queue->head), QLINK_VAL(head), QLINK_VAL(tmp1)); - if( QLINK_VAL(tmp2) == QLINK_VAL(tmp1)) - break; - } - } - } + } + else{ + *pvalue = next.ptr->value; + tmp1.ptr = next.ptr; + tmp1.count = head.count+1; + QLINK_VAL(tmp2) = atomic_cas64(QLINK_PTR(queue->head), QLINK_VAL(head), QLINK_VAL(tmp1)); + if( QLINK_VAL(tmp2) == QLINK_VAL(tmp1)) + break; + } + } + } free( head.ptr ); return TRUE; } - + #endif /* _SYNC_QUEUE_H_ */ Index: vm/gc_gen/src/utils/sync_stack.h =================================================================== --- vm/gc_gen/src/utils/sync_stack.h (revision 499692) +++ vm/gc_gen/src/utils/sync_stack.h (working copy) @@ -33,22 +33,32 @@ /* * ATTENTION: only for reference * Perhaps in some platforms compilers compile this struct in a way different from what we expect - */ + * GCC requires the "packed" attribute to be specified +#ifdef __linux__ typedef struct Stack_Top{ - unsigned int version: SYNC_STACK_VERSION_MASK_SHIFT; - unsigned int entry: (32-SYNC_STACK_VERSION_MASK_SHIFT); + POINTER_SIZE_INT version: SYNC_STACK_VERSION_MASK_SHIFT; + POINTER_SIZE_INT entry: (BITS_OF_POINTER_SIZE_INT-SYNC_STACK_VERSION_MASK_SHIFT); +}Stack_Top __attribute__((packed)); +#else +typedef struct Stack_Top{ + POINTER_SIZE_INT version: SYNC_STACK_VERSION_MASK_SHIFT; + POINTER_SIZE_INT entry: (BITS_OF_POINTER_SIZE_INT-SYNC_STACK_VERSION_MASK_SHIFT); }Stack_Top; +#endif + */ +typedef POINTER_SIZE_INT Stack_Top; + typedef struct Sync_Stack{ Stack_Top top; /* pointing to the first filled entry */ Node* cur; /* pointing to the current accessed entry, only for iterator */ }Sync_Stack; -#define stack_top_get_entry(top) ((Node*)((*(unsigned int*)&(top)) & ~SYNC_STACK_VERSION_MASK)) +#define stack_top_get_entry(top) ((Node*)((*(POINTER_SIZE_INT*)&(top)) & ~SYNC_STACK_VERSION_MASK)) /* The alternative way: (Node*)(top.entry<cur = NULL; - 
unsigned int temp_top = 0; + POINTER_SIZE_INT temp_top = 0; stack->top = *(Stack_Top*)&temp_top; return stack; } @@ -92,13 +102,13 @@ { Stack_Top cur_top = stack->top; Node* top_entry = stack_top_get_entry(cur_top); - unsigned int version = stack_top_get_version(cur_top); + POINTER_SIZE_INT version = stack_top_get_version(cur_top); while( top_entry != NULL ){ - unsigned int temp = stack_top_contruct(top_entry->next, version); + POINTER_SIZE_INT temp = stack_top_contruct(top_entry->next, version); Stack_Top new_top = *(Stack_Top*)&temp; - temp = (unsigned int)atomic_casptr((volatile void**)&stack->top, *(void**)&new_top, *(void**)&cur_top); - if(temp == *(unsigned int*)&cur_top){ /* got it */ + temp = (POINTER_SIZE_INT)atomic_casptr((volatile void**)&stack->top, *(void**)&new_top, *(void**)&cur_top); + if(temp == *(POINTER_SIZE_INT*)&cur_top){ /* got it */ top_entry->next = NULL; return top_entry; } @@ -113,13 +123,13 @@ { Stack_Top cur_top = stack->top; node->next = stack_top_get_entry(cur_top); - unsigned int new_version = stack_top_get_next_version(cur_top); - unsigned int temp = stack_top_contruct(node, new_version); + POINTER_SIZE_INT new_version = stack_top_get_next_version(cur_top); + POINTER_SIZE_INT temp = stack_top_contruct(node, new_version); Stack_Top new_top = *(Stack_Top*)&temp; while( TRUE ){ - temp = (unsigned int)atomic_casptr((volatile void**)&stack->top, *(void**)&new_top, *(void**)&cur_top); - if(temp == *(unsigned int*)&cur_top){ /* got it */ + temp = (POINTER_SIZE_INT)atomic_casptr((volatile void**)&stack->top, *(void**)&new_top, *(void**)&cur_top); + if(temp == *(POINTER_SIZE_INT*)&cur_top){ /* got it */ return TRUE; } cur_top = stack->top; Index: vm/gc_gen/src/utils/vector_block.h =================================================================== --- vm/gc_gen/src/utils/vector_block.h (revision 499692) +++ vm/gc_gen/src/utils/vector_block.h (working copy) @@ -23,46 +23,47 @@ typedef struct Vector_Block{ void* next; /* point to next block */ - unsigned int* head; /* point to the first filled entry */ - unsigned int* tail; /* point to the entry after the last filled one */ - unsigned int* heap_end; /* point to heap_end of the block (right after the last entry) */ - unsigned int entries[1]; + POINTER_SIZE_INT* head; /* point to the first filled entry */ + POINTER_SIZE_INT* tail; /* point to the entry after the last filled one */ + POINTER_SIZE_INT* heap_end; /* point to heap_end of the block (right after the last entry) */ + POINTER_SIZE_INT entries[1]; }Vector_Block; /* this size better be 2's power */ #define VECTOR_BLOCK_DATA_SIZE_BYTES (2*KB) -#define VECTOR_BLOCK_HEADER_SIZE_BYTES ((unsigned int)((Vector_Block*)0)->entries) -#define VECTOR_BLOCK_ENTRY_NUM ((VECTOR_BLOCK_DATA_SIZE_BYTES - VECTOR_BLOCK_HEADER_SIZE_BYTES) >> BIT_SHIFT_TO_BYTES_PER_WORD) +#define VECTOR_BLOCK_HEADER_SIZE_BYTES ((POINTER_SIZE_INT)((Vector_Block*)0)->entries) +#define VECTOR_BLOCK_ENTRY_NUM ((VECTOR_BLOCK_DATA_SIZE_BYTES - VECTOR_BLOCK_HEADER_SIZE_BYTES) >> BIT_SHIFT_TO_BYTES_OF_POINTER_SIZE_INT ) inline void vector_block_init(Vector_Block* block, unsigned int size) { - block->heap_end = (unsigned int*)((unsigned int)block + size); - block->head = (unsigned int*)block->entries; - block->tail = (unsigned int*)block->entries; - memset(block->entries, 0, (block->heap_end - (unsigned int*)block->entries)*BYTES_PER_WORD); + block->heap_end = (POINTER_SIZE_INT*)((POINTER_SIZE_INT)block + size); + block->head = (POINTER_SIZE_INT*)block->entries; + block->tail = 
(POINTER_SIZE_INT*)block->entries; + memset(block->entries, 0, (POINTER_SIZE_INT)block->heap_end - (POINTER_SIZE_INT)block->entries); return; } inline unsigned int vector_block_entry_count(Vector_Block* block) { return (unsigned int)(block->tail - block->head); } -/* + inline Boolean vector_block_is_full(Vector_Block* block) { return block->tail == block->heap_end; } +/* inline Boolean vector_block_is_empty(Vector_Block* block) { return block->tail == block->head; } -*/ inline Boolean vector_block_is_full(Vector_Block* block) { return (block->tail - block->entries) == VECTOR_BLOCK_ENTRY_NUM; } +*/ inline Boolean vector_block_is_empty(Vector_Block* block) { return block->tail == block->entries; } -inline void vector_block_add_entry(Vector_Block* block, unsigned int value) +inline void vector_block_add_entry(Vector_Block* block, POINTER_SIZE_INT value) { #ifdef _DEBUG assert(value && !*(block->tail)); @@ -73,21 +74,21 @@ inline void vector_block_clear(Vector_Block* block) { - block->head = (unsigned int*)block->entries; - block->tail = (unsigned int*)block->entries; + block->head = (POINTER_SIZE_INT*)block->entries; + block->tail = (POINTER_SIZE_INT*)block->entries; #ifdef _DEBUG - memset(block->entries, 0, (block->heap_end - (unsigned int*)block->entries)*BYTES_PER_WORD); + memset(block->entries, 0, (POINTER_SIZE_INT)block->heap_end - (POINTER_SIZE_INT)block->entries); #endif } /* Below is for sequential local access */ -inline unsigned int* vector_block_iterator_init(Vector_Block* block) +inline POINTER_SIZE_INT* vector_block_iterator_init(Vector_Block* block) { return block->head; } -inline unsigned int* vector_block_iterator_advance(Vector_Block* block, unsigned int* iter) +inline POINTER_SIZE_INT* vector_block_iterator_advance(Vector_Block* block, POINTER_SIZE_INT* iter) { return ++iter; } -inline Boolean vector_block_iterator_end(Vector_Block* block, unsigned int* iter) +inline Boolean vector_block_iterator_end(Vector_Block* block, POINTER_SIZE_INT* iter) { return iter == block->tail; } @@ -102,22 +103,22 @@ { vector_stack_init(block); #ifdef _DEBUG - memset(block->entries, 0, (block->heap_end - (unsigned int*)block->entries)*BYTES_PER_WORD); + memset(block->entries, 0, (POINTER_SIZE_INT)block->heap_end - (POINTER_SIZE_INT)block->entries); #endif } -/* inline Boolean vector_stack_is_empty(Vector_Block* block) { return (block->head == block->tail); } -*/ +/* inline Boolean vector_stack_is_empty(Vector_Block* block) { return (block->head - block->entries) == VECTOR_BLOCK_ENTRY_NUM; } +*/ inline Boolean vector_stack_is_full(Vector_Block* block) { return (block->head == block->entries); } -inline void vector_stack_push(Vector_Block* block, unsigned int value) +inline void vector_stack_push(Vector_Block* block, POINTER_SIZE_INT value) { block->head--; #ifdef _DEBUG @@ -126,9 +127,9 @@ *(block->head) = value; } -inline unsigned int vector_stack_pop(Vector_Block* block) +inline POINTER_SIZE_INT vector_stack_pop(Vector_Block* block) { - unsigned int value = *block->head; + POINTER_SIZE_INT value = *block->head; #ifdef _DEBUG *block->head = 0; #endif @@ -138,7 +139,7 @@ inline void vector_block_integrity_check(Vector_Block* block) { - unsigned int* iter = vector_block_iterator_init(block); + POINTER_SIZE_INT* iter = vector_block_iterator_init(block); while(!vector_block_iterator_end(block, iter)){ assert(*iter); iter = vector_block_iterator_advance(block, iter);
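/* Editorial sketch, not part of the original patch: the iterator calls above follow the
   same protocol the collector loops in this patch rely on. A typical caller, assuming a
   hypothetical visit_entry() consumer, would look like:

     POINTER_SIZE_INT* it = vector_block_iterator_init(block);
     while(!vector_block_iterator_end(block, it)){
       visit_entry(*it);                        // hypothetical consumer of one entry
       it = vector_block_iterator_advance(block, it);
     }
*/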