Index: src/common/gc_common.cpp
===================================================================
--- src/common/gc_common.cpp	(revision 566495)
+++ src/common/gc_common.cpp	(working copy)
@@ -65,7 +65,7 @@
     return_value = atoi(value);
     destroy_property_value(value);
   }else{
-    printf("property value %s is not set\n", property_name);
+    DIE2("gc.base","Warning: property value "<<property_name<<" is not set");
   }
@@ ... @@
   if(min_heap_size > max_heap_size){
     max_heap_size = min_heap_size;
-    printf("Max heap size: too small, reset to %d MB\n", max_heap_size / MB);
+    WARN2("gc.base","Warning: Max heap size is too small, reset to "<<max_heap_size/MB<<" MB");
   }
@@ ... @@
   gc->num_collections++;
   gc->cause = gc_cause;
   gc_decide_collection_kind((GC_Gen*)gc, gc_cause);
 
+#ifndef USE_MARK_SWEEP_GC
   gc_gen_update_space_before_gc((GC_Gen*)gc);
-
-#ifndef ONLY_SSPACE_IN_HEAP
   gc_compute_space_tune_size_before_marking(gc, gc_cause);
 #endif
 
 #ifdef MARK_BIT_FLIPPING
   if(gc_match_kind(gc, MINOR_COLLECTION)) mark_bit_flip();
 #endif
-
+
   gc_metadata_verify(gc, TRUE);
 #ifndef BUILD_IN_REFERENT
   gc_finref_metadata_verify((GC*)gc, TRUE);
 #endif
 
   /* Stop the threads and collect the roots. */
+  INFO2("gc.process", "GC: stop the threads and enumerate rootset ...\n");
   gc_reset_rootset(gc);
   vm_enumerate_root_set_all_threads();
   gc_copy_interior_pointer_table_to_rootset();
@@ -310,7 +317,7 @@
   if(!IGNORE_FINREF )
     gc_set_obj_with_fin(gc);
 
-#ifndef ONLY_SSPACE_IN_HEAP
+#ifndef USE_MARK_SWEEP_GC
   gc_gen_reclaim_heap((GC_Gen*)gc);
 #else
   gc_ms_reclaim_heap((GC_MS*)gc);
@@ -320,10 +327,14 @@
 
   gc_metadata_verify(gc, FALSE);
 
-  int64 pause_time = time_now() - start_time;
+  collection_end_time = time_now();
+
+  int64 pause_time = collection_end_time - collection_start_time;
+
   gc->time_collections += pause_time;
 
-#ifndef ONLY_SSPACE_IN_HEAP
+#ifndef USE_MARK_SWEEP_GC
+  gc_gen_collection_verbose_info((GC_Gen*)gc, pause_time, mutator_time);
+  gc_gen_space_verbose_info((GC_Gen*)gc);
+
   gc_adjust_heap_size(gc, pause_time);
 
   gc_gen_adapt((GC_Gen*)gc, pause_time);
@@ -332,6 +343,7 @@
   if(gc_is_gen_mode()) gc_prepare_mutator_remset(gc);
 
   if(!IGNORE_FINREF ){
+    INFO2("gc.process", "GC: finref process after collection ...\n");
     gc_put_finref_to_vm(gc);
     gc_reset_finref_metadata(gc);
     gc_activate_finref_threads((GC*)gc);
@@ -341,7 +353,7 @@
 #endif
   }
 
-#ifndef ONLY_SSPACE_IN_HEAP
+#ifndef USE_MARK_SWEEP_GC
   gc_space_tuner_reset(gc);
   gc_gen_update_space_after_gc((GC_Gen*)gc);
   gc_assign_free_area_to_mutators(gc);
@@ -349,7 +361,7 @@
   vm_reclaim_native_objs();
   vm_resume_threads_after();
+  INFO2("gc.process", "GC: GC end\n");
   return;
 }
-
Index: src/common/gc_common.h
===================================================================
--- src/common/gc_common.h	(revision 566495)
+++ src/common/gc_common.h	(working copy)
@@ -22,6 +22,7 @@
 #ifndef _GC_COMMON_H_
 #define _GC_COMMON_H_
 
+#include "cxxlog.h"
 #include "port_vmem.h"
 #include "platform_lowlevel.h"
@@ -37,10 +38,13 @@
 #include "../gen/gc_for_barrier.h"
 
+#define GC_GEN_STATS
 #define null 0
 
 #define KB  (1<<10)
 #define MB  (1<<20)
 
+/* used for print size info in verbose system */
+#define verbose_print_size(size) (((size)/MB!=0)?(size)/MB:(((size)/KB!=0)?(size)/KB:(size)))<<(((size)/MB!=0)?"MB":(((size)/KB!=0)?"KB":"B"))
+
 #define BITS_PER_BYTE 8
 #define BYTES_PER_WORD (sizeof(POINTER_SIZE_INT))
@@ -73,7 +77,7 @@
 
 #define USE_32BITS_HASHCODE
 
-//#define ONLY_SSPACE_IN_HEAP
+//#define USE_MARK_SWEEP_GC
 
 typedef void (*TaskType)(void*);
@@ -104,7 +108,8 @@
   MAJOR_COLLECTION = 0x2,
   FALLBACK_COLLECTION = 0x4,
   EXTEND_COLLECTION = 0x8,
-  UNIQUE_SWEEP_COLLECTION = 0x10
+  MARK_SWEEP_GC = 0x10,
+  SWEEP_COMPACT_GC = 0x20
 };
 
 extern Boolean IS_FALLBACK_COMPACTION; /* only for mark/fw bits debugging purpose */
@@ -394,6 +399,11 @@
   Vector_Block* uncompressed_root_set;
 
   Space_Tuner* tuner;
+
+  /* system info */
+  unsigned int _system_alloc_unit;
+  unsigned int _machine_page_size_bytes;
+  unsigned int _num_processors;
 }GC;
@@ -417,6 +427,8 @@
   return gc->collect_kind & kind;
 }
 
+inline unsigned int gc_get_processor_num(GC* gc) { return gc->_num_processors; }
+
 void gc_parse_options(GC* gc);
 void gc_reclaim_heap(GC* gc, unsigned int gc_cause);
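
A note on the verbose_print_size macro added above: it expands into *two* stream insertions (a scaled value followed by a unit string), so it is only valid on the right-hand side of a cxxlog stream statement such as INFO2/TRACE2. The standalone sketch below is illustration only, not patch code: std::cout stands in for the logger stream, and KB/MB mirror the header's definitions.

#include <iostream>

#define KB (1<<10)
#define MB (1<<20)
// Same expansion as the patch: picks MB, then KB, then B as the unit.
#define verbose_print_size(size) \
  (((size)/MB!=0)?(size)/MB:(((size)/KB!=0)?(size)/KB:(size))) \
  <<(((size)/MB!=0)?"MB":(((size)/KB!=0)?"KB":"B"))

int main() {
  std::cout << "heap: " << verbose_print_size(256*MB) << "\n"; // prints "heap: 256MB"
  std::cout << "area: " << verbose_print_size(8*KB)   << "\n"; // prints "area: 8KB"
  std::cout << "obj:  " << verbose_print_size(72)     << "\n"; // prints "obj:  72B"
  return 0;
}

Note the integer division: a 1.5MB value prints as "1MB", which is acceptable for verbose output but not for exact accounting.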
Index: src/common/gc_for_vm.cpp
===================================================================
--- src/common/gc_for_vm.cpp	(revision 566495)
+++ src/common/gc_for_vm.cpp	(working copy)
@@ -20,6 +20,7 @@
  */
 
 #include
+#include "port_sysinfo.h"
 #include "vm_threads.h"
 
 #include "compressed_ref.h"
@@ -41,11 +42,20 @@
 
 Boolean gc_requires_barriers()
 {   return p_global_gc->generate_barrier; }
 
+static void gc_get_system_info(GC *gc)
+{
+  gc->_machine_page_size_bytes = (unsigned int)port_vmem_page_sizes()[0];
+  gc->_num_processors = port_CPUs_number();
+  gc->_system_alloc_unit = vm_get_system_alloc_unit();
+  SPACE_ALLOC_UNIT = max(gc->_system_alloc_unit, GC_BLOCK_SIZE_BYTES);
+}
+
 int gc_init()
 {
+  INFO2("gc.process", "GC: call GC init...\n");
   assert(p_global_gc == NULL);
 
-#ifndef ONLY_SSPACE_IN_HEAP
+#ifndef USE_MARK_SWEEP_GC
   unsigned int gc_struct_size = sizeof(GC_Gen);
 #else
   unsigned int gc_struct_size = sizeof(GC_MS);
@@ -58,10 +68,12 @@
   gc_parse_options(gc);
 
   gc_tls_init();
-
+
+  gc_get_system_info(gc);
+
   gc_metadata_initialize(gc); /* root set and mark stack */
 
-#ifndef ONLY_SSPACE_IN_HEAP
+#ifndef USE_MARK_SWEEP_GC
   gc_gen_initialize((GC_Gen*)gc, min_heap_size_bytes, max_heap_size_bytes);
 #else
   gc_ms_initialize((GC_MS*)gc, min_heap_size_bytes, max_heap_size_bytes);
@@ -78,14 +90,17 @@
 
   mutator_need_block = FALSE;
 
+  INFO2("gc.process", "GC: end of GC init\n");
   return JNI_OK;
 }
 
 void gc_wrapup()
 {
+  INFO2("gc.process", "GC: call GC wrapup ....");
   GC* gc = p_global_gc;
 
-#ifndef ONLY_SSPACE_IN_HEAP
+#ifndef USE_MARK_SWEEP_GC
+  gc_gen_wrapup_verbose((GC_Gen*)gc);
   gc_gen_destruct((GC_Gen*)gc);
 #else
   gc_ms_destruct((GC_MS*)gc);
@@ -104,6 +119,7 @@
 
   STD_FREE(p_global_gc);
   p_global_gc = NULL;
+  INFO2("gc.process", "GC: end of GC wrapup\n");
 }
 
 #ifdef COMPRESS_REFERENCE
@@ -177,7 +193,7 @@
 
 int64 gc_free_memory()
 {
-#ifndef ONLY_SSPACE_IN_HEAP
+#ifndef USE_MARK_SWEEP_GC
   return (int64)gc_gen_free_memory_size((GC_Gen*)p_global_gc);
 #else
   return (int64)gc_ms_free_memory_size((GC_MS*)p_global_gc);
@@ -187,7 +203,7 @@
 /* java heap size.*/
 int64 gc_total_memory()
 {
-#ifndef ONLY_SSPACE_IN_HEAP
+#ifndef USE_MARK_SWEEP_GC
   return (int64)((POINTER_SIZE_INT)gc_gen_total_memory_size((GC_Gen*)p_global_gc));
 #else
   return (int64)((POINTER_SIZE_INT)gc_ms_total_memory_size((GC_MS*)p_global_gc));
@@ -196,7 +212,7 @@
 
 int64 gc_max_memory()
 {
-#ifndef ONLY_SSPACE_IN_HEAP
+#ifndef USE_MARK_SWEEP_GC
   return (int64)((POINTER_SIZE_INT)gc_gen_total_memory_size((GC_Gen*)p_global_gc));
 #else
   return (int64)((POINTER_SIZE_INT)gc_ms_total_memory_size((GC_MS*)p_global_gc));
@@ -266,8 +282,8 @@
 #else //USE_32BITS_HASHCODE
 int32 gc_get_hashcode(Managed_Object_Handle p_object)
 {
-#ifdef ONLY_SSPACE_IN_HEAP
-  return (int32)p_object;
+#ifdef USE_MARK_SWEEP_GC
+  return (int32)0;//p_object;
 #endif
 
   Partial_Reveal_Object* p_obj = (Partial_Reveal_Object*)p_object;
@@ -325,7 +341,7 @@
   // data structures in not consistent for heap iteration
   if (!JVMTI_HEAP_ITERATION) return;
 
-#ifndef ONLY_SSPACE_IN_HEAP
+#ifndef USE_MARK_SWEEP_GC
   gc_gen_iterate_heap((GC_Gen *)p_global_gc);
 #else
   gc_ms_iterate_heap((GC_MS*)p_global_gc);
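
gc_get_system_info() above fixes SPACE_ALLOC_UNIT as the larger of the OS allocation unit and GC_BLOCK_SIZE_BYTES; gc_gen_initialize() (later in this patch) then rounds the heap bounds to that unit with round_up_to_size/round_down_to_size. Since both inputs are powers of two, the rounding reduces to bit masking. A standalone sketch under that assumption — the helper bodies below are modeled for illustration, not quoted from the tree:

#include <assert.h>
#include <stdio.h>

typedef unsigned long size_int;

// Next multiple of unit (unit must be a power of two).
static size_int round_up_to_size(size_int size, size_int unit) {
  return (size + unit - 1) & ~(unit - 1);
}

// Previous multiple of unit (unit must be a power of two).
static size_int round_down_to_size(size_int size, size_int unit) {
  return size & ~(unit - 1);
}

int main(void) {
  size_int unit = 64 * 1024;            // e.g. max(system alloc unit, block size)
  assert((unit & (unit - 1)) == 0);     // power of two, as the max() guarantees
  printf("%lu\n", round_up_to_size(100000, unit));   // 131072
  printf("%lu\n", round_down_to_size(100000, unit)); // 65536
  return 0;
}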
Index: src/common/gc_metadata.cpp
===================================================================
--- src/common/gc_metadata.cpp	(revision 566495)
+++ src/common/gc_metadata.cpp	(working copy)
@@ -38,7 +38,7 @@
   /* FIXME:: since we use a list to arrange the root sets and tasks, we can
      dynamically alloc space for metadata.
      We just don't have this dynamic support at the moment. */
-
+  TRACE2("gc.process", "GC: GC metadata init ...\n");
   unsigned int seg_size = GC_METADATA_SIZE_BYTES + METADATA_BLOCK_SIZE_BYTES;
   void* metadata = STD_MALLOC(seg_size);
   memset(metadata, 0, seg_size);
@@ -86,6 +86,7 @@
 
 void gc_metadata_destruct(GC* gc)
 {
+  TRACE2("gc.process", "GC: GC metadata destruct ...");
   GC_Metadata* metadata = gc->metadata;
   sync_pool_destruct(metadata->free_task_pool);
   sync_pool_destruct(metadata->mark_task_pool);
@@ -120,7 +121,7 @@
 
   unsigned int num_alloced = metadata->num_alloc_segs;
   if(num_alloced == GC_METADATA_SEGMENT_NUM){
-    printf("Run out GC metadata, please give it more segments!\n");
+    DIE2("gc.verbose","Warning: Run out of GC metadata, please give it more segments!");
     exit(0);
   }
@@ -178,26 +179,27 @@
       iter = vector_block_iterator_advance(root_set,iter);
 
       Partial_Reveal_Object* p_obj = read_slot(p_ref);
-      if(IS_MOVE_COMPACT){
-        /*This condition is removed because we do los sliding compaction at every major compaction after add los minor sweep.*/
-        //if(obj_is_moved(p_obj))
-        /*Fixme: los_boundery ruined the modularity of gc_common.h*/
-        if(p_obj < los_boundary){
-          write_slot(p_ref, obj_get_fw_in_oi(p_obj));
-        }else{
-          *p_ref = obj_get_fw_in_table(p_obj);
-        }
+      if(IS_MOVE_COMPACT){
+        /*This condition is removed because we do los sliding compaction at every major compaction after add los minor sweep.*/
+        //if(obj_is_moved(p_obj))
+        /*Fixme: los_boundery ruined the modularity of gc_common.h*/
+        if(p_obj < los_boundary){
+          write_slot(p_ref, obj_get_fw_in_oi(p_obj));
         }else{
-        if(obj_is_fw_in_oi(p_obj)){
-          /* Condition obj_is_moved(p_obj) is for preventing mistaking previous mark bit of large obj as fw bit when fallback happens.
-           * Because until fallback happens, perhaps the large obj hasn't been marked. So its mark bit remains as the last time.
-           * This condition is removed because we do los sliding compaction at every major compaction after add los minor sweep.
-           * In major collection condition obj_is_fw_in_oi(p_obj) can be omitted,
-           * since those which can be scanned in MOS & NOS must have been set fw bit in oi. */
-          assert((POINTER_SIZE_INT)obj_get_fw_in_oi(p_obj) > DUAL_MARKBITS);
-          write_slot(p_ref, obj_get_fw_in_oi(p_obj));
-        }
+          *p_ref = obj_get_fw_in_table(p_obj);
         }
+      }else{
+        if(obj_is_fw_in_oi(p_obj)){
+          /* Condition obj_is_moved(p_obj) is for preventing mistaking previous mark bit of large obj as fw bit when fallback happens.
+           * Because until fallback happens, perhaps the large obj hasn't been marked. So its mark bit remains as the last time.
+           * This condition is removed because we do los sliding compaction at every major compaction after add los minor sweep.
+           * In major collection condition obj_is_fw_in_oi(p_obj) can be omitted,
+           * since those which can be scanned in MOS & NOS must have been set fw bit in oi.
+           */
+          assert(address_belongs_to_gc_heap(obj_get_fw_in_oi(p_obj), gc));
+          write_slot(p_ref, obj_get_fw_in_oi(p_obj));
+        }
+      }
     }
     root_set = pool_iterator_next(pool);
@@ -374,4 +376,3 @@
 }
 
-
Index: src/common/large_pages.cpp
===================================================================
--- src/common/large_pages.cpp	(revision 566495)
+++ src/common/large_pages.cpp	(working copy)
@@ -68,15 +68,15 @@
     alloc_addr = VirtualAlloc(NULL, size, MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES, PAGE_READWRITE);
     release_lock_memory_priv();
     if(alloc_addr == NULL){
-      printf("GC large_page: No required number of large pages found. Please reboot.....\n");
+      WARN2("gc.base","GC large_page: No required number of large pages found. Please reboot.....\n");
       return NULL;
     }else
       return alloc_addr;
   }else{
-    printf("GC large_page: Check that you have permissions:\n");
-    printf("GC large_page: Control Panel->Administrative Tools->Local Security Settings->->User Rights Assignment->Lock pages in memory;\n");
-    printf("GC large_page: Start VM as soon after reboot as possible, because large pages become fragmented and unusable after a while;\n");
-    printf("GC large_page: Heap size should be multiple of large page size.\n");
+    WARN2("gc.base","GC large_page: Check that you have permissions:");
+    WARN2("gc.base","GC large_page: Control Panel->Administrative Tools->Local Security Settings->->User Rights Assignment->Lock pages in memory;");
+    WARN2("gc.base","GC large_page: Start VM as soon after reboot as possible, because large pages become fragmented and unusable after a while;");
+    WARN2("gc.base","GC large_page: Heap size should be multiple of large page size.");
     return NULL;
   }
 }
@@ -107,7 +107,7 @@
 static void parse_proc_meminfo(size_t required_size){
   FILE* f = fopen("/proc/meminfo", "r");
   if (f == NULL){
-    printf("GC large_page: Can't open /proc/meminfo \n");
+    WARN2("gc.base","GC large_page: Can't open /proc/meminfo");
     return;
   }
@@ -124,18 +124,18 @@
   if (buf) free(buf);
 
   if (proc_huge_pages_total == (size_t)-1){
-    printf("GC large_page: Large pages are not supported by kernel.\n");
-    printf("GC large_page: CONFIG_HUGETLB_PAGE and CONFIG_HUGETLBFS needs to be enabled.\n");
+    WARN2("gc.base","GC large_page: Large pages are not supported by kernel.");
+    WARN2("gc.base","GC large_page: CONFIG_HUGETLB_PAGE and CONFIG_HUGETLBFS needs to be enabled.");
   } else if (proc_huge_pages_total == 0){
-    printf("GC large_page: No large pages reserved, Use following command: echo num> /proc/sys/vm/nr_hugepages.\n");
-    printf("GC large_page: Do it just after kernel boot before huge pages become fragmented.\n");
+    WARN2("gc.base","GC large_page: No large pages reserved, Use following command: echo num> /proc/sys/vm/nr_hugepages.");
+    WARN2("gc.base","GC large_page: Do it just after kernel boot before huge pages become fragmented.");
   } else if (proc_huge_pages_free * proc_huge_page_size < required_size) {
     if (proc_huge_pages_total * proc_huge_page_size >= required_size) {
-      printf("GC large_page: Not enough free large pages, some of reserved space is already busy.\n");
+      WARN2("gc.base","GC large_page: Not enough free large pages, some of reserved space is already busy.");
    } else {
-      printf("GC large_page: Not enough reserved large pages.\n");
+      WARN2("gc.base","GC large_page: Not enough reserved large pages.");
    }
-    printf("GC large_page: Large pages can be only allocated.\n");
+    WARN2("gc.base","GC large_page: Large pages can be only allocated.");
   }
 }
@@ -150,9 +150,9 @@
   int fd = open(buf, O_CREAT | O_RDWR, 0700);
   if (fd == -1){
-    printf("GC large_page: Can't open Mount hugetlbfs with: mount none /mnt/huge -t hugetlbfsi.\n");
-    printf("GC large_page: Check you have appropriate permissions to /mnt/huge.\n");
-    printf("GC large_page: Use command line switch -Dgc.lp=/mnt/huge.\n");
+    WARN2("gc.base","GC large_page: Can't open. Mount hugetlbfs with: mount none /mnt/huge -t hugetlbfs.");
+    WARN2("gc.base","GC large_page: Check you have appropriate permissions to /mnt/huge.");
+    WARN2("gc.base","GC large_page: Use command line switch -Dgc.lp=/mnt/huge.");
     free(buf);
     return NULL;
   }
@@ -160,7 +160,7 @@
 
   void* addr = mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
   if (addr == MAP_FAILED){
-    printf("GC large_page: Map failed.\n");
+    WARN2("gc.base","GC large_page: Map failed.");
     close(fd);
     free(buf);
     return NULL;
@@ -174,7 +174,7 @@
   parse_proc_meminfo(size);
   void* alloc_addr = mmap_large_pages(size, hint);
   if(alloc_addr == NULL){
-    printf("GC large_page: Large pages allocation failed.\n");
+    WARN2("gc.base","GC large_page: Large pages allocation failed.");
     return NULL;
   }
   return alloc_addr;
Index: src/common/mark_scan_pool.cpp
===================================================================
--- src/common/mark_scan_pool.cpp	(revision 566495)
+++ src/common/mark_scan_pool.cpp	(working copy)
@@ -24,6 +24,9 @@
 #include "../gen/gen.h"
 #include "../finalizer_weakref/finalizer_weakref.h"
 
+#ifdef GC_GEN_STATS
+#include "../gen/gen_stats.h"
+#endif
 
 static FORCE_INLINE void scan_slot(Collector* collector, REF *p_ref)
 {
   Partial_Reveal_Object *p_obj = read_slot(p_ref);
@@ -31,6 +34,10 @@
-  if(obj_mark_in_vt(p_obj))
-    collector_tracestack_push(collector, p_obj);
+  if(obj_mark_in_vt(p_obj)){
+    collector_tracestack_push(collector, p_obj);
+#ifdef GC_GEN_STATS
+    GC_Gen_Collector_Stats* stats = (GC_Gen_Collector_Stats*)collector->stats;
+    gc_gen_collector_update_marked_obj_stats_major(stats);
+#endif
+  }
 
   return;
 }
@@ -110,6 +117,9 @@
 {
   GC* gc = collector->gc;
   GC_Metadata* metadata = gc->metadata;
+#ifdef GC_GEN_STATS
+  GC_Gen_Collector_Stats* stats = (GC_Gen_Collector_Stats*)collector->stats;
+#endif
 
   /* reset the num_finished_collectors to be 0 by one collector. This is necessary for the barrier later. */
   unsigned int num_active_collectors = gc->num_active_collectors;
@@ -137,8 +147,13 @@
          and the second time the value is the ref slot is the old position as expected.
         This can be worked around if we want.
       */
-      if(obj_mark_in_vt(p_obj))
+      if(obj_mark_in_vt(p_obj)){
         collector_tracestack_push(collector, p_obj);
+#ifdef GC_GEN_STATS
+        gc_gen_collector_update_rootset_ref_num(stats);
+        gc_gen_collector_update_marked_obj_stats_major(stats);
+#endif
+      }
     }
     root_set = pool_iterator_next(metadata->gc_rootset_pool);
@@ -192,5 +207,6 @@
 
 void trace_obj_in_normal_marking(Collector *collector, void *p_obj)
 {
+  obj_mark_in_vt((Partial_Reveal_Object*)p_obj);
   trace_object(collector, (Partial_Reveal_Object *)p_obj);
 }
Index: src/common/space_tuner.cpp
===================================================================
--- src/common/space_tuner.cpp	(revision 566495)
+++ src/common/space_tuner.cpp	(working copy)
@@ -540,4 +540,3 @@
 }
 
-
Index: src/finalizer_weakref/finalizer_weakref.cpp
===================================================================
--- src/finalizer_weakref/finalizer_weakref.cpp	(revision 566495)
+++ src/finalizer_weakref/finalizer_weakref.cpp	(working copy)
@@ -56,9 +56,9 @@
   return !obj_is_marked_in_vt(p_obj);
 }
 
-#ifdef ONLY_SSPACE_IN_HEAP
+#ifdef USE_MARK_SWEEP_GC
 extern Boolean obj_is_marked_in_table(Partial_Reveal_Object *obj);
-static inline Boolean obj_is_dead_in_unique_sweep_gc(Partial_Reveal_Object * p_obj)
+static inline Boolean obj_is_dead_in_mark_sweep_gc(Partial_Reveal_Object * p_obj)
 {
   return !obj_is_marked_in_table(p_obj);
 }
@@ -68,8 +68,8 @@
 {
   assert(p_obj);
 
-#ifdef ONLY_SSPACE_IN_HEAP
-  return obj_is_dead_in_unique_sweep_gc(p_obj);
+#ifdef USE_MARK_SWEEP_GC
+  return obj_is_dead_in_mark_sweep_gc(p_obj);
 #endif
 
   if(gc_match_kind(gc, MINOR_COLLECTION)){
@@ -91,7 +91,7 @@
 {
   assert(!gc_obj_is_dead(gc, p_obj));
 
-#ifdef ONLY_SSPACE_IN_HEAP
+#ifdef USE_MARK_SWEEP_GC
   Sspace *sspace = gc_ms_get_sspace((GC_MS*)gc);
   return sspace->move_object;
 #endif
@@ -112,7 +112,7 @@
   POINTER_SIZE_INT *iter = vector_block_iterator_init(block);
   for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){
     REF *p_ref = (REF*)iter;
-    Partial_Reveal_Object* p_obj = read_slot(p_ref);
+    Partial_Reveal_Object *p_obj = read_slot(p_ref);
     if(*p_ref && obj_need_move(gc, p_obj))
       finref_repset_add_entry(gc, p_ref);
   }
@@ -172,7 +172,7 @@
   }
   gc_put_finalizable_objects(gc);
 
-  if(gc_match_kind(gc, MAJOR_COLLECTION|FALLBACK_COLLECTION))
+  if(gc_match_kind(gc, MAJOR_COLLECTION|FALLBACK_COLLECTION|SWEEP_COMPACT_GC))
     finref_add_repset_from_pool(gc, obj_with_fin_pool);
 }
@@ -220,11 +220,10 @@
     }else{
       trace_object = trace_obj_in_normal_marking;
     }
-    obj_mark_in_vt(p_obj);
   } else if(gc_match_kind(gc, FALLBACK_COLLECTION)){
     trace_object = trace_obj_in_fallback_marking;
   } else {
-    assert(gc_match_kind(gc, UNIQUE_SWEEP_COLLECTION));
+    assert(gc_match_kind(gc, MARK_SWEEP_GC|SWEEP_COMPACT_GC));
     p_ref_or_obj = p_obj;
     trace_object = trace_obj_in_ms_marking;
   }
@@ -241,7 +240,7 @@
       void *p_ref_or_obj = (void*)*iter;
       assert((gc_match_kind(gc, MINOR_COLLECTION | FALLBACK_COLLECTION) && *(Partial_Reveal_Object **)p_ref_or_obj)
               || (gc_match_kind(gc, MAJOR_COLLECTION) && p_ref_or_obj)
-              || (gc_match_kind(gc, UNIQUE_SWEEP_COLLECTION) && p_ref_or_obj));
+              || (gc_match_kind(gc, MARK_SWEEP_GC|SWEEP_COMPACT_GC) && p_ref_or_obj));
       trace_object(collector, p_ref_or_obj);
       if(collector->result == FALSE)  break; /* Resurrection fallback happens; force return */
@@ -255,7 +254,7 @@
       break; /* force return */
     }
 
-    task_block = pool_get_entry(metadata->mark_task_pool);
+    task_block = pool_get_entry(metadata->mark_task_pool);
   }
 
   task_block = (Vector_Block*)collector->trace_stack;
@@ -275,9 +274,6 @@
   DURING_RESURRECTION = TRUE;
 
-  if(gc_match_kind(gc, MAJOR_COLLECTION|FALLBACK_COLLECTION))
-    finref_reset_repset(gc);
-
   pool_iterator_init(finalizable_obj_pool);
   Vector_Block *block = pool_iterator_next(finalizable_obj_pool);
   while(block){
@@ -287,13 +283,6 @@
       Partial_Reveal_Object *p_obj = read_slot(p_ref);
       assert(p_obj);
 
-      /* In major & fallback collection we need record p_ref of the root dead obj to update it later.
-       * Because it is outside heap, we can't update in ref fixing.
-       * In minor collection p_ref of the root dead obj is automatically updated while tracing.
-       */
-      if(gc_match_kind(gc, MAJOR_COLLECTION|FALLBACK_COLLECTION))
-        finref_repset_add_entry(gc, p_ref);
-
       /* Perhaps obj has been resurrected by previous resurrections */
       if(!gc_obj_is_dead(gc, p_obj)){
         if(gc_match_kind(gc, MINOR_COLLECTION) && obj_need_move(gc, p_obj))
@@ -312,9 +301,12 @@
     block = pool_iterator_next(finalizable_obj_pool);
   }
 
-  if(gc_match_kind(gc, MAJOR_COLLECTION|FALLBACK_COLLECTION))
-    finref_put_repset(gc);
-
+  /* In major & fallback & sweep-compact collection we need record p_ref of the root dead obj to update it later.
+   * Because it is outside heap, we can't update it in ref fixing.
+   * In minor collection p_ref of the root dead obj is automatically updated while tracing.
+   */
+  if(gc_match_kind(gc, MAJOR_COLLECTION|FALLBACK_COLLECTION|SWEEP_COMPACT_GC))
+    finref_add_repset_from_pool(gc, finalizable_obj_pool);
   metadata->pending_finalizers = TRUE;
 
   DURING_RESURRECTION = FALSE;
@@ -324,7 +316,7 @@
 
 static void identify_dead_refs(GC *gc, Pool *pool)
 {
-  if(gc_match_kind(gc, MAJOR_COLLECTION|FALLBACK_COLLECTION))
+  if(gc_match_kind(gc, MAJOR_COLLECTION|FALLBACK_COLLECTION|SWEEP_COMPACT_GC))
     finref_reset_repset(gc);
 
   pool_iterator_init(pool);
   Vector_Block *block = pool_iterator_next(pool);
@@ -348,7 +340,7 @@
       if(gc_match_kind(gc, MINOR_COLLECTION)){
         assert(obj_is_fw_in_oi(p_referent));
         write_slot(p_referent_field, (obj_get_fw_in_oi(p_referent)));
-      } else {
+      } else if(!gc_match_kind(gc, MARK_SWEEP_GC)){
         finref_repset_add_entry(gc, p_referent_field);
       }
     }
@@ -361,7 +353,7 @@
     block = pool_iterator_next(pool);
   }
 
-  if(gc_match_kind(gc, MAJOR_COLLECTION|FALLBACK_COLLECTION)){
+  if(gc_match_kind(gc, MAJOR_COLLECTION|FALLBACK_COLLECTION|SWEEP_COMPACT_GC)){
     finref_put_repset(gc);
     finref_add_repset_from_pool(gc, pool);
   }
@@ -397,7 +389,7 @@
   Finref_Metadata *metadata = gc->finref_metadata;
   Pool *phanref_pool = metadata->phanref_pool;
 
-  if(gc_match_kind(gc, MAJOR_COLLECTION|FALLBACK_COLLECTION))
+  if(gc_match_kind(gc, MAJOR_COLLECTION|FALLBACK_COLLECTION|SWEEP_COMPACT_GC))
     finref_reset_repset(gc);
 //  collector_reset_repset(collector);
   pool_iterator_init(phanref_pool);
@@ -422,7 +414,7 @@
       if(gc_match_kind(gc, MINOR_COLLECTION)){
         assert(obj_is_fw_in_oi(p_referent));
         write_slot(p_referent_field, (obj_get_fw_in_oi(p_referent)));
-      } else {
+      } else if(!gc_match_kind(gc, MARK_SWEEP_GC)){
         finref_repset_add_entry(gc, p_referent_field);
       }
       *p_ref = (REF)NULL;
@@ -440,7 +432,7 @@
     block = pool_iterator_next(phanref_pool);
   }
 //  collector_put_repset(collector);
-  if(gc_match_kind(gc, MAJOR_COLLECTION|FALLBACK_COLLECTION)){
+  if(gc_match_kind(gc, MAJOR_COLLECTION|FALLBACK_COLLECTION|SWEEP_COMPACT_GC)){
     finref_put_repset(gc);
     finref_add_repset_from_pool(gc, phanref_pool);
   }
@@ -709,16 +701,18 @@
 {
   Finref_Metadata *metadata = gc->finref_metadata;
 
-  if(gc_match_kind(gc, MAJOR_COLLECTION|FALLBACK_COLLECTION))
+  if(gc_match_kind(gc, MAJOR_COLLECTION|FALLBACK_COLLECTION|SWEEP_COMPACT_GC))
     finref_reset_repset(gc);
-  update_referent_field_ignore_finref(gc, metadata->softref_pool);
-  update_referent_field_ignore_finref(gc, metadata->weakref_pool);
-  update_referent_field_ignore_finref(gc, metadata->phanref_pool);
-  if(gc_match_kind(gc, MAJOR_COLLECTION|FALLBACK_COLLECTION))
+  if(!gc_match_kind(gc, MARK_SWEEP_GC)){
+    update_referent_field_ignore_finref(gc, metadata->softref_pool);
+    update_referent_field_ignore_finref(gc, metadata->weakref_pool);
+    update_referent_field_ignore_finref(gc, metadata->phanref_pool);
+  }
+  if(gc_match_kind(gc, MAJOR_COLLECTION|FALLBACK_COLLECTION|SWEEP_COMPACT_GC))
     finref_put_repset(gc);
 }
 
-extern void* los_boundary;
+extern void *los_boundary;
 
 /* Move compaction needs special treament when updating referent field */
 static inline void move_compaction_update_ref(GC *gc, REF *p_ref)
 {
@@ -728,8 +722,6 @@
    * So if p_ref belongs to heap, it must be a referent field pointer.
    * Objects except a tree root which are resurrected need not be recorded in finref_repset_pool.
    */
-//  if(address_belongs_to_gc_heap(p_ref, gc) && !address_belongs_to_space(p_ref, gc_get_los((GC_Gen*)gc))){
-//  && space_of_addr(gc, p_ref)->move_object //comment this out because all spaces are movable in major collection.
   if(address_belongs_to_gc_heap(p_ref, gc) && (p_ref >= los_boundary)){
     unsigned int offset = get_gc_referent_offset();
     Partial_Reveal_Object *p_old_ref = (Partial_Reveal_Object*)((POINTER_SIZE_INT)p_ref - offset);
@@ -737,14 +729,40 @@
     p_ref = (REF*)((POINTER_SIZE_INT)p_new_ref + offset);
   }
   Partial_Reveal_Object *p_obj = read_slot(p_ref);
-  assert(space_of_addr(gc, (void*)p_obj)->move_object);
-//  if(obj_belongs_to_space(p_obj, gc_get_los((GC_Gen*)gc)))
+  assert(space_of_addr(gc, p_obj)->move_object);
+
   if(p_obj < los_boundary)
-    write_slot(p_ref , obj_get_fw_in_oi(p_obj));
+    write_slot(p_ref, obj_get_fw_in_oi(p_obj));
   else
     *p_ref = obj_get_fw_in_table(p_obj);
 }
 
+static inline void sweep_compaction_update_ref(GC *gc, REF *p_ref)
+{
+  /* There are only two kinds of p_ref being added into finref_repset_pool:
+   * 1. p_ref is in a vector block from one finref pool;
+   * 2. p_ref is a referent field.
+   * So if p_ref belongs to heap, it must be a referent field pointer.
+   * Objects except a tree root which are resurrected need not be recorded in finref_repset_pool.
+   */
+  if(address_belongs_to_gc_heap((void*)p_ref, gc)){
+    unsigned int offset = get_gc_referent_offset();
+    Partial_Reveal_Object *p_old_ref = (Partial_Reveal_Object*)((POINTER_SIZE_INT)p_ref - offset);
+    if(obj_is_fw_in_oi(p_old_ref)){
+      Partial_Reveal_Object *p_new_ref = obj_get_fw_in_oi(p_old_ref);
+      p_ref = (REF*)((POINTER_SIZE_INT)p_new_ref + offset);
+    }
+  }
+  Partial_Reveal_Object *p_obj = read_slot(p_ref);
+  /* assert(obj_need_move(gc, p_obj));
+   * This assertion is commented out because it assert(!obj_is_dead(gc, p_obj)).
+   * When gc_fix_rootset is invoked, mark bit and alloc bit have been flipped in Mark-Sweep,
+   * so this assertion will fail.
+   * But for sure p_obj here must be one needing moving.
+   */
+  write_slot(p_ref, obj_get_fw_in_oi(p_obj));
+}
+
 extern Boolean IS_MOVE_COMPACT;
 
 /* parameter pointer_addr_in_pool means it is p_ref or p_obj in pool */
@@ -765,12 +783,14 @@
       p_ref = (REF*)iter;
       p_obj = read_slot(p_ref);
 
-      if(!IS_MOVE_COMPACT){
-        assert(obj_is_marked_in_vt(p_obj));
-        assert(obj_is_fw_in_oi(p_obj));
+      if(IS_MOVE_COMPACT){
+        move_compaction_update_ref(gc, p_ref);
+      } else if(gc_match_kind(gc, SWEEP_COMPACT_GC)){
+        if(obj_is_fw_in_oi(p_obj))
+          sweep_compaction_update_ref(gc, p_ref);
+      } else {
+        assert((obj_is_marked_in_vt(p_obj) && obj_is_fw_in_oi(p_obj)));
         write_slot(p_ref , obj_get_fw_in_oi(p_obj));
-      } else {
-        move_compaction_update_ref(gc, p_ref);
       }
     }
     vector_block_clear(repset);
@@ -799,11 +819,13 @@
       p_obj = read_slot(p_ref);
 
-      if(!IS_MOVE_COMPACT){
-        assert(obj_is_marked_in_vt(p_obj));
-        assert(obj_is_fw_in_oi(p_obj));
+      if(IS_MOVE_COMPACT){
+        move_compaction_update_ref(gc, p_ref);
+      } else if(gc_match_kind(gc, SWEEP_COMPACT_GC)){
+        if(obj_is_fw_in_oi(p_obj))
+          sweep_compaction_update_ref(gc, p_ref);
+      } else {
+        assert((obj_is_marked_in_vt(p_obj) && obj_is_fw_in_oi(p_obj)));
         write_slot(p_ref , obj_get_fw_in_oi(p_obj));
-      } else {
-        move_compaction_update_ref(gc, p_ref);
       }
     }
     repset = pool_iterator_next(pool);
Index: src/finalizer_weakref/finalizer_weakref_metadata.cpp
===================================================================
--- src/finalizer_weakref/finalizer_weakref_metadata.cpp	(revision 566495)
+++ src/finalizer_weakref/finalizer_weakref_metadata.cpp	(working copy)
@@ -44,6 +44,7 @@
 void gc_finref_metadata_initialize(GC *gc)
 {
+  TRACE2("gc.process", "GC: gc finref metadata init ... \n");
   unsigned int seg_size = FINREF_METADATA_SEG_SIZE_BYTES + FINREF_METADATA_BLOCK_SIZE_BYTES;
   void *first_segment = STD_MALLOC(seg_size);
   memset(first_segment, 0, seg_size);
@@ -82,6 +83,7 @@
 
 void gc_finref_metadata_destruct(GC *gc)
 {
+  TRACE2("gc.process", "GC: GC finref metadata destruct ...");
   Finref_Metadata *metadata = gc->finref_metadata;
 
   sync_pool_destruct(metadata->free_pool);
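
The subtle part of sweep_compaction_update_ref above is that the referent *field* pointer may itself be stale: the field lives inside an object that the compaction moved. The code therefore rebases p_ref through the owning object's forwarding pointer before touching the slot. A standalone sketch of just that pointer arithmetic — every type and helper here is a stand-in for illustration; only the rebasing mirrors the patch:

#include <cassert>
#include <cstddef>
#include <cstdint>

struct Obj { Obj* fw; uintptr_t referent; };    // fw models the forwarded obj info

int main() {
  Obj old_obj = { nullptr, 0 };
  Obj new_obj = { nullptr, 0 };
  old_obj.fw = &new_obj;                        // old copy forwards to new copy

  uintptr_t offset = offsetof(Obj, referent);   // plays get_gc_referent_offset()
  uintptr_t* p_ref = &old_obj.referent;         // referent field in the OLD copy

  // Rebase the field pointer: field -> owning object -> forwarded copy -> field.
  Obj* p_old = (Obj*)((uintptr_t)p_ref - offset);
  if (p_old->fw) p_ref = (uintptr_t*)((uintptr_t)p_old->fw + offset);

  assert(p_ref == &new_obj.referent);           // the slot now lives in the new copy
  return 0;
}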
\n"); + assert(gc_gen); max_heap_size = round_down_to_size(max_heap_size, SPACE_ALLOC_UNIT); min_heap_size = round_up_to_size(min_heap_size, SPACE_ALLOC_UNIT); @@ -130,8 +124,8 @@ assert((POINTER_SIZE_INT)nos_boundary%SPACE_ALLOC_UNIT == 0); nos_base = vm_reserve_mem(nos_boundary, nos_reserve_size); if( nos_base != nos_boundary ){ - printf("Static NOS mapping: Can't reserve memory at %x for size %x for NOS.\n", nos_boundary, nos_reserve_size); - printf("Please not use static NOS mapping by undefining STATIC_NOS_MAPPING, or adjusting NOS_BOUNDARY value.\n"); + DIE2("gc.base","Warning: Static NOS mapping: Can't reserve memory at address"<= nos_base){ los_mos_base = (void*)((POINTER_SIZE_INT)los_mos_base - SPACE_ALLOC_UNIT); if(los_mos_base < RESERVE_BOTTOM){ - printf("Static NOS mapping: Can't allocate memory at address %x for specified size %x for MOS", reserved_base, los_mos_size); + DIE2("gc.base","Static NOS mapping: Can't reserve memory at address"<mos) + space_committed_size((Space*)gc_gen->los); +#ifdef GC_GEN_STATS + gc_gen_stats_initialize(gc_gen); +#endif + + gc_gen_initial_verbose_info(gc_gen); return; } void gc_gen_destruct(GC_Gen *gc_gen) { + TRACE2("gc.process", "GC: GC_Gen heap destruct ......"); Space* nos = (Space*)gc_gen->nos; Space* mos = (Space*)gc_gen->mos; Space* los = (Space*)gc_gen->los; @@ -262,6 +262,9 @@ vm_unmap_mem(nos_start, nos_size); vm_unmap_mem(mos_start, mos_size); vm_unmap_mem(los_start, los_size); +#ifdef GC_GEN_STATS + gc_gen_stats_destruct(gc_gen); +#endif return; } @@ -280,8 +283,6 @@ void* los_try_alloc(POINTER_SIZE_INT size, GC* gc){ return lspace_try_alloc((Lspace*)((GC_Gen*)gc)->los, size); } -unsigned int gc_get_processor_num(GC_Gen* gc){ return gc->_num_processors;} - Boolean FORCE_FULL_COMPACT = FALSE; void gc_decide_collection_kind(GC_Gen* gc, unsigned int cause) @@ -294,8 +295,8 @@ else gc->collect_kind = MINOR_COLLECTION; -#ifdef ONLY_SSPACE_IN_HEAP - gc->collect_kind = UNIQUE_SWEEP_COLLECTION; +#ifdef USE_MARK_SWEEP_GC + gc->collect_kind = MARK_SWEEP_GC; #endif return; } @@ -318,7 +319,7 @@ gc_enable_gen_mode(); }else{ - printf("\nGC algorithm setting incorrect. Will use default value.\n"); + WARN2("gc.base","\nWarning: GC algorithm setting incorrect. Will use default value.\n"); } } @@ -336,7 +337,7 @@ MAJOR_ALGO= MAJOR_COMPACT_MOVE; }else{ - printf("\nGC algorithm setting incorrect. Will use default algorithm.\n"); + WARN2("gc.base","\nWarning: GC algorithm setting incorrect. Will use default value.\n"); } } @@ -372,8 +373,9 @@ Mspace* mos = gc->mos; Fspace* nos = gc->nos; Lspace* los = gc->los; - /*We can not tolerate gc->survive_ratio be greater than threshold twice continuously. - *Or, we must adjust heap size */ + /* We can not tolerate gc->survive_ratio be greater than threshold twice continuously. 
+   * Or, we must adjust heap size
+   */
   static unsigned int tolerate = 0;
 
   POINTER_SIZE_INT heap_total_size = los->committed_heap_size + mos->committed_heap_size + nos->committed_heap_size;
@@ -415,8 +417,41 @@
 #else
   assert(!large_page_hint);
   POINTER_SIZE_INT old_nos_size = nos->committed_heap_size;
+  INFO2("gc.process", "GC: gc_gen heap extension after GC["<<gc->num_collections<<"] ...");
   blocked_space_extend(nos, (unsigned int)adjust_size);
-  nos->survive_ratio = (float)old_nos_size * nos->survive_ratio / (float)nos->committed_heap_size;
+  INFO2("gc.space","GC: heap extension: from "<<verbose_print_size(old_nos_size)<<" to "<<verbose_print_size(nos->committed_heap_size));
+
+  if( ... ){
+    nos->survive_ratio = (float)old_nos_size * nos->survive_ratio / (float)nos->committed_heap_size;
+    if( NOS_PARTIAL_FORWARD )
+      object_forwarding_boundary = (void*)&nos->blocks[nos->num_managed_blocks >>1 ];
+    else
+      object_forwarding_boundary = (void*)&nos->blocks[nos->num_managed_blocks];
+  }
+  else {
+    /*if user specified NOS_SIZE, adjust mos and nos size to keep nos size as a constant*/
+    old_nos_size = nos->committed_heap_size;
+    nos_boundary = (void*)((POINTER_SIZE_INT)nos->heap_end - NOS_SIZE);
+    nos->committed_heap_size = NOS_SIZE;
+    nos->heap_start = nos_boundary;
+    nos->blocks = (Block*)nos_boundary;
+    nos->first_block_idx = ((Block_Header*)nos_boundary)->block_idx;
+    nos->num_managed_blocks = (unsigned int)(NOS_SIZE >> GC_BLOCK_SHIFT_COUNT);
+    nos->num_total_blocks = nos->num_managed_blocks;
+    nos->free_block_idx = nos->first_block_idx;
+    if( NOS_PARTIAL_FORWARD )
+      object_forwarding_boundary = (void*)&nos->blocks[nos->num_managed_blocks >>1 ];
+    else
+      object_forwarding_boundary = (void*)&nos->blocks[nos->num_managed_blocks];
+
+    mos->heap_end = nos_boundary;
+    mos->committed_heap_size += old_nos_size - NOS_SIZE;
+    mos->num_managed_blocks = (unsigned int)(mos->committed_heap_size >> GC_BLOCK_SHIFT_COUNT);
+    mos->num_total_blocks = mos->num_managed_blocks;
+    mos->ceiling_block_idx = ((Block_Header*)nos_boundary)->block_idx - 1;
+
+    mos->survive_ratio = (float)mos->last_surviving_size / (float)mos->committed_heap_size;
+  }
+
   /*Fixme: gc fields should be modified according to nos extend*/
   gc->committed_heap_size += adjust_size;
   //debug_adjust
@@ -430,8 +465,11 @@
 
 Boolean IS_FALLBACK_COMPACTION = FALSE; /* only for debugging, don't use it. */
 static unsigned int mspace_num_used_blocks_before_minor;
 static unsigned int mspace_num_used_blocks_after_minor;
 
+void gc_gen_stats_verbose(GC_Gen* gc);
+
 void gc_gen_reclaim_heap(GC_Gen* gc)
 {
+  INFO2("gc.process", "GC: start GC_Gen ...\n");
+
   if(verify_live_heap) gc_verify_heap((GC*)gc, TRUE);
 
   Blocked_Space* fspace = (Blocked_Space*)gc->nos;
@@ -440,24 +478,44 @@
   fspace->num_used_blocks = fspace->free_block_idx - fspace->first_block_idx;
 
   gc->collect_result = TRUE;
+
+#ifdef GC_GEN_STATS
+  gc_gen_stats_reset_before_collection((GC_Gen*)gc);
+  gc_gen_collector_stats_reset((GC_Gen*)gc);
+#endif
 
   if(gc_match_kind((GC*)gc, MINOR_COLLECTION)){
+
+    INFO2("gc.process", "GC: start minor collection ...\n");
+
     /* FIXME:: move_object is only useful for nongen_slide_copy */
     gc->mos->move_object = 0;
     /* This is for compute mspace->last_alloced_size */
     mspace_num_used_blocks_before_minor = mspace->free_block_idx - mspace->first_block_idx;
 
     fspace_collection(gc->nos);
+
+#ifdef GC_GEN_STATS
+    gc_gen_collector_stats_verbose_minor_collection(gc);
+#endif
 
     mspace_num_used_blocks_after_minor = mspace->free_block_idx - mspace->first_block_idx;
     assert( mspace_num_used_blocks_before_minor <= mspace_num_used_blocks_after_minor );
     mspace->last_alloced_size = GC_BLOCK_SIZE_BYTES * ( mspace_num_used_blocks_after_minor - mspace_num_used_blocks_before_minor );
 
     /*If the current minor collection failed, i.e. there happens a fallback, we should not do the minor sweep of LOS*/
-    if(gc->collect_result != FALSE && !gc_is_gen_mode())
+    if(gc->collect_result != FALSE && !gc_is_gen_mode()) {
+#ifdef GC_GEN_STATS
+      gc->stats->num_minor_collections++;
+#endif
       lspace_collection(gc->los);
+    }
+    gc->mos->move_object = 1;
 
-    gc->mos->move_object = 1;
+    INFO2("gc.process", "GC: end of minor collection ...\n");
+
   }else{
+
+    INFO2("gc.process", "GC: start major collection ...\n");
+
     /* process mos and nos together in one compaction */
     gc->los->move_object = 1;
@@ -465,9 +523,19 @@
     lspace_collection(gc->los);
 
     gc->los->move_object = 0;
+
+#ifdef GC_GEN_STATS
+    gc->stats->num_major_collections++;
+    gc_gen_collector_stats_verbose_major_collection(gc);
+#endif
+
+    INFO2("gc.process", "GC: end of major collection ...\n");
   }
 
   if(gc->collect_result == FALSE && gc_match_kind((GC*)gc, MINOR_COLLECTION)){
+
+    INFO2("gc.process", "GC: Minor collection failed, transform to fallback collection ...");
+
     if(gc_is_gen_mode()) gc_clear_remset((GC*)gc);
 
     /* runout mspace in minor collection */
@@ -477,6 +545,11 @@
     IS_FALLBACK_COMPACTION = TRUE;
     gc_reset_collect_result((GC*)gc);
     gc->collect_kind = FALLBACK_COLLECTION;
+#ifdef GC_GEN_STATS
+    /*since stats is changed in minor collection, we need to reset stats before fallback collection*/
+    gc_gen_stats_reset_before_collection((GC_Gen*)gc);
+    gc_gen_collector_stats_reset((GC_Gen*)gc);
+#endif
 
     if(verify_live_heap) event_gc_collect_kind_changed((GC*)gc);
@@ -486,10 +559,18 @@
     gc->los->move_object = 0;
 
     IS_FALLBACK_COMPACTION = FALSE;
+
+#ifdef GC_GEN_STATS
+    gc->stats->num_fallback_collections++;
+    gc_gen_collector_stats_verbose_major_collection(gc);
+#endif
+
+    INFO2("gc.process", "GC: end of fallback collection ...");
+
   }
 
   if( gc->collect_result == FALSE){
-    printf("Out of Memory!\n");
+    DIE2("gc.collect", "Out of Memory!\n");
     assert(0);
     exit(0);
   }
@@ -500,7 +581,15 @@
 #ifdef COMPRESS_REFERENCE
   gc_set_pool_clear(gc->metadata->gc_uncompressed_rootset_pool);
 #endif
+
   assert(!gc->los->move_object);
+
+#ifdef GC_GEN_STATS
+  gc_gen_stats_update_after_collection((GC_Gen*)gc);
+  gc_gen_stats_verbose(gc);
+#endif
+
INFO2("gc.process", "GC: end of GC_Gen\n"); + return; } @@ -602,3 +691,95 @@ } } } + +void gc_gen_collection_verbose_info(GC_Gen *gc, int64 pause_time, int64 mutator_time) +{ + +#ifdef GC_GEN_STATS + GC_Gen_Stats* stats = ((GC_Gen*)gc)->stats; + stats->total_mutator_time += mutator_time; + stats->total_pause_time += pause_time; +#endif + + INFO2("gc.collect","GC: GC_Gen Collection Info:" + <<"\nGC: GC id: GC["<num_collections<<"]" + <<"\nGC: current collection num: "<num_collections); + + switch(gc->collect_kind) { + case MINOR_COLLECTION: + INFO2("gc.collect","GC: collection type: minor"); +#ifdef GC_GEN_STATS + INFO2("gc.collect","GC: current minor collection num: "<stats->num_minor_collections); +#endif + break; + case MAJOR_COLLECTION: + INFO2("gc.collect","GC: collection type: major"); +#ifdef GC_GEN_STATS + INFO2("gc.collect","GC: current major collection num: "<stats->num_major_collections); +#endif + break; + case FALLBACK_COLLECTION: + INFO2("gc.collect","GC: collection type: fallback"); +#ifdef GC_GEN_STATS + INFO2("gc.collect","GC: current fallback collection num: "<stats->num_fallback_collections); +#endif + } + + switch(gc->cause) { + case GC_CAUSE_NOS_IS_FULL: + INFO2("gc.collect","GC: collection cause: nursery object space is full"); + break; + case GC_CAUSE_LOS_IS_FULL: + INFO2("gc.collect","GC: collection cause: large object space is full"); + break; + case GC_CAUSE_RUNTIME_FORCE_GC: + INFO2("gc.collect","GC: collection cause: runtime force gc"); + } + + INFO2("gc.collect","GC: pause time: "<<(pause_time>>10)<<"ms" + <<"\nGC: mutator time from last collection: "<<(mutator_time>>10)<<"ms\n"); + +} + +void gc_gen_space_verbose_info(GC_Gen *gc) +{ + INFO2("gc.space","GC: Heap info after GC["<num_collections<<"]:" + <<"\nGC: Heap size: "<committed_heap_size)<<", free size:"<los->committed_heap_size)<<", free size:"<los)) + <<"\nGC: MOS size: "<mos->committed_heap_size)<<", free size:"<mos)) + <<"\nGC: NOS size: "<nos->committed_heap_size)<<", free size:"<nos))<<"\n"); +} + +inline void gc_gen_initial_verbose_info(GC_Gen *gc) +{ + INFO2("gc.base","GC_Gen initial:" + <<"\nmax heap size: "<committed_heap_size) + <<"\ninitial num collectors: "<num_collectors + <<"\ninitial nos size: "<nos->committed_heap_size) + <<"\nnos collection algo: " + <<((gc->nos->collect_algorithm==MINOR_NONGEN_FORWARD_POOL)?"nongen forward":"gen forward") + <<"\ninitial mos size: "<mos->committed_heap_size) + <<"\nmos collection algo: " + <<((gc->mos->collect_algorithm==MAJOR_COMPACT_MOVE)?"move compact":"slide compact") + <<"\ninitial los size: "<los->committed_heap_size)<<"\n"); +} + +void gc_gen_wrapup_verbose(GC_Gen* gc) +{ +#ifdef GC_GEN_STATS + GC_Gen_Stats* stats = gc->stats; + + INFO2("gc.base", "GC: All Collection info: " + <<"\nGC: total nos alloc obj size: "<total_size_nos_alloc) + <<"\nGC: total los alloc obj num: "<obj_num_los_alloc + <<"\nGC: total nos alloc obj size:"<total_size_los_alloc) + <<"\nGC: total collection num: "<num_collections + <<"\nGC: minor collection num: "<num_minor_collections + <<"\nGC: major collection num: "<num_major_collections + <<"\nGC: total collection time: "<total_pause_time + <<"\nGC: total appliction execution time: "<total_mutator_time<<"\n"); +#endif +} + Index: src/gen/gen.h =================================================================== --- src/gen/gen.h (revision 566495) +++ src/gen/gen.h (working copy) @@ -30,6 +30,10 @@ #include "../los/lspace.h" #include "../finalizer_weakref/finalizer_weakref_metadata.h" +#ifdef GC_GEN_STATS +struct GC_Gen_Stats; 
Index: src/gen/gen.h
===================================================================
--- src/gen/gen.h	(revision 566495)
+++ src/gen/gen.h	(working copy)
@@ -30,6 +30,10 @@
 #include "../los/lspace.h"
 #include "../finalizer_weakref/finalizer_weakref_metadata.h"
 
+#ifdef GC_GEN_STATS
+struct GC_Gen_Stats;
+#endif
+
 enum Write_Barrier_Kind{
   WRITE_BARRIER_NIL,
   WRITE_BARRIER_SLOT,
@@ -86,7 +90,12 @@
   Vector_Block* uncompressed_root_set;
 
   //For_LOS_extend
-  Space_Tuner* tuner;
+  Space_Tuner* tuner;
+
+  /* system info */
+  unsigned int _system_alloc_unit;
+  unsigned int _machine_page_size_bytes;
+  unsigned int _num_processors;
   /* END of GC --> */
 
   Block* blocks;
@@ -97,18 +106,21 @@
   Boolean force_major_collect;
   Gen_Mode_Adaptor* gen_mode_adaptor;
   Boolean force_gen_mode;
-
-  /* system info */
-  unsigned int _system_alloc_unit;
-  unsigned int _machine_page_size_bytes;
-  unsigned int _num_processors;
-
+
+#ifdef GC_GEN_STATS
+  GC_Gen_Stats* stats; /*used to record stats when collection*/
+#endif
+
 } GC_Gen;
 
 //////////////////////////////////////////////////////////////////////////////////////////
 
 void gc_gen_initialize(GC_Gen *gc, POINTER_SIZE_INT initial_heap_size, POINTER_SIZE_INT final_heap_size);
 void gc_gen_destruct(GC_Gen *gc);
+void gc_gen_collection_verbose_info(GC_Gen *gc, int64 pause_time, int64 mutator_time);
+void gc_gen_space_verbose_info(GC_Gen *gc);
+void gc_gen_initial_verbose_info(GC_Gen *gc);
+void gc_gen_wrapup_verbose(GC_Gen* gc);
 
 inline POINTER_SIZE_INT gc_gen_free_memory_size(GC_Gen* gc)
 {  return space_free_memory_size((Blocked_Space*)gc->nos) +
@@ -161,8 +173,6 @@
 void gc_set_mos(GC_Gen* gc, Space* mos);
 void gc_set_los(GC_Gen* gc, Space* los);
 
-unsigned int gc_get_processor_num(GC_Gen* gc);
-
 void gc_decide_collection_algorithm(GC_Gen* gc, char* minor_algo, char* major_algo);
 void gc_decide_collection_kind(GC_Gen* gc, unsigned int cause);
@@ -184,6 +194,3 @@
 extern Boolean GEN_NONGEN_SWITCH ;
 
 #endif /* ifndef _GC_GEN_H_ */
-
-
-
Index: src/gen/gen_adapt.cpp
===================================================================
--- src/gen/gen_adapt.cpp	(revision 566495)
+++ src/gen/gen_adapt.cpp	(working copy)
@@ -369,9 +369,22 @@
   POINTER_SIZE_INT curr_nos_size = space_committed_size((Space*)fspace);
 
   //if( ABS_DIFF(new_nos_size, curr_nos_size) < NOS_COPY_RESERVE_DELTA )
-  if( new_nos_size == curr_nos_size )
+  if( new_nos_size == curr_nos_size ){
     return;
-
+  }else if ( new_nos_size >= curr_nos_size ){
+    INFO2("gc.process", "GC: gc_gen space adjustment after GC["<<gc->num_collections<<"] ...");
+    POINTER_SIZE_INT adapt_size = new_nos_size - curr_nos_size;
+    INFO2("gc.space", "GC: Space Adapt: mos ---> nos ("
+      <<verbose_print_size(adapt_size)<<")\n");
+  }else{
+    INFO2("gc.process", "GC: gc_gen space adjustment after GC["<<gc->num_collections<<"] ...");
+    POINTER_SIZE_INT adapt_size = curr_nos_size - new_nos_size;
+    INFO2("gc.space", "GC: Space Adapt: nos ---> mos ("
+      <<verbose_print_size(adapt_size)<<")\n");
+  }
 
   ... heap_start + LOS_HEAD_RESERVE_FOR_HEAP_NULL + gc->committed_heap_size;
@@ -384,6 +397,10 @@
   fspace->num_total_blocks = fspace->num_managed_blocks;
   fspace->first_block_idx = ((Block_Header*)nos_boundary)->block_idx;
   fspace->free_block_idx = fspace->first_block_idx;
+  if( NOS_PARTIAL_FORWARD )
+    object_forwarding_boundary = (void*)&fspace->blocks[fspace->num_managed_blocks >>1];
+  else
+    object_forwarding_boundary = (void*)&fspace->blocks[fspace->num_managed_blocks];
 
   mspace->heap_end = nos_boundary;
   mspace->committed_heap_size = new_mos_size;
<num_collections<<"] ...\n"); + POINTER_SIZE_INT adapt_size = curr_nos_size - new_nos_size; + INFO2("gc.space", "GC: Space Adapt: nos ---> mos (" + <is_los_collected = false; + + gc->stats = stats; +} + +void gc_gen_stats_destruct(GC_Gen* gc) +{ + STD_FREE(gc->stats); +} + +void gc_gen_stats_reset_before_collection(GC_Gen* gc) +{ + GC_Gen_Stats* stats = gc->stats; + + if(gc_match_kind((GC*)gc, MINOR_COLLECTION)){ + stats->nos_surviving_obj_num_minor = 0; + stats->nos_surviving_obj_size_minor = 0; + stats->los_suviving_obj_num = 0; + stats->los_suviving_obj_size = 0; + stats->is_los_collected = false; + }else{ + stats->nos_mos_suviving_obj_num_major = 0; + stats->nos_mos_suviving_obj_size_major = 0; + stats->los_suviving_obj_num = 0; + stats->los_suviving_obj_size = 0; + stats->is_los_collected = false; + } +} + +void gc_gen_stats_update_after_collection(GC_Gen* gc) +{ + Collector** collector = gc->collectors; + GC_Gen_Stats* gc_gen_stats = gc->stats; + GC_Gen_Collector_Stats* collector_stats; + Boolean is_los_collected = gc_gen_stats->is_los_collected; + + if(gc_match_kind((GC*)gc, MINOR_COLLECTION)) { + + for (unsigned int i=0; inum_active_collectors; i++) { + collector_stats = (GC_Gen_Collector_Stats*)collector[i]->stats; + gc_gen_stats->nos_surviving_obj_num_minor += collector_stats->nos_obj_num_moved_minor; + gc_gen_stats->nos_surviving_obj_size_minor += collector_stats->nos_obj_size_moved_minor; + } + + gc_gen_stats->nos_surviving_ration_minor = ((float)gc_gen_stats->nos_surviving_obj_size_minor)/gc->nos->committed_heap_size; + + }else{ + + for (unsigned int i=0; i < gc->num_active_collectors; i++) { + collector_stats = (GC_Gen_Collector_Stats*)collector[i]->stats; + gc_gen_stats->nos_mos_suviving_obj_num_major += collector_stats->nos_mos_obj_num_moved_major; + gc_gen_stats->nos_mos_suviving_obj_size_major += collector_stats->nos_mos_obj_size_moved_major; + + /*need to accumulate the los related info if los is collected when major*/ + if(is_los_collected) { + gc_gen_stats->los_suviving_obj_num += collector_stats->los_obj_num_moved_major; + gc_gen_stats->los_suviving_obj_size += collector_stats->los_obj_size_moved_major; + } + } + + gc_gen_stats->nos_mos_suviving_ratio_major = ((float)gc_gen_stats->nos_mos_suviving_obj_size_major)/(gc->nos->committed_heap_size+gc->mos->committed_heap_size); + } + + if (is_los_collected) { + gc_gen_stats->los_surviving_ration = ((float)gc_gen_stats->los_suviving_obj_size)/gc->los->committed_heap_size; + } +} + +void gc_gen_stats_verbose(GC_Gen* gc) +{ + GC_Gen_Stats* stats = gc->stats; + Boolean is_los_collected = stats->is_los_collected; + if (gc_match_kind((GC*)gc, MINOR_COLLECTION)){ + TRACE2("gc.space", "GC: Fspace Collection stats: " + <<"\nGC: collection algo: "<<((stats->nos_collection_algo_minor==MINOR_NONGEN_FORWARD_POOL)?"nongen forward":"gen forward") + <<"\nGC: num surviving objs: "<nos_surviving_obj_num_minor + <<"\nGC: size surviving objs: "<nos_surviving_obj_size_minor) + <<"\nGC: surviving ratio: "<<(int)(stats->nos_surviving_ration_minor*100)<<"%\n"); + }else{ + TRACE2("gc.space", "GC: Mspace Collection stats: " + <<"\nGC: collection algo: "<<((stats->nos_mos_collection_algo_major==MAJOR_COMPACT_SLIDE)?"slide compact":"move compact") + <<"\nGC: num surviving objs: "<nos_mos_suviving_obj_num_major + <<"\nGC: size surviving objs: "<nos_mos_suviving_obj_size_major) + <<"\nGC: surviving ratio: "<<(int)(stats->nos_mos_suviving_ratio_major*100)<<"%\n"); + } + + if(stats->is_los_collected) { /*if los is collected, need to output los related 
info*/
+    TRACE2("gc.space", "GC: Lspace Collection stats: "
+      <<"\nGC: collection algo: "<<((stats->los_collection_algo==MAJOR_COMPACT_SLIDE)?"slide compact":"mark sweep")
+      <<"\nGC: num surviving objs: "<<stats->los_suviving_obj_num
+      <<"\nGC: size surviving objs: "<<verbose_print_size(stats->los_suviving_obj_size)
+      <<"\nGC: surviving ratio: "<<(int)(stats->los_surviving_ration*100)<<"%\n");
+  }
+
+}
+
+void gc_gen_collector_stats_initialize(Collector* collector)
+{
+  GC_Gen_Collector_Stats* stats = (GC_Gen_Collector_Stats*)STD_MALLOC(sizeof(GC_Gen_Collector_Stats));
+  memset(stats, 0, sizeof(GC_Gen_Collector_Stats));
+  collector->stats = (void*)stats;
+}
+
+void gc_gen_collector_stats_destruct(Collector* collector)
+{
+  STD_FREE(collector->stats);
+}
+
+void gc_gen_collector_stats_reset(GC_Gen* gc)
+{
+  Collector** collector = gc->collectors;
+  GC_Gen_Collector_Stats* stats;
+  for (unsigned int i=0; i<gc->num_active_collectors; i++){
+    stats = (GC_Gen_Collector_Stats*)collector[i]->stats;
+    memset(stats, 0, sizeof(GC_Gen_Collector_Stats));
+  }
+}
+
+void gc_gen_collector_stats_verbose_minor_collection(GC_Gen* gc)
+{
+  Collector** collector = gc->collectors;
+  GC_Gen_Collector_Stats* stats;
+
+  /*variable used to accumulate each collector's stats when minor collection*/
+  unsigned int total_process_rootset_ref = 0;
+  unsigned int total_mark_nos_obj_num = 0;
+  unsigned int total_mark_non_nos_obj_num = 0;
+  unsigned int total_forward_obj_num = 0;
+  POINTER_SIZE_INT total_forward_obj_size = 0;
+
+  for (unsigned int i=0; i<gc->num_active_collectors; i++){
+    stats = (GC_Gen_Collector_Stats*)collector[i]->stats;
+
+    total_process_rootset_ref += stats->process_rootset_ref_num;
+    total_mark_nos_obj_num += stats->nos_obj_num_marked_minor;
+    total_mark_non_nos_obj_num += stats->nonnos_obj_num_marked_minor;
+    total_forward_obj_num += stats->nos_obj_num_moved_minor;
+    total_forward_obj_size += stats->nos_obj_size_moved_minor;
+
+    /*output each collector's stats*/
+    TRACE2("gc.collect", "GC: Collector["<<((POINTER_SIZE_INT)collector[i]->thread_handle)<<"] stats when collection:"
+      <<"\nGC: process rootset ref num: "<<stats->process_rootset_ref_num
+      <<"\nGC: mark nos obj num: "<<stats->nos_obj_num_marked_minor
+      <<"\nGC: mark nonnos obj num: "<<stats->nonnos_obj_num_marked_minor
+      <<"\nGC: forward obj num: "<<stats->nos_obj_num_moved_minor
+      <<"\nGC: forward obj size: "<<verbose_print_size(stats->nos_obj_size_moved_minor)<<"\n");
+  }
+
+  /*output accumulated info for all collectors*/
+  TRACE2("gc.collect", "GC: Total Collector Stats when collection: "
+    <<"\nGC: process rootset ref num: "<<total_process_rootset_ref
+    <<"\nGC: mark nos obj num: "<<total_mark_nos_obj_num
+    <<"\nGC: mark nonnos obj num: "<<total_mark_non_nos_obj_num
+    <<"\nGC: forward obj num: "<<total_forward_obj_num
+    <<"\nGC: forward obj size: "<<verbose_print_size(total_forward_obj_size)<<"\n");
+}
+
+void gc_gen_collector_stats_verbose_major_collection(GC_Gen* gc)
+{
+  Collector** collector = gc->collectors;
+  GC_Gen_Collector_Stats* stats;
+
+  Boolean is_los_collected = gc->stats->is_los_collected;
+
+  /*variable used to accumulate each collector's stats when major collection*/
+  unsigned int total_process_rootset_ref = 0;
+  unsigned int total_mark_heap_live_obj_num = 0;
+  unsigned int total_move_mos_nos_live_obj_num = 0;
+  POINTER_SIZE_INT total_move_mos_nos_live_obj_size = 0;
+  unsigned int total_move_los_live_obj_num = 0;
+  POINTER_SIZE_INT total_move_los_live_obj_size = 0;
+
+  for (unsigned int i=0; i<gc->num_active_collectors; i++){
+    stats = (GC_Gen_Collector_Stats*)collector[i]->stats;
+
+    total_process_rootset_ref += stats->process_rootset_ref_num;
+    total_mark_heap_live_obj_num += stats->num_obj_marked_major;
+    total_move_mos_nos_live_obj_num += stats->nos_mos_obj_num_moved_major;
+    total_move_mos_nos_live_obj_size += stats->nos_mos_obj_size_moved_major;
+    if (is_los_collected){/*if los is collected when major collection happened, need to accumulate los related info*/
+      total_move_los_live_obj_num += stats->los_obj_num_moved_major;
+      total_move_los_live_obj_size += stats->los_obj_size_moved_major;
+    }
+    if(is_los_collected){
+      TRACE2("gc.collect", "GC: Collector["<<((POINTER_SIZE_INT)collector[i]->thread_handle)<<"] stats when collection:"
+        <<"\nGC: process rootset ref num: "<<stats->process_rootset_ref_num
+        <<"\nGC: mark obj num: "<<stats->num_obj_marked_major
+        <<"\nGC: move mos and nos obj num: "<<stats->nos_mos_obj_num_moved_major
+        <<"\nGC: move obj size: "<<verbose_print_size(stats->nos_mos_obj_size_moved_major)
+        <<"\nGC: move los obj num: "<<stats->los_obj_num_moved_major
+        <<"\nGC: move obj size: "<<verbose_print_size(stats->los_obj_size_moved_major)<<"\n");
+    }else{
+      TRACE2("gc.collect", "GC: Collector["<<((POINTER_SIZE_INT)collector[i]->thread_handle)<<"] stats when collection:"
+        <<"\nGC: process rootset ref num: "<<stats->process_rootset_ref_num
+        <<"\nGC: mark obj num: "<<stats->num_obj_marked_major
+        <<"\nGC: move obj num: "<<stats->nos_mos_obj_num_moved_major
+        <<"\nGC: move obj size: "<<verbose_print_size(stats->nos_mos_obj_size_moved_major)<<"\n");
+    }
+  }
+
+  if(is_los_collected){/*if los is collected when major collection happened, need to output los related collector info*/
+    TRACE2("gc.collect", "GC: Total Collector Stats when collection: "
+      <<"\nGC: process rootset ref num: "<<total_process_rootset_ref
+      <<"\nGC: mark obj num: "<<total_mark_heap_live_obj_num
+      <<"\nGC: move mos and nos obj num: "<<total_move_mos_nos_live_obj_num
+      <<"\nGC: move obj size: "<<verbose_print_size(total_move_mos_nos_live_obj_size)
+      <<"\nGC: move los obj num: "<<total_move_los_live_obj_num
+      <<"\nGC: move obj size: "<<verbose_print_size(total_move_los_live_obj_size)<<"\n");
+  }else{
+    TRACE2("gc.collect", "GC: Total Collector Stats when collection: "
+      <<"\nGC: process rootset ref num: "<<total_process_rootset_ref
+      <<"\nGC: mark obj num: "<<total_mark_heap_live_obj_num
+      <<"\nGC: move obj num: "<<total_move_mos_nos_live_obj_num
+      <<"\nGC: move obj size: "<<verbose_print_size(total_move_mos_nos_live_obj_size)<<"\n");
+  }
+}
Index: src/gen/gen_stats.h
===================================================================
--- src/gen/gen_stats.h	(revision 0)
+++ src/gen/gen_stats.h	(working copy)
@@ ... @@
+typedef struct GC_Gen_Stats {
+  unsigned int num_minor_collections;
+  unsigned int num_major_collections;
+  unsigned int num_fallback_collections;
+
+  int64 total_pause_time;   /* total time spent in collection */
+  int64 total_mutator_time; /* total time spent in mutator execution */
+
+  POINTER_SIZE_INT total_size_nos_alloc;
+  unsigned int obj_num_los_alloc;
+  POINTER_SIZE_INT total_size_los_alloc;
+
+  /*minor related info*/
+  int nos_collection_algo_minor;
+  unsigned int nos_surviving_obj_num_minor;
+  POINTER_SIZE_INT nos_surviving_obj_size_minor;
+  float nos_surviving_ration_minor;
+
+  /*major related info*/
+  int nos_mos_collection_algo_major;
+  unsigned int nos_mos_suviving_obj_num_major;
+  POINTER_SIZE_INT nos_mos_suviving_obj_size_major;
+  float nos_mos_suviving_ratio_major;
+
+  /*los related info*/
+  int los_collection_algo;
+  unsigned int los_suviving_obj_num;
+  POINTER_SIZE_INT los_suviving_obj_size;
+  float los_surviving_ration;
+  Boolean is_los_collected; /*whether los is collected in this collection*/
+}GC_Gen_Stats;
+
+inline void gc_gen_stats_set_nos_algo(GC_Gen* gc, int algo)
+{
+  gc->stats->nos_collection_algo_minor = algo;
+}
+
+inline void gc_gen_stats_set_mos_algo(GC_Gen* gc, int algo)
+{
+  gc->stats->nos_mos_collection_algo_major = algo;
+}
+
+inline void gc_gen_stats_set_los_algo(GC_Gen* gc, int algo)
+{
+  gc->stats->los_collection_algo = algo;
+}
+
+inline void gc_gen_stats_set_los_collected_flag(GC_Gen* gc, Boolean flag)
+{
+  gc->stats->is_los_collected = flag;
+}
+
+inline void gc_gen_update_nos_alloc_obj_stats(GC_Gen_Stats* stats, POINTER_SIZE_INT size)
+{
+  stats->total_size_nos_alloc += size;
+}
+
+inline void gc_gen_update_los_alloc_obj_stats(GC_Gen_Stats* stats, POINTER_SIZE_INT size)
+{
+  stats->obj_num_los_alloc++;
+  stats->total_size_los_alloc += size;
+}
+
+void gc_gen_stats_initialize(GC_Gen* gc);
+void gc_gen_stats_destruct(GC_Gen* gc);
+void gc_gen_stats_reset_before_collection(GC_Gen* gc);
+void gc_gen_stats_update_after_collection(GC_Gen* gc);
+void gc_gen_stats_verbose(GC_Gen* gc);
+
+typedef struct GC_Gen_Collector_Stats {
+  unsigned int process_rootset_ref_num;
+
+  /*minor related info*/
+  unsigned int nos_obj_num_moved_minor;
+  POINTER_SIZE_INT nos_obj_size_moved_minor;
+  unsigned int nos_obj_num_marked_minor;
+  unsigned int nonnos_obj_num_marked_minor;
+
+  /*major related info*/
+  unsigned int num_obj_marked_major;
+  unsigned int nos_mos_obj_num_moved_major;
+  POINTER_SIZE_INT nos_mos_obj_size_moved_major;
+  unsigned int los_obj_num_moved_major;
+  POINTER_SIZE_INT los_obj_size_moved_major;
+
+}GC_Gen_Collector_Stats;
+
+inline void gc_gen_collector_update_rootset_ref_num(GC_Gen_Collector_Stats* stats)
+{
+  stats->process_rootset_ref_num++;
+}
+
+inline void gc_gen_collector_update_moved_nos_obj_stats_minor(GC_Gen_Collector_Stats* stats, POINTER_SIZE_INT size)
+{
+  stats->nos_obj_num_moved_minor++;
+  stats->nos_obj_size_moved_minor += size;
+}
+
+inline void gc_gen_collector_update_marked_nos_obj_stats_minor(GC_Gen_Collector_Stats* stats)
+{
+  stats->nos_obj_num_marked_minor++;
+}
+
+inline void gc_gen_collector_update_marked_nonnos_obj_stats_minor(GC_Gen_Collector_Stats* stats)
+{
+  stats->nonnos_obj_num_marked_minor++;
+}
+
+inline void gc_gen_collector_update_marked_obj_stats_major(GC_Gen_Collector_Stats* stats)
+{
+  stats->num_obj_marked_major++;
+}
+
+inline void gc_gen_collector_update_moved_nos_mos_obj_stats_major(GC_Gen_Collector_Stats* stats, POINTER_SIZE_INT size)
+{
+  stats->nos_mos_obj_num_moved_major++;
+  stats->nos_mos_obj_size_moved_major += size;
+}
+
+inline void gc_gen_collector_update_moved_los_obj_stats_major(GC_Gen_Collector_Stats* stats, POINTER_SIZE_INT size)
+{
+  stats->los_obj_num_moved_major++;
+  stats->los_obj_size_moved_major += size;
+}
+
+void gc_gen_collector_stats_reset(GC_Gen* gc);
+void gc_gen_collector_stats_initialize(Collector* collector);
+void gc_gen_collector_stats_destruct(Collector* collector);
+void gc_gen_collector_stats_verbose_minor_collection(GC_Gen* gc);
+void gc_gen_collector_stats_verbose_major_collection(GC_Gen* gc);
+
+#endif
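
The inline updaters above are intended to be cheap enough for hot collector paths and to disappear entirely when GC_GEN_STATS is off. A minimal standalone model of the call pattern — all types and names below are stand-ins local to the sketch, not patch code:

#define GC_GEN_STATS
#include <cstdio>

struct CollectorStats { unsigned moved_num; unsigned long moved_size; };

// Counterpart of the patch's gc_gen_collector_update_* inlines.
static inline void update_moved_obj(CollectorStats* s, unsigned long size) {
  s->moved_num++;
  s->moved_size += size;
}

static void forward_object(CollectorStats* stats, unsigned long obj_size) {
  /* ... the actual forwarding work would happen here ... */
#ifdef GC_GEN_STATS
  update_moved_obj(stats, obj_size);  // compiled out when stats are disabled
#endif
}

int main() {
  CollectorStats stats = {0, 0};
  forward_object(&stats, 24);
  forward_object(&stats, 40);
  printf("moved %u objs, %lu bytes\n", stats.moved_num, stats.moved_size);
  return 0;
}

Per-collector structs are aggregated only after the parallel phase (as in gc_gen_stats_update_after_collection), which keeps the hot-path updates free of synchronization.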
Index: src/los/lspace.cpp
===================================================================
--- src/los/lspace.cpp	(revision 566495)
+++ src/los/lspace.cpp	(working copy)
@@ -62,6 +62,7 @@
   lspace->num_collections = 0;
   lspace->time_collections = 0;
   lspace->survive_ratio = 0.5f;
+  lspace->last_alloced_size = 0;
 
   lspace->accumu_alloced_size = 0;
   lspace->total_alloced_size = 0;
@@ -129,3 +130,4 @@
 {
   return lspace->failure_size;
 }
+
Index: src/los/lspace.h
===================================================================
--- src/los/lspace.h	(revision 566495)
+++ src/los/lspace.h	(working copy)
@@ -84,7 +84,10 @@
 void lspace_reset_for_slide(Lspace* lspace);
 void lspace_collection(Lspace* lspace);
 
-inline POINTER_SIZE_INT lspace_free_memory_size(Lspace* lspace){ /* FIXME:: */ return 0; }
+inline POINTER_SIZE_INT lspace_free_memory_size(Lspace* lspace)
+{ /* FIXME:: */
+  return (lspace->committed_heap_size - (POINTER_SIZE_INT)lspace->last_surviving_size);
+}
 inline POINTER_SIZE_INT lspace_committed_size(Lspace* lspace){ return lspace->committed_heap_size; }
 
 inline Partial_Reveal_Object* lspace_get_next_marked_object( Lspace* lspace, unsigned int* iterate_index)
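
lspace_free_memory_size above no longer returns the placeholder 0: it estimates LOS free space as committed bytes minus the live bytes recorded by the most recent sweep or compaction (last_surviving_size, which the next section updates in both the sliding-compaction and sweep paths). This is a cheap approximation rather than an exact free-list sum:

#include <cstdio>
int main() {
  unsigned long committed = 32UL << 20;      // 32MB committed LOS
  unsigned long last_surviving = 5UL << 20;  // live bytes seen at last sweep
  printf("free ~= %lu MB\n", (committed - last_surviving) >> 20); // "free ~= 27 MB"
  return 0;
}

The estimate goes stale between collections (new large objects allocated since the last sweep are not subtracted), which is acceptable for the verbose heap report it feeds.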
GC_OBJECT_ALIGNMENT : 0 ; @@ -276,6 +285,7 @@ lspace->scompact_fa_start = dest_addr; lspace->scompact_fa_end= lspace->heap_end; + lspace->last_surviving_size = (POINTER_SIZE_INT)dest_addr - (POINTER_SIZE_INT)lspace->heap_start; return; } @@ -353,7 +363,6 @@ assert(tuner->kind == TRANS_NOTHING); assert(!tuner->tuning_size); new_fa_size = (POINTER_SIZE_INT)lspace->scompact_fa_end - (POINTER_SIZE_INT)lspace->scompact_fa_start; - if(new_fa_size == 0) break; Free_Area* fa = free_area_new(lspace->scompact_fa_start, new_fa_size); if(new_fa_size >= GC_OBJ_SIZE_THRESHOLD) free_pool_add_area(lspace->free_pool, fa); break; @@ -377,6 +386,12 @@ void lspace_sweep(Lspace* lspace) { + TRACE2("gc.process", "GC: lspace sweep algo start ...\n"); + +#ifdef GC_GEN_STATS + GC_Gen_Stats* stats = ((GC_Gen*)lspace->gc)->stats; + gc_gen_stats_set_los_collected_flag((GC_Gen*)lspace->gc, true); +#endif unsigned int mark_bit_idx = 0; POINTER_SIZE_INT cur_size = 0; void *cur_area_start, *cur_area_end; @@ -395,6 +410,10 @@ obj_size += (hashcode_is_attached(p_next_obj))?GC_OBJECT_ALIGNMENT:0; #endif lspace->last_surviving_size += ALIGN_UP_TO_KILO(obj_size); +#ifdef GC_GEN_STATS + stats->los_suviving_obj_num++; + stats->los_suviving_obj_size += obj_size; +#endif } cur_area_start = (void*)ALIGN_UP_TO_KILO(p_prev_obj); @@ -425,6 +444,10 @@ obj_size += (hashcode_is_attached(p_next_obj))?GC_OBJECT_ALIGNMENT:0; #endif lspace->last_surviving_size += ALIGN_UP_TO_KILO(obj_size); +#ifdef GC_GEN_STATS + stats->los_suviving_obj_num++; + stats->los_suviving_obj_size += obj_size; +#endif } #ifdef USE_32BITS_HASHCODE @@ -448,6 +471,7 @@ mark_bit_idx = 0; assert(!lspace_get_first_marked_object(lspace, &mark_bit_idx)); + TRACE2("gc.process", "GC: end of lspace sweep algo ...\n"); return; } Index: src/mark_compact/fallback_mark_scan.cpp =================================================================== --- src/mark_compact/fallback_mark_scan.cpp (revision 566495) +++ src/mark_compact/fallback_mark_scan.cpp (working copy) @@ -24,6 +24,9 @@ #include "../gen/gen.h" #include "../finalizer_weakref/finalizer_weakref.h" +#ifdef GC_GEN_STATS +#include "../gen/gen_stats.h" +#endif static void scan_slot(Collector* collector, REF *p_ref) { if( read_slot(p_ref) == NULL) return; @@ -48,6 +51,10 @@ if(!obj_mark_in_vt(p_obj)) return; +#ifdef GC_GEN_STATS + GC_Gen_Collector_Stats* stats = (GC_Gen_Collector_Stats*)collector->stats; + gc_gen_collector_update_marked_obj_stats_major(stats); +#endif if( !object_has_ref_field(p_obj) ) return; /* scan array object */ @@ -102,6 +109,9 @@ { GC* gc = collector->gc; GC_Metadata* metadata = gc->metadata; +#ifdef GC_GEN_STATS + GC_Gen_Collector_Stats* stats = (GC_Gen_Collector_Stats*)collector->stats; +#endif assert(gc_match_kind(gc, FALLBACK_COLLECTION)); @@ -126,6 +136,10 @@ collector_tracestack_push(collector, p_ref); +#ifdef GC_GEN_STATS + gc_gen_collector_update_rootset_ref_num(stats); +#endif + } root_set = pool_iterator_next(metadata->gc_rootset_pool); } @@ -211,4 +225,3 @@ } #endif - Index: src/mark_compact/mspace.cpp =================================================================== --- src/mark_compact/mspace.cpp (revision 566495) +++ src/mark_compact/mspace.cpp (working copy) @@ -172,6 +172,3 @@ { return mspace->expected_threshold_ratio; } - - - Index: src/mark_compact/mspace_alloc.cpp =================================================================== --- src/mark_compact/mspace_alloc.cpp (revision 566495) +++ src/mark_compact/mspace_alloc.cpp (working copy) @@ -75,4 +75,3 @@ return p_return; } - 
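For context on the lspace changes above: lspace_sweep accumulates last_surviving_size by rounding each live object up to a 1 KB boundary, and the reworked lspace_free_memory_size then reports committed size minus that survivor estimate. The following is a minimal standalone sketch of this accounting, not the real Lspace type; Lspace_Sketch, sweep_account and free_memory_size are illustrative names, and ALIGN_UP_TO_KILO is assumed to round up to the next 1 KB multiple as in the GC headers.

/* Sketch only: mirrors the KB-granular survivor accounting used by
 * lspace_sweep and lspace_free_memory_size; names are hypothetical. */
#include <stddef.h>

#define KILO ((size_t)1024)
#define ALIGN_UP_TO_KILO(x) (((size_t)(x) + (KILO - 1)) & ~(KILO - 1))

typedef struct Lspace_Sketch {
  size_t committed_heap_size;
  size_t last_surviving_size; /* reset to 0 before each sweep */
} Lspace_Sketch;

/* called once per live object found during the sweep */
static void sweep_account(Lspace_Sketch* ls, size_t obj_size)
{
  ls->last_surviving_size += ALIGN_UP_TO_KILO(obj_size);
}

/* the free estimate is conservative: rounding makes survivors look larger */
static size_t free_memory_size(Lspace_Sketch* ls)
{
  return ls->committed_heap_size - ls->last_surviving_size;
}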
Index: src/mark_compact/mspace_collect_compact.cpp =================================================================== --- src/mark_compact/mspace_collect_compact.cpp (revision 566495) +++ src/mark_compact/mspace_collect_compact.cpp (working copy) @@ -30,6 +30,9 @@ static volatile Block_Header* next_block_for_compact; static volatile Block_Header* next_block_for_target; +#ifdef GC_GEN_STATS +#include "../gen/gen_stats.h" +#endif void mspace_update_info_after_space_tuning(Mspace* mspace) { Space_Tuner *tuner = mspace->gc->tuner; @@ -308,9 +311,25 @@ //For_LOS_extend if(gc->tuner->kind != TRANS_NOTHING){ + + TRACE2("gc.process", "GC: slide compact algo start ... \n"); collector_execute_task(gc, (TaskType)slide_compact_mspace, (Space*)mspace); + TRACE2("gc.process", "\nGC: end of slide compact algo ... \n"); + +#ifdef GC_GEN_STATS + gc_gen_stats_set_mos_algo((GC_Gen*)gc, MAJOR_COMPACT_SLIDE); +#endif + }else if (gc_match_kind(gc, FALLBACK_COLLECTION)){ + + TRACE2("gc.process", "GC: slide compact algo start ... \n"); collector_execute_task(gc, (TaskType)slide_compact_mspace, (Space*)mspace); + TRACE2("gc.process", "\nGC: end of slide compact algo ... \n"); + +#ifdef GC_GEN_STATS + gc_gen_stats_set_los_collected_flag((GC_Gen*)gc, true); + gc_gen_stats_set_mos_algo((GC_Gen*)gc, MAJOR_COMPACT_SLIDE); +#endif //IS_MOVE_COMPACT = TRUE; //collector_execute_task(gc, (TaskType)move_compact_mspace, (Space*)mspace); //IS_MOVE_COMPACT = FALSE; @@ -318,17 +337,29 @@ switch(mspace->collect_algorithm){ case MAJOR_COMPACT_SLIDE: - collector_execute_task(gc, (TaskType)slide_compact_mspace, (Space*)mspace); + TRACE2("gc.process", "GC: slide compact algo start ... \n"); + collector_execute_task(gc, (TaskType)slide_compact_mspace, (Space*)mspace); + TRACE2("gc.process", "\nGC: end of slide compact algo ... \n"); +#ifdef GC_GEN_STATS + gc_gen_stats_set_los_collected_flag((GC_Gen*)gc, true); + gc_gen_stats_set_mos_algo((GC_Gen*)gc, MAJOR_COMPACT_SLIDE); +#endif break; case MAJOR_COMPACT_MOVE: IS_MOVE_COMPACT = TRUE; - collector_execute_task(gc, (TaskType)move_compact_mspace, (Space*)mspace); - IS_MOVE_COMPACT = FALSE; - break; - + + TRACE2("gc.process", "GC: move compact algo start ... \n"); + collector_execute_task(gc, (TaskType)move_compact_mspace, (Space*)mspace); + TRACE2("gc.process", "\nGC: end of move compact algo ... 
\n"); + IS_MOVE_COMPACT = FALSE; +#ifdef GC_GEN_STATS + gc_gen_stats_set_mos_algo((GC_Gen*)gc, MAJOR_COMPACT_MOVE); +#endif + break; + default: - printf("\nThe speficied major collection algorithm doesn't exist!\n"); + DIE2("gc.collect", "The speficied major collection algorithm doesn't exist!"); exit(0); break; } @@ -337,8 +368,3 @@ return; } - - - - - Index: src/mark_compact/mspace_move_compact.cpp =================================================================== --- src/mark_compact/mspace_move_compact.cpp (revision 566495) +++ src/mark_compact/mspace_move_compact.cpp (working copy) @@ -27,6 +27,10 @@ #include "../common/hashcode.h" #endif +#ifdef GC_GEN_STATS +#include "../gen/gen_stats.h" +#endif + struct GC_Gen; Space* gc_get_nos(GC_Gen* gc); Space* gc_get_mos(GC_Gen* gc); @@ -46,7 +50,11 @@ Hashcode_Buf* new_hashcode_buf = hashcode_buf_create(); hashcode_buf_init(new_hashcode_buf); #endif - + +#ifdef GC_GEN_STATS + GC_Gen_Collector_Stats* stats = (GC_Gen_Collector_Stats*)collector->stats; +#endif + while( curr_block ){ void* start_pos; Partial_Reveal_Object* p_obj = block_get_first_marked_object(curr_block, &start_pos); @@ -66,7 +74,11 @@ assert( obj_is_marked_in_vt(p_obj)); /* we don't check if it's set, since only remaining objs from last NOS partial collection need it. */ obj_unmark_in_oi(p_obj); - + +#ifdef GC_GEN_STATS + gc_gen_collector_update_moved_nos_mos_obj_stats_major(stats, vm_object_size(p_obj)); +#endif + #ifdef USE_32BITS_HASHCODE move_compact_process_hashcode(p_obj, curr_block->hashcode_buf, new_hashcode_buf); #endif @@ -168,6 +180,9 @@ /* Pass 1: ************************************************** mark all live objects in heap, and save all the slots that have references that are going to be repointed */ + + TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: pass1: mark live objects in heap ..."); + unsigned int old_num = atomic_cas32( &num_marking_collectors, 0, num_active_collectors+1); if(!gc_match_kind(gc, FALLBACK_COLLECTION)) @@ -194,9 +209,14 @@ num_marking_collectors++; } while(num_marking_collectors != num_active_collectors + 1); - + + TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: finish pass1"); + /* Pass 2: ************************************************** move object and set the forwarding offset table */ + + TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: pass2: move object and set the forwarding offset table ..."); + atomic_cas32( &num_moving_collectors, 0, num_active_collectors+1); mspace_move_objects(collector, mspace); @@ -217,9 +237,14 @@ } while(num_moving_collectors != num_active_collectors + 1); if(!gc->collect_result) return; - + + TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: finish pass2"); + /* Pass 3: ************************************************** update all references whose pointed objects were moved */ + + TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: pass3: update all references ..."); + old_num = atomic_cas32( &num_fixing_collectors, 0, num_active_collectors+1); mspace_fix_repointed_refs(collector, mspace); @@ -234,9 +259,13 @@ } while(num_fixing_collectors != num_active_collectors + 1); + TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: finish pass3"); /* Pass 4: ************************************************** restore obj_info */ + + TRACE2("gc.process", "GC: 
collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: pass4: restore obj_info ..."); + atomic_cas32( &num_restoring_collectors, 0, num_active_collectors); collector_restore_obj_info(collector); @@ -252,16 +281,21 @@ atomic_inc32(&num_extending_collectors); while(num_extending_collectors != num_active_collectors); } - + + TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: finish pass4"); + /* Leftover: ************************************************** */ - if( collector->thread_handle != 0 ) return; - + if( (POINTER_SIZE_INT)collector->thread_handle != 0 ){ + TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"] finished"); + return; + } mspace_reset_after_compaction(mspace); fspace_reset_for_allocation(fspace); gc_set_pool_clear(gc->metadata->gc_rootset_pool); + TRACE2("gc.process", "GC: collector[0] finished"); return; } Index: src/mark_compact/mspace_slide_compact.cpp =================================================================== --- src/mark_compact/mspace_slide_compact.cpp (revision 566495) +++ src/mark_compact/mspace_slide_compact.cpp (working copy) @@ -24,7 +24,11 @@ #include "../los/lspace.h" #include "../finalizer_weakref/finalizer_weakref.h" +#ifdef GC_GEN_STATS +#include "../gen/gen_stats.h" +#endif + struct GC_Gen; Space* gc_get_nos(GC_Gen* gc); Space* gc_get_mos(GC_Gen* gc); @@ -51,7 +55,11 @@ #ifdef USE_32BITS_HASHCODE collector->hashcode_set = free_set_pool_get_entry(collector->gc->metadata); #endif - + +#ifdef GC_GEN_STATS + GC_Gen_Collector_Stats* stats = (GC_Gen_Collector_Stats*)collector->stats; +#endif + while( curr_block ){ void* start_pos; Partial_Reveal_Object *first_obj = block_get_first_marked_obj_prefetch_next(curr_block, &start_pos); @@ -70,6 +78,11 @@ unsigned int obj_size = (unsigned int)((POINTER_SIZE_INT)start_pos - (POINTER_SIZE_INT)p_obj); + +#ifdef GC_GEN_STATS + gc_gen_collector_update_moved_nos_mos_obj_stats_major(stats, obj_size); +#endif + Obj_Info_Type obj_info = get_obj_info(p_obj); unsigned int obj_size_precompute = obj_size; @@ -417,6 +430,9 @@ *mark all live objects in heap, and save all the slots that *have references that are going to be repointed. 
*/ + + TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: pass1: mark live objects in heap ..."); + unsigned int old_num = atomic_cas32( &num_marking_collectors, 0, num_active_collectors+1); if(gc_match_kind(gc, FALLBACK_COLLECTION)) @@ -454,6 +470,8 @@ } while(num_marking_collectors != num_active_collectors + 1); + TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: finish pass1"); + /* Pass 2: ************************************************** assign target addresses for all to-be-moved objects */ atomic_cas32( &num_repointing_collectors, 0, num_active_collectors+1); @@ -462,13 +480,16 @@ if(gc_match_kind(gc, FALLBACK_COLLECTION)) fallback_clear_fwd_obj_oi(collector); #endif - + TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: pass2: computer target addresses for to-be-moved objects in mos and nos ..."); mspace_compute_object_target(collector, mspace); old_num = atomic_inc32(&num_repointing_collectors); /*last collector's world here*/ if( ++old_num == num_active_collectors ){ - if(lspace->move_object) lspace_compute_object_target(collector, lspace); + if(lspace->move_object) { + TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: pass2: computer target addresses for to-be-moved objects in los ..."); + lspace_compute_object_target(collector, lspace); + } gc->collect_result = gc_collection_result(gc); if(!gc->collect_result){ num_repointing_collectors++; @@ -480,10 +501,12 @@ } while(num_repointing_collectors != num_active_collectors + 1); if(!gc->collect_result) return; + TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: finish pass2"); /* Pass 3: ************************************************** *update all references whose objects are to be moved */ + TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: pass3: update all references ..."); old_num = atomic_cas32( &num_fixing_collectors, 0, num_active_collectors+1); mspace_fix_repointed_refs(collector, mspace); old_num = atomic_inc32(&num_fixing_collectors); @@ -506,8 +529,13 @@ } while(num_fixing_collectors != num_active_collectors + 1); + TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: finish pass3"); + /* Pass 4: ************************************************** move objects */ + + TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: pass4: move objects to target address ..."); + atomic_cas32( &num_moving_collectors, 0, num_active_collectors); mspace_sliding_compact(collector, mspace); @@ -515,8 +543,12 @@ atomic_inc32(&num_moving_collectors); while(num_moving_collectors != num_active_collectors); + TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: finish pass4"); + /* Pass 5: ************************************************** restore obj_info */ + + TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: pass5: restore obj_info ..."); atomic_cas32( &num_restoring_collectors, 0, num_active_collectors+1); collector_restore_obj_info(collector); @@ -541,8 +573,13 @@ atomic_inc32(&num_extending_collectors); while(num_extending_collectors != num_active_collectors); } - if( collector->thread_handle != 0 ) + + TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: finish pass5 ..."); + + if( (POINTER_SIZE_INT)collector->thread_handle != 0 
){ + TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"] finished"); return; + } /* Leftover: ************************************************** */ @@ -555,5 +592,6 @@ gc_set_pool_clear(gc->metadata->gc_rootset_pool); + TRACE2("gc.process", "GC: collector[0] finished"); return; } Index: src/mark_compact/space_tune_mark_scan.cpp =================================================================== --- src/mark_compact/space_tune_mark_scan.cpp (revision 566495) +++ src/mark_compact/space_tune_mark_scan.cpp (working copy) @@ -20,12 +20,20 @@ #include "../gen/gen.h" #include "../finalizer_weakref/finalizer_weakref.h" +#ifdef GC_GEN_STATS +#include "../gen/gen_stats.h" +#endif + static FORCE_INLINE void scan_slot(Collector* collector, REF *p_ref) { Partial_Reveal_Object *p_obj = read_slot(p_ref); if(p_obj == NULL) return; if(obj_mark_in_vt(p_obj)){ +#ifdef GC_GEN_STATS + GC_Gen_Collector_Stats* stats = (GC_Gen_Collector_Stats*)collector->stats; + gc_gen_collector_update_marked_obj_stats_major(stats); +#endif collector_tracestack_push(collector, p_obj); unsigned int obj_size = vm_object_size(p_obj); #ifdef USE_32BITS_HASHCODE @@ -117,6 +125,9 @@ { GC* gc = collector->gc; GC_Metadata* metadata = gc->metadata; +#ifdef GC_GEN_STATS + GC_Gen_Collector_Stats* stats = (GC_Gen_Collector_Stats*)collector->stats; +#endif /* reset the num_finished_collectors to be 0 by one collector. This is necessary for the barrier later. */ unsigned int num_active_collectors = gc->num_active_collectors; @@ -146,6 +157,11 @@ */ if(obj_mark_in_vt(p_obj)){ collector_tracestack_push(collector, p_obj); + +#ifdef GC_GEN_STATS + gc_gen_collector_update_rootset_ref_num(stats); +#endif + unsigned int obj_size = vm_object_size(p_obj); #ifdef USE_32BITS_HASHCODE obj_size += (hashcode_is_set(p_obj))?GC_OBJECT_ALIGNMENT:0; @@ -210,5 +226,6 @@ void trace_obj_in_space_tune_marking(Collector *collector, void *p_obj) { + obj_mark_in_vt((Partial_Reveal_Object*)p_obj); trace_object(collector, (Partial_Reveal_Object *)p_obj); } Index: src/thread/collector.cpp =================================================================== --- src/thread/collector.cpp (revision 566495) +++ src/thread/collector.cpp (working copy) @@ -101,7 +101,7 @@ collector_reset_weakref_sets(collector); #endif -#ifndef ONLY_SSPACE_IN_HEAP +#ifndef USE_MARK_SWEEP_GC /*For LOS_Shrink and LOS_Extend*/ if(collector->gc->tuner->kind != TRANS_NOTHING){ collector->non_los_live_obj_size = 0; @@ -224,12 +224,37 @@ return; } +#include "../common/gc_common.h" +#ifdef GC_GEN_STATS + +#include "../gen/gen_stats.h" + +void collector_init_stats(Collector* collector) +{ +#ifndef USE_MARK_SWEEP_GC + gc_gen_collector_stats_initialize(collector); +#endif +} + +void collector_destruct_stats(Collector* collector) +{ +#ifndef USE_MARK_SWEEP_GC + gc_gen_collector_stats_destruct(collector); +#endif +} + +#endif + void collector_destruct(GC* gc) { + TRACE2("gc.process", "GC: GC collectors destruct ..."); for(unsigned int i=0; inum_collectors; i++) { Collector* collector = gc->collectors[i]; collector_terminate_thread(collector); +#ifdef GC_GEN_STATS + collector_destruct_stats(collector); +#endif STD_FREE(collector); } @@ -241,21 +266,12 @@ unsigned int NUM_COLLECTORS = 0; -struct GC_Gen; -unsigned int gc_get_processor_num(GC_Gen*); -#ifdef ONLY_SSPACE_IN_HEAP -struct GC_MS; -unsigned int gc_ms_get_processor_num(GC_MS *gc); -#endif - void collector_initialize(GC* gc) { + TRACE2("gc.process", "GC: GC collectors init ... 
\n"); + //FIXME:: -#ifndef ONLY_SSPACE_IN_HEAP - unsigned int num_processors = gc_get_processor_num((GC_Gen*)gc); -#else - unsigned int num_processors = gc_ms_get_processor_num((GC_MS*)gc); -#endif + unsigned int num_processors = gc_get_processor_num(gc); unsigned int nthreads = max( max( MAJOR_COLLECTORS, MINOR_COLLECTORS), max(NUM_COLLECTORS, num_processors)); @@ -273,10 +289,14 @@ collector->gc = gc; collector_init_thread(collector); -#ifdef ONLY_SSPACE_IN_HEAP +#ifdef USE_MARK_SWEEP_GC collector_init_free_chunk_list(collector); #endif - + +#ifdef GC_GEN_STATS + collector_init_stats(collector); +#endif + gc->collectors[i] = collector; } Index: src/thread/collector.h =================================================================== --- src/thread/collector.h (revision 566495) +++ src/thread/collector.h (working copy) @@ -80,6 +80,12 @@ POINTER_SIZE_INT los_live_obj_size; POINTER_SIZE_INT segment_live_size[NORMAL_SIZE_SEGMENT_NUM]; unsigned int result; + + /*for collect statistics info*/ +#ifdef GC_GEN_STATS + void* stats; +#endif + }Collector; Index: src/thread/collector_alloc.h =================================================================== --- src/thread/collector_alloc.h (revision 566495) +++ src/thread/collector_alloc.h (working copy) @@ -54,6 +54,7 @@ if(p_targ_obj == NULL){ /* failed to forward an obj */ collector->result = FALSE; + TRACE2("gc.collect", "failed to forward an obj, minor collection failed."); return NULL; } Index: src/thread/mutator.cpp =================================================================== --- src/thread/mutator.cpp (revision 566495) +++ src/thread/mutator.cpp (working copy) @@ -44,7 +44,7 @@ else mutator->obj_with_fin = NULL; -#ifdef ONLY_SSPACE_IN_HEAP +#ifdef USE_MARK_SWEEP_GC allocator_init_local_chunks((Allocator*)mutator); #endif @@ -68,7 +68,7 @@ alloc_context_reset((Allocator*)mutator); -#ifdef ONLY_SSPACE_IN_HEAP +#ifdef USE_MARK_SWEEP_GC allocactor_destruct_local_chunks((Allocator*)mutator); #endif @@ -105,6 +105,7 @@ void gc_reset_mutator_context(GC* gc) { + TRACE2("gc.process", "GC: reset mutator context ...\n"); Mutator *mutator = gc->mutator_list; while (mutator) { alloc_context_reset((Allocator*)mutator); @@ -123,5 +124,3 @@ return; } - - Index: src/thread/mutator_alloc.cpp =================================================================== --- src/thread/mutator_alloc.cpp (revision 566495) +++ src/thread/mutator_alloc.cpp (working copy) @@ -24,6 +24,10 @@ #include "../mark_sweep/gc_ms.h" #include "../finalizer_weakref/finalizer_weakref.h" +#ifdef GC_GEN_STATS +#include "../gen/gen_stats.h" +#endif + //#define GC_OBJ_SIZE_STATISTIC #ifdef GC_OBJ_SIZE_STATISTIC @@ -73,11 +77,19 @@ gc_alloc_statistic_obj_distrubution(size); #endif -#ifndef ONLY_SSPACE_IN_HEAP - if ( size > GC_OBJ_SIZE_THRESHOLD ) +#ifndef USE_MARK_SWEEP_GC + if ( size > GC_OBJ_SIZE_THRESHOLD ){ p_obj = (Managed_Object_Handle)los_alloc(size, allocator); - else - p_obj = (Managed_Object_Handle)nos_alloc(size, allocator); +#ifdef GC_GEN_STATS + if (p_obj != NULL){ + GC_Gen* gc = (GC_Gen*)allocator->gc; + gc->stats->obj_num_los_alloc++; + gc->stats->total_size_los_alloc += size; + } +#endif + }else{ + p_obj = (Managed_Object_Handle)nos_alloc(size, allocator); + } #else p_obj = (Managed_Object_Handle)gc_ms_alloc(size, allocator); #endif @@ -116,7 +128,7 @@ /* Try to allocate an object from the current Thread Local Block */ Managed_Object_Handle p_obj; -#ifndef ONLY_SSPACE_IN_HEAP +#ifndef USE_MARK_SWEEP_GC p_obj = (Managed_Object_Handle)thread_local_alloc(size, allocator); 
#else p_obj = (Managed_Object_Handle)gc_ms_fast_alloc(size, allocator); Index: src/trace_forward/fspace.cpp =================================================================== --- src/trace_forward/fspace.cpp (revision 566495) +++ src/trace_forward/fspace.cpp (working copy) @@ -185,6 +185,11 @@ unsigned int mspace_free_block_idx; + +#ifdef GC_GEN_STATS +#include "../gen/gen_stats.h" +#endif + /* world is stopped when starting fspace_collection */ void fspace_collection(Fspace *fspace) { @@ -206,20 +211,30 @@ #ifdef MARK_BIT_FLIPPING - case MINOR_NONGEN_FORWARD_POOL: - collector_execute_task(gc, (TaskType)nongen_forward_pool, (Space*)fspace); - break; +case MINOR_NONGEN_FORWARD_POOL: + TRACE2("gc.process", "GC: nongen_forward_pool algo start ... \n"); + collector_execute_task(gc, (TaskType)nongen_forward_pool, (Space*)fspace); + TRACE2("gc.process", "\nGC: end of nongen forward algo ... \n"); +#ifdef GC_GEN_STATS + gc_gen_stats_set_nos_algo((GC_Gen*)gc, MINOR_NONGEN_FORWARD_POOL); +#endif + break; #endif /*#ifdef MARK_BIT_FLIPPING */ - case MINOR_GEN_FORWARD_POOL: - collector_execute_task(gc, (TaskType)gen_forward_pool, (Space*)fspace); - break; +case MINOR_GEN_FORWARD_POOL: + TRACE2("gc.process", "GC: gen_forward_pool algo start ... \n"); + collector_execute_task(gc, (TaskType)gen_forward_pool, (Space*)fspace); + TRACE2("gc.process", "\nGC: end of gen forward algo ... \n"); +#ifdef GC_GEN_STATS + gc_gen_stats_set_nos_algo((GC_Gen*)gc, MINOR_GEN_FORWARD_POOL); +#endif + break; - default: - printf("\nSpecified minor collection algorithm doesn't exist!\n"); - exit(0); - break; +default: + DIE2("gc.collect","Specified minor collection algorithm doesn't exist!"); + exit(0); + break; } return; Index: src/trace_forward/fspace_alloc.cpp =================================================================== --- src/trace_forward/fspace_alloc.cpp (revision 566495) +++ src/trace_forward/fspace_alloc.cpp (working copy) @@ -48,6 +48,9 @@ } /* FIXME:: the collection should be separated from the allocation */ +#ifdef GC_GEN_STATS +#include "../gen/gen_stats.h" +#endif void* fspace_alloc(unsigned size, Allocator *allocator) { void* p_return = NULL; @@ -64,6 +67,11 @@ /* after holding lock, try if other thread collected already */ if ( !space_has_free_block((Blocked_Space*)fspace) ) { if(attempts < 2) { +#ifdef GC_GEN_STATS + GC_Gen* gc = (GC_Gen*)allocator->gc; + GC_Gen_Stats* stats = gc->stats; + gc_gen_update_nos_alloc_obj_stats(stats, fspace->committed_heap_size); +#endif gc_reclaim_heap(allocator->gc, GC_CAUSE_NOS_IS_FULL); if(allocator->alloc_block){ vm_gc_unlock_enum(); @@ -85,6 +93,3 @@ return p_return; } - - - Index: src/trace_forward/fspace_gen_forward_pool.cpp =================================================================== --- src/trace_forward/fspace_gen_forward_pool.cpp (revision 566495) +++ src/trace_forward/fspace_gen_forward_pool.cpp (working copy) @@ -26,6 +26,9 @@ #include "../finalizer_weakref/finalizer_weakref.h" #include "../common/compressed_ref.h" +#ifdef GC_GEN_STATS +#include "../gen/gen_stats.h" +#endif static FORCE_INLINE Boolean fspace_object_to_be_forwarded(Partial_Reveal_Object *p_obj, Fspace *fspace) { assert(obj_belongs_to_nos(p_obj)); @@ -114,9 +117,14 @@ if( !addr_belongs_to_nos(p_ref) && address_belongs_to_gc_heap(p_ref, gc)) collector_remset_add_entry(collector, ( Partial_Reveal_Object**) p_ref); - if(obj_mark_in_oi(p_obj)) + if(obj_mark_in_oi(p_obj)){ scan_object(collector, p_obj); - +#ifdef GC_GEN_STATS + GC_Gen_Collector_Stats* stats = 
(GC_Gen_Collector_Stats*)collector->stats; + gc_gen_collector_update_marked_nos_obj_stats_minor(stats); +#endif + + } return; } @@ -142,6 +150,13 @@ return; } /* otherwise, we successfully forwarded */ + +#ifdef GC_GEN_STATS + GC_Gen_Collector_Stats* stats = (GC_Gen_Collector_Stats*)collector->stats; + gc_gen_collector_update_marked_nos_obj_stats_minor(stats); + gc_gen_collector_update_moved_nos_obj_stats_minor(stats, vm_object_size(p_obj)); +#endif + write_slot(p_ref, p_target_obj); @@ -170,6 +185,9 @@ { GC* gc = collector->gc; GC_Metadata* metadata = gc->metadata; +#ifdef GC_GEN_STATS + GC_Gen_Collector_Stats* stats = (GC_Gen_Collector_Stats*)collector->stats; +#endif unsigned int num_active_collectors = gc->num_active_collectors; atomic_cas32( &num_finished_collectors, 0, num_active_collectors); @@ -181,6 +199,8 @@ Vector_Block* root_set = pool_iterator_next(metadata->gc_rootset_pool); /* first step: copy all root objects to trace tasks. */ + + TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: copy root objects to trace stack ......"); while(root_set){ POINTER_SIZE_INT* iter = vector_block_iterator_init(root_set); while(!vector_block_iterator_end(root_set,iter)){ @@ -189,6 +209,11 @@ if(!*p_ref) continue; /* root ref cann't be NULL, but remset can be */ Partial_Reveal_Object *p_obj = read_slot(p_ref); + +#ifdef GC_GEN_STATS + gc_gen_collector_update_rootset_ref_num(stats); +#endif + if(obj_belongs_to_nos(p_obj)){ collector_tracestack_push(collector, p_ref); } @@ -201,6 +226,10 @@ /* second step: iterate over the trace tasks and forward objects */ collector->trace_stack = free_task_pool_get_entry(metadata); + TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: finish copying root objects to trace stack."); + + TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: trace and forward objects ......"); + retry: Vector_Block* trace_task = pool_get_entry(metadata->mark_task_pool); @@ -238,6 +267,7 @@ atomic_dec32(&num_finished_collectors); goto retry; } + TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: finish tracing and forwarding objects."); /* now we are done, but each collector has a private stack that is empty */ trace_task = (Vector_Block*)collector->trace_stack; @@ -256,7 +286,10 @@ collector_trace_rootsets(collector); /* the rest work is not enough for parallelization, so let only one thread go */ - if( collector->thread_handle != 0 ) return; + if( (POINTER_SIZE_INT)collector->thread_handle != 0 ) { + TRACE2("gc.process", "GC: collector["<<(POINTER_SIZE_INT)collector->thread_handle<<"] finished"); + return; + } gc->collect_result = gc_collection_result(gc); if(!gc->collect_result){ @@ -281,6 +314,8 @@ fspace_reset_for_allocation(space); + TRACE2("gc.process", "GC: collector[0] finished"); + return; } Index: src/trace_forward/fspace_nongen_forward_pool.cpp =================================================================== --- src/trace_forward/fspace_nongen_forward_pool.cpp (revision 566495) +++ src/trace_forward/fspace_nongen_forward_pool.cpp (working copy) @@ -26,6 +26,10 @@ #include "../common/gc_metadata.h" #include "../finalizer_weakref/finalizer_weakref.h" +#ifdef GC_GEN_STATS +#include "../gen/gen_stats.h" +#endif + #ifdef MARK_BIT_FLIPPING static FORCE_INLINE void scan_slot(Collector *collector, REF *p_ref) @@ -87,8 +91,13 @@ Partial_Reveal_Object *p_obj = read_slot(p_ref); if(!obj_belongs_to_nos(p_obj)){ - 
if(obj_mark_in_oi(p_obj)) + if(obj_mark_in_oi(p_obj)){ +#ifdef GC_GEN_STATS + GC_Gen_Collector_Stats* stats = (GC_Gen_Collector_Stats*)collector->stats; + gc_gen_collector_update_marked_nonnos_obj_stats_minor(stats); +#endif scan_object(collector, p_obj); + } return; } @@ -114,6 +123,12 @@ return; } /* otherwise, we successfully forwarded */ + +#ifdef GC_GEN_STATS + GC_Gen_Collector_Stats* stats = (GC_Gen_Collector_Stats*)collector->stats; + gc_gen_collector_update_marked_nos_obj_stats_minor(stats); + gc_gen_collector_update_moved_nos_obj_stats_minor(stats, vm_object_size(p_obj)); +#endif write_slot(p_ref, p_target_obj); scan_object(collector, p_target_obj); @@ -140,6 +155,9 @@ { GC* gc = collector->gc; GC_Metadata* metadata = gc->metadata; +#ifdef GC_GEN_STATS + GC_Gen_Collector_Stats* stats = (GC_Gen_Collector_Stats*)collector->stats; +#endif unsigned int num_active_collectors = gc->num_active_collectors; atomic_cas32( &num_finished_collectors, 0, num_active_collectors); @@ -151,6 +169,8 @@ Vector_Block* root_set = pool_iterator_next(metadata->gc_rootset_pool); /* first step: copy all root objects to trace tasks. */ + + TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: copy root objects to trace stack ..."); while(root_set){ POINTER_SIZE_INT* iter = vector_block_iterator_init(root_set); while(!vector_block_iterator_end(root_set,iter)){ @@ -160,6 +180,10 @@ assert(*p_ref); /* root ref cann't be NULL, but remset can be */ collector_tracestack_push(collector, p_ref); + +#ifdef GC_GEN_STATS + gc_gen_collector_update_rootset_ref_num(stats); +#endif } root_set = pool_iterator_next(metadata->gc_rootset_pool); } @@ -169,6 +193,10 @@ /* second step: iterate over the trace tasks and forward objects */ collector->trace_stack = free_task_pool_get_entry(metadata); + TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: finish copying root objects to trace stack."); + + TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: trace and forward objects ..."); + retry: Vector_Block* trace_task = pool_get_entry(metadata->mark_task_pool); @@ -205,6 +233,8 @@ goto retry; } + TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: finish tracing and forwarding objects."); + /* now we are done, but each collector has a private stack that is empty */ trace_task = (Vector_Block*)collector->trace_stack; vector_stack_clear(trace_task); @@ -221,8 +251,10 @@ collector_trace_rootsets(collector); /* the rest work is not enough for parallelization, so let only one thread go */ - if( collector->thread_handle != 0 ) return; - + if( (POINTER_SIZE_INT)collector->thread_handle != 0 ) { + TRACE2("gc.process", "GC: collector["<<(POINTER_SIZE_INT)collector->thread_handle<<"] finished"); + return; + } gc->collect_result = gc_collection_result(gc); if(!gc->collect_result){ #ifndef BUILD_IN_REFERENT @@ -246,6 +278,8 @@ fspace_reset_for_allocation(space); + TRACE2("gc.process", "GC: collector[0] finished"); + return; } Index: src/utils/bit_ops.h =================================================================== --- src/utils/bit_ops.h (revision 566495) +++ src/utils/bit_ops.h (working copy) @@ -1,130 +1,130 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. 
- * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * @author Ji, Qi, 2006/10/25 - */ - -#ifndef _BIT_OPS_H_ -#define _BIT_OPS_H_ - -#include "../common/gc_common.h" - -inline unsigned int word_get_first_set_lsb(POINTER_SIZE_INT target_word) -{ - assert(target_word != 0); - POINTER_SIZE_INT bit_offset = 0; - -#if defined(_IPF_) || defined(_WIN64) - while( ! (target_word & ((POINTER_SIZE_INT)1 << bit_offset)) ){ - bit_offset++; - } -#else /* !_IPF_ && !_WIN64 */ -#ifdef PLATFORM_POSIX /* linux X86 32/64 */ - __asm__ __volatile__( - "bsf %1,%0\n" - :"=r"(bit_offset) - :"m"(target_word) - ); -#else /* !PLATFORM_POSIX */ -#ifdef WIN32 - __asm{ - bsf eax, target_word - mov bit_offset, eax - } -#endif /* WIN32 */ -#endif /* !PLATFORM_POSIX */ -#endif /* !_IPF_ && !_WIN64 */ - - assert(bit_offset < BITS_PER_WORD); - return (unsigned int)bit_offset; - -} - -inline unsigned int words_get_next_set_lsb(POINTER_SIZE_INT* words, unsigned int count, unsigned int start_idx) -{ - unsigned int bit_offset; - - assert(start_idx < 128); - - unsigned int start_word_index = start_idx >> BIT_SHIFT_TO_BITS_PER_WORD; - unsigned int start_bit_offset = start_idx & BIT_MASK_TO_BITS_PER_WORD; - - bit_offset = start_idx - start_bit_offset; - for(unsigned int i = start_word_index; i < count; i ++ ){ - POINTER_SIZE_INT cur_word = *(words + i); - - if(start_word_index == i){ - POINTER_SIZE_INT mask = ~(((POINTER_SIZE_INT)1 << start_bit_offset) - 1); - cur_word = cur_word & mask; - } - - if(cur_word != 0){ - bit_offset += word_get_first_set_lsb(cur_word); - return bit_offset; - } - - bit_offset += BITS_PER_WORD; - } - - return bit_offset; -} - -inline void words_set_bit(POINTER_SIZE_INT* words, unsigned int count, unsigned int start_idx) -{ - assert(start_idx < 128); - - unsigned int word_index = start_idx >> BIT_SHIFT_TO_BITS_PER_WORD; - unsigned int bit_offset = start_idx & BIT_MASK_TO_BITS_PER_WORD; - - if(word_index >= count) return; - - volatile POINTER_SIZE_INT* p_word = words + word_index; - POINTER_SIZE_INT old_value = *p_word; - POINTER_SIZE_INT mask = (POINTER_SIZE_INT)1 << bit_offset; - POINTER_SIZE_INT new_value = old_value|mask; - while (true) { - POINTER_SIZE_INT temp = atomic_casptrsz(p_word, new_value, old_value); - if (temp == old_value) break; - old_value = *p_word; - new_value = old_value|mask; - } - return; -} - -inline void words_clear_bit(POINTER_SIZE_INT* words, unsigned int count, unsigned int start_idx) -{ - assert(start_idx < 128); - - unsigned int word_index = start_idx >> BIT_SHIFT_TO_BITS_PER_WORD; - unsigned int bit_offset = start_idx & BIT_MASK_TO_BITS_PER_WORD; - - if(word_index >= count) return; - - volatile POINTER_SIZE_INT* p_word = words + word_index; - POINTER_SIZE_INT old_value = *p_word; - POINTER_SIZE_INT mask = ~((POINTER_SIZE_INT)1 << bit_offset); - POINTER_SIZE_INT new_value = old_value & mask; - while (true) { - POINTER_SIZE_INT temp = atomic_casptrsz(p_word, new_value, old_value); - if (temp == old_value) break; - old_value = *p_word; - 
new_value = old_value & mask; - } - return; -} -#endif +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * @author Ji, Qi, 2006/10/25 + */ + +#ifndef _BIT_OPS_H_ +#define _BIT_OPS_H_ + +#include "../common/gc_common.h" + +inline unsigned int word_get_first_set_lsb(POINTER_SIZE_INT target_word) +{ + assert(target_word != 0); + POINTER_SIZE_INT bit_offset = 0; + +#if defined(_IPF_) || defined(_WIN64) + while( ! (target_word & ((POINTER_SIZE_INT)1 << bit_offset)) ){ + bit_offset++; + } +#else /* !_IPF_ && !_WIN64 */ +#ifdef PLATFORM_POSIX /* linux X86 32/64 */ + __asm__ __volatile__( + "bsf %1,%0\n" + :"=r"(bit_offset) + :"m"(target_word) + ); +#else /* !PLATFORM_POSIX */ +#ifdef WIN32 + __asm{ + bsf eax, target_word + mov bit_offset, eax + } +#endif /* WIN32 */ +#endif /* !PLATFORM_POSIX */ +#endif /* !_IPF_ && !_WIN64 */ + + assert(bit_offset < BITS_PER_WORD); + return (unsigned int)bit_offset; + +} + +inline unsigned int words_get_next_set_lsb(POINTER_SIZE_INT* words, unsigned int count, unsigned int start_idx) +{ + unsigned int bit_offset; + + assert(start_idx < 128); + + unsigned int start_word_index = start_idx >> BIT_SHIFT_TO_BITS_PER_WORD; + unsigned int start_bit_offset = start_idx & BIT_MASK_TO_BITS_PER_WORD; + + bit_offset = start_idx - start_bit_offset; + for(unsigned int i = start_word_index; i < count; i ++ ){ + POINTER_SIZE_INT cur_word = *(words + i); + + if(start_word_index == i){ + POINTER_SIZE_INT mask = ~(((POINTER_SIZE_INT)1 << start_bit_offset) - 1); + cur_word = cur_word & mask; + } + + if(cur_word != 0){ + bit_offset += word_get_first_set_lsb(cur_word); + return bit_offset; + } + + bit_offset += BITS_PER_WORD; + } + + return bit_offset; +} + +inline void words_set_bit(POINTER_SIZE_INT* words, unsigned int count, unsigned int start_idx) +{ + assert(start_idx < 128); + + unsigned int word_index = start_idx >> BIT_SHIFT_TO_BITS_PER_WORD; + unsigned int bit_offset = start_idx & BIT_MASK_TO_BITS_PER_WORD; + + if(word_index >= count) return; + + volatile POINTER_SIZE_INT* p_word = words + word_index; + POINTER_SIZE_INT old_value = *p_word; + POINTER_SIZE_INT mask = (POINTER_SIZE_INT)1 << bit_offset; + POINTER_SIZE_INT new_value = old_value|mask; + while (true) { + POINTER_SIZE_INT temp = atomic_casptrsz(p_word, new_value, old_value); + if (temp == old_value) break; + old_value = *p_word; + new_value = old_value|mask; + } + return; +} + +inline void words_clear_bit(POINTER_SIZE_INT* words, unsigned int count, unsigned int start_idx) +{ + assert(start_idx < 128); + + unsigned int word_index = start_idx >> BIT_SHIFT_TO_BITS_PER_WORD; + unsigned int bit_offset = start_idx & BIT_MASK_TO_BITS_PER_WORD; + + if(word_index >= count) return; + + volatile POINTER_SIZE_INT* p_word = words + word_index; + POINTER_SIZE_INT old_value 
= *p_word; + POINTER_SIZE_INT mask = ~((POINTER_SIZE_INT)1 << bit_offset); + POINTER_SIZE_INT new_value = old_value & mask; + while (true) { + POINTER_SIZE_INT temp = atomic_casptrsz(p_word, new_value, old_value); + if (temp == old_value) break; + old_value = *p_word; + new_value = old_value & mask; + } + return; +} +#endif Index: src/utils/sync_pool.h =================================================================== --- src/utils/sync_pool.h (revision 566495) +++ src/utils/sync_pool.h (working copy) @@ -30,6 +30,7 @@ inline void sync_pool_destruct(Pool* pool){ sync_stack_destruct(pool); } inline Boolean pool_is_empty(Pool* pool){ return sync_stack_is_empty(pool);} +inline void pool_empty(Pool* pool) { sync_stack_empty(pool); } inline unsigned int pool_size(Pool* pool){ return sync_stack_size(pool); } Index: src/utils/sync_stack.h =================================================================== --- src/utils/sync_stack.h (revision 566495) +++ src/utils/sync_stack.h (working copy) @@ -147,6 +147,12 @@ return (stack_top_get_entry(stack->top) == NULL); } +inline void sync_stack_empty(Sync_Stack* stack) +{ + stack->top = (Stack_Top)NULL; + stack->cur = NULL; +} + inline unsigned int sync_stack_size(Sync_Stack* stack) { unsigned int entry_count = 0; Index: src/verify/verifier_scanner.cpp =================================================================== --- src/verify/verifier_scanner.cpp (revision 566495) +++ src/verify/verifier_scanner.cpp (working copy) @@ -421,9 +421,3 @@ heap_verifier->live_obj_scanner = verifier_scan_live_objects; heap_verifier->all_obj_scanner = verifier_scan_all_objects; } - - - - - - Index: src/verify/verify_gc_effect.cpp =================================================================== --- src/verify/verify_gc_effect.cpp (revision 566495) +++ src/verify/verify_gc_effect.cpp (working copy) @@ -532,6 +532,3 @@ void verifier_reset_hash_distance() { hash_obj_distance = 0;} - - - Index: src/verify/verify_gc_effect.h =================================================================== --- src/verify/verify_gc_effect.h (revision 566495) +++ src/verify/verify_gc_effect.h (working copy) @@ -98,4 +98,3 @@ #endif -
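As a closing note on the bit_ops.h helpers kept by the hunk above (the file body is unchanged apart from line endings): words_set_bit and words_clear_bit retry a compare-and-swap until the target bit flips atomically, and words_get_next_set_lsb returns count*BITS_PER_WORD when no further bit is set. A hypothetical usage sketch follows; the demo function and its bitmap are illustrative only and assume the helpers exactly as shown in the hunk.

/* Illustrative walk over a small atomic bitmap using the bit_ops.h helpers. */
static void bitmap_demo()
{
  POINTER_SIZE_INT words[4] = {0, 0, 0, 0};
  unsigned int count = 4;

  words_set_bit(words, count, 3);   /* CAS loop retries until bit 3 is set */
  words_set_bit(words, count, 70);  /* lands in word 1 (64-bit) or word 2 (32-bit) */

  unsigned int idx = words_get_next_set_lsb(words, count, 0);
  while(idx < count * BITS_PER_WORD){ /* count*BITS_PER_WORD means "no more set bits" */
    /* visits bit 3, then bit 70 */
    idx = words_get_next_set_lsb(words, count, idx + 1);
  }

  words_clear_bit(words, count, 70); /* atomic clear, same CAS pattern */
}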