Index: javasrc_uncomp/org/apache/harmony/drlvm/gc_gen/GCHelper.java
===================================================================
--- javasrc_uncomp/org/apache/harmony/drlvm/gc_gen/GCHelper.java (revision 611111)
+++ javasrc_uncomp/org/apache/harmony/drlvm/gc_gen/GCHelper.java (working copy)
@@ -136,7 +136,10 @@
             p_objSlot.store(p_target);
             return;
         }
-
+        Address p_obj_info = p_objBase.plus(4);
+        int obj_info = p_obj_info.loadInt();
+        if((obj_info & 0x80) != 0) return;
+
         VMHelper.writeBarrier(p_objBase, p_objSlot, p_target);
     }
Index: src/common/collection_scheduler.cpp
===================================================================
--- src/common/collection_scheduler.cpp (revision 611186)
+++ src/common/collection_scheduler.cpp (working copy)
@@ -252,4 +252,3 @@
 }
 
 
-
Index: src/common/collection_scheduler.h
===================================================================
--- src/common/collection_scheduler.h (revision 611186)
+++ src/common/collection_scheduler.h (working copy)
@@ -52,4 +52,3 @@
 #endif
 
 
-
Index: src/common/gc_block.h
===================================================================
--- src/common/gc_block.h (revision 611186)
+++ src/common/gc_block.h (working copy)
@@ -281,4 +281,3 @@
 
 
 
-
Index: src/common/gc_common.cpp
===================================================================
--- src/common/gc_common.cpp (revision 611186)
+++ src/common/gc_common.cpp (working copy)
@@ -397,13 +397,6 @@
 
 void gc_prepare_rootset(GC* gc)
 {
-  if(!USE_CONCURRENT_GC){
-    gc_metadata_verify(gc, TRUE);
-#ifndef BUILD_IN_REFERENT
-    gc_finref_metadata_verify((GC*)gc, TRUE);
-#endif
-  }
-
   /* Stop the threads and collect the roots. */
   lock(gc->enumerate_rootset_lock);
   INFO2("gc.process", "GC: stop the threads and enumerate rootset ...\n");
@@ -432,23 +425,17 @@
 #ifdef MARK_BIT_FLIPPING
   if(gc_match_kind(gc, MINOR_COLLECTION)) mark_bit_flip();
 #endif
-
+
   if(!USE_CONCURRENT_GC){
     gc_metadata_verify(gc, TRUE);
 #ifndef BUILD_IN_REFERENT
     gc_finref_metadata_verify((GC*)gc, TRUE);
 #endif
   }
-  /* Stop the threads and collect the roots. */
-  lock(gc->enumerate_rootset_lock);
-  INFO2("gc.process", "GC: stop the threads and enumerate rootset ...\n");
-  gc_clear_rootset(gc);
-  gc_reset_rootset(gc);
+
   int disable_count = hythread_reset_suspend_disable();
-  vm_enumerate_root_set_all_threads();
-  gc_copy_interior_pointer_table_to_rootset();
-  gc_set_rootset(gc);
-  unlock(gc->enumerate_rootset_lock);
+  /* stop the world and get rootset here */
+  gc_prepare_rootset(gc);
 
   if(USE_CONCURRENT_GC && gc_sweep_is_concurrent()){
     if(gc_is_concurrent_sweep_phase())
@@ -537,4 +524,3 @@
 }
 
 
-
Index: src/common/gc_common.h
===================================================================
--- src/common/gc_common.h (revision 611186)
+++ src/common/gc_common.h (working copy)
@@ -411,6 +411,26 @@
 }
 
 /***************************************************************/
+
+inline Boolean obj_is_remembered(Partial_Reveal_Object* p_obj)
+{
+  return get_obj_info_raw(p_obj) & OBJ_REM_BIT;
+}
+
+inline void obj_set_rem_bit(Partial_Reveal_Object* p_obj)
+{
+  Obj_Info_Type oi = get_obj_info_raw(p_obj);
+  return set_obj_info( p_obj, oi |OBJ_REM_BIT) ;
+}
+
+inline void obj_clear_rem_bit(Partial_Reveal_Object* p_obj)
+{
+  Obj_Info_Type oi = get_obj_info_raw(p_obj);
+  return set_obj_info( p_obj, oi & ~OBJ_REM_BIT) ;
+}
+
+/***************************************************************/
+
 /* all GCs inherit this GC structure */
 struct Marker;
 struct Mutator;
Index: src/common/gc_concurrent.cpp
===================================================================
--- src/common/gc_concurrent.cpp (revision 611186)
+++ src/common/gc_concurrent.cpp (working copy)
@@ -341,4 +341,3 @@
     }
   }
 }
-
Index: src/common/gc_for_barrier.cpp
===================================================================
--- src/common/gc_for_barrier.cpp (revision 611186)
+++ src/common/gc_for_barrier.cpp (working copy)
@@ -31,8 +31,74 @@
 
 volatile unsigned int write_barrier_function;
 
+void allocator_object_write_barrier(Partial_Reveal_Object* p_object, Collector* allocator)
+{
+  if( addr_belongs_to_nos(p_object)) return;
+
+  REF* p_slot;
+  /* scan array object */
+  if (object_is_array((Partial_Reveal_Object*)p_object)) {
+    Partial_Reveal_Object* array = p_object;
+    assert(!obj_is_primitive_array(array));
+
+    int32 array_length = vector_get_length((Vector_Handle) array);
+    for (int i = 0; i < array_length; i++) {
+      p_slot = (REF *)vector_get_element_address_ref((Vector_Handle)array, i);
+      if( read_slot(p_slot) != NULL && addr_belongs_to_nos(read_slot(p_slot))){
+        collector_remset_add_entry(allocator, p_slot);
+      }
+    }
+    return;
+  }
+
+  /* scan non-array object */
+  Partial_Reveal_Object* p_obj = (Partial_Reveal_Object*)p_object;
+  unsigned int num_refs = object_ref_field_num(p_obj);
+  int *ref_iterator = object_ref_iterator_init(p_obj);
+
+  for(unsigned int i=0; i<num_refs; i++){
+    p_slot = object_ref_iterator_get(ref_iterator+i, p_obj);
+    if( read_slot(p_slot) != NULL && addr_belongs_to_nos(read_slot(p_slot))){
+      collector_remset_add_entry(allocator, p_slot);
+    }
+  }
+
+  return;
+}
+
+static void gen_write_barrier_rem_obj(Managed_Object_Handle p_obj_holding_ref,
+                      Managed_Object_Handle p_target)
+{
+  if(p_target >= nos_boundary && p_obj_holding_ref < nos_boundary)
+    mutator_rem_obj(p_obj_holding_ref);
+
+  return;
+}
+
 /* The implementations are only temporary */
-static void write_barrier_rem_source_slot(Managed_Object_Handle *p_slot,
+static void gen_write_barrier_rem_slot(Managed_Object_Handle *p_slot,
                       Managed_Object_Handle p_target)
 {
   if(p_target >= nos_boundary && p_slot < nos_boundary){
@@ -178,11 +244,18 @@
   }
   mutator_post_signal(mutator,MUTATOR_EXIT_BARRIER);
 
-  if( !gc_is_gen_mode() ) return;
-  if( object_has_ref_field((Partial_Reveal_Object*)p_obj_written)){
-    /* for array copy and object clone */
-    gc_object_write_barrier(p_obj_written);
-  }
+  if( !gc_is_gen_mode() || !object_has_ref_field((Partial_Reveal_Object*)p_obj_written))
+    return;
+
+  /* for array copy and object clone */
+#ifdef USE_REM_SLOTS
+  gc_object_write_barrier(p_obj_written);
+#else
+  if( p_obj_written >= nos_boundary ) return;
+
+  mutator_rem_obj( p_obj_written );
+#endif
+  return;
 }
 
 /* FIXME:: this is not the right interface for write barrier */
@@ -197,7 +270,11 @@
       break;
     case WRITE_BARRIER_REM_SOURCE_REF:
       *p_slot = p_target;
-      write_barrier_rem_source_slot(p_slot, p_target);
+#ifdef USE_REM_SLOTS
+      gen_write_barrier_rem_slot(p_slot, p_target);
+#else /* USE_REM_OBJS */
+      gen_write_barrier_rem_obj(p_obj_holding_ref, p_target);
+#endif
       break;
     case WRITE_BARRIER_REM_SOURCE_OBJ:
       *p_slot = p_target;
Index: src/common/gc_for_barrier.h
===================================================================
--- src/common/gc_for_barrier.h (revision 611186)
+++ src/common/gc_for_barrier.h (working copy)
@@ -39,5 +39,6 @@
 {
   write_barrier_function = wb_function;
 }
+
 
 #endif /* _GC_FOR_BARRIER_H_ */
Index: src/common/gc_for_class.h
===================================================================
--- src/common/gc_for_class.h (revision 611186)
+++ src/common/gc_for_class.h (working copy)
@@ -67,8 +67,12 @@
 
 #define OBJ_DIRTY_BIT 0x20
 
+/* used by semispace GC to indicate the object is a survivor in NOS */
 #define OBJ_AGE_BIT 0x40
 
+/* used by generational GC to indicate the object has been remembered */
+#define OBJ_REM_BIT 0x80
+
 #ifdef POINTER64 // Like in VM
 #define COMPRESS_VTABLE
 #endif
@@ -320,4 +324,3 @@
 
 
 
-
Index: src/common/gc_for_vm.cpp
===================================================================
--- src/common/gc_for_vm.cpp (revision 611186)
+++ src/common/gc_for_vm.cpp (working copy)
@@ -64,6 +64,7 @@
 #endif
   vm_helper_register_magic_helper(VM_RT_NEW_RESOLVED_USING_VTABLE_AND_SIZE, "org/apache/harmony/drlvm/gc_gen/GCHelper", "alloc");
   vm_helper_register_magic_helper(VM_RT_NEW_VECTOR_USING_VTABLE, "org/apache/harmony/drlvm/gc_gen/GCHelper", "allocArray");
+  vm_helper_register_magic_helper(VM_RT_GC_HEAP_WRITE_REF, "org/apache/harmony/drlvm/gc_gen/GCHelper", "write_barrier_slot_rem");
 }
 
 int gc_init()
@@ -445,4 +446,3 @@
   return address_belongs_to_gc_heap(p_obj, p_global_gc);
 }
 
-
Index: src/common/gc_metadata.cpp
===================================================================
--- src/common/gc_metadata.cpp (revision 611186)
+++ src/common/gc_metadata.cpp (working copy)
@@ -305,14 +305,34 @@
   assert(gc_match_either_kind(gc, MINOR_COLLECTION|NORMAL_MAJOR_COLLECTION));
 
   if( gc_match_kind(gc, NORMAL_MAJOR_COLLECTION )){
     /* all the remsets are useless now */
-    /* clean and put back mutator remsets */
+    /* clean and put back mutator remsets */
+#ifdef USE_REM_SLOTS
     root_set = pool_get_entry( mutator_remset_pool );
     while(root_set){
      vector_block_clear(root_set);
      pool_put_entry(free_set_pool, root_set);
      root_set = pool_get_entry( mutator_remset_pool );
     }
-
+#else
+    Vector_Block* rem_set = pool_get_entry(mutator_remset_pool);
+
+    while(rem_set){
+      POINTER_SIZE_INT* iter = vector_block_iterator_init(rem_set);
+      while(!vector_block_iterator_end(rem_set,iter)){
+        Partial_Reveal_Object* p_obj_holding_ref = (Partial_Reveal_Object*)*iter;
+        iter = vector_block_iterator_advance(rem_set,iter);
+
+        assert( !obj_belongs_to_nos(p_obj_holding_ref));
+        assert( obj_is_remembered(p_obj_holding_ref));
+        obj_clear_rem_bit(p_obj_holding_ref);
+      }
+      vector_block_clear(rem_set);
+      pool_put_entry(free_set_pool, rem_set);
+      rem_set = pool_get_entry(metadata->mutator_remset_pool);
+    }
+
+#endif /* ifdef USE_REM_SLOTS else */
+
     /* clean and put back collector remsets */
     root_set = pool_get_entry( collector_remset_pool );
     while(root_set){
@@ -322,12 +342,45 @@
     }
 
   }else{ /* generational MINOR_COLLECTION */
+    /* all the remsets are put into the shared pool */
+#ifdef USE_REM_SLOTS
     root_set = pool_get_entry( mutator_remset_pool );
     while(root_set){
      pool_put_entry(gc_rootset_pool, root_set);
      root_set = pool_get_entry( mutator_remset_pool );
     }
+#else /* USE_REM_OBJS */
+    /* scan mutator remembered objects, and put the p_refs to collector_remset_pool if they
+       hold references to NOS. The pool will be moved to rootset_pool next. */
+
+    void allocator_object_write_barrier(Partial_Reveal_Object* p_object, Collector* allocator);
+    /* temporarily use collector[0]'s rem_set for the moving. Hope to be parallelized in future. */
+    Collector* collector = gc->collectors[0];
+    collector->rem_set = free_set_pool_get_entry(metadata);
+
+    Vector_Block* rem_set = pool_get_entry(mutator_remset_pool);
+
+    while(rem_set){
+      POINTER_SIZE_INT* iter = vector_block_iterator_init(rem_set);
+      while(!vector_block_iterator_end(rem_set,iter)){
+        Partial_Reveal_Object* p_obj_holding_ref = (Partial_Reveal_Object*)*iter;
+        iter = vector_block_iterator_advance(rem_set,iter);
+
+        assert( !obj_belongs_to_nos(p_obj_holding_ref));
+        assert( obj_is_remembered(p_obj_holding_ref));
+        obj_clear_rem_bit(p_obj_holding_ref);
+        allocator_object_write_barrier(p_obj_holding_ref, collector);
+      }
+      vector_block_clear(rem_set);
+      pool_put_entry(free_set_pool, rem_set);
+      rem_set = pool_get_entry(metadata->mutator_remset_pool);
+    }
+
+    pool_put_entry(collector_remset_pool, collector->rem_set);
+    collector->rem_set = NULL;
+
+#endif /* ifdef USE_REM_SLOTS else */
 
     /* put back collector remsets */
     root_set = pool_get_entry( collector_remset_pool );
@@ -379,9 +432,12 @@
 
 void gc_clear_remset(GC* gc)
 {
-  assert(gc->root_set != NULL);
+  /* this function clears all the remset before fallback */
+  assert(gc_match_kind(gc, FALLBACK_COLLECTION));
 
+  /* rootset pool has some entries that are actually remset, because all the remsets are put into rootset pool before the collection. gc->root_set is a pointer pointing to the boundary between remset and rootset in the pool */
+  assert(gc->root_set != NULL);
   Pool* pool = gc_metadata.gc_rootset_pool;
   Vector_Block* rem_set = pool_get_entry(pool);
   while(rem_set != gc->root_set){
@@ -396,16 +452,16 @@
 
   /* put back last remset block of each collector (saved in the minor collection before fallback) */
   unsigned int num_active_collectors = gc->num_active_collectors;
+  pool = gc_metadata.collector_remset_pool;
   for(unsigned int i=0; i<num_active_collectors; i++){
     Collector* collector = gc->collectors[i];
     assert(collector->rem_set != NULL);
-    pool_put_entry(gc_metadata.collector_remset_pool, collector->rem_set);
+    pool_put_entry(pool, collector->rem_set);
     collector->rem_set = NULL;
   }
 
   /* cleanup remset pool */
-  pool = gc_metadata.collector_remset_pool;
   rem_set = pool_get_entry(pool);
   while(rem_set){
     vector_block_clear(rem_set);
@@ -416,6 +472,8 @@
   return;
 }
 
+//#include <hash_set>
+/* FIXME:: should better move to verifier dir */
 extern Boolean verify_live_heap;
 void gc_metadata_verify(GC* gc, Boolean is_before_gc)
 {
@@ -424,16 +482,84 @@
 
   assert(pool_is_empty(metadata->collector_repset_pool));
   assert(pool_is_empty(metadata->mark_task_pool));
 
-  if(!is_before_gc || !gc_is_gen_mode())
+  if(!is_before_gc || !gc_is_gen_mode()){
    assert(pool_is_empty(metadata->mutator_remset_pool));
-
+  }else if(gc_is_gen_mode() && verify_live_heap ){
+    unsigned int remset_size = pool_size(metadata->mutator_remset_pool);
+    printf("Size of mutator remset pool %s: %d\n", is_before_gc?"before GC":"after GC", remset_size);
+/*
+    using namespace stdext;
+    hash_set<Partial_Reveal_Object**> pref_hash;
+    unsigned int num_rem_slots = 0;
+    unsigned int num_ref_to_nos = 0;
+
+    pool_iterator_init(metadata->mutator_remset_pool);
+    Vector_Block* rem_set = pool_iterator_next(metadata->mutator_remset_pool);
+    while(rem_set){
+      POINTER_SIZE_INT* iter = vector_block_iterator_init(rem_set);
+      while(!vector_block_iterator_end(rem_set,iter)){
+        Partial_Reveal_Object** p_ref = (Partial_Reveal_Object **)*iter;
+        iter = vector_block_iterator_advance(rem_set,iter);
+
+        pref_hash.insert(p_ref);
+        num_rem_slots ++;
+#ifdef USE_REM_SLOTS
+        Partial_Reveal_Object *p_obj = *p_ref;
+        if( p_obj && addr_belongs_to_nos(p_obj))
+          num_ref_to_nos++;
+#endif
+        if(addr_belongs_to_nos(p_ref)){
+          printf("wrong remset value!!!\n");
+        }
+      }
+      rem_set = pool_iterator_next(metadata->mutator_remset_pool);
+    }
+    printf("pref hashset size is %d\n", pref_hash.size());
+    printf("Num of rem slots: %d, refs to NOS: %d\n", num_rem_slots, num_ref_to_nos);
+*/
+  }
+
   if(!gc_is_gen_mode()){
-    /* FIXME:: even for gen gc, it should be empty if NOS is forwarding_all */
    assert(pool_is_empty(metadata->collector_remset_pool));
-  }
+  }else if( verify_live_heap){
+    unsigned int remset_size = pool_size(metadata->collector_remset_pool);
+    printf("Size of collector remset pool %s: %d\n", is_before_gc?"before GC":"after GC", remset_size);
+/*
+    if(!is_before_gc){
+
+      using namespace stdext;
+      hash_set<Partial_Reveal_Object**> pref_hash;
+
+      unsigned int num_rem_slots = 0;
+      pool_iterator_init(metadata->collector_remset_pool);
+      Vector_Block* rem_set = pool_iterator_next(metadata->collector_remset_pool);
+      while(rem_set){
+        POINTER_SIZE_INT* iter = vector_block_iterator_init(rem_set);
+        while(!vector_block_iterator_end(rem_set,iter)){
+          Partial_Reveal_Object** p_ref = (Partial_Reveal_Object **)*iter;
+          iter = vector_block_iterator_advance(rem_set,iter);
+          pref_hash.insert(p_ref);
+          num_rem_slots ++;
+          Partial_Reveal_Object *p_obj = *p_ref;
+          assert( obj_is_survivor(p_obj));
+          assert( addr_belongs_to_nos(p_obj) && !addr_belongs_to_nos(p_ref));
+          if( !obj_is_survivor(p_obj) || !addr_belongs_to_nos(p_obj) || addr_belongs_to_nos(p_ref)){
+            printf("wrong remset value!!!\n");
+          }
+        }
+        rem_set = pool_iterator_next(metadata->collector_remset_pool);
+      }
+      printf("pref hashset size is %d\n", pref_hash.size());
+      printf("Num of rem slots: %d\n", num_rem_slots);
+
+    }
+*/
+  }/* if verify_live_heap */
+
   if(verify_live_heap ){
    unsigned int free_pool_size = pool_size(metadata->free_set_pool);
+    printf("Size of free pool %s: %d\n\n\n", is_before_gc?"before GC":"after GC", free_pool_size);
   }
 
   return;
Index: src/common/interior_pointer.cpp
===================================================================
--- src/common/interior_pointer.cpp (revision 611186)
+++ src/common/interior_pointer.cpp (working copy)
@@ -93,4 +93,3 @@
 
 
 
-
Index: src/common/space_tuner.cpp
===================================================================
--- src/common/space_tuner.cpp (revision 611186)
+++ src/common/space_tuner.cpp (working copy)
@@ -537,4 +537,3 @@
   return;
 }
 
-
Index: src/finalizer_weakref/finalizer_weakref.cpp
===================================================================
--- src/finalizer_weakref/finalizer_weakref.cpp (revision 611186)
+++ src/finalizer_weakref/finalizer_weakref.cpp (working copy)
@@ -813,4 +813,3 @@
 
 
 
-
Index: src/finalizer_weakref/finalizer_weakref.h
===================================================================
--- src/finalizer_weakref/finalizer_weakref.h (revision 611186)
+++ src/finalizer_weakref/finalizer_weakref.h (working copy)
@@ -22,7 +22,7 @@
 #ifndef _FINREF_H_
 #define _FINREF_H_
 
-//#define BUILD_IN_REFERENT
+#define BUILD_IN_REFERENT
 
 #include "finalizer_weakref_metadata.h"
 #include "../thread/collector.h"
Index: src/finalizer_weakref/finalizer_weakref_metadata.cpp
===================================================================
--- src/finalizer_weakref/finalizer_weakref_metadata.cpp (revision 611186)
+++ src/finalizer_weakref/finalizer_weakref_metadata.cpp (working copy)
@@ -441,4 +441,3 @@
   return TRUE;
 }
 
-
Index: src/gen/gen.cpp
===================================================================
--- src/gen/gen.cpp (revision 611186)
+++ src/gen/gen.cpp (working copy)
@@ -775,10 +775,7 @@
 
   if(gc->collect_result == FALSE && gc_match_kind((GC*)gc, MINOR_COLLECTION)){
 
     INFO2("gc.process", "GC: Minor collection failed, transform to fallback collection ...");
-
-    if(gc_is_gen_mode())
-      gc_clear_remset((GC*)gc);
-
+
     /* runout mos in minor collection */
     if(MAJOR_ALGO != MAJOR_MARK_SWEEP){
       assert(((Blocked_Space*)mos)->free_block_idx == ((Blocked_Space*)mos)->ceiling_block_idx + 1);
@@ -795,6 +792,9 @@
     gc_gen_collector_stats_reset((GC_Gen*)gc);
 #endif
 
+    if(gc_is_gen_mode())
+      gc_clear_remset((GC*)gc);
+
     if(verify_live_heap && (MAJOR_ALGO != MAJOR_MARK_SWEEP))
       event_gc_collect_kind_changed((GC*)gc);
 
@@ -1058,4 +1058,3 @@
     STD_FREE(collector->backup_allocator);
   }
 }
-
Index: src/gen/gen.h
===================================================================
--- src/gen/gen.h (revision 611186)
+++ src/gen/gen.h (working copy)
@@ -243,4 +243,3 @@
 
 
 
-
Index: src/gen/gen_stats.cpp
===================================================================
--- src/gen/gen_stats.cpp (revision 611186)
+++ src/gen/gen_stats.cpp (working copy)
@@ -29,7 +29,7 @@
 
   GC_Gen_Stats* stats = (GC_Gen_Stats*)STD_MALLOC(sizeof(GC_Gen_Stats));
   memset(stats, 0, sizeof(GC_Gen_Stats));
-  stats->is_los_collected = false;
+  stats->is_los_collected = FALSE;
   gc->stats = stats;
 }
 
Index: src/los/lspace_alloc_collect.cpp
===================================================================
--- src/los/lspace_alloc_collect.cpp (revision 611186)
+++ src/los/lspace_alloc_collect.cpp (working copy)
@@ -481,4 +481,3 @@
 }
 
 
-
Index: src/mark_compact/fallback_mark_scan.cpp
===================================================================
--- src/mark_compact/fallback_mark_scan.cpp (revision 611186)
+++ src/mark_compact/fallback_mark_scan.cpp (working copy)
@@ -258,4 +258,3 @@
 
 
 
-
Index: src/mark_compact/mspace.cpp
===================================================================
--- src/mark_compact/mspace.cpp (revision 611186)
+++ src/mark_compact/mspace.cpp (working copy)
@@ -172,4 +172,3 @@
   return mspace->expected_threshold_ratio;
 }
 
-
Index: src/mark_compact/mspace_alloc.cpp
===================================================================
--- src/mark_compact/mspace_alloc.cpp (revision 611186)
+++ src/mark_compact/mspace_alloc.cpp (working copy)
@@ -73,4 +73,3 @@
 }
 
 
-
Index: src/semi_space/sspace_gen_ss_pool.cpp
===================================================================
--- src/semi_space/sspace_gen_ss_pool.cpp (revision 611569)
+++ src/semi_space/sspace_gen_ss_pool.cpp (working copy)
@@ -143,9 +143,11 @@
   }else{
     /* otherwise, we successfully forwarded */
 
 #ifdef GC_GEN_STATS
+    if(gc_profile){
       GC_Gen_Collector_Stats* stats = (GC_Gen_Collector_Stats*)collector->stats;
       gc_gen_collector_update_marked_nos_obj_stats_minor(stats);
       gc_gen_collector_update_moved_nos_obj_stats_minor(stats, vm_object_size(p_obj));
+    }
 #endif
 
     scan_object(collector, p_target_obj);
Index: src/thread/collector.cpp
===================================================================
--- src/thread/collector.cpp (revision 611186)
+++ src/thread/collector.cpp (working copy)
@@ -350,4 +350,3 @@
 }
 
 
-
Index: src/thread/marker.cpp
===================================================================
--- src/thread/marker.cpp (revision 611186)
+++ src/thread/marker.cpp (working copy)
@@ -277,4 +277,3 @@
   return;
 }
 
-
Index: src/thread/marker.h
===================================================================
--- src/thread/marker.h (revision 611186)
+++ src/thread/marker.h (working copy)
@@ -100,4 +100,3 @@
 #endif //_MARKER_H_
 
 
-
Index: src/thread/mutator.cpp
===================================================================
--- src/thread/mutator.cpp (revision 611186)
+++ src/thread/mutator.cpp (working copy)
@@ -173,4 +173,3 @@
 }
 
 
-
Index: src/trace_forward/fspace_alloc.cpp
===================================================================
--- src/trace_forward/fspace_alloc.cpp (revision 611186)
+++ src/trace_forward/fspace_alloc.cpp (working copy)
@@ -98,4 +98,3 @@
 }
 
 
-
Index: src/verify/verifier_common.cpp
===================================================================
--- src/verify/verifier_common.cpp (revision 611186)
+++ src/verify/verifier_common.cpp (working copy)
@@ -270,4 +270,3 @@
 }
 
 
-
Index: src/verify/verifier_scanner.cpp
===================================================================
--- src/verify/verifier_scanner.cpp (revision 611186)
+++ src/verify/verifier_scanner.cpp (working copy)
@@ -443,4 +443,3 @@
 }
 
 
-
Index: src/verify/verify_gc_effect.cpp
===================================================================
--- src/verify/verify_gc_effect.cpp (revision 611186)
+++ src/verify/verify_gc_effect.cpp (working copy)
@@ -553,4 +553,3 @@
 
 
 
-
Index: src/verify/verify_live_heap.cpp
===================================================================
--- src/verify/verify_live_heap.cpp (revision 611186)
+++ src/verify/verify_live_heap.cpp (working copy)
@@ -152,4 +152,3 @@
 Heap_Verifier* get_heap_verifier() { return heap_verifier; }
 
 
-
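
Note (reviewer sketch, not part of the patch): with USE_REM_SLOTS the generational barrier remembers individual slot addresses, while the USE_REM_OBJS path added here remembers the object that holds the reference. It sets OBJ_REM_BIT (0x80, the same bit GCHelper.java now tests via obj_info & 0x80 to skip already-remembered objects) and enqueues the holder once; gc_metadata.cpp later clears the bit and rescans the holder's reference fields with allocator_object_write_barrier. The diff does not show mutator_rem_obj itself, so the code below is only a guess at its shape: the function body, the include paths, and the reuse of mutator_remset_add_entry/gc_get_tls from the existing slot barrier are assumptions, while obj_is_remembered/obj_set_rem_bit come from the gc_common.h hunk above.

/* Sketch only -- assumed shape of the object-remembering mutator barrier. */
#include "gc_common.h"         /* obj_is_remembered, obj_set_rem_bit (added above) */
#include "gc_metadata.h"       /* mutator_remset_add_entry -- assumed location     */
#include "../thread/mutator.h" /* Mutator, gc_get_tls -- assumed location          */

inline void mutator_rem_obj(Managed_Object_Handle p_obj_holding_ref)
{
  Partial_Reveal_Object* p_obj = (Partial_Reveal_Object*)p_obj_holding_ref;

  /* remember a mature object at most once per cycle; gc_metadata.cpp
     clears OBJ_REM_BIT again when it drains mutator_remset_pool */
  if( obj_is_remembered(p_obj)) return;
  obj_set_rem_bit(p_obj);

  /* the remset entry is the holding object itself, not a slot address;
     its reference fields are rescanned at GC time by
     allocator_object_write_barrier() */
  Mutator* mutator = (Mutator*)gc_get_tls();
  mutator_remset_add_entry(mutator, (REF*)p_obj);
}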