Index: gc_gen/src/common/compressed_ref.cpp
===================================================================
--- gc_gen/src/common/compressed_ref.cpp    (revision 525692)
+++ gc_gen/src/common/compressed_ref.cpp    (working copy)
@@ -63,8 +63,7 @@
     Partial_Reveal_Object **p_ref = (Partial_Reveal_Object **)*iter;
     iter = vector_block_iterator_advance(root_set, iter);
     assert(!vector_block_iterator_end(root_set, iter));
-    REF ref = (REF)*iter;
-    Partial_Reveal_Object *p_obj = uncompress_ref(ref);
+    Partial_Reveal_Object *p_obj = read_slot((REF*)iter);
     *p_ref = p_obj;
   }
 }
Index: gc_gen/src/common/gc_block.h
===================================================================
--- gc_gen/src/common/gc_block.h    (revision 525692)
+++ gc_gen/src/common/gc_block.h    (working copy)
@@ -108,16 +108,14 @@
     *((REF*)obj + 1) = 0;
     return;
   }
-  REF ref = compress_ref(raw_prefetched_next);
+  REF ref = obj_ptr_to_ref(raw_prefetched_next);
   *((REF*)obj + 1) = ref;
 }
 
 inline Partial_Reveal_Object* obj_get_prefetched_next_pointer(Partial_Reveal_Object* obj){
   /*Fixme: em64t: This may be not necessary!*/
-  assert(obj);
-
-  REF ref = *( (REF*)obj + 1);
-  return uncompress_ref(ref);
+  assert(obj);
+  return read_slot( (REF*)obj + 1);
 }
 
 inline Partial_Reveal_Object *next_marked_obj_in_block(Partial_Reveal_Object *cur_obj, Partial_Reveal_Object *block_end)
@@ -256,7 +254,7 @@
   unsigned int index = OBJECT_INDEX_TO_OFFSET_TABLE(p_obj);
   Block_Header *curr_block = GC_BLOCK_HEADER(p_obj);
   Partial_Reveal_Object* new_addr = (Partial_Reveal_Object *)(((POINTER_SIZE_INT)p_obj) - curr_block->table[index]);
-  REF new_ref = compress_ref(new_addr);
+  REF new_ref = obj_ptr_to_ref(new_addr);
   return new_ref;
 }
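Note on the compressed_ref.cpp hunk above: entries in the uncompressed-root vector block come in pairs — first the address of the slot to patch, then the compressed value — and the change decodes the value in place with read_slot((REF*)iter) instead of by hand. A minimal standalone model of that loop (Obj, the 4-slot heap and the vector layout are illustrative stand-ins for the Harmony types, and the in-place REF* read of a pointer-sized entry assumes little-endian, as on IA-32/EM64T):

#include <cassert>
#include <cstdint>
#include <vector>

typedef uint32_t REF;
struct Obj { int payload; };

static uintptr_t HEAP_NULL;

static REF obj_ptr_to_ref(Obj *p) { return p ? (REF)((uintptr_t)p - HEAP_NULL) : 0; }
static Obj *read_slot(REF *p_slot) { return *p_slot ? (Obj *)(HEAP_NULL + *p_slot) : 0; }

int main() {
  static Obj heap[4];
  HEAP_NULL = (uintptr_t)heap - 16;          /* keep every live object's offset nonzero */

  Obj *root = 0;                             /* uncompressed root slot to be patched */
  std::vector<uintptr_t> block;              /* stand-in for a Vector_Block */
  block.push_back((uintptr_t)&root);                     /* entry: slot address */
  block.push_back((uintptr_t)obj_ptr_to_ref(&heap[2]));  /* entry: compressed value */

  for (size_t i = 0; i + 1 < block.size(); i += 2) {
    Obj **p_ref = (Obj **)block[i];
    *p_ref = read_slot((REF *)&block[i + 1]);  /* decode the entry in place */
  }
  assert(root == &heap[2]);
  return 0;
}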
Index: gc_gen/src/common/gc_common.h
===================================================================
--- gc_gen/src/common/gc_common.h    (revision 525692)
+++ gc_gen/src/common/gc_common.h    (working copy)
@@ -115,7 +115,6 @@
 #define COMPRESS_REFERENCE
 #endif
 
-#define COMPRESSED_NULL ((REF)0)
 extern POINTER_SIZE_INT HEAP_NULL;
 
 #ifdef POINTER64
@@ -131,49 +130,49 @@
 /////////////////////////////////////////////
 //Compress reference related!///////////////////
 /////////////////////////////////////////////
-#ifdef COMPRESS_REFERENCE
-FORCE_INLINE REF compress_ref(Partial_Reveal_Object *p_obj)
+FORCE_INLINE REF obj_ptr_to_ref(Partial_Reveal_Object *p_obj)
 {
+#ifdef COMPRESS_REFERENCE
   if(!p_obj){
-    /*Fixme: em64t: vm performs a simple compress/uncompress machenism*/
-    /* i.e. just add or minus HEAP_NULL to p_obj*/
-    /*But in gc we distinguish zero from other p_obj*/
-    /*Now only in prefetch next live object we can hit this point.*/
-    return COMPRESSED_NULL;
+    /*Fixme: em64t: vm performs a simple compress/uncompress mechanism,
+      i.e. it just adds or subtracts HEAP_NULL to/from p_obj.
+      But in gc we distinguish zero from other p_obj.
+      Now only in prefetch next live object can we hit this point. */
+    return (REF)0;
   }
   else
-    return (REF) ( (POINTER_SIZE_INT) p_obj - HEAP_NULL);
+    return (REF) ((POINTER_SIZE_INT) p_obj - HEAP_NULL);
+#else
+
+  return (REF)p_obj;
+
+#endif
+
 }
 
-FORCE_INLINE Partial_Reveal_Object *uncompress_ref(REF ref)
+FORCE_INLINE Partial_Reveal_Object *ref_to_obj_ptr(REF ref)
 {
+#ifdef COMPRESS_REFERENCE
   if(!ref){
     return NULL;
   }
   return (Partial_Reveal_Object *)(HEAP_NULL + ref);
-}
 
-FORCE_INLINE Partial_Reveal_Object *read_slot(REF *p_slot)
-{ return uncompress_ref(*p_slot); }
+#else
 
-FORCE_INLINE void write_slot(REF *p_slot, Partial_Reveal_Object *p_obj)
-{ *p_slot = compress_ref(p_obj); }
+  return (Partial_Reveal_Object *)ref;
 
-#else /* COMPRESS_REFERENCE */
+#endif
 
-FORCE_INLINE REF compress_ref(Partial_Reveal_Object *p_obj)
-{ return (REF)p_obj; }
+}
 
-FORCE_INLINE Partial_Reveal_Object *uncompress_ref(REF ref)
-{ return (Partial_Reveal_Object *)ref; }
-
 FORCE_INLINE Partial_Reveal_Object *read_slot(REF *p_slot)
-{ return *p_slot; }
+{ return ref_to_obj_ptr(*p_slot); }
 
 FORCE_INLINE void write_slot(REF *p_slot, Partial_Reveal_Object *p_obj)
-{ *p_slot = p_obj; }
-#endif
+{ *p_slot = obj_ptr_to_ref(p_obj); }
+
 
 inline POINTER_SIZE_INT round_up_to_size(POINTER_SIZE_INT size, int block_size)
 { return (size + block_size - 1) & ~(block_size - 1); }
@@ -246,7 +245,7 @@
 inline Partial_Reveal_Object *obj_get_fw_in_oi(Partial_Reveal_Object *obj)
 {
   assert(get_obj_info_raw(obj) & CONST_FORWARD_BIT);
-  return (Partial_Reveal_Object*)(uncompress_ref((REF)(get_obj_info_raw(obj) & ~CONST_FORWARD_BIT)));
+  return (Partial_Reveal_Object*)(ref_to_obj_ptr((REF)(get_obj_info_raw(obj) & ~CONST_FORWARD_BIT)));
 }
 
 inline Boolean obj_is_fw_in_oi(Partial_Reveal_Object *obj)
@@ -255,7 +254,7 @@
 inline void obj_set_fw_in_oi(Partial_Reveal_Object *obj,void *dest)
 {
   assert(!(get_obj_info_raw(obj) & CONST_FORWARD_BIT));
-  REF dest = compress_ref((Partial_Reveal_Object *) dest);
+  REF dest = obj_ptr_to_ref((Partial_Reveal_Object *) dest);
   set_obj_info(obj,(Obj_Info_Type)dest | CONST_FORWARD_BIT);
 }
 
@@ -292,7 +291,7 @@
 inline Partial_Reveal_Object *obj_get_fw_in_oi(Partial_Reveal_Object *obj)
 {
   assert(get_obj_info_raw(obj) & FLIP_FORWARD_BIT);
-  return (Partial_Reveal_Object*) ( uncompress_ref( (REF)get_obj_info(obj) ) );
+  return (Partial_Reveal_Object*) ( ref_to_obj_ptr( (REF)get_obj_info(obj) ) );
 }
 
 inline Boolean obj_is_fw_in_oi(Partial_Reveal_Object *obj)
@@ -308,7 +307,7 @@
   /* It's important to clear the FLIP_FORWARD_BIT before collection ends, since it is
      the same as next minor cycle's FLIP_MARK_BIT. And if next cycle is major, it is
      also confusing as FLIP_FORWARD_BIT. (The bits are flipped only in minor collection). */
-  Obj_Info_Type dst = (Obj_Info_Type)compress_ref((Partial_Reveal_Object *) dest);
+  Obj_Info_Type dst = (Obj_Info_Type)obj_ptr_to_ref((Partial_Reveal_Object *) dest);
   set_obj_info(obj, dst | FLIP_FORWARD_BIT);
 }
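This is the heart of the patch: compress_ref/uncompress_ref used to exist as two full #ifdef COMPRESS_REFERENCE variants, so COMPRESSED_NULL leaked into every caller. The renamed obj_ptr_to_ref/ref_to_obj_ptr keep the #ifdef inside one pair of definitions, and read_slot/write_slot are written once on top of them. A compressed-mode sketch of the round trip (HEAP_NULL setup is simplified; the real base is established at VM startup):

#include <cassert>
#include <cstdint>

typedef uint32_t REF;
struct Partial_Reveal_Object { REF vt_raw; REF padding; };

static uintptr_t HEAP_NULL;   /* base address the 32-bit offsets are taken from */

static REF obj_ptr_to_ref(Partial_Reveal_Object *p_obj) {
  if (!p_obj) return (REF)0;                     /* null stays distinguishable */
  return (REF)((uintptr_t)p_obj - HEAP_NULL);    /* 32-bit offset from the base */
}
static Partial_Reveal_Object *ref_to_obj_ptr(REF ref) {
  if (!ref) return 0;
  return (Partial_Reveal_Object *)(HEAP_NULL + ref);
}
/* the slot helpers are now mode-independent one-liners */
static Partial_Reveal_Object *read_slot(REF *p_slot) { return ref_to_obj_ptr(*p_slot); }
static void write_slot(REF *p_slot, Partial_Reveal_Object *p_obj) { *p_slot = obj_ptr_to_ref(p_obj); }

int main() {
  static Partial_Reveal_Object heap[8];
  HEAP_NULL = (uintptr_t)heap - sizeof(Partial_Reveal_Object);  /* no object encodes to 0 */

  REF slot = 0;
  write_slot(&slot, &heap[3]);
  assert(read_slot(&slot) == &heap[3]);   /* round trip through the offset */
  write_slot(&slot, 0);
  assert(read_slot(&slot) == 0);          /* null round-trips through offset 0 */
  return 0;
}

Because null encodes as offset 0 and HEAP_NULL sits below the first object, a slot test can now be spelled read_slot(p_slot) == NULL in both build modes, which is what the scan_slot rewrites below rely on.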
Index: gc_gen/src/common/gc_for_vm.cpp
===================================================================
--- gc_gen/src/common/gc_for_vm.cpp    (revision 525692)
+++ gc_gen/src/common/gc_for_vm.cpp    (working copy)
@@ -120,7 +120,7 @@
 void gc_add_compressed_root_set_entry(REF* ref, Boolean is_pinned)
 {
   REF *p_ref = (REF *)ref;
-  if(*p_ref == COMPRESSED_NULL) return;
+  if(read_slot(p_ref) == NULL) return;
   Partial_Reveal_Object* p_obj = read_slot(p_ref);
   assert(!obj_is_marked_in_vt(p_obj));
   assert( address_belongs_to_gc_heap(p_obj, p_global_gc));
@@ -213,7 +193,7 @@
   Obj_Info_Type info = get_obj_info_raw(obj);
   int hash = info & GCGEN_HASH_MASK;
   if (!hash) {
-    hash = (((POINTER_SIZE_INT)obj) >> 3) & GCGEN_HASH_MASK;
+    hash = (int)((((POINTER_SIZE_INT)obj) >> 3) & GCGEN_HASH_MASK);
     if(!hash) hash = (23 & GCGEN_HASH_MASK);
     unsigned int new_info = (unsigned int)(info | hash);
     while (true) {
@@ -259,3 +239,5 @@
   mutator_need_block = FALSE;
   return old_flag;
 }
+
+
Index: gc_gen/src/common/gc_metadata.h
===================================================================
--- gc_gen/src/common/gc_metadata.h    (revision 525692)
+++ gc_gen/src/common/gc_metadata.h    (working copy)
@@ -171,7 +171,7 @@
   assert(uncompressed_root_set);
 
   Partial_Reveal_Object *p_obj = *p_ref;
-  REF ref = compress_ref(p_obj);
+  REF ref = obj_ptr_to_ref(p_obj);
 
   /* construct an Uncompressed_Root */
   vector_block_add_entry(uncompressed_root_set, (POINTER_SIZE_INT)p_ref);
Index: gc_gen/src/common/interior_pointer.cpp
===================================================================
--- gc_gen/src/common/interior_pointer.cpp    (revision 525692)
+++ gc_gen/src/common/interior_pointer.cpp    (working copy)
@@ -91,4 +91,3 @@
 }
 
 
-
Index: gc_gen/src/common/mark_scan_pool.cpp
===================================================================
--- gc_gen/src/common/mark_scan_pool.cpp    (revision 525692)
+++ gc_gen/src/common/mark_scan_pool.cpp    (working copy)
@@ -25,10 +25,9 @@
 
 static FORCE_INLINE void scan_slot(Collector* collector, REF *p_ref)
 {
-  REF ref = *p_ref;
-  if(ref == COMPRESSED_NULL) return;
+  Partial_Reveal_Object *p_obj = read_slot(p_ref);
+  if( p_obj == NULL) return;
 
-  Partial_Reveal_Object *p_obj = uncompress_ref(ref);
   if(obj_mark_in_vt(p_obj))
     collector_tracestack_push(collector, p_obj);
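The same scan_slot rewrite recurs in fallback_mark_scan.cpp, los_extention_mark_scan.cpp and both fspace forward pools below: decode the slot once with read_slot and test the pointer against NULL, instead of comparing the raw word against COMPRESSED_NULL, which only existed in compressed builds. A sketch of how the single spelling compiles under either mode (REF and HEAP_NULL reduced to the essentials; build with or without -DCOMPRESS_REFERENCE):

#include <cassert>
#include <cstdint>

#ifdef COMPRESS_REFERENCE
typedef uint32_t REF;                        /* 32-bit offset from HEAP_NULL */
static uintptr_t HEAP_NULL = 0x100000;       /* hypothetical base */
static void *ref_to_obj_ptr(REF r) { return r ? (void *)(HEAP_NULL + r) : 0; }
#else
typedef uintptr_t REF;                       /* the pointer itself */
static void *ref_to_obj_ptr(REF r) { return (void *)r; }
#endif

static void *read_slot(REF *p_slot) { return ref_to_obj_ptr(*p_slot); }

int main() {
  REF slot = 0;                              /* an empty slot in either mode */
  assert(read_slot(&slot) == 0);             /* one spelling, no COMPRESSED_NULL */
  return 0;
}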
Index: gc_gen/src/common/space_tuner.cpp
===================================================================
--- gc_gen/src/common/space_tuner.cpp    (revision 525692)
+++ gc_gen/src/common/space_tuner.cpp    (working copy)
@@ -163,7 +163,7 @@
   else
   {
     POINTER_SIZE_INT los_fail_sz = lspace_get_failure_size((Lspace*)lspace);
-    
+
     if(los_fail_sz > GC_LOS_MIN_VARY_SIZE){
       /*Fixme: we should set the least_tuning_size after finding out the biggest free area in LOS, this number could be zero*/
       tuner->tuning_size = los_fail_sz;
@@ -192,7 +192,7 @@
       tuner->tuning_size = 0;
     }
   }
-  
+
   /*Fixme: Should MOS heap_start must be 64k aligned?*/
   tuner->tuning_size = round_up_to_size(tuner->tuning_size, SPACE_ALLOC_UNIT);
   if(tuner->tuning_size == 0) tuner->kind = TRANS_NOTHING;
@@ -221,8 +221,8 @@
   POINTER_SIZE_INT tuning_block_num = tuner->tuning_size >> GC_BLOCK_SHIFT_COUNT;
   POINTER_SIZE_INT heap_block_num = gc->committed_heap_size >> GC_BLOCK_SHIFT_COUNT;
   POINTER_SIZE_INT los_block_num = los->committed_heap_size >> GC_BLOCK_SHIFT_COUNT;
-  POINTER_SIZE_INT live_block_num = los_block_num + non_los_live_block_num; 
+  POINTER_SIZE_INT live_block_num = los_block_num + non_los_live_block_num;
 
   while(live_block_num + tuning_block_num > heap_block_num){
     if(tuning_block_num == min_tuning_block_num){
       //has not enough space to extend los
       tuner->tuning_size = 0;
Index: gc_gen/src/common/space_tuner.h
===================================================================
--- gc_gen/src/common/space_tuner.h    (revision 525692)
+++ gc_gen/src/common/space_tuner.h    (working copy)
@@ -24,7 +24,7 @@
 #include "gc_common.h"
 #include "gc_space.h"
 
-#define GC_LOS_MIN_VARY_SIZE ( 2 * MB )  
+#define GC_LOS_MIN_VARY_SIZE ( 2 * MB )
 #define GC_FIXED_SIZE_TUNER
 
 //For_LOS_extend
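For the space-tuner arithmetic above: round_up_to_size (from gc_common.h) relies on the unit being a power of two, which SPACE_ALLOC_UNIT and the GC block size are, and the feasibility loop works in GC-block units. A worked sketch — the 64KB unit, the 32KB block shift, the sizes and the halving policy are all assumptions for illustration (the real loop shrinks toward min_tuning_block_num):

#include <cassert>
#include <cstdint>

/* valid only for power-of-two units */
static uintptr_t round_up_to_size(uintptr_t size, uintptr_t unit) {
  return (size + unit - 1) & ~(unit - 1);
}

int main() {
  const uintptr_t SPACE_ALLOC_UNIT = 64 * 1024;   /* assumed 64KB allocation unit */
  assert(round_up_to_size(1, SPACE_ALLOC_UNIT) == SPACE_ALLOC_UNIT);
  assert(round_up_to_size(SPACE_ALLOC_UNIT, SPACE_ALLOC_UNIT) == SPACE_ALLOC_UNIT);
  assert(round_up_to_size(SPACE_ALLOC_UNIT + 1, SPACE_ALLOC_UNIT) == 2 * SPACE_ALLOC_UNIT);

  /* the tuner's feasibility check, in blocks: an LOS extension only stands if
     live blocks plus tuning blocks still fit in the committed heap */
  const unsigned GC_BLOCK_SHIFT_COUNT = 15;       /* assumed 32KB blocks */
  uintptr_t heap_block_num   = (uintptr_t)(256u << 20) >> GC_BLOCK_SHIFT_COUNT;
  uintptr_t live_block_num   = (uintptr_t)(200u << 20) >> GC_BLOCK_SHIFT_COUNT;
  uintptr_t tuning_block_num = (uintptr_t)(64u  << 20) >> GC_BLOCK_SHIFT_COUNT;
  while (live_block_num + tuning_block_num > heap_block_num)
    tuning_block_num >>= 1;                       /* illustrative shrink policy */
  assert(live_block_num + tuning_block_num <= heap_block_num);
  return 0;
}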
Index: gc_gen/src/finalizer_weakref/finalizer_weakref.cpp
===================================================================
--- gc_gen/src/finalizer_weakref/finalizer_weakref.cpp    (revision 525692)
+++ gc_gen/src/finalizer_weakref/finalizer_weakref.cpp    (working copy)
@@ -635,7 +635,7 @@
   if(address_belongs_to_gc_heap((void *)p_ref, gc) && (space_of_addr(gc, p_ref))->move_object){
     unsigned int offset = get_gc_referent_offset();
     Partial_Reveal_Object *p_old_ref = (Partial_Reveal_Object *)((POINTER_SIZE_INT)p_ref - offset);
-    Partial_Reveal_Object *p_new_ref = uncompress_ref(obj_get_fw_in_table(p_old_ref));
+    Partial_Reveal_Object *p_new_ref = ref_to_obj_ptr(obj_get_fw_in_table(p_old_ref));
     p_ref = (REF*)((POINTER_SIZE_INT)p_new_ref + offset);
   }
   Partial_Reveal_Object* p_obj = read_slot(p_ref);
Index: gc_gen/src/finalizer_weakref/finalizer_weakref_metadata.cpp
===================================================================
--- gc_gen/src/finalizer_weakref/finalizer_weakref_metadata.cpp    (revision 525692)
+++ gc_gen/src/finalizer_weakref/finalizer_weakref_metadata.cpp    (working copy)
@@ -170,7 +170,13 @@
 {
   Mutator *mutator = gc->mutator_list;
   while(mutator){
-    assert(!mutator->obj_with_fin);
+    //assert(!mutator->obj_with_fin);
+    /* Workaround for the mutator num changing during GC bug */
+    if(mutator->obj_with_fin){
+      assert(vector_block_is_empty(mutator->obj_with_fin));
+      mutator = mutator->next;
+      continue;
+    }
     mutator->obj_with_fin = finref_get_free_block(gc);
     mutator = mutator->next;
   }
@@ -270,41 +276,41 @@
 {
   GC *gc = mutator->gc;
   Finref_Metadata *metadata = gc->finref_metadata;
-  finref_metadata_add_entry(gc, mutator->obj_with_fin, metadata->obj_with_fin_pool, (POINTER_SIZE_INT)compress_ref(p_obj));
+  finref_metadata_add_entry(gc, mutator->obj_with_fin, metadata->obj_with_fin_pool, (POINTER_SIZE_INT)obj_ptr_to_ref(p_obj));
 }
 
 /* This function is only used by resurrection fallback */
 void gc_add_finalizer(GC *gc, Vector_Block* &vector_block_in_use, Partial_Reveal_Object *p_obj)
 {
   Finref_Metadata *metadata = gc->finref_metadata;
-  finref_metadata_add_entry(gc, vector_block_in_use, metadata->obj_with_fin_pool, (POINTER_SIZE_INT)compress_ref(p_obj));
+  finref_metadata_add_entry(gc, vector_block_in_use, metadata->obj_with_fin_pool, (POINTER_SIZE_INT)obj_ptr_to_ref(p_obj));
 }
 
 void gc_add_finalizable_obj(GC *gc, Partial_Reveal_Object *p_obj)
 {
   Finref_Metadata *metadata = gc->finref_metadata;
-  finref_metadata_add_entry(gc, metadata->finalizable_obj_set, metadata->finalizable_obj_pool, (POINTER_SIZE_INT)compress_ref(p_obj));
+  finref_metadata_add_entry(gc, metadata->finalizable_obj_set, metadata->finalizable_obj_pool, (POINTER_SIZE_INT)obj_ptr_to_ref(p_obj));
 }
 
 void collector_add_softref(Collector *collector, Partial_Reveal_Object *ref)
 {
   GC *gc = collector->gc;
   Finref_Metadata *metadata = gc->finref_metadata;
-  finref_metadata_add_entry(gc, collector->softref_set, metadata->softref_pool, (POINTER_SIZE_INT)compress_ref(ref));
+  finref_metadata_add_entry(gc, collector->softref_set, metadata->softref_pool, (POINTER_SIZE_INT)obj_ptr_to_ref(ref));
 }
 
 void collector_add_weakref(Collector *collector, Partial_Reveal_Object *ref)
 {
   GC *gc = collector->gc;
   Finref_Metadata *metadata = gc->finref_metadata;
-  finref_metadata_add_entry(gc, collector->weakref_set, metadata->weakref_pool, (POINTER_SIZE_INT)compress_ref(ref));
+  finref_metadata_add_entry(gc, collector->weakref_set, metadata->weakref_pool, (POINTER_SIZE_INT)obj_ptr_to_ref(ref));
 }
 
 void collector_add_phanref(Collector *collector, Partial_Reveal_Object *ref)
 {
   GC *gc = collector->gc;
   Finref_Metadata *metadata = gc->finref_metadata;
-  finref_metadata_add_entry(gc, collector->phanref_set, metadata->phanref_pool, (POINTER_SIZE_INT)compress_ref(ref));
+  finref_metadata_add_entry(gc, collector->phanref_set, metadata->phanref_pool, (POINTER_SIZE_INT)obj_ptr_to_ref(ref));
 }
 
 void finref_repset_add_entry(GC *gc, REF* p_ref)
@@ -320,7 +326,7 @@
 {
   assert(p_obj);
   Finref_Metadata *metadata = gc->finref_metadata;
-  finref_metadata_add_entry(gc, vector_block_in_use, metadata->fallback_ref_pool, (POINTER_SIZE_INT)compress_ref(p_obj));
+  finref_metadata_add_entry(gc, vector_block_in_use, metadata->fallback_ref_pool, (POINTER_SIZE_INT)obj_ptr_to_ref(p_obj));
 }
 
 static inline Boolean pool_has_no_ref(Pool *pool)
Index: gc_gen/src/jni/java_natives.cpp
===================================================================
--- gc_gen/src/jni/java_natives.cpp    (revision 525690)
+++ gc_gen/src/jni/java_natives.cpp    (working copy)
@@ -34,8 +34,9 @@
 {
   java_helper_inlined = TRUE;
 
-  unsigned int obj = *(unsigned int*)c;
-
+  POINTER_SIZE_INT obj = *(POINTER_SIZE_INT*)c;
+  /* a trick to get the GCHelper_class j.l.c in order to manipulate its
+     fields in GC native code */
   Class_Handle *vm_class_ptr = (Class_Handle *)(obj + VM_Global_State::loader_env->vm_class_offset);
   GCHelper_clss = *vm_class_ptr;
 }
Index: gc_gen/src/mark_compact/fallback_mark_scan.cpp
===================================================================
--- gc_gen/src/mark_compact/fallback_mark_scan.cpp    (revision 525690)
+++ gc_gen/src/mark_compact/fallback_mark_scan.cpp    (working copy)
@@ -25,8 +25,7 @@
 
 static void scan_slot(Collector* collector, REF *p_ref)
 {
-  REF ref = *p_ref;
-  if(ref == COMPRESSED_NULL) return;
+  if( read_slot(p_ref) == NULL) return;
 
   collector_tracestack_push(collector, p_ref);
   return;
Index: gc_gen/src/mark_compact/los_extention_mark_scan.cpp
===================================================================
--- gc_gen/src/mark_compact/los_extention_mark_scan.cpp    (revision 525690)
+++ gc_gen/src/mark_compact/los_extention_mark_scan.cpp    (working copy)
@@ -21,10 +21,9 @@
 
 static FORCE_INLINE void scan_slot(Collector* collector, REF *p_ref)
 {
-  REF ref = *p_ref;
-  if(ref == COMPRESSED_NULL) return;
+  Partial_Reveal_Object *p_obj = read_slot(p_ref);
+  if(p_obj == NULL) return;
 
-  Partial_Reveal_Object *p_obj = uncompress_ref(ref);
   if(obj_mark_in_vt(p_obj)){
     collector_tracestack_push(collector, p_obj);
     if(!obj_belongs_to_space(p_obj, gc_get_los((GC_Gen*)collector->gc)))
Index: gc_gen/src/mark_compact/mspace.cpp
===================================================================
--- gc_gen/src/mark_compact/mspace.cpp    (revision 525690)
+++ gc_gen/src/mark_compact/mspace.cpp    (working copy)
@@ -173,4 +173,3 @@
   return mspace->expected_threshold;
 }
 
-
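The java_natives.cpp hunk above, and the (VmThreadHandle)(POINTER_SIZE_INT)i cast in collector.cpp below, are 64-bit-cleanliness fixes: loading a pointer through unsigned int* truncates it to 32 bits on EM64T, and casting an int straight to a pointer type draws a size-mismatch warning. A small demonstration of the truncation (64-bit little-endian target assumed; the address value is hypothetical):

#include <cassert>
#include <cstdint>

typedef uintptr_t POINTER_SIZE_INT;   /* Harmony's pointer-width integer */

int main() {
  uintptr_t managed_ptr = 0x12345678abcdULL;   /* hypothetical 48-bit address */
  void *c = &managed_ptr;                      /* handle: pointer to the object word */

  POINTER_SIZE_INT obj = *(POINTER_SIZE_INT *)c;   /* full-width load: correct */
  unsigned int truncated = *(unsigned int *)c;     /* old code: keeps low 32 bits only */

  assert(obj == managed_ptr);
  assert((POINTER_SIZE_INT)truncated != managed_ptr);  /* high bits are lost */
  return 0;
}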
Index: gc_gen/src/mark_compact/mspace_collect_compact.h
===================================================================
--- gc_gen/src/mark_compact/mspace_collect_compact.h    (revision 525690)
+++ gc_gen/src/mark_compact/mspace_collect_compact.h    (working copy)
@@ -48,4 +48,3 @@
 
 #endif /* _MSPACE_COLLECT_COMPACT_H_ */
 
-
Index: gc_gen/src/mark_compact/mspace_slide_compact.cpp
===================================================================
--- gc_gen/src/mark_compact/mspace_slide_compact.cpp    (revision 525690)
+++ gc_gen/src/mark_compact/mspace_slide_compact.cpp    (working copy)
@@ -78,7 +78,7 @@
 
     if( obj_info != 0 ) {
       collector_remset_add_entry(collector, (Partial_Reveal_Object **)dest_addr);
-      collector_remset_add_entry(collector, (Partial_Reveal_Object **)obj_info);
+      collector_remset_add_entry(collector, (Partial_Reveal_Object **)(POINTER_SIZE_INT)obj_info);
     }
 
     obj_set_fw_in_oi(p_obj, dest_addr);
@@ -244,7 +244,7 @@
     }
 
     Partial_Reveal_Object *next_src_obj = GC_BLOCK_HEADER(first_src_obj)->next_src;
-    if(next_src_obj && GC_BLOCK_HEADER(uncompress_ref((REF)get_obj_info_raw(next_src_obj))) != next_dest_block){
+    if(next_src_obj && GC_BLOCK_HEADER(ref_to_obj_ptr((REF)get_obj_info_raw(next_src_obj))) != next_dest_block){
       next_src_obj = NULL;
     }
     next_dest_block->src = next_src_obj;
Index: gc_gen/src/thread/collector.cpp
===================================================================
--- gc_gen/src/thread/collector.cpp    (revision 525690)
+++ gc_gen/src/thread/collector.cpp    (working copy)
@@ -231,7 +231,7 @@
     memset(collector, 0, size);
 
     /* FIXME:: thread_handle is for temporary control */
-    collector->thread_handle = (VmThreadHandle)i;
+    collector->thread_handle = (VmThreadHandle)(POINTER_SIZE_INT)i;
     collector->gc = gc;
     collector_init_thread(collector);
Index: gc_gen/src/thread/collector_alloc.h
===================================================================
--- gc_gen/src/thread/collector_alloc.h    (revision 525690)
+++ gc_gen/src/thread/collector_alloc.h    (working copy)
@@ -50,7 +50,7 @@
 
   /* else, take the obj by setting the forwarding flag atomically
      we don't put a simple bit in vt because we need compute obj size later. */
-  REF target = compress_ref(p_targ_obj);
+  REF target = obj_ptr_to_ref(p_targ_obj);
   if (oi != (Obj_Info_Type)atomic_cas32(get_obj_info_addr(p_obj), ( ( (unsigned int)target |FORWARD_BIT)), oi)) {
     /* forwarded by other, we need unalloc the allocated obj. We may waste some space if
        the allocation switched block. The remaining part of the switched block cannot be revivied for next allocation of
Index: gc_gen/src/trace_forward/fspace_alloc.cpp
===================================================================
--- gc_gen/src/trace_forward/fspace_alloc.cpp    (revision 525690)
+++ gc_gen/src/trace_forward/fspace_alloc.cpp    (working copy)
@@ -99,4 +99,3 @@
 }
 
 
-
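In the collector_alloc.h hunk above, a collector claims an object by CAS-ing its forwarding word: the compressed REF of the new copy OR'd with FORWARD_BIT, swapped against the object info read earlier. If the CAS fails, another collector forwarded the object first and the speculative copy must be unallocated. A model of that race, using std::atomic as a stand-in for Harmony's atomic_cas32 (layout and values simplified):

#include <atomic>
#include <cassert>
#include <cstdint>

typedef uint32_t REF;
const uint32_t FORWARD_BIT = 1;

struct Obj { std::atomic<uint32_t> obj_info; };

/* try to claim `obj` by installing a forwarding REF; returns false if another
   collector already forwarded it and our copy must be unallocated */
static bool try_forward(Obj *obj, uint32_t oi_seen, REF target) {
  uint32_t desired = (uint32_t)target | FORWARD_BIT;
  return obj->obj_info.compare_exchange_strong(oi_seen, desired);
}

int main() {
  Obj obj;
  obj.obj_info.store(0);                 /* unforwarded: info previously read as 0 */

  REF copy_a = 0x1000, copy_b = 0x2000;  /* two collectors' speculative copies */
  bool a_won = try_forward(&obj, 0, copy_a);
  bool b_won = try_forward(&obj, 0, copy_b);   /* sees stale info, must fail */

  assert(a_won && !b_won);
  assert(obj.obj_info.load() == (copy_a | FORWARD_BIT));  /* loser unallocs copy_b */
  return 0;
}

The compressed REF is part of what makes this work on EM64T: a 32-bit CAS on the info word can install a heap reference because it is stored as an offset, not a raw pointer.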
Index: gc_gen/src/trace_forward/fspace_gen_forward_pool.cpp
===================================================================
--- gc_gen/src/trace_forward/fspace_gen_forward_pool.cpp    (revision 525690)
+++ gc_gen/src/trace_forward/fspace_gen_forward_pool.cpp    (working copy)
@@ -33,12 +33,11 @@
 
 static FORCE_INLINE void scan_slot(Collector *collector, REF *p_ref)
 {
-  REF ref = *p_ref;
-  if(ref == COMPRESSED_NULL) return;
+  Partial_Reveal_Object *p_obj = read_slot(p_ref);
+  if( p_obj == NULL) return;
 
   /* the slot can be in tspace or fspace, we don't care.
      we care only if the reference in the slot is pointing to fspace */
-  Partial_Reveal_Object *p_obj = uncompress_ref(ref);
   if (obj_belongs_to_nos(p_obj))
     collector_tracestack_push(collector, p_ref);
Index: gc_gen/src/trace_forward/fspace_nongen_forward_pool.cpp
===================================================================
--- gc_gen/src/trace_forward/fspace_nongen_forward_pool.cpp    (revision 525690)
+++ gc_gen/src/trace_forward/fspace_nongen_forward_pool.cpp    (working copy)
@@ -29,8 +29,7 @@
 
 static FORCE_INLINE void scan_slot(Collector *collector, REF *p_ref)
 {
-  REF ref = *p_ref;
-  if(ref == COMPRESSED_NULL) return;
+  if(read_slot(p_ref) == NULL) return;
 
   collector_tracestack_push(collector, p_ref);
   return;
Index: gc_gen/src/verify/verifier_common.cpp
===================================================================
--- gc_gen/src/verify/verifier_common.cpp    (revision 525690)
+++ gc_gen/src/verify/verifier_common.cpp    (working copy)
@@ -50,7 +50,7 @@
     while(!vector_block_iterator_end(root_set,iter)){
       REF* p_ref = (REF* )*iter;
       iter = vector_block_iterator_advance(root_set,iter);
-      if(*p_ref == COMPRESSED_NULL) continue;
+      if( read_slot(p_ref) == NULL) continue;
       verifier_rootset_push(p_ref,gc_verifier->root_set);
     }
     root_set = pool_iterator_next(gc_metadata->gc_rootset_pool);
Index: gc_gen/src/verify/verifier_scanner.cpp
===================================================================
--- gc_gen/src/verify/verifier_scanner.cpp    (revision 525690)
+++ gc_gen/src/verify/verifier_scanner.cpp    (working copy)
@@ -233,9 +233,9 @@
     for (unsigned int i = 0; i < array_length; i++) {
       if(!is_unreachable_obj(p_obj)){
         verify_write_barrier(p_ref+i, heap_verifier);
-        if(*(p_ref+i) != COMPRESSED_NULL) verify_live_object_slot(p_ref+i, heap_verifier);
+        if( read_slot(p_ref+i) != NULL) verify_live_object_slot(p_ref+i, heap_verifier);
       }else{
-        if(*(p_ref+i) != COMPRESSED_NULL) verify_all_object_slot(p_ref+i, heap_verifier);
+        if( read_slot(p_ref+i) != NULL) verify_all_object_slot(p_ref+i, heap_verifier);
       }
     }
   }else{
@@ -246,9 +246,9 @@
       p_ref = object_ref_iterator_get(ref_iterator+i, p_obj);
       if(!is_unreachable_obj(p_obj)){
         verify_write_barrier(p_ref, heap_verifier);
-        if(*p_ref != COMPRESSED_NULL) verify_live_object_slot(p_ref, heap_verifier);
+        if( read_slot(p_ref) != NULL) verify_live_object_slot(p_ref, heap_verifier);
       }else{
-        if(*p_ref != COMPRESSED_NULL) verify_all_object_slot(p_ref, heap_verifier);
+        if( read_slot(p_ref) != NULL) verify_all_object_slot(p_ref, heap_verifier);
       }
     }
 
@@ -261,9 +261,9 @@
     p_ref = obj_get_referent_field(p_obj);
     if(!is_unreachable_obj(p_obj)){
       verify_write_barrier(p_ref, heap_verifier);
-      if(*p_ref != COMPRESSED_NULL) verify_live_object_slot(p_ref, heap_verifier);
+      if( read_slot(p_ref) != NULL) verify_live_object_slot(p_ref, heap_verifier);
     }else{
-      if(*p_ref != COMPRESSED_NULL) verify_all_object_slot(p_ref, heap_verifier);
+      if( read_slot(p_ref) != NULL) verify_all_object_slot(p_ref, heap_verifier);
     }
   }
 #endif
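The verify_mutator_effect.cpp hunks below both test the same generational invariant: a slot deserves a remembered-set entry iff the slot itself lies outside the nursery while the reference it holds points into it — and they now decode the slot with read_slot, so the check stays valid with compressed references. A standalone sketch of the predicate (spaces reduced to address ranges; all bounds hypothetical):

#include <cassert>
#include <cstdint>

struct Space { uintptr_t start, end; };

static bool address_belongs_to_space(const void *addr, const Space &s) {
  uintptr_t a = (uintptr_t)addr;
  return a >= s.start && a < s.end;
}

/* remset-worthy: old-to-young reference written by a mutator */
static bool slot_needs_remset(void **p_ref, void *referent, const Space &nursery) {
  return !address_belongs_to_space(p_ref, nursery) &&
          address_belongs_to_space(referent, nursery);
}

int main() {
  Space nursery = { 0x100000, 0x200000 };    /* hypothetical nursery bounds */
  void *young_obj = (void *)0x100040;
  void **old_slot = (void **)0x300000;       /* slot in mature space */
  void **young_slot = (void **)0x100080;     /* slot inside the nursery */

  assert(slot_needs_remset(old_slot, young_obj, nursery));     /* old -> young: record */
  assert(!slot_needs_remset(young_slot, young_obj, nursery));  /* young -> young: skip */
  return 0;
}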
Index: gc_gen/src/verify/verify_mutator_effect.cpp
===================================================================
--- gc_gen/src/verify/verify_mutator_effect.cpp    (revision 525690)
+++ gc_gen/src/verify/verify_mutator_effect.cpp    (working copy)
@@ -318,7 +318,7 @@
     while(!vector_block_iterator_end(rem_set, iter)){
       REF* p_ref = (REF* )*iter;
       wb_verifier->num_slots_in_remset ++;
-      if(!address_belongs_to_space((void*)p_ref, nspace) && address_belongs_to_space((void*)*p_ref, nspace)){
+      if(!address_belongs_to_space((void*)p_ref, nspace) && address_belongs_to_space(read_slot(p_ref), nspace)){
        if(!wb_is_marked_in_slot(p_ref)){
          wb_mark_in_slot(p_ref);
          wb_verifier->num_ref_wb_in_remset ++;
@@ -341,7 +341,7 @@
   WriteBarrier_Verifier* wb_verifier = heap_verifier->writebarrier_verifier;
   assert(wb_verifier);
 
-  if(!address_belongs_to_space((void*)p_ref, nspace) && address_belongs_to_space((void*)*p_ref, nspace)){
+  if(!address_belongs_to_space((void*)p_ref, nspace) && address_belongs_to_space(read_slot(p_ref), nspace)){
     if(!wb_is_marked_in_slot(p_ref)){
       assert(0);
       printf("GC Verify ==> Verify Write Barrier: error!!!\n");
Index: vmcore/include/finalizer_thread.h
===================================================================
--- vmcore/include/finalizer_thread.h    (revision 525690)
+++ vmcore/include/finalizer_thread.h    (working copy)
@@ -52,6 +52,7 @@
 
   hythread_t *thread_ids;
   unsigned int thread_num;
+  volatile unsigned int thread_attached_num;
   volatile Boolean shutdown;
   volatile Boolean on_exit;
Index: vmcore/include/ref_enqueue_thread.h
===================================================================
--- vmcore/include/ref_enqueue_thread.h    (revision 525690)
+++ vmcore/include/ref_enqueue_thread.h    (working copy)
@@ -34,6 +34,7 @@
 typedef struct Ref_Enqueue_Thread_Info {
   hysem_t pending_sem;
   Boolean shutdown;
+  volatile unsigned int thread_attached;
 }Ref_Enqueue_Thread_Info;
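The two thread-startup files below replace the internal vm_attach_internal/jthread_attach sequence with the standard JNI invocation-API attach, and add a counter (or flag) so the spawner spins until each helper thread has actually attached before startup proceeds. A compilable sketch of the pattern against plain jni.h — Harmony calls the flat jni_direct.h wrappers rather than the JavaVM member functions, and running this requires an embedding VM, so no main is given:

#include <jni.h>
#include <atomic>

static std::atomic<unsigned> threads_attached(0);

static int helper_thread_body(JavaVM *java_vm) {
  JNIEnv *jni_env;
  JavaVMAttachArgs args;
  args.version = JNI_VERSION_1_2;
  args.name = (char *)"finalizer";     /* daemon thread, named for debugging */
  args.group = NULL;

  if (java_vm->AttachCurrentThreadAsDaemon((void **)&jni_env, &args) != JNI_OK)
    return -1;
  threads_attached.fetch_add(1);       /* releases the spawner's spin-wait */

  /* ... run finalizers / enqueue pending references ... */

  return java_vm->DetachCurrentThread();
}

/* spawner side: don't report the subsystem ready until every helper attached */
static void wait_for_helpers(unsigned expected) {
  while (threads_attached.load() < expected)
    ;                                  /* the patch uses a raw volatile spin; same idea */
}

The raw spin-wait in the patch is adequate because attach completes quickly at startup; a semaphore would avoid the brief busy loop at the cost of more setup.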
Index: vmcore/src/init/finalizer_thread.cpp
===================================================================
--- vmcore/src/init/finalizer_thread.cpp    (revision 525690)
+++ vmcore/src/init/finalizer_thread.cpp    (working copy)
@@ -24,6 +24,7 @@
 #include "vm_threads.h"
 #include "init.h"
 #include "open/jthread.h"
+#include "jni_direct.h"
 
 static Boolean native_fin_thread_flag = FALSE;
 static Fin_Thread_Info *fin_thread_info = NULL;
@@ -88,6 +89,7 @@
   assert(status == TM_ERROR_NONE);
 
   fin_thread_info->thread_ids = (hythread_t *)STD_MALLOC(sizeof(hythread_t) * fin_thread_info->thread_num);
+  fin_thread_info->thread_attached_num = 0;
 
   for(unsigned int i = 0; i < fin_thread_info->thread_num; i++){
     void **args = (void **)STD_MALLOC(sizeof(void *) * 2);
@@ -97,6 +99,8 @@
     status = hythread_create(&fin_thread_info->thread_ids[i], 0, FINALIZER_THREAD_PRIORITY, 0, (hythread_entrypoint_t)finalizer_thread_func, args);
     assert(status == TM_ERROR_NONE);
   }
+
+  while(fin_thread_info->thread_attached_num < fin_thread_info->thread_num);
 }
 
 void finalizer_shutdown(Boolean start_finalization_on_exit)
@@ -168,15 +172,21 @@
 {
   JavaVM *java_vm = (JavaVM *)args[0];
   JNIEnv *jni_env;
-  jthread java_thread;
+  //jthread java_thread;
   char *name = "finalizer";
-  jboolean daemon = JNI_TRUE;
+  //jboolean daemon = JNI_TRUE;
 
-  IDATA status = vm_attach_internal(&jni_env, &java_thread, java_vm, NULL, name, daemon);
+  //IDATA status = vm_attach_internal(&jni_env, &java_thread, java_vm, NULL, name, daemon);
+  //assert(status == JNI_OK);
+  //status = jthread_attach(jni_env, java_thread, daemon);
+  //assert(status == TM_ERROR_NONE);
+  JavaVMAttachArgs *jni_args = (JavaVMAttachArgs*)STD_MALLOC(sizeof(JavaVMAttachArgs));
+  jni_args->version = JNI_VERSION_1_2;
+  jni_args->name = name;
+  jni_args->group = NULL;
+  IDATA status = AttachCurrentThreadAsDaemon(java_vm, (void**)&jni_env, jni_args);
   assert(status == JNI_OK);
-  status = jthread_attach(jni_env, java_thread, daemon);
-  assert(status == TM_ERROR_NONE);
-
+  atomic_inc32(&fin_thread_info->thread_attached_num);
+
   /* Choice: use VM_thread or hythread to indicate the finalizer thread ?
    * Now we use hythread
    * p_TLS_vmthread->finalize_thread_flags = thread_id;
@@ -201,7 +211,8 @@
   }
   vm_heavy_finalizer_resume_mutator();
 
-  status = jthread_detach(java_thread);
+  status = DetachCurrentThread(java_vm);
+  //status = jthread_detach(java_thread);
   return status;
 }
Index: vmcore/src/init/ref_enqueue_thread.cpp
===================================================================
--- vmcore/src/init/ref_enqueue_thread.cpp    (revision 525690)
+++ vmcore/src/init/ref_enqueue_thread.cpp    (working copy)
@@ -44,7 +44,8 @@
     return;
 
   ref_thread_info = (Ref_Enqueue_Thread_Info *)STD_MALLOC(sizeof(Ref_Enqueue_Thread_Info));
-  ref_thread_info->shutdown = FALSE;
+  ref_thread_info->shutdown = false;
+  ref_thread_info->thread_attached = 0;
 
   IDATA status = hysem_create(&ref_thread_info->pending_sem, 0, REF_ENQUEUE_THREAD_NUM);
   assert(status == TM_ERROR_NONE);
@@ -53,6 +54,8 @@
   args[0] = (void *)java_vm;
   status = hythread_create(NULL, 0, REF_ENQUEUE_THREAD_PRIORITY, 0, (hythread_entrypoint_t)ref_enqueue_thread_func, args);
   assert(status == TM_ERROR_NONE);
+
+  while(ref_thread_info->thread_attached == 0);
 }
 
 void ref_enqueue_shutdown(void)
@@ -77,14 +80,21 @@
 {
   JavaVM *java_vm = (JavaVM *)args[0];
   JNIEnv *jni_env;
-  jthread java_thread;
+  //jthread java_thread;
  char *name = "ref handler";
-  jboolean daemon = JNI_TRUE;
+  //jboolean daemon = JNI_TRUE;
 
-  IDATA status = vm_attach_internal(&jni_env, &java_thread, java_vm, NULL, name, daemon);
+  //IDATA status = vm_attach_internal(&jni_env, &java_thread, java_vm, NULL, name, daemon);
+  //assert(status == JNI_OK);
+  //status = jthread_attach(jni_env, java_thread, daemon);
+  //assert(status == TM_ERROR_NONE);
+  JavaVMAttachArgs *jni_args = (JavaVMAttachArgs*)STD_MALLOC(sizeof(JavaVMAttachArgs));
+  jni_args->version = JNI_VERSION_1_2;
+  jni_args->name = name;
+  jni_args->group = NULL;
+  IDATA status = AttachCurrentThreadAsDaemon(java_vm, (void**)&jni_env, jni_args);
   assert(status == JNI_OK);
-  status = jthread_attach(jni_env, java_thread, daemon);
-  assert(status == TM_ERROR_NONE);
+  ref_thread_info->thread_attached = 1;
 
   while(true){
     /* Waiting for pending weak references */
@@ -97,6 +107,7 @@
       break;
   }
 
-  status = jthread_detach(java_thread);
+  status = DetachCurrentThread(java_vm);
+  //status = jthread_detach(java_thread);
   return status;
 }