Index: gc_gen/src/common/gc_for_vm.cpp
===================================================================
--- gc_gen/src/common/gc_for_vm.cpp	(revision 526184)
+++ gc_gen/src/common/gc_for_vm.cpp	(working copy)
@@ -239,8 +239,6 @@
 * Partial_Reveal_Object **p_referent_field = obj_get_referent_field(p_reference);
 * *p_referent_field = (Partial_Reveal_Object *)((unsigned int)*p_referent_field | PHANTOM_REF_ENQUEUED_MASK | ~PHANTOM_REF_PENDING_MASK);
 * }
-
- * }
 */
Index: gc_gen/src/finalizer_weakref/finalizer_weakref.cpp
===================================================================
--- gc_gen/src/finalizer_weakref/finalizer_weakref.cpp	(revision 526184)
+++ gc_gen/src/finalizer_weakref/finalizer_weakref.cpp	(working copy)
@@ -28,6 +28,7 @@
 #include "../gen/gen.h"
 
 Boolean IGNORE_FINREF = FALSE;
+Boolean DURING_RESURRECTION = FALSE;
 
 
 static inline Boolean obj_is_dead_in_gen_minor_gc(Partial_Reveal_Object *p_obj)
@@ -224,6 +225,8 @@
 
   if(finalizable_obj_pool_is_empty(gc))
     return;
+  DURING_RESURRECTION = TRUE;
+  
   if(!gc_match_kind(gc, MINOR_COLLECTION))
     finref_reset_repset(gc);
   pool_iterator_init(finalizable_obj_pool);
@@ -260,6 +263,8 @@
 
     finref_put_repset(gc);
   metadata->pending_finalizers = TRUE;
+  DURING_RESURRECTION = FALSE;
+  
   /* fianlizable objs have been added to finref repset pool or updated by tracing */
 }
Index: gc_gen/src/finalizer_weakref/finalizer_weakref.h
===================================================================
--- gc_gen/src/finalizer_weakref/finalizer_weakref.h	(revision 526184)
+++ gc_gen/src/finalizer_weakref/finalizer_weakref.h	(working copy)
@@ -53,15 +53,20 @@
   return (REF*)(( Byte*)p_obj+get_gc_referent_offset());
 }
 
-typedef void (* Scan_Slot_Func)(Collector *collector, REF* p_ref);
+extern Boolean DURING_RESURRECTION;
+typedef void (* Scan_Slot_Func)(Collector *collector, REF *p_ref);
 inline void scan_weak_reference(Collector *collector, Partial_Reveal_Object *p_obj, Scan_Slot_Func scan_slot)
 {
   WeakReferenceType type = special_reference_type(p_obj);
   if(type == NOT_REFERENCE)
     return;
-  REF* p_referent_field = obj_get_referent_field(p_obj);
+  REF *p_referent_field = obj_get_referent_field(p_obj);
   REF p_referent = *p_referent_field;
   if (!p_referent) return;
+  if(DURING_RESURRECTION){
+    write_slot(p_referent_field, NULL);
+    return;
+  }
   switch(type){
     case SOFT_REFERENCE :
       if(gc_match_kind(collector->gc, MINOR_COLLECTION))
Index: vmcore/include/finalize.h
===================================================================
--- vmcore/include/finalize.h	(revision 526180)
+++ vmcore/include/finalize.h	(working copy)
@@ -54,5 +54,6 @@
 void vm_ref_enqueue_func(void);                        // added for NATIVE REFERENCE ENQUEUE THREAD
 
 Boolean get_native_finalizer_thread_flag();            // added for NATIVE FINALIZER THREAD
+void wait_native_finref_threads_detached(void);        // added for NATIVE FINALIZER THREAD
 
 #endif
Index: vmcore/src/init/finalizer_thread.cpp
===================================================================
--- vmcore/src/init/finalizer_thread.cpp	(revision 526180)
+++ vmcore/src/init/finalizer_thread.cpp	(working copy)
@@ -30,6 +30,8 @@
 static Fin_Thread_Info *fin_thread_info = NULL;
 unsigned int cpu_num_bits;
 
+static volatile unsigned int finref_thread_num = 0;
+
 static uint32 atomic_inc32(volatile apr_uint32_t *mem)
 {  return (uint32)apr_atomic_inc32(mem); }
@@ -69,6 +71,7 @@
 
     fin_thread_info = (Fin_Thread_Info *)STD_MALLOC(sizeof(Fin_Thread_Info));
     fin_thread_info->thread_num = port_CPUs_number();
+    finref_thread_num = fin_thread_info->thread_num + 1;
    cpu_num_bits = coarse_log(fin_thread_info->thread_num);
    fin_thread_info->working_thread_num = 0;
    fin_thread_info->end_waiting_num = 0;
@@ -121,6 +124,12 @@
     activate_finalizer_threads(TRUE);
 }
 
+void dec_finref_thread_num(void)
+{  atomic_dec32(&finref_thread_num); }
+
+void wait_native_finref_threads_detached(void)
+{  while(finref_thread_num); }
+
 /* Restrict waiting time; Unit: msec */
 static unsigned int restrict_wait_time(unsigned int wait_time, unsigned int max_time)
 {
@@ -212,6 +221,7 @@
     vm_heavy_finalizer_resume_mutator();
 
     status = DetachCurrentThread(java_vm);
+    dec_finref_thread_num();
     //status = jthread_detach(java_thread);
     return status;
 }
Index: vmcore/src/init/ref_enqueue_thread.cpp
===================================================================
--- vmcore/src/init/ref_enqueue_thread.cpp	(revision 526180)
+++ vmcore/src/init/ref_enqueue_thread.cpp	(working copy)
@@ -76,6 +76,7 @@
     assert(stat == TM_ERROR_NONE);
 }
 
+extern void dec_finref_thread_num(void);
 static IDATA ref_enqueue_thread_func(void **args)
 {
     JavaVM *java_vm = (JavaVM *)args[0];
@@ -108,6 +109,7 @@
     }
 
     status = DetachCurrentThread(java_vm);
+    dec_finref_thread_num();
     //status = jthread_detach(java_thread);
     return status;
 }
Index: vmcore/src/init/vm_shutdown.cpp
===================================================================
--- vmcore/src/init/vm_shutdown.cpp	(revision 526180)
+++ vmcore/src/init/vm_shutdown.cpp	(working copy)
@@ -36,6 +36,7 @@
 #include "vm_stats.h"
 #include "thread_dump.h"
 #include "interpreter.h"
+#include "finalize.h"
 
 #define LOG_DOMAIN "vm.core.shutdown"
 #include "cxxlog.h"
@@ -183,6 +184,9 @@
     // Execute pending shutdown hooks & finalizers
     status = exec_shutdown_sequence(jni_env);
     if (status != JNI_OK) return (jint)status;
+
+    if(get_native_finalizer_thread_flag())
+        wait_native_finref_threads_detached();
 
     // Raise uncaught exception to current thread.
     // It will be properly processed in jthread_detach().
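
Aside from the DURING_RESURRECTION handling, the patch's shutdown change is a counted detach barrier: finref_thread_num is sized to the finalizer threads plus the ref-enqueue thread, each native finref thread decrements it after DetachCurrentThread(), and vm_shutdown spins in wait_native_finref_threads_detached() until it reaches zero. The following standalone sketch shows the same pattern, substituting std::atomic and std::thread for the APR atomics and VM thread APIs used in the patch; the names live_finref_threads, finref_thread_body and wait_finref_threads_detached are illustrative only, not part of the patch.

// Illustrative sketch of the counted detach barrier (not VM code).
#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

// Counts finref threads that have not yet detached; plays the role of
// the patch's `finref_thread_num` (finalizer threads + 1 ref-enqueue thread).
static std::atomic<unsigned int> live_finref_threads{0};

static void finref_thread_body(int id)
{
    // ... run finalizers / enqueue references, then detach from the VM ...
    std::printf("finref thread %d detached\n", id);
    live_finref_threads.fetch_sub(1);   // mirrors dec_finref_thread_num()
}

static void wait_finref_threads_detached()
{
    // Mirrors wait_native_finref_threads_detached(); the patch uses a bare
    // `while(finref_thread_num);` spin, here we at least yield the CPU.
    while (live_finref_threads.load() != 0)
        std::this_thread::yield();
}

int main()
{
    unsigned finalizer_threads = std::thread::hardware_concurrency();
    if (finalizer_threads == 0) finalizer_threads = 2;   // fallback if unknown

    live_finref_threads = finalizer_threads + 1;         // +1 for ref-enqueue thread

    std::vector<std::thread> pool;
    for (unsigned i = 0; i < finalizer_threads + 1; ++i)
        pool.emplace_back(finref_thread_body, static_cast<int>(i));

    wait_finref_threads_detached();                      // shutdown path blocks here
    std::printf("all finref threads detached; shutdown may proceed\n");

    for (auto &t : pool) t.join();
    return 0;
}

The busy-wait is tolerable here because the threads are already past their work and only need to finish detaching; a condition variable would avoid the spin but requires more shutdown-ordering care.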