Index: vm/gc_gen/src/mark_compact/los_extention_mark_scan.cpp =================================================================== --- vm/gc_gen/src/mark_compact/los_extention_mark_scan.cpp (revision 532501) +++ vm/gc_gen/src/mark_compact/los_extention_mark_scan.cpp (working copy) @@ -19,6 +19,10 @@ #include "../gen/gen.h" #include "../finalizer_weakref/finalizer_weakref.h" +//wjw stolen from thread_native_thin_monitor.c, line 49 +#define IS_FAT_LOCK(lockword) (lockword >> 31) +//wjw + static FORCE_INLINE void scan_slot(Collector* collector, REF *p_ref) { Partial_Reveal_Object *p_obj = read_slot(p_ref); @@ -37,6 +41,12 @@ static FORCE_INLINE void scan_object(Collector* collector, Partial_Reveal_Object *p_obj) { + //wjw + if (IS_FAT_LOCK(p_obj->obj_info)) { + //printf("fat callback on 0x%x\n", p_obj); + native_resource_is_attached_to_a_live_java_object( (void *)p_obj); + } + //wjw if( !object_has_ref_field(p_obj) ) return; REF *p_ref; Index: vm/gc_gen/src/common/gc_common.cpp =================================================================== --- vm/gc_gen/src/common/gc_common.cpp (revision 532501) +++ vm/gc_gen/src/common/gc_common.cpp (working copy) @@ -322,6 +322,9 @@ gc_assign_free_area_to_mutators(gc); + //wjw + reclaim_resources_attached_to_dead_java_objects(); + //wjw vm_resume_threads_after(); return; } Index: vm/gc_gen/src/common/mark_scan_pool.cpp =================================================================== --- vm/gc_gen/src/common/mark_scan_pool.cpp (revision 532501) +++ vm/gc_gen/src/common/mark_scan_pool.cpp (working copy) @@ -23,6 +23,10 @@ #include "../gen/gen.h" #include "../finalizer_weakref/finalizer_weakref.h" +//wjw stolen from thread_native_thin_monitor.c, line 49 +#define IS_FAT_LOCK(lockword) (lockword >> 31) +//wjw + static FORCE_INLINE void scan_slot(Collector* collector, REF *p_ref) { Partial_Reveal_Object *p_obj = read_slot(p_ref); @@ -36,6 +40,12 @@ static FORCE_INLINE void scan_object(Collector* collector, Partial_Reveal_Object 
*p_obj) { + //wjw + if (IS_FAT_LOCK(p_obj->obj_info)) { + //printf("fat callback on 0x%x\n", p_obj); + native_resource_is_attached_to_a_live_java_object( (void *)p_obj); + } + //wjw if( !object_has_ref_field(p_obj) ) return; REF *p_ref; Index: vm/include/open/vm_gc.h =================================================================== --- vm/include/open/vm_gc.h (revision 532501) +++ vm/include/open/vm_gc.h (working copy) @@ -159,7 +159,12 @@ */ VMEXPORT Boolean verify_object_header(void *ptr); +//wjw -- Routines to support fat monitor collection +VMEXPORT void native_resource_is_attached_to_a_live_java_object(void *); +VMEXPORT void reclaim_resources_attached_to_dead_java_objects(); + + /* * ***** * * Index: vm/include/open/hythread_ext.h =================================================================== --- vm/include/open/hythread_ext.h (revision 532501) +++ vm/include/open/hythread_ext.h (working copy) @@ -305,6 +305,10 @@ IDATA VMCALL hythread_thin_monitor_destroy(hythread_thin_monitor_t *lockword); hythread_t VMCALL hythread_thin_monitor_get_owner(hythread_thin_monitor_t *lockword); IDATA VMCALL hythread_thin_monitor_get_recursion(hythread_thin_monitor_t *lockword); +//wjw +void VMCALL hy_native_resource_is_live(U_32); +void VMCALL hy_reclaim_resources(); +UDATA VMCALL hythread_fat_lock_quota(); //@} /** @name State query Index: vm/vmcore/src/thread/thread_generic.cpp =================================================================== --- vm/vmcore/src/thread/thread_generic.cpp (revision 532501) +++ vm/vmcore/src/thread/thread_generic.cpp (working copy) @@ -309,3 +309,16 @@ vm_jthread_set_tm_data(jthread java_thread, NULL); */ } + +void native_resource_is_attached_to_a_live_java_object(void *p_obj) +{ + ManagedObject *p_object = (ManagedObject *)p_obj; + uint32 xx = p_object->get_obj_info(); + hy_native_resource_is_live(xx); +} + +void reclaim_resources_attached_to_dead_java_objects() +{ + hy_reclaim_resources(); +} + Index: 
vm/thread/src/thread_java_monitors.c =================================================================== --- vm/thread/src/thread_java_monitors.c (revision 532501) +++ vm/thread/src/thread_java_monitors.c (working copy) @@ -28,11 +28,14 @@ #include "thread_private.h" #include "jni.h" +#include "open/gc.h" + void add_owned_monitor(jobject monitor); void remove_owned_monitor(jobject monitor); void set_contended_monitor(jobject monitor); void set_wait_monitor(jobject monitor); + /** * Initializes Java monitor. * @@ -74,6 +77,13 @@ hythread_t tm_native_thread; int disable_count; + if ( hythread_fat_lock_quota() ) { //wjw + //printf("jthread_monitor_enter(), gc_force_gc()\n"); + hythread_suspend_disable(); + gc_force_gc(); + hythread_suspend_enable(); + } + assert(monitor); hythread_suspend_disable(); lockword = vm_object_get_lockword_addr(monitor); @@ -166,6 +176,12 @@ IDATA status; assert(monitor); + if ( hythread_fat_lock_quota() ) { //wjw + //printf("jthread_monitor_try_enter(), gc_force_gc()\n"); + hythread_suspend_disable(); + gc_force_gc(); + hythread_suspend_enable(); + } hythread_suspend_disable(); lockword = vm_object_get_lockword_addr(monitor); status = hythread_thin_monitor_try_enter(lockword); @@ -286,6 +302,7 @@ * @param[in] nanos time to wait (in nanoseconds) * @sa java.lang.Object.wait() */ + IDATA VMCALL jthread_monitor_timed_wait(jobject monitor, jlong millis, jint nanos) { hythread_thin_monitor_t *lockword; IDATA status; @@ -293,8 +310,13 @@ apr_time_t wait_begin; jvmti_thread_t tm_java_thread; int disable_count; - /////// + if ( hythread_fat_lock_quota() ) { + //printf("jthread_monitor_timed_wait(), gc_force_gc()\n"); + hythread_suspend_disable(); + gc_force_gc(); + hythread_suspend_enable(); + } assert(monitor); hythread_suspend_disable(); Index: vm/thread/src/hythr.exp =================================================================== --- vm/thread/src/hythr.exp (revision 532501) +++ vm/thread/src/hythr.exp (working copy) @@ -87,6 +87,10 @@ 
hythread_thin_monitor_notify_all; hythread_thin_monitor_destroy; hythread_thin_monitor_get_owner; +hy_native_resource_is_live; +hy_reclaim_resources; +hythread_fat_lock_quota; + hythread_add_task; hythread_get_function_pointer; hythread_get_data_pointer; Index: vm/thread/src/hythr.def =================================================================== --- vm/thread/src/hythr.def (revision 532501) +++ vm/thread/src/hythr.def (working copy) @@ -86,6 +86,9 @@ hythread_thin_monitor_notify_all hythread_thin_monitor_destroy hythread_thin_monitor_get_owner +hy_native_resource_is_live +hy_reclaim_resources +hythread_fat_lock_quota hysem_post hysem_wait Index: vm/thread/src/thread_private.h =================================================================== --- vm/thread/src/thread_private.h (revision 532501) +++ vm/thread/src/thread_private.h (working copy) @@ -637,6 +637,7 @@ int os_cond_timedwait(hycond_t *cond, hymutex_t *mutex, I_64 ms, IDATA nano); + #ifdef __cplusplus } #endif Index: vm/thread/src/thread_native_thin_monitor.c =================================================================== --- vm/thread/src/thread_native_thin_monitor.c (revision 532501) +++ vm/thread/src/thread_native_thin_monitor.c (working copy) @@ -57,6 +57,10 @@ #define RECURSION_DEC(lockword_ptr, lockword) (*lockword_ptr=lockword - (1<<11)) #define MAX_RECURSION 31 +int number_of_occupied_array_slots; //wjw +#define MAX_ARRAY_ENTRIES 8*1024 + + IDATA owns_thin_lock(hythread_t thread, I_32 lockword) { IDATA this_id = thread->thread_id; assert(!IS_FAT_LOCK(lockword)); @@ -73,12 +77,22 @@ #ifdef LOCK_RESERVATION assert(!IS_RESERVED(lockword)); #endif + assert(monitor_id < MAX_ARRAY_ENTRIES); lockword&=0x7FF; lockword|=(monitor_id << 11) | 0x80000000; *lockword_ptr=lockword; apr_memory_rw_barrier(); } +IDATA get_fat_lock_id(hythread_thin_monitor_t *lockword_ptr) { + I_32 lockword = *lockword_ptr; + assert(lockword & 0x80000000); //fat lock bit better be set + lockword &= 0x7fFFffFF; // throw away 
the fat lock bit + lockword = lockword >> 11; + assert(lockword < MAX_ARRAY_ENTRIES); + return lockword; +} + IDATA is_fat_lock(hythread_thin_monitor_t lockword) { return IS_FAT_LOCK(lockword); } @@ -635,10 +649,14 @@ */ //FIXME: make table resizable, implement delete - -extern int table_size; -extern hythread_monitor_t *lock_table; -int fat_monitor_count = 1; +//wjw extern int table_size; +//wjw extern hythread_monitor_t *lock_table; + +hythread_monitor_t *lock_array = 0; + +unsigned char *live_array = 0; + +//wjw int fat_monitor_count = 1; // Lock table implementation @@ -649,9 +667,10 @@ hythread_monitor_t locktable_get_fat_monitor(IDATA lock_id) { hythread_monitor_t fat_monitor; TRACE(("LOCK ID in table %x\n", lock_id)); - assert(lock_id >=0 && lock_id < table_size); + assert(lock_id >=0 && lock_id < MAX_ARRAY_ENTRIES); //hythread_global_lock(); - fat_monitor = lock_table[lock_id]; + //wjw fat_monitor = lock_table[lock_id]; + fat_monitor = lock_array[lock_id]; //hythread_global_unlock(); return fat_monitor; } @@ -659,32 +678,110 @@ /* * Sets the value of the specific entry in the lock table */ +int array_cursor = 0; //wjw used to scan the lock_table for the next available entry; + IDATA locktable_put_fat_monitor(hythread_monitor_t fat_monitor) { - int id; - //hythread_global_lock(); - id = fat_monitor_count++; - if (id >= table_size) { - hythread_suspend_all(NULL, NULL); - table_size = table_size*2; - lock_table = (hythread_monitor_t *)realloc(lock_table, sizeof(hythread_monitor_t)*table_size); - assert(lock_table); - apr_memory_rw_barrier(); - hythread_resume_all(NULL); + int xx = 0; + int yy = 0; + int retval = 0; + + if (lock_array == 0) { //move this to thread_init eventually + __asm {int 3}; + lock_array = (hythread_monitor_t *)malloc(sizeof(hythread_monitor_t)* MAX_ARRAY_ENTRIES); + live_array = (unsigned char *)malloc(sizeof(unsigned char)* MAX_ARRAY_ENTRIES); + for (yy = 0; yy < MAX_ARRAY_ENTRIES; yy++) { + lock_array[yy] = 0; + live_array[yy] = 0; + } 
} + //hythread_global_lock(); --- is there a critical section in the callee?? + for(xx =0; xx < MAX_ARRAY_ENTRIES; xx++) { + if (lock_array[array_cursor] == 0) { + assert(live_array[array_cursor] == 0); + lock_array[array_cursor] = fat_monitor; + number_of_occupied_array_slots++; + retval = array_cursor; + break; + } + array_cursor++; + if (array_cursor == MAX_ARRAY_ENTRIES) array_cursor = 0; + } + //hythread_global_unlock(); race condition - lock_table[id] = fat_monitor; - //hythread_global_unlock(); - return id; -} + if(number_of_occupied_array_slots >= MAX_ARRAY_ENTRIES-2) { + printf("PANIC:: hard crash in thread_native_thin_monitor.c::locktable_put_fat_monitor() \n"); + assert(0); + } + return retval; +} +//wjw +//ugly hack, GC somehow does not always call hy_native_resource_is_live +//before calling hy_reclaim_resources() ??? Xiao Feng, can you figure this one out for us?? + +unsigned char live_objs_were_reported = 0; +void VMCALL hy_native_resource_is_live(U_32 lockword) +{ + IDATA index = 0; + index = get_fat_lock_id( (hythread_thin_monitor_t *) &lockword); + // toss printf("hy_native_resource_is_live(): index = %d\n", index); + live_array[index] = 0xff; // mark the fat lock entry as still alive + live_objs_were_reported = 0xff; +} + +void VMCALL hy_reclaim_resources() +{ + int xx = 0; + int old_number_of_occupied_array_slots = number_of_occupied_array_slots; + if (!live_objs_were_reported) return; + else live_objs_were_reported = 0; //reset for the next GC cycle, ugly hack, I don't understand GC internals. 
+ number_of_occupied_array_slots = 0; + for(xx=0; xx < MAX_ARRAY_ENTRIES; xx++) { + if (live_array[xx]) { + assert(lock_array[xx]); + number_of_occupied_array_slots++; + live_array[xx] = 0; // reset the live array for the next major GC cycle + } else { + if (lock_array[xx]) { + hythread_monitor_destroy(lock_array[xx]); + lock_array[xx] = 0; + //toss printf("hy_reclaim_resources() --- not live but need to release resources\n"); + } + } + } + printf("hy_reclaim_resources(), old_... = %d, number_... = %d\n", + old_number_of_occupied_array_slots, number_of_occupied_array_slots ); +} + +UDATA hythread_fat_lock_quota() { + + int junk = 0; //wjw remove this + hythread_t self = 0; + int disable_count = 0; + if ( number_of_occupied_array_slots > MAX_ARRAY_ENTRIES/2) { + self = tm_self_tls; + disable_count = self->disable_count; + if (disable_count == 0) { // a hack to get around a bug where jthread_monitor_enter() is called with the WRONG disable_count + return 0xFFffFFff; + } + else { + //printf("hythread_fat_lock_quota, disable_count = %d\n", disable_count); + junk++; + } + } + return 0; +} + + /* * Deletes the entry in the lock table with the given lock_id */ -hythread_monitor_t locktable_delete_entry(int lock_id) { +hythread_monitor_t locktable_delete_entry(int lock_id) { hythread_monitor_t m; - assert(lock_id >=0 && lock_id < table_size); - m = lock_table[lock_id]; - lock_table[lock_id] = NULL; + assert(0); //wjw debug when hit + assert(lock_id >=0 && lock_id < MAX_ARRAY_ENTRIES); + m = lock_array[lock_id]; + lock_array[lock_id] = NULL; return m; } Index: vm/thread/src/thread_init.c =================================================================== --- vm/thread/src/thread_init.c (revision 532501) +++ vm/thread/src/thread_init.c (working copy) @@ -49,8 +49,8 @@ hythread_group_t TM_DEFAULT_GROUP; hythread_group_t group_list; -hythread_monitor_t *lock_table = NULL; -int table_size = 8024; +//wjw hythread_monitor_t *lock_table = NULL; +//wjw int table_size = 8024; IDATA 
groups_count; @@ -173,8 +173,8 @@ status = hycond_create(&lib->nondaemon_thread_cond); assert(status == TM_ERROR_NONE); - lock_table = (hythread_monitor_t *)malloc(sizeof(hythread_monitor_t)*table_size); - assert(lock_table); + //wjw lock_table = (hythread_monitor_t *)malloc(sizeof(hythread_monitor_t)*table_size); + //wjwassert(lock_table); // init global monitor status=hythread_monitor_init_with_name(&p_global_monitor, 0, "Thread Global Monitor");