Index: thread_native_thin_monitor.c =================================================================== --- thread_native_thin_monitor.c (revision 544905) +++ thread_native_thin_monitor.c (working copy) @@ -152,77 +152,68 @@ * Used lockword */ IDATA unreserve_lock(hythread_thin_monitor_t *lockword_ptr) { - U_32 lockword = *lockword_ptr; + U_32 lockword = *lockword_ptr; // wjw --- NOTE does this need the "volatile" keyword to stop the compiler from doing common subexpression elimination?? + U_32 current_lockword = 0; // wjw -- an attempt to defeat compiler optimizations. Maybe just use "volatile"keyword instead?? + U_32 cas_lockword = 0; U_32 lockword_new; uint16 lock_id; - hythread_t owner; + hythread_t owner, current_owner; IDATA status; - // trylock used to prevent cyclic suspend deadlock - // the java_monitor_enter calls safe_point between attempts. - /*status = hymutex_trylock(&TM_LOCK); - if (status !=TM_ERROR_NONE) { - return status; - }*/ - + if (IS_FAT_LOCK(lockword)) { return TM_ERROR_NONE; } lock_id = THREAD_ID(lockword); - owner = hythread_get_thread(lock_id); - TRACE(("Unreserved other %d \n", ++unreserve_count/*, vm_get_object_class_name(lockword_ptr-1)*/)); - if (!IS_RESERVED(lockword) || IS_FAT_LOCK(lockword)) { - // hymutex_unlock(&TM_LOCK); - return TM_ERROR_NONE; + owner = hythread_get_thread(lock_id); + TRACE(("Unreserved other %d \n", ++unreserve_count/*, vm_get_object_class_name(lockword_ptr-1)*/)); + if (!IS_RESERVED(lockword) || IS_FAT_LOCK(lockword)) { + return TM_ERROR_NONE; + } + hymutex_lock(&TM_UNRESERVATION_LOCK); // wjw -- this is a new critical section, do not use TM_LOCK + // only one thread can execute the following code at any given instant + status=hythread_suspend_other(owner); // if the owner thread has died, this might crash (we need to fix hythread_suspend_other() to deal with dead threads) + if (status !=TM_ERROR_NONE) { + hymutex_unlock(&TM_UNRESERVATION_LOCK); + return status; } - // suspend owner - if (owner) { - 
assert(owner); - assert(hythread_get_id(owner) == lock_id); - assert(owner != hythread_self()); - status=hythread_suspend_other(owner); - if (status !=TM_ERROR_NONE) { - return status; - } - } + current_lockword = *lockword_ptr; + lock_id = THREAD_ID(current_lockword); + current_owner = hythread_get_thread(lock_id); + if (current_owner != owner) { + hymutex_unlock(&TM_UNRESERVATION_LOCK); + hythread_resume(owner); + return TM_ERROR_EBUSY; + } + // if we get here, the true owner of the lock is suspended and only one caller of unreserve_lock can execute this code at any given instant + // prepare new unreserved lockword and try to CAS it with old one. - while (IS_RESERVED(lockword)) { - assert(!IS_FAT_LOCK(lockword)); + if (IS_RESERVED(current_lockword)) { + assert(!IS_FAT_LOCK(current_lockword)); TRACE(("unreserving lock")); - lockword_new = (lockword | RESERVED_BITMASK); - if (RECURSION(lockword) != 0) { - assert(RECURSION(lockword) > 0); - assert(RECURSION(lockword_new) > 0); - RECURSION_DEC(&lockword_new, lockword_new); + lockword_new = (current_lockword | RESERVED_BITMASK); + if (RECURSION(current_lockword) != 0) { + assert(RECURSION(current_lockword) > 0); + assert(RECURSION(lockword_new) > 0); // useless, needs to be tossed + RECURSION_DEC(&lockword_new, lockword_new); // why decrement? Does anybody have a clue? 
 } else { lockword_new = lockword_new & 0x0000ffff; } - if (lockword == apr_atomic_cas32 (((volatile apr_uint32_t*) lockword_ptr), - (apr_uint32_t) lockword_new, lockword)) { - TRACE(("unreserved lock")); - break; - } - lockword = *lockword_ptr; + cas_lockword = apr_atomic_cas32 (((volatile apr_uint32_t*) lockword_ptr), + (apr_uint32_t) lockword_new, current_lockword); } - - // resume owner - if (owner) { - os_thread_yield_other(owner->os_handle); - hythread_resume(owner); + + hythread_resume(owner); + hymutex_unlock(&TM_UNRESERVATION_LOCK); + if (cas_lockword == current_lockword) { + return TM_ERROR_NONE; } - /* status = hymutex_unlock(&TM_LOCK);*/ - // Gregory - This lock, right after it was unreserved, may be // inflated by another thread and therefore instead of recursion // count and reserved flag it will have the fat monitor ID. The // assertion !IS_RESERVED(lockword) fails in this case. So it is // necessary to check first that monitor is not fat. - // To avoid race condition between checking two different - // conditions inside of assert, the lockword contents has to be - // loaded before checking. - lockword = *lockword_ptr; - assert(IS_FAT_LOCK(lockword) || !IS_RESERVED(lockword)); - return TM_ERROR_NONE; + return TM_ERROR_EBUSY; // CAS failed or lock no longer reserved; wjw -- Good point! The above needs to be carefully reviewed. } #else IDATA unreserve_lock(I_32* lockword_ptr) {