From feecdfde3f62282b639c2d064a3a8ba54de7829a Mon Sep 17 00:00:00 2001
From: Salikh Zakirov
Date: Fri, 2 Mar 2007 17:10:56 +0300
Subject: [PATCH] fixed callers of hymutex and hycond to use address

As a consequence of changing hymutex_t from 'struct HyMutex*' to
'struct HyMutex', the following change needs to be done in callers, from

    hymutex_t mutex;
    hymutex_init(&mutex);
    ...
    hymutex_lock(mutex);

to

    hymutex_t mutex;
    hymutex_init(&mutex);
    ...
    hymutex_lock(&mutex);

Notice hymutex_lock() now also needs a & address operator.
Some condition variable users were also relying on the fact that
the hycond_t is a pointer, and need fixing.
---
 vm/em/src/DrlEMImpl.cpp                        | 12 ++--
 vm/em/src/EBProfileCollector.cpp               | 10 ++--
 vm/em/src/EdgeProfileCollector.cpp             | 10 ++--
 vm/em/src/NValueProfileCollector.cpp           |  8 +-
 vm/em/src/NValueProfileCollector.h             |  4 +-
 vm/tests/unit/thread/test_performance.h        |  4 +-
 vm/tests/unit/thread/test_performance_basic.c  | 25 ++++----
 .../thread/test_performance_concurrent_mutex.c | 12 ++--
 vm/thread/src/thread_init.c                    | 29 ++++-----
 vm/thread/src/thread_java_basic.c              | 24 ++++----
 vm/thread/src/thread_native_basic.c            |  6 +-
 vm/thread/src/thread_native_condvar.c          |  2 +-
 vm/thread/src/thread_native_fat_monitor.c      | 53 +++++++----------
 vm/thread/src/thread_native_interrupt.c        | 26 ++++----
 vm/thread/src/thread_native_latch.c            | 51 +++++++--------
 vm/thread/src/thread_native_park.c             | 14 ++--
 vm/thread/src/thread_native_semaphore.c        | 64 +++++++-----------
 vm/thread/src/thread_native_thin_monitor.c     | 14 ++--
 vm/thread/src/thread_private.h                 | 20 ++----
 vm/thread/src/thread_ti_monitors.c             |  8 +-
 vm/vmcore/include/jvmti_internal.h             |  1 +
 vm/vmcore/src/jvmti/jvmti.cpp                  |  4 +-
 vm/vmcore/src/jvmti/jvmti_capability.cpp       |  5 +-
 vm/vmcore/src/jvmti/jvmti_event.cpp            | 27 +++------
 vm/vmcore/src/jvmti/jvmti_heap.cpp             |  5 +-
 vm/vmcore/src/thread/lock_manager.cpp          | 10 ++--
 26 files changed, 204 insertions(+), 244 deletions(-)

diff --git a/vm/em/src/DrlEMImpl.cpp b/vm/em/src/DrlEMImpl.cpp
index b946bc2..0d48913 100644
--- a/vm/em/src/DrlEMImpl.cpp
+++ b/vm/em/src/DrlEMImpl.cpp
@@ -116,7 +116,7 @@ DrlEMImpl::DrlEMImpl() : jh(NULL), _execute_method(NULL) {
 
 DrlEMImpl::~DrlEMImpl() {
     deallocateResources();
-    hymutex_destroy(recompilationLock);
+    hymutex_destroy(&recompilationLock);
 }
 
 void DrlEMImpl::initProfileAccess() {
@@ -763,15 +763,15 @@ bool DrlEMImpl::initProfileCollectors(RChain* chain, const std::string& config)
 
 
 void DrlEMImpl::methodProfileIsReady(MethodProfile* mp) {
-    hymutex_lock(recompilationLock);
+    hymutex_lock(&recompilationLock);
     if (methodsInRecompile.find((Method_Profile_Handle)mp)!=methodsInRecompile.end()) {
         //method is already recompiling by another thread or by this thread(recursion)
-        hymutex_unlock(recompilationLock);
+        hymutex_unlock(&recompilationLock);
         return;
     }
     methodsInRecompile.insert((Method_Profile_Handle)mp);
     nMethodsRecompiled++;
-    hymutex_unlock(recompilationLock);
+    hymutex_unlock(&recompilationLock);
 
     const char* methodName = NULL;
     const char* className = NULL;
@@ -812,9 +812,9 @@ void DrlEMImpl::methodProfileIsReady(MethodProfile* mp) {
             }
         }
     }
-    hymutex_lock(recompilationLock);
+    hymutex_lock(&recompilationLock);
    methodsInRecompile.erase((Method_Profile_Handle)mp);
-    hymutex_unlock(recompilationLock);
+    hymutex_unlock(&recompilationLock);
 }
 
 ProfileCollector* DrlEMImpl::getProfileCollector(EM_PCTYPE type, JIT_Handle jh, EM_JIT_PC_Role jitRole) const {
diff --git a/vm/em/src/EBProfileCollector.cpp b/vm/em/src/EBProfileCollector.cpp
index 34a3663..c60cd83 100644
---
a/vm/em/src/EBProfileCollector.cpp +++ b/vm/em/src/EBProfileCollector.cpp @@ -112,7 +112,7 @@ EBProfileCollector::~EBProfileCollector() { delete profile; } - hymutex_destroy(profilesLock); + hymutex_destroy(&profilesLock); } MethodProfile* EBProfileCollector::getMethodProfile(Method_Handle mh) const { @@ -126,7 +126,7 @@ MethodProfile* EBProfileCollector::getMethodProfile(Method_Handle mh) const { EBMethodProfile* EBProfileCollector::createProfile(Method_Handle mh) { EBMethodProfile* profile = new EBMethodProfile(this, mh); - hymutex_lock(profilesLock); + hymutex_lock(&profilesLock); assert(profilesByMethod.find(mh) == profilesByMethod.end()); profilesByMethod[mh] = profile; @@ -134,7 +134,7 @@ EBMethodProfile* EBProfileCollector::createProfile(Method_Handle mh) { newProfiles.push_back(profile); } - hymutex_unlock(profilesLock); + hymutex_unlock(&profilesLock); return profile; } @@ -155,10 +155,10 @@ static void logReadyProfile(const std::string& catName, const std::string& profi void EBProfileCollector::onTimeout() { assert(mode == EB_PCMODE_ASYNC); if(!newProfiles.empty()) { - hymutex_lock(profilesLock); + hymutex_lock(&profilesLock); greenProfiles.insert(greenProfiles.end(), newProfiles.begin(), newProfiles.end()); newProfiles.clear(); - hymutex_unlock(profilesLock); + hymutex_unlock(&profilesLock); } for (std::vector::iterator it = greenProfiles.begin(), end = greenProfiles.end(); it!=end; ++it) { diff --git a/vm/em/src/EdgeProfileCollector.cpp b/vm/em/src/EdgeProfileCollector.cpp index e5afb2d..833ddd3 100644 --- a/vm/em/src/EdgeProfileCollector.cpp +++ b/vm/em/src/EdgeProfileCollector.cpp @@ -113,7 +113,7 @@ EdgeProfileCollector::~EdgeProfileCollector() EdgeMethodProfile* profile = it->second; delete profile; } - hymutex_destroy(profilesLock); + hymutex_destroy(&profilesLock); } @@ -176,7 +176,7 @@ EdgeMethodProfile* EdgeProfileCollector::createProfile( Method_Handle mh, uint32* counterKeys, uint32 checkSum) { - hymutex_lock(profilesLock); + hymutex_lock(&profilesLock); EdgeMethodProfile* profile = new EdgeMethodProfile(this, mh); @@ -192,7 +192,7 @@ EdgeMethodProfile* EdgeProfileCollector::createProfile( Method_Handle mh, profilesByMethod[mh] = profile; newProfiles.push_back(profile); - hymutex_unlock(profilesLock); + hymutex_unlock(&profilesLock); return profile; } @@ -234,10 +234,10 @@ static void logReadyProfile(const std::string& catName, const std::string& profi void EdgeProfileCollector::onTimeout() { if(!newProfiles.empty()) { - hymutex_lock(profilesLock); + hymutex_lock(&profilesLock); greenProfiles.insert(greenProfiles.end(), newProfiles.begin(), newProfiles.end()); newProfiles.clear(); - hymutex_unlock(profilesLock); + hymutex_unlock(&profilesLock); } std::vector::iterator it = greenProfiles.begin(); diff --git a/vm/em/src/NValueProfileCollector.cpp b/vm/em/src/NValueProfileCollector.cpp index fd58f68..fce4d6b 100644 --- a/vm/em/src/NValueProfileCollector.cpp +++ b/vm/em/src/NValueProfileCollector.cpp @@ -125,7 +125,7 @@ void ValueProfileCollector::insert_into_tnv_table (struct Simple_TNV_Table* TNV_ ValueMethodProfile* ValueProfileCollector::createProfile(Method_Handle mh, uint32 numkeys, uint32 keys[]) { - hymutex_lock(profilesLock); + hymutex_lock(&profilesLock); ValueMethodProfile* profile = new ValueMethodProfile(this, mh); VPInstructionProfileData* vpmap = new VPInstructionProfileData[numkeys]; // Allocate space for value maps @@ -147,7 +147,7 @@ ValueMethodProfile* ValueProfileCollector::createProfile(Method_Handle mh, uint3 } assert(profilesByMethod.find(mh) == 
profilesByMethod.end()); profilesByMethod[mh] = profile; - hymutex_unlock(profilesLock); + hymutex_unlock(&profilesLock); return profile; } @@ -191,7 +191,7 @@ ValueProfileCollector::~ValueProfileCollector() ValueMethodProfile* profile = it->second; delete profile; } - hymutex_destroy(profilesLock); + hymutex_destroy(&profilesLock); } ValueMethodProfile::ValueMethodProfile(ValueProfileCollector* pc, Method_Handle mh) @@ -202,7 +202,7 @@ ValueMethodProfile::ValueMethodProfile(ValueProfileCollector* pc, Method_Handle ValueMethodProfile::~ValueMethodProfile() { - hymutex_destroy(lock); + hymutex_destroy(&lock); } void ValueMethodProfile::addNewValue(uint32 instructionKey, POINTER_SIZE_INT valueToAdd) diff --git a/vm/em/src/NValueProfileCollector.h b/vm/em/src/NValueProfileCollector.h index d14b6b9..984d9e6 100644 --- a/vm/em/src/NValueProfileCollector.h +++ b/vm/em/src/NValueProfileCollector.h @@ -91,8 +91,8 @@ public: public: ValueMethodProfile(ValueProfileCollector* pc, Method_Handle mh); ~ValueMethodProfile(); - void lockProfile() {hymutex_lock(lock);} - void unlockProfile() {hymutex_unlock(lock);} + void lockProfile() {hymutex_lock(&lock);} + void unlockProfile() {hymutex_unlock(&lock);} void dumpValues(std::ostream& os); void addNewValue(uint32 instructionKey, POINTER_SIZE_INT valueToAdd); POINTER_SIZE_INT getResult(uint32 instructionKey); diff --git a/vm/tests/unit/thread/test_performance.h b/vm/tests/unit/thread/test_performance.h index 727921e..5a662be 100644 --- a/vm/tests/unit/thread/test_performance.h +++ b/vm/tests/unit/thread/test_performance.h @@ -47,7 +47,7 @@ float const PERF_COEFFICIENT = 3; /* * Locks for waiting */ -hymutex_t tm_mutex_lock = NULL; +hymutex_t tm_mutex_lock; hycond_t tm_condition_lock = NULL; apr_thread_mutex_t* apr_mutex_lock = NULL; apr_thread_cond_t* apr_condition_lock = NULL; @@ -55,7 +55,7 @@ apr_thread_cond_t* apr_condition_lock = NULL; /* * Locks for concurrent mutex tests */ -hymutex_t tm_concurrent_mutex_lock = NULL; +hymutex_t tm_concurrent_mutex_lock; apr_thread_mutex_t* apr_concurrent_mutex_lock = NULL; /* diff --git a/vm/tests/unit/thread/test_performance_basic.c b/vm/tests/unit/thread/test_performance_basic.c index f59c356..aa9bc44 100644 --- a/vm/tests/unit/thread/test_performance_basic.c +++ b/vm/tests/unit/thread/test_performance_basic.c @@ -31,9 +31,9 @@ void* APR_THREAD_FUNC proc_apr_empty(apr_thread_t *thread, void *args) { } int proc_waiting(void *args) { - hymutex_lock(tm_mutex_lock); - hycond_wait(tm_condition_lock, tm_mutex_lock); - hymutex_unlock(tm_mutex_lock); + hymutex_lock(&tm_mutex_lock); + hycond_wait(&tm_condition_lock, &tm_mutex_lock); + hymutex_unlock(&tm_mutex_lock); return 0; } @@ -133,13 +133,12 @@ int test_hymutex_create_destroy(void) { for (i = 0; i < ITERATIONS; i++) { stat = hymutex_create(&tm_mutex_lock, APR_THREAD_MUTEX_DEFAULT); assert(!stat); - stat = hymutex_destroy(tm_mutex_lock); + stat = hymutex_destroy(&tm_mutex_lock); assert(!stat); } end = apr_time_now(); difference = difference + (end - start); } - tm_mutex_lock = NULL; difference = difference / PERF_FIDELITY; tested_threads_destroy(); @@ -192,15 +191,15 @@ int test_hymutex_lock_unlock(void) { for (j = 0; j < PERF_FIDELITY; j++) { start = apr_time_now(); for (i = 0; i < ITERATIONS; i++) { - stat = hymutex_lock(tm_mutex_lock); + stat = hymutex_lock(&tm_mutex_lock); assert(!stat); - stat = hymutex_unlock(tm_mutex_lock); + stat = hymutex_unlock(&tm_mutex_lock); assert(!stat); } end = apr_time_now(); difference = difference + (end - start); } - stat = 
hymutex_destroy(tm_mutex_lock); + stat = hymutex_destroy(&tm_mutex_lock); assert(!stat); difference = difference / PERF_FIDELITY; tested_threads_destroy(); @@ -254,15 +253,15 @@ int test_hymutex_trylock_unlock(void) { for (j = 0; j < PERF_FIDELITY; j++) { start = apr_time_now(); for (i = 0; i < ITERATIONS; i++) { - stat = hymutex_trylock(tm_mutex_lock); + stat = hymutex_trylock(&tm_mutex_lock); assert(!stat); - stat = hymutex_unlock(tm_mutex_lock); + stat = hymutex_unlock(&tm_mutex_lock); assert(!stat); } end = apr_time_now(); difference = difference + (end - start); } - stat = hymutex_destroy(tm_mutex_lock); + stat = hymutex_destroy(&tm_mutex_lock); assert(!stat); difference = difference / PERF_FIDELITY; tested_threads_destroy(); @@ -466,7 +465,7 @@ int test_hythread_set_private_data(void) { assert(!stat); stat = hythread_join(thread); assert(!stat); - stat = hymutex_destroy(tm_mutex_lock); + stat = hymutex_destroy(&tm_mutex_lock); assert(!stat); stat = hycond_destroy(tm_condition_lock); assert(!stat); @@ -562,7 +561,7 @@ int test_hythread_get_private_data(void) { assert(!stat); stat = hythread_join(thread); assert(!stat); - stat = hymutex_destroy(tm_mutex_lock); + stat = hymutex_destroy(&tm_mutex_lock); assert(!stat); stat = hycond_destroy(tm_condition_lock); assert(!stat); diff --git a/vm/tests/unit/thread/test_performance_concurrent_mutex.c b/vm/tests/unit/thread/test_performance_concurrent_mutex.c index e63cf02..9acac5f 100644 --- a/vm/tests/unit/thread/test_performance_concurrent_mutex.c +++ b/vm/tests/unit/thread/test_performance_concurrent_mutex.c @@ -52,9 +52,9 @@ int proc_concurrent(void *args) { int j = 0; - hymutex_lock(tm_mutex_lock); - hycond_wait(tm_condition_lock, tm_mutex_lock); - hymutex_unlock(tm_mutex_lock); + hymutex_lock(&tm_mutex_lock); + hycond_wait(&tm_condition_lock, &tm_mutex_lock); + hymutex_unlock(&tm_mutex_lock); for (j = 0; j < iterations; j++) { hymutex_lock(tm_concurrent_mutex_lock); @@ -308,11 +308,11 @@ int test_hythread_cuncurrent_mutex_tm(hythread_t threads_array[], hythread_join(threads_array[i]); } end = apr_time_now(); - stat = hymutex_destroy(tm_concurrent_mutex_lock); + stat = hymutex_destroy(&tm_concurrent_mutex_lock); assert(!stat); - stat = hymutex_destroy(tm_mutex_lock); + stat = hymutex_destroy(&tm_mutex_lock); assert(!stat); - stat = hycond_destroy(tm_condition_lock); + stat = hycond_destroy(&tm_condition_lock); assert(!stat); return (end - start); } diff --git a/vm/thread/src/thread_init.c b/vm/thread/src/thread_init.c index 3dd903d..b1d000e 100644 --- a/vm/thread/src/thread_init.c +++ b/vm/thread/src/thread_init.c @@ -39,8 +39,9 @@ apr_pool_t *TM_POOL = NULL; apr_threadkey_t *TM_THREAD_KEY; //Thread manager global lock -hymutex_t TM_START_LOCK = NULL; -hymutex_t FAT_MONITOR_TABLE_LOCK = NULL; +hymutex_t TM_START_LOCK; +static int TM_INITIALIZED = 0; +hymutex_t FAT_MONITOR_TABLE_LOCK; #define GLOBAL_MONITOR_NAME "global_monitor" hythread_monitor_t p_global_monitor; @@ -138,10 +139,8 @@ void VMCALL hythread_init(hythread_library_t lib) { } assert(TM_LIBRARY == lib); - // Check if someone already initialized the library. 
- if (TM_START_LOCK != NULL) { - return; - } + if (TM_INITIALIZED) return; + TM_INITIALIZED = 1; apr_status = apr_initialize(); assert(apr_status == APR_SUCCESS); @@ -206,7 +205,7 @@ void VMCALL hythread_lib_lock(hythread_t self) { IDATA status; assert(self == hythread_self()); - status = hymutex_lock(self->library->TM_LOCK); + status = hymutex_lock(&self->library->TM_LOCK); assert(status == TM_ERROR_NONE); } @@ -219,7 +218,7 @@ void VMCALL hythread_lib_unlock(hythread_t self) { IDATA status; assert(self == hythread_self()); - status = hymutex_unlock(self->library->TM_LOCK); + status = hymutex_unlock(&self->library->TM_LOCK); assert(status == TM_ERROR_NONE); } @@ -236,13 +235,13 @@ IDATA VMCALL hythread_global_lock() { // we need not care about suspension if the thread // is not even tattached to hythread if (self == NULL) - return hymutex_lock(TM_LIBRARY->TM_LOCK); + return hymutex_lock(&TM_LIBRARY->TM_LOCK); // suspend_disable_count must be 0 on potentially // blocking operation to prevent suspension deadlocks, // meaning that the thread is safe for suspension saved_count = reset_suspend_disable(); - r = hymutex_lock(TM_LIBRARY->TM_LOCK); + r = hymutex_lock(&TM_LIBRARY->TM_LOCK); if (r) return r; // make sure we do not get a global thread lock @@ -250,10 +249,10 @@ IDATA VMCALL hythread_global_lock() { while (self->suspend_request) { // give up global thread lock before safepoint, // because this thread can be suspended at a safepoint - r = hymutex_unlock(TM_LIBRARY->TM_LOCK); + r = hymutex_unlock(&TM_LIBRARY->TM_LOCK); if (r) return r; hythread_safe_point(); - r = hymutex_lock(TM_LIBRARY->TM_LOCK); + r = hymutex_lock(&TM_LIBRARY->TM_LOCK); if (r) return r; } @@ -268,7 +267,7 @@ IDATA VMCALL hythread_global_lock() { * */ IDATA VMCALL hythread_global_unlock() { - return hymutex_unlock(TM_LIBRARY->TM_LOCK);; + return hymutex_unlock(&TM_LIBRARY->TM_LOCK);; } hythread_group_t get_java_thread_group(void) { @@ -319,11 +318,11 @@ static IDATA destroy_group_list() { } IDATA acquire_start_lock() { - return hymutex_lock(TM_START_LOCK); + return hymutex_lock(&TM_START_LOCK); } IDATA release_start_lock() { - return hymutex_unlock(TM_START_LOCK); + return hymutex_unlock(&TM_START_LOCK); } /* diff --git a/vm/thread/src/thread_java_basic.c b/vm/thread/src/thread_java_basic.c index ab9c562..4bd8e8e 100644 --- a/vm/thread/src/thread_java_basic.c +++ b/vm/thread/src/thread_java_basic.c @@ -643,26 +643,26 @@ IDATA VMCALL jthread_wait_for_all_nondaemon_threads() { jvmti_thread = hythread_get_private_data(native_thread); lib = native_thread->library; - status = hymutex_lock(lib->TM_LOCK); + status = hymutex_lock(&lib->TM_LOCK); if (status != TM_ERROR_NONE) return status; if (lib->nondaemon_thread_count == 1 && !jvmti_thread->daemon) { - status = hymutex_unlock(lib->TM_LOCK); + status = hymutex_unlock(&lib->TM_LOCK); return status; } while ((!jvmti_thread->daemon && lib->nondaemon_thread_count > 1) || (jvmti_thread->daemon && lib->nondaemon_thread_count > 0)) { - status = hycond_wait(lib->nondaemon_thread_cond, lib->TM_LOCK); + status = hycond_wait(&lib->nondaemon_thread_cond, &lib->TM_LOCK); //check interruption and other problems TRACE(("TM wait for nondaemons notified, count: %d", lib->nondaemon_thread_count)); if (status != TM_ERROR_NONE) { - hymutex_unlock(lib->TM_LOCK); + hymutex_unlock(&lib->TM_LOCK); return status; } } - status = hymutex_unlock(lib->TM_LOCK); + status = hymutex_unlock(&lib->TM_LOCK); return status; } @@ -709,11 +709,11 @@ IDATA increase_nondaemon_threads_count(hythread_t self) { lib = 
self->library; - status = hymutex_lock(lib->TM_LOCK); + status = hymutex_lock(&lib->TM_LOCK); if (status != TM_ERROR_NONE) return status; lib->nondaemon_thread_count++; - status = hymutex_unlock(lib->TM_LOCK); + status = hymutex_unlock(&lib->TM_LOCK); return status; } @@ -723,11 +723,11 @@ IDATA countdown_nondaemon_threads(hythread_t self) { lib = self->library; - status = hymutex_lock(lib->TM_LOCK); + status = hymutex_lock(&lib->TM_LOCK); if (status != TM_ERROR_NONE) return status; if (lib->nondaemon_thread_count <= 0) { - status = hymutex_unlock(lib->TM_LOCK); + status = hymutex_unlock(&lib->TM_LOCK); if (status != TM_ERROR_NONE) return status; return TM_ERROR_ILLEGAL_STATE; } @@ -735,14 +735,14 @@ IDATA countdown_nondaemon_threads(hythread_t self) { TRACE(("TM: nondaemons decreased, thread: %p count: %d\n", self, lib->nondaemon_thread_count)); lib->nondaemon_thread_count--; if (lib->nondaemon_thread_count <= 1) { - status = hycond_notify_all(lib->nondaemon_thread_cond); + status = hycond_notify_all(&lib->nondaemon_thread_cond); TRACE(("TM: nondaemons all dead, thread: %p count: %d\n", self, lib->nondaemon_thread_count)); if (status != TM_ERROR_NONE) { - hymutex_unlock(lib->TM_LOCK); + hymutex_unlock(&lib->TM_LOCK); return status; } } - status = hymutex_unlock(lib->TM_LOCK); + status = hymutex_unlock(&lib->TM_LOCK); return status; } diff --git a/vm/thread/src/thread_native_basic.c b/vm/thread/src/thread_native_basic.c index 4a0cfe0..37df2ad 100644 --- a/vm/thread/src/thread_native_basic.c +++ b/vm/thread/src/thread_native_basic.c @@ -409,11 +409,11 @@ IDATA thread_sleep_impl(I_64 millis, IDATA nanos, IDATA interruptable) { // Report error in case current thread is not attached if (!thread) return TM_ERROR_UNATTACHED_THREAD; - hymutex_lock(thread->mutex); + hymutex_lock(&thread->mutex); thread->state |= TM_THREAD_STATE_SLEEPING; - status = condvar_wait_impl(thread->condition, thread->mutex, millis, nanos, interruptable); + status = condvar_wait_impl(&thread->condition, &thread->mutex, millis, nanos, interruptable); thread->state &= ~TM_THREAD_STATE_SLEEPING; - hymutex_unlock(thread->mutex); + hymutex_unlock(&thread->mutex); return (status == TM_ERROR_INTERRUPT && interruptable) ? TM_ERROR_INTERRUPT : TM_ERROR_NONE; } diff --git a/vm/thread/src/thread_native_condvar.c b/vm/thread/src/thread_native_condvar.c index 83e1921..ed6d3bb 100644 --- a/vm/thread/src/thread_native_condvar.c +++ b/vm/thread/src/thread_native_condvar.c @@ -31,7 +31,7 @@ /** * Waits on a conditional, handling interruptions and thread state. 
*/ -static IDATA condvar_wait_impl(hycond_t *cond, hymutex_t *mutex, I_64 ms, IDATA nano, IDATA interruptable) { +IDATA condvar_wait_impl(hycond_t *cond, hymutex_t *mutex, I_64 ms, IDATA nano, IDATA interruptable) { int r; int disable_count; hythread_t self; diff --git a/vm/thread/src/thread_native_fat_monitor.c b/vm/thread/src/thread_native_fat_monitor.c index 86967f1..96aff2f 100644 --- a/vm/thread/src/thread_native_fat_monitor.c +++ b/vm/thread/src/thread_native_fat_monitor.c @@ -43,21 +43,18 @@ * */ IDATA VMCALL hythread_monitor_init_with_name(hythread_monitor_t *mon_ptr, UDATA flags, char *name) { + int r; hythread_monitor_t mon; - apr_pool_t *pool = get_local_pool(); - apr_status_t apr_status; mon = calloc(1, sizeof(HyThreadMonitor)); if (mon == NULL) { return TM_ERROR_OUT_OF_MEMORY; } - apr_status = apr_thread_mutex_create((apr_thread_mutex_t**)&(mon->mutex), TM_MUTEX_NESTED, pool); - if (apr_status != APR_SUCCESS) goto cleanup; + r = hymutex_create(&mon->mutex, TM_MUTEX_NESTED); + if (r) goto cleanup; + r = hycond_create(&mon->condition); + if (r) goto cleanup; - apr_status = apr_thread_cond_create((apr_thread_cond_t**)&(mon->condition), pool); - if (apr_status != APR_SUCCESS) goto cleanup; - - mon->pool = pool; mon->flags = flags; mon->name = name; mon->owner = 0; @@ -68,7 +65,7 @@ IDATA VMCALL hythread_monitor_init_with_name(hythread_monitor_t *mon_ptr, UDATA cleanup: free(mon); - return CONVERT_ERROR(apr_status); + return r; } /** @@ -88,7 +85,7 @@ IDATA VMCALL hythread_monitor_enter(hythread_monitor_t mon_ptr) { IDATA status; hythread_t self = tm_self_tls; if (mon_ptr->owner != self) { - status = hymutex_lock(mon_ptr->mutex); + status = hymutex_lock(&mon_ptr->mutex); mon_ptr->owner = self; assert(status == TM_ERROR_NONE); } else { @@ -116,7 +113,7 @@ IDATA VMCALL hythread_monitor_try_enter(hythread_monitor_t mon_ptr) { IDATA status; hythread_t self = tm_self_tls; if (mon_ptr->owner != self) { - status = hymutex_trylock(mon_ptr->mutex); + status = hymutex_trylock(&mon_ptr->mutex); if (status == TM_ERROR_NONE) { mon_ptr->owner = tm_self_tls; } @@ -150,7 +147,7 @@ IDATA VMCALL hythread_monitor_exit(hythread_monitor_t mon_ptr) { } if (mon_ptr->recursion_count == 0) { mon_ptr->owner = NULL; - status = hymutex_unlock(mon_ptr->mutex); + status = hymutex_unlock(&mon_ptr->mutex); } else { mon_ptr->recursion_count--; } @@ -179,17 +176,17 @@ IDATA monitor_wait_impl(hythread_monitor_t mon_ptr, I_64 ms, IDATA nano, IDATA i mon_ptr->owner = NULL; mon_ptr->recursion_count =0; mon_ptr->wait_count++; - hymutex_lock(self->mutex); - self->current_condition = mon_ptr->condition; + hymutex_lock(&self->mutex); + self->current_condition = &mon_ptr->condition; self->state |= TM_THREAD_STATE_IN_MONITOR_WAIT; self->waited_monitor = mon_ptr; - hymutex_unlock(self->mutex); + hymutex_unlock(&self->mutex); do { apr_time_t start; assert(0 <= mon_ptr->notify_flag && mon_ptr->notify_flag < mon_ptr->wait_count); start = apr_time_now(); - status = condvar_wait_impl(mon_ptr->condition, mon_ptr->mutex, ms, nano, interruptable); + status = condvar_wait_impl(&mon_ptr->condition, &mon_ptr->mutex, ms, nano, interruptable); if (status != TM_ERROR_NONE || mon_ptr->notify_flag || hythread_interrupted(self)) break; @@ -214,19 +211,19 @@ IDATA monitor_wait_impl(hythread_monitor_t mon_ptr, I_64 ms, IDATA nano, IDATA i } while (1); if (mon_ptr->notify_flag) mon_ptr->notify_flag -= 1; - hymutex_lock(self->mutex); + hymutex_lock(&self->mutex); self->state &= ~TM_THREAD_STATE_IN_MONITOR_WAIT; self->current_condition = NULL; 
self->waited_monitor = NULL; - hymutex_unlock(self->mutex); + hymutex_unlock(&self->mutex); mon_ptr->wait_count--; if (self->suspend_request) { int save_count; - hymutex_unlock(mon_ptr->mutex); + hymutex_unlock(&mon_ptr->mutex); hythread_safe_point(); save_count = reset_suspend_disable(); - hymutex_lock(mon_ptr->mutex); + hymutex_lock(&mon_ptr->mutex); set_suspend_disable(save_count); } @@ -329,7 +326,7 @@ IDATA VMCALL hythread_monitor_notify_all(hythread_monitor_t mon_ptr) { return TM_ERROR_ILLEGAL_STATE; } mon_ptr->notify_flag = mon_ptr->wait_count; - return hycond_notify_all(mon_ptr->condition); + return hycond_notify_all(&mon_ptr->condition); } @@ -352,7 +349,7 @@ IDATA VMCALL hythread_monitor_notify(hythread_monitor_t mon_ptr) { } if (mon_ptr->notify_flag < mon_ptr->wait_count) mon_ptr->notify_flag += 1; - return hycond_notify(mon_ptr->condition); + return hycond_notify(&mon_ptr->condition); } @@ -371,20 +368,12 @@ IDATA VMCALL hythread_monitor_notify(hythread_monitor_t mon_ptr) { * @see hythread_monitor_init_with_name */ IDATA VMCALL hythread_monitor_destroy(hythread_monitor_t monitor) { - apr_status_t apr_status; - apr_pool_t *pool = monitor->pool; if (monitor->owner != NULL || monitor->wait_count > 0) { return TM_ERROR_ILLEGAL_STATE; } - if (pool != get_local_pool()) { - return local_pool_cleanup_register(hythread_monitor_destroy, monitor); - } - apr_status=apr_thread_mutex_destroy((apr_thread_mutex_t*)monitor->mutex); - if (apr_status != APR_SUCCESS) return CONVERT_ERROR(apr_status); - apr_status=apr_thread_cond_destroy((apr_thread_cond_t*)monitor->condition); - if (apr_status != APR_SUCCESS) return CONVERT_ERROR(apr_status); - // apr_pool_free(pool, monitor); + hymutex_destroy(&monitor->mutex); + hycond_destroy(&monitor->condition); free(monitor); return TM_ERROR_NONE; } diff --git a/vm/thread/src/thread_native_interrupt.c b/vm/thread/src/thread_native_interrupt.c index e493c41..2391f9b 100644 --- a/vm/thread/src/thread_native_interrupt.c +++ b/vm/thread/src/thread_native_interrupt.c @@ -38,11 +38,11 @@ static int interrupter_thread_function(void *args); void VMCALL hythread_interrupt(hythread_t thread) { IDATA status; hythread_t thr = NULL; - hymutex_lock(thread->mutex); + hymutex_lock(&thread->mutex); thread->state |= TM_THREAD_STATE_INTERRUPTED; if (thread == tm_self_tls) { - hymutex_unlock(thread->mutex); + hymutex_unlock(&thread->mutex); return; } @@ -62,29 +62,29 @@ void VMCALL hythread_interrupt(hythread_t thread) { } } - hymutex_unlock(thread->mutex); + hymutex_unlock(&thread->mutex); } + static int interrupter_thread_function(void *args) { hythread_t thread = (hythread_t)args; hythread_monitor_t monitor = NULL; - hymutex_lock(thread->mutex); + hymutex_lock(&thread->mutex); if (thread->waited_monitor) { monitor = thread->waited_monitor; } else { - hymutex_unlock(thread->mutex); + hymutex_unlock(&thread->mutex); hythread_exit(NULL); return 0; } - hymutex_unlock(thread->mutex); - + hymutex_unlock(&thread->mutex); - hythread_monitor_enter(monitor); - hythread_monitor_notify_all(monitor); + hythread_monitor_enter(monitor); + hythread_monitor_notify_all(monitor); - hythread_exit(monitor); - return 0; + hythread_exit(monitor); + return 0; } /** @@ -95,10 +95,10 @@ static int interrupter_thread_function(void *args) { */ UDATA VMCALL hythread_clear_interrupted_other(hythread_t thread) { int interrupted; - hymutex_lock(thread->mutex); + hymutex_lock(&thread->mutex); interrupted = thread->state & TM_THREAD_STATE_INTERRUPTED; thread->state &= ~TM_THREAD_STATE_INTERRUPTED; - 
hymutex_unlock(thread->mutex); + hymutex_unlock(&thread->mutex); return interrupted ? TM_ERROR_INTERRUPT : TM_ERROR_NONE; } diff --git a/vm/thread/src/thread_native_latch.c b/vm/thread/src/thread_native_latch.c index 9b037ba..bc9008d 100644 --- a/vm/thread/src/thread_native_latch.c +++ b/vm/thread/src/thread_native_latch.c @@ -41,6 +41,7 @@ * @sa java.util.concurrent.CountDownLatch */ IDATA VMCALL hylatch_create(hylatch_t *latch, IDATA count) { + int r; hylatch_t l; apr_pool_t *pool = get_local_pool(); apr_status_t apr_status; @@ -49,17 +50,18 @@ IDATA VMCALL hylatch_create(hylatch_t *latch, IDATA count) { if (l == NULL) { return TM_ERROR_OUT_OF_MEMORY; } - apr_status = apr_thread_mutex_create((apr_thread_mutex_t**)&(l->mutex), TM_MUTEX_DEFAULT, pool); - if (apr_status != APR_SUCCESS) return CONVERT_ERROR(apr_status); + r = hymutex_create(&l->mutex, TM_MUTEX_DEFAULT); + if (r) goto cleanup; + r = hycond_create(&l->condition); + if (r) goto cleanup; - apr_status = apr_thread_cond_create((apr_thread_cond_t**)&(l->condition), pool); - - if (apr_status != APR_SUCCESS) return CONVERT_ERROR(apr_status); - l->count = count; - l->pool = pool; *latch = l; return TM_ERROR_NONE; + +cleanup: + free(l); + return r; } //wait method implementation @@ -67,19 +69,19 @@ IDATA VMCALL hylatch_create(hylatch_t *latch, IDATA count) { static IDATA latch_wait_impl(hylatch_t latch, I_64 ms, IDATA nano, IDATA interruptable) { IDATA status; - status = hymutex_lock(latch->mutex); + status = hymutex_lock(&latch->mutex); if (status != TM_ERROR_NONE) return status; while (latch->count) { - status = condvar_wait_impl(latch->condition, latch->mutex, ms, nano, interruptable); + status = condvar_wait_impl(&latch->condition, &latch->mutex, ms, nano, interruptable); //check interruption and other problems if (status != TM_ERROR_NONE) { - hymutex_unlock(latch->mutex); + hymutex_unlock(&latch->mutex); return status; } if (ms || nano) break; } - status = hymutex_unlock(latch->mutex); + status = hymutex_unlock(&latch->mutex); if (status != TM_ERROR_NONE) return status; return TM_ERROR_NONE; @@ -136,10 +138,10 @@ IDATA VMCALL hylatch_wait_interruptable(hylatch_t latch, I_64 ms, IDATA nano) { IDATA VMCALL hylatch_set(hylatch_t latch, IDATA count) { IDATA status; - status = hymutex_lock(latch->mutex); + status = hymutex_lock(&latch->mutex); if (status != TM_ERROR_NONE) return status; latch->count = count; - status = hymutex_unlock(latch->mutex); + status = hymutex_unlock(&latch->mutex); if (status != TM_ERROR_NONE) return status; return TM_ERROR_NONE; @@ -155,23 +157,23 @@ IDATA VMCALL hylatch_set(hylatch_t latch, IDATA count) { IDATA VMCALL hylatch_count_down(hylatch_t latch) { IDATA status; - status = hymutex_lock(latch->mutex); + status = hymutex_lock(&latch->mutex); if (status != TM_ERROR_NONE) return status; if (latch->count <= 0) { - status = hymutex_unlock(latch->mutex); + status = hymutex_unlock(&latch->mutex); if (status != TM_ERROR_NONE) return status; return TM_ERROR_ILLEGAL_STATE; } latch->count--; if (latch->count == 0) { - status = hycond_notify_all(latch->condition); + status = hycond_notify_all(&latch->condition); if (status != TM_ERROR_NONE) { - hymutex_unlock(latch->mutex); + hymutex_unlock(&latch->mutex); return status; } } - status = hymutex_unlock(latch->mutex); + status = hymutex_unlock(&latch->mutex); if (status != TM_ERROR_NONE) return status; return TM_ERROR_NONE; @@ -188,10 +190,10 @@ IDATA VMCALL hylatch_count_down(hylatch_t latch) { IDATA VMCALL hylatch_get_count(IDATA *count, hylatch_t latch) { IDATA 
status; - status = hymutex_lock(latch->mutex); + status = hymutex_lock(&latch->mutex); if (status != TM_ERROR_NONE) return status; *count = latch->count; - status = hymutex_unlock(latch->mutex); + status = hymutex_unlock(&latch->mutex); if (status != TM_ERROR_NONE) return status; return TM_ERROR_NONE; @@ -202,13 +204,8 @@ IDATA VMCALL hylatch_get_count(IDATA *count, hylatch_t latch) { * @param[in] latch the latch */ IDATA VMCALL hylatch_destroy(hylatch_t latch) { - apr_pool_t *pool = latch->pool; - if (pool != get_local_pool()) { - return local_pool_cleanup_register(hylatch_destroy, latch); - } - apr_thread_mutex_destroy((apr_thread_mutex_t*)latch->mutex); - apr_thread_cond_destroy((apr_thread_cond_t*)latch->condition); - // apr_pool_free(pool, latch); + hymutex_destroy(&latch->mutex); + hycond_destroy(&latch->condition); free(latch); return TM_ERROR_NONE; diff --git a/vm/thread/src/thread_native_park.c b/vm/thread/src/thread_native_park.c index 40fc9af..7bad6d7 100644 --- a/vm/thread/src/thread_native_park.c +++ b/vm/thread/src/thread_native_park.c @@ -46,16 +46,16 @@ IDATA VMCALL hythread_park(I_64 millis, IDATA nanos) { hythread_t t = tm_self_tls; assert(t); - hymutex_lock(t->mutex); + hymutex_lock(&t->mutex); if (t->state & TM_THREAD_STATE_UNPARKED) { t->state &= ~TM_THREAD_STATE_UNPARKED; - hymutex_unlock(t->mutex); + hymutex_unlock(&t->mutex); return (t->state & TM_THREAD_STATE_INTERRUPTED) ? TM_ERROR_INTERRUPT : TM_ERROR_NONE; } t->state |= TM_THREAD_STATE_PARKED; - status = hycond_wait_interruptable(t->condition, t->mutex, millis, nanos); + status = hycond_wait_interruptable(&t->condition, &t->mutex, millis, nanos); t->state &= ~TM_THREAD_STATE_PARKED; //the status should be restored for j.u.c.LockSupport @@ -63,7 +63,7 @@ IDATA VMCALL hythread_park(I_64 millis, IDATA nanos) { t->state |= TM_THREAD_STATE_INTERRUPTED; } - hymutex_unlock(t->mutex); + hymutex_unlock(&t->mutex); return status; } @@ -83,14 +83,14 @@ void VMCALL hythread_unpark(hythread_t thread) { return; } - hymutex_lock(thread->mutex); + hymutex_lock(&thread->mutex); if (thread->state & TM_THREAD_STATE_PARKED) { thread->state &= ~TM_THREAD_STATE_PARKED; - hycond_notify_all(thread->condition); + hycond_notify_all(&thread->condition); } else { thread->state |= TM_THREAD_STATE_UNPARKED; } - hymutex_unlock(thread->mutex); + hymutex_unlock(&thread->mutex); } diff --git a/vm/thread/src/thread_native_semaphore.c b/vm/thread/src/thread_native_semaphore.c index 5f68521..dc74bcb 100644 --- a/vm/thread/src/thread_native_semaphore.c +++ b/vm/thread/src/thread_native_semaphore.c @@ -36,40 +36,41 @@ * @param[in] max_count maximum semaphore count */ IDATA VMCALL hysem_create(hysem_t *sem, UDATA initial_count, UDATA max_count) { + int r; hysem_t l; - apr_pool_t *pool = get_local_pool(); - apr_status_t apr_status; l = malloc(sizeof(HySemaphore)); if (l == NULL) { - return TM_ERROR_OUT_OF_MEMORY; + return TM_ERROR_OUT_OF_MEMORY; } - apr_status = apr_thread_mutex_create((apr_thread_mutex_t**)&(l->mutex), TM_MUTEX_DEFAULT, pool); - if (apr_status != APR_SUCCESS) return CONVERT_ERROR(apr_status); + r = hymutex_create(&l->mutex, TM_MUTEX_DEFAULT); + if (r) goto cleanup; + r = hycond_create(&l->condition); + if (r) goto cleanup; - apr_status = apr_thread_cond_create((apr_thread_cond_t**)&(l->condition), pool); - - if (apr_status != APR_SUCCESS) return CONVERT_ERROR(apr_status); l->count = initial_count; l->max_count = max_count; - l->pool = pool; *sem = l; return TM_ERROR_NONE; + +cleanup: + free(l); + return r; } IDATA 
sem_wait_impl(hysem_t sem, I_64 ms, IDATA nano, IDATA interruptable) { IDATA status; - status = hymutex_lock(sem->mutex); + status = hymutex_lock(&sem->mutex); if (status != TM_ERROR_NONE) return status; //printf("wait %x %d\n", sem, sem->count); //fflush(NULL); while (sem->count <= 0) { - status = condvar_wait_impl(sem->condition, sem->mutex, ms, nano, interruptable); + status = condvar_wait_impl(&sem->condition, &sem->mutex, ms, nano, interruptable); //check interruption and timeout if (status != TM_ERROR_NONE) { - hymutex_unlock(sem->mutex); + hymutex_unlock(&sem->mutex); return status; } @@ -78,14 +79,14 @@ IDATA sem_wait_impl(hysem_t sem, I_64 ms, IDATA nano, IDATA interruptable) { //should we check here if timeout is not supposed to happen if (sem->count == 0 /*&& (ms || nano)*/) { if (ms || nano) { - hymutex_unlock(sem->mutex); + hymutex_unlock(&sem->mutex); return TM_ERROR_TIMEOUT; } else { assert(0); } } sem->count--; - status = hymutex_unlock(sem->mutex); + status = hymutex_unlock(&sem->mutex); if (status != TM_ERROR_NONE) return status; return TM_ERROR_NONE; @@ -149,20 +150,20 @@ IDATA VMCALL hysem_post(hysem_t sem) { IDATA status; //printf("post %x %d\n", sem, sem->count); //fflush(NULL); - status = hymutex_lock(sem->mutex); + status = hymutex_lock(&sem->mutex); if (status != TM_ERROR_NONE) return status; if (sem->count >= sem->max_count) { - hymutex_unlock(sem->mutex); + hymutex_unlock(&sem->mutex); //printf("illegal state %d : %d \n", sem->count, sem->max_count); //fflush(NULL); return TM_ERROR_ILLEGAL_STATE; } sem->count++; if (sem->count > 0) { - hycond_notify(sem->condition); + hycond_notify(&sem->condition); } - status = hymutex_unlock(sem->mutex); + status = hymutex_unlock(&sem->mutex); if (status != TM_ERROR_NONE) return status; return TM_ERROR_NONE; } @@ -176,22 +177,22 @@ IDATA VMCALL hysem_post(hysem_t sem) { IDATA VMCALL hysem_set(hysem_t sem, IDATA count) { IDATA status; - status = hymutex_lock(sem->mutex); + status = hymutex_lock(&sem->mutex); if (status != TM_ERROR_NONE) return status; if (count > sem->max_count) { - hymutex_unlock(sem->mutex); + hymutex_unlock(&sem->mutex); if (status != TM_ERROR_NONE) return status; return TM_ERROR_ILLEGAL_STATE; } sem->count = count; if (count > 0) { - status = hycond_notify_all(sem->condition); + status = hycond_notify_all(&sem->condition); if (status != TM_ERROR_NONE) { - hymutex_unlock(sem->mutex); + hymutex_unlock(&sem->mutex); return status; } } - status = hymutex_unlock(sem->mutex); + status = hymutex_unlock(&sem->mutex); if (status != TM_ERROR_NONE) return status; return TM_ERROR_NONE; @@ -206,10 +207,10 @@ IDATA VMCALL hysem_set(hysem_t sem, IDATA count) { IDATA VMCALL hysem_getvalue(IDATA *count, hysem_t sem) { IDATA status; - status = hymutex_lock(sem->mutex); + status = hymutex_lock(&sem->mutex); if (status != TM_ERROR_NONE) return status; *count = sem->count; - status = hymutex_unlock(sem->mutex); + status = hymutex_unlock(&sem->mutex); if (status != TM_ERROR_NONE) return status; return TM_ERROR_NONE; @@ -228,15 +229,8 @@ IDATA VMCALL hysem_getvalue(IDATA *count, hysem_t sem) { * @see hysem_init, hysem_wait, hysem_post */ IDATA VMCALL hysem_destroy(hysem_t sem) { - apr_status_t apr_status; - apr_pool_t *pool = sem->pool; - if (pool != get_local_pool()) { - return local_pool_cleanup_register(hysem_destroy, sem); - } - apr_status=apr_thread_mutex_destroy((apr_thread_mutex_t*)sem->mutex); - if (apr_status != APR_SUCCESS) return CONVERT_ERROR(apr_status); - 
apr_status=apr_thread_cond_destroy((apr_thread_cond_t*)sem->condition); - // apr_pool_free(pool, sem); + hymutex_destroy(&sem->mutex); + hycond_destroy(&sem->condition); free(sem); - return CONVERT_ERROR(apr_status);; + return TM_ERROR_NONE; } diff --git a/vm/thread/src/thread_native_thin_monitor.c b/vm/thread/src/thread_native_thin_monitor.c index 3a02109..2ce12aa 100644 --- a/vm/thread/src/thread_native_thin_monitor.c +++ b/vm/thread/src/thread_native_thin_monitor.c @@ -139,7 +139,7 @@ IDATA unreserve_lock(hythread_thin_monitor_t *lockword_ptr) { IDATA status; // trylock used to prevent cyclic suspend deadlock // the java_monitor_enter calls safe_point between attempts. - /*status = hymutex_trylock(TM_LOCK); + /*status = hymutex_trylock(&TM_LOCK); if (status !=TM_ERROR_NONE) { return status; }*/ @@ -151,7 +151,7 @@ IDATA unreserve_lock(hythread_thin_monitor_t *lockword_ptr) { owner = hythread_get_thread(lock_id); TRACE(("Unreserved other %d \n", ++unreserve_count/*, vm_get_object_class_name(lockword_ptr-1)*/)); if (!IS_RESERVED(lockword) || IS_FAT_LOCK(lockword)) { - // hymutex_unlock(TM_LOCK); + // hymutex_unlock(&TM_LOCK); return TM_ERROR_NONE; } // suspend owner @@ -190,7 +190,7 @@ IDATA unreserve_lock(hythread_thin_monitor_t *lockword_ptr) { hythread_resume(owner); } - /* status = hymutex_unlock(TM_LOCK);*/ + /* status = hymutex_unlock(&TM_LOCK);*/ // Gregory - This lock, right after it was unreserved, may be // inflated by another thread and therefore instead of recursion @@ -561,12 +561,12 @@ hythread_monitor_t VMCALL inflate_lock(hythread_thin_monitor_t *lockword_ptr) { IDATA fat_monitor_id; I_32 lockword; int i; - status=hymutex_lock(FAT_MONITOR_TABLE_LOCK); + status=hymutex_lock(&FAT_MONITOR_TABLE_LOCK); assert(status == TM_ERROR_NONE); TRACE(("inflate tmj%d\n", ++inflate_count)); lockword = *lockword_ptr; if (IS_FAT_LOCK (lockword)) { - status = hymutex_unlock(FAT_MONITOR_TABLE_LOCK); + status = hymutex_unlock(&FAT_MONITOR_TABLE_LOCK); assert(status == TM_ERROR_NONE); return locktable_get_fat_monitor(FAT_LOCK_ID(lockword)); } @@ -587,7 +587,7 @@ hythread_monitor_t VMCALL inflate_lock(hythread_thin_monitor_t *lockword_ptr) { assert(status == TM_ERROR_NONE); status = hythread_monitor_enter(fat_monitor); if (status != TM_ERROR_NONE) { - hymutex_unlock(FAT_MONITOR_TABLE_LOCK); + hymutex_unlock(&FAT_MONITOR_TABLE_LOCK); return NULL; } @@ -603,7 +603,7 @@ hythread_monitor_t VMCALL inflate_lock(hythread_thin_monitor_t *lockword_ptr) { TRACE(("FAT ID : 0x%x", *lockword_ptr)); fat_monitor->inflate_count++; fat_monitor->inflate_owner=tm_self_tls; - status=hymutex_unlock(FAT_MONITOR_TABLE_LOCK); + status=hymutex_unlock(&FAT_MONITOR_TABLE_LOCK); assert(status == TM_ERROR_NONE); #ifdef LOCK_RESERVATION assert(!IS_RESERVED(*lockword_ptr)); diff --git a/vm/thread/src/thread_private.h b/vm/thread/src/thread_private.h index 0ad8814..2af5e58 100644 --- a/vm/thread/src/thread_private.h +++ b/vm/thread/src/thread_private.h @@ -33,6 +33,10 @@ #include #include "apr_thread_ext.h" +#ifdef __linux__ +#include +#endif // __linux__ + // temporary remove logging #define TRACE(a) //printf a; printf("\n") //#include "clog.h" @@ -263,7 +267,7 @@ typedef struct HyThread { /** * Current conditional variable thread is waiting on (used for interrupting) */ - hycond_t current_condition; + hycond_t *current_condition; // State @@ -486,12 +490,6 @@ typedef struct HyThreadMonitor { int notify_flag; /** - * monitor sub pool - * will be destroyed by monitor_destroy() - */ - apr_pool_t *pool; - - /** * Owner thread 
ID. */ IDATA thread_id; @@ -554,12 +552,6 @@ typedef struct HySemaphore { * Mutex associated with the semaphore data. */ hymutex_t mutex; - - /** - * semaphore sub pool - * will be destroyed by sem_destroy() - */ - apr_pool_t *pool; } HySemaphore; // Global variables @@ -621,7 +613,7 @@ IDATA acquire_start_lock(void); IDATA release_start_lock(void); IDATA thread_sleep_impl(I_64 millis, IDATA nanos, IDATA interruptable); -IDATA condvar_wait_impl(hycond_t cond, hymutex_t mutex, I_64 ms, IDATA nano, IDATA interruptable); +IDATA condvar_wait_impl(hycond_t *cond, hymutex_t *mutex, I_64 ms, IDATA nano, IDATA interruptable); IDATA monitor_wait_impl(hythread_monitor_t mon_ptr, I_64 ms, IDATA nano, IDATA interruptable); IDATA thin_monitor_wait_impl(hythread_thin_monitor_t *lockword_ptr, I_64 ms, IDATA nano, IDATA interruptable); IDATA sem_wait_impl(hysem_t sem, I_64 ms, IDATA nano, IDATA interruptable); diff --git a/vm/thread/src/thread_ti_monitors.c b/vm/thread/src/thread_ti_monitors.c index 9ab79c3..4f47db4 100644 --- a/vm/thread/src/thread_ti_monitors.c +++ b/vm/thread/src/thread_ti_monitors.c @@ -63,11 +63,11 @@ IDATA VMCALL jthread_raw_monitor_create(jrawMonitorID* mon_ptr) { if (status != TM_ERROR_NONE) return status; } - status =hymutex_lock(jvmti_monitor_table_lock); + status =hymutex_lock(&jvmti_monitor_table_lock); if (status != TM_ERROR_NONE) return status; *mon_ptr = array_add(jvmti_monitor_table, monitor); - status =hymutex_unlock(jvmti_monitor_table_lock); + status =hymutex_unlock(&jvmti_monitor_table_lock); if (status != TM_ERROR_NONE) return status; if (!(*mon_ptr)) return TM_ERROR_OUT_OF_MEMORY; @@ -93,10 +93,10 @@ IDATA VMCALL jthread_raw_monitor_destroy(jrawMonitorID mon_ptr) { return status; } - status =hymutex_lock(jvmti_monitor_table_lock); + status =hymutex_lock(&jvmti_monitor_table_lock); if (status != TM_ERROR_NONE) return status; array_delete(jvmti_monitor_table, (UDATA)mon_ptr); - status =hymutex_unlock(jvmti_monitor_table_lock); + status =hymutex_unlock(&jvmti_monitor_table_lock); return status; } diff --git a/vm/vmcore/include/jvmti_internal.h b/vm/vmcore/include/jvmti_internal.h index 780ba39..53a7535 100644 --- a/vm/vmcore/include/jvmti_internal.h +++ b/vm/vmcore/include/jvmti_internal.h @@ -155,6 +155,7 @@ class DebugUtilsTI { // TI event thread data hythread_t event_thread; hycond_t event_cond; + int event_cond_initialized; DebugUtilsTI(); diff --git a/vm/vmcore/src/jvmti/jvmti.cpp b/vm/vmcore/src/jvmti/jvmti.cpp index 106c115..ca96a09 100644 --- a/vm/vmcore/src/jvmti/jvmti.cpp +++ b/vm/vmcore/src/jvmti/jvmti.cpp @@ -261,7 +261,7 @@ jint JNICALL create_jvmti_environment(JavaVM *vm_ext, void **env, jint version) error_code = newenv->allocate_extension_event_callbacks_table(); if (error_code != JVMTI_ERROR_NONE) { - hymutex_destroy(newenv->lock); + hymutex_destroy(&newenv->lock); _deallocate((unsigned char *)newenv); *env = NULL; return error_code; @@ -309,7 +309,7 @@ void DebugUtilsTI::setExecutionMode(Global_Env *p_env) DebugUtilsTI::DebugUtilsTI() : event_thread(NULL), - event_cond(NULL), + event_cond_initialized(0), agent_counter(1), access_watch_list(NULL), modification_watch_list(NULL), diff --git a/vm/vmcore/src/jvmti/jvmti_capability.cpp b/vm/vmcore/src/jvmti/jvmti_capability.cpp index 699b097..5c20d51 100644 --- a/vm/vmcore/src/jvmti/jvmti_capability.cpp +++ b/vm/vmcore/src/jvmti/jvmti_capability.cpp @@ -402,14 +402,13 @@ jvmtiRelinquishCapabilities(jvmtiEnv* env, if (removed_caps.can_tag_objects) { // clear tags on relinquishing can_tag_objects capability 
ti_env = reinterpret_cast(env); - assert(ti_env->lock); - hymutex_lock(ti_env->lock); + hymutex_lock(&ti_env->lock); if (ti_env->tags) { ti_env->tags->clear(); delete ti_env->tags; ti_env->tags = NULL; } - hymutex_unlock(ti_env->lock); + hymutex_unlock(&ti_env->lock); ti->reset_global_capability(DebugUtilsTI::TI_GC_ENABLE_TAG_OBJECTS); } diff --git a/vm/vmcore/src/jvmti/jvmti_event.cpp b/vm/vmcore/src/jvmti/jvmti_event.cpp index 0d91b33..0ba06c6 100644 --- a/vm/vmcore/src/jvmti/jvmti_event.cpp +++ b/vm/vmcore/src/jvmti/jvmti_event.cpp @@ -2237,16 +2237,14 @@ jvmti_event_thread_function(void *args) hymutex_t event_mutex; UNREF IDATA stat = hymutex_create(&event_mutex, TM_MUTEX_NESTED); assert(stat == TM_ERROR_NONE); - hycond_t event_cond; - stat = hycond_create(&event_cond); + stat = hycond_create(&ti->event_cond); assert(stat == TM_ERROR_NONE); - ti->event_cond = event_cond; // event thread loop while(true) { - hymutex_lock(event_mutex); - hycond_wait(event_cond, event_mutex); - hymutex_unlock(event_mutex); + hymutex_lock(&event_mutex); + hycond_wait(&ti->event_cond, &event_mutex); + hymutex_unlock(&event_mutex); if(!ti->event_thread) { // event thread is NULL, @@ -2259,9 +2257,9 @@ jvmti_event_thread_function(void *args) } // release wait loop environment - stat = hymutex_destroy(event_mutex); + stat = hymutex_destroy(&event_mutex); assert(stat == TM_ERROR_NONE); - stat = hycond_destroy(event_cond); + stat = hycond_destroy(&ti->event_cond); assert(stat == TM_ERROR_NONE); return 0; @@ -2302,14 +2300,8 @@ jvmti_destroy_event_thread() return; } - // getting condition - hycond_t event_cond = ti->event_cond; - ti->event_thread = NULL; - ti->event_cond = NULL; - // notify event thread - assert(event_cond); - UNREF IDATA stat = hycond_notify(event_cond); + UNREF IDATA stat = hycond_notify(&ti->event_cond); assert(stat == TM_ERROR_NONE); return; } @@ -2318,12 +2310,11 @@ void jvmti_notify_data_dump_request() { DebugUtilsTI *ti = VM_Global_State::loader_env->TI; - if( !ti->event_thread || !ti->event_cond ) { + if( !ti->event_thread) { // nothing to do return; } - assert(ti->event_cond); - UNREF IDATA stat = hycond_notify(ti->event_cond); + UNREF IDATA stat = hycond_notify(&ti->event_cond); assert(stat == TM_ERROR_NONE); return; } diff --git a/vm/vmcore/src/jvmti/jvmti_heap.cpp b/vm/vmcore/src/jvmti/jvmti_heap.cpp index 453bfc4..87631d1 100644 --- a/vm/vmcore/src/jvmti/jvmti_heap.cpp +++ b/vm/vmcore/src/jvmti/jvmti_heap.cpp @@ -124,12 +124,11 @@ jvmtiSetTag(jvmtiEnv* env, return JVMTI_ERROR_INVALID_OBJECT; if (ti_env->tags == NULL) { - assert(ti_env->lock); - hymutex_lock(ti_env->lock); + hymutex_lock(&ti_env->lock); if (ti_env->tags == NULL) { ti_env->tags = new TITags; } - hymutex_unlock(ti_env->lock); + hymutex_unlock(&ti_env->lock); } if (ti_env->tags == NULL) { diff --git a/vm/vmcore/src/thread/lock_manager.cpp b/vm/vmcore/src/thread/lock_manager.cpp index a7a939d..58b87dc 100644 --- a/vm/vmcore/src/thread/lock_manager.cpp +++ b/vm/vmcore/src/thread/lock_manager.cpp @@ -36,25 +36,25 @@ Lock_Manager::Lock_Manager() Lock_Manager::~Lock_Manager() { - UNREF IDATA stat = hymutex_destroy (lock); + UNREF IDATA stat = hymutex_destroy (&lock); assert(stat==TM_ERROR_NONE); } void Lock_Manager::_lock() { - UNREF IDATA stat = hymutex_lock(lock); + UNREF IDATA stat = hymutex_lock(&lock); assert(stat==TM_ERROR_NONE); } bool Lock_Manager::_tryLock() { - IDATA stat = hymutex_trylock(lock); + IDATA stat = hymutex_trylock(&lock); return stat==TM_ERROR_NONE; } void Lock_Manager::_unlock() { - UNREF IDATA stat = 
hymutex_unlock(lock); + UNREF IDATA stat = hymutex_unlock(&lock); assert(stat==TM_ERROR_NONE); } @@ -97,6 +97,6 @@ void Lock_Manager::_unlock_enum_or_null() bool Lock_Manager::_lock_enum_or_null(bool UNREF return_null_on_fail) { - IDATA stat = hymutex_lock(lock); + IDATA stat = hymutex_lock(&lock); return stat==TM_ERROR_NONE; } -- 1.5.0.33.g1b20
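
The calling-convention change described in the commit message looks like this in
isolation. A minimal sketch, not code from the patch: the include path
<open/hythread_ext.h> and the counter_lock/counter_increment names are assumptions,
and only the hymutex_* signatures visible in the diff are relied on.

    #include <open/hythread_ext.h>   /* assumed include path for the hymutex API */

    static hymutex_t counter_lock;   /* embedded struct now, not 'struct HyMutex *' */
    static int       counter;

    static IDATA counter_init(void)
    {
        /* creation already took an address before the change, and still does */
        return hymutex_create(&counter_lock, TM_MUTEX_DEFAULT);
    }

    static IDATA counter_increment(void)
    {
        IDATA status = hymutex_lock(&counter_lock);   /* '&' is now required */
        if (status != TM_ERROR_NONE) return status;
        counter++;
        return hymutex_unlock(&counter_lock);         /* ...and here */
    }

Since hymutex_create() already received a pointer, only the lock, trylock, unlock
and destroy call sites needed the new address-of operator.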
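
Condition variables follow the same rule: hycond_wait() now takes the addresses of
both the condition and the mutex, as in proc_waiting() in the performance tests. A
sketch under the same assumptions; wait_lock, wait_cond and the ready flag are
illustrative names, not part of the patch.

    #include <open/hythread_ext.h>   /* assumed include path */

    static hymutex_t wait_lock;
    static hycond_t  wait_cond;
    static int       ready;          /* predicate guarded by wait_lock */

    static IDATA wait_init(void)
    {
        IDATA status = hymutex_create(&wait_lock, TM_MUTEX_DEFAULT);
        if (status != TM_ERROR_NONE) return status;
        return hycond_create(&wait_cond);
    }

    /* Block until another thread sets 'ready' and signals wait_cond. */
    static IDATA wait_until_ready(void)
    {
        IDATA status = hymutex_lock(&wait_lock);
        if (status != TM_ERROR_NONE) return status;
        while (!ready) {
            status = hycond_wait(&wait_cond, &wait_lock);   /* both by address */
            if (status != TM_ERROR_NONE) {
                hymutex_unlock(&wait_lock);
                return status;
            }
        }
        return hymutex_unlock(&wait_lock);
    }

    static IDATA set_ready(void)
    {
        IDATA status = hymutex_lock(&wait_lock);
        if (status != TM_ERROR_NONE) return status;
        ready = 1;
        hycond_notify_all(&wait_cond);
        return hymutex_unlock(&wait_lock);
    }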
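
hythread_monitor_init_with_name(), hylatch_create() and hysem_create() switch from
pool-backed apr_thread_mutex_create()/apr_thread_cond_create() to hymutex_create()/
hycond_create() on fields embedded in the object, and the destroy paths drop the APR
pool bookkeeping in favour of hymutex_destroy()/hycond_destroy() plus free(). A
sketch of that lifecycle with an illustrative Gate type (not from the patch):

    #include <stdlib.h>
    #include <open/hythread_ext.h>   /* assumed include path */

    /* Illustrative object that embeds its mutex and condition by value,
     * the way HyThreadMonitor, HyLatch and HySemaphore now do. */
    typedef struct Gate {
        hymutex_t mutex;
        hycond_t  condition;
        int       open;
    } Gate;

    static IDATA gate_create(Gate **out)
    {
        IDATA r;
        Gate *g = calloc(1, sizeof(Gate));
        if (g == NULL) {
            return TM_ERROR_OUT_OF_MEMORY;
        }
        r = hymutex_create(&g->mutex, TM_MUTEX_DEFAULT);
        if (r) goto cleanup;
        r = hycond_create(&g->condition);
        if (r) goto cleanup;

        *out = g;
        return TM_ERROR_NONE;

    cleanup:
        /* mirrors the patch: on failure the partially built object is simply freed */
        free(g);
        return r;
    }

    static IDATA gate_destroy(Gate *g)
    {
        /* no APR pool to unwind any more: destroy the embedded primitives, then free */
        hymutex_destroy(&g->mutex);
        hycond_destroy(&g->condition);
        free(g);
        return TM_ERROR_NONE;
    }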
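
Because hymutex_t and hycond_t are no longer pointers, they cannot be compared
against NULL to detect repeated initialization; that is why the patch introduces
explicit flags (TM_INITIALIZED in thread_init.c, event_cond_initialized in
DebugUtilsTI). A sketch of the same idea with illustrative names; the real
hythread_init() sets its flag immediately after the check rather than after the
create call.

    #include <open/hythread_ext.h>   /* assumed include path */

    static hymutex_t module_lock;
    static int       module_lock_initialized = 0;   /* replaces the NULL check */

    static IDATA module_init(void)
    {
        IDATA status;
        if (module_lock_initialized) {
            return TM_ERROR_NONE;                    /* already initialized */
        }
        status = hymutex_create(&module_lock, TM_MUTEX_DEFAULT);
        if (status != TM_ERROR_NONE) return status;
        module_lock_initialized = 1;
        return TM_ERROR_NONE;
    }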
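
Fields that used to hold a hycond_t handle now hold a pointer to a condition
embedded elsewhere: HyThread.current_condition becomes hycond_t*, and
monitor_wait_impl() assigns &mon_ptr->condition to it. A simplified sketch of that
pattern; Waiter and Channel are illustrative, and the real code also takes the
thread's own mutex around the assignment.

    #include <open/hythread_ext.h>   /* assumed include path */

    typedef struct Waiter {
        hycond_t *current_condition;   /* was 'hycond_t' when that was a pointer */
    } Waiter;

    typedef struct Channel {
        hymutex_t mutex;
        hycond_t  condition;
    } Channel;

    static IDATA channel_wait(Waiter *self, Channel *ch)
    {
        IDATA status = hymutex_lock(&ch->mutex);
        if (status != TM_ERROR_NONE) return status;

        self->current_condition = &ch->condition;   /* take the address */
        status = hycond_wait(&ch->condition, &ch->mutex);
        self->current_condition = NULL;             /* a pointer can still be NULL */

        hymutex_unlock(&ch->mutex);
        return status;
    }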