From fc5175b539cf262f995137ae2d4e669b272bbefd Mon Sep 17 00:00:00 2001
From: Salikh Zakirov
Date: Fri, 2 Mar 2007 16:43:53 +0300
Subject: [PATCH] allocate monitors, latches and semaphores from C heap

Use the general C heap for allocation of monitors, latches and semaphores.
This is required for monitors, as they may have a complex lifecycle tied to
the lifecycle of the Java object, and thus cannot be allocated from the
thread pools, which have a more straightforward lifecycle.

Fix latches and semaphores for consistency. This is not needed now, as they
are only used for thread-specific synchronization, but should anyone ever
use them for another purpose, pool allocation would immediately become
incorrect.

Accordingly, use free() to release the memory in the destroy functions.
---
 vm/thread/src/thread_native_fat_monitor.c | 11 ++++++++---
 vm/thread/src/thread_native_latch.c       |  3 ++-
 vm/thread/src/thread_native_semaphore.c   |  3 ++-
 3 files changed, 12 insertions(+), 5 deletions(-)

diff --git a/vm/thread/src/thread_native_fat_monitor.c b/vm/thread/src/thread_native_fat_monitor.c
index a5cba3e..86967f1 100644
--- a/vm/thread/src/thread_native_fat_monitor.c
+++ b/vm/thread/src/thread_native_fat_monitor.c
@@ -47,15 +47,15 @@ IDATA VMCALL hythread_monitor_init_with_name(hythread_monitor_t *mon_ptr, UDATA
     apr_pool_t *pool = get_local_pool();
     apr_status_t apr_status;

-    mon = apr_pcalloc(pool, sizeof(HyThreadMonitor));
+    mon = calloc(1, sizeof(HyThreadMonitor));
     if (mon == NULL) {
         return TM_ERROR_OUT_OF_MEMORY;
     }
     apr_status = apr_thread_mutex_create((apr_thread_mutex_t**)&(mon->mutex), TM_MUTEX_NESTED, pool);
-    if (apr_status != APR_SUCCESS) return CONVERT_ERROR(apr_status);
+    if (apr_status != APR_SUCCESS) goto cleanup;
     apr_status = apr_thread_cond_create((apr_thread_cond_t**)&(mon->condition), pool);
-    if (apr_status != APR_SUCCESS) return CONVERT_ERROR(apr_status);
+    if (apr_status != APR_SUCCESS) goto cleanup;

     mon->pool = pool;
     mon->flags = flags;
@@ -65,6 +65,10 @@ IDATA VMCALL hythread_monitor_init_with_name(hythread_monitor_t *mon_ptr, UDATA

     *mon_ptr = mon;
     return TM_ERROR_NONE;
+
+cleanup:
+    free(mon);
+    return CONVERT_ERROR(apr_status);
 }

 /**
@@ -381,6 +385,7 @@ IDATA VMCALL hythread_monitor_destroy(hythread_monitor_t monitor) {
     apr_status=apr_thread_cond_destroy((apr_thread_cond_t*)monitor->condition);
     if (apr_status != APR_SUCCESS) return CONVERT_ERROR(apr_status);
     // apr_pool_free(pool, monitor);
+    free(monitor);

     return TM_ERROR_NONE;
 }
diff --git a/vm/thread/src/thread_native_latch.c b/vm/thread/src/thread_native_latch.c
index 5468c88..8cb6020 100644
--- a/vm/thread/src/thread_native_latch.c
+++ b/vm/thread/src/thread_native_latch.c
@@ -45,7 +45,7 @@ IDATA VMCALL hylatch_create(hylatch_t *latch, IDATA count) {
     apr_pool_t *pool = get_local_pool();
     apr_status_t apr_status;

-    l = apr_palloc(pool, sizeof(HyLatch));
+    l = malloc(sizeof(HyLatch));
     if (l == NULL) {
         return TM_ERROR_OUT_OF_MEMORY;
     }
@@ -210,6 +210,7 @@ IDATA VMCALL hylatch_destroy(hylatch_t latch) {
     apr_thread_cond_destroy((apr_thread_cond_t*)latch->condition);
     // apr_pool_free(pool, latch);
+    free(latch);

     return TM_ERROR_NONE;
 }
diff --git a/vm/thread/src/thread_native_semaphore.c b/vm/thread/src/thread_native_semaphore.c
index 14533c4..5f68521 100644
--- a/vm/thread/src/thread_native_semaphore.c
+++ b/vm/thread/src/thread_native_semaphore.c
@@ -40,7 +40,7 @@ IDATA VMCALL hysem_create(hysem_t *sem, UDATA initial_count, UDATA max_count) {
     apr_pool_t *pool = get_local_pool();
     apr_status_t apr_status;

-    l = apr_palloc(pool, sizeof(HySemaphore));
+    l = malloc(sizeof(HySemaphore));
     if (l == NULL) {
         return TM_ERROR_OUT_OF_MEMORY;
     }
@@ -237,5 +237,6 @@ IDATA VMCALL hysem_destroy(hysem_t sem) {
     if (apr_status != APR_SUCCESS) return CONVERT_ERROR(apr_status);
     apr_status=apr_thread_cond_destroy((apr_thread_cond_t*)sem->condition);
     // apr_pool_free(pool, sem);
+    free(sem);
     return CONVERT_ERROR(apr_status);;
 }
-- 
1.5.0.33.g1b20
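For reference, below is a minimal standalone sketch of the allocate-then-cleanup
pattern the monitor change adopts: calloc() the object from the C heap, jump to a
single cleanup label if any later initialization step fails, and free() there. It
deliberately uses plain pthreads and made-up names (my_monitor_t, my_monitor_init,
my_monitor_destroy) as illustrative stand-ins; it is not the Harmony hythread or
APR API. Note that in the patch itself only free(mon) is needed in the cleanup
path, because the mutex and condition variable remain allocated from the APR pool.

    #include <errno.h>
    #include <pthread.h>
    #include <stdlib.h>

    /* Illustrative stand-in for HyThreadMonitor: a mutex plus a condition
     * variable, owned by a heap-allocated struct. */
    typedef struct my_monitor {
        pthread_mutex_t mutex;
        pthread_cond_t  condition;
    } my_monitor_t;

    /* Allocate from the C heap; on any partial failure, fall through to a
     * cleanup label that releases whatever was already created. */
    int my_monitor_init(my_monitor_t **out)
    {
        int status;
        my_monitor_t *mon = calloc(1, sizeof(*mon));
        if (mon == NULL)
            return ENOMEM;

        status = pthread_mutex_init(&mon->mutex, NULL);
        if (status != 0)
            goto cleanup;

        status = pthread_cond_init(&mon->condition, NULL);
        if (status != 0)
            goto cleanup_mutex;

        *out = mon;
        return 0;

    cleanup_mutex:
        pthread_mutex_destroy(&mon->mutex);
    cleanup:
        free(mon);
        return status;
    }

    /* Matching destroy: tear down members in reverse order, then release the
     * heap block with free(), mirroring the destroy-function changes above. */
    void my_monitor_destroy(my_monitor_t *mon)
    {
        pthread_cond_destroy(&mon->condition);
        pthread_mutex_destroy(&mon->mutex);
        free(mon);
    }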