Index: build/make/targets/cunit.test.xml
===================================================================
--- build/make/targets/cunit.test.xml (revision 584243)
+++ build/make/targets/cunit.test.xml (working copy)
@@ -27,7 +27,7 @@
-
+
@@ -222,7 +222,6 @@
Index: vm/tests/unit/thread/test_native_exp_sync.c
===================================================================
--- vm/tests/unit/thread/test_native_exp_sync.c (revision 0)
+++ vm/tests/unit/thread/test_native_exp_sync.c (revision 0)
@@ -0,0 +1,593 @@
+/*
+* Licensed to the Apache Software Foundation (ASF) under one or more
+* contributor license agreements. See the NOTICE file distributed with
+* this work for additional information regarding copyright ownership.
+* The ASF licenses this file to You under the Apache License, Version 2.0
+* (the "License"); you may not use this file except in compliance with
+* the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <time.h>
+#include "testframe.h"
+#include "thread_manager.h"
+
+/////////////
+// Tested functions:
+/////////////
+#define hythread_suspend_disable test_hythread_suspend_disable
+#define hythread_suspend_enable test_hythread_suspend_enable
+#define hythread_gc_suspend(thr) test_hythread_suspend(thr, GCSUSPEND)
+#define hythread_gc_resume(thr) test_hythread_resume(thr, GCSUSPEND)
+#define hythread_java_suspend(thr) test_hythread_suspend(thr, J_L_T_SUSPEND)
+#define hythread_java_resume(thr) test_hythread_resume(thr, J_L_T_SUSPEND)
+#define hythread_safe_point test_hythread_backbranch_support
+
+#define GC_REQUEST_THREAD_COUNT 1
+
+#define JAVA_REQUEST_THREAD_COUNT 3
+
+#define CHECK_NUMBER 360
+
+#define CHECK_TIME_WAIT 1000
+
+#define SUSPEND_SPIN_COUNT 100
+
+//#define RAND()(rand() % 100000)
+#define RAND() 1
+
+#define CHECK_RAND(percentage) ((rand() % 100) < (((percentage)+1)))
+#define test_self() (hythread_ext_t)hythread_self()
+
+#define trace(x) printf(x); fflush(stdout)
+
+// Which agent requested the suspension: java.lang.Thread.suspend() or the GC.
+typedef enum SuspendType {
+    J_L_T_SUSPEND,
+    GCSUSPEND
+} SuspendType;
+
+// Test-side extension of HyThread. The embedded hy_thr member must remain
+// first so the (hythread_t)&ext and test_self() casts used below stay valid.
+typedef struct HyThreadExt {
+    HyThread hy_thr;
+    uint32 now_blocked_at_;        // TRUE while the thread is parked in request_loop()
+    uint32 j_l_t_suspend_request;  // pending java.lang.Thread.suspend() request flag
+    uint32 gc_suspend_request;     // pending GC suspend request flag
+} HyThreadExt;
+
+typedef struct HyThreadExt * hythread_ext_t;
+
+// --- forward declarations --------------------------------------------------
+static void test_dump_thread_data(hythread_ext_t thread);
+
+static int test_thread_proc(void *args);
+static int test_gc_request_thread_proc(void *args);
+static int test_java_request_thread_proc(void *args);
+
+static uint32 test_waste_time(uint32 count);
+static uint32 test_read_location(uint32 *points_at);
+
+static IDATA test_hythread_java_suspend(hythread_t thread);
+static void test_hythread_java_resume(hythread_t thread);
+
+static void test_hythread_suspend_disable();
+static void test_hythread_suspend_enable();
+static IDATA test_hythread_suspend(hythread_ext_t thread, SuspendType kind);
+static void test_hythread_resume(hythread_ext_t thread, SuspendType kind);
+static void test_hythread_backbranch_support();
+
+// --- shared test state -----------------------------------------------------
+static hylatch_t wait_threads;        // rendezvous: worker start-up and shut-down
+static hylatch_t start;               // released by the main thread to begin testing
+static char stop = 0;                 // set by the main thread to end the test
+static char failed = 0;               // set by any thread when the heap invariant breaks
+static uint64 cycle_count = 0;        // iterations completed by the tested thread
+static uint32 code_path_coverage[32]; // counters for interesting code paths (only [0..7] used)
+
+apr_thread_mutex_t * suspend_resume_mutex = NULL; // serializes suspend/resume requests
+apr_pool_t * SUSPEND_RESUME_POOL = NULL;          // APR pool backing the mutex
+
+/**
+ * Test entry point: launches the tested thread plus GC_REQUEST_THREAD_COUNT
+ * GC-requestor and JAVA_REQUEST_THREAD_COUNT java-requestor threads, then
+ * samples the shared 'failed' flag at CHECK_NUMBER checkpoints.
+ * Returns TEST_PASSED on a clean run, TEST_FAILED otherwise.
+ */
+int test_exp_sync(void)
+{
+    int index;
+    char buf[1024];
+    IDATA status;
+    HyThreadExt threads[1 + GC_REQUEST_THREAD_COUNT + JAVA_REQUEST_THREAD_COUNT] = {0};
+    int number = 1 + GC_REQUEST_THREAD_COUNT + JAVA_REQUEST_THREAD_COUNT;
+
+    srand(time(NULL));
+
+    status = hylatch_create(&wait_threads, number);
+    tf_assert_same(status, TM_ERROR_NONE);
+    status = hylatch_create(&start, 1);
+    tf_assert_same(status, TM_ERROR_NONE);
+
+    status = apr_pool_create(&SUSPEND_RESUME_POOL, 0);
+    assert(status == APR_SUCCESS); // the original ignored this return value
+    assert(SUSPEND_RESUME_POOL != 0);
+    status = apr_thread_mutex_create(&suspend_resume_mutex, APR_THREAD_MUTEX_DEFAULT, SUSPEND_RESUME_POOL);
+    assert(status == APR_SUCCESS);
+
+    // start tested thread
+    status = hythread_create_ex((hythread_t)&threads[0], 0, 0, 0, NULL,
+        (hythread_entrypoint_t)test_thread_proc, NULL);
+    tf_assert_same(status, TM_ERROR_NONE);
+
+    // start gc_request threads
+    for (index = 1; index < GC_REQUEST_THREAD_COUNT + 1; index++) {
+        status = hythread_create_ex((hythread_t)&threads[index], 0, 0, 0, NULL,
+            (hythread_entrypoint_t)test_gc_request_thread_proc, &threads[0]);
+        tf_assert_same(status, TM_ERROR_NONE);
+    }
+
+    // start java_request threads
+    for (; index < number; index++) {
+        status = hythread_create_ex((hythread_t)&threads[index], 0, 0, 0, NULL,
+            (hythread_entrypoint_t)test_java_request_thread_proc, &threads[0]);
+        tf_assert_same(status, TM_ERROR_NONE);
+    }
+
+    // Wait until all threads have started, then re-arm the latch so the same
+    // latch can count the threads back in at shutdown.
+    status = hylatch_wait(wait_threads);
+    tf_assert_same(status, TM_ERROR_NONE);
+    status = hylatch_set(wait_threads, number);
+    tf_assert_same(status, TM_ERROR_NONE);
+    hythread_sleep(100);
+
+    // Start testing
+    trace("TEST start!\n");
+    status = hylatch_count_down(start);
+    tf_assert_same(status, TM_ERROR_NONE);
+
+    // checkpoints: poll the failure flag CHECK_NUMBER times
+    for (index = 0; index < CHECK_NUMBER; index++) {
+        // wait a bit
+        hythread_sleep(CHECK_TIME_WAIT);
+
+        if (failed) {
+            sprintf(buf, "\nTEST FAILED!\n index = %d", index);
+            trace(buf);
+            test_dump_thread_data(&threads[0]);
+            return TEST_FAILED;
+        } else {
+            trace(".");
+        }
+        if ((index % 10) == 0 ) {
+            trace("\n");
+        }
+    }
+    trace("\n\n");
+
+    // Stop testing, wait until all threads have finished.
+    stop = 1;
+    status = hylatch_wait(wait_threads);
+    tf_assert_same(status, TM_ERROR_NONE);
+
+    // cycle_count is uint64: print it via %llu with an explicit cast; the
+    // original "%ld" mis-read the 64-bit value on ILP32 platforms.
+    sprintf(buf, "\nTEST PASSED!\ncheck number = %d\n"
+        "cycle_count = %llu\n", CHECK_NUMBER, (unsigned long long)cycle_count);
+    trace(buf);
+    return TEST_PASSED;
+} // test_exp_sync
+
+
+// Two shared words standing in for heap objects. The suspension protocol
+// keeps them equal (both 0 or both 1); any observed mismatch proves the
+// target thread and a requestor ran their access windows concurrently.
+volatile apr_uint32_t test_heap_ptr_1;
+volatile apr_uint32_t test_heap_ptr_2;
+
+// Heap access performed by the tested thread inside its suspend-disabled
+// region: checks the invariant, then toggles both words 0->1 or 1->0.
+static void test_target_heap_access()
+{
+    char buf[1024];
+
+    if (test_heap_ptr_1 == 0) {
+        if (test_heap_ptr_2 != 0) {
+            sprintf(buf, "\n********** FAILED! ************\ntarget: test_heap_ptr_1 = %d, "
+                "test_heap_ptr_2 = %d\n********** FAILED! ************\n",
+                test_heap_ptr_1, test_heap_ptr_2);
+            trace(buf);
+            // record the failure before assert(0): in assert-enabled builds
+            // the abort would otherwise prevent 'failed' from ever being set
+            failed = 1;
+            assert(0);
+        }
+        apr_atomic_inc32(&test_heap_ptr_1); //test_heap_ptr_1++;
+        apr_atomic_inc32(&test_heap_ptr_2); //test_heap_ptr_2++;
+    } else {
+        if (test_heap_ptr_1 != 1 || test_heap_ptr_2 != 1) {
+            sprintf(buf, "\n********** FAILED! ************\ntarget: test_heap_ptr_1 = %d, "
+                "test_heap_ptr_2 = %d\n********** FAILED! ************\n",
+                test_heap_ptr_1, test_heap_ptr_2);
+            trace(buf);
+            failed = 1;
+            assert(0);
+        }
+        apr_atomic_dec32(&test_heap_ptr_1); //test_heap_ptr_1--;
+        apr_atomic_dec32(&test_heap_ptr_2); //test_heap_ptr_2--;
+    }
+} // test_target_heap_access
+
+// Heap access performed by a requestor thread while it believes the target
+// is suspended. Mirror of test_target_heap_access(), but it touches the two
+// words in the opposite order so an unsynchronized interleaving with the
+// target is more likely to expose a mismatch.
+static void test_requestor_heap_access()
+{
+    char buf[1024];
+
+    if (test_heap_ptr_2 == 0) {
+        if (test_heap_ptr_1 != 0) {
+            sprintf(buf, "\n********** FAILED! ************\nrequestor: test_heap_ptr_1 = %d, "
+                "test_heap_ptr_2 = %d\n********** FAILED! ************\n",
+                test_heap_ptr_1, test_heap_ptr_2);
+            trace(buf);
+            // record the failure before assert(0): in assert-enabled builds
+            // the abort would otherwise prevent 'failed' from ever being set
+            failed = 1;
+            assert(0);
+        }
+        apr_atomic_inc32(&test_heap_ptr_2); //test_heap_ptr_2++;
+        apr_atomic_inc32(&test_heap_ptr_1); //test_heap_ptr_1++;
+    } else {
+        if (test_heap_ptr_2 != 1 || test_heap_ptr_1 != 1) {
+            sprintf(buf, "\n********** FAILED! ************\nrequestor: test_heap_ptr_1 = %d, "
+                "test_heap_ptr_2 = %d\n********** FAILED! ************\n",
+                test_heap_ptr_1, test_heap_ptr_2);
+            trace(buf);
+            failed = 1;
+            assert(0);
+        }
+        apr_atomic_dec32(&test_heap_ptr_2); //test_heap_ptr_2--;
+        apr_atomic_dec32(&test_heap_ptr_1); //test_heap_ptr_1--;
+    }
+    //////////apr_atomic_cas32(&force_store_buffer_to_empty, 0, 1);
+} // test_requestor_heap_access
+
+// Post-mortem dump: prints the extended thread record, the hythread fields
+// that should all be zero after a clean run, and the first 8 coverage
+// counters. Called from the main thread only after a failure is detected.
+static void test_dump_thread_data(hythread_ext_t thread)
+{
+    int xx = 0;
+    char buf[1024];
+    sprintf(buf, "\nthread_ext: %p\n"
+        "\tnow_blocked_at_: %d, should be 0\n"
+        "\tj_l_t_suspend_request: %d, should be 0\n"
+        "\tgc_suspend_request: %d, should be 0\n"
+        "hythread: %p\n"
+        "\thythread.request: %d, should be 0\n"
+        "\thythread.suspend_count: %d, should be 0\n"
+        "\thythread.disable_count: %d, should be 0\n\n",
+        thread,
+        thread->now_blocked_at_,
+        thread->j_l_t_suspend_request,
+        thread->gc_suspend_request,
+        thread,
+        thread->hy_thr.request,
+        thread->hy_thr.suspend_count,
+        thread->hy_thr.disable_count);
+    trace(buf);
+
+    trace("\n");
+    // only the first 8 coverage slots are incremented by this test
+    for (xx = 0; xx < 8; xx++) {
+        sprintf(buf, "code_path_coverage[%d] = %d\n", xx, code_path_coverage[xx]);
+        trace(buf);
+    }
+} // test_dump_thread_data
+
+// Entry point of the tested (target) thread: repeatedly enters the
+// suspend-disabled region, touches the shared heap words, optionally polls
+// the safe point when a request is pending, and re-enables suspension.
+// Runs until the main thread sets 'stop' or any thread sets 'failed'.
+static int test_thread_proc(void *args) {
+    uint32 xx = 0;
+    IDATA status;
+    hythread_t self = hythread_self();
+
+    status = hylatch_count_down(wait_threads);
+    tf_assert_same(status, TM_ERROR_NONE);
+
+    trace("Tested thread is started\n");
+
+    status = hylatch_wait(start);
+    tf_assert_same(status, TM_ERROR_NONE);
+
+    while (!stop && !failed) {
+        hythread_suspend_disable();
+        test_target_heap_access();
+        xx += test_waste_time(RAND());
+
+        cycle_count++;
+
+        // BB polling is called about 30% of the time
+        if (CHECK_RAND(30) && self->request) {
+            hythread_safe_point();
+            test_target_heap_access();
+            xx += test_waste_time(RAND());
+        }
+
+        hythread_suspend_enable();
+        xx += test_waste_time(RAND());
+    }
+
+    trace("Tested thread is finished\n");
+
+    status = hylatch_count_down(wait_threads);
+    tf_assert_same(status, TM_ERROR_NONE);
+
+    // only the first 8 coverage slots are used by this test
+    for (xx = 0; xx < 8; xx++) {
+        printf("code_path_coverage[%d] = %d\n", xx, code_path_coverage[xx]);
+    }
+
+    return 0;
+} // test_thread_proc
+
+// Entry point of the GC-suspend requestor thread: repeatedly suspends the
+// tested thread, performs paired accesses to the shared heap words while the
+// target is (supposedly) stopped, then resumes it.
+static int test_gc_request_thread_proc(void *args)
+{
+    uint32 xx = 0;
+    IDATA status;
+    hythread_ext_t test_thread = (hythread_ext_t)args;
+
+    // Notify main thread about start
+    status = hylatch_count_down(wait_threads);
+    tf_assert_same(status, TM_ERROR_NONE);
+    trace("GC request thread is started\n");
+
+    // Wait all thread start
+    status = hylatch_wait(start);
+    tf_assert_same(status, TM_ERROR_NONE);
+
+    while (!stop && !failed) {
+        status = hythread_gc_suspend(test_thread);
+        // verify the suspend succeeded *before* touching the shared heap;
+        // the original code performed the first access ahead of this check
+        tf_assert_same(status, TM_ERROR_NONE);
+
+        test_requestor_heap_access();
+
+        xx += test_waste_time(RAND());
+
+        test_requestor_heap_access();
+
+        hythread_gc_resume(test_thread);
+
+        xx += test_waste_time(RAND());
+    }
+
+    trace("GC request thread is finished\n");
+
+    status = hylatch_count_down(wait_threads);
+    tf_assert_same(status, TM_ERROR_NONE);
+
+    return 0;
+} // test_gc_request_thread_proc
+
+// Entry point of a java.lang.Thread.suspend() requestor thread. Note it
+// issues hythread_java_suspend() twice per iteration but resumes only once;
+// presumably this exercises the duplicate-request coalescing path in
+// test_hythread_suspend() -- TODO(review) confirm intent.
+static int test_java_request_thread_proc(void *args)
+{
+    uint32 xx = 0;
+    IDATA status;
+    hythread_ext_t test_thread = (hythread_ext_t)args;
+
+    // Notify main thread about start
+    status = hylatch_count_down(wait_threads);
+    tf_assert_same(status, TM_ERROR_NONE);
+    trace("JAVA request thread is started\n");
+
+    // Wait all thread start
+    status = hylatch_wait(start);
+    tf_assert_same(status, TM_ERROR_NONE);
+
+    while (!stop && !failed) {
+
+        status = hythread_java_suspend(test_thread);
+        tf_assert_same(status, TM_ERROR_NONE);
+
+        xx += test_waste_time(RAND());
+
+        // second suspend of the same target -- expected to be coalesced
+        status = hythread_java_suspend(test_thread);
+        tf_assert_same(status, TM_ERROR_NONE);
+
+        xx += test_waste_time(RAND());
+
+        hythread_java_resume(test_thread);
+
+        xx += test_waste_time(RAND());
+
+    }
+
+    trace("JAVA request thread is finished\n");
+
+    status = hylatch_count_down(wait_threads);
+    tf_assert_same(status, TM_ERROR_NONE);
+
+    return 0;
+} // test_java_request_thread_proc
+
+// Park the calling (target) thread while a suspend request is pending:
+// drop its disable_count to 0, publish now_blocked_at_, wait on the resume
+// event until both request flags clear, then restore the previous state.
+// Called from the enable->disable transition and from the safe point.
+static void request_loop()
+{
+    IDATA sem_status; // full IDATA width; the original truncated this into a uint8
+    uint32 ret_val = 0;
+    hythread_ext_t self = test_self();
+    while (test_read_location(&self->hy_thr.request) ) {
+
+        // TODO disable_count is a 16-bit field, it looks like padding leaves the next field 32-bit aligned
+        // there is no cas16. Either create a cas16 or make disable_count a 32-bit field
+        ret_val = apr_atomic_cas32((uint32 *)&self->hy_thr.disable_count, 0, 1); // set it *back* to zero
+        assert(ret_val == 1);
+        ret_val = apr_atomic_cas32(&self->now_blocked_at_, TRUE, FALSE); // set it to true
+        // BUG FIX: was "assert(ret_val = FALSE)" -- an assignment, not a
+        // comparison, so the assertion could never hold. The CAS must have
+        // observed now_blocked_at_ == FALSE before setting it.
+        assert(ret_val == FALSE);
+        code_path_coverage[0]++;
+
+        while (self->j_l_t_suspend_request || self->gc_suspend_request) {
+            // put a timeout to ensure a missed semaphore set does not
+            // hang the system (graceful degradation)
+            sem_status = hysem_wait_interruptable(self->hy_thr.resume_event, 300 /*msec*/, 0 /*nano*/);
+            //if (sem_status == timeout && self->gc_suspend_request) log a warning/diagnostic somewhere
+        }
+
+        ret_val = apr_atomic_cas32(&self->now_blocked_at_, FALSE, TRUE);
+        assert(ret_val == TRUE);
+        ret_val = apr_atomic_cas32((uint32 *)&self->hy_thr.disable_count, 1, 0); // set it *back* to one
+        assert(ret_val == 0);
+    }
+}
+// Mark the calling thread suspend-disabled. On the 0->1 transition of
+// disable_count the thread must first honor any pending suspend request
+// by parking in request_loop(). (An unused local 'ret_val' was removed.)
+static void test_hythread_suspend_disable()
+{
+    hythread_ext_t self = test_self();
+
+    self->hy_thr.disable_count++;
+
+    if (self->hy_thr.disable_count == 1) { // we are making a suspend enabled to suspend disabled transition
+        request_loop();
+    }
+
+} // test_hythread_suspend_disable
+
+// Leave the suspend-disabled region by decrementing the nesting counter.
+// The counter must be positive: enable without a matching disable is a bug.
+static void test_hythread_suspend_enable()
+{
+    hythread_t thr = hythread_self();
+    assert(thr->disable_count > 0);
+    thr->disable_count -= 1;
+} // test_hythread_suspend_enable
+
+// make it static to prevent compiler to optimize
+// reading/writing of this variable
+static uint32 waste_time_int;
+
+// Burn CPU for 'count' rounds of pseudo-random arithmetic on the shared
+// accumulator, yielding roughly 10% of the time; returns the accumulator.
+static uint32 test_waste_time(uint32 count)
+{
+    uint32 round;
+    for (round = count; round != 0; round--) {
+        waste_time_int *= rand();
+        waste_time_int += rand();
+    }
+
+    // yield happend about 10% of the time
+    if (CHECK_RAND(10)) {
+        hythread_yield();
+    }
+    return waste_time_int;
+} // test_waste_time
+
+// Force a genuine memory read of *points_at through a volatile-qualified
+// pointer, so the compiler cannot cache the value in a register across the
+// polling loops above. (An obsolete commented-out x86 inline-asm version
+// that did the same thing was removed.)
+static uint32 test_read_location(uint32 *points_at)
+{
+    volatile uint32 *p_volatile = points_at;
+    return *p_volatile;
+} // test_read_location
+
+// Spin (yielding the CPU each pass) until the target thread's disable_count
+// drops to zero. Relies on the protocol invariant that no thread blocks
+// while suspend-disabled, so the wait is expected to be short.
+void wait_for_disable_count(hythread_ext_t thread)
+{
+    while (1) {
+        // a thread can not stay in disable_count > 0 for long
+        // by design, no thread will block when disable_count > 0
+        if (thread->hy_thr.disable_count == 0) {
+            code_path_coverage[3]++;
+            break;
+        }
+        // the below yield() puts the current thread at the end of the OS ready queue
+        // the target thread *should* run before the current thread resumes
+        // the basic idea is to flush the OS ready queue (force target thread to run)
+        hythread_yield();
+    }
+}
+
+/**
+ * Request suspension of 'thread' on behalf of the current thread.
+ * J_L_T_SUSPEND: a duplicate request is coalesced and returns immediately.
+ * GCSUSPEND: only one concurrent GC request is permitted (asserted).
+ * Returns TM_ERROR_NONE once the target is observed parked in request_loop()
+ * or judged to be blocked outside managed code. Always releases the
+ * suspend_resume mutex before returning.
+ */
+static IDATA test_hythread_suspend(hythread_ext_t thread, SuspendType kind)
+{
+    uint32 ret_val;
+    uint32 xx;
+    uint32 disable_count_always_zero;
+
+    // Grab the suspend_resume mutex.
+    // This *implies* the current thread is in suspend enabled state since it may block at mutex enter
+    apr_thread_mutex_lock(suspend_resume_mutex);
+
+    if (kind == J_L_T_SUSPEND) {
+        ret_val = apr_atomic_cas32(&thread->j_l_t_suspend_request, TRUE, FALSE);
+        if (ret_val == TRUE) {
+            // some other thread has previously made a j.l.T.suspend() request
+            code_path_coverage[1]++;
+            apr_thread_mutex_unlock(suspend_resume_mutex);
+            return TM_ERROR_NONE;
+        }
+    } else {
+        assert(kind == GCSUSPEND);
+        // only allow one GC at a time for now
+        ret_val = apr_atomic_cas32(&thread->gc_suspend_request, TRUE, FALSE);
+        assert(ret_val == FALSE); // only one GC at a time
+        code_path_coverage[2]++;
+    }
+    apr_atomic_cas32(&thread->hy_thr.request, TRUE, FALSE);
+
+    // The loop below exits only via the return statements inside it.
+    // (An unreachable unlock/return after the loop -- a latent double
+    // unlock had it ever run -- and the unused local
+    // 'blocked_at_disable_or_back_branch' were removed.)
+    while (1) {
+        wait_for_disable_count(thread);
+
+        disable_count_always_zero = TRUE;
+        for (xx = 0; xx < SUSPEND_SPIN_COUNT; xx++) {
+
+            if (thread->now_blocked_at_ )
+            {
+                // target has parked itself in request_loop()
+                code_path_coverage[4]++;
+                apr_thread_mutex_unlock(suspend_resume_mutex);
+                return TM_ERROR_NONE;
+            }
+
+            if (thread->hy_thr.disable_count != 0) {
+                disable_count_always_zero = FALSE;
+                break;
+            }
+            hythread_yield();
+        }
+        if (disable_count_always_zero == TRUE) {
+            // we put the current thread at the tail end of the OS ready queue
+            // SUSPEND_SPIN_COUNT times. This means if the target thread is actually
+            // on the ready queue, it would have run. TODO: verify the last statement
+            // for both windows and linux.
+            // If the target thread has not changed disable_count, this means
+            // the target thread is blocked inside
+            // a native library or inside a java.lang.Sleep() or Object.wait(), etc.
+            code_path_coverage[5]++;
+            apr_thread_mutex_unlock(suspend_resume_mutex);
+            return TM_ERROR_NONE;
+        }
+    }
+} // test_hythread_suspend
+
+
+// Safe point executed from the tested thread's back-branch polling: the
+// caller must be RUNNABLE (asserted); parks in request_loop() until any
+// pending suspend request is resumed.
+static void test_hythread_backbranch_support() {
+    hythread_ext_t self = test_self();
+    assert(self->hy_thr.state == TM_THREAD_STATE_RUNNABLE);
+    code_path_coverage[6]++;
+    request_loop();
+} // test_hythread_backbranch_support
+
+// Clear a suspend request of the given kind on 'thread'. When no request of
+// either kind remains, clear the target's request flag and post its resume
+// event so request_loop() can exit.
+static void test_hythread_resume(hythread_ext_t thread, SuspendType kind)
+{
+    uint32 ret_val;
+
+    // NOTE(review): 'self' is unused in this function
+    hythread_ext_t self = test_self();
+
+    // TODO grab the suspend_resume mutex
+    // This *implies* the current thread in in suspend enabled state since it may block at mutex enter
+    apr_thread_mutex_lock(suspend_resume_mutex);
+    if (kind == J_L_T_SUSPEND) {
+        // NOTE(review): ret_val is deliberately not asserted here, unlike the
+        // GC branch -- with several java requestors a resume may find the flag
+        // already cleared (a duplicate suspend was coalesced); confirm intent.
+        ret_val = apr_atomic_cas32(&thread->j_l_t_suspend_request, FALSE, TRUE);
+
+    } else {
+        assert(kind == GCSUSPEND);
+        ret_val = apr_atomic_cas32(&thread->gc_suspend_request, FALSE, TRUE);
+        assert(ret_val == TRUE);
+    }
+    if ( (thread->j_l_t_suspend_request == FALSE) &&
+         (thread->gc_suspend_request == FALSE) ) {
+        code_path_coverage[7]++;
+        ret_val = apr_atomic_cas32(&thread->hy_thr.request, FALSE, TRUE);
+        assert(ret_val == TRUE);
+        hysem_post(thread->hy_thr.resume_event);
+    }
+    // TODO release the suspend_resume mutex
+    apr_thread_mutex_unlock(suspend_resume_mutex);
+}
+
+// testframe registration: this suite consists of the single test above.
+TEST_LIST_START
+    TEST(test_exp_sync)
+TEST_LIST_END;
+
Property changes on: vm/tests/unit/thread/test_native_exp_sync.c
___________________________________________________________________
Name: svn:executable
+ *
Name: svn:eol-style
+ native