Index: src/thread_java_monitors.c
===================================================================
--- src/thread_java_monitors.c (revision 491398)
+++ src/thread_java_monitors.c (working copy)
@@ -44,7 +44,7 @@
  * Implementation may use the knowledge of internal object layout in order to allocate lock
  * and conditional variable in the most efficient manner.
  *
- * @param[in] monitor object where monitor needs to be initilalized.
+ * @param[in] monitor object where monitor needs to be initialized.
  */
 IDATA VMCALL jthread_monitor_init(jobject monitor) {
@@ -114,7 +114,7 @@
     }
 
     // busy wait and inflate
-    // reload poiter after safepoints
+    // reload pointer after safepoints
     lockword = vm_object_get_lockword_addr(monitor);
     while ((status = hythread_thin_monitor_try_enter(lockword)) == TM_ERROR_EBUSY) {
@@ -279,7 +279,7 @@
  * and VM chooses this thread to wake up;
  * • another thread invokes thread_notifyAll(object);
  * • another thread invokes thread_interrupt(thread);
- * • real time elapsed from the wating begin is
+ * • real time elapsed from the waiting begin is
  * greater or equal the timeout specified.
  *
  *
Index: src/thread_native_basic.c
===================================================================
--- src/thread_native_basic.c (revision 491398)
+++ src/thread_native_basic.c (working copy)
@@ -277,7 +277,7 @@
         thread = hythread_self();
     }
 
-    // Acquire global TM lock to prevent concurrent acccess to thread list
+    // Acquire global TM lock to prevent concurrent access to thread list
     status = hythread_global_lock(NULL);
     assert (status == TM_ERROR_NONE);
@@ -549,7 +549,7 @@
  * Terminates all running threads in the given group.
  *
  * @param[in] group thread group
- * @see hythread_cancell
+ * @see hythread_cancel
  */
 IDATA VMCALL hythread_cancel_all(hythread_group_t group) {
     hythread_iterator_t iter;
@@ -565,7 +565,7 @@
         if(next != self) {
             hythread_cancel(next);
             //since this method being used at shutdown it does not
-            //males any sence to exit on error, but continue terminating threads
+            //make any sense to exit on error, but continue terminating threads
         }
     }
 
@@ -596,7 +596,7 @@
     assert(thread);
     assert(group);
 
-    // Acquire global TM lock to prevent concurrent acccess to thread list
+    // Acquire global TM lock to prevent concurrent access to thread list
     status = hythread_global_lock(NULL);
     if (status != TM_ERROR_NONE) return status;
Index: src/thread_native_semaphore.c
===================================================================
--- src/thread_native_semaphore.c (revision 491398)
+++ src/thread_native_semaphore.c (working copy)
@@ -75,7 +75,7 @@
         if (nano || ms) break;
     }
 
-    //should we check here if timeout is not supposet to happen
+    //should we check here if timeout is not supposed to happen
     if(sem->count==0 /*&& (ms || nano)*/) {
         if (ms || nano) {
@@ -169,7 +169,7 @@
 }
 
 /**
- * Resets current semaphore count to the specifed numbers.
+ * Resets current semaphore count to the specified number.
  *
  * @param[in] count new semaphore count
  * @param[in] sem semaphore
Index: src/thread_java_park.c
===================================================================
--- src/thread_java_park.c (revision 491398)
+++ src/thread_java_park.c (working copy)
@@ -86,7 +86,7 @@
  * Parks the current thread until the specified deadline
  *
  * Stops the current thread from executing until it is unparked, interrupted,
- * or until the specified dealine.
+ * or until the specified deadline.
  * Unlike wait or sleep, the interrupted flag is NOT cleared by this API.
  *
  * @param[in] millis absolute time in milliseconds to wait until
Index: src/thread_native_latch.c
===================================================================
--- src/thread_native_latch.c (revision 491398)
+++ src/thread_native_latch.c (working copy)
@@ -153,7 +153,7 @@
 /**
  * Decreases the count for latch.
  *
- * If the count reaches zero, all threads awating on the latch are unblocked.
+ * If the count reaches zero, all threads awaiting on the latch are unblocked.
  * @param[in] latch the latch
  * @sa java.util.concurrent.CountDownLatch.countDown()
  */
Index: src/thread_helpers.cpp
===================================================================
--- src/thread_helpers.cpp (revision 491398)
+++ src/thread_helpers.cpp (working copy)
@@ -24,7 +24,7 @@
  * @file thread_helpers.cpp
  * @brief Set of VM helpers
  *
- * This file contatins the set of "VM helpers" which help to optimize monitors perforamance
+ * This file contains the set of "VM helpers" which help to optimize monitors performance
  * in the code generated by JIT compiler. Typically, these functions will be called by JIT,
  * but VM also could also use them with care.
  */
@@ -109,7 +109,7 @@
 #ifdef LOCK_RESERVATION
     // if this is initial reservation also increase the recursion
     ss = mov(ss, edx_opnd, eax_opnd);
-    // eax stil ROR so ROR the mask
+    // eax still ROR so ROR the mask
     ss = alu(ss, and_opc, edx_opnd, Imm_Opnd(0x0400ffff));
     ss = test(ss, edx_opnd, edx_opnd);
     ss = branch8(ss, Condition_Z, Imm_Opnd(size_8, 0));
@@ -118,12 +118,12 @@
     ss = ret(ss, Imm_Opnd(4));
 
 #ifdef LOCK_RESERVATION
-    // increase recurison brench
+    // increase recursion branch
     signed offset = (signed)ss - (signed)backpatch_address__recursion_inc - 1;
     *backpatch_address__recursion_inc = (char)offset;
 
     // test recursion overflow
-    // eax stil ROR so ROR the mask
+    // eax still ROR so ROR the mask
     ss = alu(ss, cmp_opc, eax_opnd, Imm_Opnd(0xf4000000));
     ss = branch8(ss, Condition_A, Imm_Opnd(size_8, 0));
     char *backpatch_address__inline_monitor_failed3 = ((char *)ss) - 1;
Index: src/thread_native_interrupt.c
===================================================================
--- src/thread_native_interrupt.c (revision 491398)
+++ src/thread_native_interrupt.c (working copy)
@@ -35,7 +35,7 @@
  * resume the thread and cause it to return from the blocking function with
  * HYTHREAD_INTERRUPTED.
  *
- * @param[in] thread a thead to be interrupted
+ * @param[in] thread a thread to be interrupted
  * @return none
  */
 void VMCALL hythread_interrupt(hythread_t thread) {
@@ -65,7 +65,7 @@
  * Returns interrupted status and clear interrupted flag.
  *
  * @param[in] thread where to clear interrupt flag
- * @returns TM_ERROR_INTERRUPT if thread was interruped, TM_ERROR_NONE otherwise
+ * @returns TM_ERROR_INTERRUPT if thread was interrupted, TM_ERROR_NONE otherwise
  */
 UDATA VMCALL hythread_clear_interrupted_other(hythread_t thread) {
     int interrupted;
Index: src/thread_native_tls.c
===================================================================
--- src/thread_native_tls.c (revision 491398)
+++ src/thread_native_tls.c (working copy)
@@ -44,7 +44,7 @@
  *
  * Create and return a new, unique key for thread local storage.
  *
- * @note The hande returned will be >=0, so it is safe to test the handle against 0 to see if it's been
+ * @note The handle returned will be >=0, so it is safe to test the handle against 0 to see if it's been
  * allocated yet.
  *
  * @param[out] handle pointer to a key to be initialized with a key value
@@ -61,7 +61,7 @@
  *
  * Create and return a new, unique key for thread local storage.
  *
- * @note The hande returned will be >=0, so it is safe to test the handle against 0 to see if it's been
+ * @note The handle returned will be >=0, so it is safe to test the handle against 0 to see if it's been
  * allocated yet.
  *
  * @param[out] handle pointer to a key to be initialized with a key value
Index: src/thread_native_thin_monitor.c
===================================================================
--- src/thread_native_thin_monitor.c (revision 491398)
+++ src/thread_native_thin_monitor.c (working copy)
@@ -89,7 +89,7 @@
     return IS_FAT_LOCK(lockword);
 }
 
-//forward decalration
+//forward declaration
 hythread_monitor_t locktable_get_fat_monitor(IDATA lock_id);
 IDATA locktable_put_fat_monitor(hythread_monitor_t fat_monitor);
 hythread_monitor_t locktable_delete_entry(int lock_id);
@@ -120,7 +120,7 @@
     assert(hythread_get_id(hythread_self()) == THREAD_ID(lockword));
     assert (!IS_FAT_LOCK(*lockword_ptr));
     assert (IS_RESERVED(lockword));
-    TRACE(("Unreaerve self %d \n", ++unreserve_count_self/*, vm_get_object_class_name(lockword_ptr-1)*/));
+    TRACE(("Unreserved self %d \n", ++unreserve_count_self/*, vm_get_object_class_name(lockword_ptr-1)*/));
 
     // Set reservation bit to 1 and reduce recursion count
     lockword_new = (lockword | RESERVED_BITMASK);
@@ -156,7 +156,7 @@
     }
     lock_id = THREAD_ID(lockword);
     owner = hythread_get_thread(lock_id);
-    TRACE(("Unreaserve other %d \n", ++unreserve_count/*, vm_get_object_class_name(lockword_ptr-1)*/));
+    TRACE(("Unreserved other %d \n", ++unreserve_count/*, vm_get_object_class_name(lockword_ptr-1)*/));
     if(!IS_RESERVED(lockword) || IS_FAT_LOCK(lockword)) {
         // hymutex_unlock(TM_LOCK);
         return TM_ERROR_NONE;
@@ -225,7 +225,7 @@
  * @param[in] lockword_ptr monitor addr
  */
 IDATA hythread_thin_monitor_create(hythread_thin_monitor_t *lockword_ptr) {
-    //clear anithing but hashcode
+    //clear anything but hashcode
     // 000000000000000000011111111111
     *lockword_ptr = *lockword_ptr & 0x3FF;
     return TM_ERROR_NONE;
@@ -296,7 +296,7 @@
 #ifdef LOCK_RESERVATION
     //lockword = *lockword_ptr; // this reloading of lockword may be odd, need to investigate;
     if (IS_RESERVED(lockword)) {
-        TRACE(("initialy reserve lock %x count: %d ", *lockword_ptr, init_reserve_cout++));
+        TRACE(("initially reserve lock %x count: %d ", *lockword_ptr, init_reserve_cout++));
         RECURSION_INC(lockword_ptr, *lockword_ptr);
     }
 #endif
@@ -367,7 +367,7 @@
             set_suspend_disable(saved_disable_count);
             return status; // lock fat_monitor
         }
-        //hythread_sefe_point();
+        //hythread_safe_point();
         hythread_yield();
     }
     if(IS_FAT_LOCK(*lockword_ptr)) {
@@ -405,7 +405,7 @@
             //TRACE(("recursion_dec: 0x%x", *lockword_ptr));
         }
         //TRACE(("unlocked: 0x%x id: %d\n", *lockword_ptr, THREAD_ID(*lockword_ptr)));
-        //hythread_sefe_point();
+        //hythread_safe_point();
         return TM_ERROR_NONE;
     } else if (IS_FAT_LOCK(lockword)) {
         TRACE(("exit fat monitor %d thread: %d\n", FAT_LOCK_ID(lockword), tm_self_tls->thread_id));
@@ -593,8 +593,8 @@
     }
 
     for (i = RECURSION(lockword); i > 0; i--) {
-        TRACE( ("infalte recursion monitor"));
-        status = hythread_monitor_enter(fat_monitor); // transfer recusrion count to fat fat_monitor
+        TRACE( ("inflate recursion monitor"));
+        status = hythread_monitor_enter(fat_monitor); // transfer recursion count to fat fat_monitor
         assert(status == TM_ERROR_NONE);
     }
     fat_monitor_id = locktable_put_fat_monitor(fat_monitor); // put fat_monitor into lock table
Index: src/thread_private.h
===================================================================
--- src/thread_private.h (revision 491398)
+++ src/thread_private.h (working copy)
@@ -109,14 +109,14 @@
 /**
  * get_local_pool() function return apr pool associated with the current thread.
  * the memory could be allocated without lock using this pool
- * dealocation should be done in the same thread, otherwise
+ * deallocation should be done in the same thread, otherwise
  * local_pool_cleanup_register() should be called
  */
 apr_pool_t* get_local_pool();
 
 /**
- * local_pool_cleanup_register() synchroniously register the cleanup function.
- * It shold be called to request cleaunp in thread local pool, from other thread
+ * local_pool_cleanup_register() synchronously registers the cleanup function.
+ * It should be called to request cleanup in thread local pool, from another thread
  * Usage scenario:
 * IDATA hymutex_destroy (tm_mutex_t *mutex) {
 *     apr_pool_t *pool = apr_thread_mutex_pool_get ((apr_thread_mutex_t*)mutex);
@@ -173,7 +173,7 @@
 #endif
 
     /**
-     * Each thread keeps a pointer to the libary it belongs to.
+     * Each thread keeps a pointer to the library it belongs to.
      */
     HyThreadLibrary * library;
 
@@ -390,7 +390,7 @@
     apr_pool_t *pool;
 
     /**
-     * weak reference to corresponding java.lang.Thread instace
+     * weak reference to corresponding java.lang.Thread instance
      */
     jobject thread_ref;
 
@@ -427,7 +427,7 @@
     int threads_count;
 
     /**
-     * Group index or key for search perposes
+     * Group index or key for search purposes
      */
     int group_index;
 
@@ -597,7 +597,7 @@
 hythread_group_t get_java_thread_group(void);
 
 /**
- * Thread cancelation, being used at VM shutdown through
+ * Thread cancellation, being used at VM shutdown through
  * tmj_cancel_all_threads() method call to terminate all java
  * threads at shutdown.
  */
Index: src/thread_java_basic.c
===================================================================
--- src/thread_java_basic.c (revision 491398)
+++ src/thread_java_basic.c (working copy)
@@ -74,7 +74,7 @@
     jthread java_thread;
     wrapper_proc_data *data = (wrapper_proc_data *)arg;
 
-    // Assocciation should be already done.
+    // Association should be already done.
     native_thread = hythread_self();
     jvmti_thread = hythread_get_private_data(native_thread);
     assert(jvmti_thread);
@@ -153,7 +153,7 @@
         return TM_ERROR_OUT_OF_MEMORY;
     }
 
-    // Prepare argumets for wrapper proc
+    // Prepare arguments for wrapper proc
     status = (*jni_env) -> GetJavaVM(jni_env, &data->java_vm);
     if (status != JNI_OK) return TM_ERROR_INTERNAL;
@@ -181,9 +181,9 @@
  * and associate it with the current native thread. Nothing happens
  * if this thread is already attached.
  *
- * @param[in] jni_env JNI environment for cuurent thread
+ * @param[in] jni_env JNI environment for current thread
  * @param[in] java_thread j.l.Thread instance to associate with current thread
- * @param[in] daemon JNI_TRUE if attaching thread is a daemon thread, JNI_FALSE overwise
+ * @param[in] daemon JNI_TRUE if attaching thread is a daemon thread, JNI_FALSE otherwise
  * @sa JNI::AttachCurrentThread ()
  */
 IDATA jthread_attach(JNIEnv * jni_env, jthread java_thread, jboolean daemon) {
Index: src/thread_native_state.c
===================================================================
--- src/thread_native_state.c (revision 491398)
+++ src/thread_native_state.c (working copy)
@@ -99,7 +99,7 @@
 int VMCALL hythread_is_waiting(hythread_t thread) { return thread->state & TM_THREAD_STATE_WAITING ; };
 
 /**
- * Returns non-zero if thread is waiting indefinitly.
+ * Returns non-zero if thread is waiting indefinitely.
  *
  * @param[in] thread those attribute is read
  */
Index: src/thread_native_suspend.c
===================================================================
--- src/thread_native_suspend.c (revision 491398)
+++ src/thread_native_suspend.c (working copy)
@@ -137,7 +137,7 @@
 /**
  * Denotes a single point where safe suspension is possible.
  *
- * If there was a suspension request set for this thread, this method notifes
+ * If there was a suspension request set for this thread, this method notifies
  * the requesting thread and then blocks until someone calls the tmn_resume()
  * for this thread.
  *

@@ -165,7 +165,7 @@
         thread->safepoint_callback = NULL;
 
         // since set callback suspended the thread
-        // resore its original state
+        // restore its original state
         hythread_resume(tm_self_tls);
         callback_func();
     }
@@ -173,7 +173,7 @@
     thread->suspend_disable_count = 0;
     apr_memory_rw_barrier();
 
-    // code for Ipf that support StackIterator and immmediate suspend
+    // code for Ipf that support StackIterator and immediate suspend
     // notify suspender
     // hylatch_count_down(thread->safe_region_event);
 
@@ -200,7 +200,7 @@
         return;
     }
 
-    //we realy need to suspend thread.
+    //we really need to suspend thread.
 
     hysem_set(thread->resume_event, 0);
 
@@ -208,11 +208,11 @@
 
     apr_thread_yield_other(thread->os_handle);
 
-    TRACE(("TM: suspend requiest sent: %p request count: %d",thread , thread->suspend_request));
+    TRACE(("TM: suspend request sent: %p request count: %d",thread , thread->suspend_request));
 }
 
 
-// the second part of suspention
+// the second part of suspension
 // blocked in case was selfsuspended.
 static IDATA wait_safe_region_event(hythread_t thread) {
     assert(thread->suspend_request >= 1);
@@ -366,7 +366,7 @@
  * Returns safepoint callback function.
  *
  * @param[in] t thread where callback needs to be executed
- * @return callback function currently instralled, or NULL if there was none
+ * @return callback function currently installed, or NULL if there was none
  */
 hythread_event_callback_proc VMCALL hythread_get_safepoint_callback(hythread_t t) {
     return t->safepoint_callback;
@@ -378,7 +378,7 @@
  * This method sets a suspend request for the every thread in the group
  * and then returns the iterator that can be used to traverse through the suspended threads.
  * Each invocation of the tmn_iterator_next() method on the iterator will return the next
- * suspeneded thread.
+ * suspended thread.
  *
  * @param[out] t iterator
  * @param[in] group thread group to be suspended
@@ -404,7 +404,7 @@
         }
     }
     hythread_iterator_reset(&iter);
-    // all threads should be stoped in safepoints or be in safe region.
+    // all threads should be stopped in safepoints or be in safe region.
     TRACE(("TM: wait suspend responses"));
     while((next = hythread_iterator_next(&iter)) != NULL) {
         if(next != self) {
Index: src/thread_init.c
===================================================================
--- src/thread_init.c (revision 491398)
+++ src/thread_init.c (working copy)
@@ -22,7 +22,7 @@
 
 /**
  * @file thread_init.c
- * @brief hythread init/shutdown finctions
+ * @brief hythread init/shutdown functions
  */
 
 #undef LOG_DOMAIN
@@ -79,7 +79,7 @@
  * Creates and initializes a threading library.
  *
  * @param[out] lib pointer to the created thread library
- * @return The thead library's initStatus will be set to 0 on success or
+ * @return The thread library's initStatus will be set to 0 on success or
  * a negative value on failure.
 *
 * @see hythread_attach, hythread_shutdown
@@ -127,7 +127,7 @@
 * hythread_attach before accessing any thread library functions.
 *
 * @param[in] lib pointer to the thread library to be initialized (non-NULL)
- * @return The thead library's initStatus will be set to 0 on success or
+ * @return The thread library's initStatus will be set to 0 on success or
 * a negative value on failure.
 *
 * @see hythread_attach, hythread_shutdown
@@ -202,7 +202,7 @@
 }
 
 /**
- * Acquires global lock of the library assocciated with the current thread.
+ * Acquires global lock of the library associated with the current thread.
 *
 * @param[in] self current thread
 */
@@ -215,7 +215,7 @@
 }
 
 /**
- * Releases global lock of the library assocciated with the current thread.
+ * Releases global lock of the library associated with the current thread.
 *
 * @param[in] self current thread
 */
@@ -327,7 +327,7 @@
     return -1;
 }
 //add entry to the end of the array
-// retrun new entry index, -1 if failed.
+// return new entry index, -1 if failed.
 int add_entry(char* name) {
     int index = size++;
     if(index >= TABLE_SIZE-1) {
Index: src/thread_native_attrs.c
===================================================================
--- src/thread_native_attrs.c (revision 491398)
+++ src/thread_native_attrs.c (working copy)
@@ -22,7 +22,7 @@
 
 /**
  * @file thread_native_attrs.c
- * @brief Hythread priority related finctions
+ * @brief Hythread priority related functions
 */
 
 #include
Index: javasrc/org/apache/harmony/drlvm/thread/ThreadHelper.java
===================================================================
--- javasrc/org/apache/harmony/drlvm/thread/ThreadHelper.java (revision 491398)
+++ javasrc/org/apache/harmony/drlvm/thread/ThreadHelper.java (working copy)
@@ -46,7 +46,7 @@
         if ((new_lockword & HI_BITS) == 0){
 
             // comparison above is some kind of tricky, two things are checked at once;
-            // if we got zero it meants that there is NO fat lock here, and thread_id stored in lockword
+            // if we got zero it means that there is NO fat lock here, and thread_id stored in lockword
             // is the same as current thread_id
 
             if ( new_lockword <= RECURSION_BOUND_IN_PLACE ) {
@@ -59,7 +59,7 @@
                 return ;
             }
         } else {
-            // avaliable possiblities here:
+            // available possibilities here:
            // 1. fat lock - 0x80000000 is set to 1 ; need goto slow path
            // 2. captured thin lock for another thread ; need goto slow path
            // 3. reserved lock for another thread. ; need goto slow path
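
The jthread_monitor_wait() comment touched in thread_java_monitors.c above lists four wake-up causes: notify, notifyAll, interrupt, and expiry of the timeout. A minimal POSIX sketch of that contract follows; the demo_ names are illustrative stand-ins, not the DRLVM types, and the real code sits on top of the thin/fat monitor machinery rather than a bare condition variable.

/* Sketch of the wait contract described for jthread_monitor_wait() above.
 * Plain POSIX illustration, not the DRLVM code path. */
#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <time.h>

typedef struct {
    pthread_mutex_t lock;
    pthread_cond_t  cond;
    bool            notified;     /* set by the notifying thread            */
    bool            interrupted;  /* set by a hypothetical interrupt() call */
} demo_monitor_t;

/* Returns 0 when notified, 1 on interrupt, 2 on timeout. */
int demo_monitor_wait(demo_monitor_t *m, long timeout_ms)
{
    struct timespec deadline;
    clock_gettime(CLOCK_REALTIME, &deadline);
    deadline.tv_sec  += timeout_ms / 1000;
    deadline.tv_nsec += (timeout_ms % 1000) * 1000000L;
    if (deadline.tv_nsec >= 1000000000L) {
        deadline.tv_sec  += 1;
        deadline.tv_nsec -= 1000000000L;
    }

    pthread_mutex_lock(&m->lock);
    int rc;
    /* Re-check the predicate after every wakeup: spurious wakeups are allowed. */
    while (!m->notified && !m->interrupted) {
        if (pthread_cond_timedwait(&m->cond, &m->lock, &deadline) == ETIMEDOUT)
            break;
    }
    if (m->notified)         rc = 0;
    else if (m->interrupted) rc = 1;
    else                     rc = 2;  /* real time elapsed >= timeout */
    pthread_mutex_unlock(&m->lock);
    return rc;
}

The predicate re-check on every wake-up is what makes spurious wake-ups harmless and is why the comment phrases the last case as "real time elapsed from the waiting begin is greater or equal the timeout specified".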
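
The hythread_cancel_all() hunks in thread_native_basic.c above show the shutdown loop: walk the group, skip the calling thread, and keep cancelling even when an individual cancel fails. A simplified sketch of that shape, with demo_ stand-ins instead of hythread_iterator_t, is below.

/* Sketch of the shutdown loop documented for hythread_cancel_all() above.
 * The group and helpers are simplified stand-ins, not the hythread API. */
#include <stddef.h>

typedef struct demo_thread demo_thread_t;

typedef struct {
    demo_thread_t **threads;
    size_t          count;
} demo_group_t;

extern demo_thread_t *demo_self(void);                /* assumed helper */
extern void           demo_cancel(demo_thread_t *t);  /* assumed helper */

void demo_cancel_all(demo_group_t *group)
{
    demo_thread_t *self = demo_self();
    for (size_t i = 0; i < group->count; i++) {
        demo_thread_t *next = group->threads[i];
        if (next == self)
            continue;  /* never cancel the thread performing the shutdown */
        /* Errors are deliberately ignored: at shutdown it makes no sense to
         * stop terminating the remaining threads because one of them failed. */
        demo_cancel(next);
    }
}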
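
The thread_java_park.c comment fixed above describes parking until an absolute deadline given in milliseconds, with the interrupted flag left untouched. A permit-based POSIX sketch of that deadline behaviour follows; the exact jthread function name is not shown in the hunk, so only the documented semantics are reproduced, and interrupt delivery is left out.

/* Sketch of the park-until-deadline contract documented in thread_java_park.c. */
#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <time.h>

typedef struct {
    pthread_mutex_t lock;
    pthread_cond_t  cond;
    bool            permit;
} demo_parker_t;

void demo_unpark(demo_parker_t *p)
{
    pthread_mutex_lock(&p->lock);
    p->permit = true;
    pthread_cond_signal(&p->cond);
    pthread_mutex_unlock(&p->lock);
}

/* millis is an absolute wall-clock time in milliseconds, matching the
 * "@param[in] millis absolute time in milliseconds to wait until" doc above. */
void demo_park_until(demo_parker_t *p, long long millis)
{
    struct timespec deadline = {
        .tv_sec  = (time_t)(millis / 1000),
        .tv_nsec = (long)(millis % 1000) * 1000000L
    };
    pthread_mutex_lock(&p->lock);
    while (!p->permit) {
        if (pthread_cond_timedwait(&p->cond, &p->lock, &deadline) == ETIMEDOUT)
            break;                 /* deadline reached without an unpark */
    }
    p->permit = false;             /* consume the permit, if any */
    pthread_mutex_unlock(&p->lock);
}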
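
The thread_native_latch.c comment fixed above states the whole contract: once the count reaches zero, every thread awaiting the latch is released, mirroring java.util.concurrent.CountDownLatch.countDown(). A plain mutex-plus-condvar sketch of that behaviour, not the hylatch implementation, looks like this.

/* Sketch of countdown-latch semantics as documented in thread_native_latch.c. */
#include <pthread.h>

typedef struct {
    pthread_mutex_t lock;
    pthread_cond_t  zero;
    unsigned        count;
} demo_latch_t;

void demo_latch_init(demo_latch_t *l, unsigned count)
{
    pthread_mutex_init(&l->lock, NULL);
    pthread_cond_init(&l->zero, NULL);
    l->count = count;
}

void demo_latch_count_down(demo_latch_t *l)
{
    pthread_mutex_lock(&l->lock);
    if (l->count > 0 && --l->count == 0)
        pthread_cond_broadcast(&l->zero);  /* unblock every thread awaiting the latch */
    pthread_mutex_unlock(&l->lock);
}

void demo_latch_wait(demo_latch_t *l)
{
    pthread_mutex_lock(&l->lock);
    while (l->count != 0)
        pthread_cond_wait(&l->zero, &l->lock);
    pthread_mutex_unlock(&l->lock);
}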
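
The thread_native_interrupt.c hunks above document two halves of the interrupt protocol: hythread_interrupt() wakes a blocked target, and hythread_clear_interrupted_other() returns the old status while clearing the flag. The following C11 sketch captures just the flag handling; the real code additionally has to kick the target out of whatever blocking primitive it sleeps on.

/* Sketch of the interrupt-flag contract documented in thread_native_interrupt.c. */
#include <stdatomic.h>
#include <stdbool.h>

typedef struct {
    _Atomic bool interrupted;
    /* ... plus the condvars/events the thread might be blocked on ... */
} demo_thread_state_t;

void demo_interrupt(demo_thread_state_t *t)
{
    atomic_store(&t->interrupted, true);
    /* A real implementation would also signal the condition variable or
     * semaphore the target is blocked on so it returns "interrupted". */
}

/* Returns the previous status and clears the flag, mirroring the
 * hythread_clear_interrupted_other() doc above. */
bool demo_clear_interrupted(demo_thread_state_t *t)
{
    return atomic_exchange(&t->interrupted, false);
}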
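
The thread_native_thin_monitor.c and ThreadHelper.java hunks above hint at the lockword layout: the hashcode lives in the low 10 bits (the 0x3FF mask in hythread_thin_monitor_create()) and bit 31 marks a fat, i.e. inflated, lock. The thread-id and recursion field positions below are assumptions made for illustration only; the sketch shows the shape of the uncontended fast path, not the DRLVM encoding.

/* Sketch of a thin-lock fast path in the spirit of the code above. */
#include <stdatomic.h>
#include <stdint.h>

#define HASH_MASK      0x000003FFu  /* low 10 bits: object hashcode (from the patch) */
#define FAT_LOCK_BIT   0x80000000u  /* bit 31: lock is inflated (from the patch)     */
#define RECURSION_MASK 0x0000FC00u  /* assumed: 6 recursion bits                     */
#define RECURSION_ONE  0x00000400u
#define THREAD_ID_MASK 0x7FFF0000u  /* assumed: 15 thread-id bits                    */
#define THREAD_ID(w)   ((w) & THREAD_ID_MASK)

/* Returns 0 on success, -1 when the slow path (contention or fat lock) is needed.
 * self_id must fit in 15 bits for the assumed layout. */
static int thin_try_enter(_Atomic uint32_t *lockword, uint32_t self_id)
{
    uint32_t old = atomic_load_explicit(lockword, memory_order_relaxed);

    if (old & FAT_LOCK_BIT)
        return -1;                            /* already inflated: use the fat monitor */

    if ((old & ~HASH_MASK) == 0) {            /* unlocked: claim it with a single CAS  */
        uint32_t desired = old | (self_id << 16);
        return atomic_compare_exchange_strong(lockword, &old, desired) ? 0 : -1;
    }

    if (THREAD_ID(old) == (self_id << 16)) {  /* we already own it: bump recursion     */
        if ((old & RECURSION_MASK) == RECURSION_MASK)
            return -1;                        /* recursion overflow: inflate            */
        /* Plain store is enough here: once held, only the owner writes these bits. */
        atomic_store_explicit(lockword, old + RECURSION_ONE, memory_order_relaxed);
        return 0;
    }
    return -1;                                /* owned by another thread: contended     */
}

When the slow path does inflate, the @@ -593 hunk above shows how the recursion count is carried over: the owner enters the fat monitor once per RECURSION(lockword) level so that the matching exits stay balanced.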
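
The thread_native_suspend.c comments above describe the safe-suspension handshake: a requester raises a suspend request, the target notices it at its next safepoint, notifies the requester, and blocks until it is resumed. The sketch below replaces the hysem/hylatch events of the real implementation with a mutex and condition variable; it is a shape illustration under those assumptions, not hythread code.

/* Sketch of the safepoint suspension handshake described in thread_native_suspend.c. */
#include <pthread.h>
#include <stdatomic.h>

typedef struct {
    _Atomic int     suspend_request;  /* how many suspenders are waiting       */
    pthread_mutex_t lock;
    pthread_cond_t  state_changed;
    int             suspended;        /* target acknowledged and is parked     */
} demo_thread_t;

/* Called by the requesting thread. */
void demo_suspend(demo_thread_t *t)
{
    atomic_fetch_add(&t->suspend_request, 1);
    pthread_mutex_lock(&t->lock);
    while (!t->suspended)                       /* wait for the safepoint ack  */
        pthread_cond_wait(&t->state_changed, &t->lock);
    pthread_mutex_unlock(&t->lock);
}

void demo_resume(demo_thread_t *t)
{
    if (atomic_fetch_sub(&t->suspend_request, 1) != 1)
        return;                                 /* other suspenders still active */
    pthread_mutex_lock(&t->lock);
    pthread_cond_broadcast(&t->state_changed);  /* let the target run again      */
    pthread_mutex_unlock(&t->lock);
}

/* Called by the target thread itself at points where suspension is safe. */
void demo_safepoint(demo_thread_t *t)
{
    if (atomic_load(&t->suspend_request) == 0)
        return;                                 /* fast path: nobody wants us    */
    pthread_mutex_lock(&t->lock);
    t->suspended = 1;
    pthread_cond_broadcast(&t->state_changed);  /* notify the requesting thread  */
    while (atomic_load(&t->suspend_request) > 0)
        pthread_cond_wait(&t->state_changed, &t->lock);
    t->suspended = 0;
    pthread_mutex_unlock(&t->lock);
}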