Index: vm/port/include/stack_iterator.h =================================================================== --- vm/port/include/stack_iterator.h (revision 601284) +++ vm/port/include/stack_iterator.h (working copy) @@ -52,13 +52,20 @@ struct StackIterator; /** - * Creates a new stack iterator for the given thread. + * Creates a new stack iterator for the current thread. * * @note The function assumes that the thread is currently in native code. */ StackIterator* si_create_from_native(); /** + * Fills a stack iterator structure for the current thread. + * + * @note The function assumes that the thread is currently in native code. + */ +void si_fill_from_native(StackIterator* si); + +/** * Creates a new stack iterator for the given thread. * * The thread can run concurrently with the stack iterator, @@ -74,6 +81,21 @@ StackIterator* si_create_from_native(VM_thread* thread); /** + * Fills a stack iterator structure for the given thread. + * + * The thread can run concurrently with the stack iterator, + * but it must not pop (return past) the most recent M2N frame when the iterator is called. + * + * Creation is not atomic with respect to pushing/popping of M2N frames. + * The client code must ensure that such operations are serialized. + * + * @param[in] thread - the pointer to the thread, the stack of which must be enumerated + * + * @note The function assumes that the given thread is currently in native code. + */ +void si_fill_from_native(StackIterator* si, VM_thread* thread); + +/** * Creates a new stack iterator for the suspended thread. * * The thread can run concurrently with the stack iterator, @@ -92,6 +114,29 @@ StackIterator* si_create_from_registers(Registers* regs, bool is_ip_past, M2nFrame* m2nf); /** + * Fills a stack iterator structure for the suspended thread. + * + * The thread can run concurrently with the stack iterator, + * but it must not pop (return past) the most recent M2N frame when the iterator is called. 
+ * + * Creation is not atomic with respect to pushing/popping of M2N frames. + * The client code must ensure that such operations are serialized. + * + * @param[in] regs - values of the registers at the point of suspension + * @param[in] is_ip_past - indicates whether the ip is past or not + * @param[in] lm2nf - the pointer to the M2N frame that must be the one immediately + * prior to the suspended frame + * + * @note The function assumes that the iterated thread is currently suspended from managed code. + */ +void si_fill_from_registers(StackIterator* si, Registers* regs, bool is_ip_past, M2nFrame* lm2nf); + +/** + * Returns size of stack iterator structure in bytes. + */ +size_t si_size(); + +/** * Makes a copy of the given stack iterator. * * @param[in] si - the pointer to the stack iterator to be copied Index: vm/port/src/lil/em64t/pim/stack_iterator_em64t.cpp =================================================================== --- vm/port/src/lil/em64t/pim/stack_iterator_em64t.cpp (revision 601284) +++ vm/port/src/lil/em64t/pim/stack_iterator_em64t.cpp (working copy) @@ -315,18 +315,27 @@ return si_create_from_native(p_TLS_vmthread); } +void si_fill_from_native(StackIterator* si) { + si_fill_from_native(si, p_TLS_vmthread); +} + + StackIterator * si_create_from_native(VM_thread * thread) { ASSERT_NO_INTERPRETER // Allocate iterator StackIterator * si = (StackIterator *)STD_MALLOC(sizeof(StackIterator)); + + si_fill_from_native(si, thread); + return si; +} + +void si_fill_from_native(StackIterator* si, VM_thread * thread) { memset(si, 0, sizeof(StackIterator)); - si->cci = NULL; + si->cci = NULL; si->jit_frame_context.p_rip = &si->ip; si->m2n_frame = m2n_get_last_frame(thread); si->ip = 0; - - return si; } StackIterator * si_create_from_registers(Registers * regs, bool is_ip_past, @@ -334,6 +343,15 @@ ASSERT_NO_INTERPRETER // Allocate iterator StackIterator * si = (StackIterator *)STD_MALLOC(sizeof(StackIterator)); + assert(si); + + si_fill_from_registers(si, regs, is_ip_past, 
lm2nf); + + return si; +} + +void si_fill_from_registers(StackIterator* si, Registers* regs, bool is_ip_past, M2nFrame* lm2nf) +{ memset(si, 0, sizeof(StackIterator)); Global_Env *env = VM_Global_State::loader_env; @@ -344,8 +362,10 @@ si->m2n_frame = lm2nf; si->ip = regs->rip; +} - return si; +size_t si_size(){ + return sizeof(StackIterator); } // On EM64T all registers are preserved automatically, so this is a nop. @@ -464,7 +484,7 @@ // 1. Copy si to stack StackIterator local_si; si_copy(&local_si, si); - si_free(si); + //si_free(si); // 2. Set the M2nFrame list m2n_set_last_frame(local_si.m2n_frame); Index: vm/port/src/lil/ia32/pim/stack_iterator_ia32.cpp =================================================================== --- vm/port/src/lil/ia32/pim/stack_iterator_ia32.cpp (revision 601284) +++ vm/port/src/lil/ia32/pim/stack_iterator_ia32.cpp (working copy) @@ -236,48 +236,70 @@ return si_create_from_native(p_TLS_vmthread); } +void si_fill_from_native(StackIterator* si) +{ + si_fill_from_native(si, p_TLS_vmthread); +} + StackIterator* si_create_from_native(VM_thread* thread) { ASSERT_NO_INTERPRETER // Allocate iterator StackIterator* res = (StackIterator*)STD_MALLOC(sizeof(StackIterator)); assert(res); - memset(res, 0, sizeof(StackIterator)); // Setup current frame - res->cci = NULL; - res->m2nfl = m2n_get_last_frame(thread); - res->ip = 0; - res->c.p_eip = &res->ip; + si_fill_from_native(res, thread); return res; } +void si_fill_from_native(StackIterator* si, VM_thread* thread) +{ + memset(si, 0, sizeof(StackIterator)); + + // Setup current frame + si->cci = NULL; + si->m2nfl = m2n_get_last_frame(thread); + si->ip = 0; + si->c.p_eip = &si->ip; +} + + StackIterator* si_create_from_registers(Registers* regs, bool is_ip_past, M2nFrame* lm2nf) { // Allocate iterator StackIterator* res = (StackIterator*)STD_MALLOC(sizeof(StackIterator)); assert(res); - memset(res, 0, sizeof(StackIterator)); + si_fill_from_registers(res, regs, is_ip_past, lm2nf); + + return res; +} 
+void si_fill_from_registers(StackIterator* si,Registers* regs, bool is_ip_past, M2nFrame* lm2nf) +{ + memset(si, 0, sizeof(StackIterator)); + // Setup current frame Global_Env *env = VM_Global_State::loader_env; - // It's possible that registers represent native code and res->cci==NULL - res->cci = env->vm_methods->find((NativeCodePtr)regs->eip, is_ip_past); - res->c.esp = regs->esp; - res->c.p_eip = ®s->eip; - res->c.p_ebp = ®s->ebp; - res->c.p_edi = ®s->edi; - res->c.p_esi = ®s->esi; - res->c.p_ebx = ®s->ebx; - res->c.p_eax = ®s->eax; - res->c.p_ecx = ®s->ecx; - res->c.p_edx = ®s->edx; - res->c.is_ip_past = is_ip_past; - res->c.eflags = regs->eflags; - res->m2nfl = lm2nf; + // It's possible that registers represent native code and si->cci==NULL + si->cci = env->vm_methods->find((NativeCodePtr)regs->eip, is_ip_past); + si->c.esp = regs->esp; + si->c.p_eip = ®s->eip; + si->c.p_ebp = ®s->ebp; + si->c.p_edi = ®s->edi; + si->c.p_esi = ®s->esi; + si->c.p_ebx = ®s->ebx; + si->c.p_eax = ®s->eax; + si->c.p_ecx = ®s->ecx; + si->c.p_edx = ®s->edx; + si->c.is_ip_past = is_ip_past; + si->c.eflags = regs->eflags; + si->m2nfl = lm2nf; +} - return res; +size_t si_size(){ + return sizeof(StackIterator); } // On IA32 all registers are preserved automatically, so this is a nop. @@ -473,7 +495,7 @@ if (si->c.p_eip == &si->ip) local_si.c.p_eip = &local_si.ip; - si_free(si); + //si_free(si); // 2. 
Set the M2nFrame list m2n_set_last_frame(local_si.m2nfl); Index: vm/port/src/lil/ipf/pim/stack_iterator_ipf.cpp =================================================================== --- vm/port/src/lil/ipf/pim/stack_iterator_ipf.cpp (revision 601284) +++ vm/port/src/lil/ipf/pim/stack_iterator_ipf.cpp (working copy) @@ -356,6 +356,16 @@ ////////////////////////////////////////////////////////////////////////// // Stack Iterator Interface +StackIterator* si_create_from_native() +{ + return si_create_from_native(p_TLS_vmthread); +} + +void si_fill_from_native(StackIterator* si) +{ + si_fill_from_native(si, p_TLS_vmthread); +} + StackIterator* si_create_from_native(VM_thread* thread) { // Allocate iterator @@ -363,16 +373,18 @@ assert(res); // Setup current frame - res->cci = NULL; - res->m2nfl = m2n_get_last_frame(thread); - res->ip = 0; - res->c.p_eip = &res->ip; + si_fill_from_native(res, thread); return res; } -StackIterator* si_create_from_native() -{ - return si_create_from_native(p_TLS_vmthread); +void si_fill_from_native(StackIterator* si, VM_thread * thread) { + memset(si, 0, sizeof(StackIterator)); + + // Setup current frame + si->cci = NULL; + si->m2nfl = m2n_get_last_frame(thread); + si->ip = 0; + si->c.p_eip = &si->ip; } /* @@ -428,6 +440,15 @@ return NULL; } +void si_fill_from_registers(StackIterator* si, Registers*, bool is_ip_past, M2nFrame*) +{ + ABORT("Not implemented"); +} + +size_t si_size(){ + return sizeof(StackIterator); +} + void si_transfer_all_preserved_registers(StackIterator* si) { unsigned i; @@ -557,7 +578,7 @@ memcpy(&local_si, si, sizeof(StackIterator)); if (si->c.p_eip == &si->ip) local_si.c.p_eip = &local_si.ip; - si_free(si); + //si_free(si); // 2. 
Set the M2nFrame list m2n_set_last_frame(local_si.m2nfl); Index: vm/vmcore/include/object_handles.h =================================================================== --- vm/vmcore/include/object_handles.h (revision 601284) +++ vm/vmcore/include/object_handles.h (working copy) @@ -160,6 +160,8 @@ VMEXPORT // temporary solution for interpreter unplug ObjectHandle oh_allocate_local_handle(); +ObjectHandle oh_allocate_local_handle_from_jni(); + ObjectHandle oh_convert_to_local_handle(ManagedObject* pointer); ObjectHandle oh_copy_to_local_handle(ObjectHandle oh); Index: vm/vmcore/src/jni/jni_array.cpp =================================================================== --- vm/vmcore/src/jni/jni_array.cpp (revision 601284) +++ vm/vmcore/src/jni/jni_array.cpp (working copy) @@ -126,7 +126,11 @@ REFS_RUNTIME_SWITCH_ENDIF } - ObjectHandle new_handle = oh_allocate_local_handle(); + ObjectHandle new_handle = oh_allocate_local_handle_from_jni(); + if (new_handle == NULL) { + tmn_suspend_enable(); //---------------------------------^ + return NULL; + } new_handle->object = (ManagedObject*)vector; tmn_suspend_enable(); //---------------------------------^ @@ -160,7 +164,11 @@ ManagedObject *val = get_raw_reference_pointer((ManagedObject **)addr); ObjectHandle new_handle = NULL; if (val != NULL) { - new_handle = oh_allocate_local_handle(); + new_handle = oh_allocate_local_handle_from_jni(); + if (new_handle == NULL) { + tmn_suspend_enable(); //---------------------------------^ + return NULL; + } new_handle->object = val; } @@ -250,7 +258,11 @@ clss->instance_allocated(sz); #endif //VM_STATS - ObjectHandle h = oh_allocate_local_handle(); + ObjectHandle h = oh_allocate_local_handle_from_jni(); + if (h == NULL) { + tmn_suspend_enable(); //---------------------------------^ + return NULL; + } h->object = (ManagedObject *)array; tmn_suspend_enable(); //---------------------------------^ Index: vm/vmcore/src/jni/jni.cpp 
=================================================================== --- vm/vmcore/src/jni/jni.cpp (revision 601284) +++ vm/vmcore/src/jni/jni.cpp (working copy) @@ -872,15 +872,14 @@ Global_Env * vm_env = jni_get_vm_env(jni_env); if (exn_raised() || ref == NULL) return NULL; - jobject new_ref = oh_copy_to_local_handle(ref); + tmn_suspend_disable(); + jobject new_ref = oh_allocate_local_handle_from_jni(); - if (NULL == new_ref) { - exn_raise_object(vm_env->java_lang_OutOfMemoryError); - return NULL; + if (NULL != new_ref) { + new_ref->object = ref->object; + TRACE2("jni", "NewLocalRef class = " << jobject_to_struct_Class(new_ref)); } - - TRACE2("jni", "NewLocalRef class = " << jobject_to_struct_Class(new_ref)); - + tmn_suspend_enable(); return new_ref; } //NewLocalRef @@ -980,7 +979,11 @@ tmn_suspend_enable(); //---------------------------------^ return NULL; } - ObjectHandle new_handle = oh_allocate_local_handle(); + ObjectHandle new_handle = oh_allocate_local_handle_from_jni(); + if (new_handle == NULL) { + tmn_suspend_enable(); //---------------------------------^ + return NULL; + } new_handle->object = (ManagedObject *)new_obj; tmn_suspend_enable(); //---------------------------------^ @@ -1044,7 +1047,11 @@ tmn_suspend_disable(); //---------------------------------v - ObjectHandle new_handle = oh_allocate_local_handle(); + ObjectHandle new_handle = oh_allocate_local_handle_from_jni(); + if (new_handle == NULL) { + tmn_suspend_enable(); //---------------------------------^ + return NULL; + } ManagedObject *jlo = h->object; assert(jlo); assert(jlo->vt()); @@ -1084,7 +1091,11 @@ tmn_suspend_disable(); //---------------------------------v - ObjectHandle new_handle = oh_allocate_local_handle(); + ObjectHandle new_handle = oh_allocate_local_handle_from_jni(); + if (new_handle == NULL) { + tmn_suspend_enable(); //---------------------------------^ + return NULL; + } ManagedObject *jlo = h->object; assert(jlo); assert(jlo->vt()); Index: vm/vmcore/src/jni/jni_utils.cpp 
=================================================================== --- vm/vmcore/src/jni/jni_utils.cpp (revision 601284) +++ vm/vmcore/src/jni/jni_utils.cpp (working copy) @@ -64,8 +64,10 @@ hythread_suspend_enable(); return NULL; } - ObjectHandle res = oh_allocate_local_handle(); - res->object = obj; + ObjectHandle res = oh_allocate_local_handle_from_jni(); + if (res) { + res->object = obj; + } hythread_suspend_enable(); return (jobject)res; } @@ -756,13 +758,17 @@ jobject create_default_instance(Class* clss) { hythread_suspend_disable(); - jobject h = oh_allocate_local_handle(); ManagedObject *new_obj = class_alloc_new_object_and_run_default_constructor(clss); if (new_obj == NULL) { hythread_suspend_enable(); assert(exn_raised()); return NULL; } + jobject h = oh_allocate_local_handle_from_jni(); + if (h == NULL) { + hythread_suspend_enable(); + return NULL; + } h->object = new_obj; hythread_suspend_enable(); return h; Index: vm/vmcore/src/jni/jni_field.cpp =================================================================== --- vm/vmcore/src/jni/jni_field.cpp (revision 601284) +++ vm/vmcore/src/jni/jni_field.cpp (working copy) @@ -126,7 +126,11 @@ ManagedObject *val = get_raw_reference_pointer(field_addr); ObjectHandle new_handle = NULL; if (val != NULL) { - new_handle = oh_allocate_local_handle(); + new_handle = oh_allocate_local_handle_from_jni(); + if (new_handle == NULL) { + tmn_suspend_enable(); //---------------------------------^ + return NULL; + } new_handle->object = val; } @@ -777,8 +781,10 @@ // compress static fields. 
ManagedObject *val = get_raw_reference_pointer(field_addr); if (val != NULL) { - new_handle = oh_allocate_local_handle(); - new_handle->object = val; + new_handle = oh_allocate_local_handle_from_jni(); + if (new_handle != NULL) { + new_handle->object = val; + } } else { new_handle = NULL; } Index: vm/vmcore/src/jvmti/jvmti_break_intf.cpp =================================================================== --- vm/vmcore/src/jvmti/jvmti_break_intf.cpp (revision 601284) +++ vm/vmcore/src/jvmti/jvmti_break_intf.cpp (working copy) @@ -828,8 +828,8 @@ // transfers execution control to the instruction buffer to // execute the original instruction with the registers which it // had before breakpoint happened - StackIterator *si = - si_create_from_registers(®s, false, m2n_get_previous_frame(m2nf)); + StackIterator* si = (StackIterator*) STD_ALLOCA(si_size()); + si_fill_from_registers(si, ®s, false, m2n_get_previous_frame(m2nf)); si_set_ip(si, instruction_buffer, false); si_transfer_control(si); Index: vm/vmcore/src/jvmti/jvmti_roots.cpp =================================================================== --- vm/vmcore/src/jvmti/jvmti_roots.cpp (revision 601284) +++ vm/vmcore/src/jvmti/jvmti_roots.cpp (working copy) @@ -314,7 +314,7 @@ { StackIterator* si; si = si_create_from_native(thread); - ti_enumerate_thread_stack((TIEnv*)env, si); + ti_enumerate_thread_stack((TIEnv*)env, si); // Enumerate references associated with a thread that are not stored on the thread's stack. 
ti_enumerate_thread_not_on_stack((TIEnv*)env, thread); Index: vm/vmcore/src/jvmti/jvmti_pop_frame.cpp =================================================================== --- vm/vmcore/src/jvmti/jvmti_pop_frame.cpp (revision 601284) +++ vm/vmcore/src/jvmti/jvmti_pop_frame.cpp (working copy) @@ -221,6 +221,7 @@ // save regs value from jit context to m2n Registers* regs = get_pop_frame_registers(top_frame); si_copy_to_registers(si, regs); + si_free(si); // set pop done frame state m2n_set_frame_type(top_frame, frame_type(FRAME_POP_DONE | FRAME_MODIFIED_STACK)); @@ -269,7 +270,8 @@ assert(FRAME_POP_DONE == (FRAME_POP_MASK & type)); // create stack iterator from native - StackIterator* si = si_create_from_native(); + StackIterator* si = (StackIterator*) STD_ALLOCA(si_size()); + si_fill_from_native(si); si_transfer_all_preserved_registers(si); // pop native frame @@ -296,7 +298,8 @@ assert(FRAME_POP_NOW == (FRAME_POP_MASK & type)); // create stack iterator from native - StackIterator* si = si_create_from_native(); + StackIterator* si = (StackIterator*) STD_ALLOCA(si_size()); + si_fill_from_native(si); si_transfer_all_preserved_registers(si); // prepare pop frame - find regs values Index: vm/vmcore/src/jvmti/jvmti_step.cpp =================================================================== --- vm/vmcore/src/jvmti/jvmti_step.cpp (revision 601284) +++ vm/vmcore/src/jvmti/jvmti_step.cpp (working copy) @@ -74,6 +74,7 @@ CodeChunkInfo *cci = si_get_code_chunk_info(si); JIT *jit = cci->get_jit(); + si_free(si); NativeCodePtr next_ip; OpenExeJpdaError UNREF result = jit->get_native_location_for_bc(method, Index: vm/vmcore/src/gc/stop_the_world_root_set_enum.cpp =================================================================== --- vm/vmcore/src/gc/stop_the_world_root_set_enum.cpp (revision 601284) +++ vm/vmcore/src/gc/stop_the_world_root_set_enum.cpp (working copy) @@ -221,7 +221,6 @@ vm_enumerate_root_set_single_thread_on_stack(si); // Enumerate references associated with a 
thread that are not stored on the thread's stack. vm_enumerate_root_set_single_thread_not_on_stack(thread); - } //vm_enumerate_thread Index: vm/vmcore/src/exception/exceptions_jit.cpp =================================================================== --- vm/vmcore/src/exception/exceptions_jit.cpp (revision 601284) +++ vm/vmcore/src/exception/exceptions_jit.cpp (working copy) @@ -203,7 +203,8 @@ assert(*exn_obj || exn_class); // Save the throw context - StackIterator *throw_si = si_dup(si); + StackIterator *throw_si = (StackIterator*) STD_ALLOCA(si_size()); + memcpy(throw_si, si, si_size()); // Skip first frame if it is an M2nFrame (which is always a transition from managed to the throw code). // The M2nFrame will be removed from the thread's M2nFrame list but transfer control or copy to registers. @@ -375,7 +376,7 @@ TRACE2("exn", ("setting return pointer to %d", exn_obj)); si_set_return_pointer(si, (void **) exn_obj); - si_free(throw_si); + //si_free(throw_si); return NULL; } } @@ -410,7 +411,7 @@ // Reload exception object pointer because it could have // moved while calling JVMTI callback if (exn_raised()) { - si_free(throw_si); + //si_free(throw_si); return NULL; } @@ -418,7 +419,7 @@ interrupted_method_jit, interrupted_method, interrupted_method_location, NULL, NULL, NULL); - si_free(throw_si); + //si_free(throw_si); return *exn_obj; } //exn_propagate_exception @@ -466,7 +467,8 @@ exn_class = VM_Global_State::loader_env->java_lang_NullPointerException_Class; } ManagedObject* local_exn_obj = exn_obj; - StackIterator* si = si_create_from_native(); + StackIterator* si = (StackIterator*) STD_ALLOCA(si_size()); + si_fill_from_native(si); if (exn_raised()) { return; @@ -493,7 +495,7 @@ jit_exn_constr_args, vm_exn_constr_args); if (exn_raised()) { - si_free(si); + //si_free(si); return; } @@ -574,7 +576,8 @@ BEGIN_RAISE_AREA; - si = si_create_from_native(); + si = (StackIterator*) STD_ALLOCA(si_size()); + si_fill_from_native(si); ManagedObject *local_exn_obj = NULL; 
exn_obj = exn_propagate_exception(si, &local_exn_obj, exn_class, NULL, NULL, NULL); @@ -616,7 +619,7 @@ } unw_m2nf = si_get_m2n(si); - si_free(si); + //si_free(si); END_RAISE_AREA; @@ -642,8 +645,8 @@ m2n_push_suspended_frame(thread, m2n, ®s); M2nFrame* prev_m2n = m2n_get_previous_frame(m2n); - StackIterator *si = - si_create_from_registers(®s, false, prev_m2n); + StackIterator* si = (StackIterator*) STD_ALLOCA(si_size()); + si_fill_from_registers(si, ®s, false, prev_m2n); // si_create_from_registers uses large stack space, // so guard page restored after its invoke. @@ -660,11 +663,11 @@ || (m2n_get_frame_type(prev_m2n) & FRAME_NON_UNWINDABLE))) { exn_raise_by_class(env->java_lang_StackOverflowError_Class); } else { - si_free(si); + //si_free(si); exn_throw_by_class(env->java_lang_StackOverflowError_Class); } } else { - si_free(si); + //si_free(si); exn_throw_by_class(env->java_lang_StackOverflowError_Class); } } @@ -687,8 +690,8 @@ m2n_push_suspended_frame(thread, m2n, ®s); M2nFrame* prev_m2n = m2n_get_previous_frame(m2n); - StackIterator *si = - si_create_from_registers(®s, false, prev_m2n); + StackIterator* si = (StackIterator*) STD_ALLOCA(si_size()); + si_fill_from_registers(si, ®s, false, prev_m2n); // si_create_from_registers uses large stack space, // so guard page restored after its invoke, @@ -707,11 +710,11 @@ || (m2n_get_frame_type(prev_m2n) & FRAME_NON_UNWINDABLE))) { exn_raise_by_class(env->java_lang_StackOverflowError_Class); } else { - si_free(si); + //si_free(si); exn_throw_by_class(env->java_lang_StackOverflowError_Class); } } else { - si_free(si); + //si_free(si); exn_throw_by_class(env->java_lang_StackOverflowError_Class); } } Index: vm/vmcore/src/object/object_handles.cpp =================================================================== --- vm/vmcore/src/object/object_handles.cpp (revision 601284) +++ vm/vmcore/src/object/object_handles.cpp (working copy) @@ -25,6 +25,7 @@ #include #include "environment.h" +#include "exceptions.h" #include 
"open/gc.h" #include "lil.h" #include "lock_manager.h" @@ -278,6 +279,11 @@ unsigned capacity = 10; unsigned size = sizeof(ObjectHandlesNew)+sizeof(ManagedObject*)*(capacity-1); ObjectHandlesNew* n = (ObjectHandlesNew*)STD_MALLOC(size); + + if (n == NULL) { + return NULL; + } + assert(n); memset(n, 0, size); #ifdef _IPF_ @@ -291,14 +297,22 @@ return n; } -ObjectHandle oh_allocate_handle(ObjectHandles** hs) +static ObjectHandle oh_allocate_handle(ObjectHandles** hs) { // the function should be called only from suspend disabled mode // as it is not gc safe. assert(!hythread_is_suspend_enabled()); ObjectHandlesNew* cur = (ObjectHandlesNew*)*hs; - if (!cur || cur->size>=cur->capacity) - cur = oh_add_new_handles((ObjectHandlesNew**)hs); + + if (!cur || cur->size>=cur->capacity) { + ObjectHandlesNew* new_handle_block = oh_add_new_handles((ObjectHandlesNew**)hs); + + if (new_handle_block == NULL) { + return NULL; + } + assert(new_handle_block); + cur = new_handle_block; + } ObjectHandle h = (ObjectHandle)&cur->refs[cur->size]; cur->size++; h->object = NULL; @@ -346,7 +360,9 @@ ObjectHandle NativeObjectHandles::allocate() { - return oh_allocate_handle(&handles); + ObjectHandle res = oh_allocate_handle(&handles); + assert(res); + return res; } void NativeObjectHandles::enumerate() @@ -359,8 +375,7 @@ ////////////////////////////////////////////////////////////////////////// // Local Handles -VMEXPORT // temporary solution for interpreter unplug -ObjectHandle oh_allocate_local_handle() +static ObjectHandle oh_allocate_local_handle_internal() { assert(!hythread_is_suspend_enabled()); @@ -391,6 +406,23 @@ return res; } +VMEXPORT // temporary solution for interpreter unplug +ObjectHandle oh_allocate_local_handle() { + ObjectHandle res = oh_allocate_local_handle_internal(); + assert(res); + return res; +} + +ObjectHandle oh_allocate_local_handle_from_jni() { + ObjectHandle res = oh_allocate_local_handle_internal(); + + if (res == NULL) { + 
exn_raise_object(VM_Global_State::loader_env->java_lang_OutOfMemoryError); + } + return res; +} + + ObjectHandle oh_convert_to_local_handle(ManagedObject* pointer) { assert(!hythread_is_suspend_enabled()); assert(pointer);