From ecdd07874832bb4c2c4f95a478a0a4eeec5ff182 Mon Sep 17 00:00:00 2001
From: Pavel Afremov
Date: Wed, 29 Aug 2007 17:22:44 +0400
Subject: [PATCH] Fix SOE support on Windows x86-64.

On Windows x86-64, DRL VM cannot process a stack overflow error (SOE)
because one guard page on the stack is not enough to create the
exception. Windows x86-64, however, provides the SetThreadStackGuarantee
function, which allows the size of the guard region to be increased.
Unfortunately, this function does not currently work on Windows x86-32.

This patch enables the use of SetThreadStackGuarantee on the x86-64
platform. It also fixes the asm helpers for the exception callback
functions, the stack-size check on the compile_me path, and
si_set_callback.
---
 .../exclude.drlvm_smoke.windows.x86_64.jet         |  2 -
 .../exclude.drlvm_smoke.windows.x86_64.jit         |  6 +-
 .../exclude.drlvm_smoke.windows.x86_64.opt         |  2 -
 .../exclude.drlvm_smoke.windows.x86_64.srv         |  2 -
 vm/port/src/lil/em64t/pim/m2n_em64t_internal.h     |  4 +
 vm/port/src/lil/em64t/pim/stack_iterator_em64t.cpp | 46 +++++++++-------
 vm/vmcore/include/exceptions.h                     |  6 +-
 vm/vmcore/src/exception/exceptions_impl.cpp        | 22 +++++---
 vm/vmcore/src/exception/exceptions_jit.cpp         | 37 ++++++++++---
 vm/vmcore/src/jit/compile.cpp                      |  2 -
 vm/vmcore/src/jit/jit_runtime_support.cpp          |  8 +--
 vm/vmcore/src/util/em64t/base/compile_em64t.cpp    | 12 ++--
 vm/vmcore/src/util/em64t/base/ini_em64t.cpp        |  7 ++
 .../src/util/win/em64t/exception_handlers.asm      | 56 +++++++++++++++++++++--
 .../win/ia32_em64t/nt_exception_filter_common.cpp  | 57 ++++++++++++++++++-----
 15 files changed, 194 insertions(+), 75 deletions(-)

diff --git a/build/make/excludes/exclude.drlvm_smoke.windows.x86_64.jet b/build/make/excludes/exclude.drlvm_smoke.windows.x86_64.jet
index 87a07f5..d35ee2f 100644
--- a/build/make/excludes/exclude.drlvm_smoke.windows.x86_64.jet
+++ b/build/make/excludes/exclude.drlvm_smoke.windows.x86_64.jet
@@ -9,7 +9,7 @@ exception/FinalizeStackTest.java
 # HARMONY-2977
 io/Integers.java
 
-# HARMONY-3010 HARMONY-4292
+# HARMONY-2972 HARMONY-3010 HARMONY-4292
 stress/Stack.java
 
 # HARMONY-3917
diff --git a/build/make/excludes/exclude.drlvm_smoke.windows.x86_64.jit b/build/make/excludes/exclude.drlvm_smoke.windows.x86_64.jit
index 64a3b32..42e2219 100644
--- a/build/make/excludes/exclude.drlvm_smoke.windows.x86_64.jit
+++ b/build/make/excludes/exclude.drlvm_smoke.windows.x86_64.jit
@@ -6,15 +6,13 @@ gc/LOS.java
 gc/MultiThreads.java
 perf/MultiThreadsSimple.java
 
-# HARMONY-2972
-StackTest.java
-exception/FinalizeStackTest.java
+# HARMONY-3019
 stress/Stack.java
 
 # HARMONY-2977
 io/Integers.java
 
-# HARMONY-3010 HARMONY-4292
+# HARMONY-2972 HARMONY-3010 HARMONY-4292
 stress/Stack.java
 
 # HARMONY-3917
diff --git a/build/make/excludes/exclude.drlvm_smoke.windows.x86_64.opt b/build/make/excludes/exclude.drlvm_smoke.windows.x86_64.opt
index cebe289..0fc19fc 100644
--- a/build/make/excludes/exclude.drlvm_smoke.windows.x86_64.opt
+++ b/build/make/excludes/exclude.drlvm_smoke.windows.x86_64.opt
@@ -6,7 +6,7 @@ gc/LOS.java
 gc/MultiThreads.java
 perf/MultiThreadsSimple.java
 
-# HARMONY-2972
+# HARMONY-2972 HARMONY-3010 HARMONY-4265 (HARMONY-4557) & HARMONY-4292
 StackTest.java
 exception/FinalizeStackTest.java
 stress/Stack.java
diff --git a/build/make/excludes/exclude.drlvm_smoke.windows.x86_64.srv b/build/make/excludes/exclude.drlvm_smoke.windows.x86_64.srv
index ac687a3..bf81bdc 100644
--- a/build/make/excludes/exclude.drlvm_smoke.windows.x86_64.srv
+++ b/build/make/excludes/exclude.drlvm_smoke.windows.x86_64.srv
@@ -6,7 +6,7 @@ gc/LOS.java
 gc/MultiThreads.java
 perf/MultiThreadsSimple.java
 
-# HARMONY-2972
+# HARMONY-2972 HARMONY-3010 HARMONY-4265 (HARMONY-4557) & HARMONY-4292
 StackTest.java
 exception/FinalizeStackTest.java
 stress/Stack.java
diff --git a/vm/port/src/lil/em64t/pim/m2n_em64t_internal.h b/vm/port/src/lil/em64t/pim/m2n_em64t_internal.h
index 4f2e3b3..10d9bd7 100644
--- a/vm/port/src/lil/em64t/pim/m2n_em64t_internal.h
+++ b/vm/port/src/lil/em64t/pim/m2n_em64t_internal.h
@@ -32,7 +32,11 @@
 
 #include "m2n.h"
 #include "open/types.h"
 #include "encoder.h"
+#ifdef _WIN64
+const unsigned m2n_sizeof_m2n_frame = 112;
+#else
 const unsigned m2n_sizeof_m2n_frame = 96;
+#endif
 
 typedef struct M2nFrame M2nFrame;
diff --git a/vm/port/src/lil/em64t/pim/stack_iterator_em64t.cpp b/vm/port/src/lil/em64t/pim/stack_iterator_em64t.cpp
index 57111f1..3f9ee1c 100644
--- a/vm/port/src/lil/em64t/pim/stack_iterator_em64t.cpp
+++ b/vm/port/src/lil/em64t/pim/stack_iterator_em64t.cpp
@@ -468,34 +468,42 @@ void si_transfer_control(StackIterator *
     tcs(&local_si);
 }
 
+inline static uint64 unref_reg(uint64* p_reg) {
+    return p_reg ? *p_reg : 0;
+}
+
 void si_copy_to_registers(StackIterator * si, Registers * regs)
 {
     ASSERT_NO_INTERPRETER
-    
+
     regs->rsp = si->jit_frame_context.rsp;
-    regs->rbp = *si->jit_frame_context.p_rbp;
-    regs->rip = *si->jit_frame_context.p_rip;
-
-    regs->rbx = *si->jit_frame_context.p_rbx;
-    regs->r12 = *si->jit_frame_context.p_r12;
-    regs->r13 = *si->jit_frame_context.p_r13;
-    regs->r14 = *si->jit_frame_context.p_r14;
-    regs->r15 = *si->jit_frame_context.p_r15;
-
-    regs->rax = *si->jit_frame_context.p_rax;
-    regs->rcx = *si->jit_frame_context.p_rcx;
-    regs->rdx = *si->jit_frame_context.p_rdx;
-    regs->rsi = *si->jit_frame_context.p_rsi;
-    regs->rdi = *si->jit_frame_context.p_rdi;
-    regs->r8 = *si->jit_frame_context.p_r8;
-    regs->r9 = *si->jit_frame_context.p_r9;
-    regs->r10 = *si->jit_frame_context.p_r10;
-    regs->r11 = *si->jit_frame_context.p_r11;
+    regs->rbp = unref_reg(si->jit_frame_context.p_rbp);
+    regs->rip = unref_reg(si->jit_frame_context.p_rip);
+
+    regs->rbx = unref_reg(si->jit_frame_context.p_rbx);
+    regs->r12 = unref_reg(si->jit_frame_context.p_r12);
+    regs->r13 = unref_reg(si->jit_frame_context.p_r13);
+    regs->r14 = unref_reg(si->jit_frame_context.p_r14);
+    regs->r15 = unref_reg(si->jit_frame_context.p_r15);
+
+    regs->rax = unref_reg(si->jit_frame_context.p_rax);
+    regs->rcx = unref_reg(si->jit_frame_context.p_rcx);
+    regs->rdx = unref_reg(si->jit_frame_context.p_rdx);
+    regs->rsi = unref_reg(si->jit_frame_context.p_rsi);
+    regs->rdi = unref_reg(si->jit_frame_context.p_rdi);
+    regs->r8 = unref_reg(si->jit_frame_context.p_r8);
+    regs->r9 = unref_reg(si->jit_frame_context.p_r9);
+    regs->r10 = unref_reg(si->jit_frame_context.p_r10);
+    regs->r11 = unref_reg(si->jit_frame_context.p_r11);
 
     regs->eflags = si->jit_frame_context.eflags;
 }
 
 void si_set_callback(StackIterator* si, NativeCodePtr* callback) {
+#ifdef WIN32
+    const static uint64 red_zone_size = 0x00;
+#else
     const static uint64 red_zone_size = 0x80;
+#endif
     si->jit_frame_context.rsp = si->jit_frame_context.rsp - red_zone_size - sizeof(void*);
     *((uint64*) si->jit_frame_context.rsp) = *(si->jit_frame_context.p_rip);
     si->jit_frame_context.p_rip = ((uint64*)callback);
diff --git a/vm/vmcore/include/exceptions.h b/vm/vmcore/include/exceptions.h
index 4aebcd7..b436431 100644
--- a/vm/vmcore/include/exceptions.h
+++ b/vm/vmcore/include/exceptions.h
@@ -240,11 +240,12 @@ assert(!is_unwindable());
 
 #define BEGIN_RAISE_AREA \
 { \
+if (is_unwindable()) exn_rethrow_if_pending();\
 bool unwindable = set_unwindable(false);
 
 #define END_RAISE_AREA \
-set_unwindable(unwindable);\
 if (unwindable) exn_rethrow_if_pending();\
+set_unwindable(unwindable);\
 }
 
@@ -272,9 +273,6 @@ void exn_rethrow_if_pending();
 void set_guard_stack();
 void remove_guard_stack();
 void init_stack_info();
-#ifndef WIN32
-void remove_guard_stack();
-#endif
 VMEXPORT size_t get_available_stack_size();
 VMEXPORT bool check_available_stack_size(size_t required_size);
 VMEXPORT size_t get_default_stack_size();
diff --git a/vm/vmcore/src/exception/exceptions_impl.cpp b/vm/vmcore/src/exception/exceptions_impl.cpp
index 1ae89aa..d173a64 100644
--- a/vm/vmcore/src/exception/exceptions_impl.cpp
+++ b/vm/vmcore/src/exception/exceptions_impl.cpp
@@ -239,6 +239,9 @@ jthrowable create_exception(Class* exc_c
     if (!man_obj) {
         exn_raise_object(VM_Global_State::loader_env->java_lang_OutOfMemoryError);
 
+        if (suspended_enabled) {
+            tmn_suspend_enable();
+        }
         return NULL;
     }
 
@@ -247,6 +250,9 @@ jthrowable create_exception(Class* exc_c
     args[0].l = exc_object;
     if (exn_raised()) { //if RuntimeException or Error
+        if (suspended_enabled) {
+            tmn_suspend_enable();
+        }
         return NULL;
     }
 
@@ -303,6 +309,7 @@ jthrowable create_exception(Exception* e
 
 void exn_throw_object_internal(jthrowable exc_object)
 {
+    BEGIN_RAISE_AREA;
     // functions can be invoked in suspend disabled and enabled state
     if (hythread_is_suspend_enabled()) {
         tmn_suspend_disable();
@@ -310,11 +317,13 @@ void exn_throw_object_internal(jthrowabl
     assert(!hythread_is_suspend_enabled());
     TRACE2("exn", ("%s", "exn_throw_object(), delegating to exn_throw_for_JIT()"));
     exn_throw_for_JIT(exc_object->object, NULL, NULL, NULL, NULL);
+    END_RAISE_AREA;
 }
 
 void exn_throw_by_class_internal(Class* exc_class, const char* exc_message,
     jthrowable exc_cause)
 {
+    BEGIN_RAISE_AREA;
     // functions can be invoked in suspend disabled and enabled state
     if (!hythread_is_suspend_enabled()) {
         // exception is throwing, so suspend can be enabled safely
         tmn_suspend_enable();
@@ -322,7 +331,7 @@ void exn_throw_by_class_internal(Class*
     }
     assert(hythread_is_suspend_enabled());
 #ifdef VM_LAZY_EXCEPTION
-    set_unwindable(false);
+    //set_unwindable(false);
 
     jvalue args[3];
     Method* exc_init = prepare_exc_creating(
@@ -333,11 +342,11 @@ #ifdef VM_LAZY_EXCEPTION
             "exn_throw_by_class(),create exception and delegating to exn_throw_for_JIT()"));
         jthrowable exc_object = exn_create(exc_class, exc_message, exc_cause);
         exn_rethrow_if_pending();
-        set_unwindable(true);
+        //set_unwindable(true);
         exn_throw_object_internal(exc_object);
     } else {
         TRACE2("exn", ("%s", "exn_throw_by_class(), lazy delegating to exn_throw_for_JIT()"));
-        set_unwindable(true);
+        //set_unwindable(true);
 
         // no return, so enable isn't required
         tmn_suspend_disable();
@@ -345,26 +354,24 @@ #ifdef VM_LAZY_EXCEPTION
         //tmn_suspend_enable();
     }
 #else
-    set_unwindable(false);
     jthrowable exc_object = exn_create(exc_class, exc_message, exc_cause);
-    set_unwindable(true);
     exn_rethrow_if_pending();
     exn_throw_object_internal(exc_object);
 #endif
+    END_RAISE_AREA;
 }
 
 void exn_throw_by_name_internal(const char* exc_name, const char* exc_message,
     jthrowable exc_cause)
 {
+    BEGIN_RAISE_AREA;
     // functions can be invoked in suspend disabled and enabled state
     if (!hythread_is_suspend_enabled()) {
         // exception is throwing, so suspend can be enabled safely
         tmn_suspend_enable();
     }
     assert(hythread_is_suspend_enabled());
-    set_unwindable(false);
     Class *exc_class = get_exc_class(exc_name);
-    set_unwindable(true);
 
     if (exc_class == NULL) {
         assert(exn_raised());
@@ -372,6 +379,7 @@ void exn_throw_by_name_internal(const ch
         return; // unreachable code
     }
     exn_throw_by_class_internal(exc_class, exc_message, exc_cause);
+    END_RAISE_AREA;
 }
 
 void exn_raise_object_internal(jthrowable exc_object)
diff --git a/vm/vmcore/src/exception/exceptions_jit.cpp b/vm/vmcore/src/exception/exceptions_jit.cpp
index c21fb8f..34e9c36 100644
--- a/vm/vmcore/src/exception/exceptions_jit.cpp
+++ b/vm/vmcore/src/exception/exceptions_jit.cpp
@@ -197,7 +197,9 @@ static void exn_propagate_exception(
     jvalue* vm_exn_constr_args)
 {
     assert(!hythread_is_suspend_enabled());
+    ASSERT_RAISE_AREA;
     ASSERT_NO_INTERPRETER;
+    assert(*exn_obj || exn_class);
 
     // Save the throw context
@@ -347,6 +349,8 @@ #endif // VM_STATS
 
         // Create exception if necessary
         if (!*exn_obj && !handler->is_exc_obj_dead()) {
+            assert(!exn_raised());
+
             *exn_obj = create_lazy_exception(exn_class, exn_constr,
                 jit_exn_constr_args, vm_exn_constr_args);
         }
@@ -358,7 +362,6 @@ #endif // VM_STATS
                 jit_exn_constr_args, vm_exn_constr_args);
         }
 
-        BEGIN_RAISE_AREA;
 
         // Reload exception object pointer because it could have
         // moved while calling JVMTI callback
@@ -366,8 +369,6 @@ #endif // VM_STATS
             interrupted_method_jit, interrupted_method,
             interrupted_method_location,
             jit, method, handler->get_handler_ip());
-
-        END_RAISE_AREA;
     }
 
     TRACE2("exn", ("setting return pointer to %d", exn_obj));
@@ -381,11 +382,9 @@ #endif // VM_STATS
     // No appropriate handler found, undo synchronization
     vm_monitor_exit_synchronized_method(si);
 
-    BEGIN_RAISE_AREA;
     jvalue ret_val = {(jlong)0};
     jvmti_process_method_exception_exit_event(
         reinterpret_cast(method), JNI_TRUE, ret_val, si);
-    END_RAISE_AREA;
 
     // Goto previous frame
     si_goto_previous(si);
@@ -409,6 +408,9 @@ #endif // VM_STATS
 
     // Reload exception object pointer because it could have
     // moved while calling JVMTI callback
+    if (exn_raised()) {
+        return;
+    }
     *exn_obj = jvmti_jit_exception_event_callback_call(*exn_obj,
         interrupted_method_jit, interrupted_method,
         interrupted_method_location,
@@ -453,7 +455,13 @@ void exn_throw_for_JIT(ManagedObject* ex
      * !!!! NO TRACE2, INFO, WARN, ECHO, ASSERT, ...
      */
     assert(!hythread_is_suspend_enabled());
+
+    if(exn_raised()) {
+        return;
+    }
+
     ASSERT_NO_INTERPRETER
+    ASSERT_RAISE_AREA;
 
     if ((exn_obj == NULL) && (exn_class == NULL)) {
         exn_class = VM_Global_State::loader_env->java_lang_NullPointerException_Class;
@@ -461,10 +469,16 @@ void exn_throw_for_JIT(ManagedObject* ex
     ManagedObject* local_exn_obj = exn_obj;
     StackIterator* si = si_create_from_native();
 
+    if (exn_raised()) {
+        return;
+    }
+
 #ifndef _IPF_
     assert(is_gc_frame_before_m2n_frame());
 #endif // _IPF_
 
+    assert(!exn_raised());
+
     if (si_is_past_end(si)) {
         //FIXME LAZY EXCEPTION (2006.05.12)
         // should be replaced by lazy version
@@ -473,12 +487,11 @@ #endif // _IPF_
     }
 
     si_transfer_all_preserved_registers(si);
+    assert(!exn_raised());
 
     DebugUtilsTI* ti = VM_Global_State::loader_env->TI;
-
     exn_propagate_exception(si, &local_exn_obj, exn_class, exn_constr,
         jit_exn_constr_args, vm_exn_constr_args);
-
     M2nFrame* m2nFrame = m2n_get_last_frame();
     ObjectHandles* last_m2n_frame_handles = m2n_get_local_handles(m2nFrame);
@@ -511,7 +524,9 @@ void exn_athrow(ManagedObject* exn_obj,
     Method_Handle exn_constr, uint8* exn_constr_args)
 {
     assert(!hythread_is_suspend_enabled());
+    BEGIN_RAISE_AREA;
     exn_throw_for_JIT(exn_obj, exn_class, exn_constr, exn_constr_args, NULL);
+    END_RAISE_AREA;
 }
 
@@ -525,18 +540,24 @@ void exn_athrow_regs(Registers * regs, C
 {
     assert(!hythread_is_suspend_enabled());
     assert(exn_class);
+
 #ifndef _IPF_
     M2nFrame *m2nf;
+    StackIterator *si;
 
     if (java_code) {
         m2nf = m2n_push_suspended_frame(regs);
     }
 
-    StackIterator *si = si_create_from_native();
+    BEGIN_RAISE_AREA;
+
+    si = si_create_from_native();
     ManagedObject *local_exn_obj = NULL;
     exn_propagate_exception(si, &local_exn_obj, exn_class, NULL, NULL, NULL);
     si_copy_to_registers(si, regs);
+
+    END_RAISE_AREA;
+
     m2n_set_last_frame(si_get_m2n(si));
     si_free(si);
diff --git a/vm/vmcore/src/jit/compile.cpp b/vm/vmcore/src/jit/compile.cpp
index 2312154..a64abd7 100644
--- a/vm/vmcore/src/jit/compile.cpp
+++ b/vm/vmcore/src/jit/compile.cpp
@@ -747,7 +747,7 @@ static JIT_Result compile_do_compilation
     } else if (method->get_state()==Method::ST_NotCompiled && exn_raised()) {
         method->unlock();
         return JIT_FAILURE;
-    } else if(!check_available_stack_size(0x4000)) {
+    } else if(!check_available_stack_size(0x8000)) {
         method->unlock();
         return JIT_FAILURE;
     }
diff --git a/vm/vmcore/src/jit/jit_runtime_support.cpp b/vm/vmcore/src/jit/jit_runtime_support.cpp
index 48d03d5..a92efc9 100644
--- a/vm/vmcore/src/jit/jit_runtime_support.cpp
+++ b/vm/vmcore/src/jit/jit_runtime_support.cpp
@@ -1606,7 +1606,7 @@ static NativeCodePtr rth_get_lil_gc_safe
         "call %1i;"
         "pop_m2n;"
         "ret;",
-        (POINTER_SIZE_INT)(FRAME_POPABLE | FRAME_SAFE_POINT),
+        (POINTER_SIZE_INT)(FRAME_NON_UNWINDABLE | FRAME_POPABLE | FRAME_SAFE_POINT),
         hythread_safe_point_ptr);
     assert(cs && lil_is_valid(cs));
     addr = LilCodeGenerator::get_platform()->compile(cs);
@@ -1808,7 +1808,7 @@ static NativeCodePtr rth_get_lil_stub_wi
         assert(cs);
     }
 
-    cs = lil_parse_onto_end(cs, "push_m2n 0, %0i;", (POINTER_SIZE_INT)FRAME_POPABLE);
+    cs = lil_parse_onto_end(cs, "push_m2n 0, %0i;", (POINTER_SIZE_INT)(FRAME_POPABLE));
     assert(cs);
     cs = lil_parse_onto_end(cs, in2out);
     assert(cs);
@@ -1848,7 +1848,7 @@ static NativeCodePtr rth_get_lil_stub_wi
         assert(cs);
     }
 
-    cs = lil_parse_onto_end(cs, "push_m2n 0, %0i;", (POINTER_SIZE_INT)FRAME_POPABLE);
+    cs = lil_parse_onto_end(cs, "push_m2n 0, %0i;", (POINTER_SIZE_INT)(FRAME_POPABLE));
     assert(cs);
     cs = lil_parse_onto_end(cs, in2out);
     assert(cs);
@@ -1887,7 +1887,7 @@ static NativeCodePtr rth_get_lil_stub_wi
         assert(cs);
     }
 
-    cs = lil_parse_onto_end(cs, "push_m2n 0, %0i;", (POINTER_SIZE_INT)FRAME_POPABLE);
+    cs = lil_parse_onto_end(cs, "push_m2n 0, %0i;", (POINTER_SIZE_INT)(FRAME_POPABLE));
     assert(cs);
     cs = lil_parse_onto_end(cs, in2out);
     assert(cs);
diff --git a/vm/vmcore/src/util/em64t/base/compile_em64t.cpp b/vm/vmcore/src/util/em64t/base/compile_em64t.cpp
index bdbc3ff..0c2d63a 100644
--- a/vm/vmcore/src/util/em64t/base/compile_em64t.cpp
+++ b/vm/vmcore/src/util/em64t/base/compile_em64t.cpp
@@ -146,16 +146,12 @@ #endif // _WIN64
 // (represented by heap_base) to an unmanaged one (NULL/0). Uses %rdi.
 char * gen_convert_managed_to_unmanaged_null_em64t(char * ss,
     const R_Opnd & input_param1) {
-    if (&input_param1 != &rdi_opnd) {
-        ss = mov(ss, rdi_opnd, input_param1);
-    }
-
     if (VM_Global_State::loader_env->compress_references) {
-        ss = mov(ss, rcx_opnd, Imm_Opnd(size_64, (int64)VM_Global_State::loader_env->heap_base));
-        ss = alu(ss, cmp_opc, rdi_opnd, rcx_opnd);
+        ss = mov(ss, r11_opnd, Imm_Opnd(size_64, (int64)VM_Global_State::loader_env->heap_base));
+        ss = alu(ss, cmp_opc, input_param1, r11_opnd, size_64);
         ss = branch8(ss, Condition_NE, Imm_Opnd(size_8, 0)); // not null, branch around the mov 0
         char *backpatch_address__not_managed_null = ((char *)ss) - 1;
-        ss = mov(ss, rdi_opnd, Imm_Opnd(0));
+        ss = mov(ss, input_param1, Imm_Opnd(0));
         POINTER_SIZE_SINT offset = (POINTER_SIZE_SINT)ss - (POINTER_SIZE_SINT)backpatch_address__not_managed_null - 1;
         *backpatch_address__not_managed_null = (char)offset;
     }
@@ -299,6 +295,8 @@ #endif
 }
 
 NativeCodePtr compile_gen_compile_me(Method_Handle method) {
+    ASSERT_RAISE_AREA;
+
     int STUB_SIZE = 64;
 #ifdef VM_STATS
     ++VM_Statistics::get_vm_stats().num_compileme_generated;
diff --git a/vm/vmcore/src/util/em64t/base/ini_em64t.cpp b/vm/vmcore/src/util/em64t/base/ini_em64t.cpp
index 09ab4dd..adec600 100644
--- a/vm/vmcore/src/util/em64t/base/ini_em64t.cpp
+++ b/vm/vmcore/src/util/em64t/base/ini_em64t.cpp
@@ -29,6 +29,7 @@
 #include "open/types.h"
 #include "open/em.h"
 #include "environment.h"
+#include "exceptions.h"
 #include "Class.h"
 #include "object_handles.h"
 #include "nogc.h"
@@ -111,13 +112,16 @@ #endif
     stub = push(stub, rbp_opnd);
     stub = mov(stub, rbp_opnd, rsp_opnd);
 
+    // align stack pointer if required (rsp % 16 == 0)
+    stub = alu(stub, and_opc, rsp_opnd, Imm_Opnd(0xfffffff0));
+
     // 1) move stacked arguments in reverse (right-to-left) order
     stub = mov(stub, rcx_opnd, M_Base_Opnd(rbp_reg, STACK_NARGS_OFFSET));
     stub = alu(stub, or_opc, rcx_opnd, rcx_opnd);
     stub = branch8(stub, Condition_Z, Imm_Opnd(size_8, 0));
     labels.add_patch_to_label(MOVE_STACK_ARGS_END, stub - 1, LPT_Rel8);
 
-    // align stack if required (rsp % 16 == 0)
+    // align stack arguments if required (rsp % 16 == 0)
     stub = test(stub, rcx_opnd, Imm_Opnd(size_32, 1));
     stub = branch8(stub, Condition_Z, Imm_Opnd(size_8, 0));
     labels.add_patch_to_label(COMPUTE_ADDRESS, stub - 1, LPT_Rel8);
@@ -202,6 +206,7 @@ void JIT_execute_method_default(JIT_Hand
     jvalue * result, jvalue * args) {
 
     assert(!hythread_is_suspend_enabled());
+    ASSERT_RAISE_AREA;
 
     static const invoke_managed_func_int_t invoke_managed_func =
         (invoke_managed_func_int_t) gen_invoke_managed_func();
diff --git a/vm/vmcore/src/util/win/em64t/exception_handlers.asm b/vm/vmcore/src/util/win/em64t/exception_handlers.asm
index 6a22223..292267f 100644
--- a/vm/vmcore/src/util/win/em64t/exception_handlers.asm
+++ b/vm/vmcore/src/util/win/em64t/exception_handlers.asm
@@ -25,7 +25,7 @@ vectored_exception_handler PROC
     cld
     sub rsp, 32 ; allocate stack for 4 registers
     call vectored_exception_handler_internal
-    add esp, 32
+    add rsp, 32
 
     popfq
     ret
 
@@ -44,9 +44,25 @@ asm_c_exception_handler PROC
     pushfq
     cld
 
+    push rax
+    push rbx
+    push rcx
+    push rdx
+    push r8
+    push r9
+    push r10
+    push r11
     sub rsp, 32 ; allocate stack for 4 registers
     call c_exception_handler
-    add esp, 32
+    add rsp, 32
+    pop r11
+    pop r10
+    pop r9
+    pop r8
+    pop rdx
+    pop rcx
+    pop rbx
+    pop rax
 
     popfq
     ret
 
@@ -64,9 +80,25 @@ asm_exception_catch_callback PROC
     pushfq
     cld
 
+    push rax
+    push rbx
+    push rcx
+    push rdx
+    push r8
+    push r9
+    push r10
+    push r11
    sub rsp, 32 ; allocate stack for 4 registers
     call exception_catch_callback_wrapper
-    add esp, 32
+    add rsp, 32
+    pop r11
+    pop r10
+    pop r9
+    pop r8
+    pop rdx
+    pop rcx
+    pop rbx
+    pop rax
 
     popfq
     ret
 
@@ -84,9 +116,25 @@ asm_jvmti_exception_catch_callback PROC
     pushfq
     cld
 
+    push rax
+    push rbx
+    push rcx
+    push rdx
+    push r8
+    push r9
+    push r10
+    push r11
    sub rsp, 32 ; allocate stack for 4 registers
     call jvmti_exception_catch_callback_wrapper
-    add esp, 32
+    add rsp, 32
+    pop r11
+    pop r10
+    pop r9
+    pop r8
+    pop rdx
+    pop rcx
+    pop rbx
+    pop rax
 
     popfq
     ret
 
diff --git a/vm/vmcore/src/util/win/ia32_em64t/nt_exception_filter_common.cpp b/vm/vmcore/src/util/win/ia32_em64t/nt_exception_filter_common.cpp
index f93c921..e0a30c0 100644
--- a/vm/vmcore/src/util/win/ia32_em64t/nt_exception_filter_common.cpp
+++ b/vm/vmcore/src/util/win/ia32_em64t/nt_exception_filter_common.cpp
@@ -121,8 +121,13 @@ inline size_t find_guard_page_size() {
 }
 
 inline size_t find_guard_stack_size() {
-    // guaerded stack size on windows can be equals one page size only :(
-    return find_guard_page_size();
+# ifdef _EM64T_
+    // in the future this code should be used on both x86-32 and x86-64
+    return 64*1024;
+# else
+    // guarded stack size on Windows x86-32 can be equal to one page size only :(
+    return find_guard_page_size();
+# endif
 }
 
 static size_t common_guard_stack_size;
@@ -150,13 +155,31 @@ void init_stack_info() {
     p_TLS_vmthread->stack_size = hythread_get_thread_stacksize(hythread_self());
     common_guard_stack_size = find_guard_stack_size();
     common_guard_page_size = find_guard_page_size();
+
+
+    // in the future this code should be used on both x86-32 and x86-64
+# ifdef _EM64T_
+    ULONG guard_stack_size_param = common_guard_stack_size;
+
+    if (!SetThreadStackGuarantee(&guard_stack_size_param)) {
+        // should be successful always
+        assert(0);
+    }
+# endif
 }
 
 void set_guard_stack() {
     void* stack_addr = get_stack_addr();
     size_t stack_size = get_stack_size();
     size_t page_size = get_guard_page_size();
-    assert(((size_t)(&stack_addr)) > ((size_t)((char*)stack_addr - stack_size + 3 * page_size)));
+    size_t guard_stack_size = get_guard_stack_size();
+
+    //assert(((size_t)(&stack_addr)) > ((size_t)((char*)stack_addr - stack_size + 2 * page_size + guard_stack_size)));
+    if (((size_t)(&stack_addr)) < ((size_t)((char*)stack_addr - stack_size + 2 * page_size + guard_stack_size))) {
+        Global_Env *env = VM_Global_State::loader_env;
+        exn_raise_by_class(env->java_lang_StackOverflowError_Class);
+        return;
+    }
 
     if (!VirtualFree((char*)stack_addr - stack_size + page_size,
         page_size, MEM_DECOMMIT)) {
@@ -164,14 +187,11 @@ void set_guard_stack() {
         assert(0);
     }
 
-    DWORD oldProtect;
-
-    if (!VirtualProtect((char*)stack_addr - stack_size + page_size + page_size,
-        page_size, PAGE_GUARD | PAGE_READWRITE, &oldProtect)) {
+    if (!VirtualAlloc( (char*)stack_addr - stack_size + page_size + guard_stack_size,
+        page_size, MEM_COMMIT, PAGE_GUARD | PAGE_READWRITE)) {
         // should be successful always
         assert(0);
     }
-
     p_TLS_vmthread->restore_guard_page = false;
 }
 
@@ -179,12 +199,13 @@ void remove_guard_stack() {
     void* stack_addr = get_stack_addr();
     size_t stack_size = get_stack_size();
     size_t page_size = get_guard_page_size();
+    size_t guard_stack_size = get_guard_stack_size();
     DWORD oldProtect;
 
     assert(((size_t)(&stack_addr)) > ((size_t)((char*)stack_addr - stack_size + 3 * page_size)));
 
     p_TLS_vmthread->restore_guard_page = true;
 
-    if (!VirtualProtect((char*)stack_addr - stack_size + page_size + page_size,
+    if (!VirtualProtect((char*)stack_addr - stack_size + page_size + guard_stack_size,
         page_size, PAGE_READWRITE, &oldProtect)) {
         // should be successful always
         assert(0);
@@ -194,9 +215,14 @@ size_t get_available_stack_size() {
     char* stack_addr = (char*) get_stack_addr();
     size_t used_stack_size = ((size_t)stack_addr) - ((size_t)(&stack_addr));
-    int available_stack_size =
-        get_stack_size() - used_stack_size
+    int available_stack_size;
+
+    if (!p_TLS_vmthread->restore_guard_page) {
+        available_stack_size = get_stack_size() - used_stack_size
             - 2 * get_guard_page_size() - get_guard_stack_size();
+    } else {
+        available_stack_size = get_stack_size() - used_stack_size - get_guard_page_size();
+    }
 
     if (available_stack_size > 0) {
         return (size_t) available_stack_size;
@@ -204,10 +230,12 @@ size_t get_available_stack_size() {
         return 0;
     }
 }
+
 size_t get_default_stack_size() {
     size_t default_stack_size = get_stack_size();
     return default_stack_size;
 }
+
 bool check_available_stack_size(size_t required_size) {
     size_t available_stack_size = get_available_stack_size();
     if (available_stack_size < required_size) {
@@ -223,7 +251,7 @@ bool check_available_stack_size(size_t r
 }
 
 size_t get_restore_stack_size() {
-    return 0x0100;
+    return 0x0200;
 }
 
 bool check_stack_size_enough_for_exception_catch(void* sp) {
@@ -434,8 +462,11 @@ void __cdecl c_exception_handler(Class*
     M2nFrame* prev_m2n = m2n_get_last_frame();
     M2nFrame* m2n = NULL;
 
-    if (in_java)
+    if (in_java) {
         m2n = m2n_push_suspended_frame(&regs);
+    } else {
+        prev_m2n = m2n_get_previous_frame(prev_m2n);
+    }
 
     TRACE2("signals", ("should throw exception %p at IP=%p, SP=%p",
         exn_class, regs.get_ip(), regs_get_sp(&regs)));
-- 
1.4.1
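
For reference, a minimal standalone sketch of the mechanism init_stack_info() now
relies on. It is not part of the patch; the 64 KB value mirrors
find_guard_stack_size() on x86-64, and everything else is illustrative:

    // Sketch: reserve a larger stack guarantee for the current thread so
    // that enough stack stays usable after an overflow to build and throw
    // the StackOverflowError.
    #include <windows.h>
    #include <stdio.h>

    int main() {
        ULONG guard_size = 64 * 1024; // matches find_guard_stack_size() on x86-64

        // On input, the requested guarantee; on success the OS keeps at
        // least this much stack usable for the handler after an overflow,
        // and guard_size receives the thread's previous guarantee.
        if (!SetThreadStackGuarantee(&guard_size)) {
            printf("SetThreadStackGuarantee failed: %lu\n", GetLastError());
            return 1;
        }
        printf("previous guarantee: %lu bytes\n", guard_size);
        return 0;
    }

SetThreadStackGuarantee only affects the calling thread, which is why the patch
invokes it from the per-thread init_stack_info() rather than once at VM startup.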
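
The platform split for red_zone_size in si_set_callback() follows from the calling
conventions: the System V AMD64 ABI guarantees a 128-byte red zone below rsp that
interrupted code may still be using, while the Windows x64 ABI defines no red zone.
A hedged sketch of that adjustment (place_callback_return and its parameters are
illustrative, not VM code):

    // Sketch: install a fake return address for an exception callback
    // without clobbering a red zone the interrupted code may still use.
    #include <stdint.h>

    static void place_callback_return(uint64_t& rsp, uint64_t return_ip) {
    #ifdef WIN32
        const uint64_t red_zone_size = 0x00; // Windows x64 ABI: no red zone
    #else
        const uint64_t red_zone_size = 0x80; // System V AMD64 ABI: 128 bytes
    #endif
        // Step over the red zone, then carve out one pointer-sized slot,
        // mirroring how si_set_callback() adjusts jit_frame_context.rsp.
        rsp -= red_zone_size + sizeof(void*);
        *reinterpret_cast<uint64_t*>(rsp) = return_ip;
    }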
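
The two branches added to get_available_stack_size() reserve different amounts
depending on whether the guard page is still armed. A worked example of the same
arithmetic with illustrative sizes (1 MB stack, 4 KB guard page, 64 KB guaranteed
region); none of these numbers come from the patch itself:

    // Sketch of the patched arithmetic; all sizes are example values.
    #include <stdio.h>

    int main() {
        long stack_size  = 1024 * 1024; // total thread stack
        long used        = 200 * 1024;  // stack already consumed
        long page        = 4 * 1024;    // guard page size
        long guard       = 64 * 1024;   // SetThreadStackGuarantee region
        bool guard_armed = true;        // i.e. restore_guard_page == false

        // While the guard page is armed, both guard pages and the whole
        // guaranteed region are off-limits; once the guard has fired,
        // only a single page stays reserved.
        long available = guard_armed
            ? stack_size - used - 2 * page - guard // 770048 here
            : stack_size - used - page;

        printf("available: %ld bytes\n", available > 0 ? available : 0);
        return 0;
    }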