commit 6f562e1b9078de55f8210ea885ad2f1637767612 Author: Gregory Shimansky Date: Sat Sep 23 02:19:57 2006 +0400 FIX FIX FIX diff --git a/vm/interpreter/src/interpreter.cpp b/vm/interpreter/src/interpreter.cpp index d59379b..bbb4320 100644 --- a/vm/interpreter/src/interpreter.cpp +++ b/vm/interpreter/src/interpreter.cpp @@ -1603,7 +1603,9 @@ Opcode_PUTSTATIC(StackFrame& frame) { if (interpreter_ti_notification_mode & INTERPRETER_TI_FIELD_MODIFICATION) { Method *method = frame.method; + M2N_ALLOC_MACRO; putstatic_callback(field, frame); + M2N_FREE_MACRO; } @@ -1698,7 +1700,9 @@ Opcode_GETSTATIC(StackFrame& frame) { if (interpreter_ti_notification_mode & INTERPRETER_TI_FIELD_ACCESS) { Method *method = frame.method; + M2N_ALLOC_MACRO; getstatic_callback(field, frame); + M2N_FREE_MACRO; } void *addr = field->get_address(); @@ -1770,7 +1774,9 @@ Opcode_PUTFIELD(StackFrame& frame) { if (interpreter_ti_notification_mode & INTERPRETER_TI_FIELD_MODIFICATION) { Method *method = frame.method; + M2N_ALLOC_MACRO; putfield_callback(field, frame); + M2N_FREE_MACRO; } if (field->is_final()) { @@ -1939,7 +1945,9 @@ Opcode_GETFIELD(StackFrame& frame) { if (interpreter_ti_notification_mode & INTERPRETER_TI_FIELD_ACCESS) { Method *method = frame.method; + M2N_ALLOC_MACRO; getfield_callback(field, frame); + M2N_FREE_MACRO; } CREF cref = frame.stack.pick(0).cr; @@ -2560,7 +2568,9 @@ restart: if (frame.jvmti_pop_frame == POP_FRAME_NOW) { MonitorList *ml = frame.locked_monitors; while(ml) { + M2N_ALLOC_MACRO; vm_monitor_exit_wrapper(ml->monitor); + M2N_FREE_MACRO; ml = ml->next; } M2N_FREE_MACRO; @@ -2569,8 +2579,9 @@ restart: if (!breakpoint_processed && interpreter_ti_notification_mode & INTERPRETER_TI_SINGLE_STEP_EVENT) { - breakpoint_processed = false; + M2N_ALLOC_MACRO; single_step_callback(frame); + M2N_FREE_MACRO; } if (frame.jvmti_pop_frame == POP_FRAME_NOW) { MonitorList *ml = frame.locked_monitors; @@ -2581,6 +2592,8 @@ restart: M2N_FREE_MACRO; return; } + breakpoint_processed = 
false; + //assert(!exn_raised()); if (get_thread_ptr()->p_exception_object_ti || exn_raised()) { frame.exc = get_current_thread_exception(); @@ -2989,7 +3002,9 @@ got_exception: Method *catch_method; jlocation catch_location; findCatchMethod(&frame.exc, &catch_method, &catch_location); + M2N_ALLOC_MACRO; jvmti_interpreter_exception_event_callback_call(frame.exc, method, loc, catch_method, catch_location); + M2N_FREE_MACRO; assert(!exn_raised()); p_TLS_vmthread->p_exception_object_ti = (volatile ManagedObject*) frame.exc; } @@ -3007,7 +3022,9 @@ got_exception: set_current_thread_exception(frame.exc); if (frame.locked_monitors) { + M2N_ALLOC_MACRO; vm_monitor_exit_wrapper(frame.locked_monitors->monitor); + M2N_FREE_MACRO; assert(!frame.locked_monitors->next); } diff --git a/vm/interpreter/src/interpreter_ti.cpp b/vm/interpreter/src/interpreter_ti.cpp index e0a3cc2..505a0da 100644 --- a/vm/interpreter/src/interpreter_ti.cpp +++ b/vm/interpreter/src/interpreter_ti.cpp @@ -354,7 +354,10 @@ uint8 Opcode_BREAKPOINT(StackFrame& frame) { Method *m = frame.method; jlocation l = frame.ip - (uint8*)m->get_byte_code_addr(); - return (uint8) (POINTER_SIZE_INT) jvmti_process_interpreter_breakpoint_event((jmethodID)m, l); + M2N_ALLOC_MACRO; + uint8 b = (uint8) (POINTER_SIZE_INT) jvmti_process_interpreter_breakpoint_event((jmethodID)m, l); + M2N_FREE_MACRO; + return b; } jbyte interpreter_ti_set_breakpoint(jmethodID method, jlocation location) { diff --git a/vm/vmcore/include/jvmti_break_intf.h b/vm/vmcore/include/jvmti_break_intf.h index 07e48a0..23648db 100755 --- a/vm/vmcore/include/jvmti_break_intf.h +++ b/vm/vmcore/include/jvmti_break_intf.h @@ -30,6 +30,16 @@ #include "lock_manager.h" #include "jvmti_dasm.h" #include "environment.h" +#define INSTRUMENTATION_BYTE_HLT 0xf4 // HLT instruction +#define INSTRUMENTATION_BYTE_CLI 0xfa // CLI instruction +#define INSTRUMENTATION_BYTE_INT3 0xcc // INT 3 instruction + +#ifdef PLATFORM_NT +#define INSTRUMENTATION_BYTE 
INSTRUMENTATION_BYTE_CLI +#else +#define INSTRUMENTATION_BYTE INSTRUMENTATION_BYTE_INT3 +#endif + // Callbacks are called for interfaces according to its priority typedef enum { PRIORITY_SINGLE_STEP_BREAKPOINT = 0, diff --git a/vm/vmcore/include/jvmti_dasm.h b/vm/vmcore/include/jvmti_dasm.h index 90b8d86..a3958bb 100644 --- a/vm/vmcore/include/jvmti_dasm.h +++ b/vm/vmcore/include/jvmti_dasm.h @@ -115,9 +115,15 @@ public: CondJumpType_Count = 16 }; + InstructionDisassembler(void) : + m_type(OPCODEERROR), m_target(0), m_len(0), + m_cond_jump_type(JUMP_OVERFLOW), m_argc(0) + { + } InstructionDisassembler(NativeCodePtr address) : - m_type(OPCODEERROR), m_target(0), m_len(0), m_cond_jump_type(JUMP_OVERFLOW) + m_type(OPCODEERROR), m_target(0), m_len(0), + m_cond_jump_type(JUMP_OVERFLOW), m_argc(0) { disasm(address, this); } @@ -128,6 +134,10 @@ public: m_target = d.m_target; m_len = d.m_len; m_cond_jump_type = d.m_cond_jump_type; + m_argc = d.m_argc; + m_opnds[0] = d.m_opnds[0]; + m_opnds[1] = d.m_opnds[1]; + m_opnds[2] = d.m_opnds[2]; } /** @@ -197,6 +207,11 @@ public: * @note Only valid for branch instructions like JMPs, CALLs, etc. 
*/ NativeCodePtr get_target_address_from_context(const Registers* pregs) const; + + /** + * Returns the appropriate register value for the register operand reg + */ + const char* get_reg_value(Register reg, const Registers* pcontext) const; private: /** * @brief Performs disassembling, fills out InstructionDisassembler's diff --git a/vm/vmcore/include/jvmti_internal.h b/vm/vmcore/include/jvmti_internal.h index 2fe8d5d..caef455 100644 --- a/vm/vmcore/include/jvmti_internal.h +++ b/vm/vmcore/include/jvmti_internal.h @@ -87,6 +87,7 @@ struct jvmti_StepLocation struct Method* method; unsigned location; NativeCodePtr native_location; + bool no_event; }; struct JVMTISingleStepState @@ -347,7 +348,7 @@ jint load_agentpath(Agent *agent, const // Breakpoints internal functions jvmtiError jvmti_get_next_bytecodes_from_native(VM_thread *thread, - jvmti_StepLocation **next_step, unsigned *count, bool stack_step_up); + jvmti_StepLocation **next_step, unsigned *count, bool invoked_frame); void jvmti_set_single_step_breakpoints(DebugUtilsTI *ti, VM_thread *vm_thread, jvmti_StepLocation *locations, unsigned locations_number); diff --git a/vm/vmcore/src/class_support/Class.cpp b/vm/vmcore/src/class_support/Class.cpp index b90408a..0a19b18 100644 --- a/vm/vmcore/src/class_support/Class.cpp +++ b/vm/vmcore/src/class_support/Class.cpp @@ -182,10 +182,8 @@ Method *class_get_method_from_vt_offset( unsigned offset) { assert(vt); - unsigned index = (offset - VTABLE_OVERHEAD - - vt->clss->n_virtual_method_entries * sizeof(void*)) - / sizeof(void*); - return &(vt->clss->methods[index]); + unsigned index = (offset - VTABLE_OVERHEAD) / sizeof(void*); + return vt->clss->vtable_descriptors[index]; } // class_get_method_from_vt_offset void* Field::get_address() diff --git a/vm/vmcore/src/exception/exceptions_jit.cpp b/vm/vmcore/src/exception/exceptions_jit.cpp index aeb717f..5c1fca5 100644 --- a/vm/vmcore/src/exception/exceptions_jit.cpp +++ b/vm/vmcore/src/exception/exceptions_jit.cpp @@ -336,9 
+336,11 @@ #endif // VM_STATS set_unwindable(unwindable); } + BEGIN_RAISE_AREA; jvalue ret_val = {(jlong)0}; jvmti_process_method_exit_event(reinterpret_cast(method), JNI_TRUE, ret_val); + END_RAISE_AREA; // Goto previous frame si_goto_previous(si); diff --git a/vm/vmcore/src/jvmti/jvmti_break_intf.cpp b/vm/vmcore/src/jvmti/jvmti_break_intf.cpp index f7d43e8..b16fe93 100755 --- a/vm/vmcore/src/jvmti/jvmti_break_intf.cpp +++ b/vm/vmcore/src/jvmti/jvmti_break_intf.cpp @@ -406,12 +406,6 @@ VMBreakPoints::find_other_reference(VMBr return NULL; } - -#define INSTRUMENTATION_BYTE_HLT 0xf4 // HLT instruction -#define INSTRUMENTATION_BYTE_CLI 0xfa // CLI instruction -#define INSTRUMENTATION_BYTE_INT3 0xcc // INT 3 instruction -#define INSTRUMENTATION_BYTE INSTRUMENTATION_BYTE_INT3 - void VMBreakPoints::process_native_breakpoint() { @@ -562,6 +556,29 @@ #endif //_IA32_ && PLATFORM_POSIX && INS jump(code, jump_target); break; } + case InstructionDisassembler::INDIRECT_JUMP: + { + jint instruction_length = idisasm.get_length_with_prefix(); + char *jump_target = (char *)idisasm.get_target_address_from_context(®s); + + // Create JMP to the absolute address which conditional jump + // had in the execution buffer + jump((char *)instruction_buffer, jump_target); + break; + } + case InstructionDisassembler::INDIRECT_CALL: + { + jbyte *next_instruction = interrupted_instruction + instruction_length; + char *jump_target = (char *)idisasm.get_target_address_from_context(®s); + char *code = (char *)instruction_buffer; + + // Push "return address" to the $next_instruction + code = push(code, Imm_Opnd((POINTER_SIZE_INT)next_instruction)); + + // Jump to the target address of the call instruction + jump(code, jump_target); + break; + } } unlock(); @@ -596,6 +613,7 @@ VMBreakPoints::process_interpreter_break assert(interpreter_enabled()); + lock(); VMBreakPoint* bp = find_breakpoint(method, location); assert(bp); @@ -637,6 +655,7 @@ VMBreakPoints::process_interpreter_break } 
clear_intfs_processed_flags(); + unlock(); return orig_byte; } diff --git a/vm/vmcore/src/jvmti/jvmti_dasm.cpp b/vm/vmcore/src/jvmti/jvmti_dasm.cpp index b22a140..0544f3e 100644 --- a/vm/vmcore/src/jvmti/jvmti_dasm.cpp +++ b/vm/vmcore/src/jvmti/jvmti_dasm.cpp @@ -37,11 +37,11 @@ static InstructionDisassembler::Register case RegName_EBP: return InstructionDisassembler::IA32_REG_EBP; case RegName_ESP: return InstructionDisassembler::IA32_REG_ESP; default: - // currently not supported and unexpected - assert(false); break; } - return (InstructionDisassembler::Register)-1; + // Some other registers (e.g. AL or XMM or whatever) - not + // supported currently + return InstructionDisassembler::IA32_REG_NONE; } static void convertOperand2Opnd( @@ -87,20 +87,20 @@ static const char* get_reg_value( #else // _IA32_ -static const char* get_reg_value( - InstructionDisassembler::Register reg, - const Registers* pcontext) +const char* InstructionDisassembler::get_reg_value( + Register reg, + const Registers* pcontext) const { switch(reg) { - case InstructionDisassembler::IA32_REG_NONE: return NULL; - case InstructionDisassembler::IA32_REG_EAX: return (const char*)pcontext->eax; - case InstructionDisassembler::IA32_REG_EBX: return (const char*)pcontext->ebx; - case InstructionDisassembler::IA32_REG_ECX: return (const char*)pcontext->ecx; - case InstructionDisassembler::IA32_REG_EDX: return (const char*)pcontext->edx; - case InstructionDisassembler::IA32_REG_ESI: return (const char*)pcontext->esi; - case InstructionDisassembler::IA32_REG_EDI: return (const char*)pcontext->edi; - case InstructionDisassembler::IA32_REG_EBP: return (const char*)pcontext->ebp; - case InstructionDisassembler::IA32_REG_ESP: return (const char*)pcontext->esp; + case IA32_REG_NONE: return NULL; + case IA32_REG_EAX: return (const char*)pcontext->eax; + case IA32_REG_EBX: return (const char*)pcontext->ebx; + case IA32_REG_ECX: return (const char*)pcontext->ecx; + case IA32_REG_EDX: return (const 
char*)pcontext->edx; + case IA32_REG_ESI: return (const char*)pcontext->esi; + case IA32_REG_EDI: return (const char*)pcontext->edi; + case IA32_REG_EBP: return (const char*)pcontext->ebp; + case IA32_REG_ESP: return (const char*)pcontext->esp; default: assert(false); } return NULL; diff --git a/vm/vmcore/src/jvmti/jvmti_event.cpp b/vm/vmcore/src/jvmti/jvmti_event.cpp index 24ca6da..3cb412e 100644 --- a/vm/vmcore/src/jvmti/jvmti_event.cpp +++ b/vm/vmcore/src/jvmti/jvmti_event.cpp @@ -803,8 +803,18 @@ jvmti_process_single_step_event(jmethodI JNIEnv *jni_env = (JNIEnv *)jni_native_intf; jvmtiEnv *jvmti_env = (jvmtiEnv*) ti_env; + TRACE2("jvmti.break.ss", "Calling SingleStep callback for env " << jvmti_env << ": " << + class_get_name(method_get_class((Method*)method)) << "." << + method_get_name((Method*)method) << + method_get_descriptor((Method*)method) << " :" << location); + if (NULL != ti_env->event_table.SingleStep) ti_env->event_table.SingleStep(jvmti_env, jni_env, thread, method, location); + + TRACE2("jvmti.break.ss", "Finished SingleStep callback for env " << jvmti_env << ": " << + class_get_name(method_get_class((Method*)method)) << "." 
<< + method_get_name((Method*)method) << + method_get_descriptor((Method*)method) << " :" << location); ti_env = next_env; } } @@ -1664,6 +1674,14 @@ void jvmti_send_vm_death_event() ti_env = next_env; } + + if (ti->is_single_step_enabled()) + { + // Stop single step and remove all breakpoints if there were some + jvmtiError errorCode = ti->jvmti_single_step_stop(); + assert(JVMTI_ERROR_NONE == errorCode); + } + ti->nextPhase(JVMTI_PHASE_DEAD); } diff --git a/vm/vmcore/src/jvmti/jvmti_step.cpp b/vm/vmcore/src/jvmti/jvmti_step.cpp index 2b71b97..7019f15 100644 --- a/vm/vmcore/src/jvmti/jvmti_step.cpp +++ b/vm/vmcore/src/jvmti/jvmti_step.cpp @@ -48,12 +48,11 @@ jvmti_GetWordValue( const unsigned char return result; } // jvmti_GetWordValue -static Method * -jvmti_get_invoked_virtual_method( VM_thread* thread ) +NativeCodePtr static get_ip_for_invoke_call_ip(VM_thread* thread, + unsigned location, unsigned next_location) { ASSERT_NO_INTERPRETER; -#if _IA32_ // create stack iterator from native StackIterator* si = si_create_from_native( thread ); si_transfer_all_preserved_registers(si); @@ -63,38 +62,64 @@ #if _IA32_ assert(!si_is_native(si)); // find correct ip in java frame NativeCodePtr ip = si_get_ip(si); - // get virtual table - VTable* vtable; - JitFrameContext* jitContext = si_get_jit_context(si); - unsigned short code = (*((unsigned short*)((char*)ip))); - switch( code ) + + Method *method = si_get_method(si); + assert(method); + + CodeChunkInfo *cci = si_get_code_chunk_info(si); + JIT *jit = cci->get_jit(); + + NativeCodePtr next_ip; + OpenExeJpdaError UNREF result = jit->get_native_location_for_bc(method, + (uint16)next_location, &next_ip); + assert(result == EXE_ERROR_NONE); + assert(ip < next_ip); + + VMBreakPoints *vm_brpt = VM_Global_State::loader_env->TI->vm_brpt; + VMBreakPoint *bp = vm_brpt->find_breakpoint(ip); + + InstructionDisassembler disasm; + if (bp) + disasm = *bp->disasm; + else + disasm = ip; + + // Iterate over this bytecode instructions 
until we reach an + // indirect call in this bytecode which should be the + // invokevirtual or invokeinterface call + NativeCodePtr call_ip = NULL; + do { - case 0x50ff: - vtable = (VTable*)*(jitContext->p_eax); - break; - case 0x51ff: - vtable = (VTable*)*(jitContext->p_ecx); - break; - case 0x52ff: - vtable = (VTable*)*(jitContext->p_edx); - break; - case 0x53ff: - vtable = (VTable*)*(jitContext->p_ebx); - break; - default: - vtable = NULL; + ip = (NativeCodePtr)((POINTER_SIZE_INT)ip + disasm.get_length_with_prefix()); + + // Another thread could have instrumented this location for + // prediction of invokevirtual or invokeinterface, so it is + // necessary to check that location may be instrumented + uint8 b = *((uint8 *)ip); + if (b == INSTRUMENTATION_BYTE) + { + bp = vm_brpt->find_breakpoint(ip); + assert(bp); + disasm = *bp->disasm; + } + else + disasm = ip; + + if (disasm.get_type() == InstructionDisassembler::INDIRECT_CALL) + call_ip = ip; } - assert(vtable); - si_free(si); + while (ip < next_ip); - // get method from virtual table - Method *method = class_get_method_from_vt_offset( vtable, *((char*)ip + 2) ); - return method; + assert(call_ip); -#else // for !_IA32_ + TRACE2("jvmti.break.ss", "Predicting VIRTUAL type breakpoint on address: " << call_ip); - return NULL; -#endif // _IA32_ + // ip now points to the call instruction which actually invokes a + // virtual method. We're going to set a synthetic breakpoint there + // and allow execution up until that point to get the virtual + // table address and offset inside of it to determine exactly + // which method is going to be invoked in runtime. 
+ return call_ip; } // jvmti_get_invoked_virtual_method void @@ -161,9 +186,11 @@ jvmti_SingleStepLocation( VM_thread* thr (*next_step)[0].method = method; (*next_step)[0].location = location; (*next_step)[0].native_location = NULL; + (*next_step)[0].no_event = false; (*next_step)[1].method = method; (*next_step)[1].location = offset; (*next_step)[1].native_location = NULL; + (*next_step)[1].no_event = false; break; // goto instructions @@ -177,6 +204,7 @@ jvmti_SingleStepLocation( VM_thread* thr (*next_step)->method = method; (*next_step)->location = offset; (*next_step)->native_location = NULL; + (*next_step)->no_event = false; break; case OPCODE_GOTO_W: /* 0xc8 + s4 */ case OPCODE_JSR_W: /* 0xc9 + s4 */ @@ -188,6 +216,7 @@ jvmti_SingleStepLocation( VM_thread* thr (*next_step)->method = method; (*next_step)->location = offset; (*next_step)->native_location = NULL; + (*next_step)->no_event = false; break; // tableswitch instruction @@ -206,12 +235,14 @@ jvmti_SingleStepLocation( VM_thread* thr (*next_step)[0].location = (int)bytecode_index + jvmti_GetWordValue( bytecode, location ); (*next_step)[0].native_location = NULL; + (*next_step)[0].no_event = false; location += 12; for( int index = 1; index < number; index++, location += 4 ) { (*next_step)[index].method = method; (*next_step)[index].location = (int)bytecode_index + jvmti_GetWordValue( bytecode, location ); (*next_step)[index].native_location = NULL; + (*next_step)[index].no_event = false; } } break; @@ -230,21 +261,36 @@ jvmti_SingleStepLocation( VM_thread* thr (*next_step)[0].location = (int)bytecode_index + jvmti_GetWordValue( bytecode, location ); (*next_step)[0].native_location = NULL; + (*next_step)[0].no_event = false; location += 12; for( int index = 1; index < number; index++, location += 8 ) { (*next_step)[index].method = method; (*next_step)[index].location = (int) + jvmti_GetWordValue( bytecode, location ); (*next_step)[index].native_location = NULL; + (*next_step)[index].no_event = false; } } 
break; // athrow and invokeinterface instruction case OPCODE_ATHROW: /* 0xbf */ + assert( !is_wide ); + break; case OPCODE_INVOKEINTERFACE:/* 0xb9 + u2 + u1 + u1 */ assert( !is_wide ); - // instructions are processed in helpers + { + NativeCodePtr ip = get_ip_for_invoke_call_ip(thread, location, + location + 5); + error = _allocate(sizeof(jvmti_StepLocation), + (unsigned char**)next_step ); + assert(error == JVMTI_ERROR_NONE); + *count = 1; + (*next_step)->method = method; + (*next_step)->location = location; + (*next_step)->native_location = ip; + (*next_step)->no_event = true; + } break; // return instructions @@ -278,6 +324,7 @@ jvmti_SingleStepLocation( VM_thread* thr (*next_step)->method = klass->const_pool[index].CONSTANT_ref.method; (*next_step)->location = 0; (*next_step)->native_location = NULL; + (*next_step)->no_event = false; } } break; @@ -286,15 +333,16 @@ jvmti_SingleStepLocation( VM_thread* thr case OPCODE_INVOKEVIRTUAL: /* 0xb6 + u2 */ assert( !is_wide ); { - Method *func = jvmti_get_invoked_virtual_method( thread ); - if( !method_is_native(func) ) { - *count = 1; - error = _allocate( sizeof(jvmti_StepLocation), (unsigned char**)next_step ); - assert( error == JVMTI_ERROR_NONE ); - (*next_step)->method = func; - (*next_step)->location = 0; - (*next_step)->native_location = NULL; - } + NativeCodePtr ip = get_ip_for_invoke_call_ip(thread, location, + location + 3); + error = _allocate(sizeof(jvmti_StepLocation), + (unsigned char**)next_step ); + assert(error == JVMTI_ERROR_NONE); + *count = 1; + (*next_step)->method = method; + (*next_step)->location = location; + (*next_step)->native_location = ip; + (*next_step)->no_event = true; } break; @@ -355,6 +403,7 @@ jvmti_SingleStepLocation( VM_thread* thr (*next_step)->method = method; (*next_step)->location = location; (*next_step)->native_location = NULL; + (*next_step)->no_event = false; break; // ret instruction @@ -395,6 +444,76 @@ jvmti_setup_jit_single_step(DebugUtilsTI 
jvmti_set_single_step_breakpoints(ti, vm_thread, locations, locations_count); } +static void jvmti_start_single_step_in_virtual_method(DebugUtilsTI *ti, VMBreakInterface* intf, + VMBreakPointRef* bp_ref) +{ + VM_thread *vm_thread = p_TLS_vmthread; + Registers *regs = &vm_thread->jvmti_saved_exception_registers; + // This is a virtual breakpoint set exactly on the call + // instruction for the virtual method. In this place it is + // possible to determine the target method in runtime + bool *virtual_flag = (bool *)bp_ref->data; + assert(*virtual_flag == true); + + InstructionDisassembler *disasm = bp_ref->brpt->disasm; + const InstructionDisassembler::Opnd& op = disasm->get_opnd(0); + Method *method; + if (op.kind == InstructionDisassembler::Kind_Mem) + { + // Invokevirtual uses indirect call from VTable. The base + // address is in the register, offset is in displacement * + // scale. This method is much faster than + VTable* vtable = (VTable*)disasm->get_reg_value(op.base, regs); + assert(vtable); + // For x86 based architectures offset cannot be longer than 32 + // bits, so unsigned is ok here + unsigned offset = (unsigned)((POINTER_SIZE_INT)disasm->get_reg_value(op.index, regs) * + op.scale + op.disp); + method = class_get_method_from_vt_offset(vtable, offset); + } + else if (op.kind == InstructionDisassembler::Kind_Reg) + { + // This is invokeinterface bytecode which uses register + // call so we need to search through all methods for this + // one to find it, no way to get vtable and offset in it + NativeCodePtr ip = disasm->get_target_address_from_context(regs); + CodeChunkInfo *cci = vm_methods->find(ip); + if (cci) + method = cci->get_method(); + else + { + // This is an uncompiled interface method. We don't + // know its address and don't know its handle. To get + // the handle we need to parse LIL stub generated in + // compile_gen_compile_me. 
+ InstructionDisassembler stub_disasm(ip); +#ifdef VM_STATS + // In case of VM_STATS first instruction should be + skipped because it is a stats increment + ip = (NativeCodePtr)((POINTER_SIZE_INT)ip + stub_disasm.get_length_with_prefix()); + stub_disasm = ip; +#endif + // Now IP points on mov(stub, ecx_opnd, Imm_Opnd((int32)method)); + // where method is the method handle. Need to get its + // address from instruction, it is an immd operand in mov + assert(stub_disasm.get_operands_count() == 1); + + const InstructionDisassembler::Opnd& stub_op = stub_disasm.get_opnd(0); + assert(stub_op.kind == InstructionDisassembler::Kind_Imm); + method = (Method *)stub_op.imm; + } + } + + TRACE2("jvmti.break.ss", "Removing VIRTUAL single step breakpoint: " << bp_ref->brpt->addr); + // The determined method is the one which is called by + // invokevirtual or invokeinterface bytecodes. It should be + // started to be single stepped from the beginning + intf->remove_all(); + + jvmti_StepLocation method_start = {(Method *)method, 0}; + jvmti_set_single_step_breakpoints(ti, vm_thread, &method_start, 1); +} + // Callback function for JVMTI single step processing static bool jvmti_process_jit_single_step_event(VMBreakInterface* intf, VMBreakPointRef* bp_ref) { @@ -421,8 +540,13 @@ static bool jvmti_process_jit_single_ste Method* m = (Method*)method; NativeCodePtr addr = bp->addr; assert(addr); - assert(bp_ref->data == NULL); - + + if (NULL != bp_ref->data) + { + jvmti_start_single_step_in_virtual_method(ti, intf, bp_ref); + return true; + } + hythread_t h_thread = hythread_self(); jthread j_thread = jthread_get_java_thread(h_thread); ObjectHandle hThread = oh_allocate_local_handle(); @@ -536,11 +660,23 @@ void jvmti_set_single_step_breakpoints(D << " :" << locations[iii].location << " :" << locations[iii].native_location); + void *data = NULL; + if (locations[iii].no_event) + { + bool *virtual_flag; + jvmtiError error = _allocate(sizeof(bool), + (unsigned char**)&virtual_flag); + + 
assert(error == JVMTI_ERROR_NONE); + *virtual_flag = true; + data = virtual_flag; + } + VMBreakPointRef* ref = ss_state->predicted_breakpoints->add((jmethodID)locations[iii].method, locations[iii].location, locations[iii].native_location, - NULL); + data); assert(ref); } } @@ -559,9 +695,10 @@ void jvmti_remove_single_step_breakpoint jvmtiError jvmti_get_next_bytecodes_from_native(VM_thread *thread, jvmti_StepLocation **next_step, unsigned *count, - bool stack_step_up) + bool invoked_frame) { ASSERT_NO_INTERPRETER; + VMBreakPoints *vm_brpt = VM_Global_State::loader_env->TI->vm_brpt; *count = 0; // create stack iterator, current stack frame should be native @@ -578,7 +715,7 @@ jvmtiError jvmti_get_next_bytecodes_from } assert(!si_is_native(si)); - if( stack_step_up ) { + if( invoked_frame ) { // get previous stack frame si_goto_previous(si); } @@ -594,19 +731,111 @@ jvmtiError jvmti_get_next_bytecodes_from assert(result == EXE_ERROR_NONE); TRACE2( "jvmti.break.ss", "SingleStep method IP: " << ip ); - // set step location structure - *count = 1; - jvmtiError error = _allocate( sizeof(jvmti_StepLocation), (unsigned char**)next_step ); - if( error != JVMTI_ERROR_NONE ) { - si_free(si); - return error; + // In case stack iterator points to invoke (in invoked_frame) + // case the IP may point to an instruction after a call, but + // still on the invoke* bytecode. It can be found out by + // iterating through instructions inside of the same + // bytecode. If we find a call in it, then we're on a correct + // bytecode, if not, we're on a tail of an invoke* + // instruction. It is necessary to move one bytecode ahead in + // this case. 
+ if (invoked_frame) + { + // Determine if the found bytecode if of an invoke type + const unsigned char *bytecode = func->get_byte_code_addr(); + uint16 next_location = 0; + + switch (bytecode[bc]) + { + case OPCODE_INVOKEINTERFACE: /* 0xb9 + u2 + u1 + u1 */ + next_location = bc + 5; + break; + case OPCODE_INVOKESPECIAL: /* 0xb7 + u2 */ + case OPCODE_INVOKESTATIC: /* 0xb8 + u2 */ + case OPCODE_INVOKEVIRTUAL: /* 0xb6 + u2 */ + next_location = bc + 3; + break; + } + + // Yes this is an invoke type bytecode + if (next_location) + { + NativeCodePtr next_ip; + OpenExeJpdaError UNREF result = jit->get_native_location_for_bc(func, + next_location, &next_ip); + assert(result == EXE_ERROR_NONE); + assert(ip < next_ip); + + VMBreakPoint *bp = vm_brpt->find_breakpoint(ip); + + InstructionDisassembler disasm; + if (bp) + disasm = *bp->disasm; + else + disasm = ip; + + NativeCodePtr call_ip = NULL; + do + { + ip = (NativeCodePtr)((POINTER_SIZE_INT)ip + disasm.get_length_with_prefix()); + + // Another thread could have instrumented this location for + // prediction of invokevirtual or invokeinterface, so it is + // necessary to check that location may be instrumented + uint8 b = *((uint8 *)ip); + if (b == INSTRUMENTATION_BYTE) + { + bp = vm_brpt->find_breakpoint(ip); + assert(bp); + disasm = *bp->disasm; + } + else + disasm = ip; + + // Bytecode may be either invokevirtual or + // invokeinterface which generate indirect calls or + // invokestatic or invokespecial which generate + // relative calls + if (disasm.get_type() == InstructionDisassembler::INDIRECT_CALL || + disasm.get_type() == InstructionDisassembler::RELATIVE_CALL) + call_ip = ip; + } + while (ip < next_ip); + + // We've found no call instruction in this + // bytecode. This means we're standing on the tail of + // invoke. 
Need to shift to the next bytecode + if (NULL == call_ip) + { + TRACE2("jvmti.break.ss", "SingleStep IP shifted in prediction to: " << call_ip); + bc = next_location; + } + } + // No this is not an invoke type bytecode, so the IP + // points to a normal bytecode after invoke. No need to + // shift to the next one. + + // set step location structure + *count = 1; + jvmtiError error = _allocate( sizeof(jvmti_StepLocation), (unsigned char**)next_step ); + if( error != JVMTI_ERROR_NONE ) { + si_free(si); + return error; + } + (*next_step)->method = func; + // IP in stack iterator points to a bytecode next after the one + // which caused call of the method. So next location is the 'bc' which + // IP points to. + (*next_step)->location = bc; + (*next_step)->native_location = ip; + (*next_step)->no_event = false; + } + else + { + // Find next bytecode after the one we're currently + // standing on + jvmti_SingleStepLocation(thread, func, bc, next_step, count); } - (*next_step)->method = func; - // IP in stack iterator points to a bytecode next after the one - // which caused call of the method. So next location is the 'bc' which - // IP points to. 
- (*next_step)->location = bc; - (*next_step)->native_location = ip; } si_free(si); return JVMTI_ERROR_NONE; diff --git a/vm/vmcore/src/util/win/ia32/nt_exception_filter.cpp b/vm/vmcore/src/util/win/ia32/nt_exception_filter.cpp index 8630ffd..d357bef 100644 --- a/vm/vmcore/src/util/win/ia32/nt_exception_filter.cpp +++ b/vm/vmcore/src/util/win/ia32/nt_exception_filter.cpp @@ -31,6 +31,14 @@ #include "jvmti_break_intf.h" #include #include +#if INSTRUMENTATION_BYTE == INSTRUMENTATION_BYTE_INT3 +#define JVMTI_EXCEPTION_STATUS STATUS_BREAKPOINT +#elif INSTRUMENTATION_BYTE == INSTRUMENTATION_BYTE_HLT || INSTRUMENTATION_BYTE == INSTRUMENTATION_BYTE_CLI +#define JVMTI_EXCEPTION_STATUS STATUS_PRIVILEGED_INSTRUCTION +#else +#error Unknown value of INSTRUMENTATION_BYTE +#endif + void nt_to_vm_context(PCONTEXT context, Registers* regs) { regs->eax = context->Eax; @@ -297,7 +305,7 @@ LONG NTAPI vectored_exception_handler(LP code == STATUS_INTEGER_DIVIDE_BY_ZERO || code == STATUS_STACK_OVERFLOW) && vm_identify_eip((void *)context->Eip) == VM_TYPE_JAVA) || - code == STATUS_BREAKPOINT) + code == JVMTI_EXCEPTION_STATUS) { run_default_handler = false; } else if (code == STATUS_STACK_OVERFLOW) { @@ -375,7 +383,7 @@ LONG NTAPI vectored_exception_handler(LP // assertions for breakpoints which it has set in Java inside of // breakpoint handling function. Otherwise this assert should not // fail in case _CrtDbgBreak() was added somewhere in VM. - assert(!hythread_is_suspend_enabled() || code == STATUS_BREAKPOINT); + assert(!hythread_is_suspend_enabled() || code == JVMTI_EXCEPTION_STATUS); Global_Env *env = VM_Global_State::loader_env; Class *exc_clss = 0; @@ -412,7 +420,7 @@ LONG NTAPI vectored_exception_handler(LP exc_clss = env->java_lang_ArithmeticException_Class; } break; - case STATUS_BREAKPOINT: + case JVMTI_EXCEPTION_STATUS: // JVMTI breakpoint in JITted code { Registers regs;