diff --git a/vm/jitrino/src/jet/cg.cpp b/vm/jitrino/src/jet/cg.cpp
index 7857aac..cf274e6 100644
--- a/vm/jitrino/src/jet/cg.cpp
+++ b/vm/jitrino/src/jet/cg.cpp
@@ -670,6 +670,7 @@ void CodeGen::gen_call_vm_restore(bool e
     }
     // 2.
     vpark();
+    gen_gc_stack(-1, true);
     // 3.
     va_list valist;
     va_start(valist, idx);
diff --git a/vm/jitrino/src/jet/cg.h b/vm/jitrino/src/jet/cg.h
index 0ed99d9..09c5fe6 100644
--- a/vm/jitrino/src/jet/cg.h
+++ b/vm/jitrino/src/jet/cg.h
@@ -181,6 +181,35 @@ public:
     void do_field_op(JavaByteCodes op, jtype jt, Field_Handle fld);
 
     /**
+     * @brief Generates a modification watchpoint if the VM needs it.
+     *
+     * @param jt - field type.
+     * @param fld - field handle.
+     */
+    void gen_modification_watchpoint(JavaByteCodes opcode, jtype jt, Field_Handle fld);
+
+    /**
+     * @brief Generates an access watchpoint if the VM needs it.
+     *
+     * @param jt - field type.
+     * @param fld - field handle.
+     */
+    void gen_access_watchpoint(JavaByteCodes opcode, jtype jt, Field_Handle fld);
+
+    /**
+     * @brief Restores all scratch registers and the operand stack state.
+     *
+     * @param saveBB - pointer to the saved operand stack state to restore from.
+     */
+    void pop_all_state(BBState* saveBB);
+    /**
+     * @brief Saves all scratch registers and the operand stack state.
+     *
+     * @param saveBB - pointer to the storage for the saved operand stack state.
+     */
+    void push_all_state(BBState* saveBB);
+
+    /**
      * @brief Generates code for INVOKE instructions.
      */
     void gen_invoke(JavaByteCodes opcod, Method_Handle meth,
diff --git a/vm/jitrino/src/jet/cg_fld_arr.cpp b/vm/jitrino/src/jet/cg_fld_arr.cpp
index aeef9ea..e5d979b 100644
--- a/vm/jitrino/src/jet/cg_fld_arr.cpp
+++ b/vm/jitrino/src/jet/cg_fld_arr.cpp
@@ -232,6 +232,10 @@ void CodeGen::do_field_op(JavaByteCodes
         gen_call_throw(ci_helper_linkerr, rt_helper_throw_linking_exc, 0,
                        m_klass, jinst.op0, jinst.opcode);
     }
+
+    if (!get && compilation_params.exe_notify_field_modification) {
+        gen_modification_watchpoint(opcode, jt, fld);
+    }
 
     Opnd where;
     if (field_op) {
@@ -253,47 +257,12 @@ void CodeGen::do_field_op(JavaByteCodes
     // compilation, without access to g_refs_squeeze in runtime.
     assert(is_ia32() || g_refs_squeeze);
-    if (get) {
-        if (compilation_params.exe_notify_field_access) {
-            // Check whether VM need access notifications
-
-            char* fld_tr_add;
-            char fld_tr_mask;
-            field_get_track_access_flag(fld, &fld_tr_add, &fld_tr_mask);
-
-            Val fld_track_mask((int)fld_tr_mask);
-
-            AR ar = valloc(jobj);
-            movp(ar, (void*)fld_tr_add);
-            Opnd fld_track_opnd(i32, ar, 0);
-            rlock(fld_track_opnd);
-
-            alu(alu_test, fld_track_opnd, fld_track_mask.as_opnd());
-            runlock(fld_track_opnd);
-
-            unsigned br_off = br(z, 0, 0, taken);
-
-            //JVMTI helper takes field handle, method handle, byte code location, pointer
-            //to reference for fields or NULL for statics
-
-            const CallSig cs_ti_faccess(CCONV_HELPERS, jobj, jobj, i64, jobj);
-            rlock(cs_ti_faccess);
-            Val vlocation((jlong)m_pc);
-            Val vfield(jobj, fld);
-            Val vmeth(jobj, m_method);
-            Val vobject = Val(jobj, NULL);
-
-            if (field_op) {
-                vobject = vstack(0);
-            }
-
-            gen_args(cs_ti_faccess, 0, &vfield, &vmeth, &vlocation, &vobject);
-            gen_call_vm(cs_ti_faccess, rt_helper_ti_field_access, cs_ti_faccess.count());
-            runlock(cs_ti_faccess);
-
-            patch(br_off, ip());
-        }// end if (compilation_params.exe_notify_field_access)
+    if (get && compilation_params.exe_notify_field_access) {
+        gen_access_watchpoint(opcode, jt, fld);
+    }
+
+    if (get) {
         if (field_op) {
             // pop out ref
@@ -343,69 +312,6 @@ void CodeGen::do_field_op(JavaByteCodes
     vunref(jt);
-    if (compilation_params.exe_notify_field_modification) {
-        // Check whether VM need access notifications
-
-        char* fld_tr_add;
-        char fld_tr_mask;
-        field_get_track_modification_flag(fld, &fld_tr_add, &fld_tr_mask);
-
-        Val fld_track_mask((int)fld_tr_mask);
-
-        AR ar = valloc(jobj);
-        movp(ar, (void*)fld_tr_add);
-        Opnd fld_track_opnd(i32, ar, 0);
-        rlock(fld_track_opnd);
-
-        alu(alu_test, fld_track_opnd, fld_track_mask.as_opnd());
-        runlock(fld_track_opnd);
-
-        unsigned br_off = br(z, 0, 0, taken);
-
-        //JVMTI helper takes field handle, method handle, byte code location, pointer
-        //to reference for fields or NULL for statics, pointer to field value
-
-        Val fieldVal;
-        Val fieldValPtr = Val(jobj, valloc(jobj));
-        rlock(fieldValPtr);
-        int st_depth = field_op ?
-                       0 :-1;
-
-        if (jt != jvoid) {
-            // Make sure the top item is on the memory
-            vswap(st_depth + 1);
-            if (is_big(jt)) {
-                vswap(st_depth + 2);
-            }
-            const Val& s = vstack(st_depth + 1);
-            assert(s.is_mem());
-            lea(fieldValPtr.as_opnd(), s.as_opnd());
-        }
-        else {
-            Opnd stackTop(jobj, m_base, voff(m_stack.unused()));
-            lea(fieldValPtr.as_opnd(), stackTop);
-        }
-        runlock(fieldValPtr);
-
-        const CallSig cs_ti_fmodif(CCONV_HELPERS, jobj, jobj, i64, jobj, jobj);
-        rlock(cs_ti_fmodif);
-        Val vlocation((jlong)m_pc);
-
-        Val vfield(jobj, fld);
-        Val vmeth(jobj, m_method);
-        Val vobject = Val(jobj, NULL);
-
-        if (field_op) {
-            vobject = vstack(st_depth);
-        }
-
-        gen_args(cs_ti_fmodif, 0, &vfield, &vmeth, &vlocation, &vobject, &fieldValPtr);
-        gen_call_vm(cs_ti_fmodif, rt_helper_ti_field_modification, cs_ti_fmodif.count());
-        runlock(cs_ti_fmodif);
-
-        patch(br_off, ip());
-    }// end if (compilation_params.exe_notify_field_modification)
-
-
     if (!is_ia32() && g_refs_squeeze && jt == jobj && vis_imm(0)) {
         const Val& s = m_jframe->dip(0);
         unsigned ref = (unsigned)(int_ptr)((const char*)s.pval() - OBJ_BASE);
diff --git a/vm/jitrino/src/jet/cg_instr.cpp b/vm/jitrino/src/jet/cg_instr.cpp
index c4cf2fb..df5e20d 100644
--- a/vm/jitrino/src/jet/cg_instr.cpp
+++ b/vm/jitrino/src/jet/cg_instr.cpp
@@ -56,7 +56,6 @@ void CodeGen::gen_gc_safe_point()
         }
         return;
     }
-    m_bbstate->seen_gcpt = true;
 
     // On Windows we could use a bit, but tricky way - we know about VM
     // internals and we know how Windows manages TIB, and thus we can get a
@@ -74,6 +73,227 @@ void CodeGen::gen_gc_safe_point()
     patch(br_off, ip());
 }
 
+void CodeGen::gen_modification_watchpoint(JavaByteCodes opcode, jtype jt, Field_Handle fld) {
+
+    unsigned ref_depth = is_wide(jt) ? 2 : 1;
+    bool field_op = (opcode == OPCODE_PUTFIELD) ? true : false;
+
+    // Check whether the VM needs modification notifications
+
+    char* fld_tr_add;
+    char fld_tr_mask;
+    field_get_track_modification_flag(fld, &fld_tr_add, &fld_tr_mask);
+
+    Val fld_track_mask((int)fld_tr_mask);
+
+    AR fld_trackAr = valloc(jobj);
+    movp(fld_trackAr, (void*)fld_tr_add);
+    Opnd fld_track_opnd(i32, fld_trackAr, 0);
+    //mov(fld_trackAr, Opnd(0xFFFFFFFF)); // Emulation to check access flag enabled
+    //Opnd fld_track_opnd(fld_trackAr);
+
+    rlock(fld_track_opnd);
+
+    alu(alu_test, fld_track_opnd, fld_track_mask.as_opnd());
+    runlock(fld_track_opnd);
+
+    unsigned br_off = br(z, 0, 0, taken);
+
+    // Store all scratch registers and the operand stack state
+    BBState saveBB;
+    push_all_state(&saveBB);
+
+    // JVMTI helper takes field handle, method handle, byte code location, pointer
+    // to reference for fields or NULL for statics, pointer to field value
+
+    AR fieldValBaseAr = valloc(jobj);
+    Val fieldValPtr = Val(jobj, fieldValBaseAr);
+    rlock(fieldValPtr);
+
+    if (jt != jvoid) {
+        // Make sure the value item is in memory
+        vswap(0);
+        if (is_big(jt)) {
+            vswap(1);
+        }
+        const Val& s = vstack(0);
+        assert(s.is_mem());
+        lea(fieldValPtr.as_opnd(), s.as_opnd());
+    } else {
+        Opnd stackTop(jobj, m_base, voff(m_stack.unused()));
+        lea(fieldValPtr.as_opnd(), stackTop);
+    }
+    runlock(fieldValPtr);
+
+#ifndef _EM64T_
+    // Workaround: do_mov does not put a jlong on the stack in gen_args on ia32
+    const CallSig cs_ti_fmodif(CCONV_HELPERS, jobj, jobj, i32, i32, jobj, jobj);
+    Val vlocation((jlong)m_pc);
+    Val vlocationHi((jlong)0);
+#else
+    const CallSig cs_ti_fmodif(CCONV_HELPERS, jobj, jobj, i64, jobj, jobj);
+    Val vlocation((jlong)m_pc);
+#endif
+
+    Val vfield(jobj, fld);
+    Val vmeth(jobj, m_method);
+    Val vobject = Val(jobj, NULL);
+
+    if (field_op) {
+        vobject = vstack(ref_depth);
+    }
+#ifndef _EM64T_
+    // Workaround: do_mov does not put a jlong on the stack in gen_args on ia32
+    gen_args(cs_ti_fmodif, 0, &vfield, &vmeth, &vlocationHi, &vlocation, &vobject, &fieldValPtr);
+#else
+    gen_args(cs_ti_fmodif, 0, &vfield, &vmeth, &vlocation, &vobject, &fieldValPtr);
+#endif
+
+    // 2. Park all locals and the operand stack
+    vpark();
+    // Store GC info
+    gen_gc_stack(-1, true);
+
+    // 3. Call the VM
+    rlock(cs_ti_fmodif);
+    AR gr = valloc(jobj);
+    call(is_set(DBG_CHECK_STACK), gr, rt_helper_ti_field_modification, cs_ti_fmodif, cs_ti_fmodif.count());
+    runlock(cs_ti_fmodif);
+
+    // Restore the operand stack state and scratch registers
+    pop_all_state(&saveBB);
+
+    patch(br_off, ip());
+}
+
+
+void CodeGen::gen_access_watchpoint(JavaByteCodes opcode, jtype jt, Field_Handle fld) {
+
+    bool field_op = (opcode == OPCODE_GETFIELD) ? true : false;
+
+    // Check whether the VM needs access notifications
+
+    char* fld_tr_add;
+    char fld_tr_mask;
+    field_get_track_access_flag(fld, &fld_tr_add, &fld_tr_mask);
+
+    Val fld_track_mask((int)fld_tr_mask);
+    AR fld_trackAr = valloc(jobj);
+    rlock(fld_trackAr);
+
+    //mov(fld_trackAr, Opnd(0xFFFFFFFF)); // Emulation to check access flag enabled
+    //Opnd fld_track_opnd(fld_trackAr);
+    movp(fld_trackAr, (void*)fld_tr_add);
+    Opnd fld_track_opnd(i32, fld_trackAr, 0);
+    alu(alu_test, fld_track_opnd, fld_track_mask.as_opnd());
+
+    runlock(fld_trackAr);
+
+    unsigned br_off = br(z, 0, 0, taken);
+
+    // Store all scratch registers and the operand stack state
+    BBState saveBB;
+    push_all_state(&saveBB);
+
+    // JVMTI helper takes field handle, method handle, byte code location, pointer
+    // to reference for fields or NULL for statics
+
+#ifndef _EM64T_
+    // Workaround: do_mov does not put a jlong on the stack in gen_args on ia32
+    const CallSig cs_ti_faccess(CCONV_HELPERS, jobj, jobj, i32, i32, jobj);
+    Val vlocation((jlong)m_pc);
+    Val vlocationHi((jlong)0);
+#else
+    const CallSig cs_ti_faccess(CCONV_HELPERS, jobj, jobj, i64, jobj);
+    Val vlocation((jlong)m_pc);
+#endif
+    rlock(cs_ti_faccess);
+
+    Val vfield(jobj, fld);
+    Val vmeth(jobj, m_method);
+    Val vobject = Val(jobj, NULL);
+
+    if (field_op) {
+        vobject = vstack(0);
+    }
+
+#ifndef _EM64T_
+    // Workaround: do_mov does not put a jlong on the stack in gen_args on ia32
+    gen_args(cs_ti_faccess, 0, &vfield, &vmeth, &vlocationHi, &vlocation, &vobject);
+#else
+    gen_args(cs_ti_faccess, 0, &vfield, &vmeth, &vlocation, &vobject);
+#endif
+
+    // 2. Park all locals and the operand stack
+    vpark();
+    // Store GC info
+    gen_gc_stack(-1, true);
+
+    // 3. Call the VM
+    rlock(cs_ti_faccess);
+    AR gr = valloc(jobj);
+    call(is_set(DBG_CHECK_STACK), gr, rt_helper_ti_field_access, cs_ti_faccess, cs_ti_faccess.count());
+    runlock(cs_ti_faccess);
+
+    // Restore the operand stack state and scratch registers
+    pop_all_state(&saveBB);
+
+    patch(br_off, ip());
+}
+
+
+void CodeGen::push_all_state(BBState* saveBB) {
+    *saveBB = *m_bbstate;
+    // 1. store scratch registers in a secret place
+    // 2. park everything
+    // 3. call whatever
+    // 4. restore scratch regs from the secret place
+    // 5. restore the state for callee-save registers
+    //-----------------------------------------------
+    // 1.
+    bool saveScratch = true;
+    for (unsigned i=0; i