Index: vm/vmcore/src/jit/jit_runtime_support.cpp
===================================================================
--- vm/vmcore/src/jit/jit_runtime_support.cpp	(revision 615004)
+++ vm/vmcore/src/jit/jit_runtime_support.cpp	(working copy)
@@ -2170,9 +2170,9 @@
     ASSERT_THROW_AREA;
     Global_Env* env = VM_Global_State::loader_env;
 
-    if (obj == NULL) {
+    if (obj == (ManagedObject*)VM_Global_State::loader_env->managed_null) {
         exn_throw_by_class(env->java_lang_NullPointerException_Class);
-        return NULL;
+        return obj;
     }
 
     Method* m = NULL;
@@ -2222,9 +2222,9 @@
     ASSERT_THROW_AREA;
     Global_Env* env = VM_Global_State::loader_env;
 
-    if (obj == NULL) {
+    if (obj == (ManagedObject*)VM_Global_State::loader_env->managed_null) {
         exn_throw_by_class(env->java_lang_NullPointerException_Class);
-        return NULL;
+        return obj;
     }
 
     Method* m = NULL;
@@ -2356,7 +2356,7 @@
 
 ///OPCODE_CHECKCAST
 static void *rth_checkcast_withresolve(Class_Handle klass, unsigned cp_idx, ManagedObject* obj) {
-    if (obj==NULL) {
+    if (obj == (ManagedObject*)VM_Global_State::loader_env->managed_null) {
         return obj;
     }
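The vmcore hunks above replace literal-NULL tests with a comparison against the VM's managed null, and make the helpers hand back the reference they were given rather than a bare NULL. The distinction matters once compressed references are in play: there the reference that represents null is a non-zero sentinel (the heap base), so `obj == NULL` never fires. Below is a minimal standalone sketch of that failure mode, assuming a compressed-reference scheme in which offset 0 decodes to the heap base; `kHeapBase`, `decode`, and `is_managed_null` are illustrative names, not Harmony's API.

    // Sketch: why a VM with compressed references cannot test managed
    // references against literal NULL. All names here are illustrative.
    #include <cassert>
    #include <cstdint>

    static const uintptr_t kHeapBase = 0x20000000;   // assumed heap base

    typedef uintptr_t ManagedRef;                    // uncompressed reference

    // With compressed references, a 32-bit offset of 0 decodes to the heap
    // base, so the "null" a JITted method hands to a runtime helper is a
    // non-zero pointer.
    inline ManagedRef decode(uint32_t compressed) {
        return kHeapBase + compressed;
    }

    inline bool is_managed_null(ManagedRef r) {
        return r == kHeapBase;   // compare against the sentinel, not 0
    }

    int main() {
        ManagedRef null_ref = decode(0);
        assert(null_ref != 0);              // an `== NULL` test never fires
        assert(is_managed_null(null_ref));  // the sentinel test does
        return 0;
    }

Returning `obj` instead of NULL presumably keeps the same invariant on the way out: the caller gets back whatever this VM considers null, not a zero that managed code would no longer recognize as such.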
Index: vm/jitrino/src/jet/cg_meth.cpp
===================================================================
--- vm/jitrino/src/jet/cg_meth.cpp	(revision 615004)
+++ vm/jitrino/src/jet/cg_meth.cpp	(working copy)
@@ -730,6 +730,7 @@
     }
 }
 
+#if 0
 void CodeGen::gen_invoke(JavaByteCodes opcod, Method_Handle meth, unsigned short cpIndex,
                          const ::std::vector<jtype> &args, jtype retType)
 {
@@ -805,7 +806,6 @@
             char* helper = opcod == OPCODE_INVOKESTATIC ? rt_helper_get_invokestatic_addr_withresolve :
                                                           rt_helper_get_invokespecial_addr_withresolve;
             gen_call_vm(cs_get_is_addr, helper, 0, m_klass, cpIndex);
-            runlock(cs_get_is_addr);
         } else {
             assert(opcod == OPCODE_INVOKEVIRTUAL || opcod == OPCODE_INVOKEINTERFACE);
             static const CallSig cs_get_iv_addr(CCONV_HELPERS, iplatf, i32, jobj);
@@ -816,7 +816,6 @@
             Val vcpIdx(cpIndex);
             gen_args(cs_get_iv_addr, 0, &vclass, &vcpIdx, &thiz);
             gen_call_vm(cs_get_iv_addr, helper, 3);
-            runlock(cs_get_iv_addr);
         }
 
         rlock(gr_ret); //WARN: call addr is in gr_ret -> lock it
@@ -907,7 +906,216 @@
         alu(alu_sub, sp, stackFix);
     }
 }
+#endif
 
+void CodeGen::gen_invoke(JavaByteCodes opcod, Method_Handle meth, unsigned short cpIndex,
+                         const ::std::vector<jtype> &args, jtype retType)
+{
+    const unsigned slots = count_slots(args);
+    // where (stack depth) 'this' is stored for the method being invoked
+    // (if applicable)
+    const unsigned thiz_depth = slots - 1;
+
+    const JInst& jinst = *m_curr_inst;
+
+    CallSig cs(CCONV_MANAGED, args);
+    for (unsigned i=0; i<
[... added lines missing from the posted copy: the remainder of this loop and the branch that obtains the call address for an unresolved target, resuming mid-branch below ...]
+        rlock(gr_ret); //WARN: call addr is in gr_ret -> lock it
+
+        //2. call java method
+        stackFix = gen_stack_to_args(true, cs, 0);
+        vpark();
+        gen_gc_stack(-1, true);
+
+        AR gr = valloc(iplatf);
+        ld(jobj, gr, gr_ret); //load indirect addr
+        call(gr, cs, is_set(DBG_CHECK_STACK));
+        runlock(gr_ret);
+    }
+    else if (opcod == OPCODE_INVOKEINTERFACE) {
+        // if it's INVOKEINTERFACE, resolve the target's vtable first
+        Class_Handle klass = method_get_class(meth);
+        const CallSig cs_vtbl(CCONV_HELPERS, jobj, jobj);
+        rlock(cs_vtbl);
+
+        Val &thiz = vstack(thiz_depth, true);
+        rlock(thiz);
+        gen_check_null(thiz, true);
+
+        // Prepare args for ldInterface helper
+        if (cs_vtbl.reg(0) == gr_x) {
+            assert(cs_vtbl.size() != 0);
+            alu(alu_sub, sp, cs_vtbl.size());
+            st(jobj, thiz.reg(), sp, cs_vtbl.off(0));
+        }
+        else {
+            if (cs_vtbl.size() != 0) {
+                assert(cs_vtbl.caller_pops());
+                alu(alu_sub, sp, cs_vtbl.size());
+            }
+            mov(cs_vtbl.get(0), thiz.as_opnd());
+        }
+        runlock(thiz);
+        gen_call_vm(cs_vtbl, rt_helper_get_vtable, 1, klass);
+        runlock(cs_vtbl);
+        //
+        // Method's vtable is in gr_ret now, prepare stack
+        //
+        rlock(gr_ret);
+        //st(jobj, gr_ret, m_base, voff(m_stack.scratch()));
+        stackFix = gen_stack_to_args(true, cs, 0);
+        vpark();
+        gen_gc_stack(-1, true);
+        unsigned offset = method_get_offset(meth);
+        //ld(jobj, gr_ret, m_base, voff(m_stack.scratch()));
+        runlock(gr_ret);
+        ld(jobj, gr_ret, gr_ret, offset);
+        call(gr_ret, cs, is_set(DBG_CHECK_STACK));
+    }
+    else if (opcod == OPCODE_INVOKEVIRTUAL) {
+        Val &thiz = vstack(thiz_depth, true);
+        rlock(thiz);
+
+        stackFix = gen_stack_to_args(true, cs, 0);
+        vpark();
+        gen_gc_stack(-1, true);
+        // Check for null here - we just spilled all the args and
+        // parked all the registers, so we have a chance to use HW NPE
+        gen_check_null(thiz, true);
+
+        AR gr = valloc(jobj);
+        unsigned offset = method_get_offset(meth);
+        Opnd ptr;
+
+        if (g_vtbl_squeeze) {
+            ld4(gr, thiz.reg(), rt_vtable_offset);
+            AR gr_vtbase = valloc(jobj);
+            movp(gr_vtbase, (char*)VTBL_BASE+offset);
+            alu(jobj, alu_add, gr, gr_vtbase);
+            ptr = Opnd(jobj, gr, 0);
+        }
+        else {
+            ld(jobj, gr, thiz.reg(), rt_vtable_offset);
+            ptr = Opnd(jobj, gr, offset);
+        }
+        call(ptr, cs, is_set(DBG_CHECK_STACK));
+        runlock(thiz);
+    }
+    else {
+        Val *thiz = NULL;
+
+        if (!is_static)
+            thiz = &vstack(thiz_depth, false);
+
+        stackFix = gen_stack_to_args(true, cs, 0);
+        vpark();
+        gen_gc_stack(-1, true);
+
+        if (!is_static)
+            // Check for null here - we just spilled all the args and
+            // parked all the registers, so we have a chance to use HW NPE.
+            // For INVOKESPECIAL, we're using the indirect address provided
+            // by the VM. This means we do not read the vtable, so there is
+            // no memory access and we can't rely on the HW check - we have
+            // to use an explicit one. Not a big loss, as INVOKESPECIAL
+            // mostly comes right after NEW, which guarantees non-null.
+            // In lazy resolution mode we must do the check manually anyway
+            // and provide the resolution helper with a non-null object.
+            gen_check_null(*thiz, false);
+
+        void * paddr = method_get_indirect_address(meth);
+#ifdef _IA32_
+        Opnd ptr(jobj, ar_x, paddr);
+#else
+        AR gr = valloc(jobj);
+        movp(gr, paddr);
+        ld(jobj, gr, gr);
+        Opnd ptr(jobj, gr);
+#endif
+        call(ptr, cs, is_set(DBG_CHECK_STACK));
+    }
+
+    // to unlock after gen_stack_to_args()
+    runlock(cs);
+    // to unlock after the explicit lock at the top of this method
+    runlock(cs);
+
+    if (retType != jvoid) {
+        gen_save_ret(retType);
+    }
+    if (stackFix != 0) {
+        alu(alu_sub, sp, stackFix);
+    }
+}
+
 void CodeGen::gen_args(const CallSig& cs, unsigned idx, const Val * parg0,
                        const Val * parg1, const Val * parg2, const Val * parg3,
                        const Val * parg4, const Val * parg5, const Val * parg6)
@@ -923,7 +1131,7 @@
         if (args[i] == 0) {
             break;
         }
-        rlock(*args[0]);
+        rlock(*args[i]);
     }
     // 2nd, generate moves
     for (unsigned i=0; i<
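Several hunks above (and in cg_fld_arr.cpp and cg_obj.cpp below) look like lock-balance repairs on jet's reference-counted register locking: the stray runlock(...) calls after gen_call_vm() are dropped, and the gen_args() loop now pins each argument's register instead of locking args[0] once per iteration, which left the remaining argument registers free to be reallocated and clobbered by the moves generated next. A minimal sketch of that failure, with an invented RegPool standing in for jet's actual allocator:

    // Sketch of reference-counted register locking. RegPool and its
    // methods are hypothetical; only the locking discipline is the point.
    #include <cassert>

    struct RegPool {
        int lock_count[8];                  // one slot per register
        RegPool() { for (int i = 0; i < 8; i++) lock_count[i] = 0; }
        void rlock(int reg)   { lock_count[reg]++; }
        void runlock(int reg) { assert(lock_count[reg] > 0); lock_count[reg]--; }
        bool is_free(int reg) const { return lock_count[reg] == 0; }
    };

    int main() {
        int args[3] = {2, 4, 5};            // registers holding three args

        RegPool buggy;                      // pre-patch loop: locks args[0] thrice
        for (int i = 0; i < 3; i++) buggy.rlock(args[0]);
        // registers 4 and 5 still look free and may be clobbered:
        assert(buggy.is_free(args[1]) && buggy.is_free(args[2]));

        RegPool fixed;                      // patched loop: each register pinned
        for (int i = 0; i < 3; i++) fixed.rlock(args[i]);
        assert(!fixed.is_free(args[1]) && !fixed.is_free(args[2]));
        return 0;
    }

The same counting also explains the removed runlock calls: unlocking a signature that was never locked (or that the callee already unlocked) drives the count negative and unbalances every later pairing.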
Index: vm/jitrino/src/jet/cg_fld_arr.cpp
===================================================================
--- vm/jitrino/src/jet/cg_fld_arr.cpp	(revision 615004)
+++ vm/jitrino/src/jet/cg_fld_arr.cpp	(working copy)
@@ -116,7 +116,6 @@
     } else { //field is not resolved -> generate code to request offset
         static const CallSig cs_get_offset(CCONV_HELPERS, iplatf, i32, i32);
         gen_call_vm(cs_get_offset, rt_helper_field_get_offset_withresolve, 0, fieldOp.enclClass, fieldOp.cpIndex, fieldOp.isPut());
-        runlock(cs_get_offset);
         rlock(gr_ret);
         Val& ref = vstack(ref_depth, true);
         runlock(gr_ret);
@@ -254,7 +253,6 @@
     } else { //field is not resolved -> generate code to request address
         static const CallSig cs_get_addr(CCONV_HELPERS, iplatf, i32, i32);
         gen_call_vm(cs_get_addr, rt_helper_field_get_address_withresolve, 0, fieldOp.enclClass, fieldOp.cpIndex, fieldOp.isPut());
-        runlock(cs_get_addr);
         where = Opnd(jt, gr_ret, 0);
     }
 }
Index: vm/jitrino/src/jet/cg_obj.cpp
===================================================================
--- vm/jitrino/src/jet/cg_obj.cpp	(revision 615004)
+++ vm/jitrino/src/jet/cg_obj.cpp	(working copy)
@@ -120,7 +120,6 @@
         assert(lazy);
         static CallSig ci_get_class_withresolve(CCONV_HELPERS, iplatf, i32);
         gen_call_vm(ci_get_class_withresolve, rt_helper_get_class_withresolve, 0, enclClass, cpIndex);
-        runlock(ci_get_class_withresolve);
         klassVal = Val(jobj, gr_ret);
     } else {
         klass = resolve_class(m_compileHandle, enclClass, cpIndex);
@@ -212,7 +211,6 @@
     Val vcpIdx(cpIdx);
     gen_args(cs_with_resolve, 0, &vclass, &vcpIdx, &tos);
     gen_call_vm(cs_with_resolve, helper, 3);
-    runlock(cs_with_resolve);
     vpop();//pop obj
     gen_save_ret(opcode == OPCODE_CHECKCAST ? jobj : i32);
 }
Index: vm/jitrino/src/jet/enc.cpp
===================================================================
--- vm/jitrino/src/jet/enc.cpp	(revision 615004)
+++ vm/jitrino/src/jet/enc.cpp	(working copy)
@@ -371,14 +371,24 @@
                 mov(Opnd(i32, sp, cs.off(i)), val);
             }
             else {
+#ifdef _EM64T_
+                long val = va_arg(valist, long);
+                mov(Opnd(i64, sp, cs.off(i)), val);
+#else
                 int val = lo32((jlong)(int_ptr)addr);
                 mov(Opnd(i32, sp, cs.off(i)), val);
                 val = hi32((jlong)(int_ptr)addr);
                 mov(Opnd(i32, sp, cs.off(i)+4), val);
+#endif
             }
         }
         else if (jt==i64) {
+#ifdef _EM64T_
+            long val = va_arg(valist, long);
+            mov(gr == gr_x ? Opnd(i64, sp, cs.off(i)) : Opnd(i64, gr), val);
+#else
             assert(false);
+#endif
         }
         else {
             int val = va_arg(valist, int);
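The enc.cpp hunk teaches the immediate-argument marshalling about 64-bit targets: on EM64T a pointer-sized value is pulled from the va_list in one read and stored with a single 64-bit move, where the IA-32 path (which previously asserted false for i64) splits it into lo32/hi32 halves. A standalone sketch of the same split follows; `store_imm` and the printf "encoder" are illustrative stand-ins, not the jet encoder's API.

    // Sketch of the 32- vs 64-bit varargs handling in the enc.cpp hunk.
    // lo32/hi32 mimic the helpers the patch uses; the printfs stand in
    // for the mov() calls a real encoder would emit.
    #include <cstdarg>
    #include <cstdint>
    #include <cstdio>

    inline uint32_t lo32(uint64_t v) { return (uint32_t)(v & 0xFFFFFFFFu); }
    inline uint32_t hi32(uint64_t v) { return (uint32_t)(v >> 32); }

    static void store_imm(int off, ...) {
        va_list valist;
        va_start(valist, off);
        uint64_t val = va_arg(valist, uint64_t);  // one read of the full value
    #if defined(__x86_64__) || defined(_M_X64)
        // EM64T: a single 64-bit immediate move suffices.
        printf("mov qword [sp+%d], 0x%llx\n", off, (unsigned long long)val);
    #else
        // IA-32: no 64-bit moves; emit lo/hi 32-bit halves, as the
        // pre-existing #else branch of the patch does.
        printf("mov dword [sp+%d], 0x%x\n", off, lo32(val));
        printf("mov dword [sp+%d], 0x%x\n", off + 4, hi32(val));
    #endif
        va_end(valist);
    }

    int main() {
        store_imm(16, (uint64_t)0x0123456789abcdefULL);
        return 0;
    }

The ternary in the patched i64 branch makes the same distinction the helper signature forces everywhere else: if the argument has no register assigned (gr == gr_x) the value goes to its stack slot, otherwise straight into the register.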