Index: vm/jitrino/src/codegenerator/ia32/Ia32CallingConvention.cpp
===================================================================
--- vm/jitrino/src/codegenerator/ia32/Ia32CallingConvention.cpp    (revision 629393)
+++ vm/jitrino/src/codegenerator/ia32/Ia32CallingConvention.cpp    (working copy)
@@ -22,8 +22,8 @@
 #include "Ia32CallingConvention.h"
 #include "Ia32IRManager.h"
 
-namespace Jitrino{
-namespace Ia32{
+namespace Jitrino {
+namespace Ia32 {
 
 const CallingConvention * CallingConvention::str2cc(const char * cc_name)
 {
     if( NULL == cc_name ) { // default
@@ -35,7 +35,7 @@
     }
 
     if( !strcmpi(cc_name, "drl") ) {
-        return &CallingConvention_DRL;
+        return &CallingConvention_Managed;
     }
 
     if( !strcmpi(cc_name, "cdecl") ) {
@@ -48,12 +48,12 @@
 
 //========================================================================================
 STDCALLCallingConvention CallingConvention_STDCALL;
-DRLCallingConvention CallingConvention_DRL;
 CDECLCallingConvention CallingConvention_CDECL;
+ManagedCallingConvention CallingConvention_Managed;
 
 //========================================================================================
-// class STDCALLCallingConvention
+// STDCALLCallingConvention
 //========================================================================================
 
 #ifdef _EM64T_
@@ -66,22 +66,68 @@
 #endif
 #endif
 
+#ifdef _IA32_
 //______________________________________________________________________________________
-void STDCALLCallingConvention::getOpndInfo(ArgKind kind, uint32 count, OpndInfo * infos)const
+void STDCALLCallingConventionIA32::getOpndInfo(ArgKind kind, uint32 count, OpndInfo * infos) const
 {
-    if (kind==ArgKind_InArg){
-#ifdef _EM64T_
-        uint32 gpreg = 0;
-#ifdef _WIN64
-#define fpreg gpreg
+    if (kind == ArgKind_InArg) {
+        for (uint32 i=0; iType::Float ||typeTag0){
-                infos[i].slotCount=0;
-            }else{
-                switch(typeTag){
-                case Type::Void:
-                    infos[i].slotCount=0;
-                    break;
-                case Type::Float:
-                case Type::Double:
-                case Type::Single:
-#ifdef _EM64T_
-                    infos[i].slotCount=1;
-                    infos[i].slots[0]=RegName_XMM0;
-#else
-                    infos[i].slotCount=1;
-                    infos[i].slots[0]=RegName_FP0;
-#endif
-                    break;
-                default:
-                    {
-                        OpndSize size=IRManager::getTypeSize(typeTag);
-#ifdef _EM64T_
-                        infos[i].slotCount=1;
-                        infos[i].slots[0]=RegName_RAX;
-
-                        if (size==OpndSize_128){
-                            infos[i].slotCount=2;
-                            infos[i].slots[1]=RegName_RDX;
-                        }
-#else
-                        assert(size!=OpndSize_Null && size<=OpndSize_64);
-
-                        infos[i].slotCount=1;
-                        infos[i].slots[0]=RegName_EAX;
-
-                        if (size==OpndSize_64){
-                            infos[i].slotCount=2;
-                            infos[i].slots[1]=RegName_EDX;
-                        }
-#endif
+    } else {
+        assert(kind == ArgKind_RetArg);
+        assert(count <= 1);
+        if (count == 1) {
+            Type::Tag typeTag=(Type::Tag)infos[0].typeTag;
+            infos[0].isReg=true;
+            switch (typeTag) {
+            case Type::Void:
+                infos[0].slotCount=0;
+                break;
+            case Type::Float:
+            case Type::Double:
+            case Type::Single:
+                infos[0].slotCount=1;
+                infos[0].slots[0]=RegName_XMM0;
+                break;
+            default: {
+                OpndSize size=IRManager::getTypeSize(typeTag);
+                infos[0].slotCount=1;
+                infos[0].slots[0]=RegName_RAX;
+
+                if (size == OpndSize_128) {
+                    infos[0].slotCount=2;
+                    infos[0].slots[1]=RegName_RDX;
+                }
+            }
-            }
+            };
         }
     }
 }
+#endif
 
+#ifdef _IA32_
 //______________________________________________________________________________________
-Constraint STDCALLCallingConvention::getCalleeSavedRegs(OpndKind regKind)const
+Constraint STDCALLCallingConventionIA32::getCalleeSavedRegs(OpndKind regKind) const
 {
     switch (regKind){
     case OpndKind_GPReg:
+        return (Constraint(RegName_EBX)|RegName_EBP|RegName_ESI|RegName_EDI);
+    default:
+        return Constraint();
+    }
+}
+
+#else
+Constraint STDCALLCallingConventionEM64T::getCalleeSavedRegs(OpndKind regKind) const
+{
+    switch (regKind){
+    case OpndKind_GPReg:
 #ifdef _WIN64
         return (Constraint(RegName_RBX)|RegName_RBP|RegName_R12|RegName_R13|RegName_R14|RegName_R15|RegName_RSI|RegName_RDI);
-#elif _EM64T_
-        return (Constraint(RegName_RBX)|RegName_RBP|RegName_R12|RegName_R13|RegName_R14|RegName_R15);
 #else
-        return (Constraint(RegName_EBX)|RegName_EBP|RegName_ESI|RegName_EDI);
+        return (Constraint(RegName_RBX)|RegName_RBP|RegName_R12|RegName_R13|RegName_R14|RegName_R15);
 #endif
     default:
         return Constraint();
     }
 }
-#ifdef _EM64T_
+#endif
+
+
 //______________________________________________________________________________________
-void CDECLCallingConvention::getOpndInfo(ArgKind kind, uint32 count, OpndInfo * infos)const
+#ifdef _IA32_
+void ManagedCallingConventionIA32::getOpndInfo(ArgKind kind, uint32 count, OpndInfo * infos) const
 {
-    if (kind==ArgKind_InArg){
-        for (uint32 i=0; i0){
-            infos[i].isReg=false;
-            infos[i].slotCount=0;
-        }else{
-            switch(typeTag){
-            case Type::Void:
-                infos[i].isReg=false;
-                infos[i].slotCount=0;
-                break;
-            case Type::Float:
-            case Type::Double:
-            case Type::Single:
-                infos[i].isReg=true;
-                infos[i].slotCount=1;
-                infos[i].slots[0]=RegName_XMM0;
-                break;
-            default:
-                {
-                    OpndSize size=IRManager::getTypeSize(typeTag);
-                    infos[i].slotCount=1;
-                    infos[i].slots[0]=RegName_RAX;
-                    infos[i].isReg=true;
-                    if (size==OpndSize_128){
-                        infos[i].slotCount=2;
-                        infos[i].slots[1]=RegName_RDX;
-                    }
-                }
-            }
-        }
-    }
 }
+    return STDCALLCallingConventionIA32::getOpndInfo(kind, count, infos);
 }
-
+#else
 #endif
 
-//______________________________________________________________________________________
-
-
 }; // namespace Ia32
 }
Index: vm/jitrino/src/codegenerator/ia32/Ia32CallingConvention.h
===================================================================
--- vm/jitrino/src/codegenerator/ia32/Ia32CallingConvention.h    (revision 629393)
+++ vm/jitrino/src/codegenerator/ia32/Ia32CallingConvention.h    (working copy)
@@ -63,14 +63,14 @@
 //========================================================================================
 // class CallingConvention
 //========================================================================================
+
 /**
-Interface CallingConvention describes a particular calling convention.
-
-As calling convention rules can be more or less formally defined,
-it is worth to define this entity as a separate class or interface
-Implementers of this interface are used as arguments to some IRManager methods
-
-*/
+ * Interface CallingConvention describes a particular calling convention.
+ *
+ * As calling convention rules can be more or less formally defined,
+ * it is worth defining this entity as a separate class or interface.
+ * Implementers of this interface are used as arguments to some IRManager methods.
+ */
 class CallingConvention
 {
 public:
@@ -129,79 +129,96 @@
 
 //========================================================================================
-// class STDCALLCallingConvention
+// STDCALLCallingConvention
 //========================================================================================
 
-/** Implementation of CallingConvention for the STDCALL calling convention
-*/
-class STDCALLCallingConvention: public CallingConvention
+/**
+ * Implementation of CallingConvention for the STDCALL calling convention
+ */
+class STDCALLCallingConventionIA32: public CallingConvention
 {
 public:
-    virtual ~STDCALLCallingConvention() {}
-    virtual void getOpndInfo(ArgKind kind, uint32 argCount, OpndInfo * infos)const;
-    virtual Constraint getCalleeSavedRegs(OpndKind regKind)const;
-#ifdef _EM64T_
-    virtual bool calleeRestoresStack()const{ return false; }
-    virtual uint32 getStackAlignment()const { return STACK_ALIGNMENT; }
-#else
-    virtual bool calleeRestoresStack()const{ return true; }
-#endif
-    virtual bool pushLastToFirst()const{ return true; }
+    virtual ~STDCALLCallingConventionIA32() {}
+    virtual void getOpndInfo(ArgKind kind, uint32 argCount, OpndInfo * infos) const;
+    virtual Constraint getCalleeSavedRegs(OpndKind regKind) const;
+    virtual bool calleeRestoresStack() const { return true; }
+    virtual bool pushLastToFirst() const { return true; }
 };
 
+class STDCALLCallingConventionEM64T: public CallingConvention
+{
+public:
+
+    virtual ~STDCALLCallingConventionEM64T() {}
+    virtual void getOpndInfo(ArgKind kind, uint32 argCount, OpndInfo * infos) const;
+    virtual Constraint getCalleeSavedRegs(OpndKind regKind) const;
+    virtual bool calleeRestoresStack() const { return false; }
+    virtual uint32 getStackAlignment() const { return STACK_ALIGNMENT; }
+    virtual bool pushLastToFirst() const { return true; }
+
+};
+
 //========================================================================================
-// class DRLCallingConvention
+// CDECL CallingConvention
 //========================================================================================
 
 /**
- * Implementation of CallingConvention for the DRL IA32 calling convention
+ * Implementation of CallingConvention for the CDECL calling convention
 */
-class DRLCallingConventionIA32: public STDCALLCallingConvention
+class CDECLCallingConventionIA32: public STDCALLCallingConventionIA32
 {
 public:
-    virtual ~DRLCallingConventionIA32() {}
-    virtual bool pushLastToFirst()const{ return false; }
-    virtual uint32 getStackAlignment()const { return STACK_ALIGNMENT; }
+    virtual ~CDECLCallingConventionIA32() {}
+    virtual bool calleeRestoresStack() const { return false; }
 };
 
-/**
- * Implementation of CallingConvention for the DRL EM64T calling convention
- */
-class DRLCallingConventionEM64T: public STDCALLCallingConvention
+class CDECLCallingConventionEM64T: public STDCALLCallingConventionEM64T
 {
 public:
-    virtual ~DRLCallingConventionEM64T() {}
-    virtual uint32 getStackAlignment()const { return STACK_ALIGNMENT; }
+    virtual ~CDECLCallingConventionEM64T() {}
 };
 
 //========================================================================================
-// class CDECLCallingConvention
+// Managed CallingConvention
 //========================================================================================
 
-/** Implementation of CallingConvention for the CDECL calling convention
-*/
-class CDECLCallingConvention: public STDCALLCallingConvention
+
+/**
+ * Implementation of CallingConvention for the Managed calling convention
+ */
+class ManagedCallingConventionIA32: public STDCALLCallingConventionIA32
 {
 public:
-    virtual ~CDECLCallingConvention() {}
-    virtual bool calleeRestoresStack()const{ return false; }
-#ifdef _EM64T_
-    virtual void getOpndInfo(ArgKind kind, uint32 argCount, OpndInfo * infos)const;
-    virtual bool pushLastToFirst()const{ return true; }
-    virtual uint32 getStackAlignment()const { return STACK_ALIGNMENT; }
-#endif
+    virtual ~ManagedCallingConventionIA32() {}
+    virtual void getOpndInfo(ArgKind kind, uint32 argCount, OpndInfo * infos) const;
+    virtual bool pushLastToFirst() const { return false; }
+    virtual uint32 getStackAlignment() const { return STACK_ALIGNMENT; }
 };
 
+/**
+ * Implementation of CallingConvention for the Managed EM64T calling convention
+ */
+class ManagedCallingConventionEM64T: public STDCALLCallingConventionEM64T
+{
+public:
+    virtual ~ManagedCallingConventionEM64T() {}
+};
+
+
 #ifdef _EM64T_
-typedef DRLCallingConventionEM64T DRLCallingConvention;
+typedef STDCALLCallingConventionEM64T STDCALLCallingConvention;
+typedef CDECLCallingConventionEM64T CDECLCallingConvention;
+typedef ManagedCallingConventionEM64T ManagedCallingConvention;
 #else
-typedef DRLCallingConventionIA32 DRLCallingConvention;
+typedef STDCALLCallingConventionIA32 STDCALLCallingConvention;
+typedef CDECLCallingConventionIA32 CDECLCallingConvention;
+typedef ManagedCallingConventionIA32 ManagedCallingConvention;
 #endif
 
 extern STDCALLCallingConvention CallingConvention_STDCALL;
-extern DRLCallingConvention CallingConvention_DRL;
 extern CDECLCallingConvention CallingConvention_CDECL;
+extern ManagedCallingConvention CallingConvention_Managed;
 
 }; // namespace Ia32
 }
Index: vm/jitrino/src/codegenerator/ia32/Ia32IRManager.cpp
===================================================================
--- vm/jitrino/src/codegenerator/ia32/Ia32IRManager.cpp    (revision 629393)
+++ vm/jitrino/src/codegenerator/ia32/Ia32IRManager.cpp    (working copy)
@@ -1262,7 +1262,7 @@
     HELPER_CALLING_CONVENTION callConv = compilationInterface.getRuntimeHelperCallingConvention(helperId);
     switch (callConv){
     case CALLING_CONVENTION_DRL:
-        return &CallingConvention_DRL;
+        return &CallingConvention_Managed;
     case CALLING_CONVENTION_STDCALL:
         return &CallingConvention_STDCALL;
     case CALLING_CONVENTION_CDECL:
@@ -1276,7 +1276,7 @@
 //_________________________________________________________________________________________________
 const CallingConvention * IRManager::getCallingConvention(MethodDesc * methodDesc)const
 {
-    return &CallingConvention_DRL;
+    return &CallingConvention_Managed;
 }
 
 //_________________________________________________________________________________________________
Index: vm/jitrino/src/codegenerator/ia32/Ia32IRManager.h
===================================================================
--- vm/jitrino/src/codegenerator/ia32/Ia32IRManager.h    (revision 629393)
+++ vm/jitrino/src/codegenerator/ia32/Ia32IRManager.h    (working copy)
@@ -260,7 +260,7 @@
 
     const CallingConvention * getCallingConvention(MethodDesc * methodDesc)const;
 
-    const CallingConvention * getDefaultManagedCallingConvention() const { return &CallingConvention_DRL; }
+    const CallingConvention * getDefaultManagedCallingConvention() const { return &CallingConvention_Managed; }
 
     EntryPointPseudoInst * getEntryPointInst()const { return entryPointInst; }
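
For orientation, a minimal usage sketch of the renamed conventions (illustrative only, not part of the diff; it assumes the Jitrino::Ia32 declarations above):

    // "drl" is kept as a legacy option string, but now resolves to the
    // Managed convention. The concrete class behind the typedef is
    // ManagedCallingConventionIA32 or ManagedCallingConventionEM64T,
    // selected at compile time by the #ifdef _EM64T_ block in the header.
    const CallingConvention* cc = CallingConvention::str2cc("drl");
    assert(cc == &CallingConvention_Managed);
    Constraint saved = cc->getCalleeSavedRegs(OpndKind_GPReg);
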
Index: vm/jitrino/src/jet/bcproc.cpp
===================================================================
--- vm/jitrino/src/jet/bcproc.cpp    (revision 629393)
+++ vm/jitrino/src/jet/bcproc.cpp    (working copy)
@@ -440,24 +440,36 @@
         return;
     }
     switch(jinst.opcode) {
-    case OPCODE_IRETURN:
-        gen_return(i32);
+    case OPCODE_IRETURN: {
+        static const CallSig cs(CCONV_MANAGED, i32);
+        gen_return(cs);
         break;
-    case OPCODE_LRETURN:
-        gen_return(i64);
+    }
+    case OPCODE_LRETURN: {
+        static const CallSig cs(CCONV_MANAGED, i64);
+        gen_return(cs);
         break;
-    case OPCODE_FRETURN:
-        gen_return(flt32);
+    }
+    case OPCODE_FRETURN: {
+        static const CallSig cs(CCONV_MANAGED, flt32);
+        gen_return(cs);
         break;
-    case OPCODE_DRETURN:
-        gen_return(dbl64);
+    }
+    case OPCODE_DRETURN: {
+        static const CallSig cs(CCONV_MANAGED, dbl64);
+        gen_return(cs);
        break;
-    case OPCODE_ARETURN:
-        gen_return(jobj);
+    }
+    case OPCODE_ARETURN: {
+        static const CallSig cs(CCONV_MANAGED, jobj);
+        gen_return(cs);
         break;
-    case OPCODE_RETURN:
-        gen_return(jvoid);
+    }
+    case OPCODE_RETURN: {
+        static const CallSig cs(CCONV_MANAGED, jvoid);
+        gen_return(cs);
         break;
+    }
     default:
         assert(false);
         break;
     };
 }
Index: vm/jitrino/src/jet/cg.cpp
===================================================================
--- vm/jitrino/src/jet/cg.cpp    (revision 629393)
+++ vm/jitrino/src/jet/cg.cpp    (working copy)
@@ -48,10 +48,10 @@
 namespace Jitrino {
 namespace Jet {
 
-const CallSig ci_helper_o(CCONV_HELPERS, jobj);
-const CallSig ci_helper_v(CCONV_HELPERS);
-const CallSig ci_helper_oi(CCONV_HELPERS, jobj, i32);
-const CallSig ci_helper_linkerr(CCONV_HELPERS, jobj, i32, i32);
+const CallSig ci_helper_o(CCONV_HELPERS, jvoid, jobj);
+const CallSig ci_helper_v(CCONV_HELPERS, jvoid);
+const CallSig ci_helper_oi(CCONV_HELPERS, jobj, jobj, i32);
+const CallSig ci_helper_linkerr(CCONV_HELPERS, jvoid, jobj, i32, i32);
 
 void CodeGen::do_mov(const Val& dst_s, const Val& src_s, bool skipTypeCheck)
 {
Index: vm/jitrino/src/jet/cg.h
===================================================================
--- vm/jitrino/src/jet/cg.h    (revision 629393)
+++ vm/jitrino/src/jet/cg.h    (working copy)
@@ -324,17 +324,18 @@
     void gen_athrow(void);
 
     /**
-     * @brief Update BBState and pushes the given jtype on operand stack,
-     * as if the code just executed a call that returned the given jtype.
+     * @brief Updates BBState and pushes the return value on the operand stack,
+     * as if the code just executed a call that returned the given value.
      *
      * The item location is set according to the calling convention on
      * which registers to use to return value of the given type.
      *
     * On IA-32 (where the float/double are returned through FPU) the
     * item is spilled (code is generated to do so) into memory first.
-     * @param jtyp - type of the value 'returned'
+     * @param cs - calling signature describing the method to push the return
+     *      value for.
     */
-    void gen_save_ret(jtype jtyp);
+    void gen_save_ret(const CallSig& cs);
 
     /**
     * @brief Generates code to call one of the throw_ helpers.
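
The jet-side edits that follow all stem from one signature change: a CallSig now carries its return type explicitly, as the first jtype after the calling-convention flags, and gen_save_ret()/gen_return() take the whole CallSig instead of a bare jtype. A minimal sketch of the new shape (illustrative; CallSig itself is declared in csig.h further down):

    // Old form: argument types only; the return type was passed separately.
    //     const CallSig ci_helper_oi(CCONV_HELPERS, jobj, i32);
    //     gen_save_ret(jobj);
    // New form: the return type leads, the arguments follow, and the return
    // value's type and register are queried from the signature itself.
    static const CallSig ci_helper_oi(CCONV_HELPERS, jobj /*ret*/, jobj, i32);
    // ... emit the helper call ...
    // gen_save_ret(ci_helper_oi);  // uses ret_jt() and ret_reg(0) internally
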
Index: vm/jitrino/src/jet/cg_arith.cpp
===================================================================
--- vm/jitrino/src/jet/cg_arith.cpp    (revision 629393)
+++ vm/jitrino/src/jet/cg_arith.cpp    (working copy)
@@ -300,52 +300,62 @@
     unsigned stackFix = 0;
     bool shft = op == OPCODE_ISHL || op == OPCODE_ISHR || op == OPCODE_IUSHR;
+    const CallSig* rcs = NULL;
     if (is_f(jt)) {
+        assert(jt == dbl64 || jt == flt32);
         char * helper = NULL;
         bool is_dbl = jt == dbl64;
         if (op == OPCODE_INEG) {
-            CallSig cs(CCONV_STDCALL, jt);
-            stackFix = gen_stack_to_args(true, cs, 0, 1);
+            static const CallSig cs_dbl(CCONV_STDCALL, dbl64, dbl64);
+            static const CallSig cs_flt(CCONV_STDCALL, flt32, flt32);
+            rcs = is_dbl? &cs_dbl : &cs_flt;
+            stackFix = gen_stack_to_args(true, *rcs, 0, 1);
             helper = is_dbl ? (char*)&rt_h_neg_dbl64 : (char*)&rt_h_neg_flt32;
-            gen_call_novm(cs, helper, 1);
-            runlock(cs);
+            gen_call_novm(*rcs, helper, 1);
+            runlock(*rcs);
         }
         else {
             //if (m_jframe->dip(1).stype == st_imm && )
-            CallSig cs(CCONV_STDCALL, jt, jt, i32);
-            stackFix = gen_stack_to_args(true, cs, 0, 2);
+            static const CallSig cs_dbl(CCONV_STDCALL, dbl64, dbl64, dbl64, i32);
+            static const CallSig cs_flt(CCONV_STDCALL, flt32, flt32, flt32, i32);
+            rcs = is_dbl? &cs_dbl : &cs_flt;
+            stackFix = gen_stack_to_args(true, *rcs, 0, 2);
             helper = is_dbl ? (char*)&rt_h_dbl_a : (char*)&rt_h_flt_a;
-            gen_call_novm(cs, helper, 2, op);
-            runlock(cs);
+            gen_call_novm(*rcs, helper, 2, op);
+            runlock(*rcs);
         }
     }
     else if (jt==i64) {
         if (op == OPCODE_INEG) {
-            CallSig cs(CCONV_STDCALL, jt);
-            stackFix = gen_stack_to_args(true, cs, 0, 1);
-            gen_call_novm(cs, (void*)&rt_h_neg_i64, 1);
-            runlock(cs);
+            static const CallSig cs(CCONV_STDCALL, i64, i64);
+            rcs = &cs;
+            stackFix = gen_stack_to_args(true, *rcs, 0, 1);
+            gen_call_novm(*rcs, (void*)&rt_h_neg_i64, 1);
+            runlock(*rcs);
         }
         else if (shft) {
-            CallSig cs(CCONV_STDCALL, jt, i32, i32);
-            stackFix = gen_stack_to_args(true, cs, 0, 2);
-            gen_call_novm(cs, (void*)&rt_h_i64_shift, 2, op);
-            runlock(cs);
+            static const CallSig cs(CCONV_STDCALL, i64, i64, i32, i32);
+            rcs = &cs;
+            stackFix = gen_stack_to_args(true, *rcs, 0, 2);
+            gen_call_novm(*rcs, (void*)&rt_h_i64_shift, 2, op);
+            runlock(*rcs);
        }
        else {
-            CallSig cs(CCONV_STDCALL, jt, jt, i32);
-            stackFix = gen_stack_to_args(true, cs, 0, 2);
-            gen_call_novm(cs, (void*)&rt_h_i64_a, 2, op);
-            runlock(cs);
+            static const CallSig cs(CCONV_STDCALL, i64, i64, i64, i32);
+            rcs = &cs;
+            stackFix = gen_stack_to_args(true, *rcs, 0, 2);
+            gen_call_novm(*rcs, (void*)&rt_h_i64_a, 2, op);
+            runlock(*rcs);
         }
     }
     else {
         assert(jt==i32);
         if (op == OPCODE_INEG) {
-            CallSig cs(CCONV_STDCALL, jt);
-            stackFix = gen_stack_to_args(true, cs, 0, 1);
-            gen_call_novm(cs, (void*)&rt_h_neg_i32, 1);
-            runlock(cs);
+            static const CallSig cs(CCONV_STDCALL, i32, i32);
+            rcs = &cs;
+            stackFix = gen_stack_to_args(true, *rcs, 0, 1);
+            gen_call_novm(*rcs, (void*)&rt_h_neg_i32, 1);
+            runlock(*rcs);
         }
         else if (op == OPCODE_IADD || op == OPCODE_ISUB) {
             const Val& op2 = vstack(0);
@@ -365,13 +375,15 @@
             return;
         }
         else {
-            CallSig cs(CCONV_STDCALL, jt, jt, i32);
-            stackFix = gen_stack_to_args(true, cs, 0, 2);
-            gen_call_novm(cs, (void*)&rt_h_i32_a, 2, op);
-            runlock(cs);
+            static const CallSig cs(CCONV_STDCALL, i32, i32, i32, i32);
+            rcs = &cs;
+            stackFix = gen_stack_to_args(true, *rcs, 0, 2);
+            gen_call_novm(*rcs, (void*)&rt_h_i32_a, 2, op);
+            runlock(*rcs);
         }
     }
-    gen_save_ret(jt);
+    assert(rcs != NULL);
+    gen_save_ret(*rcs);
     if (stackFix != 0) {
         alu(alu_sub, sp, stackFix);
     }
@@ -384,14 +396,14 @@
         return;
     }
     char *helper = (char *) cnv_matrix_impls[from][to];
-    CallSig cs(CCONV_STDCALL, from);
+    const CallSig cs(CCONV_STDCALL, to, from);
     unsigned stackFix = gen_stack_to_args(true, cs, 0);
     gen_call_novm(cs, helper, 1);
     if (stackFix != 0) {
         alu(alu_sub, sp, stackFix);
     }
     runlock(cs);
-    gen_save_ret(to);
+    gen_save_ret(cs);
 }
 
 void CodeGen::gen_x_cmp(JavaByteCodes op, jtype jt)
@@ -408,26 +420,26 @@
         helper = op == OPCODE_FCMPG ? (char*)&rt_h_fcmp_g : (char*)&rt_h_fcmp_l;
     }
 
-    const CallSig cs(CCONV_STDCALL, jt, jt);
+    const CallSig cs(CCONV_STDCALL, i32, jt, jt);
     unsigned stackFix = gen_stack_to_args(true, cs, 0);
     gen_call_novm(cs, helper, 2);
     if (stackFix != 0) {
         alu(alu_sub, sp, stackFix);
     }
     runlock(cs);
-    gen_save_ret(i32);
+    gen_save_ret(cs);
         return;
     }
     assert(op == OPCODE_LCMP);
     char *helper = (char *)rt_h_lcmp;
-    static const CallSig cs(CCONV_STDCALL, i64, i64);
+    static const CallSig cs(CCONV_STDCALL, i32, i64, i64);
     unsigned stackFix = gen_stack_to_args(true, cs, 0);
     gen_call_novm(cs, helper, 2);
     if (stackFix != 0) {
         alu(alu_sub, sp, stackFix);
     }
     runlock(cs);
-    gen_save_ret(i32);
+    gen_save_ret(cs);
 }
Index: vm/jitrino/src/jet/cg_dbg.cpp
===================================================================
--- vm/jitrino/src/jet/cg_dbg.cpp    (revision 629393)
+++ vm/jitrino/src/jet/cg_dbg.cpp    (working copy)
@@ -33,7 +33,7 @@
 namespace Jitrino {
 namespace Jet {
 
-const CallSig CodeGen::cs_trace_arg(CCONV_STDCALL, jobj, i32, i32);
+const CallSig CodeGen::cs_trace_arg(CCONV_STDCALL, jvoid, jobj, i32, i32);
 
 void CodeGen::dbg_check_mem(void)
 
@@ -137,7 +137,7 @@
     strcpy(lost, tmp_buf);
     strcat(lost, id_buf);
     if (save_regs) { push_all(); }
-    static const CallSig cs(CCONV_STDCALL, jobj);
+    static const CallSig cs(CCONV_STDCALL, jvoid, jobj);
     call(is_set(DBG_CHECK_STACK), gr0, (void*)&dbg_rt_out, cs, 0, lost);
     if (save_regs) { pop_all(); }
 }
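
A consolidated sketch of the pattern cg_arith.cpp now follows (names as in the patch; gen_stack_to_args, gen_call_novm and runlock are the existing CodeGen helpers): the signatures become immutable static const objects, a pointer remembers which one was used, and gen_save_ret() is fed that same signature, so the return type can no longer drift from the helper's declaration:

    static const CallSig cs(CCONV_STDCALL, i64, i64, i64, i32); // i64 op(i64, i64, i32)
    const CallSig* rcs = &cs;                       // remember the signature used
    unsigned stackFix = gen_stack_to_args(true, *rcs, 0, 2);
    gen_call_novm(*rcs, (void*)&rt_h_i64_a, 2, op);
    runlock(*rcs);
    gen_save_ret(*rcs);                             // return info travels with cs
    if (stackFix != 0) {
        alu(alu_sub, sp, stackFix);
    }
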
Index: vm/jitrino/src/jet/cg_fld_arr.cpp
===================================================================
--- vm/jitrino/src/jet/cg_fld_arr.cpp    (revision 629393)
+++ vm/jitrino/src/jet/cg_fld_arr.cpp    (working copy)
@@ -131,7 +131,7 @@
     // stack: [.., aref, idx, val]
     if (jt == jobj && helperOk) {
         gen_write_barrier(m_curr_inst->opcode, NULL, Opnd(0));
-        static const CallSig cs_aastore(CCONV_HELPERS, jobj, i32, jobj);
+        static const CallSig cs_aastore(CCONV_HELPERS, jvoid, jobj, i32, jobj);
         unsigned stackFix = gen_stack_to_args(true, cs_aastore, 0);
         gen_call_vm(cs_aastore, rt_helper_aastore, 3);
         if (stackFix != 0) {
@@ -238,9 +238,10 @@
         Val& ref = vstack(ref_depth, true);
         where = Opnd(jt, ref.reg(), fld_offset);
     } else { //field is not resolved -> generate code to request offset
-        static const CallSig cs_get_offset(CCONV_HELPERS, iplatf, i32, i32);
+        static const CallSig cs_get_offset(CCONV_HELPERS, iplatf, iplatf, i32, i32);
         gen_call_vm(cs_get_offset, rt_helper_field_get_offset_withresolve, 0, fieldOp.enclClass, fieldOp.cpIndex, fieldOp.isPut());
         runlock(cs_get_offset);
+        AR gr_ret = cs_get_offset.ret_reg(0);
         rlock(gr_ret);
         Val& ref = vstack(ref_depth, true);
         runlock(gr_ret);
@@ -252,9 +253,10 @@
         char * fld_addr = (char*)field_get_address(fieldOp.fld);
         where = vaddr(jt, fld_addr);
     } else { //field is not resolved -> generate code to request address
-        static const CallSig cs_get_addr(CCONV_HELPERS, iplatf, i32, i32);
+        static const CallSig cs_get_addr(CCONV_HELPERS, iplatf, iplatf, i32, i32);
         gen_call_vm(cs_get_addr, rt_helper_field_get_address_withresolve, 0, fieldOp.enclClass, fieldOp.cpIndex, fieldOp.isPut());
         runlock(cs_get_addr);
+        AR gr_ret = cs_get_addr.ret_reg(0);
         where = Opnd(jt, gr_ret, 0);
     }
 }
Index: vm/jitrino/src/jet/cg_instr.cpp
===================================================================
--- vm/jitrino/src/jet/cg_instr.cpp    (revision 629393)
+++ vm/jitrino/src/jet/cg_instr.cpp    (working copy)
@@ -83,6 +83,7 @@
     // portable and 'official' way:
     gen_call_vm(platform_v, rt_helper_get_tls_base_ptr, 0);
     // The address of flag is now in gr_ret
+    AR gr_ret = platform_v.ret_reg(0);
     Opnd mem(i32, gr_ret, rt_suspend_req_flag_offset);
     alu(alu_cmp, mem, Opnd(0));
     unsigned br_off = br(z, 0, 0, taken);
@@ -125,11 +126,11 @@
 
 #ifndef _EM64T_
     // Workaround since do_mov do not put jlong on stack in gen_args on ia32
-    const CallSig cs_ti_fmodif(CCONV_HELPERS, jobj, jobj, i32, i32, jobj, jobj);
+    static const CallSig cs_ti_fmodif(CCONV_HELPERS, jvoid, jobj, jobj, i32, i32, jobj, jobj);
     Val vlocation((jlong)m_pc);
     Val vlocationHi((jlong)0);
 #else
-    const CallSig cs_ti_fmodif(CCONV_HELPERS, jobj, jobj, i64, jobj, jobj);
+    static const CallSig cs_ti_fmodif(CCONV_HELPERS, jvoid, jobj, jobj, i64, jobj, jobj);
     Val vlocation((jlong)m_pc);
 #endif
@@ -222,11 +223,11 @@
 
 #ifndef _EM64T_
     // Workaround since do_mov do not put jlong on stack in gen_args on ia32
-    const CallSig cs_ti_faccess(CCONV_HELPERS, jobj, jobj, i32, i32, jobj);
+    static const CallSig cs_ti_faccess(CCONV_HELPERS, jvoid, jobj, jobj, i32, i32, jobj);
     Val vlocation((jlong)m_pc);
     Val vlocationHi((jlong)0);
 #else
-    const CallSig cs_ti_faccess(CCONV_HELPERS, jobj, jobj, i64, jobj);
+    static const CallSig cs_ti_faccess(CCONV_HELPERS, jvoid, jobj, jobj, i64, jobj);
     Val vlocation((jlong)m_pc);
 #endif
     rlock(cs_ti_faccess);
@@ -354,7 +355,7 @@
 
     // WB4C has the following signature:
     //(object written to, slot written to, value written to slot)
-    static const CallSig wb4c_sig(CCONV_CDECL, jobj, jobj, jobj);
+    static const CallSig wb4c_sig(CCONV_CDECL, jvoid, jobj, jobj, jobj);
     //static char* wb4c_helper = xxx_gc_heap_slot_write_ref
     static char* wb4c_helper = (char*)vm_get_rt_support_addr(VM_RT_GC_HEAP_WRITE_REF);
@@ -365,7 +366,7 @@
     }
     // WB4J has the following signature:
     //(object written to, slot written to, value written to slot, metaA, metaB, mode)
-    static const CallSig wb4j_sig(CCONV_MANAGED, jobj, jobj, jobj, i32, i32, i32);
+    static const CallSig wb4j_sig(CCONV_CDECL, jvoid, jobj, jobj, jobj, i32, i32, i32);
     static char* wb4j_helper = NULL;
Index: vm/jitrino/src/jet/cg_meth.cpp
===================================================================
--- vm/jitrino/src/jet/cg_meth.cpp    (revision 629393)
+++ vm/jitrino/src/jet/cg_meth.cpp    (working copy)
@@ -44,7 +44,7 @@
 /**
  * CallSig for monitor_enter and monitor_exit helpers.
  */
-static CallSig cs_mon(CCONV_HELPERS, jobj);
+static const CallSig cs_mon(CCONV_HELPERS, jvoid, jobj);
 
 void Compiler::gen_prolog(void) {
@@ -515,7 +515,7 @@
     alu(alu_cmp, mem, Opnd(0));
     unsigned br_off = br(z, 0, 0, taken);
 
-    static const CallSig cs_ti_menter(CCONV_HELPERS, jobj);
+    static const CallSig cs_ti_menter(CCONV_HELPERS, jvoid, jobj);
     gen_call_vm(cs_ti_menter, rt_helper_ti_method_enter, 0, m_method);
     patch(br_off, ip());
@@ -552,8 +552,9 @@
     }
 }
 
-void Compiler::gen_return(jtype retType)
+void Compiler::gen_return(const CallSig& cs)
 {
+    jtype retType = cs.ret_jt();
     if (is_set(DBG_TRACE_EE)) {
         gen_dbg_rt(true, "exiting : %s", meth_fname());
     }
@@ -608,7 +609,7 @@
 
     if (compilation_params.exe_notify_method_exit) {
         // JVMTI helper takes pointer to return value and method handle
-        const CallSig cs_ti_mexit(CCONV_HELPERS, jobj, jobj);
+        static const CallSig cs_ti_mexit(CCONV_HELPERS, jvoid, jobj, jobj);
         // The call is a bit unusual, and is processed as follows:
         // we load an address of the top of the operand stack into
         // a temporary register, and then pass this value as pointer
@@ -649,26 +650,28 @@
         patch(br_off, ip());
     }
 
+    AR out_reg = cs.ret_reg(0);
     if (is_f(retType)) {
-#ifdef _IA32_
-        // On IA-32 always swap to memory first, then upload into FPU
-        vswap(0);
-        ld(retType, fr_ret, m_base, vstack_off(0));
-#else
-        // Make sure the item is not immediate
-        Val op = vstack(0, vis_imm(0));
-        if (!op.is_reg() || op.reg() != fr_ret) {
-            Opnd ret(retType, fr_ret);
+        if (out_reg == fp0) {
+            // On IA-32 always swap to memory first, then upload into FPU
+            vswap(0);
+            ld(retType, out_reg, m_base, vstack_off(0));
+        } else {
+            // Make sure the item is not immediate
+            Val op = vstack(0, vis_imm(0));
+            if (!op.is_reg() || op.reg() != out_reg) {
+                Opnd ret(retType, out_reg);
                 mov(ret, op.as_opnd());
+            }
         }
-#endif
     }
     else if (is_big(retType)) {
 #ifdef _IA32_
         vswap(0);
         vswap(1);
-        ld4(eax.reg(), m_base, vstack_off(0));
-        ld4(edx.reg(), m_base, vstack_off(1));
+        AR out_reg1 = cs.ret_reg(1);
+        ld4(out_reg, m_base, vstack_off(0));
+        ld4(out_reg1, m_base, vstack_off(1));
 #else
         assert(false && "Unexpected case - 'big' type on EM64T");
 #endif
@@ -676,8 +679,8 @@
     }
     else if (retType != jvoid) {
         Val& op = vstack(0);
-        if (!op.is_reg() || op.reg() != gr_ret) {
-            Opnd ret(retType, gr_ret);
+        if (!op.is_reg() || op.reg() != out_reg) {
+            Opnd ret(retType, out_reg);
             mov(ret, op.as_opnd());
         }
     }
@@ -740,7 +743,7 @@
 
     const JInst& jinst = *m_curr_inst;
 
-    CallSig cs(CCONV_MANAGED, args);
+    CallSig cs(CCONV_MANAGED, retType, args);
     for (unsigned i=0; i lock it
@@ -833,7 +839,7 @@
     else if (opcod == OPCODE_INVOKEINTERFACE) {
         // if it's INVOKEINTERFACE, then first resolve it
         Class_Handle klass = method_get_class(meth);
-        const CallSig cs_vtbl(CCONV_HELPERS, jobj, jobj);
+        const CallSig cs_vtbl(CCONV_HELPERS, iplatf, jobj, jobj);
         // Prepare args for ldInterface helper
         if (cs_vtbl.reg(0) == gr_x) {
             assert(cs_vtbl.size() != 0);
@@ -848,7 +854,7 @@
             mov(cs_vtbl.get(0), thiz.as_opnd());
         }
         gen_call_vm(cs_vtbl, rt_helper_get_vtable, 1, klass);
-
+        AR gr_ret = cs_vtbl.ret_reg(0);
         //
         // Method's vtable is in gr_ret now, prepare stack
         //
@@ -902,7 +908,7 @@
     runlock(cs);
 
     if (retType != jvoid) {
-        gen_save_ret(retType);
+        gen_save_ret(cs);
     }
     if (stackFix != 0) {
         alu(alu_sub, sp, stackFix);
@@ -939,10 +945,11 @@
     }
 }
 
-void CodeGen::gen_save_ret(jtype jt)
+void CodeGen::gen_save_ret(const CallSig& cs)
 {
+    jtype jt = cs.ret_jt();
     assert(jt != jvoid);
-    AR ar = is_f(jt) ? fr_ret : gr_ret;
+    AR ar = cs.ret_reg(0);
     if (jt==i8) {
         sx1(Opnd(i32, ar), Opnd(jt,ar));
         jt = i32;
@@ -956,7 +963,7 @@
         jt = i32;
     }
 #ifdef _IA32_
-    if(ar == fr_ret) {
+    if(ar == fp0) {
         // Cant use vstack_off right here, as the item is not yet pushed.
         unsigned slot = m_jframe->size();
         if (is_wide(jt)) {
@@ -964,13 +971,12 @@
         }
         vpush(Val(jt, m_base, voff(m_stack.stack_slot(slot))));
         //
-        st(jt, fr_ret, m_base, vstack_off(0));
+        st(jt, fp0, m_base, vstack_off(0));
     }
     else if (is_big(jt)) {
         assert(jt==i64);
-        static const AR eax = virt(RegName_EAX);
-        static const AR edx = virt(RegName_EDX);
-        vpush2(Val(jt, eax), Val(jt, edx));
+        AR ar1 = cs.ret_reg(1);
+        vpush2(Val(jt, ar), Val(jt, ar1));
     }
     else
 #endif
@@ -985,7 +991,7 @@
         AR gtmp = gr0;
         //ld(jobj, gtmp, bp, m_stack.stack_slot(m_jframe->depth2slot(0)));
         Opnd tmp(jt, gtmp);
-        mov(tmp, Opnd(jt, gr_ret));
+        mov(tmp, Opnd(jt, ar));
         if (cs_trace_arg.reg(0) != gr_x) {
             if (cs_trace_arg.size() != 0) {
                 alu(alu_sub, sp, cs_trace_arg.size());
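
In gen_save_ret() above, the fixed gr_ret/fr_ret globals are gone: everything about the return value now comes from the signature. A hedged summary of the cases the rewritten code distinguishes (register names are the patch's own: fp0 is the FPU top-of-stack, and wide IA-32 results use a register pair):

    jtype jt = cs.ret_jt();        // declared return type
    AR    ar = cs.ret_reg(0);      // first (or only) return register
    if (ar == fp0) {
        // IA-32 FPU return: spill through memory via st(), as in the patch
    } else if (is_big(jt)) {
        AR hi = cs.ret_reg(1);     // wide i64 result occupies a register pair
        // vpush2(Val(jt, ar), Val(jt, hi));
    } else {
        // plain integer/reference result already sits in ar
    }
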
Index: vm/jitrino/src/jet/cg_obj.cpp
===================================================================
--- vm/jitrino/src/jet/cg_obj.cpp    (revision 629393)
+++ vm/jitrino/src/jet/cg_obj.cpp    (working copy)
@@ -55,7 +55,7 @@
         return;
     }
     assert(lazy);
-    static const CallSig cs_newarray_withresolve(CCONV_HELPERS, iplatf, i32, i32);
+    static const CallSig cs_newarray_withresolve(CCONV_HELPERS, jobj, iplatf, i32, i32);
     Val sizeVal = vstack(0);
     // setup constant parameters first,
     Val vclass(iplatf, enclClass);
@@ -63,7 +63,7 @@
     gen_args(cs_newarray_withresolve, 0, &vclass, &vcpIdx, &sizeVal);
     gen_call_vm(cs_newarray_withresolve, rt_helper_new_array_withresolve, 3);
     vpop();// pop array size
-    gen_save_ret(jobj);
+    gen_save_ret(cs_newarray_withresolve);
     // the returned can not be null, marking as such.
     vstack(0).set(VA_NZ);
@@ -82,7 +82,7 @@
         gen_call_throw(ci_helper_linkerr, rt_helper_throw_linking_exc, 0, m_klass, jinst.op0, jinst.opcode);
     }
-    static const CallSig cs_new_arr(CCONV_HELPERS, i32, jobj);
+    static const CallSig cs_new_arr(CCONV_HELPERS, jobj, i32, jobj);
     unsigned stackFix = gen_stack_to_args(true, cs_new_arr, 0, 1);
     gen_call_vm(cs_new_arr, rt_helper_new_array, 1, ah);
     runlock(cs_new_arr);
@@ -90,7 +90,7 @@
     if (stackFix != 0) {
         alu(alu_sub, sp, stackFix);
     }
-    gen_save_ret(jobj);
+    gen_save_ret(cs_new_arr);
     // the returned can not be null, marking as such.
     vstack(0).set(VA_NZ);
     // allocation assumes GC invocation
@@ -118,9 +118,10 @@
     bool resolve = !lazy || class_is_cp_entry_resolved(m_compileHandle, enclClass, cpIndex);
     if(!resolve) {
         assert(lazy);
-        static CallSig ci_get_class_withresolve(CCONV_HELPERS, iplatf, i32);
+        static const CallSig ci_get_class_withresolve(CCONV_HELPERS, jobj, iplatf, i32);
         gen_call_vm(ci_get_class_withresolve, rt_helper_get_class_withresolve, 0, enclClass, cpIndex);
         runlock(ci_get_class_withresolve);
+        AR gr_ret = ci_get_class_withresolve.ret_reg(0);
         klassVal = Val(jobj, gr_ret);
     } else {
         klass = resolve_class(m_compileHandle, enclClass, cpIndex);
@@ -129,7 +130,7 @@
     rlock(klassVal); // to protect gr_ret while setting up helper args
 
     // note: need to restore the stack - the cdecl-like function
-    CallSig ci(CCONV_CDECL|CCONV_MEM|CCONV_L2R, args);
+    CallSig ci(CCONV_CDECL|CCONV_MEM|CCONV_L2R|CCONV_RETURN_FP_THROUGH_FPU, jobj, args);
     unsigned stackFix = gen_stack_to_args(true, ci, 0, num_dims);
     runlock(klassVal);
@@ -146,7 +147,7 @@
     if (stackFix != 0) {
         alu(alu_sub, sp, stackFix);
     }
-    gen_save_ret(jobj);
+    gen_save_ret(ci);
     // the returned can not be null, marking as such.
     vstack(0).set(VA_NZ);
     // allocation assumes GC invocation
@@ -158,9 +159,11 @@
 {
     bool lazy = m_lazy_resolution;
     bool resolve = !lazy || class_is_cp_entry_resolved(m_compileHandle, enclClass, cpIndex);
+    const CallSig* ci = NULL;
     if (resolve) {
         Class_Handle klass = resolve_class_new(m_compileHandle, enclClass, cpIndex);
         if (klass == NULL) {
+            ci = &ci_helper_linkerr;
             gen_call_throw(ci_helper_linkerr, rt_helper_throw_linking_exc, 0, enclClass, cpIndex, OPCODE_NEW);
         } else {
             if ( klass!=enclClass && class_needs_initialization(klass)) {
@@ -168,15 +171,17 @@
             }
             unsigned size = class_get_boxed_data_size(klass);
             Allocation_Handle ah = class_get_allocation_handle(klass);
-            static CallSig ci_new(CCONV_HELPERS, i32, jobj);
+            static const CallSig ci_new(CCONV_HELPERS, jobj, i32, jobj);
+            ci = &ci_new;
             gen_call_vm(ci_new, rt_helper_new, 0, size, ah);
         }
     } else {
         assert(lazy);
-        static CallSig ci_new_with_resolve(CCONV_HELPERS, iplatf, i32);
+        static const CallSig ci_new_with_resolve(CCONV_HELPERS, jobj, iplatf, i32);
+        ci = &ci_new_with_resolve;
         gen_call_vm(ci_new_with_resolve, rt_helper_new_withresolve, 0, enclClass, cpIndex);
     }
-    gen_save_ret(jobj);
+    gen_save_ret(*ci);
     vstack(0).set(VA_NZ);// the returned can not be null, marking as such.
     m_bbstate->seen_gcpt = true;// allocation assumes GC invocation
@@ -193,19 +198,23 @@
         // resolution has failed
         gen_call_throw(ci_helper_linkerr, rt_helper_throw_linking_exc, 0, enclClass, cpIdx, opcode);
     }
-    static const CallSig cs(CCONV_HELPERS, jobj, jobj);
+    static const CallSig cs_checkcast(CCONV_HELPERS, jobj, jobj, jobj);
+    static const CallSig cs_instanceof(CCONV_HELPERS, i32, jobj, jobj);
+    const CallSig& cs = (opcode == OPCODE_CHECKCAST) ? cs_checkcast : cs_instanceof;
+    char * helper = (opcode == OPCODE_CHECKCAST) ? rt_helper_checkcast : rt_helper_instanceof;
     unsigned stackFix = gen_stack_to_args(true, cs, 0, 1);
-    char * helper = opcode == OPCODE_CHECKCAST ? rt_helper_checkcast : rt_helper_instanceof;
     gen_call_vm(cs, helper, 1, klass);
     if (stackFix != 0) {
         alu(alu_sub, sp, stackFix);
     }
     runlock(cs);
-    gen_save_ret(opcode == OPCODE_CHECKCAST ? jobj : i32);
+    gen_save_ret(cs);
     } else {
         assert(lazy);
-        static const CallSig cs_with_resolve(CCONV_HELPERS, iplatf, i32, jobj);
-        char * helper = opcode == OPCODE_CHECKCAST ? rt_helper_checkcast_withresolve : rt_helper_instanceof_withresolve;
+        static const CallSig cs_checkcast_with_resolve(CCONV_HELPERS, jobj, iplatf, i32, jobj);
+        static const CallSig cs_instanceof_with_resolve(CCONV_HELPERS, i32, iplatf, i32, jobj);
+        const CallSig& cs_with_resolve = (opcode == OPCODE_CHECKCAST) ? cs_checkcast_with_resolve : cs_instanceof_with_resolve;
+        char * helper = (opcode == OPCODE_CHECKCAST) ? rt_helper_checkcast_withresolve : rt_helper_instanceof_withresolve;
         Val tos = vstack(0);
         // setup constant parameters first,
         Val vclass(iplatf, enclClass);
@@ -214,7 +223,7 @@
         gen_call_vm(cs_with_resolve, helper, 3);
         runlock(cs_with_resolve);
         vpop();//pop obj
-        gen_save_ret(opcode == OPCODE_CHECKCAST ? jobj : i32);
+        gen_save_ret(cs_with_resolve);
     }
 }
@@ -222,7 +231,7 @@
 {
     const JInst& jinst = *m_curr_inst;
     gen_check_null(0);
-    static const CallSig cs_mon(CCONV_HELPERS, jobj);
+    static const CallSig cs_mon(CCONV_HELPERS, jvoid, jobj);
     unsigned stackFix = gen_stack_to_args(true, cs_mon, 0);
     gen_call_vm(cs_mon, jinst.opcode == OPCODE_MONITORENTER ?
@@ -235,7 +244,7 @@
 
 void CodeGen::gen_athrow(void)
 {
-    static const CallSig cs_throw(CCONV_HELPERS, jobj);
+    static const CallSig cs_throw(CCONV_HELPERS, jvoid, jobj);
     unsigned stackFix = gen_stack_to_args(true, cs_throw, 0);
     gen_call_vm(cs_throw, rt_helper_throw, 1);
     runlock(cs_throw);
Index: vm/jitrino/src/jet/cg_regs.cpp
===================================================================
--- vm/jitrino/src/jet/cg_regs.cpp    (revision 629393)
+++ vm/jitrino/src/jet/cg_regs.cpp    (working copy)
@@ -694,14 +694,14 @@
     // if top of the stack is currently not on the gr_ret, then
     // force it to be there
     Val& ev = m_jframe->dip(0);
-    if (!ev.is_reg() || ev.reg() != gr_ret) {
+    if (!ev.is_reg() || ev.reg() != gr0) {
         // locals were just spilled and no other stack items left -
-        // therefore gr_ret must be unused, simply load the Exception
-        assert(rrefs(gr_ret) == 0);
-        Opnd reg(ev.jt(), gr_ret);
+        // therefore gr0 must be unused, simply load the Exception
+        assert(rrefs(gr0) == 0);
+        Opnd reg(ev.jt(), gr0);
         do_mov(reg, ev.as_opnd());
         rfree(ev);
-        ev.to_reg(gr_ret);
+        ev.to_reg(gr0);
         rref(ev);
     }
 }
@@ -718,8 +718,8 @@
         assert(m_jframe->size() == 1);
         Val& s = m_jframe->dip(0);
         if (!s.is_reg()) {
-            rref(gr_ret);
-            s = Val(jobj, gr_ret);
+            rref(gr0);
+            s = Val(jobj, gr0);
             // We're entering exception handler - that do not have 'direct'
             // (non exception) ways in it - the object on the top of the
             // stack is exception and is guaranteed to be non-null.
@@ -728,7 +728,7 @@
             }
         }
         else {
-            assert(s.reg() == gr_ret);
+            assert(s.reg() == gr0);
         }
     }
     // We always process 0th BB as multiref BB - see also gen_prolog()
@@ -768,7 +768,7 @@
     for (unsigned i=0; i<m_jframe->size(); i++) {
         Val& s = m_jframe->dip(i);
         if (i==0 && bbinfo.ehandler) {
-            assert(s.is_reg() && s.reg() == gr_ret);
+            assert(s.is_reg() && s.reg() == gr0);
         }
         else {
             s = Val(s.jt(), m_base, vstack_off(i));
Index: vm/jitrino/src/jet/cg_stk.cpp
===================================================================
--- vm/jitrino/src/jet/cg_stk.cpp    (revision 629393)
+++ vm/jitrino/src/jet/cg_stk.cpp    (working copy)
@@ -68,7 +68,7 @@
     }
     assert(m_curr_inst->opcode != OPCODE_LDC2_W);
     gen_call_vm(ci_helper_oi, rt_helper_ldc_string, 0, m_klass, m_curr_inst->op0);
-    gen_save_ret(jobj);
+    gen_save_ret(ci_helper_oi);
     vstack(0).set(VA_NZ);
     m_bbstate->seen_gcpt = true;
 }
Index: vm/jitrino/src/jet/compiler.cpp
===================================================================
--- vm/jitrino/src/jet/compiler.cpp    (revision 629393)
+++ vm/jitrino/src/jet/compiler.cpp    (working copy)
@@ -1033,7 +1033,8 @@
         // That's why the exception object acts like a return value - for
         // example on IA32 it's in EAX.
         //
-        gen_save_ret(jobj);
+        static const CallSig cs(CCONV_MANAGED, jobj);
+        gen_save_ret(cs);
     }
 
     if (is_set(DBG_TRACE_CG)) {
Index: vm/jitrino/src/jet/compiler.h
===================================================================
--- vm/jitrino/src/jet/compiler.h    (revision 629393)
+++ vm/jitrino/src/jet/compiler.h    (working copy)
@@ -294,7 +294,7 @@
     /**
      * @brief Generates method's epilogue (on RETURN instructions) code.
     */
-    void gen_return(jtype retType);
+    void gen_return(const CallSig& cs);
 
     /**
     * @brief Prepares BBState as it was left by gen_bb_leave().
Index: vm/jitrino/src/jet/csig.cpp
===================================================================
--- vm/jitrino/src/jet/csig.cpp    (revision 629393)
+++ vm/jitrino/src/jet/csig.cpp    (working copy)
@@ -29,15 +29,31 @@
 namespace Jitrino {
 namespace Jet {
 
-const CallSig helper_v(CCONV_HELPERS);
-const CallSig platform_v(CCONV_HELPERS);
+const CallSig helper_v(CCONV_HELPERS, jvoid);
+const CallSig platform_v(CCONV_HELPERS, iplatf);
 
-void CallSig::init(void)
+void CallSig::init()
 {
     unsigned num = (unsigned)m_args.size();
     m_data.resize(num);
     unsigned fps = 0, gps = 0;
+    // Assign return value
+    m_ret_reg[0] = -1;
+    m_ret_reg[1] = -1;
+    if (is_f(m_ret_jt)) {
+        if (m_cc & CCONV_RETURN_FP_THROUGH_FPU) {
+            m_ret_reg[0] = fp0;
+        } else {
+            m_ret_reg[0] = fr0;
+        }
+    } else if (m_ret_jt != jvoid) {
+        m_ret_reg[0] = gr0;
+        if (is_wide(m_ret_jt)) {
+            m_ret_reg[1] = gr3;
+        }
+    }
+
     //
     // Assign registers
     //
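
The assignments in CallSig::init() give ret_reg() the following observable behavior, a sketch under the patch's register-naming assumptions (gr3 pairs with gr0 for wide results, which should be EDX:EAX on IA-32):

    static const CallSig cs_l(CCONV_STDCALL, i64, i64, i64);
    AR lo = cs_l.ret_reg(0);   // gr0 - low half of the i64 result
    AR hi = cs_l.ret_reg(1);   // gr3 - high half (wide results only)

    static const CallSig cs_f(CCONV_STDCALL, flt32, flt32);
    AR fr = cs_f.ret_reg(0);   // fp0, since CCONV_STDCALL_IA32 sets
                               // CCONV_RETURN_FP_THROUGH_FPU; fr0 (XMM0) otherwise

    static const CallSig cs_v(CCONV_HELPERS, jvoid);
    AR nr = cs_v.ret_reg(0);   // ar_x - void functions have no return register
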
Index: vm/jitrino/src/jet/csig.h
===================================================================
--- vm/jitrino/src/jet/csig.h    (revision 629393)
+++ vm/jitrino/src/jet/csig.h    (working copy)
@@ -51,11 +51,6 @@
 #define CCONV_CALLER_POPS (0x00000002)
 
 /**
- * @brief All args go though memory.
- */
-#define CCONV_MEM (0x00000020)
-
-/**
  * @brief When entering a function, obey the (sp)%%4 == 0 rule.
  */
 #define CCONV_STACK_ALIGN4 (0x00000004)
@@ -72,20 +67,29 @@
 #define CCONV_STACK_ALIGN16 (0x00000010)
 
 /**
- * Mask to extract stack alignment form calling convention.
+ * @brief Mask to extract stack alignment from the calling convention.
 */
 #define CCONV_STACK_ALIGN_MASK (CCONV_STACK_ALIGN4 | CCONV_STACK_ALIGN_HALF16 | CCONV_STACK_ALIGN16)
 
+/**
+ * @brief All args go through memory.
+ */
+#define CCONV_MEM (0x00000020)
 
 /**
+ * @brief Use FPU register stack to return floating point, xmm0 otherwise.
+ */
+#define CCONV_RETURN_FP_THROUGH_FPU (0x00000040)
+
+/**
 * @brief IA-32's stdcall convention.
 */
-#define CCONV_STDCALL_IA32 (CCONV_MEM)
+#define CCONV_STDCALL_IA32 (CCONV_MEM | CCONV_RETURN_FP_THROUGH_FPU)
 
 /**
 * @brief IA-32's cdecl convention.
 */
-#define CCONV_CDECL_IA32 (CCONV_CALLER_POPS | CCONV_MEM)
+#define CCONV_CDECL_IA32 (CCONV_CALLER_POPS | CCONV_MEM | CCONV_RETURN_FP_THROUGH_FPU)
 
 #ifdef _EM64T_
 /**
@@ -122,7 +126,7 @@
 * @brief A special case - VM's helper MULTIANEWARRAY always has cdecl-like
 * convention.
 */
-#define CCONV_MULTIANEWARRAY CCONV_CDECL_IA32
+#define CCONV_MULTIANEWARRAY    CCONV_CDECL_IA32
 
 #ifdef _EM64T_
 /**
@@ -133,7 +137,7 @@
 #define CCONV_MANAGED CCONV_MANAGED_IA32
 #endif
 
-#define CCONV_HELPERS CCONV_STDCALL
+#define CCONV_HELPERS   CCONV_STDCALL
 
 ///@} // ~JET_CCONV
 
@@ -192,11 +196,12 @@
     /**
      * @brief Initializes CallSig object with the given arg types.
     */
-    CallSig(unsigned cc,
+    CallSig(unsigned cc, jtype ret = jvoid,
             jtype arg0=jvoid, jtype arg1=jvoid, jtype arg2=jvoid,
             jtype arg3=jvoid, jtype arg4=jvoid, jtype arg5=jvoid)
     {
         m_cc = cc;
+        m_ret_jt = ret;
         if (arg0 != jvoid) { m_args.push_back(arg0); }
         if (arg1 != jvoid) { m_args.push_back(arg1); }
         if (arg2 != jvoid) { m_args.push_back(arg2); }
@@ -232,8 +237,9 @@
     /**
      * @brief Constructs and initializes CallSig object with the given
      * calling convention and list of args types.
     */
-    CallSig(unsigned cc, const vector<jtype>& args)
+    CallSig(unsigned cc, const jtype ret, const vector<jtype>& args)
     {
+        m_ret_jt = ret;
         init(cc, args);
     }
 
@@ -302,6 +308,22 @@
     }
 
     /**
+     * @param i slot number. For example on IA32 ret_reg(0) is eax, ret_reg(1) is edx.
+     * @returns register which holds the return value, or ar_x
+     *      if the value comes through the memory.
+     */
+    AR ret_reg(unsigned i) const
+    {
+        assert(i < 2);
+        return (m_ret_reg[i] <= 0) ? ar_x : (AR)m_ret_reg[i];
+    }
+
+    /**
+     * @returns type of return value.
+     */
+    jtype ret_jt() const { return m_ret_jt; }
+
+    /**
      * @returns Offset (in bytes) from #sp of the given argument, or -1 if
      * the argument is passed on register.
     */
@@ -366,7 +388,7 @@
     *
     * Counts args offsets and required stack size.
     */
-    void init(void);
+    void init();
     /**
     * @brief An info about argument types.
     */
@@ -399,6 +421,11 @@
     * CallSig object.
    */
     unsigned m_cc;
+    /**
+     * @brief Registers holding the return value (-1 when unused).
+     */
+    int m_ret_reg[2];
+    jtype m_ret_jt;
 };
 
 /**
Index: vm/jitrino/src/jet/enc.cpp
===================================================================
--- vm/jitrino/src/jet/enc.cpp    (revision 629393)
+++ vm/jitrino/src/jet/enc.cpp    (working copy)
@@ -191,11 +191,9 @@
     if (ar == sp) {
         return "sp";
     }
-#ifdef _IA32_
-    if (ar == fr_ret) {
-        return "fr_ret";
+    if (ar == fp0) {
+        return "fp0";
     }
-#endif
     if (platf) {
         return to_str_impl(ar);
     }
Index: vm/jitrino/src/jet/enc.h
===================================================================
--- vm/jitrino/src/jet/enc.h    (revision 629393)
+++ vm/jitrino/src/jet/enc.h    (working copy)
@@ -286,9 +286,6 @@
 * The type index is unique only within the given group of registers and
 * lies in the range of [gr_idx(gr0); gr_idx(gr0+gr_total-1)] for gr
 * registers and [fr_idx(fr0); fr_idx(fr0+fr_total-1)], inclusive.
- *
- * @note On IA-32, fr_ret is treated in a special way - see
- * Encoder class description.
 *
 */
 enum AR {
@@ -313,15 +310,8 @@
     //
     // Specials
     //
-#ifdef _EM64T_
-    fr_ret = fr0,
-#else
-    fr_ret,
-#endif
-    gr_ret = gr0,
+    fp0,    // top FPU stacked register
     //
-    //
-    //
 #ifdef _EM64T_
     gr_num=15, /// not including sp
     gr_total = 16, /// including sp
@@ -340,7 +330,7 @@
 */
 inline bool is_f(AR ar)
 {
-    return (fr0 <= ar && ar < (fr0+gr_total));
+    return (fr0 <= ar && ar < (fr0+fr_total));
 }
 
 /**
@@ -420,12 +410,6 @@
 */
 inline unsigned ar_idx(AR ar)
 {
-#ifdef _IA32_
-    if (ar == fr_ret) {
-        // fake usage of fr0
-        ar = fr0;
-    }
-#endif
     assert(ar-gr0 < ar_total);
     return ar-gr0;
 }
@@ -824,12 +808,10 @@
 * - (all) 'PUSH fr' is emulated as
 *   'sub sp, num_of_slots_for(dbl64) ; mov [sp], fr'. 'POP fr' is emulated
 *   the same way.
- * - (IA-32)fr_ret is 'virtual' register and is treated in a special way -
- *   all IA-32 calling conventions use FPU stack to return float point
- *   values, so only 'mov/ld fr_ret, mem' and 'mov/st mem, fr_ret' are
+ * - (IA-32) Only 'mov/ld fp0, mem' and 'mov/st mem, fp0' are
 *   allowed. In this case, FST/FLD instructions are generated. \b NOTE:
 *   this simulation is only done in #fld and #fst methods, you can \b not
- *   do #mov with fr_ret. This limitation is intentional, to remove
+ *   do #mov with fp0. This limitation is intentional, to remove
 *   additional check and branch from the hot exectuion path in #mov.
 *
 * call() operation is made indirect only (trough a GR register). This is
@@ -1076,7 +1058,7 @@
     * Loads from memory into the specified register.
     *
     * Just a wrapper around mov().
-     * @note On IA32 fr_ret loads are threated in a special way.
+     * @note On IA32 fp0 loads are treated in a special way.
     */
     void ld(jtype jt, AR ar, AR base, int disp=0, AR index = ar_x,
             unsigned scale=0)
@@ -1106,7 +1088,7 @@
     * Loads from memory into the specified FR register.
     *
     * Just a wrapper around mov().
-     * @note On IA32 fr_ret loads are threated in a special way.
+     * @note On IA32 fp0 loads are treated in a special way.
     */
     void fld(jtype jt, AR ar, AR base, int disp=0, AR index = ar_x,
             unsigned scale=0);
@@ -1114,7 +1096,7 @@
     * Stores from the specified FR register into memory .
     *
     * Just a wrapper around mov().
-     * @note On IA32 fr_ret stores are threated in a special way.
+     * @note On IA32 fp0 stores are treated in a special way.
     */
     void fst(jtype jt, AR ar, AR base, int disp=0, AR index = gr_x,
             unsigned scale=0);
Index: vm/jitrino/src/jet/enc_ia32.cpp
===================================================================
--- vm/jitrino/src/jet/enc_ia32.cpp    (revision 629393)
+++ vm/jitrino/src/jet/enc_ia32.cpp    (working copy)
@@ -78,7 +78,9 @@
         reg = getRegName(OpndKind_XMMReg, jt == jvoid ? OpndSize_64 : to_size(jt), idx);
     }
-    else {
+    else if (ar == fp0) {
+        reg = RegName_FP0;
+    } else {
         assert(idxget_class()->get_name()->bytes << " "
@@ -242,11 +298,14 @@
     arg_words += argId;
     argId = sz - argId;
 
+    static const IntFuncPtr invoke_managed_func = gen_invoke_int_managed_func();
+    static const FloatFuncPtr invoke_float_managed_func = gen_invoke_float_managed_func();
+    static const DoubleFuncPtr invoke_double_managed_func = gen_invoke_double_managed_func();
+
     switch(ret_type) {
     case JAVA_TYPE_VOID:
         invoke_managed_func(arg_words, argId, meth_addr);
         break;
-
     case JAVA_TYPE_CLASS:
     case JAVA_TYPE_ARRAY:
     case JAVA_TYPE_STRING:
@@ -268,11 +327,11 @@
     case JAVA_TYPE_CHAR:
     case JAVA_TYPE_SHORT:
     case JAVA_TYPE_INT:
-        resultPtr->i = ((IntFuncPtr)invoke_managed_func)(arg_words, argId, meth_addr);
+        resultPtr->i = invoke_managed_func(arg_words, argId, meth_addr);
         break;
 
     case JAVA_TYPE_FLOAT:
-        resultPtr->f = ((FloatFuncPtr)invoke_managed_func)(arg_words, argId, meth_addr);
+        resultPtr->f = invoke_float_managed_func(arg_words, argId, meth_addr);
         break;
 
     case JAVA_TYPE_LONG:
@@ -280,7 +339,7 @@
         break;
 
     case JAVA_TYPE_DOUBLE:
-        resultPtr->d = ((DoubleFuncPtr)invoke_managed_func)(arg_words, argId, meth_addr);
+        resultPtr->d = invoke_double_managed_func(arg_words, argId, meth_addr);
        break;
 
     default: