From ace78a2ee1ee68a86160145adea9426cac65950b Mon Sep 17 00:00:00 2001 From: Alexander Astapchuk Date: Sat, 13 Jan 2007 17:07:22 +0600 Subject: [PATCH 1/6] [drlvm][fastcc]Fast calling convention for IA-32; part vm/jitrino --- .../codegenerator/ia32/Ia32CallingConvention.cpp | 389 ++++++++++++-------- .../src/codegenerator/ia32/Ia32CallingConvention.h | 130 ++++--- vm/jitrino/src/codegenerator/ia32/Ia32CgUtils.cpp | 13 +- .../src/codegenerator/ia32/Ia32CodeEmitter.cpp | 29 ++- .../src/codegenerator/ia32/Ia32IRManager.cpp | 125 +++++--- vm/jitrino/src/codegenerator/ia32/Ia32IRManager.h | 32 ++- vm/jitrino/src/codegenerator/ia32/Ia32Inst.cpp | 23 +- vm/jitrino/src/codegenerator/ia32/Ia32Inst.h | 12 +- .../src/codegenerator/ia32/Ia32InternalTrace.cpp | 15 +- vm/jitrino/src/codegenerator/ia32/Ia32Printer.cpp | 49 ++- .../codegenerator/ia32/Ia32RuntimeInterface.cpp | 61 ++-- .../src/codegenerator/ia32/Ia32StackInfo.cpp | 43 ++- vm/jitrino/src/codegenerator/ia32/Ia32StackInfo.h | 34 +-- .../src/codegenerator/ia32/Ia32StackLayout.cpp | 213 +++++++----- vm/jitrino/src/jet/cg_dbg.cpp | 5 + vm/jitrino/src/jet/jet.cpp | 1 - vm/jitrino/src/jet/rt.cpp | 4 +- vm/jitrino/src/optimizer/deadcodeeliminator.cpp | 6 +- vm/jitrino/src/shared/PlatformDependant.h | 2 +- vm/jitrino/src/shared/mkernel.cpp | 21 +- vm/jitrino/src/shared/mkernel.h | 12 +- vm/jitrino/src/vm/VMInterface.h | 5 +- vm/jitrino/src/vm/drl/DrlJITInterface.cpp | 161 ++++++--- vm/jitrino/src/vm/drl/DrlVMInterface.cpp | 22 +- vm/jitrino/src/vm/drl/DrlVMInterface.h | 21 +- 25 files changed, 908 insertions(+), 520 deletions(-) diff --git a/vm/jitrino/src/codegenerator/ia32/Ia32CallingConvention.cpp b/vm/jitrino/src/codegenerator/ia32/Ia32CallingConvention.cpp index 77a926e..8ead31c 100644 --- a/vm/jitrino/src/codegenerator/ia32/Ia32CallingConvention.cpp +++ b/vm/jitrino/src/codegenerator/ia32/Ia32CallingConvention.cpp @@ -16,212 +16,307 @@ */ /** * @author Vyacheslav P. 
Shakin - * @version $Revision: 1.6.22.3 $ */ #include "Ia32CallingConvention.h" #include "Ia32IRManager.h" +#include "callconv.h" + +namespace Jitrino { +namespace Ia32 { + +STDCALLCallingConvention CallingConvention_STDCALL; +DRLCallingConvention CallingConvention_DRL; +CDECLCallingConvention CallingConvention_CDECL; +FastCallCallingConvention CallingConvention_Fastcall; +DRLFastCallCallingConvention CallingConvention_DRLFast; + +static bool isFloatPointType(Type::Tag typeTag) +{ + assert(typeTag != Type::Float); + return (typeTag == Type::Double) || (typeTag == Type::Single); +} -namespace Jitrino{ -namespace Ia32{ const CallingConvention * CallingConvention::str2cc(const char * cc_name) { - if( NULL == cc_name ) { // default + if (NULL == cc_name) { // default return &CallingConvention_STDCALL; } - if( !strcmpi(cc_name, "stdcall") ) { + if (!strcmpi(cc_name, "stdcall")) { return &CallingConvention_STDCALL; } - if( !strcmpi(cc_name, "drl") ) { + if (!strcmpi(cc_name, "drl")) { return &CallingConvention_DRL; } - if( !strcmpi(cc_name, "cdecl") ) { + if (!strcmpi(cc_name, "cdecl")) { return &CallingConvention_CDECL; } - assert( false ); - return NULL; -} - -//======================================================================================== -STDCALLCallingConvention CallingConvention_STDCALL; -DRLCallingConvention CallingConvention_DRL; -CDECLCallingConvention CallingConvention_CDECL; + if (!strcmpi(cc_name, "cdecl")) { + return &CallingConvention_CDECL; + } + if (!strcmpi(cc_name, "fastcall")) { + return &CallingConvention_Fastcall; + } -//======================================================================================== -// class STDCALLCallingConvention -//======================================================================================== + if (!strcmpi(cc_name, "drlfast")) { + return &CallingConvention_DRLFast; + } + assert(false); + return NULL; +} #ifdef _EM64T_ const RegName fastCallGPRegs[6] = {RegName_RDI, RegName_RSI, RegName_RDX, RegName_RCX, RegName_R8, RegName_R9} ; const RegName fastCallFPRegs[8] = {RegName_XMM0,RegName_XMM1,RegName_XMM2,RegName_XMM3,RegName_XMM4,RegName_XMM5,RegName_XMM6,RegName_XMM7}; #endif -//______________________________________________________________________________________ -void STDCALLCallingConvention::getOpndInfo(ArgKind kind, uint32 count, OpndInfo * infos)const +void CallingConventionBase::getOpndInfo(ArgKind kind, uint32 count, OpndInfo * infos) const { if (kind==ArgKind_InArg){ #ifdef _EM64T_ - uint32 gpreg = 0; - uint32 fpreg = 0; -#endif - for (uint32 i=0; iType::Float ||typeTag=Type::Single) && fpreg < lengthof(fastCallFPRegs))) { - infos[i].slotCount=1; - infos[i].slots[0]=fastCallFPRegs[fpreg]; - infos[i].isReg=true; + } + else if(isFloatPointType(typeTag) && fpreg < lengthof(fastCallFPRegs)) { + info.isReg = true; + info.slots[0] = fastCallFPRegs[fpreg]; fpreg++; - } else { - infos[i].slotCount=1; - infos[i].slots[0]=RegName_Null; - infos[i].isReg=false; } - - -#else - OpndSize size=IRManager::getTypeSize(typeTag); - assert(size!=OpndSize_Null && size<=OpndSize_64); - - infos[i].slotCount=1; - infos[i].slots[0]=RegName_Null; - infos[i].isReg=false; - - if (size==OpndSize_64){ - infos[i].slotCount=2; - infos[i].slots[1]=RegName_Null; + else { + info.isReg = false; + info.slots[0] = RegName_Null; } -#endif - } - }else{ +#else + // Presume everything is passed on stack. + // Derivatives will fix when needed. 
for (uint32 i=0; i0){ - infos[i].slotCount=0; - }else{ - switch(typeTag){ - case Type::Void: - infos[i].slotCount=0; - break; - case Type::Float: - case Type::Double: - case Type::Single: + infos[i].slotCount = 1; + infos[i].slots[0] = RegName_Null; + infos[i].isReg = false; + Type::Tag typeTag = infos[i].typeTag; + OpndSize typeSize = IRManager::getTypeSize(typeTag); + assert(OpndSize_8 <= typeSize && typeSize <= OpndSize_64); + if (typeSize >= OpndSize_64) { + infos[i].slotCount = 2; + infos[i].slots[1] = RegName_Null; + infos[i].isReg = false; + } + } +#endif + return; + } + + assert(kind == ArgKind_RetArg); + // The return value's expected to be singular or none + if (count == 0) { + return; + } + assert(count == 1); + + Type::Tag typeTag = infos[0].typeTag; + + if (typeTag == Type::Void) { + infos[0].slotCount = 0; + } + else if (isFloatPointType(typeTag)) { + infos[0].slotCount = 1; + infos[0].isReg = true; #ifdef _EM64T_ - infos[i].slotCount=1; - infos[i].slots[0]=RegName_XMM0; + infos[0].slots[0] = RegName_XMM0; #else - infos[i].slotCount=1; - infos[i].slots[0]=RegName_FP0; + infos[0].slots[0] = RegName_FP0; #endif - break; - default: - { - OpndSize size=IRManager::getTypeSize(typeTag); + } + else { + // 128 bits are not expected in Java world + assert(IRManager::getTypeSize(typeTag) != OpndSize_128); + infos[0].slotCount = 1; + infos[0].isReg = true; #ifdef _EM64T_ - infos[i].slotCount=1; - infos[i].slots[0]=RegName_RAX; - - if (size==OpndSize_128){ - infos[i].slotCount=2; - infos[i].slots[1]=RegName_RDX; - } + infos[0].slots[0] = RegName_RAX; #else - assert(size!=OpndSize_Null && size<=OpndSize_64); - - infos[i].slotCount=1; - infos[i].slots[0]=RegName_EAX; - - if (size==OpndSize_64){ - infos[i].slotCount=2; - infos[i].slots[1]=RegName_EDX; - } -#endif - } - } - } + infos[0].slots[0] = RegName_EAX; + OpndSize size = IRManager::getTypeSize(typeTag); + if (size == OpndSize_64) { + infos[0].slotCount = 2; + infos[0].slots[1] = RegName_EDX; } +#endif } } -//______________________________________________________________________________________ -Constraint STDCALLCallingConvention::getCalleeSavedRegs(OpndKind regKind)const +Constraint CallingConventionBase::getCalleeSavedRegs(OpndKind regKind) const { - switch (regKind){ - case OpndKind_GPReg: + Constraint calleeSave; + if (regKind == OpndKind_GPReg) { + #ifdef _EM64T_ - return (Constraint(RegName_RBX)|RegName_RBP|RegName_R12|RegName_R13|RegName_R14|RegName_R15); + calleeSave = (Constraint(RegName_RBX)|RegName_RBP| + RegName_R12|RegName_R13|RegName_R14|RegName_R15); #else - return (Constraint(RegName_EBX)|RegName_EBP|RegName_ESI|RegName_EDI); + calleeSave = (Constraint(RegName_EBX)|RegName_EBP|RegName_ESI|RegName_EDI); #endif - default: - return Constraint(); } + return calleeSave; } + + #ifdef _EM64T_ -//______________________________________________________________________________________ -void CDECLCallingConvention::getOpndInfo(ArgKind kind, uint32 count, OpndInfo * infos)const + +void CDECLCallingConvention::getOpndInfo(ArgKind kind, uint32 count, OpndInfo * infos)const { - if (kind==ArgKind_InArg){ - for (uint32 i=0; i0){ - infos[i].isReg=false; - infos[i].slotCount=0; - }else{ - switch(typeTag){ - case Type::Void: - infos[i].isReg=false; - infos[i].slotCount=0; - break; - case Type::Float: - case Type::Double: - case Type::Single: - infos[i].isReg=true; - infos[i].slotCount=1; - infos[i].slots[0]=RegName_XMM0; - break; - default: - { - OpndSize size=IRManager::getTypeSize(typeTag); - infos[i].slotCount=1; - 
infos[i].slots[0]=RegName_RAX; - infos[i].isReg=true; - - if (size==OpndSize_128){ - infos[i].slotCount=2; - infos[i].slots[1]=RegName_RDX; - } - } - } - } + if (kind == ArgKind_InArg) { + for(uint32 i=0; i CallingConventionVector; - -//======================================================================================== -// class STDCALLCallingConvention -//======================================================================================== -/** Implementation of CallingConvention for the STDCALL calling convention -*/ - -class STDCALLCallingConvention: public CallingConvention -{ -public: - - virtual ~STDCALLCallingConvention() {} - virtual void getOpndInfo(ArgKind kind, uint32 argCount, OpndInfo * infos)const; - virtual Constraint getCalleeSavedRegs(OpndKind regKind)const; +/** + * Provides common, platform-wide characteristics of calling conventions. + */ +class CallingConventionBase : public CallingConvention +{ +protected: + virtual Constraint getCalleeSavedRegs(OpndKind regKind) const; + virtual void getOpndInfo(ArgKind kind, uint32 argCount, OpndInfo * infos) const; #ifdef _EM64T_ - virtual bool calleeRestoresStack()const{ return false; } + virtual bool calleeRestoresStack(void) const { return false; } #else - virtual bool calleeRestoresStack()const{ return true; } + virtual bool calleeRestoresStack(void) const { return true; } #endif - virtual bool pushLastToFirst()const{ return true; } - + virtual bool pushLastToFirst(void) const { return true; } }; -//======================================================================================== -// class DRLCallingConvention -//======================================================================================== -/** Implementation of CallingConvention for the DRL IA32 calling convention -*/ -class DRLCallingConvention: public STDCALLCallingConvention -{ -public: - virtual ~DRLCallingConvention() {} - virtual bool pushLastToFirst()const{ return false; } +/** STDCALL implementation.*/ +class STDCALLCallingConvention: public CallingConventionBase +{ +public: + virtual ~STDCALLCallingConvention() {} }; -//======================================================================================== -// class CDECLCallingConvention -//======================================================================================== -/** Implementation of CallingConvention for the CDECL calling convention -*/ -class CDECLCallingConvention: public STDCALLCallingConvention -{ -public: +/** Implementation of CallingConvention for the CDECL calling convention */ +class CDECLCallingConvention: public CallingConventionBase +{ +public: virtual ~CDECLCallingConvention() {} virtual bool calleeRestoresStack()const{ return false; } #ifdef _EM64T_ @@ -149,9 +135,49 @@ public: #endif }; +/** Implementation of default convention for managed code on IA32.*/ +class DRLCallingConvention: public CallingConventionBase +{ +public: + virtual ~DRLCallingConvention() {} + virtual bool pushLastToFirst(void) const { return false; } + +}; + +#ifdef _EM64T_ + +class FastCallCallingConvention : public CallingConventionBase {}; +class DRLFastCallCallingConvention : public CallingConventionBase {}; + +#else // _EM64T_ +class FastCallCallingConvention : public CallingConventionBase +{ + virtual void getOpndInfo(ArgKind kind, uint32 argCount, OpndInfo * infos) const; + + virtual bool pushLastToFirst(void) const + { + return false; + } +}; + +class DRLFastCallCallingConvention : public CallingConventionBase +{ + virtual void getOpndInfo(ArgKind kind, uint32 argCount, OpndInfo * 
infos) const; + virtual Constraint getCalleeSavedRegs(OpndKind regKind) const; + + virtual bool pushLastToFirst(void) const + { + return false; + } +}; + +#endif // ~ifdef _EM64T_ + extern STDCALLCallingConvention CallingConvention_STDCALL; extern DRLCallingConvention CallingConvention_DRL; extern CDECLCallingConvention CallingConvention_CDECL; +extern FastCallCallingConvention CallingConvention_Fastcall; +extern DRLFastCallCallingConvention CallingConvention_DRLFast; }; // namespace Ia32 } diff --git a/vm/jitrino/src/codegenerator/ia32/Ia32CgUtils.cpp b/vm/jitrino/src/codegenerator/ia32/Ia32CgUtils.cpp index 0fe8608..6a912ac 100644 --- a/vm/jitrino/src/codegenerator/ia32/Ia32CgUtils.cpp +++ b/vm/jitrino/src/codegenerator/ia32/Ia32CgUtils.cpp @@ -127,13 +127,22 @@ bool OpndUtils::isConstAreaItem(const Op bool OpndUtils::isFPConst(const Opnd* op, double dVal) { const void* addr = extractAddrOfConst(op); - return (addr == NULL) ? false : (dVal == *(const float*)addr); + if (addr == NULL) { + return false; + } + // Only test bit-to-bit equality to avoid all this NaN/sign/etc problems + bool equal = *((long long*)&dVal) == *(long long*)addr; + return equal; } bool OpndUtils::isFPConst(const Opnd* op, float fVal) { const void* addr = extractAddrOfConst(op); - return (addr == NULL) ? false : (fVal == *(const float*)addr); + if (addr == NULL) { + return false; + } + bool equal = *((uint32*)&fVal) == *(uint32*)addr; + return equal; } int OpndUtils::extractIntConst(const Opnd* op) diff --git a/vm/jitrino/src/codegenerator/ia32/Ia32CodeEmitter.cpp b/vm/jitrino/src/codegenerator/ia32/Ia32CodeEmitter.cpp index 4f4afb2..06e6694 100644 --- a/vm/jitrino/src/codegenerator/ia32/Ia32CodeEmitter.cpp +++ b/vm/jitrino/src/codegenerator/ia32/Ia32CodeEmitter.cpp @@ -340,9 +340,28 @@ void CodeEmitter::emitCode( void ) { #ifdef _DEBUG memset(codeStreamStart, 0xCC, maxMethodSize); #endif + // debugging support - insert breakpoint at specified inst/BB or at the method entry + bool break_at_entry = false; + unsigned break_inst_id = (unsigned)-1; + unsigned break_bb_id = (unsigned)-1; + const char* brk_command = getArg("break@"); + if (brk_command == NULL) { + // no op + } + else if (strnicmp(brk_command, "BB_", 3) == 0) { + // user wants to break at the BB entry + break_bb_id = atoi(brk_command + 3); + } + else if (strnicmp(brk_command, "I", 1) == 0) { + // user wants to break at the specific Inst + break_inst_id = atoi(brk_command + 1); + } + else { + getArg("break@", break_at_entry); + } int alignment = getIntArg("align", 0); - + LoopTree * lt = irManager->getFlowGraph()->getLoopTree(); uint8 * ip = codeStreamStart; @@ -376,7 +395,13 @@ void CodeEmitter::emitCode( void ) { } else { #endif uint8 * instStartIp = ip; - inst->setCodeOffset( instStartIp-blockStartIp ); + inst->setCodeOffset(instStartIp-blockStartIp); + + if (break_inst_id == inst->getId()) { + *ip = 0xCC; // == INT3 + ++ip; + } + ip = inst->emit(ip); #ifdef _EM64T_ } diff --git a/vm/jitrino/src/codegenerator/ia32/Ia32IRManager.cpp b/vm/jitrino/src/codegenerator/ia32/Ia32IRManager.cpp index abbb9ee..13f7ff4 100644 --- a/vm/jitrino/src/codegenerator/ia32/Ia32IRManager.cpp +++ b/vm/jitrino/src/codegenerator/ia32/Ia32IRManager.cpp @@ -67,7 +67,8 @@ IRManager::IRManager(MemoryManager& memM internalHelperInfos(memManager), infoMap(memManager), verificationLevel(0), hasCalls(false), hasNonExceptionCalls(false), laidOut(false), codeStartAddr(NULL) -{ +{ + xmmTotalRegUsage = 0; for (uint32 i=0; isetEntryNode(fg->createBlockNode()); @@ -1219,30 +1220,42 @@ Inst * 
IRManager::newPushPopSequence(Mne return instList; } -//_________________________________________________________________________________________________ -const CallingConvention * IRManager::getCallingConvention(CompilationInterface::RuntimeHelperId helperId)const -{ - CompilationInterface::VmCallingConvention callConv=compilationInterface.getRuntimeHelperCallingConvention(helperId); - switch (callConv){ - case CompilationInterface::CallingConvention_Drl: - return &CallingConvention_DRL; - case CompilationInterface::CallingConvention_Stdcall: - return &CallingConvention_STDCALL; - case CompilationInterface::CallingConvention_Cdecl: - return &CallingConvention_CDECL; - default: - assert(0); - return NULL; - } +const CallingConvention* IRManager::getCallingConvention(MethodDesc * methodDesc) const +{ + return getDefaultManagedCallingConvention(); } -//_________________________________________________________________________________________________ -const CallingConvention * IRManager::getCallingConvention(MethodDesc * methodDesc)const +const CallingConvention* IRManager::getDefaultManagedCallingConvention(void) const { - return &CallingConvention_DRL; + return getCallingConvention(compilationInterface.getManagedCallingConvention()); +} + +const CallingConvention* IRManager::getCallingConvention( + CompilationInterface::RuntimeHelperId helperId) const +{ + return getCallingConvention(compilationInterface.getRuntimeHelperCallingConvention(helperId)); +} + +const CallingConvention* IRManager::getCallingConvention( + CompilationInterface::VmCallingConvention vmCC) +{ + switch(vmCC) { + case CompilationInterface::CallingConvention_Drl: + return &CallingConvention_DRL; + case CompilationInterface::CallingConvention_DRLFast: + return &CallingConvention_DRLFast; + case CompilationInterface::CallingConvention_Stdcall: + return &CallingConvention_STDCALL; + case CompilationInterface::CallingConvention_Cdecl: + return &CallingConvention_CDECL; + case CompilationInterface::CallingConvention_Fastcall: + return &CallingConvention_Fastcall; + default: + assert(0); + } + return NULL; } -//_________________________________________________________________________________________________ Opnd * IRManager::defArg(Type * type, uint32 position) { assert(NULL != entryPointInst); @@ -1267,24 +1280,31 @@ Opnd * IRManager::getRegOpnd(RegName reg } return regOpnds[idx]; } -void IRManager::calculateTotalRegUsage(OpndKind regKind) { - assert(regKind == OpndKind_GPReg); +void IRManager::calculateTotalRegUsage(OpndKind regKind) +{ + assert(regKind == OpndKind_GPReg || regKind == OpndKind_XMMReg); + uint32& usage = regKind == OpndKind_GPReg ? gpTotalRegUsage : xmmTotalRegUsage; uint32 opndCount=getOpndCount(); for (uint32 i=0; iisPlacedIn(regKind)) - gpTotalRegUsage |= getRegMask(opnd->getRegName()); + if (opnd->isPlacedIn(regKind)) { + usage |= getRegMask(opnd->getRegName()); + } } -#ifdef _EM64T_ + #ifdef _EM64T_ //FIXME!! 
- gpTotalRegUsage |= getRegMask(RegName_R15); - gpTotalRegUsage |= getRegMask(RegName_R13); - gpTotalRegUsage |= getRegMask(RegName_R14); -#endif + if (regKind == OpndKind_GPReg) { + gpTotalRegUsage |= getRegMask(RegName_R15); + gpTotalRegUsage |= getRegMask(RegName_R13); + gpTotalRegUsage |= getRegMask(RegName_R14); + } + #endif } //_________________________________________________________________________________________________ uint32 IRManager::getTotalRegUsage(OpndKind regKind)const { - return gpTotalRegUsage; + assert(regKind == OpndKind_GPReg || regKind == OpndKind_XMMReg); + const uint32& usage = regKind == OpndKind_GPReg ? gpTotalRegUsage : xmmTotalRegUsage; + return usage; } //_________________________________________________________________________________________________ bool IRManager::isPreallocatedRegOpnd(Opnd * opnd) @@ -1498,16 +1518,14 @@ void IRManager::calculateLivenessInfo() for (Nodes::const_iterator it = nodes.begin(),end = nodes.end();it!=end; ++it) { CGNode* node = (CGNode*)*it; if (node == exitNode) { -#ifndef _EM64T_ - if (!methodDesc.isStatic() - && (methodDesc.isSynchronized() || methodDesc.isMethodClassIsLikelyExceptionType())) - { + if (methodNeedsImmortalThis(methodDesc)) { BitSet * exitLs = node->getLiveAtEntry(); EntryPointPseudoInst * entryPointInst = getEntryPointInst(); - Opnd * thisOpnd = entryPointInst->getOpnd(0); - exitLs->setBit(thisOpnd->getId(), true); - } -#endif + Opnd * thisOpnd = entryPointInst->thisOpnd; + if (thisOpnd != NULL) { + exitLs->setBit(thisOpnd->getId(), true); + } + } continue; } bool processNode = true; @@ -1647,12 +1665,21 @@ void IRManager::resetOpndConstraints() void IRManager::finalizeCallSites() { -#ifdef _EM64T_ if (!getMethodDesc().isStatic()) { - entryPointInst->thisOpnd = newMemOpnd(getTypeFromTag(Type::UnmanagedPtr), MemOpndKind_StackAutoLayout, getRegOpnd(STACK_REG), 0); - entryPointInst->getBasicBlock()->appendInst(newCopyPseudoInst(Mnemonic_MOV, entryPointInst->thisOpnd, entryPointInst->getOpnd(0))); + Opnd* opnd0 = entryPointInst->getOpnd(0); + + if (methodNeedsImmortalThis(methodDesc) && opnd0->isPlacedIn(OpndKind_GPReg)) { + //Type* uptrType = getTypeFromTag(Type::UnmanagedPtr); + Type* thizType = opnd0->getType(); + Opnd* stackReg = getRegOpnd(STACK_REG); + entryPointInst->thisOpnd = newMemOpnd(thizType, MemOpndKind_StackAutoLayout, stackReg, 0); + Inst* ii = newCopyPseudoInst(Mnemonic_MOV, entryPointInst->thisOpnd, opnd0); + entryPointInst->getBasicBlock()->appendInst(ii); + } + else { + entryPointInst->thisOpnd = opnd0; + } } -#endif const Nodes& nodes = fg->getNodes(); for (Nodes::const_iterator it = nodes.begin(), end = nodes.end(); it!=end; ++it) { @@ -2235,6 +2262,20 @@ void SessionAction::computeDominators(vo cfg->setDominatorTree(dominatorTree); } +bool IRManager::methodNeedsImmortalThis(MethodDesc& method) +{ + if (method.isStatic()) { + return false; + } + if (method.isSynchronized()) { + return true; + } + if (method.isMethodClassIsLikelyExceptionType() && + method.isInstanceInitializer()) { + return true; + } + return false; +} } //namespace Ia32 } //namespace Jitrino diff --git a/vm/jitrino/src/codegenerator/ia32/Ia32IRManager.h b/vm/jitrino/src/codegenerator/ia32/Ia32IRManager.h index ab0621a..ee03ac6 100644 --- a/vm/jitrino/src/codegenerator/ia32/Ia32IRManager.h +++ b/vm/jitrino/src/codegenerator/ia32/Ia32IRManager.h @@ -255,11 +255,22 @@ public: void setHasNonExceptionCalls(){ hasNonExceptionCalls=true; } bool getHasNonExceptionCalls()const{ return hasNonExceptionCalls; } 
//----------------------------------------------------------------------------------------------- - const CallingConvention * getCallingConvention(CompilationInterface::RuntimeHelperId helperId)const; - - const CallingConvention * getCallingConvention(MethodDesc * methodDesc)const; - - const CallingConvention * getDefaultManagedCallingConvention() const { return &CallingConvention_DRL; } + /** + * Returns calling convention for the given runtime helper. + */ + const CallingConvention * getCallingConvention(CompilationInterface::RuntimeHelperId helperId) const; + /** + * Returns calling convention for the given method. + */ + const CallingConvention * getCallingConvention(MethodDesc * methodDesc) const; + /** + * Returns default calling convention for managed code. + */ + const CallingConvention * getDefaultManagedCallingConvention(void) const; + /** + * Returns CallingConvention object for the given VmCallingConvention id. + */ + static const CallingConvention * getCallingConvention(CompilationInterface::VmCallingConvention vmCC); EntryPointPseudoInst * getEntryPointInst()const { return entryPointInst; } @@ -420,6 +431,8 @@ public: uint32 getLayoutOpndAlignment(Opnd * opnd); void finalizeCallSites(); + static bool methodNeedsImmortalThis(MethodDesc& method); + /** Calculates dislacement from stack entry point for every instruction. */ @@ -497,6 +510,15 @@ protected: OpndVector opnds; uint32 gpTotalRegUsage; + /** + * A mask that describes XMM registers usage across CFG. + * + * The value is cached and may be retrieved with getTotalRegUsage(). + * + * @note Only valid after calculateTotalRegUsage call and may become + * invalid on subsequent operations that modify CFG/operands. + */ + uint32 xmmTotalRegUsage; EntryPointPseudoInst * entryPointInst; diff --git a/vm/jitrino/src/codegenerator/ia32/Ia32Inst.cpp b/vm/jitrino/src/codegenerator/ia32/Ia32Inst.cpp index 7bf6805..0582608 100644 --- a/vm/jitrino/src/codegenerator/ia32/Ia32Inst.cpp +++ b/vm/jitrino/src/codegenerator/ia32/Ia32Inst.cpp @@ -16,7 +16,6 @@ */ /** * @author Vyacheslav P. 
Shakin - * @version $Revision: 1.14.12.1.4.4 $ */ #include "Ia32Inst.h" @@ -882,16 +881,20 @@ void CallingConventionClient::layoutAuxi //========================================================================================================= // class EntryPointPseudoInst //========================================================================================================= -EntryPointPseudoInst::EntryPointPseudoInst(IRManager * irm, int id, const CallingConvention * cc) -#ifdef _EM64T_ - : Inst(Mnemonic_NULL, id, Inst::Form_Extended), thisOpnd(0), callingConventionClient(irm->getMemoryManager(), cc) -#else - : Inst(Mnemonic_NULL, id, Inst::Form_Extended), callingConventionClient(irm->getMemoryManager(), cc) -#endif -{ kind=Kind_EntryPointPseudoInst; callingConventionClient.setOwnerInst(this); } -//_________________________________________________________________________________________________________ +EntryPointPseudoInst::EntryPointPseudoInst( + IRManager * irm, + int id, + const CallingConvention * cc) : + Inst(Mnemonic_NULL, id, Inst::Form_Extended), + thisOpnd(NULL), + callingConventionClient(irm->getMemoryManager(), cc) +{ + kind=Kind_EntryPointPseudoInst; + callingConventionClient.setOwnerInst(this); +} + Opnd * EntryPointPseudoInst::getDefArg(uint32 i)const -{ +{ return NULL; } diff --git a/vm/jitrino/src/codegenerator/ia32/Ia32Inst.h b/vm/jitrino/src/codegenerator/ia32/Ia32Inst.h index 381b03c..2dad54a 100644 --- a/vm/jitrino/src/codegenerator/ia32/Ia32Inst.h +++ b/vm/jitrino/src/codegenerator/ia32/Ia32Inst.h @@ -16,7 +16,6 @@ */ /** * @author Vyacheslav P. Shakin - * @version $Revision: 1.18.12.2.4.3 $ */ #ifndef _IA32_INST_H_ @@ -1170,7 +1169,7 @@ public: void pushInfo(Inst::OpndRole role, Type::Tag typeTag) { CallingConvention::OpndInfo info; - info.typeTag=(uint32)typeTag; info.slotCount=0; + info.typeTag=typeTag; info.slotCount=0; StlVector & infos = getInfos(role); infos.push_back(info); } @@ -1225,11 +1224,10 @@ public: const CallingConventionClient& getCallingConventionClient()const { return callingConventionClient; } virtual bool hasSideEffect()const { return true; } - -#ifdef _EM64T_ - Opnd * thisOpnd; -#endif - //-------------------------------------------------------------------- + /** + * Keeps this operand for instance methods. NULL for static ones. 
+ */ + Opnd* thisOpnd; protected: CallingConventionClient callingConventionClient; diff --git a/vm/jitrino/src/codegenerator/ia32/Ia32InternalTrace.cpp b/vm/jitrino/src/codegenerator/ia32/Ia32InternalTrace.cpp index 9089111..606785a 100644 --- a/vm/jitrino/src/codegenerator/ia32/Ia32InternalTrace.cpp +++ b/vm/jitrino/src/codegenerator/ia32/Ia32InternalTrace.cpp @@ -52,7 +52,6 @@ static inline void m_assert(bool cond) } static MemoryManager mm(0x100, "printRuntimeOpndInternalHelper"); -static DrlVMTypeManager *tm = NULL; void __stdcall methodEntry(const char * methodName, uint32 argInfoCount, CallingConvention::OpndInfo * argInfos) { @@ -62,12 +61,15 @@ void __stdcall methodEntry(const char * #else context.esp=(POINTER_SIZE_INT)(&methodName+sizeof(POINTER_SIZE_INT)); // must point to the beginning of incoming stack args #endif - ::std::ostream & os=Log::cat_rt()->out(); + ::std::ostream & os=Log::cat_rt()->out(); os<<"__METHOD_ENTRY__:"<init(); } @@ -92,6 +94,7 @@ void __stdcall methodEntry(const char * GCMap::checkObject(*tm, *(const void**)(const void*)arg); } } +#endif } //____________________________ _____________________________________________________________________ @@ -147,10 +150,10 @@ void InternalTrace::runImpl() argInfoOpnd, }; Inst * internalTraceInst=irManager->newInternalRuntimeHelperCallInst("itrace_method_entry", 3, args, NULL); - internalTraceInst->insertBefore(inst); + internalTraceInst->insertAfter(inst); }else{ - Opnd * args[3]={ methodNameOpnd, - irManager->newImmOpnd(irManager->getTypeManager().getInt32Type(), 0), + Opnd * args[3]={ methodNameOpnd, + irManager->newImmOpnd(irManager->getTypeManager().getInt32Type(), 0), irManager->newImmOpnd(irManager->getTypeManager().getUnmanagedPtrType(irManager->getTypeManager().getIntPtrType()), 0), }; Inst * internalTraceInst=irManager->newInternalRuntimeHelperCallInst("itrace_method_entry", 3, args, NULL); diff --git a/vm/jitrino/src/codegenerator/ia32/Ia32Printer.cpp b/vm/jitrino/src/codegenerator/ia32/Ia32Printer.cpp index 58fc312..f7d3b81 100644 --- a/vm/jitrino/src/codegenerator/ia32/Ia32Printer.cpp +++ b/vm/jitrino/src/codegenerator/ia32/Ia32Printer.cpp @@ -16,7 +16,6 @@ */ /** * @author Vyacheslav P. 
Shakin - * @version $Revision: 1.11.14.2.4.3 $ */ #include "Ia32Printer.h" @@ -109,12 +108,23 @@ void IRPrinter::printCFG(uint32 indent) { assert(irManager!=NULL); std::ostream& os = getStream(); - const Nodes& nodes = irManager->getFlowGraph()->getNodesPostOrder(); - //topological ordering - for (Nodes::const_reverse_iterator it = nodes.rbegin(), end = nodes.rend(); it!=end; ++it) { - Node* node = *it; - printNode(node, indent); - os<getFlowGraph(); + BasicBlock* entry = (BasicBlock*)cfg->getEntryNode(); + if (entry->getLayoutSucc() == NULL) { + // No layout was performed yet + const Nodes& nodes = cfg->getNodesPostOrder(); + //topological ordering + for (Nodes::const_reverse_iterator it = nodes.rbegin(), end = nodes.rend(); it!=end; ++it) { + Node* node = *it; + printNode(node, indent); + os<getLayoutSucc()) { + printNode(bb, indent); + os<isBlockNode()) { os << "BB_"; - } else if (node->isDispatchNode()) { + } else if (irManager && node->isDispatchNode()) { if (node!=irManager->getFlowGraph()->getUnwindNode()) { os << "DN_"; } else { @@ -140,9 +150,9 @@ void IRPrinter::printNodeName(const Node os << "EN_"; } os << node->getId(); - if (node == irManager->getFlowGraph()->getEntryNode()) { + if (irManager && node == irManager->getFlowGraph()->getEntryNode()) { os << "_prolog"; - } else if (irManager->isEpilog(node)) { + } else if (irManager && irManager->isEpilog(node)) { os << "_epilog"; } } @@ -1310,17 +1320,16 @@ void dumpIR( std::ostream& out = Log::log(LogStream::IRDUMP).out(); out << "-------------------------------------------------------------" << ::std::endl; - char title[128]; - strcpy(title, readablePrefix); - strcat(title, readableStageName); + char title[128] = {0}; + snprintf(title, sizeof(title)-1, "%s%s", readablePrefix, readableStageName); - char subKind[128]; - assert(subKind1!=NULL); - strcpy(subKind, subKind1); - if (subKind2!=NULL && subKind2[0]!=0){ - strcat(subKind, "."); - strcat(subKind, subKind2); + const char* format = "%s"; + if (subKind2!=NULL && subKind2[0]!=0) { + format = "%s.%s"; } + char subKind[128] = {0}; + assert(subKind1!=NULL); + snprintf(subKind, sizeof(subKind)-1, format, subKind1, subKind2); Log::printIRDumpBegin(out, stageId, readableStageName, subKind); if (subKind2!=0){ diff --git a/vm/jitrino/src/codegenerator/ia32/Ia32RuntimeInterface.cpp b/vm/jitrino/src/codegenerator/ia32/Ia32RuntimeInterface.cpp index 4f16150..9c9a961 100644 --- a/vm/jitrino/src/codegenerator/ia32/Ia32RuntimeInterface.cpp +++ b/vm/jitrino/src/codegenerator/ia32/Ia32RuntimeInterface.cpp @@ -16,7 +16,6 @@ */ /** * @author Intel, Nikolay A. 
Sidelnikov - * @version $Revision: 1.8.14.2.4.4 $ */ #include "Ia32IRManager.h" @@ -47,16 +46,26 @@ void* RuntimeInterface::getAddressOfThis return (void*)&default_this; } assert(context); - StackInfo stackInfo; + void** pip = NULL; + POINTER_SIZE_INT sp = 0; #ifdef _EM64T_ - stackInfo.read(methodDesc, *context->p_rip, isFirst); - assert(isFirst || (POINTER_SIZE_INT)context->p_rip+8 == context->rsp); - return (void *)(context->rsp + stackInfo.getStackDepth() + (int)stackInfo.getOffsetOfThis()); + pip = (void**)context->p_rip; + sp = context->rsp; #else - stackInfo.read(methodDesc, *context->p_eip, isFirst); - assert(isFirst || (uint32)context->p_eip+4 == context->esp); - return (void *)(context->esp + stackInfo.getStackDepth() + stackInfo.getOffsetOfThis()); + pip = (void**)context->p_eip; + sp = context->esp; #endif + + void* ip = *pip; + +#ifdef _DEBUG + char* addr = (char*)methodDesc->getCodeBlockAddress(0); + unsigned len = methodDesc->getCodeBlockSize(0); + assert(addr<=ip && ip<(addr+len)); +#endif + StackInfo stackInfo; + stackInfo.read(methodDesc, (POINTER_SIZE_INT)ip, isFirst); + return (void *)(sp + stackInfo.getStackDepth() + stackInfo.getOffsetOfThis()); } void RuntimeInterface::fixHandlerContext(MethodDesc* methodDesc, JitFrameContext* context, bool isFirst) @@ -70,7 +79,10 @@ void RuntimeInterface::fixHandlerContex stackInfo.fixHandlerContext(context); } -bool RuntimeInterface::getBcLocationForNative(MethodDesc* method, uint64 native_pc, uint16 *bc_pc) +bool RuntimeInterface::getBcLocationForNative( + MethodDesc* method, + uint64 native_pc, + uint16 *bc_pc) { StackInfo stackInfo; @@ -78,36 +90,41 @@ bool RuntimeInterface::getBcLocationForN uint32 stackInfoSize = stackInfo.readByteSize(infoBlock); uint32 gcMapSize = GCMap::readByteSize(infoBlock + stackInfoSize); - const char* methName; - uint64 bcOffset = BcMap::get_bc_location_for_native(native_pc, infoBlock + stackInfoSize + gcMapSize); if (bcOffset != ILLEGAL_VALUE) { *bc_pc = (uint16)bcOffset; return true; - } else if (Log::isLogEnabled(LogStream::RT)) { - methName = method->getName(); - Log::log(LogStream::RT) << "Byte code for method: " << methName << " IP = " << native_pc - << " not found " << std::endl; + } + if (Log::isLogEnabled(LogStream::RT)) { + Log::log(LogStream::RT) + << "Byte code for method: " << method->getName() + << " IP = " << (void*)(POINTER_SIZE_INT)native_pc + << " not found " << std::endl; } return false; } -bool RuntimeInterface::getNativeLocationForBc(MethodDesc* method, uint16 bc_pc, uint64 *native_pc) { + +bool RuntimeInterface::getNativeLocationForBc( + MethodDesc* method, + uint16 bc_pc, + uint64 *native_pc) +{ StackInfo stackInfo; Byte* infoBlock = method->getInfoBlock(); uint32 stackInfoSize = stackInfo.readByteSize(infoBlock); uint32 gcMapSize = GCMap::readByteSize(infoBlock + stackInfoSize); - const char* methName; - uint64 ncAddr = BcMap::get_native_location_for_bc(bc_pc, infoBlock + stackInfoSize + gcMapSize); if (ncAddr != ILLEGAL_VALUE) { *native_pc = ncAddr; return true; - } else if (Log::isLogEnabled(LogStream::RT)) { - methName = method->getName(); - Log::log(LogStream::RT) << "Byte code for method: " << methName << " IP = " << native_pc - << " not found " << std::endl; + } + if (Log::isLogEnabled(LogStream::RT)) { + Log::log(LogStream::RT) + << "Address for method: " << method->getName() + << " PC = " << bc_pc + << " not found " << std::endl; } return false; } diff --git a/vm/jitrino/src/codegenerator/ia32/Ia32StackInfo.cpp b/vm/jitrino/src/codegenerator/ia32/Ia32StackInfo.cpp index 
204aa06..ef6f191 100644 --- a/vm/jitrino/src/codegenerator/ia32/Ia32StackInfo.cpp +++ b/vm/jitrino/src/codegenerator/ia32/Ia32StackInfo.cpp @@ -16,7 +16,6 @@ */ /** * @author Intel, Nikolay A. Sidelnikov - * @version $Revision: 1.14.14.3.4.4 $ */ #include "Ia32IRManager.h" @@ -270,6 +269,19 @@ void StackInfo::unwind(MethodDesc* pMeth context->p_ebx = (POINTER_SIZE_INT *) offset; } context->esp += offset_step; //IP register size + + char* xmmBase = (char*)(context->esp + xmmcalleeOffset); + + for (unsigned i=0; ip_xmms[i] = (double*)xmmBase; + xmmBase -= sizeof(double); + } + #endif } @@ -336,28 +348,25 @@ void StackInfo::fixHandlerContext(JitFra #endif } -void StackInfo::registerInsts(IRManager& irm) +void StackInfo::registerInsts(IRManager& irm) { - if (!irm.getMethodDesc().isStatic()) { -#ifdef _EM64T_ - EntryPointPseudoInst * entryPointInst = irm.getEntryPointInst(); + EntryPointPseudoInst * entryPointInst = irm.getEntryPointInst(); + if (!irm.getMethodDesc().isStatic() && + entryPointInst->thisOpnd->isPlacedIn(OpndKind_Mem)) { offsetOfThis = (uint32)entryPointInst->thisOpnd->getMemOpndSubOpnd(MemOpndSubOpndKind_Displacement)->getImmValue(); -#else - EntryPointPseudoInst * entryPointInst = irm.getEntryPointInst(); - offsetOfThis = (uint32)entryPointInst->getOpnd(0)->getMemOpndSubOpnd(MemOpndSubOpndKind_Displacement)->getImmValue(); -#endif } const Nodes& nodes = irm.getFlowGraph()->getNodes(); for (Nodes::const_iterator it = nodes.begin(), end = nodes.end(); it!=end; ++it) { Node* node = *it; - if (node->isBlockNode()){ - for (Inst * inst=(Inst*)node->getFirstInst(); inst!=NULL; inst=inst->getNextInst()){ - if(inst->getMnemonic() == Mnemonic_CALL) { - (*stackDepthInfo)[(POINTER_SIZE_INT)inst->getCodeStartAddr()+inst->getCodeSize()]= - StackDepthInfo(((CallInst *)inst)->getCallingConventionClient().getCallingConvention()->getCalleeSavedRegs(OpndKind_GPReg).getMask(), - inst->getStackDepth(), - inst->getCodeSize()); - } + if (!node->isBlockNode()) { + continue; + } + for (Inst * inst=(Inst*)node->getFirstInst(); inst!=NULL; inst=inst->getNextInst()){ + if(inst->getMnemonic() == Mnemonic_CALL) { + (*stackDepthInfo)[(POINTER_SIZE_INT)inst->getCodeStartAddr()+inst->getCodeSize()]= + StackDepthInfo(((CallInst *)inst)->getCallingConventionClient().getCallingConvention()->getCalleeSavedRegs(OpndKind_GPReg).getMask(), + inst->getStackDepth(), + inst->getCodeSize()); } } } diff --git a/vm/jitrino/src/codegenerator/ia32/Ia32StackInfo.h b/vm/jitrino/src/codegenerator/ia32/Ia32StackInfo.h index 4c67a83..6129b38 100644 --- a/vm/jitrino/src/codegenerator/ia32/Ia32StackInfo.h +++ b/vm/jitrino/src/codegenerator/ia32/Ia32StackInfo.h @@ -16,7 +16,6 @@ */ /** * @author Intel, Nikolay A. 
Sidelnikov - * @version $Revision: 1.13.14.3.4.4 $ */ #ifndef _IA32_STACK_INFO_H_ @@ -91,9 +90,8 @@ public: itraceMethodExitString(0), eipOffset(0), icalleeMask(0),icalleeOffset(0), - fcallee(0),foffset(0), - acallee(0),aoffset(0), - localOffset(0), + xmmcalleeMask(0), xmmcalleeOffset(0), + localOffset(0), calleeSaveRegsMask(0), stackDepth(-1), offsetOfThis(0){ stackDepthInfo = new(mm) DepthMap(mm);} @@ -103,9 +101,8 @@ public: itraceMethodExitString(0), eipOffset(0), icalleeMask(0),icalleeOffset(0), - fcallee(0),foffset(0), - acallee(0),aoffset(0), - localOffset(0), + xmmcalleeMask(0), xmmcalleeOffset(0), + localOffset(0), stackDepthInfo(NULL), calleeSaveRegsMask(0), stackDepth(-1),offsetOfThis(0) {} @@ -154,20 +151,14 @@ public: int getRetEIPOffset() const {return eipOffset;} uint32 getIntCalleeMask() const {return icalleeMask;} - + int getIntCalleeOffset() const {return icalleeOffset;} - - uint32 getFPCalleeMask() const {return fcallee;} - - int getFPCalleeOffset() const {return foffset;} - uint32 getApplCalleeMask() const {return acallee;} - - int getApplCalleeOffset() const {return aoffset;} + uint32 getXmmCalleeMask() const {return xmmcalleeMask;} int getLocalOffset() const {return localOffset;} - uint32 getOffsetOfThis() const {return offsetOfThis;} + int getOffsetOfThis() const {return offsetOfThis;} /** returns byte size of StackInfo data */ @@ -192,19 +183,16 @@ private: uint32 icalleeMask; int icalleeOffset; - uint32 fcallee; - int foffset; - - uint32 acallee; - int aoffset; + uint32 xmmcalleeMask; + int xmmcalleeOffset; int localOffset; - + DepthMap * stackDepthInfo; uint32 calleeSaveRegsMask; int stackDepth; - uint32 offsetOfThis; + int offsetOfThis; friend class StackLayouter; }; diff --git a/vm/jitrino/src/codegenerator/ia32/Ia32StackLayout.cpp b/vm/jitrino/src/codegenerator/ia32/Ia32StackLayout.cpp index 6af283a..41e18a2 100644 --- a/vm/jitrino/src/codegenerator/ia32/Ia32StackLayout.cpp +++ b/vm/jitrino/src/codegenerator/ia32/Ia32StackLayout.cpp @@ -16,7 +16,6 @@ */ /** * @author Intel, Nikolay A. 
Sidelnikov - * @version $Revision: 1.10.14.1.4.4 $ */ #include "Ia32IRManager.h" @@ -104,25 +103,22 @@ protected: int getLocalBase(){ return localBase; } int getLocalEnd(){ return localEnd; } - int getApplCalleeBase(){ return acalleeBase; } - int getApplCalleeEnd(){ return acalleeEnd; } int getFloatCalleeBase(){ return fcalleeBase; } int getFloatCalleeEnd(){ return fcalleeEnd; } int getIntCalleeBase(){ return icalleeBase; } int getIntCalleeEnd(){ return icalleeEnd; } int getRetEIPBase(){ return retEIPBase; } int getRetEIPEnd(){ return retEIPEnd; } - int getInArgBase(){ return inargBase; } + int getInArgBase(){ return inargBase; } int getInArgEnd(){ return inargEnd; } uint32 getFrameSize(){ return frameSize; } - uint32 getOutArgSize(){ return outArgSize; } + + static uint32 count_bits(unsigned mask); int localBase; int localEnd; int fcalleeBase; int fcalleeEnd; - int acalleeBase; - int acalleeEnd; int icalleeBase; int icalleeEnd; @@ -131,26 +127,32 @@ protected: int inargBase; int inargEnd; uint32 frameSize; - uint32 outArgSize; #ifdef _EM64T_ bool stackCorrection; #endif StackInfo * stackInfo; - - MemoryManager memoryManager; - + Opnd* allocatedStackSizeOpnd; }; static ActionFactory _stack("stack"); +uint32 StackLayouter::count_bits(unsigned mask) +{ + uint32 count = 0; + for (uint32 i=0, probe=1; iicalleeMask = methodCC->getCalleeSavedRegs(OpndKind_GPReg).getMask() & irm.getTotalRegUsage(OpndKind_GPReg); + + irm.calculateTotalRegUsage(OpndKind_XMMReg); + stackInfo->xmmcalleeMask = methodCC->getCalleeSavedRegs(OpndKind_XMMReg).getMask(); createProlog(); createEpilog(); irm.calculateStackDepth(); @@ -186,12 +193,8 @@ void StackLayouter::runImpl() //fill StackInfo object stackInfo->frameSize = getFrameSize(); - stackInfo->icalleeMask = irm.getCallingConvention()->getCalleeSavedRegs(OpndKind_GPReg).getMask() & irm.getTotalRegUsage(OpndKind_GPReg); stackInfo->icalleeOffset = getIntCalleeBase(); - stackInfo->fcallee = irm.getCallingConvention()->getCalleeSavedRegs(OpndKind_FPReg).getMask(); - stackInfo->foffset = getFloatCalleeBase(); - stackInfo->acallee = 0; //VSH: TODO - get rid off appl regs irm.getCallingConvention()->getCalleeSavedRegs(OpndKind_ApplicationReg); - stackInfo->aoffset = getApplCalleeBase(); + stackInfo->xmmcalleeOffset = getFloatCalleeBase(); stackInfo->localOffset = getLocalBase(); stackInfo->eipOffset = getRetEIPBase(); } @@ -209,6 +212,7 @@ void StackLayouter::checkUnassignedOpnds void StackLayouter::createProlog() { IRManager & irm=getIRManager(); + Type* int32type = irm.getTypeManager().getInt32Type(); for (uint32 i = 0; igetRefCount() && opnd->getMemOpndKind() == MemOpndKind_StackAutoLayout) { Opnd * dispOpnd=opnd->getMemOpndSubOpnd(MemOpndSubOpndKind_Displacement); if (dispOpnd==NULL){ - dispOpnd=irm.newImmOpnd(irm.getTypeManager().getInt32Type(), 0); + dispOpnd=irm.newImmOpnd(int32type, 0); opnd->setMemOpndSubOpnd(MemOpndSubOpndKind_Displacement, dispOpnd); } dispOpnd->assignImmValue(0); @@ -231,54 +235,38 @@ void StackLayouter::createProlog() retEIPEnd = inargBase = offset; - uint32 slotSize=sizeof(POINTER_SIZE_INT); + uint32 slotSize=sizeof(POINTER_SIZE_INT); EntryPointPseudoInst * entryPointInst = irManager->getEntryPointInst(); + assert(entryPointInst); assert(entryPointInst->getNode()==irManager->getFlowGraph()->getEntryNode()); - if (entryPointInst) {//process entry-point instruction - const StlVector& stackOpndInfos = - ((const EntryPointPseudoInst*)entryPointInst)->getCallingConventionClient().getStackOpndInfos(Inst::OpndRole_Def); - for (uint32 i=0, 
n=stackOpndInfos.size(); i& stackOpndInfos = + ((const EntryPointPseudoInst*)entryPointInst)->getCallingConventionClient().getStackOpndInfos(Inst::OpndRole_Def); + + for (uint32 i=0, n=stackOpndInfos.size(); igetCallingConventionClient().getCallingConvention()->pushLastToFirst() ? - ((n-1)*slotSize-stackOpndInfos[i].offset): + !entryPointInst->getCallingConventionClient().getCallingConvention()->pushLastToFirst() ? + ((n-1)*slotSize-stackOpndInfos[i].offset): #endif - stackOpndInfos[i].offset; + stackOpndInfos[i].offset; - Opnd * opnd=entryPointInst->getOpnd(stackOpndInfos[i].opndIndex); - Opnd * disp = opnd->getMemOpndSubOpnd(MemOpndSubOpndKind_Displacement); - disp->assignImmValue(offset+argOffset); - } + Opnd * opnd=entryPointInst->getOpnd(stackOpndInfos[i].opndIndex); + Opnd * disp = opnd->getMemOpndSubOpnd(MemOpndSubOpndKind_Displacement); + disp->assignImmValue(offset+argOffset); } - inargEnd = offset; icalleeEnd = offset = 0; - uint32 calleeSavedRegs=irm.getCallingConvention()->getCalleeSavedRegs(OpndKind_GPReg).getMask(); - - uint32 usageRegMask = irManager->getTotalRegUsage(OpndKind_GPReg); - Inst * lastPush = NULL; -#ifdef _EM64T_ - //-------------------------------- - unsigned counter = 0; - for (uint32 reg = RegName_R15; reg >= RegName_RAX ; reg--) { + for (uint32 reg = LAST_GP_REG; reg>=FIRST_GP_REG; reg--) {//push callee-save registers onto stack uint32 mask = getRegMask((RegName)reg); - if((mask & calleeSavedRegs) && (usageRegMask & mask)) counter++; - } - //-------------------------------- - for (uint32 reg = RegName_R15; reg >= RegName_RAX ; reg--) { -#else - for (uint32 reg = RegName_EDI; reg>=RegName_EAX; reg--) {//push callee-save registers onto stack -#endif - uint32 mask = getRegMask((RegName)reg); - if((mask & calleeSavedRegs) && (usageRegMask & mask)) { + if(stackInfo->icalleeMask & mask) { Inst * inst = irm.newInst(Mnemonic_PUSH, irm.getRegOpnd((RegName)reg)); if (!lastPush) lastPush = inst; @@ -287,18 +275,28 @@ void StackLayouter::createProlog() } } #ifdef _EM64T_ + unsigned counter = count_bits(stackInfo->icalleeMask); if(!(counter & 1)) { Opnd * rsp = irManager->getRegOpnd(STACK_REG); - Type* uint64_type = irManager->getTypeFromTag(Type::UInt64); - Inst* newIns = irManager->newInst(Mnemonic_SUB,rsp,irManager->newImmOpnd(uint64_type, slotSize)); + Type* int8type = irManager->getTypeFromTag(Type::Int8); + Inst* newIns = irManager->newInst(Mnemonic_SUB, rsp, irManager->newImmOpnd(int8type, slotSize)); newIns->insertAfter(entryPointInst); offset -= slotSize; stackCorrection = 1; } #endif - icalleeBase = fcalleeEnd = fcalleeBase = acalleeEnd = acalleeBase = localEnd = offset; - + fcalleeBase = offset; + // + // Calculate XMM stack to save. The spill code is generated below, + // after the stack frame get created (SUB esp, frameSize) + // + uint32 numXmmsToSave = count_bits(stackInfo->xmmcalleeMask); + uint32 xmmStackBytes = numXmmsToSave * sizeof(double); + // + offset -= xmmStackBytes; + // + icalleeBase = fcalleeEnd = localEnd = offset; IRManager::AliasRelation * relations = new(irm.getMemoryManager()) IRManager::AliasRelation[irm.getOpndCount()]; irm.getAliasRelations(relations);// retrieve relations no earlier than all memory locations are assigned @@ -346,50 +344,93 @@ void StackLayouter::createProlog() localBase = offset; - if (localEnd>localBase) { - Inst* newIns = irm.newInst(Mnemonic_SUB, irm.getRegOpnd(STACK_REG), irm.newImmOpnd(irm.getTypeManager().getInt32Type(), localEnd-localBase)); - newIns->insertAfter(lastPush ? 
lastPush : entryPointInst); + Inst* instSubEsp = NULL; + if (fcalleeBase>localBase) { + allocatedStackSizeOpnd = irm.newImmOpnd(int32type, fcalleeBase-localBase); + instSubEsp = irm.newInst(Mnemonic_SUB, irm.getRegOpnd(STACK_REG), allocatedStackSizeOpnd); + instSubEsp->insertAfter(lastPush ? lastPush : entryPointInst); } frameSize = icalleeEnd -localBase; -} + // + // Save XMMs + // + Opnd* stackReg = irManager->getRegOpnd(STACK_REG); + + Type* double_type = irManager->getTypeFromTag(Type::Double); + + uint32 xmmOff = fcalleeBase - sizeof(double); + + for (uint32 xmm = FIRST_XMM_REG; xmm <= LAST_XMM_REG; xmm++) { + uint32 mask = getRegMask((RegName)xmm); + if (!(stackInfo->xmmcalleeMask & mask)) { + continue; + } + Opnd* xmmReg = irManager->newRegOpnd(double_type, (RegName)xmm); + Opnd* spillSlot = irManager->newMemOpnd(double_type, MemOpndKind_StackAutoLayout, stackReg, xmmOff); + Inst* ii = irManager->newInst(Mnemonic_MOVQ, spillSlot, xmmReg); + ii->insertAfter(instSubEsp); + xmmOff -= sizeof(double); + } + +} void StackLayouter::createEpilog() -{ // Predeccessors of en and irm.isEpilog(en->pred) +{ + Opnd* stackReg = irManager->getRegOpnd(STACK_REG); + Type* double_type = irManager->getTypeFromTag(Type::Double); + // Predeccessors of en and irm.isEpilog(en->pred) IRManager & irm=getIRManager(); - uint32 calleeSavedRegs=irm.getCallingConvention()->getCalleeSavedRegs(OpndKind_GPReg).getMask(); const Edges& inEdges = irm.getFlowGraph()->getExitNode()->getInEdges(); - uint32 usageRegMask = irManager->getTotalRegUsage(OpndKind_GPReg); for (Edges::const_iterator ite = inEdges.begin(), ende = inEdges.end(); ite!=ende; ++ite) { Edge* edge = *ite; - if (irm.isEpilog(edge->getSourceNode())) { - Node * epilog = edge->getSourceNode(); - Inst * retInst=(Inst*)epilog->getLastInst(); - assert(retInst->hasKind(Inst::Kind_RetInst)); - if (localEnd>localBase) { - //restore stack pointer - Inst* newIns = irm.newInst(Mnemonic_ADD, irm.getRegOpnd(STACK_REG), irm.newImmOpnd(irm.getTypeManager().getInt32Type(), localEnd-localBase)); - newIns->insertBefore(retInst); - } -#ifdef _EM64T_ - for (uint32 reg = RegName_R15; reg >= RegName_RAX ; reg--) {//pop callee-save registers -#else - for (uint32 reg = RegName_EDI; reg >= RegName_EAX ; reg--) {//pop callee-save registers -#endif - uint32 mask = getRegMask((RegName)reg); - if ((mask & calleeSavedRegs) && (usageRegMask & mask)) { - Inst* newIns = irm.newInst(Mnemonic_POP, irm.getRegOpnd((RegName)reg)); - newIns->insertBefore(retInst); - } + if (!irm.isEpilog(edge->getSourceNode())) { + continue; + } + Node * epilog = edge->getSourceNode(); + Inst * retInst=(Inst*)epilog->getLastInst(); + assert(retInst->hasKind(Inst::Kind_RetInst)); + // + // Restore XMMs + // + uint32 xmmOff = fcalleeBase - sizeof(double); + + for (uint32 xmm = FIRST_XMM_REG; xmm <= LAST_XMM_REG; xmm ++) { + uint32 mask = getRegMask((RegName)xmm); + if (!(stackInfo->xmmcalleeMask & mask)) { + continue; } -#ifdef _EM64T_ - if (stackCorrection) {//restore stack pointer - Inst* newIns = irm.newInst(Mnemonic_ADD, irm.getRegOpnd(STACK_REG), irm.newImmOpnd(irm.getTypeManager().getInt32Type(), sizeof(POINTER_SIZE_INT))); + Opnd* xmmReg = irManager->newRegOpnd(double_type, (RegName)xmm); + Opnd* spillSlot = irManager->newMemOpnd(double_type, MemOpndKind_StackAutoLayout, stackReg, xmmOff); + Inst* ii = irManager->newInst(Mnemonic_MOVQ, xmmReg, spillSlot); + ii->insertBefore(retInst); + xmmOff -= sizeof(double); + } + // + // Remove allocated stack frame + // + if (allocatedStackSizeOpnd != NULL) { + 
//restore stack pointer + Inst* newIns = irm.newInst(Mnemonic_ADD, irm.getRegOpnd(STACK_REG), allocatedStackSizeOpnd); + newIns->insertBefore(retInst); + } + // + // pop callee-save registers + // + for (uint32 reg = LAST_GP_REG; reg >= FIRST_GP_REG ; reg--) { + uint32 mask = getRegMask((RegName)reg); + if (mask & stackInfo->icalleeMask) { + Inst* newIns = irm.newInst(Mnemonic_POP, irm.getRegOpnd((RegName)reg)); newIns->insertBefore(retInst); } -#endif } +#ifdef _EM64T_ + if (stackCorrection) {//restore stack pointer + Inst* newIns = irm.newInst(Mnemonic_ADD, irm.getRegOpnd(STACK_REG), irm.newImmOpnd(irm.getTypeManager().getInt32Type(), sizeof(POINTER_SIZE_INT))); + newIns->insertBefore(retInst); + } +#endif } } diff --git a/vm/jitrino/src/jet/cg_dbg.cpp b/vm/jitrino/src/jet/cg_dbg.cpp index 9d39d81..e276919 100644 --- a/vm/jitrino/src/jet/cg_dbg.cpp +++ b/vm/jitrino/src/jet/cg_dbg.cpp @@ -83,6 +83,11 @@ void __stdcall CodeGen::dbg_trace_arg(vo void CodeGen::gen_dbg_check_stack(bool start) { + if (m_infoBlock.get_bc_size() == 1 && m_bc[0] == OPCODE_RETURN && !g_jvmtiMode) { + // empty method, nothing to do; the same is in gen_return(); + return; + } + if (start) { // We store SP before a code to be checked ... st(jobj, sp, m_base, voff(m_stack.dbg_scratch())); diff --git a/vm/jitrino/src/jet/jet.cpp b/vm/jitrino/src/jet/jet.cpp index 05de246..4a01524 100644 --- a/vm/jitrino/src/jet/jet.cpp +++ b/vm/jitrino/src/jet/jet.cpp @@ -16,7 +16,6 @@ */ /** * @author Alexander Astapchuk - * @version $Revision: 1.7.12.4.2.8 $ */ /** * @file diff --git a/vm/jitrino/src/jet/rt.cpp b/vm/jitrino/src/jet/rt.cpp index 4ed8719..fda4831 100644 --- a/vm/jitrino/src/jet/rt.cpp +++ b/vm/jitrino/src/jet/rt.cpp @@ -190,8 +190,8 @@ void rt_unwind(JIT_Handle jit, Method_Ha } #else // - // Highly optimized version for IA32 - the loop of 4 callee-save - // register is unrolled, every constant value is precomputed and + // Highly optimized version for IA32 - the loop of 4 callee-save + // registers is unrolled, every constant value is precomputed and // cached. 
// unsigned map_off, mask; diff --git a/vm/jitrino/src/optimizer/deadcodeeliminator.cpp b/vm/jitrino/src/optimizer/deadcodeeliminator.cpp index 818fb6b..740afbf 100644 --- a/vm/jitrino/src/optimizer/deadcodeeliminator.cpp +++ b/vm/jitrino/src/optimizer/deadcodeeliminator.cpp @@ -525,7 +525,9 @@ markLiveInst(Inst* inst, // add instruction's sources to the work list // assert(inst); - assert(inst->getNode()); + if (inst->getNode() == NULL) { + return; + } uint32 instId = inst->getId(); assert(usedInstWidth); assert((instId >= minInstId) && (instId < maxInstId)); @@ -551,7 +553,7 @@ markLiveInst(Inst* inst, uint8 opndWidth = usesBitsOfOpnd(dstWidth, inst, i); assert(def); - assert(def->getNode()); + //assert(def->getNode()); uint32 defId = def->getId(); if (!((minInstId <= defId) && (defId < maxInstId))) { diff --git a/vm/jitrino/src/shared/PlatformDependant.h b/vm/jitrino/src/shared/PlatformDependant.h index 6391df0..d0255a9 100644 --- a/vm/jitrino/src/shared/PlatformDependant.h +++ b/vm/jitrino/src/shared/PlatformDependant.h @@ -68,7 +68,7 @@ return _copysign(1, f) < 0; } //---- - + #define snprintf _snprintf #endif #endif // _PLATFORMDEPENDANT_H_ diff --git a/vm/jitrino/src/shared/mkernel.cpp b/vm/jitrino/src/shared/mkernel.cpp index bc94159..1bd367d 100644 --- a/vm/jitrino/src/shared/mkernel.cpp +++ b/vm/jitrino/src/shared/mkernel.cpp @@ -180,14 +180,25 @@ const unsigned Runtime::num_cpus = Runti unsigned Runtime::init_num_cpus(void) { -#ifdef PLATFORM_POSIX - int num = (int)sysconf(_SC_NPROCESSORS_ONLN); - return num == -1 ? 0 : 1; -#else +#ifdef _WIN32 SYSTEM_INFO sinfo; GetSystemInfo(&sinfo); return sinfo.dwNumberOfProcessors; -#endif +#else + int num = (int)sysconf(_SC_NPROCESSORS_ONLN); + return num == -1 ? 0 : 1; +#endif +} + +unsigned Runtime::get_current_thread_id(void) +{ +#ifdef _WIN32 + DWORD dwThreadId = GetCurrentThreadId(); + return dwThreadId; +#else + pthread_t thr_id = pthread_self(); + return thr_id; +#endif } }; // ~namespace Jitrino diff --git a/vm/jitrino/src/shared/mkernel.h b/vm/jitrino/src/shared/mkernel.h index 2c573c7..3101ecd 100644 --- a/vm/jitrino/src/shared/mkernel.h +++ b/vm/jitrino/src/shared/mkernel.h @@ -84,7 +84,7 @@ namespace Jitrino { * @see AutoUnlock */ class Mutex { -#ifdef PLATFORM_POSIX +#if !defined(_WIN32) // // *nix implementation // @@ -116,8 +116,8 @@ public: void unlock(void) { pthread_mutex_unlock(&m_handle); } private: pthread_mutex_t m_handle; - -#else // not PLATFORM_POSIX + +#else // ifdef _WIN32 // // Win* implementation @@ -130,7 +130,7 @@ public: private: CRITICAL_SECTION m_cs; -#endif // ~ifdef PLATFORM_POSIX +#endif // ~if !defined(_WIN32) /** * @brief Disallows copying. */ @@ -229,6 +229,10 @@ public: * @brief Tests whether we're running in multi-CPU box. */ static bool is_smp(void) { return cpus() > 1; } + /** + * @brief Returns OS id of current thread. + */ + static unsigned get_current_thread_id(void); private: /** * @brief Number of CPUs. 
diff --git a/vm/jitrino/src/vm/VMInterface.h b/vm/jitrino/src/vm/VMInterface.h index 4cc4305..fc0bbf6 100644 --- a/vm/jitrino/src/vm/VMInterface.h +++ b/vm/jitrino/src/vm/VMInterface.h @@ -531,9 +531,12 @@ public: virtual bool mayInlineObjectSynchronization(ObjectSynchronizationInfo & syncInfo) = 0; enum VmCallingConvention { - CallingConvention_Drl = 0, + CallingConvention_Unknown, + CallingConvention_Drl, + CallingConvention_DRLFast, CallingConvention_Stdcall, CallingConvention_Cdecl, + CallingConvention_Fastcall, Num_CallingConvention }; diff --git a/vm/jitrino/src/vm/drl/DrlJITInterface.cpp b/vm/jitrino/src/vm/drl/DrlJITInterface.cpp index e277e33..9ce96b7 100644 --- a/vm/jitrino/src/vm/drl/DrlJITInterface.cpp +++ b/vm/jitrino/src/vm/drl/DrlJITInterface.cpp @@ -327,26 +327,90 @@ JITEXPORT OpenMethodExecutionParams JIT_ return compilation_capabilities; } +/** + * Internal helper, used for runtime logging. + * + * All args accept NULL-s. + * + * @param what - string to print out + * @param method - method whose fully qualified name is printed + * @param context - a context whose IP and SP get printed + * @param doEndl - if \c true, then end-of-line printed after the string + * @param userData - if not \c NULL, then printed out as a pointer + */ +static void LogIt( + const char* what, + Method_Handle method, + const JitFrameContext* context, + bool doEndl, + const void* userData = NULL, + bool showThread = false) +{ + bool logEnabled = false; +#ifdef _DEBUG + logEnabled = Log::cat_rt()->isEnabled(); +#endif + if (!logEnabled) { + return; + } + + LogStream* rt = Log::cat_rt(); + + if (showThread) { + rt->printf("thr#%5u | ", Runtime::get_current_thread_id()); + } + + // what are we doing? + if (what != NULL) { + rt->printf("%s", what); + } + if (method != NULL) { + const char* kname = class_get_name(method_get_class(method)); + const char* mname = method_get_name(method); + rt->printf("(%s.%s) ", kname, mname); + } + + // what is the context? + if (context != NULL) { +#if defined(_IPF_) + // TODO: add print code for IPF +#elif defined(_EM64T_) + Log::cat_rt()->printf("IP = %p (@%p), SP = %p ", + *context->p_rip, context->p_rip, context->rsp); +#else + Log::cat_rt()->printf("IP = %p (@%p), SP = %p ", + *context->p_eip, context->p_eip, context->esp); +#endif + } + + if (userData != NULL) { + Log::cat_rt()->printf("userData=%p ", userData); + } + + if (doEndl) { + Log::cat_rt()->out() << std::endl; + } +} + extern "C" JITEXPORT void JIT_unwind_stack_frame(JIT_Handle jit, Method_Handle method, ::JitFrameContext *context) { -#ifdef _DEBUG - if(Log::cat_rt()->isEnabled()) - Log::cat_rt()->out() << "UNWIND_STACK_FRAME(" << - class_get_name(method_get_class(method)) << "." << - method_get_name(method) << ")" << ::std::endl; -#endif + LogIt("UNWINDIND_STACK_FRAME", method, context, false, NULL, true); #ifdef USE_FAST_PATH if (isJET(jit)) { Jet::rt_unwind(jit, method, context); - return; } + else #endif - DrlVMMethodDesc methodDesc(method, jit); - Jitrino::UnwindStack(&methodDesc, context, context->is_ip_past == FALSE); + { + DrlVMMethodDesc methodDesc(method, jit); + Jitrino::UnwindStack(&methodDesc, context, context->is_ip_past == FALSE); + } + + LogIt("=>", NULL, context, true); } extern "C" @@ -355,34 +419,29 @@ JIT_get_root_set_from_stack_frame(JIT_Ha GC_Enumeration_Handle enum_handle, ::JitFrameContext *context) { -#ifdef _DEBUG - if(Log::cat_rt()->isEnabled()) - Log::cat_rt()->out() << "GET_ROOT_SET_FROM_STACK_FRAME(" << - class_get_name(method_get_class(method)) << "." 
-            method_get_name(method) << ")" << ::std::endl;
-#endif
-
+    LogIt("GET_ROOT_SET_FROM_STACK_FRAME", method, context, false, NULL, true);
 #ifdef USE_FAST_PATH
     if (isJET(jit)) {
         Jet::rt_enum(jit, method, enum_handle, context);
-        return;
     }
+    else
 #endif
-
-    DrlVMMethodDesc methodDesc(method, jit);
-    DrlVMGCInterface gcInterface(enum_handle);
-    Jitrino::GetGCRootSet(&methodDesc, &gcInterface, context,
-        context->is_ip_past == FALSE);
+    {
+        DrlVMMethodDesc methodDesc(method, jit);
+        DrlVMGCInterface gcInterface(enum_handle);
+        Jitrino::GetGCRootSet(&methodDesc, &gcInterface, context,
+            context->is_ip_past == FALSE);
+    }
+    LogIt("=>", NULL, context, true);
 }
 
 extern "C"
 JITEXPORT uint32
 JIT_get_inline_depth(JIT_Handle jit, InlineInfoPtr ptr, uint32 offset)
 {
-    if (Log::cat_rt()->isEnabled()) {
-        Log::cat_rt()->out() << "GET_INLINE_DEPTH()" << ::std::endl;
-    }
-    return Jitrino::GetInlineDepth(ptr, offset);
+    uint32 depth = Jitrino::GetInlineDepth(ptr, offset);
+    LogIt("GET_INLINE_DEPTH", NULL, NULL, true, (void*)(POINTER_SIZE_INT)depth);
+    return depth;
 }
 
 extern "C"
@@ -390,20 +449,18 @@ JITEXPORT Method_Handle
 JIT_get_inlined_method(JIT_Handle jit, InlineInfoPtr ptr, uint32 offset,
                        uint32 inline_depth)
 {
-    if (Log::cat_rt()->isEnabled()) {
-        Log::cat_rt()->out() << "GET_INLINED_METHOD()" << ::std::endl;
-    }
-    return Jitrino::GetInlinedMethod(ptr, offset, inline_depth);
+    Method_Handle method = Jitrino::GetInlinedMethod(ptr, offset, inline_depth);
+    LogIt("GET_INLINED_METHOD", method, NULL, true, (void*)(POINTER_SIZE_INT)offset);
+    return method;
 }
 
 extern "C"
 JITEXPORT uint16
 JIT_get_inlined_bc(JIT_Handle jit, InlineInfoPtr ptr, uint32 offset,
                    uint32 inline_depth)
 {
-    if (Log::cat_rt()->isEnabled()) {
-        Log::cat_rt()->out() << "GET_INLINED_BC()" << ::std::endl;
-    }
-    return Jitrino::GetInlinedBc(ptr, offset, inline_depth);
+    uint16 bc = Jitrino::GetInlinedBc(ptr, offset, inline_depth);
+    LogIt("GET_INLINED_BC", NULL, NULL, true, (void*)(POINTER_SIZE_INT)bc);
+    return bc;
 }
 
 extern "C"
@@ -437,23 +494,19 @@ JITEXPORT void
 JIT_fix_handler_context(JIT_Handle jit, Method_Handle method,
                         ::JitFrameContext *context)
 {
-#ifdef _DEBUG
-    if(Log::cat_rt()->isEnabled())
-        Log::cat_rt()->out() << "FIX_HANDLER_CONTEXT(" <<
-            class_get_name(method_get_class(method)) << "."
-            << method_get_name(method) << ")" << ::std::endl;
-#endif
-
+    LogIt("FIX_HANDLER_CONTEXT", method, context, false, NULL, true);
 #ifdef USE_FAST_PATH
     if (isJET(jit)) {
         Jet::rt_fix_handler_context(jit, method, context);
-        return;
     }
+    else
 #endif
-
-    DrlVMMethodDesc methodDesc(method, jit);
-    Jitrino::FixHandlerContext(&methodDesc, context,
-        context->is_ip_past == FALSE);
+    {
+        DrlVMMethodDesc methodDesc(method, jit);
+        Jitrino::FixHandlerContext(&methodDesc, context,
+            context->is_ip_past == FALSE);
+    }
+    LogIt("=>", NULL, context, true);
 }
 
 extern "C"
@@ -461,14 +514,22 @@ JITEXPORT void *
 JIT_get_address_of_this(JIT_Handle jit, Method_Handle method,
                         const ::JitFrameContext *context)
 {
+    void* theAddress = NULL;
+
 #ifdef USE_FAST_PATH
     if (isJET(jit)) {
-        return Jet::rt_get_address_of_this(jit, method, context);
+        theAddress = Jet::rt_get_address_of_this(jit, method, context);
    }
+    else
 #endif
-    DrlVMMethodDesc methodDesc(method, jit);
-    return Jitrino::GetAddressOfThis(&methodDesc, context,
-        context->is_ip_past == FALSE);
+    {
+        DrlVMMethodDesc methodDesc(method, jit);
+        theAddress = Jitrino::GetAddressOfThis(&methodDesc, context,
+            context->is_ip_past == FALSE);
+    }
+    LogIt("GET_ADDRESS_OF_THIS", method, context, true,
+        theAddress ? *(void**)theAddress : NULL);
+    return theAddress;
 }
 
 extern "C"
diff --git a/vm/jitrino/src/vm/drl/DrlVMInterface.cpp b/vm/jitrino/src/vm/drl/DrlVMInterface.cpp
index 97e8968..0363504 100644
--- a/vm/jitrino/src/vm/drl/DrlVMInterface.cpp
+++ b/vm/jitrino/src/vm/drl/DrlVMInterface.cpp
@@ -654,7 +654,27 @@ DrlVMCompilationInterface::setMethodHasS
     }
 }
 
-CompilationInterface::VmCallingConvention 
+CompilationInterface::VmCallingConvention
+DrlVMCompilationInterface::toVmCC(CallingConvention cc)
+{
+    switch(cc) {
+    case CC_Vm:       return CallingConvention_Drl;
+    case CC_Stdcall:  return CallingConvention_Stdcall;
+    case CC_Cdecl:    return CallingConvention_Cdecl;
+    case CC_Fastcall: return CallingConvention_Fastcall;
+    case CC_DRLFast:  return CallingConvention_DRLFast;
+    default: assert(false);
+    }
+    return CallingConvention_Unknown;
+}
+
+CompilationInterface::VmCallingConvention
+DrlVMCompilationInterface::getManagedCallingConvention(void)
+{
+    return toVmCC(vm_managed_calling_convention());
+}
+
+CompilationInterface::VmCallingConvention
 DrlVMCompilationInterface::getRuntimeHelperCallingConvention(RuntimeHelperId id) {
     switch(id) {
     case Helper_NewMultiArray:
diff --git a/vm/jitrino/src/vm/drl/DrlVMInterface.h b/vm/jitrino/src/vm/drl/DrlVMInterface.h
index b264c8c..166c37e 100644
--- a/vm/jitrino/src/vm/drl/DrlVMInterface.h
+++ b/vm/jitrino/src/vm/drl/DrlVMInterface.h
@@ -571,18 +571,14 @@ public:
 
     //
     bool mayInlineObjectSynchronization(ObjectSynchronizationInfo & syncInfo);
-    //
-    // Returns the calling convention for managed code.
-    //
-    VmCallingConvention getManagedCallingConvention() {
-        switch (vm_managed_calling_convention()) {
-        case CC_Vm:
-            return CallingConvention_Drl;
-        default:
-            assert(0);
-            return (VmCallingConvention) -1;
-        };
-    }
+    /**
+     * Returns the calling convention for managed code.
+     */
+    VmCallingConvention getManagedCallingConvention(void);
+
+    /**
+     * Returns the calling convention for the specified helper.
+     */
     VmCallingConvention getRuntimeHelperCallingConvention(RuntimeHelperId id);
 
     bool compileMethod(MethodDesc *method);
@@ -623,6 +619,7 @@ public:
     }
 
 private:
+    static VmCallingConvention toVmCC(CallingConvention cc);
    Type* getTypeFromDrlVMTypeHandle(Type_Info_Handle);
    VM_RT_SUPPORT translateHelperId(RuntimeHelperId runtimeHelperId);
    JIT_Handle getJitHandle() const;
-- 
1.4.4
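
For illustration only, and not part of the patch: a minimal sketch of how a JIT component might consume the new getManagedCallingConvention() query together with the extended VmCallingConvention enum from VMInterface.h. The helper name, the printable-name strings, and the "ci" reference are assumptions made for the example; the query is assumed to be reachable through whatever CompilationInterface implementation is in use (for instance DrlVMCompilationInterface).

    // Sketch only: map the VM-reported managed calling convention to a
    // printable name, e.g. for tracing which convention the JIT was asked
    // to use. The enumerators are the ones declared in VMInterface.h above.
    static const char* managedConventionName(CompilationInterface& ci)
    {
        switch (ci.getManagedCallingConvention()) {
        case CompilationInterface::CallingConvention_Drl:      return "drl";
        case CompilationInterface::CallingConvention_DRLFast:  return "drlfast";
        case CompilationInterface::CallingConvention_Stdcall:  return "stdcall";
        case CompilationInterface::CallingConvention_Cdecl:    return "cdecl";
        case CompilationInterface::CallingConvention_Fastcall: return "fastcall";
        default:                                               return "unknown";
        }
    }

A caller would typically invoke this once per compilation session, since the managed convention is a VM-wide property rather than a per-method one.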