Index: trunk/vm/include/jit_runtime_support.h
===================================================================
--- trunk/vm/include/jit_runtime_support.h (revision 591610)
+++ trunk/vm/include/jit_runtime_support.h (working copy)
@@ -64,8 +64,8 @@
VM_RT_NEW_VECTOR_USING_VTABLE=101,
/**
* @param The parameters are the following:
+ * \arg Vector length
* \arg Allocation_Handle for the vector class
- * \arg Vector length
*
* @return Reference to the new object.
*
@@ -92,8 +92,8 @@
VM_RT_LDC_STRING=103,
/**
* @param The parameters are the following:
+ * \arg Class handle of the class owning the const pool
* \arg Const pool index pointing to a CONSTANT_Class.
- * \arg Class handle of the class owning the const pool
*
* @return \arg Reference to the String object.
* \arg Reference to the const string represented by an entry in the
@@ -304,9 +304,9 @@
VM_RT_AASTORE=402,
/**
* @param The parameters are the following:
+ * \arg Array
+ * \arg Index
* \arg Element
- * \arg Index
- * \arg Array
*
* @return None.
*
Index: trunk/vm/jitrino/src/codegenerator/ia32/Ia32CallingConvention.h
===================================================================
--- trunk/vm/jitrino/src/codegenerator/ia32/Ia32CallingConvention.h (revision 591610)
+++ trunk/vm/jitrino/src/codegenerator/ia32/Ia32CallingConvention.h (working copy)
@@ -27,7 +27,31 @@
#include "Ia32IRConstants.h"
#include "Ia32Constraint.h"
+/**
+ * When entering a function, obey the (sp)%%4 == 0 rule.
+ */
+#define STACK_ALIGN4 (0x00000004)
+/**
+ * When entering a function, obey the (sp+8)%%16 == 0 rule
+ * (Required by Intel 64 calling convention).
+ */
+#define STACK_ALIGN_HALF16 (0x00000008)
+
+/**
+ * When entering a function, obey the (sp)%%16 == 0 rule.
+ */
+#define STACK_ALIGN16 (0x00000010)
+
+
+#ifdef _EM64T_
+ #define STACK_REG RegName_RSP
+ #define STACK_ALIGNMENT STACK_ALIGN_HALF16
+#else
+ #define STACK_REG RegName_ESP
+ #define STACK_ALIGNMENT STACK_ALIGN16
+#endif
+
namespace Jitrino
{
namespace Ia32{
@@ -85,7 +109,13 @@
/** True arguments are pushed from the last to the first, false in the other case
*/
virtual bool pushLastToFirst()const =0;
+
/**
+ * Defines the alignment of all arguments passed on the memory stack plus the return pointer.
+ */
+ virtual uint32 getAlignment()const { return 0; }
+
+ /**
* Maps a string representation of CallingConvention to the
* appropriate CallingConvention_* item.
* If cc_name is NULL, then default for this platform convention
@@ -123,14 +153,26 @@
//========================================================================================
// class DRLCallingConvention
//========================================================================================
-/** Implementation of CallingConvention for the DRL IA32 calling convention
-*/
-class DRLCallingConvention: public STDCALLCallingConvention
+
+/**
+ * Implementation of CallingConvention for the DRL IA32 calling convention
+ */
+class DRLCallingConventionIA32: public STDCALLCallingConvention
{
public:
- virtual ~DRLCallingConvention() {}
+ virtual ~DRLCallingConventionIA32() {}
virtual bool pushLastToFirst()const{ return false; }
+ virtual uint32 getAlignment()const { return STACK_ALIGNMENT; }
+};
+/**
+ * Implementation of CallingConvention for the DRL EM64T calling convention
+ */
+class DRLCallingConventionEM64T: public STDCALLCallingConvention
+{
+public:
+ virtual ~DRLCallingConventionEM64T() {}
+ virtual uint32 getAlignment()const { return STACK_ALIGN16; }
};
//========================================================================================
@@ -149,10 +191,19 @@
#endif
};
+#ifdef _EM64T_
+typedef DRLCallingConventionEM64T DRLCallingConvention;
+#else
+typedef DRLCallingConventionIA32 DRLCallingConvention;
+#endif
+
extern STDCALLCallingConvention CallingConvention_STDCALL;
extern DRLCallingConvention CallingConvention_DRL;
extern CDECLCallingConvention CallingConvention_CDECL;
}; // namespace Ia32
+
+
+
}
#endif
Index: trunk/vm/jitrino/src/codegenerator/ia32/Ia32Inst.cpp
===================================================================
--- trunk/vm/jitrino/src/codegenerator/ia32/Ia32Inst.cpp (revision 591610)
+++ trunk/vm/jitrino/src/codegenerator/ia32/Ia32Inst.cpp (working copy)
@@ -804,25 +804,39 @@
//_________________________________________________________________________________________________
void CallingConventionClient::finalizeInfos(Inst::OpndRole role, CallingConvention::ArgKind argKind)
{
- assert(callingConvention!=NULL);
+ uint32 slotNumber=0;
StlVector<CallingConvention::OpndInfo> & infos = getInfos(role);
- callingConvention->getOpndInfo(argKind, (uint32)infos.size(), infos.empty()?(CallingConvention::OpndInfo*)NULL:&infos.front());
- bool lastToFirst=callingConvention->pushLastToFirst();
- uint32 slotNumber=0;
- for (
- uint32 i=lastToFirst?0:(uint32)infos.size()-1,
- end=lastToFirst?(uint32)infos.size():(uint32)-1,
- inc=lastToFirst?1:-1;
- i!=end;
- i+=inc
- ){
- CallingConvention::OpndInfo & info=infos[i];
- for (uint32 j=0; j<info.slotCount; j++){
+ callingConvention->getOpndInfo(argKind, (uint32)infos.size(),
+ infos.empty() ? (CallingConvention::OpndInfo*)NULL : &infos.front());
+
+ for (uint32 i = 0, n = (uint32)infos.size(); i != n; i++) {
+ uint32 index = callingConvention->pushLastToFirst() ? i : (n - 1 - i);
+ CallingConvention::OpndInfo & info = infos[index];
+ for (uint32 j = 0; j < info.slotCount; j++) {
if (!info.isReg)
- info.slots[j]=0xFFFF & slotNumber++;
+ info.slots[j] = 0xFFFF & slotNumber++;
}
}
- (role==Inst::OpndRole_Def?defArgStackDepth:useArgStackDepth)=slotNumber*sizeof(POINTER_SIZE_INT);
+ unsigned stackOpndSize = slotNumber * sizeof(POINTER_SIZE_INT);
+ unsigned stackAlignmentSize = 0;
+
+ if (argKind == CallingConvention::ArgKind_InArg) {
+ // Compute stack alignment.
+ unsigned stackOnEnterSize = stackOpndSize + sizeof(POINTER_SIZE_INT);
+ unsigned alignment = callingConvention->getAlignment();
+
+ if (alignment != 0 && stackOnEnterSize & (alignment - 1)) {
+ stackAlignmentSize = alignment - (stackOnEnterSize & (alignment - 1));
+ }
+ }
+
+ stackOpndSize += stackAlignmentSize;
+
+ (role == Inst::OpndRole_Def ? defArgStackDepth : useArgStackDepth) = stackOpndSize;
+ (role == Inst::OpndRole_Def ? defArgStackDepthAlignment : useArgStackDepthAlignment) = stackAlignmentSize;
}
//_________________________________________________________________________________________________
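
Note: the new finalizeInfos() logic amounts to numbering the stack slots in push order and then padding the total so that the stack-passed arguments plus the return-address slot meet the convention's alignment. A small standalone model of that behaviour (illustrative C++ only; the names and types below are ours, not Jitrino's):

    #include <cassert>
    #include <cstddef>
    #include <vector>

    // Illustrative model of CallingConventionClient::finalizeInfos() above:
    // slots are numbered in push order, and the resulting depth is padded so
    // that (stack args + return address) is a multiple of the alignment.
    struct Info { bool isReg; unsigned slot; };

    static void finalize(std::vector<Info>& infos, bool lastToFirst,
                         unsigned alignment, unsigned& depth, unsigned& pad)
    {
        unsigned slotNumber = 0;
        for (std::size_t i = 0, n = infos.size(); i != n; i++) {
            std::size_t index = lastToFirst ? i : (n - 1 - i);
            if (!infos[index].isReg)
                infos[index].slot = slotNumber++;
        }
        const unsigned slotBytes = sizeof(void*);
        unsigned size = slotNumber * slotBytes;
        unsigned onEnter = size + slotBytes;            // + return address
        unsigned rem = alignment ? (onEnter & (alignment - 1)) : 0;
        pad = rem ? alignment - rem : 0;
        depth = size + pad;                             // what getArgStackDepth() reports
    }

    int main()
    {
        std::vector<Info> infos(3, Info());             // three stack arguments
        unsigned depth = 0, pad = 0;
        finalize(infos, /*lastToFirst=*/false, /*alignment=*/16, depth, pad);
        assert(infos[2].slot == 0 && infos[0].slot == 2);  // slot 0 goes to the last argument
        if (sizeof(void*) == 8)                            // 3*8 + 8 == 32: already 16-aligned
            assert(pad == 0 && depth == 24);
        return 0;
    }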
Index: trunk/vm/jitrino/src/codegenerator/ia32/Ia32Inst.h
===================================================================
--- trunk/vm/jitrino/src/codegenerator/ia32/Ia32Inst.h (revision 591610)
+++ trunk/vm/jitrino/src/codegenerator/ia32/Ia32Inst.h (working copy)
@@ -1217,8 +1217,11 @@
{ assert(callingConvention!=NULL); return callingConvention; }
uint32 getArgStackDepth(Inst::OpndRole role)const
- { return (role & Inst::OpndRole_UseDef)==Inst::OpndRole_Def?defArgStackDepth:useArgStackDepth; }
+ { return ((role & Inst::OpndRole_UseDef) == Inst::OpndRole_Def) ? defArgStackDepth : useArgStackDepth; }
+ uint32 getArgStackDepthAlignment(Inst::OpndRole role) const
+ { return ((role & Inst::OpndRole_UseDef) == Inst::OpndRole_Def) ? defArgStackDepthAlignment : useArgStackDepthAlignment; }
+
void setOwnerInst(Inst * oi){ ownerInst = oi; }
protected:
StlVector<CallingConvention::OpndInfo> & getInfos(Inst::OpndRole role)
@@ -1236,6 +1239,7 @@
StlVector<StackOpndInfo> useStackOpndInfos;
uint32 defArgStackDepth, useArgStackDepth;
+ uint32 defArgStackDepthAlignment, useArgStackDepthAlignment;
};
@@ -1285,6 +1289,9 @@
uint32 getArgStackDepth()const
{ return callingConventionClient.getArgStackDepth(Inst::OpndRole_Use); }
+ uint32 getArgStackDepthAlignment() const
+ { return callingConventionClient.getArgStackDepthAlignment(Inst::OpndRole_Use); }
+
CallingConventionClient& getCallingConventionClient(){ return callingConventionClient; }
const CallingConventionClient& getCallingConventionClient()const { return callingConventionClient; }
Index: trunk/vm/jitrino/src/codegenerator/ia32/Ia32InstCodeSelector.cpp
===================================================================
--- trunk/vm/jitrino/src/codegenerator/ia32/Ia32InstCodeSelector.cpp (revision 591610)
+++ trunk/vm/jitrino/src/codegenerator/ia32/Ia32InstCodeSelector.cpp (working copy)
@@ -2176,13 +2176,13 @@
if (codeSelector.methodCodeSelector.slowLdString || dstType->isSystemClass()) {
NamedType * parentType=enclosingMethod->getParentType();
#ifdef _EM64T_
- Opnd * st = irManager.getRegOpnd(RegName_RDI);
+ Opnd * tp = irManager.getRegOpnd(RegName_RDI);
+ appendInsts(irManager.newCopyPseudoInst(Mnemonic_MOV, tp,irManager.newImmOpnd(getRuntimeIdType(), Opnd::RuntimeInfo::Kind_TypeRuntimeId, parentType)));
+ Opnd * st = irManager.getRegOpnd(RegName_RSI);
appendInsts(irManager.newCopyPseudoInst(Mnemonic_MOV, st, irManager.newImmOpnd(typeManager.getInt64Type(), refToken)));
- Opnd * tp = irManager.getRegOpnd(RegName_RSI);
- appendInsts(irManager.newCopyPseudoInst(Mnemonic_MOV, tp,irManager.newImmOpnd(getRuntimeIdType(), Opnd::RuntimeInfo::Kind_TypeRuntimeId, parentType)));
Opnd * helperOpnds[] = {
- st,
- tp
+ tp,
+ st
};
#else
Opnd * helperOpnds[] = {
Index: trunk/vm/jitrino/src/codegenerator/ia32/Ia32IRManager.cpp
===================================================================
--- trunk/vm/jitrino/src/codegenerator/ia32/Ia32IRManager.cpp (revision 591610)
+++ trunk/vm/jitrino/src/codegenerator/ia32/Ia32IRManager.cpp (working copy)
@@ -1692,77 +1692,70 @@
#endif
const Nodes& nodes = fg->getNodes();
- for (Nodes::const_iterator it = nodes.begin(), end = nodes.end(); it!=end; ++it) {
+ for (Nodes::const_iterator it = nodes.begin(), end = nodes.end(); it != end; ++it) {
Node* node = *it;
if (node->isBlockNode()) {
- for (Inst * inst=(Inst*)node->getLastInst(), * prevInst=NULL; inst!=NULL; inst=prevInst) {
- prevInst=inst->getPrevInst();
+ for (Inst * inst = (Inst*)node->getLastInst(), * prevInst = NULL; inst != NULL; inst = prevInst) {
+ prevInst = inst->getPrevInst();
if (inst->getMnemonic() == Mnemonic_CALL) {
- const CallInst * callInst=(const CallInst*)inst;
+ const CallInst * callInst = (const CallInst*)inst;
+ const CallingConvention * cc =
+ callInst->getCallingConventionClient().getCallingConvention();
const StlVector<CallingConventionClient::StackOpndInfo>& stackOpndInfos =
callInst->getCallingConventionClient().getStackOpndInfos(Inst::OpndRole_Use);
- Inst * instToPrepend=inst;
+
+ Inst * instToPrepend = inst;
Opnd * const * opnds = callInst->getOpnds();
-#ifdef _EM64T_
- unsigned sz = 0;
- for (uint32 i=0, n=(uint32)stackOpndInfos.size(); i<n; i++) {
- sz += sizeof(POINTER_SIZE_INT);
+ // Align stack pointer if the call needs it.
+ if (callInst->getArgStackDepthAlignment() > 0) {
+ node->prependInst(newInst(Mnemonic_SUB, getRegOpnd(STACK_REG),
+ newImmOpnd(typeManager.getInt32Type(), callInst->getArgStackDepthAlignment())), inst);
}
- unsigned corr = 0;
- if(sz&15) {
- corr += 16-(sz&15);
+ // Put inputs on the stack.
+ for (uint32 i = 0, n = (uint32)stackOpndInfos.size(); i < n; i++) {
+ Opnd* opnd = opnds[stackOpndInfos[i].opndIndex];
+ Inst * pushInst = newCopyPseudoInst(Mnemonic_PUSH, opnd);
+ pushInst->insertBefore(instToPrepend);
+ instToPrepend = pushInst;
}
+
+ unsigned shadowSize = 0;
#ifdef _WIN64
+ // Assert that the shadow area doesn't break the stack alignment computed earlier.
+ assert((shadowSize & (STACK_ALIGNMENT - 1)) == 0);
Opnd::RuntimeInfo * rt = callInst->getRuntimeInfo();
+ bool needShadow = false;
if (rt) {
- //stack size for parameters: "number of entries is equal to 4 or the maximum number ofparameters
- //See http://msdn2.microsoft.com/en-gb/library/ms794596.aspx for details
- //shadow - is an area on stack reserved to map parameters passed with registers
- bool needShadow = rt->getKind() == Opnd::RuntimeInfo::Kind_InternalHelperAddress;
+ // Stack size for parameters: "number of entries is equal to 4 or the maximum number of parameters"
+ // See http://msdn2.microsoft.com/en-gb/library/ms794596.aspx for details.
+ // Shadow - is an area on stack reserved to map parameters passed with registers.
+ needShadow = rt->getKind() == Opnd::RuntimeInfo::Kind_InternalHelperAddress;
if (!needShadow && rt->getKind() == Opnd::RuntimeInfo::Kind_HelperAddress) {
VM_RT_SUPPORT helperId = (VM_RT_SUPPORT)(POINTER_SIZE_INT)rt->getValue(0);
- //ABOUT: VM does not allocate shadow for most of the helpers
- //however some helpers are direct pointers to native funcs
- //TODO: create VM interface to get calling conventions for the helper
- //today this knowledge is hardcoded here
+ // ABOUT: VM does not allocate shadow for most of the helpers
+ // however some helpers are direct pointers to native functions.
+ // TODO: create VM interface to get calling conventions for the helper
+ // today this knowledge is hardcoded here
needShadow = helperId == VM_RT_GC_GET_TLS_BASE;
}
- if (needShadow) {
- uint32 shadowSize = 4 * sizeof(POINTER_SIZE_INT);
- corr += shadowSize;
- }
}
-#endif
- if (corr)
- {
- node->prependInst(newInst(Mnemonic_SUB, getRegOpnd(STACK_REG), newImmOpnd(typeManager.getInt32Type(), corr)), inst);
- node->appendInst(newInst(Mnemonic_ADD, getRegOpnd(STACK_REG), newImmOpnd(typeManager.getInt32Type(), corr)), inst);
+ if (needShadow) {
+ // Arrange shadow area on the stack.
+ shadowSize = 4 * sizeof(POINTER_SIZE_INT);
+ node->prependInst(newInst(Mnemonic_SUB, getRegOpnd(STACK_REG), newImmOpnd(typeManager.getInt32Type(), shadowSize)), inst);
}
- sz = 0;
+
#endif
- for (uint32 i=0, n=(uint32)stackOpndInfos.size(); i<n; i++) {
-#ifdef _EM64T_
- uint32 index=((CallInst *)inst)->getCallingConventionClient().getCallingConvention()->pushLastToFirst()?i:n-1-i;
- Inst * pushInst=newCopyPseudoInst(Mnemonic_PUSH, opnds[stackOpndInfos[index].opndIndex]);
- sz+=sizeof(POINTER_SIZE_INT);//getByteSize(opnds[stackOpndInfos[index].opndIndex]->getSize());
-#else
- Inst * pushInst=newCopyPseudoInst(Mnemonic_PUSH, opnds[stackOpndInfos[i].opndIndex]);
-#endif
- pushInst->insertBefore(instToPrepend);
- instToPrepend=pushInst;
- }
-#ifdef _EM64T_
- if(sz && !((CallInst *)inst)->getCallingConventionClient().getCallingConvention()->calleeRestoresStack()) {
- Inst* newIns = newInst(Mnemonic_ADD, getRegOpnd(STACK_REG), newImmOpnd(typeManager.getInt32Type(), sz));
+ unsigned stackPopSize = cc->calleeRestoresStack() ? 0 : callInst->getArgStackDepth();
+ stackPopSize += shadowSize;
+ // Restore stack pointer.
+ if(stackPopSize != 0) {
+ Inst* newIns = newInst(Mnemonic_ADD, getRegOpnd(STACK_REG), newImmOpnd(typeManager.getInt32Type(), stackPopSize));
newIns->insertAfter(inst);
}
-#else
- if(!((CallInst *)inst)->getCallingConventionClient().getCallingConvention()->calleeRestoresStack()) {
- Inst* newIns = newInst(Mnemonic_ADD, getRegOpnd(RegName_ESP), newImmOpnd(typeManager.getInt32Type(), ((CallInst *)inst)->getArgStackDepth()));
- newIns->insertAfter(inst);
- }
-#endif
}
}
}
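
Note: after this change the call expansion emits, in order, an optional SUB for the alignment padding, the PUSH sequence for stack inputs, an optional SUB for the Win64 shadow area, the CALL, and a single ADD that restores the stack pointer. A condensed standalone sketch of that ordering (illustrative only; this is not the IRManager API, and the depth argument is assumed to already include the padding, as getArgStackDepth() does):

    #include <cstddef>
    #include <cstdio>
    #include <string>
    #include <vector>

    // Illustrative only: the instruction order produced around a CALL.
    static std::vector<std::string> expandCall(unsigned numStackArgs, unsigned argStackDepth,
                                               unsigned alignPad, unsigned shadow,
                                               bool calleeRestoresStack)
    {
        std::vector<std::string> seq;
        if (alignPad)
            seq.push_back("SUB sp, " + std::to_string(alignPad));   // pre-call padding
        for (unsigned i = 0; i < numStackArgs; i++)
            seq.push_back("PUSH arg" + std::to_string(i));          // stack inputs
        if (shadow)
            seq.push_back("SUB sp, " + std::to_string(shadow));     // Win64 shadow area
        seq.push_back("CALL target");
        unsigned pop = (calleeRestoresStack ? 0 : argStackDepth) + shadow;
        if (pop)
            seq.push_back("ADD sp, " + std::to_string(pop));        // restore stack pointer
        return seq;
    }

    int main()
    {
        // Three 8-byte arguments plus 8 bytes of padding (depth 32), no shadow, caller pops.
        std::vector<std::string> seq = expandCall(3, 32, 8, 0, false);
        for (std::size_t i = 0; i < seq.size(); i++)
            std::printf("%s\n", seq[i].c_str());
        return 0;
    }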
Index: trunk/vm/jitrino/src/codegenerator/ia32/Ia32IRManager.h
===================================================================
--- trunk/vm/jitrino/src/codegenerator/ia32/Ia32IRManager.h (revision 591610)
+++ trunk/vm/jitrino/src/codegenerator/ia32/Ia32IRManager.h (working copy)
@@ -37,6 +37,7 @@
#include "JITInstanceContext.h"
#include "PMFAction.h"
#include "Ia32CodeGeneratorFlags.h"
+#include "Ia32CallingConvention.h"
#include "LoopTree.h"
@@ -52,13 +53,13 @@
{
const char * newString(MemoryManager& mm, const char * str, uint32 length=EmptyUint32);
+
#ifdef _EM64T_
#define STACK_REG RegName_RSP
#else
#define STACK_REG RegName_ESP
#endif
-
//========================================================================================
// STL aux classes (need to be moved somewhere)
//========================================================================================
Index: trunk/vm/jitrino/src/codegenerator/ia32/Ia32StackLayout.cpp
===================================================================
--- trunk/vm/jitrino/src/codegenerator/ia32/Ia32StackLayout.cpp (revision 591610)
+++ trunk/vm/jitrino/src/codegenerator/ia32/Ia32StackLayout.cpp (working copy)
@@ -72,7 +72,9 @@
* | |
* | |
* | |
- * +-------------------------------+ localBase <--- "real" ESP
+ * +-------------------------------+ localBase
+ * | alignment padding |
+ * |-------------------------------+ <--- "real" ESP
* | EAX |
* | ECX |
* | EDX |
@@ -132,9 +134,6 @@
int32 inargEnd;
int32 frameSize;
uint32 outArgSize;
-#ifdef _EM64T_
- bool stackCorrection;
-#endif
StackInfo * stackInfo;
MemoryManager memoryManager;
@@ -159,9 +158,6 @@
inargEnd(0),
frameSize(0),
outArgSize(0),
-#ifdef _EM64T_
- stackCorrection(0),
-#endif
memoryManager("StackLayouter")
{
};
@@ -273,135 +269,92 @@
void StackLayouter::createProlog()
{
- IRManager & irm=getIRManager();
+ const uint32 slotSize = sizeof(POINTER_SIZE_INT);
+ const uint32 stackSizeAlignment = (STACK_ALIGNMENT == STACK_ALIGN_HALF16) ? STACK_ALIGN16 : STACK_ALIGNMENT;
+ IRManager & irm = getIRManager();
+ EntryPointPseudoInst * entryPointInst = NULL;
+ int offset = 0;
+
+ entryPointInst = irManager->getEntryPointInst();
+ assert(entryPointInst->getNode() == irManager->getFlowGraph()->getEntryNode());
- for (uint32 i = 0; i<irm.getOpndCount(); i++) {
- Opnd * opnd = irm.getOpnd(i);
- if (opnd->getRefCount() && opnd->getMemOpndKind() == MemOpndKind_StackAutoLayout) {
+ for (uint32 i = 0; i < irm.getOpndCount(); i++) {
+ Opnd * opnd = irm.getOpnd(i);
+ if (opnd->getRefCount() && opnd->getMemOpndKind() == MemOpndKind_StackAutoLayout) {
Opnd * dispOpnd=opnd->getMemOpndSubOpnd(MemOpndSubOpndKind_Displacement);
- if (dispOpnd==NULL){
- dispOpnd=irm.newImmOpnd(irm.getTypeManager().getInt32Type(), 0);
+ if (dispOpnd == NULL){
+ dispOpnd = irm.newImmOpnd(irm.getTypeManager().getInt32Type(), 0);
opnd->setMemOpndSubOpnd(MemOpndSubOpndKind_Displacement, dispOpnd);
}
dispOpnd->assignImmValue(0);
}
- }//end for
+ }
- int offset = 0;
-
- //return EIP area
+ // Return EIP area.
retEIPBase = offset;
offset += sizeof(POINTER_SIZE_INT);
-
retEIPEnd = inargBase = offset;
- uint32 slotSize=sizeof(POINTER_SIZE_INT);
- EntryPointPseudoInst * entryPointInst = irManager->getEntryPointInst();
- assert(entryPointInst->getNode()==irManager->getFlowGraph()->getEntryNode());
- if (entryPointInst) {//process entry-point instruction
+ // Assign displacements for input operands.
+ if (entryPointInst) {
const StlVector<CallingConventionClient::StackOpndInfo>& stackOpndInfos =
((const EntryPointPseudoInst*)entryPointInst)->getCallingConventionClient().getStackOpndInfos(Inst::OpndRole_Def);
- for (uint32 i=0, n=(uint32)stackOpndInfos.size(); i<n; i++) {
- uint64 argOffset =
-#ifdef _EM64T_
- entryPointInst->getCallingConventionClient().getCallingConvention()->pushLastToFirst() ?
- ((n-1)*slotSize-stackOpndInfos[i].offset):
-#endif
- stackOpndInfos[i].offset;
- Opnd * opnd=entryPointInst->getOpnd(stackOpndInfos[i].opndIndex);
+ for (uint32 i = 0, n = (uint32)stackOpndInfos.size(); i < n; i++) {
+ /*
+ uint64 argOffset = entryPointInst->getCallingConventionClient().getCallingConvention()->pushLastToFirst()
+ ? ((n - 1) * slotSize - stackOpndInfos[i].offset) : stackOpndInfos[i].offset;
+ */
+ uint64 argOffset = stackOpndInfos[i].offset;
+ Opnd * opnd = entryPointInst->getOpnd(stackOpndInfos[i].opndIndex);
Opnd * disp = opnd->getMemOpndSubOpnd(MemOpndSubOpndKind_Displacement);
- disp->assignImmValue(offset+argOffset);
+ disp->assignImmValue(offset + argOffset);
}
}
-
-
inargEnd = offset;
-
icalleeEnd = offset = 0;
uint32 calleeSavedRegs=irm.getCallingConvention()->getCalleeSavedRegs(OpndKind_GPReg).getMask();
-
uint32 usageRegMask = irManager->getTotalRegUsage(OpndKind_GPReg);
-
Inst * lastPush = NULL;
-
+
+ // Push callee-save registers onto stack.
#ifdef _EM64T_
- //--------------------------------
- unsigned counter = 0;
- for (uint32 reg = RegName_R15; reg >= RegName_RAX ; reg--) {
- uint32 mask = getRegMask((RegName)reg);
- if((mask & calleeSavedRegs) && (usageRegMask & mask)) counter++;
- }
- //--------------------------------
- for (uint32 reg = RegName_R15; reg >= RegName_RAX ; reg--) {
+ for (uint32 reg = RegName_R15; reg >= RegName_RAX; reg--) {
#else
- for (uint32 reg = RegName_EDI; reg>=RegName_EAX; reg--) {//push callee-save registers onto stack
+ for (uint32 reg = RegName_EDI; reg >= RegName_EAX; reg--) {
#endif
uint32 mask = getRegMask((RegName)reg);
- if((mask & calleeSavedRegs) && (usageRegMask & mask)) {
+ if ((mask & calleeSavedRegs) && (usageRegMask & mask)) {
Inst * inst = irm.newInst(Mnemonic_PUSH, irm.getRegOpnd((RegName)reg));
- if (!lastPush)
+ if (!lastPush) {
lastPush = inst;
+ }
inst->insertAfter(entryPointInst);
offset -= slotSize;
}
}
-#ifdef _EM64T_
- if(!(counter & 1)) {
- Opnd * rsp = irManager->getRegOpnd(STACK_REG);
- Type* uint64_type = irManager->getTypeFromTag(Type::UInt64);
- Inst* newIns = irManager->newInst(Mnemonic_SUB,rsp,irManager->newImmOpnd(uint64_type, slotSize));
- newIns->insertAfter(entryPointInst);
- offset -= slotSize;
- stackCorrection = 1;
- }
-#endif
-
icalleeBase = fcalleeEnd = fcalleeBase = acalleeEnd = acalleeBase = localEnd = offset;
-
+ // Retrieve alias relations no earlier than all memory locations have been assigned.
IRManager::AliasRelation * relations = new(irm.getMemoryManager()) IRManager::AliasRelation[irm.getOpndCount()];
- irm.getAliasRelations(relations);// retrieve relations no earlier than all memory locations are assigned
+ irm.getAliasRelations(relations);
-#ifdef _EM64T_
- //--------------------------------
- counter = 0;
- for (uint32 i = 0; i<irm.getOpndCount(); i++) {
- Opnd * opnd = irm.getOpnd(i);
- if (opnd->getRefCount() == 0)
- continue;
- if(opnd->getMemOpndKind() == MemOpndKind_StackAutoLayout) {
- Opnd * dispOpnd=opnd->getMemOpndSubOpnd(MemOpndSubOpndKind_Displacement);
- if (dispOpnd->getImmValue()==0) {
- uint32 cb=getByteSize(opnd->getSize());
- cb=(cb+slotSize-1)&~(slotSize-1);
- counter+=cb;
- }
- }
- }
- if (counter & 15) {
- offset -= 16-(counter&15);
- }
- //--------------------------------
-#endif
-
- for (uint32 i = 0; i<irm.getOpndCount(); i++) {
- Opnd * opnd = irm.getOpnd(i);
- if (opnd->getRefCount() == 0)
- continue;
- if(opnd->getMemOpndKind() == MemOpndKind_StackAutoLayout) {
- Opnd * dispOpnd=opnd->getMemOpndSubOpnd(MemOpndSubOpndKind_Displacement);
- if (dispOpnd->getImmValue()==0) {
+ for (uint32 i = 0; i < irm.getOpndCount(); i++) {
+ Opnd * opnd = irm.getOpnd(i);
+ if (opnd->getRefCount() == 0)
+ continue;
+ if (opnd->getMemOpndKind() == MemOpndKind_StackAutoLayout) {
+ Opnd * dispOpnd = opnd->getMemOpndSubOpnd(MemOpndSubOpndKind_Displacement);
+ if (dispOpnd->getImmValue() == 0) {
if (relations[opnd->getId()].outerOpnd == NULL) {
- uint32 cb=getByteSize(opnd->getSize());
- cb=(cb+slotSize-1)&~(slotSize-1);
+ uint32 cb = getByteSize(opnd->getSize());
+ cb=(cb + slotSize - 1) & ~(slotSize - 1);
offset -= cb;
dispOpnd->assignImmValue(offset);
}
@@ -409,32 +362,37 @@
}
}
+ // Align the stack pointer: the local area must preserve the alignment established at function entry.
+ offset = offset & ~(stackSizeAlignment - 1);
+
+ // Assert local area is properly aligned.
+ assert((offset & (STACK_ALIGNMENT - 1)) == 0);
+
localBase = offset;
if (localEnd>localBase) {
- Inst* newIns = irm.newInst(Mnemonic_SUB, irm.getRegOpnd(STACK_REG), irm.newImmOpnd(irm.getTypeManager().getInt32Type(), localEnd-localBase));
+ Inst* newIns = irm.newInst(Mnemonic_SUB, irm.getRegOpnd(STACK_REG), irm.newImmOpnd(irm.getTypeManager().getInt32Type(), localEnd - localBase));
newIns->insertAfter(lastPush ? lastPush : entryPointInst);
}
frameSize = icalleeEnd -localBase;
-
}
void StackLayouter::createEpilog()
{ // Predeccessors of en and irm.isEpilog(en->pred)
- IRManager & irm=getIRManager();
- uint32 calleeSavedRegs=irm.getCallingConvention()->getCalleeSavedRegs(OpndKind_GPReg).getMask();
+ IRManager & irm = getIRManager();
+ uint32 calleeSavedRegs = irm.getCallingConvention()->getCalleeSavedRegs(OpndKind_GPReg).getMask();
const Edges& inEdges = irm.getFlowGraph()->getExitNode()->getInEdges();
uint32 usageRegMask = irManager->getTotalRegUsage(OpndKind_GPReg);
for (Edges::const_iterator ite = inEdges.begin(), ende = inEdges.end(); ite!=ende; ++ite) {
Edge* edge = *ite;
if (irm.isEpilog(edge->getSourceNode())) {
Node * epilog = edge->getSourceNode();
- Inst * retInst=(Inst*)epilog->getLastInst();
+ Inst * retInst = (Inst*)epilog->getLastInst();
assert(retInst->hasKind(Inst::Kind_RetInst));
- if (localEnd>localBase) {
- //restore stack pointer
- Inst* newIns = irm.newInst(Mnemonic_ADD, irm.getRegOpnd(STACK_REG), irm.newImmOpnd(irm.getTypeManager().getInt32Type(), localEnd-localBase));
+ if (localEnd > localBase) {
+ // Restore stack pointer.
+ Inst* newIns = irm.newInst(Mnemonic_ADD, irm.getRegOpnd(STACK_REG), irm.newImmOpnd(irm.getTypeManager().getInt32Type(), localEnd - localBase));
newIns->insertBefore(retInst);
}
#ifdef _EM64T_
@@ -448,12 +406,6 @@
newIns->insertBefore(retInst);
}
}
-#ifdef _EM64T_
- if (stackCorrection) {//restore stack pointer
- Inst* newIns = irm.newInst(Mnemonic_ADD, irm.getRegOpnd(STACK_REG), irm.newImmOpnd(irm.getTypeManager().getInt32Type(), sizeof(POINTER_SIZE_INT)));
- newIns->insertBefore(retInst);
- }
-#endif
}
}
}
Index: trunk/vm/jitrino/src/codegenerator/ipf/IpfInstCodeSelector.cpp
===================================================================
--- trunk/vm/jitrino/src/codegenerator/ipf/IpfInstCodeSelector.cpp (revision 591610)
+++ trunk/vm/jitrino/src/codegenerator/ipf/IpfInstCodeSelector.cpp (working copy)
@@ -1938,8 +1938,8 @@
IPF_LOG << " newArray of " << arrayType->getElementType()->getName() << endl;
Opnd *helperArgs[] = {
- opndManager->newImm((int64) arrayType->getAllocationHandle()),
- (Opnd *)numElems
+ (Opnd *)numElems,
+ opndManager->newImm((int64) arrayType->getAllocationHandle())
};
VM_RT_SUPPORT hId = VM_RT_NEW_VECTOR_USING_VTABLE;
Index: trunk/vm/jitrino/src/jet/cg.cpp
===================================================================
--- trunk/vm/jitrino/src/jet/cg.cpp (revision 591610)
+++ trunk/vm/jitrino/src/jet/cg.cpp (working copy)
@@ -50,7 +50,7 @@
const CallSig ci_helper_o(CCONV_HELPERS, jobj);
const CallSig ci_helper_v(CCONV_HELPERS);
-const CallSig ci_helper_io(CCONV_HELPERS, i32, jobj);
+const CallSig ci_helper_oi(CCONV_HELPERS, jobj, i32);
const CallSig ci_helper_linkerr(CCONV_HELPERS, jobj, i32, i32);
void CodeGen::do_mov(const Val& dst_s, const Val& src_s, bool skipTypeCheck)
Index: trunk/vm/jitrino/src/jet/cg.h
===================================================================
--- trunk/vm/jitrino/src/jet/cg.h (revision 591610)
+++ trunk/vm/jitrino/src/jet/cg.h (working copy)
@@ -57,7 +57,7 @@
/**
- * CallSig for a VM helper that takes 2 args: (#i32, #jobj).
+ * CallSig for a VM helper that takes 2 args: (#jobj, #i32).
*/
-extern const CallSig ci_helper_io;
+extern const CallSig ci_helper_oi;
/**
* CallSig for VM helper THROW_LINKAGE_ERROR.
*/
Index: trunk/vm/jitrino/src/jet/cg_dbg.cpp
===================================================================
--- trunk/vm/jitrino/src/jet/cg_dbg.cpp (revision 591610)
+++ trunk/vm/jitrino/src/jet/cg_dbg.cpp (working copy)
@@ -83,6 +83,9 @@
void CodeGen::gen_dbg_check_stack(bool start)
{
+ if (m_infoBlock.get_bc_size() == 1 && m_bc[0] == OPCODE_RETURN) {
+ return; // empty method, nothing to do
+ }
if (start) {
// We store SP before a code to be checked ...
st(jobj, sp, m_base, voff(m_stack.dbg_scratch()));
Index: trunk/vm/jitrino/src/jet/cg_fld_arr.cpp
===================================================================
--- trunk/vm/jitrino/src/jet/cg_fld_arr.cpp (revision 591610)
+++ trunk/vm/jitrino/src/jet/cg_fld_arr.cpp (working copy)
@@ -133,13 +133,6 @@
gen_write_barrier(m_curr_inst->opcode, NULL, Opnd(0));
static const CallSig cs_aastore(CCONV_HELPERS, jobj, i32, jobj);
unsigned stackFix = gen_stack_to_args(true, cs_aastore, 0);
-#ifdef _EM64T_
- // Huh ? Do we really have another order of args here ?
- AR gr = valloc(jobj);
- mov(gr, cs_aastore.reg(0));
- mov(cs_aastore.reg(0), cs_aastore.reg(2));
- mov(cs_aastore.reg(2), gr);
-#endif
gen_call_vm(cs_aastore, rt_helper_aastore, 3);
if (stackFix != 0) {
alu(alu_sub, sp, stackFix);
@@ -245,7 +238,7 @@
Val& ref = vstack(ref_depth, true);
where = Opnd(jt, ref.reg(), fld_offset);
} else { //field is not resolved -> generate code to request offset
- static const CallSig cs_get_offset(CCONV_STDCALL, iplatf, i32, i32);
+ static const CallSig cs_get_offset(CCONV_HELPERS, iplatf, i32, i32);
gen_call_vm(cs_get_offset, rt_helper_field_get_offset_withresolve, 0, fieldOp.enclClass, fieldOp.cpIndex, fieldOp.isPut());
runlock(cs_get_offset);
rlock(gr_ret);
@@ -259,7 +252,7 @@
char * fld_addr = (char*)field_get_address(fieldOp.fld);
where = vaddr(jt, fld_addr);
} else { //field is not resolved -> generate code to request address
- static const CallSig cs_get_addr(CCONV_STDCALL, iplatf, i32, i32);
+ static const CallSig cs_get_addr(CCONV_HELPERS, iplatf, i32, i32);
gen_call_vm(cs_get_addr, rt_helper_field_get_address_withresolve, 0, fieldOp.enclClass, fieldOp.cpIndex, fieldOp.isPut());
runlock(cs_get_addr);
where = Opnd(jt, gr_ret, 0);
Index: trunk/vm/jitrino/src/jet/cg_instr.cpp
===================================================================
--- trunk/vm/jitrino/src/jet/cg_instr.cpp (revision 591610)
+++ trunk/vm/jitrino/src/jet/cg_instr.cpp (working copy)
@@ -81,12 +81,12 @@
// test [eax+rt_suspend_req_flag_offset], 0
// I don't believe this will gain any improvements for .jet, so using
// portable and 'official' way:
- gen_call_vm(cs_v, rt_helper_get_tls_base_ptr, 0);
+ gen_call_vm(platform_v, rt_helper_get_tls_base_ptr, 0);
// The address of flag is now in gr_ret
Opnd mem(i32, gr_ret, rt_suspend_req_flag_offset);
alu(alu_cmp, mem, Opnd(0));
unsigned br_off = br(z, 0, 0, taken);
- gen_call_vm_restore(false, cs_v, rt_helper_gc_safepoint, 0);
+ gen_call_vm_restore(false, helper_v, rt_helper_gc_safepoint, 0);
patch(br_off, ip());
}
Index: trunk/vm/jitrino/src/jet/cg_meth.cpp
===================================================================
--- trunk/vm/jitrino/src/jet/cg_meth.cpp (revision 591610)
+++ trunk/vm/jitrino/src/jet/cg_meth.cpp (working copy)
@@ -70,14 +70,21 @@
// Debugging things
//
- // Ensure stack is aligned properly - for _ALIGN16 only here.
- // _ALIGN_HALF16 is handled below.
- if (is_set(DBG_CHECK_STACK) && (m_ci.cc() & CCONV_STACK_ALIGN16)){
- alu(alu_test, sp, 0xF);
+ // Ensure stack is aligned properly.
+ unsigned alignment = (m_ci.cc() & CCONV_STACK_ALIGN_HALF16) ? CCONV_STACK_ALIGN16
+ : m_ci.cc() & CCONV_STACK_ALIGN_MASK;
+ if (is_set(DBG_CHECK_STACK) && alignment != 0) {
+ if (m_ci.cc() & CCONV_STACK_ALIGN_HALF16) {
+ alu(alu_sub, sp, (unsigned)STACK_SLOT_SIZE);
+ }
+ alu(alu_test, sp, (alignment - 1));
unsigned br_off = br(eq, 0, 0);
gen_dbg_rt(false, "Misaligned stack @ %s", meth_fname());
gen_brk();
patch(br_off, ip());
+ if (m_ci.cc() & CCONV_STACK_ALIGN_HALF16) {
+ alu(alu_add, sp, (unsigned)STACK_SLOT_SIZE);
+ }
}
if (is_set(DBG_BRK)) {
@@ -102,18 +109,7 @@
//
unsigned frameSize = m_stack.size();
alu(alu_sub, sp, frameSize);
-
- // Ensure stack is aligned properly - do it here for _ALIGN_HALF16,
- // as the stack must be (sp%16==0) at this point
- if (is_set(DBG_CHECK_STACK) && (m_ci.cc() & CCONV_STACK_ALIGN_HALF16)){
- assert((frameSize+8) % 16 == 0);
- alu(alu_test, sp, 0xF);
- unsigned br_off = br(eq, 0, 0);
- gen_dbg_rt(false, "Misaligned stack @ %s", meth_fname());
- gen_brk();
- patch(br_off, ip());
- }
-
+
// Lock all the args registers to avoid them to be rewritten by the
// frame setup procedures
rlock(m_ci);
@@ -533,6 +529,10 @@
else {
AR gr = gr0;
if (cs_mon.reg(0) != gr_x) {
+ if (cs_mon.size() != 0) {
+ assert(cs_mon.caller_pops());
+ alu(alu_sub, sp, cs_mon.size());
+ }
ld(jobj, cs_mon.reg(0), m_base, voff(m_stack.thiz()));
}
else {
@@ -585,6 +585,10 @@
}
AR gr = valloc(jobj);
if (cs_mon.reg(0) != gr_x) {
+ if (cs_mon.size() != 0) {
+ assert(cs_mon.caller_pops());
+ alu(alu_sub, sp, cs_mon.size());
+ }
vpark(cs_mon.reg(0));
ld(jobj, cs_mon.reg(0), m_base, voff(m_stack.thiz()));
}
@@ -603,7 +607,7 @@
if (compilation_params.exe_notify_method_exit) {
// JVMTI helper takes pointer to return value and method handle
- const CallSig cs_ti_mexit(CCONV_STDCALL, jobj, jobj);
+ const CallSig cs_ti_mexit(CCONV_HELPERS, jobj, jobj);
// The call is a bit unusual, and is processed as follows:
// we load an address of the top of the operand stack into
// a temporary register, and then pass this value as pointer
@@ -685,8 +689,11 @@
Opnd op = vstack(0, true).as_opnd();
st(jtmov(retType), op.reg(), m_base, voff(m_stack.scratch()));
ld(jobj, gtmp, m_base, voff(m_stack.scratch()));
- if (cs_trace_arg.reg(0) != gr_x) {
- assert(cs_trace_arg.size() == 0);
+ if (cs_trace_arg.reg(0) != gr_x) {
+ if (cs_trace_arg.size() != 0) {
+ assert(cs_trace_arg.caller_pops());
+ alu(alu_sub, sp, cs_trace_arg.size());
+ }
mov(cs_trace_arg.reg(0), gtmp);
}
else {
@@ -793,14 +800,14 @@
assert(m_lazy_resolution);
//1. get method address
if (opcod == OPCODE_INVOKESTATIC || opcod == OPCODE_INVOKESPECIAL) {
- static const CallSig cs_get_is_addr(CCONV_STDCALL, iplatf, i32);
+ static const CallSig cs_get_is_addr(CCONV_HELPERS, iplatf, i32);
char* helper = opcod == OPCODE_INVOKESTATIC ? rt_helper_get_invokestatic_addr_withresolve :
rt_helper_get_invokespecial_addr_withresolve;
gen_call_vm(cs_get_is_addr, helper, 0, m_klass, cpIndex);
runlock(cs_get_is_addr);
} else {
assert(opcod == OPCODE_INVOKEVIRTUAL || opcod == OPCODE_INVOKEINTERFACE);
- static const CallSig cs_get_iv_addr(CCONV_STDCALL, iplatf, i32, jobj);
+ static const CallSig cs_get_iv_addr(CCONV_HELPERS, iplatf, i32, jobj);
char * helper = opcod == OPCODE_INVOKEVIRTUAL ? rt_helper_get_invokevirtual_addr_withresolve :
rt_helper_get_invokeinterface_addr_withresolve;
// setup constant parameters first,
@@ -825,7 +832,7 @@
else if (opcod == OPCODE_INVOKEINTERFACE) {
// if it's INVOKEINTERFACE, then first resolve it
Class_Handle klass = method_get_class(meth);
- const CallSig cs_vtbl(CCONV_STDCALL, jobj, jobj);
+ const CallSig cs_vtbl(CCONV_HELPERS, jobj, jobj);
// Prepare args for ldInterface helper
if (cs_vtbl.reg(0) == gr_x) {
assert(cs_vtbl.size() != 0);
@@ -833,7 +840,10 @@
st(jobj, thiz.reg(), sp, cs_vtbl.off(0));
}
else {
- assert(cs_vtbl.size() == 0);
+ if (cs_vtbl.size() != 0) {
+ assert(cs_vtbl.caller_pops());
+ alu(alu_sub, sp, cs_vtbl.size());
+ }
mov(cs_vtbl.get(0), thiz.as_opnd());
}
gen_call_vm(cs_vtbl, rt_helper_get_vtable, 1, klass);
@@ -975,7 +985,9 @@
Opnd tmp(jt, gtmp);
mov(tmp, Opnd(jt, gr_ret));
if (cs_trace_arg.reg(0) != gr_x) {
- assert(cs_trace_arg.size() == 0);
+ if (cs_trace_arg.size() != 0) {
+ alu(alu_sub, sp, cs_trace_arg.size());
+ }
mov(cs_trace_arg.reg(0), gtmp);
}
else {
Index: trunk/vm/jitrino/src/jet/cg_obj.cpp
===================================================================
--- trunk/vm/jitrino/src/jet/cg_obj.cpp (revision 591610)
+++ trunk/vm/jitrino/src/jet/cg_obj.cpp (working copy)
@@ -55,7 +55,7 @@
return;
}
assert(lazy);
- static const CallSig cs_newarray_withresolve(CCONV_STDCALL, iplatf, i32, i32);
+ static const CallSig cs_newarray_withresolve(CCONV_HELPERS, iplatf, i32, i32);
Val sizeVal = vstack(0);
// setup constant parameters first,
Val vclass(iplatf, enclClass);
@@ -82,22 +82,11 @@
gen_call_throw(ci_helper_linkerr, rt_helper_throw_linking_exc, 0,
m_klass, jinst.op0, jinst.opcode);
}
-#ifdef _EM64T_
static const CallSig cs_new_arr(CCONV_HELPERS, i32, jobj);
unsigned stackFix = gen_stack_to_args(true, cs_new_arr, 0, 1);
gen_call_vm(cs_new_arr, rt_helper_new_array, 1, ah);
runlock(cs_new_arr);
-#else
- static const CallSig cs_new_arr(CCONV_HELPERS, jobj, i32);
- rlock(cs_new_arr);
- AR artmp = valloc(jobj);
- rlock(artmp);
- Encoder::gen_args(cs_new_arr, artmp, 0, 1, ah);
- unsigned stackFix = gen_stack_to_args(true, cs_new_arr, 1, 1);
- runlock(artmp);
- gen_call_vm(cs_new_arr, rt_helper_new_array, cs_new_arr.count());
- runlock(cs_new_arr);
-#endif
+
if (stackFix != 0) {
alu(alu_sub, sp, stackFix);
}
@@ -129,7 +118,7 @@
bool resolve = !lazy || class_is_cp_entry_resolved(m_compileHandle, enclClass, cpIndex);
if(!resolve) {
assert(lazy);
- static CallSig ci_get_class_withresolve(CCONV_STDCALL, iplatf, i32);
+ static CallSig ci_get_class_withresolve(CCONV_HELPERS, iplatf, i32);
gen_call_vm(ci_get_class_withresolve, rt_helper_get_class_withresolve, 0, enclClass, cpIndex);
runlock(ci_get_class_withresolve);
klassVal = Val(jobj, gr_ret);
@@ -179,12 +168,12 @@
}
unsigned size = class_get_boxed_data_size(klass);
Allocation_Handle ah = class_get_allocation_handle(klass);
- static CallSig ci_new(CCONV_STDCALL, i32, jobj);
+ static CallSig ci_new(CCONV_HELPERS, i32, jobj);
gen_call_vm(ci_new, rt_helper_new, 0, size, ah);
}
} else {
assert(lazy);
- static CallSig ci_new_with_resolve(CCONV_STDCALL, iplatf, i32);
+ static CallSig ci_new_with_resolve(CCONV_HELPERS, iplatf, i32);
gen_call_vm(ci_new_with_resolve, rt_helper_new_withresolve, 0, enclClass, cpIndex);
}
gen_save_ret(jobj);
@@ -204,7 +193,7 @@
assert(!lazy);
gen_call_throw(ci_helper_linkerr, rt_helper_throw_linking_exc, 0, enclClass, cpIdx, opcode);
}
- static const CallSig cs(CCONV_STDCALL, jobj, jobj);
+ static const CallSig cs(CCONV_HELPERS, jobj, jobj);
unsigned stackFix = gen_stack_to_args(true, cs, 0, 1);
char * helper = opcode == OPCODE_CHECKCAST ? rt_helper_checkcast : rt_helper_instanceof;
gen_call_vm(cs, helper, 1, klass);
@@ -215,7 +204,7 @@
gen_save_ret(opcode == OPCODE_CHECKCAST ? jobj : i32);
} else {
assert(lazy);
- static const CallSig cs_with_resolve(CCONV_STDCALL, iplatf, i32, jobj);
+ static const CallSig cs_with_resolve(CCONV_HELPERS, iplatf, i32, jobj);
char * helper = opcode == OPCODE_CHECKCAST ? rt_helper_checkcast_withresolve : rt_helper_instanceof_withresolve;
Val tos = vstack(0);
// setup constant parameters first,
@@ -233,7 +222,7 @@
{
const JInst& jinst = *m_curr_inst;
gen_check_null(0);
- static const CallSig cs_mon(CCONV_MANAGED, jobj);
+ static const CallSig cs_mon(CCONV_HELPERS, jobj);
unsigned stackFix = gen_stack_to_args(true, cs_mon, 0);
gen_call_vm(cs_mon,
jinst.opcode == OPCODE_MONITORENTER ?
@@ -246,7 +235,7 @@
void CodeGen::gen_athrow(void)
{
- static const CallSig cs_throw(CCONV_MANAGED, jobj);
+ static const CallSig cs_throw(CCONV_HELPERS, jobj);
unsigned stackFix = gen_stack_to_args(true, cs_throw, 0);
gen_call_vm(cs_throw, rt_helper_throw, 1);
runlock(cs_throw);
Index: trunk/vm/jitrino/src/jet/cg_stk.cpp
===================================================================
--- trunk/vm/jitrino/src/jet/cg_stk.cpp (revision 591610)
+++ trunk/vm/jitrino/src/jet/cg_stk.cpp (working copy)
@@ -67,8 +67,7 @@
return;
}
assert(m_curr_inst->opcode != OPCODE_LDC2_W);
- gen_call_vm(ci_helper_io, rt_helper_ldc_string, 0, m_curr_inst->op0,
- m_klass);
+ gen_call_vm(ci_helper_oi, rt_helper_ldc_string, 0, m_klass, m_curr_inst->op0);
gen_save_ret(jobj);
vstack(0).set(VA_NZ);
m_bbstate->seen_gcpt = true;
Index: trunk/vm/jitrino/src/jet/csig.cpp
===================================================================
--- trunk/vm/jitrino/src/jet/csig.cpp (revision 591610)
+++ trunk/vm/jitrino/src/jet/csig.cpp (working copy)
@@ -29,15 +29,11 @@
namespace Jitrino {
namespace Jet {
-const CallSig cs_v(CCONV_STDCALL);
+const CallSig helper_v(CCONV_HELPERS);
+const CallSig platform_v(CCONV_PLATFORM);
void CallSig::init(void)
{
- // can't have stack alignment in calling convention when callee pops
- // so if any of ALIGN set, then CALLER_POPS must also be set.
- assert( !(m_cc&(CCONV_STACK_ALIGN16|CCONV_STACK_ALIGN_HALF16)) ||
- (m_cc&CCONV_CALLER_POPS));
-
unsigned num = (unsigned)m_args.size();
m_data.resize(num);
unsigned fps = 0, gps = 0;
@@ -90,11 +86,18 @@
}
}
m_stack = -off;
- // Do alignment
- if (m_stack != 0 &&
- (m_cc & (CCONV_STACK_ALIGN16|CCONV_STACK_ALIGN_HALF16))) {
- m_stack = (m_stack+15) & 0xFFFFFFF0;
+ m_alignment = 0;
+
+ unsigned stack_on_enter_size = m_stack + sizeof(POINTER_SIZE_INT);
+ // Do alignment.
+ unsigned alignment = (m_cc & CCONV_STACK_ALIGN_HALF16) ? CCONV_STACK_ALIGN16
+ : m_cc & CCONV_STACK_ALIGN_MASK;
+ if (alignment != 0 && stack_on_enter_size & (alignment - 1)) {
+ unsigned stack_on_enter_aligned =
+ (stack_on_enter_size + (alignment - 1)) & ~((alignment - 1));
+ m_alignment = stack_on_enter_aligned - stack_on_enter_size;
}
+ m_stack += m_alignment;
}
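
Note: CallSig::init() above maps the CCONV_STACK_ALIGN_* flag to a byte granularity (HALF16 still rounds to 16 because the extra slot is modelled separately) and pads the argument area so that arguments plus the return address hit that granularity. A standalone sketch of the arithmetic (our helper name, not the jet encoder's):

    #include <cassert>

    // Flag values mirroring csig.h after this patch.
    enum {
        ALIGN4       = 0x04,
        ALIGN_HALF16 = 0x08,
        ALIGN16      = 0x10,
        ALIGN_MASK   = ALIGN4 | ALIGN_HALF16 | ALIGN16
    };

    // Padding added to the stack argument area, the value kept in m_alignment.
    static unsigned callSigAlignment(unsigned cc, unsigned stackBytes, unsigned slotBytes)
    {
        unsigned alignment = (cc & ALIGN_HALF16) ? ALIGN16 : (cc & ALIGN_MASK);
        unsigned onEnter = stackBytes + slotBytes;          // args + return address
        if (alignment == 0 || (onEnter & (alignment - 1)) == 0)
            return 0;
        unsigned aligned = (onEnter + alignment - 1) & ~(alignment - 1);
        return aligned - onEnter;
    }

    int main()
    {
        assert(callSigAlignment(ALIGN16,      20, 4) == 8); // IA-32: 24 rounds up to 32
        assert(callSigAlignment(ALIGN_HALF16,  8, 8) == 0); // EM64T: 16 already fits
        assert(callSigAlignment(0,            20, 4) == 0); // unconstrained convention
        return 0;
    }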
Index: trunk/vm/jitrino/src/jet/csig.h
===================================================================
--- trunk/vm/jitrino/src/jet/csig.h (revision 591610)
+++ trunk/vm/jitrino/src/jet/csig.h (working copy)
@@ -53,9 +53,14 @@
/**
* @brief All args go though memory.
*/
-#define CCONV_MEM (0x00000004)
+#define CCONV_MEM (0x00000020)
/**
+ * @brief When entering a function, obey the (sp)%%4 == 0 rule.
+ */
+#define CCONV_STACK_ALIGN4 (0x00000004)
+
+/**
* @brief When entering a function, obey the (sp+8)%%16 == 0 rule (Intel 64
* convention).
*/
@@ -67,6 +72,12 @@
#define CCONV_STACK_ALIGN16 (0x00000010)
/**
+ * Mask to extract the stack alignment from a calling convention.
+ */
+#define CCONV_STACK_ALIGN_MASK (CCONV_STACK_ALIGN4 | CCONV_STACK_ALIGN_HALF16 | CCONV_STACK_ALIGN16)
+
+
+/**
* @brief IA-32's stdcall convention.
*/
#define CCONV_STDCALL_IA32 (CCONV_MEM)
@@ -74,7 +85,7 @@
/**
* @brief IA-32's cdecl convention.
*/
-#define CCONV_CDECL_IA32 (CCONV_CALLER_POPS|CCONV_MEM)
+#define CCONV_CDECL_IA32 (CCONV_CALLER_POPS | CCONV_MEM)
#ifdef _EM64T_
/**
@@ -89,6 +100,7 @@
* @brief On IA-32 it's CCONV_CDECL_IA32, on EM64T it's CCONV_EM64T.
*/
#define CCONV_CDECL CCONV_EM64T
+ #define CCONV_PLATFORM CCONV_EM64T
#ifdef _WIN32
/// A nubmer of FR registers dedicated to pass float-point arguments.
#define MAX_FR_ARGS (4)
@@ -98,13 +110,14 @@
#else
#define CCONV_STDCALL CCONV_STDCALL_IA32
#define CCONV_CDECL CCONV_CDECL_IA32
+ #define CCONV_PLATFORM CCONV_CDECL
#define MAX_FR_ARGS (0)
#endif
/**
* @brief IA-32's DRLVM's convention for managed code.
*/
-#define CCONV_MANAGED_IA32 (CCONV_L2R | CCONV_MEM)
+#define CCONV_MANAGED_IA32 (CCONV_L2R | CCONV_MEM | CCONV_STACK_ALIGN16)
/**
* @brief A special case - VM's helper MULTIANEWARRAY always has cdecl-like
* convention.
@@ -116,15 +129,12 @@
* @brief On IA-32 it's CCONV_MANAGED_IA32, on EM64T it's CCONV_EM64T.
*/
#define CCONV_MANAGED CCONV_EM64T
- /**
- * @brief On IA-32 it's CCONV_MANAGED_IA32, on EM64T it's CCONV_EM64T.
- */
- #define CCONV_HELPERS CCONV_EM64T
#else
#define CCONV_MANAGED CCONV_MANAGED_IA32
- #define CCONV_HELPERS CCONV_MANAGED_IA32 //CCONV_STDCALL
#endif
+#define CCONV_HELPERS CCONV_STDCALL
+
///@} // ~JET_CCONV
@@ -257,6 +267,13 @@
}
/**
+ * @brief Returns size (in bytes) of padding area to achieve proper alignment.
+ */
+ unsigned alignment() const {
+ return m_alignment;
+ }
+
+ /**
* @brief Returns size (in bytes) of the stack size needed to pass args
* that go through the stack.
*/
@@ -366,7 +383,12 @@
* This implies presumption that a valid AR is alwys > 0.
*/
::std::vector<int> m_data;
+
/**
+ * @brief Size (in bytes) of the padding area added to achieve proper stack alignment.
+ */
+ unsigned m_alignment;
+ /**
* @brief Size (in bytes) of stack frame needed to pass arguments.
*
* ... with all the alignment properties taken into account.
@@ -382,7 +404,8 @@
/**
- * @brief CallSig for stdcall function that takes no args.
+ * @brief CallSigs for no-argument calls with the helper and the platform conventions.
*/
-extern const CallSig cs_v;
+extern const CallSig helper_v;
+extern const CallSig platform_v;
}}; // ~namespace Jitrino::Jet
Index: trunk/vm/jitrino/src/jet/enc.cpp
===================================================================
--- trunk/vm/jitrino/src/jet/enc.cpp (revision 591610)
+++ trunk/vm/jitrino/src/jet/enc.cpp (working copy)
@@ -517,19 +517,24 @@
if (is_trace_on()) {
trace("call", to_str(target), "");
}
- if (check_stack &&
- (ci.cc() & (CCONV_STACK_ALIGN_HALF16|CCONV_STACK_ALIGN16))) {
- if (ci.cc() & CCONV_STACK_ALIGN16) {
- alu(alu_sub, sp, 8);
+
+ unsigned alignment = (ci.cc() & CCONV_STACK_ALIGN_HALF16) ? CCONV_STACK_ALIGN16
+ : ci.cc() & CCONV_STACK_ALIGN_MASK;
+ if (check_stack && alignment != 0) {
+ alu(alu_sub, sp, (unsigned)STACK_SLOT_SIZE);
+ if (ci.cc() & CCONV_STACK_ALIGN_HALF16) {
+ alu(alu_sub, sp, (unsigned)STACK_SLOT_SIZE);
}
- alu(alu_test, sp, 0x0F);
+ alu(alu_test, sp, (alignment - 1));
unsigned br_off = br(z, 0, 0, taken);
trap();
- if (ci.cc() & CCONV_STACK_ALIGN16) {
- alu(alu_add, sp, 8);
+ patch(br_off, ip());
+ if (ci.cc() & CCONV_STACK_ALIGN_HALF16) {
+ alu(alu_add, sp, (unsigned)STACK_SLOT_SIZE);
}
- patch(br_off, ip());
+ alu(alu_add, sp, (unsigned)STACK_SLOT_SIZE);
}
+
call_impl(target);
if (ci.caller_pops() && ci.size() != 0) {
alu(alu_add, sp, ci.size());
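
Note: the reworked debug probe runs before the CALL instruction pushes the return address, so it temporarily subtracts one stack slot (and a second one for the (sp + 8) % 16 conventions) before testing the low bits of sp. A standalone sketch of the condition it effectively checks (illustrative arithmetic only):

    #include <cassert>

    // Illustrative only: what the probe verifies for a caller whose stack pointer
    // is spBeforeCall at the CALL site.
    static bool probePasses(unsigned long spBeforeCall, unsigned slot,
                            unsigned alignment, bool half16)
    {
        unsigned long probed = spBeforeCall - slot;   // model the pushed return address
        if (half16)
            probed -= slot;                           // model the (sp + 8) % 16 == 0 rule
        return (probed & (alignment - 1)) == 0;
    }

    int main()
    {
        // EM64T helper convention: 8-byte slots, (sp + 8) % 16 == 0 at callee entry.
        assert(probePasses(0x7fff0010UL, 8, 16, true));
        // IA-32 managed convention after this patch: 4-byte slots, 16-byte alignment.
        assert(!probePasses(0x00ca0018UL, 4, 16, false));
        return 0;
    }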
Index: trunk/vm/jitrino/src/jet/sframe.h
===================================================================
--- trunk/vm/jitrino/src/jet/sframe.h (revision 591610)
+++ trunk/vm/jitrino/src/jet/sframe.h (working copy)
@@ -247,15 +247,10 @@
unsigned size(void) const
{
unsigned sz = -unused();
- if (CCONV_MANAGED & CCONV_STACK_ALIGN16) {
- return ((sz + 15) & ~0xF);
- }
- else if (CCONV_MANAGED & CCONV_STACK_ALIGN_HALF16) {
- return ((sz + 15) & ~0xF) + 8;
- }
- else {
- return ((sz + 3) & ~0x03);
- }
+ unsigned alignment = (CCONV_MANAGED & CCONV_STACK_ALIGN_HALF16) ? CCONV_STACK_ALIGN16
+ : CCONV_MANAGED & CCONV_STACK_ALIGN_MASK;
+ alignment = (alignment == 0) ? CCONV_STACK_ALIGN4 : alignment;
+ return ((sz + (alignment - 1)) & ~(alignment - 1));
}
//
Index: trunk/vm/port/include/lil.h
===================================================================
--- trunk/vm/port/include/lil.h (revision 591610)
+++ trunk/vm/port/include/lil.h (working copy)
@@ -108,10 +108,6 @@
rth = calling convention used to call runtime helpers
stdcall = Windows's stdcall calling convention
-20021204: The rth calling convention should be used for all runtime helpers. Unfortunately, on IA32 we are inconsistent in the calling
- conventions used to call runtime helpers - some use stdcall, some use the managed code conventions. So for now, rth is to be
- used for stdcall helpers, managed should be used for the rest. In the future we can harmonise this and correct the problem.
-
l is a label (sequence of alpha, digit, _, starting with alpha or _)
v is a variable, one of: i0, i1, ... (input variables), sp0, sp1, ... (standard places), l0, l1, ... (local variables),
o0, o1, ... (output variables), r (return variable)
Index: trunk/vm/port/src/lil/ia32/pim/lil_code_generator_ia32.cpp
===================================================================
--- trunk/vm/port/src/lil/ia32/pim/lil_code_generator_ia32.cpp (revision 591610)
+++ trunk/vm/port/src/lil/ia32/pim/lil_code_generator_ia32.cpp (working copy)
@@ -34,6 +34,7 @@
#include "m2n_ia32_internal.h"
#include "vm_threads.h"
#include "encoder.h"
+#include "jit_runtime_support_common.h"
// Strategy:
// Up to 2 standard places
@@ -1399,11 +1400,19 @@
{
adjust_stack_for_return();
unsigned sz = sig_size_on_stack(ctxt.entry_sig);
- if (ctxt.entry_cc.callee_pop && sz) {
- *buf = ::ret(*buf, Imm_Opnd(sz));
- } else {
- *buf = ::ret(*buf);
+
+ if (ctxt.entry_cc.callee_pop) {
+ if (lil_sig_get_cc(ctxt.entry_sig) == LCC_Managed) {
+ sz += sizeof(POINTER_SIZE_INT);
+ sz = (sz + (MANAGED_STACK_ALIGNMENT - 1)) & ~(MANAGED_STACK_ALIGNMENT - 1);
+ sz -= sizeof(POINTER_SIZE_INT);
+ }
+ if (sz != 0) {
+ *buf = ::ret(*buf, Imm_Opnd(sz));
+ return;
+ }
}
+ *buf = ::ret(*buf);
}
void push_m2n(Method_Handle method, frame_type current_frame_type, bool handles)
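
Note: the epilogue change above makes a callee-pops (managed) stub remove the padded argument area with RET imm16: the raw argument size is extended by the return-address slot, rounded up to MANAGED_STACK_ALIGNMENT, and the slot is subtracted again because RET pops it anyway. A standalone sketch of that rounding (illustrative, IA-32 values assumed):

    #include <cassert>

    // Illustrative only: bytes popped by "ret imm16" for a managed callee whose
    // caller padded (args + return address) to 'alignment' bytes.
    static unsigned managedRetPopBytes(unsigned rawArgBytes, unsigned slot, unsigned alignment)
    {
        unsigned sz = rawArgBytes + slot;                 // include the return address
        sz = (sz + alignment - 1) & ~(alignment - 1);     // round up to the alignment
        return sz - slot;                                 // ret pops the return address itself
    }

    int main()
    {
        assert(managedRetPopBytes( 4, 4, 16) == 12);      // 8 rounds up to 16
        assert(managedRetPopBytes(12, 4, 16) == 12);      // 16 needs no padding
        assert(managedRetPopBytes( 0, 4, 16) == 12);      // caller still padded by 12
        return 0;
    }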
Index: trunk/vm/vmcore/include/jit_runtime_support_common.h
===================================================================
--- trunk/vm/vmcore/include/jit_runtime_support_common.h (revision 591610)
+++ trunk/vm/vmcore/include/jit_runtime_support_common.h (working copy)
@@ -30,12 +30,34 @@
#include "platform_lowlevel.h"
#include "heap.h"
+/**
+ * When entering a managed function, obey the (sp)%%4 == 0 rule.
+ */
+#define STACK_ALIGN4 (0x00000004)
+
+/**
+ * When entering a managed function, obey the (sp + 8)%%16 == 0 rule (Intel 64 calling convention).
+ */
+#define STACK_ALIGN_HALF16 (0x00000008)
+
+/**
+ * When entering a managed function, obey the (sp)%%16 == 0 rule.
+ */
+#define STACK_ALIGN16 (0x00000010)
+
+
+#ifdef _EM64T_
+ #define MANAGED_STACK_ALIGNMENT STACK_ALIGN_HALF16
+#else
+ #define MANAGED_STACK_ALIGNMENT STACK_ALIGN16
+#endif
+
VMEXPORT // temporary solution for interpreter unplug
int __stdcall vm_instanceof(ManagedObject *obj, Class *c);
// Implements VM_RT_AASTORE
void * __stdcall
-vm_rt_aastore(ManagedObject *elem, int idx, Vector_Handle array);
+vm_rt_aastore(Vector_Handle array, int idx, ManagedObject *elem);
// Implements VM_RT_AASTORE_TEST
int __stdcall
Index: trunk/vm/vmcore/src/class_support/Prepare.cpp
===================================================================
--- trunk/vm/vmcore/src/class_support/Prepare.cpp (revision 591610)
+++ trunk/vm/vmcore/src/class_support/Prepare.cpp (working copy)
@@ -1027,7 +1027,7 @@
NativeCodePtr addr = NULL;
void (*p_throw_ame)(Class_Handle, Method_Handle) =
prepare_throw_abstract_method_error;
- LilCodeStub* cs = lil_parse_code_stub("entry 0:rth::void;"
+ LilCodeStub* cs = lil_parse_code_stub("entry 0:stdcall::void;"
"push_m2n 0, 0;"
"m2n_save_all;"
"out platform:pint,pint:void;"
@@ -1066,7 +1066,7 @@
NativeCodePtr addr = NULL;
void (*p_throw_iae)(Class_Handle, Method_Handle) =
prepare_throw_illegal_access_error;
- LilCodeStub* cs = lil_parse_code_stub("entry 0:rth::void;"
+ LilCodeStub* cs = lil_parse_code_stub("entry 0:stdcall::void;"
"push_m2n 0, 0;"
"m2n_save_all;"
"out platform:pint,pint:void;"
Index: trunk/vm/vmcore/src/exception/exceptions_jit.cpp
===================================================================
--- trunk/vm/vmcore/src/exception/exceptions_jit.cpp (revision 591610)
+++ trunk/vm/vmcore/src/exception/exceptions_jit.cpp (working copy)
@@ -690,7 +690,7 @@
return addr;
}
- LilCodeStub *cs = lil_parse_code_stub("entry 0:managed:ref:void;"
+ LilCodeStub *cs = lil_parse_code_stub("entry 0:stdcall:ref:void;"
"push_m2n 0, 0;"
"m2n_save_all;" "out platform:ref,pint,pint,pint:void;");
assert(cs);
@@ -799,14 +799,14 @@
const unsigned cap_and_size = (unsigned)((0<<16) | 16);
#ifdef _IPF_
- LilCodeStub *cs = lil_parse_code_stub("entry 1:managed::void;"
+ LilCodeStub *cs = lil_parse_code_stub("entry 1:stdcall::void;"
"push_m2n 0, 0, handles;"
"m2n_save_all;"
"out platform:ref,pint,pint,pint:void;"
"o0=0:ref;" "o1=sp0;" "o2=0;" "o3=0;" "call.noret %0i;",
exn_athrow);
#else
- LilCodeStub *cs = lil_parse_code_stub("entry 1:managed::void;"
+ LilCodeStub *cs = lil_parse_code_stub("entry 1:stdcall::void;"
"push_m2n 0, 0, handles;"
"m2n_save_all;"
"locals 1;"
@@ -842,7 +842,7 @@
Class *exn_clss =
VM_Global_State::loader_env->java_lang_NullPointerException_Class;
LilCodeStub *cs =
- lil_parse_code_stub("entry 0:managed::void;" "std_places 1;"
+ lil_parse_code_stub("entry 0:stdcall::void;" "std_places 1;"
"sp0=%0i;" "tailcall %1i;",
exn_clss,
lil_npc_to_fp(exn_get_rth_throw_lazy_trampoline()));
@@ -883,7 +883,7 @@
return addr;
}
- LilCodeStub *cs = lil_parse_code_stub("entry 0:managed::void;"
+ LilCodeStub *cs = lil_parse_code_stub("entry 0:stdcall::void;"
"std_places 1;" "sp0=%0i;" "tailcall %1i;",
exn_get_illegal_monitor_state_exception_type(),
lil_npc_to_fp(exn_get_rth_throw_lazy_trampoline())
@@ -910,7 +910,7 @@
Global_Env *env = VM_Global_State::loader_env;
Class *exn_clss = env->java_lang_ArrayIndexOutOfBoundsException_Class;
- LilCodeStub *cs = lil_parse_code_stub("entry 0:managed::void;"
+ LilCodeStub *cs = lil_parse_code_stub("entry 0:stdcall::void;"
"std_places 1;" "sp0=%0i;" "tailcall %1i;",
exn_clss,
lil_npc_to_fp(exn_get_rth_throw_lazy_trampoline()));
@@ -950,7 +950,7 @@
return addr;
}
- LilCodeStub *cs = lil_parse_code_stub("entry 0:managed::void;"
+ LilCodeStub *cs = lil_parse_code_stub("entry 0:stdcall::void;"
"std_places 1;" "sp0=%0i;" "tailcall %1i;",
exn_get_negative_array_size_exception_type(),
lil_npc_to_fp(exn_get_rth_throw_lazy_trampoline()));
@@ -990,7 +990,7 @@
return addr;
}
- LilCodeStub *cs = lil_parse_code_stub("entry 0:managed::void;"
+ LilCodeStub *cs = lil_parse_code_stub("entry 0:stdcall::void;"
"std_places 1;" "sp0=%0i;" "tailcall %1i;",
exn_get_illegal_state_exception_type(),
lil_npc_to_fp(exn_get_rth_throw_lazy_trampoline()));
@@ -1013,7 +1013,7 @@
}
Global_Env *env = VM_Global_State::loader_env;
- LilCodeStub *cs = lil_parse_code_stub("entry 0:managed::void;"
+ LilCodeStub *cs = lil_parse_code_stub("entry 0:stdcall::void;"
"std_places 1;" "sp0=%0i;" "tailcall %1i;",
env->java_lang_ArrayStoreException_Class,
lil_npc_to_fp(exn_get_rth_throw_lazy_trampoline()));
@@ -1037,7 +1037,7 @@
}
Global_Env *env = VM_Global_State::loader_env;
- LilCodeStub *cs = lil_parse_code_stub("entry 0:managed::void;"
+ LilCodeStub *cs = lil_parse_code_stub("entry 0:stdcall::void;"
"std_places 1;" "sp0=%0i;" "tailcall %1i;",
env->java_lang_ArithmeticException_Class,
lil_npc_to_fp(exn_get_rth_throw_lazy_trampoline()));
@@ -1067,7 +1067,7 @@
return addr;
}
- LilCodeStub *cs = lil_parse_code_stub("entry 0:managed::void;"
+ LilCodeStub *cs = lil_parse_code_stub("entry 0:stdcall::void;"
"std_places 1;" "sp0=%0i;" "tailcall %1i;",
exn_get_class_cast_exception_type(),
lil_npc_to_fp(exn_get_rth_throw_lazy_trampoline()));
@@ -1106,7 +1106,7 @@
return addr;
}
- LilCodeStub *cs = lil_parse_code_stub("entry 0:managed::void;"
+ LilCodeStub *cs = lil_parse_code_stub("entry 0:stdcall::void;"
"std_places 1;" "sp0=%0i;" "tailcall %1i;",
exn_get_incompatible_class_change_exception_type(),
lil_npc_to_fp(exn_get_rth_throw_lazy_trampoline()));
Index: trunk/vm/vmcore/src/jit/compile.cpp
===================================================================
--- trunk/vm/vmcore/src/jit/compile.cpp (revision 591610)
+++ trunk/vm/vmcore/src/jit/compile.cpp (working copy)
@@ -295,7 +295,7 @@
if (is_synchronised) {
if (is_static) {
cs = lil_parse_onto_end(cs,
- "out managed:pint:void;"
+ "out stdcall:pint:void;"
"o0=%0i;"
"call %1i;",
clss,
@@ -303,7 +303,7 @@
assert(cs);
} else {
cs = lil_parse_onto_end(cs,
- "out managed:ref:void;"
+ "out stdcall:ref:void;"
"o0=i0;"
"call %0i;",
lil_npc_to_fp(vm_get_rt_support_addr(VM_RT_MONITOR_ENTER)));
@@ -461,7 +461,7 @@
if (is_synchronised) {
if (is_static) {
cs = lil_parse_onto_end(cs,
- "out managed:pint:void;"
+ "out stdcall:pint:void;"
"o0=%0i;"
"call %1i;",
clss,
@@ -469,7 +469,7 @@
} else {
cs = lil_parse_onto_end(cs,
"ld l0,[l0+%0i:ref];"
- "out managed:ref:void; o0=l0; call %1i;",
+ "out stdcall:ref:void; o0=l0; call %1i;",
oh_get_handle_offset(0),
lil_npc_to_fp(vm_get_rt_support_addr(VM_RT_MONITOR_EXIT)));
}
Index: trunk/vm/vmcore/src/jit/jit_runtime_support.cpp
===================================================================
--- trunk/vm/vmcore/src/jit/jit_runtime_support.cpp (revision 591610)
+++ trunk/vm/vmcore/src/jit/jit_runtime_support.cpp (working copy)
@@ -126,7 +126,7 @@
static NativeCodePtr addr = NULL;
if (!addr) {
- LilCodeStub* cs = lil_parse_code_stub("entry 0:managed::ref;");
+ LilCodeStub* cs = lil_parse_code_stub("entry 0:stdcall::ref;");
assert(cs);
if (dyn_count) {
cs = lil_parse_onto_end(cs, "inc [%0i:pint];", dyn_count);
@@ -195,7 +195,7 @@
if (!addr) {
ManagedObject* (*p_instantiate_ref)(Class*,unsigned) = rth_ldc_ref_helper;
- LilCodeStub* cs = lil_parse_code_stub("entry 0:managed:g4,pint:ref;");
+ LilCodeStub* cs = lil_parse_code_stub("entry 0:stdcall:pint,g4:ref;");
assert(cs);
if (dyn_count) {
cs = lil_parse_onto_end(cs, "inc [%0i:pint];", dyn_count);
@@ -203,9 +203,7 @@
}
cs = lil_parse_onto_end(cs,
"push_m2n 0, %0i;"
- "out platform:pint,g4:ref;"
- "o0=i1;"
- "o1=i0;"
+ "in2out platform:ref;"
"call %1i;"
"pop_m2n;"
"ret;",
@@ -406,7 +404,7 @@
static NativeCodePtr addr = NULL;
if (!addr) {
- LilCodeStub* cs = lil_parse_code_stub("entry 0:rth:ref,pint:ref;");
+ LilCodeStub* cs = lil_parse_code_stub("entry 0:stdcall:ref,pint:ref;");
#ifdef VM_STATS
if (dyn_count) {
@@ -435,7 +433,7 @@
static NativeCodePtr addr = NULL;
if (!addr) {
- LilCodeStub* cs = lil_parse_code_stub("entry 0:rth:ref,pint:g4;");
+ LilCodeStub* cs = lil_parse_code_stub("entry 0:stdcall:ref,pint:g4;");
#ifdef VM_STATS
assert(dyn_count);
cs = lil_parse_onto_end(cs, "inc [%0i:pint]; in2out platform:void; call %1i;", dyn_count, rth_update_instanceof_stats);
@@ -458,7 +456,7 @@
// Store a reference into an array at a given index and return NULL,
// or return the Class* for the exception to throw.
-static Class* rth_aastore(ManagedObject* elem, int idx, Vector_Handle array)
+static Class* rth_aastore(Vector_Handle array, int idx, ManagedObject* elem)
{
#ifdef VM_STATS
VM_Statistics::get_vm_stats().num_aastore ++;
@@ -507,9 +505,9 @@
static NativeCodePtr addr = NULL;
if (!addr) {
- Class* (*p_aastore)(ManagedObject*, int, Vector_Handle) = rth_aastore;
- // The args are the element ref to store, the index, and the array to store into\n"
- LilCodeStub* cs = lil_parse_code_stub("entry 0:rth:ref,pint,ref:void;");
+ Class* (*p_aastore)(Vector_Handle, int, ManagedObject*) = rth_aastore;
+ // The args are the array to store into, the index, and the element ref to store\n"
+ LilCodeStub* cs = lil_parse_code_stub("entry 0:stdcall:ref,pint,ref:void;");
#ifdef VM_STATS
assert(dyn_count);
cs = lil_parse_onto_end(cs, "inc [%0i:pint];", dyn_count);
@@ -583,7 +581,7 @@
if (!addr) {
bool (*p_aastore_test)(ManagedObject*, Vector_Handle) = rth_aastore_test;
// The args are the element ref to store and the array to store into\n
- LilCodeStub* cs = lil_parse_code_stub("entry 0:rth:ref,ref:void;");
+ LilCodeStub* cs = lil_parse_code_stub("entry 0:stdcall:ref,ref:void;");
assert(cs);
#ifdef VM_STATS
assert(dyn_count);
@@ -620,7 +618,7 @@
if (!addr) {
void (*p_throw_linking_error)(Class_Handle ch, unsigned index, unsigned opcode) =
vm_rt_class_throw_linking_error;
- LilCodeStub* cs = lil_parse_code_stub("entry 0:managed:pint,g4,g4:void;");
+ LilCodeStub* cs = lil_parse_code_stub("entry 0:stdcall:pint,g4,g4:void;");
assert(cs);
if (dyn_count) {
cs = lil_parse_onto_end(cs, "inc [%0i:pint];", dyn_count);
@@ -661,7 +659,7 @@
if (!addr) {
void* (*p_get_ivtable)(ManagedObject*, Class*) = rth_get_interface_vtable;
- LilCodeStub* cs = lil_parse_code_stub("entry 0:rth:ref,pint:pint;");
+ LilCodeStub* cs = lil_parse_code_stub("entry 0:stdcall:ref,pint:pint;");
assert(cs);
if (dyn_count) {
cs = lil_parse_onto_end(cs, "inc [%0i:pint];", dyn_count);
@@ -734,7 +732,7 @@
POINTER_SIZE_INT (*p_is_inited)(Class*) = is_class_initialized;
void (*p_init)(Class*) = class_initialize;
void (*p_rethrow)() = exn_rethrow;
- LilCodeStub* cs = lil_parse_code_stub("entry 0:rth:pint:void;");
+ LilCodeStub* cs = lil_parse_code_stub("entry 0:stdcall:pint:void;");
assert(cs);
#ifdef VM_STATS
assert(dyn_count);
@@ -801,7 +799,7 @@
if (!addr) {
int32 (*p_f2i)(float) = f2i;
- LilCodeStub* cs = lil_parse_code_stub("entry 0:rth:f4:g4;");
+ LilCodeStub* cs = lil_parse_code_stub("entry 0:stdcall:f4:g4;");
assert(cs);
if (dyn_count) {
cs = lil_parse_onto_end(cs, "inc [%0i:pint];", dyn_count);
@@ -854,7 +852,7 @@
if (!addr) {
int64 (*p_f2l)(float) = f2l;
- LilCodeStub* cs = lil_parse_code_stub("entry 0:rth:f4:g8;");
+ LilCodeStub* cs = lil_parse_code_stub("entry 0:stdcall:f4:g8;");
assert(cs);
if (dyn_count) {
cs = lil_parse_onto_end(cs, "inc [%0i:pint];", dyn_count);
@@ -899,7 +897,7 @@
if (!addr) {
int32 (*p_d2i)(double) = d2i;
- LilCodeStub* cs = lil_parse_code_stub("entry 0:rth:f8:g4;");
+ LilCodeStub* cs = lil_parse_code_stub("entry 0:stdcall:f8:g4;");
assert(cs);
if (dyn_count) {
cs = lil_parse_onto_end(cs, "inc [%0i:pint];", dyn_count);
@@ -952,7 +950,7 @@
if (!addr) {
int64 (*p_d2l)(double) = d2l;
- LilCodeStub* cs = lil_parse_code_stub("entry 0:rth:f8:g8;");
+ LilCodeStub* cs = lil_parse_code_stub("entry 0:stdcall:f8:g8;");
assert(cs);
if (dyn_count) {
cs = lil_parse_onto_end(cs, "inc [%0i:pint];", dyn_count);
@@ -985,7 +983,7 @@
if (!addr) {
int64 (*p_lshl)(int64, int32) = lshl;
- LilCodeStub* cs = lil_parse_code_stub("entry 0:managed:g8,g4:g8;");
+ LilCodeStub* cs = lil_parse_code_stub("entry 0:stdcall:g8,g4:g8;");
assert(cs);
if (dyn_count) {
cs = lil_parse_onto_end(cs, "inc [%0i:pint];", dyn_count);
@@ -1018,7 +1016,7 @@
if (!addr) {
int64 (*p_lshr)(int64, int32) = lshr;
- LilCodeStub* cs = lil_parse_code_stub("entry 0:managed:g8,g4:g8;");
+ LilCodeStub* cs = lil_parse_code_stub("entry 0:stdcall:g8,g4:g8;");
assert(cs);
if (dyn_count) {
cs = lil_parse_onto_end(cs, "inc [%0i:pint];", dyn_count);
@@ -1051,7 +1049,7 @@
if (!addr) {
uint64 (*p_lushr)(uint64, uint32) = lushr;
- LilCodeStub* cs = lil_parse_code_stub("entry 0:managed:g8,g4:g8;");
+ LilCodeStub* cs = lil_parse_code_stub("entry 0:stdcall:g8,g4:g8;");
assert(cs);
if (dyn_count) {
cs = lil_parse_onto_end(cs, "inc [%0i:pint];", dyn_count);
@@ -1084,7 +1082,7 @@
if (!addr) {
int64 (*p_lmul)(int64, int64) = lmul;
- LilCodeStub* cs = lil_parse_code_stub("entry 0:rth:g8,g8:g8;");
+ LilCodeStub* cs = lil_parse_code_stub("entry 0:stdcall:g8,g8:g8;");
assert(cs);
if (dyn_count) {
cs = lil_parse_onto_end(cs, "inc [%0i:pint];", dyn_count);
@@ -1118,7 +1116,7 @@
if (!addr) {
int64 (*p_lrem)(int64, int64) = lrem;
- LilCodeStub* cs = lil_parse_code_stub("entry 0:rth:g8,g8:g8;");
+ LilCodeStub* cs = lil_parse_code_stub("entry 0:stdcall:g8,g8:g8;");
assert(cs);
if (dyn_count) {
cs = lil_parse_onto_end(cs, "inc [%0i:pint];", dyn_count);
@@ -1155,7 +1153,7 @@
if (!addr) {
int64 (*p_ldiv)(int64, int64) = ldiv;
- LilCodeStub* cs = lil_parse_code_stub("entry 0:rth:g8,g8:g8;");
+ LilCodeStub* cs = lil_parse_code_stub("entry 0:stdcall:g8,g8:g8;");
assert(cs);
if (dyn_count) {
cs = lil_parse_onto_end(cs, "inc [%0i:pint];", dyn_count);
@@ -1192,7 +1190,7 @@
if (!addr) {
uint64 (*p_ludiv)(uint64, uint64) = ludiv;
- LilCodeStub* cs = lil_parse_code_stub("entry 0:rth:g8,g8:g8;");
+ LilCodeStub* cs = lil_parse_code_stub("entry 0:stdcall:g8,g8:g8;");
assert(cs);
if (dyn_count) {
cs = lil_parse_onto_end(cs, "inc [%0i:pint];", dyn_count);
@@ -1225,7 +1223,7 @@
// This constant must be kept in sync with MAGIC in ir.cpp
POINTER_SIZE_INT divisor_offset = 40;
int64 (*p_ldiv)(int64, int64) = ldiv;
- LilCodeStub* cs = lil_parse_code_stub("entry 0:rth:g8,pint:g8;");
+ LilCodeStub* cs = lil_parse_code_stub("entry 0:stdcall:g8,pint:g8;");
assert(cs);
if (dyn_count) {
cs = lil_parse_onto_end(cs, "inc [%0i:pint];", dyn_count);
@@ -1257,7 +1255,7 @@
// This constant must be kept in sync with MAGIC in ir.cpp
POINTER_SIZE_INT divisor_offset = 40;
int64 (*p_lrem)(int64, int64) = lrem;
- LilCodeStub* cs = lil_parse_code_stub("entry 0:rth:g8,pint:g8;");
+ LilCodeStub* cs = lil_parse_code_stub("entry 0:stdcall:g8,pint:g8;");
assert(cs);
if (dyn_count) {
cs = lil_parse_onto_end(cs, "inc [%0i:pint];", dyn_count);
@@ -1292,7 +1290,7 @@
if (!addr) {
int32 (*p_imul)(int32, int32) = imul;
- LilCodeStub* cs = lil_parse_code_stub("entry 0:rth:g4,g4:g4;");
+ LilCodeStub* cs = lil_parse_code_stub("entry 0:stdcall:g4,g4:g4;");
assert(cs);
if (dyn_count) {
cs = lil_parse_onto_end(cs, "inc [%0i:pint];", dyn_count);
@@ -1326,7 +1324,7 @@
if (!addr) {
int32 (*p_irem)(int32, int32) = irem;
- LilCodeStub* cs = lil_parse_code_stub("entry 0:rth:g4,g4:g4;");
+ LilCodeStub* cs = lil_parse_code_stub("entry 0:stdcall:g4,g4:g4;");
assert(cs);
if (dyn_count) {
cs = lil_parse_onto_end(cs, "inc [%0i:pint];", dyn_count);
@@ -1363,7 +1361,7 @@
if (!addr) {
int32 (*p_idiv)(int32, int32) = idiv;
- LilCodeStub* cs = lil_parse_code_stub("entry 0:rth:g4,g4:g4;");
+ LilCodeStub* cs = lil_parse_code_stub("entry 0:stdcall:g4,g4:g4;");
assert(cs);
if (dyn_count) {
cs = lil_parse_onto_end(cs, "inc [%0i:pint];", dyn_count);
@@ -1430,7 +1428,7 @@
if (!addr) {
float (*p_frem)(float, float) = frem;
- LilCodeStub* cs = lil_parse_code_stub("entry 0:rth:f4,f4:f4;");
+ LilCodeStub* cs = lil_parse_code_stub("entry 0:stdcall:f4,f4:f4;");
assert(cs);
if (dyn_count) {
cs = lil_parse_onto_end(cs, "inc [%0i:pint];", dyn_count);
@@ -1463,7 +1461,7 @@
if (!addr) {
float (*p_fdiv)(float, float) = fdiv;
- LilCodeStub* cs = lil_parse_code_stub("entry 0:managed:f4,f4:f4;");
+ LilCodeStub* cs = lil_parse_code_stub("entry 0:stdcall:f4,f4:f4;");
assert(cs);
if (dyn_count) {
cs = lil_parse_onto_end(cs, "inc [%0i:pint];", dyn_count);
@@ -1496,7 +1494,7 @@
if (!addr) {
double (*p_drem)(double, double) = my_drem;
- LilCodeStub* cs = lil_parse_code_stub("entry 0:managed:f8,f8:f8;");
+ LilCodeStub* cs = lil_parse_code_stub("entry 0:stdcall:f8,f8:f8;");
assert(cs);
if (dyn_count) {
cs = lil_parse_onto_end(cs, "inc [%0i:pint];", dyn_count);
@@ -1529,7 +1527,7 @@
if (!addr) {
double (*p_ddiv)(double, double) = ddiv;
- LilCodeStub* cs = lil_parse_code_stub("entry 0:managed:f8,f8:f8;");
+ LilCodeStub* cs = lil_parse_code_stub("entry 0:stdcall:f8,f8:f8;");
assert(cs);
if (dyn_count) {
cs = lil_parse_onto_end(cs, "inc [%0i:pint];", dyn_count);
@@ -1567,7 +1565,7 @@
if (wrappers.lookup(stub, &_junk, &wrapper)) return wrapper;
LilCodeStub* cs = lil_parse_code_stub(
- "entry 0:managed:arbitrary;"
+ "entry 0:stdcall:arbitrary;"
"inc [%0i:g4];"
"tailcall %1i;",
dyncount, lil_npc_to_fp(stub));
@@ -1591,7 +1589,7 @@
return addr;
}
void (*hythread_safe_point_ptr)() = jvmti_safe_point;
- LilCodeStub* cs = lil_parse_code_stub("entry 0:managed::void;");
+ LilCodeStub* cs = lil_parse_code_stub("entry 0:stdcall::void;");
assert(cs);
if (dyn_count) {
cs = lil_parse_onto_end(cs, "inc [%0i:pint];", dyn_count);
@@ -1624,7 +1622,7 @@
return addr;
}
void (*jvmti_method_enter_callback_ptr)(Method_Handle) = jvmti_method_enter_callback;
- LilCodeStub* cs = lil_parse_code_stub("entry 0:managed:pint:void;");
+ LilCodeStub* cs = lil_parse_code_stub("entry 0:stdcall:pint:void;");
assert(cs);
if (dyn_count) {
cs = lil_parse_onto_end(cs, "inc [%0i:pint];", dyn_count);
@@ -1653,7 +1651,7 @@
return addr;
}
void (*jvmti_method_exit_callback_ptr)(Method_Handle, jvalue *) = jvmti_method_exit_callback;
- LilCodeStub* cs = lil_parse_code_stub("entry 0:managed:pint,pint:void;");
+ LilCodeStub* cs = lil_parse_code_stub("entry 0:stdcall:pint,pint:void;");
assert(cs);
if (dyn_count) {
cs = lil_parse_onto_end(cs, "inc [%0i:pint];", dyn_count);
@@ -1661,14 +1659,7 @@
}
cs = lil_parse_onto_end(cs,
"push_m2n 0, %0i;"
- "out platform:pint,pint:void;"
-#ifdef _EM64T_
- "o0=i0;"
- "o1=i1;"
-#else
- "o0=i1;"
- "o1=i0;"
-#endif
+ "in2out platform:void;"
"call %1i;"
"pop_m2n;"
"ret;",
@@ -1691,7 +1682,7 @@
void (*jvmti_field_access_callback_ptr)(Field_Handle, Method_Handle,
jlocation, ManagedObject*) = jvmti_field_access_callback;
- LilCodeStub* cs = lil_parse_code_stub("entry 0:managed:pint,pint,g8,pint:void;");
+ LilCodeStub* cs = lil_parse_code_stub("entry 0:stdcall:pint,pint,g8,pint:void;");
assert(cs);
if (dyn_count) {
cs = lil_parse_onto_end(cs, "inc [%0i:pint];", dyn_count);
@@ -1718,7 +1709,7 @@
// return addr;
// }
//LilCodeStub* cs = lil_parse_code_stub(
- // "entry 0:managed:pint,pint,g8,pint:void;"
+ // "entry 0:stdcall:pint,pint,g8,pint:void;"
// "push_m2n 0, 0;"
// "in2out platform:void;"
// "call %0i;"
@@ -1738,7 +1729,7 @@
}
void (*jvmti_field_modification_callback_ptr)(Field_Handle, Method_Handle,
jlocation, ManagedObject*, jvalue*) = jvmti_field_modification_callback;
- LilCodeStub* cs = lil_parse_code_stub("entry 0:managed:pint,pint,g8,pint,pint:void;");
+ LilCodeStub* cs = lil_parse_code_stub("entry 0:stdcall:pint,pint,g8,pint,pint:void;");
assert(cs);
if (dyn_count) {
cs = lil_parse_onto_end(cs, "inc [%0i:pint];", dyn_count);
@@ -1765,7 +1756,7 @@
// return addr;
//}
//LilCodeStub* cs = lil_parse_code_stub(
- // "entry 0:managed:pint,pint,g8,pint,pint:void;"
+ // "entry 0:stdcall:pint,pint,g8,pint,pint:void;"
// "push_m2n 0, 0;"
// "in2out platform:void;"
// "call %0i;"
@@ -1797,11 +1788,11 @@
LilCodeStub* cs = NULL;
const char* in2out = NULL;
if (type == ResolveResType_Unmanaged) {
- cs = lil_parse_code_stub("entry 0:rth:pint,pint:pint;");
+ cs = lil_parse_code_stub("entry 0:stdcall:pint,pint:pint;");
in2out = "in2out platform:pint;";
} else {
assert(type == ResolveResType_Managed);
- cs = lil_parse_code_stub("entry 0:rth:pint,pint:ref;");
+ cs = lil_parse_code_stub("entry 0:stdcall:pint,pint:ref;");
in2out = "in2out platform:ref;";
}
assert(cs);
@@ -1836,11 +1827,11 @@
LilCodeStub* cs = NULL;
const char* in2out = NULL;
if (type == ResolveResType_Unmanaged) {
- cs = lil_parse_code_stub("entry 0:rth:pint,pint,ref:pint;");
+ cs = lil_parse_code_stub("entry 0:stdcall:pint,pint,ref:pint;");
in2out = "in2out platform:pint;";
} else {
assert(type == ResolveResType_Managed);
- cs = lil_parse_code_stub("entry 0:rth:pint,pint,ref:ref;");
+ cs = lil_parse_code_stub("entry 0:stdcall:pint,pint,ref:ref;");
in2out = "in2out platform:ref;";
}
@@ -1875,11 +1866,11 @@
LilCodeStub* cs = NULL;
const char* in2out = NULL;
if (type == ResolveResType_Unmanaged) {
- cs = lil_parse_code_stub("entry 0:rth:pint,pint,pint:pint;");
+ cs = lil_parse_code_stub("entry 0:stdcall:pint,pint,pint:pint;");
in2out = "in2out platform:pint;";
} else {
assert(type == ResolveResType_Managed);
- cs = lil_parse_code_stub("entry 0:rth:pint,pint,pint:ref;");
+ cs = lil_parse_code_stub("entry 0:stdcall:pint,pint,pint:ref;");
in2out = "in2out platform:ref;";
}
@@ -2909,7 +2900,7 @@
if (is_checkcast) {
// args: ManagedObject *obj, Class *super; returns a ManagedObject*
cs = lil_parse_code_stub
- ("entry 0:rth:ref,pint:ref;"
+ ("entry 0:stdcall:ref,pint:ref;"
"jc i0!=%0i:ref,nonnull;"
"r=i0;" // return obj if obj==NULL
"ret;",
@@ -2918,7 +2909,7 @@
else {
// args: ManagedObject *obj, Class *super; returns a boolean
cs = lil_parse_code_stub
- ("entry 0:rth:ref,pint:g4;"
+ ("entry 0:stdcall:ref,pint:g4;"
"jc i0!=%0i:ref,nonnull;"
"r=0:g4;" // return FALSE if obj==NULL
"ret;",
@@ -2969,7 +2960,7 @@
if (is_checkcast) {
// args: ManagedObject *obj, Class *super; returns a ManagedObject*
cs = lil_parse_code_stub
- ("entry 0:rth:ref,pint:ref;"
+ ("entry 0:stdcall:ref,pint:ref;"
"jc i0!=%0i,nonnull;"
"r=i0;" // return obj if obj==NULL
"ret;",
@@ -2978,7 +2969,7 @@
else {
// args: ManagedObject *obj, Class *super; returns a boolean
cs = lil_parse_code_stub
- ("entry 0:rth:ref,pint:g4;"
+ ("entry 0:stdcall:ref,pint:g4;"
"jc i0!=%0i,nonnull;"
"r=0:g4;" // return FALSE if obj==NULL
"ret;",
@@ -3235,7 +3226,7 @@
// 20030505 This JIT support routine expects to be called directly from managed code.
// The return value is either NULL or the ClassHandle for an exception to throw.
void * __stdcall
-vm_rt_aastore(ManagedObject *elem, int idx, Vector_Handle array)
+vm_rt_aastore(Vector_Handle array, int idx, ManagedObject *elem)
{
#ifdef VM_STATS
VM_Statistics::get_vm_stats().num_aastore ++;
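
Note on the aastore path above: the helper now declares its LIL entry with the stdcall convention ("entry 0:stdcall:ref,pint,ref:void;") and takes its arguments in (array, index, element) order, matching the reordered rth_aastore/vm_rt_aastore signatures. A minimal sketch, using only declarations visible in this file (the wrapper name is illustrative):

    // Sketch only (hypothetical wrapper name): the reordered argument list.
    // rth_aastore returns NULL on success, otherwise the Class* of the
    // exception the caller should throw.
    static Class* aastore_example(Vector_Handle array, int idx, ManagedObject* elem)
    {
        return rth_aastore(array, idx, elem);
    }
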
Index: trunk/vm/vmcore/src/jit/rt_helper_info.cpp
===================================================================
--- trunk/vm/vmcore/src/jit/rt_helper_info.cpp (revision 591610)
+++ trunk/vm/vmcore/src/jit/rt_helper_info.cpp (working copy)
@@ -57,7 +57,7 @@
{VM_RT_ARRAY_STORE_EXCEPTION, "VM_RT_ARRAY_STORE_EXCEPTION",
INTERRUPTIBLE_ALWAYS, CALLING_CONVENTION_STDCALL, 0},
{VM_RT_THROW_LINKING_EXCEPTION, "VM_RT_THROW_LINKING_EXCEPTION",
- INTERRUPTIBLE_ALWAYS, CALLING_CONVENTION_DRL, 0},
+ INTERRUPTIBLE_ALWAYS, CALLING_CONVENTION_STDCALL, 0},
{VM_RT_THROW_SET_STACK_TRACE, "VM_RT_THROW_SET_STACK_TRACE",
INTERRUPTIBLE_ALWAYS, CALLING_CONVENTION_STDCALL, 1},
@@ -137,11 +137,11 @@
{VM_RT_D2L, "VM_RT_D2L",
INTERRUPTIBLE_NEVER, CALLING_CONVENTION_STDCALL, 1},
{VM_RT_LSHL, "VM_RT_LSHL",
- INTERRUPTIBLE_NEVER, CALLING_CONVENTION_DRL, 2},
+ INTERRUPTIBLE_NEVER, CALLING_CONVENTION_STDCALL, 2},
{VM_RT_LSHR, "VM_RT_LSHR",
- INTERRUPTIBLE_NEVER, CALLING_CONVENTION_DRL, 2},
+ INTERRUPTIBLE_NEVER, CALLING_CONVENTION_STDCALL, 2},
{VM_RT_LUSHR, "VM_RT_LUSHR",
- INTERRUPTIBLE_NEVER, CALLING_CONVENTION_DRL, 2},
+ INTERRUPTIBLE_NEVER, CALLING_CONVENTION_STDCALL, 2},
{VM_RT_LMUL, "VM_RT_LMUL",
INTERRUPTIBLE_NEVER, CALLING_CONVENTION_STDCALL, 2},
#ifdef VM_LONG_OPT
Index: trunk/vm/vmcore/src/util/em64t/base/ini_em64t.cpp
===================================================================
--- trunk/vm/vmcore/src/util/em64t/base/ini_em64t.cpp (revision 591610)
+++ trunk/vm/vmcore/src/util/em64t/base/ini_em64t.cpp (working copy)
@@ -39,6 +39,7 @@
#include "encoder.h"
#include "ini.h"
#include "lil_code_generator_utils.h"
+#include "jit_runtime_support_common.h"
#define LOG_DOMAIN "vm.helpers"
#include "cxxlog.h"
@@ -112,6 +113,8 @@
stub = push(stub, rbp_opnd);
stub = mov(stub, rbp_opnd, rsp_opnd);
+ assert(MANAGED_STACK_ALIGNMENT == STACK_ALIGN_HALF16);
+
// align stack pointer if required (rsp % 16 == 0)
stub = alu(stub, and_opc, rsp_opnd, Imm_Opnd(0xfffffff0));
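
The added assert ties MANAGED_STACK_ALIGNMENT to the half-16 value before rsp is masked. As a reminder of the entry-time invariant this relies on (the Intel 64 calling convention keeps the stack 16-byte aligned at each call site, so the return address push leaves rsp off by 8), a hedged sketch with a hypothetical helper name:

    // Illustration only: the alignment the stub may assume on entry,
    // i.e. (rsp + 8) % 16 == 0 after the call pushed the return address.
    static inline bool entry_rsp_is_aligned(uint64 rsp)
    {
        return ((rsp + 8) & 0xF) == 0;
    }
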
Index: trunk/vm/vmcore/src/util/em64t/base/jit_lock_rt_support_em64t.cpp
===================================================================
--- trunk/vm/vmcore/src/util/em64t/base/jit_lock_rt_support_em64t.cpp (revision 591610)
+++ trunk/vm/vmcore/src/util/em64t/base/jit_lock_rt_support_em64t.cpp (working copy)
@@ -348,7 +348,7 @@
return addr;
}
- LilCodeStub * cs = lil_parse_code_stub("entry 0:managed:pint:void;");
+ LilCodeStub * cs = lil_parse_code_stub("entry 0:stdcall:pint:void;");
#ifdef VM_STATS
// int * value = VM_Statistics::get_vm_stats().rt_function_calls.lookup_or_add((void*)VM_RT_MONITOR_ENTER_STATIC, 0, NULL);
// cs = lil_parse_onto_end(cs, "inc [%0i:pint];", value);
@@ -383,7 +383,7 @@
return addr;
}
- LilCodeStub * cs = lil_parse_code_stub("entry 0:managed:ref:void;");
+ LilCodeStub * cs = lil_parse_code_stub("entry 0:stdcall:ref:void;");
#ifdef VM_STATS
// int * value = VM_Statistics::get_vm_stats().rt_function_calls.lookup_or_add((void*)VM_RT_MONITOR_ENTER, 0, NULL);
@@ -406,7 +406,7 @@
// throw NullPointerException
cs = lil_parse_onto_end(cs,
":throw_null_pointer;"
- "out managed::void;"
+ "out stdcall::void;"
"call.noret %0i;",
lil_npc_to_fp(exn_get_rth_throw_null_pointer())
);
@@ -429,7 +429,7 @@
}
LilCodeStub * cs = lil_parse_code_stub(
- "entry 0:managed:ref:void;"
+ "entry 0:stdcall:ref:void;"
"locals 1;"
"l0 = i0;"
);
@@ -476,7 +476,7 @@
"jc r!=%1i, illegal_monitor;"
"ret;"
":illegal_monitor;"
- "out managed::void;"
+ "out stdcall::void;"
"call.noret %2i;",
vm_monitor_try_exit,
(POINTER_SIZE_INT)TM_ERROR_NONE,
@@ -491,7 +491,7 @@
return addr;
}
- LilCodeStub * cs = lil_parse_code_stub("entry 0:managed:pint:void;");
+ LilCodeStub * cs = lil_parse_code_stub("entry 0:stdcall:pint:void;");
#ifdef VM_STATS
// int * value = VM_Statistics::get_vm_stats().rt_function_calls.lookup_or_add((void*)VM_RT_MONITOR_EXIT_STATIC, 0, NULL);
// cs = lil_parse_onto_end(cs, "inc [%0i:pint];", value);
@@ -527,7 +527,7 @@
return addr;
}
- LilCodeStub * cs = lil_parse_code_stub("entry 0:managed:ref:void;");
+ LilCodeStub * cs = lil_parse_code_stub("entry 0:stdcall:ref:void;");
#ifdef VM_STATS
// int * value = VM_Statistics::get_vm_stats().rt_function_calls.lookup_or_add((void*)VM_RT_MONITOR_EXIT, 0, NULL);
@@ -550,7 +550,7 @@
// throw NullPointerException
cs = lil_parse_onto_end(cs,
":throw_null_pointer;"
- "out managed::void;"
+ "out stdcall::void;"
"call.noret %0i;",
lil_npc_to_fp(exn_get_rth_throw_null_pointer())
);
@@ -573,7 +573,7 @@
}
LilCodeStub * cs = lil_parse_code_stub(
- "entry 0:managed:ref:void;"
+ "entry 0:stdcall:ref:void;"
"in2out platform:g4;"
);
assert(cs);
Index: trunk/vm/vmcore/src/util/em64t/base/jit_runtime_support_em64t.cpp
===================================================================
--- trunk/vm/vmcore/src/util/em64t/base/jit_runtime_support_em64t.cpp (revision 591610)
+++ trunk/vm/vmcore/src/util/em64t/base/jit_runtime_support_em64t.cpp (working copy)
@@ -83,7 +83,7 @@
return addr;
}
- LilCodeStub* cs = lil_parse_code_stub("entry 0:managed:g4,pint:ref;");
+ LilCodeStub* cs = lil_parse_code_stub("entry 0:stdcall:g4,pint:ref;");
assert(cs);
#ifdef VM_STATS
@@ -114,7 +114,7 @@
return addr;
}
- LilCodeStub* cs = lil_parse_code_stub("entry 0:managed:g4,pint:ref;");
+ LilCodeStub* cs = lil_parse_code_stub("entry 0:stdcall:g4,pint:ref;");
assert(cs);
#ifdef VM_STATS
Index: trunk/vm/vmcore/src/util/ia32/base/ini_iA32.cpp
===================================================================
--- trunk/vm/vmcore/src/util/ia32/base/ini_iA32.cpp (revision 591610)
+++ trunk/vm/vmcore/src/util/ia32/base/ini_iA32.cpp (working copy)
@@ -19,8 +19,6 @@
* @version $Revision: 1.1.2.1.4.3 $
*/
-
-
//MVM
#include
@@ -33,6 +31,7 @@
#include "Class.h"
#include "exceptions.h"
#include "vm_threads.h"
+#include "jit_runtime_support_common.h"
#include "compile.h"
#include "nogc.h"
@@ -41,6 +40,7 @@
#include "environment.h"
#include "lil.h"
#include "lil_code_generator.h"
+#include "lil_code_generator_utils.h"
#include "interpreter.h"
@@ -49,63 +49,108 @@
#define LOG_DOMAIN "invoke"
#include "cxxlog.h"
-#ifdef _WIN32
-static int64 __declspec(naked) __stdcall
-vm_invoke_native_array_stub(uint32 *args,
- int sz,
- void* f)
-{
- __asm {
- push ebp
- mov ebp, esp
- push ebx // FIXME: check jit calling conventions,
- push esi // is it necessary to save the registers here
- push edi
+#include "dump.h"
- mov eax, [ebp+8]
- mov ecx, [ebp+12]
- lea eax, [eax+ecx*4-4]
- sub eax, esp
- or ecx, ecx
- je e
- l:
- push [esp+eax]
- loop l
- e:
- mov eax, [ebp+16]
- call eax
- lea esp, [ebp-12]
- pop edi
- pop esi
- pop ebx
- leave
- ret
+typedef double (*DoubleFuncPtr)(uint32* args, int args_size, void* func);
+typedef ManagedObject* (*RefFuncPtr)(uint32* args, int args_size, void* func);
+typedef float (*FloatFuncPtr)(uint32* args, int args_size, void* func);
+typedef int32 (*IntFuncPtr)(uint32* args, int args_size, void* func);
+
+static IntFuncPtr gen_invoke_managed_func() {
+ static IntFuncPtr func = NULL;
+
+ if (func) {
+ return func;
}
-}
-#else /* Linux */
-extern "C" {
- int64 vm_invoke_native_array_stub(uint32 *args,
- int sz,
- void *func);
-}
+ // Defines the stack alignment required on entry to a managed function.
+ const int32 STACK_ALIGNMENT = MANAGED_STACK_ALIGNMENT;
+ const int32 STACK_ALIGNMENT_MASK = ~(STACK_ALIGNMENT - 1);
+ const char * LOOP_BEGIN = "loop_begin";
+ const char * LOOP_END = "loop_end";
+
+ // [ebp + 8] - args
+ // [ebp + 12] - size
+ // [ebp + 16] - func
+ const int32 STACK_ARGS_OFFSET = 8;
+ const int32 STACK_NARGS_OFFSET = 12;
+ const int32 STACK_FUNC_OFFSET = 16;
+ const int32 STACK_CALLEE_SAVED_OFFSET = -12;
+
+ const int STUB_SIZE = 124;
+ char * stub = (char *) malloc_fixed_code_for_jit(STUB_SIZE,
+ DEFAULT_CODE_ALIGNMENT, CODE_BLOCK_HEAT_DEFAULT, CAA_Allocate);
+#ifdef _DEBUG
+ memset(stub, 0xcc /*int 3*/, STUB_SIZE);
#endif
+
+ tl::MemoryPool pool;
+ LilCguLabelAddresses labels(&pool, stub);
+
+ func = (IntFuncPtr) stub;
-typedef double (*DoubleFuncPtr)(uint32*,int,void*);
-typedef ManagedObject* (*RefFuncPtr)(uint32*,int,void*);
-typedef float (*FloatFuncPtr)(uint32*,int,void*);
-typedef int32 (*IntFuncPtr)(uint32*,int,void*);
+ // Initialize ebp-based stack frame.
+ stub = push(stub, ebp_opnd);
+ stub = mov(stub, ebp_opnd, esp_opnd);
+
+ // Preserve callee-saved registers.
+ stub = push(stub, ebx_opnd);
+ stub = push(stub, esi_opnd);
+ stub = push(stub, edi_opnd);
-DoubleFuncPtr vm_invoke_native_array_stub_double =
- (DoubleFuncPtr) vm_invoke_native_array_stub;
-RefFuncPtr vm_invoke_native_array_stub_ref =
- (RefFuncPtr) vm_invoke_native_array_stub;
-IntFuncPtr vm_invoke_native_array_stub_int =
- (IntFuncPtr) vm_invoke_native_array_stub;
-FloatFuncPtr vm_invoke_native_array_stub_float =
- (FloatFuncPtr) vm_invoke_native_array_stub;
+ // Load the argument array ('args') and its size from the stack.
+ stub = mov(stub, eax_opnd, M_Base_Opnd(ebp_reg, STACK_ARGS_OFFSET));
+ stub = mov(stub, ecx_opnd, M_Base_Opnd(ebp_reg, STACK_NARGS_OFFSET));
+
+ // Align memory stack.
+ stub = lea(stub, ebx_opnd, M_Index_Opnd(n_reg, ecx_reg, 4, 4));
+ stub = mov(stub, esi_opnd, ebx_opnd);
+ stub = neg(stub, esi_opnd);
+ stub = alu(stub, add_opc, esi_opnd, esp_opnd);
+ stub = alu(stub, and_opc, esi_opnd, Imm_Opnd(size_32, STACK_ALIGNMENT_MASK));
+ stub = alu(stub, add_opc, ebx_opnd, esi_opnd);
+ stub = mov(stub, esp_opnd, ebx_opnd);
+
+ // Compute the offset of the last 'args' element from the adjusted stack pointer.
+ stub = lea(stub, eax_opnd, M_Index_Opnd(eax_reg, ecx_reg, -4, 4));
+ stub = alu(stub, sub_opc, eax_opnd, esp_opnd);
+ stub = alu(stub, or_opc, ecx_opnd, ecx_opnd);
+ stub = branch8(stub, Condition_Z, Imm_Opnd(size_8, 0));
+ labels.add_patch_to_label(LOOP_END, stub - 1, LPT_Rel8);
+
+// LOOP_BEGIN:
+ // Push the argument words onto the stack (last to first).
+ labels.define_label(LOOP_BEGIN, stub, false);
+
+ stub = push(stub, M_Index_Opnd(esp_reg, eax_reg, 0, 1));
+ stub = loop(stub, Imm_Opnd(size_8, 0));
+ labels.add_patch_to_label(LOOP_BEGIN, stub - 1, LPT_Rel8);
+// LOOP_END:
+ labels.define_label(LOOP_END, stub, false);
+
+ // Call target function.
+ stub = mov(stub, eax_opnd, M_Base_Opnd(ebp_reg, STACK_FUNC_OFFSET));
+ stub = call(stub, eax_opnd);
+
+ // Restore callee-saved registers from the stack.
+ stub = lea(stub, esp_opnd, M_Base_Opnd(ebp_reg, STACK_CALLEE_SAVED_OFFSET));
+ stub = pop(stub, edi_opnd);
+ stub = pop(stub, esi_opnd);
+ stub = pop(stub, ebx_opnd);
+
+ // Leave current frame.
+ stub = pop(stub, ebp_opnd);
+ stub = ret(stub);
+
+ assert(stub - (char *)func <= STUB_SIZE);
+
+ DUMP_STUB(func, "invoke_managed_func", stub - (char *)func);
+
+ return func;
+}
+
void
JIT_execute_method_default(JIT_Handle jit, jmethodID methodID, jvalue *return_value, jvalue *args) {
@@ -117,6 +162,8 @@
// fprintf(stderr, "Not implemented\n");
+ static const IntFuncPtr invoke_managed_func = gen_invoke_managed_func();
+
Method *method = (Method*) methodID;
TRACE("enter method "
<< method->get_class()->get_name()->bytes << " "
@@ -196,14 +243,14 @@
switch(ret_type) {
case JAVA_TYPE_VOID:
- vm_invoke_native_array_stub(arg_words, argId, meth_addr);
+ invoke_managed_func(arg_words, argId, meth_addr);
break;
case JAVA_TYPE_CLASS:
case JAVA_TYPE_ARRAY:
case JAVA_TYPE_STRING:
{
- ManagedObject *ref = vm_invoke_native_array_stub_ref(arg_words, argId, meth_addr);
+ ManagedObject *ref = ((RefFuncPtr)invoke_managed_func)(arg_words, argId, meth_addr);
ObjectHandle h = oh_allocate_local_handle();
if (ref != NULL) {
@@ -220,19 +267,19 @@
case JAVA_TYPE_CHAR:
case JAVA_TYPE_SHORT:
case JAVA_TYPE_INT:
- resultPtr->i = vm_invoke_native_array_stub_int(arg_words, argId, meth_addr);
+ resultPtr->i = ((IntFuncPtr)invoke_managed_func)(arg_words, argId, meth_addr);
break;
case JAVA_TYPE_FLOAT:
- resultPtr->f = vm_invoke_native_array_stub_float(arg_words, argId, meth_addr);
+ resultPtr->f = ((FloatFuncPtr)invoke_managed_func)(arg_words, argId, meth_addr);
break;
case JAVA_TYPE_LONG:
- resultPtr->j = vm_invoke_native_array_stub(arg_words, argId, meth_addr);
+ resultPtr->j = ((IntFuncPtr)invoke_managed_func)(arg_words, argId, meth_addr);
break;
case JAVA_TYPE_DOUBLE:
- resultPtr->d = vm_invoke_native_array_stub_double(arg_words, argId, meth_addr);
+ resultPtr->d = ((DoubleFuncPtr)invoke_managed_func)(arg_words, argId, meth_addr);
break;
default:
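
The new IA-32 invoke stub aligns esp before copying the argument words, so the stack is aligned again once the arguments and the call's return address have been pushed. A rough C++ sketch of that adjustment (illustrative helper name; MANAGED_STACK_ALIGNMENT comes from jit_runtime_support_common.h, included above):

    // Sketch of the alignment arithmetic emitted by gen_invoke_managed_func():
    // lower esp so that, after 'nargs' 4-byte argument words and the 4-byte
    // return address are pushed, it lands back on an alignment boundary.
    static inline POINTER_SIZE_INT align_esp_for_call(POINTER_SIZE_INT esp,
                                                      unsigned nargs,
                                                      unsigned alignment)
    {
        POINTER_SIZE_INT frame_bytes = nargs * 4 + 4;  // argument words + return address
        POINTER_SIZE_INT aligned = (esp - frame_bytes) & ~(POINTER_SIZE_INT)(alignment - 1);
        return aligned + frame_bytes;
    }
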
Index: trunk/vm/vmcore/src/util/ia32/base/invoke_native_stub_ia32.asm
===================================================================
--- trunk/vm/vmcore/src/util/ia32/base/invoke_native_stub_ia32.asm (revision 591610)
+++ trunk/vm/vmcore/src/util/ia32/base/invoke_native_stub_ia32.asm (working copy)
@@ -1,46 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one or more
-// contributor license agreements. See the NOTICE file distributed with
-// this work for additional information regarding copyright ownership.
-// The ASF licenses this file to You under the Apache License, Version 2.0
-// (the "License"); you may not use this file except in compliance with
-// the License. You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// Author: Intel, Evgueni Brevnov
-//
- .text
- .align 2
-.globl vm_invoke_native_array_stub
- .type vm_invoke_native_array_stub, @function
-vm_invoke_native_array_stub:
-
- push %ebp
- movl %esp, %ebp
- push %ebx
- push %esi
- push %edi
- movl 8(%ebp), %eax
- movl 12(%ebp), %ecx
- leal -4(%eax,%ecx,4), %eax
- subl %esp, %eax
- or %ecx, %ecx
- je 2f
-1:
- push 0(%esp,%eax)
- loop 1b
-2:
- movl 16(%ebp), %eax
- call *%eax
- leal -12(%ebp), %esp
- pop %edi
- pop %esi
- pop %ebx
- leave
- ret
Index: trunk/vm/vmcore/src/util/ia32/base/jit_runtime_support_ia32.cpp
===================================================================
--- trunk/vm/vmcore/src/util/ia32/base/jit_runtime_support_ia32.cpp (revision 591610)
+++ trunk/vm/vmcore/src/util/ia32/base/jit_runtime_support_ia32.cpp (working copy)
@@ -232,7 +232,7 @@
if (VM_Global_State::loader_env->use_lil_stubs) {
LilCodeStub* cs = lil_parse_code_stub(
- "entry 0:managed:pint:void; // The single argument is a Class_Handle \n"
+ "entry 0:stdcall:pint:void; // The single argument is a Class_Handle \n"
"locals 3;\
in2out platform:pint; \
call %0i; \
@@ -495,16 +495,16 @@
static void *__stdcall
-aastore_ia32(volatile ManagedObject *elem,
+aastore_ia32(Vector_Handle array,
int idx,
- Vector_Handle array);
+ volatile ManagedObject *elem);
// 20030321 This JIT support routine expects to be called directly from managed code.
static void *__stdcall
-aastore_ia32(volatile ManagedObject *elem,
+aastore_ia32(Vector_Handle array,
int idx,
- Vector_Handle array)
+ volatile ManagedObject *elem)
{
#ifdef REFS_RUNTIME_OR_COMPRESSED
REFS_RUNTIME_SWITCH_IF
@@ -578,8 +578,8 @@
}
LilCodeStub* cs = lil_parse_code_stub(
- "entry 0:managed:ref,pint,ref:void; // The args are the element ref to store, the index, and the array to store into\n"
- "in2out managed:pint; "
+ "entry 0:stdcall:ref,pint,ref:void; // The args are the array to store into, the index, and the element ref to store\n"
+ "in2out stdcall:pint; "
"call %0i; // vm_rt_aastore either returns NULL or the ClassHandle of an exception to throw \n"
"jc r!=0,aastore_failed; \
ret; \
Index: trunk/vm/vmcore/src/util/ipf/base/jit_runtime_support_ipf.cpp
===================================================================
--- trunk/vm/vmcore/src/util/ipf/base/jit_runtime_support_ipf.cpp (revision 591610)
+++ trunk/vm/vmcore/src/util/ipf/base/jit_runtime_support_ipf.cpp (working copy)
@@ -566,7 +566,7 @@
}
if (VM_Global_State::loader_env->use_lil_stubs) {
- LilCodeStub* cs = lil_parse_code_stub("entry 0:managed::ref;");
+ LilCodeStub* cs = lil_parse_code_stub("entry 0:stdcall::ref;");
assert(cs);
cs = lil_parse_onto_end(cs,
"push_m2n 0, 0;"
@@ -818,7 +818,7 @@
// Allocate frame, save pfs, b0, and gp
int out0, save_pfs, save_b0, save_gp;
const int num_in_args = 3, num_out_args = 3;
- void *(*p_vm_rt_aastore)(ManagedObject *elem, int idx, Vector_Handle array);
+ void *(*p_vm_rt_aastore)(Vector_Handle array, int idx, ManagedObject *elem);
p_vm_rt_aastore = vm_rt_aastore;
emit_alloc_for_single_call(emitter, num_in_args, num_out_args,
(void **)p_vm_rt_aastore,