Index: trunk/vm/jitrino/src/codegenerator/CodeGenIntfc.h
===================================================================
--- trunk/vm/jitrino/src/codegenerator/CodeGenIntfc.h (revision 616709)
+++ trunk/vm/jitrino/src/codegenerator/CodeGenIntfc.h (working copy)
@@ -472,6 +472,8 @@
     virtual void methodEntry(MethodDesc* mDesc) = 0;
     virtual void methodEnd(MethodDesc* mDesc, CG_OpndHandle* retVallue = NULL) = 0;
 
+    virtual CG_OpndHandle* alloca(CG_OpndHandle* size) = 0;
+
     // Set the current persistent instruction id associated with any subsequently generated instructions.
     virtual void setCurrentPersistentId(PersistentInstructionId persistentId) = 0;
 
Index: trunk/vm/jitrino/src/codegenerator/ia32/Ia32Encoder.cpp
===================================================================
--- trunk/vm/jitrino/src/codegenerator/ia32/Ia32Encoder.cpp (revision 616709)
+++ trunk/vm/jitrino/src/codegenerator/ia32/Ia32Encoder.cpp (working copy)
@@ -244,7 +244,8 @@
 
     int disp = pdisp ? (int)pdisp->getImmValue() : 0 ;
 
-    if (p->getMemOpndKind() == MemOpndKind_StackAutoLayout) {
+    if (p->getMemOpndKind() == MemOpndKind_StackAutoLayout
+        || p->getMemOpndKind() == MemOpndKind_StackAllocaLayout) {
         disp += inst->getStackDepth();
         if (Mnemonic_POP == mnemonic) {
             disp -= STACK_SLOT_SIZE;
Index: trunk/vm/jitrino/src/codegenerator/ia32/Ia32Inst.h
===================================================================
--- trunk/vm/jitrino/src/codegenerator/ia32/Ia32Inst.h (revision 616709)
+++ trunk/vm/jitrino/src/codegenerator/ia32/Ia32Inst.h (working copy)
@@ -247,6 +247,16 @@
         return memOpndAlignment;
     }
 
+    void setMemOpndStackSize(uint32 size) {
+        assert(memOpndKind == MemOpndKind_StackAllocaLayout);
+        memOpndStackSize = size;
+    }
+
+    uint32 getMemOpndStackSize() {
+        assert(memOpndKind == MemOpndKind_StackAllocaLayout);
+        return memOpndStackSize;
+    }
+
     /**
      * Sets desirable memory operand alignment.
      */
@@ -343,7 +353,7 @@
     */
     bool isSubjectForLivenessAnalysis()const
     {
-        return (memOpndKind&(MemOpndKind_StackManualLayout|MemOpndKind_ConstantArea|MemOpndKind_Heap))==0 && !isPlacedIn(OpndKind_Imm);
+        return (memOpndKind&(MemOpndKind_StackManualLayout|MemOpndKind_StackAllocaLayout|MemOpndKind_ConstantArea|MemOpndKind_Heap))==0 && !isPlacedIn(OpndKind_Imm);
     }
 
     /** Returns the segment register used with the operand (memory). */
@@ -375,7 +385,7 @@
         :id(_id), firstId(_id), type(t), defScope(DefScope_Null), definingInst(NULL), refCount(0),
         segReg(RegName_Null), memOpndKind(MemOpndKind_Null),
-        memOpndAlignment(MemOpndAlignment_Any),
+        memOpndAlignment(MemOpndAlignment_Any), memOpndStackSize(0),
         immValue(0),
         runtimeInfo(NULL)
         { constraints[ConstraintKind_Initial]=constraints[ConstraintKind_Calculated]=c; }
 
@@ -392,8 +402,10 @@
 
     RegName segReg;
     MemOpndKind memOpndKind;
-    // Defines alignment for memory oprands that have stack auto layout kind.
+    // Alignment for memory operands with MemOpndKind_StackAutoLayout kind.
    MemOpndAlignment memOpndAlignment;
+    // Size on the stack to be allocated for operands with MemOpndKind_StackAllocaLayout kind.
+    uint32 memOpndStackSize;
 
     union{
         RegName regName;
Index: trunk/vm/jitrino/src/codegenerator/ia32/Ia32InstCodeSelector.cpp
===================================================================
--- trunk/vm/jitrino/src/codegenerator/ia32/Ia32InstCodeSelector.cpp (revision 616709)
+++ trunk/vm/jitrino/src/codegenerator/ia32/Ia32InstCodeSelector.cpp (working copy)
@@ -3293,6 +3293,12 @@
                 2, (Opnd**)hlpArgs, NULL));
 }
 
+CG_OpndHandle* InstCodeSelector::alloca(CG_OpndHandle* size) {
+    Inst * allocaInst = irManager.newAllocaInst((Opnd*)size);
+    appendInsts(allocaInst);
+    return allocaInst->getOpnd(0);
+}
+
 //____________________________________________________________________________________________________
 
 }}; // namespace Ia32
Index: trunk/vm/jitrino/src/codegenerator/ia32/Ia32InstCodeSelector.h
===================================================================
--- trunk/vm/jitrino/src/codegenerator/ia32/Ia32InstCodeSelector.h (revision 616709)
+++ trunk/vm/jitrino/src/codegenerator/ia32/Ia32InstCodeSelector.h (working copy)
@@ -267,6 +267,8 @@
     void methodEntry(MethodDesc* mDesc);
     void methodEnd(MethodDesc* mDesc, CG_OpndHandle* retOpnd = NULL);
 
+    CG_OpndHandle* alloca(CG_OpndHandle* size);
+
     //
     // Set/clear current persistent id to be assigned to the generated instructions
     //
Index: trunk/vm/jitrino/src/codegenerator/ia32/Ia32InternalProfiler.cpp
===================================================================
--- trunk/vm/jitrino/src/codegenerator/ia32/Ia32InternalProfiler.cpp (revision 616709)
+++ trunk/vm/jitrino/src/codegenerator/ia32/Ia32InternalProfiler.cpp (working copy)
@@ -220,6 +220,7 @@
 const AttrDesc memOpndKinds[] = {
     {MemOpndKind_Null, "Null"},
     {MemOpndKind_StackAutoLayout, "StackAutoLayout"},
+    {MemOpndKind_StackAllocaLayout, "StackAllocaLayout"},
     {MemOpndKind_StackManualLayout, "StackManualLayout"},
     {MemOpndKind_Stack, "Stack"},
     {MemOpndKind_Heap, "Heap"},
Index: trunk/vm/jitrino/src/codegenerator/ia32/Ia32IRConstants.h
===================================================================
--- trunk/vm/jitrino/src/codegenerator/ia32/Ia32IRConstants.h (revision 616709)
+++ trunk/vm/jitrino/src/codegenerator/ia32/Ia32IRConstants.h (working copy)
@@ -59,11 +59,12 @@
 enum MemOpndKind {
     MemOpndKind_Null=0,
-    MemOpndKind_StackAutoLayout=0xf,
-    MemOpndKind_StackManualLayout=0x10,
-    MemOpndKind_Stack=0x1f,
-    MemOpndKind_Heap=0x20,
-    MemOpndKind_ConstantArea=0x40,
+    MemOpndKind_StackAutoLayout=0x1,
+    MemOpndKind_StackAllocaLayout=0x2,
+    MemOpndKind_StackManualLayout=0x4,
+    MemOpndKind_Stack=0xf,
+    MemOpndKind_Heap=0x10,
+    MemOpndKind_ConstantArea=0x20,
     MemOpndKind_Any=0xff,
 };
Index: trunk/vm/jitrino/src/codegenerator/ia32/Ia32IRManager.cpp
===================================================================
--- trunk/vm/jitrino/src/codegenerator/ia32/Ia32IRManager.cpp (revision 616709)
+++ trunk/vm/jitrino/src/codegenerator/ia32/Ia32IRManager.cpp (working copy)
@@ -435,7 +435,19 @@
     return inst;
 }
 
+Inst * IRManager::newAllocaInst(Opnd* size) {
+    assert(size->getType()->isInt4());
+    //Inst * allocaInst = new(memoryManager, 4) Inst(Mnemonic_LEA, instId++, Inst::Form_Native);
+    Opnd * destOpnd = newOpnd(typeManager.getUnmanagedPtrType(typeManager.getIntPtrType()));
+    Opnd * memOpnd = newMemOpnd(typeManager.getIntPtrType(), MemOpndKind_StackAllocaLayout, getRegOpnd(STACK_REG), 0);
+    memOpnd->setMemOpndStackSize((uint32)size->getImmValue());
+    //allocaInst->insertOpnd(0, destOpnd, Inst::OpndRole_Explicit|Inst::OpndRole_Def);
+    //allocaInst->insertOpnd(1, memOpnd, Inst::OpndRole_Explicit|Inst::OpndRole_Use);
+    //allocaInst->setConstraint(2, Constraint(OpndKind_Any, sizeOpnd->getSize()));
+    //allocaInst->assignOpcodeGroup(this);
+    return newInstEx(Mnemonic_LEA, 1, destOpnd, memOpnd);
+}
 
 //_________________________________________________________________________________________________
 BranchInst * IRManager::newBranchInst(Mnemonic mnemonic, Node* trueTarget, Node* falseTarget, Opnd * targetOpnd)
@@ -1136,7 +1148,9 @@
         return instList;
     }else{
 #ifdef _EM64T_
-        if((targetOpnd->getMemOpndKind() == MemOpndKind_StackAutoLayout) && (sourceKind==OpndKind_Imm) && (sourceOpnd->getSize() == OpndSize_64))
+        MemOpndKind kind = targetOpnd->getMemOpndKind();
+        if((kind == MemOpndKind_StackAutoLayout || kind == MemOpndKind_StackAllocaLayout)
+            && (sourceKind==OpndKind_Imm) && (sourceOpnd->getSize() == OpndSize_64))
             return newMemMovSequence(targetOpnd, sourceOpnd, regUsageMask, false);
         else
 #else
Index: trunk/vm/jitrino/src/codegenerator/ia32/Ia32IRManager.h
===================================================================
--- trunk/vm/jitrino/src/codegenerator/ia32/Ia32IRManager.h (revision 616709)
+++ trunk/vm/jitrino/src/codegenerator/ia32/Ia32IRManager.h (working copy)
@@ -185,6 +185,11 @@
         Opnd * opnd4, Opnd * opnd5=0, Opnd * opnd6=0, Opnd * opnd7=0
         );
 
+    /**
+     * @param size - number of bytes to allocate on stack.
+     */
+    Inst * newAllocaInst(Opnd* size);
+
     /** Creates a new branch instruction
     The source of the targetMemOpnd can be defined by its RuntimInfo
Index: trunk/vm/jitrino/src/codegenerator/ia32/Ia32StackLayout.cpp
===================================================================
--- trunk/vm/jitrino/src/codegenerator/ia32/Ia32StackLayout.cpp (revision 616709)
+++ trunk/vm/jitrino/src/codegenerator/ia32/Ia32StackLayout.cpp (working copy)
@@ -292,8 +292,10 @@
     // Create or reset displacements for stack memory operands.
     for (uint32 i = 0; i < irManager->getOpndCount(); i++) {
         Opnd * opnd = irManager->getOpnd(i);
-        if (opnd->getRefCount() && opnd->getMemOpndKind() == MemOpndKind_StackAutoLayout) {
-            Opnd * dispOpnd=opnd->getMemOpndSubOpnd(MemOpndSubOpndKind_Displacement);
+        MemOpndKind kind = opnd->getMemOpndKind();
+        if (opnd->getRefCount() &&
+            (kind == MemOpndKind_StackAutoLayout || kind == MemOpndKind_StackAllocaLayout)) {
+            Opnd * dispOpnd = opnd->getMemOpndSubOpnd(MemOpndSubOpndKind_Displacement);
             if (dispOpnd == NULL){
                 dispOpnd = irManager->newImmOpnd(irManager->getTypeManager().getInt32Type(), 0);
                 opnd->setMemOpndSubOpnd(MemOpndSubOpndKind_Displacement, dispOpnd);
@@ -362,29 +364,39 @@
     irManager->getAliasRelations(relations);
     // Assign displacements for local variable operands.
-    for (int j = 0; j <= alignmentSequenceSize; j++) {
+    for (int j = 0; j < alignmentSequenceSize; j++) {
+        Opnd::MemOpndAlignment currentAlignment = alignmentSequence[j];
         for (uint32 i = 0; i < irManager->getOpndCount(); i++) {
             Opnd * opnd = irManager->getOpnd(i);
-            Opnd::MemOpndAlignment currentAlignment = alignmentSequence[j];
-            if(opnd->getRefCount() != 0
-                && opnd->getMemOpndKind() == MemOpndKind_StackAutoLayout
-                && opnd->getMemOpndAlignment() == currentAlignment) {
-                Opnd * dispOpnd = opnd->getMemOpndSubOpnd(MemOpndSubOpndKind_Displacement);
-                if (dispOpnd->getImmValue() == 0) {
-                    if (relations[opnd->getId()].outerOpnd == NULL) {
-                        if (currentAlignment == Opnd::MemOpndAlignment_Any) {
-                            uint32 cb = getByteSize(opnd->getSize());
-                            cb = (cb + (slotSize - 1)) & ~(slotSize - 1);
-                            offset -= cb;
-                        } else {
-                            // Make sure
-                            assert((stackSizeAlignment % currentAlignment) == 0);
-                            // It just doesn't make sense to align on less than operand size.
-                            assert((uint32)currentAlignment >= getByteSize(opnd->getSize()));
-                            offset -= currentAlignment;
-                        }
-                        dispOpnd->assignImmValue(offset);
+            if (!opnd->isPlacedIn(OpndKind_Memory)
+                || opnd->getRefCount() == 0
+                || relations[opnd->getId()].outerOpnd != NULL
+                || opnd->getMemOpndAlignment() != currentAlignment) {
+                continue;
+            }
+            Opnd * dispOpnd = opnd->getMemOpndSubOpnd(MemOpndSubOpndKind_Displacement);
+            if (dispOpnd != NULL && dispOpnd->getImmValue() == 0) {
+                MemOpndKind kind = opnd->getMemOpndKind();
+                if (kind == MemOpndKind_StackAutoLayout) {
+                    if (currentAlignment == Opnd::MemOpndAlignment_Any) {
+                        uint32 cb = getByteSize(opnd->getSize());
+                        cb = (cb + (slotSize - 1)) & ~(slotSize - 1);
+                        offset -= cb;
+                    } else {
+                        // Make sure the requested alignment evenly divides the stack size alignment.
+                        assert((stackSizeAlignment % currentAlignment) == 0);
+                        // It just doesn't make sense to align on less than operand size.
+                        assert((uint32)currentAlignment >= getByteSize(opnd->getSize()));
+                        offset -= currentAlignment;
                     }
+                    dispOpnd->assignImmValue(offset);
+                } else if (kind == MemOpndKind_StackAllocaLayout) {
+                    // Doesn't support alignment yet.
+                    assert(opnd->getMemOpndAlignment() == Opnd::MemOpndAlignment_Any);
+                    uint32 cb = opnd->getMemOpndStackSize();
+                    cb = (cb + (slotSize - 1)) & ~(slotSize - 1);
+                    offset -= cb;
+                    dispOpnd->assignImmValue(offset);
                 }
             }
         }
     }
Index: trunk/vm/jitrino/src/optimizer/CodeSelectors.cpp
===================================================================
--- trunk/vm/jitrino/src/optimizer/CodeSelectors.cpp (revision 616709)
+++ trunk/vm/jitrino/src/optimizer/CodeSelectors.cpp (working copy)
@@ -1893,7 +1893,9 @@
         assert(0);
         break;
     case Op_Alloca:
-        assert(0);
+        {
+            cgInst = instructionCallback.alloca(getCGInst(inst->getSrc(0)));
+        }
         break;
     case Op_ArgList:
         assert(0);
Index: trunk/vm/jitrino/src/optimizer/Inst.cpp
===================================================================
--- trunk/vm/jitrino/src/optimizer/Inst.cpp (revision 616709)
+++ trunk/vm/jitrino/src/optimizer/Inst.cpp (working copy)
@@ -2417,6 +2417,19 @@
                     dst->getType()->tag, dst, dimensions, newNumElems, elemType);
 }
 
+/**
+ * Allocates the specified number of bytes on the memory stack of the current frame.
+ * + * @param dst – unmanaged pointer to the allocated region + * @param size – immediate value holding number of bytes to be allocated on the stack + * @returns – generated instruction + */ +Inst* +InstFactory::makeAlloca(Opnd* dst, Opnd* size) { + assert(dst->getType()->isUnmanagedPtr()); + return makeInst(Op_Alloca, Modifier(), Type::UnmanagedPtr, dst, size); +} + Inst* InstFactory::makeTauMonitorEnter(Opnd* src, Opnd *tauNonNull) { assert(tauNonNull->getType()->tag == Type::Tau); return makeInst(Op_TauMonitorEnter, Modifier(), Type::Void, OpndManager::getNullOpnd(), src, Index: trunk/vm/jitrino/src/optimizer/Inst.h =================================================================== --- trunk/vm/jitrino/src/optimizer/Inst.h (revision 616709) +++ trunk/vm/jitrino/src/optimizer/Inst.h (working copy) @@ -1084,6 +1084,7 @@ public: MemoryManager& getMemManager() {return memManager;} Inst* clone(Inst* inst, OpndManager& opndManager, OpndRenameTable *table); + // Numeric compute Inst* makeAdd(Modifier mod, Opnd* dst, Opnd* src1, Opnd* src2); Inst* makeSub(Modifier mod, Opnd* dst, Opnd* src1, Opnd* src2); @@ -1224,6 +1225,7 @@ Inst* makeNewObj(Opnd* dst, Type* type); Inst* makeNewArray(Opnd* dst, Opnd* numElems, Type* elemType); Inst* makeNewMultiArray(Opnd* dst, uint32 dimensions, Opnd** numElems, Type* elemType); + Inst* makeAlloca(Opnd* dst, Opnd* size); // sync Inst* makeTauMonitorEnter(Opnd* src, Opnd *tauSrcNonNull); Inst* makeTauMonitorExit(Opnd* src, Opnd *tauSrcNonNull); Index: trunk/vm/jitrino/src/optimizer/memoryopt.cpp =================================================================== --- trunk/vm/jitrino/src/optimizer/memoryopt.cpp (revision 616709) +++ trunk/vm/jitrino/src/optimizer/memoryopt.cpp (working copy) @@ -909,7 +909,6 @@ case Op_LdToken: case Op_InitBlock: case Op_CopyBlock: - case Op_Alloca: assert(0); break; @@ -938,6 +937,7 @@ case Op_LdArrayLenOffsetPlusHeapbase: case Op_AddOffset: case Op_AddOffsetPlusHeapbase: + case Op_Alloca: // the following are irrelevant, but cased so we // notice any additions: Index: trunk/vm/jitrino/src/optimizer/Opcode.cpp =================================================================== --- trunk/vm/jitrino/src/optimizer/Opcode.cpp (revision 616709) +++ trunk/vm/jitrino/src/optimizer/Opcode.cpp (working copy) @@ -76,166 +76,169 @@ }; static OpcodeInfo opcodeTable[] = { - { Op_Add, false, MB::Movable, MK::Overflow_and_Exception_and_Strict, "add ", "add%m %s -) %l" }, - { Op_Mul, false, MB::Movable, MK::Overflow_and_Exception_and_Strict, "mul ", "mul%m %s -) %l", }, - { Op_Sub, false, MB::Movable, MK::Overflow_and_Exception_and_Strict, "sub ", "sub%m %s -) %l", }, - { Op_TauDiv, false, MB::Movable, MK::Signed_and_Strict, "div ", "div%m %0,%1 ((%2)) -) %l", }, // (opnds must already be checked for 0/overflow) - { Op_TauRem, false, MB::Movable, MK::Signed_and_Strict, "rem ", "rem%m %0,%1 ((%2)) -) %l", }, // (opnds must already be checked for 0/overflow) - { Op_Neg, false, MB::Movable, MK::None, "neg ", "neg %s -) %l", }, - { Op_MulHi, false, MB::Movable, MK::Signed, "mulhi ", "mulhi%m %s -) %l", }, // SignedModifier (but only signed needed now) - { Op_Min, false, MB::Movable, MK::None, "min ", "min %s -) %l", }, - { Op_Max, false, MB::Movable, MK::None, "max ", "max %s -) %l", }, - { Op_Abs, false, MB::Movable, MK::None, "abs ", "abs %s -) %l", }, - { Op_And, false, MB::Movable, MK::None, "and ", "and %s -) %l", }, - { Op_Or, false, MB::Movable, MK::None, "or ", "or %s -) %l", }, - { Op_Xor, false, MB::Movable, MK::None, "xor ", "xor %s 
-) %l", }, - { Op_Not, false, MB::Movable, MK::None, "not ", "not %s -) %l", }, - { Op_Select, false, MB::Movable, MK::None, "select", "select %s -) %l", }, // (src1 ? src2 : src3) - { Op_Conv, false, MB::Movable, MK::Overflow_and_Exception_and_Strict, "conv ", "conv%t%m %s -) %l", }, - { Op_ConvZE, false, MB::Movable, MK::Overflow_and_Exception_and_Strict, "convze ", "conv_ze_%t%m %s -) %l", }, - { Op_ConvUnmanaged, false, MB::StoreOrSync, MK::Overflow_and_Exception_and_Strict, "convu ", "conv_unm_%t%m %s -) %l", }, - { Op_Shladd, false, MB::Movable, MK::None, "shladd", "shladd %s -) %l", }, // no mods, 2nd operand must be LdConstant - { Op_Shl, false, MB::Movable, MK::ShiftMask, "shl ", "shl%m %s -) %l", }, - { Op_Shr, false, MB::Movable, MK::ShiftMask_and_Signed, "shr ", "shr%m %s -) %l", }, - { Op_Cmp, false, MB::Movable, MK::Comparison, "cmp ", "c%m:%t %s -) %l", }, - { Op_Cmp3, false, MB::Movable, MK::Comparison, "cmp3 ", "c3%m:%t %s -) %l", }, // 3-way compare, e.g.: ((s0>s1)?1:((s1>s0)?-1:0)) - { Op_Branch, true, MB::ControlFlow, MK::Comparison, "br ", "if c%m.%t %s goto %l", }, - { Op_Jump, true, MB::ControlFlow, MK::None, "jmp ", "goto %l", }, // (different from the CLI jmp opcode) - { Op_Switch, true, MB::ControlFlow, MK::None, "switch", "switch (%l)[%0]", }, + { Op_Add, false, MB::Movable, MK::Overflow_and_Exception_and_Strict, "add ", "add%m %s -) %l" }, + { Op_Mul, false, MB::Movable, MK::Overflow_and_Exception_and_Strict, "mul ", "mul%m %s -) %l", }, + { Op_Sub, false, MB::Movable, MK::Overflow_and_Exception_and_Strict, "sub ", "sub%m %s -) %l", }, + { Op_TauDiv, false, MB::Movable, MK::Signed_and_Strict, "div ", "div%m %0,%1 ((%2)) -) %l", }, // (opnds must already be checked for 0/overflow) + { Op_TauRem, false, MB::Movable, MK::Signed_and_Strict, "rem ", "rem%m %0,%1 ((%2)) -) %l", }, // (opnds must already be checked for 0/overflow) + { Op_Neg, false, MB::Movable, MK::None, "neg ", "neg %s -) %l", }, + { Op_MulHi, false, MB::Movable, MK::Signed, "mulhi ", "mulhi%m %s -) %l", }, // SignedModifier (but only signed needed now) + { Op_Min, false, MB::Movable, MK::None, "min ", "min %s -) %l", }, + { Op_Max, false, MB::Movable, MK::None, "max ", "max %s -) %l", }, + { Op_Abs, false, MB::Movable, MK::None, "abs ", "abs %s -) %l", }, + { Op_And, false, MB::Movable, MK::None, "and ", "and %s -) %l", }, + { Op_Or, false, MB::Movable, MK::None, "or ", "or %s -) %l", }, + { Op_Xor, false, MB::Movable, MK::None, "xor ", "xor %s -) %l", }, + { Op_Not, false, MB::Movable, MK::None, "not ", "not %s -) %l", }, + { Op_Select, false, MB::Movable, MK::None, "select", "select %s -) %l", }, // (src1 ? 
src2 : src3) + { Op_Conv, false, MB::Movable, MK::Overflow_and_Exception_and_Strict, "conv ", "conv%t%m %s -) %l", }, + { Op_ConvZE, false, MB::Movable, MK::Overflow_and_Exception_and_Strict, "convze ", "conv_ze_%t%m %s -) %l", }, + { Op_ConvUnmanaged, false, MB::StoreOrSync, MK::Overflow_and_Exception_and_Strict, "convu ", "conv_unm_%t%m %s -) %l", }, + { Op_Shladd, false, MB::Movable, MK::None, "shladd", "shladd %s -) %l", }, // no mods, 2nd operand must be LdConstant + { Op_Shl, false, MB::Movable, MK::ShiftMask, "shl ", "shl%m %s -) %l", }, + { Op_Shr, false, MB::Movable, MK::ShiftMask_and_Signed, "shr ", "shr%m %s -) %l", }, + { Op_Cmp, false, MB::Movable, MK::Comparison, "cmp ", "c%m:%t %s -) %l", }, + { Op_Cmp3, false, MB::Movable, MK::Comparison, "cmp3 ", "c3%m:%t %s -) %l", }, // 3-way compare, e.g.: ((s0>s1)?1:((s1>s0)?-1:0)) + { Op_Branch, true, MB::ControlFlow, MK::Comparison, "br ", "if c%m.%t %s goto %l", }, + { Op_Jump, true, MB::ControlFlow, MK::None, "jmp ", "goto %l", }, // (different from the CLI jmp opcode) + { Op_Switch, true, MB::ControlFlow, MK::None, "switch", "switch (%l)[%0]", }, { Op_DirectCall, true, MB::Call, MK::Exception, "call ", "call %d(%p) ((%0,%1)) -) %l %b", }, { Op_TauVirtualCall, true, MB::Call, MK::Exception, "callvirt", "callvrt [%2.%d](%a) ((%0,%1)) -) %l %b", }, - { Op_IndirectCall, true, MB::Call, MK::Exception, "calli", "calli [%0](%a) ((%1,%2)) -) %l", }, - { Op_IndirectMemoryCall, true, MB::Call, MK::Exception, "callimem", "callimem [%0](%a) ((%1,%2)) -) %l", }, - { Op_IntrinsicCall, true, MB::Call, MK::Exception, "callintr", "callintr %d(%p) ((%0,%1)) -) %l", }, - { Op_JitHelperCall, true, MB::Call, MK::Exception, "callhelper", "callhelper %d(%s) -) %l", }, - { Op_VMHelperCall, true, MB::Call, MK::Exception, "callvmhelper", "callvmhelper %d(%s) -) %l %b", }, - { Op_Return, true, MB::ControlFlow, MK::None, "return", "return %s", }, - { Op_Catch, true, MB::ControlFlow, MK::None, "catch", "catch -) %l", }, - { Op_Throw, true, MB::Exception, MK::Throw, "throw ", "throw %0 %b", }, - { Op_PseudoThrow, true, MB::Exception, MK::Exception, "pseudoThrow ", "pseudoThrow %b", }, - { Op_ThrowSystemException, true, MB::Exception, MK::None, "throwsys ", "throwsys %d %b", }, - { Op_ThrowLinkingException, true, MB::Exception, MK::None, "throwLink ", "throwLink", }, - { Op_Leave, true, MB::ControlFlow, MK::None, "leave ", "leave %l", }, // CLI only -- DELETE - { Op_EndFinally, true, MB::ControlFlow, MK::None, "endfinally", "endfinally", }, // CLI only -- DELETE - { Op_EndFilter, true, MB::ControlFlow, MK::None, "endfilter", "endfilter", }, // CLI only -- DELETE - { Op_EndCatch, true, MB::Call, MK::None, "endcatch", "endcatch", }, // CLI only -- DELETE - { Op_JSR, true, MB::Call, MK::None, "jsr", "jsr %l", }, // Java only, JSR's -- DELETE - { Op_Ret, true, MB::ControlFlow, MK::None, "ret", "ret %s", }, // Java only, JSR's -- DELETE - { Op_SaveRet, true, MB::ControlFlow, MK::None, "saveret", "saveret -) %l", }, // Java only, JSR's -- DELETE + { Op_IndirectCall, true, MB::Call, MK::Exception, "calli", "calli [%0](%a) ((%1,%2)) -) %l", }, + { Op_IndirectMemoryCall, true, MB::Call, MK::Exception, "callimem", "callimem [%0](%a) ((%1,%2)) -) %l", }, + { Op_IntrinsicCall, true, MB::Call, MK::Exception, "callintr", "callintr %d(%p) ((%0,%1)) -) %l", }, + { Op_JitHelperCall, true, MB::Call, MK::Exception, "callhelper", "callhelper %d(%s) -) %l", }, + { Op_VMHelperCall, true, MB::Call, MK::Exception, "callvmhelper", "callvmhelper %d(%s) -) %l %b", }, + { Op_Return, true, 
MB::ControlFlow, MK::None, "return", "return %s", }, + { Op_Catch, true, MB::ControlFlow, MK::None, "catch", "catch -) %l", }, + { Op_Throw, true, MB::Exception, MK::Throw, "throw ", "throw %0 %b", }, + { Op_PseudoThrow, true, MB::Exception, MK::Exception, "pseudoThrow ", "pseudoThrow %b", }, + { Op_ThrowSystemException, true, MB::Exception, MK::None, "throwsys ", "throwsys %d %b", }, + { Op_ThrowLinkingException, true, MB::Exception, MK::None, "throwLink ", "throwLink", }, + { Op_Leave, true, MB::ControlFlow, MK::None, "leave ", "leave %l", }, // CLI only -- DELETE + { Op_EndFinally, true, MB::ControlFlow, MK::None, "endfinally", "endfinally", }, // CLI only -- DELETE + { Op_EndFilter, true, MB::ControlFlow, MK::None, "endfilter", "endfilter", }, // CLI only -- DELETE + { Op_EndCatch, true, MB::Call, MK::None, "endcatch", "endcatch", }, // CLI only -- DELETE + { Op_JSR, true, MB::Call, MK::None, "jsr", "jsr %l", }, // Java only, JSR's -- DELETE + { Op_Ret, true, MB::ControlFlow, MK::None, "ret", "ret %s", }, // Java only, JSR's -- DELETE + { Op_SaveRet, true, MB::ControlFlow, MK::None, "saveret", "saveret -) %l", }, // Java only, JSR's -- DELETE // Move instruction - { Op_Copy, false, MB::Movable, MK::None, "copy", "copy %s -) %l", }, - { Op_DefArg, true, MB::None, MK::DefArg, "defarg", "defarg%m -) %l", }, + { Op_Copy, false, MB::Movable, MK::None, "copy", "copy %s -) %l", }, + { Op_DefArg, true, MB::None, MK::DefArg, "defarg", "defarg%m -) %l", }, // Load instructions - { Op_LdConstant, false, MB::Movable, MK::None, "ldc ", "ldc%t #%c -) %l", }, - { Op_LdRef, false, MB::Movable, MK::AutoCompress, "ldref ", "ldref%m (%d) -) %l %b", }, - { Op_LdVar, false, MB::None, MK::None, "ldvar ", "ldvar %0 -) %l", }, - { Op_LdVarAddr, false, MB::Movable, MK::None, "ldvara", "ldvara %0 -) %l", }, - { Op_TauLdInd, false, MB::Load, MK::AutoCompress_Speculative, "ldind", "ldind%m:%t [%0] ((%1,%2)) -) %l", }, + { Op_LdConstant, false, MB::Movable, MK::None, "ldc ", "ldc%t #%c -) %l", }, + { Op_LdRef, false, MB::Movable, MK::AutoCompress, "ldref ", "ldref%m (%d) -) %l %b", }, + { Op_LdVar, false, MB::None, MK::None, "ldvar ", "ldvar %0 -) %l", }, + { Op_LdVarAddr, false, MB::Movable, MK::None, "ldvara", "ldvara %0 -) %l", }, + { Op_TauLdInd, false, MB::Load, MK::AutoCompress_Speculative, "ldind", "ldind%m:%t [%0] ((%1,%2)) -) %l", }, { Op_TauLdField, false, MB::Load, MK::AutoCompress, "ldfld", "ldfld:%t [%0.%d] ((%1,%2)) -) %l", }, - { Op_LdStatic, false, MB::Load, MK::AutoCompress, "ldsfld", "ldsfld:%t [%d] -) %l", }, + { Op_LdStatic, false, MB::Load, MK::AutoCompress, "ldsfld", "ldsfld:%t [%d] -) %l", }, { Op_TauLdElem, false, MB::Load, MK::AutoCompress, "ldelem", "ldelem:%t [%0[%1]] ((%2,%3)) -) %l", }, - { Op_LdFieldAddr, false, MB::Movable, MK::None, "ldflda", "ldflda [%0.%d] -) %l", }, - { Op_LdStaticAddr, false, MB::Movable, MK::None, "ldsflda", "ldsflda [%d] -) %l", }, - { Op_LdElemAddr, false, MB::Movable, MK::None, "ldelema", "ldelema [%0[%1]] -) %l", }, - { Op_TauLdVTableAddr, false, MB::Movable, MK::None, "ldvtable", "ldvtable %0 ((%1)) -) %l", }, - { Op_TauLdIntfcVTableAddr, false, MB::Movable, MK::None, "ldintfcvt", "ldintfcvt %0,%d -) %l", }, - { Op_TauLdVirtFunAddr, false, MB::CSEable, MK::None, "ldvfn ", "ldvfn [%0.%d] ((%1)) -) %l", }, - { Op_TauLdVirtFunAddrSlot, false, MB::CSEable, MK::None, "ldvfnslot", "ldvfnslot [%0.%d] ((%1)) -) %l", }, - { Op_LdFunAddr, false, MB::CSEable, MK::None, "ldfn ", "ldfn [%d] -) %l", }, - { Op_LdFunAddrSlot, false, MB::CSEable, MK::None, "ldfnslot", 
"ldfnslot [%d] -) %l", }, - { Op_GetVTableAddr, false, MB::Movable, MK::None, "getvtable", "getvtable %d -) %l", }, // obtains the address of the vtable for a particular object type - { Op_TauArrayLen, false, MB::CSEable, MK::None, "arraylen ", "arraylen %0 ((%1,%2)) -) %l", }, - { Op_LdArrayBaseAddr, false, MB::CSEable, MK::None, "ldbase", "ldbase %s -) %l", }, // load the base (zero'th element) address of array - { Op_AddScaledIndex, false, MB::Movable, MK::None, "addindex", "addindex %s -) %l", }, // Add a scaled index to an array element address - { Op_StVar, true, MB::None, MK::None, "stvar ", "stvar %0 -) %l", }, - { Op_TauStInd, true, MB::StoreOrSync, MK::Store_AutoCompress, "stind", "stind%m:%t %0 ((%2,%3,%4)) -) [%1]", }, + { Op_LdFieldAddr, false, MB::Movable, MK::None, "ldflda", "ldflda [%0.%d] -) %l", }, + { Op_LdStaticAddr, false, MB::Movable, MK::None, "ldsflda", "ldsflda [%d] -) %l", }, + { Op_LdElemAddr, false, MB::Movable, MK::None, "ldelema", "ldelema [%0[%1]] -) %l", }, + { Op_TauLdVTableAddr, false, MB::Movable, MK::None, "ldvtable", "ldvtable %0 ((%1)) -) %l", }, + { Op_TauLdIntfcVTableAddr, false, MB::Movable, MK::None, "ldintfcvt", "ldintfcvt %0,%d -) %l", }, + { Op_TauLdVirtFunAddr, false, MB::CSEable, MK::None, "ldvfn ", "ldvfn [%0.%d] ((%1)) -) %l", }, + { Op_TauLdVirtFunAddrSlot, false, MB::CSEable, MK::None, "ldvfnslot", "ldvfnslot [%0.%d] ((%1)) -) %l", }, + { Op_LdFunAddr, false, MB::CSEable, MK::None, "ldfn ", "ldfn [%d] -) %l", }, + { Op_LdFunAddrSlot, false, MB::CSEable, MK::None, "ldfnslot", "ldfnslot [%d] -) %l", }, + { Op_GetVTableAddr, false, MB::Movable, MK::None, "getvtable", "getvtable %d -) %l", }, // obtains the address of the vtable for a particular object type + { Op_TauArrayLen, false, MB::CSEable, MK::None, "arraylen ", "arraylen %0 ((%1,%2)) -) %l", }, + { Op_LdArrayBaseAddr, false, MB::CSEable, MK::None, "ldbase", "ldbase %s -) %l", }, // load the base (zero'th element) address of array + { Op_AddScaledIndex, false, MB::Movable, MK::None, "addindex", "addindex %s -) %l", }, // Add a scaled index to an array element address + { Op_StVar, true, MB::None, MK::None, "stvar ", "stvar %0 -) %l", }, + { Op_TauStInd, true, MB::StoreOrSync, MK::Store_AutoCompress, "stind", "stind%m:%t %0 ((%2,%3,%4)) -) [%1]", }, { Op_TauStField, true, MB::StoreOrSync, MK::Store_AutoCompress, "stfld", "stfld%m:%t %0 ((%2,%3)) -) [%1.%d]", }, - { Op_TauStElem, true, MB::StoreOrSync, MK::Store_AutoCompress, "stelem", "stelem%m:%t %0 ((%3,%4,%5)) -) [%1[%2]]", }, - { Op_TauStStatic, true, MB::StoreOrSync, MK::Store_AutoCompress, "stsfld", "stsfld:%t %0 ((%1)) -) [%d]", }, - { Op_TauStRef, true, MB::StoreOrSync, MK::Store_AutoCompress, "stref ", "stref%m %0 ((%3,%4,%5)) -) [%1 %2] ", }, // high-level version that will make a call to the VM + { Op_TauStElem, true, MB::StoreOrSync, MK::Store_AutoCompress, "stelem", "stelem%m:%t %0 ((%3,%4,%5)) -) [%1[%2]]",}, + { Op_TauStStatic, true, MB::StoreOrSync, MK::Store_AutoCompress, "stsfld", "stsfld:%t %0 ((%1)) -) [%d]", }, + { Op_TauStRef, true, MB::StoreOrSync, MK::Store_AutoCompress, "stref ", "stref%m %0 ((%3,%4,%5)) -) [%1 %2] ", }, // high-level version that will make a call to the VM { Op_TauCheckBounds, false, MB::Check, MK::Overflow_and_Exception, "chkbounds", "chkbounds %1 .lt. %0 -) %l %b", }, // takes index and array length arguments, }, - { Op_TauCheckLowerBound, false, MB::Check, MK::Overflow_and_Exception, "chklb", "chklb %0 .le. 
%1 -) %l", }, // throws unless src0 <= src1 - { Op_TauCheckUpperBound, false, MB::Check, MK::Overflow_and_Exception, "chkub", "chkub %0 .lt. %1 -) %l", }, // throws unless src0 < src1 - { Op_TauCheckNull, false, MB::Check, MK::Exception_and_DefArg, "chknull", "chknull %0 -) %l %b", }, // throws NullPointerException if src is null - { Op_TauCheckZero, false, MB::Check, MK::Exception, "chkzero", "chkzero %0 -) %l %b", }, // for divide by zero exceptions (div and rem) - { Op_TauCheckDivOpnds, false, MB::Check, MK::Exception, "chkdivopnds", "chkdivopnds %0,%1 -) %l", }, // for signed divide overflow in CLI (div/rem of MAXNEGINT, -1): generates an ArithmeticException - { Op_TauCheckElemType, false, MB::Check, MK::Exception, "chkelemtype", "chkelemtype %0,%1 ((%2,%3)) -) %l", }, // Array element type check for aastore - { Op_TauCheckFinite, false, MB::Check, MK::Exception, "ckfinite", "ckfinite %s -) %l", }, // throws ArithmeticException if value is NaN or +- inifinity - { Op_NewObj, false, MB::Exception, MK::Exception, "newobj", "newobj %d -) %l %b", }, // OutOfMemoryException - { Op_NewArray, false, MB::Exception, MK::Exception, "newarray", "newarray %d[%0] -) %l %b", }, // OutOfMemoryException, NegativeArraySizeException - { Op_NewMultiArray, false, MB::Exception, MK::Exception, "newmultiarray", "newmultiarray %d[%s] -) %l", }, // OutOfMemoryException, NegativeArraySizeException - { Op_TauMonitorEnter, true, MB::StoreOrSync, MK::None, "monenter", "monenter %0 ((%1))", }, // (opnd must be non-null) - { Op_TauMonitorExit, true, MB::StoreOrSync, MK::Exception, "monexit", "monexit %0 ((%1))", }, // (opnd must be non-null), IllegalMonitorStateException - { Op_TypeMonitorEnter, true, MB::StoreOrSync, MK::None, "tmonenter", "monenter %d", }, - { Op_TypeMonitorExit, true, MB::StoreOrSync, MK::Exception, "tmonexit", "monexit %d", }, - { Op_LdLockAddr, false, MB::Movable, MK::None, "ldlockaddr", "ldlockaddr %0 -) %l", }, // yields ref:int16 - { Op_IncRecCount, true, MB::StoreOrSync, MK::None, "increccnt", "increccnt %s", }, // allows BalancedMonitorEnter to be used with regular MonitorExit - { Op_TauBalancedMonitorEnter, true, MB::StoreOrSync, MK::None, "balmonenter", "balmonenter %0,%1 ((%2)) -) %l", }, // (opnd must be non-null), postdominated by BalancedMonitorExit - { Op_BalancedMonitorExit, true, MB::StoreOrSync, MK::None, "balmonexit", "balmonexit %s", }, // (cannot yield exception), },dominated by BalancedMonitorEnter - { Op_TauOptimisticBalancedMonitorEnter, true, MB::StoreOrSync, MK::None, "optbalmonenter", "optbalmonenter %0,%1 ((%2)) -) %l", }, // (opnd must be non-null), postdominated by BalancedMonitorExit - { Op_OptimisticBalancedMonitorExit, true, MB::StoreOrSync, MK::Exception, "optbalmonexit", "optbalmonexit %s", }, // (cannot yield exception), },dominated by BalancedMonitorEnter - { Op_MonitorEnterFence, true, MB::StoreOrSync, MK::None, "monenterfence", "monenterfence %0", }, // (opnd must be non-null) - { Op_MonitorExitFence, true, MB::StoreOrSync, MK::None, "monexitfence", "monexitfence %0", }, // (opnd must be non-null) - { Op_TauStaticCast, false, MB::Movable, MK::None, "staticcast", "staticcast %0,%d ((%1)) -) %l", }, // Compile-time assertion. Asserts that cast is legal. 
- { Op_TauCast, false, MB::Check, MK::Exception, "cast ", "cast %0,%d ((%1)) -) %l %b", }, // CastException (suceeds if argument is null, returns casted object) - { Op_TauAsType, false, MB::Movable, MK::None, "astype", "astype %0,%d -) %l", }, // returns casted object if argument is an instance of, null otherwise - { Op_TauInstanceOf, false, MB::Movable, MK::None, "instanceof", "instanceof %0,%d ((%1)) -) %l",}, // returns true if argument is an instance of type T, tau opnd isNonNull - { Op_InitType, true, MB::CSEable, MK::Exception, "inittype", "inittype %d %b", }, // can throw a linking exception during class initialization - { Op_Label, true, MB::None, MK::None, "label ", "%l: %b", }, // special label instructions for branch labels, finally, catch - { Op_MethodEntry, true, MB::None, MK::None, "methodentry", "--- MethodEntry(%d): (%s) %b",}, // method entry label - { Op_MethodEnd, true, MB::None, MK::None, "methodend", "+++ MethodEnd(%d) (%s)", }, // end of a method - { Op_SourceLineNumber, true, MB::None, MK::None, "lineno", "???", }, // change to source position - { Op_LdObj, false, MB::Load, MK::None, "ldobj ", "ldobj [%0] -) %l", }, // load a value type to the stack - { Op_StObj, true, MB::StoreOrSync, MK::None, "stobj ", "stobj %1 -) [%0] -- %d", }, // store a value type from the stack - { Op_CopyObj, true, MB::StoreOrSync, MK::None, "cpobj ", "cpobj [%1] -) [%0] -- %d", }, // copy a value type - { Op_InitObj, true, MB::StoreOrSync, MK::None, "initobj", "initobj [%0]", }, // initialize a value type - { Op_Sizeof, false, MB::Movable, MK::None, "sizeof", "sizeof %d -) %l", }, // Pushes the size of a value type as a U4 - { Op_Box, false, MB::Exception, MK::None, "box ", "box %0,%d -) %l", }, - { Op_Unbox, false, MB::CSEable, MK::None, "unbox ", "unbox %0,%d -) %l", }, - { Op_LdToken, true , MB::None, MK::None, "ldtok ", "ldtok -) %l", }, - { Op_MkRefAny, false, MB::CSEable, MK::None, "mkrefany", "mkrefany", }, // transforms a pointer to a typed reference - { Op_RefAnyVal, false, MB::CSEable, MK::None, "refanyval", "refanyval", }, // ??? Pushes a pointer to the typed reference ??? - { Op_RefAnyType, false, MB::CSEable, MK::None, "refanytype", "refanytype", }, // Pushes the type token in a typed reference - same as obj.getClass()? + { Op_TauCheckLowerBound, false, MB::Check, MK::Overflow_and_Exception, "chklb", "chklb %0 .le. %1 -) %l", }, // throws unless src0 <= src1 + { Op_TauCheckUpperBound, false, MB::Check, MK::Overflow_and_Exception, "chkub", "chkub %0 .lt. 
%1 -) %l", }, // throws unless src0 < src1 + { Op_TauCheckNull, false, MB::Check, MK::Exception_and_DefArg, "chknull", "chknull %0 -) %l %b", }, // throws NullPointerException if src is null + { Op_TauCheckZero, false, MB::Check, MK::Exception, "chkzero", "chkzero %0 -) %l %b", }, // for divide by zero exceptions (div and rem) + { Op_TauCheckDivOpnds, false, MB::Check, MK::Exception, "chkdivopnds", "chkdivopnds %0,%1 -) %l", }, // for signed divide overflow in CLI (div/rem of MAXNEGINT, -1): generates an ArithmeticException + { Op_TauCheckElemType, false, MB::Check, MK::Exception, "chkelemtype", "chkelemtype %0,%1 ((%2,%3)) -) %l", }, // Array element type check for aastore + { Op_TauCheckFinite, false, MB::Check, MK::Exception, "ckfinite", "ckfinite %s -) %l", }, // throws ArithmeticException if value is NaN or +- inifinity + { Op_NewObj, false, MB::Exception, MK::Exception, "newobj", "newobj %d -) %l %b", }, // OutOfMemoryException + { Op_NewArray, false, MB::Exception, MK::Exception, "newarray", "newarray %d[%0] -) %l %b", }, // OutOfMemoryException, NegativeArraySizeException + { Op_NewMultiArray, false, MB::Exception, MK::Exception, "newmultiarray", "newmultiarray %d[%s] -) %l", }, // OutOfMemoryException, NegativeArraySizeException + { Op_Alloca, false, MB::Movable, MK::None, "alloca", "alloca %0 -) %l", }, + { Op_TauMonitorEnter, true, MB::StoreOrSync, MK::None, "monenter", "monenter %0 ((%1))", }, // (opnd must be non-null) + { Op_TauMonitorExit, true, MB::StoreOrSync, MK::Exception, "monexit", "monexit %0 ((%1))", }, // (opnd must be non-null), IllegalMonitorStateException + { Op_TypeMonitorEnter, true, MB::StoreOrSync, MK::None, "tmonenter", "monenter %d", }, + { Op_TypeMonitorExit, true, MB::StoreOrSync, MK::Exception, "tmonexit", "monexit %d", }, + { Op_LdLockAddr, false, MB::Movable, MK::None, "ldlockaddr", "ldlockaddr %0 -) %l", }, // yields ref:int16 + { Op_IncRecCount, true, MB::StoreOrSync, MK::None, "increccnt", "increccnt %s", }, // allows BalancedMonitorEnter to be used with regular MonitorExit + { Op_TauBalancedMonitorEnter, + true, MB::StoreOrSync, MK::None, "balmonenter", "balmonenter %0,%1 ((%2)) -) %l", }, // (opnd must be non-null), postdominated by BalancedMonitorExit + { Op_BalancedMonitorExit, true, MB::StoreOrSync, MK::None, "balmonexit", "balmonexit %s", }, // (cannot yield exception), },dominated by BalancedMonitorEnter + { Op_TauOptimisticBalancedMonitorEnter, + true, MB::StoreOrSync, MK::None, "optbalmonenter","optbalmonenter %0,%1 ((%2)) -) %l", }, // (opnd must be non-null), postdominated by BalancedMonitorExit + { Op_OptimisticBalancedMonitorExit, + true, MB::StoreOrSync, MK::Exception, "optbalmonexit", "optbalmonexit %s", }, // (cannot yield exception), },dominated by BalancedMonitorEnter + { Op_MonitorEnterFence, true, MB::StoreOrSync, MK::None, "monenterfence", "monenterfence %0", }, // (opnd must be non-null) + { Op_MonitorExitFence, true, MB::StoreOrSync, MK::None, "monexitfence", "monexitfence %0", }, // (opnd must be non-null) + { Op_TauStaticCast, false, MB::Movable, MK::None, "staticcast", "staticcast %0,%d ((%1)) -) %l", }, // Compile-time assertion. Asserts that cast is legal. 
+ { Op_TauCast, false, MB::Check, MK::Exception, "cast ", "cast %0,%d ((%1)) -) %l %b", }, // CastException (suceeds if argument is null, returns casted object) + { Op_TauAsType, false, MB::Movable, MK::None, "astype", "astype %0,%d -) %l", }, // returns casted object if argument is an instance of, null otherwise + { Op_TauInstanceOf, false, MB::Movable, MK::None, "instanceof", "instanceof %0,%d ((%1)) -) %l", }, // returns true if argument is an instance of type T, tau opnd isNonNull + { Op_InitType, true, MB::CSEable, MK::Exception, "inittype", "inittype %d %b", }, // can throw a linking exception during class initialization + { Op_Label, true, MB::None, MK::None, "label ", "%l: %b", }, // special label instructions for branch labels, finally, catch + { Op_MethodEntry, true, MB::None, MK::None, "methodentry", "--- MethodEntry(%d): (%s) %b", }, // method entry label + { Op_MethodEnd, true, MB::None, MK::None, "methodend", "+++ MethodEnd(%d) (%s)", }, // end of a method + { Op_SourceLineNumber, true, MB::None, MK::None, "lineno", "???", }, // change to source position + { Op_LdObj, false, MB::Load, MK::None, "ldobj ", "ldobj [%0] -) %l", }, // load a value type to the stack + { Op_StObj, true, MB::StoreOrSync, MK::None, "stobj ", "stobj %1 -) [%0] -- %d", }, // store a value type from the stack + { Op_CopyObj, true, MB::StoreOrSync, MK::None, "cpobj ", "cpobj [%1] -) [%0] -- %d", }, // copy a value type + { Op_InitObj, true, MB::StoreOrSync, MK::None, "initobj", "initobj [%0]", }, // initialize a value type + { Op_Sizeof, false, MB::Movable, MK::None, "sizeof", "sizeof %d -) %l", }, // Pushes the size of a value type as a U4 + { Op_Box, false, MB::Exception, MK::None, "box ", "box %0,%d -) %l", }, + { Op_Unbox, false, MB::CSEable, MK::None, "unbox ", "unbox %0,%d -) %l", }, + { Op_LdToken, true , MB::None, MK::None, "ldtok ", "ldtok -) %l", }, + { Op_MkRefAny, false, MB::CSEable, MK::None, "mkrefany", "mkrefany", }, // transforms a pointer to a typed reference + { Op_RefAnyVal, false, MB::CSEable, MK::None, "refanyval", "refanyval", }, // ??? Pushes a pointer to the typed reference ??? + { Op_RefAnyType, false, MB::CSEable, MK::None, "refanytype", "refanytype", }, // Pushes the type token in a typed reference - same as obj.getClass()? 
// Memory instructions - { Op_InitBlock, true, MB::StoreOrSync, MK::None, "initblk", "initblk", }, // memset - { Op_CopyBlock, true, MB::StoreOrSync, MK::None, "cpblk ", "cpblk ", }, // memcopy - { Op_Alloca, true, MB::StoreOrSync, MK::None, "alloca", "alloca", }, // allocations memory from the stack, },not verifiable - { Op_ArgList, true, MB::None, MK::None, "arglist", "arglist", }, // for implementing varargs, use is private to CLI System.ArgIterator + { Op_InitBlock, true, MB::StoreOrSync, MK::None, "initblk", "initblk", }, // memset + { Op_CopyBlock, true, MB::StoreOrSync, MK::None, "cpblk ", "cpblk ", }, // memcopy + { Op_ArgList, true, MB::None, MK::None, "arglist", "arglist", }, // for implementing varargs, use is private to CLI System.ArgIterator // Special SSA nodes - { Op_Phi, false, MB::None, MK::None, "phi ", "phi(%s) -) %l", }, // merge point - { Op_TauPi, false, MB::Movable, MK::None, "pi ", "pi(%0 : %d) ((%1)) -) %l", }, // liverange split based on condition + { Op_Phi, false, MB::None, MK::None, "phi ", "phi(%s) -) %l", }, // merge point + { Op_TauPi, false, MB::Movable, MK::None, "pi ", "pi(%0 : %d) ((%1)) -) %l", }, // liverange split based on condition // Profile instrumentation instructions - { Op_IncCounter, true, MB::None, MK::None, "inccounter", "inccounter(%d)", }, // Increment a profile counter by 1 - { Op_Prefetch, true, MB::StoreOrSync, MK::None, "prefetch", "prefetch %0 ", }, //StoreOrSync + { Op_IncCounter, true, MB::None, MK::None, "inccounter", "inccounter(%d)", }, // Increment a profile counter by 1 + { Op_Prefetch, true, MB::StoreOrSync, MK::None, "prefetch", "prefetch %0 ", }, //StoreOrSync // Compressed Pointer instructions - { Op_UncompressRef, false, MB::Movable, MK::None, "uncmpref", "uncmpref %s -) %l", }, - { Op_CompressRef, false, MB::Movable, MK::None, "cmpref", "cmpref %s -) %l", }, - { Op_LdFieldOffset, false, MB::Movable, MK::None, "ldfldoff", "ldfldoff [.%d] -) %l", }, + { Op_UncompressRef, false, MB::Movable, MK::None, "uncmpref", "uncmpref %s -) %l", }, + { Op_CompressRef, false, MB::Movable, MK::None, "cmpref", "cmpref %s -) %l", }, + { Op_LdFieldOffset, false, MB::Movable, MK::None, "ldfldoff", "ldfldoff [.%d] -) %l", }, { Op_LdFieldOffsetPlusHeapbase, - false, MB::Movable, MK::None, "ldfldophb", "ldfldoffphb [.%d] -) %l", }, - { Op_LdArrayBaseOffset, false, MB::Movable, MK::None, "ldbaseoff", "ldbaseoff -) %l", }, + false, MB::Movable, MK::None, "ldfldophb", "ldfldoffphb [.%d] -) %l", }, + { Op_LdArrayBaseOffset, false, MB::Movable, MK::None, "ldbaseoff", "ldbaseoff -) %l", }, { Op_LdArrayBaseOffsetPlusHeapbase, - false, MB::Movable, MK::None, "ldbaseoffphb", "ldbaseoffphb -) %l", }, - { Op_LdArrayLenOffset, false, MB::Movable, MK::None, "ldlenoff", "ldlenoff -) %l", }, + false, MB::Movable, MK::None, "ldbaseoffphb", "ldbaseoffphb -) %l", }, + { Op_LdArrayLenOffset, false, MB::Movable, MK::None, "ldlenoff", "ldlenoff -) %l", }, { Op_LdArrayLenOffsetPlusHeapbase, - false, MB::Movable, MK::None, "ldlenoffphb", "ldlenoffphb -) %l", }, - { Op_AddOffset, false, MB::Movable, MK::None, "addoffset", "addoff %s -) %l", }, - { Op_AddOffsetPlusHeapbase, false, MB::Movable, MK::None, "addoffphb", "addoffphb %s -) %l", }, + false, MB::Movable, MK::None, "ldlenoffphb", "ldlenoffphb -) %l", }, + { Op_AddOffset, false, MB::Movable, MK::None, "addoffset", "addoff %s -) %l", }, + { Op_AddOffsetPlusHeapbase, false, MB::Movable, MK::None, "addoffphb", "addoffphb %s -) %l", }, - { Op_TauPoint, false, MB::None, MK::None, "taupoint ", "taupoint() -) %l", }, // 
mark - { Op_TauEdge, false, MB::None, MK::None, "tauedge ", "tauedge() -) %l", }, // mark - { Op_TauAnd, false, MB::Movable, MK::None, "tauand ", "tauand %s -) %l", }, - { Op_TauUnsafe, false, MB::None, MK::None, "tauunsafe", "tauunsafe() -) %l", }, // mark - { Op_TauSafe, false, MB::None, MK::None, "tauunsafe", "tausafe() -) %l", }, // mark + { Op_TauPoint, false, MB::None, MK::None, "taupoint ", "taupoint() -) %l", }, // mark + { Op_TauEdge, false, MB::None, MK::None, "tauedge ", "tauedge() -) %l", }, // mark + { Op_TauAnd, false, MB::Movable, MK::None, "tauand ", "tauand %s -) %l", }, + { Op_TauUnsafe, false, MB::None, MK::None, "tauunsafe", "tauunsafe() -) %l", }, // mark + { Op_TauSafe, false, MB::None, MK::None, "tauunsafe", "tausafe() -) %l", }, // mark - { Op_TauCheckCast, false, MB::Check, MK::Exception, "tauchkcast ", "tauchkcast %0,%d ((%1)) -) %l", }, // CastException (suceeds if argument is null, returns casted object) - { Op_TauHasType, false, MB::Movable, MK::None, "tauhastype ", "tauhastype %0,%d -) %l", }, // temporary declaration that source is of given type - { Op_TauHasExactType, false, MB::CSEable, MK::None, "tauexacttype ", "tauexacttype %0,%d -) %l", }, // temporary declaration that source is exactly of given type - { Op_TauIsNonNull, true, MB::CSEable, MK::None, "tauisnonnull ", "tauisnonnull %0 -) %l", }, // temporary declaration that source null + { Op_TauCheckCast, false, MB::Check, MK::Exception, "tauchkcast ", "tauchkcast %0,%d ((%1)) -) %l", }, // CastException (suceeds if argument is null, returns casted object) + { Op_TauHasType, false, MB::Movable, MK::None, "tauhastype ", "tauhastype %0,%d -) %l", }, // temporary declaration that source is of given type + { Op_TauHasExactType, false, MB::CSEable, MK::None, "tauexacttype ", "tauexacttype %0,%d -) %l", }, // temporary declaration that source is exactly of given type + { Op_TauIsNonNull, true, MB::CSEable, MK::None, "tauisnonnull ", "tauisnonnull %0 -) %l", }, // temporary declaration that source null }; unsigned short Modifier::encode(Opcode opcode, uint32 numbits) const Index: trunk/vm/jitrino/src/optimizer/Opcode.h =================================================================== --- trunk/vm/jitrino/src/optimizer/Opcode.h (revision 616709) +++ trunk/vm/jitrino/src/optimizer/Opcode.h (working copy) @@ -395,6 +395,7 @@ Op_NewObj, // OutOfMemoryException Op_NewArray, // OutOfMemoryException, NegativeArraySizeException Op_NewMultiArray, // OutOfMemoryException, NegativeArraySizeException + Op_Alloca, // Allocates space on the stack // Synchronization Op_TauMonitorEnter, // (opnd must be non-null) // this could take an ExceptionModifier @@ -447,7 +448,6 @@ // Memory instructions Op_InitBlock, // memset Op_CopyBlock, // memcopy - Op_Alloca, // allocations memory from the stack, not verifiable Op_ArgList, // for implementing varargs; use is private to CLI System.ArgIterator // Special SSA nodes @@ -495,28 +495,28 @@ struct Kind { enum Enum { // these values must be disjoint bits: - None = 0, - Overflow = 1, - Signed = 2, - Comparison = 4, - ShiftMask = 8, - Strict = 16, - DefArg = 32, - SrcNonNull = 64, - Store = 128, - Exception = 256, - AutoCompress = 512, - Speculative = 1024, - Throw = 2048, - NewModifier1 = 4096, - NewModifier2 = 8192, + None = 0x0, + Overflow = 0x1, + Signed = 0x2, + Comparison = 0x4, + ShiftMask = 0x8, + Strict = 0x10, + DefArg = 0x20, + SrcNonNull = 0x40, + Store = 0x80, + Exception = 0x100, + AutoCompress = 0x200, + Speculative = 0x400, + Throw = 0x800, + NewModifier1 = 0x1000, + 
NewModifier2 = 0x2000, // these are combinations for convenient use in Opcode.cpp: - Signed_and_Strict = (2 | 16), - Overflow_and_Exception = (1 | 256), - Overflow_and_Exception_and_Strict = (1 | 256 | 16), - ShiftMask_and_Signed = (8 | 2), - SrcNonNull_and_Exception = (64 | 256), + Signed_and_Strict = (Signed | Strict), + Overflow_and_Exception = (Overflow | Exception), + Overflow_and_Exception_and_Strict = (Overflow | Exception | Strict), + ShiftMask_and_Signed = (ShiftMask | Signed), + SrcNonNull_and_Exception = (SrcNonNull | Exception), Store_AutoCompress = (Store | AutoCompress), AutoCompress_Speculative = (AutoCompress | Speculative),