Index: vm/jitrino/src/codegenerator/ia32/Ia32GCMap.cpp =================================================================== --- vm/jitrino/src/codegenerator/ia32/Ia32GCMap.cpp (revision 609948) +++ vm/jitrino/src/codegenerator/ia32/Ia32GCMap.cpp (working copy) @@ -206,7 +206,11 @@ int offset_from_esp0 = (int)displOpnd->getImmValue(); //opnd saving offset from the esp on method call int inst_offset_from_esp0 = (int)inst->getStackDepth(); int ptrToAddrOffset = inst_offset_from_esp0 + offset_from_esp0; //opnd saving offset from the esp on inst +#ifdef _EM64T_ + gcOpnd = new (mm) GCSafePointOpnd(isObject, false, ptrToAddrOffset, offset, isCompressed); +#else gcOpnd = new (mm) GCSafePointOpnd(isObject, false, ptrToAddrOffset, offset); +#endif } else { assert(opnd->getMemOpndKind() == MemOpndKind_Heap); continue; Index: vm/jitrino/src/codegenerator/ia32/Ia32IRManager.cpp =================================================================== --- vm/jitrino/src/codegenerator/ia32/Ia32IRManager.cpp (revision 609948) +++ vm/jitrino/src/codegenerator/ia32/Ia32IRManager.cpp (working copy) @@ -1588,6 +1588,17 @@ } //_____________________________________________________________________________________________ +bool IRManager::fits32Bits(POINTER_SIZE_INT candidate) { + return (POINTER_SIZE_INT)candidate == (uint32)candidate; +} + +//_____________________________________________________________________________________________ +bool IRManager::fits31Bit(POINTER_SIZE_INT candidate) { + uint32 signedBit32 = (uint32)candidate & (0x1<<31); + return (POINTER_SIZE_INT)candidate == (uint32)candidate && signedBit32 == 0; +} + +//_____________________________________________________________________________________________ void IRManager::getLiveAtExit(const Node * node, BitSet & ls) const { assert(ls.getSetSize()<=getOpndCount()); @@ -1899,17 +1910,17 @@ assert(lastInst->getBCOffset()!=ILLEGAL_BC_MAPPING_VALUE); throwInst->setBCOffset(lastInst->getBCOffset()); throwBasicBlock->appendInst(throwInst); + Type* opndType = opnd->getType(); int64 zero = 0; - if( refsCompressed && opnd->getType()->isReference() ) { - assert(!Type::isCompressedReference(opnd->getType()->tag)); + if( refsCompressed && opndType->isReference() && !opndType->isCompressedReference() ) { zero = (int64)(POINTER_SIZE_INT)VMInterface::getHeapBase(); } Opnd* zeroOpnd = NULL; - if((POINTER_SIZE_INT)zero == (uint32)zero) { // heap base fits into 32 bits - zeroOpnd = newImmOpnd(opnd->getType(), zero); + if (fits32Bits(zero)) { + zeroOpnd = newImmOpnd(opndType, zero); } else { // zero can not be an immediate at comparison Opnd* zeroImm = newImmOpnd(typeManager.getIntPtrType(), zero); - zeroOpnd = newOpnd(opnd->getType()); + zeroOpnd = newOpnd(opndType); Inst* copy = newCopyPseudoInst(Mnemonic_MOV, zeroOpnd, zeroImm); bb->appendInst(copy); copy->setBCOffset(lastInst->getBCOffset()); Index: vm/jitrino/src/codegenerator/ia32/Ia32StackInfo.cpp =================================================================== --- vm/jitrino/src/codegenerator/ia32/Ia32StackInfo.cpp (revision 609948) +++ vm/jitrino/src/codegenerator/ia32/Ia32StackInfo.cpp (working copy) @@ -304,21 +304,29 @@ switch(reg) { #ifdef _EM64T_ case RegName_R15: + case RegName_R15D: return context->p_r15; case RegName_R14: + case RegName_R14D: return context->p_r14; case RegName_R13: + case RegName_R13D: return context->p_r13; case RegName_R12: + case RegName_R12D: return context->p_r12; case RegName_RBP: + case RegName_EBP: return context->p_rbp; case RegName_RBX: + case RegName_EBX: return 
context->p_rbx; #ifdef _WIN64 case RegName_RSI: + case RegName_ESI: return context->p_rsi; case RegName_RDI: + case RegName_EDI: return context->p_rdi; #endif #else Index: vm/jitrino/src/codegenerator/ia32/Ia32GCMap.h =================================================================== --- vm/jitrino/src/codegenerator/ia32/Ia32GCMap.h (revision 609948) +++ vm/jitrino/src/codegenerator/ia32/Ia32GCMap.h (working copy) @@ -121,12 +121,12 @@ public: #ifdef _EM64T_ - GCSafePointOpnd(bool isObject, bool isOnRegister, int32 _val, int32 _mptrOffset, bool isCompressed=false) : val(_val), mptrOffset(_mptrOffset) { + GCSafePointOpnd(bool isObject, bool isOnRegister, int32 _val, int32 _mptrOffset, bool isCompressed=false) : flags(0), val(_val), mptrOffset(_mptrOffset) { flags = flags | (isCompressed ? COMPRESSED_MASK: 0); #else - GCSafePointOpnd(bool isObject, bool isOnRegister, int32 _val, int32 _mptrOffset) : val(_val), mptrOffset(_mptrOffset) { + GCSafePointOpnd(bool isObject, bool isOnRegister, int32 _val, int32 _mptrOffset) : flags(0), val(_val), mptrOffset(_mptrOffset) { #endif - flags = isObject ? OBJ_MASK : 0; + flags = flags | (isObject ? OBJ_MASK : 0); flags = flags | (isOnRegister ? REG_MASK: 0); #ifdef GCMAP_TRACK_IDS firstId = 0; Index: vm/jitrino/src/codegenerator/ia32/Ia32IRManager.h =================================================================== --- vm/jitrino/src/codegenerator/ia32/Ia32IRManager.h (revision 609948) +++ vm/jitrino/src/codegenerator/ia32/Ia32IRManager.h (working copy) @@ -469,6 +469,9 @@ uint32 getVerificationLevel()const{ return verificationLevel; } bool refsAreCompressed() {return refsCompressed;} + + static bool fits32Bits(POINTER_SIZE_INT candidate); + static bool fits31Bit(POINTER_SIZE_INT candidate); protected: //control flow graph factory methods Index: vm/jitrino/src/codegenerator/ia32/Ia32ComplexAddrFormLoader.cpp =================================================================== --- vm/jitrino/src/codegenerator/ia32/Ia32ComplexAddrFormLoader.cpp (revision 609948) +++ vm/jitrino/src/codegenerator/ia32/Ia32ComplexAddrFormLoader.cpp (working copy) @@ -187,7 +187,11 @@ } else if(src2->isPlacedIn(OpndKind_Imm)) { irManager->resolveRuntimeInfo(src2); #ifdef _EM64T_ - if((src2->getImmValue() > (int64)0x7FFFFFFF) || (src2->getImmValue() < -((int64)0x10000000))) { + int64 src2ImmValue = src2->getImmValue(); + if((src2ImmValue > (int64)0x7FFFFFFF) || + (src2ImmValue < -((int64)0x10000000)) || + (src2ImmValue == (int64)VMInterface::getHeapBase())) + { table.baseOp = table.suspOp; return; } Index: vm/jitrino/src/codegenerator/ia32/Ia32InstCodeSelector.cpp =================================================================== --- vm/jitrino/src/codegenerator/ia32/Ia32InstCodeSelector.cpp (revision 609948) +++ vm/jitrino/src/codegenerator/ia32/Ia32InstCodeSelector.cpp (working copy) @@ -1202,6 +1202,7 @@ bool swapped=false; switch(opType){ case CompareOp::I4: + case CompareOp::CompRef: #ifndef _EM64T_ case CompareOp::I: case CompareOp::Ref: @@ -1297,26 +1298,31 @@ { POINTER_SIZE_INT zero = 0; Type* type = target->getType(); - if(irManager.refsAreCompressed() && type->isReference()) { + if(irManager.refsAreCompressed() && type->isReference() && !type->isCompressedReference()) { zero = (POINTER_SIZE_INT)VMInterface::getHeapBase(); } if(zero != 0) { - return heapBaseOpnd(type,zero); + return heapBaseOpnd(type,zero,true); } return NULL; } Opnd* -InstCodeSelector::heapBaseOpnd(Type* type, POINTER_SIZE_INT heapBase) { +InstCodeSelector::heapBaseOpnd(Type* type) { + return 
heapBaseOpnd(type,(POINTER_SIZE_INT)VMInterface::getHeapBase()); +} + +Opnd* +InstCodeSelector::heapBaseOpnd(Type* type, POINTER_SIZE_INT heapBase, bool forComparison) { #ifndef _EM64T_ assert(0); // not supposed to be used on ia32 #endif Opnd* heapBaseOpnd = NULL; - if((POINTER_SIZE_INT)heapBase == (uint32)heapBase) { // heap base fits into 32 bits + if( (forComparison ? IRManager::fits31Bit(heapBase) : IRManager::fits32Bits(heapBase)) ) { heapBaseOpnd = irManager.newImmOpnd(type, heapBase); } else { // heapBase can not be an immediate at comparison heapBaseOpnd = irManager.newOpnd(type); - // be careful here. if type == Int64 the immediate opnd is returned from copyOpnd. + // be careful here. if type == Int64 the immediate opnd is being returned from copyOpnd. // Make sure it is not a problem if you are removing the assert. assert(type != typeManager.getInt64Type()); copyOpnd(heapBaseOpnd,irManager.newImmOpnd(typeManager.getInt64Type(), heapBase)); @@ -1489,7 +1495,7 @@ return irManager.newImmOpnd(typeManager.getCompressedNullObjectType(), 0); } else { if (irManager.refsAreCompressed()) { - return irManager.newImmOpnd(typeManager.getNullObjectType(), (POINTER_SIZE_INT)VMInterface::getHeapBase()); + return heapBaseOpnd(typeManager.getNullObjectType()); } else { return irManager.newImmOpnd(typeManager.getNullObjectType(), 0); } @@ -1593,6 +1599,9 @@ throwInst->setBCOffset(currentHIRInstBCOffset); throwBasicBlock->appendInst(throwInst); + assert(!((Opnd*)array)->getType()->isCompressedReference()); + assert(!((Opnd*)src)->getType()->isCompressedReference()); + Opnd * args[] = { (Opnd*)src, (Opnd*)array }; Opnd * flag = irManager.newOpnd(typeManager.getInt32Type()); @@ -1660,15 +1669,44 @@ //_______________________________________________________________________________________________________________ CG_OpndHandle* InstCodeSelector::uncompressRef(CG_OpndHandle *compref) { +#ifdef _EM64T_ + Opnd* src = (Opnd*)compref; + Type* srcType = src->getType(); + assert(srcType->isCompressedReference()); + Type* uncompressedType = typeManager.uncompressType(srcType); + Type* unmanagedPtrType = typeManager.getUnmanagedPtrType(typeManager.getInt8Type()); + + Opnd* src64 = irManager.newOpnd(typeManager.getInt64Type()); + appendInsts(irManager.newInstEx(Mnemonic_MOVZX, 1, src64, src)); + + Opnd* base = heapBaseOpnd(unmanagedPtrType); + Opnd* dst = irManager.newOpnd(uncompressedType); + appendInsts(irManager.newInstEx(Mnemonic_ADD, 1, dst, src64, base)); + return dst; +#else ICS_ASSERT(0); return 0; +#endif } //_______________________________________________________________________________________________________________ CG_OpndHandle* InstCodeSelector::compressRef(CG_OpndHandle *ref) { +#ifdef _EM64T_ + Opnd* src = (Opnd*)ref; + Type* srcType = src->getType(); + assert(!srcType->isCompressedReference()); + Type* compressedType = typeManager.compressType(srcType); + Type* unmanagedPtrType = typeManager.getUnmanagedPtrType(typeManager.getInt8Type()); + + Opnd* base = heapBaseOpnd(unmanagedPtrType); + Opnd* dst = irManager.newOpnd(compressedType); + appendInsts(irManager.newInstEx(Mnemonic_SUB, 1, dst, src, base)); + return dst; +#else ICS_ASSERT(0); return 0; +#endif } //_______________________________________________________________________________________________________________ @@ -1821,6 +1859,7 @@ } else { elemSize = getByteSize(irManager.getTypeSize(elemType)); } + assert(!elemType->isObject() || !irManager.refsAreCompressed() || elemSize == 4); Opnd * elemSizeOpnd = irManager.newImmOpnd(indexType, 
elemSize); Opnd * indexOpnd = (Opnd *)index; @@ -1856,10 +1895,17 @@ CG_OpndHandle * index) { Type* type = ((Opnd*)elemBase)->getType(); - PtrType * ptrType=type->asPtrType(); - Type * elemType = ptrType->getPointedToType(); + PtrType* ptrType=type->asPtrType(); + Type* elemType = ptrType->getPointedToType(); - uint32 elemSize=getByteSize(irManager.getTypeSize(elemType)); + uint32 elemSize = 0; + if (elemType->isReference() + && Type::isCompressedReference(elemType->tag, compilationInterface) + && !elemType->isCompressedReference()) { + elemSize = 4; + } else { + elemSize = getByteSize(irManager.getTypeSize(elemType)); + } Type * indexType = #ifdef _EM64T_ @@ -1906,20 +1952,28 @@ // Simple load indirect -- load primitive value CG_OpndHandle* InstCodeSelector::simpleLdInd(Type * dstType, Opnd * addr, - Type::Tag memType, - Opnd * baseTau, - Opnd * offsetTau) + Type::Tag memType, + bool autoUncompressRef, + Opnd * baseTau, + Opnd * offsetTau) { #ifdef _EM64T_ if(irManager.refsAreCompressed() && memType > Type::Float && memType!=Type::UnmanagedPtr) { Opnd * opnd = irManager.newMemOpndAutoKind(typeManager.getInt32Type(), addr); - Opnd * dst = irManager.newOpnd(typeManager.getInt64Type()); - // loading compressed 32-bit managed address, ensure zero-extention - copyOpnd(dst, opnd, true); - // uncompress - Type* unmanagedPtrType = typeManager.getUnmanagedPtrType(typeManager.getInt8Type()); - Opnd* heapBase = heapBaseOpnd(unmanagedPtrType, (POINTER_SIZE_INT)VMInterface::getHeapBase()); - dst = simpleOp_I8(Mnemonic_ADD, dstType, dst, heapBase); + + Opnd* dst = irManager.newOpnd(dstType); + if (autoUncompressRef) { + Opnd * compressedDst = irManager.newOpnd(typeManager.getInt64Type()); + // loading compressed 32-bit managed address, ensure zero-extention + copyOpnd(compressedDst, opnd, true); + // uncompress + Type* unmanagedPtrType = typeManager.getUnmanagedPtrType(typeManager.getInt8Type()); + Opnd* heapBase = heapBaseOpnd(unmanagedPtrType); + appendInsts(irManager.newInstEx(Mnemonic_ADD, 1, dst, compressedDst, heapBase)); + } else { + assert(irManager.getTypeSize(dstType) == OpndSize_32); + copyOpnd(dst, opnd); + } return dst; } else @@ -1935,24 +1989,27 @@ //_______________________________________________________________________________________________________________ // Simple store indirect -- store primitive value -void InstCodeSelector::simpleStInd(Opnd * addr, - Opnd * src, - Type::Tag memType, - bool autoCompressRef, - Opnd * baseTau, - Opnd * offsetAndTypeTau) +void InstCodeSelector::simpleStInd(Opnd * addr, Opnd * src, + Type::Tag memType, + bool autoCompressRef, + Opnd * baseTau, + Opnd * offsetAndTypeTau) { #ifdef _EM64T_ // unmanaged pointers are never being compressed // Actually, there is only one possible case caused by magics: // unmanaged pointer to Int8 if(irManager.refsAreCompressed() && memType > Type::Float && !src->getType()->isUnmanagedPtr()) { - Type * unmanagedPtrType = typeManager.getUnmanagedPtrType(typeManager.getInt8Type()); - Opnd * heap_base = heapBaseOpnd(unmanagedPtrType, (POINTER_SIZE_INT)VMInterface::getHeapBase()); - Opnd * compressed_src = irManager.newOpnd(typeManager.compressType(src->getType())); - Opnd * opnd = irManager.newMemOpndAutoKind(typeManager.compressType(src->getType()), addr); - appendInsts(irManager.newInstEx(Mnemonic_SUB, 1, compressed_src, src, heap_base)); - appendInsts(irManager.newCopyPseudoInst(Mnemonic_MOV, opnd, compressed_src)); + + if(autoCompressRef) { + Type* unmanagedPtrType = 
typeManager.getUnmanagedPtrType(typeManager.getInt8Type()); + Opnd * base = heapBaseOpnd(unmanagedPtrType); + Opnd * compressed_src = irManager.newOpnd(src->getType()); // the type is still 64 + appendInsts(irManager.newInstEx(Mnemonic_SUB, 1, compressed_src, src, base)); + src = compressed_src; + } + Opnd * opnd = irManager.newMemOpndAutoKind(typeManager.getInt32Type(), addr); + appendInsts(irManager.newCopyPseudoInst(Mnemonic_MOV, opnd, src)); } else #endif { @@ -2055,7 +2112,7 @@ CG_OpndHandle* tauBaseNonNull, CG_OpndHandle* tauAddressInRange) { - return simpleLdInd(dstType, (Opnd*)ptr, memType, (Opnd*)tauBaseNonNull, (Opnd*)tauAddressInRange); + return simpleLdInd(dstType, (Opnd*)ptr, memType, autoUncompressRef, (Opnd*)tauBaseNonNull, (Opnd*)tauAddressInRange); } //_______________________________________________________________________________________________________________ @@ -2215,30 +2272,31 @@ // TODO: rewrite this as soon as the helper for loading ref addr at compile time is ready. Type* objectType = irManager.getTypeFromTag(Type::Object); + Type* unmanagedPtrType = typeManager.getUnmanagedPtrType(typeManager.getInt8Type()); if(irManager.refsAreCompressed()) { - Opnd * base = irManager.newOpnd(objectType); - copyOpnd(base, irManager.newImmOpnd(objectType, (POINTER_SIZE_INT)VMInterface::getHeapBase())); + Opnd * base = heapBaseOpnd(unmanagedPtrType); Opnd * tmp = irManager.newImmOpnd(irManager.getTypeFromTag(Type::UInt64), Opnd::RuntimeInfo::Kind_StringAddress, enclosingMethod, (void*)(POINTER_SIZE_INT)refToken); - Opnd * ptr; + Opnd* ptr = irManager.newOpnd(irManager.getTypeFromTag(Type::Object)); + copyOpnd(ptr,tmp); + + Opnd * memOpnd = irManager.newMemOpndAutoKind(typeManager.getInt32Type(), ptr); if (uncompress) { - ptr = irManager.newOpnd(objectType); - copyOpnd(ptr,tmp); + Opnd * compressedRef = irManager.newOpnd(typeManager.getUInt64Type()); + // loading compressed 32-bit managed address, ensure zero-extention + copyOpnd(compressedRef, memOpnd, true); + // uncompress + appendInsts(irManager.newInstEx(Mnemonic_ADD, 1, retOpnd, compressedRef, base)); } else { - ptr = simpleOp_I8(Mnemonic_ADD, objectType,base,tmp); + copyOpnd(retOpnd, memOpnd); } - - Opnd* memOpnd = irManager.newMemOpnd(typeManager.getSystemStringType(), MemOpndKind_Heap, - ptr, NULL, NULL, NULL); - retOpnd = simpleOp_I8(Mnemonic_ADD, memOpnd->getType(), memOpnd, base); } else { #ifdef _EM64T_ // in uncompressed mode the ptr can be greater than MAX_INT32 so it can not be an immediate Opnd * tmp = irManager.newImmOpnd(irManager.getTypeFromTag(Type::UInt64), Opnd::RuntimeInfo::Kind_StringAddress, enclosingMethod, (void*)(POINTER_SIZE_INT)refToken); Opnd* ptr = irManager.newOpnd(objectType); - // Opnd* ptr = irManager.newOpnd(typeManager.getUnmanagedPtrType(typeManager.getSystemStringType())); copyOpnd(ptr,tmp); Opnd* memOpnd = irManager.newMemOpnd(typeManager.getSystemStringType(), MemOpndKind_Heap, ptr, NULL, NULL, NULL); @@ -2256,15 +2314,6 @@ } //_______________________________________________________________________________________________________________ -// Load token - -CG_OpndHandle * InstCodeSelector::ldToken(Type *dstType,MethodDesc* enclosingMethod,uint32 token) -{ - ICS_ASSERT(0); - return 0; -} - -//_______________________________________________________________________________________________________________ // Increment counter for the program instrumentation void InstCodeSelector::incCounter(Type *counterType,uint32 key) @@ -2866,62 +2915,11 @@ } 
//_______________________________________________________________________________________________________________ -// Box a value - -CG_OpndHandle* InstCodeSelector::box(ObjectType * boxedType, - CG_OpndHandle * val) -{ - ICS_ASSERT(0); - return 0; -} - -//_______________________________________________________________________________________________________________ -// Unbox object - -CG_OpndHandle* InstCodeSelector::unbox(Type * dstType, CG_OpndHandle* objHandle) -{ - ICS_ASSERT(0); - return 0; -} - -//_______________________________________________________________________________________________________________ -// Load value object - -CG_OpndHandle* InstCodeSelector::ldValueObj(Type* objType, CG_OpndHandle *srcAddr) -{ - ICS_ASSERT(0); - return 0; -} - -//_______________________________________________________________________________________________________________ -// Store value object - -void InstCodeSelector::stValueObj(CG_OpndHandle *dstAddr, CG_OpndHandle *src) -{ - ICS_ASSERT(0); -} - -//_______________________________________________________________________________________________________________ -// Initialize value object - -void InstCodeSelector::initValueObj(Type* objType, CG_OpndHandle *objAddr) -{ - ICS_ASSERT(0); -} - -//_______________________________________________________________________________________________________________ -// Copy value object - -void InstCodeSelector::copyValueObj(Type* objType, CG_OpndHandle *dstAddr, CG_OpndHandle *srcAddr) -{ - ICS_ASSERT(0); -} - -//_______________________________________________________________________________________________________________ // Acquire monitor for an object void InstCodeSelector::tau_monitorEnter(CG_OpndHandle* obj, CG_OpndHandle* tauIsNonNull) { + assert(!((Opnd*)obj)->getType()->isCompressedReference()); Opnd * helperOpnds[] = { (Opnd*)obj }; CallInst * callInst=irManager.newRuntimeHelperCallInst(VM_RT_MONITOR_ENTER_NON_NULL, 1, helperOpnds, NULL); @@ -2934,6 +2932,7 @@ void InstCodeSelector::tau_monitorExit(CG_OpndHandle* obj, CG_OpndHandle* tauIsNonNull) { + assert(!((Opnd*)obj)->getType()->isCompressedReference()); Opnd * helperOpnds[] = { (Opnd*)obj }; CallInst * callInst=irManager.newRuntimeHelperCallInst(VM_RT_MONITOR_EXIT_NON_NULL, 1, helperOpnds, NULL); @@ -3000,6 +2999,7 @@ void InstCodeSelector::typeMonitorEnter(NamedType *type) { + assert(!type->isCompressedReference()); Opnd * helperOpnds[]={irManager.newImmOpnd(getRuntimeIdType(), Opnd::RuntimeInfo::Kind_TypeRuntimeId, type)}; CallInst * callInst=irManager.newRuntimeHelperCallInst(VM_RT_MONITOR_ENTER_STATIC, 1, helperOpnds, NULL); @@ -3012,6 +3012,7 @@ void InstCodeSelector::typeMonitorExit(NamedType *type) { + assert(!type->isCompressedReference()); Opnd * helperOpnds[]={irManager.newImmOpnd(getRuntimeIdType(), Opnd::RuntimeInfo::Kind_TypeRuntimeId, type)}; CallInst * callInst=irManager.newRuntimeHelperCallInst(VM_RT_MONITOR_EXIT_STATIC, 1, helperOpnds, NULL); Index: vm/jitrino/src/codegenerator/ia32/Ia32InstCodeSelector.h =================================================================== --- vm/jitrino/src/codegenerator/ia32/Ia32InstCodeSelector.h (revision 609948) +++ vm/jitrino/src/codegenerator/ia32/Ia32InstCodeSelector.h (working copy) @@ -192,7 +192,6 @@ CG_OpndHandle* newArray(ArrayType* arrayType, CG_OpndHandle* numElems); CG_OpndHandle* newMultiArray(ArrayType* arrayType, uint32 numDims, CG_OpndHandle** dims); CG_OpndHandle* ldRef(Type *dstType, MethodDesc* enclosingMethod,uint32 stringToken, bool uncompress); - CG_OpndHandle* 
ldToken(Type *dstType,MethodDesc* enclosingMethod,uint32 token); void incCounter(Type *counterType,uint32 counter); void ret(); @@ -223,12 +222,6 @@ CG_OpndHandle* callvmhelper(uint32 numArgs, CG_OpndHandle** args, Type* retType, VM_RT_SUPPORT callId); - CG_OpndHandle* box(ObjectType * boxedType, CG_OpndHandle* val); - CG_OpndHandle* unbox(Type * dstType, CG_OpndHandle* objHandle); - CG_OpndHandle* ldValueObj(Type* objType, CG_OpndHandle *srcAddr); - void stValueObj(CG_OpndHandle *dstAddr, CG_OpndHandle *src); - void initValueObj(Type* objType, CG_OpndHandle *objAddr); - void copyValueObj(Type* objType, CG_OpndHandle *dstAddr, CG_OpndHandle *srcAddr); void tau_monitorEnter(CG_OpndHandle* obj, CG_OpndHandle* tauIsNonNull); void tau_monitorExit(CG_OpndHandle* obj, CG_OpndHandle* tauIsNonNull); CG_OpndHandle* ldLockAddr(CG_OpndHandle* obj); @@ -346,7 +339,8 @@ // zero or HeapBase depending on compression mode Opnd* zeroForComparison(Opnd* target); // immediate or general opnd with heapBase value - Opnd* heapBaseOpnd(Type* type, POINTER_SIZE_INT heapBase); + Opnd* heapBaseOpnd(Type* type); + Opnd* heapBaseOpnd(Type* type, POINTER_SIZE_INT heapBase, bool forComparison = false); // // Enums @@ -385,7 +379,7 @@ void compressOpnd(Opnd *dst, Opnd *src); void makeComparable(Opnd*& srcOpnd1, Opnd*& srcOpnd2); CG_OpndHandle* simpleLdInd(Type * dstType, Opnd *addr, Type::Tag memType, - Opnd *baseTau, Opnd *offsetTau); + bool autoUncompressRef, Opnd *baseTau, Opnd *offsetTau); void simpleStInd(Opnd *addr, Opnd *src, Type::Tag memType, bool autoCompressRef, Opnd *baseTau, Opnd *offsetAndTypeTau); Type * getFieldRefType(Type *dstType, Type::Tag memType); Index: vm/jitrino/src/codegenerator/ipf/include/IpfCodeSelector.h =================================================================== --- vm/jitrino/src/codegenerator/ipf/include/IpfCodeSelector.h (revision 609948) +++ vm/jitrino/src/codegenerator/ipf/include/IpfCodeSelector.h (working copy) @@ -266,12 +266,8 @@ CG_OpndHandle *ldElemAddr(CG_OpndHandle*,CG_OpndHandle*) { NOT_IMPLEMENTED_C("ldElemAddr") } CG_OpndHandle *ldStatic(Type*, FieldDesc*, Type::Tag, bool) { NOT_IMPLEMENTED_C("ldStatic") } CG_OpndHandle *ldVarAddr(uint32) { NOT_IMPLEMENTED_C("ldVarAddr") } - CG_OpndHandle *ldToken(Type*, MethodDesc*, uint32) { NOT_IMPLEMENTED_C("ldToken") } CG_OpndHandle *tau_cast(ObjectType*, CG_OpndHandle*, CG_OpndHandle*) { NOT_IMPLEMENTED_C("tau_cast") } CG_OpndHandle *tau_asType(ObjectType*, CG_OpndHandle*, CG_OpndHandle*) { NOT_IMPLEMENTED_C("tau_asType") } - CG_OpndHandle *box(ObjectType*, CG_OpndHandle*) { NOT_IMPLEMENTED_C("box") } - CG_OpndHandle *unbox(Type*, CG_OpndHandle*) { NOT_IMPLEMENTED_C("unbox") } - CG_OpndHandle *ldValueObj(Type*, CG_OpndHandle*) { NOT_IMPLEMENTED_C("ldValueObj") } CG_OpndHandle *tau_ckfinite(CG_OpndHandle*) { NOT_IMPLEMENTED_C("tau_ckfinite") } CG_OpndHandle *callhelper(uint32, CG_OpndHandle**, Type*, JitHelperCallOp::Id) { NOT_IMPLEMENTED_C("callhelper") } CG_OpndHandle *tau_callvirt(uint32, CG_OpndHandle**, Type*, MethodDesc*, CG_OpndHandle*, CG_OpndHandle*) { NOT_IMPLEMENTED_C("tau_callvirt") } @@ -285,9 +281,6 @@ void incRecursionCount(CG_OpndHandle*, CG_OpndHandle*) { NOT_IMPLEMENTED_V("incRecursionCount") } void monitorEnterFence(CG_OpndHandle*) { NOT_IMPLEMENTED_V("monitorEnterFence") } void monitorExitFence(CG_OpndHandle*) { NOT_IMPLEMENTED_V("monitorExitFence") } - void stValueObj(CG_OpndHandle*, CG_OpndHandle*) { NOT_IMPLEMENTED_V("stValueObj") } - void initValueObj(Type*, CG_OpndHandle*) { 
NOT_IMPLEMENTED_V("initValueObj") } - void copyValueObj(Type*, CG_OpndHandle*, CG_OpndHandle*) { NOT_IMPLEMENTED_V("copyValueObj") } void prefetch(CG_OpndHandle*) { NOT_IMPLEMENTED_V("prefetch") } void jump() { NOT_IMPLEMENTED_V("jump") } void throwLazyException(uint32, CG_OpndHandle**, MethodDesc*) { NOT_IMPLEMENTED_V("throwLazyException") } Index: vm/jitrino/src/codegenerator/CodeGenIntfc.h =================================================================== --- vm/jitrino/src/codegenerator/CodeGenIntfc.h (revision 609948) +++ vm/jitrino/src/codegenerator/CodeGenIntfc.h (working copy) @@ -416,7 +416,6 @@ virtual CG_OpndHandle* newArray(ArrayType* arrayType, CG_OpndHandle* numElems) = 0; virtual CG_OpndHandle* newMultiArray(ArrayType* arrayType, uint32 numDims, CG_OpndHandle** dims) = 0; virtual CG_OpndHandle* ldRef(Type* type,MethodDesc* enclosingMethod,uint32 stringToken, bool autouncompress) = 0; - virtual CG_OpndHandle* ldToken(Type *dstType,MethodDesc* enclosingMethod,uint32 token) = 0; virtual void incCounter(Type *counterType,uint32 counter) = 0; virtual void ret() = 0; @@ -456,12 +455,6 @@ virtual CG_OpndHandle* tau_instanceOf(ObjectType *type, CG_OpndHandle* obj, CG_OpndHandle* tauCheckedNull) = 0; virtual void initType(Type* type) = 0; - virtual CG_OpndHandle* box(ObjectType * dstType, CG_OpndHandle* val) = 0; - virtual CG_OpndHandle* unbox(Type * dstType, CG_OpndHandle* obj) = 0; - virtual CG_OpndHandle* ldValueObj(Type* objType, CG_OpndHandle *srcAddr) = 0; - virtual void stValueObj(CG_OpndHandle *dstAddr, CG_OpndHandle *src) = 0; - virtual void initValueObj(Type* objType, CG_OpndHandle *objAddr) = 0; - virtual void copyValueObj(Type* objType, CG_OpndHandle *dstAddr, CG_OpndHandle *srcAddr) = 0; virtual CG_OpndHandle* copy(CG_OpndHandle *src) = 0; virtual CG_OpndHandle* catchException(Type * exceptionType) = 0; virtual void prefetch(CG_OpndHandle *addr) = 0; Index: vm/jitrino/src/optimizer/HLOAPIMagics.cpp =================================================================== --- vm/jitrino/src/optimizer/HLOAPIMagics.cpp (revision 609948) +++ vm/jitrino/src/optimizer/HLOAPIMagics.cpp (working copy) @@ -273,6 +273,26 @@ } Opnd* +HLOAPIMagicIRBuilder::genCompressRef(Opnd* src) { + Type* uncomprefType = src->getType(); + assert(uncomprefType->isReference() && !uncomprefType->isCompressedReference()); + + Opnd* dst = createOpnd(typeManager.compressType(uncomprefType)); + appendInst(instFactory.makeCompressRef(dst, src)); + return dst; +} + +Opnd* +HLOAPIMagicIRBuilder::genUncompressRef(Opnd* src) { + Type* comprefType = src->getType(); + assert(comprefType->isReference() && comprefType->isCompressedReference()); + + Opnd* dst = createOpnd(typeManager.uncompressType(comprefType)); + appendInst(instFactory.makeUncompressRef(dst, src)); + return dst; +} + +Opnd* HLOAPIMagicIRBuilder::genLdField(FieldDesc* fieldDesc, Opnd* base, Opnd* tauBaseNonNull, Opnd* tauAddressInRange) { Type* fieldType = fieldDesc->getFieldType(); @@ -280,14 +300,16 @@ Opnd* fieldAddr; Modifier mod; - - if (compRefs) { - // until VM type system is upgraded, - // fieldDesc type will have uncompressed ref type; - // compress it + bool compressedRef = compRefs && Type::isReference(fieldType->tag); + if (compressedRef) { Type *compressedType = typeManager.compressType(fieldType); fieldAddr = createOpnd(typeManager.getManagedPtrType(compressedType)); - mod = AutoCompress_Yes; + if (typeManager.autoCompressRefs) { + mod = AutoCompress_Yes; + } else { + mod = AutoCompress_No; + fieldType = compressedType; + } } else { 
fieldAddr = createOpnd(typeManager.getManagedPtrType(fieldType)); mod = AutoCompress_No; @@ -297,6 +319,10 @@ Opnd* fieldVal = createOpnd(fieldType); appendInst(instFactory.makeTauLdInd(mod, fieldType->tag, fieldVal, fieldAddr, tauBaseNonNull, tauAddressInRange)); + if ( compressedRef && typeManager.autoCompressRefs == false) { + fieldVal = genUncompressRef(fieldVal); + } + return fieldVal; } Index: vm/jitrino/src/optimizer/simplifier.cpp =================================================================== --- vm/jitrino/src/optimizer/simplifier.cpp (revision 609948) +++ vm/jitrino/src/optimizer/simplifier.cpp (working copy) @@ -126,7 +126,7 @@ Inst* inst = opnd->getInst(); switch (inst->getOpcode()) { case Op_NewObj: case Op_NewArray: case Op_NewMultiArray: - case Op_Box: case Op_LdRef: case Op_Catch: + case Op_LdRef: case Op_Catch: return true; default: return false; @@ -156,7 +156,7 @@ Inst* inst = opnd->getInst(); switch (inst->getOpcode()) { case Op_NewObj: case Op_NewArray: case Op_NewMultiArray: - case Op_Box: case Op_LdRef: + case Op_LdRef: return true; case Op_DefArg: return (inst->getDefArgModifier() == SpecializedToExactType); @@ -2623,8 +2623,8 @@ { Inst *srci = src1->getInst(); if (srci->getOpcode() == Op_TauStaticCast) { - newInstType = instType; newSrc1 = srci->getSrc(0); + newInstType = newSrc1->getType()->tag; newmod = mod; return true; } Index: vm/jitrino/src/optimizer/Opcode.cpp =================================================================== --- vm/jitrino/src/optimizer/Opcode.cpp (revision 609948) +++ vm/jitrino/src/optimizer/Opcode.cpp (working copy) @@ -185,24 +185,7 @@ { Op_MethodEntry, true, MB::None, MK::None, "methodentry", "--- MethodEntry(%d): (%s) %b",}, // method entry label { Op_MethodEnd, true, MB::None, MK::None, "methodend", "+++ MethodEnd(%d) (%s)", }, // end of a method { Op_SourceLineNumber, true, MB::None, MK::None, "lineno", "???", }, // change to source position - { Op_LdObj, false, MB::Load, MK::None, "ldobj ", "ldobj [%0] -) %l", }, // load a value type to the stack - { Op_StObj, true, MB::StoreOrSync, MK::None, "stobj ", "stobj %1 -) [%0] -- %d", }, // store a value type from the stack - { Op_CopyObj, true, MB::StoreOrSync, MK::None, "cpobj ", "cpobj [%1] -) [%0] -- %d", }, // copy a value type - { Op_InitObj, true, MB::StoreOrSync, MK::None, "initobj", "initobj [%0]", }, // initialize a value type - { Op_Sizeof, false, MB::Movable, MK::None, "sizeof", "sizeof %d -) %l", }, // Pushes the size of a value type as a U4 - { Op_Box, false, MB::Exception, MK::None, "box ", "box %0,%d -) %l", }, - { Op_Unbox, false, MB::CSEable, MK::None, "unbox ", "unbox %0,%d -) %l", }, - { Op_LdToken, true , MB::None, MK::None, "ldtok ", "ldtok -) %l", }, - { Op_MkRefAny, false, MB::CSEable, MK::None, "mkrefany", "mkrefany", }, // transforms a pointer to a typed reference - { Op_RefAnyVal, false, MB::CSEable, MK::None, "refanyval", "refanyval", }, // ??? Pushes a pointer to the typed reference ??? - { Op_RefAnyType, false, MB::CSEable, MK::None, "refanytype", "refanytype", }, // Pushes the type token in a typed reference - same as obj.getClass()? 
- // Memory instructions - { Op_InitBlock, true, MB::StoreOrSync, MK::None, "initblk", "initblk", }, // memset - { Op_CopyBlock, true, MB::StoreOrSync, MK::None, "cpblk ", "cpblk ", }, // memcopy - { Op_Alloca, true, MB::StoreOrSync, MK::None, "alloca", "alloca", }, // allocations memory from the stack, },not verifiable - { Op_ArgList, true, MB::None, MK::None, "arglist", "arglist", }, // for implementing varargs, use is private to CLI System.ArgIterator - // Special SSA nodes { Op_Phi, false, MB::None, MK::None, "phi ", "phi(%s) -) %l", }, // merge point { Op_TauPi, false, MB::Movable, MK::None, "pi ", "pi(%0 : %d) ((%1)) -) %l", }, // liverange split based on condition @@ -213,7 +196,7 @@ // Compressed Pointer instructions { Op_UncompressRef, false, MB::Movable, MK::None, "uncmpref", "uncmpref %s -) %l", }, - { Op_CompressRef, false, MB::Movable, MK::None, "cmpref", "cmpref %s -) %l", }, + { Op_CompressRef, false, MB::Movable, MK::None, "cmprsref", "cmprsref %s -) %l", }, { Op_LdFieldOffset, false, MB::Movable, MK::None, "ldfldoff", "ldfldoff [.%d] -) %l", }, { Op_LdFieldOffsetPlusHeapbase, false, MB::Movable, MK::None, "ldfldophb", "ldfldoffphb [.%d] -) %l", }, @@ -809,7 +792,6 @@ case Op_LdArrayBaseOffsetPlusHeapbase: case Op_LdArrayLenOffset: case Op_LdArrayLenOffsetPlusHeapbase: - case Op_LdToken: return true; default: return false; Index: vm/jitrino/src/optimizer/codelowerer.h =================================================================== --- vm/jitrino/src/optimizer/codelowerer.h (revision 609948) +++ vm/jitrino/src/optimizer/codelowerer.h (working copy) @@ -283,35 +283,6 @@ // source markers Inst* caseMethodMarker(Inst* inst) {return caseDefault(inst);} - // value type instructions - Inst* caseLdObj(TypeInst* inst) {return caseDefault(inst);} - - Inst* caseStObj(TypeInst* inst) {return caseDefault(inst);} - - Inst* caseCopyObj(TypeInst* inst) {return caseDefault(inst);} - - Inst* caseInitObj(TypeInst* inst) {return caseDefault(inst);} - - Inst* caseBox(TypeInst* inst) {return caseDefault(inst);} - - Inst* caseUnbox(TypeInst* inst) {return caseDefault(inst);} - - Inst* caseLdToken(TokenInst* inst) {return caseDefault(inst);} - - Inst* caseMkRefAny(Inst* inst) {return caseDefault(inst);} - - Inst* caseRefAnyVal(Inst* inst) {return caseDefault(inst);} - - Inst* caseRefAnyType(Inst* inst) {return caseDefault(inst);} - - Inst* caseInitBlock(Inst* inst) {return caseDefault(inst);} - - Inst* caseCopyBlock(Inst* inst) {return caseDefault(inst);} - - Inst* caseAlloca(Inst* inst) {return caseDefault(inst);} - - Inst* caseArgList(Inst* inst) {return caseDefault(inst);} - Inst* casePhi(Inst* inst) {return caseDefault(inst);} Inst* caseTauPi(TauPiInst* inst) {return caseDefault(inst);} Index: vm/jitrino/src/optimizer/HLOAPIMagics.h =================================================================== --- vm/jitrino/src/optimizer/HLOAPIMagics.h (revision 609948) +++ vm/jitrino/src/optimizer/HLOAPIMagics.h (working copy) @@ -29,14 +29,14 @@ class HLOAPIMagicIRBuilder { public: - HLOAPIMagicIRBuilder(IRManager* irmanager, MemoryManager& _mm, bool _compRefs) + HLOAPIMagicIRBuilder(IRManager* irmanager, MemoryManager& _mm) : irm(irmanager), instFactory(irm->getInstFactory()), opndManager(irm->getOpndManager()), typeManager(irm->getTypeManager()), cfg(irm->getFlowGraph()), mm(_mm), - compRefs(_compRefs), + compRefs(VMInterface::areReferencesCompressed()), currentNode(NULL), currentBCOffset(ILLEGAL_BC_MAPPING_VALUE) {} @@ -65,6 +65,8 @@ // IR building void appendInst(Inst* inst); void 
genCopy(Opnd* trgt, Opnd* src); + Opnd* genCompressRef(Opnd* src); + Opnd* genUncompressRef(Opnd* src); Opnd* genLdField(FieldDesc* fieldDesc, Opnd* base, Opnd* tauBaseNonNull, Opnd* tauAddressInRange); Opnd* createOpnd(Type* type); VarOpnd* createVarOpnd(Type* type, bool isPinned); @@ -166,8 +168,7 @@ } if(handlers.size() != 0) { - bool compRefs = getBoolArg("compressedReferences", false); - HLOAPIMagicIRBuilder builder = HLOAPIMagicIRBuilder(&irm, mm, compRefs); + HLOAPIMagicIRBuilder builder = HLOAPIMagicIRBuilder(&irm, mm); //running all handlers for (StlVector::const_iterator it = handlers.begin(), end = handlers.end(); it!=end; ++it) { HLOAPIMagicHandler* handler = *it; Index: vm/jitrino/src/optimizer/escanalyzer.cpp =================================================================== --- vm/jitrino/src/optimizer/escanalyzer.cpp (revision 609948) +++ vm/jitrino/src/optimizer/escanalyzer.cpp (working copy) @@ -587,6 +587,8 @@ case Op_TauStaticCast: // staticcast case Op_TauCast: // cast + case Op_CompressRef: + case Op_UncompressRef: type = inst->getDst()->getType(); if (type->isObject()) { assert(findCnGNode_op(inst->getDst()->getId())==NULL); @@ -617,14 +619,13 @@ case Op_TauVirtualCall: // callvirt case Op_IndirectCall: // calli - + case Op_TauStRef: case Op_TauStField: case Op_TauStElem: case Op_Prefetch: case Op_TauStStatic: case Op_Copy: - case Op_Box: break; default: @@ -1218,7 +1219,11 @@ } if (cgnfrom->nodeType == NT_REF && cgnfrom->lNode == NULL) { bool helperCall = isVMHelperCall(cgnfrom->nInst, VM_RT_CHECKCAST_WITHRESOLVE); - assert(cgnfrom->nInst->getOpcode()==Op_TauCast || cgnfrom->nInst->getOpcode()==Op_TauStaticCast || helperCall); + assert(cgnfrom->nInst->getOpcode()==Op_TauCast || + cgnfrom->nInst->getOpcode()==Op_TauStaticCast || + cgnfrom->nInst->getOpcode()==Op_CompressRef || + cgnfrom->nInst->getOpcode()==Op_UncompressRef || + helperCall); cgn1 = findCnGNode_op(cgnfrom->nInst->getSrc(helperCall ? 
2 : 0)->getId()); assert(cgn1!=NULL); cgnfrom->lNode = cgn1; @@ -4711,12 +4716,6 @@ Inst *headInst = (Inst*)node->getFirstInst(); Opnd* opnd; for (Inst* inst=headInst->getNextInst();inst!=NULL;inst=inst->getNextInst()) { - if (inst->getOpcode() == Op_InitObj) { - if (verboseLog) { - os_sc << " Op_InitObj: "; - inst->print(os_sc); os_sc << std::endl; - } - } uint32 nsrc = inst->getNumSrcOperands(); for (uint32 i=0; igetSrc(i))->isSsaOpnd()) // check ssa operands @@ -4784,12 +4783,6 @@ Inst *headInst = (Inst*)node->getFirstInst(); Opnd* opnd; for (Inst* inst=headInst->getNextInst();inst!=NULL;inst=inst->getNextInst()) { - if (inst->getOpcode() == Op_InitObj) { - if (verboseLog) { - os_sc << " Op_InitObj: "; - inst->print(os_sc); os_sc << std::endl; - } - } uint32 nsrc = inst->getNumSrcOperands(); for (uint32 i=0; igetSrc(i))->isSsaOpnd()) // check ssa operands @@ -5882,5 +5875,3 @@ } //namespace Jitrino - - Index: vm/jitrino/src/optimizer/lazyexceptionopt.cpp =================================================================== --- vm/jitrino/src/optimizer/lazyexceptionopt.cpp (revision 609948) +++ vm/jitrino/src/optimizer/lazyexceptionopt.cpp (working copy) @@ -1053,22 +1053,6 @@ case Op_MethodEnd: case Op_SourceLineNumber: return false; - case Op_LdObj: - case Op_StObj: - case Op_CopyObj: - case Op_InitObj: - case Op_Sizeof: - case Op_Box: - case Op_Unbox: - case Op_LdToken: - case Op_MkRefAny: - case Op_RefAnyVal: - case Op_RefAnyType: - case Op_InitBlock: - case Op_CopyBlock: - case Op_Alloca: - case Op_ArgList: - return true; case Op_Phi: case Op_TauPi: return false; Index: vm/jitrino/src/optimizer/Inst.h =================================================================== --- vm/jitrino/src/optimizer/Inst.h (revision 609948) +++ vm/jitrino/src/optimizer/Inst.h (working copy) @@ -1257,17 +1257,6 @@ Inst* makeMethodMarker(MethodMarkerInst::Kind kind, MethodDesc* methodDesc); - // value type instructions - Inst* makeLdObj(Opnd* dst, Opnd* addrOfSrcValObj, Type* type); - Inst* makeStObj(Opnd* addrOfDstVal, Opnd* srcVal, Type* type); - Inst* makeCopyObj(Opnd* dstValPtr, Opnd* srcValPtr, Type* type); - Inst* makeInitObj(Opnd* valPtr, Type* type); - Inst* makeSizeof(Opnd* dst, Type* type); - Inst* makeBox(Opnd* dst, Opnd* val, Type* type); - Inst* makeUnbox(Opnd* dst, Opnd* obj, Type* type); - Inst* makeLdToken(Opnd* dst, MethodDesc* enclosingMethod, uint32 metadataToken); - - // SSA Inst* makePhi(Opnd* dst, uint32 numOpnds, Opnd** opnds); // array is copied Inst* makeTauPi(Opnd* dst, Opnd* src, Opnd *tau, PiCondition *cond); @@ -1911,52 +1900,7 @@ virtual Inst* caseMethodMarker(Inst* inst)=0;// {return caseDefault(inst);} - // value type instructions virtual Inst* - caseLdObj(TypeInst* inst)=0;// {return caseDefault(inst);} - - virtual Inst* - caseStObj(TypeInst* inst)=0;// {return caseDefault(inst);} - - virtual Inst* - caseCopyObj(TypeInst* inst)=0;// {return caseDefault(inst);} - - virtual Inst* - caseInitObj(TypeInst* inst)=0;// {return caseDefault(inst);} - - virtual Inst* - caseBox(TypeInst* inst)=0;// {return caseDefault(inst);} - - virtual Inst* - caseUnbox(TypeInst* inst)=0;// {return caseDefault(inst);} - - // checks - virtual Inst* - caseLdToken(TokenInst* inst)=0;// {return caseDefault(inst);} - - virtual Inst* - caseMkRefAny(Inst* inst)=0;// {return caseDefault(inst);} - - virtual Inst* - caseRefAnyVal(Inst* inst)=0;// {return caseDefault(inst);} - - virtual Inst* - caseRefAnyType(Inst* inst)=0;// {return caseDefault(inst);} - - virtual Inst* - caseInitBlock(Inst* inst)=0;// 
{return caseDefault(inst);} - - virtual Inst* - caseCopyBlock(Inst* inst)=0;// {return caseDefault(inst);} - - virtual Inst* - caseAlloca(Inst* inst)=0;// {return caseDefault(inst);} - - // makeGetType is not used - virtual Inst* - caseArgList(Inst* inst)=0;// {return caseDefault(inst);} - - virtual Inst* casePhi(Inst* inst)=0;// {return caseDefault(inst);} virtual Inst* Index: vm/jitrino/src/optimizer/IRBuilder.h =================================================================== --- vm/jitrino/src/optimizer/IRBuilder.h (revision 609948) +++ vm/jitrino/src/optimizer/IRBuilder.h (working copy) @@ -89,9 +89,7 @@ Opnd* genAdd(Type* dstType, Modifier mod, Opnd* src1, Opnd* src2); // TR //SI Opnd* genMul(Type* dstType, Modifier mod, Opnd* src1, Opnd* src2);//TR //SI Opnd* genSub(Type* dstType, Modifier mod, Opnd* src1, Opnd* src2);//TR //SI - Opnd* genCliDiv(Type* dstType, Modifier mod, Opnd* src1, Opnd* src2); // TR Opnd* genDiv(Type* dstType, Modifier mod, Opnd* src1, Opnd* src2); //TR - Opnd* genCliRem(Type* dstType, Modifier mod, Opnd* src1, Opnd* src2); // TR Opnd* genRem(Type* dstType, Modifier mod, Opnd* src1, Opnd* src2);//TR Opnd* genNeg(Type* dstType, Opnd* src);//TR //SI Opnd* genMulHi(Type* dstType, Modifier mod, Opnd* src1, Opnd* src2); //SI @@ -202,7 +200,6 @@ Opnd* genLdRef(MethodDesc* enclosingMethod, uint32 stringToken, Type* type);//TR Opnd* genLdVar(Type* dstType, VarOpnd* var);//TR Opnd* genLdVarAddr(VarOpnd* var);//TR - Opnd* genLdInd(Type*, Opnd* ptr); // for use by front-ends, but not simplifier//TR Opnd* genLdField(Type*, Opnd* base, FieldDesc* fieldDesc); //TR Opnd* genLdStatic(Type*, FieldDesc* fieldDesc);//TR Opnd* genLdElem(Type* elemType, Opnd* array, Opnd* index); //TR @@ -220,6 +217,8 @@ Opnd* genLdFieldWithResolve(Type*, Opnd* base, ObjectType* enclClass, uint32 cpIdx); //TR Opnd* genLdStaticWithResolve(Type*, ObjectType* enclClass, uint32 cpIdx);//TR + Opnd* genStoredTypeIsOk(Type* type, Opnd* src); + // store instructions void genStVar(VarOpnd* var, Opnd* src);//TR void genStField(Type*, Opnd* base, FieldDesc* fieldDesc, Opnd* src);//TR @@ -227,7 +226,6 @@ void genStElem(Type*, Opnd* array, Opnd* index, Opnd* src);//TR void genStElem(Type*, Opnd* array, Opnd* index, Opnd* src, Opnd* tauNullCheck, Opnd* tauBaseTypeCheck, Opnd* tauAddressInRange); - void genStInd(Type*, Opnd* ptr, Opnd* src);//TR void genStFieldWithResolve(Type*, Opnd* base, ObjectType* enclClass, uint32 cpIdx, Opnd* src);//TR void genStStaticWithResolve(Type*, ObjectType* enclClass, uint32 cpIdx, Opnd* src);//TR @@ -259,8 +257,6 @@ // CastException (succeeds if argument is null, returns casted object) Opnd* genCast(Opnd* src, Type* type); // TR Opnd* genCastWithResolve(Opnd* src, Type* type, ObjectType* enclClass, uint32 cpIndex); // TR - // returns trueResult if src is an instance of type, 0 otherwise - Opnd* genAsType(Opnd* src, Type* type); // TR // returns 1 if src is not null and an instance of type, 0 otherwise Opnd* genInstanceOf(Opnd* src, Type* type); //TR Opnd* genInstanceOfWithResolve(Opnd* src, ObjectType* enclClass, uint32 cpIndex); //TR @@ -270,23 +266,6 @@ void genFallThroughLabel(LabelInst* labelInst); //TR // method entry/exit LabelInst* genMethodEntryLabel(MethodDesc* methodDesc);//TR - // value object instructions - Opnd* genLdObj(Type* type, Opnd* addrOfValObj);//TR - void genStObj(Opnd* addrOfDstVal, Opnd* srcVal, Type* type);//TR - void genCopyObj(Type* type, Opnd* dstValPtr, Opnd* srcValPtr); // TR - void genInitObj(Type* type, Opnd* valPtr); //TR - Opnd* genSizeOf(Type* 
type);//TR - Opnd* genMkRefAny(Type* type, Opnd* ptr);//TR - Opnd* genRefAnyType(Opnd* typedRef);//TR - Opnd* genRefAnyVal(Type* type, Opnd* typedRef);//TR - Opnd* genUnbox(Type* type, Opnd* obj);//TR - Opnd* genBox(Type* type, Opnd* val); // TR - Opnd* genLdToken(MethodDesc* enclosingMethod, uint32 metadataToken);//TR - // block instructions - void genCopyBlock(Opnd* dstAddr, Opnd* srcAddr, Opnd* size); // TR - void genInitBlock(Opnd* dstAddr, Opnd* val, Opnd* size); //TR - Opnd* genLocAlloc(Opnd* size);//TR - Opnd* genArgList(); // TR void genTauTypeCompare(Opnd *arg0, MethodDesc* methodDesc, LabelInst *target, Opnd *tauNullChecked);//TR @@ -382,12 +361,7 @@ Opnd* genLdArrayBaseOffset(Type *elemType); Opnd* genLdArrayLenOffset(Type *elemType); Opnd* genAddOffset(Type *ptrType, Opnd* ref, Opnd* offset); - // lowered parts of monitor enter/exit; - // these assume src is already checked and is not null - void genIncRecCount(Opnd *obj, Opnd *oldLock); // result is ref:int16 - Opnd* genTauOptimisticBalancedMonitorEnter(Type *dstType, Opnd* src, Opnd *lockAddr, - Opnd *tauNullChecked); // result is int32 - void genOptimisticBalancedMonitorExit(Opnd* src, Opnd *lockAddr, Opnd *oldValue); + void genMonitorEnterFence(Opnd *src); void genMonitorExitFence(Opnd *src); // checks @@ -401,7 +375,11 @@ // // private helper methods // + Modifier getAutoCompressModifier(Type* type); Opnd* propagateCopy(Opnd*); + Opnd* uncompressOnDemand(Opnd* opnd); + Opnd* compressOnDemand(Opnd* opnd); + Type::Tag preparePairForCMP(Type::Tag cmpType, Opnd* &src1, Opnd* &src2); Inst* appendInst(Inst*); Type* getOpndTypeFromLdType(Type* ldType); Opnd* createOpnd(Type*); Index: vm/jitrino/src/optimizer/IRBuilderFlags.h =================================================================== --- vm/jitrino/src/optimizer/IRBuilderFlags.h (revision 609948) +++ vm/jitrino/src/optimizer/IRBuilderFlags.h (working copy) @@ -30,7 +30,6 @@ // struct IRBuilderFlags { IRBuilderFlags () { - expandMemAddrs = expandNullChecks = false; expandCallAddrs = expandVirtualCallAddrs = false; expandElemAddrs = expandElemTypeChecks = false; doSimplify = doCSE = false; @@ -42,7 +41,6 @@ useNewTypeSystem = false; } /* expansion flags */ - bool expandMemAddrs : 1; // expand field/array element accesses bool expandElemAddrs : 1; // expand array elem address computation bool expandCallAddrs : 1; // expand fun address computation for direct calls bool expandVirtualCallAddrs : 1; // expand fun address computation for virtual calls Index: vm/jitrino/src/optimizer/escapeanalyzer.cpp =================================================================== --- vm/jitrino/src/optimizer/escapeanalyzer.cpp (revision 609948) +++ vm/jitrino/src/optimizer/escapeanalyzer.cpp (working copy) @@ -45,7 +45,6 @@ case Op_LdRef: case Op_NewObj: case Op_NewArray: return true; case Op_NewMultiArray: - case Op_Box: return true; case Op_LdConstant: // @@ -172,7 +171,6 @@ // no src operands to mark break; case Op_NewMultiArray: - case Op_Box: break; // // sources of loads do not escape further @@ -197,7 +195,6 @@ break; case Op_LdStaticAddr: case Op_LdVarAddr: break; - case Op_Unbox: case Op_LdObj: default: ::std::cerr << "ERROR: unknown escaping ref opcode: " << inst->getOperation().getOpcodeString() @@ -282,7 +279,6 @@ case Op_TauLdField: case Op_TauLdElem: case Op_TauLdInd: case Op_LdArrayBaseAddr: case Op_AddScaledIndex: case Op_LdFieldAddr: case Op_LdElemAddr: - case Op_Unbox: case Op_AddOffset: // create a def-use to base address computation 
defUseBuilder.addDefUse(inst->getSrc(0)->getInst(),inst,0); Index: vm/jitrino/src/optimizer/Inst.cpp =================================================================== --- vm/jitrino/src/optimizer/Inst.cpp (revision 609948) +++ vm/jitrino/src/optimizer/Inst.cpp (working copy) @@ -2487,10 +2487,6 @@ return makeInst(Op_MonitorExitFence, Modifier(), Type::Void, OpndManager::getNullOpnd(), src); } -Inst* InstFactory::makeLdToken(Opnd* dst, MethodDesc* enclosingMethod, uint32 metadataToken) { - return makeTokenInst(Op_LdToken, Modifier(), Type::Object, dst, metadataToken, enclosingMethod); -} - Inst* InstFactory::makeLdRef(Modifier mod, Opnd* dst, MethodDesc* enclosingMethod, uint32 token) { return makeTokenInst(Op_LdRef, mod, dst->getType()->tag, dst, token, enclosingMethod); } @@ -2509,10 +2505,6 @@ dst->getType()->tag, dst, src, tauCheckedNull, type); } -Inst* InstFactory::makeSizeof(Opnd* dst, Type* type) { - return makeTypeInst(Op_Sizeof, Modifier(), dst->getType()->tag, dst, type); -} - Inst* InstFactory::makeTauAsType(Opnd* dst, Opnd* src, Opnd *tauNullChecked, Type* type) { assert(tauNullChecked->getType()->tag == Type::Tau); return makeTypeInst(Op_TauAsType, Modifier(), dst->getType()->tag, dst, src, tauNullChecked, type); @@ -2530,31 +2522,6 @@ Type::Void, OpndManager::getNullOpnd(), type); } -// value type instructions -Inst* InstFactory::makeLdObj(Opnd* dst, Opnd* addrOfSrcValObj, Type* type) { - return makeTypeInst(Op_LdObj, Modifier(), type->tag, dst, addrOfSrcValObj, type); -} - -Inst* InstFactory::makeStObj(Opnd* addrOfDstVal, Opnd* srcVal, Type* type) { - return makeTypeInst(Op_StObj, Modifier(), type->tag, OpndManager::getNullOpnd(), addrOfDstVal, srcVal, type); -} - -Inst* InstFactory::makeCopyObj(Opnd* dstValPtr, Opnd* srcValPtr, Type* type) { - return makeTypeInst(Op_CopyObj, Modifier(), type->tag, OpndManager::getNullOpnd(), dstValPtr, srcValPtr, type); -} - -Inst* InstFactory::makeInitObj(Opnd* valPtr, Type* type) { - return makeTypeInst(Op_InitObj, Modifier(), type->tag, OpndManager::getNullOpnd(), valPtr, type); -} - -Inst* InstFactory::makeBox(Opnd* dst, Opnd* val, Type* type) { - return makeTypeInst(Op_Box, Modifier(), dst->getType()->tag, dst, val, type); -} - -Inst* InstFactory::makeUnbox(Opnd* dst, Opnd* obj, Type* type) { - return makeTypeInst(Op_Unbox, Modifier(), dst->getType()->tag, dst, obj, type); -} - // lowered instructions Inst* InstFactory::makeTauCheckBounds(Opnd *dst, Opnd* arrayLen, Opnd* index) { return makeInst(Op_TauCheckBounds, Modifier(Exception_Sometimes)|Modifier(Overflow_Unsigned), @@ -2800,21 +2767,6 @@ case Op_MethodEntry: return caseMethodEntry(inst); case Op_MethodEnd: return caseMethodEnd(inst); case Op_SourceLineNumber: return caseSourceLineNumber(inst); - case Op_LdObj: return caseLdObj(inst->asTypeInst()); - case Op_StObj: return caseStObj(inst->asTypeInst()); - case Op_CopyObj: return caseCopyObj(inst->asTypeInst()); - case Op_InitObj: return caseInitObj(inst->asTypeInst()); - case Op_Sizeof: return caseSizeof(inst->asTypeInst()); - case Op_Box: return caseBox(inst->asTypeInst()); - case Op_Unbox: return caseUnbox(inst->asTypeInst()); - case Op_LdToken: return caseLdToken(inst->asTokenInst()); - case Op_MkRefAny: return caseMkRefAny(inst); - case Op_RefAnyVal: return caseRefAnyVal(inst); - case Op_RefAnyType: return caseRefAnyType(inst); - case Op_InitBlock: return caseInitBlock(inst); - case Op_CopyBlock: return caseCopyBlock(inst); - case Op_Alloca: return caseAlloca(inst); - case Op_ArgList: return caseArgList(inst); case Op_Phi: 
return casePhi(inst); case Op_TauPi: return caseTauPi(inst->asTauPiInst()); case Op_IncCounter: return caseIncCounter(inst); Index: vm/jitrino/src/optimizer/hashvaluenumberer.cpp =================================================================== --- vm/jitrino/src/optimizer/hashvaluenumberer.cpp (revision 609948) +++ vm/jitrino/src/optimizer/hashvaluenumberer.cpp (working copy) @@ -858,49 +858,7 @@ virtual Inst* caseMethodMarker(Inst* inst) { return caseDefault(inst); } - // value type instructions virtual Inst* - caseLdObj(TypeInst* inst) { return caseDefault(inst); } - - virtual Inst* - caseStObj(TypeInst* inst) { return caseDefault(inst); } - - virtual Inst* - caseCopyObj(TypeInst* inst) { return caseDefault(inst); } - - virtual Inst* - caseInitObj(TypeInst* inst) { return caseDefault(inst); } - - virtual Inst* - caseBox(TypeInst* inst) { return caseDefault(inst); } - - virtual Inst* - caseUnbox(TypeInst* inst) { return caseDefault(inst); } - - Inst* caseLdToken(TokenInst* inst) { return hashInst(inst); } - - virtual Inst* - caseMkRefAny(Inst* inst) { return caseDefault(inst); } - - virtual Inst* - caseRefAnyVal(Inst* inst) { return caseDefault(inst); } - - virtual Inst* - caseRefAnyType(Inst* inst) { return caseDefault(inst); } - - virtual Inst* - caseInitBlock(Inst* inst) { return caseDefault(inst); } - - virtual Inst* - caseCopyBlock(Inst* inst) { return caseDefault(inst); } - - virtual Inst* - caseAlloca(Inst* inst) { return caseDefault(inst); } - - virtual Inst* - caseArgList(Inst* inst) { return caseDefault(inst); } - - virtual Inst* casePhi(Inst* inst) { return caseDefault(inst); } virtual Inst* @@ -1088,9 +1046,19 @@ return getKey(); } CSEHashKey getKey(ConstInst* inst) { - return getKey(inst->getOperation(), - (uint32)inst->getValue().dword1, - (uint32)inst->getValue().dword2); + Type* dstType = inst->getDst()->getType(); + if ( dstType->isNullObject() ) { + // we need to take the type into hashcode as there is a difference + // between compressed and uncompressed null while const value for them is empty + return getKey(inst->getOperation(), + dstType->getId(), + (uint32)inst->getValue().dword1, + (uint32)inst->getValue().dword2); + } else { + return getKey(inst->getOperation(), + (uint32)inst->getValue().dword1, + (uint32)inst->getValue().dword2); + } } CSEHashKey getKey(TokenInst* inst) { return getKey(inst->getOperation(), Index: vm/jitrino/src/optimizer/IRBuilder.cpp =================================================================== --- vm/jitrino/src/optimizer/IRBuilder.cpp (revision 609948) +++ vm/jitrino/src/optimizer/IRBuilder.cpp (working copy) @@ -37,7 +37,6 @@ static const char* help = \ " expansion flags:\n"\ - " expandMemAddrs[={ON|off}]\n"\ " expandElemAddrs[={ON|off}]\n"\ " expandCallAddrs[={on|OFF}]\n"\ " expandVirtualCallAddrs[={ON|off}]\n"\ @@ -243,7 +242,6 @@ void IRBuilderAction::readFlags() { // IRBuilder expansion flags // - irBuilderFlags.expandMemAddrs = getBoolArg("expandMemAddrs", true); irBuilderFlags.expandElemAddrs = getBoolArg("expandElemAddrs", true); irBuilderFlags.expandCallAddrs = getBoolArg("expandCallAddrs", false); irBuilderFlags.expandVirtualCallAddrs = getBoolArg("expandVirtualCallAddrs", true); @@ -259,7 +257,7 @@ irBuilderFlags.suppressCheckBounds = getBoolArg("suppressCheckBounds", false); irBuilderFlags.insertMethodLabels = getBoolArg("insertMethodLabels", true); - irBuilderFlags.compressedReferences = getBoolArg("compressedReferences", false); + irBuilderFlags.compressedReferences = VMInterface::areReferencesCompressed(); 
irBuilderFlags.genMinMaxAbs = getBoolArg("genMinMaxAbs", false); irBuilderFlags.genFMinMaxAbs = getBoolArg("genFMinMaxAbs", false); @@ -303,7 +301,6 @@ CompilationInterface* ci = getCompilationContext()->getVMCompilationInterface(); irBuilderFlags.insertWriteBarriers = ci->needWriteBarriers(); - irBuilderFlags.compressedReferences = irBuilderFlags.compressedReferences || VMInterface::areReferencesCompressed(); } @@ -466,42 +463,7 @@ return dst; } -// -// for CLI: inserts a CheckDivOpnds before the divide Opnd* -IRBuilder::genCliDiv(Type* dstType, Modifier mod, Opnd* src1, Opnd* src2) { - src1 = propagateCopy(src1); - src2 = propagateCopy(src2); - - Operation operation(Op_TauDiv, dstType->tag, mod); - uint32 hashcode = operation.encodeForHashing(); - Opnd* dst = lookupHash(hashcode, src1, src2); // tauDivOk is not needed in hash - if (dst) return dst; - - Opnd *tauDivOk = 0; - if(src2->getType()->isInteger()) { - if (mod.getSignedModifier() == SignedOp) { - // for CLI: if signed, insert a CheckDivOpnds before the divide - tauDivOk = genTauCheckDivOpnds(src1, src2); - } else { - // if unsigned, still need a zero check - tauDivOk = genTauCheckZero(src2); - } - } else { - tauDivOk = genTauSafe(); // safe by construction - } - if (irBuilderFlags.doSimplify) { - dst = simplifier->simplifyTauDiv(dstType, mod, src1, src2, tauDivOk); - } - if (!dst) { - dst = createOpnd(dstType); - appendInst(instFactory->makeTauDiv(mod, dst, src1, src2, tauDivOk)); - } - insertHash(hashcode, src1, src2, dst->getInst()); // tauDivOk is not needed in hash - return dst; -} - -Opnd* IRBuilder::genRem(Type* dstType, Modifier mod, Opnd* src1, Opnd* src2) { src1 = propagateCopy(src1); src2 = propagateCopy(src2); @@ -529,44 +491,7 @@ return dst; } -// -// for CLI: inserts a CheckDivOpnds before the divide -// Opnd* -IRBuilder::genCliRem(Type* dstType, Modifier mod, Opnd* src1, Opnd* src2) { - src1 = propagateCopy(src1); - src2 = propagateCopy(src2); - - Operation operation(Op_TauRem, dstType->tag, mod); - uint32 hashcode = operation.encodeForHashing(); - Opnd* dst = lookupHash(hashcode, src1, src2); // tauDivOk is not needed in hash - if (dst) return dst; - - - Opnd *tauDivOk = 0; - if (src2->getType()->isInteger()) - if (mod.getSignedModifier() == SignedOp) - // for CLI: if signed, insert a CheckDivOpnds before the divide - tauDivOk = genTauCheckDivOpnds(src1, src2); - else - // if unsigned, still need zero check - tauDivOk = genTauCheckZero(src2); - else - tauDivOk = genTauSafe(); // safe by construction - - if (irBuilderFlags.doSimplify) { - dst = simplifier->simplifyTauRem(dstType, mod, src1, src2, tauDivOk); - } - - if (!dst) { - dst = createOpnd(dstType); - appendInst(instFactory->makeTauRem(mod, dst, src1, src2, tauDivOk)); - } - insertHash(hashcode, src1, src2, dst->getInst()); // tauDivOk is not needed in hash - return dst; -} - -Opnd* IRBuilder::genNeg(Type* dstType, Opnd* src) { src = propagateCopy(src); Operation operation(Op_Neg, dstType->tag, Modifier()); @@ -1070,6 +995,7 @@ Opnd* src2) { src1 = propagateCopy(src1); src2 = propagateCopy(src2); + instType = preparePairForCMP(instType,src1,src2); Operation operation(Op_Cmp, instType, mod); uint32 hashcode = operation.encodeForHashing(); @@ -1103,6 +1029,7 @@ Opnd* src2) { src1 = propagateCopy(src1); src2 = propagateCopy(src2); + instType = preparePairForCMP(instType,src1,src2); Operation operation(Op_Cmp3, instType, mod); uint32 hashcode = operation.encodeForHashing(); Opnd* dst = lookupHash(hashcode, src1, src2); @@ -1131,6 +1058,7 @@ Opnd* src2) { src1 = 
propagateCopy(src1); src2 = propagateCopy(src2); + instType = preparePairForCMP(instType,src1,src2); if (mod > Cmp_GTE_Un) // bad modifier invalid(); @@ -1149,6 +1077,7 @@ LabelInst* label, Opnd* src) { src = propagateCopy(src); + assert(src->getType()->tag == instType); if (mod < Cmp_Zero) // bad modifier invalid(); @@ -1184,6 +1113,7 @@ void IRBuilder::genThrow(ThrowModifier mod, Opnd* exceptionObj) { exceptionObj = propagateCopy(exceptionObj); + exceptionObj = uncompressOnDemand(exceptionObj); appendInst(instFactory->makeThrow(mod, exceptionObj)); } @@ -1238,6 +1168,7 @@ void IRBuilder::genPrefetch(Opnd *addr) { + assert(!addr->getType()->isCompressedReference()); appendInst(instFactory->makePrefetch(propagateCopy(addr))); } @@ -1329,6 +1260,7 @@ } for (uint32 i=0; imakeDirectCall(dst, tauNullCheckedFirstArg, tauTypesChecked, @@ -1354,6 +1286,7 @@ numArgs, args); for (uint32 i=0; imakeJitHelperCall(dst, helperId, numArgs, args)); @@ -1443,6 +1378,7 @@ Opnd* args[]) { for (uint32 i=0; imakeVMHelperCall(dst, helperId, numArgs, args)); @@ -1475,6 +1411,7 @@ { for (uint32 i=0; imakeReturn(src)); } @@ -1724,23 +1664,21 @@ Opnd* IRBuilder::genLdRef(MethodDesc* enclosingMethod, uint32 stringToken, Type* type) { - bool uncompress = irBuilderFlags.compressedReferences; - - Modifier mod = uncompress ? AutoCompress_Yes : AutoCompress_No; + Modifier mod = getAutoCompressModifier(type); + // system class ref is being obtained by rth_ldc_ref_helper that always returns uncompressed address. + bool noAutoCompression = irBuilderFlags.compressedReferences && + typeManager->autoCompressRefs == false && + Type::isReference(type->tag) && + !type->isSystemClass(); + if ( noAutoCompression ) { + type = typeManager->compressType(type); + } Opnd* dst = createOpnd(type); - appendInst(instFactory->makeLdRef(mod, dst, enclosingMethod, stringToken)); return dst; } Opnd* -IRBuilder::genLdToken(MethodDesc* enclosingMethod, uint32 metadataToken) { - Opnd* dst = createOpnd(typeManager->getSystemObjectType()); - appendInst(instFactory->makeLdToken(dst, enclosingMethod, metadataToken)); - return dst; -} - -Opnd* IRBuilder::genLdVar(Type* dstType, VarOpnd* var) { if (!var->isAddrTaken()) { Opnd *dst = lookupHash(Op_LdVar, var); @@ -1771,25 +1709,12 @@ } Opnd* -IRBuilder::genLdInd(Type* type, Opnd *ptr) -{ - ptr = propagateCopy(ptr); - Opnd *tauUnsafe = genTauUnsafe(); - bool uncompress = false; - if (irBuilderFlags.compressedReferences && type->isObject()) { - assert(!type->isCompressedReference()); - uncompress = true; - } - Modifier mod = uncompress ? 
AutoCompress_Yes : AutoCompress_No; - Opnd *dst = genTauLdInd(mod, type, type->tag, ptr, - tauUnsafe, tauUnsafe); - return dst; -} - -Opnd* IRBuilder::genTauLdInd(Modifier mod, Type* type, Type::Tag ldType, Opnd* ptr, Opnd *tauBaseNonNull, Opnd *tauAddressInRange) { ptr = propagateCopy(ptr); + assert(!ptr->getType()->isCompressedReference()); // address must not be compressed + // result type can be compressed only when autoCompress is OFF + assert(!type->isCompressedReference() || typeManager->autoCompressRefs == false); tauBaseNonNull = propagateCopy(tauBaseNonNull); tauAddressInRange = propagateCopy(tauAddressInRange); Opnd* dst = createOpnd(type); @@ -1812,26 +1737,17 @@ assert(!fieldDesc->isStatic()); base = propagateCopy(base); Opnd *tauNullCheck = genTauCheckNull(base); - Opnd *tauAddressInRange = - genTauHasType(base, fieldDesc->getParentType()); + Opnd *tauAddressInRange = genTauHasType(base, fieldDesc->getParentType()); - bool uncompress = false; - if (irBuilderFlags.compressedReferences && type->isObject()) { - assert(!type->isCompressedReference()); - uncompress = true; - } - Modifier mod = uncompress ? AutoCompress_Yes : AutoCompress_No; - if (irBuilderFlags.expandMemAddrs) { - return genTauLdInd(mod, type, type->tag, - genLdFieldAddr(type, base, fieldDesc), - tauNullCheck, tauAddressInRange); - } + Modifier mod = getAutoCompressModifier(type); + Opnd* fieldAddr = genLdFieldAddr(type, base, fieldDesc); - Opnd* dst = createOpnd(type); - appendInst(instFactory->makeTauLdField(mod, type, dst, base, - tauNullCheck, tauAddressInRange, - fieldDesc)); - return dst; + Opnd* res = genTauLdInd(mod, type, type->tag, + fieldAddr, + tauNullCheck, tauAddressInRange); + // result type can be compressed only when autoCompress is OFF + assert(!res->getType()->isCompressedReference() || typeManager->autoCompressRefs == false); + return res; } Opnd* @@ -1840,16 +1756,13 @@ Opnd *tauNullCheck = genTauCheckNull(base); Opnd *tauAddressInRange = genTauSafe(); - bool uncompress = false; - if (irBuilderFlags.compressedReferences && type->isObject()) { - assert(!type->isCompressedReference()); - uncompress = true; - } - Modifier mod = uncompress ? AutoCompress_Yes : AutoCompress_No; - assert(irBuilderFlags.expandMemAddrs); + Modifier mod = getAutoCompressModifier(type); Opnd* addr = genLdFieldAddrWithResolve(type, base, enclClass, cpIndex, false); - return genTauLdInd(mod, type, type->tag, addr, tauNullCheck, tauAddressInRange); + Opnd* res = genTauLdInd(mod, type, type->tag, addr, tauNullCheck, tauAddressInRange); + // result type can be compressed only when autoCompress is OFF + assert(!res->getType()->isCompressedReference() || typeManager->autoCompressRefs == false); + return res; } void @@ -1868,39 +1781,38 @@ insertHash(Op_InitType, type->getId(), appendInst(instFactory->makeInitType(type))); } -Opnd* -IRBuilder::genLdStaticWithResolve(Type* type, ObjectType* enclClass, uint32 cpIdx) { - bool uncompress = false; - if (irBuilderFlags.compressedReferences && type->isObject()) { - assert(!type->isCompressedReference()); - uncompress = true; +Modifier +IRBuilder::getAutoCompressModifier(Type* type) { + if ( Type::isReference(type->tag) && irBuilderFlags.compressedReferences && typeManager->autoCompressRefs ) + { + return AutoCompress_Yes; } - Modifier mod = uncompress ? 
AutoCompress_Yes : AutoCompress_No; + return AutoCompress_No; +} +Opnd* +IRBuilder::genLdStaticWithResolve(Type* type, ObjectType* enclClass, uint32 cpIdx) { Opnd *tauOk = genTauSafe(); // static field, always safe + Modifier mod = getAutoCompressModifier(type); + Opnd* addrOpnd = genLdStaticAddrWithResolve(type, enclClass, cpIdx, false); - return genTauLdInd(mod, type, type->tag, addrOpnd, tauOk, tauOk); + Opnd* res = genTauLdInd(mod, type, type->tag, addrOpnd, tauOk, tauOk); + return res; } Opnd* IRBuilder::genLdStatic(Type* type, FieldDesc* fieldDesc) { - bool uncompress = false; - if (irBuilderFlags.compressedReferences && type->isObject()) { - assert(!type->isCompressedReference()); - uncompress = true; - } - Modifier mod = uncompress ? AutoCompress_Yes : AutoCompress_No; + Modifier mod = getAutoCompressModifier(type); genInitType(fieldDesc->getParentType()); - if (irBuilderFlags.expandMemAddrs) { - Opnd *tauOk = genTauSafe(); // static field, always safe - return genTauLdInd(mod, type, type->tag, genLdStaticAddr(type, fieldDesc), - tauOk, tauOk); - } - - Opnd* dst = createOpnd(type); - appendInst(instFactory->makeLdStatic(mod, type, dst, fieldDesc)); - return dst; + Opnd *tauOk = genTauSafe(); // static field, always safe + Opnd* fieldAddr = genLdStaticAddr(type, fieldDesc); + // unmanaged pointers are never being compressed + // Actually, there is only one possible case caused by magics: + // unmanaged pointer to Int8 + assert( !type->isUnmanagedPtr() || type->asPtrType()->getPointedToType()->isInt1()); + Opnd* res = genTauLdInd(mod, type, type->tag, fieldAddr, tauOk, tauOk); + return res; } @@ -1913,28 +1825,21 @@ array = propagateCopy(array); index = propagateCopy(index); - bool uncompress = false; - if (irBuilderFlags.compressedReferences && type->isObject()) { - assert(!type->isCompressedReference()); - uncompress = true; - } - Modifier mod = uncompress ? 
AutoCompress_Yes : AutoCompress_No; - - if (irBuilderFlags.expandMemAddrs) { - return genTauLdInd(mod, type, type->tag, - genLdElemAddrNoChecks(type, array, index), - tauNullChecked, tauAddressInRange); - } - Opnd* dst = createOpnd(type); - appendInst(instFactory->makeTauLdElem(mod, type, dst, array, index, - tauNullChecked, tauAddressInRange)); - return dst; + Modifier mod = getAutoCompressModifier(type); + Opnd* elemAddr = genLdElemAddrNoChecks(type, array, index); + Opnd* res = genTauLdInd(mod, type, type->tag, + elemAddr, + tauNullChecked, tauAddressInRange); + // result type can be compressed only when autoCompress is OFF + assert(!res->getType()->isCompressedReference() || typeManager->autoCompressRefs == false); + return res; } Opnd* IRBuilder::genLdElem(Type* type, Opnd* array, Opnd* index) { array = propagateCopy(array); + array = uncompressOnDemand(array); index = propagateCopy(index); Opnd *tauNullChecked = genTauCheckNull(array); @@ -1950,6 +1855,7 @@ assert(!fieldDesc->isStatic()); base = propagateCopy(base); + base = uncompressOnDemand(base); genTauCheckNull(base); @@ -1960,12 +1866,11 @@ // unmanaged pointer dst = createOpnd(typeManager->getIntPtrType()); } else if (irBuilderFlags.compressedReferences && type->isObject()) { - // until VM type system is upgraded, - // fieldDesc type will have uncompressed ref type; - // compress it - assert(!type->isCompressedReference()); - Type *compressedType = typeManager->compressType(type); - dst = createOpnd(typeManager->getManagedPtrType(compressedType)); + if(!type->isCompressedReference()) { + assert(typeManager->autoCompressRefs == true); + type = typeManager->compressType(type); + } + dst = createOpnd(typeManager->getManagedPtrType(type)); } else { dst = createOpnd(typeManager->getManagedPtrType(type)); } @@ -1978,22 +1883,19 @@ IRBuilder::genLdFieldAddrWithResolve(Type* type, Opnd* base, ObjectType* enclClass, uint32 cpIndex, bool putfield) { base = propagateCopy(base); genTauCheckNull(base); + base = uncompressOnDemand(base); //1. loading field offset JavaByteCodes opcode = putfield? 
OPCODE_PUTFIELD : OPCODE_GETFIELD; Opnd* dst = lookupHash(Op_VMHelperCall, opcode, base->getId(), cpIndex); if (dst) return dst; - if (irBuilderFlags.compressedReferences && type->isObject()) { - // until VM type system is upgraded, - // fieldDesc type will have uncompressed ref type; - // compress it - assert(!type->isCompressedReference()); - Type *compressedType = typeManager->compressType(type); - dst = createOpnd(typeManager->getManagedPtrType(compressedType)); - } else { - dst = createOpnd(typeManager->getManagedPtrType(type)); + if (irBuilderFlags.compressedReferences && type->isObject() && !type->isCompressedReference()) { + assert(typeManager->autoCompressRefs == true); + type = typeManager->compressType(type); } + dst = createOpnd(typeManager->getManagedPtrType(type)); + Opnd** args = new (irManager->getMemoryManager()) Opnd*[3]; args[0] = createTypeOpnd(enclClass); args[1] = genLdConstant((int)cpIndex); @@ -2014,16 +1916,12 @@ Opnd* dst = lookupHash(Op_LdStaticAddr, fieldDesc->getId()); if (dst) return dst; - if (irBuilderFlags.compressedReferences && type->isObject()) { - // until VM type system is upgraded, - // fieldDesc type will have uncompressed ref type; - // compress it - assert(!type->isCompressedReference()); - Type *compressedType = typeManager->compressType(type); - dst = createOpnd(typeManager->getManagedPtrType(compressedType)); - } else { - dst = createOpnd(typeManager->getManagedPtrType(type)); + if (irBuilderFlags.compressedReferences && type->isObject() && !type->isCompressedReference()) { + assert(typeManager->autoCompressRefs == true); + type = typeManager->compressType(type); } + dst = createOpnd(typeManager->getManagedPtrType(type)); + appendInst(instFactory->makeLdStaticAddr(dst, fieldDesc)); insertHash(Op_LdStaticAddr, fieldDesc->getId(), dst->getInst()); return dst; @@ -2035,16 +1933,12 @@ Opnd* dst = lookupHash(Op_VMHelperCall, opcode, cpIndex); if (dst) return dst; - if (irBuilderFlags.compressedReferences && type->isObject()) { - // until VM type system is upgraded, - // fieldDesc type will have uncompressed ref type; - // compress it - assert(!type->isCompressedReference()); - Type *compressedType = typeManager->compressType(type); - dst = createOpnd(typeManager->getManagedPtrType(compressedType)); - } else { - dst = createOpnd(typeManager->getManagedPtrType(type)); + if (irBuilderFlags.compressedReferences && type->isObject() && !type->isCompressedReference()) { + assert(typeManager->autoCompressRefs == true); + type = typeManager->compressType(type); } + dst = createOpnd(typeManager->getManagedPtrType(type)); + Opnd** args = new (irManager->getMemoryManager()) Opnd*[3]; args[0] = createTypeOpnd(enclClass); args[1] = genLdConstant((int)cpIndex); @@ -2059,6 +1953,7 @@ // null and bounds checks index = propagateCopy(index); array = propagateCopy(array); + array = uncompressOnDemand(array); Opnd *tauNullChecked = genTauCheckNull(array); genTauCheckBounds(array, index, tauNullChecked); return genLdElemAddrNoChecks(elemType, array, index); @@ -2066,6 +1961,8 @@ Opnd* IRBuilder::genLdElemAddrNoChecks(Type* elemType, Opnd* array, Opnd* index) { + assert(!array->getType()->isCompressedReference() || typeManager->autoCompressRefs == false); + assert(!elemType->isCompressedReference() || typeManager->autoCompressRefs == false); Opnd* dst; if (irBuilderFlags.expandElemAddrs) { // @@ -2081,16 +1978,10 @@ dst = lookupHash(Op_LdElemAddr, array, index); if (dst) return dst; - if (irBuilderFlags.compressedReferences && elemType->isObject()) { - // until VM type 
system is upgraded, - // fieldDesc type will have uncompressed ref type; - // compress it - assert(!elemType->isCompressedReference()); - Type *compressedType = typeManager->compressType(elemType); - dst = createOpnd(typeManager->getManagedPtrType(compressedType)); - } else { - dst = createOpnd(typeManager->getManagedPtrType(elemType)); - } + // dst must point to a compressed ref if refs are compressed + assert(!irBuilderFlags.compressedReferences || !elemType->isObject() || elemType->isCompressedReference()); + dst = createOpnd(typeManager->getManagedPtrType(elemType)); + appendInst(instFactory->makeLdElemAddr(elemType, dst, array, index)); insertHash(Op_LdElemAddr, array, index, dst->getInst()); } @@ -2114,7 +2005,6 @@ Opnd* dst = lookupHash(Op_LdFunAddrSlot, methodDesc->getId()); if (dst) return dst; - dst = createOpnd(typeManager->getMethodPtrType(methodDesc)); appendInst(instFactory->makeLdFunAddrSlot(dst, methodDesc)); insertHash(Op_LdFunAddrSlot, methodDesc->getId(), dst->getInst()); @@ -2124,6 +2014,7 @@ Opnd* IRBuilder::genLdVTable(Opnd* base, Type* type) { base = propagateCopy(base); + assert(!base->getType()->isCompressedReference()); Opnd *tauNullChecked = genTauCheckNull(base); return genTauLdVTable(base, tauNullChecked, type); @@ -2132,6 +2023,7 @@ Opnd* IRBuilder::genTauLdVTable(Opnd* base, Opnd *tauNullChecked, Type* type) { base = propagateCopy(base); + base = uncompressOnDemand(base); SsaOpnd* obj = base->asSsaOpnd(); assert(obj); @@ -2169,6 +2061,7 @@ Opnd* IRBuilder::genGetVTable(ObjectType* type) { + assert(!type->isCompressedReference()); assert(type->isClass() && (!type->isAbstract() || type->isArray())); Opnd* dst = lookupHash(Op_GetVTableAddr, type->getId()); if (dst) return dst; @@ -2182,6 +2075,7 @@ Opnd* IRBuilder::genLdVirtFunAddr(Opnd* base, MethodDesc* methodDesc) { base = propagateCopy(base); + base = uncompressOnDemand(base); Opnd* dst = lookupHash(Op_TauLdVirtFunAddr, base->getId(), methodDesc->getId()); if (dst) return dst; @@ -2203,6 +2097,7 @@ Opnd* IRBuilder::genTauLdVirtFunAddrSlot(Opnd* base, Opnd *tauOk, MethodDesc* methodDesc) { base = propagateCopy(base); + base = uncompressOnDemand(base); Opnd* dst = lookupHash(Op_TauLdVirtFunAddrSlot, base->getId(), methodDesc->getId()); if (dst) return dst; @@ -2229,6 +2124,7 @@ Opnd* IRBuilder::genArrayLen(Type* dstType, Type::Tag type, Opnd* array) { array = propagateCopy(array); + array = uncompressOnDemand(array); Opnd* dst = lookupHash(Op_TauArrayLen, array->getId()); if (dst) return dst; @@ -2248,6 +2144,7 @@ IRBuilder::genTauArrayLen(Type* dstType, Type::Tag type, Opnd* array, Opnd* tauNullChecked, Opnd *tauTypeChecked) { array = propagateCopy(array); + array = uncompressOnDemand(array); Opnd* dst = lookupHash(Op_TauArrayLen, array->getId()); if (dst) return dst; @@ -2268,6 +2165,7 @@ Opnd* IRBuilder::genLdArrayBaseAddr(Type* elemType, Opnd* array) { array = propagateCopy(array); + array = uncompressOnDemand(array); Opnd* dst = lookupHash(Op_LdArrayBaseAddr, array); if (dst) return dst; @@ -2278,16 +2176,11 @@ Type* baseType = typeManager->getArrayBaseType(arrayVal); dst = createOpnd(baseType); } else { - if (irBuilderFlags.compressedReferences && elemType->isObject()) { - // until VM type system is upgraded, - // fieldDesc type will have uncompressed ref type; - // compress it - assert(!elemType->isCompressedReference()); - Type *compressedType = typeManager->compressType(elemType); - dst = createOpnd(typeManager->getManagedPtrType(compressedType)); - } else { - dst = 
createOpnd(typeManager->getManagedPtrType(elemType)); + if (irBuilderFlags.compressedReferences && elemType->isObject() && !elemType->isCompressedReference()) { + assert(typeManager->autoCompressRefs == true); + elemType = typeManager->compressType(elemType); } + dst = createOpnd(typeManager->getManagedPtrType(elemType)); } appendInst(instFactory->makeLdArrayBaseAddr(elemType, dst, array)); insertHash(Op_LdArrayBaseAddr, array, dst->getInst()); @@ -2297,6 +2190,7 @@ Opnd* IRBuilder::genAddScaledIndex(Opnd* ptr, Opnd* index) { ptr = propagateCopy(ptr); + assert(!ptr->getType()->isCompressedReference()); index = propagateCopy(index); Opnd* dst = lookupHash(Op_AddScaledIndex, ptr, index); if (dst) return dst; @@ -2360,7 +2254,6 @@ Opnd *dst = lookupHash(Op_LdFieldOffset, fieldDesc->getId()); if (dst) return dst; - dst = createOpnd(typeManager->getOffsetType()); appendInst(instFactory->makeLdFieldOffset(dst, fieldDesc)); insertHash(Op_LdFieldOffset, fieldDesc->getId(), dst->getInst()); @@ -2375,7 +2268,6 @@ Opnd *dst = lookupHash(Op_LdFieldOffsetPlusHeapbase, fieldDesc->getId()); if (dst) return dst; - dst = createOpnd(typeManager->getOffsetPlusHeapbaseType()); appendInst(instFactory->makeLdFieldOffsetPlusHeapbase(dst, fieldDesc)); insertHash(Op_LdFieldOffsetPlusHeapbase, fieldDesc->getId(), dst->getInst()); @@ -2434,11 +2326,11 @@ IRBuilder::genAddOffset(Type *ptrType, Opnd* ref, Opnd* offset) { ref = propagateCopy(ref); + assert(!ref->getType()->isCompressedReference()); offset = propagateCopy(offset); Opnd* dst = lookupHash(Op_AddOffset, ref, offset); if (dst) return dst; - assert(!ref->getType()->isCompressedReference()); assert(offset->getType()->isOffset()); @@ -2456,7 +2348,6 @@ Opnd* dst = lookupHash(Op_AddOffsetPlusHeapbase, compref, offset); if (dst) return dst; - assert(compref->getType()->isCompressedReference()); assert(offset->getType()->isOffsetPlusHeapbase()); @@ -2470,6 +2361,17 @@ void IRBuilder::genStVar(VarOpnd* var, Opnd* src) { src = propagateCopy(src); + bool varIsCompressed = var->getType()->isCompressedReference(); + bool srcIsCompressed = src->getType()->isCompressedReference(); + if(!varIsCompressed && srcIsCompressed) { + // can be only when autoCompression is off + assert(typeManager->autoCompressRefs == false); + src = genUncompressRef(src); + } else if(varIsCompressed && !srcIsCompressed) { + // can be only when autoCompression is off + assert(typeManager->autoCompressRefs == false); + src = genCompressRef(src); + } appendInst(instFactory->makeStVar(var, src)); if (irBuilderFlags.doCSE) { insertHash(Op_LdVar, var->getId(), src->getInst()); @@ -2477,36 +2379,6 @@ } void -IRBuilder::genStInd(Type* type, - Opnd* ptr, - Opnd* src) { - ptr = propagateCopy(ptr); - src = propagateCopy(src); - - Type *ptrType = ptr->getType(); - assert(ptrType->isPtr() || ptrType->isIntPtr()); - Type *fieldType = ((PtrType *)ptrType)->getPointedToType(); - bool compress = (fieldType->isCompressedReference() && - !type->isCompressedReference()); - Modifier compressMod = Modifier(compress ? 
AutoCompress_Yes - : AutoCompress_No); - - Opnd *tauUnsafe = genTauUnsafe(); - - if (irBuilderFlags.insertWriteBarriers) { - appendInst(instFactory->makeTauStInd((Modifier(Store_WriteBarrier)| - compressMod), - type->tag, src, ptr, - tauUnsafe, tauUnsafe, tauUnsafe)); - } else { - appendInst(instFactory->makeTauStInd((Modifier(Store_NoWriteBarrier)| - compressMod), - type->tag, src, ptr, - tauUnsafe, tauUnsafe, tauUnsafe)); - } -} - -void IRBuilder::genTauStInd(Type* type, Opnd* ptr, Opnd* src, @@ -2514,6 +2386,7 @@ Opnd *tauAddressInRange, Opnd* tauElemTypeChecked) { ptr = propagateCopy(ptr); + assert(!ptr->getType()->isCompressedReference()); src = propagateCopy(src); Type *ptrType = ptr->getType(); @@ -2522,10 +2395,7 @@ if (fieldType->isArrayElement()) { fieldType = fieldType->getNonValueSupertype(); } - bool compress = (fieldType->isCompressedReference() && - !type->isCompressedReference()); - Modifier compressMod = Modifier(compress ? AutoCompress_Yes - : AutoCompress_No); + Modifier compressMod = getAutoCompressModifier(type); if (irBuilderFlags.insertWriteBarriers) { appendInst(instFactory->makeTauStInd((Modifier(Store_WriteBarrier)| @@ -2545,7 +2415,9 @@ Opnd *tauBaseNonNull, Opnd *tauAddressInRange, Opnd *tauElemTypeChecked) { objectbase = propagateCopy(objectbase); + assert(!objectbase->getType()->isCompressedReference()); ptr = propagateCopy(ptr); + assert(!ptr->getType()->isCompressedReference()); src = propagateCopy(src); tauBaseNonNull = propagateCopy(tauBaseNonNull); tauAddressInRange = propagateCopy(tauAddressInRange); @@ -2557,10 +2429,7 @@ if (fieldType->isArrayElement()) { fieldType = fieldType->getNonValueSupertype(); } - bool compress = (fieldType->isCompressedReference() && - !type->isCompressedReference()); - Modifier compressMod = Modifier(compress ? AutoCompress_Yes - : AutoCompress_No); + Modifier compressMod = getAutoCompressModifier(type); if (irBuilderFlags.insertWriteBarriers) { appendInst(instFactory->makeTauStRef((Modifier(Store_WriteBarrier)| compressMod), @@ -2576,50 +2445,47 @@ } } +Opnd* +IRBuilder::genStoredTypeIsOk(Type* type, Opnd* src) { + Type* srcType = src->getType(); + bool typeIsCompressed = type->isCompressedReference(); + bool srcIsCompressed = srcType->isCompressedReference(); + if (type->isObject() && !srcType->isNullObject()) { + if (typeIsCompressed == srcIsCompressed) { + return genTauHasType(src, type); + } else { + assert(irBuilderFlags.compressedReferences); + assert( typeIsCompressed && !srcIsCompressed ); + return genTauHasType(src, typeManager->uncompressType(type)); + } + } else { + return genTauSafe(); + } +} + void IRBuilder::genStField(Type* type, Opnd* base, FieldDesc* fieldDesc,Opnd* src) { assert (!fieldDesc->isStatic()); base = propagateCopy(base); + base = uncompressOnDemand(base); src = propagateCopy(src); + Opnd *tauBaseNonNull = genTauCheckNull(base); Opnd *tauBaseTypeIsOk = genTauHasType(base, fieldDesc->getParentType()); -// Type *fieldType = fieldDesc->getFieldType(); - Opnd *tauStoredTypeIsOk = (type->isObject() - ? 
genTauHasType(src, type) - : genTauSafe()); // safe, not an object - if (irBuilderFlags.expandMemAddrs) { // do not expand ldField of stack values - Opnd *ptr = genLdFieldAddr(type, base, fieldDesc); - if (irBuilderFlags.insertWriteBarriers && src->getType()->isObject()) { - genTauStRef(type, base, ptr, src, - tauBaseNonNull, - tauBaseTypeIsOk, - tauStoredTypeIsOk); - } else { - genTauStInd(type, ptr, src, - tauBaseNonNull, - tauBaseTypeIsOk, - tauStoredTypeIsOk); - } + Opnd *tauStoredTypeIsOk = genStoredTypeIsOk(type,src); + Opnd *ptr = genLdFieldAddr(type, base, fieldDesc); + if (irBuilderFlags.insertWriteBarriers && src->getType()->isObject()) { + genTauStRef(type, base, ptr, src, + tauBaseNonNull, + tauBaseTypeIsOk, + tauStoredTypeIsOk); } else { - if (irBuilderFlags.insertWriteBarriers && - base->getType()->isValue()==false) { - appendInst(instFactory->makeTauStField((Modifier(Store_WriteBarrier)| - Modifier(AutoCompress_Yes)), - type->tag, src, base, - tauBaseNonNull, - tauBaseTypeIsOk, - tauStoredTypeIsOk, - fieldDesc)); - } else { - appendInst(instFactory->makeTauStField((Modifier(Store_NoWriteBarrier)| - Modifier(AutoCompress_Yes)), - type->tag, src, base, - tauBaseNonNull, - tauBaseTypeIsOk, - tauStoredTypeIsOk, - fieldDesc)); - } + src = compressOnDemand(src); + genTauStInd(type, ptr, src, + tauBaseNonNull, + tauBaseTypeIsOk, + tauStoredTypeIsOk); } } @@ -2627,12 +2493,13 @@ void IRBuilder::genStFieldWithResolve(Type* type, Opnd* base, ObjectType* enclClass, uint32 cpIdx, Opnd* src) { base = propagateCopy(base); + base = uncompressOnDemand(base); src = propagateCopy(src); Opnd *tauBaseNonNull = genTauCheckNull(base); Opnd *tauBaseTypeIsOk = genTauSafe(); - Opnd *tauStoredTypeIsOk = (type->isObject() ? genTauHasType(src, type) : genTauSafe()); - assert(irBuilderFlags.expandMemAddrs); + Opnd *tauStoredTypeIsOk = genStoredTypeIsOk(type,src); Opnd *ptr = genLdFieldAddrWithResolve(type, base, enclClass, cpIdx, true); + src = compressOnDemand(src); if (irBuilderFlags.insertWriteBarriers && src->getType()->isObject()) { genTauStRef(type, base, ptr, src, tauBaseNonNull, @@ -2651,9 +2518,9 @@ IRBuilder::genStStaticWithResolve(Type* type, ObjectType* enclClass, uint32 cpIdx, Opnd* src) { src = propagateCopy(src); Opnd *tauOk = genTauSafe(); // address is always ok - Opnd *tauTypeIsOk = type->isObject() ? genTauHasType(src, type) : genTauSafe(); - assert(irBuilderFlags.expandMemAddrs); + Opnd *tauTypeIsOk = genStoredTypeIsOk(type,src); Opnd* addr = genLdStaticAddrWithResolve(type, enclClass, cpIdx, true); + src = compressOnDemand(src); genTauStInd(type, addr, src, tauOk, tauOk, tauTypeIsOk); return; } @@ -2663,31 +2530,21 @@ src = propagateCopy(src); genInitType(fieldDesc->getParentType()); Opnd *tauOk = genTauSafe(); // address is always ok -// Type *fieldType = fieldDesc->getFieldType(); - Opnd *tauTypeIsOk = (type->isObject() - ? 
genTauHasType(src, type) - : genTauSafe()); // safe, not an object - if (irBuilderFlags.expandMemAddrs) { - genTauStInd(type, genLdStaticAddr(type, fieldDesc), src, - tauOk, - tauOk, - tauTypeIsOk // safety may depend on a type check - ); - return; + Opnd *tauTypeIsOk = genStoredTypeIsOk(type,src); + Opnd* ptr = genLdStaticAddr(type, fieldDesc); + // unmanaged pointers are never being compressed + // Actually, there is only one possible case caused by magics: + // unmanaged pointer to Int8 + assert( !type->isUnmanagedPtr() || type->asPtrType()->getPointedToType()->isInt1()); + if (!type->isUnmanagedPtr()) { + src = compressOnDemand(src); } - if (irBuilderFlags.insertWriteBarriers) { - appendInst(instFactory->makeTauStStatic((Modifier(Store_WriteBarrier)| - Modifier(AutoCompress_Yes)), - type->tag, src, - tauTypeIsOk, - fieldDesc)); - } else { - appendInst(instFactory->makeTauStStatic((Modifier(Store_NoWriteBarrier)| - Modifier(AutoCompress_Yes)), - type->tag, src, - tauTypeIsOk, - fieldDesc)); - } + genTauStInd(type, ptr, src, + tauOk, + tauOk, + tauTypeIsOk // safety may depend on a type check + ); + return; } void @@ -2699,6 +2556,7 @@ Opnd* tauBaseTypeChecked, Opnd* tauAddressInRange) { array = propagateCopy(array); + array = uncompressOnDemand(array); src = propagateCopy(src); index = propagateCopy(index); @@ -2709,37 +2567,20 @@ } else { tauElemTypeChecked = genTauSafe(); // src type is ok if non-object } - if (irBuilderFlags.expandMemAddrs) { - Opnd *ptr = NULL; - if (tauNullChecked && tauAddressInRange) { - ptr = genLdElemAddrNoChecks(elemType, array, index); - } else { - ptr = genLdElemAddr(elemType, array, index); - } - if (irBuilderFlags.insertWriteBarriers && elemType->isObject()) { - genTauStRef(elemType, array, ptr, src, tauNullChecked, tauAddressInRange, - tauElemTypeChecked); - } else { - genTauStInd(elemType, ptr, src, tauNullChecked, tauAddressInRange, - tauElemTypeChecked); - } + Opnd *ptr = NULL; + if (tauNullChecked && tauAddressInRange) { + ptr = genLdElemAddrNoChecks(elemType, array, index); } else { - if (irBuilderFlags.insertWriteBarriers) { - appendInst(instFactory->makeTauStElem((Modifier(Store_WriteBarrier)| - Modifier(AutoCompress_Yes)), - elemType->tag, src, array, index, - tauNullChecked, - tauAddressInRange, - tauElemTypeChecked)); - } else { - appendInst(instFactory->makeTauStElem((Modifier(Store_NoWriteBarrier)| - Modifier(AutoCompress_Yes)), - elemType->tag, src, array, index, - tauNullChecked, - tauAddressInRange, - tauElemTypeChecked)); - } + ptr = genLdElemAddr(elemType, array, index); } + if (irBuilderFlags.insertWriteBarriers && elemType->isObject()) { + genTauStRef(elemType, array, ptr, src, tauNullChecked, tauAddressInRange, + tauElemTypeChecked); + } else { + src = compressOnDemand(src); + genTauStInd(elemType, ptr, src, tauNullChecked, tauAddressInRange, + tauElemTypeChecked); + } } void @@ -2770,7 +2611,6 @@ IRBuilder::genNewObj(Type* type) { assert(type->isNamedType()); Opnd* dst = createOpnd(type); - //FIXME class initialization must be done before allocating new object appendInst(instFactory->makeNewObj(dst, type)); genInitType(type->asNamedType()); return dst; @@ -2855,6 +2695,7 @@ void IRBuilder::genMonitorEnter(Opnd* src) { src = propagateCopy(src); + src = uncompressOnDemand(src); Opnd *tauNullChecked = genTauCheckNull(src); appendInst(instFactory->makeTauMonitorEnter(src, tauNullChecked)); } @@ -2862,6 +2703,7 @@ void IRBuilder::genMonitorExit(Opnd* src) { src = propagateCopy(src); + src = uncompressOnDemand(src); Opnd *tauNullChecked = 
genTauCheckNull(src); appendInst(instFactory->makeTauMonitorExit(src, tauNullChecked)); } @@ -2869,6 +2711,7 @@ Opnd* IRBuilder::genLdLockAddr(Type* dstType, Opnd* obj) { obj = propagateCopy(obj); + obj = uncompressOnDemand(obj); Opnd* dst = lookupHash(Op_LdLockAddr, obj); if (dst) return dst; @@ -2878,18 +2721,11 @@ return dst; } -void -IRBuilder::genIncRecCount(Opnd* obj, Opnd *oldLock) { - obj = propagateCopy(obj); - oldLock = propagateCopy(oldLock); - appendInst(instFactory->makeLdLockAddr(obj, oldLock)); -} - - Opnd* IRBuilder::genBalancedMonitorEnter(Type* dstType, Opnd* src, Opnd *lockAddr) { // src should already have been checked for null src = propagateCopy(src); + src = uncompressOnDemand(src); lockAddr = propagateCopy(lockAddr); Opnd *tauNullChecked = genTauCheckNull(src); return genTauBalancedMonitorEnter(dstType, src, lockAddr, tauNullChecked); @@ -2900,6 +2736,7 @@ Opnd* tauNullChecked) { // src should already have been checked for null src = propagateCopy(src); + src = uncompressOnDemand(src); lockAddr = propagateCopy(lockAddr); Opnd *dst = createOpnd(dstType); appendInst(instFactory->makeTauBalancedMonitorEnter(dst, src, lockAddr, @@ -2911,32 +2748,21 @@ IRBuilder::genBalancedMonitorExit(Opnd* src, Opnd *lockAddr, Opnd *oldValue) { // src should already have been checked for null src = propagateCopy(src); + src = uncompressOnDemand(src); appendInst(instFactory->makeBalancedMonitorExit(src, lockAddr, oldValue)); } -Opnd* -IRBuilder::genTauOptimisticBalancedMonitorEnter(Type* dstType, Opnd* src, - Opnd *lockAddr, - Opnd *tauNullChecked) { - // src should already have been checked for null - src = propagateCopy(src); - lockAddr = propagateCopy(lockAddr); - Opnd *dst = createOpnd(dstType); - appendInst(instFactory->makeTauOptimisticBalancedMonitorEnter(dst, src, - lockAddr, - tauNullChecked)); - return dst; -} - void IRBuilder::genMonitorEnterFence(Opnd* src) { src = propagateCopy(src); + src = uncompressOnDemand(src); appendInst(instFactory->makeMonitorEnterFence(src)); } void IRBuilder::genMonitorExitFence(Opnd* src) { src = propagateCopy(src); + src = uncompressOnDemand(src); appendInst(instFactory->makeMonitorExitFence(src)); } @@ -2956,6 +2782,8 @@ Opnd* IRBuilder::genCast(Opnd* src, Type* castType) { src = propagateCopy(src); + src = uncompressOnDemand(src); + assert(!castType->isCompressedReference()); Opnd* dst = lookupHash(Op_TauCast, src->getId(), castType->getId()); if (dst) return dst; @@ -2979,6 +2807,8 @@ Opnd* IRBuilder::genCastWithResolve(Opnd* src, Type* type, ObjectType* enclClass, uint32 cpIndex) { src = propagateCopy(src); + src = uncompressOnDemand(src); + assert(!type->isCompressedReference()); Opnd* dst = lookupHash(Op_VMHelperCall, OPCODE_CHECKCAST, src->getId(), cpIndex); if (dst) return dst; @@ -2995,6 +2825,8 @@ Opnd* IRBuilder::genTauCheckCast(Opnd* src, Opnd *tauNullChecked, Type* castType) { src = propagateCopy(src); + src = uncompressOnDemand(src); + assert(!castType->isCompressedReference()); tauNullChecked = propagateCopy(tauNullChecked); Opnd* dst = lookupHash(Op_TauCheckCast, src->getId(), castType->getId()); if (dst) return dst; @@ -3013,32 +2845,11 @@ return dst; } -// returns src if src is an instance of type, NULL otherwise Opnd* -IRBuilder::genAsType(Opnd* src, Type* type) { - if (type->isUserValue()) { - assert(0); - } - src = propagateCopy(src); - - Opnd* tauCheckedNull = genTauUnsafe(); - - Opnd* dst = lookupHash(Op_TauAsType, src->getId(), tauCheckedNull->getId(), type->getId()); - if (dst) return dst; - - if 
(irBuilderFlags.doSimplify) { - dst = simplifier->simplifyTauAsType(src, tauCheckedNull, type); - if (dst) return dst; - } - dst = createOpnd(type); - appendInst(instFactory->makeTauAsType(dst, src, tauCheckedNull, type)); - insertHash(Op_TauAsType, src->getId(), tauCheckedNull->getId(), type->getId(), dst->getInst()); - return dst; -} - -Opnd* IRBuilder::genInstanceOf(Opnd* src, Type* type) { src = propagateCopy(src); + src = uncompressOnDemand(src); + assert(!type->isCompressedReference()); Opnd *tauNullChecked = genTauUnsafe(); @@ -3061,6 +2872,7 @@ Opnd* IRBuilder::genInstanceOfWithResolve(Opnd* src, ObjectType* enclClass, uint32 cpIndex) { src = propagateCopy(src); + src = uncompressOnDemand(src); Opnd* dst = lookupHash(Op_VMHelperCall, OPCODE_INSTANCEOF, src->getId(), cpIndex); if (dst) { @@ -3076,96 +2888,6 @@ return dst; } - -Opnd* -IRBuilder::genSizeOf(Type* type) { - Opnd* dst = createOpnd(typeManager->getUInt32Type()); - appendInst(instFactory->makeSizeof(dst, type)); - return dst; -} - -Opnd* -IRBuilder::genUnbox(Type* type, Opnd* obj) { - assert(type->isValue()); - Opnd *src = propagateCopy(obj); - genTauCheckNull(obj); - Opnd *two = genCast(src, typeManager->getObjectType(((NamedType*)type)->getVMTypeHandle())); - Opnd* dst = createOpnd(typeManager->getManagedPtrType(type)); - appendInst(instFactory->makeUnbox(dst, two, type)); - return dst; -} - -Opnd* -IRBuilder::genBox(Type* type, Opnd* val) { - assert(type->isValue()); - val = propagateCopy(val); - Opnd* dst = createOpnd(typeManager->getObjectType(((NamedType*)type)->getVMTypeHandle())); - appendInst(instFactory->makeBox(dst, val, type)); - return dst; -} - -void -IRBuilder::genCopyObj(Type* type, Opnd* dstValPtr, Opnd* srcValPtr) { - appendInst(instFactory->makeCopyObj(dstValPtr, srcValPtr, type)); -} - -void -IRBuilder::genInitObj(Type* type, Opnd* valPtr) { - appendInst(instFactory->makeInitObj(valPtr, type)); -} - -Opnd* -IRBuilder::genLdObj(Type* type, Opnd* addrOfValObj) { - Opnd* dst = createOpnd(type); - appendInst(instFactory->makeLdObj(dst, addrOfValObj, type)); - return dst; -} - -void -IRBuilder::genStObj(Opnd* addrOfDstVal, Opnd* srcVal, Type* type) { - appendInst(instFactory->makeStObj(addrOfDstVal, srcVal, type)); -} - -void -IRBuilder::genCopyBlock(Opnd* dstAddr, Opnd* srcAddr, Opnd* size) { - assert(0); -} - -void -IRBuilder::genInitBlock(Opnd* dstAddr, Opnd* srcAddr, Opnd* size) { - assert(0); -} - -Opnd* -IRBuilder::genLocAlloc(Opnd* size) { - assert(0); - return NULL;; -} - -Opnd* -IRBuilder::genArgList() { - assert(0); - return NULL; -} - -Opnd* -IRBuilder::genMkRefAny(Type* type, Opnd* ptr) { - assert(0); - return NULL; -} - -Opnd* -IRBuilder::genRefAnyType(Opnd* typedRef) { - assert(0); - return NULL; -} - -Opnd* -IRBuilder::genRefAnyVal(Type* type, Opnd* typedRef) { - assert(0); - return NULL; -} - //----------------------------------------------------------------------------- // // Private helper methods for generating instructions @@ -3253,6 +2975,7 @@ return genTauUnsafe(); array = propagateCopy(array); + array = uncompressOnDemand(array); index = propagateCopy(index); // we also hash operation with array as the opnd @@ -3276,6 +2999,7 @@ return genTauUnsafe(); array = propagateCopy(array); + array = uncompressOnDemand(array); src = propagateCopy(src); Opnd* res = lookupHash(Op_TauCheckElemType, array, src); if (res) return res; @@ -3285,6 +3009,8 @@ res = simplifier->simplifyTauCheckElemType(array, src, alwaysThrows); if (res && (res->getInst()->getOpcode() != Op_TauUnsafe)) return res; } + // 
this uncompression must be later than simplifier + src = uncompressOnDemand(src); Opnd* dst = createOpnd(typeManager->getTauType()); Inst* inst = appendInst(instFactory->makeTauCheckElemType(dst, array, src, tauNullChecked, @@ -3464,6 +3190,7 @@ Opnd* IRBuilder::genTauStaticCast(Opnd *src, Opnd *tauCheckedCast, Type *castType) { + assert(!src->getType()->isCompressedReference()); Operation operation(Op_TauStaticCast, castType->tag, Modifier()); uint32 hashcode = operation.encodeForHashing(); Opnd* dst = lookupHash(hashcode, src->getId(), tauCheckedCast->getId(), castType->getId()); @@ -3482,6 +3209,16 @@ Opnd* IRBuilder::genTauHasType(Opnd *src, Type *castType) { + Type* srcType = src->getType(); + bool srcIsCompressed = srcType->isCompressedReference(); + if (srcIsCompressed) { + if(!castType->isCompressedReference()) { + castType = typeManager->compressType(castType); + } + } else { + assert(!castType->isCompressedReference()); + } + Operation operation(Op_TauHasType, castType->tag, Modifier()); uint32 hashcode = operation.encodeForHashing(); Opnd* dst = lookupHash(hashcode, src->getId(), castType->getId()); @@ -3509,6 +3246,7 @@ Opnd* IRBuilder::genTauHasExactType(Opnd *src, Type *castType) { + assert(!src->getType()->isCompressedReference()); Operation operation(Op_TauHasExactType, castType->tag, Modifier()); uint32 hashcode = operation.encodeForHashing(); Opnd* dst = lookupHash(hashcode, src->getId(), castType->getId()); @@ -3523,6 +3261,7 @@ Opnd* IRBuilder::genTauIsNonNull(Opnd *src) { + assert(!src->getType()->isCompressedReference()); uint32 hashcode = Op_TauCheckNull; Opnd* dst = lookupHash(hashcode, src->getId()); if (dst) return dst; @@ -3563,4 +3302,45 @@ return (Inst*)currentLabel->getNode()->getLastInst(); } +Opnd* IRBuilder::uncompressOnDemand(Opnd* opnd) { + Type* type = opnd->getType(); + if ( irBuilderFlags.compressedReferences && typeManager->autoCompressRefs == false && + type->isCompressedReference() ) + { + opnd = genUncompressRef(opnd); + } + return opnd; +} + +Opnd* IRBuilder::compressOnDemand(Opnd* opnd) { + Type* type = opnd->getType(); + if ( irBuilderFlags.compressedReferences && typeManager->autoCompressRefs == false && + Type::isReference(type->tag) && !type->isCompressedReference() ) + { + opnd = genCompressRef(opnd); + } + return opnd; +} + +Type::Tag IRBuilder::preparePairForCMP(Type::Tag cmpType, Opnd* &src1, Opnd* &src2) { + Type* src1Type = src1->getType(); + Type* src2Type = src2->getType(); + assert(src1Type->tag == cmpType || src2Type->tag == cmpType); + if( Type::isReference(cmpType) && src1Type != src2Type) { + bool src1IsCompressed = src1Type->isCompressedReference(); + bool src2IsCompressed = src2Type->isCompressedReference(); + if ( src1IsCompressed != src2IsCompressed ) { + if (src1IsCompressed) { + assert(!src2IsCompressed); + src2 = genCompressRef(src2); + } else if (src2IsCompressed) { + assert(!src1IsCompressed); + src1 = genCompressRef(src1); + } else {assert(0);} + return Type::CompressedObject; + } + } + return cmpType; +} + } //namespace Jitrino Index: vm/jitrino/src/optimizer/escanalyzer.h =================================================================== --- vm/jitrino/src/optimizer/escanalyzer.h (revision 609948) +++ vm/jitrino/src/optimizer/escanalyzer.h (working copy) @@ -83,8 +83,9 @@ NT_INTPTR = NT_LDOBJ+1, // Op_SaveRet NT_VARVAL = NT_LDOBJ+2, // Op_StVar,Op_Phi NT_ARRELEM = NT_LDOBJ+3, // Op_LdArrayBaseAddr,Op_AddScaledIndex - NT_REF = NT_LDOBJ+4, // reference value - Op_LdFieldAddr, - // Op_LdStaticAddr, Op_TauCast, 
Op_TauStaticCast + NT_REF = NT_LDOBJ+4, // reference value - Op_LdFieldAddr, Op_LdStaticAddr, + // Op_TauCast, Op_TauStaticCast + // Op_CompressRef, Op_UncompressRef NT_STFLD = 64, // Op_LdStaticAddr NT_INSTFLD = NT_STFLD+1, // Op_LdFieldAddr NT_ACTARG = 128, // Op_DirectCall,Op_IndirectMemoryCall Index: vm/jitrino/src/optimizer/CodeSelectors.cpp =================================================================== --- vm/jitrino/src/optimizer/CodeSelectors.cpp (revision 609948) +++ vm/jitrino/src/optimizer/CodeSelectors.cpp (working copy) @@ -283,6 +283,12 @@ case Type::Double: return CompareOp::D; default: + if (Type::isCompressedReference(type)) { + return CompareOp::CompRef; + } else { + assert(Type::isReference(type)); + return CompareOp::Ref; + } assert(Type::isReference(type)); return CompareOp::Ref; @@ -302,8 +308,12 @@ case Type::IntPtr: return CompareZeroOp::I; default: - assert(Type::isReference(type)); - return CompareZeroOp::Ref; + if (Type::isCompressedReference(type)) { + return CompareZeroOp::CompRef; + } else { + assert(Type::isReference(type)); + return CompareZeroOp::Ref; + } } } @@ -1410,7 +1420,7 @@ AutoCompressModifier acmod = inst->getAutoCompressModifier(); bool autocompress = (acmod == AutoCompress_Yes); Type::Tag type = inst->getType(); - if (acmod == AutoCompress_Yes) { + if (autocompress) { assert(Type::isReference(type)); assert(!Type::isCompressedReference(type)); } @@ -1810,93 +1820,6 @@ { break; // nothing to do } - case Op_LdObj: - { - assert(inst->getNumSrcOperands() == 1); - cgInst = instructionCallback.ldValueObj(inst->getDst()->getType(), - getCGInst(inst->getSrc(0))); - } - break; - case Op_StObj: - { - assert(inst->getNumSrcOperands() == 2); - instructionCallback.stValueObj(getCGInst(inst->getSrc(0)), - getCGInst(inst->getSrc(1))); - } - break; - case Op_CopyObj: - { - assert(inst->getNumSrcOperands() == 2); - TypeInst *typeInst = (TypeInst *)inst; - instructionCallback.copyValueObj(typeInst->getTypeInfo(), - getCGInst(inst->getSrc(0)), - getCGInst(inst->getSrc(1))); - } - break; - case Op_InitObj: - { - assert(inst->getNumSrcOperands() == 1); - TypeInst * typeInst = (TypeInst *)inst; - instructionCallback.initValueObj(typeInst->getTypeInfo(), - getCGInst(inst->getSrc(0))); - } - break; - case Op_Sizeof: - { - Type* type = inst->asTypeInst()->getTypeInfo(); - assert(type->isValueType()); - uint32 size = ((UserValueType*) type)->getUnboxedSize(); - instructionCallback.ldc_i4(size); - } - break; - case Op_Box: - { - assert(inst->getNumSrcOperands() == 1); - Type * boxedType = inst->getDst()->getType(); - assert(boxedType->isObject()); - cgInst = instructionCallback.box((ObjectType *)boxedType, - getCGInst(inst->getSrc(0))); - } - break; - case Op_Unbox: - { - assert(inst->getNumSrcOperands() == 1); - cgInst = instructionCallback.unbox(inst->getDst()->getType(), - getCGInst(inst->getSrc(0))); - } - break; - case Op_LdToken: - { - assert(inst->getNumSrcOperands() == 0); - if (!genConsts) break; - TokenInst *tokenInst = (TokenInst *)inst; - uint32 token = tokenInst->getToken(); - cgInst = instructionCallback.ldToken(inst->getDst()->getType(), - tokenInst->getEnclosingMethod(), token); - isConstant = true; - } - break; - case Op_MkRefAny: - assert(0); - break; - case Op_RefAnyVal: - assert(0); - break; - case Op_RefAnyType: - assert(0); - break; - case Op_InitBlock: - assert(0); - break; - case Op_CopyBlock: - assert(0); - break; - case Op_Alloca: - assert(0); - break; - case Op_ArgList: - assert(0); - break; case Op_Phi: { assert(0); // Phi nodes should be 
eliminated by deSSAing Index: vm/jitrino/src/optimizer/simplifier.h =================================================================== --- vm/jitrino/src/optimizer/simplifier.h (revision 609948) +++ vm/jitrino/src/optimizer/simplifier.h (working copy) @@ -830,35 +830,6 @@ // source markers Inst* caseMethodMarker(Inst* inst) {return caseDefault(inst);} - // value type instructions - Inst* caseLdObj(TypeInst* inst) {return caseDefault(inst);} - - Inst* caseStObj(TypeInst* inst) {return caseDefault(inst);} - - Inst* caseCopyObj(TypeInst* inst) {return caseDefault(inst);} - - Inst* caseInitObj(TypeInst* inst) {return caseDefault(inst);} - - Inst* caseBox(TypeInst* inst) {return caseDefault(inst);} - - Inst* caseUnbox(TypeInst* inst) {return caseDefault(inst);} - - Inst* caseLdToken(TokenInst* inst) {return caseDefault(inst);} - - Inst* caseMkRefAny(Inst* inst) {return caseDefault(inst);} - - Inst* caseRefAnyVal(Inst* inst) {return caseDefault(inst);} - - Inst* caseRefAnyType(Inst* inst) {return caseDefault(inst);} - - Inst* caseInitBlock(Inst* inst) {return caseDefault(inst);} - - Inst* caseCopyBlock(Inst* inst) {return caseDefault(inst);} - - Inst* caseAlloca(Inst* inst) {return caseDefault(inst);} - - Inst* caseArgList(Inst* inst) {return caseDefault(inst);} - Inst* casePhi(Inst* inst){return caseDefault(inst);} Inst* caseTauPi(TauPiInst* inst){return caseDefault(inst);} Index: vm/jitrino/src/optimizer/codelowerer.cpp =================================================================== --- vm/jitrino/src/optimizer/codelowerer.cpp (revision 609948) +++ vm/jitrino/src/optimizer/codelowerer.cpp (working copy) @@ -1352,7 +1352,9 @@ assert(array->getType()->isObject()); ObjectType* arrayType = (ObjectType*) array->getType(); - ObjectType* systemObjectType = typeManager.getSystemObjectType(); + ObjectType* systemObjectType = VMInterface::areReferencesCompressed() && typeManager.autoCompressRefs == false ? + typeManager.getCompressedSystemObjectType() : + typeManager.getSystemObjectType(); ObjectType* arrayOfObjectType = typeManager.getArrayType(systemObjectType); if(arrayType == arrayOfObjectType) { // Check if it's an exact match at runtime. If so, we can avoid the more expensive elemType check. Index: vm/jitrino/src/optimizer/Opcode.h =================================================================== --- vm/jitrino/src/optimizer/Opcode.h (revision 609948) +++ vm/jitrino/src/optimizer/Opcode.h (working copy) @@ -425,30 +425,6 @@ Op_MethodEnd, // end of a method Op_SourceLineNumber, // change to source position - // Instructions for manipulating value objects in CIL - Op_LdObj, // load a value type to the stack - Op_StObj, // store a value type from the stack - Op_CopyObj, // copy a value type - Op_InitObj, // initialize a value type - Op_Sizeof, // Pushes the size of a value type as a U4 - Op_Box, - Op_Unbox, - - // Pushes a RuntimeMethodHandle, RuntimeTypeHandle, or RuntimeFieldHandle for - // passing to the Reflection methods in the system class library. 
- Op_LdToken, - - // Instruction for manipulating typed references - Op_MkRefAny, // transforms a pointer to a typed reference - Op_RefAnyVal, - Op_RefAnyType, - - // Memory instructions - Op_InitBlock, // memset - Op_CopyBlock, // memcopy - Op_Alloca, // allocations memory from the stack, not verifiable - Op_ArgList, // for implementing varargs; use is private to CLI System.ArgIterator - // Special SSA nodes Op_Phi, // merge point Op_TauPi, // leverage split based on condition @@ -459,7 +435,7 @@ // Compressed Pointer instructions Op_UncompressRef, // uncmpref = (cmpref<getCompilationLevelMemoryManager(); OptInitAction* myAction = (OptInitAction*)getAction(); OptimizerFlags& flags = myAction->optimizerFlags; + cc->getVMCompilationInterface()->getTypeManager().autoCompressRefs = getBoolArg("doAutoCompression", true); IRManager* irm = new (mm) IRManager(mm, *cc->getVMCompilationInterface(), flags); cc->setHIRManager(irm); } @@ -124,7 +125,6 @@ optimizerFlags.sink_constants = getBoolArg("sink_constants", true); optimizerFlags.sink_constants1 = getBoolArg("sink_constants1", false); - //simplifier flags optimizerFlags.elim_cmp3 = getBoolArg("elim_cmp3", true); optimizerFlags.use_mulhi = getBoolArg("use_mulhi", false); Index: vm/jitrino/src/optimizer/simplifytaus.cpp =================================================================== --- vm/jitrino/src/optimizer/simplifytaus.cpp (revision 609948) +++ vm/jitrino/src/optimizer/simplifytaus.cpp (working copy) @@ -643,23 +643,6 @@ case Op_MethodEnd: case Op_SourceLineNumber: assert(0); // no dstOpnd - - case Op_LdObj: - case Op_StObj: - case Op_CopyObj: - case Op_InitObj: - case Op_Sizeof: - case Op_Box: - case Op_Unbox: - case Op_LdToken: - case Op_MkRefAny: - case Op_RefAnyVal: - case Op_RefAnyType: - case Op_InitBlock: - case Op_CopyBlock: - case Op_Alloca: - case Op_ArgList: - assert(0); // do not know what to do with these break; case Op_Phi: Index: vm/jitrino/src/optimizer/memoryopt.cpp =================================================================== --- vm/jitrino/src/optimizer/memoryopt.cpp (revision 609948) +++ vm/jitrino/src/optimizer/memoryopt.cpp (working copy) @@ -896,23 +896,6 @@ thePass->effectIncCounter(n, i); break; - - case Op_LdObj: - - case Op_StObj: - case Op_CopyObj: - - case Op_InitObj: - case Op_Box: - case Op_Unbox: - case Op_LdToken: - case Op_InitBlock: - case Op_CopyBlock: - case Op_Alloca: - assert(0); - break; - - // loads vtable from object, depends on object initialization case Op_TauLdVTableAddr: @@ -963,13 +946,7 @@ case Op_TauStaticCast: // just a compile-time assertion case Op_Label: case Op_SourceLineNumber: - case Op_Sizeof: - case Op_MkRefAny: // these may allow pointers to escape or something - case Op_RefAnyVal: - case Op_RefAnyType: - - case Op_ArgList: case Op_Phi: case Op_TauPi: case Op_TauPoint: @@ -1410,7 +1387,6 @@ } } - case Op_Alloca: case Op_LdVarAddr: case Op_Phi: case Op_DefArg: //magic as method param Index: vm/jitrino/src/vm/VMInterface.cpp =================================================================== --- vm/jitrino/src/vm/VMInterface.cpp (revision 609948) +++ vm/jitrino/src/vm/VMInterface.cpp (working copy) @@ -484,6 +484,11 @@ assert(!lazy); return NULL; } + bool refsCompressed = VMInterface::areReferencesCompressed(); + if ( refsCompressed && elemType->isObject() && typeManager.autoCompressRefs == false) { + assert(!elemType->isCompressedReference()); + elemType = typeManager.compressType(elemType); + } type = typeManager.getArrayType(elemType); } else { // should not get here @@ 
-543,7 +548,7 @@ } - +// returns _uncompressed_ address of the string void* CompilationInterface::loadStringObject(MethodDesc* enclosingMethodDesc, uint32 stringToken) { Index: vm/jitrino/src/shared/Type.cpp =================================================================== --- vm/jitrino/src/shared/Type.cpp (revision 609948) +++ vm/jitrino/src/shared/Type.cpp (working copy) @@ -178,10 +178,11 @@ bool TypeManager::isSubTypeOf(Type *type1, Type *type2) { if (type1==type2) return true; - bool oneIsCompressed = type1->isCompressedReference(); - bool twoIsCompressed = type2->isCompressedReference(); - if (oneIsCompressed != twoIsCompressed) return false; + if (type1->isCompressedReference()) + type1 = uncompressType(type1); + if (type2->isCompressedReference()) + type2 = uncompressType(type2); switch (type1->tag) { case Type::SystemClass: @@ -275,23 +276,28 @@ Type* TypeManager::getCommonType(Type *type1, Type* type2) { assert(type1 != NULL && type2 != NULL); + Type *common = NULL; + bool oneIsCompressed = type1->isCompressedReference(); + bool twoIsCompressed = type2->isCompressedReference(); + + if( oneIsCompressed != twoIsCompressed && !type1->isNullObject() && !type2->isNullObject() ) { + if(oneIsCompressed) { + type1 = uncompressType(type1); + } else if(twoIsCompressed) { + type2 = uncompressType(type2); + } + } if (type1 == type2) return type1; - Type *common = NULL; - bool oneIsCompressed = type1->isCompressedReference(); - assert(type1->isCompressedReference() == type2->isCompressedReference()); + if (type1->isUnresolvedType() || type2->isUnresolvedType()) { if (type1->isNullObject()) return type2; if (type2->isNullObject()) return type1; return type1->isUnresolvedType() ? type1 : type2; } - if ( type2->isObject() && (oneIsCompressed - ? (type1 == getCompressedNullObjectType()) - : (type1 == getNullObjectType())) ) { + if ( type2->isObject() && type1->isNullObject()) { return type2; - } else if ( type1->isObject() && (oneIsCompressed - ? (type2 == getCompressedNullObjectType()) - : (type2 == getNullObjectType())) ) { + } else if ( type1->isObject() && type2->isNullObject()) { return type1; } else if (type1->isArrayType()) { if (type2->isArrayType()) { @@ -384,6 +390,7 @@ void TypeManager::init() { + autoCompressRefs = true; areReferencesCompressed = VMInterface::areReferencesCompressed(); void* systemStringVMTypeHandle = VMInterface::getSystemStringVMTypeHandle(); void* systemObjectVMTypeHandle = VMInterface::getSystemObjectVMTypeHandle(); @@ -436,9 +443,6 @@ PtrHashTable &lookupTable = isCompressed ? 
compressedArrayTypes : arrayTypes;
     if (elemType->isObject() || elemType->isValue()) {
-        if (elemType->isCompressedReference()) {
-            elemType = uncompressType(elemType);
-        }
         NamedType* elemNamedType = (NamedType*)elemType;
         //
         // change lookup to vmtypehandle of elem (elemVMTypeHandle)
@@ -462,6 +466,10 @@
             lookupTable.insert(elemNamedType,type);
             }
         }
+        assert(!autoCompressRefs || (!elemType->isCompressedReference() &&
+            !type->getElementType()->isCompressedReference()));
+        assert(autoCompressRefs || !elemType->isObject() || (elemType->isCompressedReference() &&
+            type->getElementType()->isCompressedReference()) );
         return type;
     }
     assert(0);
@@ -477,11 +485,11 @@
         if (VMInterface::isArrayOfPrimitiveElements(vmTypeHandle)) {
             elemType = getValueType(elemClassHandle);
         } else {
-            elemType = getObjectType(elemClassHandle, areReferencesCompressed);
+            elemType = getObjectType(elemClassHandle, areReferencesCompressed && autoCompressRefs == false);
         }
         return getArrayType(elemType, isCompressed, vmTypeHandle);
     }
-    PtrHashTable &typeTable = (isCompressed
+    PtrHashTable &typeTable = (isCompressed
                                ? compressedUserObjectTypes : userObjectTypes);
     ObjectType* type = typeTable.lookup(vmTypeHandle);
 
@@ -802,6 +810,20 @@
 }
 
 //
+// yields the corresponding compressed reference type
+// if autoCompressRefs mode is OFF
+//
+Type*
+TypeManager::prepareType(Type* type)
+{
+    if (areReferencesCompressed && type->isObject() && autoCompressRefs == false) {
+        assert(!Type::isCompressedReference(type->tag));
+        return compressType(type);
+    }
+    return type;
+}
+
+//
 // Returns size of the object
 //
 uint32
@@ -1119,8 +1141,9 @@
         ArrayType* arrayType = val->getType()->asArrayType();
         assert(arrayType);
         Type* elementType = arrayType->getElementType();
-        if (areReferencesCompressed && elementType->isObject())
-            elementType = compressType(elementType);
+//        if (areReferencesCompressed && elementType->isObject())
+//            elementType = compressType(elementType);
+        elementType = prepareType(elementType);
         arrayElementType = new (memManager) ValueNameType(Type::ArrayElementType, val, elementType);
         arrayElementTypes.insert(val, arrayElementType);
     }
Index: vm/jitrino/src/shared/Type.h
===================================================================
--- vm/jitrino/src/shared/Type.h	(revision 609948)
+++ vm/jitrino/src/shared/Type.h	(working copy)
@@ -178,7 +178,7 @@
     bool isOffsetPlusHeapbase() {return (tag == Type::OffsetPlusHeapbase); }
     bool isSystemObject()       {return isSystemObject(tag); }
     bool isSystemClass()        {return isSystemClass(tag); }
-    bool isSystemString()       {return (tag == Type::SystemString);}
+    bool isSystemString()       {return isSystemString(tag);}
     bool isSignedInteger()      {return isSignedInteger(tag);}
     bool isFloatingPoint()      {return isFloatingPoint(tag);}
     bool isNullObject()         {return isNullObject(tag);}
@@ -678,6 +678,8 @@
     TypeManager(MemoryManager& mm);
     virtual ~TypeManager() {}
 
+    bool autoCompressRefs;
+
     void init();
 
     //MemoryManager& getMemManager() {return memManager;}
@@ -731,6 +733,8 @@
     Type*    uncompressType(Type *compRefType);
     Type*    compressType(Type *uncompRefType);
 
+    Type*    prepareType(Type* type);
+
     PtrType* getManagedPtrType(Type* pointedToType);
     PtrType* getUnmanagedPtrType(Type* pointedToType);
 
Index: vm/jitrino/src/translator/java/JavaLabelPrepass.cpp
===================================================================
--- vm/jitrino/src/translator/java/JavaLabelPrepass.cpp	(revision 609948)
+++ vm/jitrino/src/translator/java/JavaLabelPrepass.cpp	(working copy)
@@ -1192,7 +1192,8 @@
     Type* type =
         compilationInterface.getNamedType(methodDesc.getParentHandle(), constPoolIndex);
     assert(type);
-    slot.type = typeManager.getArrayType(type);
+    type = typeManager.prepareType(type);
+    slot.type = typeManager.getArrayType(type,false); // new_array helper returns uncompressed array
     slot.vars = NULL;
     jitrino_assert( slot.type);
     pushType(slot);
@@ -1269,6 +1270,7 @@
         fieldType = compilationInterface.getFieldType(methodDesc.getParentHandle(), constPoolIndex);
     }
     assert(fieldType);
+    fieldType = typeManager.prepareType(fieldType);
     pushType(typeManager.toInternalType(fieldType));
 }
 
@@ -1276,6 +1278,7 @@
     FieldDesc *fdesc = compilationInterface.getStaticField(methodDesc.getParentHandle(), constPoolIndex, true);
     Type* fieldType = fdesc ? fdesc->getFieldType() : NULL;
     if (fieldType){
+        fieldType = typeManager.prepareType(fieldType);
         popAndCheck(getJavaType(fieldType));
     } else {
         // lazy resolution mode or
@@ -1295,6 +1298,7 @@
         fieldType = compilationInterface.getFieldType(methodDesc.getParentHandle(), constPoolIndex);
     }
     assert(fieldType);
+    fieldType = typeManager.prepareType(fieldType);
     pushType(typeManager.toInternalType(fieldType));
 }
 
@@ -1302,6 +1306,7 @@
     FieldDesc *fdesc = compilationInterface.getNonStaticField(methodDesc.getParentHandle(), constPoolIndex, true);
     Type* fieldType = fdesc ? fdesc->getFieldType() : NULL;
     if (fieldType){
+        fieldType = typeManager.prepareType(fieldType);//can be skipped (getJavaType doesn't depend on compression)
         popAndCheck(getJavaType(fieldType));
     } else {
         // throwing respective exception helper will be inserted at the Translator
@@ -1372,7 +1377,7 @@
 
     // recognize and push respective returnType
     Type* retType = getRetTypeBySignature(compilationInterface, methodDesc.getParentHandle(), methodSig);
-    assert(retType);
+    assert(retType && !retType->isCompressedReference());
 
     // push the return type
     if (retType->tag != Type::Void) {
@@ -1495,7 +1500,8 @@
 
     if (arrayDim > 0) {
         for (;arrayDim > 0; arrayDim--) {
-            resType = typeManager.getArrayType(resType, false);
+            resType = typeManager.prepareType(resType);
+            resType = typeManager.getArrayType(resType,false); // call returns uncompressed array
         }
     }
     return resType;
@@ -1508,6 +1514,7 @@
     Type* type = methodDesc->getReturnType();
     // push the return type
     if (type) {
+        assert(!type->isCompressedReference());
         if ( type->tag != Type::Void ) {
             pushType(typeManager.toInternalType(type));
         }
Index: vm/jitrino/src/translator/java/JavaLabelPrepass.h
===================================================================
--- vm/jitrino/src/translator/java/JavaLabelPrepass.h	(revision 609948)
+++ vm/jitrino/src/translator/java/JavaLabelPrepass.h	(working copy)
@@ -284,6 +284,7 @@
         case Type::CompressedNullObject:
         case Type::CompressedUnresolvedObject:
         case Type::CompressedSystemString:
+        case Type::CompressedSystemClass:
         case Type::CompressedSystemObject:
             return A;
         case Type::IntPtr: // reserved for JSR
Index: vm/jitrino/src/translator/java/JavaByteCodeTranslator.cpp
===================================================================
--- vm/jitrino/src/translator/java/JavaByteCodeTranslator.cpp	(revision 609948)
+++ vm/jitrino/src/translator/java/JavaByteCodeTranslator.cpp	(working copy)
@@ -708,6 +708,7 @@
         // but field->getFieldType() returns NULL in this case)
         fieldType = compilationInterface.getFieldType(methodToCompile.getParentHandle(),constPoolIndex);
     }
+    fieldType = typeManager.prepareType(fieldType);
     return fieldType;
 }
 
@@ -717,7 +718,7 @@
     FieldDesc *field = compilationInterface.getStaticField(methodToCompile.getParentHandle(), constPoolIndex, false);
     if (field && field->isStatic()) {
         bool fieldValueInlined = false;
-        Type* fieldType = field->getFieldType();
+        Type* fieldType = getFieldType(field,constPoolIndex);
         assert(fieldType);
         bool fieldIsMagic = VMMagicUtils::isVMMagicClass(fieldType->getName());
         if (fieldIsMagic) {
@@ -1261,7 +1262,7 @@
     lastInstructionWasABranch = true;
     checkStack();
     LabelInst *target = getLabel(labelId(targetOffset));
-    irBuilder.genBranch(Type::Object,Cmp_EQ,target,src1,src2);
+    irBuilder.genBranch(src1->getType()->tag,Cmp_EQ,target,src1,src2);
 }
 
 void
@@ -1276,7 +1277,7 @@
     lastInstructionWasABranch = true;
     checkStack();
     LabelInst *target = getLabel(labelId(targetOffset));
-    irBuilder.genBranch(Type::Object,Cmp_NE_Un,target,src1,src2);
+    irBuilder.genBranch(src1->getType()->tag,Cmp_NE_Un,target,src1,src2);
 }
 
 void
@@ -1766,16 +1767,17 @@
 
 void
 JavaByteCodeTranslator::anewarray(uint32 constPoolIndex) {
-    NamedType* type = compilationInterface.getNamedType(methodToCompile.getParentHandle(), constPoolIndex);
+    Type* type = compilationInterface.getNamedType(methodToCompile.getParentHandle(), constPoolIndex);
+    type = typeManager.prepareType(type);
     Opnd* sizeOpnd = popOpnd();
     if (type->isUnresolvedType()) {
         if (!typeManager.isLazyResolutionMode()) {
             linkingException(constPoolIndex, OPCODE_ANEWARRAY);
         }
         //res type can be an array of multi array with uninitialized dimensions.
-        pushOpnd(irBuilder.genNewArrayWithResolve(type, sizeOpnd, methodToCompile.getParentType()->asObjectType(), constPoolIndex));
+        pushOpnd(irBuilder.genNewArrayWithResolve(type->asNamedType(), sizeOpnd, methodToCompile.getParentType()->asObjectType(), constPoolIndex));
     } else {
-        pushOpnd(irBuilder.genNewArray(type,sizeOpnd));
+        pushOpnd(irBuilder.genNewArray(type->asNamedType(),sizeOpnd));
     }
 }
 
@@ -1862,14 +1864,14 @@
 
     newFallthroughBlock();
     Opnd * nullObj = irBuilder.genLdNull();
-    irBuilder.genBranch(Type::IntPtr, Cmp_EQ, ObjIsNullLabel, nullObj, src);
+    irBuilder.genBranch(srcType->tag, Cmp_EQ, ObjIsNullLabel, nullObj, src);
 
     // src is not null here
     newFallthroughBlock();
     Opnd* srcIsSafe = irBuilder.genTauSafe();
     Opnd* dynamicVTable = irBuilder.genTauLdVTable(src, srcIsSafe, srcType);
     Opnd* staticVTable = irBuilder.genGetVTable((ObjectType*) type);
-    irBuilder.genStVar(resVar, irBuilder.genCmp(intPtrType,Type::IntPtr,Cmp_EQ,staticVTable,dynamicVTable));
+    irBuilder.genStVar(resVar, irBuilder.genCmp(intPtrType,staticVTable->getType()->tag,Cmp_EQ,staticVTable,dynamicVTable));
     irBuilder.genJump(Exit);
 
     // src is null, instanceOf returns 0
@@ -2173,7 +2175,9 @@
             stateInfo->stack[5].type->print(Log::out()); Log::out() << ::std::endl;
             Log::out() << "CONFLICT IN ARRAY ACCESS\n";
         }
-        type = typeManager.getSystemObjectType();
+        type = typeManager.autoCompressRefs ?
+               typeManager.getSystemObjectType() :
+               typeManager.getCompressedSystemObjectType();
     } else
         type = ((ArrayType*)type)->getElementType();
     pushOpnd(irBuilder.genLdElem(type,base,index));
@@ -2290,12 +2294,13 @@
     checkStack();
     LabelInst *target = getLabel(labelId(targetOffset));
 
-    if (src1->getType() == typeManager.getNullObjectType()) {
+    Type* srcType = src1->getType();
+    if (srcType->isNullObject()) {
         if (mod == Cmp_Zero)
             irBuilder.genJump(target);
         return;
     }
-    irBuilder.genBranch(Type::SystemObject,mod,target,src1);
+    irBuilder.genBranch(srcType->tag,mod,target,src1);
 }
 
 void
@@ -2512,7 +2517,7 @@
 
     // The case of same arrays and same positions
     Opnd * diff = irBuilder.genCmp3(intType,intTag,Cmp_GT,dstPos,srcPos);
-    Opnd * sameArrays = irBuilder.genCmp(intType,Type::IntPtr,Cmp_EQ,src,dst);
+    Opnd * sameArrays = irBuilder.genCmp(intType,src->getType()->tag,Cmp_EQ,src,dst);
     Opnd * zeroDiff = irBuilder.genCmp(intType,intTag,Cmp_EQ,diff,zero);
     Opnd * nothingToCopy = irBuilder.genAnd(intType,sameArrays,zeroDiff);
     irBuilder.genBranch(intTag,Cmp_GT,Exit,nothingToCopy,zero);
@@ -2680,7 +2685,7 @@
 
     // The case of same arrays and same positions
     Opnd * diff = irBuilder.genCmp3(intType,Type::Int32,Cmp_GT,dstPos,srcPos);
-    Opnd * sameArrays = irBuilder.genCmp(intType,Type::IntPtr,Cmp_EQ,src,dst);
+    Opnd * sameArrays = irBuilder.genCmp(intType,src->getType()->tag,Cmp_EQ,src,dst);
     Opnd * zeroDiff = irBuilder.genCmp(intType,Type::Int32,Cmp_EQ,diff,zero);
     Opnd * nothingToCopy = irBuilder.genAnd(intType,sameArrays,zeroDiff);
     irBuilder.genBranch(Type::Int32,Cmp_GT,L1,nothingToCopy,zero);