Index: llvm/include/llvm/IR/InstVisitor.h
===================================================================
--- llvm/include/llvm/IR/InstVisitor.h
+++ llvm/include/llvm/IR/InstVisitor.h
@@ -167,7 +167,7 @@
   RetTy visitICmpInst(ICmpInst &I)                { DELEGATE(CmpInst);}
   RetTy visitFCmpInst(FCmpInst &I)                { DELEGATE(CmpInst);}
   RetTy visitAllocaInst(AllocaInst &I)            { DELEGATE(UnaryInstruction);}
-  RetTy visitLoadInst(LoadInst &I)                { DELEGATE(UnaryInstruction);}
+  RetTy visitLoadInst(LoadInst &I)                { DELEGATE(Instruction); }
   RetTy visitStoreInst(StoreInst &I)              { DELEGATE(Instruction);}
   RetTy visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) { DELEGATE(Instruction);}
   RetTy visitAtomicRMWInst(AtomicRMWInst &I)      { DELEGATE(Instruction);}
Index: llvm/include/llvm/IR/Instructions.h
===================================================================
--- llvm/include/llvm/IR/Instructions.h
+++ llvm/include/llvm/IR/Instructions.h
@@ -171,7 +171,7 @@

 /// An instruction for reading from memory. This uses the SubclassData field in
 /// Value to store whether or not the load is volatile.
-class LoadInst : public UnaryInstruction {
+class LoadInst : public Instruction {
   using VolatileField = BoolBitfieldElementT<0>;
   using AlignmentField = AlignmentBitfieldElementT<VolatileField::NextBit>;
   using OrderingField = AtomicOrderingBitfieldElementT<AlignmentField::NextBit>;
@@ -207,6 +207,15 @@
            Align Align, AtomicOrdering Order, SyncScope::ID SSID,
            BasicBlock *InsertAtEnd);

+  ~LoadInst() {
+    setLoadInstNumOperands(2); // needed by operator delete
+  }
+  // allocate space for exactly two operands
+  void *operator new(size_t s) { return User::operator new(s, 2); }
+
+  /// Transparently provide more efficient getOperand methods.
+  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
+
   /// Return true if this is a load from a volatile memory location.
   bool isVolatile() const { return getSubclassData<VolatileField>(); }

@@ -273,6 +282,14 @@
     return getPointerOperandType()->getPointerAddressSpace();
   }

+  bool hasNoaliasProvenanceOperand() const { return getNumOperands() == 2; }
+  Value *getNoaliasProvenanceOperand() const {
+    assert(hasNoaliasProvenanceOperand() && "we need a ptr_provenance");
+    return getOperand(1);
+  }
+  static unsigned getNoaliasProvenanceOperandIndex() { return 1U; }
+  void setNoaliasProvenanceOperand(Value *Provenance);
+  void removeNoaliasProvenanceOperand();
   // Methods for support type inquiry through isa, cast, and dyn_cast:
   static bool classof(const Instruction *I) {
     return I->getOpcode() == Instruction::Load;
@@ -295,6 +312,11 @@
   SyncScope::ID SSID;
 };

+template <>
+struct OperandTraits<LoadInst> : public VariadicOperandTraits<LoadInst, 1> {};
+
+DEFINE_TRANSPARENT_OPERAND_ACCESSORS(LoadInst, Value)
+
//===----------------------------------------------------------------------===//
//                                StoreInst Class
//===----------------------------------------------------------------------===//
@@ -331,10 +353,10 @@
   StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
             AtomicOrdering Order, SyncScope::ID SSID, BasicBlock *InsertAtEnd);

-  // allocate space for exactly two operands
-  void *operator new(size_t s) {
-    return User::operator new(s, 2);
+  ~StoreInst() {
+    setStoreInstNumOperands(3); // needed by operator delete
   }
+  void *operator new(size_t s) { return User::operator new(s, 3); }

   /// Return true if this is a store to a volatile memory location.
   bool isVolatile() const { return getSubclassData<VolatileField>(); }
@@ -408,6 +430,14 @@
     return getPointerOperandType()->getPointerAddressSpace();
   }

+  bool hasNoaliasProvenanceOperand() const { return getNumOperands() == 3; }
+  Value *getNoaliasProvenanceOperand() const {
+    assert(hasNoaliasProvenanceOperand() && "we need a ptr_provenance");
+    return getOperand(2);
+  }
+  static unsigned getNoaliasProvenanceOperandIndex() { return 2U; }
+  void setNoaliasProvenanceOperand(Value *Provenance);
+  void removeNoaliasProvenanceOperand();
   // Methods for support type inquiry through isa, cast, and dyn_cast:
   static bool classof(const Instruction *I) {
     return I->getOpcode() == Instruction::Store;
@@ -431,8 +461,7 @@
 };

 template <>
-struct OperandTraits<StoreInst> : public FixedNumOperandTraits<StoreInst, 2> {
-};
+struct OperandTraits<StoreInst> : public VariadicOperandTraits<StoreInst, 2> {};

 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(StoreInst, Value)

Index: llvm/include/llvm/IR/User.h
===================================================================
--- llvm/include/llvm/IR/User.h
+++ llvm/include/llvm/IR/User.h
@@ -209,6 +209,24 @@
     NumUserOperands = NumOps;
   }

+  /// FIXME: As the number of operands is used to find the start of the
+  /// allocated memory in operator delete, we need to always think we have 3
+  /// operands before delete.
+  void setStoreInstNumOperands(unsigned NumOps) {
+    assert((2 <= NumOps) && (NumOps <= 3) &&
+           "StoreInst can only have 2 or 3 operands");
+    NumUserOperands = NumOps;
+  }
+
+  /// FIXME: As the number of operands is used to find the start of the
+  /// allocated memory in operator delete, we need to always think we have 2
+  /// operands before delete.
+  void setLoadInstNumOperands(unsigned NumOps) {
+    assert((1 <= NumOps) && (NumOps <= 2) &&
+           "LoadInst can only have 1 or 2 operands");
+    NumUserOperands = NumOps;
+  }
+
   /// Subclasses with hung off uses need to manage the operand count
   /// themselves. In these instances, the operand count isn't used to find the
   /// OperandList, so there's no issue in having the operand count change.
Index: llvm/lib/IR/AsmWriter.cpp
===================================================================
--- llvm/lib/IR/AsmWriter.cpp
+++ llvm/lib/IR/AsmWriter.cpp
@@ -4160,15 +4160,21 @@
     }
     Out << ", ";
     TypePrinter.print(I.getType(), Out);
-  } else if (Operand) { // Print the normal way.
+  } else if (const auto *LI = dyn_cast<LoadInst>(&I)) {
+    Out << ' ';
+    TypePrinter.print(LI->getType(), Out);
+    Out << ", ";
+    writeOperand(I.getOperand(0), true);
+  } else if (isa<StoreInst>(&I)) {
+    Out << ' ';
+    writeOperand(I.getOperand(0), true);
+    Out << ", ";
+    writeOperand(I.getOperand(1), true);
+  } else if (Operand) { // Print the normal way.
     if (const auto *GEP = dyn_cast<GetElementPtrInst>(&I)) {
       Out << ' ';
       TypePrinter.print(GEP->getSourceElementType(), Out);
       Out << ',';
-    } else if (const auto *LI = dyn_cast<LoadInst>(&I)) {
-      Out << ' ';
-      TypePrinter.print(LI->getType(), Out);
-      Out << ',';
     }

     // PrintAllTypes - Instructions who have operands of all the same type
@@ -4178,8 +4184,7 @@
     Type *TheType = Operand->getType();

     // Select, Store and ShuffleVector always print all types.
-    if (isa<SelectInst>(I) || isa<StoreInst>(I) || isa<ShuffleVectorInst>(I)
-        || isa<ReturnInst>(I)) {
+    if (isa<SelectInst>(I) || isa<ShuffleVectorInst>(I) || isa<ReturnInst>(I)) {
       PrintAllTypes = true;
     } else {
       for (unsigned i = 1, E = I.getNumOperands(); i != E; ++i) {
@@ -4209,11 +4214,19 @@
   if (const LoadInst *LI = dyn_cast<LoadInst>(&I)) {
     if (LI->isAtomic())
       writeAtomic(LI->getContext(), LI->getOrdering(), LI->getSyncScopeID());
+    if (LI->hasNoaliasProvenanceOperand()) {
+      Out << ", ptr_provenance ";
+      writeOperand(LI->getNoaliasProvenanceOperand(), true);
+    }
     if (LI->getAlignment())
       Out << ", align " << LI->getAlignment();
   } else if (const StoreInst *SI = dyn_cast<StoreInst>(&I)) {
     if (SI->isAtomic())
       writeAtomic(SI->getContext(), SI->getOrdering(), SI->getSyncScopeID());
+    if (SI->hasNoaliasProvenanceOperand()) {
+      Out << ", ptr_provenance ";
+      writeOperand(SI->getNoaliasProvenanceOperand(), true);
+    }
     if (SI->getAlignment())
       Out << ", align " << SI->getAlignment();
   } else if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(&I)) {
Index: llvm/lib/IR/Instructions.cpp
===================================================================
--- llvm/lib/IR/Instructions.cpp
+++ llvm/lib/IR/Instructions.cpp
@@ -1367,6 +1367,11 @@
          "Ptr must have pointer type.");
   assert(!(isAtomic() && getAlignment() == 0) &&
          "Alignment required for atomic load");
+  assert((!hasNoaliasProvenanceOperand() || getOperand(1)) &&
+         "ptr_provenance must be non-null");
+  assert((!hasNoaliasProvenanceOperand() ||
+          (getOperand(0)->getType() == getOperand(1)->getType())) &&
+         "ptr_provenance must have the same type as the pointer");
 }

 static Align computeLoadStoreDefaultAlign(Type *Ty, BasicBlock *BB) {
@@ -1413,8 +1418,11 @@
 LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                    Align Align, AtomicOrdering Order, SyncScope::ID SSID,
                    Instruction *InsertBef)
-    : UnaryInstruction(Ty, Load, Ptr, InsertBef) {
+    : Instruction(Ty, Load, OperandTraits<LoadInst>::op_end(this) - 2, 2,
+                  InsertBef) {
   assert(Ty == cast<PointerType>(Ptr->getType())->getElementType());
+  setLoadInstNumOperands(1);
+  Op<0>() = Ptr;
   setVolatile(isVolatile);
   setAlignment(Align);
   setAtomic(Order, SSID);
@@ -1425,7 +1433,10 @@
 LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                    Align Align, AtomicOrdering Order, SyncScope::ID SSID,
                    BasicBlock *InsertAE)
-    : UnaryInstruction(Ty, Load, Ptr, InsertAE) {
+    : Instruction(Ty, Load, OperandTraits<LoadInst>::op_end(this) - 2, 2,
+                  InsertAE) {
+  setLoadInstNumOperands(1);
+  Op<0>() = Ptr;
   assert(Ty == cast<PointerType>(Ptr->getType())->getElementType());
   setVolatile(isVolatile);
   setAlignment(Align);
@@ -1434,6 +1445,26 @@
   setName(Name);
 }

+void LoadInst::setNoaliasProvenanceOperand(Value *Provenance) {
+  assert(Provenance && "Needs a provenance");
+  if (!hasNoaliasProvenanceOperand()) {
+    setLoadInstNumOperands(2);
+    // shift operands: the pointer's index changes with the operand count
+    setOperand(0, getOperand(1));
+  }
+  setOperand(1, Provenance);
+  AssertOK();
+}
+
+void LoadInst::removeNoaliasProvenanceOperand() {
+  assert(hasNoaliasProvenanceOperand() && "nothing to remove");
+  // shift operands
+  setOperand(1, getOperand(0));
+  setOperand(0, nullptr);
+  setLoadInstNumOperands(1);
+  AssertOK();
+}
+
//===----------------------------------------------------------------------===//
//                           StoreInst Implementation
//===----------------------------------------------------------------------===//
@@ -1447,6 +1478,11 @@
          && "Ptr must be a pointer to Val type!");
   assert(!(isAtomic() && getAlignment() == 0) &&
          "Alignment required for atomic store");
+  assert((!hasNoaliasProvenanceOperand() || getOperand(2)) &&
+         "ptr_provenance must be non-null");
+  assert((!hasNoaliasProvenanceOperand() ||
+          (getOperand(1)->getType() == getOperand(2)->getType())) &&
+         "ptr_provenance must have the same type as the pointer");
 }

 StoreInst::StoreInst(Value *val, Value *addr, Instruction *InsertBefore)
@@ -1481,8 +1517,8 @@
                      AtomicOrdering Order, SyncScope::ID SSID,
                      Instruction *InsertBefore)
     : Instruction(Type::getVoidTy(val->getContext()), Store,
-                  OperandTraits<StoreInst>::op_begin(this),
-                  OperandTraits<StoreInst>::operands(this), InsertBefore) {
+                  OperandTraits<StoreInst>::op_end(this) - 3, 3, InsertBefore) {
+  setStoreInstNumOperands(2);
   Op<0>() = val;
   Op<1>() = addr;
   setVolatile(isVolatile);
@@ -1495,8 +1531,8 @@
                      AtomicOrdering Order, SyncScope::ID SSID,
                      BasicBlock *InsertAtEnd)
     : Instruction(Type::getVoidTy(val->getContext()), Store,
-                  OperandTraits<StoreInst>::op_begin(this),
-                  OperandTraits<StoreInst>::operands(this), InsertAtEnd) {
+                  OperandTraits<StoreInst>::op_end(this) - 3, 3, InsertAtEnd) {
+  setStoreInstNumOperands(2);
   Op<0>() = val;
   Op<1>() = addr;
   setVolatile(isVolatile);
@@ -1505,6 +1541,29 @@
   AssertOK();
 }

+void StoreInst::setNoaliasProvenanceOperand(Value *Provenance) {
+  assert(Provenance && "Needs a provenance");
+  if (!hasNoaliasProvenanceOperand()) {
+    setStoreInstNumOperands(3);
+    // shift operands; FIXME: can this be made faster?
+    setOperand(0, getOperand(1));
+    setOperand(1, getOperand(2));
+  }
+  setOperand(2, Provenance);
+  AssertOK();
+}
+
+void StoreInst::removeNoaliasProvenanceOperand() {
+  assert(hasNoaliasProvenanceOperand() && "nothing to remove");
+  // make sure the 'uses' are updated
+  setOperand(2, getOperand(1));
+  setOperand(1, getOperand(0));
+  setOperand(0, nullptr);
+
+  setStoreInstNumOperands(2);
+  AssertOK();
+}
+
//===----------------------------------------------------------------------===//
//                       AtomicCmpXchgInst Implementation
//===----------------------------------------------------------------------===//
@@ -4307,13 +4366,32 @@
 }

 LoadInst *LoadInst::cloneImpl() const {
-  return new LoadInst(getType(), getOperand(0), Twine(), isVolatile(),
-                      getAlign(), getOrdering(), getSyncScopeID());
+  LoadInst *Result =
+      new LoadInst(getType(), getOperand(0), Twine(), isVolatile(), getAlign(),
+                   getOrdering(), getSyncScopeID());
+  // - we must keep the same number of operands (for vector optimizations)
+  // - if we duplicate the provenance, we can get into problems with passes
+  //   that don't know how to handle it (as MergeLoadStoreMotion shows)
+  // - safe alternative: keep the operand, but map it to undef.
+  if (hasNoaliasProvenanceOperand())
+    Result->setNoaliasProvenanceOperand(
+        UndefValue::get(getNoaliasProvenanceOperand()->getType()));
+  return Result;
 }

 StoreInst *StoreInst::cloneImpl() const {
-  return new StoreInst(getOperand(0), getOperand(1), isVolatile(), getAlign(),
-                       getOrdering(), getSyncScopeID());
+  StoreInst *Result =
+      new StoreInst(getOperand(0), getOperand(1), isVolatile(), getAlign(),
+                    getOrdering(), getSyncScopeID());
+
+  // - we must keep the same number of operands (for vector optimizations)
+  // - if we duplicate the provenance, we can get into problems with passes
+  //   that don't know how to handle it (as MergeLoadStoreMotion shows)
+  // - safe alternative: keep the operand, but map it to undef.
+  if (hasNoaliasProvenanceOperand())
+    Result->setNoaliasProvenanceOperand(
+        UndefValue::get(getNoaliasProvenanceOperand()->getType()));
+  return Result;
 }

 AtomicCmpXchgInst *AtomicCmpXchgInst::cloneImpl() const {
Index: llvm/unittests/IR/IRBuilderTest.cpp
===================================================================
--- llvm/unittests/IR/IRBuilderTest.cpp
+++ llvm/unittests/IR/IRBuilderTest.cpp
@@ -359,6 +359,31 @@
   EXPECT_FALSE(verifyModule(*M));
 }

+TEST_F(IRBuilderTest, Provenance) {
+  IRBuilder<> Builder(BB);
+
+  auto *L = Builder.CreateLoad(GV->getValueType(), GV);
+  EXPECT_FALSE(L->hasNoaliasProvenanceOperand());
+
+  auto *S = Builder.CreateStore(L, GV);
+  EXPECT_FALSE(S->hasNoaliasProvenanceOperand());
+
+  L->setNoaliasProvenanceOperand(GV);
+  EXPECT_TRUE(L->hasNoaliasProvenanceOperand());
+
+  S->setNoaliasProvenanceOperand(GV);
+  EXPECT_TRUE(S->hasNoaliasProvenanceOperand());
+
+  EXPECT_EQ(L->getNoaliasProvenanceOperand(), GV);
+  EXPECT_EQ(S->getNoaliasProvenanceOperand(), GV);
+
+  L->removeNoaliasProvenanceOperand();
+  EXPECT_FALSE(L->hasNoaliasProvenanceOperand());
+
+  S->removeNoaliasProvenanceOperand();
+  EXPECT_FALSE(S->hasNoaliasProvenanceOperand());
+}
+
 TEST_F(IRBuilderTest, Lifetime) {
   IRBuilder<> Builder(BB);
   AllocaInst *Var1 = Builder.CreateAlloca(Builder.getInt8Ty());
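
Note on the printed form: given the AsmWriter changes above, a load or store
carrying a ptr_provenance operand should print roughly as follows (a minimal
sketch; %p and %prov are hypothetical values, and the extra operand is only
emitted when hasNoaliasProvenanceOperand() is true):

  %v = load i32, i32* %p, ptr_provenance i32* %prov, align 4
  store i32 %v, i32* %p, ptr_provenance i32* %prov, align 4

Instructions without the operand keep printing exactly as before. Note also
that cloneImpl() deliberately maps the provenance to undef instead of
duplicating it, so passes unaware of ptr_provenance remain correct.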