diff --git a/llvm/include/llvm/IR/InstrTypes.h b/llvm/include/llvm/IR/InstrTypes.h --- a/llvm/include/llvm/IR/InstrTypes.h +++ b/llvm/include/llvm/IR/InstrTypes.h @@ -109,11 +109,14 @@ UnaryOperator(UnaryOps iType, Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd); - // Note: Instruction needs to be a friend here to call cloneImpl. + // Note: Instruction needs to be a friend here to call cloneImpl and + // compareInstSpecificProperties. friend class Instruction; UnaryOperator *cloneImpl() const; + int compareInstSpecificProperties(const UnaryOperator *I) const; + public: /// Construct a unary instruction, given the opcode and an operand. @@ -196,11 +199,14 @@ BinaryOperator(BinaryOps iType, Value *S1, Value *S2, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd); - // Note: Instruction needs to be a friend here to call cloneImpl. + // Note: Instruction needs to be a friend here to call cloneImpl and + // compareInstSpecificProperties. friend class Instruction; BinaryOperator *cloneImpl() const; + int compareInstSpecificProperties(const BinaryOperator *I) const; + public: // allocate space for exactly two operands void *operator new(size_t s) { @@ -768,6 +774,12 @@ Value *LHS, Value *RHS, const Twine &Name, BasicBlock *InsertAtEnd); + // Note: Instruction needs to be a friend here to call + // compareInstSpecificProperties. + friend class Instruction; + + int compareInstSpecificProperties(const CmpInst *I) const; + public: // allocate space for exactly two operands void *operator new(size_t s) { diff --git a/llvm/include/llvm/IR/Instruction.h b/llvm/include/llvm/IR/Instruction.h --- a/llvm/include/llvm/IR/Instruction.h +++ b/llvm/include/llvm/IR/Instruction.h @@ -645,6 +645,17 @@ /// Instruction *clone() const; + /// Function to introduce a total ordering between two Instructions + /// based on their special state. + /// Returns -1 (this instruction < I1) + /// +1 (this instruction > I1) + /// 0 (this instruction = I1) + /// This compares the instruction-specific properties of two instructions + /// that have the same opcode and the same number of operands. + /// Flags are used to switch consideration of alignment and metadata + /// on or off during IR comparison. + int compareSpecialState(const Instruction *I1, unsigned Flags = 0) const; + /// Return true if the specified instruction is exactly identical to the /// current one. This means that all operands match and any extra information /// (e.g. load is volatile) agree. @@ -659,10 +670,12 @@ /// sometimes useful to ignore certain attributes. enum OperationEquivalenceFlags { /// Check for equivalence ignoring load/store alignment. - CompareIgnoringAlignment = 1<<0, + CompareIgnoringAlignment = 1 << 0, + /// Check for equivalence ignoring metadata. + CompareIgnoringMetaData = 1 << 1, /// Check for equivalence treating a type and a vector of that type /// as equivalent.
- CompareUsingScalarTypes = 1<<1 + CompareUsingScalarTypes = 1 << 2 }; /// This function determines if the specified instruction executes the same diff --git a/llvm/include/llvm/IR/Instructions.h b/llvm/include/llvm/IR/Instructions.h --- a/llvm/include/llvm/IR/Instructions.h +++ b/llvm/include/llvm/IR/Instructions.h @@ -51,6 +51,9 @@ class DataLayout; class LLVMContext; +enum class IgnoreAlignment { No, Yes }; +enum class IgnoreMetaData { No, Yes }; + //===----------------------------------------------------------------------===// // AllocaInst Class //===----------------------------------------------------------------------===// @@ -60,11 +63,16 @@ Type *AllocatedType; protected: - // Note: Instruction needs to be a friend here to call cloneImpl. + // Note: Instruction needs to be a friend here to call cloneImpl and + // compareInstSpecificProperties. friend class Instruction; AllocaInst *cloneImpl() const; + int compareInstSpecificProperties( + const AllocaInst *AI, + IgnoreAlignment CmpIgnoringAlignment = IgnoreAlignment::No) const; + public: explicit AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, const Twine &Name, Instruction *InsertBefore); @@ -168,11 +176,17 @@ void AssertOK(); protected: - // Note: Instruction needs to be a friend here to call cloneImpl. + // Note: Instruction needs to be a friend here to call cloneImpl and + // compareInstSpecificProperties. friend class Instruction; LoadInst *cloneImpl() const; + int compareInstSpecificProperties( + const LoadInst *LI, + IgnoreAlignment CmpIgnoringAlignment = IgnoreAlignment::No, + IgnoreMetaData CmpIgnoringMetaData = IgnoreMetaData::No) const; + public: LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, Instruction *InsertBefore); @@ -292,11 +306,16 @@ void AssertOK(); protected: - // Note: Instruction needs to be a friend here to call cloneImpl. + // Note: Instruction needs to be a friend here to call cloneImpl and + // compareInstSpecificProperties. friend class Instruction; StoreInst *cloneImpl() const; + int compareInstSpecificProperties( + const StoreInst *SI, + IgnoreAlignment CmpIgnoringAlignment = IgnoreAlignment::No) const; + public: StoreInst(Value *Val, Value *Ptr, Instruction *InsertBefore); StoreInst(Value *Val, Value *Ptr, BasicBlock *InsertAtEnd); @@ -427,11 +446,14 @@ void Init(AtomicOrdering Ordering, SyncScope::ID SSID); protected: - // Note: Instruction needs to be a friend here to call cloneImpl. + // Note: Instruction needs to be a friend here to call cloneImpl and + // compareInstSpecificProperties. friend class Instruction; FenceInst *cloneImpl() const; + int compareInstSpecificProperties(const FenceInst *FI) const; + public: // Ordering may only be Acquire, Release, AcquireRelease, or // SequentiallyConsistent. @@ -505,11 +527,14 @@ SyncScope::ID SSID); protected: - // Note: Instruction needs to be a friend here to call cloneImpl. + // Note: Instruction needs to be a friend here to call cloneImpl and + // compareInstSpecificProperties. friend class Instruction; AtomicCmpXchgInst *cloneImpl() const; + int compareInstSpecificProperties(const AtomicCmpXchgInst *I) const; + public: AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, AtomicOrdering SuccessOrdering, @@ -664,11 +689,14 @@ /// class AtomicRMWInst : public Instruction { protected: - // Note: Instruction needs to be a friend here to call cloneImpl. + // Note: Instruction needs to be a friend here to call cloneImpl and + // compareInstSpecificProperties. 
friend class Instruction; AtomicRMWInst *cloneImpl() const; + int compareInstSpecificProperties(const AtomicRMWInst *I) const; + public: /// This enumeration lists the possible modifications atomicrmw can make. In /// the descriptions, 'p' is the pointer to the instruction's memory location, @@ -864,11 +892,14 @@ void init(Value *Ptr, ArrayRef IdxList, const Twine &NameStr); protected: - // Note: Instruction needs to be a friend here to call cloneImpl. + // Note: Instruction needs to be a friend here to call cloneImpl and + // compareInstSpecificProperties. friend class Instruction; GetElementPtrInst *cloneImpl() const; + int compareInstSpecificProperties(const GetElementPtrInst *I) const; + public: static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr, ArrayRef IdxList, @@ -1402,11 +1433,16 @@ } protected: - // Note: Instruction needs to be a friend here to call cloneImpl. + // Note: Instruction needs to be a friend here to call cloneImpl and + // compareInstSpecificProperties. friend class Instruction; CallInst *cloneImpl() const; + int compareInstSpecificProperties( + const CallInst *I, + IgnoreMetaData CmpIgnoringMetaData = IgnoreMetaData::No) const; + public: static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr = "", Instruction *InsertBefore = nullptr) { @@ -1653,11 +1689,14 @@ } protected: - // Note: Instruction needs to be a friend here to call cloneImpl. + // Note: Instruction needs to be a friend here to call cloneImpl and + // compareInstSpecificProperties. friend class Instruction; SelectInst *cloneImpl() const; + int compareInstSpecificProperties(const SelectInst *I) const; + public: static SelectInst *Create(Value *C, Value *S1, Value *S2, const Twine &NameStr = "", @@ -1725,11 +1764,14 @@ /// class VAArgInst : public UnaryInstruction { protected: - // Note: Instruction needs to be a friend here to call cloneImpl. + // Note: Instruction needs to be a friend here to call cloneImpl and + // compareInstSpecificProperties. friend class Instruction; VAArgInst *cloneImpl() const; + int compareInstSpecificProperties(const VAArgInst *I) const; + public: VAArgInst(Value *List, Type *Ty, const Twine &NameStr = "", Instruction *InsertBefore = nullptr) @@ -1770,11 +1812,14 @@ BasicBlock *InsertAtEnd); protected: - // Note: Instruction needs to be a friend here to call cloneImpl. + // Note: Instruction needs to be a friend here to call cloneImpl and + // compareInstSpecificProperties. friend class Instruction; ExtractElementInst *cloneImpl() const; + int compareInstSpecificProperties(const ExtractElementInst *I) const; + public: static ExtractElementInst *Create(Value *Vec, Value *Idx, const Twine &NameStr = "", @@ -1835,11 +1880,14 @@ BasicBlock *InsertAtEnd); protected: - // Note: Instruction needs to be a friend here to call cloneImpl. + // Note: Instruction needs to be a friend here to call cloneImpl and + // compareInstSpecificProperties. friend class Instruction; InsertElementInst *cloneImpl() const; + int compareInstSpecificProperties(const InsertElementInst *I) const; + public: static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr = "", @@ -1904,11 +1952,14 @@ Constant *ShuffleMaskForBitcode; protected: - // Note: Instruction needs to be a friend here to call cloneImpl. + // Note: Instruction needs to be a friend here to call cloneImpl and + // compareInstSpecificProperties. 
friend class Instruction; ShuffleVectorInst *cloneImpl() const; + int compareInstSpecificProperties(const ShuffleVectorInst *I) const; + public: ShuffleVectorInst(Value *V1, Value *V2, Value *Mask, const Twine &NameStr = "", @@ -2239,11 +2290,14 @@ void init(ArrayRef Idxs, const Twine &NameStr); protected: - // Note: Instruction needs to be a friend here to call cloneImpl. + // Note: Instruction needs to be a friend here to call cloneImpl and + // compareInstSpecificProperties. friend class Instruction; ExtractValueInst *cloneImpl() const; + int compareInstSpecificProperties(const ExtractValueInst *I) const; + public: static ExtractValueInst *Create(Value *Agg, ArrayRef Idxs, @@ -2292,9 +2346,7 @@ return (unsigned)Indices.size(); } - bool hasIndices() const { - return true; - } + bool hasIndices() const { return true; } // Methods for support type inquiry through isa, cast, and dyn_cast: static bool classof(const Instruction *I) { @@ -2359,11 +2411,14 @@ const Twine &NameStr); protected: - // Note: Instruction needs to be a friend here to call cloneImpl. + // Note: Instruction needs to be a friend here to call cloneImpl and + // compareInstSpecificProperties. friend class Instruction; InsertValueInst *cloneImpl() const; + int compareInstSpecificProperties(const InsertValueInst *I) const; + public: // allocate space for exactly two operands void *operator new(size_t s) { @@ -2423,9 +2478,7 @@ return (unsigned)Indices.size(); } - bool hasIndices() const { - return true; - } + bool hasIndices() const { return true; } // Methods for support type inquiry through isa, cast, and dyn_cast: static bool classof(const Instruction *I) { @@ -2498,11 +2551,14 @@ } protected: - // Note: Instruction needs to be a friend here to call cloneImpl. + // Note: Instruction needs to be a friend here to call cloneImpl and + // compareInstSpecificProperties. friend class Instruction; PHINode *cloneImpl() const; + int compareInstSpecificProperties(const PHINode *I) const; + // allocHungoffUses - this is more complicated than the generic // User::allocHungoffUses, because we have to allocate Uses for the incoming // values and pointers to the incoming blocks, all in one allocation. @@ -2739,11 +2795,14 @@ void init(unsigned NumReservedValues, const Twine &NameStr); protected: - // Note: Instruction needs to be a friend here to call cloneImpl. + // Note: Instruction needs to be a friend here to call cloneImpl and + // compareInstSpecificProperties. friend class Instruction; LandingPadInst *cloneImpl() const; + int compareInstSpecificProperties(const LandingPadInst *I) const; + public: /// Constructors - NumReservedClauses is a hint for the number of incoming /// clauses that this landingpad will have (use 0 if you really have no idea). @@ -2837,11 +2896,14 @@ explicit ReturnInst(LLVMContext &C, BasicBlock *InsertAtEnd); protected: - // Note: Instruction needs to be a friend here to call cloneImpl. + // Note: Instruction needs to be a friend here to call cloneImpl and + // compareInstSpecificProperties. friend class Instruction; ReturnInst *cloneImpl() const; + int compareInstSpecificProperties(const ReturnInst *I) const; + public: static ReturnInst* Create(LLVMContext &C, Value *retVal = nullptr, Instruction *InsertBefore = nullptr) { @@ -2921,11 +2983,14 @@ void AssertOK(); protected: - // Note: Instruction needs to be a friend here to call cloneImpl. + // Note: Instruction needs to be a friend here to call cloneImpl and + // compareInstSpecificProperties. 
friend class Instruction; BranchInst *cloneImpl() const; + int compareInstSpecificProperties(const BranchInst *I) const; + public: /// Iterator type that casts an operand to a basic block. /// @@ -3074,11 +3139,14 @@ void growOperands(); protected: - // Note: Instruction needs to be a friend here to call cloneImpl. + // Note: Instruction needs to be a friend here to call cloneImpl and + // compareInstSpecificProperties. friend class Instruction; SwitchInst *cloneImpl() const; + int compareInstSpecificProperties(const SwitchInst *I) const; + public: // -2 static const unsigned DefaultPseudoIndex = static_cast(~0L-1); @@ -3472,11 +3540,14 @@ void growOperands(); protected: - // Note: Instruction needs to be a friend here to call cloneImpl. + // Note: Instruction needs to be a friend here to call cloneImpl and + // compareInstSpecificProperties. friend class Instruction; IndirectBrInst *cloneImpl() const; + int compareInstSpecificProperties(const IndirectBrInst *I) const; + public: /// Iterator type that casts an operand to a basic block. /// @@ -3617,11 +3688,16 @@ } protected: - // Note: Instruction needs to be a friend here to call cloneImpl. + // Note: Instruction needs to be a friend here to call cloneImpl and + // compareInstSpecificProperties. friend class Instruction; InvokeInst *cloneImpl() const; + int compareInstSpecificProperties( + const InvokeInst *I, + IgnoreMetaData CmpIgnoringMetaData = IgnoreMetaData::No) const; + public: static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, BasicBlock *IfException, ArrayRef Args, @@ -3825,11 +3901,16 @@ } protected: - // Note: Instruction needs to be a friend here to call cloneImpl. + // Note: Instruction needs to be a friend here to call cloneImpl and + // compareInstSpecificProperties. friend class Instruction; CallBrInst *cloneImpl() const; + int compareInstSpecificProperties( + const CallBrInst *I, + IgnoreMetaData CmpIgnoringMetaData = IgnoreMetaData::No) const; + public: static CallBrInst *Create(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, @@ -4036,11 +4117,14 @@ ResumeInst(Value *Exn, BasicBlock *InsertAtEnd); protected: - // Note: Instruction needs to be a friend here to call cloneImpl. + // Note: Instruction needs to be a friend here to call cloneImpl and + // compareInstSpecificProperties. friend class Instruction; ResumeInst *cloneImpl() const; + int compareInstSpecificProperties(const ResumeInst *I) const; + public: static ResumeInst *Create(Value *Exn, Instruction *InsertBefore = nullptr) { return new(1) ResumeInst(Exn, InsertBefore); @@ -4119,11 +4203,18 @@ void growOperands(unsigned Size); protected: - // Note: Instruction needs to be a friend here to call cloneImpl. + // Note: Instruction needs to be a friend here to call cloneImpl and + // compareInstSpecificProperties. friend class Instruction; + // CatchPadInst needs to be a friend because compareInstSpecificProperties + // accesses a protected method in CatchPadInst class. + friend class CatchPadInst; + CatchSwitchInst *cloneImpl() const; + int compareInstSpecificProperties(const CatchSwitchInst *I) const; + public: static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest, unsigned NumHandlers, @@ -4261,6 +4352,13 @@ // CleanupPadInst Class //===----------------------------------------------------------------------===// class CleanupPadInst : public FuncletPadInst { +protected: + // Note: Instruction needs to be a friend here to call + // compareInstSpecificProperties. 
+ friend class Instruction; + + int compareInstSpecificProperties(const CleanupPadInst *I) const; + private: explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args, unsigned Values, const Twine &NameStr, @@ -4302,6 +4400,13 @@ // CatchPadInst Class //===----------------------------------------------------------------------===// class CatchPadInst : public FuncletPadInst { +protected: + // Note: Instruction needs to be a friend here to call + // compareInstSpecificProperties. + friend class Instruction; + + int compareInstSpecificProperties(const CatchPadInst *I) const; + private: explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args, unsigned Values, const Twine &NameStr, @@ -4360,11 +4465,14 @@ void init(Value *CatchPad, BasicBlock *BB); protected: - // Note: Instruction needs to be a friend here to call cloneImpl. + // Note: Instruction needs to be a friend here to call cloneImpl and + // compareInstSpecificProperties. friend class Instruction; CatchReturnInst *cloneImpl() const; + int compareInstSpecificProperties(const CatchReturnInst *I) const; + public: static CatchReturnInst *Create(Value *CatchPad, BasicBlock *BB, Instruction *InsertBefore = nullptr) { @@ -4444,11 +4552,14 @@ void init(Value *CleanupPad, BasicBlock *UnwindBB); protected: - // Note: Instruction needs to be a friend here to call cloneImpl. + // Note: Instruction needs to be a friend here to call cloneImpl and + // compareInstSpecificProperties. friend class Instruction; CleanupReturnInst *cloneImpl() const; + int compareInstSpecificProperties(const CleanupReturnInst *I) const; + public: static CleanupReturnInst *Create(Value *CleanupPad, BasicBlock *UnwindBB = nullptr, @@ -4540,11 +4651,14 @@ /// class UnreachableInst : public Instruction { protected: - // Note: Instruction needs to be a friend here to call cloneImpl. + // Note: Instruction needs to be a friend here to call cloneImpl and + // compareInstSpecificProperties. friend class Instruction; UnreachableInst *cloneImpl() const; + int compareInstSpecificProperties(const UnreachableInst *I) const; + public: explicit UnreachableInst(LLVMContext &C, Instruction *InsertBefore = nullptr); explicit UnreachableInst(LLVMContext &C, BasicBlock *InsertAtEnd); @@ -4581,12 +4695,15 @@ /// This class represents a truncation of integer types. class TruncInst : public CastInst { protected: - // Note: Instruction needs to be a friend here to call cloneImpl. + // Note: Instruction needs to be a friend here to call cloneImpl and + // compareInstSpecificProperties. friend class Instruction; /// Clone an identical TruncInst TruncInst *cloneImpl() const; + int compareInstSpecificProperties(const TruncInst *I) const; + public: /// Constructor with insert-before-instruction semantics TruncInst( @@ -4598,10 +4715,10 @@ /// Constructor with insert-at-end-of-block semantics TruncInst( - Value *S, ///< The value to be truncated - Type *Ty, ///< The (smaller) type to truncate to - const Twine &NameStr, ///< A name for the new instruction - BasicBlock *InsertAtEnd ///< The block to insert the instruction into + Value *S, ///< The value to be truncated + Type *Ty, ///< The (smaller) type to truncate to + const Twine &NameStr, ///< A name for the new instruction + BasicBlock *InsertAtEnd ///< The block to insert the instruction into ); /// Methods for support type inquiry through isa, cast, and dyn_cast: @@ -4620,12 +4737,15 @@ /// This class represents zero extension of integer types.
class ZExtInst : public CastInst { protected: - // Note: Instruction needs to be a friend here to call cloneImpl. + // Note: Instruction needs to be a friend here to call cloneImpl and + // compareInstSpecificProperties. friend class Instruction; /// Clone an identical ZExtInst ZExtInst *cloneImpl() const; + int compareInstSpecificProperties(const ZExtInst *I) const; + public: /// Constructor with insert-before-instruction semantics ZExtInst( @@ -4636,11 +4756,10 @@ ); /// Constructor with insert-at-end semantics. - ZExtInst( - Value *S, ///< The value to be zero extended - Type *Ty, ///< The type to zero extend to - const Twine &NameStr, ///< A name for the new instruction - BasicBlock *InsertAtEnd ///< The block to insert the instruction into + ZExtInst(Value *S, ///< The value to be zero extended + Type *Ty, ///< The type to zero extend to + const Twine &NameStr, ///< A name for the new instruction + BasicBlock *InsertAtEnd ///< The block to insert the instruction into ); /// Methods for support type inquiry through isa, cast, and dyn_cast: @@ -4659,12 +4778,15 @@ /// This class represents a sign extension of integer types. class SExtInst : public CastInst { protected: - // Note: Instruction needs to be a friend here to call cloneImpl. + // Note: Instruction needs to be a friend here to call cloneImpl and + // compareInstSpecificProperties. friend class Instruction; /// Clone an identical SExtInst SExtInst *cloneImpl() const; + int compareInstSpecificProperties(const SExtInst *I) const; + public: /// Constructor with insert-before-instruction semantics SExtInst( @@ -4675,11 +4797,10 @@ ); /// Constructor with insert-at-end-of-block semantics - SExtInst( - Value *S, ///< The value to be sign extended - Type *Ty, ///< The type to sign extend to - const Twine &NameStr, ///< A name for the new instruction - BasicBlock *InsertAtEnd ///< The block to insert the instruction into + SExtInst(Value *S, ///< The value to be sign extended + Type *Ty, ///< The type to sign extend to + const Twine &NameStr, ///< A name for the new instruction + BasicBlock *InsertAtEnd ///< The block to insert the instruction into ); /// Methods for support type inquiry through isa, cast, and dyn_cast: @@ -4698,12 +4819,15 @@ /// This class represents a truncation of floating point types. class FPTruncInst : public CastInst { protected: - // Note: Instruction needs to be a friend here to call cloneImpl. + // Note: Instruction needs to be a friend here to call cloneImpl and + // compareInstSpecificProperties. friend class Instruction; /// Clone an identical FPTruncInst FPTruncInst *cloneImpl() const; + int compareInstSpecificProperties(const FPTruncInst *I) const; + public: /// Constructor with insert-before-instruction semantics FPTruncInst( @@ -4715,10 +4839,10 @@ /// Constructor with insert-before-instruction semantics FPTruncInst( - Value *S, ///< The value to be truncated - Type *Ty, ///< The type to truncate to - const Twine &NameStr, ///< A name for the new instruction - BasicBlock *InsertAtEnd ///< The block to insert the instruction into + Value *S, ///< The value to be truncated + Type *Ty, ///< The type to truncate to + const Twine &NameStr, ///< A name for the new instruction + BasicBlock *InsertAtEnd ///< The block to insert the instruction into ); /// Methods for support type inquiry through isa, cast, and dyn_cast: @@ -4737,12 +4861,15 @@ /// This class represents an extension of floating point types. 
class FPExtInst : public CastInst { protected: - // Note: Instruction needs to be a friend here to call cloneImpl. + // Note: Instruction needs to be a friend here to call cloneImpl and + // compareInstSpecificProperties. friend class Instruction; /// Clone an identical FPExtInst FPExtInst *cloneImpl() const; + int compareInstSpecificProperties(const FPExtInst *I) const; + public: /// Constructor with insert-before-instruction semantics FPExtInst( @@ -4754,10 +4881,10 @@ /// Constructor with insert-at-end-of-block semantics FPExtInst( - Value *S, ///< The value to be extended - Type *Ty, ///< The type to extend to - const Twine &NameStr, ///< A name for the new instruction - BasicBlock *InsertAtEnd ///< The block to insert the instruction into + Value *S, ///< The value to be extended + Type *Ty, ///< The type to extend to + const Twine &NameStr, ///< A name for the new instruction + BasicBlock *InsertAtEnd ///< The block to insert the instruction into ); /// Methods for support type inquiry through isa, cast, and dyn_cast: @@ -4776,12 +4903,15 @@ /// This class represents a cast unsigned integer to floating point. class UIToFPInst : public CastInst { protected: - // Note: Instruction needs to be a friend here to call cloneImpl. + // Note: Instruction needs to be a friend here to call cloneImpl and + // compareInstSpecificProperties. friend class Instruction; /// Clone an identical UIToFPInst UIToFPInst *cloneImpl() const; + int compareInstSpecificProperties(const UIToFPInst *I) const; + public: /// Constructor with insert-before-instruction semantics UIToFPInst( @@ -4793,10 +4923,10 @@ /// Constructor with insert-at-end-of-block semantics UIToFPInst( - Value *S, ///< The value to be converted - Type *Ty, ///< The type to convert to - const Twine &NameStr, ///< A name for the new instruction - BasicBlock *InsertAtEnd ///< The block to insert the instruction into + Value *S, ///< The value to be converted + Type *Ty, ///< The type to convert to + const Twine &NameStr, ///< A name for the new instruction + BasicBlock *InsertAtEnd ///< The block to insert the instruction into ); /// Methods for support type inquiry through isa, cast, and dyn_cast: @@ -4815,12 +4945,15 @@ /// This class represents a cast from signed integer to floating point. class SIToFPInst : public CastInst { protected: - // Note: Instruction needs to be a friend here to call cloneImpl. + // Note: Instruction needs to be a friend here to call cloneImpl and + // compareInstSpecificProperties. friend class Instruction; /// Clone an identical SIToFPInst SIToFPInst *cloneImpl() const; + int compareInstSpecificProperties(const SIToFPInst *I) const; + public: /// Constructor with insert-before-instruction semantics SIToFPInst( @@ -4832,10 +4965,10 @@ /// Constructor with insert-at-end-of-block semantics SIToFPInst( - Value *S, ///< The value to be converted - Type *Ty, ///< The type to convert to - const Twine &NameStr, ///< A name for the new instruction - BasicBlock *InsertAtEnd ///< The block to insert the instruction into + Value *S, ///< The value to be converted + Type *Ty, ///< The type to convert to + const Twine &NameStr, ///< A name for the new instruction + BasicBlock *InsertAtEnd ///< The block to insert the instruction into ); /// Methods for support type inquiry through isa, cast, and dyn_cast: @@ -4854,12 +4987,15 @@ /// This class represents a cast from floating point to unsigned integer class FPToUIInst : public CastInst { protected: - // Note: Instruction needs to be a friend here to call cloneImpl. 
+ // Note: Instruction needs to be a friend here to call cloneImpl and + // compareInstSpecificProperties. friend class Instruction; /// Clone an identical FPToUIInst FPToUIInst *cloneImpl() const; + int compareInstSpecificProperties(const FPToUIInst *I) const; + public: /// Constructor with insert-before-instruction semantics FPToUIInst( @@ -4870,11 +5006,10 @@ ); /// Constructor with insert-at-end-of-block semantics - FPToUIInst( - Value *S, ///< The value to be converted - Type *Ty, ///< The type to convert to - const Twine &NameStr, ///< A name for the new instruction - BasicBlock *InsertAtEnd ///< Where to insert the new instruction + FPToUIInst(Value *S, ///< The value to be converted + Type *Ty, ///< The type to convert to + const Twine &NameStr, ///< A name for the new instruction + BasicBlock *InsertAtEnd ///< Where to insert the new instruction ); /// Methods for support type inquiry through isa, cast, and dyn_cast: @@ -4893,12 +5028,15 @@ /// This class represents a cast from floating point to signed integer. class FPToSIInst : public CastInst { protected: - // Note: Instruction needs to be a friend here to call cloneImpl. + // Note: Instruction needs to be a friend here to call cloneImpl and + // compareInstSpecificProperties. friend class Instruction; /// Clone an identical FPToSIInst FPToSIInst *cloneImpl() const; + int compareInstSpecificProperties(const FPToSIInst *I) const; + public: /// Constructor with insert-before-instruction semantics FPToSIInst( @@ -4910,10 +5048,10 @@ /// Constructor with insert-at-end-of-block semantics FPToSIInst( - Value *S, ///< The value to be converted - Type *Ty, ///< The type to convert to - const Twine &NameStr, ///< A name for the new instruction - BasicBlock *InsertAtEnd ///< The block to insert the instruction into + Value *S, ///< The value to be converted + Type *Ty, ///< The type to convert to + const Twine &NameStr, ///< A name for the new instruction + BasicBlock *InsertAtEnd ///< The block to insert the instruction into ); /// Methods for support type inquiry through isa, cast, and dyn_cast: @@ -4931,6 +5069,13 @@ /// This class represents a cast from an integer to a pointer. class IntToPtrInst : public CastInst { +protected: + // Note: Instruction needs to be a friend here to call + // compareInstSpecificProperties. + friend class Instruction; + + int compareInstSpecificProperties(const IntToPtrInst *I) const; + public: // Note: Instruction needs to be a friend here to call cloneImpl. friend class Instruction; @@ -4975,12 +5120,15 @@ /// This class represents a cast from a pointer to an integer. class PtrToIntInst : public CastInst { protected: - // Note: Instruction needs to be a friend here to call cloneImpl. + // Note: Instruction needs to be a friend here to call cloneImpl and + // compareInstSpecificProperties. friend class Instruction; /// Clone an identical PtrToIntInst. PtrToIntInst *cloneImpl() const; + int compareInstSpecificProperties(const PtrToIntInst *I) const; + public: /// Constructor with insert-before-instruction semantics PtrToIntInst( @@ -5026,12 +5174,15 @@ /// This class represents a no-op cast from one type to another. class BitCastInst : public CastInst { protected: - // Note: Instruction needs to be a friend here to call cloneImpl. + // Note: Instruction needs to be a friend here to call cloneImpl and + // compareInstSpecificProperties. friend class Instruction; /// Clone an identical BitCastInst. 
BitCastInst *cloneImpl() const; + int compareInstSpecificProperties(const BitCastInst *I) const; + public: /// Constructor with insert-before-instruction semantics BitCastInst( @@ -5043,10 +5194,10 @@ /// Constructor with insert-at-end-of-block semantics BitCastInst( - Value *S, ///< The value to be casted - Type *Ty, ///< The type to casted to - const Twine &NameStr, ///< A name for the new instruction - BasicBlock *InsertAtEnd ///< The block to insert the instruction into + Value *S, ///< The value to be casted + Type *Ty, ///< The type to casted to + const Twine &NameStr, ///< A name for the new instruction + BasicBlock *InsertAtEnd ///< The block to insert the instruction into ); // Methods for support type inquiry through isa, cast, and dyn_cast: @@ -5066,12 +5217,15 @@ /// to another. class AddrSpaceCastInst : public CastInst { protected: - // Note: Instruction needs to be a friend here to call cloneImpl. + // Note: Instruction needs to be a friend here to call cloneImpl and + // compareInstSpecificProperties. friend class Instruction; /// Clone an identical AddrSpaceCastInst. AddrSpaceCastInst *cloneImpl() const; + int compareInstSpecificProperties(const AddrSpaceCastInst *I) const; + public: /// Constructor with insert-before-instruction semantics AddrSpaceCastInst( @@ -5083,10 +5237,10 @@ /// Constructor with insert-at-end-of-block semantics AddrSpaceCastInst( - Value *S, ///< The value to be casted - Type *Ty, ///< The type to casted to - const Twine &NameStr, ///< A name for the new instruction - BasicBlock *InsertAtEnd ///< The block to insert the instruction into + Value *S, ///< The value to be casted + Type *Ty, ///< The type to casted to + const Twine &NameStr, ///< A name for the new instruction + BasicBlock *InsertAtEnd ///< The block to insert the instruction into ); // Methods for support type inquiry through isa, cast, and dyn_cast: @@ -5177,12 +5331,15 @@ /// value if an operand is either a poison value or an undef value class FreezeInst : public UnaryInstruction { protected: - // Note: Instruction needs to be a friend here to call cloneImpl. + // Note: Instruction needs to be a friend here to call cloneImpl and + // compareInstSpecificProperties. friend class Instruction; /// Clone an identical FreezeInst FreezeInst *cloneImpl() const; + int compareInstSpecificProperties(const FreezeInst *I) const; + public: explicit FreezeInst(Value *S, const Twine &NameStr = "", diff --git a/llvm/lib/IR/Instruction.cpp b/llvm/lib/IR/Instruction.cpp --- a/llvm/lib/IR/Instruction.cpp +++ b/llvm/lib/IR/Instruction.cpp @@ -401,71 +401,249 @@ } } -/// Return true if both instructions have the same special state. This must be -/// kept in sync with FunctionComparator::cmpOperations in -/// lib/Transforms/IPO/MergeFunctions.cpp. -static bool haveSameSpecialState(const Instruction *I1, const Instruction *I2, - bool IgnoreAlignment = false) { - assert(I1->getOpcode() == I2->getOpcode() && +/// Function to introduce total ordering between two Instructions +/// based on the special state. +/// Returns -1 (this instruction < I) +/// +1 (this instruction > I) +/// 0 (this instruction = I) +/// This is used by FunctionComparator::CmpOperations in +/// lib/Transforms/Utils/FunctionComparator.cpp. +/// Flags can be used to switch on/off consideration of +/// Alignment and MetaData in comparison. 
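+/// For example (illustrative only): for two load instructions L and R of the +/// same type, L->compareSpecialState(R, CompareIgnoringAlignment) returns 0 +/// when they agree on volatility, atomic ordering, sync scope and range +/// metadata, regardless of their alignment.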
+int Instruction::compareSpecialState(const Instruction *I1, + unsigned Flags) const { + assert(getOpcode() == I1->getOpcode() && "Can not compare special state of different instructions"); + assert(getNumOperands() == I1->getNumOperands() && + "Can not compare special state of instructions with different number " + "of operands"); - if (const AllocaInst *AI = dyn_cast(I1)) - return AI->getAllocatedType() == cast(I2)->getAllocatedType() && - (AI->getAlignment() == cast(I2)->getAlignment() || - IgnoreAlignment); - if (const LoadInst *LI = dyn_cast(I1)) - return LI->isVolatile() == cast(I2)->isVolatile() && - (LI->getAlignment() == cast(I2)->getAlignment() || - IgnoreAlignment) && - LI->getOrdering() == cast(I2)->getOrdering() && - LI->getSyncScopeID() == cast(I2)->getSyncScopeID(); - if (const StoreInst *SI = dyn_cast(I1)) - return SI->isVolatile() == cast(I2)->isVolatile() && - (SI->getAlignment() == cast(I2)->getAlignment() || - IgnoreAlignment) && - SI->getOrdering() == cast(I2)->getOrdering() && - SI->getSyncScopeID() == cast(I2)->getSyncScopeID(); - if (const CmpInst *CI = dyn_cast(I1)) - return CI->getPredicate() == cast(I2)->getPredicate(); - if (const CallInst *CI = dyn_cast(I1)) - return CI->isTailCall() == cast(I2)->isTailCall() && - CI->getCallingConv() == cast(I2)->getCallingConv() && - CI->getAttributes() == cast(I2)->getAttributes() && - CI->hasIdenticalOperandBundleSchema(*cast(I2)); - if (const InvokeInst *CI = dyn_cast(I1)) - return CI->getCallingConv() == cast(I2)->getCallingConv() && - CI->getAttributes() == cast(I2)->getAttributes() && - CI->hasIdenticalOperandBundleSchema(*cast(I2)); - if (const CallBrInst *CI = dyn_cast(I1)) - return CI->getCallingConv() == cast(I2)->getCallingConv() && - CI->getAttributes() == cast(I2)->getAttributes() && - CI->hasIdenticalOperandBundleSchema(*cast(I2)); - if (const InsertValueInst *IVI = dyn_cast(I1)) - return IVI->getIndices() == cast(I2)->getIndices(); - if (const ExtractValueInst *EVI = dyn_cast(I1)) - return EVI->getIndices() == cast(I2)->getIndices(); - if (const FenceInst *FI = dyn_cast(I1)) - return FI->getOrdering() == cast(I2)->getOrdering() && - FI->getSyncScopeID() == cast(I2)->getSyncScopeID(); - if (const AtomicCmpXchgInst *CXI = dyn_cast(I1)) - return CXI->isVolatile() == cast(I2)->isVolatile() && - CXI->isWeak() == cast(I2)->isWeak() && - CXI->getSuccessOrdering() == - cast(I2)->getSuccessOrdering() && - CXI->getFailureOrdering() == - cast(I2)->getFailureOrdering() && - CXI->getSyncScopeID() == - cast(I2)->getSyncScopeID(); - if (const AtomicRMWInst *RMWI = dyn_cast(I1)) - return RMWI->getOperation() == cast(I2)->getOperation() && - RMWI->isVolatile() == cast(I2)->isVolatile() && - RMWI->getOrdering() == cast(I2)->getOrdering() && - RMWI->getSyncScopeID() == cast(I2)->getSyncScopeID(); - if (const ShuffleVectorInst *SVI = dyn_cast(I1)) - return SVI->getShuffleMask() == - cast(I2)->getShuffleMask(); - - return true; + IgnoreAlignment CmpIgnoringAlignment = (Flags & CompareIgnoringAlignment) != 0 + ? IgnoreAlignment::Yes + : IgnoreAlignment::No; + IgnoreMetaData CmpIgnoringMetaData = (Flags & CompareIgnoringMetaData) != 0 + ? IgnoreMetaData::Yes + : IgnoreMetaData::No; + + switch (getOpcode()) { + default: + // Trying to compare an instruction whose Opcode is unknown to this + // function. This leads to failure when new instructions are added + // but this function is not updated. 
+ llvm_unreachable("Instructions to compare are unknown"); + case Instruction::Alloca: { + const AllocaInst *AI = cast<AllocaInst>(this); + return AI->compareInstSpecificProperties(cast<AllocaInst>(I1), + CmpIgnoringAlignment); + } + case Instruction::Load: { + const LoadInst *LI = cast<LoadInst>(this); + return LI->compareInstSpecificProperties( + cast<LoadInst>(I1), CmpIgnoringAlignment, CmpIgnoringMetaData); + } + case Instruction::Store: { + const StoreInst *SI = cast<StoreInst>(this); + return SI->compareInstSpecificProperties(cast<StoreInst>(I1), + CmpIgnoringAlignment); + } + case Instruction::ICmp: + case Instruction::FCmp: { + const CmpInst *C = cast<CmpInst>(this); + return C->compareInstSpecificProperties(cast<CmpInst>(I1)); + } + case Instruction::Call: { + const CallInst *CI = cast<CallInst>(this); + return CI->compareInstSpecificProperties(cast<CallInst>(I1), + CmpIgnoringMetaData); + } + case Instruction::Invoke: { + const InvokeInst *II = cast<InvokeInst>(this); + return II->compareInstSpecificProperties(cast<InvokeInst>(I1), + CmpIgnoringMetaData); + } + case Instruction::CallBr: { + const CallBrInst *CBI = cast<CallBrInst>(this); + return CBI->compareInstSpecificProperties(cast<CallBrInst>(I1), + CmpIgnoringMetaData); + } + case Instruction::InsertValue: { + const InsertValueInst *IVI = cast<InsertValueInst>(this); + return IVI->compareInstSpecificProperties(cast<InsertValueInst>(I1)); + } + case Instruction::ExtractValue: { + const ExtractValueInst *EVI = cast<ExtractValueInst>(this); + return EVI->compareInstSpecificProperties(cast<ExtractValueInst>(I1)); + } + case Instruction::Fence: { + const FenceInst *FI = cast<FenceInst>(this); + return FI->compareInstSpecificProperties(cast<FenceInst>(I1)); + } + case Instruction::AtomicCmpXchg: { + const AtomicCmpXchgInst *CXI = cast<AtomicCmpXchgInst>(this); + return CXI->compareInstSpecificProperties(cast<AtomicCmpXchgInst>(I1)); + } + case Instruction::AtomicRMW: { + const AtomicRMWInst *RMWI = cast<AtomicRMWInst>(this); + return RMWI->compareInstSpecificProperties(cast<AtomicRMWInst>(I1)); + } + case Instruction::ShuffleVector: { + const ShuffleVectorInst *SVI = cast<ShuffleVectorInst>(this); + return SVI->compareInstSpecificProperties(cast<ShuffleVectorInst>(I1)); + } + case Instruction::Ret: { + const ReturnInst *RI = cast<ReturnInst>(this); + return RI->compareInstSpecificProperties(cast<ReturnInst>(I1)); + } + case Instruction::Br: { + const BranchInst *BI = cast<BranchInst>(this); + return BI->compareInstSpecificProperties(cast<BranchInst>(I1)); + } + case Instruction::Switch: { + const SwitchInst *SI = cast<SwitchInst>(this); + return SI->compareInstSpecificProperties(cast<SwitchInst>(I1)); + } + case Instruction::IndirectBr: { + const IndirectBrInst *IBI = cast<IndirectBrInst>(this); + return IBI->compareInstSpecificProperties(cast<IndirectBrInst>(I1)); + } + case Instruction::Resume: { + const ResumeInst *RI = cast<ResumeInst>(this); + return RI->compareInstSpecificProperties(cast<ResumeInst>(I1)); + } + case Instruction::CatchSwitch: { + const CatchSwitchInst *CSI = cast<CatchSwitchInst>(this); + return CSI->compareInstSpecificProperties(cast<CatchSwitchInst>(I1)); + } + case Instruction::CatchRet: { + const CatchReturnInst *CRI = cast<CatchReturnInst>(this); + return CRI->compareInstSpecificProperties(cast<CatchReturnInst>(I1)); + } + case Instruction::CleanupRet: { + const CleanupReturnInst *CRI = cast<CleanupReturnInst>(this); + return CRI->compareInstSpecificProperties(cast<CleanupReturnInst>(I1)); + } + case Instruction::Unreachable: { + const UnreachableInst *UI = cast<UnreachableInst>(this); + return UI->compareInstSpecificProperties(cast<UnreachableInst>(I1)); + } + case Instruction::FNeg: { + const UnaryOperator *UOP = cast<UnaryOperator>(this); + return UOP->compareInstSpecificProperties(cast<UnaryOperator>(I1)); + }
+ case Instruction::Add: + case Instruction::FAdd: + case Instruction::Sub: + case Instruction::FSub: + case Instruction::Mul: + case Instruction::FMul: + case Instruction::UDiv: + case Instruction::SDiv: + case Instruction::FDiv: + case Instruction::URem: + case Instruction::SRem: + case Instruction::FRem: + case Instruction::Shl: + case Instruction::LShr: + case Instruction::AShr: + case Instruction::And: + case Instruction::Or: + case Instruction::Xor: { + const BinaryOperator *BOP = cast<BinaryOperator>(this); + return BOP->compareInstSpecificProperties(cast<BinaryOperator>(I1)); + } + case Instruction::GetElementPtr: { + const GetElementPtrInst *GEP = cast<GetElementPtrInst>(this); + return GEP->compareInstSpecificProperties(cast<GetElementPtrInst>(I1)); + } + case Instruction::Trunc: { + const TruncInst *TI = cast<TruncInst>(this); + return TI->compareInstSpecificProperties(cast<TruncInst>(I1)); + } + case Instruction::ZExt: { + const ZExtInst *ZEI = cast<ZExtInst>(this); + return ZEI->compareInstSpecificProperties(cast<ZExtInst>(I1)); + } + case Instruction::SExt: { + const SExtInst *SEI = cast<SExtInst>(this); + return SEI->compareInstSpecificProperties(cast<SExtInst>(I1)); + } + case Instruction::FPToUI: { + const FPToUIInst *FUI = cast<FPToUIInst>(this); + return FUI->compareInstSpecificProperties(cast<FPToUIInst>(I1)); + } + case Instruction::FPToSI: { + const FPToSIInst *FSI = cast<FPToSIInst>(this); + return FSI->compareInstSpecificProperties(cast<FPToSIInst>(I1)); + } + case Instruction::UIToFP: { + const UIToFPInst *UFI = cast<UIToFPInst>(this); + return UFI->compareInstSpecificProperties(cast<UIToFPInst>(I1)); + } + case Instruction::SIToFP: { + const SIToFPInst *SFI = cast<SIToFPInst>(this); + return SFI->compareInstSpecificProperties(cast<SIToFPInst>(I1)); + } + case Instruction::FPTrunc: { + const FPTruncInst *FTI = cast<FPTruncInst>(this); + return FTI->compareInstSpecificProperties(cast<FPTruncInst>(I1)); + } + case Instruction::FPExt: { + const FPExtInst *FEI = cast<FPExtInst>(this); + return FEI->compareInstSpecificProperties(cast<FPExtInst>(I1)); + } + case Instruction::PtrToInt: { + const PtrToIntInst *PII = cast<PtrToIntInst>(this); + return PII->compareInstSpecificProperties(cast<PtrToIntInst>(I1)); + } + case Instruction::IntToPtr: { + const IntToPtrInst *IPI = cast<IntToPtrInst>(this); + return IPI->compareInstSpecificProperties(cast<IntToPtrInst>(I1)); + } + case Instruction::BitCast: { + const BitCastInst *BI = cast<BitCastInst>(this); + return BI->compareInstSpecificProperties(cast<BitCastInst>(I1)); + } + case Instruction::AddrSpaceCast: { + const AddrSpaceCastInst *ACI = cast<AddrSpaceCastInst>(this); + return ACI->compareInstSpecificProperties(cast<AddrSpaceCastInst>(I1)); + } + case Instruction::CleanupPad: { + const CleanupPadInst *CPI = cast<CleanupPadInst>(this); + return CPI->compareInstSpecificProperties(cast<CleanupPadInst>(I1)); + } + case Instruction::CatchPad: { + const CatchPadInst *CPI = cast<CatchPadInst>(this); + return CPI->compareInstSpecificProperties(cast<CatchPadInst>(I1)); + } + case Instruction::PHI: { + const PHINode *PI = cast<PHINode>(this); + return PI->compareInstSpecificProperties(cast<PHINode>(I1)); + } + case Instruction::Select: { + const SelectInst *SI = cast<SelectInst>(this); + return SI->compareInstSpecificProperties(cast<SelectInst>(I1)); + } + case Instruction::VAArg: { + const VAArgInst *VAI = cast<VAArgInst>(this); + return VAI->compareInstSpecificProperties(cast<VAArgInst>(I1)); + } + case Instruction::ExtractElement: { + const ExtractElementInst *EEI = cast<ExtractElementInst>(this); + return EEI->compareInstSpecificProperties(cast<ExtractElementInst>(I1)); + } + case Instruction::InsertElement: { + const InsertElementInst *IEI = cast<InsertElementInst>(this); + return IEI->compareInstSpecificProperties(cast<InsertElementInst>(I1)); + } + case Instruction::LandingPad: { + const LandingPadInst *LPI = cast<LandingPadInst>(this); + return LPI->compareInstSpecificProperties(cast<LandingPadInst>(I1)); + } + case Instruction::Freeze: { + const FreezeInst *FI = cast<FreezeInst>(this); + return FI->compareInstSpecificProperties(cast<FreezeInst>(I1)); + } + } } bool Instruction::isIdenticalTo(const Instruction *I) const { @@ -475,13 +653,12 @@ bool Instruction::isIdenticalToWhenDefined(const Instruction *I) const { if (getOpcode() != I->getOpcode() || - getNumOperands() != I->getNumOperands() || - getType() != I->getType()) + getNumOperands() != I->getNumOperands() ||
getType() != I->getType()) return false; // If both instructions have no operands, they are identical. if (getNumOperands() == 0 && I->getNumOperands() == 0) - return haveSameSpecialState(this, I); + return compareSpecialState(I, CompareIgnoringMetaData) == 0; // We have two instructions of identical opcode and #operands. Check to see // if all operands are the same. @@ -494,14 +671,13 @@ otherPHI->block_begin()); } - return haveSameSpecialState(this, I); + return compareSpecialState(I, CompareIgnoringMetaData) == 0; } // Keep this in sync with FunctionComparator::cmpOperations in // lib/Transforms/IPO/MergeFunctions.cpp. bool Instruction::isSameOperationAs(const Instruction *I, unsigned flags) const { - bool IgnoreAlignment = flags & CompareIgnoringAlignment; bool UseScalarTypes = flags & CompareUsingScalarTypes; if (getOpcode() != I->getOpcode() || @@ -514,13 +690,13 @@ // We have two instructions of identical opcode and #operands. Check to see // if all operands are the same type for (unsigned i = 0, e = getNumOperands(); i != e; ++i) - if (UseScalarTypes ? - getOperand(i)->getType()->getScalarType() != - I->getOperand(i)->getType()->getScalarType() : - getOperand(i)->getType() != I->getOperand(i)->getType()) + if (UseScalarTypes + ? getOperand(i)->getType()->getScalarType() != + I->getOperand(i)->getType()->getScalarType() + : getOperand(i)->getType() != I->getOperand(i)->getType()) return false; - return haveSameSpecialState(this, I, IgnoreAlignment); + return compareSpecialState(I, flags | CompareIgnoringMetaData) == 0; } bool Instruction::isUsedOutsideOfBlock(const BasicBlock *BB) const { diff --git a/llvm/lib/IR/Instructions.cpp b/llvm/lib/IR/Instructions.cpp --- a/llvm/lib/IR/Instructions.cpp +++ b/llvm/lib/IR/Instructions.cpp @@ -45,6 +45,233 @@ using namespace llvm; +// Functions for comparison of basic types used for +// introducing total ordering between IR instructions. +static int compareIntegers(uint64_t L, uint64_t R) { + if (L < R) + return -1; + if (L > R) + return 1; + return 0; +} + +static int compareBools(bool L, bool R) { + if (L == R) + return 0; + if (L) + return 1; + return -1; +} + +static int compareOrderings(AtomicOrdering L, AtomicOrdering R) { + if ((int)L < (int)R) + return -1; + if ((int)L > (int)R) + return 1; + return 0; +} + +static int compareAPInts(const APInt &L, const APInt &R) { + if (L.getBitWidth() == R.getBitWidth()) { + if (L.ugt(R)) + return 1; + if (R.ugt(L)) + return -1; + return 0; + } else if (L.getBitWidth() > R.getBitWidth()) { + if (L.ugt(R.zext(L.getBitWidth()))) + return 1; + if (R.zext(L.getBitWidth()).ugt(L)) + return -1; + return 0; + } else { + if (L.zext(R.getBitWidth()).ugt(R)) + return 1; + if (R.ugt(L.zext(R.getBitWidth()))) + return -1; + return 0; + } +} + +static int compareRangeMetadata(const MDNode *L, const MDNode *R) { + if (L == R) + return 0; + if (!L) + return -1; + if (!R) + return 1; + // Range metadata is a sequence of numbers. Make sure they are the same + // sequence. + // TODO: Note that as this is metadata, it is possible to drop and/or merge + // this data when considering functions to merge. Thus this comparison would + // return 0 (i.e. equivalent), but merging would become more complicated + // because the ranges would need to be unioned. It is not likely that + // functions differ ONLY in this metadata if they are actually the same + // function semantically.
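+ // A !range node encodes half-open intervals [Lo, Hi) as consecutive + // ConstantInt pairs, e.g. !{i32 0, i32 256} for [0, 256); its operands + // are compared element-wise, in order, below.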
+ if (int Res = compareIntegers(L->getNumOperands(), R->getNumOperands())) + return Res; + for (size_t I = 0; I < L->getNumOperands(); ++I) { + ConstantInt *LLow = mdconst::extract<ConstantInt>(L->getOperand(I)); + ConstantInt *RLow = mdconst::extract<ConstantInt>(R->getOperand(I)); + if (int Res = compareAPInts(LLow->getValue(), RLow->getValue())) + return Res; + } + return 0; +} + +static int compareOperandBundlesSchema(const CallBase &LCS, + const CallBase &RCS) { + assert(LCS.getOpcode() == RCS.getOpcode() && "Can't compare otherwise!"); + + if (int Res = compareIntegers(LCS.getNumOperandBundles(), + RCS.getNumOperandBundles())) + return Res; + + for (unsigned I = 0, E = LCS.getNumOperandBundles(); I != E; ++I) { + auto OBL = LCS.getOperandBundleAt(I); + auto OBR = RCS.getOperandBundleAt(I); + + if (int Res = OBL.getTagName().compare(OBR.getTagName())) + return Res; + + if (int Res = compareIntegers(OBL.Inputs.size(), OBR.Inputs.size())) + return Res; + } + + return 0; +} + +static int compareTypes(Type *TyL, Type *TyR) { + if (TyL == TyR) + return 0; + + if (int Res = compareIntegers(TyL->getTypeID(), TyR->getTypeID())) + return Res; + + switch (TyL->getTypeID()) { + default: + llvm_unreachable("Unknown type!"); + case Type::IntegerTyID: + return compareIntegers(cast<IntegerType>(TyL)->getBitWidth(), + cast<IntegerType>(TyR)->getBitWidth()); + // TyL == TyR would have returned true earlier, because types are uniqued. + case Type::VoidTyID: + case Type::FloatTyID: + case Type::DoubleTyID: + case Type::X86_FP80TyID: + case Type::FP128TyID: + case Type::PPC_FP128TyID: + case Type::LabelTyID: + case Type::MetadataTyID: + case Type::TokenTyID: + return 0; + + case Type::PointerTyID: { + PointerType *PTyL = dyn_cast<PointerType>(TyL); + PointerType *PTyR = dyn_cast<PointerType>(TyR); + assert(PTyL && PTyR && "Both types must be pointers here."); + if (PTyL->getAddressSpace() != PTyR->getAddressSpace()) + return compareIntegers(PTyL->getAddressSpace(), PTyR->getAddressSpace()); + // Comparing the element types of both the pointers.
+ return compareTypes(PTyL->getElementType(), PTyR->getElementType()); + } + + case Type::StructTyID: { + StructType *STyL = cast<StructType>(TyL); + StructType *STyR = cast<StructType>(TyR); + if (STyL->getNumElements() != STyR->getNumElements()) + return compareIntegers(STyL->getNumElements(), STyR->getNumElements()); + + if (STyL->isPacked() != STyR->isPacked()) + return compareIntegers(STyL->isPacked(), STyR->isPacked()); + + for (unsigned I = 0, E = STyL->getNumElements(); I != E; ++I) { + if (int Res = + compareTypes(STyL->getElementType(I), STyR->getElementType(I))) + return Res; + } + return 0; + } + + case Type::FunctionTyID: { + FunctionType *FTyL = cast<FunctionType>(TyL); + FunctionType *FTyR = cast<FunctionType>(TyR); + if (FTyL->getNumParams() != FTyR->getNumParams()) + return compareIntegers(FTyL->getNumParams(), FTyR->getNumParams()); + + if (FTyL->isVarArg() != FTyR->isVarArg()) + return compareIntegers(FTyL->isVarArg(), FTyR->isVarArg()); + + if (int Res = compareTypes(FTyL->getReturnType(), FTyR->getReturnType())) + return Res; + + for (unsigned I = 0, E = FTyL->getNumParams(); I != E; ++I) { + if (int Res = compareTypes(FTyL->getParamType(I), FTyR->getParamType(I))) + return Res; + } + return 0; + } + + case Type::ArrayTyID: { + auto *STyL = cast<ArrayType>(TyL); + auto *STyR = cast<ArrayType>(TyR); + if (STyL->getNumElements() != STyR->getNumElements()) + return compareIntegers(STyL->getNumElements(), STyR->getNumElements()); + return compareTypes(STyL->getElementType(), STyR->getElementType()); + } + case Type::FixedVectorTyID: + case Type::ScalableVectorTyID: { + auto *STyL = cast<VectorType>(TyL); + auto *STyR = cast<VectorType>(TyR); + if (STyL->getElementCount().Scalable != STyR->getElementCount().Scalable) + return compareIntegers(STyL->getElementCount().Scalable, + STyR->getElementCount().Scalable); + if (STyL->getElementCount().Min != STyR->getElementCount().Min) + return compareIntegers(STyL->getElementCount().Min, + STyR->getElementCount().Min); + return compareTypes(STyL->getElementType(), STyR->getElementType()); + } + } +} + +int compareAttrs(const AttributeList L, const AttributeList R) { + if (int Res = compareIntegers(L.getNumAttrSets(), R.getNumAttrSets())) + return Res; + + for (unsigned I = L.index_begin(), E = L.index_end(); I != E; ++I) { + AttributeSet LAS = L.getAttributes(I); + AttributeSet RAS = R.getAttributes(I); + AttributeSet::iterator LI = LAS.begin(), LE = LAS.end(); + AttributeSet::iterator RI = RAS.begin(), RE = RAS.end(); + for (; LI != LE && RI != RE; ++LI, ++RI) { + Attribute LA = *LI; + Attribute RA = *RI; + if (LA.isTypeAttribute() && RA.isTypeAttribute()) { + if (LA.getKindAsEnum() != RA.getKindAsEnum()) + return compareIntegers(LA.getKindAsEnum(), RA.getKindAsEnum()); + + Type *TyL = LA.getValueAsType(); + Type *TyR = RA.getValueAsType(); + if (TyL && TyR) + return compareTypes(TyL, TyR); + + // Two pointers, at least one null, so the comparison result is + // independent of the value of a real pointer. + return compareIntegers((uint64_t)TyL, (uint64_t)TyR); + } + if (LA < RA) + return -1; + if (RA < LA) + return 1; + } + if (LI != LE) + return 1; + if (RI != RE) + return -1; + } + return 0; +} + //===----------------------------------------------------------------------===// // AllocaInst Class //===----------------------------------------------------------------------===// @@ -90,6 +317,16 @@ return nullptr; } +// Function to introduce total ordering between two Select Instructions.
+// Returns -1 (this instruction < I) +// +1 (this instruction > I) +// 0 (this instruction = I) +// Any addition of operands in SelectInst class need to be reflected here +// so that the IR comparators stay in sync with the changes to IR. +int SelectInst::compareInstSpecificProperties(const SelectInst *I) const { + return 0; +} + //===----------------------------------------------------------------------===// // PHINode Class //===----------------------------------------------------------------------===// @@ -177,6 +414,14 @@ return true; } +// Function to introduce total ordering between two PHI Instructions. +// Returns -1 (this instruction < I) +// +1 (this instruction > I) +// 0 (this instruction = I) +// Any addition of operands in PHINode class need to be reflected here +// so that the IR comparators stay in sync with the changes to IR. +int PHINode::compareInstSpecificProperties(const PHINode *I) const { return 0; } + //===----------------------------------------------------------------------===// // LandingPadInst Implementation //===----------------------------------------------------------------------===// @@ -243,6 +488,17 @@ getOperandList()[OpNo] = Val; } +// Function to introduce total ordering between two LandingPad Instructions. +// Returns -1 (this instruction < I) +// +1 (this instruction > I) +// 0 (this instruction = I) +// Any addition of operands in LandingPadInst class need to be reflected here +// so that the IR comparators stay in sync with the changes to IR. +int LandingPadInst::compareInstSpecificProperties( + const LandingPadInst *I) const { + return compareBools(isCleanup(), I->isCleanup()); +} + //===----------------------------------------------------------------------===// // CallBase Implementation //===----------------------------------------------------------------------===// @@ -750,6 +1006,27 @@ return FreeCall; } +// Function to introduce total ordering between two Call Instructions. +// Returns -1 (this instruction < I) +// +1 (this instruction > I) +// 0 (this instruction = I) +// Any addition of operands in CallInst class need to be reflected here +// so that the IR comparators stay in sync with the changes to IR. +int CallInst::compareInstSpecificProperties( + const CallInst *I, IgnoreMetaData CmpIgnoringMetaData) const { + if (int Res = compareIntegers(getCallingConv(), I->getCallingConv())) + return Res; + if (int Res = compareAttrs(getAttributes(), I->getAttributes())) + return Res; + if (int Res = compareOperandBundlesSchema(*this, *I)) + return Res; + if (CmpIgnoringMetaData == IgnoreMetaData::No) + if (int Res = compareRangeMetadata(getMetadata(LLVMContext::MD_range), + I->getMetadata(LLVMContext::MD_range))) + return Res; + return compareIntegers(getTailCallKind(), I->getTailCallKind()); +} + //===----------------------------------------------------------------------===// // InvokeInst Implementation //===----------------------------------------------------------------------===// @@ -812,11 +1089,29 @@ return NewII; } - LandingPadInst *InvokeInst::getLandingPadInst() const { return cast(getUnwindDest()->getFirstNonPHI()); } +// Function to introduce total ordering between two Invoke Instructions. +// Returns -1 (this instruction < I) +// +1 (this instruction > I) +// 0 (this instruction = I) +// Any addition of operands in InvokeInst class need to be reflected here +// so that the IR comparators stay in sync with the changes to IR. 
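+// Note that only the operand bundle schema (tag names and input counts) is +// checked here; the bundle inputs themselves are ordinary call operands and +// are compared by the callers that walk the operand lists.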
+int InvokeInst::compareInstSpecificProperties( + const InvokeInst *I, IgnoreMetaData CmpIgnoringMetaData) const { + if (int Res = compareIntegers(getCallingConv(), I->getCallingConv())) + return Res; + if (int Res = compareAttrs(getAttributes(), I->getAttributes())) + return Res; + if (CmpIgnoringMetaData == IgnoreMetaData::No) + if (int Res = compareRangeMetadata(getMetadata(LLVMContext::MD_range), + I->getMetadata(LLVMContext::MD_range))) + return Res; + return compareOperandBundlesSchema(*this, *I); +} + //===----------------------------------------------------------------------===// // CallBrInst Implementation //===----------------------------------------------------------------------===// @@ -896,6 +1191,25 @@ return NewCBI; } +// Function to introduce total ordering between two CallBr Instructions. +// Returns -1 (this instruction < I) +// +1 (this instruction > I) +// 0 (this instruction = I) +// Any addition of operands in CallBrInst class need to be reflected here +// so that the IR comparators stay in sync with the changes to IR. +int CallBrInst::compareInstSpecificProperties( + const CallBrInst *I, IgnoreMetaData CmpIgnoringMetaData) const { + if (int Res = compareIntegers(getCallingConv(), I->getCallingConv())) + return Res; + if (int Res = compareAttrs(getAttributes(), I->getAttributes())) + return Res; + if (CmpIgnoringMetaData == IgnoreMetaData::No) + if (int Res = compareRangeMetadata(getMetadata(LLVMContext::MD_range), + I->getMetadata(LLVMContext::MD_range))) + return Res; + return compareOperandBundlesSchema(*this, *I); +} + //===----------------------------------------------------------------------===// // ReturnInst Implementation //===----------------------------------------------------------------------===// @@ -929,6 +1243,16 @@ : Instruction(Type::getVoidTy(Context), Instruction::Ret, OperandTraits::op_end(this), 0, InsertAtEnd) {} +// Function to introduce total ordering between two Return Instructions. +// Returns -1 (this instruction < I) +// +1 (this instruction > I) +// 0 (this instruction = I) +// Any addition of operands in ReturnInst class need to be reflected here +// so that the IR comparators stay in sync with the changes to IR. +int ReturnInst::compareInstSpecificProperties(const ReturnInst *I) const { + return 0; +} + //===----------------------------------------------------------------------===// // ResumeInst Implementation //===----------------------------------------------------------------------===// @@ -951,6 +1275,16 @@ Op<0>() = Exn; } +// Function to introduce total ordering between two Resume Instructions. +// Returns -1 (this instruction < I) +// +1 (this instruction > I) +// 0 (this instruction = I) +// Any addition of operands in ResumeInst class need to be reflected here +// so that the IR comparators stay in sync with the changes to IR. +int ResumeInst::compareInstSpecificProperties(const ResumeInst *I) const { + return 0; +} + //===----------------------------------------------------------------------===// // CleanupReturnInst Implementation //===----------------------------------------------------------------------===// @@ -993,6 +1327,17 @@ init(CleanupPad, UnwindBB); } +// Function to introduce total ordering between two CleanupReturn Instructions. +// Returns -1 (this instruction < I) +// +1 (this instruction > I) +// 0 (this instruction = I) +// Any addition of operands in CleanupReturnInst class need to be reflected here +// so that the IR comparators stay in sync with the changes to IR. 
+int CleanupReturnInst::compareInstSpecificProperties( + const CleanupReturnInst *I) const { + return compareBools(unwindsToCaller(), I->unwindsToCaller()); +} + //===----------------------------------------------------------------------===// // CatchReturnInst Implementation //===----------------------------------------------------------------------===// @@ -1024,6 +1369,17 @@ init(CatchPad, BB); } +// Function to introduce total ordering between two CatchReturn Instructions. +// Returns -1 (this instruction < I) +// +1 (this instruction > I) +// 0 (this instruction = I) +// Any addition of operands in CatchReturnInst class need to be reflected here +// so that the IR comparators stay in sync with the changes to IR. +int CatchReturnInst::compareInstSpecificProperties( + const CatchReturnInst *I) const { + return 0; +} + //===----------------------------------------------------------------------===// // CatchSwitchInst Implementation //===----------------------------------------------------------------------===// @@ -1107,6 +1463,17 @@ setNumHungOffUseOperands(getNumOperands() - 1); } +// Function to introduce total ordering between two CatchSwitch Instructions. +// Returns -1 (this instruction < I) +// +1 (this instruction > I) +// 0 (this instruction = I) +// Any addition of operands in CatchSwitchInst class need to be reflected here +// so that the IR comparators stay in sync with the changes to IR. +int CatchSwitchInst::compareInstSpecificProperties( + const CatchSwitchInst *I) const { + return compareBools(unwindsToCaller(), I->unwindsToCaller()); +} + //===----------------------------------------------------------------------===// // FuncletPadInst Implementation //===----------------------------------------------------------------------===// @@ -1157,6 +1524,17 @@ : Instruction(Type::getVoidTy(Context), Instruction::Unreachable, nullptr, 0, InsertAtEnd) {} +// Function to introduce total ordering between two Unreachable Instructions. +// Returns -1 (this instruction < I) +// +1 (this instruction > I) +// 0 (this instruction = I) +// Any addition of operands in UnreachableInst class need to be reflected here +// so that the IR comparators stay in sync with the changes to IR. +int UnreachableInst::compareInstSpecificProperties( + const UnreachableInst *I) const { + return 0; +} + //===----------------------------------------------------------------------===// // BranchInst Implementation //===----------------------------------------------------------------------===// @@ -1230,6 +1608,16 @@ swapProfMetadata(); } +// Function to introduce total ordering between two Branch Instructions. +// Returns -1 (this instruction < I) +// +1 (this instruction > I) +// 0 (this instruction = I) +// Any addition of operands in BranchInst class need to be reflected here +// so that the IR comparators stay in sync with the changes to IR. +int BranchInst::compareInstSpecificProperties(const BranchInst *I) const { + return 0; +} + //===----------------------------------------------------------------------===// // AllocaInst Implementation //===----------------------------------------------------------------------===// @@ -1315,13 +1703,28 @@ /// into the prolog/epilog code, so it is basically free. bool AllocaInst::isStaticAlloca() const { // Must be constant size. - if (!isa(getArraySize())) return false; + if (!isa(getArraySize())) + return false; // Must be in the entry block. 
const BasicBlock *Parent = getParent(); return Parent == &Parent->getParent()->front() && !isUsedWithInAlloca(); } +// Function to introduce total ordering between two Alloca Instructions. +// Returns -1 (this instruction < AI) +// +1 (this instruction > AI) +// 0 (this instruction = AI) +// Any addition of operands in AllocaInst class need to be reflected here +// so that the IR comparators stay in sync with the changes to IR. +int AllocaInst::compareInstSpecificProperties( + const AllocaInst *AI, IgnoreAlignment CmpIgnoringAlignment) const { + if (CmpIgnoringAlignment == IgnoreAlignment::No) + if (int Res = compareIntegers(getAlignment(), AI->getAlignment())) + return Res; + return compareTypes(getAllocatedType(), AI->getAllocatedType()); +} + //===----------------------------------------------------------------------===// // LoadInst Implementation //===----------------------------------------------------------------------===// @@ -1402,6 +1805,29 @@ assert(getAlign() == Align && "Alignment representation error!"); } +// Function to introduce total ordering between two Load Instructions. +// Returns -1 (this instruction < LI) +// +1 (this instruction > LI) +// 0 (this instruction = LI) +// Any addition of operands in LoadInst class need to be reflected here +// so that the IR comparators stay in sync with the changes to IR. +int LoadInst::compareInstSpecificProperties( + const LoadInst *LI, IgnoreAlignment CmpIgnoringAlignment, + IgnoreMetaData CmpIgnoringMetaData) const { + if (int Res = compareIntegers(isVolatile(), LI->isVolatile())) + return Res; + if (CmpIgnoringAlignment == IgnoreAlignment::No) + if (int Res = compareIntegers(getAlignment(), LI->getAlignment())) + return Res; + if (int Res = compareOrderings(getOrdering(), LI->getOrdering())) + return Res; + if (CmpIgnoringMetaData == IgnoreMetaData::No) + if (int Res = compareRangeMetadata(getMetadata(LLVMContext::MD_range), + LI->getMetadata(LLVMContext::MD_range))) + return Res; + return compareIntegers(getSyncScopeID(), LI->getSyncScopeID()); +} + //===----------------------------------------------------------------------===// // StoreInst Implementation //===----------------------------------------------------------------------===// @@ -1481,6 +1907,24 @@ assert(getAlign() == Alignment && "Alignment representation error!"); } +// Function to introduce total ordering between two Store Instructions. +// Returns -1 (this instruction < SI) +// +1 (this instruction > SI) +// 0 (this instruction = SI) +// Any addition of operands in StoreInst class need to be reflected here +// so that the IR comparators stay in sync with the changes to IR. +int StoreInst::compareInstSpecificProperties( + const StoreInst *SI, IgnoreAlignment CmpIgnoringAlignment) const { + if (int Res = compareIntegers(isVolatile(), SI->isVolatile())) + return Res; + if (CmpIgnoringAlignment == IgnoreAlignment::No) + if (int Res = compareIntegers(getAlignment(), SI->getAlignment())) + return Res; + if (int Res = compareOrderings(getOrdering(), SI->getOrdering())) + return Res; + return compareIntegers(getSyncScopeID(), SI->getSyncScopeID()); +} + //===----------------------------------------------------------------------===// // AtomicCmpXchgInst Implementation //===----------------------------------------------------------------------===// @@ -1542,6 +1986,25 @@ Init(Ptr, Cmp, NewVal, SuccessOrdering, FailureOrdering, SSID); } +// Function to introduce total ordering between two AtomicCmpXchg Instructions. 
+// Returns -1 (this instruction < I) +// +1 (this instruction > I) +// 0 (this instruction = I) +// Any addition of operands in AtomicCmpXchgInst class need to be reflected here +// so that the IR comparators stay in sync with the changes to IR. +int AtomicCmpXchgInst::compareInstSpecificProperties( + const AtomicCmpXchgInst *I) const { + if (int Res = compareIntegers(isVolatile(), I->isVolatile())) + return Res; + if (int Res = compareIntegers(isWeak(), I->isWeak())) + return Res; + if (int Res = compareOrderings(getSuccessOrdering(), I->getSuccessOrdering())) + return Res; + if (int Res = compareOrderings(getFailureOrdering(), I->getFailureOrdering())) + return Res; + return compareIntegers(getSyncScopeID(), I->getSyncScopeID()); +} + //===----------------------------------------------------------------------===// // AtomicRMWInst Implementation //===----------------------------------------------------------------------===// @@ -1623,26 +2086,52 @@ llvm_unreachable("invalid atomicrmw operation"); } +// Function to introduce total ordering between two AtomicRMW Instructions. +// Returns -1 (this instruction < I) +// +1 (this instruction > I) +// 0 (this instruction = I) +// Any addition of operands in AtomicRMWInst class need to be reflected here +// so that the IR comparators stay in sync with the changes to IR. +int AtomicRMWInst::compareInstSpecificProperties(const AtomicRMWInst *I) const { + if (int Res = compareIntegers(getOperation(), I->getOperation())) + return Res; + if (int Res = compareIntegers(isVolatile(), I->isVolatile())) + return Res; + if (int Res = compareOrderings(getOrdering(), I->getOrdering())) + return Res; + return compareIntegers(getSyncScopeID(), I->getSyncScopeID()); +} + //===----------------------------------------------------------------------===// // FenceInst Implementation //===----------------------------------------------------------------------===// FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering, - SyncScope::ID SSID, - Instruction *InsertBefore) - : Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertBefore) { + SyncScope::ID SSID, Instruction *InsertBefore) + : Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertBefore) { setOrdering(Ordering); setSyncScopeID(SSID); } FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering, - SyncScope::ID SSID, - BasicBlock *InsertAtEnd) - : Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertAtEnd) { + SyncScope::ID SSID, BasicBlock *InsertAtEnd) + : Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertAtEnd) { setOrdering(Ordering); setSyncScopeID(SSID); } +// Function to introduce total ordering between two Fence Instructions. +// Returns -1 (this instruction < FI) +// +1 (this instruction > FI) +// 0 (this instruction = FI) +// Any addition of operands in FenceInst class need to be reflected here +// so that the IR comparators stay in sync with the changes to IR. +int FenceInst::compareInstSpecificProperties(const FenceInst *FI) const { + if (int Res = compareOrderings(getOrdering(), FI->getOrdering())) + return Res; + return compareIntegers(getSyncScopeID(), FI->getSyncScopeID()); +} + //===----------------------------------------------------------------------===// // GetElementPtrInst Implementation //===----------------------------------------------------------------------===// @@ -1759,6 +2248,17 @@ return cast(this)->accumulateConstantOffset(DL, Offset); } +// Function to introduce total ordering between two GetElementPtr Instructions. 
+// Returns -1 (this instruction < I) +// +1 (this instruction > I) +// 0 (this instruction = I) +// Any addition of operands in GetElementPtrInst class need to be reflected here +// so that the IR comparators stay in sync with the changes to IR. +int GetElementPtrInst::compareInstSpecificProperties( + const GetElementPtrInst *I) const { + return 0; +} + //===----------------------------------------------------------------------===// // ExtractElementInst Implementation //===----------------------------------------------------------------------===// @@ -1778,12 +2278,10 @@ } ExtractElementInst::ExtractElementInst(Value *Val, Value *Index, - const Twine &Name, - BasicBlock *InsertAE) - : Instruction(cast(Val->getType())->getElementType(), - ExtractElement, - OperandTraits::op_begin(this), - 2, InsertAE) { + const Twine &Name, BasicBlock *InsertAE) + : Instruction( + cast(Val->getType())->getElementType(), ExtractElement, + OperandTraits::op_begin(this), 2, InsertAE) { assert(isValidOperands(Val, Index) && "Invalid extractelement instruction operands!"); @@ -1798,6 +2296,17 @@ return true; } +// Function to introduce total ordering between two ExtractElement Instructions. +// Returns -1 (this instruction < I) +// +1 (this instruction > I) +// 0 (this instruction = I) +// Any addition of operands in ExtractElementInst class need to be reflected +// here so that the IR comparators stay in sync with the changes to IR. +int ExtractElementInst::compareInstSpecificProperties( + const ExtractElementInst *I) const { + return 0; +} + //===----------------------------------------------------------------------===// // InsertElementInst Implementation //===----------------------------------------------------------------------===// @@ -1834,7 +2343,7 @@ bool InsertElementInst::isValidOperands(const Value *Vec, const Value *Elt, const Value *Index) { if (!Vec->getType()->isVectorTy()) - return false; // First operand of insertelement must be vector type. + return false; // First operand of insertelement must be vector type. if (Elt->getType() != cast(Vec->getType())->getElementType()) return false;// Second operand of insertelement must be vector element type. @@ -1844,6 +2353,17 @@ return true; } +// Function to introduce total ordering between two InsertElement Instructions. +// Returns -1 (this instruction < I) +// +1 (this instruction > I) +// 0 (this instruction = I) +// Any addition of operands in InsertElementInst class need to be reflected here +// so that the IR comparators stay in sync with the changes to IR. +int InsertElementInst::compareInstSpecificProperties( + const InsertElementInst *I) const { + return 0; +} + //===----------------------------------------------------------------------===// // ShuffleVectorInst Implementation //===----------------------------------------------------------------------===// @@ -2226,6 +2746,25 @@ return isIdentityMaskImpl(getShuffleMask(), NumMaskElts); } +// Function to introduce total ordering between two ShuffleVector Instructions. +// Returns -1 (this instruction < I) +// +1 (this instruction > I) +// 0 (this instruction = I) +// Any addition of operands in ShuffleVectorInst class need to be reflected here +// so that the IR comparators stay in sync with the changes to IR. 
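// Illustrative note (not part of this patch): the mask comparison below is
// lexicographic, with the mask length compared first; assuming compareIntegers
// orders by numeric value:
//   <0, 1>    vs <0, 2, 1>  ->  -1  (size 2 < size 3)
//   <0, 1, 2> vs <0, 2, 1>  ->  -1  (first differing element: 1 < 2)
//   <0, 1, 2> vs <0, 1, 2>  ->   0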
+int ShuffleVectorInst::compareInstSpecificProperties( + const ShuffleVectorInst *I) const { + ArrayRef LMask = getShuffleMask(); + ArrayRef RMask = I->getShuffleMask(); + if (int Res = compareIntegers(LMask.size(), RMask.size())) + return Res; + for (size_t I = 0, E = LMask.size(); I != E; ++I) { + if (int Res = compareIntegers(LMask[I], RMask[I])) + return Res; + } + return 0; +} + //===----------------------------------------------------------------------===// // InsertValueInst Class //===----------------------------------------------------------------------===// @@ -2241,7 +2780,8 @@ assert(!Idxs.empty() && "InsertValueInst must have at least one index"); assert(ExtractValueInst::getIndexedType(Agg->getType(), Idxs) == - Val->getType() && "Inserted value must match indexed type!"); + Val->getType() && + "Inserted value must match indexed type!"); Op<0>() = Agg; Op<1>() = Val; @@ -2250,14 +2790,33 @@ } InsertValueInst::InsertValueInst(const InsertValueInst &IVI) - : Instruction(IVI.getType(), InsertValue, - OperandTraits::op_begin(this), 2), - Indices(IVI.Indices) { + : Instruction(IVI.getType(), InsertValue, + OperandTraits::op_begin(this), 2), + Indices(IVI.Indices) { Op<0>() = IVI.getOperand(0); Op<1>() = IVI.getOperand(1); SubclassOptionalData = IVI.SubclassOptionalData; } +// Function to introduce total ordering between two InsertValue Instructions. +// Returns -1 (this instruction < I) +// +1 (this instruction > I) +// 0 (this instruction = I) +// Any addition of operands in InsertValueInst class need to be reflected here +// so that the IR comparators stay in sync with the changes to IR. +int InsertValueInst::compareInstSpecificProperties( + const InsertValueInst *I) const { + ArrayRef LIndices = getIndices(); + ArrayRef RIndices = I->getIndices(); + if (int Res = compareIntegers(LIndices.size(), RIndices.size())) + return Res; + for (size_t I = 0, E = LIndices.size(); I != E; ++I) { + if (int Res = compareIntegers(LIndices[I], RIndices[I])) + return Res; + } + return 0; +} + //===----------------------------------------------------------------------===// // ExtractValueInst Class //===----------------------------------------------------------------------===// @@ -2307,7 +2866,26 @@ return nullptr; } } - return const_cast(Agg); + return const_cast(Agg); +} + +// Function to introduce total ordering between two ExtractValue Instructions. +// Returns -1 (this instruction < I) +// +1 (this instruction > I) +// 0 (this instruction = I) +// Any addition of operands in ExtractValueInst class need to be reflected here +// so that the IR comparators stay in sync with the changes to IR. +int ExtractValueInst::compareInstSpecificProperties( + const ExtractValueInst *I) const { + ArrayRef LIndices = getIndices(); + ArrayRef RIndices = I->getIndices(); + if (int Res = compareIntegers(LIndices.size(), RIndices.size())) + return Res; + for (size_t I = 0, E = LIndices.size(); I != E; ++I) { + if (int Res = compareIntegers(LIndices[I], RIndices[I])) + return Res; + } + return 0; } //===----------------------------------------------------------------------===// @@ -2363,6 +2941,16 @@ #endif } +// Function to introduce total ordering between two UnaryOperator Instructions. +// Returns -1 (this instruction < I) +// +1 (this instruction > I) +// 0 (this instruction = I) +// Any addition of operands in UnaryOperator class need to be reflected here +// so that the IR comparators stay in sync with the changes to IR. 
+int UnaryOperator::compareInstSpecificProperties(const UnaryOperator *I) const { + return 0; +} + //===----------------------------------------------------------------------===// // BinaryOperator Class //===----------------------------------------------------------------------===// @@ -2520,15 +3108,15 @@ BinaryOperator *BinaryOperator::CreateNot(Value *Op, const Twine &Name, Instruction *InsertBefore) { Constant *C = Constant::getAllOnesValue(Op->getType()); - return new BinaryOperator(Instruction::Xor, Op, C, - Op->getType(), Name, InsertBefore); + return new BinaryOperator(Instruction::Xor, Op, C, Op->getType(), Name, + InsertBefore); } BinaryOperator *BinaryOperator::CreateNot(Value *Op, const Twine &Name, BasicBlock *InsertAtEnd) { Constant *AllOnes = Constant::getAllOnesValue(Op->getType()); - return new BinaryOperator(Instruction::Xor, Op, AllOnes, - Op->getType(), Name, InsertAtEnd); + return new BinaryOperator(Instruction::Xor, Op, AllOnes, Op->getType(), Name, + InsertAtEnd); } // Exchange the two operands to this instruction. This instruction is safe to @@ -2542,6 +3130,17 @@ return false; } +// Function to introduce total ordering between two BinaryOperator Instructions. +// Returns -1 (this instruction < I) +// +1 (this instruction > I) +// 0 (this instruction = I) +// Any addition of operands in BinaryOperator class need to be reflected here +// so that the IR comparators stay in sync with the changes to IR. +int BinaryOperator::compareInstSpecificProperties( + const BinaryOperator *I) const { + return 0; +} + //===----------------------------------------------------------------------===// // FPMathOperator Class //===----------------------------------------------------------------------===// @@ -3413,68 +4012,129 @@ assert(castIsValid(getOpcode(), S, Ty) && "Illegal Trunc"); } -ZExtInst::ZExtInst( - Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore -) : CastInst(Ty, ZExt, S, Name, InsertBefore) { +// Function to introduce total ordering between two Trunc Instructions. +// Returns -1 (this instruction < I) +// +1 (this instruction > I) +// 0 (this instruction = I) +// Any addition of operands in TruncInst class need to be reflected here +// so that the IR comparators stay in sync with the changes to IR. +int TruncInst::compareInstSpecificProperties(const TruncInst *I) const { + return 0; +} + +ZExtInst::ZExtInst(Value *S, Type *Ty, const Twine &Name, + Instruction *InsertBefore) + : CastInst(Ty, ZExt, S, Name, InsertBefore) { assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt"); } -ZExtInst::ZExtInst( - Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd -) : CastInst(Ty, ZExt, S, Name, InsertAtEnd) { +ZExtInst::ZExtInst(Value *S, Type *Ty, const Twine &Name, + BasicBlock *InsertAtEnd) + : CastInst(Ty, ZExt, S, Name, InsertAtEnd) { assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt"); } -SExtInst::SExtInst( - Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore -) : CastInst(Ty, SExt, S, Name, InsertBefore) { + +// Function to introduce total ordering between two ZExt Instructions. +// Returns -1 (this instruction < I) +// +1 (this instruction > I) +// 0 (this instruction = I) +// Any addition of operands in ZExtInst class need to be reflected here +// so that the IR comparators stay in sync with the changes to IR. 
+int ZExtInst::compareInstSpecificProperties(const ZExtInst *I) const { + return 0; +} + +SExtInst::SExtInst(Value *S, Type *Ty, const Twine &Name, + Instruction *InsertBefore) + : CastInst(Ty, SExt, S, Name, InsertBefore) { assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt"); } -SExtInst::SExtInst( - Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd -) : CastInst(Ty, SExt, S, Name, InsertAtEnd) { +SExtInst::SExtInst(Value *S, Type *Ty, const Twine &Name, + BasicBlock *InsertAtEnd) + : CastInst(Ty, SExt, S, Name, InsertAtEnd) { assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt"); } -FPTruncInst::FPTruncInst( - Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore -) : CastInst(Ty, FPTrunc, S, Name, InsertBefore) { +// Function to introduce total ordering between two SExt Instructions. +// Returns -1 (this instruction < I) +// +1 (this instruction > I) +// 0 (this instruction = I) +// Any addition of operands in SExtInst class need to be reflected here +// so that the IR comparators stay in sync with the changes to IR. +int SExtInst::compareInstSpecificProperties(const SExtInst *I) const { + return 0; +} + +FPTruncInst::FPTruncInst(Value *S, Type *Ty, const Twine &Name, + Instruction *InsertBefore) + : CastInst(Ty, FPTrunc, S, Name, InsertBefore) { assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc"); } -FPTruncInst::FPTruncInst( - Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd -) : CastInst(Ty, FPTrunc, S, Name, InsertAtEnd) { +FPTruncInst::FPTruncInst(Value *S, Type *Ty, const Twine &Name, + BasicBlock *InsertAtEnd) + : CastInst(Ty, FPTrunc, S, Name, InsertAtEnd) { assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc"); } -FPExtInst::FPExtInst( - Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore -) : CastInst(Ty, FPExt, S, Name, InsertBefore) { +// Function to introduce total ordering between two FPTrunc Instructions. +// Returns -1 (this instruction < I) +// +1 (this instruction > I) +// 0 (this instruction = I) +// Any addition of operands in FPTruncInst class need to be reflected here +// so that the IR comparators stay in sync with the changes to IR. +int FPTruncInst::compareInstSpecificProperties(const FPTruncInst *I) const { + return 0; +} + +FPExtInst::FPExtInst(Value *S, Type *Ty, const Twine &Name, + Instruction *InsertBefore) + : CastInst(Ty, FPExt, S, Name, InsertBefore) { assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt"); } -FPExtInst::FPExtInst( - Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd -) : CastInst(Ty, FPExt, S, Name, InsertAtEnd) { +FPExtInst::FPExtInst(Value *S, Type *Ty, const Twine &Name, + BasicBlock *InsertAtEnd) + : CastInst(Ty, FPExt, S, Name, InsertAtEnd) { assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt"); } -UIToFPInst::UIToFPInst( - Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore -) : CastInst(Ty, UIToFP, S, Name, InsertBefore) { +// Function to introduce total ordering between two FPExt Instructions. +// Returns -1 (this instruction < I) +// +1 (this instruction > I) +// 0 (this instruction = I) +// Any addition of operands in FPExtInst class need to be reflected here +// so that the IR comparators stay in sync with the changes to IR. 
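// Illustrative note (not part of this patch): the cast comparators in this
// section all return 0 because a cast carries no state beyond its opcode and
// its source/result types, and callers of compareInstSpecificProperties are
// expected to have matched opcode, operand count and types already; two
// structurally identical casts therefore tie here.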
+int FPExtInst::compareInstSpecificProperties(const FPExtInst *I) const { + return 0; +} + +UIToFPInst::UIToFPInst(Value *S, Type *Ty, const Twine &Name, + Instruction *InsertBefore) + : CastInst(Ty, UIToFP, S, Name, InsertBefore) { assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP"); } -UIToFPInst::UIToFPInst( - Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd -) : CastInst(Ty, UIToFP, S, Name, InsertAtEnd) { +UIToFPInst::UIToFPInst(Value *S, Type *Ty, const Twine &Name, + BasicBlock *InsertAtEnd) + : CastInst(Ty, UIToFP, S, Name, InsertAtEnd) { assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP"); } -SIToFPInst::SIToFPInst( - Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore -) : CastInst(Ty, SIToFP, S, Name, InsertBefore) { +// Function to introduce total ordering between two UIToFP Instructions. +// Returns -1 (this instruction < I) +// +1 (this instruction > I) +// 0 (this instruction = I) +// Any addition of operands in UIToFPInst class need to be reflected here +// so that the IR comparators stay in sync with the changes to IR. +int UIToFPInst::compareInstSpecificProperties(const UIToFPInst *I) const { + return 0; +} + +SIToFPInst::SIToFPInst(Value *S, Type *Ty, const Twine &Name, + Instruction *InsertBefore) + : CastInst(Ty, SIToFP, S, Name, InsertBefore) { assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP"); } @@ -3484,9 +4144,19 @@ assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP"); } -FPToUIInst::FPToUIInst( - Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore -) : CastInst(Ty, FPToUI, S, Name, InsertBefore) { +// Function to introduce total ordering between two SIToFP Instructions. +// Returns -1 (this instruction < I) +// +1 (this instruction > I) +// 0 (this instruction = I) +// Any addition of operands in SIToFPInst class need to be reflected here +// so that the IR comparators stay in sync with the changes to IR. +int SIToFPInst::compareInstSpecificProperties(const SIToFPInst *I) const { + return 0; +} + +FPToUIInst::FPToUIInst(Value *S, Type *Ty, const Twine &Name, + Instruction *InsertBefore) + : CastInst(Ty, FPToUI, S, Name, InsertBefore) { assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI"); } @@ -3496,9 +4166,19 @@ assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI"); } -FPToSIInst::FPToSIInst( - Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore -) : CastInst(Ty, FPToSI, S, Name, InsertBefore) { +// Function to introduce total ordering between two FPToUI Instructions. +// Returns -1 (this instruction < I) +// +1 (this instruction > I) +// 0 (this instruction = I) +// Any addition of operands in FPToUIInst class need to be reflected here +// so that the IR comparators stay in sync with the changes to IR. +int FPToUIInst::compareInstSpecificProperties(const FPToUIInst *I) const { + return 0; +} + +FPToSIInst::FPToSIInst(Value *S, Type *Ty, const Twine &Name, + Instruction *InsertBefore) + : CastInst(Ty, FPToSI, S, Name, InsertBefore) { assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI"); } @@ -3508,9 +4188,19 @@ assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI"); } -PtrToIntInst::PtrToIntInst( - Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore -) : CastInst(Ty, PtrToInt, S, Name, InsertBefore) { +// Function to introduce total ordering between two FPToSI Instructions. 
+// Returns -1 (this instruction < I) +// +1 (this instruction > I) +// 0 (this instruction = I) +// Any addition of operands in FPToSIInst class need to be reflected here +// so that the IR comparators stay in sync with the changes to IR. +int FPToSIInst::compareInstSpecificProperties(const FPToSIInst *I) const { + return 0; +} + +PtrToIntInst::PtrToIntInst(Value *S, Type *Ty, const Twine &Name, + Instruction *InsertBefore) + : CastInst(Ty, PtrToInt, S, Name, InsertBefore) { assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt"); } @@ -3520,9 +4210,19 @@ assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt"); } -IntToPtrInst::IntToPtrInst( - Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore -) : CastInst(Ty, IntToPtr, S, Name, InsertBefore) { +// Function to introduce total ordering between two PtrToInt Instructions. +// Returns -1 (this instruction < I) +// +1 (this instruction > I) +// 0 (this instruction = I) +// Any addition of operands in PtrToIntInst class need to be reflected here +// so that the IR comparators stay in sync with the changes to IR. +int PtrToIntInst::compareInstSpecificProperties(const PtrToIntInst *I) const { + return 0; +} + +IntToPtrInst::IntToPtrInst(Value *S, Type *Ty, const Twine &Name, + Instruction *InsertBefore) + : CastInst(Ty, IntToPtr, S, Name, InsertBefore) { assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr"); } @@ -3532,9 +4232,19 @@ assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr"); } -BitCastInst::BitCastInst( - Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore -) : CastInst(Ty, BitCast, S, Name, InsertBefore) { +// Function to introduce total ordering between two IntToPtr Instructions. +// Returns -1 (this instruction < I) +// +1 (this instruction > I) +// 0 (this instruction = I) +// Any addition of operands in IntToPtrInst class need to be reflected here +// so that the IR comparators stay in sync with the changes to IR. +int IntToPtrInst::compareInstSpecificProperties(const IntToPtrInst *I) const { + return 0; +} + +BitCastInst::BitCastInst(Value *S, Type *Ty, const Twine &Name, + Instruction *InsertBefore) + : CastInst(Ty, BitCast, S, Name, InsertBefore) { assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast"); } @@ -3544,9 +4254,19 @@ assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast"); } -AddrSpaceCastInst::AddrSpaceCastInst( - Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore -) : CastInst(Ty, AddrSpaceCast, S, Name, InsertBefore) { +// Function to introduce total ordering between two BitCast Instructions. +// Returns -1 (this instruction < I) +// +1 (this instruction > I) +// 0 (this instruction = I) +// Any addition of operands in BitCastInst class need to be reflected here +// so that the IR comparators stay in sync with the changes to IR. +int BitCastInst::compareInstSpecificProperties(const BitCastInst *I) const { + return 0; +} + +AddrSpaceCastInst::AddrSpaceCastInst(Value *S, Type *Ty, const Twine &Name, + Instruction *InsertBefore) + : CastInst(Ty, AddrSpaceCast, S, Name, InsertBefore) { assert(castIsValid(getOpcode(), S, Ty) && "Illegal AddrSpaceCast"); } @@ -3556,6 +4276,17 @@ assert(castIsValid(getOpcode(), S, Ty) && "Illegal AddrSpaceCast"); } +// Function to introduce total ordering between two AddrSpaceCast Instructions. 
+// Returns -1 (this instruction < I) +// +1 (this instruction > I) +// 0 (this instruction = I) +// Any addition of operands in AddrSpaceCastInst class need to be reflected here +// so that the IR comparators stay in sync with the changes to IR. +int AddrSpaceCastInst::compareInstSpecificProperties( + const AddrSpaceCastInst *I) const { + return 0; +} + //===----------------------------------------------------------------------===// // CmpInst Classes //===----------------------------------------------------------------------===// @@ -3847,10 +4578,19 @@ } bool CmpInst::isTrueWhenEqual(Predicate predicate) { - switch(predicate) { - default: return false; - case ICMP_EQ: case ICMP_UGE: case ICMP_ULE: case ICMP_SGE: case ICMP_SLE: - case FCMP_TRUE: case FCMP_UEQ: case FCMP_UGE: case FCMP_ULE: return true; + switch (predicate) { + default: + return false; + case ICMP_EQ: + case ICMP_UGE: + case ICMP_ULE: + case ICMP_SGE: + case ICMP_SLE: + case FCMP_TRUE: + case FCMP_UEQ: + case FCMP_UGE: + case FCMP_ULE: + return true; } } @@ -3891,6 +4631,16 @@ return isImpliedTrueByMatchingCmp(Pred1, getInversePredicate(Pred2)); } +// Function to introduce total ordering between two Cmp Instructions. +// Returns -1 (this instruction < I) +// +1 (this instruction > I) +// 0 (this instruction = I) +// Any addition of operands in CmpInst class need to be reflected here +// so that the IR comparators stay in sync with the changes to IR. +int CmpInst::compareInstSpecificProperties(const CmpInst *I) const { + return compareIntegers(getPredicate(), I->getPredicate()); +} + //===----------------------------------------------------------------------===// // SwitchInst Implementation //===----------------------------------------------------------------------===// @@ -4114,6 +4864,16 @@ return None; } +// Function to introduce total ordering between two Switch Instructions. +// Returns -1 (this instruction < I) +// +1 (this instruction > I) +// 0 (this instruction = I) +// Any addition of operands in SwitchInst class need to be reflected here +// so that the IR comparators stay in sync with the changes to IR. +int SwitchInst::compareInstSpecificProperties(const SwitchInst *I) const { + return 0; +} + //===----------------------------------------------------------------------===// // IndirectBrInst Implementation //===----------------------------------------------------------------------===// @@ -4169,46 +4929,109 @@ /// void IndirectBrInst::addDestination(BasicBlock *DestBB) { unsigned OpNo = getNumOperands(); - if (OpNo+1 > ReservedSpace) - growOperands(); // Get more space! + if (OpNo + 1 > ReservedSpace) + growOperands(); // Get more space! // Initialize some new operands. assert(OpNo < ReservedSpace && "Growing didn't work!"); - setNumHungOffUseOperands(OpNo+1); + setNumHungOffUseOperands(OpNo + 1); getOperandList()[OpNo] = DestBB; } /// removeDestination - This method removes the specified successor from the /// indirectbr instruction. void IndirectBrInst::removeDestination(unsigned idx) { - assert(idx < getNumOperands()-1 && "Successor index out of range!"); + assert(idx < getNumOperands() - 1 && "Successor index out of range!"); unsigned NumOps = getNumOperands(); Use *OL = getOperandList(); // Replace this value with the last one. - OL[idx+1] = OL[NumOps-1]; + OL[idx + 1] = OL[NumOps - 1]; // Nuke the last value. 
- OL[NumOps-1].set(nullptr); - setNumHungOffUseOperands(NumOps-1); + OL[NumOps - 1].set(nullptr); + setNumHungOffUseOperands(NumOps - 1); +} + +// Function to introduce total ordering between two IndirectBr Instructions. +// Returns -1 (this instruction < I) +// +1 (this instruction > I) +// 0 (this instruction = I) +// Any addition of operands in IndirectBrInst class need to be reflected here +// so that the IR comparators stay in sync with the changes to IR. +int IndirectBrInst::compareInstSpecificProperties( + const IndirectBrInst *I) const { + return 0; } //===----------------------------------------------------------------------===// // FreezeInst Implementation //===----------------------------------------------------------------------===// -FreezeInst::FreezeInst(Value *S, - const Twine &Name, Instruction *InsertBefore) +FreezeInst::FreezeInst(Value *S, const Twine &Name, Instruction *InsertBefore) : UnaryInstruction(S->getType(), Freeze, S, InsertBefore) { setName(Name); } -FreezeInst::FreezeInst(Value *S, - const Twine &Name, BasicBlock *InsertAtEnd) +FreezeInst::FreezeInst(Value *S, const Twine &Name, BasicBlock *InsertAtEnd) : UnaryInstruction(S->getType(), Freeze, S, InsertAtEnd) { setName(Name); } +// Function to introduce total ordering between two Freeze Instructions. +// Returns -1 (this instruction < I) +// +1 (this instruction > I) +// 0 (this instruction = I) +// Any addition of operands in FreezeInst class need to be reflected here +// so that the IR comparators stay in sync with the changes to IR. +int FreezeInst::compareInstSpecificProperties(const FreezeInst *I) const { + return 0; +} + +//===----------------------------------------------------------------------===// +// VAArgInst class +//===----------------------------------------------------------------------===// + +// Function to introduce total ordering between two VAArg Instructions. +// Returns -1 (this instruction < I) +// +1 (this instruction > I) +// 0 (this instruction = I) +// Any addition of operands in VAArgInst class need to be reflected here +// so that the IR comparators stay in sync with the changes to IR. +int VAArgInst::compareInstSpecificProperties(const VAArgInst *I) const { + return 0; +} + +//===----------------------------------------------------------------------===// +// CleanupPadInst class +//===----------------------------------------------------------------------===// + +// Function to introduce total ordering between two CleanupPad Instructions. +// Returns -1 (this instruction < I) +// +1 (this instruction > I) +// 0 (this instruction = I) +// Any addition of operands in CleanupPadInst class need to be reflected here +// so that the IR comparators stay in sync with the changes to IR. +int CleanupPadInst::compareInstSpecificProperties( + const CleanupPadInst *I) const { + return 0; +} + +//===----------------------------------------------------------------------===// +// CatchPadInst class +//===----------------------------------------------------------------------===// + +// Function to introduce total ordering between two CatchPad Instructions. +// Returns -1 (this instruction < I) +// +1 (this instruction > I) +// 0 (this instruction = I) +// Any addition of operands in CatchPadInst class need to be reflected here +// so that the IR comparators stay in sync with the changes to IR. 
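// Illustrative note (not part of this patch): a catchpad has no ordering state
// of its own; it is ordered by delegating to its parent catchswitch, so two
// catchpads whose catchswitches differ (for example, one unwinding to the
// caller and one unwinding to a cleanup block) compare unequal.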
+int CatchPadInst::compareInstSpecificProperties(const CatchPadInst *I) const { + const CatchSwitchInst *CSI = getCatchSwitch(); + return CSI->compareInstSpecificProperties(I->getCatchSwitch()); +} + //===----------------------------------------------------------------------===// // cloneImpl() implementations //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Transforms/Utils/FunctionComparator.cpp b/llvm/lib/Transforms/Utils/FunctionComparator.cpp --- a/llvm/lib/Transforms/Utils/FunctionComparator.cpp +++ b/llvm/lib/Transforms/Utils/FunctionComparator.cpp @@ -544,127 +544,16 @@ return Res; } - // Check special state that is a part of some instructions. + // Check special state of some instructions that needs different treatment + // from what their compareInstSpecificProperties() method provides. if (const AllocaInst *AI = dyn_cast(L)) { - if (int Res = cmpTypes(AI->getAllocatedType(), - cast(R)->getAllocatedType())) - return Res; - return cmpNumbers(AI->getAlignment(), cast(R)->getAlignment()); - } - if (const LoadInst *LI = dyn_cast(L)) { - if (int Res = cmpNumbers(LI->isVolatile(), cast(R)->isVolatile())) - return Res; - if (int Res = - cmpNumbers(LI->getAlignment(), cast(R)->getAlignment())) - return Res; - if (int Res = - cmpOrderings(LI->getOrdering(), cast(R)->getOrdering())) - return Res; - if (int Res = cmpNumbers(LI->getSyncScopeID(), - cast(R)->getSyncScopeID())) - return Res; - return cmpRangeMetadata( - LI->getMetadata(LLVMContext::MD_range), - cast(R)->getMetadata(LLVMContext::MD_range)); - } - if (const StoreInst *SI = dyn_cast(L)) { - if (int Res = - cmpNumbers(SI->isVolatile(), cast(R)->isVolatile())) - return Res; - if (int Res = - cmpNumbers(SI->getAlignment(), cast(R)->getAlignment())) - return Res; - if (int Res = - cmpOrderings(SI->getOrdering(), cast(R)->getOrdering())) - return Res; - return cmpNumbers(SI->getSyncScopeID(), - cast(R)->getSyncScopeID()); - } - if (const CmpInst *CI = dyn_cast(L)) - return cmpNumbers(CI->getPredicate(), cast(R)->getPredicate()); - if (auto *CBL = dyn_cast(L)) { - auto *CBR = cast(R); - if (int Res = cmpNumbers(CBL->getCallingConv(), CBR->getCallingConv())) - return Res; - if (int Res = cmpAttrs(CBL->getAttributes(), CBR->getAttributes())) - return Res; - if (int Res = cmpOperandBundlesSchema(*CBL, *CBR)) - return Res; - if (const CallInst *CI = dyn_cast(L)) - if (int Res = cmpNumbers(CI->getTailCallKind(), - cast(R)->getTailCallKind())) - return Res; - return cmpRangeMetadata(L->getMetadata(LLVMContext::MD_range), - R->getMetadata(LLVMContext::MD_range)); - } - if (const InsertValueInst *IVI = dyn_cast(L)) { - ArrayRef LIndices = IVI->getIndices(); - ArrayRef RIndices = cast(R)->getIndices(); - if (int Res = cmpNumbers(LIndices.size(), RIndices.size())) - return Res; - for (size_t i = 0, e = LIndices.size(); i != e; ++i) { - if (int Res = cmpNumbers(LIndices[i], RIndices[i])) - return Res; - } - return 0; - } - if (const ExtractValueInst *EVI = dyn_cast(L)) { - ArrayRef LIndices = EVI->getIndices(); - ArrayRef RIndices = cast(R)->getIndices(); - if (int Res = cmpNumbers(LIndices.size(), RIndices.size())) - return Res; - for (size_t i = 0, e = LIndices.size(); i != e; ++i) { - if (int Res = cmpNumbers(LIndices[i], RIndices[i])) - return Res; - } - } - if (const FenceInst *FI = dyn_cast(L)) { if (int Res = - cmpOrderings(FI->getOrdering(), cast(R)->getOrdering())) + cmpNumbers(AI->getAlignment(), cast(R)->getAlignment())) return Res; - return 
cmpNumbers(FI->getSyncScopeID(), - cast(R)->getSyncScopeID()); - } - if (const AtomicCmpXchgInst *CXI = dyn_cast(L)) { - if (int Res = cmpNumbers(CXI->isVolatile(), - cast(R)->isVolatile())) - return Res; - if (int Res = - cmpNumbers(CXI->isWeak(), cast(R)->isWeak())) - return Res; - if (int Res = - cmpOrderings(CXI->getSuccessOrdering(), - cast(R)->getSuccessOrdering())) - return Res; - if (int Res = - cmpOrderings(CXI->getFailureOrdering(), - cast(R)->getFailureOrdering())) - return Res; - return cmpNumbers(CXI->getSyncScopeID(), - cast(R)->getSyncScopeID()); - } - if (const AtomicRMWInst *RMWI = dyn_cast(L)) { - if (int Res = cmpNumbers(RMWI->getOperation(), - cast(R)->getOperation())) - return Res; - if (int Res = cmpNumbers(RMWI->isVolatile(), - cast(R)->isVolatile())) - return Res; - if (int Res = cmpOrderings(RMWI->getOrdering(), - cast(R)->getOrdering())) - return Res; - return cmpNumbers(RMWI->getSyncScopeID(), - cast(R)->getSyncScopeID()); - } - if (const ShuffleVectorInst *SVI = dyn_cast(L)) { - ArrayRef LMask = SVI->getShuffleMask(); - ArrayRef RMask = cast(R)->getShuffleMask(); - if (int Res = cmpNumbers(LMask.size(), RMask.size())) - return Res; - for (size_t i = 0, e = LMask.size(); i != e; ++i) { - if (int Res = cmpNumbers(LMask[i], RMask[i])) - return Res; - } + // The cmpTypes function differs from that used by + // compareInstSpecificProperties() method. + return cmpTypes(AI->getAllocatedType(), + cast(R)->getAllocatedType()); } if (const PHINode *PNL = dyn_cast(L)) { const PHINode *PNR = cast(R); @@ -677,7 +566,7 @@ return Res; } } - return 0; + return L->compareSpecialState(R); } // Determine whether two GEP operations perform the same underlying arithmetic. diff --git a/llvm/test/Transforms/MergeFunc/gep-base-type.ll b/llvm/test/Transforms/MergeFunc/gep-base-type.ll --- a/llvm/test/Transforms/MergeFunc/gep-base-type.ll +++ b/llvm/test/Transforms/MergeFunc/gep-base-type.ll @@ -25,7 +25,6 @@ ret %struct2* %6 } - define internal %struct1* @Gfunc(%struct1* %P, i64 %i) { ; CHECK-LABEL: @Gfunc( ; CHECK-NEXT: getelementptr @@ -43,4 +42,3 @@ %6 = getelementptr inbounds %"struct1", %"struct1"* %P, i64 %i ret %struct1* %6 } - diff --git a/llvm/test/Transforms/MergeFunc/ir-cmp-alloca1.ll b/llvm/test/Transforms/MergeFunc/ir-cmp-alloca1.ll new file mode 100644 --- /dev/null +++ b/llvm/test/Transforms/MergeFunc/ir-cmp-alloca1.ll @@ -0,0 +1,61 @@ +; RUN: opt -mergefunc -S < %s | FileCheck %s + +;; Make sure that two different allocas are not treated as equal. + +target datalayout = "e-m:w-p:32:32-i64:64-f80:32-n8:16:32-S32" + +%kv1 = type { i32, i32* } +%kv2 = type { i8 } +%kv3 = type { i64, i64* } + +; Size difference. + +; CHECK-LABEL: define void @size1 +; CHECK-NOT: call void @ +define void @size1(i8 *%f) { + %v = alloca %kv1, align 8 + %f_2 = bitcast i8* %f to void (%kv1 *)* + call void %f_2(%kv1 * %v) + call void %f_2(%kv1 * %v) + call void %f_2(%kv1 * %v) + call void %f_2(%kv1 * %v) + ret void +} + +; CHECK-LABEL: define void @size2 +; CHECK-NOT: call void @ +define void @size2(i8 *%f) { + %v = alloca %kv2, align 8 + %f_2 = bitcast i8* %f to void (%kv2 *)* + call void %f_2(%kv2 * %v) + call void %f_2(%kv2 * %v) + call void %f_2(%kv2 * %v) + call void %f_2(%kv2 * %v) + ret void +} + +; Alignment difference. 
+ +; CHECK-LABEL: define void @align1 +; CHECK-NOT: call void @ +define void @align1(i8 *%f) { + %v = alloca %kv3, align 8 + %f_2 = bitcast i8* %f to void (%kv3 *)* + call void %f_2(%kv3 * %v) + call void %f_2(%kv3 * %v) + call void %f_2(%kv3 * %v) + call void %f_2(%kv3 * %v) + ret void +} + +; CHECK-LABEL: define void @align2 +; CHECK-NOT: call void @ +define void @align2(i8 *%f) { + %v = alloca %kv3, align 16 + %f_2 = bitcast i8* %f to void (%kv3 *)* + call void %f_2(%kv3 * %v) + call void %f_2(%kv3 * %v) + call void %f_2(%kv3 * %v) + call void %f_2(%kv3 * %v) + ret void +} diff --git a/llvm/test/Transforms/MergeFunc/ir-cmp-alloca2.ll b/llvm/test/Transforms/MergeFunc/ir-cmp-alloca2.ll new file mode 100644 --- /dev/null +++ b/llvm/test/Transforms/MergeFunc/ir-cmp-alloca2.ll @@ -0,0 +1,36 @@ +; RUN: opt -mergefunc -S < %s | FileCheck %s + +;; Make sure that two same allocas are treated as equal. + +target datalayout = "e-m:w-p:32:32-i64:64-f80:32-n8:16:32-S32" + +%kv1 = type {i8*, i32*} +%kv2 = type {i64*, float*} +%kv3 = type {i64} + +;; Checks if pointer types of different element types, bitwidth of integer types and alignment are considered equal by +;; MergeFunctions and merges both functions + +; CHECK-LABEL: define void @size1 +; CHECK-NOT: call void @ +define void @size1(i8 *%f) { + %v = alloca %kv1, align 8 + %f_2 = bitcast i8* %f to void (%kv1 *)* + call void %f_2(%kv1 * %v) + %u = alloca %kv3, align 16 + %f_3 = bitcast i8* %f to void (%kv3 *)* + call void %f_3(%kv3 * %u) + ret void +} + +; CHECK-LABEL: define void @size2 +; CHECK: call void @ +define void @size2(i8 *%f) { + %v = alloca %kv2, align 8 + %f_2 = bitcast i8* %f to void (%kv2 *)* + call void %f_2(%kv2 * %v) + %u = alloca %kv3, align 16 + %f_3 = bitcast i8* %f to void (%kv3 *)* + call void %f_3(%kv3 * %u) + ret void +} diff --git a/llvm/test/Transforms/MergeFunc/ir-cmp-atomicrmw1.ll b/llvm/test/Transforms/MergeFunc/ir-cmp-atomicrmw1.ll new file mode 100644 --- /dev/null +++ b/llvm/test/Transforms/MergeFunc/ir-cmp-atomicrmw1.ll @@ -0,0 +1,93 @@ +; RUN: opt -mergefunc -S < %s | FileCheck %s + +;; Make sure that two different AtomicRMW Insts are not treated as equal. 
+ +target datalayout = "e-m:w-p:32:32-i64:64-f80:32-n8:16:32-S32" + +;; Operation difference + +; CHECK-LABEL: define void @f1 +; CHECK-NOT: call void @ +define void @f1(i32*) { + %old = atomicrmw add i32* %0, i32 1 acquire + %inc = add nsw i32 %old, 1 + %val = load i32, i32* %0, align 4 + %eq = icmp eq i32 %inc, %val + ret void +} + +; CHECK-LABEL: define void @f2 +; CHECK-NOT: call void @ +define void @f2(i32*) { + %old = atomicrmw sub i32* %0, i32 1 acquire + %inc = add nsw i32 %old, 1 + %val = load i32, i32* %0, align 4 + %eq = icmp eq i32 %inc, %val + ret void +} + +;; Volatile difference + +; CHECK-LABEL: define void @f3 +; CHECK-NOT: call void @ +define void @f3(i32*) { + %old = atomicrmw volatile add i32* %0, i32 1 monotonic + %inc = add nsw i32 %old, 1 + %val = load i32, i32* %0, align 4 + %eq = icmp eq i32 %inc, %val + ret void +} + +; CHECK-LABEL: define void @f4 +; CHECK-NOT: call void @ +define void @f4(i32*) { + %old = atomicrmw add i32* %0, i32 1 monotonic + %inc = add nsw i32 %old, 1 + %val = load i32, i32* %0, align 4 + %eq = icmp eq i32 %inc, %val + ret void +} + +;; Ordering difference + +; CHECK-LABEL: define void @f5 +; CHECK-NOT: call void @ +define void @f5(i32*) { + %old = atomicrmw sub i32* %0, i32 1 acquire + %dec = sub nsw i32 %old, 1 + %val = load i32, i32* %0, align 4 + %eq = icmp eq i32 %dec, %val + ret void +} + +; CHECK-LABEL: define void @f6 +; CHECK-NOT: call void @ +define void @f6(i32*) { + %old = atomicrmw sub i32* %0, i32 1 release + %dec = sub nsw i32 %old, 1 + %val = load i32, i32* %0, align 4 + %eq = icmp eq i32 %dec, %val + ret void +} + +;; SyncScopeID difference + +; CHECK-LABEL: define void @f7 +; CHECK-NOT: call void @ +define void @f7(i32*) { + %old = atomicrmw add i32* %0, i32 1 syncscope("") acquire + %inc = add nsw i32 %old, 1 + %val = load i32, i32* %0, align 4 + %eq = icmp eq i32 %inc, %val + ret void +} + +; CHECK-LABEL: define void @f8 +; CHECK-NOT: call void @ +define void @f8(i32*) { + %old = atomicrmw add i32* %0, i32 1 syncscope("singlethread") acquire + %inc = add nsw i32 %old, 1 + %val = load i32, i32* %0, align 4 + %eq = icmp eq i32 %inc, %val + ret void +} diff --git a/llvm/test/Transforms/MergeFunc/ir-cmp-atomicrmw2.ll b/llvm/test/Transforms/MergeFunc/ir-cmp-atomicrmw2.ll new file mode 100644 --- /dev/null +++ b/llvm/test/Transforms/MergeFunc/ir-cmp-atomicrmw2.ll @@ -0,0 +1,28 @@ +; RUN: opt -mergefunc -S < %s | FileCheck %s + +;; Make sure that two same AtomicRMW Insts are treated as equal. + +target datalayout = "e-m:w-p:32:32-i64:64-f80:32-n8:16:32-S32" + +;; Operation, Volatility, Ordering, SyncScopeID are considered during comparison + +; CHECK-LABEL: define void @f1 +; CHECK-NOT: call void @ +define void @f1(i32*) { + %old = atomicrmw volatile add i32* %0, i32 1 syncscope("") acquire + %inc = add nsw i32 %old, 1 + %val = load i32, i32* %0, align 4 + %eq = icmp eq i32 %inc, %val + ret void +} + +; CHECK-LABEL: define void @f2 +; CHECK: call void @ +define void @f2(i32*) { + %old = atomicrmw volatile add i32* %0, i32 1 syncscope("") acquire + %inc = add nsw i32 %old, 1 + %val = load i32, i32* %0, align 4 + %eq = icmp eq i32 %inc, %val + ret void +} + diff --git a/llvm/test/Transforms/MergeFunc/ir-cmp-binOp.ll b/llvm/test/Transforms/MergeFunc/ir-cmp-binOp.ll new file mode 100644 --- /dev/null +++ b/llvm/test/Transforms/MergeFunc/ir-cmp-binOp.ll @@ -0,0 +1,55 @@ +; RUN: opt -mergefunc -S < %s | FileCheck %s + +;; Make sure that two same BinaryOperator are treated as equal. 
+ +target datalayout = "e-m:w-p:32:32-i64:64-f80:32-n8:16:32-S32" + +;; Currently there is no special state being checked for comparison of BinaryOperator Instructions. + +; CHECK-LABEL: define void @f1 +; CHECK-NOT: call void @ +define void @f1(i32 %m, i32 %n, float %u, float %v) { + %1 = add nsw i32 %m, %n + %2 = sub nsw i32 %m, %n + %3 = mul nsw i32 %m, %n + %4 = udiv i32 %m, %n + %5 = sdiv i32 %m, %n + %6 = urem i32 %m, %n + %7 = srem i32 %m, %n + %8 = shl nsw i32 %m, %n + %9 = lshr i32 %m, %n + %10 = ashr i32 %m, %n + %11 = and i32 %m, %n + %12 = or i32 %m, %n + %13 = xor i32 %m, %n + %14 = fadd float %u, %v + %15 = fsub float %u, %v + %16 = fmul float %u, %v + %17 = fdiv float %u, %v + %18 = frem float %u, %v + ret void +} + +; CHECK-LABEL: define void @f2 +; CHECK: call void @ +define void @f2(i32 %m, i32 %n, float %u, float %v) { + %1 = add nsw i32 %m, %n + %2 = sub nsw i32 %m, %n + %3 = mul nsw i32 %m, %n + %4 = udiv i32 %m, %n + %5 = sdiv i32 %m, %n + %6 = urem i32 %m, %n + %7 = srem i32 %m, %n + %8 = shl nsw i32 %m, %n + %9 = lshr i32 %m, %n + %10 = ashr i32 %m, %n + %11 = and i32 %m, %n + %12 = or i32 %m, %n + %13 = xor i32 %m, %n + %14 = fadd float %u, %v + %15 = fsub float %u, %v + %16 = fmul float %u, %v + %17 = fdiv float %u, %v + %18 = frem float %u, %v + ret void +} diff --git a/llvm/test/Transforms/MergeFunc/ir-cmp-br.ll b/llvm/test/Transforms/MergeFunc/ir-cmp-br.ll new file mode 100644 --- /dev/null +++ b/llvm/test/Transforms/MergeFunc/ir-cmp-br.ll @@ -0,0 +1,41 @@ +; RUN: opt -mergefunc -S < %s | FileCheck %s + +;; Make sure that two same Branch Insts are treated as equal. + +target datalayout = "e-m:w-p:32:32-i64:64-f80:32-n8:16:32-S32" + +; There is no special state being checked for Branch instructions currently. + +; CHECK-LABEL: define i32 @f1 +; CHECK-NOT: call i32 @ +define i32 @f1(i32 %n, i32 %m) nounwind { +entry: + %0 = icmp eq i32 %n, %n + br i1 %0, label %bb1, label %bb2 + +bb1: + br label %bb3 + +bb2: + ret i32 1 + +bb3: + ret i32 0 +} + +; CHECK-LABEL: define i32 @f2 +; CHECK: call i32 @ +define i32 @f2(i32 %n, i32 %m) nounwind { +entry: + %0 = icmp eq i32 %n, %n + br i1 %0, label %bb1, label %bb2 + +bb1: + br label %bb3 + +bb2: + ret i32 1 + +bb3: + ret i32 0 +} \ No newline at end of file diff --git a/llvm/test/Transforms/MergeFunc/ir-cmp-call1.ll b/llvm/test/Transforms/MergeFunc/ir-cmp-call1.ll new file mode 100644 --- /dev/null +++ b/llvm/test/Transforms/MergeFunc/ir-cmp-call1.ll @@ -0,0 +1,130 @@ +; RUN: opt -mergefunc -S < %s | FileCheck %s + +;; Make sure that two different Call Insts are not treated as equal. 
+ +target datalayout = "e-m:w-p:32:32-i64:64-f80:32-n8:16:32-S32" + +;; Calling convention difference + +; CHECK-LABEL: define void @f1 +; CHECK-NOT: call void @ +define void @f1() { + %1 = call coldcc i8 @dummy(i8 0) + %2 = call i8 @dummy(i8 0) + %3 = call i8 @dummy(i8 0) + %4 = call i8 @dummy(i8 0) + %5 = call i8 @dummy(i8 0) + ret void +} + +; CHECK-LABEL: define void @f2 +; CHECK-NOT: call void @ +define void @f2() { + %1 = call fastcc i8 @dummy(i8 0) + %2 = call i8 @dummy(i8 0) + %3 = call i8 @dummy(i8 0) + %4 = call i8 @dummy(i8 0) + %5 = call i8 @dummy(i8 0) + ret void +} + +;; Parameter Attributes difference + +; CHECK-LABEL: define void @f3 +; CHECK-NOT: call void @ +define void @f3() { + %1 = call i8 @dummy(i8 0) + %2 = call i8 @dummy(i8 inreg 0) + %3 = call i8 @dummy(i8 0) + %4 = call i8 @dummy(i8 0) + %5 = call i8 @dummy(i8 0) + ret void +} + +; CHECK-LABEL: define void @f4 +; CHECK-NOT: call void @ +define void @f4() { + %1 = call i8 @dummy(i8 0) + %2 = call i8 @dummy(i8 zeroext 0) + %3 = call i8 @dummy(i8 0) + %4 = call i8 @dummy(i8 0) + %5 = call i8 @dummy(i8 0) + ret void +} + +;; Operand Bundle Schema Difference + +; CHECK-LABEL: define void @f5 +; CHECK-NOT: call void @ +define void @f5() { + %1 = call i8 @dummy(i8 0) ["unknown"(i8 0)] + %2 = call i8 @dummy(i8 0) + %3 = call i8 @dummy(i8 0) + %4 = call i8 @dummy(i8 0) + %5 = call i8 @dummy(i8 0) + ret void +} + +; CHECK-LABEL: define void @f6 +; CHECK-NOT: call void @ +define void @f6() { + %1 = call i8 @dummy(i8 0) ["unknown"(i8 0, i8 1)] + %2 = call i8 @dummy(i8 0) + %3 = call i8 @dummy(i8 0) + %4 = call i8 @dummy(i8 0) + %5 = call i8 @dummy(i8 0) + ret void +} + +;; RangeMetaData Difference + +; CHECK-LABEL: define void @f7 +; CHECK-NOT: call void @ +define void @f7() { + %1 = call i8 @dummy(i8 0), !range !0 + %2 = call i8 @dummy(i8 0) + %3 = call i8 @dummy(i8 0) + %4 = call i8 @dummy(i8 0) + %5 = call i8 @dummy(i8 0) + ret void +} + +; CHECK-LABEL: define void @f8 +; CHECK-NOT: call void @ +define void @f8() { + %1 = call i8 @dummy(i8 0), !range !1 + %2 = call i8 @dummy(i8 0) + %3 = call i8 @dummy(i8 0) + %4 = call i8 @dummy(i8 0) + %5 = call i8 @dummy(i8 0) + ret void +} + +;; TailCallKind Difference + +; CHECK-LABEL: define void @f9 +; CHECK-NOT: call void @ +define void @f9() { + %1 = tail call i8 @dummy(i8 0) + %2 = call i8 @dummy(i8 0) + %3 = call i8 @dummy(i8 0) + %4 = call i8 @dummy(i8 0) + %5 = call i8 @dummy(i8 0) + ret void +} + +; CHECK-LABEL: define void @f10 +; CHECK-NOT: call void @ +define void @f10() { + %1 = notail call i8 @dummy(i8 0) + %2 = call i8 @dummy(i8 0) + %3 = call i8 @dummy(i8 0) + %4 = call i8 @dummy(i8 0) + %5 = call i8 @dummy(i8 0) + ret void +} + +!0 = !{i8 0, i8 5} +!1 = !{i8 1, i8 7} + +declare i8 @dummy(i8); \ No newline at end of file diff --git a/llvm/test/Transforms/MergeFunc/ir-cmp-call2.ll b/llvm/test/Transforms/MergeFunc/ir-cmp-call2.ll new file mode 100644 --- /dev/null +++ b/llvm/test/Transforms/MergeFunc/ir-cmp-call2.ll @@ -0,0 +1,30 @@ +; RUN: opt -mergefunc -S < %s | FileCheck %s + +;; Make sure that two same Call Insts are treated as equal. 
+ +target datalayout = "e-m:w-p:32:32-i64:64-f80:32-n8:16:32-S32" + +;; Calling convention, Parameter Attributes, OperandBundleSchema, RangeMetaData, TailCallKind are considered during comparison + +; CHECK-LABEL: define void @f1 +; CHECK-NOT: call void @ +define void @f1() { + %1 = tail call coldcc i8 @dummy(i8 inreg 0) ["some-bundle"(i8 1)], !range !0 + %2 = call i8 @dummy(i8 0) + %3 = call i8 @dummy(i8 0) + ret void +} + +; CHECK-LABEL: define void @f2 +; CHECK: call void @ +define void @f2() { + %1 = tail call coldcc i8 @dummy(i8 inreg 0) ["some-bundle"(i8 1)], !range !1 + %2 = call i8 @dummy(i8 0) + %3 = call i8 @dummy(i8 0) + ret void +} + +declare i8 @dummy(i8); + +!0 = !{i8 1, i8 5} +!1 = !{i8 1, i8 5} \ No newline at end of file diff --git a/llvm/test/Transforms/MergeFunc/ir-cmp-callbr1.ll b/llvm/test/Transforms/MergeFunc/ir-cmp-callbr1.ll new file mode 100644 --- /dev/null +++ b/llvm/test/Transforms/MergeFunc/ir-cmp-callbr1.ll @@ -0,0 +1,96 @@ +; RUN: opt -mergefunc -S < %s | FileCheck %s + +;; Make sure that two different CallBr Insts are not treated as equal. + +target datalayout = "e-m:w-p:32:32-i64:64-f80:32-n8:16:32-S32" + +;; Calling convention difference + +; CHECK-LABEL: define void @f1 +; CHECK-NOT: call void @ +define void @f1(i64 %arg, i64* %arg1, i64 %arg2, i32* %arg3) { +bb: + %tmp5 = or i64 %arg2, %arg + callbr fastcc void asm sideeffect "", "X,X"(i8* blockaddress(@f1, %bb2), i8* blockaddress(@f1, %bb3)) + to label %bb1 [label %bb2, label %bb3] + +bb1: + br label %bb2 + +bb2: + %tmp8 = trunc i64 %tmp5 to i32 + br label %bb3 + +bb3: + %tmp10 = trunc i64 %tmp5 to i32 + store i32 %tmp10, i32* %arg3 + ret void + +} + +; CHECK-LABEL: define void @f2 +; CHECK-NOT: call void @ +define void @f2(i64 %arg, i64* %arg1, i64 %arg2, i32* %arg3) { +bb: + %tmp5 = or i64 %arg2, %arg + callbr coldcc void asm sideeffect "", "X,X"(i8* blockaddress(@f2, %bb2), i8* blockaddress(@f2, %bb3)) + to label %bb1 [label %bb2, label %bb3] + +bb1: + br label %bb2 + +bb2: + %tmp8 = trunc i64 %tmp5 to i32 + br label %bb3 + +bb3: + %tmp10 = trunc i64 %tmp5 to i32 + store i32 %tmp10, i32* %arg3 + ret void + +} + +;; OperandBundle Difference + +; CHECK-LABEL: define void @f3 +; CHECK-NOT: call void @ +define void @f3(i64 %arg, i64* %arg1, i64 %arg2, i32* %arg3) { +bb: + %tmp5 = or i64 %arg2, %arg + callbr void asm sideeffect "", "X,X"(i8* blockaddress(@f3, %bb2), i8* blockaddress(@f3, %bb3)) ["some-bundle"(i32 0)] + to label %bb1 [label %bb2, label %bb3] + +bb1: + br label %bb2 + +bb2: + %tmp8 = trunc i64 %tmp5 to i32 + br label %bb3 + +bb3: + %tmp10 = trunc i64 %tmp5 to i32 + store i32 %tmp10, i32* %arg3 + ret void +} + +; CHECK-LABEL: define void @f4 +; CHECK-NOT: call void @ +define void @f4(i64 %arg, i64* %arg1, i64 %arg2, i32* %arg3) { +bb: + %tmp5 = or i64 %arg2, %arg + callbr void asm sideeffect "", "X,X"(i8* blockaddress(@f4, %bb2), i8* blockaddress(@f4, %bb3)) ["unknown"(i32 1)] + to label %bb1 [label %bb2, label %bb3] + +bb1: + br label %bb2 + +bb2: + %tmp8 = trunc i64 %tmp5 to i32 + br label %bb3 + +bb3: + %tmp10 = trunc i64 %tmp5 to i32 + store i32 %tmp10, i32* %arg3 + ret void +} + diff --git a/llvm/test/Transforms/MergeFunc/ir-cmp-callbr2.ll b/llvm/test/Transforms/MergeFunc/ir-cmp-callbr2.ll new file mode 100644 --- /dev/null +++ b/llvm/test/Transforms/MergeFunc/ir-cmp-callbr2.ll @@ -0,0 +1,51 @@ +; RUN: opt -mergefunc -S < %s | FileCheck %s + +;; Make sure that two different CallBr Insts are not treated as equal. 
diff --git a/llvm/test/Transforms/MergeFunc/ir-cmp-callbr2.ll b/llvm/test/Transforms/MergeFunc/ir-cmp-callbr2.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/Transforms/MergeFunc/ir-cmp-callbr2.ll
@@ -0,0 +1,51 @@
+; RUN: opt -mergefunc -S < %s | FileCheck %s
+
+;; Make sure that two same CallBr Insts are treated as equal.
+
+target datalayout = "e-m:w-p:32:32-i64:64-f80:32-n8:16:32-S32"
+
+;; Calling convention and operand bundles are considered during comparison
+
+; CHECK-LABEL: define void @f1
+; CHECK-NOT: call void @
+define void @f1(i64 %arg, i64* %arg1, i64 %arg2, i32* %arg3) {
+bb:
+  %tmp5 = or i64 %arg2, %arg
+  callbr fastcc void asm sideeffect "", "X,X"(i8* blockaddress(@f1, %bb2), i8* blockaddress(@f1, %bb3)) ["unknown"(i32 2)]
+          to label %bb1 [label %bb2, label %bb3]
+
+bb1:
+  br label %bb2
+
+bb2:
+  %tmp8 = trunc i64 %tmp5 to i32
+  br label %bb3
+
+bb3:
+  %tmp10 = trunc i64 %tmp5 to i32
+  store i32 %tmp10, i32* %arg3
+  ret void
+
+}
+
+; CHECK-LABEL: define void @f2
+; CHECK: call void @
+define void @f2(i64 %arg, i64* %arg1, i64 %arg2, i32* %arg3) {
+bb:
+  %tmp5 = or i64 %arg2, %arg
+  callbr fastcc void asm sideeffect "", "X,X"(i8* blockaddress(@f2, %bb2), i8* blockaddress(@f2, %bb3)) ["unknown"(i32 2)]
+          to label %bb1 [label %bb2, label %bb3]
+
+bb1:
+  br label %bb2
+
+bb2:
+  %tmp8 = trunc i64 %tmp5 to i32
+  br label %bb3
+
+bb3:
+  %tmp10 = trunc i64 %tmp5 to i32
+  store i32 %tmp10, i32* %arg3
+  ret void
+
+}
diff --git a/llvm/test/Transforms/MergeFunc/ir-cmp-cast.ll b/llvm/test/Transforms/MergeFunc/ir-cmp-cast.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/Transforms/MergeFunc/ir-cmp-cast.ll
@@ -0,0 +1,45 @@
+; RUN: opt -mergefunc -S < %s | FileCheck %s
+
+;; Make sure that two same Cast Insts are treated as equal.
+
+target datalayout = "e-m:w-p:32:32-i64:64-f80:32-n8:16:32-S32"
+
+;; Currently there is no special state being checked for comparison of Cast Instructions.
+
+; CHECK-LABEL: define void @f1
+; CHECK-NOT: call void @
+define void @f1(i32 %m, i32* %n, double %u, float %v) {
+  %1 = trunc i32 %m to i16
+  %2 = zext i32 %m to i64
+  %3 = sext i32 %m to i64
+  %4 = fptrunc double %u to float
+  %5 = fpext float %v to double
+  %6 = fptoui float %v to i32
+  %7 = fptosi double %u to i32
+  %8 = uitofp i32 %m to float
+  %9 = sitofp i32 %m to double
+  %10 = ptrtoint i32* %n to i8
+  %11 = inttoptr i32 %m to i8*
+  %12 = bitcast i32 %m to <1 x i32>
+  %13 = addrspacecast i32* %n to i32 addrspace(1)*
+  ret void
+}
+
+; CHECK-LABEL: define void @f2
+; CHECK: call void @
+define void @f2(i32 %m, i32* %n, double %u, float %v) {
+  %1 = trunc i32 %m to i16
+  %2 = zext i32 %m to i64
+  %3 = sext i32 %m to i64
+  %4 = fptrunc double %u to float
+  %5 = fpext float %v to double
+  %6 = fptoui float %v to i32
+  %7 = fptosi double %u to i32
+  %8 = uitofp i32 %m to float
+  %9 = sitofp i32 %m to double
+  %10 = ptrtoint i32* %n to i8
+  %11 = inttoptr i32 %m to i8*
+  %12 = bitcast i32 %m to <1 x i32>
+  %13 = addrspacecast i32* %n to i32 addrspace(1)*
+  ret void
+}
diff --git a/llvm/test/Transforms/MergeFunc/ir-cmp-catchret-cleanuppad-catchpad.ll b/llvm/test/Transforms/MergeFunc/ir-cmp-catchret-cleanuppad-catchpad.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/Transforms/MergeFunc/ir-cmp-catchret-cleanuppad-catchpad.ll
@@ -0,0 +1,56 @@
+; RUN: opt -mergefunc -S < %s | FileCheck %s
+
+;; Make sure that two same CatchRet, CleanupPad, CatchPad Insts are treated as equal.
+
+target datalayout = "e-m:w-p:32:32-i64:64-f80:32-n8:16:32-S32"
+
+; There is no special state being checked for CatchRet, CleanupPad, CatchPad instructions currently.
+ +; CHECK-LABEL: define i32 @f1 +; CHECK-NOT: call i32 @ +define i32 @f1() personality i8* bitcast (i32 (...)* @__CxxFrameHandler3 to i8*) { +entry: + invoke void @g() + to label %return unwind label %ehcleanup + +ehcleanup: + %0 = cleanuppad within none [] + cleanupret from %0 unwind label %catch.dispatch + +catch.dispatch: + %cs1 = catchswitch within none [label %catch] unwind to caller + +catch: + %1 = catchpad within %cs1 [i8* null, i32 u0x40, i8* null] + catchret from %1 to label %return + +return: + %retval.0 = phi i32 [ 1, %catch ], [ 0, %entry ] + ret i32 %retval.0 +} + +; CHECK-LABEL: define i32 @f2 +; CHECK: call i32 @ +define i32 @f2() personality i8* bitcast (i32 (...)* @__CxxFrameHandler3 to i8*) { +entry: + invoke void @g() + to label %return unwind label %ehcleanup + +ehcleanup: + %0 = cleanuppad within none [] + cleanupret from %0 unwind label %catch.dispatch + +catch.dispatch: + %cs1 = catchswitch within none [label %catch] unwind to caller + +catch: + %1 = catchpad within %cs1 [i8* null, i32 u0x40, i8* null] + catchret from %1 to label %return + +return: + %retval.0 = phi i32 [ 1, %catch ], [ 0, %entry ] + ret i32 %retval.0 +} + +declare void @g() +declare i32 @__CxxFrameHandler3(...) \ No newline at end of file diff --git a/llvm/test/Transforms/MergeFunc/ir-cmp-catchswitch.ll b/llvm/test/Transforms/MergeFunc/ir-cmp-catchswitch.ll new file mode 100644 --- /dev/null +++ b/llvm/test/Transforms/MergeFunc/ir-cmp-catchswitch.ll @@ -0,0 +1,72 @@ +; RUN: opt -mergefunc -S < %s | FileCheck %s + +;; Make sure that two different CatchSwitch Insts are not treated as equal. + +target datalayout = "e-m:w-p:32:32-i64:64-f80:32-n8:16:32-S32" + +; Unwinds to caller difference. + +; CHECK-LABEL: define void @f1 +; CHECK-NOT: call void @ +define void @f1() personality i8* bitcast (i32 (...)* @__CxxFrameHandler3 to i8*) { +entry: + invoke void @g() to label %invoke.cont unwind label %ehcleanup + +invoke.cont: ; preds = %entry + br label %try.cont + +ehcleanup: ; preds = %entry + %0 = cleanuppad within none [] + cleanupret from %0 unwind label %catch.dispatch + +catch.dispatch: ; preds = %ehcleanup + %cs1 = catchswitch within none [label %catch] unwind label %ehcleanup.1 + +catch: ; preds = %catch.dispatch + %cp2 = catchpad within %cs1 [i8* null, i32 u0x40, i8* null] + catchret from %cp2 to label %catchret.dest + +catchret.dest: ; preds = %catch + br label %try.cont + +try.cont: ; preds = %catchret.dest, %invoke.cont + ret void + +ehcleanup.1: + %cp3 = cleanuppad within none [] + cleanupret from %cp3 unwind to caller +} + +; CHECK-LABEL: define void @f2 +; CHECK-NOT: call void @ +define void @f2() personality i8* bitcast (i32 (...)* @__CxxFrameHandler3 to i8*) { +entry: + invoke void @g() to label %invoke.cont unwind label %ehcleanup + +invoke.cont: ; preds = %entry + br label %try.cont + +ehcleanup: ; preds = %entry + %0 = cleanuppad within none [] + cleanupret from %0 unwind label %catch.dispatch + +catch.dispatch: ; preds = %ehcleanup + %cs1 = catchswitch within none [label %catch] unwind to caller + +catch: ; preds = %catch.dispatch + %cp2 = catchpad within %cs1 [i8* null, i32 u0x40, i8* null] + catchret from %cp2 to label %catchret.dest + +catchret.dest: ; preds = %catch + br label %try.cont + +try.cont: ; preds = %catchret.dest, %invoke.cont + ret void + +ehcleanup.1: + %cp3 = cleanuppad within none [] + cleanupret from %cp3 unwind to caller +} + +declare void @g() +declare i32 @__CxxFrameHandler3(...) 
\ No newline at end of file diff --git a/llvm/test/Transforms/MergeFunc/ir-cmp-cleanupret1.ll b/llvm/test/Transforms/MergeFunc/ir-cmp-cleanupret1.ll new file mode 100644 --- /dev/null +++ b/llvm/test/Transforms/MergeFunc/ir-cmp-cleanupret1.ll @@ -0,0 +1,46 @@ +; RUN: opt -mergefunc -S < %s | FileCheck %s + +;; Make sure that two different CleanupRet Insts are not treated as equal. + +target datalayout = "e-m:w-p:32:32-i64:64-f80:32-n8:16:32-S32" + +; Unwinds to caller difference. + +; CHECK-LABEL: define void @f1 +; CHECK-NOT: call void @ +define void @f1() personality i8* bitcast (i32 (...)* @__CxxFrameHandler3 to i8*) { +entry: + invoke void @g() to label %invoke.cont unwind label %ehcleanup + +invoke.cont: ; preds = %entry + ret void + +ehcleanup: ; preds = %entry + %0 = cleanuppad within none [] + cleanupret from %0 unwind label %ehcleanup.1 + +ehcleanup.1: ; preds = %ehcleanup + %1 = cleanuppad within none [] + ret void +} + +; CHECK-LABEL: define void @f2 +; CHECK-NOT: call void @ +define void @f2() personality i8* bitcast (i32 (...)* @__CxxFrameHandler3 to i8*) { +entry: + invoke void @g() to label %invoke.cont unwind label %ehcleanup + +invoke.cont: ; preds = %entry + ret void + +ehcleanup: ; preds = %entry + %0 = cleanuppad within none [] + cleanupret from %0 unwind to caller + +ehcleanup.1: ; preds = %ehcleanup + %1 = cleanuppad within none [] + ret void +} + +declare void @g() +declare i32 @__CxxFrameHandler3(...) \ No newline at end of file diff --git a/llvm/test/Transforms/MergeFunc/ir-cmp-cleanupret2.ll b/llvm/test/Transforms/MergeFunc/ir-cmp-cleanupret2.ll new file mode 100644 --- /dev/null +++ b/llvm/test/Transforms/MergeFunc/ir-cmp-cleanupret2.ll @@ -0,0 +1,47 @@ +; RUN: opt -mergefunc -S < %s | FileCheck %s + +;; Make sure that two same CleanupRet Insts are treated as equal. + +target datalayout = "e-m:w-p:32:32-i64:64-f80:32-n8:16:32-S32" + +; Unwinds to caller is considered during comparison + +; CHECK-LABEL: define void @f1 +; CHECK-NOT: call void @ +define void @f1() personality i8* bitcast (i32 (...)* @__CxxFrameHandler3 to i8*) { +entry: + invoke void @g() to label %invoke.cont unwind label %ehcleanup + +invoke.cont: ; preds = %entry + ret void + +ehcleanup: ; preds = %entry + %0 = cleanuppad within none [] + cleanupret from %0 unwind label %ehcleanup.1 + +ehcleanup.1: ; preds = %ehcleanup + %1 = cleanuppad within none [] + cleanupret from %1 unwind to caller +} + +; CHECK-LABEL: define void @f2 +; CHECK: call void @ +define void @f2() personality i8* bitcast (i32 (...)* @__CxxFrameHandler3 to i8*) { +entry: + invoke void @g() to label %invoke.cont unwind label %ehcleanup + +invoke.cont: ; preds = %entry + ret void + +ehcleanup: ; preds = %entry + %0 = cleanuppad within none [] + cleanupret from %0 unwind label %ehcleanup.1 + +ehcleanup.1: ; preds = %ehcleanup + %1 = cleanuppad within none [] + cleanupret from %1 unwind to caller +} + + +declare void @g() +declare i32 @__CxxFrameHandler3(...) \ No newline at end of file diff --git a/llvm/test/Transforms/MergeFunc/ir-cmp-cmpXchg1.ll b/llvm/test/Transforms/MergeFunc/ir-cmp-cmpXchg1.ll new file mode 100644 --- /dev/null +++ b/llvm/test/Transforms/MergeFunc/ir-cmp-cmpXchg1.ll @@ -0,0 +1,105 @@ +; RUN: opt -mergefunc -S < %s | FileCheck %s + +;; Make sure that two different CmpXChg Insts are not treated as equal. 
+ +target datalayout = "e-m:w-p:32:32-i64:64-f80:32-n8:16:32-S32" + +;; Volatile difference + +; CHECK-LABEL: define void @f1 +; CHECK-NOT: call void @ +define void @f1(i32*) { + %val_success = cmpxchg volatile i32* %0, i32 0, i32 1 acq_rel monotonic + %value_loaded = extractvalue { i32, i1 } %val_success, 0 + %success = extractvalue { i32, i1 } %val_success, 1 + ret void +} + +; CHECK-LABEL: define void @f2 +; CHECK-NOT: call void @ +define void @f2(i32*) { + %val_success = cmpxchg i32* %0, i32 0, i32 1 acq_rel acquire + %value_loaded = extractvalue { i32, i1 } %val_success, 0 + %success = extractvalue { i32, i1 } %val_success, 1 + ret void +} + +;; Weak difference + +; CHECK-LABEL: define void @f3 +; CHECK-NOT: call void @ +define void @f3(i32*) { + %val_success = cmpxchg weak i32* %0, i32 1, i32 1 acq_rel monotonic + %value_loaded = extractvalue { i32, i1 } %val_success, 0 + %success = extractvalue { i32, i1 } %val_success, 1 + ret void +} + +; CHECK-LABEL: define void @f4 +; CHECK-NOT: call void @ +define void @f4(i32*) { + %val_success = cmpxchg i32* %0, i32 1, i32 1 acq_rel monotonic + %value_loaded = extractvalue { i32, i1 } %val_success, 0 + %success = extractvalue { i32, i1 } %val_success, 1 + ret void +} + +;; Success Ordering difference + +; CHECK-LABEL: define void @f5 +; CHECK-NOT: call void @ +define void @f5(i32*) { + %val_success = cmpxchg i32* %0, i32 0, i32 1 acq_rel monotonic + %value_loaded = extractvalue { i32, i1 } %val_success, 0 + %success = extractvalue { i32, i1 } %val_success, 1 + ret void +} + +; CHECK-LABEL: define void @f6 +; CHECK-NOT: call void @ +define void @f6(i32*) { + %val_success = cmpxchg i32* %0, i32 0, i32 1 acquire monotonic + %value_loaded = extractvalue { i32, i1 } %val_success, 0 + %success = extractvalue { i32, i1 } %val_success, 1 + ret void +} + +;; Failure Ordering difference + +; CHECK-LABEL: define i32 @f7 +; CHECK-NOT: call i32 @ +define i32 @f7(i32*) { + %val_success = cmpxchg i32* %0, i32 0, i32 1 acq_rel acquire + %value_loaded = extractvalue { i32, i1 } %val_success, 0 + %success = extractvalue { i32, i1 } %val_success, 1 + ret i32 0 +} + +; CHECK-LABEL: define i32 @f8 +; CHECK-NOT: call i32 @ +define i32 @f8(i32*) { + %val_success = cmpxchg i32* %0, i32 0, i32 1 acq_rel monotonic + %value_loaded = extractvalue { i32, i1 } %val_success, 0 + %success = extractvalue { i32, i1 } %val_success, 1 + ret i32 0 +} + +;; SyncScopeID difference + +; CHECK-LABEL: define void @f9 +; CHECK-NOT: call void @ +define void @f9(i32*) { + %val_success = cmpxchg i32* %0, i32 0, i32 1 syncscope("") acq_rel monotonic + %value_loaded = extractvalue { i32, i1 } %val_success, 0 + %success = extractvalue { i32, i1 } %val_success, 1 + ret void +} + +; CHECK-LABEL: define void @f10 +; CHECK-NOT: call void @ +define void @f10(i32*) { + %val_success = cmpxchg i32* %0, i32 0, i32 1 syncscope("singlethread") acq_rel monotonic + %value_loaded = extractvalue { i32, i1 } %val_success, 0 + %success = extractvalue { i32, i1 } %val_success, 1 + ret void +} diff --git a/llvm/test/Transforms/MergeFunc/ir-cmp-cmpXchg2.ll b/llvm/test/Transforms/MergeFunc/ir-cmp-cmpXchg2.ll new file mode 100644 --- /dev/null +++ b/llvm/test/Transforms/MergeFunc/ir-cmp-cmpXchg2.ll @@ -0,0 +1,25 @@ +; RUN: opt -mergefunc -S < %s | FileCheck %s + +;; Make sure that two same CmpXChg Insts are treated as equal. 
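The cmpxchg pairs above differ only in per-instruction bookkeeping, not in their operands, so the check they exercise is over those flags and orderings. A minimal sketch, assuming the same -1/0/+1 convention and a hypothetical helper name:

#include "llvm/IR/Instructions.h"

using namespace llvm;

template <typename T> static int cmpNumbers(T L, T R) {
  return L < R ? -1 : (R < L ? 1 : 0);
}

// Hypothetical sketch: the five properties the ir-cmp-cmpXchg* tests vary.
static int cmpCmpXchgState(const AtomicCmpXchgInst &L,
                           const AtomicCmpXchgInst &R) {
  if (int Res = cmpNumbers(L.isVolatile(), R.isVolatile()))
    return Res;
  if (int Res = cmpNumbers(L.isWeak(), R.isWeak()))
    return Res;
  if (int Res = cmpNumbers(L.getSuccessOrdering(), R.getSuccessOrdering()))
    return Res;
  if (int Res = cmpNumbers(L.getFailureOrdering(), R.getFailureOrdering()))
    return Res;
  return cmpNumbers(L.getSyncScopeID(), R.getSyncScopeID());
}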
+ +target datalayout = "e-m:w-p:32:32-i64:64-f80:32-n8:16:32-S32" + +;; Volatility, isWeak, Success Ordering, Failure Ordering and SyncScopeID are being checked + +; CHECK-LABEL: define void @f1 +; CHECK-NOT: call void @ +define void @f1(i32*) { + %val_success = cmpxchg weak i32* %0, i32 0, i32 1 syncscope("") acq_rel monotonic + %value_loaded = extractvalue { i32, i1 } %val_success, 0 + %success = extractvalue { i32, i1 } %val_success, 1 + ret void +} + +; CHECK-LABEL: define void @f2 +; CHECK: call void @ +define void @f2(i32*) { + %val_success = cmpxchg weak i32* %0, i32 0, i32 1 syncscope("") acq_rel monotonic + %value_loaded = extractvalue { i32, i1 } %val_success, 0 + %success = extractvalue { i32, i1 } %val_success, 1 + ret void +} diff --git a/llvm/test/Transforms/MergeFunc/ir-cmp-cmpinst.ll b/llvm/test/Transforms/MergeFunc/ir-cmp-cmpinst.ll new file mode 100644 --- /dev/null +++ b/llvm/test/Transforms/MergeFunc/ir-cmp-cmpinst.ll @@ -0,0 +1,69 @@ +; RUN: opt -mergefunc -S < %s | FileCheck %s + +;; Make sure that two same Cmp Insts are treated as equal. + +target datalayout = "e-m:w-p:32:32-i64:64-f80:32-n8:16:32-S32" + +;; The predicate being considered for comparison of Cmp Instructions. + +; CHECK-LABEL: define void @f1 +; CHECK-NOT: call void @ +define void @f1(i32 %m, i32 %n, float %u, float %v) { + %1 = icmp eq i32 %m, %n + %2 = icmp ne i32 %m, %n + %3 = icmp ugt i32 %m, %n + %4 = icmp ult i32 %m, %n + %5 = icmp ule i32 %m, %n + %6 = icmp uge i32 %m, %n + %7 = icmp sgt i32 %m, %n + %8 = icmp sge i32 %m, %n + %9 = icmp slt i32 %m, %n + %10 = icmp sle i32 %m, %n + %11 = fcmp false float %u, %v + %12 = fcmp oeq float %u, %v + %13 = fcmp ogt float %u, %v + %14 = fcmp olt float %u, %v + %15 = fcmp ole float %u, %v + %16 = fcmp one float %u, %v + %17 = fcmp ord float %u, %v + %18 = fcmp ueq float %u, %v + %19 = fcmp ugt float %u, %v + %20 = fcmp uge float %u, %v + %21 = fcmp ult float %u, %v + %22 = fcmp ule float %u, %v + %23 = fcmp une float %u, %v + %24 = fcmp uno float %u, %v + %25 = fcmp true float %u, %v + ret void +} + +; CHECK-LABEL: define void @f2 +; CHECK: call void @ +define void @f2(i32 %m, i32 %n, float %u, float %v) { + %1 = icmp eq i32 %m, %n + %2 = icmp ne i32 %m, %n + %3 = icmp ugt i32 %m, %n + %4 = icmp ult i32 %m, %n + %5 = icmp ule i32 %m, %n + %6 = icmp uge i32 %m, %n + %7 = icmp sgt i32 %m, %n + %8 = icmp sge i32 %m, %n + %9 = icmp slt i32 %m, %n + %10 = icmp sle i32 %m, %n + %11 = fcmp false float %u, %v + %12 = fcmp oeq float %u, %v + %13 = fcmp ogt float %u, %v + %14 = fcmp olt float %u, %v + %15 = fcmp ole float %u, %v + %16 = fcmp one float %u, %v + %17 = fcmp ord float %u, %v + %18 = fcmp ueq float %u, %v + %19 = fcmp ugt float %u, %v + %20 = fcmp uge float %u, %v + %21 = fcmp ult float %u, %v + %22 = fcmp ule float %u, %v + %23 = fcmp une float %u, %v + %24 = fcmp uno float %u, %v + %25 = fcmp true float %u, %v + ret void +} diff --git a/llvm/test/Transforms/MergeFunc/ir-cmp-extractelm.ll b/llvm/test/Transforms/MergeFunc/ir-cmp-extractelm.ll new file mode 100644 --- /dev/null +++ b/llvm/test/Transforms/MergeFunc/ir-cmp-extractelm.ll @@ -0,0 +1,28 @@ +; RUN: opt -mergefunc -S < %s | FileCheck %s + +;; Make sure that two same ExtractElement insts are treated as equal. + +target datalayout = "e-m:w-p:32:32-i64:64-f80:32-n8:16:32-S32" + +; Currently there is no special state being compared for ExtractElement Instruction. 
+ +; CHECK-LABEL: define i32 @f1 +; CHECK-NOT: call i32 @ +define i32 @f1(<4 x float> %vec) { + %i0 = extractelement <4 x float> %vec, i32 0 + %i1 = extractelement <4 x float> %vec, i32 1 + %i2 = extractelement <4 x float> %vec, i32 2 + %i3 = extractelement <4 x float> %vec, i32 3 + ret i32 0 +} + +; CHECK-LABEL: define i32 @f2 +; CHECK: call i32 @ +define i32 @f2(<4 x float> %vec) { + %i0 = extractelement <4 x float> %vec, i32 0 + %i1 = extractelement <4 x float> %vec, i32 1 + %i2 = extractelement <4 x float> %vec, i32 2 + %i3 = extractelement <4 x float> %vec, i32 3 + ret i32 0 +} + diff --git a/llvm/test/Transforms/MergeFunc/ir-cmp-extractval1.ll b/llvm/test/Transforms/MergeFunc/ir-cmp-extractval1.ll new file mode 100644 --- /dev/null +++ b/llvm/test/Transforms/MergeFunc/ir-cmp-extractval1.ll @@ -0,0 +1,45 @@ +; RUN: opt -mergefunc -S < %s | FileCheck %s + +;; Make sure that two different ExtractValue Insts are not treated as equal. + +target datalayout = "e-m:w-p:32:32-i64:64-f80:32-n8:16:32-S32" + +; Index Size difference. + +; CHECK-LABEL: define void @f1 +; CHECK-NOT: call void @ +define void @f1({i8, i32, float, {i8, i16}} %agg) { + %1 = extractvalue {i8, i32, float, {i8, i16}} %agg, 0 + %2 = extractvalue {i8, i32, float, {i8, i16}} %agg, 1 + %3 = extractvalue {i8, i32, float, {i8, i16}} %agg, 2 + %4 = extractvalue {i8, i32, float, {i8, i16}} %agg, 3 + ret void +} + +; CHECK-LABEL: define void @f2 +; CHECK-NOT: call void @ +define void @f2({i8, i32, float, {i8, i16}} %agg) { + %1 = extractvalue {i8, i32, float, {i8, i16}} %agg, 0 + %2 = extractvalue {i8, i32, float, {i8, i16}} %agg, 1 + %3 = extractvalue {i8, i32, float, {i8, i16}} %agg, 2 + %4 = extractvalue {i8, i32, float, {i8, i16}} %agg, 3, 1 + ret void +} + +; Indices difference. + +; CHECK-LABEL: define void @f3 +; CHECK-NOT: call void @ +define void @f3({i8, i32, float, {i8, i16}} %agg) { + %1 = extractvalue {i8, i32, float, {i8, i16}} %agg, 0 + %2 = extractvalue {i8, i32, float, {i8, i16}} %agg, 3, 0 + ret void +} + +; CHECK-LABEL: define void @f4 +; CHECK-NOT: call void @ +define void @f4({i8, i32, float, {i8, i16}} %agg) { + %1 = extractvalue {i8, i32, float, {i8, i16}} %agg, 2 + %2 = extractvalue {i8, i32, float, {i8, i16}} %agg, 3, 1 + ret void +} diff --git a/llvm/test/Transforms/MergeFunc/ir-cmp-extractval2.ll b/llvm/test/Transforms/MergeFunc/ir-cmp-extractval2.ll new file mode 100644 --- /dev/null +++ b/llvm/test/Transforms/MergeFunc/ir-cmp-extractval2.ll @@ -0,0 +1,29 @@ +; RUN: opt -mergefunc -S < %s | FileCheck %s + +;; Make sure that two same ExtractValue Insts are treated as equal. 
+ +target datalayout = "e-m:w-p:32:32-i64:64-f80:32-n8:16:32-S32" + +; Indices are considered during comparison + +; CHECK-LABEL: define void @f1 +; CHECK-NOT: call void @ +define void @f1({i8, i32, float, {i8, i16}} %agg) { + %1 = extractvalue {i8, i32, float, {i8, i16}} %agg, 0 + %2 = extractvalue {i8, i32, float, {i8, i16}} %agg, 1 + %3 = extractvalue {i8, i32, float, {i8, i16}} %agg, 2 + %4 = extractvalue {i8, i32, float, {i8, i16}} %agg, 3, 1 + ret void +} + +; CHECK-LABEL: define void @f2 +; CHECK: call void @ +define void @f2({i8, i32, float, {i8, i16}} %agg) { + %1 = extractvalue {i8, i32, float, {i8, i16}} %agg, 0 + %2 = extractvalue {i8, i32, float, {i8, i16}} %agg, 1 + %3 = extractvalue {i8, i32, float, {i8, i16}} %agg, 2 + %4 = extractvalue {i8, i32, float, {i8, i16}} %agg, 3, 1 + ret void +} + + diff --git a/llvm/test/Transforms/MergeFunc/ir-cmp-fence1.ll b/llvm/test/Transforms/MergeFunc/ir-cmp-fence1.ll new file mode 100644 --- /dev/null +++ b/llvm/test/Transforms/MergeFunc/ir-cmp-fence1.ll @@ -0,0 +1,69 @@ +; RUN: opt -mergefunc -S < %s | FileCheck %s + +;; Make sure that two different Fence Insts are not treated as equal. + +target datalayout = "e-m:w-p:32:32-i64:64-f80:32-n8:16:32-S32" + +;; Ordering difference + +; CHECK-LABEL: define void @f1 +; CHECK-NOT: call void @ +define void @f1(i64 %n) { +entry: + br label %loop +loop: + %iv = phi i64 [0, %entry], [%iv.next, %loop] + fence release + %iv.next = add i64 %iv, 1 + %test = icmp slt i64 %iv, %n + br i1 %test, label %loop, label %exit +exit: + ret void +} + +; CHECK-LABEL: define void @f2 +; CHECK-NOT: call void @ +define void @f2(i64 %n) { +entry: + br label %loop +loop: + %iv = phi i64 [0, %entry], [%iv.next, %loop] + fence acquire + %iv.next = add i64 %iv, 1 + %test = icmp slt i64 %iv, %n + br i1 %test, label %loop, label %exit +exit: + ret void +} + +;; SyncScopeID difference + +; CHECK-LABEL: define void @f3 +; CHECK-NOT: call void @ +define void @f3(i64 %n) { +entry: + br label %loop +loop: + %iv = phi i64 [0, %entry], [%iv.next, %loop] + fence syncscope("singlethread") release + %iv.next = add i64 %iv, 1 + %test = icmp slt i64 %iv, %n + br i1 %test, label %loop, label %exit +exit: + ret void +} + +; CHECK-LABEL: define void @f4 +; CHECK-NOT: call void @ +define void @f4(i64 %n) { +entry: + br label %loop +loop: + %iv = phi i64 [0, %entry], [%iv.next, %loop] + fence syncscope("") release + %iv.next = add i64 %iv, 1 + %test = icmp slt i64 %iv, %n + br i1 %test, label %loop, label %exit +exit: + ret void +} diff --git a/llvm/test/Transforms/MergeFunc/ir-cmp-fence2.ll b/llvm/test/Transforms/MergeFunc/ir-cmp-fence2.ll new file mode 100644 --- /dev/null +++ b/llvm/test/Transforms/MergeFunc/ir-cmp-fence2.ll @@ -0,0 +1,37 @@ +; RUN: opt -mergefunc -S < %s | FileCheck %s + +;; Make sure that two same Fence Insts are treated as equal. 
+ +target datalayout = "e-m:w-p:32:32-i64:64-f80:32-n8:16:32-S32" + +;; Ordering and SyncScope are considered during comparison + +; CHECK-LABEL: define void @f1 +; CHECK-NOT: call void @ +define void @f1(i64 %n) { +entry: + br label %loop +loop: + %iv = phi i64 [0, %entry], [%iv.next, %loop] + fence syncscope("") release + %iv.next = add i64 %iv, 1 + %test = icmp slt i64 %iv, %n + br i1 %test, label %loop, label %exit +exit: + ret void +} + +; CHECK-LABEL: define void @f2 +; CHECK: call void @ +define void @f2(i64 %n) { +entry: + br label %loop +loop: + %iv = phi i64 [0, %entry], [%iv.next, %loop] + fence syncscope("") release + %iv.next = add i64 %iv, 1 + %test = icmp slt i64 %iv, %n + br i1 %test, label %loop, label %exit +exit: + ret void +} diff --git a/llvm/test/Transforms/MergeFunc/ir-cmp-fneg.ll b/llvm/test/Transforms/MergeFunc/ir-cmp-fneg.ll new file mode 100644 --- /dev/null +++ b/llvm/test/Transforms/MergeFunc/ir-cmp-fneg.ll @@ -0,0 +1,25 @@ +; RUN: opt -mergefunc -S < %s | FileCheck %s + +;; Make sure that two same FNeg are treated as equal. + +target datalayout = "e-m:w-p:32:32-i64:64-f80:32-n8:16:32-S32" + +;; Currently there is no special state being checked for comparison of Fneg Instructions. + +; CHECK-LABEL: define void @f1 +; CHECK-NOT: call void @ +define void @f1(float %v, <4 x float> %u) { + %1 = fneg float %v + %2 = fneg float %v + %3 = fneg <4 x float> %u + ret void +} + +; CHECK-LABEL: define void @f2 +; CHECK: call void @ +define void @f2(float %v, <4 x float> %u) { + %1 = fneg float %v + %2 = fneg float %v + %3 = fneg <4 x float> %u + ret void +} diff --git a/llvm/test/Transforms/MergeFunc/ir-cmp-freeze.ll b/llvm/test/Transforms/MergeFunc/ir-cmp-freeze.ll new file mode 100644 --- /dev/null +++ b/llvm/test/Transforms/MergeFunc/ir-cmp-freeze.ll @@ -0,0 +1,25 @@ +; RUN: opt -mergefunc -S < %s | FileCheck %s + +;; Make sure that two same Freeze are treated as equal. + +target datalayout = "e-m:w-p:32:32-i64:64-f80:32-n8:16:32-S32" + +;; Currently there is no special state being checked for comparison of Freeze Instructions. + +; CHECK-LABEL: define void @f1 +; CHECK-NOT: call void @ +define void @f1(i32 %u) { + %1 = freeze i32 %u + %2 = add nsw i32 %u, %u + %3 = freeze i32 %2 + ret void +} + +; CHECK-LABEL: define void @f2 +; CHECK: call void @ +define void @f2(i32 %u) { + %1 = freeze i32 %u + %2 = add nsw i32 %u, %u + %3 = freeze i32 %2 + ret void +} diff --git a/llvm/test/Transforms/MergeFunc/ir-cmp-indirectbr.ll b/llvm/test/Transforms/MergeFunc/ir-cmp-indirectbr.ll new file mode 100644 --- /dev/null +++ b/llvm/test/Transforms/MergeFunc/ir-cmp-indirectbr.ll @@ -0,0 +1,57 @@ +; RUN: opt -mergefunc -S < %s | FileCheck %s + +;; Make sure that two same IndirectBr Insts are treated as equal. + +target datalayout = "e-m:w-p:32:32-i64:64-f80:32-n8:16:32-S32" + +;; Currently there is no special state being checked for comparison of IndirectBr Instructions. 
+ +; CHECK-LABEL: define i32 @f1 +; CHECK-NOT: call i32 @ +define i32 @f1(i32 %val) { +entry: + %c = icmp eq i32 %val, 0 + br i1 %c, label %bb1, label %bb2 + +bb1: + br label %branch.block + + +bb2: + br label %branch.block + +branch.block: + %addr = phi i8* [blockaddress(@f1, %target1), %bb1], [blockaddress(@f1, %target2), %bb2] + indirectbr i8* %addr, [label %target1, label %target2] + +target1: + br label %target2 + +target2: + ret i32 0 +} + +; CHECK-LABEL: define i32 @f2 +; CHECK: call i32 @ +define i32 @f2(i32 %val) { +entry: + %c = icmp eq i32 %val, 0 + br i1 %c, label %bb1, label %bb2 + +bb1: + br label %branch.block + + +bb2: + br label %branch.block + +branch.block: + %addr = phi i8* [blockaddress(@f2, %target1), %bb1], [blockaddress(@f2, %target2), %bb2] + indirectbr i8* %addr, [label %target1, label %target2] + +target1: + br label %target2 + +target2: + ret i32 0 +} diff --git a/llvm/test/Transforms/MergeFunc/ir-cmp-insertelm.ll b/llvm/test/Transforms/MergeFunc/ir-cmp-insertelm.ll new file mode 100644 --- /dev/null +++ b/llvm/test/Transforms/MergeFunc/ir-cmp-insertelm.ll @@ -0,0 +1,28 @@ +; RUN: opt -mergefunc -S < %s | FileCheck %s + +;; Make sure that two same InsertElement insts are treated as equal. + +target datalayout = "e-m:w-p:32:32-i64:64-f80:32-n8:16:32-S32" + +; Currently there is no special state being compared for InsertElement Instruction. + +; CHECK-LABEL: define i32 @f1 +; CHECK-NOT: call i32 @ +define i32 @f1(<4 x i32> %vec) { + %i0 = insertelement <4 x i32> %vec, i32 100, i32 0 + %i1 = insertelement <4 x i32> %vec, i32 101, i32 1 + %i2 = insertelement <4 x i32> %vec, i32 102, i32 2 + %i3 = insertelement <4 x i32> %vec, i32 103, i32 3 + ret i32 0 +} + +; CHECK-LABEL: define i32 @f2 +; CHECK: call i32 @ +define i32 @f2(<4 x i32> %vec) { + %i0 = insertelement <4 x i32> %vec, i32 100, i32 0 + %i1 = insertelement <4 x i32> %vec, i32 101, i32 1 + %i2 = insertelement <4 x i32> %vec, i32 102, i32 2 + %i3 = insertelement <4 x i32> %vec, i32 103, i32 3 + ret i32 0 +} + diff --git a/llvm/test/Transforms/MergeFunc/ir-cmp-insertval1.ll b/llvm/test/Transforms/MergeFunc/ir-cmp-insertval1.ll new file mode 100644 --- /dev/null +++ b/llvm/test/Transforms/MergeFunc/ir-cmp-insertval1.ll @@ -0,0 +1,27 @@ +; RUN: opt -mergefunc -S < %s | FileCheck %s + +;; Make sure that two different InsertValue Insts are not treated as equal. + +target datalayout = "e-m:w-p:32:32-i64:64-f80:32-n8:16:32-S32" + +; Indices difference. 
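The extractvalue tests earlier and the insertvalue tests that follow both come down to the index list attached to the instruction: first its length, then the individual indices. A sketch of that comparison (illustrative helper name and ordering convention):

#include "llvm/ADT/ArrayRef.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

template <typename T> static int cmpNumbers(T L, T R) {
  return L < R ? -1 : (R < L ? 1 : 0);
}

// Hypothetical sketch: order by the number of indices, then by each index.
static int cmpAggregateIndices(ArrayRef<unsigned> L, ArrayRef<unsigned> R) {
  if (int Res = cmpNumbers(L.size(), R.size()))
    return Res;
  for (unsigned I = 0, E = L.size(); I != E; ++I)
    if (int Res = cmpNumbers(L[I], R[I]))
      return Res;
  return 0;
}

// Usage would be along the lines of
//   cmpAggregateIndices(LHS->getIndices(), RHS->getIndices())
// for a pair of ExtractValueInst or InsertValueInst.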
+ +; CHECK-LABEL: define void @f1 +; CHECK-NOT: call void @ +define void @f1({i8, i32, float, {i8, i16}} %agg) { + %1 = insertvalue {i8, i32, float, {i8, i16}} %agg, i8 1, 0 + %2 = insertvalue {i8, i32, float, {i8, i16}} %agg, i32 2, 1 + %3 = insertvalue {i8, i32, float, {i8, i16}} %agg, float 5.0, 2 + %4 = insertvalue {i8, i32, float, {i8, i16}} %agg, i8 2, 3, 0 + ret void +} + +; CHECK-LABEL: define void @f2 +; CHECK-NOT: call void @ +define void @f2({i8, i32, float, {i8, i16}} %agg) { + %1 = insertvalue {i8, i32, float, {i8, i16}} %agg, i8 1, 0 + %2 = insertvalue {i8, i32, float, {i8, i16}} %agg, i32 2, 1 + %3 = insertvalue {i8, i32, float, {i8, i16}} %agg, float 5.0, 2 + %4 = insertvalue {i8, i32, float, {i8, i16}} %agg, i16 2, 3, 1 + ret void +} diff --git a/llvm/test/Transforms/MergeFunc/ir-cmp-insertval2.ll b/llvm/test/Transforms/MergeFunc/ir-cmp-insertval2.ll new file mode 100644 --- /dev/null +++ b/llvm/test/Transforms/MergeFunc/ir-cmp-insertval2.ll @@ -0,0 +1,29 @@ +; RUN: opt -mergefunc -S < %s | FileCheck %s + +;; Make sure that two same InsertValue Insts are treated as equal. + +target datalayout = "e-m:w-p:32:32-i64:64-f80:32-n8:16:32-S32" + +; Indices are considered during comparison + +; CHECK-LABEL: define void @f1 +; CHECK-NOT: call void @ +define void @f1({i8, i32, float, {i8, i16}} %agg) { + %1 = insertvalue {i8, i32, float, {i8, i16}} %agg, i8 1, 0 + %2 = insertvalue {i8, i32, float, {i8, i16}} %agg, i32 2, 1 + %3 = insertvalue {i8, i32, float, {i8, i16}} %agg, float 5.0, 2 + %4 = insertvalue {i8, i32, float, {i8, i16}} %agg, i8 2, 3, 0 + ret void +} + +; CHECK-LABEL: define void @f2 +; CHECK: call void @ +define void @f2({i8, i32, float, {i8, i16}} %agg) { + %1 = insertvalue {i8, i32, float, {i8, i16}} %agg, i8 1, 0 + %2 = insertvalue {i8, i32, float, {i8, i16}} %agg, i32 2, 1 + %3 = insertvalue {i8, i32, float, {i8, i16}} %agg, float 5.0, 2 + %4 = insertvalue {i8, i32, float, {i8, i16}} %agg, i8 2, 3, 0 + ret void +} + + diff --git a/llvm/test/Transforms/MergeFunc/ir-cmp-invoke1.ll b/llvm/test/Transforms/MergeFunc/ir-cmp-invoke1.ll new file mode 100644 --- /dev/null +++ b/llvm/test/Transforms/MergeFunc/ir-cmp-invoke1.ll @@ -0,0 +1,122 @@ +; RUN: opt -mergefunc -S < %s | FileCheck %s + +;; Make sure that two different Invoke Insts are not treated as equal. 
+ +target datalayout = "e-m:w-p:32:32-i64:64-f80:32-n8:16:32-S32" + +;; Calling convention difference + +; CHECK-LABEL: define i8 @f1 +; CHECK-NOT: call i8 @ +define i8 @f1() personality i8* undef { + %out = invoke fastcc i8 @dummy(i8 0) to label %next unwind label %lpad + +next: + ret i8 %out + +lpad: + %pad = landingpad { i8*, i32 } cleanup + resume { i8*, i32 } zeroinitializer +} + +; CHECK-LABEL: define i8 @f2 +; CHECK-NOT: call i8 @ +define i8 @f2() personality i8* undef { + %out = invoke coldcc i8 @dummy(i8 0) to label %next unwind label %lpad + +next: + ret i8 %out + +lpad: + %pad = landingpad { i8*, i32 } cleanup + resume { i8*, i32 } zeroinitializer +} + +;; Parameter Attributes difference + +; CHECK-LABEL: define i8 @f3 +; CHECK-NOT: call i8 @ +define i8 @f3() personality i8* undef { + %out = invoke i8 @dummy(i8 zeroext 0) to label %next unwind label %lpad + +next: + ret i8 %out + +lpad: + %pad = landingpad { i8*, i32 } cleanup + resume { i8*, i32 } zeroinitializer +} + +; CHECK-LABEL: define i8 @f4 +; CHECK-NOT: call i8 @ +define i8 @f4() personality i8* undef { + %out = invoke i8 @dummy(i8 inreg 0) to label %next unwind label %lpad + +next: + ret i8 %out + +lpad: + %pad = landingpad { i8*, i32 } cleanup + resume { i8*, i32 } zeroinitializer +} + +;; RangeMetaData Difference + +; CHECK-LABEL: define i8 @f5 +; CHECK-NOT: call i8 @ +define i8 @f5() personality i8* undef { + %out = invoke i8 @dummy(i8 0) to label %next unwind label %lpad, !range !0 + +next: + ret i8 %out + +lpad: + %pad = landingpad { i8*, i32 } cleanup + resume { i8*, i32 } zeroinitializer +} + +; CHECK-LABEL: define i8 @f6 +; CHECK-NOT: call i8 @ +define i8 @f6() personality i8* undef { + %out = invoke i8 @dummy(i8 0) to label %next unwind label %lpad, !range !1 + +next: + ret i8 %out + +lpad: + %pad = landingpad { i8*, i32 } cleanup + resume { i8*, i32 } zeroinitializer +} + +;; OperandBundleSchema Difference + +; CHECK-LABEL: define i8 @f7 +; CHECK-NOT: call i8 @ +define i8 @f7() personality i8* undef { + %out = invoke i8 @dummy(i8 0) ["some-bundle"(i8 1)] to label %next unwind label %lpad + +next: + ret i8 %out + +lpad: + %pad = landingpad { i8*, i32 } cleanup + resume { i8*, i32 } zeroinitializer +} + +; CHECK-LABEL: define i8 @f8 +; CHECK-NOT: call i8 @ +define i8 @f8() personality i8* undef { + %out = invoke i8 @dummy(i8 0) ["some-other-bundle"(i8 1)] to label %next unwind label %lpad + +next: + ret i8 %out + +lpad: + %pad = landingpad { i8*, i32 } cleanup + resume { i8*, i32 } zeroinitializer +} + +!0 = !{i8 0, i8 5} +!1 = !{i8 1, i8 7} + +declare i8 @dummy(i8); diff --git a/llvm/test/Transforms/MergeFunc/ir-cmp-invoke2.ll b/llvm/test/Transforms/MergeFunc/ir-cmp-invoke2.ll new file mode 100644 --- /dev/null +++ b/llvm/test/Transforms/MergeFunc/ir-cmp-invoke2.ll @@ -0,0 +1,38 @@ +; RUN: opt -mergefunc -S < %s | FileCheck %s + +;; Make sure that two same Invoke Insts are treated as equal. 
+
+target datalayout = "e-m:w-p:32:32-i64:64-f80:32-n8:16:32-S32"
+
+;; Calling convention, Parameter Attributes, RangeMetaData, OperandBundleSchema are considered during comparison
+
+; CHECK-LABEL: define i8 @f1
+; CHECK-NOT: call i8 @
+define i8 @f1() personality i8* undef {
+  %out = invoke fastcc i8 @dummy(i8 inreg 0) ["some-bundle"(i8 1)] to label %next unwind label %lpad, !range !0
+
+next:
+  ret i8 %out
+
+lpad:
+  %pad = landingpad { i8*, i32 } cleanup
+  resume { i8*, i32 } zeroinitializer
+}
+
+; CHECK-LABEL: define i8 @f2
+; CHECK: call i8 @
+define i8 @f2() personality i8* undef {
+  %out = invoke fastcc i8 @dummy(i8 inreg 0) ["some-bundle"(i8 1)] to label %next unwind label %lpad, !range !1
+
+next:
+  ret i8 %out
+
+lpad:
+  %pad = landingpad { i8*, i32 } cleanup
+  resume { i8*, i32 } zeroinitializer
+}
+
+!0 = !{i8 0, i8 5}
+!1 = !{i8 0, i8 5}
+
+declare i8 @dummy(i8);
diff --git a/llvm/test/Transforms/MergeFunc/ir-cmp-load1.ll b/llvm/test/Transforms/MergeFunc/ir-cmp-load1.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/Transforms/MergeFunc/ir-cmp-load1.ll
@@ -0,0 +1,108 @@
+; RUN: opt -mergefunc -S < %s | FileCheck %s
+
+;; Make sure that two different load Insts are not treated as equal.
+
+target datalayout = "e-m:w-p:32:32-i64:64-f80:32-n8:16:32-S32"
+
+;; Volatile difference
+
+; CHECK-LABEL: define void @load1
+; CHECK-NOT: call void @
+define void @load1() {
+  %ptr = alloca i32, align 4
+  store i32 0, i32* %ptr
+  %val = load volatile i32, i32* %ptr
+  ret void
+}
+
+; CHECK-LABEL: define void @load2
+; CHECK-NOT: call void @
+define void @load2() {
+  %ptr = alloca i32, align 4
+  store i32 0, i32* %ptr
+  %val = load i32, i32* %ptr
+  ret void
+}
+
+;; Alignment difference
+
+; CHECK-LABEL: define void @load3
+; CHECK-NOT: call void @
+define void @load3() {
+  %ptr = alloca i64, align 4
+  store i64 0, i64* %ptr
+  %val = load i64, i64* %ptr, align 4
+  ret void
+}
+
+; CHECK-LABEL: define void @load4
+; CHECK-NOT: call void @
+define void @load4() {
+  %ptr = alloca i64, align 4
+  store i64 0, i64* %ptr
+  %val = load i64, i64* %ptr, align 8
+  ret void
+}
+
+;; Ordering difference
+
+; CHECK-LABEL: define void @load5
+; CHECK-NOT: call void @
+define void @load5() {
+  %ptr = alloca i64, align 4
+  store i64 0, i64* %ptr
+  %val = load atomic i64, i64* %ptr unordered, align 4
+  ret void
+}
+
+; CHECK-LABEL: define void @load6
+; CHECK-NOT: call void @
+define void @load6() {
+  %ptr = alloca i64, align 4
+  store i64 0, i64* %ptr
+  %val = load atomic i64, i64* %ptr acquire, align 4
+  ret void
+}
+
+;; MetaData difference
+
+; CHECK-LABEL: define void @load7
+; CHECK-NOT: call void @
+define void @load7() {
+  %ptr = alloca i8, align 1
+  store i8 0, i8* %ptr
+  %val = load i8, i8* %ptr, align 1, !range !0
+  ret void
+}
+
+; CHECK-LABEL: define void @load8
+; CHECK-NOT: call void @
+define void @load8() {
+  %ptr = alloca i8, align 1
+  store i8 0, i8* %ptr
+  %val = load i8, i8* %ptr, align 1, !range !1
+  ret void
+}
+
+!0 = !{ i8 0, i8 2}
+!1 = !{ i8 0, i8 4}
+
+;; SyncScopeID difference
+
+; CHECK-LABEL: define void @load9
+; CHECK-NOT: call void @
+define void @load9() {
+  %ptr = alloca i64, align 4
+  store i64 0, i64* %ptr
+  %val = load atomic i64, i64* %ptr syncscope("singlethread") unordered, align 4
+  ret void
+}
+
+; CHECK-LABEL: define void @load10
+; CHECK-NOT: call void @
+define void @load10() {
+  %ptr = alloca i64, align 4
+  store i64 0, i64* %ptr
+  %val = load atomic i64, i64* %ptr syncscope("") unordered, align 4
+  ret void
+}
diff --git
a/llvm/test/Transforms/MergeFunc/ir-cmp-load2.ll b/llvm/test/Transforms/MergeFunc/ir-cmp-load2.ll new file mode 100644 --- /dev/null +++ b/llvm/test/Transforms/MergeFunc/ir-cmp-load2.ll @@ -0,0 +1,31 @@ +; RUN: opt -mergefunc -S < %s | FileCheck %s + +;; Make sure that two same load Insts are treated as equal. + +target datalayout = "e-m:w-p:32:32-i64:64-f80:32-n8:16:32-S32" + +;; Checks if two functions with same load instructions are being merged +;; Volatility, Alignment, Orderings, RangeMetadata, SyncScopeID are being checked + +; CHECK-LABEL: define void @load1 +; CHECK-NOT: call void @ +define void @load1(i8*) { + %ptr = alloca i32, align 4 + store i32 0, i32* %ptr + %val1 = load volatile i32, i32* %ptr, align 4 + %val2 = load atomic i8, i8* %0 syncscope("singlethread") monotonic, align 1, !range !0 + ret void +} + +; CHECK-LABEL: define void @load2 +; CHECK: call void @ +define void @load2(i8*) { + %ptr = alloca i32, align 4 + store i32 0, i32* %ptr + %val1 = load volatile i32, i32* %ptr, align 4 + %val2 = load atomic i8, i8* %0 syncscope("singlethread") monotonic, align 1, !range !0 + ret void +} + +!0 = !{i8 1, i8 3, i8 7, i8 9} +!1 = !{i8 1, i8 3, i8 7, i8 9} diff --git a/llvm/test/Transforms/MergeFunc/ir-cmp-resume.ll b/llvm/test/Transforms/MergeFunc/ir-cmp-resume.ll new file mode 100644 --- /dev/null +++ b/llvm/test/Transforms/MergeFunc/ir-cmp-resume.ll @@ -0,0 +1,35 @@ +; RUN: opt -mergefunc -S < %s | FileCheck %s + +;; Make sure that two same Resume Insts are treated as equal. + +target datalayout = "e-m:w-p:32:32-i64:64-f80:32-n8:16:32-S32" + +;; Currently there is no special state being compared for Resume instructions. + +; CHECK-LABEL: define i8 @f1 +; CHECK-NOT: call i8 @ +define i8 @f1() personality i8* undef { + %out = invoke i8 @dummy(i8 0) to label %next unwind label %lpad + +next: + ret i8 %out + +lpad: + %pad = landingpad { i8*, i32 } cleanup + resume { i8*, i32 } zeroinitializer +} + +; CHECK-LABEL: define i8 @f2 +; CHECK: call i8 @ +define i8 @f2() personality i8* undef { + %out = invoke i8 @dummy(i8 0) to label %next unwind label %lpad + +next: + ret i8 %out + +lpad: + %pad = landingpad { i8*, i32 } cleanup + resume { i8*, i32 } zeroinitializer +} + +declare i8 @dummy(i8); diff --git a/llvm/test/Transforms/MergeFunc/ir-cmp-ret.ll b/llvm/test/Transforms/MergeFunc/ir-cmp-ret.ll new file mode 100644 --- /dev/null +++ b/llvm/test/Transforms/MergeFunc/ir-cmp-ret.ll @@ -0,0 +1,24 @@ +; RUN: opt -mergefunc -S < %s | FileCheck %s + +;; Make sure that two same Ret insts are treated as equal. + +target datalayout = "e-m:w-p:32:32-i64:64-f80:32-n8:16:32-S32" + +; Currently there is no special state being compared for Ret Instruction. + +; CHECK-LABEL: define i8* @f1 +; CHECK-NOT: call i8* @ +define i8* @f1() { + %v = alloca i8, align 1 + store i8 0, i8* %v, align 1 + ret i8* %v +} + +; CHECK-LABEL: define i8* @f2 +; CHECK: call i8* @ +define i8* @f2() { + %v = alloca i8, align 1 + store i8 0, i8* %v, align 1 + ret i8* %v +} + diff --git a/llvm/test/Transforms/MergeFunc/ir-cmp-store1.ll b/llvm/test/Transforms/MergeFunc/ir-cmp-store1.ll new file mode 100644 --- /dev/null +++ b/llvm/test/Transforms/MergeFunc/ir-cmp-store1.ll @@ -0,0 +1,77 @@ +; RUN: opt -mergefunc -S < %s | FileCheck %s + +;; Make sure that two different Store Insts are not treated as equal. 
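The load tests above vary volatility, alignment, atomic ordering, !range metadata, and the synchronization scope, and the comparison can be asked to ignore alignment and metadata. A sketch with those two switches as plain bools (names and structure are assumptions, not the patch's code):

#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"

using namespace llvm;

template <typename T> static int cmpNumbers(T L, T R) {
  return L < R ? -1 : (R < L ? 1 : 0);
}

// Hypothetical sketch of load-specific state, with switches for the two
// properties that may be ignored during comparison.
static int cmpLoadState(const LoadInst &L, const LoadInst &R,
                        bool IgnoreAlign, bool IgnoreMD) {
  if (int Res = cmpNumbers(L.isVolatile(), R.isVolatile()))
    return Res;
  if (!IgnoreAlign)
    if (int Res = cmpNumbers(L.getAlignment(), R.getAlignment()))
      return Res;
  if (int Res = cmpNumbers(L.getOrdering(), R.getOrdering()))
    return Res;
  if (int Res = cmpNumbers(L.getSyncScopeID(), R.getSyncScopeID()))
    return Res;
  if (!IgnoreMD) {
    // !range nodes with identical contents are uniqued by the context, so a
    // pointer check detects equality; a real implementation still needs a
    // deterministic order for the unequal case (placeholder result here).
    if (L.getMetadata(LLVMContext::MD_range) !=
        R.getMetadata(LLVMContext::MD_range))
      return 1;
  }
  return 0;
}

A store comparison would look the same minus the metadata step, matching the properties listed in ir-cmp-store2.ll below.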
+
+target datalayout = "e-m:w-p:32:32-i64:64-f80:32-n8:16:32-S32"
+
+;; Volatile difference
+
+; CHECK-LABEL: define void @store1
+; CHECK-NOT: call void @
+define void @store1() {
+  %ptr = alloca i32, align 4
+  store volatile i32 0, i32* %ptr, align 4
+  ret void
+}
+
+; CHECK-LABEL: define void @store2
+; CHECK-NOT: call void @
+define void @store2() {
+  %ptr = alloca i32, align 4
+  store i32 0, i32* %ptr, align 4
+  ret void
+}
+
+;; Alignment difference
+
+; CHECK-LABEL: define void @store3
+; CHECK-NOT: call void @
+define void @store3() {
+  %ptr = alloca i64, align 4
+  store i64 0, i64* %ptr, align 4
+  ret void
+}
+
+; CHECK-LABEL: define void @store4
+; CHECK-NOT: call void @
+define void @store4() {
+  %ptr = alloca i64, align 4
+  store i64 0, i64* %ptr, align 8
+  ret void
+}
+
+;; Ordering difference
+
+; CHECK-LABEL: define void @store5
+; CHECK-NOT: call void @
+define void @store5() {
+  %ptr = alloca i64, align 4
+  store atomic i64 0, i64* %ptr unordered, align 4
+  ret void
+}
+
+; CHECK-LABEL: define void @store6
+; CHECK-NOT: call void @
+define void @store6() {
+  %ptr = alloca i64, align 4
+  store atomic i64 0, i64* %ptr monotonic, align 4
+  ret void
+}
+
+;; SyncScopeID difference
+
+; CHECK-LABEL: define void @store7
+; CHECK-NOT: call void @
+define void @store7() {
+  %ptr = alloca i64, align 4
+  store atomic i64 0, i64* %ptr syncscope("singlethread") monotonic, align 4
+  ret void
+}
+
+; CHECK-LABEL: define void @store8
+; CHECK-NOT: call void @
+define void @store8() {
+  %ptr = alloca i64, align 4
+  store atomic i64 0, i64* %ptr syncscope("") monotonic, align 4
+  ret void
+}
diff --git a/llvm/test/Transforms/MergeFunc/ir-cmp-store2.ll b/llvm/test/Transforms/MergeFunc/ir-cmp-store2.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/Transforms/MergeFunc/ir-cmp-store2.ll
@@ -0,0 +1,26 @@
+; RUN: opt -mergefunc -S < %s | FileCheck %s
+
+;; Make sure that two same Store Insts are treated as equal.
+
+target datalayout = "e-m:w-p:32:32-i64:64-f80:32-n8:16:32-S32"
+
+;; Checks if two functions with same Store instructions are being merged
+;; Volatility, Alignment, Orderings, SyncScopeID are being checked
+
+; CHECK-LABEL: define void @store1
+; CHECK-NOT: call void @
+define void @store1(i8*) {
+  %ptr = alloca i32, align 4
+  store volatile i32 0, i32* %ptr
+  store atomic i8 0, i8* %0 syncscope("singlethread") monotonic, align 1
+  ret void
+}
+
+; CHECK-LABEL: define void @store2
+; CHECK: call void @
+define void @store2(i8*) {
+  %ptr = alloca i32, align 4
+  store volatile i32 0, i32* %ptr
+  store atomic i8 0, i8* %0 syncscope("singlethread") monotonic, align 1
+  ret void
+}
diff --git a/llvm/test/Transforms/MergeFunc/ir-cmp-switch.ll b/llvm/test/Transforms/MergeFunc/ir-cmp-switch.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/Transforms/MergeFunc/ir-cmp-switch.ll
@@ -0,0 +1,45 @@
+; RUN: opt -mergefunc -S < %s | FileCheck %s
+
+;; Make sure that two same Switch Insts are treated as equal.
+
+target datalayout = "e-m:w-p:32:32-i64:64-f80:32-n8:16:32-S32"
+
+;; Currently there is no special state being checked for comparison of Switch Instructions.
+ +; CHECK-LABEL: define void @f1 +; CHECK-NOT: call void @ +define void @f1(i32 %val) { + switch i32 %val, label %bb4 [i32 0, label %bb1 + i32 1, label %bb2 + i32 2, label %bb3] +bb1: + br label %bb4 + +bb2: + br label %bb4 + +bb3: + br label %bb4 + +bb4: + ret void +} + +; CHECK-LABEL: define void @f2 +; CHECK: call void @ +define void @f2(i32 %val) { + switch i32 %val, label %bb4 [i32 0, label %bb1 + i32 1, label %bb2 + i32 2, label %bb3] +bb1: + br label %bb4 + +bb2: + br label %bb4 + +bb3: + br label %bb4 + +bb4: + ret void +} diff --git a/llvm/test/Transforms/MergeFunc/ir-cmp-unreachable.ll b/llvm/test/Transforms/MergeFunc/ir-cmp-unreachable.ll new file mode 100644 --- /dev/null +++ b/llvm/test/Transforms/MergeFunc/ir-cmp-unreachable.ll @@ -0,0 +1,37 @@ +; RUN: opt -mergefunc -S < %s | FileCheck %s + +;; Make sure that two same Unreachable Insts are treated as equal. + +target datalayout = "e-m:w-p:32:32-i64:64-f80:32-n8:16:32-S32" + +; There is no special state being checked for Unreachable Insts currently. + +; CHECK-LABEL: define void @f1 +; CHECK-NOT: call void @ +define void @f1(i32* %p) { +entry: + br label %end + +bb2: + %v2 = load i32, i32* %p, align 4 + %c2 = icmp eq i32 %v2, 0 + unreachable + +end: + ret void +} + +; CHECK-LABEL: define void @f2 +; CHECK: call void @ +define void @f2(i32* %p) { +entry: + br label %end + +bb2: + %v2 = load i32, i32* %p, align 4 + %c2 = icmp eq i32 %v2, 0 + unreachable + +end: + ret void +}
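Taken together, the files above cover every opcode that carries extra state plus a set of opcodes that deliberately carry none. The overall shape this suggests is a dispatch on opcode that defers to per-class checks like the hypothetical ones sketched earlier and falls through to "equal" where there is nothing extra to compare; again a sketch rather than the patch's code:

#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/Support/Casting.h"

using namespace llvm;

// From the earlier sketches.
static int cmpLoadState(const LoadInst &, const LoadInst &, bool, bool);
static int cmpCmpXchgState(const AtomicCmpXchgInst &,
                           const AtomicCmpXchgInst &);
static int cmpCallSiteState(const CallBase &, const CallBase &);

// Hypothetical glue; opcodes and operand counts are assumed to match already.
static int cmpInstSpecificStateSketch(const Instruction &L,
                                      const Instruction &R) {
  switch (L.getOpcode()) {
  case Instruction::Load:
    return cmpLoadState(cast<LoadInst>(L), cast<LoadInst>(R),
                        /*IgnoreAlign=*/false, /*IgnoreMD=*/false);
  case Instruction::AtomicCmpXchg:
    return cmpCmpXchgState(cast<AtomicCmpXchgInst>(L),
                           cast<AtomicCmpXchgInst>(R));
  case Instruction::Call:
  case Instruction::CallBr:
  case Instruction::Invoke:
    return cmpCallSiteState(cast<CallBase>(L), cast<CallBase>(R));
  // Stores, fences, atomicrmw, compares, extractvalue/insertvalue,
  // cleanupret, catchswitch, ... would be handled the same way.
  default:
    // Casts, fneg, freeze, ret, switch, indirectbr, unreachable and the EH
    // pad instructions carry no extra state, so they compare equal here.
    return 0;
  }
}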