Index: include/llvm/InitializePasses.h =================================================================== --- include/llvm/InitializePasses.h +++ include/llvm/InitializePasses.h @@ -156,6 +156,7 @@ void initializeImplicitNullChecksPass(PassRegistry&); void initializeIndVarSimplifyLegacyPassPass(PassRegistry&); void initializeInductiveRangeCheckEliminationPass(PassRegistry&); +void initializeInferAddressSpacesPass(PassRegistry&); void initializeInferFunctionAttrsLegacyPassPass(PassRegistry&); void initializeInlineCostAnalysisPass(PassRegistry&); void initializeInstCountPass(PassRegistry&); Index: include/llvm/Transforms/Scalar.h =================================================================== --- include/llvm/Transforms/Scalar.h +++ include/llvm/Transforms/Scalar.h @@ -412,6 +412,13 @@ //===----------------------------------------------------------------------===// // +// InferAddressSpaces - Remove addrspacecast instructions. +// +FunctionPass *createInferAddressSpacesPass(); +extern char &InferAddressSpacesID; + +//===----------------------------------------------------------------------===// +// // InstructionSimplifier - Remove redundant instructions. 
// FunctionPass *createInstructionSimplifierPass(); Index: lib/Target/NVPTX/CMakeLists.txt =================================================================== --- lib/Target/NVPTX/CMakeLists.txt +++ lib/Target/NVPTX/CMakeLists.txt @@ -17,7 +17,6 @@ NVPTXISelDAGToDAG.cpp NVPTXISelLowering.cpp NVPTXImageOptimizer.cpp - NVPTXInferAddressSpaces.cpp NVPTXInstrInfo.cpp NVPTXLowerAggrCopies.cpp NVPTXLowerArgs.cpp Index: lib/Target/NVPTX/NVPTX.h =================================================================== --- lib/Target/NVPTX/NVPTX.h +++ lib/Target/NVPTX/NVPTX.h @@ -45,7 +45,6 @@ llvm::CodeGenOpt::Level OptLevel); ModulePass *createNVPTXAssignValidGlobalNamesPass(); ModulePass *createGenericToNVVMPass(); -FunctionPass *createNVPTXInferAddressSpacesPass(); FunctionPass *createNVVMIntrRangePass(unsigned int SmVersion); FunctionPass *createNVVMReflectPass(); MachineFunctionPass *createNVPTXPrologEpilogPass(); Index: lib/Target/NVPTX/NVPTXTargetMachine.cpp =================================================================== --- lib/Target/NVPTX/NVPTXTargetMachine.cpp +++ lib/Target/NVPTX/NVPTXTargetMachine.cpp @@ -50,7 +50,6 @@ void initializeGenericToNVVMPass(PassRegistry&); void initializeNVPTXAllocaHoistingPass(PassRegistry &); void initializeNVPTXAssignValidGlobalNamesPass(PassRegistry&); -void initializeNVPTXInferAddressSpacesPass(PassRegistry &); void initializeNVPTXLowerAggrCopiesPass(PassRegistry &); void initializeNVPTXLowerArgsPass(PassRegistry &); void initializeNVPTXLowerAllocaPass(PassRegistry &); @@ -70,7 +69,6 @@ initializeGenericToNVVMPass(PR); initializeNVPTXAllocaHoistingPass(PR); initializeNVPTXAssignValidGlobalNamesPass(PR); - initializeNVPTXInferAddressSpacesPass(PR); initializeNVPTXLowerArgsPass(PR); initializeNVPTXLowerAllocaPass(PR); initializeNVPTXLowerAggrCopiesPass(PR); @@ -190,7 +188,7 @@ // be eliminated by SROA. 
addPass(createSROAPass()); addPass(createNVPTXLowerAllocaPass()); - addPass(createNVPTXInferAddressSpacesPass()); + addPass(createInferAddressSpacesPass()); } void NVPTXPassConfig::addStraightLineScalarOptimizationPasses() { Index: lib/Transforms/Scalar/CMakeLists.txt =================================================================== --- lib/Transforms/Scalar/CMakeLists.txt +++ lib/Transforms/Scalar/CMakeLists.txt @@ -16,6 +16,7 @@ IVUsersPrinter.cpp InductiveRangeCheckElimination.cpp IndVarSimplify.cpp + InferAddressSpaces.cpp JumpThreading.cpp LICM.cpp LoopAccessAnalysisPrinter.cpp Index: lib/Transforms/Scalar/InferAddressSpaces.cpp =================================================================== --- lib/Transforms/Scalar/InferAddressSpaces.cpp +++ lib/Transforms/Scalar/InferAddressSpaces.cpp @@ -89,7 +89,7 @@ // //===----------------------------------------------------------------------===// -#include "NVPTX.h" +#include "llvm/Transforms/Scalar.h" #include "llvm/ADT/DenseSet.h" #include "llvm/ADT/Optional.h" #include "llvm/ADT/SetVector.h" @@ -103,7 +103,7 @@ #include "llvm/Transforms/Utils/Local.h" #include "llvm/Transforms/Utils/ValueMapper.h" -#define DEBUG_TYPE "nvptx-infer-addrspace" +#define DEBUG_TYPE "infer-address-spaces" using namespace llvm; @@ -112,8 +112,8 @@ using ValueToAddrSpaceMapTy = DenseMap<const Value *, unsigned>; -/// \brief NVPTXInferAddressSpaces -class NVPTXInferAddressSpaces: public FunctionPass { +/// \brief InferAddressSpaces +class InferAddressSpaces: public FunctionPass { /// Target specific address space which uses of should be replaced if /// possible. 
unsigned GenericAddrSpace; @@ -121,7 +121,7 @@ public: static char ID; - NVPTXInferAddressSpaces() : FunctionPass(ID) {} + InferAddressSpaces() : FunctionPass(ID) {} void getAnalysisUsage(AnalysisUsage &AU) const override { AU.setPreservesCFG(); @@ -162,13 +162,13 @@ }; } // end anonymous namespace -char NVPTXInferAddressSpaces::ID = 0; +char InferAddressSpaces::ID = 0; namespace llvm { -void initializeNVPTXInferAddressSpacesPass(PassRegistry &); +void initializeInferAddressSpacesPass(PassRegistry &); } -INITIALIZE_PASS(NVPTXInferAddressSpaces, "nvptx-infer-addrspace", - "Infer address spaces", + +INITIALIZE_PASS(InferAddressSpaces, DEBUG_TYPE, "Infer address spaces", false, false) // Returns true if V is an address expression. @@ -212,9 +212,9 @@ // If V is an unvisited generic address expression, appends V to PostorderStack // and marks it as visited. -void NVPTXInferAddressSpaces::appendsGenericAddressExpressionToPostorderStack( - Value *V, std::vector<std::pair<Value *, bool>> *PostorderStack, - DenseSet<Value *> *Visited) const { +void InferAddressSpaces::appendsGenericAddressExpressionToPostorderStack( + Value *V, std::vector<std::pair<Value *, bool>> *PostorderStack, + DenseSet<Value *> *Visited) const { assert(V->getType()->isPointerTy()); if (isAddressExpression(*V) && V->getType()->getPointerAddressSpace() == GenericAddrSpace) { @@ -226,7 +226,7 @@ // Returns all generic address expressions in function F. The elements are // ordered in postorder. std::vector<Value *> -NVPTXInferAddressSpaces::collectGenericAddressExpressions(Function &F) const { +InferAddressSpaces::collectGenericAddressExpressions(Function &F) const { // This function implements a non-recursive postorder traversal of a partial // use-def graph of function F. 
std::vector<std::pair<Value *, bool>> PostorderStack; @@ -237,10 +237,10 @@ for (Instruction &I : instructions(F)) { if (isa<LoadInst>(I)) { appendsGenericAddressExpressionToPostorderStack( - I.getOperand(0), &PostorderStack, &Visited); + I.getOperand(0), &PostorderStack, &Visited); } else if (isa<StoreInst>(I)) { appendsGenericAddressExpressionToPostorderStack( - I.getOperand(1), &PostorderStack, &Visited); + I.getOperand(1), &PostorderStack, &Visited); } } @@ -257,7 +257,7 @@ PostorderStack.back().second = true; for (Value *PtrOperand : getPointerOperands(*PostorderStack.back().first)) { appendsGenericAddressExpressionToPostorderStack( - PtrOperand, &PostorderStack, &Visited); + PtrOperand, &PostorderStack, &Visited); } } return Postorder; @@ -267,16 +267,16 @@ // of OperandUse.get() in the new address space. If the clone is not ready yet, // returns an undef in the new address space as a placeholder. static Value *operandWithNewAddressSpaceOrCreateUndef( - const Use &OperandUse, unsigned NewAddrSpace, - const ValueToValueMapTy &ValueWithNewAddrSpace, - SmallVectorImpl<const Use *> *UndefUsesToFix) { + const Use &OperandUse, unsigned NewAddrSpace, + const ValueToValueMapTy &ValueWithNewAddrSpace, + SmallVectorImpl<const Use *> *UndefUsesToFix) { Value *Operand = OperandUse.get(); if (Value *NewOperand = ValueWithNewAddrSpace.lookup(Operand)) return NewOperand; UndefUsesToFix->push_back(&OperandUse); return UndefValue::get( - Operand->getType()->getPointerElementType()->getPointerTo(NewAddrSpace)); + Operand->getType()->getPointerElementType()->getPointerTo(NewAddrSpace)); } // Returns a clone of `I` with its operands converted to those specified in @@ -289,11 +289,11 @@ // from a pointer whose type already matches. Therefore, this function returns a // Value* instead of an Instruction*. 
static Value *cloneInstructionWithNewAddressSpace( - Instruction *I, unsigned NewAddrSpace, - const ValueToValueMapTy &ValueWithNewAddrSpace, - SmallVectorImpl<const Use *> *UndefUsesToFix) { + Instruction *I, unsigned NewAddrSpace, + const ValueToValueMapTy &ValueWithNewAddrSpace, + SmallVectorImpl<const Use *> *UndefUsesToFix) { Type *NewPtrType = - I->getType()->getPointerElementType()->getPointerTo(NewAddrSpace); + I->getType()->getPointerElementType()->getPointerTo(NewAddrSpace); if (I->getOpcode() == Instruction::AddrSpaceCast) { Value *Src = I->getOperand(0); @@ -313,7 +313,7 @@ NewPointerOperands.push_back(nullptr); else NewPointerOperands.push_back(operandWithNewAddressSpaceOrCreateUndef( - OperandUse, NewAddrSpace, ValueWithNewAddrSpace, UndefUsesToFix)); + OperandUse, NewAddrSpace, ValueWithNewAddrSpace, UndefUsesToFix)); } switch (I->getOpcode()) { @@ -333,8 +333,8 @@ case Instruction::GetElementPtr: { GetElementPtrInst *GEP = cast<GetElementPtrInst>(I); GetElementPtrInst *NewGEP = GetElementPtrInst::Create( - GEP->getSourceElementType(), NewPointerOperands[0], - SmallVector<Value *, 4>(GEP->idx_begin(), GEP->idx_end())); + GEP->getSourceElementType(), NewPointerOperands[0], + SmallVector<Value *, 4>(GEP->idx_begin(), GEP->idx_end())); NewGEP->setIsInBounds(GEP->isInBounds()); return NewGEP; } @@ -347,10 +347,10 @@ // constant expression `CE` with its operands replaced as specified in // ValueWithNewAddrSpace. static Value *cloneConstantExprWithNewAddressSpace( - ConstantExpr *CE, unsigned NewAddrSpace, - const ValueToValueMapTy &ValueWithNewAddrSpace) { + ConstantExpr *CE, unsigned NewAddrSpace, - const ValueToValueMapTy &ValueWithNewAddrSpace) { Type *TargetType = - CE->getType()->getPointerElementType()->getPointerTo(NewAddrSpace); + CE->getType()->getPointerElementType()->getPointerTo(NewAddrSpace); if (CE->getOpcode() == Instruction::AddrSpaceCast) { // Because CE is generic, the source address space must be specific. 
@@ -382,8 +382,8 @@ // Needs to specify the source type while constructing a getelementptr // constant expression. return CE->getWithOperands( - NewOperands, TargetType, /*OnlyIfReduced=*/false, - NewOperands[0]->getType()->getPointerElementType()); + NewOperands, TargetType, /*OnlyIfReduced=*/false, + NewOperands[0]->getType()->getPointerElementType()); } return CE->getWithOperands(NewOperands, TargetType); @@ -394,7 +394,7 @@ // expression whose address space needs to be modified, in postorder. // // See cloneInstructionWithNewAddressSpace for the meaning of UndefUsesToFix. -Value *NVPTXInferAddressSpaces::cloneValueWithNewAddressSpace( +Value *InferAddressSpaces::cloneValueWithNewAddressSpace( Value *V, unsigned NewAddrSpace, const ValueToValueMapTy &ValueWithNewAddrSpace, SmallVectorImpl<const Use *> *UndefUsesToFix) const { @@ -404,7 +404,7 @@ if (Instruction *I = dyn_cast<Instruction>(V)) { Value *NewV = cloneInstructionWithNewAddressSpace( - I, NewAddrSpace, ValueWithNewAddrSpace, UndefUsesToFix); + I, NewAddrSpace, ValueWithNewAddrSpace, UndefUsesToFix); if (Instruction *NewI = dyn_cast<Instruction>(NewV)) { if (NewI->getParent() == nullptr) { NewI->insertBefore(I); @@ -415,13 +415,13 @@ } return cloneConstantExprWithNewAddressSpace( - cast<ConstantExpr>(V), NewAddrSpace, ValueWithNewAddrSpace); + cast<ConstantExpr>(V), NewAddrSpace, ValueWithNewAddrSpace); } // Defines the join operation on the address space lattice (see the file header // comments). -unsigned NVPTXInferAddressSpaces::joinAddressSpaces(unsigned AS1, - unsigned AS2) const { +unsigned InferAddressSpaces::joinAddressSpaces(unsigned AS1, + unsigned AS2) const { if (AS1 == GenericAddrSpace || AS2 == GenericAddrSpace) return GenericAddrSpace; @@ -434,7 +434,7 @@ return (AS1 == AS2) ? 
AS1 : GenericAddrSpace; } -bool NVPTXInferAddressSpaces::runOnFunction(Function &F) { +bool InferAddressSpaces::runOnFunction(Function &F) { if (skipFunction(F)) return false; @@ -456,9 +456,9 @@ return rewriteWithNewAddressSpaces(Postorder, InferredAddrSpace, &F); } -void NVPTXInferAddressSpaces::inferAddressSpaces( - const std::vector<Value *> &Postorder, - ValueToAddrSpaceMapTy *InferredAddrSpace) { +void InferAddressSpaces::inferAddressSpaces( + const std::vector<Value *> &Postorder, + ValueToAddrSpaceMapTy *InferredAddrSpace) { SetVector<Value *> Worklist(Postorder.begin(), Postorder.end()); // Initially, all expressions are in the uninitialized address space. for (Value *V : Postorder) @@ -500,8 +500,8 @@ } } -Optional<unsigned> NVPTXInferAddressSpaces::updateAddressSpace( - const Value &V, const ValueToAddrSpaceMapTy &InferredAddrSpace) { +Optional<unsigned> InferAddressSpaces::updateAddressSpace( + const Value &V, const ValueToAddrSpaceMapTy &InferredAddrSpace) { assert(InferredAddrSpace.count(&V)); // The new inferred address space equals the join of the address spaces @@ -526,9 +526,9 @@ return NewAS; } -bool NVPTXInferAddressSpaces::rewriteWithNewAddressSpaces( - const std::vector<Value *> &Postorder, - const ValueToAddrSpaceMapTy &InferredAddrSpace, Function *F) const { +bool InferAddressSpaces::rewriteWithNewAddressSpaces( + const std::vector<Value *> &Postorder, + const ValueToAddrSpaceMapTy &InferredAddrSpace, Function *F) const { // For each address expression to be modified, creates a clone of it with its // pointer operands converted to the new address space. 
Since the pointer // operands are converted, the clone is naturally in the new address space by @@ -539,7 +539,7 @@ unsigned NewAddrSpace = InferredAddrSpace.lookup(V); if (V->getType()->getPointerAddressSpace() != NewAddrSpace) { ValueWithNewAddrSpace[V] = cloneValueWithNewAddressSpace( - V, NewAddrSpace, ValueWithNewAddrSpace, &UndefUsesToFix); + V, NewAddrSpace, ValueWithNewAddrSpace, &UndefUsesToFix); } } @@ -603,6 +603,6 @@ return true; } -FunctionPass *llvm::createNVPTXInferAddressSpacesPass() { - return new NVPTXInferAddressSpaces(); +FunctionPass *llvm::createInferAddressSpacesPass() { + return new InferAddressSpaces(); } Index: lib/Transforms/Scalar/Scalar.cpp =================================================================== --- lib/Transforms/Scalar/Scalar.cpp +++ lib/Transforms/Scalar/Scalar.cpp @@ -50,6 +50,7 @@ initializeFlattenCFGPassPass(Registry); initializeInductiveRangeCheckEliminationPass(Registry); initializeIndVarSimplifyLegacyPassPass(Registry); + initializeInferAddressSpacesPass(Registry); initializeJumpThreadingPass(Registry); initializeLegacyLICMPassPass(Registry); initializeLegacyLoopSinkPassPass(Registry);