Index: lib/CodeGen/CodeGenPrepare.cpp
===================================================================
--- lib/CodeGen/CodeGenPrepare.cpp
+++ lib/CodeGen/CodeGenPrepare.cpp
@@ -116,9 +116,13 @@
 class CodeGenPrepare : public FunctionPass {
     const TargetMachine *TM;
     const TargetLowering *TLI;
+    const TargetRegisterInfo *TRI;
     const TargetTransformInfo *TTI;
     const TargetLibraryInfo *TLInfo;
 
+    /// Cache UseAA for the current function from the subtarget.
+    bool UseAA;
+
     /// As we scan instructions optimizing them, this is the next instruction
     /// to optimize. Transforms that can invalidate this should update it.
     BasicBlock::iterator CurInstIterator;
@@ -208,8 +212,12 @@
   PromotedInsts.clear();
 
   ModifiedDT = false;
-  if (TM)
-    TLI = TM->getSubtargetImpl(F)->getTargetLowering();
+  if (TM) {
+    auto *STI = TM->getSubtargetImpl(F);
+    TLI = STI->getTargetLowering();
+    TRI = STI->getRegisterInfo();
+    UseAA = STI->useAA();
+  }
   TLInfo = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
   TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
   OptSize = F.optForSize();
@@ -2101,8 +2109,8 @@
 /// This encapsulates the logic for matching the target-legal addressing modes.
 class AddressingModeMatcher {
   SmallVectorImpl<Instruction *> &AddrModeInsts;
-  const TargetMachine &TM;
   const TargetLowering &TLI;
+  const TargetRegisterInfo &TRI;
   const DataLayout &DL;
 
   /// AccessTy/MemoryInst - This is the type for the access (e.g. double) and
@@ -2127,14 +2135,13 @@
   bool IgnoreProfitability;
 
   AddressingModeMatcher(SmallVectorImpl<Instruction *> &AMI,
-                        const TargetMachine &TM, Type *AT, unsigned AS,
+                        const TargetLowering *TLI,
+                        const TargetRegisterInfo *TRI, Type *AT, unsigned AS,
                         Instruction *MI, ExtAddrMode &AM,
                         const SetOfInstrs &InsertedInsts,
                         InstrToOrigTy &PromotedInsts,
                         TypePromotionTransaction &TPT)
-      : AddrModeInsts(AMI), TM(TM),
-        TLI(*TM.getSubtargetImpl(*MI->getParent()->getParent())
-                 ->getTargetLowering()),
+      : AddrModeInsts(AMI), TLI(*TLI), TRI(*TRI),
         DL(MI->getModule()->getDataLayout()), AccessTy(AT), AddrSpace(AS),
         MemoryInst(MI), AddrMode(AM), InsertedInsts(InsertedInsts),
         PromotedInsts(PromotedInsts), TPT(TPT) {
@@ -2149,18 +2156,18 @@
   /// optimizations.
   /// \p PromotedInsts maps the instructions to their type before promotion.
   /// \p The ongoing transaction where every action should be registered.
-  static ExtAddrMode Match(Value *V, Type *AccessTy, unsigned AS,
-                           Instruction *MemoryInst,
-                           SmallVectorImpl<Instruction *> &AddrModeInsts,
-                           const TargetMachine &TM,
-                           const SetOfInstrs &InsertedInsts,
-                           InstrToOrigTy &PromotedInsts,
-                           TypePromotionTransaction &TPT) {
+  static ExtAddrMode
+  Match(Value *V, Type *AccessTy, unsigned AS, Instruction *MemoryInst,
+        SmallVectorImpl<Instruction *> &AddrModeInsts,
+        const TargetLowering *TLI, const TargetRegisterInfo *TRI,
+        const SetOfInstrs &InsertedInsts, InstrToOrigTy &PromotedInsts,
+        TypePromotionTransaction &TPT) {
     ExtAddrMode Result;
 
-    bool Success = AddressingModeMatcher(AddrModeInsts, TM, AccessTy, AS,
-                                         MemoryInst, Result, InsertedInsts,
-                                         PromotedInsts, TPT).matchAddr(V, 0);
+    bool Success =
+        AddressingModeMatcher(AddrModeInsts, TLI, TRI, AccessTy, AS, MemoryInst,
+                              Result, InsertedInsts, PromotedInsts, TPT)
+            .matchAddr(V, 0);
     (void)Success; assert(Success && "Couldn't select *anything*?");
     return Result;
   }
@@ -2983,10 +2990,9 @@
 /// Check to see if all uses of OpVal by the specified inline asm call are due
 /// to memory operands. If so, return true, otherwise return false.
 static bool IsOperandAMemoryOperand(CallInst *CI, InlineAsm *IA, Value *OpVal,
-                                    const TargetMachine &TM) {
+                                    const TargetLowering *TLI,
+                                    const TargetRegisterInfo *TRI) {
   const Function *F = CI->getParent()->getParent();
-  const TargetLowering *TLI = TM.getSubtargetImpl(*F)->getTargetLowering();
-  const TargetRegisterInfo *TRI = TM.getSubtargetImpl(*F)->getRegisterInfo();
   TargetLowering::AsmOperandInfoVector TargetConstraints =
       TLI->ParseConstraints(F->getParent()->getDataLayout(), TRI,
                             ImmutableCallSite(CI));
@@ -3013,7 +3019,8 @@
 static bool FindAllMemoryUses(
     Instruction *I,
     SmallVectorImpl<std::pair<Instruction *, unsigned>> &MemoryUses,
-    SmallPtrSetImpl<Instruction *> &ConsideredInsts, const TargetMachine &TM) {
+    SmallPtrSetImpl<Instruction *> &ConsideredInsts, const TargetLowering *TLI,
+    const TargetRegisterInfo *TRI) {
   // If we already considered this instruction, we're done.
   if (!ConsideredInsts.insert(I).second)
     return false;
@@ -3043,12 +3050,12 @@
       if (!IA) return true;
 
       // If this is a memory operand, we're cool, otherwise bail out.
-      if (!IsOperandAMemoryOperand(CI, IA, I, TM))
+      if (!IsOperandAMemoryOperand(CI, IA, I, TLI, TRI))
         return true;
       continue;
     }
 
-    if (FindAllMemoryUses(UserI, MemoryUses, ConsideredInsts, TM))
+    if (FindAllMemoryUses(UserI, MemoryUses, ConsideredInsts, TLI, TRI))
       return true;
   }
 
@@ -3136,7 +3143,7 @@
   // uses.
   SmallVector<std::pair<Instruction *, unsigned>, 16> MemoryUses;
   SmallPtrSet<Instruction *, 16> ConsideredInsts;
-  if (FindAllMemoryUses(I, MemoryUses, ConsideredInsts, TM))
+  if (FindAllMemoryUses(I, MemoryUses, ConsideredInsts, &TLI, &TRI))
     return false; // Has a non-memory, non-foldable use!
 
   // Now that we know that all uses of this instruction are part of a chain of
@@ -3163,9 +3170,9 @@
     ExtAddrMode Result;
     TypePromotionTransaction::ConstRestorationPt LastKnownGood =
         TPT.getRestorationPoint();
-    AddressingModeMatcher Matcher(MatchedAddrModeInsts, TM, AddressAccessTy, AS,
-                                  MemoryInst, Result, InsertedInsts,
-                                  PromotedInsts, TPT);
+    AddressingModeMatcher Matcher(MatchedAddrModeInsts, &TLI, &TRI,
+                                  AddressAccessTy, AS, MemoryInst, Result,
+                                  InsertedInsts, PromotedInsts, TPT);
     Matcher.IgnoreProfitability = true;
     bool Success = Matcher.matchAddr(Address, 0);
     (void)Success; assert(Success && "Couldn't select *anything*?");
@@ -3245,8 +3252,8 @@
     // For non-PHIs, determine the addressing mode being computed.
     SmallVector<Instruction *, 16> NewAddrModeInsts;
     ExtAddrMode NewAddrMode = AddressingModeMatcher::Match(
-        V, AccessTy, AddrSpace, MemoryInst, NewAddrModeInsts, *TM,
-        InsertedInsts, PromotedInsts, TPT);
+        V, AccessTy, AddrSpace, MemoryInst, NewAddrModeInsts, TLI, TRI,
+        InsertedInsts, PromotedInsts, TPT);
 
     // This check is broken into two cases with very similar code to avoid using
     // getNumUses() as much as possible. Some values have a lot of uses, so
@@ -3321,9 +3328,7 @@
     if (SunkAddr->getType() != Addr->getType())
       SunkAddr = Builder.CreateBitCast(SunkAddr, Addr->getType());
   } else if (AddrSinkUsingGEPs ||
-             (!AddrSinkUsingGEPs.getNumOccurrences() && TM &&
-              TM->getSubtargetImpl(*MemoryInst->getParent()->getParent())
-                  ->useAA())) {
+             (!AddrSinkUsingGEPs.getNumOccurrences() && UseAA)) {
     // By default, we use the GEP-based method when AA is used later. This
     // prevents new inttoptr/ptrtoint pairs from degrading AA capabilities.
     DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode << " for "
@@ -3546,8 +3551,6 @@
 bool CodeGenPrepare::optimizeInlineAsmInst(CallInst *CS) {
   bool MadeChange = false;
 
-  const TargetRegisterInfo *TRI =
-      TM->getSubtargetImpl(*CS->getParent()->getParent())->getRegisterInfo();
   TargetLowering::AsmOperandInfoVector TargetConstraints =
       TLI->ParseConstraints(*DL, TRI, CS);
   unsigned ArgNo = 0;
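
The patch above follows a standard caching pattern: derive the subtarget once per
function in runOnFunction, stash the narrow interfaces it actually needs (the
TargetLowering, the TargetRegisterInfo, and the useAA() flag), and hand those to
the helpers instead of threading the whole TargetMachine through every call. The
sketch below is a minimal standalone model of that pattern; the Subtarget,
TargetMachine, and CGPSketch types are simplified stand-ins invented for
illustration, not LLVM's real classes.

// Simplified stand-ins for illustration only; not LLVM's real classes.
#include <cassert>

struct TargetLowering {};
struct TargetRegisterInfo {};

struct Subtarget {
  TargetLowering TL;
  TargetRegisterInfo RI;
  const TargetLowering *getTargetLowering() const { return &TL; }
  const TargetRegisterInfo *getRegisterInfo() const { return &RI; }
  bool useAA() const { return true; }
};

struct TargetMachine {
  // In LLVM the subtarget can vary per function (per-function target
  // attributes), which is why it is re-derived in runOnFunction rather
  // than once in the pass constructor.
  Subtarget ST;
  const Subtarget *getSubtargetImpl(/* const Function &F */) const {
    return &ST;
  }
};

// Helper in the style of the patched FindAllMemoryUses: a static function
// taking the narrow interfaces instead of the whole TargetMachine.
static bool usesAreFoldable(const TargetLowering *TLI,
                            const TargetRegisterInfo *TRI) {
  assert(TLI && TRI && "subtarget info not cached");
  return true;
}

class CGPSketch {
  const TargetMachine *TM;
  // Cached once per function in runOnFunction, then handed to helpers.
  const TargetLowering *TLI = nullptr;
  const TargetRegisterInfo *TRI = nullptr;
  bool UseAA = false;

public:
  explicit CGPSketch(const TargetMachine *TM) : TM(TM) {}

  bool runOnFunction(/* Function &F */) {
    if (TM) {
      auto *STI = TM->getSubtargetImpl();
      TLI = STI->getTargetLowering();
      TRI = STI->getRegisterInfo();
      UseAA = STI->useAA(); // queried once, not at every use site
    }
    return UseAA && usesAreFoldable(TLI, TRI);
  }
};

int main() {
  TargetMachine TM;
  CGPSketch CGP(&TM);
  return CGP.runOnFunction() ? 0 : 1;
}

Besides avoiding the repeated getSubtargetImpl lookups on hot paths, narrowing
the signatures makes each helper's real dependencies explicit, which is what
lets the patch drop the TargetMachine plumbing from IsOperandAMemoryOperand,
FindAllMemoryUses, and AddressingModeMatcher entirely.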