Index: lib/CodeGen/CodeGenPrepare.cpp
===================================================================
--- lib/CodeGen/CodeGenPrepare.cpp
+++ lib/CodeGen/CodeGenPrepare.cpp
@@ -1206,102 +1206,6 @@
   CI->eraseFromParent();
 }
 
-bool CodeGenPrepare::OptimizeCallInst(CallInst *CI, bool& ModifiedDT) {
-  BasicBlock *BB = CI->getParent();
-
-  // Lower inline assembly if we can.
-  // If we found an inline asm expession, and if the target knows how to
-  // lower it to normal LLVM code, do so now.
-  if (TLI && isa<InlineAsm>(CI->getCalledValue())) {
-    if (TLI->ExpandInlineAsm(CI)) {
-      // Avoid invalidating the iterator.
-      CurInstIterator = BB->begin();
-      // Avoid processing instructions out of order, which could cause
-      // reuse before a value is defined.
-      SunkAddrs.clear();
-      return true;
-    }
-    // Sink address computing for memory operands into the block.
-    if (OptimizeInlineAsmInst(CI))
-      return true;
-  }
-
-  IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
-  if (II) {
-    switch (II->getIntrinsicID()) {
-    default: break;
-    case Intrinsic::objectsize: {
-      // Lower all uses of llvm.objectsize.*
-      bool Min = (cast<ConstantInt>(II->getArgOperand(1))->getZExtValue() == 1);
-      Type *ReturnTy = CI->getType();
-      Constant *RetVal = ConstantInt::get(ReturnTy, Min ? 0 : -1ULL);
-
-      // Substituting this can cause recursive simplifications, which can
-      // invalidate our iterator.  Use a WeakVH to hold onto it in case this
-      // happens.
-      WeakVH IterHandle(CurInstIterator);
-
-      replaceAndRecursivelySimplify(CI, RetVal,
-                                    TLI ? TLI->getDataLayout() : nullptr,
-                                    TLInfo, ModifiedDT ? nullptr : DT);
-
-      // If the iterator instruction was recursively deleted, start over at the
-      // start of the block.
-      if (IterHandle != CurInstIterator) {
-        CurInstIterator = BB->begin();
-        SunkAddrs.clear();
-      }
-      return true;
-    }
-    case Intrinsic::masked_load: {
-      // Scalarize unsupported vector masked load
-      if (!TTI->isLegalMaskedLoad(CI->getType(), 1)) {
-        ScalarizeMaskedLoad(CI);
-        ModifiedDT = true;
-        return true;
-      }
-      return false;
-    }
-    case Intrinsic::masked_store: {
-      if (!TTI->isLegalMaskedStore(CI->getArgOperand(0)->getType(), 1)) {
-        ScalarizeMaskedStore(CI);
-        ModifiedDT = true;
-        return true;
-      }
-      return false;
-    }
-    }
-
-    if (TLI) {
-      SmallVector<Value*, 2> PtrOps;
-      Type *AccessTy;
-      if (TLI->GetAddrModeArguments(II, PtrOps, AccessTy))
-        while (!PtrOps.empty())
-          if (OptimizeMemoryInst(II, PtrOps.pop_back_val(), AccessTy))
-            return true;
-    }
-  }
-
-  // From here on out we're working with named functions.
-  if (!CI->getCalledFunction()) return false;
-
-  // We'll need DataLayout from here on out.
-  const DataLayout *TD = TLI ? TLI->getDataLayout() : nullptr;
-  if (!TD) return false;
-
-  // Lower all default uses of _chk calls.  This is very similar
-  // to what InstCombineCalls does, but here we are only lowering calls
-  // to fortified library functions (e.g. __memcpy_chk) that have the default
-  // "don't know" as the objectsize.  Anything else should be left alone.
-  FortifiedLibCallSimplifier Simplifier(TD, TLInfo, true);
-  if (Value *V = Simplifier.optimizeCall(CI)) {
-    CI->replaceAllUsesWith(V);
-    CI->eraseFromParent();
-    return true;
-  }
-  return false;
-}
-
 /// DupRetToEnableTailCallOpts - Look for opportunities to duplicate return
 /// instructions to the predecessor to enable tail call optimizations. The
 /// case it is currently looking for is:
@@ -3363,6 +3267,144 @@
   return true;
 }
 
+bool CodeGenPrepare::OptimizeCallInst(CallInst *CI, bool& ModifiedDT) {
+  BasicBlock *BB = CI->getParent();
+  Value *Addr = CI->getArgOperand(0);
+  if (IsNonLocalValue(Addr, CI->getParent())) {
+    Value *&SunkAddr = SunkAddrs[Addr];
+    if (SunkAddr) {
+      llvm::errs() << "AAAA\n";
+    } else {
+      Type *IntPtrTy = TLI->getDataLayout()->getIntPtrType(Addr->getType());
+      Value *V = CI->getArgOperand(0);
+      SmallVector<Instruction *, 16> NewAddrModeInsts;
+      TypePromotionTransaction TPT;
+      ExtAddrMode Mode = AddressingModeMatcher::Match(
+          V, V->getType(), CI, NewAddrModeInsts, *TLI, InsertedTruncsSet,
+          PromotedInsts, TPT);
+      // llvm::errs() << Mode.BaseReg << " " << Mode.Scale << " " << Mode.BaseGV
+      //              << " " << Mode.BaseOffs << "\n";
+      IRBuilder<> Builder(CI);
+      Value *Result = nullptr;
+      if (Mode.BaseReg) {
+        Value *V = Mode.BaseReg;
+        if (V->getType()->isPointerTy())
+          V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr");
+        if (V->getType() != IntPtrTy)
+          V = Builder.CreateIntCast(V, IntPtrTy, /*isSigned=*/true, "sunkaddr");
+        Result = V;
+      }
+
+      if (Mode.BaseOffs) {
+        Value *V = ConstantInt::get(IntPtrTy, Mode.BaseOffs);
+        if (Result)
+          Result = Builder.CreateAdd(Result, V, "sunkaddr");
+        else
+          Result = V;
+      }
+      if (!Result)
+        SunkAddr = Constant::getNullValue(Addr->getType());
+      else
+        SunkAddr = Builder.CreateIntToPtr(Result, Addr->getType(), "sunkaddr");
+      CI->replaceUsesOfWith(Addr, SunkAddr);
+    }
+    RecursivelyDeleteTriviallyDeadInstructions(Addr, TLInfo);
+    return true;
+  }
+
+  // Lower inline assembly if we can.
+  // If we found an inline asm expession, and if the target knows how to
+  // lower it to normal LLVM code, do so now.
+  if (TLI && isa<InlineAsm>(CI->getCalledValue())) {
+    if (TLI->ExpandInlineAsm(CI)) {
+      // Avoid invalidating the iterator.
+      CurInstIterator = BB->begin();
+      // Avoid processing instructions out of order, which could cause
+      // reuse before a value is defined.
+      SunkAddrs.clear();
+      return true;
+    }
+    // Sink address computing for memory operands into the block.
+    if (OptimizeInlineAsmInst(CI))
+      return true;
+  }
+
+  IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
+  if (II) {
+    switch (II->getIntrinsicID()) {
+    default: break;
+    case Intrinsic::objectsize: {
+      // Lower all uses of llvm.objectsize.*
+      bool Min = (cast<ConstantInt>(II->getArgOperand(1))->getZExtValue() == 1);
+      Type *ReturnTy = CI->getType();
+      Constant *RetVal = ConstantInt::get(ReturnTy, Min ? 0 : -1ULL);
+
+      // Substituting this can cause recursive simplifications, which can
+      // invalidate our iterator.  Use a WeakVH to hold onto it in case this
+      // happens.
+      WeakVH IterHandle(CurInstIterator);
+
+      replaceAndRecursivelySimplify(CI, RetVal,
+                                    TLI ? TLI->getDataLayout() : nullptr,
+                                    TLInfo, ModifiedDT ? nullptr : DT);
+
+      // If the iterator instruction was recursively deleted, start over at the
+      // start of the block.
+      if (IterHandle != CurInstIterator) {
+        CurInstIterator = BB->begin();
+        SunkAddrs.clear();
+      }
+      return true;
+    }
+    case Intrinsic::masked_load: {
+      // Scalarize unsupported vector masked load
+      if (!TTI->isLegalMaskedLoad(CI->getType(), 1)) {
+        ScalarizeMaskedLoad(CI);
+        ModifiedDT = true;
+        return true;
+      }
+      return false;
+    }
+    case Intrinsic::masked_store: {
+      if (!TTI->isLegalMaskedStore(CI->getArgOperand(0)->getType(), 1)) {
+        ScalarizeMaskedStore(CI);
+        ModifiedDT = true;
+        return true;
+      }
+      return false;
+    }
+    }
+
+    if (TLI) {
+      SmallVector<Value*, 2> PtrOps;
+      Type *AccessTy;
+      if (TLI->GetAddrModeArguments(II, PtrOps, AccessTy))
+        while (!PtrOps.empty())
+          if (OptimizeMemoryInst(II, PtrOps.pop_back_val(), AccessTy))
+            return true;
+    }
+  }
+
+  // From here on out we're working with named functions.
+  if (!CI->getCalledFunction()) return false;
+
+  // We'll need DataLayout from here on out.
+  const DataLayout *TD = TLI ? TLI->getDataLayout() : nullptr;
+  if (!TD) return false;
+
+  // Lower all default uses of _chk calls.  This is very similar
+  // to what InstCombineCalls does, but here we are only lowering calls
+  // to fortified library functions (e.g. __memcpy_chk) that have the default
+  // "don't know" as the objectsize.  Anything else should be left alone.
+  FortifiedLibCallSimplifier Simplifier(TD, TLInfo, true);
+  if (Value *V = Simplifier.optimizeCall(CI)) {
+    CI->replaceAllUsesWith(V);
+    CI->eraseFromParent();
+    return true;
+  }
+  return false;
+}
+
 /// OptimizeInlineAsmInst - If there are any memory operands, use
 /// OptimizeMemoryInst to sink their address computing into the block when
 /// possible / profitable.
Index: lib/CodeGen/RegisterCoalescer.cpp
===================================================================
--- lib/CodeGen/RegisterCoalescer.cpp
+++ lib/CodeGen/RegisterCoalescer.cpp
@@ -840,10 +840,14 @@
     IsDefCopy = true;
     return false;
   }
-  if (!TII->isAsCheapAsAMove(DefMI))
-    return false;
-  if (!TII->isTriviallyReMaterializable(DefMI, AA))
-    return false;
+  llvm::errs() << "AAA\n---\n";
+  DefMI->dump();
+  llvm::errs() << TII->isAsCheapAsAMove(DefMI) << " "
+               << TII->isTriviallyReMaterializable(DefMI, AA) << "\n";
+  //if (!TII->isAsCheapAsAMove(DefMI))
+  //  return false;
+  //if (!TII->isTriviallyReMaterializable(DefMI, AA))
+  //  return false;
   bool SawStore = false;
   if (!DefMI->isSafeToMove(TII, AA, SawStore))
     return false;