Index: include/llvm/Analysis/Loads.h
===================================================================
--- include/llvm/Analysis/Loads.h
+++ include/llvm/Analysis/Loads.h
@@ -61,6 +61,38 @@
 /// to scan in the block, used by FindAvailableLoadedValue().
 extern cl::opt<unsigned> DefMaxInstsToScan;
 
+/// Scan the block \p ScanBB backwards, checking to see if the value at the
+/// memory address \p Ptr of type \p AccessTy is locally available within a
+/// small number of instructions. If the value is available, return it.
+///
+/// If not, return the iterator to the last validated instruction that the
+/// value would be live through. If we scanned the entire block and didn't
+/// find something that invalidates \p *Ptr or provides it, ScanFrom is left
+/// at begin() and this returns null. If the scan stops early because an
+/// instruction may modify \p *Ptr, ScanFrom is left just past that
+/// instruction and this returns null as well.
+///
+/// MaxInstsToScan specifies the maximum number of instructions to scan in
+/// the block. If it is set to 0, the whole block is scanned. You can also
+/// optionally specify an alias analysis implementation, which makes this
+/// more precise.
+///
+/// If AATags is non-null and a load or store is found, the AA tags from the
+/// load or store are recorded there. If there are no AA tags or if no access
+/// is found, it is left unmodified.
+///
+/// IsAtomicMemOp specifies whether the memory operation that accesses
+/// \p *Ptr is atomic. We verify that atomicity constraints are satisfied
+/// when forwarding the value from another memory operation that has the
+/// value at \p *Ptr available.
+///
+/// Note that we assume \p *Ptr is accessed through a non-volatile but
+/// potentially atomic load. Any other constraints should be verified by the
+/// caller.
+Value *FindAvailableLoadedValue(Value *Ptr, Type *AccessTy, bool IsAtomicMemOp,
+                                BasicBlock *ScanBB,
+                                BasicBlock::iterator &ScanFrom,
+                                unsigned MaxInstsToScan,
+                                AliasAnalysis *AA = nullptr,
+                                AAMDNodes *AATags = nullptr,
+                                bool *IsLoadCSE = nullptr);
+
 /// \brief Scan backwards to see if we have the value of the given load
 /// available locally within a small number of instructions.
 ///
Index: lib/Analysis/Loads.cpp
===================================================================
--- lib/Analysis/Loads.cpp
+++ lib/Analysis/Loads.cpp
@@ -300,27 +300,41 @@
         "to scan backward from a given instruction, when searching for "
         "available loaded value"));
 
-Value *llvm::FindAvailableLoadedValue(LoadInst *Load, BasicBlock *ScanBB,
+
+/// Scan the block \p ScanBB backwards, checking to see if the value at the
+/// memory address \p Ptr of type \p AccessTy is locally available within a
+/// small number of instructions. If the value is available, return it.
+///
+/// If not, return the iterator to the last validated instruction that the
+/// value would be live through. If we scanned the entire block and didn't
+/// find something that invalidates \p *Ptr or provides it, ScanFrom is left
+/// at begin() and this returns null. If the scan stops early because an
+/// instruction may modify \p *Ptr, ScanFrom is left just past that
+/// instruction and this returns null as well.
+///
+/// MaxInstsToScan specifies the maximum number of instructions to scan in
+/// the block. If it is set to 0, the whole block is scanned. You can also
+/// optionally specify an alias analysis implementation, which makes this
+/// more precise.
+///
+/// If AATags is non-null and a load or store is found, the AA tags from the
+/// load or store are recorded there. If there are no AA tags or if no access
+/// is found, it is left unmodified.
+///
+/// IsAtomicMemOp specifies whether the memory operation that accesses
+/// \p *Ptr is atomic. We verify that atomicity constraints are satisfied
+/// when forwarding the value from another memory operation that has the
+/// value at \p *Ptr available.
+///
+/// Note that we assume \p *Ptr is accessed through a non-volatile but
+/// potentially atomic load. Any other constraints should be verified by the
+/// caller.
+Value *llvm::FindAvailableLoadedValue(Value *Ptr, Type *AccessTy,
+                                      bool IsAtomicMemOp, BasicBlock *ScanBB,
                                       BasicBlock::iterator &ScanFrom,
                                       unsigned MaxInstsToScan,
                                       AliasAnalysis *AA, AAMDNodes *AATags,
                                       bool *IsLoadCSE) {
+
   if (MaxInstsToScan == 0)
     MaxInstsToScan = ~0U;
-
-  Value *Ptr = Load->getPointerOperand();
-  Type *AccessTy = Load->getType();
-
-  // We can never remove a volatile load
-  if (Load->isVolatile())
-    return nullptr;
-
-  // Anything stronger than unordered is currently unimplemented.
-  if (!Load->isUnordered())
-    return nullptr;
-
   const DataLayout &DL = ScanBB->getModule()->getDataLayout();
-
   // Try to get the store size for the type.
   uint64_t AccessSize = DL.getTypeStoreSize(AccessTy);
@@ -351,7 +365,7 @@
 
     // We can value forward from an atomic to a non-atomic, but not the
     // other way around.
-    if (LI->isAtomic() < Load->isAtomic())
+    if (LI->isAtomic() < IsAtomicMemOp)
       return nullptr;
 
     if (AATags)
@@ -372,7 +386,7 @@
 
     // We can value forward from an atomic to a non-atomic, but not the
    // other way around.
-    if (SI->isAtomic() < Load->isAtomic())
+    if (SI->isAtomic() < IsAtomicMemOp)
       return nullptr;
 
     if (AATags)
@@ -416,3 +430,24 @@
   // block.
   return nullptr;
 }
+
+
+Value *llvm::FindAvailableLoadedValue(LoadInst *Load, BasicBlock *ScanBB,
+                                      BasicBlock::iterator &ScanFrom,
+                                      unsigned MaxInstsToScan,
+                                      AliasAnalysis *AA, AAMDNodes *AATags,
+                                      bool *IsLoadCSE) {
+
+  // We can never remove a volatile load.
+  if (Load->isVolatile())
+    return nullptr;
+
+  // Anything stronger than unordered is currently unimplemented.
+  if (!Load->isUnordered())
+    return nullptr;
+
+  // Return the full value of the load if available.
+  return FindAvailableLoadedValue(Load->getPointerOperand(), Load->getType(),
+                                  Load->isAtomic(), ScanBB, ScanFrom,
+                                  MaxInstsToScan, AA, AATags, IsLoadCSE);
+}
Index: lib/Transforms/InstCombine/InstCombineCasts.cpp
===================================================================
--- lib/Transforms/InstCombine/InstCombineCasts.cpp
+++ lib/Transforms/InstCombine/InstCombineCasts.cpp
@@ -14,9 +14,10 @@
 #include "InstCombineInternal.h"
 #include "llvm/ADT/SetVector.h"
 #include "llvm/Analysis/ConstantFolding.h"
+#include "llvm/Analysis/Loads.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
 #include "llvm/IR/DataLayout.h"
 #include "llvm/IR/PatternMatch.h"
-#include "llvm/Analysis/TargetLibraryInfo.h"
 using namespace llvm;
 using namespace PatternMatch;
 
@@ -576,6 +577,26 @@
   if (Instruction *I = foldVecTruncToExtElt(CI, *this, DL))
     return I;
 
+  // When the trunc operand is a widened load, see if we can get the value
+  // from a previous store or load.
+  if (auto *LI = dyn_cast<LoadInst>(Src)) {
+    BasicBlock::iterator BBI(*LI);
+
+    // Scan a few instructions up from LI and, if we find a partial load or
+    // store of type DestTy that feeds into LI, replace all uses of the trunc
+    // with the load/store value.
+    // This replacement can be done only for non-volatile loads. If the load
+    // is atomic, its only use should be the trunc instruction; we don't want
+    // other users of LI to see a value that is out of sync with the value
+    // we're folding the trunc to (in case of a race).
+    if (!LI->isVolatile() && (!LI->isAtomic() || LI->hasOneUse()))
+      if (Value *AvailableVal = FindAvailableLoadedValue(
+              LI->getPointerOperand(), DestTy, LI->isAtomic(), LI->getParent(),
+              BBI, DefMaxInstsToScan))
+        return replaceInstUsesWith(
+            CI, Builder->CreateBitOrPointerCast(AvailableVal, CI.getType(),
+                                                CI.getName() + ".cast"));
+  }
 
   return nullptr;
 }
Index: test/Transforms/InstCombine/trunc.ll
===================================================================
--- test/Transforms/InstCombine/trunc.ll
+++ test/Transforms/InstCombine/trunc.ll
@@ -181,3 +181,108 @@
 bb2:
   unreachable
 }
+
+
+declare void @consume(i8) readonly
+
+; The trunc can be replaced with the known store value, zero.
+define i1 @trunc_load_store(i8* align 2 %a) {
+  store i8 0, i8* %a, align 2
+  %bca = bitcast i8* %a to i16*
+  %wide.load = load i16, i16* %bca, align 2
+  %lowhalf.1 = trunc i16 %wide.load to i8
+  call void @consume(i8 %lowhalf.1)
+  %cmp.2 = icmp ult i16 %wide.load, 256
+  ret i1 %cmp.2
+; CHECK-LABEL: @trunc_load_store
+; CHECK-NOT: trunc
+; CHECK: call void @consume(i8 0)
+}
+
+; The trunc can be replaced with the load value.
+define i1 @trunc_load_load(i8* align 2 %a) {
+  %pload = load i8, i8* %a, align 2
+  %bca = bitcast i8* %a to i16*
+  %wide.load = load i16, i16* %bca, align 2
+  %lowhalf = trunc i16 %wide.load to i8
+  call void @consume(i8 %lowhalf)
+  call void @consume(i8 %pload)
+  %cmp.2 = icmp ult i16 %wide.load, 256
+  ret i1 %cmp.2
+; CHECK-LABEL: @trunc_load_load
+; CHECK-NOT: trunc
+}
+
+; Store and load to the same memory location, with the address generated
+; through GEPs. The trunc can be removed by using the store value.
+define void @trunc_with_gep_memaccess(i16* align 2 %p) {
+  %t0 = getelementptr i16, i16* %p, i32 1
+  store i16 2, i16* %t0
+  %t1 = getelementptr i16, i16* %p, i32 1
+  %x = load i16, i16* %t1
+  %lowhalf = trunc i16 %x to i8
+  call void @consume(i8 %lowhalf)
+  ret void
+; CHECK-LABEL: @trunc_with_gep_memaccess
+; CHECK-NOT: trunc
+; CHECK: call void @consume(i8 2)
+}
+
+; The trunc should not be replaced, since the atomic load %wide.load has more
+; than one use; in case of a race, the uses of %wide.load could otherwise see
+; different values.
+define i1 @trunc_atomic_loads(i8* align 2 %a) {
+  %pload = load atomic i8, i8* %a unordered, align 2
+  %bca = bitcast i8* %a to i16*
+  %wide.load = load atomic i16, i16* %bca unordered, align 2
+  %lowhalf = trunc i16 %wide.load to i8
+  call void @consume(i8 %lowhalf)
+  call void @consume(i8 %pload)
+  %cmp.2 = icmp ult i16 %wide.load, 256
+  ret i1 %cmp.2
+; CHECK-LABEL: @trunc_atomic_loads
+; CHECK: trunc
+}
+
+; The trunc can be replaced, since the atomic load has a single use. The
+; atomic load is also removed once its only use is gone.
+define void @trunc_atomic_single_load(i8* align 2 %a) {
+  %pload = load atomic i8, i8* %a unordered, align 2
+  %bca = bitcast i8* %a to i16*
+  %wide.load = load atomic i16, i16* %bca unordered, align 2
+  %lowhalf = trunc i16 %wide.load to i8
+  call void @consume(i8 %lowhalf)
+  call void @consume(i8 %pload)
+  ret void
+; CHECK-LABEL: @trunc_atomic_single_load
+; CHECK-NOT: trunc
+; CHECK-NOT: %wide.load = load atomic i16, i16* %bca unordered, align 2
+}
+
+; The trunc cannot be replaced, since the store size does not match the trunc
+; result size.
+define i1 @trunc_different_size_load(i16* align 2 %a) {
+  store i16 0, i16* %a, align 2
+  %bca = bitcast i16* %a to i32*
+  %wide.load = load i32, i32* %bca, align 2
+  %lowhalf = trunc i32 %wide.load to i8
+  call void @consume(i8 %lowhalf)
+  %cmp.2 = icmp ult i32 %wide.load, 256
+  ret i1 %cmp.2
+; CHECK-LABEL: @trunc_different_size_load
+; CHECK: %lowhalf = trunc i32 %wide.load to i8
+}
+
+declare void @consume_f(float) readonly
+
+; A bitcast is required, since the trunc result type and %fload have different
+; types; the trunc is therefore replaced with a bitcast.
+define i1 @trunc_avoid_bitcast(float* %b) {
+  %fload = load float, float* %b
+  %bca = bitcast float* %b to i64*
+  %iload = load i64, i64* %bca
+  %low32 = trunc i64 %iload to i32
+  call void @consume_f(float %fload)
+  %cmp.2 = icmp ult i32 %low32, 256
+  ret i1 %cmp.2
+; CHECK-LABEL: @trunc_avoid_bitcast
+; CHECK-NOT: %low32 = trunc i64 %iload to i32
+; CHECK: %low32.cast = bitcast float %fload to i32
+; CHECK: %cmp.2 = icmp ult i32 %low32.cast, 256
+}
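Note for reviewers: below is a minimal caller-side sketch of the new overload,
mirroring the InstCombine use in this patch. It is not part of the patch; the
helper name findNarrowValue, the LI/DestTy parameter names, and the IRBuilder
plumbing are illustrative only.

  // Given a non-volatile wide load LI and the narrower type DestTy wanted at
  // the use site, return the equivalent narrow value if one is available.
  static Value *findNarrowValue(LoadInst *LI, Type *DestTy,
                                IRBuilder<> &Builder) {
    // Start scanning backwards from the wide load itself.
    BasicBlock::iterator BBI(*LI);
    // Pass LI's atomicity so that forwarding from a weaker access is
    // rejected, matching the isAtomic() checks in Loads.cpp above.
    if (Value *AvailableVal = FindAvailableLoadedValue(
            LI->getPointerOperand(), DestTy, LI->isAtomic(), LI->getParent(),
            BBI, DefMaxInstsToScan))
      // The available value may carry the same bits at a different type
      // (e.g. float vs. i32); reconcile with a bit-preserving cast.
      return Builder.CreateBitOrPointerCast(AvailableVal, DestTy);
    return nullptr;
  }

As in the @trunc_avoid_bitcast test, CreateBitOrPointerCast folds to a no-op
when the types already match and otherwise emits the needed bitcast.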