diff --git a/llvm/include/llvm/Analysis/ConstantFolding.h b/llvm/include/llvm/Analysis/ConstantFolding.h
--- a/llvm/include/llvm/Analysis/ConstantFolding.h
+++ b/llvm/include/llvm/Analysis/ConstantFolding.h
@@ -128,6 +128,17 @@
 Constant *ConstantFoldShuffleVectorInstruction(Constant *V1, Constant *V2,
                                                ArrayRef<int> Mask);
 
+/// Extract value of C at the given Offset reinterpreted as Ty. If bits past
+/// the end of C are accessed, they are assumed to be poison and may take any
+/// value.
+Constant *ConstantFoldLoadFromConst(Constant *C, Type *Ty, const APInt &Offset,
+                                    const DataLayout &DL);
+
+/// Extract value of C reinterpreted as Ty. Same as previous API with zero
+/// offset.
+Constant *ConstantFoldLoadFromConst(Constant *C, Type *Ty,
+                                    const DataLayout &DL);
+
 /// ConstantFoldLoadFromConstPtr - Return the value that a load from C would
 /// produce if it is constant and determinable. If this is not determinable,
 /// return null.
diff --git a/llvm/lib/Analysis/ConstantFolding.cpp b/llvm/lib/Analysis/ConstantFolding.cpp
--- a/llvm/lib/Analysis/ConstantFolding.cpp
+++ b/llvm/lib/Analysis/ConstantFolding.cpp
@@ -668,8 +668,11 @@
   return C;
 }
 
-Constant *ConstantFoldLoadFromConst(Constant *C, Type *Ty, const APInt &Offset,
-                                    const DataLayout &DL) {
+} // end anonymous namespace
+
+Constant *llvm::ConstantFoldLoadFromConst(Constant *C, Type *Ty,
+                                          const APInt &Offset,
+                                          const DataLayout &DL) {
   if (Constant *AtOffset = getConstantAtOffset(C, Offset, DL))
     if (Constant *Result = ConstantFoldLoadThroughBitcast(AtOffset, Ty, DL))
       return Result;
@@ -681,7 +684,10 @@
   return nullptr;
 }
 
-} // end anonymous namespace
+Constant *llvm::ConstantFoldLoadFromConst(Constant *C, Type *Ty,
+                                          const DataLayout &DL) {
+  return ConstantFoldLoadFromConst(C, Ty, APInt(64, 0), DL);
+}
 
 Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty,
                                              const DataLayout &DL) {
diff --git a/llvm/lib/Analysis/Loads.cpp b/llvm/lib/Analysis/Loads.cpp
--- a/llvm/lib/Analysis/Loads.cpp
+++ b/llvm/lib/Analysis/Loads.cpp
@@ -511,8 +511,11 @@
     if (CastInst::isBitOrNoopPointerCastable(Val->getType(), AccessTy, DL))
       return Val;
 
-    if (auto *C = dyn_cast<Constant>(Val))
-      return ConstantFoldLoadThroughBitcast(C, AccessTy, DL);
+    TypeSize StoreSize = DL.getTypeStoreSize(Val->getType());
+    TypeSize LoadSize = DL.getTypeStoreSize(AccessTy);
+    if (TypeSize::isKnownLE(LoadSize, StoreSize))
+      if (auto *C = dyn_cast<Constant>(Val))
+        return ConstantFoldLoadFromConst(C, AccessTy, DL);
   }
 
   return nullptr;
diff --git a/llvm/test/Transforms/InstCombine/load-store-forward.ll b/llvm/test/Transforms/InstCombine/load-store-forward.ll
--- a/llvm/test/Transforms/InstCombine/load-store-forward.ll
+++ b/llvm/test/Transforms/InstCombine/load-store-forward.ll
@@ -2,15 +2,14 @@
 ; RUN: opt -S -instcombine < %s | FileCheck %s --check-prefixes=CHECK,LITTLE
 ; RUN: opt -S -instcombine -data-layout="E" < %s | FileCheck %s --check-prefixes=CHECK,BIG
 
-; Some cases where store to load forwarding is principally possible,
-; but is non-trivial.
-
 define i8 @load_smaller_int(i16* %p) {
-; CHECK-LABEL: @load_smaller_int(
-; CHECK-NEXT:    store i16 258, i16* [[P:%.*]], align 2
-; CHECK-NEXT:    [[P2:%.*]] = bitcast i16* [[P]] to i8*
-; CHECK-NEXT:    [[LOAD:%.*]] = load i8, i8* [[P2]], align 1
-; CHECK-NEXT:    ret i8 [[LOAD]]
+; LITTLE-LABEL: @load_smaller_int(
+; LITTLE-NEXT:    store i16 258, i16* [[P:%.*]], align 2
+; LITTLE-NEXT:    ret i8 2
+;
+; BIG-LABEL: @load_smaller_int(
+; BIG-NEXT:    store i16 258, i16* [[P:%.*]], align 2
+; BIG-NEXT:    ret i8 1
+;
   store i16 258, i16* %p
   %p2 = bitcast i16* %p to i8*
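
As an illustration of the new fold (a hypothetical example in the spirit of the
updated test, not part of the patch): after this change, InstCombine can forward
a store of a wider integer constant to a narrower load, extracting whichever
byte the target's endianness dictates rather than leaving the load in place.

define i8 @example(i32* %p) {
  store i32 305419896, i32* %p       ; 0x12345678
  %p8 = bitcast i32* %p to i8*
  %v = load i8, i8* %p8              ; folds to 120 (0x78) on little-endian,
                                     ; 18 (0x12) on big-endian targets
  ret i8 %v
}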