diff --git a/llvm/lib/Analysis/InstructionSimplify.cpp b/llvm/lib/Analysis/InstructionSimplify.cpp
--- a/llvm/lib/Analysis/InstructionSimplify.cpp
+++ b/llvm/lib/Analysis/InstructionSimplify.cpp
@@ -18,6 +18,7 @@
 #include "llvm/Analysis/InstructionSimplify.h"
 #include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/SmallPtrSet.h"
 #include "llvm/ADT/Statistic.h"
 #include "llvm/Analysis/AliasAnalysis.h"
 #include "llvm/Analysis/AssumptionCache.h"
@@ -5823,6 +5824,78 @@
   return ::SimplifyFreezeInst(Op0, Q);
 }
 
+static Constant *ConstructLoadOperandConstant(Value *Op) {
+  SmallVector<Value *, 4> Worklist;
+  // Invalid IR in unreachable code may contain self-referential values. Don't infinitely loop.
+  SmallPtrSet<Value *, 4> Visited;
+  Worklist.push_back(Op);
+  while (true) {
+    Value *CurOp = Worklist.back();
+    if (!Visited.insert(CurOp).second)
+      return nullptr;
+    if (isa<Constant>(CurOp))
+      break;
+    if (auto *BC = dyn_cast<BitCastOperator>(CurOp)) {
+      Worklist.push_back(BC->getOperand(0));
+    } else if (auto *GEP = dyn_cast<GEPOperator>(CurOp)) {
+      for (unsigned I = 1; I != GEP->getNumOperands(); ++I) {
+        if (!isa<Constant>(GEP->getOperand(I)))
+          return nullptr;
+      }
+      Worklist.push_back(GEP->getOperand(0));
+    } else if (auto *II = dyn_cast<IntrinsicInst>(CurOp)) {
+      if (II->isLaunderOrStripInvariantGroup())
+        Worklist.push_back(II->getOperand(0));
+      else
+        return nullptr;
+    } else {
+      return nullptr;
+    }
+  }
+
+  Constant *NewOp = cast<Constant>(Worklist.pop_back_val());
+  while (!Worklist.empty()) {
+    Value *CurOp = Worklist.pop_back_val();
+    if (isa<BitCastOperator>(CurOp)) {
+      NewOp = ConstantExpr::getBitCast(NewOp, CurOp->getType());
+    } else if (auto *GEP = dyn_cast<GEPOperator>(CurOp)) {
+      SmallVector<Constant *> Idxs;
+      Idxs.reserve(GEP->getNumOperands() - 1);
+      for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I) {
+        Idxs.push_back(cast<Constant>(GEP->getOperand(I)));
+      }
+      NewOp = ConstantExpr::getGetElementPtr(GEP->getSourceElementType(), NewOp,
+                                             Idxs, GEP->isInBounds(),
+                                             GEP->getInRangeIndex());
+    } else {
+      assert(isa<IntrinsicInst>(CurOp) &&
+             cast<IntrinsicInst>(CurOp)->isLaunderOrStripInvariantGroup() &&
+             "expected invariant group intrinsic");
+      NewOp = ConstantExpr::getBitCast(NewOp, CurOp->getType());
+    }
+  }
+  return NewOp;
+}
+
+static Value *SimplifyLoadInst(LoadInst *LI, const SimplifyQuery &Q) {
+  if (LI->isVolatile())
+    return nullptr;
+
+  if (auto *C = ConstantFoldInstruction(LI, Q.DL))
+    return C;
+
+  // The following only catches more cases than ConstantFoldInstruction() if the
+  // load operand wasn't a constant. Specifically, invariant.group intrinsics.
+  if (isa<Constant>(LI->getPointerOperand()))
+    return nullptr;
+
+  if (auto *C = dyn_cast_or_null<Constant>(
+          ConstructLoadOperandConstant(LI->getPointerOperand())))
+    return ConstantFoldLoadFromConstPtr(C, LI->getType(), Q.DL);
+
+  return nullptr;
+}
+
 /// See if we can compute a simplified version of this instruction.
 /// If not, this returns null.
@@ -5979,6 +6052,9 @@
     // No simplifications for Alloca and it can't be constant folded.
     Result = nullptr;
     break;
+  case Instruction::Load:
+    Result = SimplifyLoadInst(cast<LoadInst>(I), Q);
+    break;
   }
 
   /// If called on unreachable code, the above logic may report that the
diff --git a/llvm/test/Transforms/InstSimplify/invalid-load-operand-infinite-loop.ll b/llvm/test/Transforms/InstSimplify/invalid-load-operand-infinite-loop.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/Transforms/InstSimplify/invalid-load-operand-infinite-loop.ll
@@ -0,0 +1,51 @@
+; RUN: opt -passes=jump-threading -S < %s | FileCheck %s
+; CHECK: @main
+
+%struct.wobble = type { i8 }
+
+define i32 @main() local_unnamed_addr personality i8* undef {
+bb12:
+  br i1 false, label %bb13, label %bb28
+
+bb13:                                             ; preds = %bb12
+  br label %bb14
+
+bb14:                                             ; preds = %bb26, %bb13
+  %tmp15 = phi i8* [ %tmp27, %bb26 ], [ undef, %bb13 ]
+  %tmp16 = icmp slt i32 5, undef
+  %tmp17 = select i1 false, i1 true, i1 %tmp16
+  br label %bb18
+
+bb18:                                             ; preds = %bb14
+  br i1 %tmp17, label %bb19, label %bb21
+
+bb19:                                             ; preds = %bb18
+  %tmp20 = or i32 undef, 4
+  br label %bb21
+
+bb21:                                             ; preds = %bb19, %bb18
+  %tmp22 = load i8, i8* %tmp15, align 1
+  br label %bb23
+
+bb23:                                             ; preds = %bb21
+  br i1 %tmp17, label %bb24, label %bb25
+
+bb24:                                             ; preds = %bb23
+  br label %bb25
+
+bb25:                                             ; preds = %bb24, %bb23
+  invoke void undef(%struct.wobble* undef, i32 0, i32 undef, i8 %tmp22)
+          to label %bb26 unwind label %bb33
+
+bb26:                                             ; preds = %bb25
+  %tmp27 = getelementptr inbounds i8, i8* %tmp15, i64 1
+  br label %bb14
+
+bb28:                                             ; preds = %bb12
+  unreachable
+
+bb33:                                             ; preds = %bb25
+  %tmp34 = landingpad { i8*, i32 }
+          cleanup
+  unreachable
+}
diff --git a/llvm/test/Transforms/InstSimplify/invariant.group-load.ll b/llvm/test/Transforms/InstSimplify/invariant.group-load.ll
--- a/llvm/test/Transforms/InstSimplify/invariant.group-load.ll
+++ b/llvm/test/Transforms/InstSimplify/invariant.group-load.ll
@@ -9,11 +9,7 @@
 
 define i64 @f() {
 ; CHECK-LABEL: @f(
-; CHECK-NEXT:    [[A:%.*]] = call i8* @llvm.strip.invariant.group.p0i8(i8* bitcast ({ i64, i64 }* @A to i8*))
-; CHECK-NEXT:    [[B:%.*]] = getelementptr i8, i8* [[A]], i32 8
-; CHECK-NEXT:    [[C:%.*]] = bitcast i8* [[B]] to i64*
-; CHECK-NEXT:    [[D:%.*]] = load i64, i64* [[C]], align 4
-; CHECK-NEXT:    ret i64 [[D]]
+; CHECK-NEXT:    ret i64 3
 ;
   %p = bitcast { i64, i64 }* @A to i8*
   %a = call i8* @llvm.strip.invariant.group.p0i8(i8* %p)
@@ -25,11 +21,7 @@
 
 define i64 @g() {
 ; CHECK-LABEL: @g(
-; CHECK-NEXT:    [[A:%.*]] = call i8* @llvm.launder.invariant.group.p0i8(i8* bitcast ({ i64, i64 }* @A to i8*))
-; CHECK-NEXT:    [[B:%.*]] = getelementptr i8, i8* [[A]], i32 8
-; CHECK-NEXT:    [[C:%.*]] = bitcast i8* [[B]] to i64*
-; CHECK-NEXT:    [[D:%.*]] = load i64, i64* [[C]], align 4
-; CHECK-NEXT:    ret i64 [[D]]
+; CHECK-NEXT:    ret i64 3
 ;
   %p = bitcast { i64, i64 }* @A to i8*
   %a = call i8* @llvm.launder.invariant.group.p0i8(i8* %p)