Index: llvm/trunk/lib/Analysis/ConstantFolding.cpp
===================================================================
--- llvm/trunk/lib/Analysis/ConstantFolding.cpp
+++ llvm/trunk/lib/Analysis/ConstantFolding.cpp
@@ -960,10 +960,8 @@
         NewIdxs.size() > *LastIRIndex) {
       InRangeIndex = LastIRIndex;
       for (unsigned I = 0; I <= *LastIRIndex; ++I)
-        if (NewIdxs[I] != InnermostGEP->getOperand(I + 1)) {
-          InRangeIndex = None;
-          break;
-        }
+        if (NewIdxs[I] != InnermostGEP->getOperand(I + 1))
+          return nullptr;
     }
 
   // Create a GEP.
@@ -985,11 +983,6 @@
 /// returned, if not, null is returned. Note that this function can fail when
 /// attempting to fold instructions like loads and stores, which have no
 /// constant expression form.
-///
-/// TODO: This function neither utilizes nor preserves nsw/nuw/inbounds/inrange
-/// etc information, due to only being passed an opcode and operands. Constant
-/// folding using this function strips this information.
-///
 Constant *ConstantFoldInstOperandsImpl(const Value *InstOrCE, unsigned Opcode,
                                        ArrayRef<Constant *> Ops,
                                        const DataLayout &DL,
Index: llvm/trunk/test/Analysis/ConstantFolding/gep.ll
===================================================================
--- llvm/trunk/test/Analysis/ConstantFolding/gep.ll
+++ llvm/trunk/test/Analysis/ConstantFolding/gep.ll
@@ -8,23 +8,20 @@
 
 @vt = external global [3 x i8*]
 
-; CHECK: define i32 (...)* @f0()
-define i32 (...)* @f0() {
-  ; CHECK-NEXT: load i32 (...)*, i32 (...)** bitcast (i8** getelementptr inbounds ([3 x i8*], [3 x i8*]* @vt, inrange i64 0, i64 2) to i32 (...)**)
-  %load = load i32 (...)*, i32 (...)** getelementptr (i32 (...)*, i32 (...)** bitcast (i8** getelementptr inbounds ([3 x i8*], [3 x i8*]* @vt, inrange i64 0, i64 1) to i32 (...)**), i64 1)
-  ret i32 (...)* %load
+; CHECK: define i32 (...)** @f0()
+define i32 (...)** @f0() {
+  ; CHECK-NEXT: ret i32 (...)** bitcast (i8** getelementptr inbounds ([3 x i8*], [3 x i8*]* @vt, inrange i64 0, i64 2) to i32 (...)**
+  ret i32 (...)** getelementptr (i32 (...)*, i32 (...)** bitcast (i8** getelementptr inbounds ([3 x i8*], [3 x i8*]* @vt, inrange i64 0, i64 1) to i32 (...)**), i64 1)
 }
 
-; CHECK: define i32 (...)* @f1()
-define i32 (...)* @f1() {
-  ; CHECK-NEXT: load i32 (...)*, i32 (...)** bitcast (i8** getelementptr inbounds ([3 x i8*], [3 x i8*]* @vt, i64 0, i64 2) to i32 (...)**)
-  %load = load i32 (...)*, i32 (...)** getelementptr (i32 (...)*, i32 (...)** bitcast (i8** getelementptr inbounds ([3 x i8*], [3 x i8*]* @vt, i64 0, inrange i64 1) to i32 (...)**), i64 1)
-  ret i32 (...)* %load
+; CHECK: define i32 (...)** @f1()
+define i32 (...)** @f1() {
+  ; CHECK-NEXT: ret i32 (...)** getelementptr (i32 (...)*, i32 (...)** bitcast (i8** getelementptr inbounds ([3 x i8*], [3 x i8*]* @vt, i64 0, inrange i64 1) to i32 (...)**), i64 1)
+  ret i32 (...)** getelementptr (i32 (...)*, i32 (...)** bitcast (i8** getelementptr inbounds ([3 x i8*], [3 x i8*]* @vt, i64 0, inrange i64 1) to i32 (...)**), i64 1)
 }
 
-; CHECK: define i32 (...)* @f2()
-define i32 (...)* @f2() {
-  ; CHECK-NEXT: load i32 (...)*, i32 (...)** bitcast (i8** getelementptr ([3 x i8*], [3 x i8*]* @vt, i64 1, i64 1) to i32 (...)**)
-  %load = load i32 (...)*, i32 (...)** getelementptr (i32 (...)*, i32 (...)** bitcast (i8** getelementptr inbounds ([3 x i8*], [3 x i8*]* @vt, i64 0, inrange i64 1) to i32 (...)**), i64 3)
-  ret i32 (...)* %load
+; CHECK: define i32 (...)** @f2()
+define i32 (...)** @f2() {
+  ; CHECK-NEXT: ret i32 (...)** getelementptr (i32 (...)*, i32 (...)** bitcast (i8** getelementptr inbounds ([3 x i8*], [3 x i8*]* @vt, i64 0, inrange i64 1) to i32 (...)**), i64 3)
+  ret i32 (...)** getelementptr (i32 (...)*, i32 (...)** bitcast (i8** getelementptr inbounds ([3 x i8*], [3 x i8*]* @vt, i64 0, inrange i64 1) to i32 (...)**), i64 3)
 }
Index: llvm/trunk/test/Other/optimize-inrange-gep.ll
===================================================================
--- llvm/trunk/test/Other/optimize-inrange-gep.ll
+++ llvm/trunk/test/Other/optimize-inrange-gep.ll
@@ -0,0 +1,18 @@
+; RUN: opt -O0 -S < %s | FileCheck %s
+; RUN: opt -O1 -S < %s | FileCheck %s
+; RUN: opt -O2 -S < %s | FileCheck %s
+; RUN: opt -O3 -S < %s | FileCheck %s
+; RUN: opt -Os -S < %s | FileCheck %s
+; RUN: opt -Oz -S < %s | FileCheck %s
+
+target datalayout = "e-p:64:64"
+
+; Make sure that optimizations do not optimize inrange GEP.
+
+@vtable = constant { [3 x i8*] } { [3 x i8*] [i8* null, i8* null, i8* null] }
+
+define void @foo(i8*** %p) {
+  ;CHECK: store i8** getelementptr {{.*}} ({ [3 x i8*] }, { [3 x i8*] }* @vtable, i{{.*}} 0, inrange i32 0, i{{.*}} 3), i8*** %p
+  store i8** getelementptr ({ [3 x i8*] }, { [3 x i8*] }* @vtable, i32 0, inrange i32 0, i32 3), i8*** %p
+  ret void
+}
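
For readers comparing the CHECK deltas in gep.ll, here is an illustrative before/after for @f1, reconstructed from the old and new CHECK lines above (commentary only, not additional test content): previously the folder collapsed the outer GEP into the inner one and dropped the inrange marker, whereas with this patch the expression is left unfolded.

    ; Old fold result checked by @f1 (inrange dropped):
    ;   bitcast (i8** getelementptr inbounds ([3 x i8*], [3 x i8*]* @vt, i64 0, i64 2) to i32 (...)**)
    ; New expected result (unfolded, inrange i64 1 preserved):
    ;   getelementptr (i32 (...)*, i32 (...)** bitcast (i8** getelementptr inbounds
    ;     ([3 x i8*], [3 x i8*]* @vt, i64 0, inrange i64 1) to i32 (...)**), i64 1)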