Index: llvm/trunk/lib/Transforms/InstCombine/InstructionCombining.cpp
===================================================================
--- llvm/trunk/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ llvm/trunk/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -1862,12 +1862,18 @@
   return nullptr;
 }
 
-static bool isNeverEqualToUnescapedAlloc(Value *V) {
+static bool isNeverEqualToUnescapedAlloc(Value *V, const TargetLibraryInfo *TLI,
+                                         Instruction *AI) {
   if (isa<ConstantPointerNull>(V))
     return true;
   if (auto *LI = dyn_cast<LoadInst>(V))
     return isa<GlobalVariable>(LI->getPointerOperand());
-  return false;
+  // Two distinct allocations will never be equal.
+  // We rely on LookThroughBitCast in isAllocLikeFn being false, since
+  // looking through bitcasts of V can cause the result below to be true,
+  // even when AI and V (e.g. an i8* -> i32* -> i8* round-trip of AI) are
+  // the same allocation.
+  return isAllocLikeFn(V, TLI) && V != AI;
 }
 
 static bool
@@ -1894,12 +1900,12 @@
       case Instruction::ICmp: {
         ICmpInst *ICI = cast<ICmpInst>(I);
         // We can fold eq/ne comparisons with null to false/true, respectively.
-        // We fold comparisons in some conditions provided the alloc has not
-        // escaped.
+        // We also fold comparisons in some conditions provided the alloc has
+        // not escaped.
         if (!ICI->isEquality())
           return false;
         unsigned OtherIndex = (ICI->getOperand(0) == PI) ? 1 : 0;
-        if (!isNeverEqualToUnescapedAlloc(ICI->getOperand(OtherIndex)))
+        if (!isNeverEqualToUnescapedAlloc(ICI->getOperand(OtherIndex), TLI, AI))
           return false;
         Users.emplace_back(I);
         continue;
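
For reference, here is the helper as it reads once the first hunk is applied — a reassembly of the diff above with explanatory comments, nothing beyond what the patch itself adds:

    static bool isNeverEqualToUnescapedAlloc(Value *V, const TargetLibraryInfo *TLI,
                                             Instruction *AI) {
      // A null pointer can never equal an unescaped allocation.
      if (isa<ConstantPointerNull>(V))
        return true;
      // A value loaded from a global can never equal an unescaped allocation,
      // since the allocation's address was never stored anywhere visible.
      if (auto *LI = dyn_cast<LoadInst>(V))
        return isa<GlobalVariable>(LI->getPointerOperand());
      // Two distinct allocations never compare equal. LookThroughBitCast must
      // stay false inside isAllocLikeFn: a bitcast round-trip of AI would
      // otherwise pass the V != AI identity test while naming the same
      // allocation.
      return isAllocLikeFn(V, TLI) && V != AI;
    }

The design choice worth noting: once the allocation is known not to escape, an eq compare against any value this helper accepts folds to false and an ne compare folds to true, which is exactly what the new tests below check.
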
Index: llvm/trunk/test/Transforms/InstCombine/compare-unescaped.ll
===================================================================
--- llvm/trunk/test/Transforms/InstCombine/compare-unescaped.ll
+++ llvm/trunk/test/Transforms/InstCombine/compare-unescaped.ll
@@ -40,3 +40,53 @@
   ret i32 %rt
 ; CHECK: ret i32 %rt
 }
+
+define i1 @compare_distinct_mallocs() {
+  %m = call i8* @malloc(i64 4)
+  %n = call i8* @malloc(i64 4)
+  %cmp = icmp eq i8* %m, %n
+  ret i1 %cmp
+; CHECK-LABEL: compare_distinct_mallocs
+; CHECK: ret i1 false
+}
+
+; The compare is folded to true since the folding compare looks through bitcasts.
+; The call to malloc and the bitcast instructions are then elided, since there are no remaining uses of the malloc.
+define i1 @compare_samepointer_under_bitcast() {
+  %m = call i8* @malloc(i64 4)
+  %bc = bitcast i8* %m to i32*
+  %bcback = bitcast i32* %bc to i8*
+  %cmp = icmp eq i8* %m, %bcback
+  ret i1 %cmp
+; CHECK-LABEL: compare_samepointer_under_bitcast
+; CHECK: ret i1 true
+}
+
+; The compare is folded to true since the folding compare looks through bitcasts.
+; The malloc call for %m is not elided here, since %m escapes via the deopt bundle on the call to f.
+define i1 @compare_samepointer_escaped() {
+  %m = call i8* @malloc(i64 4)
+  %bc = bitcast i8* %m to i32*
+  %bcback = bitcast i32* %bc to i8*
+  %cmp = icmp eq i8* %m, %bcback
+  call void @f() [ "deopt"(i8* %m) ]
+  ret i1 %cmp
+; CHECK-LABEL: compare_samepointer_escaped
+; CHECK-NEXT: %m = call i8* @malloc(i64 4)
+; CHECK-NEXT: call void @f() [ "deopt"(i8* %m) ]
+; CHECK: ret i1 true
+}
+
+; The malloc call for %m cannot be elided since it is used in the call to function f.
+; However, the compare can be folded to true, since %n does not escape and %m and %n are distinct allocations.
+define i1 @compare_distinct_pointer_escape() {
+  %m = call i8* @malloc(i64 4)
+  %n = call i8* @malloc(i64 4)
+  tail call void @f() [ "deopt"(i8* %m) ]
+  %cmp = icmp ne i8* %m, %n
+  ret i1 %cmp
+; CHECK-LABEL: compare_distinct_pointer_escape
+; CHECK-NEXT: %m = call i8* @malloc(i64 4)
+; CHECK-NEXT: tail call void @f() [ "deopt"(i8* %m) ]
+; CHECK-NEXT: ret i1 true
+}
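
To exercise these cases by hand, the usual InstCombine test pattern applies — a sketch only, since the file's actual RUN line sits above the hunk shown and is assumed rather than part of this diff:

    ; Hypothetical invocation matching the standard RUN-line pattern:
    ; opt -instcombine -S compare-unescaped.ll | FileCheck compare-unescaped.ll

FileCheck reads the CHECK lines from the test file itself and matches them against opt's output, so a fold that stops firing shows up as a CHECK mismatch rather than a silent behavior change.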