diff --git a/llvm/lib/Analysis/BasicAliasAnalysis.cpp b/llvm/lib/Analysis/BasicAliasAnalysis.cpp
--- a/llvm/lib/Analysis/BasicAliasAnalysis.cpp
+++ b/llvm/lib/Analysis/BasicAliasAnalysis.cpp
@@ -1010,10 +1010,13 @@
       return ModRefInfo::NoModRef;
   }
 
-  // The semantics of memcpy intrinsics either exactly overlap or do not
-  // overlap, i.e., source and destination of any given memcpy are either
-  // no-alias or must-alias.
-  if (auto *Inst = dyn_cast<AnyMemCpyInst>(Call)) {
+  // Ideally, there should be no need to special-case the memcpy/memmove
+  // intrinsics here, since the general machinery (based on memory attributes)
+  // should already handle them just fine. Unfortunately, it does not, due to
+  // a deficiency in operand bundle support. At the moment it is not clear
+  // whether the complexity of enhancing the general mechanism is worth it.
+  // TODO: Consider improving operand bundle support in the general mechanism.
+  if (auto *Inst = dyn_cast<AnyMemTransferInst>(Call)) {
     AliasResult SrcAA =
         getBestAAResults().alias(MemoryLocation::getForSource(Inst), Loc, AAQI);
     AliasResult DestAA =
diff --git a/llvm/test/Analysis/BasicAA/deoptimize.ll b/llvm/test/Analysis/BasicAA/deoptimize.ll
--- a/llvm/test/Analysis/BasicAA/deoptimize.ll
+++ b/llvm/test/Analysis/BasicAA/deoptimize.ll
@@ -39,7 +39,7 @@
 ; CHECK-LABEL: Function: test_memmove_with_deopt:
 ; CHECK: Just Mod: Ptr: i8* %A <-> call void @llvm.memmove.p0i8.p0i8.i64(i8* %A, i8* %B, i64 -1, i1 false) [ "deopt"() ]
 ; CHECK: Just Ref: Ptr: i8* %B <-> call void @llvm.memmove.p0i8.p0i8.i64(i8* %A, i8* %B, i64 -1, i1 false) [ "deopt"() ]
-; CHECK: Both ModRef: Ptr: i32* @G1 <-> call void @llvm.memmove.p0i8.p0i8.i64(i8* %A, i8* %B, i64 -1, i1 false) [ "deopt"() ]
+; CHECK: Just Ref: Ptr: i32* @G1 <-> call void @llvm.memmove.p0i8.p0i8.i64(i8* %A, i8* %B, i64 -1, i1 false) [ "deopt"() ]
   %A = alloca i8
   %B = alloca i8
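
For readers who want to reproduce the behavior change, here is a minimal sketch of an aa-eval test in the spirit of the one being updated above. It is reconstructed from the CHECK lines in the hunk; the RUN line, the @G1 definition, the trailing call/ret, and the memmove declaration are assumptions of mine and are not part of this diff.

```llvm
; RUN: opt < %s -passes=aa-eval -print-all-alias-modref-info -disable-output 2>&1 | FileCheck %s
; (Assumed RUN line; the actual test file's RUN line is not shown in the diff.)

@G1 = global i32 0

define void @test_memmove_with_deopt() {
; The memmove itself writes only %A and reads only %B; the "deopt" bundle may
; additionally read, but not write, other memory such as @G1.
  %A = alloca i8
  %B = alloca i8
  call void @llvm.memmove.p0i8.p0i8.i64(i8* %A, i8* %B, i64 -1, i1 false) [ "deopt"() ]
  ret void
}

declare void @llvm.memmove.p0i8.p0i8.i64(i8*, i8*, i64, i1)
```

With the patch applied, aa-eval should report Just Mod for %A, Just Ref for %B, and, because the "deopt" bundle only adds a read of otherwise untouched memory, Just Ref rather than Both ModRef for the unrelated global @G1, which is exactly what the updated CHECK line asserts.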