Index: lib/Analysis/MemoryDependenceAnalysis.cpp
===================================================================
--- lib/Analysis/MemoryDependenceAnalysis.cpp
+++ lib/Analysis/MemoryDependenceAnalysis.cpp
@@ -56,6 +56,10 @@
     cl::desc("The number of instructions to scan in a block in memory "
              "dependency analysis (default = 100)"));
 
+static cl::opt<bool> InvariantStart(
+    "invariant-start-ignore", cl::Hidden, cl::init(true),
+    cl::desc("Treat invariant.start intrinsic as not causing dependencies"));
+
 static cl::opt<unsigned>
     BlockNumberLimit("memdep-block-number-limit", cl::Hidden, cl::init(1000),
                      cl::desc("The number of blocks to scan during memory "
@@ -131,7 +135,6 @@
     switch (II->getIntrinsicID()) {
     case Intrinsic::lifetime_start:
     case Intrinsic::lifetime_end:
-    case Intrinsic::invariant_start:
       II->getAAMetadata(AAInfo);
       Loc = MemoryLocation(
           II->getArgOperand(1),
@@ -139,13 +142,21 @@
       // These intrinsics don't really modify the memory, but returning Mod
       // will allow them to be handled conservatively.
       return MRI_Mod;
+    case Intrinsic::invariant_start:
+      II->getAAMetadata(AAInfo);
+      Loc = MemoryLocation(
+          II->getArgOperand(1),
+          cast<ConstantInt>(II->getArgOperand(0))->getZExtValue(), AAInfo);
+      // We treat invariant_start as read-only operation, and not modifying the
+      // memory. FIXME: We could do the same for invariant.end.
+      return MRI_Ref;
     case Intrinsic::invariant_end:
       II->getAAMetadata(AAInfo);
       Loc = MemoryLocation(
           II->getArgOperand(2),
           cast<ConstantInt>(II->getArgOperand(1))->getZExtValue(), AAInfo);
-      // These intrinsics don't really modify the memory, but returning Mod
-      // will allow them to be handled conservatively.
+      // This intrinsic does not really modify the memory, but returning Mod
+      // will allow it to be handled conservatively.
       return MRI_Mod;
     default:
       break;
@@ -467,11 +478,27 @@
   while (ScanIt != BB->begin()) {
     Instruction *Inst = &*--ScanIt;
 
-    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
+    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
       // Debug intrinsics don't (and can't) cause dependencies.
       if (isa<DbgInfoIntrinsic>(II))
         continue;
+      // Invariant.start is considered a read-only operation.
+      // invariant.start intrinsic does not cause dependencies if we are allowed
+      // to ignore may-aliases with read-only operations, i.e. isLoad is true.
+      // We need the isLoad check to avoid this incorrect DSE transformation:
+      // store 1, P
+      // invariant.start(P,..)
+      // store 2, P
+      //
+      // (incorrectly) transformed to:
+      //
+      // invariant.start(P,..)
+      // store 2, P
+      if (isLoad && InvariantStart &&
+          II->getIntrinsicID() == Intrinsic::invariant_start)
+        continue;
+    }
 
     // Limit the amount of scanning we do so we don't end up with quadratic
     // running time on extreme testcases.
     --Limit;
Index: test/Transforms/DeadStoreElimination/invariant.start.ll
===================================================================
--- /dev/null
+++ test/Transforms/DeadStoreElimination/invariant.start.ll
@@ -0,0 +1,34 @@
+; Test to make sure llvm.invariant.start calls are not treated as clobbers.
+; RUN: opt < %s -basicaa -dse -S | FileCheck %s
+
+declare {}* @llvm.invariant.start.p0i8(i64, i8* nocapture) nounwind readonly
+
+; We cannot remove the store 1 to %p.
+; FIXME: By the semantics of invariant.start, the store 3 to p is unreachable.
+define void @test(i8 *%p) {
+  store i8 1, i8* %p, align 4
+  %i = call {}* @llvm.invariant.start.p0i8(i64 1, i8* %p)
+  store i8 3, i8* %p, align 4
+  ret void
+; CHECK-LABEL: @test(
+; CHECK-NEXT: store i8 1, i8* %p, align 4
+; CHECK-NEXT: %i = call {}* @llvm.invariant.start.p0i8(i64 1, i8* %p)
+; CHECK-NEXT: store i8 3, i8* %p, align 4
+; CHECK-NEXT: ret void
+}
+
+; FIXME: We should be able to remove the first store to p, even though p and q may alias.
+
+define void @test2(i8* %p, i8* %q) {
+  store i8 1, i8* %p, align 4
+  store i8 2, i8* %q, align 4
+  %i = call {}* @llvm.invariant.start.p0i8(i64 1, i8* %q)
+  store i8 3, i8* %p, align 4
+  ret void
+; CHECK-LABEL: @test2(
+; CHECK-NEXT: store i8 1, i8* %p, align 4
+; CHECK-NEXT: store i8 2, i8* %q, align 4
+; CHECK-NEXT: %i = call {}* @llvm.invariant.start.p0i8(i64 1, i8* %q)
+; CHECK-NEXT: store i8 3, i8* %p, align 4
+; CHECK-NEXT: ret void
+}
Index: test/Transforms/GVN/invariant.start.ll
===================================================================
--- /dev/null
+++ test/Transforms/GVN/invariant.start.ll
@@ -0,0 +1,42 @@
+; Test to make sure llvm.invariant.start calls are not treated as clobbers.
+; RUN: opt < %s -gvn -S | FileCheck %s
+
+
+declare {}* @llvm.invariant.start.p0i8(i64, i8* nocapture) nounwind readonly
+declare {}* @llvm.invariant.start.p1i8(i64, i8 addrspace(1)* nocapture) nounwind readonly
+
+; We forward store to the load across the invariant.start intrinsic
+define i8 @forward_store() {
+; CHECK-LABEL: @forward_store
+  %a = alloca i8
+  store i8 0, i8* %a
+  %i = call {}* @llvm.invariant.start.p0i8(i64 1, i8* %a)
+  ; CHECK: call {}* @llvm.invariant.start.p0i8(i64 1, i8* %a)
+  %r = load i8, i8* %a
+  ; CHECK-NOT: load
+  ret i8 %r
+  ; CHECK: ret i8 0
+}
+
+declare i8 @dummy(i8* nocapture) nounwind readonly
+
+; We forward store to the load in the non-local analysis case,
+; i.e. invariant.start is in another basic block.
+
+define i8 @forward_store_nonlocal(i1 %cond) {
+; CHECK-LABEL: forward_store_nonlocal
+; CHECK: call {}* @llvm.invariant.start.p0i8(i64 1, i8* %a)
+; CHECK: ret i8 0
+; CHECK: ret i8 %val
+  %a = alloca i8
+  store i8 0, i8* %a
+  %i = call {}* @llvm.invariant.start.p0i8(i64 1, i8* %a)
+  br i1 %cond, label %loadblock, label %exit
+
+loadblock:
+  %r = load i8, i8* %a
+  ret i8 %r
+
+exit:
+  %val = call i8 @dummy(i8* %a)
+  ret i8 %val
+}
Index: test/Transforms/MemCpyOpt/invariant.start.ll
===================================================================
--- /dev/null
+++ test/Transforms/MemCpyOpt/invariant.start.ll
@@ -0,0 +1,46 @@
+; MemCpy optimizations should take place even in presence of invariant.start
+; RUN: opt < %s -basicaa -memcpyopt -dse -S | FileCheck %s
+
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
+target triple = "i686-apple-darwin9"
+
+%0 = type { x86_fp80, x86_fp80 }
+declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i32, i1)
+declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1)
+
+declare {}* @llvm.invariant.start.p0i8(i64, i8* nocapture) nounwind readonly
+
+; FIXME: The invariant.start does not modify %P.
+; The intermediate alloca and one of the memcpy's should be eliminated, the
+; other should be transformed to a memmove.
+
+define void @test1(i8* %P, i8* %Q) nounwind {
+  %memtmp = alloca %0, align 16
+  %R = bitcast %0* %memtmp to i8*
+  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %R, i8* %P, i32 32, i32 16, i1 false)
+  %i = call {}* @llvm.invariant.start.p0i8(i64 32, i8* %P)
+  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %Q, i8* %R, i32 32, i32 16, i1 false)
+  ret void
+; CHECK-LABEL: @test1(
+; CHECK-NEXT: %memtmp = alloca %0, align 16
+; CHECK-NEXT: %R = bitcast %0* %memtmp to i8*
+; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %R, i8* %P, i32 32, i32 16, i1 false)
+; CHECK-NEXT: %i = call {}* @llvm.invariant.start.p0i8(i64 32, i8* %P)
+; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %Q, i8* %R, i32 32, i32 16, i1 false)
+; CHECK-NEXT: ret void
+}
+
+; The invariant.start intrinsic does not inhibit transforming the memcpy to a
+; memset.
+define void @test2(i8* %dst1, i8* %dst2, i8 %c) {
+; CHECK-LABEL: define void @test2(
+; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* %dst1, i8 %c, i64 128, i32 1, i1 false)
+; CHECK-NEXT: %i = call {}* @llvm.invariant.start.p0i8(i64 32, i8* %dst1)
+; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* %dst2, i8 %c, i64 128, i32 8, i1 false)
+; CHECK-NEXT: ret void
+  call void @llvm.memset.p0i8.i64(i8* %dst1, i8 %c, i64 128, i32 1, i1 false)
+  %i = call {}* @llvm.invariant.start.p0i8(i64 32, i8* %dst1)
+  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst2, i8* %dst1, i64 128, i32 8, i1 false)
+  ret void
+}