diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
@@ -482,6 +482,14 @@
       if (EnableOpt)
         PM.add(createAMDGPUPromoteAllocaToVector());
   });
+
+  Builder.addExtension(
+    PassManagerBuilder::EP_LoopOptimizerEnd,
+    [](const PassManagerBuilder &, legacy::PassManagerBase &PM) {
+      // Add SROA after loop unrolling, as more promotable patterns are
+      // exposed after small loops are fully unrolled.
+      PM.add(createSROAPass());
+  });
 }
 
 //===----------------------------------------------------------------------===//
diff --git a/llvm/test/CodeGen/AMDGPU/extra-sroa-after-unroll.ll b/llvm/test/CodeGen/AMDGPU/extra-sroa-after-unroll.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/extra-sroa-after-unroll.ll
@@ -0,0 +1,90 @@
+; RUN: opt -O1 -mtriple=amdgcn-amd-amdhsa -S -o - %s | FileCheck -check-prefixes=ALL,O1 %s
+; RUN: opt -O2 -mtriple=amdgcn-amd-amdhsa -S -o - %s | FileCheck -check-prefixes=ALL,O2 %s
+; RUN: opt -O3 -mtriple=amdgcn-amd-amdhsa -S -o - %s | FileCheck -check-prefixes=ALL,O3 %s
+target datalayout = "A5"
+;target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-ni:7"
+;target triple = "amdgcn-amd-amdhsa"
+
+; ALL-LABEL: t0
+; O1-NOT: alloca
+; O2-NOT: alloca
+; O3-NOT: alloca
+define protected amdgpu_kernel void @t0(i32 addrspace(1)* %p.coerce) #0 {
+entry:
+  %p = alloca i32*, align 8, addrspace(5)
+  %p.ascast = addrspacecast i32* addrspace(5)* %p to i32**
+  %p.addr = alloca i32*, align 8, addrspace(5)
+  %p.addr.ascast = addrspacecast i32* addrspace(5)* %p.addr to i32**
+  %t = alloca [27 x i32], align 16, addrspace(5)
+  %t.ascast = addrspacecast [27 x i32] addrspace(5)* %t to [27 x i32]*
+  %sum = alloca i32, align 4, addrspace(5)
+  %sum.ascast = addrspacecast i32 addrspace(5)* %sum to i32*
+  %i = alloca i32, align 4, addrspace(5)
+  %i.ascast = addrspacecast i32 addrspace(5)* %i to i32*
+  %cleanup.dest.slot = alloca i32, align 4, addrspace(5)
+  %0 = addrspacecast i32 addrspace(1)* %p.coerce to i32*
+  store i32* %0, i32** %p.ascast, align 8
+  %p1 = load i32*, i32** %p.ascast, align 8
+  store i32* %p1, i32** %p.addr.ascast, align 8
+  %1 = bitcast [27 x i32] addrspace(5)* %t to i8 addrspace(5)*
+  call void @llvm.lifetime.start.p5i8(i64 108, i8 addrspace(5)* %1)
+  %arraydecay = getelementptr inbounds [27 x i32], [27 x i32]* %t.ascast, i64 0, i64 0
+  %2 = load i32*, i32** %p.addr.ascast, align 8
+  call void @copy(i32* %arraydecay, i32* %2, i32 27)
+  %3 = bitcast i32 addrspace(5)* %sum to i8 addrspace(5)*
+  call void @llvm.lifetime.start.p5i8(i64 4, i8 addrspace(5)* %3)
+  store i32 0, i32* %sum.ascast, align 4
+  %4 = bitcast i32 addrspace(5)* %i to i8 addrspace(5)*
+  call void @llvm.lifetime.start.p5i8(i64 4, i8 addrspace(5)* %4)
+  store i32 0, i32* %i.ascast, align 4
+  br label %for.cond
+
+for.cond:                                         ; preds = %for.inc, %entry
+  %5 = load i32, i32* %i.ascast, align 4
+  %cmp = icmp slt i32 %5, 27
+  br i1 %cmp, label %for.body, label %for.cond.cleanup
+
+for.cond.cleanup:                                 ; preds = %for.cond
+  %6 = bitcast i32 addrspace(5)* %i to i8 addrspace(5)*
+  call void @llvm.lifetime.end.p5i8(i64 4, i8 addrspace(5)* %6)
+  br label %for.end
+
+for.body:                                         ; preds = %for.cond
+  %7 = load i32, i32* %i.ascast, align 4
+  %idxprom = sext i32 %7 to i64
+  %arrayidx = getelementptr inbounds [27 x i32], [27 x i32]* %t.ascast, i64 0, i64 %idxprom
+  %8 = load i32, i32* %arrayidx, align 4
+  %9 = load i32, i32* %sum.ascast, align 4
+  %add = add nsw i32 %9, %8
+  store i32 %add, i32* %sum.ascast, align 4
+  br label %for.inc
+
+for.inc:                                          ; preds = %for.body
+  %10 = load i32, i32* %i.ascast, align 4
+  %inc = add nsw i32 %10, 1
+  store i32 %inc, i32* %i.ascast, align 4
+  br label %for.cond
+
+for.end:                                          ; preds = %for.cond.cleanup
+  %11 = load i32, i32* %sum.ascast, align 4
+  %12 = load i32*, i32** %p.addr.ascast, align 8
+  store i32 %11, i32* %12, align 4
+  %13 = bitcast i32 addrspace(5)* %sum to i8 addrspace(5)*
+  call void @llvm.lifetime.end.p5i8(i64 4, i8 addrspace(5)* %13)
+  %14 = bitcast [27 x i32] addrspace(5)* %t to i8 addrspace(5)*
+  call void @llvm.lifetime.end.p5i8(i64 108, i8 addrspace(5)* %14)
+  ret void
+}
+
+define internal void @copy(i32* %d, i32* %s, i32 %N) {
+entry:
+  %d8 = bitcast i32* %d to i8*
+  %s8 = bitcast i32* %s to i8*
+  %N8 = mul i32 %N, 4
+  tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %d8, i8* %s8, i32 %N8, i1 false)
+  ret void
+}
+
+declare void @llvm.lifetime.start.p5i8(i64 immarg, i8 addrspace(5)* nocapture)
+declare void @llvm.lifetime.end.p5i8(i64 immarg, i8 addrspace(5)* nocapture)
+declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i1)
diff --git a/llvm/test/CodeGen/AMDGPU/opt-pipeline.ll b/llvm/test/CodeGen/AMDGPU/opt-pipeline.ll
--- a/llvm/test/CodeGen/AMDGPU/opt-pipeline.ll
+++ b/llvm/test/CodeGen/AMDGPU/opt-pipeline.ll
@@ -163,7 +163,9 @@
 ; GCN-O1-NEXT: Recognize loop idioms
 ; GCN-O1-NEXT: Delete dead loops
 ; GCN-O1-NEXT: Unroll loops
+; GCN-O1-NEXT: SROA
 ; GCN-O1-NEXT: Phi Values Analysis
+; GCN-O1-NEXT: Function Alias Analysis Results
 ; GCN-O1-NEXT: Memory Dependence Analysis
 ; GCN-O1-NEXT: MemCpy Optimization
 ; GCN-O1-NEXT: Sparse Conditional Constant Propagation
@@ -481,6 +483,8 @@
 ; GCN-O2-NEXT: Recognize loop idioms
 ; GCN-O2-NEXT: Delete dead loops
 ; GCN-O2-NEXT: Unroll loops
+; GCN-O2-NEXT: SROA
+; GCN-O2-NEXT: Function Alias Analysis Results
 ; GCN-O2-NEXT: MergedLoadStoreMotion
 ; GCN-O2-NEXT: Phi Values Analysis
 ; GCN-O2-NEXT: Function Alias Analysis Results
@@ -839,6 +843,8 @@
 ; GCN-O3-NEXT: Recognize loop idioms
 ; GCN-O3-NEXT: Delete dead loops
 ; GCN-O3-NEXT: Unroll loops
+; GCN-O3-NEXT: SROA
+; GCN-O3-NEXT: Function Alias Analysis Results
 ; GCN-O3-NEXT: MergedLoadStoreMotion
 ; GCN-O3-NEXT: Phi Values Analysis
 ; GCN-O3-NEXT: Function Alias Analysis Results
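
Note on the test (not part of the patch): the kernel in extra-sroa-after-unroll.ll
is roughly what clang would emit for C source along the lines of the sketch below.
This is a hedged reconstruction for illustration only; the names mirror the IR
above, but the exact original source is an assumption. It shows why the new SROA
run at EP_LoopOptimizerEnd pays off: before unrolling, t[i] is indexed by a
loop-variant value, so SROA cannot split the aggregate; once the 27-iteration
loop is fully unrolled, every index is a constant, the alloca becomes promotable,
and the extra SROA run eliminates it (hence the "NOT: alloca" checks).

  // Illustrative reconstruction only; not taken from the commit.
  static void copy(int *d, int *s, int N) {
    __builtin_memcpy(d, s, N * sizeof(int));   // corresponds to @copy in the IR
  }

  void t0(int *p) {
    int t[27];                       // the [27 x i32] addrspace(5) alloca
    copy(t, p, 27);
    int sum = 0;
    for (int i = 0; i < 27; ++i)     // fully unrolled by the loop unroller
      sum += t[i];                   // variable index blocks SROA until unroll
    *p = sum;
  }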