Index: lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
===================================================================
--- lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
+++ lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
@@ -36,7 +36,8 @@
 #include "llvm/Support/raw_os_ostream.h"
 #include "llvm/Transforms/IPO.h"
 #include "llvm/Transforms/Scalar.h"
-#include
+#include "llvm/Transforms/Scalar/GVN.h"
+#include "llvm/CodeGen/Passes.h"
 
 using namespace llvm;
 
@@ -171,8 +172,9 @@
     return nullptr;
   }
 
+  void addEarlyCSEOrGVNPass();
+  void addStraightLineScalarOptimizationPasses();
   void addIRPasses() override;
-  void addCodeGenPrepare() override;
   bool addPreISel() override;
   bool addInstSelector() override;
   bool addGCPasses() override;
@@ -216,6 +218,30 @@
   });
 }
 
+void AMDGPUPassConfig::addEarlyCSEOrGVNPass() {
+  if (getOptLevel() == CodeGenOpt::Aggressive)
+    addPass(createGVNPass());
+  else
+    addPass(createEarlyCSEPass());
+}
+
+void AMDGPUPassConfig::addStraightLineScalarOptimizationPasses() {
+  addPass(createSeparateConstOffsetFromGEPPass());
+  addPass(createSpeculativeExecutionPass());
+  // ReassociateGEPs exposes more opportunities for SLSR. See
+  // the example in reassociate-geps-and-slsr.ll.
+  addPass(createStraightLineStrengthReducePass());
+  // SeparateConstOffsetFromGEP and SLSR create common expressions which GVN or
+  // EarlyCSE can reuse. GVN generates significantly better code than EarlyCSE
+  // for some of our benchmarks.
+  addEarlyCSEOrGVNPass();
+  // Run NaryReassociate after EarlyCSE/GVN to be more effective.
+  addPass(createNaryReassociatePass());
+  // NaryReassociate on GEPs creates redundant common expressions, so run
+  // EarlyCSE after it.
+  addPass(createEarlyCSEPass());
+}
+
 void AMDGPUPassConfig::addIRPasses() {
   // There is no reason to run these.
   disablePass(&StackMapLivenessID);
@@ -235,17 +261,31 @@
   // Handle uses of OpenCL image2d_t, image3d_t and sampler_t arguments.
   addPass(createAMDGPUOpenCLImageTypeLoweringPass());
 
-  TargetPassConfig::addIRPasses();
-}
-
-void AMDGPUPassConfig::addCodeGenPrepare() {
   const AMDGPUTargetMachine &TM = getAMDGPUTargetMachine();
   const AMDGPUSubtarget &ST = *TM.getSubtargetImpl();
   if (TM.getOptLevel() > CodeGenOpt::None && ST.isPromoteAllocaEnabled()) {
-    addPass(createAMDGPUPromoteAlloca(&TM));
+    addPass(createAMDGPUPromoteAlloca(&TM), true);
     addPass(createSROAPass());
   }
-  TargetPassConfig::addCodeGenPrepare();
+
+  addStraightLineScalarOptimizationPasses();
+
+  TargetPassConfig::addIRPasses();
+
+  // EarlyCSE is not always strong enough to clean up what LSR produces. For
+  // example, GVN can combine
+  //
+  //   %0 = add %a, %b
+  //   %1 = add %b, %a
+  //
+  // and
+  //
+  //   %0 = shl nsw %a, 2
+  //   %1 = shl %a, 2
+  //
+  // but EarlyCSE can do neither of them.
+  if (getOptLevel() != CodeGenOpt::None)
+    addEarlyCSEOrGVNPass();
 }
 
 bool
Index: test/CodeGen/AMDGPU/local-memory-two-objects.ll
===================================================================
--- test/CodeGen/AMDGPU/local-memory-two-objects.ll
+++ test/CodeGen/AMDGPU/local-memory-two-objects.ll
@@ -1,6 +1,6 @@
-; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck --check-prefix=EG %s
-; RUN: llc < %s -march=amdgcn -mcpu=verde -verify-machineinstrs | FileCheck --check-prefix=GCN --check-prefix=SI %s
-; RUN: llc < %s -march=amdgcn -mcpu=bonaire -verify-machineinstrs | FileCheck --check-prefix=GCN --check-prefix=CI %s
+; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=CI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
 
 @local_memory_two_objects.local_mem0 = internal unnamed_addr addrspace(3) global [4 x i32] undef, align 4
 @local_memory_two_objects.local_mem1 = internal unnamed_addr addrspace(3) global [4 x i32] undef, align 4
@@ -12,15 +12,14 @@
 ; GCN: .long 47180
 ; GCN-NEXT: .long 32900
 
-; EG: {{^}}local_memory_two_objects:
+
+; FUNC-LABEL: {{^}}local_memory_two_objects:
 
 ; We would like to check the lds writes are using different
 ; addresses, but due to variations in the scheduler, we can't do
 ; this consistently on evergreen GPUs.
 ; EG: LDS_WRITE
 ; EG: LDS_WRITE
-; GCN: ds_write_b32 {{v[0-9]*}}, v[[ADDRW:[0-9]*]]
-; GCN-NOT: ds_write_b32 {{v[0-9]*}}, v[[ADDRW]]
 
 ; GROUP_BARRIER must be the last instruction in a clause
 ; EG: GROUP_BARRIER
@@ -30,10 +29,32 @@
 ; constant offsets.
 ; EG: LDS_READ_RET {{[*]*}} OQAP, {{PV|T}}[[ADDRR:[0-9]*\.[XYZW]]]
 ; EG-NOT: LDS_READ_RET {{[*]*}} OQAP, T[[ADDRR]]
-; SI: v_add_i32_e32 [[SIPTR:v[0-9]+]], vcc, 16, v{{[0-9]+}}
-; SI: ds_read_b32 {{v[0-9]+}}, [[SIPTR]]
-; CI-DAG: ds_read_b32 {{v[0-9]+}}, [[ADDRR:v[0-9]+]] offset:16
-; CI-DAG: ds_read_b32 {{v[0-9]+}}, [[ADDRR]]
+
+
+; GCN: v_lshlrev_b32_e32 [[ADDRW:v[0-9]+]], 2, v0
+; CI-DAG: ds_write_b32 [[ADDRW]], {{v[0-9]*}} offset:16
+; CI-DAG: ds_write_b32 [[ADDRW]], {{v[0-9]*$}}
+
+
+; SI: v_add_i32_e32 [[ADDRW_OFF:v[0-9]+]], vcc, 16, [[ADDRW]]
+
+; SI-DAG: ds_write_b32 [[ADDRW]],
+; SI-DAG: ds_write_b32 [[ADDRW_OFF]],
+
+; GCN: s_barrier
+
+; SI-DAG: v_sub_i32_e32 [[SUB0:v[0-9]+]], vcc, 28, [[ADDRW]]
+; SI-DAG: v_sub_i32_e32 [[SUB1:v[0-9]+]], vcc, 12, [[ADDRW]]
+
+; SI-DAG: ds_read_b32 v{{[0-9]+}}, [[SUB0]]
+; SI-DAG: ds_read_b32 v{{[0-9]+}}, [[SUB1]]
+
+; CI: v_sub_i32_e32 [[SUB:v[0-9]+]], vcc, 0, [[ADDRW]]
+
+; CI-DAG: ds_read_b32 {{v[0-9]+}}, [[SUB]] offset:12
+; CI-DAG: ds_read_b32 {{v[0-9]+}}, [[SUB]] offset:28
+
+
 
 define void @local_memory_two_objects(i32 addrspace(1)* %out) {
 entry:
Index: test/CodeGen/AMDGPU/predicates.ll
===================================================================
--- test/CodeGen/AMDGPU/predicates.ll
+++ test/CodeGen/AMDGPU/predicates.ll
@@ -1,27 +1,27 @@
-; RUN: llc < %s -march=r600 -mattr=disable-irstructurizer -mcpu=redwood | FileCheck %s
+; RUN: llc -spec-exec-max-speculation-cost=0 -march=r600 -mattr=disable-irstructurizer -mcpu=redwood < %s | FileCheck %s
 
 ; These tests make sure the compiler is optimizing branches using predicates
 ; when it is legal to do so.
 
-; CHECK: {{^}}simple_if:
+; CHECK-LABEL: {{^}}simple_if:
 ; CHECK: PRED_SET{{[EGN][ET]*}}_INT * Pred,
 ; CHECK: LSHL * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}, 1, Pred_sel
 define void @simple_if(i32 addrspace(1)* %out, i32 %in) {
 entry:
-  %0 = icmp sgt i32 %in, 0
-  br i1 %0, label %IF, label %ENDIF
+  %cmp0 = icmp sgt i32 %in, 0
+  br i1 %cmp0, label %IF, label %ENDIF
 
 IF:
-  %1 = shl i32 %in, 1
+  %tmp1 = shl i32 %in, 1
   br label %ENDIF
 
 ENDIF:
-  %2 = phi i32 [ %in, %entry ], [ %1, %IF ]
-  store i32 %2, i32 addrspace(1)* %out
+  %tmp2 = phi i32 [ %in, %entry ], [ %tmp1, %IF ]
+  store i32 %tmp2, i32 addrspace(1)* %out
   ret void
 }
 
-; CHECK: {{^}}simple_if_else:
+; CHECK-LABEL: {{^}}simple_if_else:
 ; CHECK: PRED_SET{{[EGN][ET]*}}_INT * Pred,
 ; CHECK: LSH{{[LR] \* T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}, 1, Pred_sel
 ; CHECK: LSH{{[LR] \* T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}, 1, Pred_sel
@@ -44,7 +44,7 @@
   ret void
 }
 
-; CHECK: {{^}}nested_if:
+; CHECK-LABEL: {{^}}nested_if:
 ; CHECK: ALU_PUSH_BEFORE
 ; CHECK: JUMP
 ; CHECK: POP
@@ -71,7 +71,7 @@
   ret void
 }
 
-; CHECK: {{^}}nested_if_else:
+; CHECK-LABEL: {{^}}nested_if_else:
 ; CHECK: ALU_PUSH_BEFORE
 ; CHECK: JUMP
 ; CHECK: POP
Index: test/CodeGen/AMDGPU/setcc-opt.ll
===================================================================
--- test/CodeGen/AMDGPU/setcc-opt.ll
+++ test/CodeGen/AMDGPU/setcc-opt.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=GCN -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=GCN -check-prefix=FUNC %s
 ; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=VI -check-prefix=GCN -check-prefix=FUNC %s
 ; RUN: llc -march=r600 -mcpu=cypress -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
 
@@ -36,38 +36,6 @@
   ret void
 }
 
-; This really folds away to false
-; FUNC-LABEL: {{^}}sext_bool_icmp_eq_1:
-; GCN: v_cmp_eq_i32_e32 vcc,
-; GCN-NEXT: v_cndmask_b32_e64 [[TMP:v[0-9]+]], 0, -1, vcc
-; GCN-NEXT: v_cmp_eq_i32_e32 vcc, 1, [[TMP]]{{$}}
-; GCN-NEXT: v_cndmask_b32_e64 [[TMP:v[0-9]+]], 0, 1,
-; GCN-NEXT: buffer_store_byte [[TMP]]
-; GCN-NEXT: s_endpgm
-define void @sext_bool_icmp_eq_1(i1 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
-  %icmp0 = icmp eq i32 %a, %b
-  %ext = sext i1 %icmp0 to i32
-  %icmp1 = icmp eq i32 %ext, 1
-  store i1 %icmp1, i1 addrspace(1)* %out
-  ret void
-}
-
-; This really folds away to true
-; FUNC-LABEL: {{^}}sext_bool_icmp_ne_1:
-; GCN: v_cmp_ne_i32_e32 vcc,
-; GCN-NEXT: v_cndmask_b32_e64 [[TMP:v[0-9]+]], 0, -1, vcc
-; GCN-NEXT: v_cmp_ne_i32_e32 vcc, 1, [[TMP]]{{$}}
-; GCN-NEXT: v_cndmask_b32_e64 [[TMP:v[0-9]+]], 0, 1,
-; GCN-NEXT: buffer_store_byte [[TMP]]
-; GCN-NEXT: s_endpgm
-define void @sext_bool_icmp_ne_1(i1 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
-  %icmp0 = icmp ne i32 %a, %b
-  %ext = sext i1 %icmp0 to i32
-  %icmp1 = icmp ne i32 %ext, 1
-  store i1 %icmp1, i1 addrspace(1)* %out
-  ret void
-}
-
 ; FUNC-LABEL: {{^}}sext_bool_icmp_eq_neg1:
 ; GCN-NOT: v_cmp
 ; GCN: v_cmp_eq_i32_e32 vcc,
@@ -177,24 +145,6 @@
   ret void
 }
 
-; FUNC-LABEL: {{^}}sext_bool_icmp_ne_k:
-; SI-DAG: s_load_dword [[A:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
-; SI-DAG: s_load_dword [[B:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xc
-; VI-DAG: s_load_dword [[A:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0x2c
-; VI-DAG: s_load_dword [[B:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0x30
-; GCN: v_mov_b32_e32 [[VB:v[0-9]+]], [[B]]
-; GCN: v_cmp_ne_i32_e32 vcc, 2, [[VB]]{{$}}
-; GCN: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, 1, vcc
-; GCN: buffer_store_byte
-; GCN: s_endpgm
-define void @sext_bool_icmp_ne_k(i1 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
-  %icmp0 = icmp ne i32 %a, %b
-  %ext = sext i1 %icmp0 to i32
-  %icmp1 = icmp ne i32 %ext, 2
-  store i1 %icmp1, i1 addrspace(1)* %out
-  ret void
-}
-
 ; FUNC-LABEL: {{^}}cmp_zext_k_i8max:
 ; SI: s_load_dword [[VALUE:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
 ; VI: s_load_dword [[VALUE:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0x2c
@@ -294,3 +244,40 @@
   store i1 %icmp1, i1 addrspace(1)* %out
   ret void
 }
+
+; FIXME: These cases should really be able to fold to true/false in
+; DAGCombiner
+
+; This really folds away to false
+; FUNC-LABEL: {{^}}sext_bool_icmp_eq_1:
+; GCN: v_mov_b32_e32 [[K:v[0-9]+]], 0{{$}}
+; GCN: buffer_store_byte [[K]]
+define void @sext_bool_icmp_eq_1(i1 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
+  %icmp0 = icmp eq i32 %a, %b
+  %ext = sext i1 %icmp0 to i32
+  %icmp1 = icmp eq i32 %ext, 1
+  store i1 %icmp1, i1 addrspace(1)* %out
+  ret void
+}
+
+; FUNC-LABEL: {{^}}sext_bool_icmp_ne_1:
+; GCN: v_mov_b32_e32 [[K:v[0-9]+]], 1{{$}}
+; GCN: buffer_store_byte [[K]]
+define void @sext_bool_icmp_ne_1(i1 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
+  %icmp0 = icmp ne i32 %a, %b
+  %ext = sext i1 %icmp0 to i32
+  %icmp1 = icmp ne i32 %ext, 1
+  store i1 %icmp1, i1 addrspace(1)* %out
+  ret void
+}
+
+; FUNC-LABEL: {{^}}sext_bool_icmp_ne_k:
+; GCN: v_mov_b32_e32 [[K:v[0-9]+]], 1{{$}}
+; GCN: buffer_store_byte [[K]]
+define void @sext_bool_icmp_ne_k(i1 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
+  %icmp0 = icmp ne i32 %a, %b
+  %ext = sext i1 %icmp0 to i32
+  %icmp1 = icmp ne i32 %ext, 2
+  store i1 %icmp1, i1 addrspace(1)* %out
+  ret void
+}
Index: test/CodeGen/AMDGPU/uniform-cfg.ll
===================================================================
--- test/CodeGen/AMDGPU/uniform-cfg.ll
+++ test/CodeGen/AMDGPU/uniform-cfg.ll
@@ -224,16 +224,17 @@
 ; SI: buffer_store
 ; SI: {{^}}[[EXIT]]:
 ; SI: s_endpgm
-define void @icmp_users_different_blocks(i32 %cond, i32 addrspace(1)* %out) {
+define void @icmp_users_different_blocks(i32 %cond0, i32 %cond1, i32 addrspace(1)* %out) {
 bb:
   %tmp = tail call i32 @llvm.amdgcn.workitem.id.x() #0
-  %tmp1 = icmp sgt i32 %cond, 0
-  br i1 %tmp1, label %bb2, label %bb9
+  %cmp0 = icmp sgt i32 %cond0, 0
+  %cmp1 = icmp sgt i32 %cond1, 0
+  br i1 %cmp0, label %bb2, label %bb9
 
 bb2: ; preds = %bb
-  %tmp2 = sext i1 %tmp1 to i32
+  %tmp2 = sext i1 %cmp1 to i32
   %tmp3 = add i32 %tmp2, %tmp
-  br i1 %tmp1, label %bb9, label %bb7
+  br i1 %cmp1, label %bb9, label %bb7
 
 bb7: ; preds = %bb5
   store i32 %tmp3, i32 addrspace(1)* %out
Index: test/Transforms/LoopStrengthReduce/AMDGPU/lsr-postinc-pos-addrspace.ll
===================================================================
--- test/Transforms/LoopStrengthReduce/AMDGPU/lsr-postinc-pos-addrspace.ll
+++ test/Transforms/LoopStrengthReduce/AMDGPU/lsr-postinc-pos-addrspace.ll
@@ -76,7 +76,8 @@
 ; CHECK: br i1
 
 ; CHECK: bb:
-; CHECK: getelementptr i8, i8 addrspace(1)* %t, i32 %lsr.iv
+; CHECK: %idxprom = sext i32 %lsr.iv1 to i64
+; CHECK: getelementptr i8, i8 addrspace(1)* %t, i64 %idxprom
 define void @global_gep_user(i32 %arg0) nounwind {
 entry:
   br label %bb11