Index: llvm/lib/Target/AMDGPU/AMDGPUAtomicOptimizer.cpp
===================================================================
--- llvm/lib/Target/AMDGPU/AMDGPUAtomicOptimizer.cpp
+++ llvm/lib/Target/AMDGPU/AMDGPUAtomicOptimizer.cpp
@@ -404,6 +404,11 @@
   }
 }
 
+static Value *buildMul(IRBuilder<> &B, Value *LHS, Value *RHS) {
+  const ConstantInt *CI = dyn_cast<ConstantInt>(LHS);
+  return (CI && CI->isOne()) ? RHS : B.CreateMul(LHS, RHS);
+}
+
 void AMDGPUAtomicOptimizer::optimizeAtomic(Instruction &I,
                                            AtomicRMWInst::BinOp Op,
                                            unsigned ValIdx,
@@ -523,7 +528,7 @@
       // old value times the number of active lanes.
       Value *const Ctpop = B.CreateIntCast(
           B.CreateUnaryIntrinsic(Intrinsic::ctpop, Ballot), Ty, false);
-      NewV = B.CreateMul(V, Ctpop);
+      NewV = buildMul(B, V, Ctpop);
       break;
     }
 
@@ -543,7 +548,7 @@
       // old value times the parity of the number of active lanes.
       Value *const Ctpop = B.CreateIntCast(
           B.CreateUnaryIntrinsic(Intrinsic::ctpop, Ballot), Ty, false);
-      NewV = B.CreateMul(V, B.CreateAnd(Ctpop, 1));
+      NewV = buildMul(B, V, B.CreateAnd(Ctpop, 1));
       break;
     }
     }
@@ -622,7 +627,7 @@
       llvm_unreachable("Unhandled atomic op");
     case AtomicRMWInst::Add:
     case AtomicRMWInst::Sub:
-      LaneOffset = B.CreateMul(V, Mbcnt);
+      LaneOffset = buildMul(B, V, Mbcnt);
      break;
     case AtomicRMWInst::And:
     case AtomicRMWInst::Or:
@@ -633,7 +638,7 @@
       LaneOffset = B.CreateSelect(Cond, Identity, V);
       break;
     case AtomicRMWInst::Xor:
-      LaneOffset = B.CreateMul(V, B.CreateAnd(Mbcnt, 1));
+      LaneOffset = buildMul(B, V, B.CreateAnd(Mbcnt, 1));
       break;
     }
   }
Index: llvm/test/CodeGen/AMDGPU/GlobalISel/atomic_optimizations_mul_one.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/AMDGPU/GlobalISel/atomic_optimizations_mul_one.ll
@@ -0,0 +1,176 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: opt -S -mtriple=amdgcn-- -amdgpu-atomic-optimizer -verify-machineinstrs %s | FileCheck -check-prefix=IR %s
+; RUN: llc -global-isel -mtriple=amdgcn-- -amdgpu-atomic-optimizations -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
+
+; IR-NOT: mul i32 1, %[[REG:[0-9]+]]
+; IR-NOT: mul i32 %[[REG:[0-9]+]], 1
+
+declare i32 @llvm.amdgcn.struct.buffer.atomic.add.i32(i32, <4 x i32>, i32, i32, i32, i32 immarg)
+declare i32 @llvm.amdgcn.struct.buffer.atomic.sub.i32(i32, <4 x i32>, i32, i32, i32, i32 immarg)
+declare i32 @llvm.amdgcn.struct.buffer.atomic.xor.i32(i32, <4 x i32>, i32, i32, i32, i32 immarg)
+declare void @llvm.amdgcn.struct.buffer.store.format.v4i32(<4 x i32>, <4 x i32>, i32, i32, i32, i32 immarg)
+
+define amdgpu_cs void @atomic_add(<4 x i32> inreg %arg) {
+; GCN-LABEL: atomic_add:
+; GCN:       ; %bb.0: ; %.entry
+; GCN-NEXT:    s_mov_b64 s[4:5], exec
+; GCN-NEXT:    v_mbcnt_lo_u32_b32_e64 v0, s4, 0
+; GCN-NEXT:    v_mbcnt_hi_u32_b32_e32 v0, s5, v0
+; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
+; GCN-NEXT:    s_and_saveexec_b64 s[6:7], vcc
+; GCN-NEXT:    s_cbranch_execz BB0_2
+; GCN-NEXT:  ; %bb.1:
+; GCN-NEXT:    s_bcnt1_i32_b64 s4, s[4:5]
+; GCN-NEXT:    v_mov_b32_e32 v0, s4
+; GCN-NEXT:    v_mov_b32_e32 v1, 0
+; GCN-NEXT:    buffer_atomic_add v0, v1, s[0:3], 0 idxen glc
+; GCN-NEXT:  BB0_2:
+; GCN-NEXT:    s_endpgm
+.entry:
+  call i32 @llvm.amdgcn.struct.buffer.atomic.add.i32(i32 1, <4 x i32> %arg, i32 0, i32 0, i32 0, i32 0)
+  ret void
+}
+
+define amdgpu_cs void @atomic_add_and_format(<4 x i32> inreg %arg) {
+; GCN-LABEL: atomic_add_and_format:
+; GCN:       ; %bb.0: ; %.entry
+; GCN-NEXT:    s_mov_b64 s[6:7], exec
+; GCN-NEXT:    v_mbcnt_lo_u32_b32_e64 v0, s6, 0
+; GCN-NEXT:    v_mbcnt_hi_u32_b32_e32 v0, s7, v0
+; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
+; GCN-NEXT:    ; implicit-def: $vgpr1
+; GCN-NEXT:    s_and_saveexec_b64 s[4:5], vcc
+; GCN-NEXT:    s_cbranch_execz BB1_2
+; GCN-NEXT:  ; %bb.1:
+; GCN-NEXT:    s_bcnt1_i32_b64 s6, s[6:7]
+; GCN-NEXT:    v_mov_b32_e32 v1, s6
+; GCN-NEXT:    v_mov_b32_e32 v2, 0
+; GCN-NEXT:    buffer_atomic_add v1, v2, s[0:3], 0 idxen glc
+; GCN-NEXT:  BB1_2:
+; GCN-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_readfirstlane_b32 s4, v1
+; GCN-NEXT:    v_add_i32_e32 v4, vcc, s4, v0
+; GCN-NEXT:    s_waitcnt expcnt(0)
+; GCN-NEXT:    v_mov_b32_e32 v0, s0
+; GCN-NEXT:    v_mov_b32_e32 v1, s1
+; GCN-NEXT:    v_mov_b32_e32 v2, s2
+; GCN-NEXT:    v_mov_b32_e32 v3, s3
+; GCN-NEXT:    buffer_store_format_xyzw v[0:3], v4, s[0:3], 0 idxen
+; GCN-NEXT:    s_endpgm
+.entry:
+  %a = call i32 @llvm.amdgcn.struct.buffer.atomic.add.i32(i32 1, <4 x i32> %arg, i32 0, i32 0, i32 0, i32 0)
+  call void @llvm.amdgcn.struct.buffer.store.format.v4i32(<4 x i32> %arg, <4 x i32> %arg, i32 %a, i32 0, i32 0, i32 0)
+  ret void
+}
+
+define amdgpu_cs void @atomic_sub(<4 x i32> inreg %arg) {
+; GCN-LABEL: atomic_sub:
+; GCN:       ; %bb.0: ; %.entry
+; GCN-NEXT:    s_mov_b64 s[4:5], exec
+; GCN-NEXT:    v_mbcnt_lo_u32_b32_e64 v0, s4, 0
+; GCN-NEXT:    v_mbcnt_hi_u32_b32_e32 v0, s5, v0
+; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
+; GCN-NEXT:    s_and_saveexec_b64 s[6:7], vcc
+; GCN-NEXT:    s_cbranch_execz BB2_2
+; GCN-NEXT:  ; %bb.1:
+; GCN-NEXT:    s_bcnt1_i32_b64 s4, s[4:5]
+; GCN-NEXT:    v_mov_b32_e32 v0, s4
+; GCN-NEXT:    v_mov_b32_e32 v1, 0
+; GCN-NEXT:    buffer_atomic_sub v0, v1, s[0:3], 0 idxen glc
+; GCN-NEXT:  BB2_2:
+; GCN-NEXT:    s_endpgm
+.entry:
+  call i32 @llvm.amdgcn.struct.buffer.atomic.sub.i32(i32 1, <4 x i32> %arg, i32 0, i32 0, i32 0, i32 0)
+  ret void
+}
+
+define amdgpu_cs void @atomic_sub_and_format(<4 x i32> inreg %arg) {
+; GCN-LABEL: atomic_sub_and_format:
+; GCN:       ; %bb.0: ; %.entry
+; GCN-NEXT:    s_mov_b64 s[6:7], exec
+; GCN-NEXT:    v_mbcnt_lo_u32_b32_e64 v0, s6, 0
+; GCN-NEXT:    v_mbcnt_hi_u32_b32_e32 v0, s7, v0
+; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
+; GCN-NEXT:    ; implicit-def: $vgpr1
+; GCN-NEXT:    s_and_saveexec_b64 s[4:5], vcc
+; GCN-NEXT:    s_cbranch_execz BB3_2
+; GCN-NEXT:  ; %bb.1:
+; GCN-NEXT:    s_bcnt1_i32_b64 s6, s[6:7]
+; GCN-NEXT:    v_mov_b32_e32 v1, s6
+; GCN-NEXT:    v_mov_b32_e32 v2, 0
+; GCN-NEXT:    buffer_atomic_sub v1, v2, s[0:3], 0 idxen glc
+; GCN-NEXT:  BB3_2:
+; GCN-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_readfirstlane_b32 s4, v1
+; GCN-NEXT:    v_sub_i32_e32 v4, vcc, s4, v0
+; GCN-NEXT:    s_waitcnt expcnt(0)
+; GCN-NEXT:    v_mov_b32_e32 v0, s0
+; GCN-NEXT:    v_mov_b32_e32 v1, s1
+; GCN-NEXT:    v_mov_b32_e32 v2, s2
+; GCN-NEXT:    v_mov_b32_e32 v3, s3
+; GCN-NEXT:    buffer_store_format_xyzw v[0:3], v4, s[0:3], 0 idxen
+; GCN-NEXT:    s_endpgm
+.entry:
+  %a = call i32 @llvm.amdgcn.struct.buffer.atomic.sub.i32(i32 1, <4 x i32> %arg, i32 0, i32 0, i32 0, i32 0)
+  call void @llvm.amdgcn.struct.buffer.store.format.v4i32(<4 x i32> %arg, <4 x i32> %arg, i32 %a, i32 0, i32 0, i32 0)
+  ret void
+}
+
+define amdgpu_cs void @atomic_xor(<4 x i32> inreg %arg) {
+; GCN-LABEL: atomic_xor:
+; GCN:       ; %bb.0: ; %.entry
+; GCN-NEXT:    s_mov_b64 s[4:5], exec
+; GCN-NEXT:    v_mbcnt_lo_u32_b32_e64 v0, s4, 0
+; GCN-NEXT:    v_mbcnt_hi_u32_b32_e32 v0, s5, v0
+; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
+; GCN-NEXT:    s_and_saveexec_b64 s[6:7], vcc
+; GCN-NEXT:    s_cbranch_execz BB4_2
+; GCN-NEXT:  ; %bb.1:
+; GCN-NEXT:    s_bcnt1_i32_b64 s4, s[4:5]
+; GCN-NEXT:    s_and_b32 s4, s4, 1
+; GCN-NEXT:    v_mov_b32_e32 v0, s4
+; GCN-NEXT:    v_mov_b32_e32 v1, 0
+; GCN-NEXT:    buffer_atomic_xor v0, v1, s[0:3], 0 idxen glc
+; GCN-NEXT:  BB4_2:
+; GCN-NEXT:    s_endpgm
+.entry:
+  call i32 @llvm.amdgcn.struct.buffer.atomic.xor.i32(i32 1, <4 x i32> %arg, i32 0, i32 0, i32 0, i32 0)
+  ret void
+}
+
+define amdgpu_cs void @atomic_xor_and_format(<4 x i32> inreg %arg) {
+; GCN-LABEL: atomic_xor_and_format:
+; GCN:       ; %bb.0: ; %.entry
+; GCN-NEXT:    s_mov_b64 s[6:7], exec
+; GCN-NEXT:    v_mbcnt_lo_u32_b32_e64 v0, s6, 0
+; GCN-NEXT:    v_mbcnt_hi_u32_b32_e32 v0, s7, v0
+; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
+; GCN-NEXT:    ; implicit-def: $vgpr1
+; GCN-NEXT:    s_and_saveexec_b64 s[4:5], vcc
+; GCN-NEXT:    s_cbranch_execz BB5_2
+; GCN-NEXT:  ; %bb.1:
+; GCN-NEXT:    s_bcnt1_i32_b64 s6, s[6:7]
+; GCN-NEXT:    s_and_b32 s6, s6, 1
+; GCN-NEXT:    v_mov_b32_e32 v1, s6
+; GCN-NEXT:    v_mov_b32_e32 v2, 0
+; GCN-NEXT:    buffer_atomic_xor v1, v2, s[0:3], 0 idxen glc
+; GCN-NEXT:  BB5_2:
+; GCN-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_readfirstlane_b32 s4, v1
+; GCN-NEXT:    v_and_b32_e32 v0, 1, v0
+; GCN-NEXT:    v_xor_b32_e32 v4, s4, v0
+; GCN-NEXT:    s_waitcnt expcnt(0)
+; GCN-NEXT:    v_mov_b32_e32 v0, s0
+; GCN-NEXT:    v_mov_b32_e32 v1, s1
+; GCN-NEXT:    v_mov_b32_e32 v2, s2
+; GCN-NEXT:    v_mov_b32_e32 v3, s3
+; GCN-NEXT:    buffer_store_format_xyzw v[0:3], v4, s[0:3], 0 idxen
+; GCN-NEXT:    s_endpgm
+.entry:
+  %a = call i32 @llvm.amdgcn.struct.buffer.atomic.xor.i32(i32 1, <4 x i32> %arg, i32 0, i32 0, i32 0, i32 0)
+  call void @llvm.amdgcn.struct.buffer.store.format.v4i32(<4 x i32> %arg, <4 x i32> %arg, i32 %a, i32 0, i32 0, i32 0)
+  ret void
+}
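
For reference, a minimal standalone sketch (not part of the patch; function and value names are hypothetical) of the IR shape that the IR-NOT lines above guard against. The atomic optimizer scales the uniform operand by the active-lane count; when that operand is the constant 1, buildMul returns the count directly instead of emitting a redundant multiply.

; Old form: the multiply by the constant 1 is kept.
define i32 @mul_by_one_kept(i32 %count) {
  %newv = mul i32 1, %count
  ret i32 %newv
}

; New form: buildMul folds the multiply away and the lane count is used directly.
define i32 @mul_by_one_folded(i32 %count) {
  ret i32 %count
}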