Index: llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
===================================================================
--- llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
+++ llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
@@ -17,6 +17,7 @@
 #include "AMDGPUTargetMachine.h"
 #include "llvm/ADT/StringRef.h"
 #include "llvm/Analysis/AssumptionCache.h"
+#include "llvm/Analysis/ConstantFolding.h"
 #include "llvm/Analysis/LegacyDivergenceAnalysis.h"
 #include "llvm/Analysis/Loads.h"
 #include "llvm/Analysis/ValueTracking.h"
@@ -152,6 +153,10 @@
   /// SelectionDAG has an issue where an and asserting the bits are known
   bool replaceMulWithMul24(BinaryOperator &I) const;
 
+  /// Perform same function as equivalently named function in DAGCombiner. Since
+  /// we expand some divisions here, we need to perform this before obscuring.
+  bool foldBinOpIntoSelect(BinaryOperator &I) const;
+
   /// Expands 24 bit div or rem.
   Value* expandDivRem24(IRBuilder<> &Builder, BinaryOperator &I,
                         Value *Num, Value *Den,
@@ -525,6 +530,53 @@
   return true;
 }
 
+bool AMDGPUCodeGenPrepare::foldBinOpIntoSelect(BinaryOperator &BO) const {
+  // Don't do this unless the old select is going away. We want to eliminate the
+  // binary operator, not replace a binop with a select.
+  int SelOpNo = 0;
+  SelectInst *Sel = dyn_cast<SelectInst>(BO.getOperand(0));
+  if (!Sel || !Sel->hasOneUse()) {
+    SelOpNo = 1;
+    Sel = dyn_cast<SelectInst>(BO.getOperand(1));
+  }
+
+  if (!Sel || !Sel->hasOneUse())
+    return false;
+
+  Constant *CT = dyn_cast<Constant>(Sel->getTrueValue());
+  Constant *CF = dyn_cast<Constant>(Sel->getFalseValue());
+  Constant *CBO = dyn_cast<Constant>(BO.getOperand(SelOpNo ^ 1));
+  if (!CBO || !CT || !CF)
+    return false;
+
+  // TODO: Handle special 0/-1 cases DAG combine does, although we only really
+  // need to handle divisions here.
+  Constant *FoldedT = SelOpNo ?
+    ConstantFoldBinaryOpOperands(BO.getOpcode(), CBO, CT, *DL) :
+    ConstantFoldBinaryOpOperands(BO.getOpcode(), CT, CBO, *DL);
+  if (isa<ConstantExpr>(FoldedT))
+    return false;
+
+  Constant *FoldedF = SelOpNo ?
+    ConstantFoldBinaryOpOperands(BO.getOpcode(), CBO, CF, *DL) :
+    ConstantFoldBinaryOpOperands(BO.getOpcode(), CF, CBO, *DL);
+  if (isa<ConstantExpr>(FoldedF))
+    return false;
+
+  IRBuilder<> Builder(&BO);
+  Builder.SetCurrentDebugLocation(BO.getDebugLoc());
+  if (const FPMathOperator *FPOp = dyn_cast<FPMathOperator>(&BO))
+    Builder.setFastMathFlags(FPOp->getFastMathFlags());
+
+  Value *NewSelect = Builder.CreateSelect(Sel->getCondition(),
+                                          FoldedT, FoldedF);
+  NewSelect->takeName(&BO);
+  BO.replaceAllUsesWith(NewSelect);
+  BO.eraseFromParent();
+  Sel->eraseFromParent();
+  return true;
+}
+
 static bool shouldKeepFDivF32(Value *Num, bool UnsafeDiv, bool HasDenormals) {
   const ConstantFP *CNum = dyn_cast<ConstantFP>(Num);
   if (!CNum)
@@ -891,6 +943,9 @@
   if (UseMul24Intrin && replaceMulWithMul24(I))
     return true;
 
+  if (foldBinOpIntoSelect(I))
+    return true;
+
   bool Changed = false;
   Instruction::BinaryOps Opc = I.getOpcode();
   Type *Ty = I.getType();
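Before the test diffs, a minimal sketch of the transform this patch performs, assuming a one-use select of constants feeding a division (the function name here is hypothetical, not part of the patch):

; Input to -amdgpu-codegenprepare:
define i32 @fold_sketch(i1 %cond) {
  %sel = select i1 %cond, i32 5, i32 8
  %div = sdiv i32 1000000, %sel
  ret i32 %div
}
; foldBinOpIntoSelect constant-folds the sdiv against each select arm and
; replaces both instructions with a single select:
;   %div = select i1 %cond, i32 200000, i32 125000

Because the fold runs at the IR level rather than in the DAG, it fires before the i32 division is expanded into the long multiply/compare sequence visible in the opaque-constant tests below.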
Index: llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-fold-binop-select.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-fold-binop-select.ll
@@ -0,0 +1,380 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -amdgpu-codegenprepare %s | FileCheck -check-prefix=IR %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=fiji < %s | FileCheck -check-prefix=GCN %s
+
+define i32 @select_sdiv_lhs_const_i32(i1 %cond) {
+; IR-LABEL: @select_sdiv_lhs_const_i32(
+; IR-NEXT: [[OP:%.*]] = select i1 [[COND:%.*]], i32 200000, i32 125000
+; IR-NEXT: ret i32 [[OP]]
+;
+; GCN-LABEL: select_sdiv_lhs_const_i32:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_and_b32_e32 v0, 1, v0
+; GCN-NEXT: v_mov_b32_e32 v1, 0x1e848
+; GCN-NEXT: v_mov_b32_e32 v2, 0x30d40
+; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GCN-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
+; GCN-NEXT: s_setpc_b64 s[30:31]
+  %select = select i1 %cond, i32 5, i32 8
+  %op = sdiv i32 1000000, %select
+  ret i32 %op
+}
+
+define i32 @select_sdiv_rhs_const_i32(i1 %cond) {
+; IR-LABEL: @select_sdiv_rhs_const_i32(
+; IR-NEXT: [[OP:%.*]] = select i1 [[COND:%.*]], i32 1000, i32 10000
+; IR-NEXT: ret i32 [[OP]]
+;
+; GCN-LABEL: select_sdiv_rhs_const_i32:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_and_b32_e32 v0, 1, v0
+; GCN-NEXT: v_mov_b32_e32 v1, 0x2710
+; GCN-NEXT: v_mov_b32_e32 v2, 0x3e8
+; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GCN-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
+; GCN-NEXT: s_setpc_b64 s[30:31]
+  %select = select i1 %cond, i32 42000, i32 420000
+  %op = sdiv i32 %select, 42
+  ret i32 %op
+}
+
+define <2 x i32> @select_sdiv_lhs_const_v2i32(i1 %cond) {
+; IR-LABEL: @select_sdiv_lhs_const_v2i32(
+; IR-NEXT: [[OP:%.*]] = select i1 [[COND:%.*]], <2 x i32> <i32 666, i32 1428>, <2 x i32> <i32 555, i32 1428>
+; IR-NEXT: ret <2 x i32> [[OP]]
+;
+; GCN-LABEL: select_sdiv_lhs_const_v2i32:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_and_b32_e32 v0, 1, v0
+; GCN-NEXT: v_mov_b32_e32 v1, 0x22b
+; GCN-NEXT: v_mov_b32_e32 v2, 0x29a
+; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GCN-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
+; GCN-NEXT: v_mov_b32_e32 v1, 0x594
+; GCN-NEXT: s_setpc_b64 s[30:31]
+  %select = select i1 %cond, <2 x i32> <i32 5, i32 8>, <2 x i32> <i32 6, i32 8>
+  %op = sdiv <2 x i32> <i32 3333, i32 11428>, %select
+  ret <2 x i32> %op
+}
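The hasOneUse() guards in foldBinOpIntoSelect mean the combine only fires when the select itself dies. A hypothetical case it must skip, sketched here but not one of this patch's tests:

define i32 @select_multi_use_sketch(i1 %cond, i32 addrspace(1)* %p) {
  %select = select i1 %cond, i32 5, i32 8
  ; The store is a second use of %select, so the select would survive the
  ; rewrite; the pass bails instead of trading the binop for another select.
  store i32 %select, i32 addrspace(1)* %p
  %op = sdiv i32 1000000, %select
  ret i32 %op
}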
+define <2 x i32> @select_sdiv_rhs_const_v2i32(i1 %cond) {
+; IR-LABEL: @select_sdiv_rhs_const_v2i32(
+; IR-NEXT: [[OP:%.*]] = select i1 [[COND:%.*]], <2 x i32> <i32 198621, i32 20855308>, <2 x i32> <i32 222748, i32 2338858>
+; IR-NEXT: ret <2 x i32> [[OP]]
+;
+; GCN-LABEL: select_sdiv_rhs_const_v2i32:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_and_b32_e32 v0, 1, v0
+; GCN-NEXT: v_mov_b32_e32 v1, 0x3661c
+; GCN-NEXT: v_mov_b32_e32 v2, 0x307dd
+; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GCN-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
+; GCN-NEXT: v_mov_b32_e32 v1, 0x23b02a
+; GCN-NEXT: v_mov_b32_e32 v2, 0x13e3a0c
+; GCN-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc
+; GCN-NEXT: s_setpc_b64 s[30:31]
+  %select = select i1 %cond, <2 x i32> <i32 8342123, i32 834212353>, <2 x i32> <i32 9355456, i32 93554321>
+  %op = sdiv <2 x i32> %select, <i32 42, i32 40>
+  ret <2 x i32> %op
+}
+
+@gv = external addrspace(1) global i32
+
+define i32 @select_sdiv_lhs_opaque_const0_i32(i1 %cond) {
+; IR-LABEL: @select_sdiv_lhs_opaque_const0_i32(
+; IR-NEXT: [[SELECT:%.*]] = select i1 [[COND:%.*]], i32 ptrtoint (i32 addrspace(1)* @gv to i32), i32 5
+; IR-NEXT: [[TMP1:%.*]] = ashr i32 [[SELECT]], 31
+; IR-NEXT: [[TMP2:%.*]] = xor i32 0, [[TMP1]]
+; IR-NEXT: [[TMP3:%.*]] = add i32 [[SELECT]], [[TMP1]]
+; IR-NEXT: [[TMP4:%.*]] = xor i32 [[TMP3]], [[TMP1]]
+; IR-NEXT: [[TMP5:%.*]] = uitofp i32 [[TMP4]] to float
+; IR-NEXT: [[TMP6:%.*]] = fdiv fast float 1.000000e+00, [[TMP5]]
+; IR-NEXT: [[TMP7:%.*]] = fmul fast float [[TMP6]], 0x41F0000000000000
+; IR-NEXT: [[TMP8:%.*]] = fptoui float [[TMP7]] to i32
+; IR-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
+; IR-NEXT: [[TMP10:%.*]] = zext i32 [[TMP4]] to i64
+; IR-NEXT: [[TMP11:%.*]] = mul i64 [[TMP9]], [[TMP10]]
+; IR-NEXT: [[TMP12:%.*]] = trunc i64 [[TMP11]] to i32
+; IR-NEXT: [[TMP13:%.*]] = lshr i64 [[TMP11]], 32
+; IR-NEXT: [[TMP14:%.*]] = trunc i64 [[TMP13]] to i32
+; IR-NEXT: [[TMP15:%.*]] = sub i32 0, [[TMP12]]
+; IR-NEXT: [[TMP16:%.*]] = icmp eq i32 [[TMP14]], 0
+; IR-NEXT: [[TMP17:%.*]] = select i1 [[TMP16]], i32 [[TMP15]], i32 [[TMP12]]
+; IR-NEXT: [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
+; IR-NEXT: [[TMP19:%.*]] = zext i32 [[TMP8]] to i64
+; IR-NEXT: [[TMP20:%.*]] = mul i64 [[TMP18]], [[TMP19]]
+; IR-NEXT: [[TMP21:%.*]] = trunc i64 [[TMP20]] to i32
+; IR-NEXT: [[TMP22:%.*]] = lshr i64 [[TMP20]], 32
+; IR-NEXT: [[TMP23:%.*]] = trunc i64 [[TMP22]] to i32
+; IR-NEXT: [[TMP24:%.*]] = add i32 [[TMP8]], [[TMP23]]
+; IR-NEXT: [[TMP25:%.*]] = sub i32 [[TMP8]], [[TMP23]]
+; IR-NEXT: [[TMP26:%.*]] = select i1 [[TMP16]], i32 [[TMP24]], i32 [[TMP25]]
+; IR-NEXT: [[TMP27:%.*]] = zext i32 [[TMP26]] to i64
+; IR-NEXT: [[TMP28:%.*]] = mul i64 [[TMP27]], 1000000
+; IR-NEXT: [[TMP29:%.*]] = trunc i64 [[TMP28]] to i32
+; IR-NEXT: [[TMP30:%.*]] = lshr i64 [[TMP28]], 32
+; IR-NEXT: [[TMP31:%.*]] = trunc i64 [[TMP30]] to i32
+; IR-NEXT: [[TMP32:%.*]] = mul i32 [[TMP31]], [[TMP4]]
+; IR-NEXT: [[TMP33:%.*]] = sub i32 1000000, [[TMP32]]
+; IR-NEXT: [[TMP34:%.*]] = icmp uge i32 [[TMP33]], [[TMP4]]
+; IR-NEXT: [[TMP35:%.*]] = select i1 [[TMP34]], i32 -1, i32 0
+; IR-NEXT: [[TMP36:%.*]] = icmp uge i32 1000000, [[TMP32]]
+; IR-NEXT: [[TMP37:%.*]] = select i1 [[TMP36]], i32 -1, i32 0
+; IR-NEXT: [[TMP38:%.*]] = and i32 [[TMP35]], [[TMP37]]
+; IR-NEXT: [[TMP39:%.*]] = icmp eq i32 [[TMP38]], 0
+; IR-NEXT: [[TMP40:%.*]] = add i32 [[TMP31]], 1
+; IR-NEXT: [[TMP41:%.*]] = sub i32 [[TMP31]], 1
+; IR-NEXT: [[TMP42:%.*]] = select i1 [[TMP39]], i32 [[TMP31]], i32 [[TMP40]]
+; IR-NEXT: [[TMP43:%.*]] = select i1 [[TMP36]], i32 [[TMP42]], i32 [[TMP41]]
+; IR-NEXT: [[TMP44:%.*]] = xor i32 [[TMP43]], [[TMP2]]
+; IR-NEXT: [[TMP45:%.*]] = sub i32 [[TMP44]], 
[[TMP2]] +; IR-NEXT: ret i32 [[TMP45]] +; +; GCN-LABEL: select_sdiv_lhs_opaque_const0_i32: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: s_getpc_b64 s[4:5] +; GCN-NEXT: s_add_u32 s4, s4, gv@gotpcrel32@lo+4 +; GCN-NEXT: s_addc_u32 s5, s5, gv@gotpcrel32@hi+4 +; GCN-NEXT: s_load_dword s4, s[4:5], 0x0 +; GCN-NEXT: v_and_b32_e32 v0, 1, v0 +; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0 +; GCN-NEXT: s_mov_b32 s6, 0xf4240 +; GCN-NEXT: s_waitcnt lgkmcnt(0) +; GCN-NEXT: v_mov_b32_e32 v1, s4 +; GCN-NEXT: v_cndmask_b32_e32 v0, 5, v1, vcc +; GCN-NEXT: v_ashrrev_i32_e32 v1, 31, v0 +; GCN-NEXT: v_add_u32_e32 v0, vcc, v0, v1 +; GCN-NEXT: v_xor_b32_e32 v0, v0, v1 +; GCN-NEXT: v_cvt_f32_u32_e32 v2, v0 +; GCN-NEXT: v_rcp_iflag_f32_e32 v2, v2 +; GCN-NEXT: v_mul_f32_e32 v2, 0x4f800000, v2 +; GCN-NEXT: v_cvt_u32_f32_e32 v2, v2 +; GCN-NEXT: v_mul_lo_u32 v3, v2, v0 +; GCN-NEXT: v_mul_hi_u32 v4, v2, v0 +; GCN-NEXT: v_sub_u32_e32 v5, vcc, 0, v3 +; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4 +; GCN-NEXT: v_cndmask_b32_e32 v3, v3, v5, vcc +; GCN-NEXT: v_mul_hi_u32 v3, v3, v2 +; GCN-NEXT: v_add_u32_e64 v4, s[4:5], v2, v3 +; GCN-NEXT: v_sub_u32_e64 v2, s[4:5], v2, v3 +; GCN-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc +; GCN-NEXT: v_mul_hi_u32 v2, v2, s6 +; GCN-NEXT: s_mov_b32 s4, 0xf4241 +; GCN-NEXT: v_mul_lo_u32 v3, v2, v0 +; GCN-NEXT: v_add_u32_e32 v4, vcc, 1, v2 +; GCN-NEXT: v_add_u32_e32 v5, vcc, -1, v2 +; GCN-NEXT: v_sub_u32_e32 v6, vcc, s6, v3 +; GCN-NEXT: v_cmp_gt_u32_e32 vcc, s4, v3 +; GCN-NEXT: v_cmp_ge_u32_e64 s[4:5], v6, v0 +; GCN-NEXT: s_and_b64 s[4:5], s[4:5], vcc +; GCN-NEXT: v_cndmask_b32_e64 v0, v2, v4, s[4:5] +; GCN-NEXT: v_cndmask_b32_e32 v0, v5, v0, vcc +; GCN-NEXT: v_xor_b32_e32 v0, v0, v1 +; GCN-NEXT: v_sub_u32_e32 v0, vcc, v0, v1 +; GCN-NEXT: s_setpc_b64 s[30:31] + %select = select i1 %cond, i32 ptrtoint (i32 addrspace(1)* @gv to i32), i32 5 + %op = sdiv i32 1000000, %select + ret i32 %op +} + +define i32 @select_sdiv_lhs_opaque_const1_i32(i1 %cond) { +; IR-LABEL: @select_sdiv_lhs_opaque_const1_i32( +; IR-NEXT: [[SELECT:%.*]] = select i1 [[COND:%.*]], i32 5, i32 ptrtoint (i32 addrspace(1)* @gv to i32) +; IR-NEXT: [[TMP1:%.*]] = ashr i32 [[SELECT]], 31 +; IR-NEXT: [[TMP2:%.*]] = xor i32 0, [[TMP1]] +; IR-NEXT: [[TMP3:%.*]] = add i32 [[SELECT]], [[TMP1]] +; IR-NEXT: [[TMP4:%.*]] = xor i32 [[TMP3]], [[TMP1]] +; IR-NEXT: [[TMP5:%.*]] = uitofp i32 [[TMP4]] to float +; IR-NEXT: [[TMP6:%.*]] = fdiv fast float 1.000000e+00, [[TMP5]] +; IR-NEXT: [[TMP7:%.*]] = fmul fast float [[TMP6]], 0x41F0000000000000 +; IR-NEXT: [[TMP8:%.*]] = fptoui float [[TMP7]] to i32 +; IR-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 +; IR-NEXT: [[TMP10:%.*]] = zext i32 [[TMP4]] to i64 +; IR-NEXT: [[TMP11:%.*]] = mul i64 [[TMP9]], [[TMP10]] +; IR-NEXT: [[TMP12:%.*]] = trunc i64 [[TMP11]] to i32 +; IR-NEXT: [[TMP13:%.*]] = lshr i64 [[TMP11]], 32 +; IR-NEXT: [[TMP14:%.*]] = trunc i64 [[TMP13]] to i32 +; IR-NEXT: [[TMP15:%.*]] = sub i32 0, [[TMP12]] +; IR-NEXT: [[TMP16:%.*]] = icmp eq i32 [[TMP14]], 0 +; IR-NEXT: [[TMP17:%.*]] = select i1 [[TMP16]], i32 [[TMP15]], i32 [[TMP12]] +; IR-NEXT: [[TMP18:%.*]] = zext i32 [[TMP17]] to i64 +; IR-NEXT: [[TMP19:%.*]] = zext i32 [[TMP8]] to i64 +; IR-NEXT: [[TMP20:%.*]] = mul i64 [[TMP18]], [[TMP19]] +; IR-NEXT: [[TMP21:%.*]] = trunc i64 [[TMP20]] to i32 +; IR-NEXT: [[TMP22:%.*]] = lshr i64 [[TMP20]], 32 +; IR-NEXT: [[TMP23:%.*]] = trunc i64 [[TMP22]] to i32 +; IR-NEXT: [[TMP24:%.*]] = add i32 [[TMP8]], [[TMP23]] +; IR-NEXT: [[TMP25:%.*]] = sub i32 [[TMP8]], [[TMP23]] +; 
IR-NEXT: [[TMP26:%.*]] = select i1 [[TMP16]], i32 [[TMP24]], i32 [[TMP25]] +; IR-NEXT: [[TMP27:%.*]] = zext i32 [[TMP26]] to i64 +; IR-NEXT: [[TMP28:%.*]] = mul i64 [[TMP27]], 1000000 +; IR-NEXT: [[TMP29:%.*]] = trunc i64 [[TMP28]] to i32 +; IR-NEXT: [[TMP30:%.*]] = lshr i64 [[TMP28]], 32 +; IR-NEXT: [[TMP31:%.*]] = trunc i64 [[TMP30]] to i32 +; IR-NEXT: [[TMP32:%.*]] = mul i32 [[TMP31]], [[TMP4]] +; IR-NEXT: [[TMP33:%.*]] = sub i32 1000000, [[TMP32]] +; IR-NEXT: [[TMP34:%.*]] = icmp uge i32 [[TMP33]], [[TMP4]] +; IR-NEXT: [[TMP35:%.*]] = select i1 [[TMP34]], i32 -1, i32 0 +; IR-NEXT: [[TMP36:%.*]] = icmp uge i32 1000000, [[TMP32]] +; IR-NEXT: [[TMP37:%.*]] = select i1 [[TMP36]], i32 -1, i32 0 +; IR-NEXT: [[TMP38:%.*]] = and i32 [[TMP35]], [[TMP37]] +; IR-NEXT: [[TMP39:%.*]] = icmp eq i32 [[TMP38]], 0 +; IR-NEXT: [[TMP40:%.*]] = add i32 [[TMP31]], 1 +; IR-NEXT: [[TMP41:%.*]] = sub i32 [[TMP31]], 1 +; IR-NEXT: [[TMP42:%.*]] = select i1 [[TMP39]], i32 [[TMP31]], i32 [[TMP40]] +; IR-NEXT: [[TMP43:%.*]] = select i1 [[TMP36]], i32 [[TMP42]], i32 [[TMP41]] +; IR-NEXT: [[TMP44:%.*]] = xor i32 [[TMP43]], [[TMP2]] +; IR-NEXT: [[TMP45:%.*]] = sub i32 [[TMP44]], [[TMP2]] +; IR-NEXT: ret i32 [[TMP45]] +; +; GCN-LABEL: select_sdiv_lhs_opaque_const1_i32: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: s_getpc_b64 s[4:5] +; GCN-NEXT: s_add_u32 s4, s4, gv@gotpcrel32@lo+4 +; GCN-NEXT: s_addc_u32 s5, s5, gv@gotpcrel32@hi+4 +; GCN-NEXT: s_load_dword s4, s[4:5], 0x0 +; GCN-NEXT: v_and_b32_e32 v0, 1, v0 +; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0 +; GCN-NEXT: s_mov_b32 s6, 0xf4240 +; GCN-NEXT: s_waitcnt lgkmcnt(0) +; GCN-NEXT: v_mov_b32_e32 v1, s4 +; GCN-NEXT: v_cndmask_b32_e64 v0, v1, 5, vcc +; GCN-NEXT: v_ashrrev_i32_e32 v1, 31, v0 +; GCN-NEXT: v_add_u32_e32 v0, vcc, v0, v1 +; GCN-NEXT: v_xor_b32_e32 v0, v0, v1 +; GCN-NEXT: v_cvt_f32_u32_e32 v2, v0 +; GCN-NEXT: v_rcp_iflag_f32_e32 v2, v2 +; GCN-NEXT: v_mul_f32_e32 v2, 0x4f800000, v2 +; GCN-NEXT: v_cvt_u32_f32_e32 v2, v2 +; GCN-NEXT: v_mul_lo_u32 v3, v2, v0 +; GCN-NEXT: v_mul_hi_u32 v4, v2, v0 +; GCN-NEXT: v_sub_u32_e32 v5, vcc, 0, v3 +; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4 +; GCN-NEXT: v_cndmask_b32_e32 v3, v3, v5, vcc +; GCN-NEXT: v_mul_hi_u32 v3, v3, v2 +; GCN-NEXT: v_add_u32_e64 v4, s[4:5], v2, v3 +; GCN-NEXT: v_sub_u32_e64 v2, s[4:5], v2, v3 +; GCN-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc +; GCN-NEXT: v_mul_hi_u32 v2, v2, s6 +; GCN-NEXT: s_mov_b32 s4, 0xf4241 +; GCN-NEXT: v_mul_lo_u32 v3, v2, v0 +; GCN-NEXT: v_add_u32_e32 v4, vcc, 1, v2 +; GCN-NEXT: v_add_u32_e32 v5, vcc, -1, v2 +; GCN-NEXT: v_sub_u32_e32 v6, vcc, s6, v3 +; GCN-NEXT: v_cmp_gt_u32_e32 vcc, s4, v3 +; GCN-NEXT: v_cmp_ge_u32_e64 s[4:5], v6, v0 +; GCN-NEXT: s_and_b64 s[4:5], s[4:5], vcc +; GCN-NEXT: v_cndmask_b32_e64 v0, v2, v4, s[4:5] +; GCN-NEXT: v_cndmask_b32_e32 v0, v5, v0, vcc +; GCN-NEXT: v_xor_b32_e32 v0, v0, v1 +; GCN-NEXT: v_sub_u32_e32 v0, vcc, v0, v1 +; GCN-NEXT: s_setpc_b64 s[30:31] + %select = select i1 %cond, i32 5, i32 ptrtoint (i32 addrspace(1)* @gv to i32) + %op = sdiv i32 1000000, %select + ret i32 %op +} + +define i32 @select_sdiv_rhs_opaque_const0_i32(i1 %cond) { +; IR-LABEL: @select_sdiv_rhs_opaque_const0_i32( +; IR-NEXT: [[SELECT:%.*]] = select i1 [[COND:%.*]], i32 ptrtoint (i32 addrspace(1)* @gv to i32), i32 234234 +; IR-NEXT: [[OP:%.*]] = sdiv i32 [[SELECT]], 42 +; IR-NEXT: ret i32 [[OP]] +; +; GCN-LABEL: select_sdiv_rhs_opaque_const0_i32: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: s_getpc_b64 
s[4:5]
+; GCN-NEXT: s_add_u32 s4, s4, gv@gotpcrel32@lo+4
+; GCN-NEXT: s_addc_u32 s5, s5, gv@gotpcrel32@hi+4
+; GCN-NEXT: s_load_dword s4, s[4:5], 0x0
+; GCN-NEXT: v_and_b32_e32 v0, 1, v0
+; GCN-NEXT: v_mov_b32_e32 v1, 0x392fa
+; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GCN-NEXT: s_mov_b32 s5, 0x30c30c31
+; GCN-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-NEXT: v_mov_b32_e32 v2, s4
+; GCN-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
+; GCN-NEXT: v_mul_hi_i32 v0, v0, s5
+; GCN-NEXT: v_lshrrev_b32_e32 v1, 31, v0
+; GCN-NEXT: v_ashrrev_i32_e32 v0, 3, v0
+; GCN-NEXT: v_add_u32_e32 v0, vcc, v0, v1
+; GCN-NEXT: s_setpc_b64 s[30:31]
+  %select = select i1 %cond, i32 ptrtoint (i32 addrspace(1)* @gv to i32), i32 234234
+  %op = sdiv i32 %select, 42
+  ret i32 %op
+}
+
+define i32 @select_sdiv_rhs_opaque_const1_i32(i1 %cond) {
+; IR-LABEL: @select_sdiv_rhs_opaque_const1_i32(
+; IR-NEXT: [[SELECT:%.*]] = select i1 [[COND:%.*]], i32 42000, i32 ptrtoint (i32 addrspace(1)* @gv to i32)
+; IR-NEXT: [[OP:%.*]] = sdiv i32 [[SELECT]], 42
+; IR-NEXT: ret i32 [[OP]]
+;
+; GCN-LABEL: select_sdiv_rhs_opaque_const1_i32:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_getpc_b64 s[4:5]
+; GCN-NEXT: s_add_u32 s4, s4, gv@gotpcrel32@lo+4
+; GCN-NEXT: s_addc_u32 s5, s5, gv@gotpcrel32@hi+4
+; GCN-NEXT: s_load_dword s4, s[4:5], 0x0
+; GCN-NEXT: v_and_b32_e32 v0, 1, v0
+; GCN-NEXT: v_mov_b32_e32 v1, 0xa410
+; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GCN-NEXT: s_mov_b32 s5, 0x30c30c31
+; GCN-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-NEXT: v_mov_b32_e32 v2, s4
+; GCN-NEXT: v_cndmask_b32_e32 v0, v2, v1, vcc
+; GCN-NEXT: v_mul_hi_i32 v0, v0, s5
+; GCN-NEXT: v_lshrrev_b32_e32 v1, 31, v0
+; GCN-NEXT: v_ashrrev_i32_e32 v0, 3, v0
+; GCN-NEXT: v_add_u32_e32 v0, vcc, v0, v1
+; GCN-NEXT: s_setpc_b64 s[30:31]
+  %select = select i1 %cond, i32 42000, i32 ptrtoint (i32 addrspace(1)* @gv to i32)
+  %op = sdiv i32 %select, 42
+  ret i32 %op
+}
+
+define i32 @select_add_lhs_const_i32(i1 %cond) {
+; IR-LABEL: @select_add_lhs_const_i32(
+; IR-NEXT: [[OP:%.*]] = select i1 [[COND:%.*]], i32 1000005, i32 1000008
+; IR-NEXT: ret i32 [[OP]]
+;
+; GCN-LABEL: select_add_lhs_const_i32:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_and_b32_e32 v0, 1, v0
+; GCN-NEXT: v_mov_b32_e32 v1, 0xf4248
+; GCN-NEXT: v_mov_b32_e32 v2, 0xf4245
+; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GCN-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
+; GCN-NEXT: s_setpc_b64 s[30:31]
+  %select = select i1 %cond, i32 5, i32 8
+  %op = add i32 1000000, %select
+  ret i32 %op
+}
+
+define float @select_fadd_lhs_const_i32_fmf(i1 %cond) {
+; IR-LABEL: @select_fadd_lhs_const_i32_fmf(
+; IR-NEXT: [[OP:%.*]] = select nnan nsz i1 [[COND:%.*]], float 3.000000e+00, float 5.000000e+00
+; IR-NEXT: ret float [[OP]]
+;
+; GCN-LABEL: select_fadd_lhs_const_i32_fmf:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_and_b32_e32 v0, 1, v0
+; GCN-NEXT: v_mov_b32_e32 v1, 0x40a00000
+; GCN-NEXT: v_mov_b32_e32 v2, 0x40400000
+; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GCN-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
+; GCN-NEXT: s_setpc_b64 s[30:31]
+  %select = select i1 %cond, float 2.0, float 4.0
+  %op = fadd nnan nsz float 1.0, %select
+  ret float %op
+}
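The opaque-constant tests above exercise the isa<ConstantExpr> bailout: folding against the ptrtoint expression just yields another ConstantExpr, so the combine gives up. The remaining bailout, sketched here with a hypothetical function not taken from the patch, is a select arm that is not a Constant at all, which fails the dyn_cast<Constant> of CT or CF:

define i32 @select_nonconst_arm_sketch(i1 %cond, i32 %x) {
  ; %x is not a constant, so the fold does not apply and the sdiv
  ; goes down the normal expansion path.
  %select = select i1 %cond, i32 %x, i32 8
  %op = sdiv i32 1000000, %select
  ret i32 %op
}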
Index: llvm/test/CodeGen/AMDGPU/dagcombine-select.ll
===================================================================
--- llvm/test/CodeGen/AMDGPU/dagcombine-select.ll
+++ llvm/test/CodeGen/AMDGPU/dagcombine-select.ll
@@ -155,18 +155,27 @@
   ret void
 }
 
-; GCN-LABEL: {{^}}sdiv_constant_sel_constants:
+; GCN-LABEL: {{^}}sdiv_constant_sel_constants_i64:
 ; GCN: v_cndmask_b32_e64 v{{[0-9]+}}, 5, 0,
-define amdgpu_kernel void @sdiv_constant_sel_constants(i64 addrspace(1)* %p, i1 %cond) {
+define amdgpu_kernel void @sdiv_constant_sel_constants_i64(i64 addrspace(1)* %p, i1 %cond) {
   %sel = select i1 %cond, i64 121, i64 23
   %bo = sdiv i64 120, %sel
   store i64 %bo, i64 addrspace(1)* %p, align 8
   ret void
 }
 
+; GCN-LABEL: {{^}}sdiv_constant_sel_constants_i32:
+; GCN: v_cndmask_b32_e64 v{{[0-9]+}}, 8, 26,
+define amdgpu_kernel void @sdiv_constant_sel_constants_i32(i32 addrspace(1)* %p, i1 %cond) {
+  %sel = select i1 %cond, i32 7, i32 23
+  %bo = sdiv i32 184, %sel
+  store i32 %bo, i32 addrspace(1)* %p, align 8
+  ret void
+}
+
-; GCN-LABEL: {{^}}udiv_constant_sel_constants:
+; GCN-LABEL: {{^}}udiv_constant_sel_constants_i64:
 ; GCN: v_cndmask_b32_e64 v{{[0-9]+}}, 5, 0,
-define amdgpu_kernel void @udiv_constant_sel_constants(i64 addrspace(1)* %p, i1 %cond) {
+define amdgpu_kernel void @udiv_constant_sel_constants_i64(i64 addrspace(1)* %p, i1 %cond) {
   %sel = select i1 %cond, i64 -4, i64 23
   %bo = udiv i64 120, %sel
   store i64 %bo, i64 addrspace(1)* %p, align 8
Index: llvm/test/CodeGen/AMDGPU/select-fabs-fneg-extract.ll
===================================================================
--- llvm/test/CodeGen/AMDGPU/select-fabs-fneg-extract.ll
+++ llvm/test/CodeGen/AMDGPU/select-fabs-fneg-extract.ll
@@ -439,8 +439,8 @@
 
 ; GCN-LABEL: {{^}}add_select_fneg_negk_negk_f32:
 ; GCN: buffer_load_dword [[X:v[0-9]+]]
-; GCN: v_cndmask_b32_e64 [[SELECT:v[0-9]+]], -1.0, -2.0, s
-; GCN: v_sub_f32_e32 v{{[0-9]+}}, [[X]], [[SELECT]]
+; GCN: v_cndmask_b32_e64 [[SELECT:v[0-9]+]], 1.0, 2.0, s
+; GCN: v_add_f32_e32 v{{[0-9]+}}, [[SELECT]], [[X]]
 define amdgpu_kernel void @add_select_fneg_negk_negk_f32(i32 %c) #0 {
   %x = load volatile float, float addrspace(1)* undef
   %cmp = icmp eq i32 %c, 0
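Why the select-fabs-fneg-extract.ll checks change: the fneg in that test is written as an fsub from -0.0, which is itself a binop of a constant and a one-use select of constants, so the new IR fold applies to it as well. A self-contained sketch (hypothetical function, assuming the test's select feeds only the fneg):

define float @fneg_select_sketch(i1 %cmp, float %x) {
  %select = select i1 %cmp, float -2.0, float -1.0
  ; fsub -0.0, %select folds per-arm to select i1 %cmp, float 2.0, float 1.0,
  ; so codegen emits v_add with positive constants instead of v_sub.
  %fneg.x = fsub float -0.0, %select
  %add = fadd float %x, %fneg.x
  ret float %add
}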