diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp --- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp @@ -2453,87 +2453,119 @@ SDValue AMDGPUTargetLowering::LowerINT_TO_FP32(SDValue Op, SelectionDAG &DAG, bool Signed) const { - // Unsigned - // cul2f(ulong u) - //{ - // uint lz = clz(u); - // uint e = (u != 0) ? 127U + 63U - lz : 0; - // u = (u << lz) & 0x7fffffffffffffffUL; - // ulong t = u & 0xffffffffffUL; - // uint v = (e << 23) | (uint)(u >> 40); - // uint r = t > 0x8000000000UL ? 1U : (t == 0x8000000000UL ? v & 1U : 0U); - // return as_float(v + r); - //} - // Signed - // cl2f(long l) - //{ - // long s = l >> 63; - // float r = cul2f((l + s) ^ s); - // return s ? -r : r; - //} + // The regular method of converting a 64-bit integer to float roughly consists + // of 2 steps: normalization and rounding. In fact, after normalization, the + // conversion from a 64-bit integer to a float is essentially the same as the + // one from a 32-bit integer. The only difference is that it has more + // trailing bits to be rounded. To leverage the native 32-bit conversion, a + // 64-bit integer can be preprocessed to fit into a 32-bit integer and then + // converted into the correct float value. The basic steps for the unsigned + // conversion are illustrated in the following pseudocode: + // + // f32 uitofp(i64 u) { + // i32 hi, lo = split(u); + // // Only count the leading zeros in hi as we have native support of the + // // conversion from i32 to f32. If hi is all 0s, the conversion is + // // reduced to a 32-bit one automatically. + // i32 shamt = clz(hi); // Return 32 if hi is all 0s. + // u <<= shamt; + // hi, lo = split(u); + // hi |= (lo != 0) ? 1 : 0; // Adjust rounding bit in hi based on lo. + // // Convert it as a 32-bit integer and scale the result back. + // return uitofp(hi) * 2^(32 - shamt); + // } + // + // The signed conversion follows the same principle but uses 'ffbh_i32' to + // count the leading sign bits instead. If 'ffbh_i32' is not available, the + // absolute value is converted instead, followed by negation based on the + // sign bit.
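(Editorial aside, not part of the patch.) The pseudocode above can be modeled by a small standalone C++ sketch; std::countl_zero and std::ldexp merely stand in for the hardware ffbh/ldexp operations, the function name is illustrative, and C++20 is assumed for <bit>:

#include <bit>
#include <cmath>
#include <cstdint>

// Scalar model of the unsigned i64 -> f32 conversion sketched above:
// normalize so the most significant bit lands in the high word, fold the
// dropped low bits into a sticky bit, convert the high 32 bits natively,
// then scale the result back by 2^(32 - ShAmt).
static float UItoFP64To32(uint64_t U) {
  // std::countl_zero returns 32 for an all-zero high word, which reduces
  // this to a plain 32-bit conversion automatically.
  unsigned ShAmt = std::countl_zero(static_cast<uint32_t>(U >> 32));
  uint64_t Norm = U << ShAmt;
  uint32_t Hi = static_cast<uint32_t>(Norm >> 32);
  uint32_t Lo = static_cast<uint32_t>(Norm);
  Hi |= (Lo != 0) ? 1 : 0; // Sticky bit so rounding sees the dropped bits.
  return std::ldexp(static_cast<float>(Hi), 32 - static_cast<int>(ShAmt));
}

This is only a sanity-check model; the lowering below builds the same dataflow out of SelectionDAG (and GlobalISel) nodes, uses ffbh_i32 plus a sign fix-up for the signed case, and, on targets without a native ldexp, emulates the final scaling by adding the shift amount directly into the float's exponent field.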
SDLoc SL(Op); SDValue Src = Op.getOperand(0); - SDValue L = Src; - - SDValue S; - if (Signed) { - const SDValue SignBit = DAG.getConstant(63, SL, MVT::i64); - S = DAG.getNode(ISD::SRA, SL, MVT::i64, L, SignBit); - - SDValue LPlusS = DAG.getNode(ISD::ADD, SL, MVT::i64, L, S); - L = DAG.getNode(ISD::XOR, SL, MVT::i64, LPlusS, S); - } - - EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(), - *DAG.getContext(), MVT::f32); - + EVT SetCCVT = + getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::i32); SDValue ZeroI32 = DAG.getConstant(0, SL, MVT::i32); - SDValue ZeroI64 = DAG.getConstant(0, SL, MVT::i64); - SDValue LZ = DAG.getNode(ISD::CTLZ_ZERO_UNDEF, SL, MVT::i64, L); - LZ = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, LZ); - - SDValue K = DAG.getConstant(127U + 63U, SL, MVT::i32); - SDValue E = DAG.getSelect(SL, MVT::i32, - DAG.getSetCC(SL, SetCCVT, L, ZeroI64, ISD::SETNE), - DAG.getNode(ISD::SUB, SL, MVT::i32, K, LZ), - ZeroI32); - - SDValue U = DAG.getNode(ISD::AND, SL, MVT::i64, - DAG.getNode(ISD::SHL, SL, MVT::i64, L, LZ), - DAG.getConstant((-1ULL) >> 1, SL, MVT::i64)); - - SDValue T = DAG.getNode(ISD::AND, SL, MVT::i64, U, - DAG.getConstant(0xffffffffffULL, SL, MVT::i64)); - - SDValue UShl = DAG.getNode(ISD::SRL, SL, MVT::i64, - U, DAG.getConstant(40, SL, MVT::i64)); - SDValue V = DAG.getNode(ISD::OR, SL, MVT::i32, - DAG.getNode(ISD::SHL, SL, MVT::i32, E, DAG.getConstant(23, SL, MVT::i32)), - DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, UShl)); - - SDValue C = DAG.getConstant(0x8000000000ULL, SL, MVT::i64); - SDValue RCmp = DAG.getSetCC(SL, SetCCVT, T, C, ISD::SETUGT); - SDValue TCmp = DAG.getSetCC(SL, SetCCVT, T, C, ISD::SETEQ); - - SDValue One = DAG.getConstant(1, SL, MVT::i32); - - SDValue VTrunc1 = DAG.getNode(ISD::AND, SL, MVT::i32, V, One); - - SDValue R = DAG.getSelect(SL, MVT::i32, - RCmp, - One, - DAG.getSelect(SL, MVT::i32, TCmp, VTrunc1, ZeroI32)); - R = DAG.getNode(ISD::ADD, SL, MVT::i32, V, R); - R = DAG.getNode(ISD::BITCAST, SL, MVT::f32, R); - - if (!Signed) - return R; - - SDValue RNeg = DAG.getNode(ISD::FNEG, SL, MVT::f32, R); - return DAG.getSelect(SL, MVT::f32, DAG.getSExtOrTrunc(S, SL, SetCCVT), RNeg, R); + SDValue Lo, Hi; + std::tie(Lo, Hi) = split64BitValue(Src, DAG); + SDValue Sign; + SDValue ShAmt; + if (Signed && Subtarget->isGCN()) { + // We also need to consider the sign bit in Lo if Hi has just sign bits, + // i.e. Hi is 0 or -1. However, that only needs to take the MSB into + // account. + SDValue HasSameSign = + DAG.getSetCC(SL, SetCCVT, DAG.getNode(ISD::XOR, SL, MVT::i32, Lo, Hi), + ZeroI32, ISD::SETGE); + SDValue MaxShAmt = DAG.getSelect(SL, MVT::i32, HasSameSign, + DAG.getConstant(33, SL, MVT::i32), + DAG.getConstant(32, SL, MVT::i32)); + // Count the leading sign bits. + ShAmt = DAG.getNode(AMDGPUISD::FFBH_I32, SL, MVT::i32, Hi); + ShAmt = DAG.getSelect(SL, MVT::i32, + DAG.getSetCC(SL, SetCCVT, ShAmt, + DAG.getAllOnesConstant(SL, MVT::i32), + ISD::SETNE), + ShAmt, MaxShAmt); + // The shift amount for signed integers is [1, 33]. + // Different from unsigned conversion, the shift should be one bit less to + // preserve the sign bit. + ShAmt = DAG.getNode(ISD::SUB, SL, MVT::i32, ShAmt, + DAG.getConstant(1, SL, MVT::i32)); + } else { + if (Signed) { + // Without 'ffbh_i32', only leading zeros could be counted. Take the + // absolute value first. 
+ Sign = DAG.getNode(ISD::SRA, SL, MVT::i64, Src, + DAG.getConstant(63, SL, MVT::i64)); + SDValue Abs = + DAG.getNode(ISD::XOR, SL, MVT::i64, + DAG.getNode(ISD::ADD, SL, MVT::i64, Src, Sign), Sign); + std::tie(Lo, Hi) = split64BitValue(Abs, DAG); + } + // Count the leading zeros. + ShAmt = DAG.getNode(ISD::CTLZ, SL, MVT::i32, Hi); + // The shift amount for signed integers is [0, 32]. + } + // Normalize the given 64-bit integer. + SDValue Norm = DAG.getNode(ISD::SHL, SL, MVT::i64, Src, ShAmt); + // Split it again. + std::tie(Lo, Hi) = split64BitValue(Norm, DAG); + // Calculate the adjust bit for rounding. + SDValue Adjust = DAG.getSelect( + SL, MVT::i32, DAG.getSetCC(SL, SetCCVT, Lo, ZeroI32, ISD::SETNE), + DAG.getConstant(1, SL, MVT::i32), ZeroI32); + // Get the 32-bit normalized integer. + Norm = DAG.getNode(ISD::OR, SL, MVT::i32, Hi, Adjust); + // Convert the normalized 32-bit integer into f32. + unsigned Opc = + (Signed && Subtarget->isGCN()) ? ISD::SINT_TO_FP : ISD::UINT_TO_FP; + SDValue FVal = DAG.getNode(Opc, SL, MVT::f32, Norm); + + // Finally, need to scale back the converted floating number as the original + // 64-bit integer is converted as a 32-bit one. + ShAmt = DAG.getNode(ISD::SUB, SL, MVT::i32, DAG.getConstant(32, SL, MVT::i32), + ShAmt); + // On GCN, use LDEXP directly. + if (Subtarget->isGCN()) + return DAG.getNode(AMDGPUISD::LDEXP, SL, MVT::f32, FVal, ShAmt); + + // Otherwise, align 'ShAmt' to the exponent part and add it into the exponent + // part directly to emulate the multiplication of 2^ShAmt. That 8-bit + // exponent is enough to avoid overflowing into the sign bit. + SDValue Exp = DAG.getNode(ISD::SHL, SL, MVT::i32, ShAmt, + DAG.getConstant(23, SL, MVT::i32)); + SDValue IVal = + DAG.getNode(ISD::ADD, SL, MVT::i32, + DAG.getNode(ISD::BITCAST, SL, MVT::i32, FVal), Exp); + if (Signed) { + // Set the sign bit. + Sign = DAG.getNode(ISD::SHL, SL, MVT::i32, + DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, Sign), + DAG.getConstant(31, SL, MVT::i32)); + IVal = DAG.getNode(ISD::OR, SL, MVT::i32, IVal, Sign); + } + return DAG.getNode(ISD::BITCAST, SL, MVT::f32, IVal); } SDValue AMDGPUTargetLowering::LowerINT_TO_FP64(SDValue Op, SelectionDAG &DAG, diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp --- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp @@ -812,10 +812,9 @@ // TODO: Split s1->s64 during regbankselect for VALU. auto &IToFP = getActionDefinitionsBuilder({G_SITOFP, G_UITOFP}) - .legalFor({{S32, S32}, {S64, S32}, {S16, S32}}) - .lowerFor({{S32, S64}}) - .lowerIf(typeIs(1, S1)) - .customFor({{S64, S64}}); + .legalFor({{S32, S32}, {S64, S32}, {S16, S32}}) + .lowerIf(typeIs(1, S1)) + .customFor({{S32, S64}, {S64, S64}}); if (ST.has16BitInsts()) IToFP.legalFor({{S16, S16}}); IToFP.clampScalar(1, S32, S64) @@ -2063,24 +2062,60 @@ const LLT S64 = LLT::scalar(64); const LLT S32 = LLT::scalar(32); + const LLT S1 = LLT::scalar(1); - assert(MRI.getType(Src) == S64 && MRI.getType(Dst) == S64); + assert(MRI.getType(Src) == S64); auto Unmerge = B.buildUnmerge({S32, S32}, Src); + auto ThirtyTwo = B.buildConstant(S32, 32); - auto CvtHi = Signed ? - B.buildSITOFP(S64, Unmerge.getReg(1)) : - B.buildUITOFP(S64, Unmerge.getReg(1)); + if (MRI.getType(Dst) == S64) { + auto CvtHi = Signed ? 
B.buildSITOFP(S64, Unmerge.getReg(1)) + : B.buildUITOFP(S64, Unmerge.getReg(1)); - auto CvtLo = B.buildUITOFP(S64, Unmerge.getReg(0)); + auto CvtLo = B.buildUITOFP(S64, Unmerge.getReg(0)); + auto LdExp = B.buildIntrinsic(Intrinsic::amdgcn_ldexp, {S64}, false) + .addUse(CvtHi.getReg(0)) + .addUse(ThirtyTwo.getReg(0)); - auto ThirtyTwo = B.buildConstant(S32, 32); - auto LdExp = B.buildIntrinsic(Intrinsic::amdgcn_ldexp, {S64}, false) - .addUse(CvtHi.getReg(0)) - .addUse(ThirtyTwo.getReg(0)); + // TODO: Should this propagate fast-math-flags? + B.buildFAdd(Dst, LdExp, CvtLo); + MI.eraseFromParent(); + return true; + } - // TODO: Should this propagate fast-math-flags? - B.buildFAdd(Dst, LdExp, CvtLo); + assert(MRI.getType(Dst) == S32); + + auto Zero = B.buildConstant(S32, 0); + auto One = B.buildConstant(S32, 1); + auto AllOnes = B.buildConstant(S32, -1); + + MachineInstrBuilder ShAmt; + if (Signed) { + auto ThirtyThree = B.buildConstant(S32, 33); + auto X = B.buildXor(S32, Unmerge.getReg(0), Unmerge.getReg(1)); + auto HasSameSign = B.buildICmp(CmpInst::ICMP_SGE, S1, X, Zero); + auto MaxShAmt = B.buildSelect(S32, HasSameSign, ThirtyThree, ThirtyTwo); + auto LS = B.buildIntrinsic(Intrinsic::amdgcn_sffbh, {S32}, + /*HasSideEffects=*/false) + .addUse(Unmerge.getReg(1)); + auto NotAllSameBits = B.buildICmp(CmpInst::ICMP_NE, S1, LS, AllOnes); + auto LS2 = B.buildSelect(S32, NotAllSameBits, LS, MaxShAmt); + ShAmt = B.buildSub(S32, LS2, One); + } else + ShAmt = B.buildCTLZ(S32, Unmerge.getReg(1)); + auto Norm = B.buildShl(S64, Src, ShAmt); + auto Unmerge2 = B.buildUnmerge({S32, S32}, Norm); + auto NotAllZeros = + B.buildICmp(CmpInst::ICMP_NE, S1, Unmerge2.getReg(0), Zero); + auto Adjust = B.buildSelect(S32, NotAllZeros, One, Zero); + auto Norm2 = B.buildOr(S32, Unmerge2.getReg(1), Adjust); + auto FVal = Signed ? 
B.buildSITOFP(S32, Norm2) : B.buildUITOFP(S32, Norm2); + auto Scale = B.buildSub(S32, ThirtyTwo, ShAmt); + B.buildIntrinsic(Intrinsic::amdgcn_ldexp, ArrayRef{Dst}, + /*HasSideEffects=*/false) + .addUse(FVal.getReg(0)) + .addUse(Scale.getReg(0)); MI.eraseFromParent(); return true; } diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/cvt_f32_ubyte.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/cvt_f32_ubyte.ll --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/cvt_f32_ubyte.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/cvt_f32_ubyte.ll @@ -1082,65 +1082,37 @@ ; SI-LABEL: v_test_sitofp_i64_byte_to_f32: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NEXT: s_movk_i32 s6, 0xff -; SI-NEXT: v_and_b32_e32 v0, s6, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 0, v0 -; SI-NEXT: v_ffbh_u32_e32 v2, v0 -; SI-NEXT: v_addc_u32_e64 v1, s[4:5], 0, 0, vcc -; SI-NEXT: v_add_i32_e32 v2, vcc, 32, v2 -; SI-NEXT: v_ffbh_u32_e32 v3, v1 -; SI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1 -; SI-NEXT: v_cndmask_b32_e32 v2, v3, v2, vcc -; SI-NEXT: v_mov_b32_e32 v3, 0xbe -; SI-NEXT: v_sub_i32_e32 v4, vcc, v3, v2 -; SI-NEXT: v_lshl_b64 v[2:3], v[0:1], v2 -; SI-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1] -; SI-NEXT: v_and_b32_e32 v1, 0x7fffffff, v3 -; SI-NEXT: v_cndmask_b32_e32 v0, 0, v4, vcc -; SI-NEXT: s_mov_b32 s4, 0 -; SI-NEXT: v_and_b32_e32 v3, s6, v3 -; SI-NEXT: s_movk_i32 s5, 0x80 -; SI-NEXT: v_lshrrev_b32_e32 v1, 8, v1 -; SI-NEXT: v_lshlrev_b32_e32 v0, 23, v0 -; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: v_cmp_eq_u64_e32 vcc, s[4:5], v[2:3] -; SI-NEXT: v_and_b32_e32 v1, 1, v0 -; SI-NEXT: v_cndmask_b32_e32 v1, 0, v1, vcc -; SI-NEXT: v_cmp_lt_u64_e32 vcc, s[4:5], v[2:3] -; SI-NEXT: v_cndmask_b32_e64 v1, v1, 1, vcc -; SI-NEXT: v_add_i32_e32 v0, vcc, v0, v1 +; SI-NEXT: v_ffbh_i32_e32 v2, 0 +; SI-NEXT: v_cmp_ne_u32_e32 vcc, -1, v2 +; SI-NEXT: v_cndmask_b32_e32 v2, 33, v2, vcc +; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 +; SI-NEXT: v_mov_b32_e32 v1, 0 +; SI-NEXT: v_subrev_i32_e32 v2, vcc, 1, v2 +; SI-NEXT: v_lshl_b64 v[0:1], v[0:1], v2 +; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0 +; SI-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc +; SI-NEXT: v_or_b32_e32 v0, v1, v0 +; SI-NEXT: v_cvt_f32_i32_e32 v0, v0 +; SI-NEXT: v_sub_i32_e32 v1, vcc, 32, v2 +; SI-NEXT: v_ldexp_f32_e32 v0, v0, v1 ; SI-NEXT: s_setpc_b64 s[30:31] ; ; VI-LABEL: v_test_sitofp_i64_byte_to_f32: ; VI: ; %bb.0: ; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; VI-NEXT: s_movk_i32 s6, 0xff -; VI-NEXT: v_and_b32_e32 v0, s6, v0 -; VI-NEXT: v_add_u32_e32 v0, vcc, 0, v0 -; VI-NEXT: v_ffbh_u32_e32 v2, v0 -; VI-NEXT: v_addc_u32_e64 v1, s[4:5], 0, 0, vcc -; VI-NEXT: v_add_u32_e32 v2, vcc, 32, v2 -; VI-NEXT: v_ffbh_u32_e32 v3, v1 -; VI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1 -; VI-NEXT: v_cndmask_b32_e32 v2, v3, v2, vcc -; VI-NEXT: v_mov_b32_e32 v3, 0xbe -; VI-NEXT: v_sub_u32_e32 v4, vcc, v3, v2 -; VI-NEXT: v_lshlrev_b64 v[2:3], v2, v[0:1] -; VI-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1] -; VI-NEXT: v_and_b32_e32 v1, 0x7fffffff, v3 -; VI-NEXT: v_cndmask_b32_e32 v0, 0, v4, vcc -; VI-NEXT: s_mov_b32 s4, 0 -; VI-NEXT: v_and_b32_e32 v3, s6, v3 -; VI-NEXT: s_movk_i32 s5, 0x80 -; VI-NEXT: v_lshrrev_b32_e32 v1, 8, v1 -; VI-NEXT: v_lshlrev_b32_e32 v0, 23, v0 -; VI-NEXT: v_or_b32_e32 v0, v0, v1 -; VI-NEXT: v_cmp_eq_u64_e32 vcc, s[4:5], v[2:3] -; VI-NEXT: v_and_b32_e32 v1, 1, v0 -; VI-NEXT: v_cndmask_b32_e32 v1, 0, v1, vcc -; VI-NEXT: v_cmp_lt_u64_e32 vcc, s[4:5], v[2:3] -; VI-NEXT: v_cndmask_b32_e64 v1, v1, 1, vcc -; VI-NEXT: v_add_u32_e32 v0, vcc, v0, v1 +; VI-NEXT: v_ffbh_i32_e32 v2, 0 +; VI-NEXT: v_cmp_ne_u32_e32 
vcc, -1, v2 +; VI-NEXT: v_cndmask_b32_e32 v2, 33, v2, vcc +; VI-NEXT: v_and_b32_e32 v0, 0xff, v0 +; VI-NEXT: v_mov_b32_e32 v1, 0 +; VI-NEXT: v_subrev_u32_e32 v2, vcc, 1, v2 +; VI-NEXT: v_lshlrev_b64 v[0:1], v2, v[0:1] +; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0 +; VI-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc +; VI-NEXT: v_or_b32_e32 v0, v1, v0 +; VI-NEXT: v_cvt_f32_i32_e32 v0, v0 +; VI-NEXT: v_sub_u32_e32 v1, vcc, 32, v2 +; VI-NEXT: v_ldexp_f32 v0, v0, v1 ; VI-NEXT: s_setpc_b64 s[30:31] %masked = and i64 %arg0, 255 %itofp = sitofp i64 %masked to float @@ -1151,61 +1123,16 @@ ; SI-LABEL: v_test_uitofp_i64_byte_to_f32: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NEXT: s_movk_i32 s4, 0xff -; SI-NEXT: v_and_b32_e32 v0, s4, v0 -; SI-NEXT: v_ffbh_u32_e32 v2, v0 -; SI-NEXT: v_add_i32_e32 v2, vcc, 32, v2 -; SI-NEXT: v_ffbh_u32_e32 v3, 0 -; SI-NEXT: v_cmp_eq_u32_e64 vcc, 0, 0 -; SI-NEXT: v_cndmask_b32_e32 v2, v3, v2, vcc -; SI-NEXT: v_mov_b32_e32 v1, 0 -; SI-NEXT: v_sub_i32_e32 v4, vcc, 0xbe, v2 -; SI-NEXT: v_lshl_b64 v[2:3], v[0:1], v2 -; SI-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1] -; SI-NEXT: v_and_b32_e32 v1, 0x7fffffff, v3 -; SI-NEXT: v_cndmask_b32_e32 v0, 0, v4, vcc -; SI-NEXT: v_and_b32_e32 v3, s4, v3 -; SI-NEXT: s_mov_b32 s4, 0 -; SI-NEXT: s_movk_i32 s5, 0x80 -; SI-NEXT: v_lshrrev_b32_e32 v1, 8, v1 -; SI-NEXT: v_lshlrev_b32_e32 v0, 23, v0 -; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: v_cmp_eq_u64_e32 vcc, s[4:5], v[2:3] -; SI-NEXT: v_and_b32_e32 v1, 1, v0 -; SI-NEXT: v_cndmask_b32_e32 v1, 0, v1, vcc -; SI-NEXT: v_cmp_lt_u64_e32 vcc, s[4:5], v[2:3] -; SI-NEXT: v_cndmask_b32_e64 v1, v1, 1, vcc -; SI-NEXT: v_add_i32_e32 v0, vcc, v0, v1 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 +; SI-NEXT: v_cvt_f32_ubyte0_e32 v0, v0 +; SI-NEXT: v_ldexp_f32_e64 v0, v0, 0 ; SI-NEXT: s_setpc_b64 s[30:31] ; ; VI-LABEL: v_test_uitofp_i64_byte_to_f32: ; VI: ; %bb.0: ; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; VI-NEXT: s_movk_i32 s4, 0xff -; VI-NEXT: v_and_b32_e32 v0, s4, v0 -; VI-NEXT: v_ffbh_u32_e32 v2, v0 -; VI-NEXT: v_add_u32_e32 v2, vcc, 32, v2 -; VI-NEXT: v_ffbh_u32_e32 v3, 0 -; VI-NEXT: v_cmp_eq_u32_e64 vcc, 0, 0 -; VI-NEXT: v_cndmask_b32_e32 v2, v3, v2, vcc -; VI-NEXT: v_mov_b32_e32 v1, 0 -; VI-NEXT: v_sub_u32_e32 v4, vcc, 0xbe, v2 -; VI-NEXT: v_lshlrev_b64 v[2:3], v2, v[0:1] -; VI-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1] -; VI-NEXT: v_and_b32_e32 v1, 0x7fffffff, v3 -; VI-NEXT: v_cndmask_b32_e32 v0, 0, v4, vcc -; VI-NEXT: v_and_b32_e32 v3, s4, v3 -; VI-NEXT: s_mov_b32 s4, 0 -; VI-NEXT: s_movk_i32 s5, 0x80 -; VI-NEXT: v_lshrrev_b32_e32 v1, 8, v1 -; VI-NEXT: v_lshlrev_b32_e32 v0, 23, v0 -; VI-NEXT: v_or_b32_e32 v0, v0, v1 -; VI-NEXT: v_cmp_eq_u64_e32 vcc, s[4:5], v[2:3] -; VI-NEXT: v_and_b32_e32 v1, 1, v0 -; VI-NEXT: v_cndmask_b32_e32 v1, 0, v1, vcc -; VI-NEXT: v_cmp_lt_u64_e32 vcc, s[4:5], v[2:3] -; VI-NEXT: v_cndmask_b32_e64 v1, v1, 1, vcc -; VI-NEXT: v_add_u32_e32 v0, vcc, v0, v1 +; VI-NEXT: v_cvt_f32_ubyte0_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 +; VI-NEXT: v_ldexp_f32 v0, v0, 0 ; VI-NEXT: s_setpc_b64 s[30:31] %masked = and i64 %arg0, 255 %itofp = uitofp i64 %masked to float diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-sitofp.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-sitofp.mir --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-sitofp.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-sitofp.mir @@ -98,84 +98,52 @@ ; GFX6-LABEL: name: test_sitofp_s64_to_s32 ; GFX6: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1 - ; GFX6: 
[[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 63 - ; GFX6: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[COPY]], [[C]](s32) ; GFX6: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64) - ; GFX6: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ASHR]](s64) - ; GFX6: [[UADDO:%[0-9]+]]:_(s32), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[UV]], [[UV2]] - ; GFX6: [[UADDE:%[0-9]+]]:_(s32), [[UADDE1:%[0-9]+]]:_(s1) = G_UADDE [[UV1]], [[UV3]], [[UADDO1]] - ; GFX6: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO]](s32), [[UADDE]](s32) - ; GFX6: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[MV]], [[ASHR]] + ; GFX6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32 ; GFX6: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 - ; GFX6: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; GFX6: [[CTLZ_ZERO_UNDEF:%[0-9]+]]:_(s32) = G_CTLZ_ZERO_UNDEF [[XOR]](s64) - ; GFX6: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 190 - ; GFX6: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[C3]], [[CTLZ_ZERO_UNDEF]] - ; GFX6: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[XOR]](s64), [[C2]] - ; GFX6: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[SUB]], [[C1]] - ; GFX6: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 9223372036854775807 - ; GFX6: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[XOR]], [[CTLZ_ZERO_UNDEF]](s32) - ; GFX6: [[AND:%[0-9]+]]:_(s64) = G_AND [[SHL]], [[C4]] - ; GFX6: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 1099511627775 - ; GFX6: [[AND1:%[0-9]+]]:_(s64) = G_AND [[AND]], [[C5]] - ; GFX6: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 40 - ; GFX6: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND]], [[C6]](s32) - ; GFX6: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 23 - ; GFX6: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[SELECT]], [[C7]](s32) - ; GFX6: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[LSHR]](s64) - ; GFX6: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[TRUNC]] - ; GFX6: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 549755813888 - ; GFX6: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ugt), [[AND1]](s64), [[C8]] - ; GFX6: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[AND1]](s64), [[C8]] - ; GFX6: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 - ; GFX6: [[AND2:%[0-9]+]]:_(s32) = G_AND [[OR]], [[C9]] - ; GFX6: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[AND2]], [[C1]] - ; GFX6: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[C9]], [[SELECT1]] - ; GFX6: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[OR]], [[SELECT2]] - ; GFX6: [[FNEG:%[0-9]+]]:_(s32) = G_FNEG [[ADD]] - ; GFX6: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[ASHR]](s64), [[C2]] - ; GFX6: [[SELECT3:%[0-9]+]]:_(s32) = G_SELECT [[ICMP3]](s1), [[FNEG]], [[ADD]] - ; GFX6: $vgpr0 = COPY [[SELECT3]](s32) + ; GFX6: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 + ; GFX6: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1 + ; GFX6: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 33 + ; GFX6: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[UV]], [[UV1]] + ; GFX6: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sge), [[XOR]](s32), [[C1]] + ; GFX6: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[C4]], [[C]] + ; GFX6: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.sffbh), [[UV1]](s32) + ; GFX6: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[INT]](s32), [[C3]] + ; GFX6: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[INT]], [[SELECT]] + ; GFX6: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[SELECT1]], [[C2]] + ; GFX6: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[SUB]](s32) + ; GFX6: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SHL]](s64) + ; GFX6: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV2]](s32), [[C1]] + ; GFX6: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT 
[[ICMP2]](s1), [[C2]], [[C1]] + ; GFX6: [[OR:%[0-9]+]]:_(s32) = G_OR [[UV3]], [[SELECT2]] + ; GFX6: [[SITOFP:%[0-9]+]]:_(s32) = G_SITOFP [[OR]](s32) + ; GFX6: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[SUB]] + ; GFX6: [[INT1:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.ldexp), [[SITOFP]](s32), [[SUB1]](s32) + ; GFX6: $vgpr0 = COPY [[INT1]](s32) ; GFX8-LABEL: name: test_sitofp_s64_to_s32 ; GFX8: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1 - ; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 63 - ; GFX8: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[COPY]], [[C]](s32) ; GFX8: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64) - ; GFX8: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ASHR]](s64) - ; GFX8: [[UADDO:%[0-9]+]]:_(s32), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[UV]], [[UV2]] - ; GFX8: [[UADDE:%[0-9]+]]:_(s32), [[UADDE1:%[0-9]+]]:_(s1) = G_UADDE [[UV1]], [[UV3]], [[UADDO1]] - ; GFX8: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO]](s32), [[UADDE]](s32) - ; GFX8: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[MV]], [[ASHR]] + ; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32 ; GFX8: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 - ; GFX8: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; GFX8: [[CTLZ_ZERO_UNDEF:%[0-9]+]]:_(s32) = G_CTLZ_ZERO_UNDEF [[XOR]](s64) - ; GFX8: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 190 - ; GFX8: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[C3]], [[CTLZ_ZERO_UNDEF]] - ; GFX8: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[XOR]](s64), [[C2]] - ; GFX8: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[SUB]], [[C1]] - ; GFX8: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 9223372036854775807 - ; GFX8: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[XOR]], [[CTLZ_ZERO_UNDEF]](s32) - ; GFX8: [[AND:%[0-9]+]]:_(s64) = G_AND [[SHL]], [[C4]] - ; GFX8: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 1099511627775 - ; GFX8: [[AND1:%[0-9]+]]:_(s64) = G_AND [[AND]], [[C5]] - ; GFX8: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 40 - ; GFX8: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND]], [[C6]](s32) - ; GFX8: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 23 - ; GFX8: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[SELECT]], [[C7]](s32) - ; GFX8: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[LSHR]](s64) - ; GFX8: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[TRUNC]] - ; GFX8: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 549755813888 - ; GFX8: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ugt), [[AND1]](s64), [[C8]] - ; GFX8: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[AND1]](s64), [[C8]] - ; GFX8: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 - ; GFX8: [[AND2:%[0-9]+]]:_(s32) = G_AND [[OR]], [[C9]] - ; GFX8: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[AND2]], [[C1]] - ; GFX8: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[C9]], [[SELECT1]] - ; GFX8: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[OR]], [[SELECT2]] - ; GFX8: [[FNEG:%[0-9]+]]:_(s32) = G_FNEG [[ADD]] - ; GFX8: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[ASHR]](s64), [[C2]] - ; GFX8: [[SELECT3:%[0-9]+]]:_(s32) = G_SELECT [[ICMP3]](s1), [[FNEG]], [[ADD]] - ; GFX8: $vgpr0 = COPY [[SELECT3]](s32) + ; GFX8: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 + ; GFX8: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1 + ; GFX8: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 33 + ; GFX8: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[UV]], [[UV1]] + ; GFX8: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sge), [[XOR]](s32), [[C1]] + ; GFX8: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[C4]], [[C]] + ; GFX8: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.sffbh), [[UV1]](s32) + ; GFX8: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP 
intpred(ne), [[INT]](s32), [[C3]] + ; GFX8: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[INT]], [[SELECT]] + ; GFX8: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[SELECT1]], [[C2]] + ; GFX8: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[SUB]](s32) + ; GFX8: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SHL]](s64) + ; GFX8: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV2]](s32), [[C1]] + ; GFX8: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[C2]], [[C1]] + ; GFX8: [[OR:%[0-9]+]]:_(s32) = G_OR [[UV3]], [[SELECT2]] + ; GFX8: [[SITOFP:%[0-9]+]]:_(s32) = G_SITOFP [[OR]](s32) + ; GFX8: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[SUB]] + ; GFX8: [[INT1:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.ldexp), [[SITOFP]](s32), [[SUB1]](s32) + ; GFX8: $vgpr0 = COPY [[INT1]](s32) %0:_(s64) = COPY $vgpr0_vgpr1 %1:_(s32) = G_SITOFP %0 $vgpr0 = COPY %1 @@ -190,18 +158,18 @@ ; GFX6-LABEL: name: test_sitofp_s64_to_s64 ; GFX6: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1 ; GFX6: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64) + ; GFX6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32 ; GFX6: [[SITOFP:%[0-9]+]]:_(s64) = G_SITOFP [[UV1]](s32) ; GFX6: [[UITOFP:%[0-9]+]]:_(s64) = G_UITOFP [[UV]](s32) - ; GFX6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32 ; GFX6: [[INT:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.ldexp), [[SITOFP]](s64), [[C]](s32) ; GFX6: [[FADD:%[0-9]+]]:_(s64) = G_FADD [[INT]], [[UITOFP]] ; GFX6: $vgpr0_vgpr1 = COPY [[FADD]](s64) ; GFX8-LABEL: name: test_sitofp_s64_to_s64 ; GFX8: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1 ; GFX8: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64) + ; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32 ; GFX8: [[SITOFP:%[0-9]+]]:_(s64) = G_SITOFP [[UV1]](s32) ; GFX8: [[UITOFP:%[0-9]+]]:_(s64) = G_UITOFP [[UV]](s32) - ; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32 ; GFX8: [[INT:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.ldexp), [[SITOFP]](s64), [[C]](s32) ; GFX8: [[FADD:%[0-9]+]]:_(s64) = G_FADD [[INT]], [[UITOFP]] ; GFX8: $vgpr0_vgpr1 = COPY [[FADD]](s64) @@ -450,86 +418,54 @@ ; GFX6: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1 ; GFX6: [[COPY1:%[0-9]+]]:_(s64) = COPY [[COPY]](s64) ; GFX6: [[SEXT_INREG:%[0-9]+]]:_(s64) = G_SEXT_INREG [[COPY1]], 33 - ; GFX6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 63 - ; GFX6: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SEXT_INREG]], [[C]](s32) ; GFX6: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SEXT_INREG]](s64) - ; GFX6: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ASHR]](s64) - ; GFX6: [[UADDO:%[0-9]+]]:_(s32), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[UV]], [[UV2]] - ; GFX6: [[UADDE:%[0-9]+]]:_(s32), [[UADDE1:%[0-9]+]]:_(s1) = G_UADDE [[UV1]], [[UV3]], [[UADDO1]] - ; GFX6: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO]](s32), [[UADDE]](s32) - ; GFX6: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[MV]], [[ASHR]] + ; GFX6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32 ; GFX6: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 - ; GFX6: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; GFX6: [[CTLZ_ZERO_UNDEF:%[0-9]+]]:_(s32) = G_CTLZ_ZERO_UNDEF [[XOR]](s64) - ; GFX6: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 190 - ; GFX6: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[C3]], [[CTLZ_ZERO_UNDEF]] - ; GFX6: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[XOR]](s64), [[C2]] - ; GFX6: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[SUB]], [[C1]] - ; GFX6: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 9223372036854775807 - ; GFX6: [[SHL:%[0-9]+]]:_(s64) = G_SHL 
[[XOR]], [[CTLZ_ZERO_UNDEF]](s32) - ; GFX6: [[AND:%[0-9]+]]:_(s64) = G_AND [[SHL]], [[C4]] - ; GFX6: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 1099511627775 - ; GFX6: [[AND1:%[0-9]+]]:_(s64) = G_AND [[AND]], [[C5]] - ; GFX6: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 40 - ; GFX6: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND]], [[C6]](s32) - ; GFX6: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 23 - ; GFX6: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[SELECT]], [[C7]](s32) - ; GFX6: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[LSHR]](s64) - ; GFX6: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[TRUNC]] - ; GFX6: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 549755813888 - ; GFX6: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ugt), [[AND1]](s64), [[C8]] - ; GFX6: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[AND1]](s64), [[C8]] - ; GFX6: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 - ; GFX6: [[AND2:%[0-9]+]]:_(s32) = G_AND [[OR]], [[C9]] - ; GFX6: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[AND2]], [[C1]] - ; GFX6: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[C9]], [[SELECT1]] - ; GFX6: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[OR]], [[SELECT2]] - ; GFX6: [[FNEG:%[0-9]+]]:_(s32) = G_FNEG [[ADD]] - ; GFX6: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[ASHR]](s64), [[C2]] - ; GFX6: [[SELECT3:%[0-9]+]]:_(s32) = G_SELECT [[ICMP3]](s1), [[FNEG]], [[ADD]] - ; GFX6: $vgpr0 = COPY [[SELECT3]](s32) + ; GFX6: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 + ; GFX6: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1 + ; GFX6: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 33 + ; GFX6: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[UV]], [[UV1]] + ; GFX6: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sge), [[XOR]](s32), [[C1]] + ; GFX6: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[C4]], [[C]] + ; GFX6: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.sffbh), [[UV1]](s32) + ; GFX6: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[INT]](s32), [[C3]] + ; GFX6: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[INT]], [[SELECT]] + ; GFX6: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[SELECT1]], [[C2]] + ; GFX6: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[SEXT_INREG]], [[SUB]](s32) + ; GFX6: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SHL]](s64) + ; GFX6: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV2]](s32), [[C1]] + ; GFX6: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[C2]], [[C1]] + ; GFX6: [[OR:%[0-9]+]]:_(s32) = G_OR [[UV3]], [[SELECT2]] + ; GFX6: [[SITOFP:%[0-9]+]]:_(s32) = G_SITOFP [[OR]](s32) + ; GFX6: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[SUB]] + ; GFX6: [[INT1:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.ldexp), [[SITOFP]](s32), [[SUB1]](s32) + ; GFX6: $vgpr0 = COPY [[INT1]](s32) ; GFX8-LABEL: name: test_sitofp_s33_to_s32 ; GFX8: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1 ; GFX8: [[COPY1:%[0-9]+]]:_(s64) = COPY [[COPY]](s64) ; GFX8: [[SEXT_INREG:%[0-9]+]]:_(s64) = G_SEXT_INREG [[COPY1]], 33 - ; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 63 - ; GFX8: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SEXT_INREG]], [[C]](s32) ; GFX8: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SEXT_INREG]](s64) - ; GFX8: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ASHR]](s64) - ; GFX8: [[UADDO:%[0-9]+]]:_(s32), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[UV]], [[UV2]] - ; GFX8: [[UADDE:%[0-9]+]]:_(s32), [[UADDE1:%[0-9]+]]:_(s1) = G_UADDE [[UV1]], [[UV3]], [[UADDO1]] - ; GFX8: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO]](s32), [[UADDE]](s32) - ; GFX8: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[MV]], [[ASHR]] + ; GFX8: 
[[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32 ; GFX8: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 - ; GFX8: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; GFX8: [[CTLZ_ZERO_UNDEF:%[0-9]+]]:_(s32) = G_CTLZ_ZERO_UNDEF [[XOR]](s64) - ; GFX8: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 190 - ; GFX8: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[C3]], [[CTLZ_ZERO_UNDEF]] - ; GFX8: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[XOR]](s64), [[C2]] - ; GFX8: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[SUB]], [[C1]] - ; GFX8: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 9223372036854775807 - ; GFX8: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[XOR]], [[CTLZ_ZERO_UNDEF]](s32) - ; GFX8: [[AND:%[0-9]+]]:_(s64) = G_AND [[SHL]], [[C4]] - ; GFX8: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 1099511627775 - ; GFX8: [[AND1:%[0-9]+]]:_(s64) = G_AND [[AND]], [[C5]] - ; GFX8: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 40 - ; GFX8: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND]], [[C6]](s32) - ; GFX8: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 23 - ; GFX8: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[SELECT]], [[C7]](s32) - ; GFX8: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[LSHR]](s64) - ; GFX8: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[TRUNC]] - ; GFX8: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 549755813888 - ; GFX8: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ugt), [[AND1]](s64), [[C8]] - ; GFX8: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[AND1]](s64), [[C8]] - ; GFX8: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 - ; GFX8: [[AND2:%[0-9]+]]:_(s32) = G_AND [[OR]], [[C9]] - ; GFX8: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[AND2]], [[C1]] - ; GFX8: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[C9]], [[SELECT1]] - ; GFX8: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[OR]], [[SELECT2]] - ; GFX8: [[FNEG:%[0-9]+]]:_(s32) = G_FNEG [[ADD]] - ; GFX8: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[ASHR]](s64), [[C2]] - ; GFX8: [[SELECT3:%[0-9]+]]:_(s32) = G_SELECT [[ICMP3]](s1), [[FNEG]], [[ADD]] - ; GFX8: $vgpr0 = COPY [[SELECT3]](s32) + ; GFX8: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 + ; GFX8: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1 + ; GFX8: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 33 + ; GFX8: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[UV]], [[UV1]] + ; GFX8: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sge), [[XOR]](s32), [[C1]] + ; GFX8: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[C4]], [[C]] + ; GFX8: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.sffbh), [[UV1]](s32) + ; GFX8: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[INT]](s32), [[C3]] + ; GFX8: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[INT]], [[SELECT]] + ; GFX8: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[SELECT1]], [[C2]] + ; GFX8: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[SEXT_INREG]], [[SUB]](s32) + ; GFX8: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SHL]](s64) + ; GFX8: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV2]](s32), [[C1]] + ; GFX8: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[C2]], [[C1]] + ; GFX8: [[OR:%[0-9]+]]:_(s32) = G_OR [[UV3]], [[SELECT2]] + ; GFX8: [[SITOFP:%[0-9]+]]:_(s32) = G_SITOFP [[OR]](s32) + ; GFX8: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[SUB]] + ; GFX8: [[INT1:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.ldexp), [[SITOFP]](s32), [[SUB1]](s32) + ; GFX8: $vgpr0 = COPY [[INT1]](s32) %0:_(s64) = COPY $vgpr0_vgpr1 %1:_(s33) = G_TRUNC %0 %2:_(s32) = G_SITOFP %1 @@ -544,86 +480,54 @@ ; GFX6-LABEL: name: test_sitofp_s64_to_s16 ; GFX6: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1 - ; GFX6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 63 - ; GFX6: 
[[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[COPY]], [[C]](s32) ; GFX6: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64) - ; GFX6: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ASHR]](s64) - ; GFX6: [[UADDO:%[0-9]+]]:_(s32), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[UV]], [[UV2]] - ; GFX6: [[UADDE:%[0-9]+]]:_(s32), [[UADDE1:%[0-9]+]]:_(s1) = G_UADDE [[UV1]], [[UV3]], [[UADDO1]] - ; GFX6: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO]](s32), [[UADDE]](s32) - ; GFX6: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[MV]], [[ASHR]] + ; GFX6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32 ; GFX6: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 - ; GFX6: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; GFX6: [[CTLZ_ZERO_UNDEF:%[0-9]+]]:_(s32) = G_CTLZ_ZERO_UNDEF [[XOR]](s64) - ; GFX6: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 190 - ; GFX6: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[C3]], [[CTLZ_ZERO_UNDEF]] - ; GFX6: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[XOR]](s64), [[C2]] - ; GFX6: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[SUB]], [[C1]] - ; GFX6: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 9223372036854775807 - ; GFX6: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[XOR]], [[CTLZ_ZERO_UNDEF]](s32) - ; GFX6: [[AND:%[0-9]+]]:_(s64) = G_AND [[SHL]], [[C4]] - ; GFX6: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 1099511627775 - ; GFX6: [[AND1:%[0-9]+]]:_(s64) = G_AND [[AND]], [[C5]] - ; GFX6: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 40 - ; GFX6: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND]], [[C6]](s32) - ; GFX6: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 23 - ; GFX6: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[SELECT]], [[C7]](s32) - ; GFX6: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[LSHR]](s64) - ; GFX6: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[TRUNC]] - ; GFX6: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 549755813888 - ; GFX6: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ugt), [[AND1]](s64), [[C8]] - ; GFX6: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[AND1]](s64), [[C8]] - ; GFX6: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 - ; GFX6: [[AND2:%[0-9]+]]:_(s32) = G_AND [[OR]], [[C9]] - ; GFX6: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[AND2]], [[C1]] - ; GFX6: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[C9]], [[SELECT1]] - ; GFX6: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[OR]], [[SELECT2]] - ; GFX6: [[FNEG:%[0-9]+]]:_(s32) = G_FNEG [[ADD]] - ; GFX6: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[ASHR]](s64), [[C2]] - ; GFX6: [[SELECT3:%[0-9]+]]:_(s32) = G_SELECT [[ICMP3]](s1), [[FNEG]], [[ADD]] - ; GFX6: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[SELECT3]](s32) + ; GFX6: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 + ; GFX6: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1 + ; GFX6: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 33 + ; GFX6: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[UV]], [[UV1]] + ; GFX6: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sge), [[XOR]](s32), [[C1]] + ; GFX6: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[C4]], [[C]] + ; GFX6: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.sffbh), [[UV1]](s32) + ; GFX6: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[INT]](s32), [[C3]] + ; GFX6: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[INT]], [[SELECT]] + ; GFX6: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[SELECT1]], [[C2]] + ; GFX6: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[SUB]](s32) + ; GFX6: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SHL]](s64) + ; GFX6: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV2]](s32), [[C1]] + ; GFX6: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[C2]], [[C1]] + ; GFX6: 
[[OR:%[0-9]+]]:_(s32) = G_OR [[UV3]], [[SELECT2]] + ; GFX6: [[SITOFP:%[0-9]+]]:_(s32) = G_SITOFP [[OR]](s32) + ; GFX6: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[SUB]] + ; GFX6: [[INT1:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.ldexp), [[SITOFP]](s32), [[SUB1]](s32) + ; GFX6: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[INT1]](s32) ; GFX6: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FPTRUNC]](s16) ; GFX6: $vgpr0 = COPY [[ANYEXT]](s32) ; GFX8-LABEL: name: test_sitofp_s64_to_s16 ; GFX8: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1 - ; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 63 - ; GFX8: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[COPY]], [[C]](s32) ; GFX8: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64) - ; GFX8: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ASHR]](s64) - ; GFX8: [[UADDO:%[0-9]+]]:_(s32), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[UV]], [[UV2]] - ; GFX8: [[UADDE:%[0-9]+]]:_(s32), [[UADDE1:%[0-9]+]]:_(s1) = G_UADDE [[UV1]], [[UV3]], [[UADDO1]] - ; GFX8: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO]](s32), [[UADDE]](s32) - ; GFX8: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[MV]], [[ASHR]] + ; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32 ; GFX8: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 - ; GFX8: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; GFX8: [[CTLZ_ZERO_UNDEF:%[0-9]+]]:_(s32) = G_CTLZ_ZERO_UNDEF [[XOR]](s64) - ; GFX8: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 190 - ; GFX8: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[C3]], [[CTLZ_ZERO_UNDEF]] - ; GFX8: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[XOR]](s64), [[C2]] - ; GFX8: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[SUB]], [[C1]] - ; GFX8: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 9223372036854775807 - ; GFX8: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[XOR]], [[CTLZ_ZERO_UNDEF]](s32) - ; GFX8: [[AND:%[0-9]+]]:_(s64) = G_AND [[SHL]], [[C4]] - ; GFX8: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 1099511627775 - ; GFX8: [[AND1:%[0-9]+]]:_(s64) = G_AND [[AND]], [[C5]] - ; GFX8: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 40 - ; GFX8: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND]], [[C6]](s32) - ; GFX8: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 23 - ; GFX8: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[SELECT]], [[C7]](s32) - ; GFX8: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[LSHR]](s64) - ; GFX8: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[TRUNC]] - ; GFX8: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 549755813888 - ; GFX8: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ugt), [[AND1]](s64), [[C8]] - ; GFX8: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[AND1]](s64), [[C8]] - ; GFX8: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 - ; GFX8: [[AND2:%[0-9]+]]:_(s32) = G_AND [[OR]], [[C9]] - ; GFX8: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[AND2]], [[C1]] - ; GFX8: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[C9]], [[SELECT1]] - ; GFX8: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[OR]], [[SELECT2]] - ; GFX8: [[FNEG:%[0-9]+]]:_(s32) = G_FNEG [[ADD]] - ; GFX8: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[ASHR]](s64), [[C2]] - ; GFX8: [[SELECT3:%[0-9]+]]:_(s32) = G_SELECT [[ICMP3]](s1), [[FNEG]], [[ADD]] - ; GFX8: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[SELECT3]](s32) + ; GFX8: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 + ; GFX8: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1 + ; GFX8: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 33 + ; GFX8: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[UV]], [[UV1]] + ; GFX8: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sge), [[XOR]](s32), [[C1]] + ; GFX8: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[C4]], [[C]] + ; GFX8: [[INT:%[0-9]+]]:_(s32) = 
G_INTRINSIC intrinsic(@llvm.amdgcn.sffbh), [[UV1]](s32) + ; GFX8: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[INT]](s32), [[C3]] + ; GFX8: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[INT]], [[SELECT]] + ; GFX8: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[SELECT1]], [[C2]] + ; GFX8: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[SUB]](s32) + ; GFX8: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SHL]](s64) + ; GFX8: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV2]](s32), [[C1]] + ; GFX8: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[C2]], [[C1]] + ; GFX8: [[OR:%[0-9]+]]:_(s32) = G_OR [[UV3]], [[SELECT2]] + ; GFX8: [[SITOFP:%[0-9]+]]:_(s32) = G_SITOFP [[OR]](s32) + ; GFX8: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[SUB]] + ; GFX8: [[INT1:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.ldexp), [[SITOFP]](s32), [[SUB1]](s32) + ; GFX8: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[INT1]](s32) ; GFX8: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FPTRUNC]](s16) ; GFX8: $vgpr0 = COPY [[ANYEXT]](s32) %0:_(s64) = COPY $vgpr0_vgpr1 @@ -641,153 +545,99 @@ ; GFX6-LABEL: name: test_sitofp_v2s64_to_v2s16 ; GFX6: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3 ; GFX6: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](<2 x s64>) - ; GFX6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 63 - ; GFX6: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[UV]], [[C]](s32) ; GFX6: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV]](s64) - ; GFX6: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ASHR]](s64) - ; GFX6: [[UADDO:%[0-9]+]]:_(s32), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[UV2]], [[UV4]] - ; GFX6: [[UADDE:%[0-9]+]]:_(s32), [[UADDE1:%[0-9]+]]:_(s1) = G_UADDE [[UV3]], [[UV5]], [[UADDO1]] - ; GFX6: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO]](s32), [[UADDE]](s32) - ; GFX6: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[MV]], [[ASHR]] + ; GFX6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32 ; GFX6: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 - ; GFX6: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; GFX6: [[CTLZ_ZERO_UNDEF:%[0-9]+]]:_(s32) = G_CTLZ_ZERO_UNDEF [[XOR]](s64) - ; GFX6: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 190 - ; GFX6: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[C3]], [[CTLZ_ZERO_UNDEF]] - ; GFX6: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[XOR]](s64), [[C2]] - ; GFX6: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[SUB]], [[C1]] - ; GFX6: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 9223372036854775807 - ; GFX6: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[XOR]], [[CTLZ_ZERO_UNDEF]](s32) - ; GFX6: [[AND:%[0-9]+]]:_(s64) = G_AND [[SHL]], [[C4]] - ; GFX6: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 1099511627775 - ; GFX6: [[AND1:%[0-9]+]]:_(s64) = G_AND [[AND]], [[C5]] - ; GFX6: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 40 - ; GFX6: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND]], [[C6]](s32) - ; GFX6: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 23 - ; GFX6: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[SELECT]], [[C7]](s32) - ; GFX6: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[LSHR]](s64) - ; GFX6: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[TRUNC]] - ; GFX6: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 549755813888 - ; GFX6: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ugt), [[AND1]](s64), [[C8]] - ; GFX6: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[AND1]](s64), [[C8]] - ; GFX6: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 - ; GFX6: [[AND2:%[0-9]+]]:_(s32) = G_AND [[OR]], [[C9]] - ; GFX6: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[AND2]], [[C1]] - ; GFX6: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT 
[[ICMP1]](s1), [[C9]], [[SELECT1]] - ; GFX6: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[OR]], [[SELECT2]] - ; GFX6: [[FNEG:%[0-9]+]]:_(s32) = G_FNEG [[ADD]] - ; GFX6: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[ASHR]](s64), [[C2]] - ; GFX6: [[SELECT3:%[0-9]+]]:_(s32) = G_SELECT [[ICMP3]](s1), [[FNEG]], [[ADD]] - ; GFX6: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[SELECT3]](s32) - ; GFX6: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[UV1]], [[C]](s32) + ; GFX6: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 + ; GFX6: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1 + ; GFX6: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 33 + ; GFX6: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[UV2]], [[UV3]] + ; GFX6: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sge), [[XOR]](s32), [[C1]] + ; GFX6: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[C4]], [[C]] + ; GFX6: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.sffbh), [[UV3]](s32) + ; GFX6: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[INT]](s32), [[C3]] + ; GFX6: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[INT]], [[SELECT]] + ; GFX6: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[SELECT1]], [[C2]] + ; GFX6: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV]], [[SUB]](s32) + ; GFX6: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SHL]](s64) + ; GFX6: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV4]](s32), [[C1]] + ; GFX6: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[C2]], [[C1]] + ; GFX6: [[OR:%[0-9]+]]:_(s32) = G_OR [[UV5]], [[SELECT2]] + ; GFX6: [[SITOFP:%[0-9]+]]:_(s32) = G_SITOFP [[OR]](s32) + ; GFX6: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[SUB]] + ; GFX6: [[INT1:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.ldexp), [[SITOFP]](s32), [[SUB1]](s32) + ; GFX6: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[INT1]](s32) ; GFX6: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV1]](s64) - ; GFX6: [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ASHR1]](s64) - ; GFX6: [[UADDO2:%[0-9]+]]:_(s32), [[UADDO3:%[0-9]+]]:_(s1) = G_UADDO [[UV6]], [[UV8]] - ; GFX6: [[UADDE2:%[0-9]+]]:_(s32), [[UADDE3:%[0-9]+]]:_(s1) = G_UADDE [[UV7]], [[UV9]], [[UADDO3]] - ; GFX6: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO2]](s32), [[UADDE2]](s32) - ; GFX6: [[XOR1:%[0-9]+]]:_(s64) = G_XOR [[MV1]], [[ASHR1]] - ; GFX6: [[CTLZ_ZERO_UNDEF1:%[0-9]+]]:_(s32) = G_CTLZ_ZERO_UNDEF [[XOR1]](s64) - ; GFX6: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C3]], [[CTLZ_ZERO_UNDEF1]] - ; GFX6: [[ICMP4:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[XOR1]](s64), [[C2]] - ; GFX6: [[SELECT4:%[0-9]+]]:_(s32) = G_SELECT [[ICMP4]](s1), [[SUB1]], [[C1]] - ; GFX6: [[SHL2:%[0-9]+]]:_(s64) = G_SHL [[XOR1]], [[CTLZ_ZERO_UNDEF1]](s32) - ; GFX6: [[AND3:%[0-9]+]]:_(s64) = G_AND [[SHL2]], [[C4]] - ; GFX6: [[AND4:%[0-9]+]]:_(s64) = G_AND [[AND3]], [[C5]] - ; GFX6: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[AND3]], [[C6]](s32) - ; GFX6: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[SELECT4]], [[C7]](s32) - ; GFX6: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[LSHR1]](s64) - ; GFX6: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[TRUNC1]] - ; GFX6: [[ICMP5:%[0-9]+]]:_(s1) = G_ICMP intpred(ugt), [[AND4]](s64), [[C8]] - ; GFX6: [[ICMP6:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[AND4]](s64), [[C8]] - ; GFX6: [[AND5:%[0-9]+]]:_(s32) = G_AND [[OR1]], [[C9]] - ; GFX6: [[SELECT5:%[0-9]+]]:_(s32) = G_SELECT [[ICMP6]](s1), [[AND5]], [[C1]] - ; GFX6: [[SELECT6:%[0-9]+]]:_(s32) = G_SELECT [[ICMP5]](s1), [[C9]], [[SELECT5]] - ; GFX6: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[OR1]], [[SELECT6]] - ; GFX6: [[FNEG1:%[0-9]+]]:_(s32) = G_FNEG [[ADD1]] - ; GFX6: 
[[ICMP7:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[ASHR1]](s64), [[C2]] - ; GFX6: [[SELECT7:%[0-9]+]]:_(s32) = G_SELECT [[ICMP7]](s1), [[FNEG1]], [[ADD1]] - ; GFX6: [[FPTRUNC1:%[0-9]+]]:_(s16) = G_FPTRUNC [[SELECT7]](s32) + ; GFX6: [[XOR1:%[0-9]+]]:_(s32) = G_XOR [[UV6]], [[UV7]] + ; GFX6: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(sge), [[XOR1]](s32), [[C1]] + ; GFX6: [[SELECT3:%[0-9]+]]:_(s32) = G_SELECT [[ICMP3]](s1), [[C4]], [[C]] + ; GFX6: [[INT2:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.sffbh), [[UV7]](s32) + ; GFX6: [[ICMP4:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[INT2]](s32), [[C3]] + ; GFX6: [[SELECT4:%[0-9]+]]:_(s32) = G_SELECT [[ICMP4]](s1), [[INT2]], [[SELECT3]] + ; GFX6: [[SUB2:%[0-9]+]]:_(s32) = G_SUB [[SELECT4]], [[C2]] + ; GFX6: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[SUB2]](s32) + ; GFX6: [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SHL1]](s64) + ; GFX6: [[ICMP5:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV8]](s32), [[C1]] + ; GFX6: [[SELECT5:%[0-9]+]]:_(s32) = G_SELECT [[ICMP5]](s1), [[C2]], [[C1]] + ; GFX6: [[OR1:%[0-9]+]]:_(s32) = G_OR [[UV9]], [[SELECT5]] + ; GFX6: [[SITOFP1:%[0-9]+]]:_(s32) = G_SITOFP [[OR1]](s32) + ; GFX6: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[C]], [[SUB2]] + ; GFX6: [[INT3:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.ldexp), [[SITOFP1]](s32), [[SUB3]](s32) + ; GFX6: [[FPTRUNC1:%[0-9]+]]:_(s16) = G_FPTRUNC [[INT3]](s32) ; GFX6: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[FPTRUNC]](s16) ; GFX6: [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[FPTRUNC1]](s16) - ; GFX6: [[C10:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 - ; GFX6: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[ZEXT1]], [[C10]](s32) - ; GFX6: [[OR2:%[0-9]+]]:_(s32) = G_OR [[ZEXT]], [[SHL4]] + ; GFX6: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 + ; GFX6: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[ZEXT1]], [[C5]](s32) + ; GFX6: [[OR2:%[0-9]+]]:_(s32) = G_OR [[ZEXT]], [[SHL2]] ; GFX6: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR2]](s32) ; GFX6: $vgpr0 = COPY [[BITCAST]](<2 x s16>) ; GFX8-LABEL: name: test_sitofp_v2s64_to_v2s16 ; GFX8: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3 ; GFX8: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](<2 x s64>) - ; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 63 - ; GFX8: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[UV]], [[C]](s32) ; GFX8: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV]](s64) - ; GFX8: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ASHR]](s64) - ; GFX8: [[UADDO:%[0-9]+]]:_(s32), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[UV2]], [[UV4]] - ; GFX8: [[UADDE:%[0-9]+]]:_(s32), [[UADDE1:%[0-9]+]]:_(s1) = G_UADDE [[UV3]], [[UV5]], [[UADDO1]] - ; GFX8: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO]](s32), [[UADDE]](s32) - ; GFX8: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[MV]], [[ASHR]] + ; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32 ; GFX8: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 - ; GFX8: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; GFX8: [[CTLZ_ZERO_UNDEF:%[0-9]+]]:_(s32) = G_CTLZ_ZERO_UNDEF [[XOR]](s64) - ; GFX8: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 190 - ; GFX8: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[C3]], [[CTLZ_ZERO_UNDEF]] - ; GFX8: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[XOR]](s64), [[C2]] - ; GFX8: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[SUB]], [[C1]] - ; GFX8: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 9223372036854775807 - ; GFX8: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[XOR]], [[CTLZ_ZERO_UNDEF]](s32) - ; GFX8: [[AND:%[0-9]+]]:_(s64) = G_AND [[SHL]], [[C4]] - ; GFX8: 
[[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 1099511627775 - ; GFX8: [[AND1:%[0-9]+]]:_(s64) = G_AND [[AND]], [[C5]] - ; GFX8: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 40 - ; GFX8: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND]], [[C6]](s32) - ; GFX8: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 23 - ; GFX8: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[SELECT]], [[C7]](s32) - ; GFX8: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[LSHR]](s64) - ; GFX8: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[TRUNC]] - ; GFX8: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 549755813888 - ; GFX8: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ugt), [[AND1]](s64), [[C8]] - ; GFX8: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[AND1]](s64), [[C8]] - ; GFX8: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 - ; GFX8: [[AND2:%[0-9]+]]:_(s32) = G_AND [[OR]], [[C9]] - ; GFX8: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[AND2]], [[C1]] - ; GFX8: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[C9]], [[SELECT1]] - ; GFX8: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[OR]], [[SELECT2]] - ; GFX8: [[FNEG:%[0-9]+]]:_(s32) = G_FNEG [[ADD]] - ; GFX8: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[ASHR]](s64), [[C2]] - ; GFX8: [[SELECT3:%[0-9]+]]:_(s32) = G_SELECT [[ICMP3]](s1), [[FNEG]], [[ADD]] - ; GFX8: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[SELECT3]](s32) - ; GFX8: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[UV1]], [[C]](s32) + ; GFX8: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 + ; GFX8: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1 + ; GFX8: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 33 + ; GFX8: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[UV2]], [[UV3]] + ; GFX8: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sge), [[XOR]](s32), [[C1]] + ; GFX8: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[C4]], [[C]] + ; GFX8: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.sffbh), [[UV3]](s32) + ; GFX8: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[INT]](s32), [[C3]] + ; GFX8: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[INT]], [[SELECT]] + ; GFX8: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[SELECT1]], [[C2]] + ; GFX8: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV]], [[SUB]](s32) + ; GFX8: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SHL]](s64) + ; GFX8: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV4]](s32), [[C1]] + ; GFX8: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[C2]], [[C1]] + ; GFX8: [[OR:%[0-9]+]]:_(s32) = G_OR [[UV5]], [[SELECT2]] + ; GFX8: [[SITOFP:%[0-9]+]]:_(s32) = G_SITOFP [[OR]](s32) + ; GFX8: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[SUB]] + ; GFX8: [[INT1:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.ldexp), [[SITOFP]](s32), [[SUB1]](s32) + ; GFX8: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[INT1]](s32) ; GFX8: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV1]](s64) - ; GFX8: [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ASHR1]](s64) - ; GFX8: [[UADDO2:%[0-9]+]]:_(s32), [[UADDO3:%[0-9]+]]:_(s1) = G_UADDO [[UV6]], [[UV8]] - ; GFX8: [[UADDE2:%[0-9]+]]:_(s32), [[UADDE3:%[0-9]+]]:_(s1) = G_UADDE [[UV7]], [[UV9]], [[UADDO3]] - ; GFX8: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO2]](s32), [[UADDE2]](s32) - ; GFX8: [[XOR1:%[0-9]+]]:_(s64) = G_XOR [[MV1]], [[ASHR1]] - ; GFX8: [[CTLZ_ZERO_UNDEF1:%[0-9]+]]:_(s32) = G_CTLZ_ZERO_UNDEF [[XOR1]](s64) - ; GFX8: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C3]], [[CTLZ_ZERO_UNDEF1]] - ; GFX8: [[ICMP4:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[XOR1]](s64), [[C2]] - ; GFX8: [[SELECT4:%[0-9]+]]:_(s32) = G_SELECT [[ICMP4]](s1), [[SUB1]], [[C1]] - ; GFX8: [[SHL2:%[0-9]+]]:_(s64) = G_SHL 
[[XOR1]], [[CTLZ_ZERO_UNDEF1]](s32) - ; GFX8: [[AND3:%[0-9]+]]:_(s64) = G_AND [[SHL2]], [[C4]] - ; GFX8: [[AND4:%[0-9]+]]:_(s64) = G_AND [[AND3]], [[C5]] - ; GFX8: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[AND3]], [[C6]](s32) - ; GFX8: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[SELECT4]], [[C7]](s32) - ; GFX8: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[LSHR1]](s64) - ; GFX8: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[TRUNC1]] - ; GFX8: [[ICMP5:%[0-9]+]]:_(s1) = G_ICMP intpred(ugt), [[AND4]](s64), [[C8]] - ; GFX8: [[ICMP6:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[AND4]](s64), [[C8]] - ; GFX8: [[AND5:%[0-9]+]]:_(s32) = G_AND [[OR1]], [[C9]] - ; GFX8: [[SELECT5:%[0-9]+]]:_(s32) = G_SELECT [[ICMP6]](s1), [[AND5]], [[C1]] - ; GFX8: [[SELECT6:%[0-9]+]]:_(s32) = G_SELECT [[ICMP5]](s1), [[C9]], [[SELECT5]] - ; GFX8: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[OR1]], [[SELECT6]] - ; GFX8: [[FNEG1:%[0-9]+]]:_(s32) = G_FNEG [[ADD1]] - ; GFX8: [[ICMP7:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[ASHR1]](s64), [[C2]] - ; GFX8: [[SELECT7:%[0-9]+]]:_(s32) = G_SELECT [[ICMP7]](s1), [[FNEG1]], [[ADD1]] - ; GFX8: [[FPTRUNC1:%[0-9]+]]:_(s16) = G_FPTRUNC [[SELECT7]](s32) + ; GFX8: [[XOR1:%[0-9]+]]:_(s32) = G_XOR [[UV6]], [[UV7]] + ; GFX8: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(sge), [[XOR1]](s32), [[C1]] + ; GFX8: [[SELECT3:%[0-9]+]]:_(s32) = G_SELECT [[ICMP3]](s1), [[C4]], [[C]] + ; GFX8: [[INT2:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.sffbh), [[UV7]](s32) + ; GFX8: [[ICMP4:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[INT2]](s32), [[C3]] + ; GFX8: [[SELECT4:%[0-9]+]]:_(s32) = G_SELECT [[ICMP4]](s1), [[INT2]], [[SELECT3]] + ; GFX8: [[SUB2:%[0-9]+]]:_(s32) = G_SUB [[SELECT4]], [[C2]] + ; GFX8: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[SUB2]](s32) + ; GFX8: [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SHL1]](s64) + ; GFX8: [[ICMP5:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV8]](s32), [[C1]] + ; GFX8: [[SELECT5:%[0-9]+]]:_(s32) = G_SELECT [[ICMP5]](s1), [[C2]], [[C1]] + ; GFX8: [[OR1:%[0-9]+]]:_(s32) = G_OR [[UV9]], [[SELECT5]] + ; GFX8: [[SITOFP1:%[0-9]+]]:_(s32) = G_SITOFP [[OR1]](s32) + ; GFX8: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[C]], [[SUB2]] + ; GFX8: [[INT3:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.ldexp), [[SITOFP1]](s32), [[SUB3]](s32) + ; GFX8: [[FPTRUNC1:%[0-9]+]]:_(s16) = G_FPTRUNC [[INT3]](s32) ; GFX8: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[FPTRUNC]](s16) ; GFX8: [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[FPTRUNC1]](s16) - ; GFX8: [[C10:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 - ; GFX8: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[ZEXT1]], [[C10]](s32) - ; GFX8: [[OR2:%[0-9]+]]:_(s32) = G_OR [[ZEXT]], [[SHL4]] + ; GFX8: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 + ; GFX8: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[ZEXT1]], [[C5]](s32) + ; GFX8: [[OR2:%[0-9]+]]:_(s32) = G_OR [[ZEXT]], [[SHL2]] ; GFX8: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR2]](s32) ; GFX8: $vgpr0 = COPY [[BITCAST]](<2 x s16>) %0:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3 diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-uitofp.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-uitofp.mir --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-uitofp.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-uitofp.mir @@ -73,62 +73,40 @@ ; GFX6-LABEL: name: test_uitofp_s64_to_s32 ; GFX6: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1 - ; GFX6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 - ; GFX6: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; GFX6: [[CTLZ_ZERO_UNDEF:%[0-9]+]]:_(s32) = G_CTLZ_ZERO_UNDEF [[COPY]](s64) - ; GFX6: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 
190 - ; GFX6: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[C2]], [[CTLZ_ZERO_UNDEF]] - ; GFX6: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[COPY]](s64), [[C1]] - ; GFX6: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[SUB]], [[C]] - ; GFX6: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 9223372036854775807 - ; GFX6: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[CTLZ_ZERO_UNDEF]](s32) - ; GFX6: [[AND:%[0-9]+]]:_(s64) = G_AND [[SHL]], [[C3]] - ; GFX6: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 1099511627775 - ; GFX6: [[AND1:%[0-9]+]]:_(s64) = G_AND [[AND]], [[C4]] - ; GFX6: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 40 - ; GFX6: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND]], [[C5]](s32) - ; GFX6: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 23 - ; GFX6: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[SELECT]], [[C6]](s32) - ; GFX6: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[LSHR]](s64) - ; GFX6: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[TRUNC]] - ; GFX6: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 549755813888 - ; GFX6: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ugt), [[AND1]](s64), [[C7]] - ; GFX6: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[AND1]](s64), [[C7]] - ; GFX6: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 - ; GFX6: [[AND2:%[0-9]+]]:_(s32) = G_AND [[OR]], [[C8]] - ; GFX6: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[AND2]], [[C]] - ; GFX6: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[C8]], [[SELECT1]] - ; GFX6: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[OR]], [[SELECT2]] - ; GFX6: $vgpr0 = COPY [[ADD]](s32) + ; GFX6: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64) + ; GFX6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32 + ; GFX6: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 + ; GFX6: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 + ; GFX6: [[CTLZ_ZERO_UNDEF:%[0-9]+]]:_(s32) = G_CTLZ_ZERO_UNDEF [[UV1]](s32) + ; GFX6: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[UV1]](s32), [[C1]] + ; GFX6: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[C]], [[CTLZ_ZERO_UNDEF]] + ; GFX6: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[SELECT]](s32) + ; GFX6: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SHL]](s64) + ; GFX6: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV2]](s32), [[C1]] + ; GFX6: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[C2]], [[C1]] + ; GFX6: [[OR:%[0-9]+]]:_(s32) = G_OR [[UV3]], [[SELECT1]] + ; GFX6: [[UITOFP:%[0-9]+]]:_(s32) = G_UITOFP [[OR]](s32) + ; GFX6: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[C]], [[SELECT]] + ; GFX6: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.ldexp), [[UITOFP]](s32), [[SUB]](s32) + ; GFX6: $vgpr0 = COPY [[INT]](s32) ; GFX8-LABEL: name: test_uitofp_s64_to_s32 ; GFX8: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1 - ; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 - ; GFX8: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; GFX8: [[CTLZ_ZERO_UNDEF:%[0-9]+]]:_(s32) = G_CTLZ_ZERO_UNDEF [[COPY]](s64) - ; GFX8: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 190 - ; GFX8: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[C2]], [[CTLZ_ZERO_UNDEF]] - ; GFX8: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[COPY]](s64), [[C1]] - ; GFX8: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[SUB]], [[C]] - ; GFX8: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 9223372036854775807 - ; GFX8: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[CTLZ_ZERO_UNDEF]](s32) - ; GFX8: [[AND:%[0-9]+]]:_(s64) = G_AND [[SHL]], [[C3]] - ; GFX8: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 1099511627775 - ; GFX8: [[AND1:%[0-9]+]]:_(s64) = G_AND [[AND]], [[C4]] - ; GFX8: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 40 - ; GFX8: 
[[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND]], [[C5]](s32) - ; GFX8: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 23 - ; GFX8: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[SELECT]], [[C6]](s32) - ; GFX8: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[LSHR]](s64) - ; GFX8: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[TRUNC]] - ; GFX8: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 549755813888 - ; GFX8: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ugt), [[AND1]](s64), [[C7]] - ; GFX8: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[AND1]](s64), [[C7]] - ; GFX8: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 - ; GFX8: [[AND2:%[0-9]+]]:_(s32) = G_AND [[OR]], [[C8]] - ; GFX8: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[AND2]], [[C]] - ; GFX8: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[C8]], [[SELECT1]] - ; GFX8: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[OR]], [[SELECT2]] - ; GFX8: $vgpr0 = COPY [[ADD]](s32) + ; GFX8: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64) + ; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32 + ; GFX8: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 + ; GFX8: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 + ; GFX8: [[CTLZ_ZERO_UNDEF:%[0-9]+]]:_(s32) = G_CTLZ_ZERO_UNDEF [[UV1]](s32) + ; GFX8: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[UV1]](s32), [[C1]] + ; GFX8: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[C]], [[CTLZ_ZERO_UNDEF]] + ; GFX8: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[SELECT]](s32) + ; GFX8: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SHL]](s64) + ; GFX8: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV2]](s32), [[C1]] + ; GFX8: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[C2]], [[C1]] + ; GFX8: [[OR:%[0-9]+]]:_(s32) = G_OR [[UV3]], [[SELECT1]] + ; GFX8: [[UITOFP:%[0-9]+]]:_(s32) = G_UITOFP [[OR]](s32) + ; GFX8: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[C]], [[SELECT]] + ; GFX8: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.ldexp), [[UITOFP]](s32), [[SUB]](s32) + ; GFX8: $vgpr0 = COPY [[INT]](s32) %0:_(s64) = COPY $vgpr0_vgpr1 %1:_(s32) = G_UITOFP %0 $vgpr0 = COPY %1 @@ -143,18 +121,18 @@ ; GFX6-LABEL: name: test_uitofp_s64_to_s64 ; GFX6: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1 ; GFX6: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64) + ; GFX6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32 ; GFX6: [[UITOFP:%[0-9]+]]:_(s64) = G_UITOFP [[UV1]](s32) ; GFX6: [[UITOFP1:%[0-9]+]]:_(s64) = G_UITOFP [[UV]](s32) - ; GFX6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32 ; GFX6: [[INT:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.ldexp), [[UITOFP]](s64), [[C]](s32) ; GFX6: [[FADD:%[0-9]+]]:_(s64) = G_FADD [[INT]], [[UITOFP1]] ; GFX6: $vgpr0_vgpr1 = COPY [[FADD]](s64) ; GFX8-LABEL: name: test_uitofp_s64_to_s64 ; GFX8: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1 ; GFX8: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64) + ; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32 ; GFX8: [[UITOFP:%[0-9]+]]:_(s64) = G_UITOFP [[UV1]](s32) ; GFX8: [[UITOFP1:%[0-9]+]]:_(s64) = G_UITOFP [[UV]](s32) - ; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32 ; GFX8: [[INT:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.ldexp), [[UITOFP]](s64), [[C]](s32) ; GFX8: [[FADD:%[0-9]+]]:_(s64) = G_FADD [[INT]], [[UITOFP1]] ; GFX8: $vgpr0_vgpr1 = COPY [[FADD]](s64) @@ -415,65 +393,43 @@ ; GFX6: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8589934591 ; GFX6: [[COPY1:%[0-9]+]]:_(s64) = COPY [[COPY]](s64) ; GFX6: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY1]], [[C]] - ; GFX6: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 - ; GFX6: 
[[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; GFX6: [[CTLZ_ZERO_UNDEF:%[0-9]+]]:_(s32) = G_CTLZ_ZERO_UNDEF [[AND]](s64) - ; GFX6: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 190 - ; GFX6: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[C3]], [[CTLZ_ZERO_UNDEF]] - ; GFX6: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[AND]](s64), [[C2]] - ; GFX6: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[SUB]], [[C1]] - ; GFX6: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 9223372036854775807 - ; GFX6: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[AND]], [[CTLZ_ZERO_UNDEF]](s32) - ; GFX6: [[AND1:%[0-9]+]]:_(s64) = G_AND [[SHL]], [[C4]] - ; GFX6: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 1099511627775 - ; GFX6: [[AND2:%[0-9]+]]:_(s64) = G_AND [[AND1]], [[C5]] - ; GFX6: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 40 - ; GFX6: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND1]], [[C6]](s32) - ; GFX6: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 23 - ; GFX6: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[SELECT]], [[C7]](s32) - ; GFX6: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[LSHR]](s64) - ; GFX6: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[TRUNC]] - ; GFX6: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 549755813888 - ; GFX6: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ugt), [[AND2]](s64), [[C8]] - ; GFX6: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[AND2]](s64), [[C8]] - ; GFX6: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 - ; GFX6: [[AND3:%[0-9]+]]:_(s32) = G_AND [[OR]], [[C9]] - ; GFX6: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[AND3]], [[C1]] - ; GFX6: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[C9]], [[SELECT1]] - ; GFX6: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[OR]], [[SELECT2]] - ; GFX6: $vgpr0 = COPY [[ADD]](s32) + ; GFX6: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AND]](s64) + ; GFX6: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 32 + ; GFX6: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 + ; GFX6: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 + ; GFX6: [[CTLZ_ZERO_UNDEF:%[0-9]+]]:_(s32) = G_CTLZ_ZERO_UNDEF [[UV1]](s32) + ; GFX6: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[UV1]](s32), [[C2]] + ; GFX6: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[C1]], [[CTLZ_ZERO_UNDEF]] + ; GFX6: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[AND]], [[SELECT]](s32) + ; GFX6: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SHL]](s64) + ; GFX6: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV2]](s32), [[C2]] + ; GFX6: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[C3]], [[C2]] + ; GFX6: [[OR:%[0-9]+]]:_(s32) = G_OR [[UV3]], [[SELECT1]] + ; GFX6: [[UITOFP:%[0-9]+]]:_(s32) = G_UITOFP [[OR]](s32) + ; GFX6: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[C1]], [[SELECT]] + ; GFX6: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.ldexp), [[UITOFP]](s32), [[SUB]](s32) + ; GFX6: $vgpr0 = COPY [[INT]](s32) ; GFX8-LABEL: name: test_uitofp_s33_to_s32 ; GFX8: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1 ; GFX8: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8589934591 ; GFX8: [[COPY1:%[0-9]+]]:_(s64) = COPY [[COPY]](s64) ; GFX8: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY1]], [[C]] - ; GFX8: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 - ; GFX8: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; GFX8: [[CTLZ_ZERO_UNDEF:%[0-9]+]]:_(s32) = G_CTLZ_ZERO_UNDEF [[AND]](s64) - ; GFX8: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 190 - ; GFX8: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[C3]], [[CTLZ_ZERO_UNDEF]] - ; GFX8: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[AND]](s64), [[C2]] - ; GFX8: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[SUB]], [[C1]] - ; GFX8: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 
9223372036854775807 - ; GFX8: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[AND]], [[CTLZ_ZERO_UNDEF]](s32) - ; GFX8: [[AND1:%[0-9]+]]:_(s64) = G_AND [[SHL]], [[C4]] - ; GFX8: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 1099511627775 - ; GFX8: [[AND2:%[0-9]+]]:_(s64) = G_AND [[AND1]], [[C5]] - ; GFX8: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 40 - ; GFX8: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND1]], [[C6]](s32) - ; GFX8: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 23 - ; GFX8: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[SELECT]], [[C7]](s32) - ; GFX8: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[LSHR]](s64) - ; GFX8: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[TRUNC]] - ; GFX8: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 549755813888 - ; GFX8: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ugt), [[AND2]](s64), [[C8]] - ; GFX8: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[AND2]](s64), [[C8]] - ; GFX8: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 - ; GFX8: [[AND3:%[0-9]+]]:_(s32) = G_AND [[OR]], [[C9]] - ; GFX8: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[AND3]], [[C1]] - ; GFX8: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[C9]], [[SELECT1]] - ; GFX8: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[OR]], [[SELECT2]] - ; GFX8: $vgpr0 = COPY [[ADD]](s32) + ; GFX8: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AND]](s64) + ; GFX8: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 32 + ; GFX8: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 + ; GFX8: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 + ; GFX8: [[CTLZ_ZERO_UNDEF:%[0-9]+]]:_(s32) = G_CTLZ_ZERO_UNDEF [[UV1]](s32) + ; GFX8: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[UV1]](s32), [[C2]] + ; GFX8: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[C1]], [[CTLZ_ZERO_UNDEF]] + ; GFX8: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[AND]], [[SELECT]](s32) + ; GFX8: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SHL]](s64) + ; GFX8: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV2]](s32), [[C2]] + ; GFX8: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[C3]], [[C2]] + ; GFX8: [[OR:%[0-9]+]]:_(s32) = G_OR [[UV3]], [[SELECT1]] + ; GFX8: [[UITOFP:%[0-9]+]]:_(s32) = G_UITOFP [[OR]](s32) + ; GFX8: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[C1]], [[SELECT]] + ; GFX8: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.ldexp), [[UITOFP]](s32), [[SUB]](s32) + ; GFX8: $vgpr0 = COPY [[INT]](s32) %0:_(s64) = COPY $vgpr0_vgpr1 %1:_(s33) = G_TRUNC %0 %2:_(s32) = G_UITOFP %1 @@ -488,64 +444,42 @@ ; GFX6-LABEL: name: test_uitofp_s64_to_s16 ; GFX6: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1 - ; GFX6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 - ; GFX6: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; GFX6: [[CTLZ_ZERO_UNDEF:%[0-9]+]]:_(s32) = G_CTLZ_ZERO_UNDEF [[COPY]](s64) - ; GFX6: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 190 - ; GFX6: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[C2]], [[CTLZ_ZERO_UNDEF]] - ; GFX6: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[COPY]](s64), [[C1]] - ; GFX6: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[SUB]], [[C]] - ; GFX6: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 9223372036854775807 - ; GFX6: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[CTLZ_ZERO_UNDEF]](s32) - ; GFX6: [[AND:%[0-9]+]]:_(s64) = G_AND [[SHL]], [[C3]] - ; GFX6: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 1099511627775 - ; GFX6: [[AND1:%[0-9]+]]:_(s64) = G_AND [[AND]], [[C4]] - ; GFX6: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 40 - ; GFX6: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND]], [[C5]](s32) - ; GFX6: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 23 - ; GFX6: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[SELECT]], [[C6]](s32) - ; GFX6: 
[[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[LSHR]](s64) - ; GFX6: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[TRUNC]] - ; GFX6: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 549755813888 - ; GFX6: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ugt), [[AND1]](s64), [[C7]] - ; GFX6: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[AND1]](s64), [[C7]] - ; GFX6: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 - ; GFX6: [[AND2:%[0-9]+]]:_(s32) = G_AND [[OR]], [[C8]] - ; GFX6: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[AND2]], [[C]] - ; GFX6: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[C8]], [[SELECT1]] - ; GFX6: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[OR]], [[SELECT2]] - ; GFX6: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[ADD]](s32) + ; GFX6: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64) + ; GFX6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32 + ; GFX6: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 + ; GFX6: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 + ; GFX6: [[CTLZ_ZERO_UNDEF:%[0-9]+]]:_(s32) = G_CTLZ_ZERO_UNDEF [[UV1]](s32) + ; GFX6: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[UV1]](s32), [[C1]] + ; GFX6: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[C]], [[CTLZ_ZERO_UNDEF]] + ; GFX6: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[SELECT]](s32) + ; GFX6: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SHL]](s64) + ; GFX6: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV2]](s32), [[C1]] + ; GFX6: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[C2]], [[C1]] + ; GFX6: [[OR:%[0-9]+]]:_(s32) = G_OR [[UV3]], [[SELECT1]] + ; GFX6: [[UITOFP:%[0-9]+]]:_(s32) = G_UITOFP [[OR]](s32) + ; GFX6: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[C]], [[SELECT]] + ; GFX6: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.ldexp), [[UITOFP]](s32), [[SUB]](s32) + ; GFX6: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[INT]](s32) ; GFX6: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FPTRUNC]](s16) ; GFX6: $vgpr0 = COPY [[ANYEXT]](s32) ; GFX8-LABEL: name: test_uitofp_s64_to_s16 ; GFX8: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1 - ; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 - ; GFX8: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; GFX8: [[CTLZ_ZERO_UNDEF:%[0-9]+]]:_(s32) = G_CTLZ_ZERO_UNDEF [[COPY]](s64) - ; GFX8: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 190 - ; GFX8: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[C2]], [[CTLZ_ZERO_UNDEF]] - ; GFX8: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[COPY]](s64), [[C1]] - ; GFX8: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[SUB]], [[C]] - ; GFX8: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 9223372036854775807 - ; GFX8: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[CTLZ_ZERO_UNDEF]](s32) - ; GFX8: [[AND:%[0-9]+]]:_(s64) = G_AND [[SHL]], [[C3]] - ; GFX8: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 1099511627775 - ; GFX8: [[AND1:%[0-9]+]]:_(s64) = G_AND [[AND]], [[C4]] - ; GFX8: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 40 - ; GFX8: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND]], [[C5]](s32) - ; GFX8: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 23 - ; GFX8: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[SELECT]], [[C6]](s32) - ; GFX8: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[LSHR]](s64) - ; GFX8: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[TRUNC]] - ; GFX8: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 549755813888 - ; GFX8: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ugt), [[AND1]](s64), [[C7]] - ; GFX8: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[AND1]](s64), [[C7]] - ; GFX8: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 - ; GFX8: [[AND2:%[0-9]+]]:_(s32) = G_AND [[OR]], [[C8]] - ; GFX8: [[SELECT1:%[0-9]+]]:_(s32) = 
G_SELECT [[ICMP2]](s1), [[AND2]], [[C]] - ; GFX8: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[C8]], [[SELECT1]] - ; GFX8: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[OR]], [[SELECT2]] - ; GFX8: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[ADD]](s32) + ; GFX8: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64) + ; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32 + ; GFX8: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 + ; GFX8: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 + ; GFX8: [[CTLZ_ZERO_UNDEF:%[0-9]+]]:_(s32) = G_CTLZ_ZERO_UNDEF [[UV1]](s32) + ; GFX8: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[UV1]](s32), [[C1]] + ; GFX8: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[C]], [[CTLZ_ZERO_UNDEF]] + ; GFX8: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[SELECT]](s32) + ; GFX8: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SHL]](s64) + ; GFX8: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV2]](s32), [[C1]] + ; GFX8: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[C2]], [[C1]] + ; GFX8: [[OR:%[0-9]+]]:_(s32) = G_OR [[UV3]], [[SELECT1]] + ; GFX8: [[UITOFP:%[0-9]+]]:_(s32) = G_UITOFP [[OR]](s32) + ; GFX8: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[C]], [[SELECT]] + ; GFX8: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.ldexp), [[UITOFP]](s32), [[SUB]](s32) + ; GFX8: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[INT]](s32) ; GFX8: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FPTRUNC]](s16) ; GFX8: $vgpr0 = COPY [[ANYEXT]](s32) %0:_(s64) = COPY $vgpr0_vgpr1 @@ -563,111 +497,79 @@ ; GFX6-LABEL: name: test_sitofp_v2s64_to_v2s16 ; GFX6: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3 ; GFX6: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](<2 x s64>) - ; GFX6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 - ; GFX6: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; GFX6: [[CTLZ_ZERO_UNDEF:%[0-9]+]]:_(s32) = G_CTLZ_ZERO_UNDEF [[UV]](s64) - ; GFX6: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 190 - ; GFX6: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[C2]], [[CTLZ_ZERO_UNDEF]] - ; GFX6: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV]](s64), [[C1]] - ; GFX6: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[SUB]], [[C]] - ; GFX6: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 9223372036854775807 - ; GFX6: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV]], [[CTLZ_ZERO_UNDEF]](s32) - ; GFX6: [[AND:%[0-9]+]]:_(s64) = G_AND [[SHL]], [[C3]] - ; GFX6: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 1099511627775 - ; GFX6: [[AND1:%[0-9]+]]:_(s64) = G_AND [[AND]], [[C4]] - ; GFX6: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 40 - ; GFX6: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND]], [[C5]](s32) - ; GFX6: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 23 - ; GFX6: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[SELECT]], [[C6]](s32) - ; GFX6: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[LSHR]](s64) - ; GFX6: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[TRUNC]] - ; GFX6: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 549755813888 - ; GFX6: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ugt), [[AND1]](s64), [[C7]] - ; GFX6: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[AND1]](s64), [[C7]] - ; GFX6: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 - ; GFX6: [[AND2:%[0-9]+]]:_(s32) = G_AND [[OR]], [[C8]] - ; GFX6: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[AND2]], [[C]] - ; GFX6: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[C8]], [[SELECT1]] - ; GFX6: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[OR]], [[SELECT2]] - ; GFX6: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[ADD]](s32) - ; GFX6: [[CTLZ_ZERO_UNDEF1:%[0-9]+]]:_(s32) = G_CTLZ_ZERO_UNDEF 
[[UV1]](s64) - ; GFX6: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C2]], [[CTLZ_ZERO_UNDEF1]] - ; GFX6: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV1]](s64), [[C1]] - ; GFX6: [[SELECT3:%[0-9]+]]:_(s32) = G_SELECT [[ICMP3]](s1), [[SUB1]], [[C]] - ; GFX6: [[SHL2:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[CTLZ_ZERO_UNDEF1]](s32) - ; GFX6: [[AND3:%[0-9]+]]:_(s64) = G_AND [[SHL2]], [[C3]] - ; GFX6: [[AND4:%[0-9]+]]:_(s64) = G_AND [[AND3]], [[C4]] - ; GFX6: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[AND3]], [[C5]](s32) - ; GFX6: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[SELECT3]], [[C6]](s32) - ; GFX6: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[LSHR1]](s64) - ; GFX6: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[TRUNC1]] - ; GFX6: [[ICMP4:%[0-9]+]]:_(s1) = G_ICMP intpred(ugt), [[AND4]](s64), [[C7]] - ; GFX6: [[ICMP5:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[AND4]](s64), [[C7]] - ; GFX6: [[AND5:%[0-9]+]]:_(s32) = G_AND [[OR1]], [[C8]] - ; GFX6: [[SELECT4:%[0-9]+]]:_(s32) = G_SELECT [[ICMP5]](s1), [[AND5]], [[C]] - ; GFX6: [[SELECT5:%[0-9]+]]:_(s32) = G_SELECT [[ICMP4]](s1), [[C8]], [[SELECT4]] - ; GFX6: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[OR1]], [[SELECT5]] - ; GFX6: [[FPTRUNC1:%[0-9]+]]:_(s16) = G_FPTRUNC [[ADD1]](s32) + ; GFX6: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV]](s64) + ; GFX6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32 + ; GFX6: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 + ; GFX6: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 + ; GFX6: [[CTLZ_ZERO_UNDEF:%[0-9]+]]:_(s32) = G_CTLZ_ZERO_UNDEF [[UV3]](s32) + ; GFX6: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[UV3]](s32), [[C1]] + ; GFX6: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[C]], [[CTLZ_ZERO_UNDEF]] + ; GFX6: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV]], [[SELECT]](s32) + ; GFX6: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SHL]](s64) + ; GFX6: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV4]](s32), [[C1]] + ; GFX6: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[C2]], [[C1]] + ; GFX6: [[OR:%[0-9]+]]:_(s32) = G_OR [[UV5]], [[SELECT1]] + ; GFX6: [[UITOFP:%[0-9]+]]:_(s32) = G_UITOFP [[OR]](s32) + ; GFX6: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[C]], [[SELECT]] + ; GFX6: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.ldexp), [[UITOFP]](s32), [[SUB]](s32) + ; GFX6: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[INT]](s32) + ; GFX6: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV1]](s64) + ; GFX6: [[CTLZ_ZERO_UNDEF1:%[0-9]+]]:_(s32) = G_CTLZ_ZERO_UNDEF [[UV7]](s32) + ; GFX6: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[UV7]](s32), [[C1]] + ; GFX6: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[C]], [[CTLZ_ZERO_UNDEF1]] + ; GFX6: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[SELECT2]](s32) + ; GFX6: [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SHL1]](s64) + ; GFX6: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV8]](s32), [[C1]] + ; GFX6: [[SELECT3:%[0-9]+]]:_(s32) = G_SELECT [[ICMP3]](s1), [[C2]], [[C1]] + ; GFX6: [[OR1:%[0-9]+]]:_(s32) = G_OR [[UV9]], [[SELECT3]] + ; GFX6: [[UITOFP1:%[0-9]+]]:_(s32) = G_UITOFP [[OR1]](s32) + ; GFX6: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[SELECT2]] + ; GFX6: [[INT1:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.ldexp), [[UITOFP1]](s32), [[SUB1]](s32) + ; GFX6: [[FPTRUNC1:%[0-9]+]]:_(s16) = G_FPTRUNC [[INT1]](s32) ; GFX6: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[FPTRUNC]](s16) ; GFX6: [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[FPTRUNC1]](s16) - ; GFX6: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 - ; GFX6: [[SHL4:%[0-9]+]]:_(s32) = G_SHL 
[[ZEXT1]], [[C9]](s32) - ; GFX6: [[OR2:%[0-9]+]]:_(s32) = G_OR [[ZEXT]], [[SHL4]] + ; GFX6: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 + ; GFX6: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[ZEXT1]], [[C3]](s32) + ; GFX6: [[OR2:%[0-9]+]]:_(s32) = G_OR [[ZEXT]], [[SHL2]] ; GFX6: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR2]](s32) ; GFX6: $vgpr0 = COPY [[BITCAST]](<2 x s16>) ; GFX8-LABEL: name: test_sitofp_v2s64_to_v2s16 ; GFX8: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3 ; GFX8: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](<2 x s64>) - ; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 - ; GFX8: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; GFX8: [[CTLZ_ZERO_UNDEF:%[0-9]+]]:_(s32) = G_CTLZ_ZERO_UNDEF [[UV]](s64) - ; GFX8: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 190 - ; GFX8: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[C2]], [[CTLZ_ZERO_UNDEF]] - ; GFX8: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV]](s64), [[C1]] - ; GFX8: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[SUB]], [[C]] - ; GFX8: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 9223372036854775807 - ; GFX8: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV]], [[CTLZ_ZERO_UNDEF]](s32) - ; GFX8: [[AND:%[0-9]+]]:_(s64) = G_AND [[SHL]], [[C3]] - ; GFX8: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 1099511627775 - ; GFX8: [[AND1:%[0-9]+]]:_(s64) = G_AND [[AND]], [[C4]] - ; GFX8: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 40 - ; GFX8: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND]], [[C5]](s32) - ; GFX8: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 23 - ; GFX8: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[SELECT]], [[C6]](s32) - ; GFX8: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[LSHR]](s64) - ; GFX8: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL1]], [[TRUNC]] - ; GFX8: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 549755813888 - ; GFX8: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ugt), [[AND1]](s64), [[C7]] - ; GFX8: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[AND1]](s64), [[C7]] - ; GFX8: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 - ; GFX8: [[AND2:%[0-9]+]]:_(s32) = G_AND [[OR]], [[C8]] - ; GFX8: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[AND2]], [[C]] - ; GFX8: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[C8]], [[SELECT1]] - ; GFX8: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[OR]], [[SELECT2]] - ; GFX8: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[ADD]](s32) - ; GFX8: [[CTLZ_ZERO_UNDEF1:%[0-9]+]]:_(s32) = G_CTLZ_ZERO_UNDEF [[UV1]](s64) - ; GFX8: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C2]], [[CTLZ_ZERO_UNDEF1]] - ; GFX8: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV1]](s64), [[C1]] - ; GFX8: [[SELECT3:%[0-9]+]]:_(s32) = G_SELECT [[ICMP3]](s1), [[SUB1]], [[C]] - ; GFX8: [[SHL2:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[CTLZ_ZERO_UNDEF1]](s32) - ; GFX8: [[AND3:%[0-9]+]]:_(s64) = G_AND [[SHL2]], [[C3]] - ; GFX8: [[AND4:%[0-9]+]]:_(s64) = G_AND [[AND3]], [[C4]] - ; GFX8: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[AND3]], [[C5]](s32) - ; GFX8: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[SELECT3]], [[C6]](s32) - ; GFX8: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[LSHR1]](s64) - ; GFX8: [[OR1:%[0-9]+]]:_(s32) = G_OR [[SHL3]], [[TRUNC1]] - ; GFX8: [[ICMP4:%[0-9]+]]:_(s1) = G_ICMP intpred(ugt), [[AND4]](s64), [[C7]] - ; GFX8: [[ICMP5:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[AND4]](s64), [[C7]] - ; GFX8: [[AND5:%[0-9]+]]:_(s32) = G_AND [[OR1]], [[C8]] - ; GFX8: [[SELECT4:%[0-9]+]]:_(s32) = G_SELECT [[ICMP5]](s1), [[AND5]], [[C]] - ; GFX8: [[SELECT5:%[0-9]+]]:_(s32) = G_SELECT [[ICMP4]](s1), [[C8]], [[SELECT4]] - ; GFX8: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[OR1]], [[SELECT5]] - ; GFX8: [[FPTRUNC1:%[0-9]+]]:_(s16) = 
G_FPTRUNC [[ADD1]](s32) + ; GFX8: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV]](s64) + ; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32 + ; GFX8: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 + ; GFX8: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 + ; GFX8: [[CTLZ_ZERO_UNDEF:%[0-9]+]]:_(s32) = G_CTLZ_ZERO_UNDEF [[UV3]](s32) + ; GFX8: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[UV3]](s32), [[C1]] + ; GFX8: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[C]], [[CTLZ_ZERO_UNDEF]] + ; GFX8: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV]], [[SELECT]](s32) + ; GFX8: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SHL]](s64) + ; GFX8: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV4]](s32), [[C1]] + ; GFX8: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[C2]], [[C1]] + ; GFX8: [[OR:%[0-9]+]]:_(s32) = G_OR [[UV5]], [[SELECT1]] + ; GFX8: [[UITOFP:%[0-9]+]]:_(s32) = G_UITOFP [[OR]](s32) + ; GFX8: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[C]], [[SELECT]] + ; GFX8: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.ldexp), [[UITOFP]](s32), [[SUB]](s32) + ; GFX8: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[INT]](s32) + ; GFX8: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV1]](s64) + ; GFX8: [[CTLZ_ZERO_UNDEF1:%[0-9]+]]:_(s32) = G_CTLZ_ZERO_UNDEF [[UV7]](s32) + ; GFX8: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[UV7]](s32), [[C1]] + ; GFX8: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[C]], [[CTLZ_ZERO_UNDEF1]] + ; GFX8: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[SELECT2]](s32) + ; GFX8: [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SHL1]](s64) + ; GFX8: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV8]](s32), [[C1]] + ; GFX8: [[SELECT3:%[0-9]+]]:_(s32) = G_SELECT [[ICMP3]](s1), [[C2]], [[C1]] + ; GFX8: [[OR1:%[0-9]+]]:_(s32) = G_OR [[UV9]], [[SELECT3]] + ; GFX8: [[UITOFP1:%[0-9]+]]:_(s32) = G_UITOFP [[OR1]](s32) + ; GFX8: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[SELECT2]] + ; GFX8: [[INT1:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.ldexp), [[UITOFP1]](s32), [[SUB1]](s32) + ; GFX8: [[FPTRUNC1:%[0-9]+]]:_(s16) = G_FPTRUNC [[INT1]](s32) ; GFX8: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[FPTRUNC]](s16) ; GFX8: [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[FPTRUNC1]](s16) - ; GFX8: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 - ; GFX8: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[ZEXT1]], [[C9]](s32) - ; GFX8: [[OR2:%[0-9]+]]:_(s32) = G_OR [[ZEXT]], [[SHL4]] + ; GFX8: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 + ; GFX8: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[ZEXT1]], [[C3]](s32) + ; GFX8: [[OR2:%[0-9]+]]:_(s32) = G_OR [[ZEXT]], [[SHL2]] ; GFX8: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR2]](s32) ; GFX8: $vgpr0 = COPY [[BITCAST]](<2 x s16>) %0:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3 diff --git a/llvm/test/CodeGen/AMDGPU/sint_to_fp.i64.ll b/llvm/test/CodeGen/AMDGPU/sint_to_fp.i64.ll --- a/llvm/test/CodeGen/AMDGPU/sint_to_fp.i64.ll +++ b/llvm/test/CodeGen/AMDGPU/sint_to_fp.i64.ll @@ -1,5 +1,5 @@ -; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck --check-prefixes=GCN,FUNC %s -; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck --check-prefixes=GCN,FUNC %s +; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck --check-prefixes=GCN,SI,FUNC %s +; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck --check-prefixes=GCN,VI,FUNC %s ; FIXME: This should be merged with sint_to_fp.ll, but s_sint_to_fp_v2i64 crashes on 
r600
@@ -12,19 +12,19 @@
 ; FUNC-LABEL: {{^}}v_sint_to_fp_i64_to_f16:
 ; GCN: {{buffer|flat}}_load_dwordx2
-
-; GCN: v_ashrrev_i32_e32 v{{[0-9]+}}, 31, v{{[0-9]+}}
-; GCN: v_xor_b32
-
-; GCN: v_ffbh_u32
-; GCN: v_ffbh_u32
-; GCN: v_cndmask
-; GCN: v_cndmask
-
-; GCN-DAG: v_cmp_eq_u64
-; GCN-DAG: v_cmp_gt_u64
-
-; GCN: v_cndmask_b32_e64 [[SIGN_SEL:v[0-9]+]], v{{[0-9]+}}, -v{{[0-9]+}}
+; GCN: s_waitcnt vmcnt(0)
+; GCN: v_ffbh_i32_e32 [[LZ:v[0-9]+]], [[HI:v[0-9]+]]
+; GCN: v_cmp_ne_u32_e32 [[FLAG:.+]], -1, [[LZ]]
+; GCN: v_cndmask_b32_e32 [[SHAMT:v[0-9]+]], {{v[0-9]+}}, [[LZ]], [[FLAG]]
+; SI: v_add_i32_e32 [[AMT:v[0-9]+]], {{.*}}, -1, [[SHAMT]]
+; SI: v_lshl_b64 {{.*}}, {{.*}}, [[AMT]]
+; VI: v_add_u32_e32 [[AMT:v[0-9]+]], {{.*}}, -1, [[SHAMT]]
+; VI: v_lshlrev_b64 {{.*}}, [[AMT]], {{.*}}
+; GCN: v_cvt_f32_i32_e32 [[FLT:v[0-9]+]], {{v[0-9]+}}
+; SI: v_sub_i32_e32 [[EXP:v[0-9]+]], {{.*}}, 33, [[SHAMT]]
+; SI: v_ldexp_f32_e32 [[SIGN_SEL:v[0-9]+]], [[FLT]], [[EXP]]
+; VI: v_sub_u32_e32 [[EXP:v[0-9]+]], {{.*}}, 33, [[SHAMT]]
+; VI: v_ldexp_f32 [[SIGN_SEL:v[0-9]+]], [[FLT]], [[EXP]]
 ; GCN: v_cvt_f16_f32_e32 [[SIGN_SEL_F16:v[0-9]+]], [[SIGN_SEL]]
 ; GCN: {{buffer|flat}}_store_short {{.*}}[[SIGN_SEL_F16]]
 define amdgpu_kernel void @v_sint_to_fp_i64_to_f16(half addrspace(1)* %out, i64 addrspace(1)* %in) #0 {
@@ -46,19 +46,19 @@
 ; FUNC-LABEL: {{^}}v_sint_to_fp_i64_to_f32:
 ; GCN: {{buffer|flat}}_load_dwordx2
-
-; GCN: v_ashrrev_i32_e32 v{{[0-9]+}}, 31, v{{[0-9]+}}
-; GCN: v_xor_b32
-
-; GCN: v_ffbh_u32
-; GCN: v_ffbh_u32
-; GCN: v_cndmask
-; GCN: v_cndmask
-
-; GCN-DAG: v_cmp_eq_u64
-; GCN-DAG: v_cmp_gt_u64
-
-; GCN: v_cndmask_b32_e64 [[SIGN_SEL:v[0-9]+]], v{{[0-9]+}}, -v{{[0-9]+}}
+; GCN: s_waitcnt
+; GCN: v_ffbh_i32_e32 [[LZ:v[0-9]+]], [[HI:v[0-9]+]]
+; GCN: v_cmp_ne_u32_e32 [[FLAG:.+]], -1, [[LZ]]
+; GCN: v_cndmask_b32_e32 [[SHAMT:v[0-9]+]], {{v[0-9]+}}, [[LZ]], [[FLAG]]
+; SI: v_add_i32_e32 [[AMT:v[0-9]+]], {{.*}}, -1, [[SHAMT]]
+; SI: v_lshl_b64 {{.*}}, {{.*}}, [[AMT]]
+; VI: v_add_u32_e32 [[AMT:v[0-9]+]], {{.*}}, -1, [[SHAMT]]
+; VI: v_lshlrev_b64 {{.*}}, [[AMT]], {{.*}}
+; GCN: v_cvt_f32_i32_e32 [[FLT:v[0-9]+]], {{v[0-9]+}}
+; SI: v_sub_i32_e32 [[EXP:v[0-9]+]], {{.*}}, 33, [[SHAMT]]
+; SI: v_ldexp_f32_e32 [[SIGN_SEL:v[0-9]+]], [[FLT]], [[EXP]]
+; VI: v_sub_u32_e32 [[EXP:v[0-9]+]], {{.*}}, 33, [[SHAMT]]
+; VI: v_ldexp_f32 [[SIGN_SEL:v[0-9]+]], [[FLT]], [[EXP]]
 ; GCN: {{buffer|flat}}_store_dword {{.*}}[[SIGN_SEL]]
 define amdgpu_kernel void @v_sint_to_fp_i64_to_f32(float addrspace(1)* %out, i64 addrspace(1)* %in) #0 {
 %tid = call i32 @llvm.amdgcn.workitem.id.x()
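Aside for readers cross-checking the new check lines (not part of the patch): the unsigned i64 -> f32 sequence they describe can be modelled on the host roughly as below. The function name is made up for illustration, __builtin_clz is the GCC/Clang builtin, and std::ldexp stands in for the ldexp instruction matched by v_ldexp_f32. The signed tests follow the same pattern but count leading sign bits with v_ffbh_i32 and shift one bit less, which is why their checks carry the -1 and 33 adjustments.

#include <cmath>
#include <cstdint>

// Host-side model (illustrative only) of the expanded unsigned conversion:
// split, normalize by the high word's leading zeros (32 if it is zero),
// fold the shifted-out low bits into a sticky bit, convert 32 bits natively,
// then scale the result back.
static float Uint64ToFloatModel(uint64_t U) {
  uint32_t Hi = static_cast<uint32_t>(U >> 32);
  // v_ffbh_u32 / v_cndmask: leading zeros of the high word, or 32 if zero.
  uint32_t ShAmt = Hi ? static_cast<uint32_t>(__builtin_clz(Hi)) : 32u;
  uint64_t Shifted = U << ShAmt;                        // v_lshl_b64
  uint32_t NewHi = static_cast<uint32_t>(Shifted >> 32);
  uint32_t NewLo = static_cast<uint32_t>(Shifted);
  NewHi |= NewLo ? 1u : 0u;                             // sticky/rounding bit
  // v_cvt_f32_u32 followed by v_ldexp_f32 with exponent 32 - ShAmt.
  return std::ldexp(static_cast<float>(NewHi), 32 - static_cast<int>(ShAmt));
}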
diff --git a/llvm/test/CodeGen/AMDGPU/uint_to_fp.i64.ll b/llvm/test/CodeGen/AMDGPU/uint_to_fp.i64.ll
--- a/llvm/test/CodeGen/AMDGPU/uint_to_fp.i64.ll
+++ b/llvm/test/CodeGen/AMDGPU/uint_to_fp.i64.ll
@@ -1,5 +1,5 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck --check-prefixes=GCN,FUNC %s
-; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck --check-prefixes=GCN,FUNC %s
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck --check-prefixes=GCN,SI,FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck --check-prefixes=GCN,VI,FUNC %s
 ; FIXME: This should be merged with uint_to_fp.ll, but s_uint_to_fp_v2i64 crashes on r600
@@ -12,16 +12,17 @@
 ; FUNC-LABEL: {{^}}v_uint_to_fp_i64_to_f16:
 ; GCN: {{buffer|flat}}_load_dwordx2
-
-; GCN: v_ffbh_u32
-; GCN: v_ffbh_u32
-; GCN: v_cndmask
-; GCN: v_cndmask
-
-; GCN-DAG: v_cmp_eq_u64
-; GCN-DAG: v_cmp_gt_u64
-
-; GCN: v_add_{{[iu]}}32_e32 [[VR:v[0-9]+]]
+; GCN: s_waitcnt vmcnt(0)
+; GCN: v_ffbh_u32_e32 [[LZ:v[0-9]+]], [[HI:v[0-9]+]]
+; GCN: v_cmp_ne_u32_e32 [[FLAG:.+]], 0, [[HI]]
+; GCN: v_cndmask_b32_e32 [[AMT:v[0-9]+]], 32, [[LZ]], [[FLAG]]
+; SI: v_lshl_b64 {{.*}}, {{.*}}, [[AMT]]
+; VI: v_lshlrev_b64 {{.*}}, [[AMT]], {{.*}}
+; GCN: v_cvt_f32_u32_e32 [[FLT:v[0-9]+]], {{v[0-9]+}}
+; SI: v_sub_i32_e32 [[EXP:v[0-9]+]], {{.*}}, 32, [[AMT]]
+; VI: v_sub_u32_e32 [[EXP:v[0-9]+]], {{.*}}, 32, [[AMT]]
+; SI: v_ldexp_f32_e32 [[VR:v[0-9]+]], [[FLT]], [[EXP]]
+; VI: v_ldexp_f32 [[VR:v[0-9]+]], [[FLT]], [[EXP]]
 ; GCN: v_cvt_f16_f32_e32 [[VR_F16:v[0-9]+]], [[VR]]
 ; GCN: {{buffer|flat}}_store_short {{.*}}[[VR_F16]]
 define amdgpu_kernel void @v_uint_to_fp_i64_to_f16(half addrspace(1)* %out, i64 addrspace(1)* %in) #0 {
@@ -43,16 +44,17 @@
 ; FUNC-LABEL: {{^}}v_uint_to_fp_i64_to_f32:
 ; GCN: {{buffer|flat}}_load_dwordx2
-
-; GCN: v_ffbh_u32
-; GCN: v_ffbh_u32
-; GCN: v_cndmask
-; GCN: v_cndmask
-
-; GCN-DAG: v_cmp_eq_u64
-; GCN-DAG: v_cmp_gt_u64
-
-; GCN: v_add_{{[iu]}}32_e32 [[VR:v[0-9]+]]
+; GCN: s_waitcnt
+; GCN: v_ffbh_u32_e32 [[LZ:v[0-9]+]], [[HI:v[0-9]+]]
+; GCN: v_cmp_ne_u32_e32 [[FLAG:.+]], 0, [[HI]]
+; GCN: v_cndmask_b32_e32 [[AMT:v[0-9]+]], 32, [[LZ]], [[FLAG]]
+; SI: v_lshl_b64 {{.*}}, {{.*}}, [[AMT]]
+; VI: v_lshlrev_b64 {{.*}}, [[AMT]], {{.*}}
+; GCN: v_cvt_f32_u32_e32 [[FLT:v[0-9]+]], {{v[0-9]+}}
+; SI: v_sub_i32_e32 [[EXP:v[0-9]+]], {{.*}}, 32, [[AMT]]
+; VI: v_sub_u32_e32 [[EXP:v[0-9]+]], {{.*}}, 32, [[AMT]]
+; SI: v_ldexp_f32_e32 [[VR:v[0-9]+]], [[FLT]], [[EXP]]
+; VI: v_ldexp_f32 [[VR:v[0-9]+]], [[FLT]], [[EXP]]
 ; GCN: {{buffer|flat}}_store_dword {{.*}}[[VR]]
 define amdgpu_kernel void @v_uint_to_fp_i64_to_f32(float addrspace(1)* %out, i64 addrspace(1)* %in) #0 {
 %tid = call i32 @llvm.amdgcn.workitem.id.x()
diff --git a/llvm/test/CodeGen/AMDGPU/uint_to_fp.ll b/llvm/test/CodeGen/AMDGPU/uint_to_fp.ll
--- a/llvm/test/CodeGen/AMDGPU/uint_to_fp.ll
+++ b/llvm/test/CodeGen/AMDGPU/uint_to_fp.ll
@@ -118,13 +118,8 @@
 ; FIXME: Repeated here to test r600
 ; FUNC-LABEL: {{^}}s_uint_to_fp_i64_to_f32:
 ; R600: FFBH_UINT
-; R600: FFBH_UINT
-; R600: CNDE_INT
 ; R600: CNDE_INT
-
-; R600-DAG: SETGT_UINT
-; R600-DAG: SETGT_UINT
-; R600-DAG: SETE_INT
+; R600: UINT_TO_FLT
 define amdgpu_kernel void @s_uint_to_fp_i64_to_f32(float addrspace(1)* %out, i64 %in) #0 {
 entry: