diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -2442,24 +2442,6 @@
       return getConstant(NewVal, SDLoc(V), V.getValueType());
     break;
   }
-  case ISD::SRL:
-    // Only look at single-use SRLs.
-    if (!V.getNode()->hasOneUse())
-      break;
-    if (auto *RHSC = dyn_cast<ConstantSDNode>(V.getOperand(1))) {
-      // See if we can recursively simplify the LHS.
-      unsigned Amt = RHSC->getZExtValue();
-
-      // Watch out for shift count overflow though.
-      if (Amt >= DemandedBits.getBitWidth())
-        break;
-      APInt SrcDemandedBits = DemandedBits << Amt;
-      if (SDValue SimplifyLHS =
-              GetDemandedBits(V.getOperand(0), SrcDemandedBits))
-        return getNode(ISD::SRL, SDLoc(V), V.getValueType(), SimplifyLHS,
-                       V.getOperand(1));
-    }
-    break;
   }
   return SDValue();
 }
diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -1525,6 +1525,16 @@
       // low bits known zero.
       Known.Zero.setLowBits(ShAmt);
 
+      // Attempt to avoid multi-use ops if we don't need anything from them.
+      if (!InDemandedMask.isAllOnesValue() || !DemandedElts.isAllOnesValue()) {
+        SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
+            Op0, InDemandedMask, DemandedElts, TLO.DAG, Depth + 1);
+        if (DemandedOp0) {
+          SDValue NewOp = TLO.DAG.getNode(ISD::SHL, dl, VT, DemandedOp0, Op1);
+          return TLO.CombineTo(Op, NewOp);
+        }
+      }
+
       // Try shrinking the operation as long as the shift amount will still be
       // in range.
       if ((ShAmt < DemandedBits.getActiveBits()) &&
@@ -1594,6 +1604,16 @@
       Known.One.lshrInPlace(ShAmt);
       // High bits known zero.
       Known.Zero.setHighBits(ShAmt);
+
+      // Attempt to avoid multi-use ops if we don't need anything from them.
+      if (!InDemandedMask.isAllOnesValue() || !DemandedElts.isAllOnesValue()) {
+        SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
+            Op0, InDemandedMask, DemandedElts, TLO.DAG, Depth + 1);
+        if (DemandedOp0) {
+          SDValue NewOp = TLO.DAG.getNode(ISD::SRL, dl, VT, DemandedOp0, Op1);
+          return TLO.CombineTo(Op, NewOp);
+        }
+      }
     }
     break;
   }
diff --git a/llvm/test/CodeGen/AArch64/parity.ll b/llvm/test/CodeGen/AArch64/parity.ll
--- a/llvm/test/CodeGen/AArch64/parity.ll
+++ b/llvm/test/CodeGen/AArch64/parity.ll
@@ -47,8 +47,8 @@
 ; CHECK-LABEL: parity_17:
 ; CHECK: // %bb.0:
 ; CHECK-NEXT: and w8, w0, #0x1ffff
-; CHECK-NEXT: eor w8, w8, w8, lsr #16
-; CHECK-NEXT: eor w8, w8, w8, lsr #8
+; CHECK-NEXT: eor w9, w8, w8, lsr #16
+; CHECK-NEXT: eor w8, w9, w8, lsr #8
 ; CHECK-NEXT: eor w8, w8, w8, lsr #4
 ; CHECK-NEXT: eor w8, w8, w8, lsr #2
 ; CHECK-NEXT: eor w8, w8, w8, lsr #1
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll
@@ -6755,77 +6755,73 @@
 ; GFX6-NEXT: s_mov_b32 s7, 0xf000
 ; GFX6-NEXT: s_mov_b32 s6, -1
 ; GFX6-NEXT: s_waitcnt lgkmcnt(0)
+; GFX6-NEXT: s_bfe_i32 s10, s2, 0xf0000
+; GFX6-NEXT: v_cvt_f32_i32_e32 v5, s10
+; GFX6-NEXT: v_mov_b32_e32 v2, s0
+; GFX6-NEXT: v_alignbit_b32 v2, s1, v2, 30
+; GFX6-NEXT: s_bfe_i32 s1, s0, 0xf0000
+; GFX6-NEXT: v_cvt_f32_i32_e32 v4, s1
+; GFX6-NEXT: s_xor_b32 s1, s10, s1
+; GFX6-NEXT: s_ashr_i32 s1, s1, 30
+; GFX6-NEXT: s_or_b32 s1, s1, 1
+; GFX6-NEXT: v_rcp_iflag_f32_e32 v6, v4
+; GFX6-NEXT: v_mov_b32_e32 v7, s1
+; GFX6-NEXT: s_lshr_b32 s9, s0, 15
+; GFX6-NEXT: s_bfe_i32 s1, s2, 0xf000f
+; GFX6-NEXT: v_mul_f32_e32 v6, v5, v6
+; GFX6-NEXT: v_trunc_f32_e32 v6, v6
+; GFX6-NEXT: v_mad_f32 v5, -v6, v4, v5
+; GFX6-NEXT: v_cvt_i32_f32_e32 v6, v6
+; GFX6-NEXT: v_cmp_ge_f32_e64 vcc, |v5|, |v4|
+; GFX6-NEXT: v_cndmask_b32_e32 v4, 0, v7, vcc
 ; GFX6-NEXT: v_mov_b32_e32 v0, s2
+; GFX6-NEXT: v_add_i32_e32 v4, vcc, v4, v6
+; GFX6-NEXT: v_mul_lo_u32 v4, v4, s0
+; GFX6-NEXT: s_bfe_i32 s0, s0, 0xf000f
+; GFX6-NEXT: v_cvt_f32_i32_e32 v5, s0
+; GFX6-NEXT: v_cvt_f32_i32_e32 v6, s1
 ; GFX6-NEXT: v_alignbit_b32 v0, s3, v0, 30
 ; GFX6-NEXT: s_movk_i32 s3, 0x7fff
-; GFX6-NEXT: s_and_b32 s11, s0, s3
-; GFX6-NEXT: s_bfe_i32 s11, s11, 0xf0000
-; GFX6-NEXT: v_cvt_f32_i32_e32 v2, s11
-; GFX6-NEXT: s_and_b32 s9, s2, s3
-; GFX6-NEXT: s_bfe_i32 s9, s9, 0xf0000
-; GFX6-NEXT: v_cvt_f32_i32_e32 v3, s9
-; GFX6-NEXT: v_rcp_iflag_f32_e32 v4, v2
-; GFX6-NEXT: s_xor_b32 s9, s9, s11
-; GFX6-NEXT: s_ashr_i32 s9, s9, 30
-; GFX6-NEXT: s_or_b32 s9, s9, 1
-; GFX6-NEXT: v_mul_f32_e32 v4, v3, v4
-; GFX6-NEXT: v_trunc_f32_e32 v4, v4
-; GFX6-NEXT: v_mad_f32 v3, -v4, v2, v3
-; GFX6-NEXT: v_cvt_i32_f32_e32 v4, v4
-; GFX6-NEXT: v_mov_b32_e32 v5, s9
-; GFX6-NEXT: v_cmp_ge_f32_e64 vcc, |v3|, |v2|
-; GFX6-NEXT: v_cndmask_b32_e32 v2, 0, v5, vcc
-; GFX6-NEXT: v_mov_b32_e32 v1, s0
-; GFX6-NEXT: s_bfe_u32 s12, s0, 0xf000f
-; GFX6-NEXT: v_add_i32_e32 v2, vcc, v2, v4
-; GFX6-NEXT: v_alignbit_b32 v1, s1, v1, 30
-; GFX6-NEXT: s_lshr_b32 s1, s0, 15
-; GFX6-NEXT: v_mul_lo_u32 v2, v2, s0
-; GFX6-NEXT: s_bfe_i32 s0, s12, 0xf0000
-; GFX6-NEXT: v_cvt_f32_i32_e32 v3, s0
-; GFX6-NEXT: s_bfe_u32 s10, s2, 0xf000f
-; GFX6-NEXT: s_lshr_b32 s8, s2, 15
-; GFX6-NEXT: v_sub_i32_e32 v2, vcc, s2, v2
-; GFX6-NEXT: s_bfe_i32 s2, s10, 0xf0000
-; GFX6-NEXT: v_cvt_f32_i32_e32 v4, s2
-; GFX6-NEXT: v_rcp_iflag_f32_e32 v5, v3
-; GFX6-NEXT: s_xor_b32 s0, s2, s0
+; GFX6-NEXT: v_rcp_iflag_f32_e32 v7, v5
+; GFX6-NEXT: v_and_b32_e32 v3, s3, v2
+; GFX6-NEXT: v_sub_i32_e32 v4, vcc, s2, v4
+; GFX6-NEXT: v_mul_f32_e32 v7, v6, v7
+; GFX6-NEXT: v_trunc_f32_e32 v7, v7
+; GFX6-NEXT: s_xor_b32 s0, s1, s0
+; GFX6-NEXT: v_mad_f32 v6, -v7, v5, v6
+; GFX6-NEXT: v_bfe_i32 v2, v2, 0, 15
 ; GFX6-NEXT: s_ashr_i32 s0, s0, 30
+; GFX6-NEXT: v_cvt_i32_f32_e32 v7, v7
+; GFX6-NEXT: v_cmp_ge_f32_e64 vcc, |v6|, |v5|
+; GFX6-NEXT: v_cvt_f32_i32_e32 v6, v2
 ; GFX6-NEXT: s_or_b32 s0, s0, 1
-; GFX6-NEXT: v_mul_f32_e32 v5, v4, v5
-; GFX6-NEXT: v_trunc_f32_e32 v5, v5
-; GFX6-NEXT: v_mad_f32 v4, -v5, v3, v4
-; GFX6-NEXT: v_cvt_i32_f32_e32 v5, v5
-; GFX6-NEXT: v_and_b32_e32 v1, s3, v1
-; GFX6-NEXT: v_mov_b32_e32 v6, s0
-; GFX6-NEXT: v_cmp_ge_f32_e64 vcc, |v4|, |v3|
-; GFX6-NEXT: v_cndmask_b32_e32 v3, 0, v6, vcc
-; GFX6-NEXT: v_bfe_i32 v4, v1, 0, 15
-; GFX6-NEXT: v_add_i32_e32 v3, vcc, v3, v5
-; GFX6-NEXT: v_cvt_f32_i32_e32 v5, v4
-; GFX6-NEXT: v_and_b32_e32 v0, s3, v0
-; GFX6-NEXT: v_bfe_i32 v6, v0, 0, 15
-; GFX6-NEXT: v_cvt_f32_i32_e32 v7, v6
-; GFX6-NEXT: v_rcp_iflag_f32_e32 v8, v5
-; GFX6-NEXT: v_xor_b32_e32 v4, v6, v4
-; GFX6-NEXT: v_ashrrev_i32_e32 v4, 30, v4
-; GFX6-NEXT: v_or_b32_e32 v4, 1, v4
-; GFX6-NEXT: v_mul_f32_e32 v6, v7, v8
-; GFX6-NEXT: v_trunc_f32_e32 v6, v6
-; GFX6-NEXT: v_mad_f32 v7, -v6, v5, v7
-; GFX6-NEXT: v_cvt_i32_f32_e32 v6, v6
-; GFX6-NEXT: v_cmp_ge_f32_e64 vcc, |v7|, |v5|
-; GFX6-NEXT: v_cndmask_b32_e32 v4, 0, v4, vcc
-; GFX6-NEXT: v_mul_lo_u32 v3, v3, s1
-; GFX6-NEXT: v_add_i32_e32 v4, vcc, v4, v6
-; GFX6-NEXT: v_mul_lo_u32 v1, v4, v1
-; GFX6-NEXT: v_sub_i32_e32 v3, vcc, s8, v3
-; GFX6-NEXT: v_and_b32_e32 v3, s3, v3
-; GFX6-NEXT: v_subrev_i32_e32 v0, vcc, v1, v0
-; GFX6-NEXT: v_lshl_b64 v[0:1], v[0:1], 30
+; GFX6-NEXT: v_mov_b32_e32 v8, s0
+; GFX6-NEXT: v_and_b32_e32 v1, s3, v0
+; GFX6-NEXT: v_cndmask_b32_e32 v5, 0, v8, vcc
+; GFX6-NEXT: v_bfe_i32 v0, v0, 0, 15
+; GFX6-NEXT: v_add_i32_e32 v5, vcc, v5, v7
+; GFX6-NEXT: v_cvt_f32_i32_e32 v7, v0
+; GFX6-NEXT: v_rcp_iflag_f32_e32 v8, v6
+; GFX6-NEXT: v_xor_b32_e32 v0, v0, v2
+; GFX6-NEXT: v_ashrrev_i32_e32 v0, 30, v0
+; GFX6-NEXT: v_or_b32_e32 v0, 1, v0
+; GFX6-NEXT: v_mul_f32_e32 v2, v7, v8
+; GFX6-NEXT: v_trunc_f32_e32 v2, v2
+; GFX6-NEXT: v_mad_f32 v7, -v2, v6, v7
+; GFX6-NEXT: v_cvt_i32_f32_e32 v2, v2
+; GFX6-NEXT: v_cmp_ge_f32_e64 vcc, |v7|, |v6|
+; GFX6-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc
+; GFX6-NEXT: v_mul_lo_u32 v5, v5, s9
+; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v2
+; GFX6-NEXT: v_mul_lo_u32 v0, v0, v3
+; GFX6-NEXT: s_lshr_b32 s8, s2, 15
+; GFX6-NEXT: v_sub_i32_e32 v2, vcc, s8, v5
+; GFX6-NEXT: v_subrev_i32_e32 v0, vcc, v0, v1
 ; GFX6-NEXT: v_and_b32_e32 v2, s3, v2
-; GFX6-NEXT: v_lshlrev_b32_e32 v3, 15, v3
-; GFX6-NEXT: v_or_b32_e32 v2, v2, v3
+; GFX6-NEXT: v_lshl_b64 v[0:1], v[0:1], 30
+; GFX6-NEXT: v_and_b32_e32 v3, s3, v4
+; GFX6-NEXT: v_lshlrev_b32_e32 v2, 15, v2
+; GFX6-NEXT: v_or_b32_e32 v2, v3, v2
 ; GFX6-NEXT: v_or_b32_e32 v0, v2, v0
 ; GFX6-NEXT: buffer_store_dword v0, off, s[4:7], 0
 ; GFX6-NEXT: s_waitcnt expcnt(0)
@@ -6839,82 +6835,78 @@
 ; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x24
 ; GFX9-NEXT: s_movk_i32 s8, 0x7fff
 ; GFX9-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
 ; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_bfe_i32 s6, s2, 0xf0000
+; GFX9-NEXT: v_cvt_f32_i32_e32 v5, s6
 ; GFX9-NEXT: v_mov_b32_e32 v0, s2
-; GFX9-NEXT: v_alignbit_b32 v0, s3, v0, 30
-; GFX9-NEXT: s_and_b32 s3, s2, s8
 ; GFX9-NEXT: v_mov_b32_e32 v1, s0
 ; GFX9-NEXT: v_alignbit_b32 v1, s1, v1, 30
-; GFX9-NEXT: s_and_b32 s1, s0, s8
-; GFX9-NEXT: s_bfe_i32 s1, s1, 0xf0000
-; GFX9-NEXT: v_cvt_f32_i32_e32 v2, s1
-; GFX9-NEXT: s_bfe_i32 s3, s3, 0xf0000
-; GFX9-NEXT: v_cvt_f32_i32_e32 v3, s3
-; GFX9-NEXT: s_xor_b32 s1, s3, s1
-; GFX9-NEXT: v_rcp_iflag_f32_e32 v4, v2
+; GFX9-NEXT: s_bfe_i32 s1, s0, 0xf0000
+; GFX9-NEXT: v_cvt_f32_i32_e32 v4, s1
+; GFX9-NEXT: s_xor_b32 s1, s6, s1
 ; GFX9-NEXT: s_ashr_i32 s1, s1, 30
-; GFX9-NEXT: s_lshr_b32 s9, s2, 15
-; GFX9-NEXT: s_bfe_u32 s10, s2, 0xf000f
-; GFX9-NEXT: v_mul_f32_e32 v4, v3, v4
-; GFX9-NEXT: v_trunc_f32_e32 v4, v4
-; GFX9-NEXT: v_mad_f32 v3, -v4, v2, v3
-; GFX9-NEXT: v_cvt_i32_f32_e32 v4, v4
-; GFX9-NEXT: s_lshr_b32 s11, s0, 15
-; GFX9-NEXT: s_bfe_u32 s12, s0, 0xf000f
+; GFX9-NEXT: v_alignbit_b32 v0, s3, v0, 30
+; GFX9-NEXT: v_rcp_iflag_f32_e32 v6, v4
+; GFX9-NEXT: s_lshr_b32 s3, s2, 15
+; GFX9-NEXT: s_lshr_b32 s9, s0, 15
 ; GFX9-NEXT: s_or_b32 s1, s1, 1
-; GFX9-NEXT: v_cmp_ge_f32_e64 s[6:7], |v3|, |v2|
+; GFX9-NEXT: v_mul_f32_e32 v6, v5, v6
+; GFX9-NEXT: v_trunc_f32_e32 v6, v6
+; GFX9-NEXT: v_mad_f32 v5, -v6, v4, v5
+; GFX9-NEXT: v_cvt_i32_f32_e32 v6, v6
+; GFX9-NEXT: v_cmp_ge_f32_e64 s[6:7], |v5|, |v4|
 ; GFX9-NEXT: s_and_b64 s[6:7], s[6:7], exec
 ; GFX9-NEXT: s_cselect_b32 s1, s1, 0
-; GFX9-NEXT: v_add_u32_e32 v2, s1, v4
-; GFX9-NEXT: s_bfe_i32 s1, s12, 0xf0000
-; GFX9-NEXT: v_cvt_f32_i32_e32 v3, s1
-; GFX9-NEXT: v_mul_lo_u32 v2, v2, s0
-; GFX9-NEXT: s_bfe_i32 s0, s10, 0xf0000
-; GFX9-NEXT: v_cvt_f32_i32_e32 v4, s0
-; GFX9-NEXT: v_rcp_iflag_f32_e32 v5, v3
+; GFX9-NEXT: v_add_u32_e32 v4, s1, v6
+; GFX9-NEXT: s_bfe_i32 s1, s0, 0xf000f
+; GFX9-NEXT: v_cvt_f32_i32_e32 v5, s1
+; GFX9-NEXT: v_mul_lo_u32 v4, v4, s0
+; GFX9-NEXT: s_bfe_i32 s0, s2, 0xf000f
+; GFX9-NEXT: v_cvt_f32_i32_e32 v6, s0
+; GFX9-NEXT: v_rcp_iflag_f32_e32 v7, v5
 ; GFX9-NEXT: s_xor_b32 s0, s0, s1
+; GFX9-NEXT: v_and_b32_e32 v3, s8, v1
 ; GFX9-NEXT: s_ashr_i32 s0, s0, 30
-; GFX9-NEXT: s_or_b32 s3, s0, 1
-; GFX9-NEXT: v_mul_f32_e32 v5, v4, v5
-; GFX9-NEXT: v_trunc_f32_e32 v5, v5
-; GFX9-NEXT: v_mad_f32 v4, -v5, v3, v4
-; GFX9-NEXT: v_cvt_i32_f32_e32 v5, v5
-; GFX9-NEXT: v_cmp_ge_f32_e64 s[0:1], |v4|, |v3|
-; GFX9-NEXT: v_and_b32_e32 v1, s8, v1
+; GFX9-NEXT: v_mul_f32_e32 v7, v6, v7
+; GFX9-NEXT: v_trunc_f32_e32 v7, v7
+; GFX9-NEXT: v_mad_f32 v6, -v7, v5, v6
+; GFX9-NEXT: v_cvt_i32_f32_e32 v7, v7
+; GFX9-NEXT: v_bfe_i32 v1, v1, 0, 15
+; GFX9-NEXT: s_or_b32 s6, s0, 1
+; GFX9-NEXT: v_cmp_ge_f32_e64 s[0:1], |v6|, |v5|
+; GFX9-NEXT: v_cvt_f32_i32_e32 v6, v1
 ; GFX9-NEXT: s_and_b64 s[0:1], s[0:1], exec
-; GFX9-NEXT: s_cselect_b32 s0, s3, 0
-; GFX9-NEXT: v_bfe_i32 v4, v1, 0, 15
-; GFX9-NEXT: v_add_u32_e32 v3, s0, v5
-; GFX9-NEXT: v_cvt_f32_i32_e32 v5, v4
+; GFX9-NEXT: s_cselect_b32 s0, s6, 0
+; GFX9-NEXT: v_add_u32_e32 v5, s0, v7
+; GFX9-NEXT: v_bfe_i32 v7, v0, 0, 15
+; GFX9-NEXT: v_cvt_f32_i32_e32 v8, v7
+; GFX9-NEXT: v_rcp_iflag_f32_e32 v9, v6
+; GFX9-NEXT: v_xor_b32_e32 v1, v7, v1
+; GFX9-NEXT: v_ashrrev_i32_e32 v1, 30, v1
+; GFX9-NEXT: v_or_b32_e32 v1, 1, v1
+; GFX9-NEXT: v_mul_f32_e32 v7, v8, v9
+; GFX9-NEXT: v_trunc_f32_e32 v7, v7
+; GFX9-NEXT: v_cvt_i32_f32_e32 v9, v7
+; GFX9-NEXT: v_mad_f32 v7, -v7, v6, v8
+; GFX9-NEXT: v_cmp_ge_f32_e64 vcc, |v7|, |v6|
+; GFX9-NEXT: v_cndmask_b32_e32 v1, 0, v1, vcc
+; GFX9-NEXT: v_mul_lo_u32 v5, v5, s9
+; GFX9-NEXT: v_add_u32_e32 v1, v9, v1
+; GFX9-NEXT: v_mul_lo_u32 v1, v1, v3
 ; GFX9-NEXT: v_and_b32_e32 v0, s8, v0
-; GFX9-NEXT: v_bfe_i32 v6, v0, 0, 15
-; GFX9-NEXT: v_cvt_f32_i32_e32 v7, v6
-; GFX9-NEXT: v_rcp_iflag_f32_e32 v8, v5
-; GFX9-NEXT: v_xor_b32_e32 v4, v6, v4
-; GFX9-NEXT: v_ashrrev_i32_e32 v4, 30, v4
-; GFX9-NEXT: v_or_b32_e32 v4, 1, v4
-; GFX9-NEXT: v_mul_f32_e32 v6, v7, v8
-; GFX9-NEXT: v_trunc_f32_e32 v6, v6
-; GFX9-NEXT: v_cvt_i32_f32_e32 v8, v6
-; GFX9-NEXT: v_mad_f32 v6, -v6, v5, v7
-; GFX9-NEXT: v_cmp_ge_f32_e64 vcc, |v6|, |v5|
-; GFX9-NEXT: v_cndmask_b32_e32 v4, 0, v4, vcc
-; GFX9-NEXT: v_mul_lo_u32 v3, v3, s11
-; GFX9-NEXT: v_add_u32_e32 v4, v8, v4
-; GFX9-NEXT: v_mul_lo_u32 v1, v4, v1
-; GFX9-NEXT: v_sub_u32_e32 v2, s2, v2
-; GFX9-NEXT: v_sub_u32_e32 v3, s9, v3
-; GFX9-NEXT: v_and_b32_e32 v3, s8, v3
+; GFX9-NEXT: v_sub_u32_e32 v3, s2, v4
+; GFX9-NEXT: v_sub_u32_e32 v4, s3, v5
 ; GFX9-NEXT: v_sub_u32_e32 v0, v0, v1
+; GFX9-NEXT: v_and_b32_e32 v4, s8, v4
 ; GFX9-NEXT: v_lshlrev_b64 v[0:1], 30, v[0:1]
-; GFX9-NEXT: v_and_b32_e32 v2, s8, v2
-; GFX9-NEXT: v_lshlrev_b32_e32 v3, 15, v3
-; GFX9-NEXT: v_or_b32_e32 v2, v2, v3
-; GFX9-NEXT: v_mov_b32_e32 v4, 0
-; GFX9-NEXT: v_or_b32_e32 v0, v2, v0
-; GFX9-NEXT: global_store_dword v4, v0, s[4:5]
+; GFX9-NEXT: v_and_b32_e32 v3, s8, v3
+; GFX9-NEXT: v_lshlrev_b32_e32 v4, 15, v4
+; GFX9-NEXT: v_or_b32_e32 v3, v3, v4
+; GFX9-NEXT: v_or_b32_e32 v0, v3, v0
+; GFX9-NEXT: global_store_dword v2, v0, s[4:5]
 ; GFX9-NEXT: v_and_b32_e32 v0, 0x1fff, v1
-; GFX9-NEXT: global_store_short v4, v0, s[4:5] offset:4
+; GFX9-NEXT: global_store_short v2, v0, s[4:5] offset:4
 ; GFX9-NEXT: s_endpgm
 ;
 ; GFX90A-LABEL: srem_v3i15:
@@ -6925,73 +6917,69 @@
 ; GFX90A-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
 ; GFX90A-NEXT: v_mov_b32_e32 v2, 0
 ; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: s_and_b32 s6, s2, s8
-; GFX90A-NEXT: s_bfe_i32 s6, s6, 0xf0000
-; GFX90A-NEXT: v_cvt_f32_i32_e32 v4, s6
-; GFX90A-NEXT: v_mov_b32_e32 v1, s0
-; GFX90A-NEXT: v_alignbit_b32 v1, s1, v1, 30
-; GFX90A-NEXT: s_and_b32 s1, s0, s8
-; GFX90A-NEXT: s_bfe_i32 s1, s1, 0xf0000
-; GFX90A-NEXT: v_cvt_f32_i32_e32 v3, s1
-; GFX90A-NEXT: s_xor_b32 s1, s6, s1
+; GFX90A-NEXT: s_bfe_i32 s6, s2, 0xf0000
+; GFX90A-NEXT: v_cvt_f32_i32_e32 v6, s6
 ; GFX90A-NEXT: v_mov_b32_e32 v0, s2
+; GFX90A-NEXT: v_mov_b32_e32 v3, s0
+; GFX90A-NEXT: v_alignbit_b32 v3, s1, v3, 30
+; GFX90A-NEXT: s_bfe_i32 s1, s0, 0xf0000
+; GFX90A-NEXT: v_cvt_f32_i32_e32 v5, s1
+; GFX90A-NEXT: s_xor_b32 s1, s6, s1
 ; GFX90A-NEXT: s_ashr_i32 s1, s1, 30
-; GFX90A-NEXT: v_rcp_iflag_f32_e32 v5, v3
 ; GFX90A-NEXT: v_alignbit_b32 v0, s3, v0, 30
+; GFX90A-NEXT: v_rcp_iflag_f32_e32 v7, v5
 ; GFX90A-NEXT: s_lshr_b32 s3, s2, 15
-; GFX90A-NEXT: s_bfe_u32 s9, s2, 0xf000f
-; GFX90A-NEXT: v_mul_f32_e32 v5, v4, v5
-; GFX90A-NEXT: v_trunc_f32_e32 v5, v5
-; GFX90A-NEXT: v_mad_f32 v4, -v5, v3, v4
-; GFX90A-NEXT: v_cvt_i32_f32_e32 v5, v5
-; GFX90A-NEXT: s_lshr_b32 s10, s0, 15
-; GFX90A-NEXT: s_bfe_u32 s11, s0, 0xf000f
+; GFX90A-NEXT: s_lshr_b32 s9, s0, 15
 ; GFX90A-NEXT: s_or_b32 s1, s1, 1
-; GFX90A-NEXT: v_cmp_ge_f32_e64 s[6:7], |v4|, |v3|
+; GFX90A-NEXT: v_mul_f32_e32 v7, v6, v7
+; GFX90A-NEXT: v_trunc_f32_e32 v7, v7
+; GFX90A-NEXT: v_mad_f32 v6, -v7, v5, v6
+; GFX90A-NEXT: v_cvt_i32_f32_e32 v7, v7
+; GFX90A-NEXT: v_cmp_ge_f32_e64 s[6:7], |v6|, |v5|
 ; GFX90A-NEXT: s_and_b64 s[6:7], s[6:7], exec
 ; GFX90A-NEXT: s_cselect_b32 s1, s1, 0
-; GFX90A-NEXT: v_add_u32_e32 v3, s1, v5
-; GFX90A-NEXT: v_mul_lo_u32 v3, v3, s0
-; GFX90A-NEXT: s_bfe_i32 s0, s11, 0xf0000
-; GFX90A-NEXT: v_cvt_f32_i32_e32 v4, s0
-; GFX90A-NEXT: s_bfe_i32 s1, s9, 0xf0000
-; GFX90A-NEXT: v_cvt_f32_i32_e32 v5, s1
+; GFX90A-NEXT: v_add_u32_e32 v5, s1, v7
+; GFX90A-NEXT: v_mul_lo_u32 v5, v5, s0
+; GFX90A-NEXT: s_bfe_i32 s0, s0, 0xf000f
+; GFX90A-NEXT: v_cvt_f32_i32_e32 v6, s0
+; GFX90A-NEXT: s_bfe_i32 s1, s2, 0xf000f
+; GFX90A-NEXT: v_cvt_f32_i32_e32 v7, s1
 ; GFX90A-NEXT: s_xor_b32 s0, s1, s0
-; GFX90A-NEXT: v_rcp_iflag_f32_e32 v6, v4
+; GFX90A-NEXT: v_rcp_iflag_f32_e32 v8, v6
+; GFX90A-NEXT: v_and_b32_e32 v4, s8, v3
 ; GFX90A-NEXT: s_ashr_i32 s0, s0, 30
-; GFX90A-NEXT: v_sub_u32_e32 v3, s2, v3
+; GFX90A-NEXT: v_bfe_i32 v3, v3, 0, 15
+; GFX90A-NEXT: v_mul_f32_e32 v8, v7, v8
+; GFX90A-NEXT: v_trunc_f32_e32 v8, v8
+; GFX90A-NEXT: v_mad_f32 v7, -v8, v6, v7
+; GFX90A-NEXT: v_sub_u32_e32 v5, s2, v5
 ; GFX90A-NEXT: s_or_b32 s2, s0, 1
-; GFX90A-NEXT: v_mul_f32_e32 v6, v5, v6
-; GFX90A-NEXT: v_trunc_f32_e32 v6, v6
-; GFX90A-NEXT: v_mad_f32 v5, -v6, v4, v5
-; GFX90A-NEXT: v_cvt_i32_f32_e32 v6, v6
-; GFX90A-NEXT: v_cmp_ge_f32_e64 s[0:1], |v5|, |v4|
-; GFX90A-NEXT: v_and_b32_e32 v1, s8, v1
+; GFX90A-NEXT: v_cvt_i32_f32_e32 v8, v8
+; GFX90A-NEXT: v_cmp_ge_f32_e64 s[0:1], |v7|, |v6|
+; GFX90A-NEXT: v_cvt_f32_i32_e32 v7, v3
 ; GFX90A-NEXT: s_and_b64 s[0:1], s[0:1], exec
+; GFX90A-NEXT: v_and_b32_e32 v1, s8, v0
 ; GFX90A-NEXT: s_cselect_b32 s0, s2, 0
-; GFX90A-NEXT: v_bfe_i32 v5, v1, 0, 15
-; GFX90A-NEXT: v_add_u32_e32 v4, s0, v6
-; GFX90A-NEXT: v_cvt_f32_i32_e32 v6, v5
-; GFX90A-NEXT: v_and_b32_e32 v0, s8, v0
-; GFX90A-NEXT: v_bfe_i32 v7, v0, 0, 15
-; GFX90A-NEXT: v_cvt_f32_i32_e32 v8, v7
-; GFX90A-NEXT: v_rcp_iflag_f32_e32 v9, v6
-; GFX90A-NEXT: v_xor_b32_e32 v5, v7, v5
-; GFX90A-NEXT: v_ashrrev_i32_e32 v5, 30, v5
-; GFX90A-NEXT: v_or_b32_e32 v5, 1, v5
-; GFX90A-NEXT: v_mul_f32_e32 v7, v8, v9
-; GFX90A-NEXT: v_trunc_f32_e32 v7, v7
-; GFX90A-NEXT: v_cvt_i32_f32_e32 v9, v7
-; GFX90A-NEXT: v_mad_f32 v7, -v7, v6, v8
-; GFX90A-NEXT: v_cmp_ge_f32_e64 vcc, |v7|, |v6|
-; GFX90A-NEXT: v_mul_lo_u32 v4, v4, s10
-; GFX90A-NEXT: v_cndmask_b32_e32 v5, 0, v5, vcc
-; GFX90A-NEXT: v_sub_u32_e32 v4, s3, v4
-; GFX90A-NEXT: v_add_u32_e32 v5, v9, v5
-; GFX90A-NEXT: v_mul_lo_u32 v1, v5, v1
-; GFX90A-NEXT: v_and_b32_e32 v4, s8, v4
-; GFX90A-NEXT: v_sub_u32_e32 v0, v0, v1
-; GFX90A-NEXT: v_and_b32_e32 v3, s8, v3
+; GFX90A-NEXT: v_bfe_i32 v0, v0, 0, 15
+; GFX90A-NEXT: v_add_u32_e32 v6, s0, v8
+; GFX90A-NEXT: v_cvt_f32_i32_e32 v8, v0
+; GFX90A-NEXT: v_rcp_iflag_f32_e32 v9, v7
+; GFX90A-NEXT: v_xor_b32_e32 v0, v0, v3
+; GFX90A-NEXT: v_ashrrev_i32_e32 v0, 30, v0
+; GFX90A-NEXT: v_or_b32_e32 v0, 1, v0
+; GFX90A-NEXT: v_mul_f32_e32 v3, v8, v9
+; GFX90A-NEXT: v_trunc_f32_e32 v3, v3
+; GFX90A-NEXT: v_cvt_i32_f32_e32 v9, v3
+; GFX90A-NEXT: v_mad_f32 v3, -v3, v7, v8
+; GFX90A-NEXT: v_cmp_ge_f32_e64 vcc, |v3|, |v7|
+; GFX90A-NEXT: v_mul_lo_u32 v6, v6, s9
+; GFX90A-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc
+; GFX90A-NEXT: v_sub_u32_e32 v6, s3, v6
+; GFX90A-NEXT: v_add_u32_e32 v0, v9, v0
+; GFX90A-NEXT: v_mul_lo_u32 v0, v0, v4
+; GFX90A-NEXT: v_and_b32_e32 v4, s8, v6
+; GFX90A-NEXT: v_sub_u32_e32 v0, v1, v0
+; GFX90A-NEXT: v_and_b32_e32 v3, s8, v5
 ; GFX90A-NEXT: v_lshlrev_b32_e32 v4, 15, v4
 ; GFX90A-NEXT: v_lshlrev_b64 v[0:1], 30, v[0:1]
 ; GFX90A-NEXT: v_or_b32_e32 v3, v3, v4
diff --git a/llvm/test/CodeGen/AMDGPU/bswap.ll b/llvm/test/CodeGen/AMDGPU/bswap.ll
--- a/llvm/test/CodeGen/AMDGPU/bswap.ll
+++ b/llvm/test/CodeGen/AMDGPU/bswap.ll
@@ -463,10 +463,10 @@
 ; SI-NEXT: v_alignbit_b32 v0, v0, v0, 24
 ; SI-NEXT: v_bfi_b32 v1, s4, v1, v2
 ; SI-NEXT: v_bfi_b32 v0, s4, v0, v3
-; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v1
 ; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; SI-NEXT: v_or_b32_e32 v0, v0, v2
+; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
 ; SI-NEXT: s_setpc_b64 s[30:31]
 ;
 ; VI-LABEL: v_bswap_v2i16:
@@ -531,12 +531,12 @@
 ; SI-NEXT: v_bfi_b32 v2, s4, v2, v7
 ; SI-NEXT: v_and_b32_e32 v4, s5, v1
 ; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; SI-NEXT: v_and_b32_e32 v3, s5, v3
+; SI-NEXT: v_and_b32_e32 v5, s5, v3
 ; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v2
 ; SI-NEXT: v_or_b32_e32 v0, v0, v4
-; SI-NEXT: v_or_b32_e32 v2, v2, v3
+; SI-NEXT: v_or_b32_e32 v2, v2, v5
 ; SI-NEXT: v_alignbit_b32 v1, v2, v1, 16
-; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v2
+; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v3
 ; SI-NEXT: s_setpc_b64 s[30:31]
 ;
 ; VI-LABEL: v_bswap_v4i16:
diff --git a/llvm/test/CodeGen/AMDGPU/ds-alignment.ll b/llvm/test/CodeGen/AMDGPU/ds-alignment.ll
--- a/llvm/test/CodeGen/AMDGPU/ds-alignment.ll
+++ b/llvm/test/CodeGen/AMDGPU/ds-alignment.ll
@@ -154,27 +154,27 @@
 ; ALIGNED-SDAG-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
 ; ALIGNED-SDAG-NEXT: s_waitcnt lgkmcnt(0)
 ; ALIGNED-SDAG-NEXT: v_mov_b32_e32 v0, s0
-; ALIGNED-SDAG-NEXT: ds_read_u8 v2, v0
-; ALIGNED-SDAG-NEXT: ds_read_u8 v3, v0 offset:1
-; ALIGNED-SDAG-NEXT: ds_read_u8 v4, v0 offset:2
-; ALIGNED-SDAG-NEXT: ds_read_u8 v5, v0 offset:3
-; ALIGNED-SDAG-NEXT: ds_read_u8 v6, v0 offset:4
-; ALIGNED-SDAG-NEXT: ds_read_u8 v7, v0 offset:5
+; ALIGNED-SDAG-NEXT: ds_read_u8 v1, v0
+; ALIGNED-SDAG-NEXT: ds_read_u8 v2, v0 offset:1
+; ALIGNED-SDAG-NEXT: ds_read_u8 v3, v0 offset:2
+; ALIGNED-SDAG-NEXT: ds_read_u8 v4, v0 offset:3
+; ALIGNED-SDAG-NEXT: ds_read_u8 v5, v0 offset:4
+; ALIGNED-SDAG-NEXT: ds_read_u8 v6, v0 offset:5
 ; ALIGNED-SDAG-NEXT: ds_read_u8 v8, v0 offset:6
 ; ALIGNED-SDAG-NEXT: ds_read_u8 v0, v0 offset:7
-; ALIGNED-SDAG-NEXT: v_mov_b32_e32 v1, s1
-; ALIGNED-SDAG-NEXT: s_waitcnt lgkmcnt(5)
-; ALIGNED-SDAG-NEXT: ds_write_b8 v1, v4 offset:2
-; ALIGNED-SDAG-NEXT: s_waitcnt lgkmcnt(5)
-; ALIGNED-SDAG-NEXT: ds_write_b8 v1, v5 offset:3
-; ALIGNED-SDAG-NEXT: ds_write_b8 v1, v2
-; ALIGNED-SDAG-NEXT: ds_write_b8 v1, v3 offset:1
-; ALIGNED-SDAG-NEXT: s_waitcnt lgkmcnt(5)
-; ALIGNED-SDAG-NEXT: ds_write_b8 v1, v8 offset:6
-; ALIGNED-SDAG-NEXT: s_waitcnt lgkmcnt(5)
-; ALIGNED-SDAG-NEXT: ds_write_b8 v1, v0 offset:7
-; ALIGNED-SDAG-NEXT: ds_write_b8 v1, v6 offset:4
-; ALIGNED-SDAG-NEXT: ds_write_b8 v1, v7 offset:5
+; ALIGNED-SDAG-NEXT: v_mov_b32_e32 v7, s1
+; ALIGNED-SDAG-NEXT: s_waitcnt lgkmcnt(3)
+; ALIGNED-SDAG-NEXT: ds_write_b8 v7, v5 offset:4
+; ALIGNED-SDAG-NEXT: s_waitcnt lgkmcnt(3)
+; ALIGNED-SDAG-NEXT: ds_write_b8 v7, v6 offset:5
+; ALIGNED-SDAG-NEXT: ds_write_b8 v7, v3 offset:2
+; ALIGNED-SDAG-NEXT: ds_write_b8 v7, v4 offset:3
+; ALIGNED-SDAG-NEXT: ds_write_b8 v7, v1
+; ALIGNED-SDAG-NEXT: ds_write_b8 v7, v2 offset:1
+; ALIGNED-SDAG-NEXT: s_waitcnt lgkmcnt(7)
+; ALIGNED-SDAG-NEXT: ds_write_b8 v7, v8 offset:6
+; ALIGNED-SDAG-NEXT: s_waitcnt lgkmcnt(7)
+; ALIGNED-SDAG-NEXT: ds_write_b8 v7, v0 offset:7
 ; ALIGNED-SDAG-NEXT: s_endpgm
 ;
 ; ALIGNED-GISEL-LABEL: ds8align1:
@@ -230,19 +230,19 @@
 ; ALIGNED-SDAG-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
 ; ALIGNED-SDAG-NEXT: s_waitcnt lgkmcnt(0)
 ; ALIGNED-SDAG-NEXT: v_mov_b32_e32 v0, s0
-; ALIGNED-SDAG-NEXT: ds_read_u16 v1, v0 offset:2
-; ALIGNED-SDAG-NEXT: ds_read_u16 v2, v0
-; ALIGNED-SDAG-NEXT: ds_read_u16 v3, v0 offset:6
-; ALIGNED-SDAG-NEXT: ds_read_u16 v0, v0 offset:4
+; ALIGNED-SDAG-NEXT: ds_read_u16 v1, v0 offset:4
+; ALIGNED-SDAG-NEXT: ds_read_u16 v2, v0 offset:2
+; ALIGNED-SDAG-NEXT: ds_read_u16 v3, v0
+; ALIGNED-SDAG-NEXT: ds_read_u16 v0, v0 offset:6
 ; ALIGNED-SDAG-NEXT: v_mov_b32_e32 v4, s1
 ; ALIGNED-SDAG-NEXT: s_waitcnt lgkmcnt(3)
-; ALIGNED-SDAG-NEXT: ds_write_b16 v4, v1 offset:2
+; ALIGNED-SDAG-NEXT: ds_write_b16 v4, v1 offset:4
 ; ALIGNED-SDAG-NEXT: s_waitcnt lgkmcnt(3)
-; ALIGNED-SDAG-NEXT: ds_write_b16 v4, v2
+; ALIGNED-SDAG-NEXT: ds_write_b16 v4, v2 offset:2
 ; ALIGNED-SDAG-NEXT: s_waitcnt lgkmcnt(3)
-; ALIGNED-SDAG-NEXT: ds_write_b16 v4, v3 offset:6
+; ALIGNED-SDAG-NEXT: ds_write_b16 v4, v3
 ; ALIGNED-SDAG-NEXT: s_waitcnt lgkmcnt(3)
-; ALIGNED-SDAG-NEXT: ds_write_b16 v4, v0 offset:4
+; ALIGNED-SDAG-NEXT: ds_write_b16 v4, v0 offset:6
 ; ALIGNED-SDAG-NEXT: s_endpgm
 ;
 ; ALIGNED-GISEL-LABEL: ds8align2:
diff --git a/llvm/test/CodeGen/AMDGPU/fshr.ll b/llvm/test/CodeGen/AMDGPU/fshr.ll
--- a/llvm/test/CodeGen/AMDGPU/fshr.ll
+++ b/llvm/test/CodeGen/AMDGPU/fshr.ll
@@ -681,14 +681,15 @@
 ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; SI-NEXT: v_or_b32_e32 v5, 16, v5
 ; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; SI-NEXT: v_alignbit_b32 v1, v1, v3, v5
-; SI-NEXT: v_or_b32_e32 v3, 16, v4
+; SI-NEXT: v_or_b32_e32 v4, 16, v4
 ; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; SI-NEXT: v_alignbit_b32 v0, v0, v2, v3
-; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; SI-NEXT: v_alignbit_b32 v1, v1, v3, v5
+; SI-NEXT: v_alignbit_b32 v0, v0, v2, v4
+; SI-NEXT: s_mov_b32 s4, 0xffff
+; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v1
+; SI-NEXT: v_and_b32_e32 v0, s4, v0
+; SI-NEXT: v_or_b32_e32 v0, v0, v3
+; SI-NEXT: v_and_b32_e32 v1, s4, v1
 ; SI-NEXT: s_setpc_b64 s[30:31]
 ;
 ; VI-LABEL: v_fshr_v2i16:
@@ -859,18 +860,18 @@
 ; SI-NEXT: v_or_b32_e32 v4, 16, v11
 ; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v7
 ; SI-NEXT: v_alignbit_b32 v3, v3, v5, v4
-; SI-NEXT: v_or_b32_e32 v4, 16, v10
-; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v6
+; SI-NEXT: v_or_b32_e32 v5, 16, v10
+; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6
 ; SI-NEXT: s_mov_b32 s4, 0xffff
-; SI-NEXT: v_alignbit_b32 v2, v2, v5, v4
-; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; SI-NEXT: v_alignbit_b32 v2, v2, v6, v5
+; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v3
 ; SI-NEXT: v_and_b32_e32 v2, s4, v2
 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
 ; SI-NEXT: v_and_b32_e32 v0, s4, v0
-; SI-NEXT: v_or_b32_e32 v2, v2, v3
+; SI-NEXT: v_or_b32_e32 v2, v2, v4
 ; SI-NEXT: v_or_b32_e32 v0, v0, v1
 ; SI-NEXT: v_alignbit_b32 v1, v2, v1, 16
-; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v2
+; SI-NEXT: v_and_b32_e32 v3, s4, v3
 ; SI-NEXT: s_setpc_b64 s[30:31]
 ;
 ; VI-LABEL: v_fshr_v4i16:
diff --git a/llvm/test/CodeGen/AMDGPU/idot4s.ll b/llvm/test/CodeGen/AMDGPU/idot4s.ll
--- a/llvm/test/CodeGen/AMDGPU/idot4s.ll
+++ b/llvm/test/CodeGen/AMDGPU/idot4s.ll
@@ -966,34 +966,30 @@
 ; GFX7-NEXT: buffer_load_ushort v1, off, s[0:3], 0
 ; GFX7-NEXT: s_mov_b32 s4, 0xffff
 ; GFX7-NEXT: s_waitcnt vmcnt(2)
-; GFX7-NEXT: v_bfe_i32 v3, v2, 8, 8
+; GFX7-NEXT: v_bfe_i32 v3, v2, 16, 8
 ; GFX7-NEXT: v_bfe_i32 v4, v2, 0, 8
-; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; GFX7-NEXT: v_and_b32_e32 v4, s4, v4
+; GFX7-NEXT: v_ashrrev_i32_e32 v5, 24, v2
+; GFX7-NEXT: v_bfe_i32 v2, v2, 8, 8
 ; GFX7-NEXT: s_waitcnt vmcnt(1)
-; GFX7-NEXT: v_bfe_i32 v6, v0, 8, 8
+; GFX7-NEXT: v_bfe_i32 v6, v0, 16, 8
 ; GFX7-NEXT: v_bfe_i32 v7, v0, 0, 8
-; GFX7-NEXT: v_or_b32_e32 v3, v4, v3
-; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v6
-; GFX7-NEXT: v_and_b32_e32 v6, s4, v7
-; GFX7-NEXT: v_bfe_i32 v8, v0, 16, 8
-; GFX7-NEXT: v_or_b32_e32 v4, v6, v4
-; GFX7-NEXT: v_and_b32_e32 v7, s4, v8
-; GFX7-NEXT: v_lshrrev_b32_e32 v6, 16, v3
-; GFX7-NEXT: v_and_b32_e32 v3, s4, v3
-; GFX7-NEXT: v_lshrrev_b32_e32 v8, 16, v4
+; GFX7-NEXT: v_ashrrev_i32_e32 v8, 24, v0
+; GFX7-NEXT: v_bfe_i32 v0, v0, 8, 8
+; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v2
 ; GFX7-NEXT: v_and_b32_e32 v4, s4, v4
-; GFX7-NEXT: v_bfe_i32 v5, v2, 16, 8
+; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; GFX7-NEXT: v_and_b32_e32 v7, s4, v7
+; GFX7-NEXT: v_alignbit_b32 v2, 0, v2, 16
+; GFX7-NEXT: v_alignbit_b32 v0, 0, v0, 16
 ; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_mad_u32_u24 v1, v3, v4, v1
-; GFX7-NEXT: v_ashrrev_i32_e32 v2, 24, v2
-; GFX7-NEXT: v_and_b32_e32 v5, s4, v5
-; GFX7-NEXT: v_ashrrev_i32_e32 v0, 24, v0
-; GFX7-NEXT: v_mad_u32_u24 v1, v6, v8, v1
-; GFX7-NEXT: v_and_b32_e32 v2, s4, v2
-; GFX7-NEXT: v_and_b32_e32 v0, s4, v0
-; GFX7-NEXT: v_mad_u32_u24 v1, v5, v7, v1
+; GFX7-NEXT: v_mad_u32_u24 v1, v4, v7, v1
+; GFX7-NEXT: v_and_b32_e32 v3, s4, v3
+; GFX7-NEXT: v_and_b32_e32 v6, s4, v6
 ; GFX7-NEXT: v_mad_u32_u24 v0, v2, v0, v1
+; GFX7-NEXT: v_and_b32_e32 v5, s4, v5
+; GFX7-NEXT: v_and_b32_e32 v8, s4, v8
+; GFX7-NEXT: v_mad_u32_u24 v0, v3, v6, v0
+; GFX7-NEXT: v_mad_u32_u24 v0, v5, v8, v0
 ; GFX7-NEXT: buffer_store_short v0, off, s[0:3], 0
 ; GFX7-NEXT: s_endpgm
 ;
diff --git a/llvm/test/CodeGen/AMDGPU/idot4u.ll b/llvm/test/CodeGen/AMDGPU/idot4u.ll
--- a/llvm/test/CodeGen/AMDGPU/idot4u.ll
+++ b/llvm/test/CodeGen/AMDGPU/idot4u.ll
@@ -1872,27 +1872,23 @@
 ; GFX7-NEXT: s_movk_i32 s5, 0xff
 ; GFX7-NEXT: s_waitcnt vmcnt(2)
 ; GFX7-NEXT: v_and_b32_e32 v3, s4, v2
-; GFX7-NEXT: v_and_b32_e32 v4, s5, v2
+; GFX7-NEXT: v_bfe_u32 v4, v2, 16, 8
 ; GFX7-NEXT: s_waitcnt vmcnt(1)
 ; GFX7-NEXT: v_and_b32_e32 v6, s4, v0
+; GFX7-NEXT: v_lshrrev_b32_e32 v5, 24, v2
+; GFX7-NEXT: v_and_b32_e32 v2, s5, v2
 ; GFX7-NEXT: v_lshlrev_b32_e32 v3, 8, v3
-; GFX7-NEXT: v_and_b32_e32 v7, s5, v0
-; GFX7-NEXT: v_or_b32_e32 v3, v4, v3
-; GFX7-NEXT: v_lshlrev_b32_e32 v4, 8, v6
-; GFX7-NEXT: v_or_b32_e32 v4, v7, v4
-; GFX7-NEXT: v_lshrrev_b32_e32 v6, 16, v3
-; GFX7-NEXT: v_and_b32_e32 v3, s5, v3
-; GFX7-NEXT: v_lshrrev_b32_e32 v7, 16, v4
-; GFX7-NEXT: v_and_b32_e32 v4, s5, v4
+; GFX7-NEXT: v_bfe_u32 v7, v0, 16, 8
+; GFX7-NEXT: v_lshrrev_b32_e32 v8, 24, v0
+; GFX7-NEXT: v_and_b32_e32 v0, s5, v0
+; GFX7-NEXT: v_lshlrev_b32_e32 v6, 8, v6
+; GFX7-NEXT: v_alignbit_b32 v3, s10, v3, 16
+; GFX7-NEXT: v_alignbit_b32 v6, 0, v6, 16
 ; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_mad_u32_u24 v1, v3, v4, v1
-; GFX7-NEXT: v_bfe_u32 v5, v2, 16, 8
-; GFX7-NEXT: v_bfe_u32 v8, v0, 16, 8
-; GFX7-NEXT: v_mad_u32_u24 v1, v6, v7, v1
-; GFX7-NEXT: v_lshrrev_b32_e32 v2, 24, v2
-; GFX7-NEXT: v_lshrrev_b32_e32 v0, 24, v0
-; GFX7-NEXT: v_mad_u32_u24 v1, v5, v8, v1
 ; GFX7-NEXT: v_mad_u32_u24 v0, v2, v0, v1
+; GFX7-NEXT: v_mad_u32_u24 v0, v3, v6, v0
+; GFX7-NEXT: v_mad_u32_u24 v0, v4, v7, v0
+; GFX7-NEXT: v_mad_u32_u24 v0, v5, v8, v0
 ; GFX7-NEXT: buffer_store_short v0, off, s[0:3], 0
 ; GFX7-NEXT: s_endpgm
 ;
@@ -2166,18 +2162,16 @@
 ; GFX9-NODL-NEXT: s_waitcnt vmcnt(1)
 ; GFX9-NODL-NEXT: v_lshrrev_b32_e32 v5, 16, v2
 ; GFX9-NODL-NEXT: v_mul_lo_u16_sdwa v6, v1, v2 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:BYTE_3
-; GFX9-NODL-NEXT: v_mul_lo_u16_e32 v8, v4, v5
-; GFX9-NODL-NEXT: v_or_b32_sdwa v6, v8, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NODL-NEXT: v_mul_lo_u16_sdwa v7, v1, v2 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:BYTE_1
-; GFX9-NODL-NEXT: v_lshlrev_b32_e32 v8, 16, v6
-; GFX9-NODL-NEXT: v_or_b32_e32 v7, v7, v8
-; GFX9-NODL-NEXT: v_lshrrev_b32_e32 v7, 8, v7
+; GFX9-NODL-NEXT: v_mul_lo_u16_e32 v7, v4, v5
+; GFX9-NODL-NEXT: v_lshrrev_b32_e32 v8, 8, v6
+; GFX9-NODL-NEXT: v_or_b32_sdwa v6, v7, v6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NODL-NEXT: v_mul_lo_u16_sdwa v6, v1, v2 dst_sel:BYTE_1 dst_unused:UNUSED_PRESERVE src0_sel:BYTE_1 src1_sel:BYTE_1
+; GFX9-NODL-NEXT: v_lshrrev_b32_e32 v6, 8, v6
 ; GFX9-NODL-NEXT: s_waitcnt vmcnt(0)
 ; GFX9-NODL-NEXT: v_mad_legacy_u16 v1, v1, v2, v3
-; GFX9-NODL-NEXT: v_add_u16_e32 v1, v1, v7
-; GFX9-NODL-NEXT: v_lshrrev_b32_e32 v6, 8, v6
-; GFX9-NODL-NEXT: v_mad_legacy_u16 v1, v4, v5, v1
 ; GFX9-NODL-NEXT: v_add_u16_e32 v1, v1, v6
+; GFX9-NODL-NEXT: v_mad_legacy_u16 v1, v4, v5, v1
+; GFX9-NODL-NEXT: v_add_u16_e32 v1, v1, v8
 ; GFX9-NODL-NEXT: global_store_byte v0, v1, s[2:3]
 ; GFX9-NODL-NEXT: s_endpgm
 ;
@@ -2196,18 +2190,16 @@
 ; GFX9-DL-NEXT: s_waitcnt vmcnt(1)
 ; GFX9-DL-NEXT: v_lshrrev_b32_e32 v5, 16, v2
 ; GFX9-DL-NEXT: v_mul_lo_u16_sdwa v6, v1, v2 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:BYTE_3
-; GFX9-DL-NEXT: v_mul_lo_u16_e32 v8, v4, v5
-; GFX9-DL-NEXT: v_or_b32_sdwa v6, v8, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-DL-NEXT: v_mul_lo_u16_sdwa v7, v1, v2 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:BYTE_1
-; GFX9-DL-NEXT: v_lshlrev_b32_e32 v8, 16, v6
-; GFX9-DL-NEXT: v_or_b32_e32 v7, v7, v8
-; GFX9-DL-NEXT: v_lshrrev_b32_e32 v7, 8, v7
+; GFX9-DL-NEXT: v_mul_lo_u16_e32 v7, v4, v5
+; GFX9-DL-NEXT: v_lshrrev_b32_e32 v8, 8, v6
+; GFX9-DL-NEXT: v_or_b32_sdwa v6, v7, v6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-DL-NEXT: v_mul_lo_u16_sdwa v6, v1, v2 dst_sel:BYTE_1 dst_unused:UNUSED_PRESERVE src0_sel:BYTE_1 src1_sel:BYTE_1
+; GFX9-DL-NEXT: v_lshrrev_b32_e32 v6, 8, v6
 ; GFX9-DL-NEXT: s_waitcnt vmcnt(0)
 ; GFX9-DL-NEXT: v_mad_legacy_u16 v1, v1, v2, v3
-; GFX9-DL-NEXT: v_add_u16_e32 v1, v1, v7
-; GFX9-DL-NEXT: v_lshrrev_b32_e32 v6, 8, v6
-; GFX9-DL-NEXT: v_mad_legacy_u16 v1, v4, v5, v1
 ; GFX9-DL-NEXT: v_add_u16_e32 v1, v1, v6
+; GFX9-DL-NEXT: v_mad_legacy_u16 v1, v4, v5, v1
+; GFX9-DL-NEXT: v_add_u16_e32 v1, v1, v8
 ; GFX9-DL-NEXT: global_store_byte v0, v1, s[2:3]
 ; GFX9-DL-NEXT: s_endpgm
 ;
@@ -2226,24 +2218,23 @@
 ; GFX10-DL-NEXT: v_lshrrev_b32_e32 v4, 24, v1
 ; GFX10-DL-NEXT: s_waitcnt vmcnt(1)
 ; GFX10-DL-NEXT: v_lshrrev_b32_e32 v5, 24, v2
-; GFX10-DL-NEXT: v_lshrrev_b32_e32 v6, 16, v1
-; GFX10-DL-NEXT: v_lshrrev_b32_e32 v7, 16, v2
-; GFX10-DL-NEXT: v_lshrrev_b16 v8, 8, v2
+; GFX10-DL-NEXT: v_lshrrev_b16 v6, 8, v1
+; GFX10-DL-NEXT: v_lshrrev_b32_e32 v7, 16, v1
+; GFX10-DL-NEXT: v_lshrrev_b32_e32 v8, 16, v2
+; GFX10-DL-NEXT: v_lshrrev_b16 v9, 8, v2
 ; GFX10-DL-NEXT: v_mul_lo_u16 v4, v4, v5
-; GFX10-DL-NEXT: v_lshrrev_b16 v5, 8, v1
-; GFX10-DL-NEXT: v_mul_lo_u16 v9, v6, v7
 ; GFX10-DL-NEXT: s_waitcnt vmcnt(0)
 ; GFX10-DL-NEXT: v_mad_u16 v1, v1, v2, v3
+; GFX10-DL-NEXT: v_mul_lo_u16 v5, v7, v8
+; GFX10-DL-NEXT: v_mul_lo_u16 v6, v6, v9
 ; GFX10-DL-NEXT: v_lshlrev_b16 v4, 8, v4
-; GFX10-DL-NEXT: v_mul_lo_u16 v5, v5, v8
-; GFX10-DL-NEXT: v_or_b32_sdwa v4, v9, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX10-DL-NEXT: v_lshlrev_b16 v5, 8, v5
-; GFX10-DL-NEXT: v_lshlrev_b32_e32 v8, 16, v4
+; GFX10-DL-NEXT: v_lshlrev_b16 v6, 8, v6
+; GFX10-DL-NEXT: v_or_b32_sdwa v5, v5, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
 ; GFX10-DL-NEXT: v_lshrrev_b32_e32 v2, 8, v4
-; GFX10-DL-NEXT: v_or_b32_sdwa v5, v5, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX10-DL-NEXT: v_or_b32_sdwa v5, v6, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
 ; GFX10-DL-NEXT: v_lshrrev_b32_e32 v5, 8, v5
 ; GFX10-DL-NEXT: v_add_nc_u16 v1, v1, v5
-; GFX10-DL-NEXT: v_mad_u16 v1, v6, v7, v1
+; GFX10-DL-NEXT: v_mad_u16 v1, v7, v8, v1
 ; GFX10-DL-NEXT: v_add_nc_u16 v1, v1, v2
 ; GFX10-DL-NEXT: global_store_byte v0, v1, s[0:1]
 ; GFX10-DL-NEXT: s_endpgm
diff --git a/llvm/test/CodeGen/AMDGPU/idot8s.ll b/llvm/test/CodeGen/AMDGPU/idot8s.ll
--- a/llvm/test/CodeGen/AMDGPU/idot8s.ll
+++ b/llvm/test/CodeGen/AMDGPU/idot8s.ll
@@ -2207,60 +2207,48 @@
 ; GFX7-NEXT: s_mov_b32 s4, 0xffff
 ; GFX7-NEXT: s_addc_u32 s13, s13, 0
 ; GFX7-NEXT: s_waitcnt vmcnt(2)
-; GFX7-NEXT: v_bfe_i32 v3, v2, 20, 4
-; GFX7-NEXT: v_bfe_i32 v4, v2, 16, 4
-; GFX7-NEXT: v_bfe_i32 v5, v2, 4, 4
-; GFX7-NEXT: v_bfe_i32 v6, v2, 0, 4
-; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; GFX7-NEXT: v_and_b32_e32 v4, s4, v4
-; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v5
-; GFX7-NEXT: v_and_b32_e32 v6, s4, v6
+; GFX7-NEXT: v_bfe_i32 v8, v2, 0, 4
+; GFX7-NEXT: v_bfe_i32 v6, v2, 4, 4
 ; GFX7-NEXT: s_waitcnt vmcnt(1)
-; GFX7-NEXT: v_bfe_i32 v10, v0, 20, 4
-; GFX7-NEXT: v_bfe_i32 v11, v0, 16, 4
-; GFX7-NEXT: v_bfe_i32 v12, v0, 4, 4
-; GFX7-NEXT: v_bfe_i32 v13, v0, 0, 4
-; GFX7-NEXT: v_or_b32_e32 v3, v4, v3
-; GFX7-NEXT: v_or_b32_e32 v4, v6, v5
-; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v10
-; GFX7-NEXT: v_and_b32_e32 v6, s4, v11
-; GFX7-NEXT: v_lshlrev_b32_e32 v10, 16, v12
-; GFX7-NEXT: v_and_b32_e32 v11, s4, v13
-; GFX7-NEXT: v_bfe_i32 v14, v0, 24, 4
-; GFX7-NEXT: v_ashrrev_i32_e32 v16, 28, v0
-; GFX7-NEXT: v_or_b32_e32 v5, v6, v5
-; GFX7-NEXT: v_or_b32_e32 v6, v11, v10
-; GFX7-NEXT: v_and_b32_e32 v12, s4, v14
-; GFX7-NEXT: v_and_b32_e32 v14, s4, v16
-; GFX7-NEXT: v_lshrrev_b32_e32 v16, 16, v4
-; GFX7-NEXT: v_and_b32_e32 v4, s4, v4
-; GFX7-NEXT: v_lshrrev_b32_e32 v11, 16, v6
+; GFX7-NEXT: v_bfe_i32 v15, v0, 0, 4
+; GFX7-NEXT: v_bfe_i32 v13, v0, 4, 4
+; GFX7-NEXT: v_and_b32_e32 v8, s4, v8
+; GFX7-NEXT: v_and_b32_e32 v15, s4, v15
+; GFX7-NEXT: v_bfe_i32 v5, v2, 8, 4
 ; GFX7-NEXT: v_and_b32_e32 v6, s4, v6
-; GFX7-NEXT: v_bfe_i32 v8, v2, 8, 4
-; GFX7-NEXT: v_bfe_i32 v15, v0, 8, 4
+; GFX7-NEXT: v_bfe_i32 v12, v0, 8, 4
+; GFX7-NEXT: v_and_b32_e32 v13, s4, v13
 ; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_mad_u32_u24 v1, v4, v6, v1
-; GFX7-NEXT: v_bfe_i32 v7, v2, 24, 4
+; GFX7-NEXT: v_mad_u32_u24 v1, v8, v15, v1
+; GFX7-NEXT: v_bfe_i32 v3, v2, 24, 4
+; GFX7-NEXT: v_bfe_i32 v4, v2, 20, 4
+; GFX7-NEXT: v_bfe_i32 v7, v2, 16, 4
 ; GFX7-NEXT: v_ashrrev_i32_e32 v9, 28, v2
 ; GFX7-NEXT: v_bfe_i32 v2, v2, 12, 4
-; GFX7-NEXT: v_and_b32_e32 v8, s4, v8
+; GFX7-NEXT: v_and_b32_e32 v5, s4, v5
+; GFX7-NEXT: v_bfe_i32 v10, v0, 24, 4
+; GFX7-NEXT: v_bfe_i32 v11, v0, 20, 4
+; GFX7-NEXT: v_bfe_i32 v14, v0, 16, 4
+; GFX7-NEXT: v_ashrrev_i32_e32 v16, 28, v0
 ; GFX7-NEXT: v_bfe_i32 v0, v0, 12, 4
-; GFX7-NEXT: v_and_b32_e32 v13, s4, v15
-; GFX7-NEXT: v_mad_u32_u24 v1, v16, v11, v1
+; GFX7-NEXT: v_and_b32_e32 v12, s4, v12
+; GFX7-NEXT: v_mad_u32_u24 v1, v6, v13, v1
 ; GFX7-NEXT: v_and_b32_e32 v2, s4, v2
 ; GFX7-NEXT: v_and_b32_e32 v0, s4, v0
-; GFX7-NEXT: v_mad_u32_u24 v1, v8, v13, v1
-; GFX7-NEXT: v_lshrrev_b32_e32 v15, 16, v3
-; GFX7-NEXT: v_and_b32_e32 v3, s4, v3
-; GFX7-NEXT: v_lshrrev_b32_e32 v10, 16, v5
-; GFX7-NEXT: v_and_b32_e32 v5, s4, v5
-; GFX7-NEXT: v_mad_u32_u24 v0, v2, v0, v1
-; GFX7-NEXT: v_mad_u32_u24 v0, v3, v5, v0
+; GFX7-NEXT: v_mad_u32_u24 v1, v5, v12, v1
 ; GFX7-NEXT: v_and_b32_e32 v7, s4, v7
-; GFX7-NEXT: v_mad_u32_u24 v0, v15, v10, v0
+; GFX7-NEXT: v_and_b32_e32 v14, s4, v14
+; GFX7-NEXT: v_mad_u32_u24 v0, v2, v0, v1
+; GFX7-NEXT: v_and_b32_e32 v4, s4, v4
+; GFX7-NEXT: v_and_b32_e32 v11, s4, v11
+; GFX7-NEXT: v_mad_u32_u24 v0, v7, v14, v0
+; GFX7-NEXT: v_and_b32_e32 v3, s4, v3
+; GFX7-NEXT: v_and_b32_e32 v10, s4, v10
+; GFX7-NEXT: v_mad_u32_u24 v0, v4, v11, v0
 ; GFX7-NEXT: v_and_b32_e32 v9, s4, v9
-; GFX7-NEXT: v_mad_u32_u24 v0, v7, v12, v0
-; GFX7-NEXT: v_mad_u32_u24 v0, v9, v14, v0
+; GFX7-NEXT: v_and_b32_e32 v16, s4, v16
+; GFX7-NEXT: v_mad_u32_u24 v0, v3, v10, v0
+; GFX7-NEXT: v_mad_u32_u24 v0, v9, v16, v0
 ; GFX7-NEXT: buffer_store_short v0, off, s[0:3], 0
 ; GFX7-NEXT: s_endpgm
 ;
@@ -2816,87 +2804,52 @@
 ; GFX7-NEXT: s_mov_b32 s2, -1
 ; GFX7-NEXT: buffer_load_ubyte v1, off, s[0:3], 0
 ; GFX7-NEXT: s_movk_i32 s4, 0xff
-; GFX7-NEXT: s_mov_b32 s5, 0xffff
 ; GFX7-NEXT: s_addc_u32 s13, s13, 0
 ; GFX7-NEXT: s_waitcnt vmcnt(2)
-; GFX7-NEXT: v_ashrrev_i32_e32 v3, 28, v2
-; GFX7-NEXT: v_bfe_i32 v4, v2, 24, 4
-; GFX7-NEXT: v_bfe_i32 v5, v2, 20, 4
-; GFX7-NEXT: v_bfe_i32 v6, v2, 16, 4
-; GFX7-NEXT: v_bfe_i32 v7, v2, 12, 4
-; GFX7-NEXT: v_bfe_i32 v8, v2, 8, 4
-; GFX7-NEXT: v_bfe_i32 v9, v2, 4, 4
-; GFX7-NEXT: v_bfe_i32 v2, v2, 0, 4
-; GFX7-NEXT: v_lshlrev_b32_e32 v10, 8, v3
-; GFX7-NEXT: v_and_b32_e32 v4, s4, v4
-; GFX7-NEXT: v_lshlrev_b32_e32 v5, 8, v5
-; GFX7-NEXT: v_and_b32_e32 v6, s4, v6
-; GFX7-NEXT: v_lshlrev_b32_e32 v7, 8, v7
-; GFX7-NEXT: v_and_b32_e32 v8, s4, v8
-; GFX7-NEXT: v_lshlrev_b32_e32 v9, 8, v9
-; GFX7-NEXT: v_and_b32_e32 v2, s4, v2
+; GFX7-NEXT: v_bfe_i32 v7, v2, 0, 4
+; GFX7-NEXT: v_bfe_i32 v3, v2, 24, 4
 ; GFX7-NEXT: s_waitcnt vmcnt(1)
-; GFX7-NEXT: v_ashrrev_i32_e32 v11, 28, v0
-; GFX7-NEXT: v_bfe_i32 v12, v0, 24, 4
-; GFX7-NEXT: v_bfe_i32 v13, v0, 20, 4
-; GFX7-NEXT: v_bfe_i32 v14, v0, 16, 4
-; GFX7-NEXT: v_bfe_i32 v15, v0, 12, 4
-; GFX7-NEXT: v_bfe_i32 v16, v0, 8, 4
-; GFX7-NEXT: v_bfe_i32 v17, v0, 4, 4
-; GFX7-NEXT: v_bfe_i32 v0, v0, 0, 4
-; GFX7-NEXT: v_or_b32_e32 v4, v4, v10
-; GFX7-NEXT: v_or_b32_e32 v5, v6, v5
-; GFX7-NEXT: v_or_b32_e32 v6, v8, v7
-; GFX7-NEXT: v_or_b32_e32 v2, v2, v9
-; GFX7-NEXT: v_lshlrev_b32_e32 v7, 8, v11
-; GFX7-NEXT: v_and_b32_e32 v8, s4, v12
-; GFX7-NEXT: v_lshlrev_b32_e32 v9, 8, v13
-; GFX7-NEXT: v_and_b32_e32 v10, s4, v14
-; GFX7-NEXT: v_lshlrev_b32_e32 v12, 8, v15
-; GFX7-NEXT: v_and_b32_e32 v13, s4, v16
-; GFX7-NEXT: v_lshlrev_b32_e32 v14, 8, v17
+; GFX7-NEXT: v_bfe_i32 v14, v0, 0, 4
+; GFX7-NEXT: v_bfe_i32 v4, v2, 20, 4
+; GFX7-NEXT: v_bfe_i32 v5, v2, 16, 4
+; GFX7-NEXT: v_bfe_i32 v6, v2, 8, 4
+; GFX7-NEXT: v_ashrrev_i32_e32 v8, 28, v2
+; GFX7-NEXT: v_bfe_i32 v9, v2, 12, 4
+; GFX7-NEXT: v_bfe_i32 v2, v2, 4, 4
+; GFX7-NEXT: v_and_b32_e32 v7, s4, v7
+; GFX7-NEXT: v_bfe_i32 v10, v0, 24, 4
+; GFX7-NEXT: v_bfe_i32 v11, v0, 20, 4
+; GFX7-NEXT: v_bfe_i32 v12, v0, 16, 4
+; GFX7-NEXT: v_bfe_i32 v13, v0, 8, 4
+; GFX7-NEXT: v_ashrrev_i32_e32 v15, 28, v0
+; GFX7-NEXT: v_bfe_i32 v16, v0, 12, 4
+; GFX7-NEXT: v_bfe_i32 v0, v0, 4, 4
+; GFX7-NEXT: v_and_b32_e32 v14, s4, v14
+; GFX7-NEXT: v_and_b32_e32 v2, s4, v2
 ; GFX7-NEXT: v_and_b32_e32 v0, s4, v0
-; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX7-NEXT: v_and_b32_e32 v5, s5, v5
-; GFX7-NEXT: v_or_b32_e32 v7, v8, v7
-; GFX7-NEXT: v_or_b32_e32 v8, v10, v9
-; GFX7-NEXT: v_or_b32_e32 v9, v13, v12
-; GFX7-NEXT: v_or_b32_e32 v0, v0, v14
-; GFX7-NEXT: v_lshlrev_b32_e32 v6, 16, v6
-; GFX7-NEXT: v_and_b32_e32 v2, s5, v2
-; GFX7-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v7
-; GFX7-NEXT: v_lshlrev_b32_e32 v7, 16, v9
-; GFX7-NEXT: v_and_b32_e32 v0, s5, v0
-; GFX7-NEXT: v_or_b32_e32 v2, v2, v6
-; GFX7-NEXT: v_or_b32_e32 v0, v0, v7
-; GFX7-NEXT: v_and_b32_e32 v7, s4, v2
-; GFX7-NEXT: v_and_b32_e32 v13, s4, v0
-; GFX7-NEXT: v_and_b32_e32 v6, s5, v8
-; GFX7-NEXT: v_bfe_u32 v8, v2, 8, 8
-; GFX7-NEXT: v_bfe_u32 v14, v0, 8, 8
 ; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_mad_u32_u24 v1, v7, v13, v1
-; GFX7-NEXT: v_or_b32_e32 v5, v6, v5
-; GFX7-NEXT: v_lshrrev_b32_e32 v6, 24, v2
-; GFX7-NEXT: v_bfe_u32 v2, v2, 16, 8
-; GFX7-NEXT: v_lshrrev_b32_e32 v12, 24, v0
-; GFX7-NEXT: v_bfe_u32 v0, v0, 16, 8
-; GFX7-NEXT: v_mad_u32_u24 v1, v8, v14, v1
+; GFX7-NEXT: v_mad_u32_u24 v1, v7, v14, v1
+; GFX7-NEXT: v_and_b32_e32 v6, s4, v6
+; GFX7-NEXT: v_lshlrev_b32_e32 v9, 24, v9
+; GFX7-NEXT: v_and_b32_e32 v13, s4, v13
+; GFX7-NEXT: v_lshlrev_b32_e32 v16, 24, v16
 ; GFX7-NEXT: v_mad_u32_u24 v0, v2, v0, v1
-; GFX7-NEXT: v_and_b32_e32 v9, s4, v4
-; GFX7-NEXT: v_and_b32_e32 v15, s4, v5
-; GFX7-NEXT: v_mad_u32_u24 v0, v6, v12, v0
-; GFX7-NEXT: v_bfe_u32 v10, v4, 8, 8
-; GFX7-NEXT: v_bfe_u32 v16, v5, 8, 8
-; GFX7-NEXT: v_mad_u32_u24 v0, v9, v15, v0
-; GFX7-NEXT: v_bfe_u32 v4, v4, 16, 8
-; GFX7-NEXT: v_bfe_u32 v5, v5, 16, 8
-; GFX7-NEXT: v_mad_u32_u24 v0, v10, v16, v0
-; GFX7-NEXT: v_and_b32_e32 v3, s4, v3
+; GFX7-NEXT: v_alignbit_b32 v9, 0, v9, 24
+; GFX7-NEXT: v_alignbit_b32 v16, 0, v16, 24
+; GFX7-NEXT: v_mad_u32_u24 v0, v6, v13, v0
+; GFX7-NEXT: v_and_b32_e32 v5, s4, v5
+; GFX7-NEXT: v_and_b32_e32 v12, s4, v12
+; GFX7-NEXT: v_mad_u32_u24 v0, v9, v16, v0
+; GFX7-NEXT: v_and_b32_e32 v4, s4, v4
 ; GFX7-NEXT: v_and_b32_e32 v11, s4, v11
-; GFX7-NEXT: v_mad_u32_u24 v0, v4, v5, v0
-; GFX7-NEXT: v_mad_u32_u24 v0, v3, v11, v0
+; GFX7-NEXT: v_mad_u32_u24 v0, v5, v12, v0
+; GFX7-NEXT: v_and_b32_e32 v3, s4, v3
+; GFX7-NEXT: v_and_b32_e32 v10, s4, v10
+; GFX7-NEXT: v_mad_u32_u24 v0, v4, v11, v0
+; GFX7-NEXT: v_and_b32_e32 v8, s4, v8
+; GFX7-NEXT: v_and_b32_e32 v15, s4, v15
+; GFX7-NEXT: v_mad_u32_u24 v0, v3, v10, v0
+; GFX7-NEXT: v_mad_u32_u24 v0, v8, v15, v0
 ; GFX7-NEXT: buffer_store_byte v0, off, s[0:3], 0
 ; GFX7-NEXT: s_endpgm
 ;
@@ -3026,6 +2979,8 @@
 ; GFX9-NEXT: s_waitcnt vmcnt(1)
 ; GFX9-NEXT: v_lshrrev_b32_e32 v10, 20, v2
 ; GFX9-NEXT: v_lshrrev_b32_e32 v11, 28, v2
+; GFX9-NEXT: v_lshrrev_b32_e32 v12, 12, v2
+; GFX9-NEXT: v_lshrrev_b32_e32 v13, 8, v2
 ; GFX9-NEXT: v_lshrrev_b32_e32 v14, 4, v2
 ; GFX9-NEXT: v_lshlrev_b16_e32 v15, 12, v1
 ; GFX9-NEXT: v_lshlrev_b16_sdwa v16, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
@@ -3033,63 +2988,60 @@
 ; GFX9-NEXT: v_lshlrev_b16_e32 v17, 12, v2
 ; GFX9-NEXT: v_lshlrev_b16_sdwa v18, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
 ; GFX9-NEXT: v_lshlrev_b16_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX9-NEXT: v_lshrrev_b32_e32 v12, 12, v2
-; GFX9-NEXT: v_lshrrev_b32_e32 v13, 8, v2
 ; GFX9-NEXT: v_lshlrev_b16_e32 v2, 12, v9
 ; GFX9-NEXT: v_ashrrev_i16_e32 v9, 12, v15
+; GFX9-NEXT: v_lshlrev_b16_e32 v8, 12, v8
+; GFX9-NEXT: v_lshlrev_b16_e32 v7, 12, v7
 ; GFX9-NEXT: v_ashrrev_i16_e32 v15, 12, v16
 ; GFX9-NEXT: v_lshlrev_b16_e32 v6, 12, v6
 ; GFX9-NEXT: v_ashrrev_i16_e32 v16, 12, v1
 ; GFX9-NEXT: v_lshlrev_b16_e32 v1, 12, v5
 ; GFX9-NEXT: v_lshlrev_b16_e32 v5, 12, v14
 ; GFX9-NEXT: v_ashrrev_i16_e32 v14, 12, v17
+; GFX9-NEXT: v_lshlrev_b16_e32 v13, 12, v13
+; GFX9-NEXT: v_lshlrev_b16_e32 v12, 12, v12
 ; GFX9-NEXT: v_ashrrev_i16_e32 v17, 12, v18
 ; GFX9-NEXT: v_lshlrev_b16_e32 v11, 12, v11
 ; GFX9-NEXT: v_ashrrev_i16_e32 v18, 12, v0
 ; GFX9-NEXT: v_lshlrev_b16_e32 v0, 12, v10
-; GFX9-NEXT: v_lshlrev_b16_e32 v8, 12, v8
-; GFX9-NEXT: v_lshlrev_b16_e32 v7, 12, v7
-; GFX9-NEXT: v_lshlrev_b16_e32 v13, 12, v13
-; GFX9-NEXT: v_lshlrev_b16_e32 v12, 12, v12
-; GFX9-NEXT: v_ashrrev_i16_e32 v6, 12, v6
-; GFX9-NEXT: v_ashrrev_i16_e32 v1, 12, v1
-; GFX9-NEXT: v_ashrrev_i16_e32 v11, 12, v11
-; GFX9-NEXT: v_ashrrev_i16_e32 v0, 12, v0
 ; GFX9-NEXT: v_ashrrev_i16_e32 v8, 12, v8
 ; GFX9-NEXT: v_ashrrev_i16_e32 v7, 12, v7
+; GFX9-NEXT: v_ashrrev_i16_e32 v6, 12, v6
+; GFX9-NEXT: v_ashrrev_i16_e32 v1, 12, v1
 ; GFX9-NEXT: v_ashrrev_i16_e32 v10, 12, v13
 ; GFX9-NEXT: v_ashrrev_i16_e32 v12, 12, v12
-; GFX9-NEXT: v_mul_lo_u16_e32 v19, v15, v17
-; GFX9-NEXT: v_mul_lo_u16_sdwa v0, v1, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX9-NEXT: v_mul_lo_u16_sdwa v1, v6, v11 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX9-NEXT: v_ashrrev_i16_e32 v11, 12, v11
+; GFX9-NEXT: v_ashrrev_i16_e32 v0, 12, v0
 ; GFX9-NEXT: v_ashrrev_i16_e32 v2, 12, v2
 ; GFX9-NEXT: v_ashrrev_i16_e32 v5, 12, v5
 ; GFX9-NEXT: v_mul_lo_u16_e32 v13, v16, v18
+; GFX9-NEXT: v_mul_lo_u16_e32 v19, v15, v17
+; GFX9-NEXT: v_mul_lo_u16_sdwa v0, v1, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX9-NEXT: v_mul_lo_u16_sdwa v1, v6, v11 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
 ; GFX9-NEXT: v_mul_lo_u16_sdwa v6, v7, v12 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
 ; GFX9-NEXT: v_mul_lo_u16_e32 v7, v8, v10
-; GFX9-NEXT: v_or_b32_sdwa v1, v19, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_mul_lo_u16_e32 v9, v9, v14
 ; GFX9-NEXT: v_mul_lo_u16_sdwa v2, v2, v5 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v5, v13, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v13, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v5, v19, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
 ; GFX9-NEXT: v_or_b32_sdwa v6, v7, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_lshlrev_b32_e32 v8, 16, v1
-; GFX9-NEXT: v_or_b32_sdwa v7, v9, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_mul_lo_u16_e32 v9, v9, v14
+; GFX9-NEXT: v_lshrrev_b32_e32 v8, 8, v1
+; GFX9-NEXT: v_or_b32_sdwa v1, v0, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
 ; GFX9-NEXT: v_lshlrev_b32_e32 v0, 16, v6
-; GFX9-NEXT: v_lshrrev_b32_e32 v9, 8, v1
-; GFX9-NEXT: v_or_b32_sdwa v1, v5, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_e32 v2, v2, v0
+; GFX9-NEXT: v_or_b32_sdwa v7, v9, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
 ; GFX9-NEXT: v_lshrrev_b32_e32 v5, 8, v1
+; GFX9-NEXT: v_or_b32_e32 v2, v2, v0
 ; GFX9-NEXT: v_lshrrev_b64 v[0:1], 24, v[0:1]
-; GFX9-NEXT: v_lshrrev_b32_e32 v2, 8, v2
+; GFX9-NEXT: v_lshrrev_b32_e32 v1, 8, v2
 ; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_add_u16_e32 v1, v7, v4
-; GFX9-NEXT: v_add_u16_e32 v1, v1, v2
+; GFX9-NEXT: v_add_u16_e32 v2, v7, v4
+; GFX9-NEXT: v_add_u16_e32 v1, v2, v1
 ; GFX9-NEXT: v_add_u16_e32 v1, v1, v6
 ; GFX9-NEXT: v_add_u16_e32 v0, v1, v0
 ; GFX9-NEXT: v_mad_legacy_u16 v0, v16, v18, v0
 ; GFX9-NEXT: v_add_u16_e32 v0, v0, v5
 ; GFX9-NEXT: v_mad_legacy_u16 v0, v15, v17, v0
-; GFX9-NEXT: v_add_u16_e32 v0, v0, v9
+; GFX9-NEXT: v_add_u16_e32 v0, v0, v8
 ; GFX9-NEXT: global_store_byte v3, v0, s[2:3]
 ; GFX9-NEXT: s_endpgm
 ;
@@ -3119,6 +3071,8 @@
 ; GFX9-DL-NEXT: s_waitcnt vmcnt(1)
 ; GFX9-DL-NEXT: v_lshrrev_b32_e32 v10, 20, v2
 ; GFX9-DL-NEXT: v_lshrrev_b32_e32 v11, 28, v2
+; GFX9-DL-NEXT: v_lshrrev_b32_e32 v12, 12, v2
+; GFX9-DL-NEXT: v_lshrrev_b32_e32 v13, 8, v2
 ; GFX9-DL-NEXT: v_lshrrev_b32_e32 v14, 4, v2
 ; GFX9-DL-NEXT: v_lshlrev_b16_e32 v15, 12, v1
 ; GFX9-DL-NEXT: v_lshlrev_b16_sdwa v16, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
@@ -3126,63 +3080,60 @@
 ; GFX9-DL-NEXT: v_lshlrev_b16_e32 v17, 12, v2
 ; GFX9-DL-NEXT: v_lshlrev_b16_sdwa v18, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
 ; GFX9-DL-NEXT: v_lshlrev_b16_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX9-DL-NEXT: v_lshrrev_b32_e32 v12, 12, v2
-; GFX9-DL-NEXT: v_lshrrev_b32_e32 v13, 8, v2
 ; GFX9-DL-NEXT: v_lshlrev_b16_e32 v2, 12, v9
 ; GFX9-DL-NEXT: v_ashrrev_i16_e32 v9, 12, v15
+; GFX9-DL-NEXT: v_lshlrev_b16_e32 v8, 12, v8
+; GFX9-DL-NEXT: v_lshlrev_b16_e32 v7, 12, v7
 ; GFX9-DL-NEXT: v_ashrrev_i16_e32 v15, 12, v16
 ; GFX9-DL-NEXT: v_lshlrev_b16_e32 v6, 12, v6
 ; GFX9-DL-NEXT: v_ashrrev_i16_e32 v16, 12, v1
 ; GFX9-DL-NEXT: v_lshlrev_b16_e32 v1, 12, v5
 ; GFX9-DL-NEXT: v_lshlrev_b16_e32 v5, 12, v14
 ; GFX9-DL-NEXT: v_ashrrev_i16_e32 v14, 12, v17
+; GFX9-DL-NEXT: v_lshlrev_b16_e32 v13, 12, v13
+; GFX9-DL-NEXT: v_lshlrev_b16_e32 v12, 12, v12
 ; GFX9-DL-NEXT: v_ashrrev_i16_e32 v17, 12, v18
 ; GFX9-DL-NEXT: v_lshlrev_b16_e32 v11, 12, v11
 ; GFX9-DL-NEXT: v_ashrrev_i16_e32 v18, 12, v0
 ; GFX9-DL-NEXT: v_lshlrev_b16_e32 v0, 12, v10
-; GFX9-DL-NEXT: v_lshlrev_b16_e32 v8, 12, v8
-; GFX9-DL-NEXT: v_lshlrev_b16_e32 v7, 12, v7
-; GFX9-DL-NEXT: v_lshlrev_b16_e32 v13, 12, v13
-; GFX9-DL-NEXT: v_lshlrev_b16_e32 v12, 12, v12
-; GFX9-DL-NEXT: v_ashrrev_i16_e32 v6, 12, v6
-; GFX9-DL-NEXT: v_ashrrev_i16_e32 v1, 12, v1
-; GFX9-DL-NEXT: v_ashrrev_i16_e32 v11, 12, v11
-; GFX9-DL-NEXT: v_ashrrev_i16_e32 v0, 12, v0
 ; GFX9-DL-NEXT: v_ashrrev_i16_e32 v8, 12, v8
 ; GFX9-DL-NEXT: v_ashrrev_i16_e32 v7, 12, v7
+; GFX9-DL-NEXT: v_ashrrev_i16_e32 v6, 12, v6
+; GFX9-DL-NEXT: v_ashrrev_i16_e32 v1, 12, v1
 ; GFX9-DL-NEXT: v_ashrrev_i16_e32 v10, 12, v13
 ; GFX9-DL-NEXT: v_ashrrev_i16_e32 v12, 12, v12
-; GFX9-DL-NEXT: v_mul_lo_u16_e32 v19, v15, v17
-; GFX9-DL-NEXT: v_mul_lo_u16_sdwa v0, v1, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX9-DL-NEXT: v_mul_lo_u16_sdwa v1, v6, v11 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX9-DL-NEXT: v_ashrrev_i16_e32 v11, 12, v11
+; GFX9-DL-NEXT: v_ashrrev_i16_e32 v0, 12, v0
 ; GFX9-DL-NEXT: v_ashrrev_i16_e32 v2, 12, v2
 ; GFX9-DL-NEXT: v_ashrrev_i16_e32 v5, 12, v5
 ; GFX9-DL-NEXT: v_mul_lo_u16_e32 v13, v16, v18
+; GFX9-DL-NEXT: v_mul_lo_u16_e32 v19, v15, v17
+; GFX9-DL-NEXT: v_mul_lo_u16_sdwa v0, v1, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX9-DL-NEXT: v_mul_lo_u16_sdwa v1, v6, v11 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
 ; GFX9-DL-NEXT: v_mul_lo_u16_sdwa v6, v7, v12 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
 ; GFX9-DL-NEXT: v_mul_lo_u16_e32 v7, v8, v10
-; GFX9-DL-NEXT: v_or_b32_sdwa v1, v19, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-DL-NEXT: v_mul_lo_u16_e32 v9, v9, v14
 ; GFX9-DL-NEXT: v_mul_lo_u16_sdwa v2, v2, v5 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX9-DL-NEXT: v_or_b32_sdwa v5, v13, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-DL-NEXT: v_or_b32_sdwa v0, v13, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-DL-NEXT: v_or_b32_sdwa v5, v19, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
 ; GFX9-DL-NEXT: v_or_b32_sdwa v6, v7, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-DL-NEXT: v_lshlrev_b32_e32 v8, 16, v1
-; GFX9-DL-NEXT: v_or_b32_sdwa v7, v9, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-DL-NEXT: v_mul_lo_u16_e32 v9, v9, v14
+; GFX9-DL-NEXT: v_lshrrev_b32_e32 v8, 8, v1
+; GFX9-DL-NEXT: v_or_b32_sdwa v1, v0, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
 ; GFX9-DL-NEXT: v_lshlrev_b32_e32 v0, 16, v6
-; GFX9-DL-NEXT: v_lshrrev_b32_e32 v9, 8, v1
-; GFX9-DL-NEXT: v_or_b32_sdwa v1, v5, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-DL-NEXT: v_or_b32_e32 v2, v2, v0
+; GFX9-DL-NEXT: v_or_b32_sdwa v7, v9, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
 ; GFX9-DL-NEXT: v_lshrrev_b32_e32 v5, 8, v1
+; GFX9-DL-NEXT: v_or_b32_e32 v2, v2, v0
 ; GFX9-DL-NEXT: v_lshrrev_b64 v[0:1], 24, v[0:1]
-; GFX9-DL-NEXT: v_lshrrev_b32_e32 v2, 8, v2
+; GFX9-DL-NEXT: v_lshrrev_b32_e32 v1, 8, v2
 ; GFX9-DL-NEXT: s_waitcnt vmcnt(0)
-; GFX9-DL-NEXT: v_add_u16_e32 v1, v7, v4
-; GFX9-DL-NEXT: v_add_u16_e32 v1, v1, v2
+; GFX9-DL-NEXT: v_add_u16_e32 v2, v7, v4
+; GFX9-DL-NEXT: v_add_u16_e32 v1, v2, v1
 ; GFX9-DL-NEXT: v_add_u16_e32 v1, v1, v6
 ; GFX9-DL-NEXT: v_add_u16_e32 v0, v1, v0
 ; GFX9-DL-NEXT: v_mad_legacy_u16 v0, v16, v18, v0
 ; GFX9-DL-NEXT: v_add_u16_e32 v0, v0, v5
 ; GFX9-DL-NEXT: v_mad_legacy_u16 v0, v15, v17, v0
-; GFX9-DL-NEXT: v_add_u16_e32 v0, v0, v9
+; GFX9-DL-NEXT: v_add_u16_e32 v0, v0, v8
 ; GFX9-DL-NEXT: global_store_byte v3, v0, s[2:3]
 ; GFX9-DL-NEXT: s_endpgm
 ;
@@ -3209,68 +3160,67 @@
 ; GFX10-DL-XNACK-NEXT: v_lshrrev_b32_e32 v15, 12, v2
 ; GFX10-DL-XNACK-NEXT: v_lshrrev_b32_e32 v9, 8, v1
 ; GFX10-DL-XNACK-NEXT: v_lshrrev_b32_e32 v16, 8, v2
-; GFX10-DL-XNACK-NEXT: v_lshrrev_b32_e32 v6, 28, v1
+; GFX10-DL-XNACK-NEXT: v_lshrrev_b32_e32 v10, 4, v1
 ; GFX10-DL-XNACK-NEXT: v_lshlrev_b16 v8, 12, v8
 ; GFX10-DL-XNACK-NEXT: v_lshlrev_b16 v15, 12, v15
-; GFX10-DL-XNACK-NEXT: v_lshrrev_b32_e32 v13, 28, v2
-; GFX10-DL-XNACK-NEXT: v_lshrrev_b32_e32 v10, 4, v1
 ; GFX10-DL-XNACK-NEXT: v_lshrrev_b32_e32 v17, 4, v2
 ; GFX10-DL-XNACK-NEXT: v_lshlrev_b16 v9, 12, v9
 ; GFX10-DL-XNACK-NEXT: v_lshlrev_b16 v16, 12, v16
 ; GFX10-DL-XNACK-NEXT: v_ashrrev_i16 v8, 12, v8
 ; GFX10-DL-XNACK-NEXT: v_ashrrev_i16 v15, 12, v15
 ; GFX10-DL-XNACK-NEXT: v_lshrrev_b32_e32 v0, 20, v1
-; GFX10-DL-XNACK-NEXT: v_lshrrev_b32_e32 v7, 24, v1
+; GFX10-DL-XNACK-NEXT: v_lshrrev_b32_e32 v6, 28, v1
 ; GFX10-DL-XNACK-NEXT: v_lshrrev_b32_e32 v11, 20, v2
-; GFX10-DL-XNACK-NEXT: v_lshrrev_b32_e32 v14, 24, v2
-; GFX10-DL-XNACK-NEXT: v_lshlrev_b16 v6, 12, v6
-; GFX10-DL-XNACK-NEXT: v_lshlrev_b16 v13, 12, v13
+; GFX10-DL-XNACK-NEXT: v_lshrrev_b32_e32 v13, 28, v2
 ; GFX10-DL-XNACK-NEXT: v_lshlrev_b16 v10, 12, v10
 ; GFX10-DL-XNACK-NEXT: v_lshlrev_b16 v17, 12, v17
 ; GFX10-DL-XNACK-NEXT: v_ashrrev_i16 v9, 12, v9
 ; GFX10-DL-XNACK-NEXT: v_ashrrev_i16 v16, 12, v16
 ; GFX10-DL-XNACK-NEXT: v_mul_lo_u16 v8, v8, v15
 ; GFX10-DL-XNACK-NEXT: v_lshrrev_b32_e32 v5, 16, v1
+; GFX10-DL-XNACK-NEXT: v_lshrrev_b32_e32 v7, 24, v1
 ; GFX10-DL-XNACK-NEXT: v_lshrrev_b32_e32 v12, 16, v2
-; GFX10-DL-XNACK-NEXT: v_lshlrev_b16 v7, 12, v7
-; GFX10-DL-XNACK-NEXT: v_lshlrev_b16 v0, 12, v0
-; GFX10-DL-XNACK-NEXT: v_lshlrev_b16 v14, 12, v14
-; GFX10-DL-XNACK-NEXT: v_lshlrev_b16 v11, 12, v11
-; GFX10-DL-XNACK-NEXT: v_ashrrev_i16 v6, 12, v6
-; GFX10-DL-XNACK-NEXT: v_ashrrev_i16 v13, 12, v13
+; GFX10-DL-XNACK-NEXT: v_lshrrev_b32_e32 v14, 24, v2
 ; GFX10-DL-XNACK-NEXT: v_lshlrev_b16 v1, 12, v1
 ; GFX10-DL-XNACK-NEXT: v_lshlrev_b16 v2, 12, v2
+; GFX10-DL-XNACK-NEXT: v_lshlrev_b16 v6, 12, v6
+; GFX10-DL-XNACK-NEXT: v_lshlrev_b16 v0, 12, v0
+; GFX10-DL-XNACK-NEXT: v_lshlrev_b16 v13, 12, v13
+; GFX10-DL-XNACK-NEXT: v_lshlrev_b16 v11, 12, v11
 ; GFX10-DL-XNACK-NEXT: v_ashrrev_i16 v10, 12, v10
 ; GFX10-DL-XNACK-NEXT: v_ashrrev_i16 v15, 12, v17
 ; GFX10-DL-XNACK-NEXT: v_mul_lo_u16 v9, v9, v16
 ; GFX10-DL-XNACK-NEXT: v_lshlrev_b16 v8, 8, v8
+; GFX10-DL-XNACK-NEXT: v_ashrrev_i16 v1, 12, v1
+; GFX10-DL-XNACK-NEXT: v_lshlrev_b16 v7, 12, v7
 ; GFX10-DL-XNACK-NEXT: v_lshlrev_b16 v5, 12, v5
+; GFX10-DL-XNACK-NEXT: v_ashrrev_i16 v2, 12, v2
+; GFX10-DL-XNACK-NEXT: v_lshlrev_b16 v14, 12, v14
 ; GFX10-DL-XNACK-NEXT: v_lshlrev_b16 v12, 12, v12
-; GFX10-DL-XNACK-NEXT: v_ashrrev_i16 v7, 12, v7
+; GFX10-DL-XNACK-NEXT: v_ashrrev_i16 v6, 12, v6
 ; GFX10-DL-XNACK-NEXT: v_ashrrev_i16 v0, 12, v0
-; GFX10-DL-XNACK-NEXT: v_ashrrev_i16 v14, 12, v14
+; GFX10-DL-XNACK-NEXT: v_ashrrev_i16 v13, 12, v13
 ; GFX10-DL-XNACK-NEXT: v_ashrrev_i16 v11, 12, v11
-; GFX10-DL-XNACK-NEXT: v_mul_lo_u16 v6, v6, v13
-; GFX10-DL-XNACK-NEXT: v_ashrrev_i16 v1, 12, v1
-; GFX10-DL-XNACK-NEXT: v_ashrrev_i16 v2, 12, v2
 ; GFX10-DL-XNACK-NEXT: v_mul_lo_u16 v10, v10, v15
 ; GFX10-DL-XNACK-NEXT: v_or_b32_sdwa v8, v9, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX10-DL-XNACK-NEXT: v_ashrrev_i16 v7, 12, v7
 ; GFX10-DL-XNACK-NEXT: v_ashrrev_i16 v5, 12, v5
+; GFX10-DL-XNACK-NEXT: v_ashrrev_i16 v14, 12, v14
 ; GFX10-DL-XNACK-NEXT: v_ashrrev_i16 v12, 12, v12
-; GFX10-DL-XNACK-NEXT: v_mul_lo_u16 v9, v0, v11
+; GFX10-DL-XNACK-NEXT: v_mul_lo_u16 v1, v1, v2
+; GFX10-DL-XNACK-NEXT: v_mul_lo_u16 v2, v0, v11
+; GFX10-DL-XNACK-NEXT: v_mul_lo_u16 v6, v6, v13
+; GFX10-DL-XNACK-NEXT: v_lshlrev_b16 v9, 8, v10
+; GFX10-DL-XNACK-NEXT: v_lshlrev_b32_e32 v0, 16, v8
+; GFX10-DL-XNACK-NEXT: v_mul_lo_u16 v10, v5, v12
 ; GFX10-DL-XNACK-NEXT: v_mul_lo_u16 v11, v7, v14
+; GFX10-DL-XNACK-NEXT: v_lshlrev_b16 v2, 8, v2
 ; GFX10-DL-XNACK-NEXT: v_lshlrev_b16 v6, 8, v6
-; GFX10-DL-XNACK-NEXT: v_lshlrev_b16 v10, 8, v10
-; GFX10-DL-XNACK-NEXT: v_lshlrev_b32_e32 v0, 16, v8
-; GFX10-DL-XNACK-NEXT: v_mul_lo_u16 v1, v1, v2
-; GFX10-DL-XNACK-NEXT: v_mul_lo_u16 v2, v5, v12
-; GFX10-DL-XNACK-NEXT: v_lshlrev_b16 v9, 8, v9
-; GFX10-DL-XNACK-NEXT: v_or_b32_sdwa v6, v11, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX10-DL-XNACK-NEXT: v_or_b32_sdwa v11, v10, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX10-DL-XNACK-NEXT: v_or_b32_sdwa v1, v1, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX10-DL-XNACK-NEXT: v_or_b32_sdwa v2, v2, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX10-DL-XNACK-NEXT: v_lshlrev_b32_e32 v9, 16, v6
-; GFX10-DL-XNACK-NEXT: v_lshrrev_b32_e32 v10, 8, v11
+; GFX10-DL-XNACK-NEXT: v_or_b32_sdwa v13, v9, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX10-DL-XNACK-NEXT: v_or_b32_sdwa v1, v1, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX10-DL-XNACK-NEXT: v_or_b32_sdwa v2, v10, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX10-DL-XNACK-NEXT: v_or_b32_sdwa v9, v11, v6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX10-DL-XNACK-NEXT: v_lshrrev_b32_e32 v10, 8, v13
 ; GFX10-DL-XNACK-NEXT: s_waitcnt vmcnt(0)
 ; GFX10-DL-XNACK-NEXT: v_add_nc_u16 v3, v1, v3
 ; GFX10-DL-XNACK-NEXT: v_or_b32_sdwa v1, v2, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
@@ -3309,78 +3259,77 @@
 ; GFX10-DL-NOXNACK-NEXT: s_waitcnt vmcnt(1)
 ; GFX10-DL-NOXNACK-NEXT: v_lshrrev_b32_e32 v15, 12, v0
 ; GFX10-DL-NOXNACK-NEXT: v_lshrrev_b32_e32 v9, 8, v1
-; GFX10-DL-NOXNACK-NEXT: v_lshrrev_b32_e32 v11, 20, v0
-; GFX10-DL-NOXNACK-NEXT: v_lshrrev_b32_e32 v12, 16, v0
-; GFX10-DL-NOXNACK-NEXT: v_lshrrev_b32_e32 v13, 28, v0
-; GFX10-DL-NOXNACK-NEXT: v_lshrrev_b32_e32 v14, 24, v0
 ; GFX10-DL-NOXNACK-NEXT: v_lshrrev_b32_e32 v16, 8, v0
-; GFX10-DL-NOXNACK-NEXT: v_lshrrev_b32_e32 v17, 4, v0
-; GFX10-DL-NOXNACK-NEXT: v_lshlrev_b16 v0, 12, v0
+; GFX10-DL-NOXNACK-NEXT: v_lshrrev_b32_e32 v10, 4, v1
 ; GFX10-DL-NOXNACK-NEXT: v_lshlrev_b16 v8, 12, v8
 ; GFX10-DL-NOXNACK-NEXT: v_lshlrev_b16 v15, 12, v15
-; GFX10-DL-NOXNACK-NEXT: v_lshrrev_b32_e32 v6, 28, v1
-; GFX10-DL-NOXNACK-NEXT: v_lshrrev_b32_e32 v10, 4, v1
+; GFX10-DL-NOXNACK-NEXT: v_lshrrev_b32_e32 v17, 4, v0
 ; GFX10-DL-NOXNACK-NEXT: v_lshlrev_b16 v9, 12, v9
-; GFX10-DL-NOXNACK-NEXT: v_ashrrev_i16 v18, 12, v0
-; GFX10-DL-NOXNACK-NEXT: v_lshlrev_b16 v0, 12, v16
+; GFX10-DL-NOXNACK-NEXT: v_lshlrev_b16 v16, 12, v16
 ; GFX10-DL-NOXNACK-NEXT: v_ashrrev_i16 v8, 12, v8
 ; GFX10-DL-NOXNACK-NEXT: v_ashrrev_i16 v15, 12, v15
 ; GFX10-DL-NOXNACK-NEXT: v_lshrrev_b32_e32 v3, 20, v1
-; GFX10-DL-NOXNACK-NEXT: v_lshrrev_b32_e32 v7, 24, v1
-; GFX10-DL-NOXNACK-NEXT: v_lshlrev_b16 v6, 12, v6
-; GFX10-DL-NOXNACK-NEXT: v_lshlrev_b16 v13, 12, v13
+; GFX10-DL-NOXNACK-NEXT: v_lshrrev_b32_e32 v6, 28, v1
+; GFX10-DL-NOXNACK-NEXT: v_lshrrev_b32_e32 v11, 20, v0
+; GFX10-DL-NOXNACK-NEXT: v_lshrrev_b32_e32 v13, 28, v0
 ; GFX10-DL-NOXNACK-NEXT: v_lshlrev_b16 v10, 12, v10
 ; GFX10-DL-NOXNACK-NEXT: v_lshlrev_b16 v17, 12, v17
 ; GFX10-DL-NOXNACK-NEXT: v_ashrrev_i16 v9, 12, v9
-; GFX10-DL-NOXNACK-NEXT: v_ashrrev_i16 v0, 12, v0
+; GFX10-DL-NOXNACK-NEXT: v_ashrrev_i16 v16, 12, v16
 ; GFX10-DL-NOXNACK-NEXT: v_mul_lo_u16 v8, v8, v15
 ; GFX10-DL-NOXNACK-NEXT: v_lshrrev_b32_e32 v5, 16, v1
-; GFX10-DL-NOXNACK-NEXT: v_lshlrev_b16 v7, 12, v7
+; GFX10-DL-NOXNACK-NEXT: v_lshrrev_b32_e32 v7, 24, v1
+; GFX10-DL-NOXNACK-NEXT: v_lshrrev_b32_e32 v12, 16, v0
+; GFX10-DL-NOXNACK-NEXT: v_lshrrev_b32_e32 v14, 24, v0
+; GFX10-DL-NOXNACK-NEXT: v_lshlrev_b16 v6, 12, v6
 ; GFX10-DL-NOXNACK-NEXT: v_lshlrev_b16 v3, 12, v3
-; GFX10-DL-NOXNACK-NEXT: v_lshlrev_b16 v14, 12, v14
+; GFX10-DL-NOXNACK-NEXT: v_lshlrev_b16 v13, 12, v13
 ; GFX10-DL-NOXNACK-NEXT: v_lshlrev_b16 v11, 12, v11
-; GFX10-DL-NOXNACK-NEXT: v_ashrrev_i16 v6, 12, v6
-; GFX10-DL-NOXNACK-NEXT: v_ashrrev_i16 v13, 12, v13
 ; GFX10-DL-NOXNACK-NEXT: v_lshlrev_b16 v1, 12, v1
+; GFX10-DL-NOXNACK-NEXT: v_lshlrev_b16 v0, 12, v0
 ; GFX10-DL-NOXNACK-NEXT: v_ashrrev_i16 v10, 12, v10
 ; GFX10-DL-NOXNACK-NEXT: v_ashrrev_i16 v15, 12, v17
-; GFX10-DL-NOXNACK-NEXT: v_mul_lo_u16 v0, v9, v0
+; GFX10-DL-NOXNACK-NEXT: v_mul_lo_u16 v9, v9, v16
 ; GFX10-DL-NOXNACK-NEXT: v_lshlrev_b16 v8, 8, v8
+; GFX10-DL-NOXNACK-NEXT: v_lshlrev_b16 v7, 12, v7
 ; GFX10-DL-NOXNACK-NEXT: v_lshlrev_b16 v5, 12, v5
+; GFX10-DL-NOXNACK-NEXT: v_lshlrev_b16 v14, 12, v14
 ; GFX10-DL-NOXNACK-NEXT: v_lshlrev_b16 v12, 12, v12
-; GFX10-DL-NOXNACK-NEXT: v_ashrrev_i16 v7, 12, v7
+; GFX10-DL-NOXNACK-NEXT: v_ashrrev_i16 v6, 12, v6
 ; GFX10-DL-NOXNACK-NEXT: v_ashrrev_i16 v3, 12, v3
-; GFX10-DL-NOXNACK-NEXT: v_ashrrev_i16 v14, 12, v14
-; GFX10-DL-NOXNACK-NEXT: v_ashrrev_i16 v9, 12, v11
-; GFX10-DL-NOXNACK-NEXT: v_mul_lo_u16 v6, v6, v13
+; GFX10-DL-NOXNACK-NEXT: v_ashrrev_i16 v13, 12, v13
+; GFX10-DL-NOXNACK-NEXT: v_ashrrev_i16 v11, 12, v11
 ; GFX10-DL-NOXNACK-NEXT: v_ashrrev_i16 v1, 12, v1
+; GFX10-DL-NOXNACK-NEXT: v_ashrrev_i16 v0, 12, v0
 ; GFX10-DL-NOXNACK-NEXT: v_mul_lo_u16 v10, v10, v15
-; GFX10-DL-NOXNACK-NEXT: v_or_b32_sdwa v8, v0, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX10-DL-NOXNACK-NEXT: v_or_b32_sdwa v8, v9, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX10-DL-NOXNACK-NEXT: v_ashrrev_i16 v7, 12, v7
 ; GFX10-DL-NOXNACK-NEXT: v_ashrrev_i16 v5, 12, v5
-; GFX10-DL-NOXNACK-NEXT: v_ashrrev_i16 v11, 12, v12
-; GFX10-DL-NOXNACK-NEXT: v_mul_lo_u16 v3, v3, v9
-; GFX10-DL-NOXNACK-NEXT: v_mul_lo_u16 v9, v7, v14
-; GFX10-DL-NOXNACK-NEXT: v_lshlrev_b16 v6, 8, v6
-; GFX10-DL-NOXNACK-NEXT: v_lshlrev_b16 v10, 8, v10
+; GFX10-DL-NOXNACK-NEXT: v_ashrrev_i16 v14, 12, v14
+; GFX10-DL-NOXNACK-NEXT: v_ashrrev_i16 v12, 12, v12
+; GFX10-DL-NOXNACK-NEXT: v_mul_lo_u16 v3, v3, v11
+; GFX10-DL-NOXNACK-NEXT: v_mul_lo_u16 v6, v6, v13
+; GFX10-DL-NOXNACK-NEXT: v_mul_lo_u16 v1, v1, v0
+; GFX10-DL-NOXNACK-NEXT: v_lshlrev_b16 v9, 8, v10
 ; GFX10-DL-NOXNACK-NEXT: v_lshlrev_b32_e32 v0, 16, v8
-; GFX10-DL-NOXNACK-NEXT: v_mul_lo_u16 v1, v1, v18
-; GFX10-DL-NOXNACK-NEXT: v_mul_lo_u16 v12, v5, v11
+; GFX10-DL-NOXNACK-NEXT: v_mul_lo_u16 v10, v5, v12
+; GFX10-DL-NOXNACK-NEXT: v_mul_lo_u16 v11, v7, v14
 ; GFX10-DL-NOXNACK-NEXT: v_lshlrev_b16 v3, 8, v3
-; GFX10-DL-NOXNACK-NEXT: v_or_b32_sdwa v6, v9, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX10-DL-NOXNACK-NEXT: v_or_b32_sdwa v9, v10, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX10-DL-NOXNACK-NEXT: v_or_b32_sdwa v1, v1, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX10-DL-NOXNACK-NEXT: v_or_b32_sdwa v3, v12, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX10-DL-NOXNACK-NEXT: v_lshlrev_b32_e32 v10, 16, v6
-; GFX10-DL-NOXNACK-NEXT: v_lshrrev_b32_e32 v9, 8, v9
+; GFX10-DL-NOXNACK-NEXT: v_lshlrev_b16 v6, 8, v6
+; GFX10-DL-NOXNACK-NEXT: v_or_b32_sdwa v13, v9, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX10-DL-NOXNACK-NEXT: v_or_b32_sdwa v1, v1, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX10-DL-NOXNACK-NEXT: v_or_b32_sdwa v3, v10, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX10-DL-NOXNACK-NEXT: v_or_b32_sdwa v9, v11, v6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX10-DL-NOXNACK-NEXT: v_lshrrev_b32_e32 v10, 8, v13
 ; GFX10-DL-NOXNACK-NEXT: s_waitcnt vmcnt(0)
 ; GFX10-DL-NOXNACK-NEXT: v_add_nc_u16 v2, v1, v2
-; GFX10-DL-NOXNACK-NEXT: v_or_b32_sdwa v1, v3, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX10-DL-NOXNACK-NEXT: v_add_nc_u16 v9, v2, v9
+; GFX10-DL-NOXNACK-NEXT: v_or_b32_sdwa v1, v3, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX10-DL-NOXNACK-NEXT: v_add_nc_u16 v9, v2, v10
 ; GFX10-DL-NOXNACK-NEXT: v_lshrrev_b64 v[2:3], 24, v[0:1]
 ; GFX10-DL-NOXNACK-NEXT: v_lshrrev_b32_e32 v1, 8, v1
 ; GFX10-DL-NOXNACK-NEXT: v_add_nc_u16 v0, v9, v8
 ; GFX10-DL-NOXNACK-NEXT: v_add_nc_u16 v0,
v0, v2 -; GFX10-DL-NOXNACK-NEXT: v_mad_u16 v0, v5, v11, v0 +; GFX10-DL-NOXNACK-NEXT: v_mad_u16 v0, v5, v12, v0 ; GFX10-DL-NOXNACK-NEXT: v_add_nc_u16 v0, v0, v1 ; GFX10-DL-NOXNACK-NEXT: v_lshrrev_b32_e32 v1, 8, v6 ; GFX10-DL-NOXNACK-NEXT: v_mad_u16 v0, v7, v14, v0 diff --git a/llvm/test/CodeGen/AMDGPU/idot8u.ll b/llvm/test/CodeGen/AMDGPU/idot8u.ll --- a/llvm/test/CodeGen/AMDGPU/idot8u.ll +++ b/llvm/test/CodeGen/AMDGPU/idot8u.ll @@ -2119,45 +2119,32 @@ ; GFX7-NEXT: buffer_load_dword v0, v[0:1], s[8:11], 0 addr64 ; GFX7-NEXT: s_mov_b32 s2, -1 ; GFX7-NEXT: buffer_load_ushort v1, off, s[0:3], 0 -; GFX7-NEXT: s_mov_b32 s4, 0xf0000 ; GFX7-NEXT: s_addc_u32 s13, s13, 0 ; GFX7-NEXT: s_waitcnt vmcnt(2) -; GFX7-NEXT: v_bfe_u32 v8, v2, 20, 4 -; GFX7-NEXT: v_lshlrev_b32_e32 v9, 12, v2 ; GFX7-NEXT: v_lshrrev_b32_e32 v3, 28, v2 ; GFX7-NEXT: v_bfe_u32 v4, v2, 24, 4 -; GFX7-NEXT: v_bfe_u32 v5, v2, 12, 4 -; GFX7-NEXT: v_bfe_u32 v6, v2, 8, 4 -; GFX7-NEXT: v_and_b32_e32 v7, 15, v2 -; GFX7-NEXT: v_alignbit_b32 v2, v8, v2, 16 -; GFX7-NEXT: v_and_b32_e32 v8, s4, v9 +; GFX7-NEXT: v_bfe_u32 v5, v2, 20, 4 +; GFX7-NEXT: v_bfe_u32 v6, v2, 16, 4 +; GFX7-NEXT: v_bfe_u32 v7, v2, 12, 4 +; GFX7-NEXT: v_bfe_u32 v8, v2, 8, 4 +; GFX7-NEXT: v_bfe_u32 v9, v2, 4, 4 +; GFX7-NEXT: v_and_b32_e32 v2, 15, v2 ; GFX7-NEXT: s_waitcnt vmcnt(1) -; GFX7-NEXT: v_lshlrev_b32_e32 v9, 12, v0 -; GFX7-NEXT: v_and_b32_e32 v14, 15, v0 -; GFX7-NEXT: v_or_b32_e32 v7, v7, v8 -; GFX7-NEXT: v_and_b32_e32 v8, s4, v9 -; GFX7-NEXT: v_or_b32_e32 v8, v14, v8 -; GFX7-NEXT: v_lshrrev_b32_e32 v9, 16, v7 -; GFX7-NEXT: v_and_b32_e32 v7, 15, v7 -; GFX7-NEXT: v_lshrrev_b32_e32 v14, 16, v8 -; GFX7-NEXT: v_and_b32_e32 v8, 15, v8 -; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_mad_u32_u24 v1, v7, v8, v1 -; GFX7-NEXT: v_bfe_u32 v13, v0, 8, 4 -; GFX7-NEXT: v_bfe_u32 v15, v0, 20, 4 -; GFX7-NEXT: v_mad_u32_u24 v1, v9, v14, v1 ; GFX7-NEXT: v_lshrrev_b32_e32 v10, 28, v0 ; GFX7-NEXT: v_bfe_u32 v11, v0, 24, 4 -; GFX7-NEXT: v_bfe_u32 v12, v0, 12, 4 -; GFX7-NEXT: v_alignbit_b32 v0, v15, v0, 16 -; GFX7-NEXT: v_mad_u32_u24 v1, v6, v13, v1 -; GFX7-NEXT: v_lshrrev_b32_e32 v16, 16, v2 -; GFX7-NEXT: v_and_b32_e32 v2, 15, v2 -; GFX7-NEXT: v_lshrrev_b32_e32 v15, 16, v0 +; GFX7-NEXT: v_bfe_u32 v12, v0, 20, 4 +; GFX7-NEXT: v_bfe_u32 v13, v0, 16, 4 +; GFX7-NEXT: v_bfe_u32 v14, v0, 12, 4 +; GFX7-NEXT: v_bfe_u32 v15, v0, 8, 4 +; GFX7-NEXT: v_bfe_u32 v16, v0, 4, 4 ; GFX7-NEXT: v_and_b32_e32 v0, 15, v0 -; GFX7-NEXT: v_mad_u32_u24 v1, v5, v12, v1 +; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: v_mad_u32_u24 v0, v2, v0, v1 -; GFX7-NEXT: v_mad_u32_u24 v0, v16, v15, v0 +; GFX7-NEXT: v_mad_u32_u24 v0, v9, v16, v0 +; GFX7-NEXT: v_mad_u32_u24 v0, v8, v15, v0 +; GFX7-NEXT: v_mad_u32_u24 v0, v7, v14, v0 +; GFX7-NEXT: v_mad_u32_u24 v0, v6, v13, v0 +; GFX7-NEXT: v_mad_u32_u24 v0, v5, v12, v0 ; GFX7-NEXT: v_mad_u32_u24 v0, v4, v11, v0 ; GFX7-NEXT: v_mad_u32_u24 v0, v3, v10, v0 ; GFX7-NEXT: buffer_store_short v0, off, s[0:3], 0 @@ -2480,74 +2467,45 @@ ; GFX7-NEXT: buffer_load_dword v0, v[0:1], s[8:11], 0 addr64 ; GFX7-NEXT: s_mov_b32 s2, -1 ; GFX7-NEXT: buffer_load_ubyte v1, off, s[0:3], 0 -; GFX7-NEXT: s_movk_i32 s4, 0xf00 -; GFX7-NEXT: s_movk_i32 s5, 0xf0f +; GFX7-NEXT: s_movk_i32 s4, 0xf0f ; GFX7-NEXT: s_addc_u32 s13, s13, 0 ; GFX7-NEXT: s_waitcnt vmcnt(2) -; GFX7-NEXT: v_lshrrev_b32_e32 v4, 4, v2 -; GFX7-NEXT: v_lshrrev_b32_e32 v8, 12, v2 -; GFX7-NEXT: v_bfe_u32 v3, v2, 8, 4 -; GFX7-NEXT: v_lshrrev_b32_e32 v6, 28, v2 -; GFX7-NEXT: v_bfe_u32 v7, v2, 16, 4 -; GFX7-NEXT: v_lshlrev_b32_e32 v9, 4, v2 +; 
GFX7-NEXT: v_and_b32_e32 v6, 15, v2 +; GFX7-NEXT: v_bfe_u32 v5, v2, 4, 4 ; GFX7-NEXT: s_waitcnt vmcnt(1) -; GFX7-NEXT: v_lshrrev_b32_e32 v11, 4, v0 -; GFX7-NEXT: v_lshrrev_b32_e32 v13, 28, v0 -; GFX7-NEXT: v_and_b32_e32 v8, s4, v8 -; GFX7-NEXT: v_and_b32_e32 v4, s4, v4 -; GFX7-NEXT: v_and_b32_e32 v5, 15, v2 -; GFX7-NEXT: v_bfe_u32 v10, v0, 8, 4 -; GFX7-NEXT: v_and_b32_e32 v12, 15, v0 -; GFX7-NEXT: v_bfe_u32 v14, v0, 16, 4 -; GFX7-NEXT: v_lshrrev_b32_e32 v15, 12, v0 -; GFX7-NEXT: v_alignbit_b32 v2, v6, v2, 24 -; GFX7-NEXT: v_and_b32_e32 v6, s4, v9 -; GFX7-NEXT: v_lshlrev_b32_e32 v9, 4, v0 -; GFX7-NEXT: v_or_b32_e32 v7, v7, v8 -; GFX7-NEXT: v_or_b32_e32 v3, v3, v4 -; GFX7-NEXT: v_alignbit_b32 v0, v13, v0, 24 -; GFX7-NEXT: v_and_b32_e32 v8, s4, v11 -; GFX7-NEXT: v_or_b32_e32 v5, v5, v6 -; GFX7-NEXT: v_and_b32_e32 v4, s4, v15 -; GFX7-NEXT: v_and_b32_e32 v6, s4, v9 -; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v3 -; GFX7-NEXT: v_and_b32_e32 v0, s5, v0 -; GFX7-NEXT: v_or_b32_e32 v8, v10, v8 -; GFX7-NEXT: v_and_b32_e32 v2, s5, v2 -; GFX7-NEXT: v_or_b32_e32 v4, v14, v4 -; GFX7-NEXT: v_or_b32_e32 v6, v12, v6 -; GFX7-NEXT: v_or_b32_e32 v3, v5, v3 -; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v0 -; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v8 -; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; GFX7-NEXT: v_or_b32_e32 v0, v4, v0 -; GFX7-NEXT: v_or_b32_e32 v4, v6, v5 -; GFX7-NEXT: v_or_b32_e32 v2, v7, v2 -; GFX7-NEXT: v_and_b32_e32 v7, 15, v3 -; GFX7-NEXT: v_and_b32_e32 v13, 15, v4 -; GFX7-NEXT: v_bfe_u32 v8, v3, 8, 4 -; GFX7-NEXT: v_bfe_u32 v14, v4, 8, 4 +; GFX7-NEXT: v_and_b32_e32 v13, 15, v0 +; GFX7-NEXT: v_bfe_u32 v7, v2, 12, 4 +; GFX7-NEXT: v_bfe_u32 v12, v0, 4, 4 +; GFX7-NEXT: v_bfe_u32 v14, v0, 12, 4 ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_mad_u32_u24 v1, v7, v13, v1 -; GFX7-NEXT: v_lshrrev_b32_e32 v5, 24, v3 -; GFX7-NEXT: v_bfe_u32 v3, v3, 16, 4 -; GFX7-NEXT: v_lshrrev_b32_e32 v11, 24, v4 -; GFX7-NEXT: v_bfe_u32 v4, v4, 16, 4 -; GFX7-NEXT: v_mad_u32_u24 v1, v8, v14, v1 -; GFX7-NEXT: v_mad_u32_u24 v1, v3, v4, v1 -; GFX7-NEXT: v_and_b32_e32 v9, 15, v2 -; GFX7-NEXT: v_and_b32_e32 v15, 15, v0 -; GFX7-NEXT: v_mad_u32_u24 v1, v5, v11, v1 -; GFX7-NEXT: v_bfe_u32 v10, v2, 8, 4 -; GFX7-NEXT: v_bfe_u32 v16, v0, 8, 4 -; GFX7-NEXT: v_mad_u32_u24 v1, v9, v15, v1 -; GFX7-NEXT: v_lshrrev_b32_e32 v6, 24, v2 -; GFX7-NEXT: v_bfe_u32 v2, v2, 16, 4 -; GFX7-NEXT: v_lshrrev_b32_e32 v12, 24, v0 -; GFX7-NEXT: v_bfe_u32 v0, v0, 16, 4 -; GFX7-NEXT: v_mad_u32_u24 v1, v10, v16, v1 +; GFX7-NEXT: v_mad_u32_u24 v1, v6, v13, v1 +; GFX7-NEXT: v_bfe_u32 v4, v2, 8, 4 +; GFX7-NEXT: v_bfe_u32 v11, v0, 8, 4 +; GFX7-NEXT: v_lshlrev_b32_e32 v7, 24, v7 +; GFX7-NEXT: v_lshlrev_b32_e32 v14, 24, v14 +; GFX7-NEXT: v_mad_u32_u24 v1, v5, v12, v1 +; GFX7-NEXT: v_lshrrev_b32_e32 v8, 28, v2 +; GFX7-NEXT: v_alignbit_b32 v7, 0, v7, 24 +; GFX7-NEXT: v_alignbit_b32 v14, 0, v14, 24 +; GFX7-NEXT: v_mad_u32_u24 v1, v4, v11, v1 +; GFX7-NEXT: v_bfe_u32 v3, v2, 20, 4 +; GFX7-NEXT: v_bfe_u32 v9, v2, 16, 4 +; GFX7-NEXT: v_lshrrev_b32_e32 v15, 28, v0 +; GFX7-NEXT: v_alignbit_b32 v2, v8, v2, 24 +; GFX7-NEXT: v_bfe_u32 v8, v0, 16, 4 +; GFX7-NEXT: v_mad_u32_u24 v1, v7, v14, v1 +; GFX7-NEXT: v_bfe_u32 v10, v0, 20, 4 +; GFX7-NEXT: v_alignbit_b32 v0, v15, v0, 24 +; GFX7-NEXT: v_mad_u32_u24 v1, v9, v8, v1 +; GFX7-NEXT: v_and_b32_e32 v16, s4, v2 +; GFX7-NEXT: v_and_b32_e32 v2, 15, v2 +; GFX7-NEXT: v_and_b32_e32 v6, s4, v0 +; GFX7-NEXT: v_and_b32_e32 v0, 15, v0 +; GFX7-NEXT: v_mad_u32_u24 v1, v3, v10, v1 +; GFX7-NEXT: v_bfe_u32 v13, v16, 8, 8 +; GFX7-NEXT: v_bfe_u32 v5, v6, 8, 8 
 ; GFX7-NEXT: v_mad_u32_u24 v0, v2, v0, v1
-; GFX7-NEXT: v_mad_u32_u24 v0, v6, v12, v0
+; GFX7-NEXT: v_mad_u32_u24 v0, v13, v5, v0
 ; GFX7-NEXT: buffer_store_byte v0, off, s[0:3], 0
 ; GFX7-NEXT: s_endpgm
 ;
@@ -2641,45 +2599,44 @@
 ; GFX9-NEXT: global_load_ubyte v4, v3, s[2:3]
 ; GFX9-NEXT: s_addc_u32 s9, s9, 0
 ; GFX9-NEXT: s_waitcnt vmcnt(2)
+; GFX9-NEXT: v_bfe_u32 v5, v1, 16, 4
 ; GFX9-NEXT: v_bfe_u32 v0, v1, 20, 4
 ; GFX9-NEXT: v_bfe_u32 v6, v1, 24, 4
 ; GFX9-NEXT: v_lshrrev_b32_e32 v7, 28, v1
+; GFX9-NEXT: v_bfe_u32 v8, v1, 8, 4
+; GFX9-NEXT: v_bfe_u32 v9, v1, 12, 4
 ; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: v_bfe_u32 v11, v2, 16, 4
 ; GFX9-NEXT: v_bfe_u32 v12, v2, 20, 4
 ; GFX9-NEXT: v_bfe_u32 v13, v2, 24, 4
 ; GFX9-NEXT: v_lshrrev_b32_e32 v14, 28, v2
-; GFX9-NEXT: v_bfe_u32 v5, v1, 16, 4
-; GFX9-NEXT: v_bfe_u32 v8, v1, 8, 4
-; GFX9-NEXT: v_bfe_u32 v9, v1, 12, 4
-; GFX9-NEXT: v_and_b32_e32 v10, 15, v1
-; GFX9-NEXT: v_bfe_u32 v1, v1, 4, 4
-; GFX9-NEXT: v_bfe_u32 v11, v2, 16, 4
 ; GFX9-NEXT: v_bfe_u32 v15, v2, 8, 4
 ; GFX9-NEXT: v_bfe_u32 v16, v2, 12, 4
+; GFX9-NEXT: v_and_b32_e32 v10, 15, v1
+; GFX9-NEXT: v_bfe_u32 v1, v1, 4, 4
 ; GFX9-NEXT: v_and_b32_e32 v17, 15, v2
 ; GFX9-NEXT: v_bfe_u32 v2, v2, 4, 4
+; GFX9-NEXT: v_mul_lo_u16_e32 v18, v5, v11
 ; GFX9-NEXT: v_mul_lo_u16_sdwa v0, v0, v12 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
 ; GFX9-NEXT: v_mul_lo_u16_e32 v12, v6, v13
 ; GFX9-NEXT: v_mul_lo_u16_sdwa v7, v7, v14 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX9-NEXT: v_mul_lo_u16_e32 v18, v5, v11
 ; GFX9-NEXT: v_mul_lo_u16_e32 v8, v8, v15
 ; GFX9-NEXT: v_mul_lo_u16_sdwa v9, v9, v16 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX9-NEXT: v_mul_lo_u16_e32 v10, v10, v17
 ; GFX9-NEXT: v_mul_lo_u16_sdwa v2, v1, v2 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_e32 v7, v12, v7
-; GFX9-NEXT: v_or_b32_e32 v1, v18, v0
+; GFX9-NEXT: v_or_b32_e32 v0, v18, v0
+; GFX9-NEXT: v_or_b32_sdwa v1, v12, v7 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
 ; GFX9-NEXT: v_or_b32_e32 v8, v8, v9
-; GFX9-NEXT: v_or_b32_e32 v9, v10, v2
-; GFX9-NEXT: v_lshlrev_b32_e32 v10, 16, v7
+; GFX9-NEXT: v_mul_lo_u16_e32 v10, v10, v17
+; GFX9-NEXT: v_or_b32_sdwa v1, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
 ; GFX9-NEXT: v_lshlrev_b32_e32 v0, 16, v8
-; GFX9-NEXT: v_or_b32_sdwa v1, v1, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_e32 v2, v2, v0
+; GFX9-NEXT: v_or_b32_e32 v9, v10, v2
 ; GFX9-NEXT: v_lshrrev_b32_e32 v10, 8, v1
+; GFX9-NEXT: v_or_b32_e32 v2, v2, v0
 ; GFX9-NEXT: v_lshrrev_b64 v[0:1], 24, v[0:1]
-; GFX9-NEXT: v_lshrrev_b32_e32 v2, 8, v2
+; GFX9-NEXT: v_lshrrev_b32_e32 v1, 8, v2
 ; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_add_u16_e32 v1, v9, v4
-; GFX9-NEXT: v_add_u16_e32 v1, v1, v2
+; GFX9-NEXT: v_add_u16_e32 v2, v9, v4
+; GFX9-NEXT: v_add_u16_e32 v1, v2, v1
 ; GFX9-NEXT: v_add_u16_e32 v1, v1, v8
 ; GFX9-NEXT: v_add_u16_e32 v0, v1, v0
 ; GFX9-NEXT: v_mad_legacy_u16 v0, v5, v11, v0
@@ -2707,45 +2664,44 @@
 ; GFX9-DL-NEXT: global_load_ubyte v4, v3, s[2:3]
 ; GFX9-DL-NEXT: s_addc_u32 s9, s9, 0
 ; GFX9-DL-NEXT: s_waitcnt vmcnt(2)
+; GFX9-DL-NEXT: v_bfe_u32 v5, v1, 16, 4
 ; GFX9-DL-NEXT: v_bfe_u32 v0, v1, 20, 4
 ; GFX9-DL-NEXT: v_bfe_u32 v6, v1, 24, 4
 ; GFX9-DL-NEXT: v_lshrrev_b32_e32 v7, 28, v1
+; GFX9-DL-NEXT: v_bfe_u32 v8, v1, 8, 4
+; GFX9-DL-NEXT: v_bfe_u32 v9, v1, 12, 4
 ; GFX9-DL-NEXT: s_waitcnt vmcnt(1)
+; GFX9-DL-NEXT: v_bfe_u32 v11, v2, 16, 4
 ; GFX9-DL-NEXT: v_bfe_u32 v12, v2, 20, 4
 ; GFX9-DL-NEXT: v_bfe_u32 v13, v2, 24, 4
 ; GFX9-DL-NEXT: v_lshrrev_b32_e32 v14, 28, v2
-; GFX9-DL-NEXT: v_bfe_u32 v5, v1, 16, 4
-; GFX9-DL-NEXT: v_bfe_u32 v8, v1, 8, 4
-; GFX9-DL-NEXT: v_bfe_u32 v9, v1, 12, 4
-; GFX9-DL-NEXT: v_and_b32_e32 v10, 15, v1
-; GFX9-DL-NEXT: v_bfe_u32 v1, v1, 4, 4
-; GFX9-DL-NEXT: v_bfe_u32 v11, v2, 16, 4
 ; GFX9-DL-NEXT: v_bfe_u32 v15, v2, 8, 4
 ; GFX9-DL-NEXT: v_bfe_u32 v16, v2, 12, 4
+; GFX9-DL-NEXT: v_and_b32_e32 v10, 15, v1
+; GFX9-DL-NEXT: v_bfe_u32 v1, v1, 4, 4
 ; GFX9-DL-NEXT: v_and_b32_e32 v17, 15, v2
 ; GFX9-DL-NEXT: v_bfe_u32 v2, v2, 4, 4
+; GFX9-DL-NEXT: v_mul_lo_u16_e32 v18, v5, v11
 ; GFX9-DL-NEXT: v_mul_lo_u16_sdwa v0, v0, v12 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
 ; GFX9-DL-NEXT: v_mul_lo_u16_e32 v12, v6, v13
 ; GFX9-DL-NEXT: v_mul_lo_u16_sdwa v7, v7, v14 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX9-DL-NEXT: v_mul_lo_u16_e32 v18, v5, v11
 ; GFX9-DL-NEXT: v_mul_lo_u16_e32 v8, v8, v15
 ; GFX9-DL-NEXT: v_mul_lo_u16_sdwa v9, v9, v16 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX9-DL-NEXT: v_mul_lo_u16_e32 v10, v10, v17
 ; GFX9-DL-NEXT: v_mul_lo_u16_sdwa v2, v1, v2 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX9-DL-NEXT: v_or_b32_e32 v7, v12, v7
-; GFX9-DL-NEXT: v_or_b32_e32 v1, v18, v0
+; GFX9-DL-NEXT: v_or_b32_e32 v0, v18, v0
+; GFX9-DL-NEXT: v_or_b32_sdwa v1, v12, v7 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
 ; GFX9-DL-NEXT: v_or_b32_e32 v8, v8, v9
-; GFX9-DL-NEXT: v_or_b32_e32 v9, v10, v2
-; GFX9-DL-NEXT: v_lshlrev_b32_e32 v10, 16, v7
+; GFX9-DL-NEXT: v_mul_lo_u16_e32 v10, v10, v17
+; GFX9-DL-NEXT: v_or_b32_sdwa v1, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
 ; GFX9-DL-NEXT: v_lshlrev_b32_e32 v0, 16, v8
-; GFX9-DL-NEXT: v_or_b32_sdwa v1, v1, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-DL-NEXT: v_or_b32_e32 v2, v2, v0
+; GFX9-DL-NEXT: v_or_b32_e32 v9, v10, v2
 ; GFX9-DL-NEXT: v_lshrrev_b32_e32 v10, 8, v1
+; GFX9-DL-NEXT: v_or_b32_e32 v2, v2, v0
 ; GFX9-DL-NEXT: v_lshrrev_b64 v[0:1], 24, v[0:1]
-; GFX9-DL-NEXT: v_lshrrev_b32_e32 v2, 8, v2
+; GFX9-DL-NEXT: v_lshrrev_b32_e32 v1, 8, v2
 ; GFX9-DL-NEXT: s_waitcnt vmcnt(0)
-; GFX9-DL-NEXT: v_add_u16_e32 v1, v9, v4
-; GFX9-DL-NEXT: v_add_u16_e32 v1, v1, v2
+; GFX9-DL-NEXT: v_add_u16_e32 v2, v9, v4
+; GFX9-DL-NEXT: v_add_u16_e32 v1, v2, v1
 ; GFX9-DL-NEXT: v_add_u16_e32 v1, v1, v8
 ; GFX9-DL-NEXT: v_add_u16_e32 v0, v1, v0
 ; GFX9-DL-NEXT: v_mad_legacy_u16 v0, v5, v11, v0
@@ -2774,55 +2730,54 @@
 ; GFX10-DL-NEXT: global_load_dword v2, v0, s[6:7]
 ; GFX10-DL-NEXT: global_load_ubyte v3, v4, s[0:1]
 ; GFX10-DL-NEXT: s_waitcnt vmcnt(2)
-; GFX10-DL-NEXT: v_bfe_u32 v9, v1, 12, 4
+; GFX10-DL-NEXT: v_bfe_u32 v8, v1, 12, 4
 ; GFX10-DL-NEXT: s_waitcnt vmcnt(1)
-; GFX10-DL-NEXT: v_bfe_u32 v10, v2, 12, 4
-; GFX10-DL-NEXT: v_bfe_u32 v8, v1, 8, 4
-; GFX10-DL-NEXT: v_bfe_u32 v13, v2, 8, 4
-; GFX10-DL-NEXT: v_lshrrev_b32_e32 v7, 28, v1
-; GFX10-DL-NEXT: v_lshrrev_b32_e32 v14, 28, v2
-; GFX10-DL-NEXT: v_mul_lo_u16 v9, v9, v10
+; GFX10-DL-NEXT: v_bfe_u32 v9, v2, 12, 4
+; GFX10-DL-NEXT: v_bfe_u32 v10, v1, 8, 4
+; GFX10-DL-NEXT: v_bfe_u32 v12, v2, 8, 4
 ; GFX10-DL-NEXT: v_bfe_u32 v5, v1, 16, 4
 ; GFX10-DL-NEXT: v_bfe_u32 v0, v1, 20, 4
+; GFX10-DL-NEXT: v_mul_lo_u16 v8, v8, v9
 ; GFX10-DL-NEXT: v_bfe_u32 v6, v1, 24, 4
+; GFX10-DL-NEXT: v_lshrrev_b32_e32 v7, 28, v1
 ; GFX10-DL-NEXT: v_and_b32_e32 v11, 15, v1
 ; GFX10-DL-NEXT: v_bfe_u32 v1, v1, 4, 4
-; GFX10-DL-NEXT: v_bfe_u32 v15, v2, 4, 4
-; GFX10-DL-NEXT: v_mul_lo_u16 v8, v8, v13
-; GFX10-DL-NEXT: v_lshlrev_b16 v9, 8, v9
-; GFX10-DL-NEXT: v_bfe_u32 v10, v2, 20, 4
-; GFX10-DL-NEXT: v_bfe_u32 v13, v2, 24, 4
-; GFX10-DL-NEXT: v_mul_lo_u16 v7, v7, v14
-; GFX10-DL-NEXT: v_bfe_u32 v12, v2, 16, 4
+; GFX10-DL-NEXT: v_bfe_u32 v14, v2, 4, 4
+; GFX10-DL-NEXT: v_mul_lo_u16 v10, v10, v12
+; GFX10-DL-NEXT: v_lshlrev_b16 v8, 8, v8
+; GFX10-DL-NEXT: v_bfe_u32 v13, v2, 20, 4
+; GFX10-DL-NEXT: v_lshrrev_b32_e32 v15, 28, v2
+; GFX10-DL-NEXT: v_bfe_u32 v9, v2, 16, 4
+; GFX10-DL-NEXT: v_bfe_u32 v12, v2, 24, 4
 ; GFX10-DL-NEXT: v_and_b32_e32 v2, 15, v2
-; GFX10-DL-NEXT: v_mul_lo_u16 v1, v1, v15
-; GFX10-DL-NEXT: v_or_b32_e32 v8, v8, v9
-; GFX10-DL-NEXT: v_mul_lo_u16 v9, v0, v10
-; GFX10-DL-NEXT: v_mul_lo_u16 v10, v6, v13
-; GFX10-DL-NEXT: v_lshlrev_b16 v7, 8, v7
+; GFX10-DL-NEXT: v_mul_lo_u16 v1, v1, v14
+; GFX10-DL-NEXT: v_or_b32_e32 v8, v10, v8
+; GFX10-DL-NEXT: v_mul_lo_u16 v13, v0, v13
+; GFX10-DL-NEXT: v_mul_lo_u16 v7, v7, v15
+; GFX10-DL-NEXT: v_mul_lo_u16 v2, v11, v2
 ; GFX10-DL-NEXT: v_lshlrev_b16 v1, 8, v1
 ; GFX10-DL-NEXT: v_lshlrev_b32_e32 v0, 16, v8
-; GFX10-DL-NEXT: v_mul_lo_u16 v2, v11, v2
-; GFX10-DL-NEXT: v_mul_lo_u16 v11, v5, v12
-; GFX10-DL-NEXT: v_lshlrev_b16 v9, 8, v9
-; GFX10-DL-NEXT: v_or_b32_e32 v7, v10, v7
-; GFX10-DL-NEXT: v_or_b32_sdwa v10, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX10-DL-NEXT: v_mul_lo_u16 v10, v5, v9
+; GFX10-DL-NEXT: v_mul_lo_u16 v11, v6, v12
+; GFX10-DL-NEXT: v_lshlrev_b16 v13, 8, v13
+; GFX10-DL-NEXT: v_lshlrev_b16 v7, 8, v7
+; GFX10-DL-NEXT: v_or_b32_sdwa v14, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
 ; GFX10-DL-NEXT: v_or_b32_e32 v1, v2, v1
-; GFX10-DL-NEXT: v_or_b32_e32 v2, v11, v9
-; GFX10-DL-NEXT: v_lshlrev_b32_e32 v9, 16, v7
-; GFX10-DL-NEXT: v_lshrrev_b32_e32 v10, 8, v10
+; GFX10-DL-NEXT: v_or_b32_e32 v2, v10, v13
+; GFX10-DL-NEXT: v_or_b32_sdwa v10, v11, v7 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX10-DL-NEXT: v_lshrrev_b32_e32 v11, 8, v14
 ; GFX10-DL-NEXT: s_waitcnt vmcnt(0)
 ; GFX10-DL-NEXT: v_add_nc_u16 v3, v1, v3
-; GFX10-DL-NEXT: v_or_b32_sdwa v1, v2, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX10-DL-NEXT: v_add_nc_u16 v9, v3, v10
+; GFX10-DL-NEXT: v_or_b32_sdwa v1, v2, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX10-DL-NEXT: v_add_nc_u16 v10, v3, v11
 ; GFX10-DL-NEXT: v_lshrrev_b64 v[2:3], 24, v[0:1]
 ; GFX10-DL-NEXT: v_lshrrev_b32_e32 v1, 8, v1
-; GFX10-DL-NEXT: v_add_nc_u16 v0, v9, v8
+; GFX10-DL-NEXT: v_add_nc_u16 v0, v10, v8
 ; GFX10-DL-NEXT: v_add_nc_u16 v0, v0, v2
-; GFX10-DL-NEXT: v_mad_u16 v0, v5, v12, v0
+; GFX10-DL-NEXT: v_mad_u16 v0, v5, v9, v0
 ; GFX10-DL-NEXT: v_add_nc_u16 v0, v0, v1
 ; GFX10-DL-NEXT: v_lshrrev_b32_e32 v1, 8, v7
-; GFX10-DL-NEXT: v_mad_u16 v0, v6, v13, v0
+; GFX10-DL-NEXT: v_mad_u16 v0, v6, v12, v0
 ; GFX10-DL-NEXT: v_add_nc_u16 v0, v0, v1
 ; GFX10-DL-NEXT: global_store_byte v4, v0, s[0:1]
 ; GFX10-DL-NEXT: s_endpgm
diff --git a/llvm/test/CodeGen/AMDGPU/insert_vector_elt.v2i16.ll b/llvm/test/CodeGen/AMDGPU/insert_vector_elt.v2i16.ll
--- a/llvm/test/CodeGen/AMDGPU/insert_vector_elt.v2i16.ll
+++ b/llvm/test/CodeGen/AMDGPU/insert_vector_elt.v2i16.ll
@@ -1608,10 +1608,10 @@
 ; VI-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
 ; VI-NEXT: s_mov_b64 s[2:3], 0xffff
 ; VI-NEXT: v_mov_b32_e32 v3, s1
-; VI-NEXT: s_and_b32 s1, s4, s2
+; VI-NEXT: s_lshl_b32 s1, s4, 16
+; VI-NEXT: s_and_b32 s4, s4, s2
 ; VI-NEXT: v_add_u32_e32 v2, vcc, s0, v2
-; VI-NEXT: s_lshl_b32 s0, s1, 16
-; VI-NEXT: s_or_b32 s0, s1, s0
+; VI-NEXT: s_or_b32 s0, s4, s1
 ; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
 ; VI-NEXT: v_lshlrev_b32_e32 v4, 4, v4
 ; VI-NEXT: v_lshlrev_b64 v[4:5], v4, s[2:3]
@@ -1693,11 +1693,11 @@
 ; VI-NEXT: s_mov_b64 s[2:3], 0xffff
 ; VI-NEXT: v_mov_b32_e32 v3, s1
 ; VI-NEXT: s_lshl_b32 s1, s5, 4
+; VI-NEXT: s_lshl_b32 s5, s4, 16
 ; VI-NEXT: s_and_b32 s4, s4, s2
 ; VI-NEXT: v_add_u32_e32 v2, vcc, s0, v2
 ; VI-NEXT: s_lshl_b64 s[0:1], s[2:3], s1
-; VI-NEXT: s_lshl_b32 s2, s4, 16
-; VI-NEXT: s_or_b32 s2, s4, s2
+; VI-NEXT: s_or_b32 s2, s4, s5
 ; VI-NEXT: v_mov_b32_e32 v4, s2
 ; VI-NEXT: v_mov_b32_e32 v5, s2
 ; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
diff --git a/llvm/test/CodeGen/AMDGPU/saddsat.ll b/llvm/test/CodeGen/AMDGPU/saddsat.ll
--- a/llvm/test/CodeGen/AMDGPU/saddsat.ll
+++ b/llvm/test/CodeGen/AMDGPU/saddsat.ll
@@ -141,10 +141,11 @@
 ; GFX6-NEXT: v_min_i32_e32 v0, s4, v0
 ; GFX6-NEXT: v_max_i32_e32 v1, s5, v1
 ; GFX6-NEXT: v_max_i32_e32 v0, s5, v0
-; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; GFX6-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX6-NEXT: v_or_b32_e32 v0, v0, v1
-; GFX6-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX6-NEXT: s_mov_b32 s4, 0xffff
+; GFX6-NEXT: v_lshlrev_b32_e32 v3, 16, v1
+; GFX6-NEXT: v_and_b32_e32 v0, s4, v0
+; GFX6-NEXT: v_or_b32_e32 v0, v0, v3
+; GFX6-NEXT: v_and_b32_e32 v1, s4, v1
 ; GFX6-NEXT: s_setpc_b64 s[30:31]
 ;
 ; GFX8-LABEL: v_saddsat_v2i16:
diff --git a/llvm/test/CodeGen/AMDGPU/scalar_to_vector.ll b/llvm/test/CodeGen/AMDGPU/scalar_to_vector.ll
--- a/llvm/test/CodeGen/AMDGPU/scalar_to_vector.ll
+++ b/llvm/test/CodeGen/AMDGPU/scalar_to_vector.ll
@@ -38,8 +38,9 @@
 ; VI-NEXT: s_mov_b32 s4, s0
 ; VI-NEXT: s_mov_b32 s5, s1
 ; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_lshrrev_b32_e32 v1, 16, v0
-; VI-NEXT: v_alignbit_b32 v0, v1, v0, 16
+; VI-NEXT: v_alignbit_b32 v0, s0, v0, 16
+; VI-NEXT: v_lshlrev_b32_e32 v1, 16, v0
+; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
 ; VI-NEXT: v_mov_b32_e32 v1, v0
 ; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; VI-NEXT: s_endpgm
@@ -85,8 +86,9 @@
 ; VI-NEXT: s_mov_b32 s4, s0
 ; VI-NEXT: s_mov_b32 s5, s1
 ; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_lshrrev_b32_e32 v1, 16, v0
-; VI-NEXT: v_alignbit_b32 v0, v1, v0, 16
+; VI-NEXT: v_alignbit_b32 v0, s0, v0, 16
+; VI-NEXT: v_lshlrev_b32_e32 v1, 16, v0
+; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
 ; VI-NEXT: v_mov_b32_e32 v1, v0
 ; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; VI-NEXT: s_endpgm
@@ -105,13 +107,10 @@
 ; SI-NEXT: buffer_load_ubyte v0, off, s[0:3], 0
 ; SI-NEXT: s_waitcnt vmcnt(0)
 ; SI-NEXT: v_lshlrev_b32_e32 v1, 8, v0
-; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: v_lshrrev_b32_e32 v1, 8, v0
-; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v1
-; SI-NEXT: v_or_b32_e32 v1, v1, v2
-; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; SI-NEXT: v_or_b32_e32 v1, v1, v2
-; SI-NEXT: v_or_b32_e32 v0, v0, v2
+; SI-NEXT: v_or_b32_e32 v0, v0, v1
+; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v0
+; SI-NEXT: v_or_b32_e32 v0, v0, v1
+; SI-NEXT: v_mov_b32_e32 v1, v0
 ; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
 ; SI-NEXT: s_endpgm
 ;
@@ -122,13 +121,10 @@
 ; VI-NEXT: buffer_load_ubyte v0, off, s[0:3], 0
 ; VI-NEXT: s_waitcnt vmcnt(0)
 ; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v0
-; VI-NEXT: v_or_b32_e32 v0, v1, v0
-; VI-NEXT: v_lshrrev_b16_e32 v1, 8, v0
-; VI-NEXT: v_lshlrev_b16_e32 v2, 8, v1
-; VI-NEXT: v_or_b32_e32 v1, v1, v2
-; VI-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_e32 v0, v0, v1
+; VI-NEXT: v_lshlrev_b32_e32 v1, 16, v0
+; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_mov_b32_e32 v1, v0
 ; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
 ; VI-NEXT: s_endpgm
 bb:
@@ -147,13 +143,10 @@
 ; SI-NEXT: buffer_load_ubyte v0, off, s[0:3], 0
 ; SI-NEXT: s_waitcnt vmcnt(0)
 ; SI-NEXT: v_lshlrev_b32_e32 v1, 8, v0
-; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: v_lshrrev_b32_e32 v1, 8, v0
-; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v1
-; SI-NEXT: v_or_b32_e32 v1, v1, v2
-; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; SI-NEXT: v_or_b32_e32 v1, v1, v2
-; SI-NEXT: v_or_b32_e32 v0, v0, v2
+; SI-NEXT: v_or_b32_e32 v0, v0, v1
+; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v0
+; SI-NEXT: v_or_b32_e32 v0, v0, v1
+; SI-NEXT: v_mov_b32_e32 v1, v0
 ; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
 ; SI-NEXT: s_endpgm
 ;
diff --git a/llvm/test/CodeGen/AMDGPU/shift-i128.ll b/llvm/test/CodeGen/AMDGPU/shift-i128.ll
--- a/llvm/test/CodeGen/AMDGPU/shift-i128.ll
+++ b/llvm/test/CodeGen/AMDGPU/shift-i128.ll
@@ -83,11 +83,11 @@
 ; GCN-LABEL: v_shl_i128_vk:
 ; GCN: ; %bb.0:
 ; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_alignbit_b32 v4, v2, v1, 15
+; GCN-NEXT: v_lshl_b64 v[2:3], v[2:3], 17
+; GCN-NEXT: v_lshrrev_b32_e32 v4, 15, v1
+; GCN-NEXT: v_or_b32_e32 v2, v2, v4
 ; GCN-NEXT: v_alignbit_b32 v1, v1, v0, 15
-; GCN-NEXT: v_alignbit_b32 v3, v3, v2, 15
 ; GCN-NEXT: v_lshlrev_b32_e32 v0, 17, v0
-; GCN-NEXT: v_mov_b32_e32 v2, v4
 ; GCN-NEXT: s_setpc_b64 s[30:31]
 %shl = shl i128 %lhs, 17
 ret i128 %shl
@@ -110,11 +110,11 @@
 ; GCN-LABEL: v_ashr_i128_vk:
 ; GCN: ; %bb.0:
 ; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_ashr_i64 v[4:5], v[2:3], 33
-; GCN-NEXT: v_alignbit_b32 v0, v2, v1, 1
-; GCN-NEXT: v_alignbit_b32 v1, v3, v2, 1
-; GCN-NEXT: v_mov_b32_e32 v2, v4
-; GCN-NEXT: v_mov_b32_e32 v3, v5
+; GCN-NEXT: v_mov_b32_e32 v4, v1
+; GCN-NEXT: v_lshl_b64 v[0:1], v[2:3], 31
+; GCN-NEXT: v_lshrrev_b32_e32 v4, 1, v4
+; GCN-NEXT: v_ashr_i64 v[2:3], v[2:3], 33
+; GCN-NEXT: v_or_b32_e32 v0, v4, v0
 ; GCN-NEXT: s_setpc_b64 s[30:31]
 %shl = ashr i128 %lhs, 33
 ret i128 %shl
diff --git a/llvm/test/CodeGen/AMDGPU/ssubsat.ll b/llvm/test/CodeGen/AMDGPU/ssubsat.ll
--- a/llvm/test/CodeGen/AMDGPU/ssubsat.ll
+++ b/llvm/test/CodeGen/AMDGPU/ssubsat.ll
@@ -141,10 +141,11 @@
 ; GFX6-NEXT: v_min_i32_e32 v0, s4, v0
 ; GFX6-NEXT: v_max_i32_e32 v1, s5, v1
 ; GFX6-NEXT: v_max_i32_e32 v0, s5, v0
-; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; GFX6-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX6-NEXT: v_or_b32_e32 v0, v0, v1
-; GFX6-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX6-NEXT: s_mov_b32 s4, 0xffff
+; GFX6-NEXT: v_lshlrev_b32_e32 v3, 16, v1
+; GFX6-NEXT: v_and_b32_e32 v0, s4, v0
+; GFX6-NEXT: v_or_b32_e32 v0, v0, v3
+; GFX6-NEXT: v_and_b32_e32 v1, s4, v1
 ; GFX6-NEXT: s_setpc_b64 s[30:31]
 ;
 ; GFX8-LABEL: v_ssubsat_v2i16:
diff --git a/llvm/test/CodeGen/AMDGPU/trunc-combine.ll b/llvm/test/CodeGen/AMDGPU/trunc-combine.ll
--- a/llvm/test/CodeGen/AMDGPU/trunc-combine.ll
+++ b/llvm/test/CodeGen/AMDGPU/trunc-combine.ll
@@ -141,10 +141,11 @@
 ; SI-LABEL: trunc_v2i64_arg_to_v2i16:
 ; SI: ; %bb.0:
 ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: s_mov_b32 s4, 0xffff
 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v2
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; SI-NEXT: v_and_b32_e32 v0, s4, v0
 ; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; SI-NEXT: v_and_b32_e32 v1, s4, v2
 ; SI-NEXT: s_setpc_b64 s[30:31]
 ;
 ; VI-LABEL: trunc_v2i64_arg_to_v2i16:
diff --git a/llvm/test/CodeGen/AMDGPU/uaddsat.ll b/llvm/test/CodeGen/AMDGPU/uaddsat.ll
--- a/llvm/test/CodeGen/AMDGPU/uaddsat.ll
+++ b/llvm/test/CodeGen/AMDGPU/uaddsat.ll
@@ -120,9 +120,8 @@
 ; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v2
 ; GFX6-NEXT: v_min_u32_e32 v1, s4, v1
 ; GFX6-NEXT: v_min_u32_e32 v0, s4, v0
-; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; GFX6-NEXT: v_or_b32_e32 v0, v0, v1
-; GFX6-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX6-NEXT: v_lshlrev_b32_e32 v2, 16, v1
+; GFX6-NEXT: v_or_b32_e32 v0, v0, v2
 ; GFX6-NEXT: s_setpc_b64 s[30:31]
 ;
 ; GFX8-LABEL: v_uaddsat_v2i16:
diff --git a/llvm/test/CodeGen/AMDGPU/usubsat.ll b/llvm/test/CodeGen/AMDGPU/usubsat.ll
--- a/llvm/test/CodeGen/AMDGPU/usubsat.ll
+++ b/llvm/test/CodeGen/AMDGPU/usubsat.ll
@@ -216,17 +216,17 @@
 ; GFX6: ; %bb.0:
 ; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX6-NEXT: s_mov_b32 s4, 0xffff
-; GFX6-NEXT: v_and_b32_e32 v4, s4, v3
+; GFX6-NEXT: v_and_b32_e32 v3, s4, v3
 ; GFX6-NEXT: v_and_b32_e32 v1, s4, v1
 ; GFX6-NEXT: v_and_b32_e32 v2, s4, v2
 ; GFX6-NEXT: v_and_b32_e32 v0, s4, v0
-; GFX6-NEXT: v_max_u32_e32 v1, v1, v4
+; GFX6-NEXT: v_max_u32_e32 v1, v1, v3
 ; GFX6-NEXT: v_max_u32_e32 v0, v0, v2
 ; GFX6-NEXT: v_sub_i32_e32 v1, vcc, v1, v3
 ; GFX6-NEXT: v_sub_i32_e32 v0, vcc, v0, v2
-; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; GFX6-NEXT: v_or_b32_e32 v0, v0, v1
-; GFX6-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX6-NEXT: v_lshlrev_b32_e32 v2, 16, v1
+; GFX6-NEXT: v_or_b32_e32 v0, v0, v2
+; GFX6-NEXT: v_and_b32_e32 v1, s4, v1
 ; GFX6-NEXT: s_setpc_b64 s[30:31]
 ;
 ; GFX8-LABEL: v_usubsat_v2i16:
diff --git a/llvm/test/CodeGen/ARM/illegal-bitfield-loadstore.ll b/llvm/test/CodeGen/ARM/illegal-bitfield-loadstore.ll
--- a/llvm/test/CodeGen/ARM/illegal-bitfield-loadstore.ll
+++ b/llvm/test/CodeGen/ARM/illegal-bitfield-loadstore.ll
@@ -91,18 +91,15 @@
 ; BE-LABEL: i56_or:
 ; BE: @ %bb.0:
 ; BE-NEXT: mov r1, r0
-; BE-NEXT: ldr r12, [r0]
+; BE-NEXT: ldr r0, [r0]
 ; BE-NEXT: ldrh r2, [r1, #4]!
 ; BE-NEXT: ldrb r3, [r1, #2]
 ; BE-NEXT: orr r2, r3, r2, lsl #8
-; BE-NEXT: orr r2, r2, r12, lsl #24
-; BE-NEXT: orr r2, r2, #384
-; BE-NEXT: strb r2, [r1, #2]
-; BE-NEXT: lsr r3, r2, #8
-; BE-NEXT: strh r3, [r1]
-; BE-NEXT: bic r1, r12, #255
-; BE-NEXT: orr r1, r1, r2, lsr #24
-; BE-NEXT: str r1, [r0]
+; BE-NEXT: orr r0, r2, r0, lsl #24
+; BE-NEXT: orr r0, r0, #384
+; BE-NEXT: strb r0, [r1, #2]
+; BE-NEXT: lsr r0, r0, #8
+; BE-NEXT: strh r0, [r1]
 ; BE-NEXT: mov pc, lr
 %aa = load i56, i56* %a
 %b = or i56 %aa, 384
@@ -121,20 +118,11 @@
 ;
 ; BE-LABEL: i56_and_or:
 ; BE: @ %bb.0:
-; BE-NEXT: mov r1, r0
+; BE-NEXT: ldrh r1, [r0, #4]!
 ; BE-NEXT: mov r2, #128
-; BE-NEXT: ldrh r12, [r1, #4]!
-; BE-NEXT: ldrb r3, [r1, #2]
-; BE-NEXT: strb r2, [r1, #2]
-; BE-NEXT: orr r2, r3, r12, lsl #8
-; BE-NEXT: ldr r12, [r0]
-; BE-NEXT: orr r2, r2, r12, lsl #24
-; BE-NEXT: orr r2, r2, #384
-; BE-NEXT: lsr r3, r2, #8
-; BE-NEXT: strh r3, [r1]
-; BE-NEXT: bic r1, r12, #255
-; BE-NEXT: orr r1, r1, r2, lsr #24
-; BE-NEXT: str r1, [r0]
+; BE-NEXT: orr r1, r1, #1
+; BE-NEXT: strb r2, [r0, #2]
+; BE-NEXT: strh r1, [r0]
 ; BE-NEXT: mov pc, lr
 %b = load i56, i56* %a, align 1
@@ -155,22 +143,13 @@
 ;
 ; BE-LABEL: i56_insert_bit:
 ; BE: @ %bb.0:
-; BE-NEXT: .save {r11, lr}
-; BE-NEXT: push {r11, lr}
-; BE-NEXT: mov r2, r0
-; BE-NEXT: ldr lr, [r0]
-; BE-NEXT: ldrh r12, [r2, #4]!
-; BE-NEXT: ldrb r3, [r2, #2]
-; BE-NEXT: orr r12, r3, r12, lsl #8
-; BE-NEXT: orr r3, r12, lr, lsl #24
-; BE-NEXT: bic r3, r3, #8192
-; BE-NEXT: orr r1, r3, r1, lsl #13
-; BE-NEXT: lsr r3, r1, #8
-; BE-NEXT: strh r3, [r2]
-; BE-NEXT: bic r2, lr, #255
-; BE-NEXT: orr r1, r2, r1, lsr #24
-; BE-NEXT: str r1, [r0]
-; BE-NEXT: pop {r11, lr}
+; BE-NEXT: ldrh r2, [r0, #4]!
+; BE-NEXT: mov r3, #57088
+; BE-NEXT: orr r3, r3, #16711680
+; BE-NEXT: and r2, r3, r2, lsl #8
+; BE-NEXT: orr r1, r2, r1, lsl #13
+; BE-NEXT: lsr r1, r1, #8
+; BE-NEXT: strh r1, [r0]
 ; BE-NEXT: mov pc, lr
 %extbit = zext i1 %bit to i56
 %b = load i56, i56* %a, align 1
diff --git a/llvm/test/CodeGen/ARM/parity.ll b/llvm/test/CodeGen/ARM/parity.ll
--- a/llvm/test/CodeGen/ARM/parity.ll
+++ b/llvm/test/CodeGen/ARM/parity.ll
@@ -47,8 +47,8 @@
 ; CHECK-LABEL: parity_17:
 ; CHECK: @ %bb.0:
 ; CHECK-NEXT: bfc r0, #17, #15
-; CHECK-NEXT: eor r0, r0, r0, lsr #16
-; CHECK-NEXT: eor r0, r0, r0, lsr #8
+; CHECK-NEXT: eor r1, r0, r0, lsr #16
+; CHECK-NEXT: eor r0, r1, r0, lsr #8
 ; CHECK-NEXT: eor r0, r0, r0, lsr #4
 ; CHECK-NEXT: eor r0, r0, r0, lsr #2
 ; CHECK-NEXT: eor r0, r0, r0, lsr #1
diff --git a/llvm/test/CodeGen/ARM/ror.ll b/llvm/test/CodeGen/ARM/ror.ll
--- a/llvm/test/CodeGen/ARM/ror.ll
+++ b/llvm/test/CodeGen/ARM/ror.ll
@@ -21,8 +21,14 @@
 define <2 x i32> @test2(<2 x i32> %x) nounwind readnone {
 ; CHECK-LABEL: test2:
 ; CHECK: @ %bb.0: @ %entry
-; CHECK-NEXT: ror r0, r0, #10
-; CHECK-NEXT: ror r1, r1, #10
+; CHECK-NEXT: bic r2, r0, #15
+; CHECK-NEXT: ror r0, r0, #4
+; CHECK-NEXT: lsr r0, r0, #6
+; CHECK-NEXT: orr r0, r0, r2, lsl #22
+; CHECK-NEXT: bic r2, r1, #15
+; CHECK-NEXT: ror r1, r1, #4
+; CHECK-NEXT: lsr r1, r1, #6
+; CHECK-NEXT: orr r1, r1, r2, lsl #22
 ; CHECK-NEXT: bx lr
 entry:
 %high_part.i = shl <2 x i32> %x,
diff --git a/llvm/test/CodeGen/ARM/uxtb.ll b/llvm/test/CodeGen/ARM/uxtb.ll
--- a/llvm/test/CodeGen/ARM/uxtb.ll
+++ b/llvm/test/CodeGen/ARM/uxtb.ll
@@ -103,11 +103,12 @@
 ; CHECK-LABEL: test10:
 ; CHECK: @ %bb.0:
 ; CHECK-NEXT: mov r1, #248
+; CHECK-NEXT: mov r2, #7
 ; CHECK-NEXT: orr r1, r1, #16252928
-; CHECK-NEXT: and r0, r1, r0, lsr #7
-; CHECK-NEXT: lsr r1, r0, #5
-; CHECK-NEXT: uxtb16 r1, r1
-; CHECK-NEXT: orr r0, r1, r0
+; CHECK-NEXT: orr r2, r2, #458752
+; CHECK-NEXT: and r1, r1, r0, lsr #7
+; CHECK-NEXT: and r0, r2, r0, lsr #12
+; CHECK-NEXT: orr r0, r0, r1
 ; CHECK-NEXT: bx lr
 %tmp1 = lshr i32 %p0, 7
 %tmp2 = and i32 %tmp1, 16253176
diff --git a/llvm/test/CodeGen/Mips/urem-seteq-illegal-types.ll b/llvm/test/CodeGen/Mips/urem-seteq-illegal-types.ll
--- a/llvm/test/CodeGen/Mips/urem-seteq-illegal-types.ll
+++ b/llvm/test/CodeGen/Mips/urem-seteq-illegal-types.ll
@@ -148,48 +148,48 @@
 define i1 @test_urem_oversized(i66 %X) nounwind {
 ; MIPSEL-LABEL: test_urem_oversized:
 ; MIPSEL: # %bb.0:
-; MIPSEL-NEXT: lui $1, 12057
-; MIPSEL-NEXT: ori $1, $1, 37186
+; MIPSEL-NEXT: lui $1, 52741
+; MIPSEL-NEXT: ori $1, $1, 40665
 ; MIPSEL-NEXT: multu $6, $1
-; MIPSEL-NEXT: mflo $2
-; MIPSEL-NEXT: mfhi $3
-; MIPSEL-NEXT: lui $7, 52741
-; MIPSEL-NEXT: ori $7, $7, 40665
-; MIPSEL-NEXT: multu $6, $7
+; MIPSEL-NEXT: mfhi $2
+; MIPSEL-NEXT: mflo $3
+; MIPSEL-NEXT: multu $5, $1
+; MIPSEL-NEXT: mfhi $7
 ; MIPSEL-NEXT: mflo $8
-; MIPSEL-NEXT: mfhi $9
-; MIPSEL-NEXT: multu $5, $7
-; MIPSEL-NEXT: mfhi $10
-; MIPSEL-NEXT: mflo $11
-; MIPSEL-NEXT: addu $9, $11, $9
-; MIPSEL-NEXT: addu $12, $2, $9
-; MIPSEL-NEXT: sltu $9, $9, $11
-; MIPSEL-NEXT: sll $11, $12, 31
-; MIPSEL-NEXT: sltu $2, $12, $2
-; MIPSEL-NEXT: srl $13, $8, 1
-; MIPSEL-NEXT: sll $8, $8, 1
-; MIPSEL-NEXT: addu $2, $3, $2
-; MIPSEL-NEXT: or $3, $13, $11
-; MIPSEL-NEXT: srl $11, $12, 1
-; MIPSEL-NEXT: addu $9, $10, $9
-; MIPSEL-NEXT: mul $4, $4, $7
-; MIPSEL-NEXT: mul $1, $5, $1
+; MIPSEL-NEXT: lui $9, 12057
+; MIPSEL-NEXT: ori $9, $9, 37186
+; MIPSEL-NEXT: multu $6, $9
+; MIPSEL-NEXT: mflo $10
+; MIPSEL-NEXT: mfhi $11
+; MIPSEL-NEXT: addu $2, $8, $2
+; MIPSEL-NEXT: addu $12, $10, $2
+; MIPSEL-NEXT: sltu $2, $2, $8
+; MIPSEL-NEXT: addu $2, $7, $2
+; MIPSEL-NEXT: sltu $7, $12, $10
+; MIPSEL-NEXT: sll $8, $12, 31
+; MIPSEL-NEXT: srl $10, $12, 1
+; MIPSEL-NEXT: sll $12, $3, 1
+; MIPSEL-NEXT: srl $3, $3, 1
+; MIPSEL-NEXT: mul $1, $4, $1
+; MIPSEL-NEXT: mul $4, $5, $9
 ; MIPSEL-NEXT: sll $5, $6, 1
 ; MIPSEL-NEXT: lui $6, 60010
-; MIPSEL-NEXT: ori $6, $6, 61135
-; MIPSEL-NEXT: addu $2, $9, $2
-; MIPSEL-NEXT: addu $1, $1, $2
-; MIPSEL-NEXT: addu $2, $5, $4
-; MIPSEL-NEXT: addu $1, $1, $2
-; MIPSEL-NEXT: andi $1, $1, 3
+; MIPSEL-NEXT: addu $7, $11, $7
+; MIPSEL-NEXT: addu $2, $2, $7
+; MIPSEL-NEXT: addu $2, $4, $2
+; MIPSEL-NEXT: addu $1, $5, $1
+; MIPSEL-NEXT: addu $1, $2, $1
 ; MIPSEL-NEXT: sll $2, $1, 31
-; MIPSEL-NEXT: or $4, $11, $2
+; MIPSEL-NEXT: or $4, $10, $2
 ; MIPSEL-NEXT: sltiu $2, $4, 13
 ; MIPSEL-NEXT: xori $4, $4, 13
-; MIPSEL-NEXT: sltu $3, $3, $6
+; MIPSEL-NEXT: or $3, $3, $8
+; MIPSEL-NEXT: ori $5, $6, 61135
+; MIPSEL-NEXT: sltu $3, $3, $5
 ; MIPSEL-NEXT: movz $2, $3, $4
+; MIPSEL-NEXT: andi $1, $1, 2
 ; MIPSEL-NEXT: srl $1, $1, 1
-; MIPSEL-NEXT: or $1, $1, $8
+; MIPSEL-NEXT: or $1, $1, $12
 ; MIPSEL-NEXT: andi $1, $1, 3
 ; MIPSEL-NEXT: jr $ra
 ; MIPSEL-NEXT: movn $2, $zero, $1
@@ -213,12 +213,12 @@
 ; MIPS64EL-NEXT: daddiu $5, $5, -4401
 ; MIPS64EL-NEXT: dsll $4, $4, 1
 ; MIPS64EL-NEXT: daddu $3, $3, $4
-; MIPS64EL-NEXT: daddu $2, $3, $2
-; MIPS64EL-NEXT: andi $3, $2, 3
+; MIPS64EL-NEXT: daddu $3, $3, $2
 ; MIPS64EL-NEXT: dsll $2, $3, 63
 ; MIPS64EL-NEXT: dsrl $4, $1, 1
 ; MIPS64EL-NEXT: or $2, $4, $2
 ; MIPS64EL-NEXT: sltu $2, $2, $5
+; MIPS64EL-NEXT: andi $3, $3, 2
 ; MIPS64EL-NEXT: dsrl $3, $3, 1
 ; MIPS64EL-NEXT: dsll $1, $1, 1
 ; MIPS64EL-NEXT: or $1, $3, $1
diff --git a/llvm/test/CodeGen/PowerPC/fp-to-int-to-fp.ll b/llvm/test/CodeGen/PowerPC/fp-to-int-to-fp.ll
--- a/llvm/test/CodeGen/PowerPC/fp-to-int-to-fp.ll
+++ b/llvm/test/CodeGen/PowerPC/fp-to-int-to-fp.ll
@@ -84,35 +84,35 @@
 ; PPC64-NEXT: addi 3, 5, 0
 ; PPC64-NEXT: .LBB2_2: # %entry
 ; PPC64-NEXT: sradi 4, 3, 53
-; PPC64-NEXT: clrldi 5, 3, 63
+; PPC64-NEXT: rldicl 5, 3, 63, 1
 ; PPC64-NEXT: addi 4, 4, 1
+; PPC64-NEXT: clrldi 6, 3, 63
 ; PPC64-NEXT: cmpldi 4, 1
-; PPC64-NEXT: rldicl 4, 3, 63, 1
-; PPC64-NEXT: or 5, 5, 4
-; PPC64-NEXT: rldicl 6, 5, 11, 53
-; PPC64-NEXT: addi 6, 6, 1
-; PPC64-NEXT: clrldi 7, 5, 53
-; PPC64-NEXT: cmpldi 1, 6, 1
-; PPC64-NEXT: clrldi 6, 3, 53
+; PPC64-NEXT: clrldi 4, 3, 53
+; PPC64-NEXT: or 6, 6, 5
+; PPC64-NEXT: clrldi 7, 6, 53
+; PPC64-NEXT: addi 4, 4, 2047
 ; PPC64-NEXT: addi 7, 7, 2047
-; PPC64-NEXT: addi 6, 6, 2047
-; PPC64-NEXT: or 4, 7, 4
-; PPC64-NEXT: or 6, 6, 3
-; PPC64-NEXT: rldicl 4, 4, 53, 11
-; PPC64-NEXT: rldicr 6, 6, 0, 52
+; PPC64-NEXT: or 4, 4, 3
+; PPC64-NEXT: or 5, 7, 5
+; PPC64-NEXT: rldicl 7, 3, 10, 54
+; PPC64-NEXT: rldicr 4, 4, 0, 52
+; PPC64-NEXT: addi 7, 7, 1
 ; PPC64-NEXT: bc 12, 1, .LBB2_4
 ; PPC64-NEXT: # %bb.3: # %entry
-; PPC64-NEXT: ori 6, 3, 0
+; PPC64-NEXT: ori 4, 3, 0
 ; PPC64-NEXT: b .LBB2_4
 ; PPC64-NEXT: .LBB2_4: # %entry
-; PPC64-NEXT: rldicl 4, 4, 11, 1
-; PPC64-NEXT: cmpdi 3, 0
-; PPC64-NEXT: std 6, -32(1)
-; PPC64-NEXT: bc 12, 5, .LBB2_6
+; PPC64-NEXT: rldicl 5, 5, 53, 11
+; PPC64-NEXT: std 4, -32(1)
+; PPC64-NEXT: rldicl 4, 5, 11, 1
+; PPC64-NEXT: cmpldi 7, 1
+; PPC64-NEXT: bc 12, 1, .LBB2_6
 ; PPC64-NEXT: # %bb.5: # %entry
-; PPC64-NEXT: ori 4, 5, 0
+; PPC64-NEXT: ori 4, 6, 0
 ; PPC64-NEXT: b .LBB2_6
 ; PPC64-NEXT: .LBB2_6: # %entry
+; PPC64-NEXT: cmpdi 3, 0
 ; PPC64-NEXT: std 4, -24(1)
 ; PPC64-NEXT: bc 12, 0, .LBB2_8
 ; PPC64-NEXT: # %bb.7: # %entry
diff --git a/llvm/test/CodeGen/RISCV/bswap-ctlz-cttz-ctpop.ll b/llvm/test/CodeGen/RISCV/bswap-ctlz-cttz-ctpop.ll
--- a/llvm/test/CodeGen/RISCV/bswap-ctlz-cttz-ctpop.ll
+++ b/llvm/test/CodeGen/RISCV/bswap-ctlz-cttz-ctpop.ll
@@ -54,7 +54,7 @@
 ;
 ; RV64I-LABEL: test_bswap_i32:
 ; RV64I: # %bb.0:
-; RV64I-NEXT: srliw a1, a0, 8
+; RV64I-NEXT: srli a1, a0, 8
 ; RV64I-NEXT: lui a2, 16
 ; RV64I-NEXT: addiw a2, a2, -256
 ; RV64I-NEXT: and a1, a1, a2
@@ -312,7 +312,7 @@
 ; RV64I-NEXT: lui a2, 349525
 ; RV64I-NEXT: addiw a2, a2, 1365
 ; RV64I-NEXT: and a1, a1, a2
-; RV64I-NEXT: subw a0, a0, a1
+; RV64I-NEXT: sub a0, a0, a1
 ; RV64I-NEXT: lui a1, 209715
 ; RV64I-NEXT: addiw a1, a1, 819
 ; RV64I-NEXT: and a2, a0, a1
@@ -405,7 +405,7 @@
 ; RV64I-NEXT: lui a2, 349525
 ; RV64I-NEXT: addiw a2, a2, 1365
 ; RV64I-NEXT: and a1, a1, a2
-; RV64I-NEXT: subw a0, a0, a1
+; RV64I-NEXT: sub a0, a0, a1
 ; RV64I-NEXT: lui a1, 209715
 ; RV64I-NEXT: addiw a1, a1, 819
 ; RV64I-NEXT: and a2, a0, a1
@@ -708,7 +708,7 @@
 ; RV64I-NEXT: lui a2, 349525
 ; RV64I-NEXT: addiw a2, a2, 1365
 ; RV64I-NEXT: and a1, a1, a2
-; RV64I-NEXT: subw a0, a0, a1
+; RV64I-NEXT: sub a0, a0, a1
 ; RV64I-NEXT: lui a1, 209715
 ; RV64I-NEXT: addiw a1, a1, 819
 ; RV64I-NEXT: and a2, a0, a1
@@ -897,7 +897,7 @@
 ; RV64I-NEXT: lui a2, 349525
 ; RV64I-NEXT: addiw a2, a2, 1365
 ; RV64I-NEXT: and a1, a1, a2
-; RV64I-NEXT: subw a0, a0, a1
+; RV64I-NEXT: sub a0, a0, a1
 ; RV64I-NEXT: lui a1, 209715
 ; RV64I-NEXT: addiw a1, a1, 819
 ; RV64I-NEXT: and a2, a0, a1
diff --git a/llvm/test/CodeGen/RISCV/rv32zbp.ll b/llvm/test/CodeGen/RISCV/rv32zbp.ll
--- a/llvm/test/CodeGen/RISCV/rv32zbp.ll
+++ b/llvm/test/CodeGen/RISCV/rv32zbp.ll
@@ -794,18 +794,30 @@
 ; RV32I-NEXT: addi a4, a4, 819
 ; RV32I-NEXT: and a3, a3, a4
 ; RV32I-NEXT: or a0, a3, a0
-; RV32I-NEXT: or a0, a0, a1
-; RV32I-NEXT: slli a1, a0, 2
-; RV32I-NEXT: and a1, a1, a2
-; RV32I-NEXT: srli a2, a0, 2
+; RV32I-NEXT: or a1, a0, a1
+; RV32I-NEXT: slli a0, a0, 2
+; RV32I-NEXT: and a0, a0, a2
+; RV32I-NEXT: srli a2, a1, 2
 ; RV32I-NEXT: and a2, a2, a4
-; RV32I-NEXT: or a0, a2, a0
-; RV32I-NEXT: or a0, a0, a1
+; RV32I-NEXT: or a1, a2, a1
+; RV32I-NEXT: or a0, a1, a0
 ; RV32I-NEXT: ret
 ;
 ; RV32ZBP-LABEL: gorc2b_i32:
 ; RV32ZBP: # %bb.0:
+; RV32ZBP-NEXT: srli a1, a0, 2
+; RV32ZBP-NEXT: or a1, a1, a0
 ; RV32ZBP-NEXT: orc2.n a0, a0
+; RV32ZBP-NEXT: slli a1, a1, 2
+; RV32ZBP-NEXT: lui a2, 838861
+; RV32ZBP-NEXT: addi a2, a2, -820
+; RV32ZBP-NEXT: and a1, a1, a2
+; RV32ZBP-NEXT: srli a2, a0, 2
+; RV32ZBP-NEXT: lui a3, 209715
+; RV32ZBP-NEXT: addi a3, a3, 819
+; RV32ZBP-NEXT: and a2, a2, a3
+; RV32ZBP-NEXT: or a0, a2, a0
+; RV32ZBP-NEXT: or a0, a0, a1
 ; RV32ZBP-NEXT: ret
 %and1 = shl i32 %a, 2
 %shl1 = and i32 %and1, -858993460
@@ -826,40 +838,61 @@
 define i64 @gorc2b_i64(i64 %a) nounwind {
 ; RV32I-LABEL: gorc2b_i64:
 ; RV32I: # %bb.0:
-; RV32I-NEXT: slli a2, a1, 2
-; RV32I-NEXT: slli a3, a0, 2
+; RV32I-NEXT: slli a2, a0, 2
+; RV32I-NEXT: slli a3, a1, 2
 ; RV32I-NEXT: lui a4, 838861
 ; RV32I-NEXT: addi a4, a4, -820
 ; RV32I-NEXT: and a6, a3, a4
 ; RV32I-NEXT: and a7, a2, a4
-; RV32I-NEXT: srli a5, a0, 2
-; RV32I-NEXT: srli a3, a1, 2
+; RV32I-NEXT: srli a5, a1, 2
+; RV32I-NEXT: srli a3, a0, 2
 ; RV32I-NEXT: lui a2, 209715
 ; RV32I-NEXT: addi a2, a2, 819
 ; RV32I-NEXT: and a3, a3, a2
 ; RV32I-NEXT: and a5, a5, a2
-; RV32I-NEXT: or a0, a5, a0
-; RV32I-NEXT: or a1, a3, a1
-; RV32I-NEXT: or a1, a1, a7
-; RV32I-NEXT: or a0, a0, a6
-; RV32I-NEXT: slli a3, a0, 2
-; RV32I-NEXT: slli a5, a1, 2
-; RV32I-NEXT: and a6, a5, a4
-; RV32I-NEXT: and a3, a3, a4
-; RV32I-NEXT: srli a4, a1, 2
-; RV32I-NEXT: srli a5, a0, 2
-; RV32I-NEXT: and a5, a5, a2
+; RV32I-NEXT: or a1, a5, a1
+; RV32I-NEXT: or a0, a3, a0
+; RV32I-NEXT: or a3, a0, a7
+; RV32I-NEXT: or a5, a1, a6
+; RV32I-NEXT: slli a0, a0, 2
+; RV32I-NEXT: slli a1, a1, 2
+; RV32I-NEXT: and a6, a1, a4
+; RV32I-NEXT: and a0, a0, a4
+; RV32I-NEXT: srli a4, a5, 2
+; RV32I-NEXT: srli a1, a3, 2
+; RV32I-NEXT: and a1, a1, a2
 ; RV32I-NEXT: and a2, a4, a2
-; RV32I-NEXT: or a1, a2, a1
-; RV32I-NEXT: or a0, a5, a0
-; RV32I-NEXT: or a0, a0, a3
-; RV32I-NEXT: or a1, a1, a6
+; RV32I-NEXT: or a2, a2, a5
+; RV32I-NEXT: or a1, a1, a3
+; RV32I-NEXT: or a0, a1, a0
+; RV32I-NEXT: or a1, a2, a6
 ; RV32I-NEXT: ret
 ;
 ; RV32ZBP-LABEL: gorc2b_i64:
 ; RV32ZBP: # %bb.0:
-; RV32ZBP-NEXT: orc2.n a0, a0
+; RV32ZBP-NEXT: srli a2, a1, 2
+; RV32ZBP-NEXT: srli a3, a0, 2
+; RV32ZBP-NEXT: lui a4, 209715
+; RV32ZBP-NEXT: addi a4, a4, 819
+; RV32ZBP-NEXT: and a3, a3, a4
+; RV32ZBP-NEXT: or a3, a3, a0
+; RV32ZBP-NEXT: or a2, a2, a1
 ; RV32ZBP-NEXT: orc2.n a1, a1
+; RV32ZBP-NEXT: orc2.n a0, a0
+; RV32ZBP-NEXT: slli a2, a2, 2
+; RV32ZBP-NEXT: slli a3, a3, 2
+; RV32ZBP-NEXT: lui a5, 838861
+; RV32ZBP-NEXT: addi a5, a5, -820
+; RV32ZBP-NEXT: and a6, a3, a5
+; RV32ZBP-NEXT: and a2, a2, a5
+; RV32ZBP-NEXT: srli a5, a0, 2
+; RV32ZBP-NEXT: srli a3, a1, 2
+; RV32ZBP-NEXT: and a3, a3, a4
+; RV32ZBP-NEXT: and a4, a5, a4
+; RV32ZBP-NEXT: or a0, a4, a0
+; RV32ZBP-NEXT: or a1, a3, a1
+; RV32ZBP-NEXT: or a1, a1, a2
+; RV32ZBP-NEXT: or a0, a0, a6
 ; RV32ZBP-NEXT: ret
 %and1 = shl i64 %a, 2
 %shl1 = and i64 %and1, -3689348814741910324
@@ -2347,21 +2380,18 @@
 define i32 @bswap_rotr_i32(i32 %a) {
 ; RV32I-LABEL: bswap_rotr_i32:
 ; RV32I: # %bb.0:
-; RV32I-NEXT: srli a1, a0, 8
-; RV32I-NEXT: lui a2, 16
-; RV32I-NEXT: addi a2, a2, -256
+; RV32I-NEXT: slli a1, a0, 8
+; RV32I-NEXT: lui a2, 4080
 ; RV32I-NEXT: and a1, a1, a2
+; RV32I-NEXT: slli a2, a0, 24
+; RV32I-NEXT: or a1, a2, a1
 ; RV32I-NEXT: srli a2, a0, 24
-; RV32I-NEXT: or a1, a1, a2
-; RV32I-NEXT: slli a2, a0, 8
-; RV32I-NEXT: lui a3, 4080
-; RV32I-NEXT: and a2, a2, a3
-; RV32I-NEXT: slli a0, a0, 24
+; RV32I-NEXT: srli a0, a0, 8
+; RV32I-NEXT: andi a0, a0, -256
 ; RV32I-NEXT: or a0, a0, a2
-; RV32I-NEXT: or a0, a0, a1
-; RV32I-NEXT: slli a1, a0, 16
-; RV32I-NEXT: srli a0, a0, 16
-; RV32I-NEXT: or a0, a0, a1
+; RV32I-NEXT: slli a0, a0, 16
+; RV32I-NEXT: srli a1, a1, 16
+; RV32I-NEXT: or a0, a1, a0
 ; RV32I-NEXT: ret
 ;
 ; RV32ZBP-LABEL: bswap_rotr_i32:
@@ -2376,21 +2406,18 @@
 define i32 @bswap_rotl_i32(i32 %a) {
 ; RV32I-LABEL: bswap_rotl_i32:
 ; RV32I: # %bb.0:
-; RV32I-NEXT: srli a1, a0, 8
-; RV32I-NEXT: lui a2, 16
-; RV32I-NEXT: addi a2, a2, -256
-; RV32I-NEXT: and a1, a1, a2
-; RV32I-NEXT: srli a2, a0, 24
-; RV32I-NEXT: or a1, a1, a2
+; RV32I-NEXT: srli a1, a0, 24
+; RV32I-NEXT: srli a2, a0, 8
+; RV32I-NEXT: andi a2, a2, -256
+; RV32I-NEXT: or a1, a2, a1
 ; RV32I-NEXT: slli a2, a0, 8
 ; RV32I-NEXT: lui a3, 4080
 ; RV32I-NEXT: and a2, a2, a3
 ; RV32I-NEXT: slli a0, a0, 24
 ; RV32I-NEXT: or a0, a0, a2
-; RV32I-NEXT: or a0, a0, a1
-; RV32I-NEXT: srli a1, a0, 16
-; RV32I-NEXT: slli a0, a0, 16
-; RV32I-NEXT: or a0, a0, a1
+; RV32I-NEXT: srli a0, a0, 16
+; RV32I-NEXT: slli a1, a1, 16
+; RV32I-NEXT: or a0, a1, a0
 ; RV32I-NEXT: ret
 ;
 ; RV32ZBP-LABEL: bswap_rotl_i32:
diff --git a/llvm/test/CodeGen/RISCV/rv64zbb-zbp.ll b/llvm/test/CodeGen/RISCV/rv64zbb-zbp.ll
--- a/llvm/test/CodeGen/RISCV/rv64zbb-zbp.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zbb-zbp.ll
@@ -463,31 +463,31 @@
 ; RV64I-LABEL: roriw_bug:
 ; RV64I: # %bb.0:
 ; RV64I-NEXT: slli a1, a0, 31
-; RV64I-NEXT: andi a0, a0, -2
-; RV64I-NEXT: srli a2, a0, 1
-; RV64I-NEXT: or a1, a1, a2
-; RV64I-NEXT: sext.w a1, a1
-; RV64I-NEXT: xor a0, a0, a1
+; RV64I-NEXT: andi a2, a0, -2
+; RV64I-NEXT: srli a0, a0, 1
+; RV64I-NEXT: or a0, a1, a0
+; RV64I-NEXT: sext.w a0, a0
+; RV64I-NEXT: xor a0, a2, a0
 ; RV64I-NEXT: ret
 ;
 ; RV64ZBB-LABEL: roriw_bug:
 ; RV64ZBB: # %bb.0:
 ; RV64ZBB-NEXT: slli a1, a0, 31
-; RV64ZBB-NEXT: andi a0, a0, -2
-; RV64ZBB-NEXT: srli a2, a0, 1
-; RV64ZBB-NEXT: or a1, a1, a2
-; RV64ZBB-NEXT: sext.w a1, a1
-; RV64ZBB-NEXT: xor a0, a0, a1
+; RV64ZBB-NEXT: andi a2, a0, -2
+; RV64ZBB-NEXT: srli a0, a0, 1
+; RV64ZBB-NEXT: or a0, a1, a0
+; RV64ZBB-NEXT: sext.w a0, a0
+; RV64ZBB-NEXT: xor a0, a2, a0
 ; RV64ZBB-NEXT: ret
 ;
 ; RV64ZBP-LABEL: roriw_bug:
 ; RV64ZBP: # %bb.0:
 ; RV64ZBP-NEXT: slli a1, a0, 31
-; RV64ZBP-NEXT: andi a0, a0, -2
-; RV64ZBP-NEXT: srli a2, a0, 1
-; RV64ZBP-NEXT: or a1, a1, a2
-; RV64ZBP-NEXT: sext.w a1, a1
-; RV64ZBP-NEXT: xor a0, a0, a1
+; RV64ZBP-NEXT: andi a2, a0, -2
+; RV64ZBP-NEXT: srli a0, a0, 1
+; RV64ZBP-NEXT: or a0, a1, a0
+; RV64ZBP-NEXT: sext.w a0, a0
+; RV64ZBP-NEXT: xor a0, a2, a0
 ; RV64ZBP-NEXT: ret
 %a = shl i64 %x, 31
 %b = and i64 %x, 18446744073709551614
diff --git a/llvm/test/CodeGen/RISCV/rv64zbb.ll b/llvm/test/CodeGen/RISCV/rv64zbb.ll
--- a/llvm/test/CodeGen/RISCV/rv64zbb.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zbb.ll
@@ -28,7 +28,7 @@
 ; RV64I-NEXT: lui a2, 349525
 ; RV64I-NEXT: addiw a2, a2, 1365
 ; RV64I-NEXT: and a1, a1, a2
-; RV64I-NEXT: subw a0, a0, a1
+; RV64I-NEXT: sub a0, a0, a1
 ; RV64I-NEXT: lui a1, 209715
 ; RV64I-NEXT: addiw a1, a1, 819
 ; RV64I-NEXT: and a2, a0, a1
@@ -81,7 +81,7 @@
 ; RV64I-NEXT: lui a2, 349525
 ; RV64I-NEXT: addiw a2, a2, 1365
 ; RV64I-NEXT: and a1, a1, a2
-; RV64I-NEXT: subw a0, a0, a1
+; RV64I-NEXT: sub a0, a0, a1
 ; RV64I-NEXT: lui a1, 209715
 ; RV64I-NEXT: addiw a1, a1, 819
 ; RV64I-NEXT: and a2, a0, a1
@@ -144,7 +144,7 @@
 ; RV64I-NEXT: lui a2, 349525
 ; RV64I-NEXT: addiw a2, a2, 1365
 ; RV64I-NEXT: and a1, a1, a2
-; RV64I-NEXT: subw a0, a0, a1
+; RV64I-NEXT: sub a0, a0, a1
 ; RV64I-NEXT: lui a1, 209715
 ; RV64I-NEXT: addiw a1, a1, 819
 ; RV64I-NEXT: and a2, a0, a1
@@ -202,7 +202,7 @@
 ; RV64I-NEXT: lui a2, 349525
 ; RV64I-NEXT: addiw a2, a2, 1365
 ; RV64I-NEXT: and a1, a1, a2
-; RV64I-NEXT: subw a0, a0, a1
+; RV64I-NEXT: sub a0, a0, a1
 ; RV64I-NEXT: lui a1, 209715
 ; RV64I-NEXT: addiw a1, a1, 819
 ; RV64I-NEXT: and a2, a0, a1
@@ -273,7 +273,7 @@
 ; RV64I-NEXT: lui a2, 349525
 ; RV64I-NEXT: addiw a2, a2, 1365
 ; RV64I-NEXT: and a1, a1, a2
-; RV64I-NEXT: subw a0, a0, a1
+; RV64I-NEXT: sub a0, a0, a1
 ; RV64I-NEXT: lui a1, 209715
 ; RV64I-NEXT: addiw a1, a1, 819
 ; RV64I-NEXT: and a2, a0, a1
@@ -401,7 +401,7 @@
 ; RV64I-NEXT: lui a2, 349525
 ; RV64I-NEXT: addiw a2, a2, 1365
 ; RV64I-NEXT: and a1, a1, a2
-; RV64I-NEXT: subw a0, a0, a1
+; RV64I-NEXT: sub a0, a0, a1
 ; RV64I-NEXT: lui a1, 209715
 ; RV64I-NEXT: addiw a1, a1, 819
 ; RV64I-NEXT: and a2, a0, a1
@@ -444,7 +444,7 @@
 ; RV64I-NEXT: lui a2, 349525
 ; RV64I-NEXT: addiw a2, a2, 1365
 ; RV64I-NEXT: and a1, a1, a2
-; RV64I-NEXT: subw a0, a0, a1
+; RV64I-NEXT: sub a0, a0, a1
 ; RV64I-NEXT: lui a1, 209715
 ; RV64I-NEXT: addiw a1, a1, 819
 ; RV64I-NEXT: and a2, a0, a1
@@ -486,7 +486,7 @@
 ; RV64I-NEXT: lui a2, 349525
 ; RV64I-NEXT: addiw a2, a2, 1365
 ; RV64I-NEXT: and a1, a1, a2
-; RV64I-NEXT: subw a0, a0, a1
+; RV64I-NEXT: sub a0, a0, a1
 ; RV64I-NEXT: lui a1, 209715
 ; RV64I-NEXT: addiw a1, a1, 819
 ; RV64I-NEXT: and a2, a0, a1
@@ -541,7 +541,7 @@
 ; RV64I-NEXT: lui a2, 349525
 ; RV64I-NEXT: addiw a2, a2, 1365
 ; RV64I-NEXT: and a1, a1, a2
-; RV64I-NEXT: subw a0, a0, a1
+; RV64I-NEXT: sub a0, a0, a1
 ; RV64I-NEXT: lui a1, 209715
 ; RV64I-NEXT: addiw a1, a1, 819
 ; RV64I-NEXT: and a2, a0, a1
@@ -665,7 +665,7 @@
 ; RV64I-NEXT: lui a2, 349525
 ; RV64I-NEXT: addiw a2, a2, 1365
 ; RV64I-NEXT: and a1, a1, a2
-; RV64I-NEXT: subw a0, a0, a1
+; RV64I-NEXT: sub a0, a0, a1
 ; RV64I-NEXT: lui a1, 209715
 ; RV64I-NEXT: addiw a1, a1, 819
 ; RV64I-NEXT: and a2, a0, a1
@@ -703,7 +703,7 @@
 ; RV64I-NEXT: lui a2, 349525
 ; RV64I-NEXT: addiw a2, a2, 1365
 ; RV64I-NEXT: and a1, a1, a2
-; RV64I-NEXT: subw a0, a0, a1
+; RV64I-NEXT: sub a0, a0, a1
 ; RV64I-NEXT: lui a1, 209715
 ; RV64I-NEXT: addiw a1, a1, 819
 ; RV64I-NEXT: and a2, a0, a1
@@ -1079,7 +1079,7 @@
 define signext i32 @bswap_i32(i32 signext %a) nounwind {
 ; RV64I-LABEL: bswap_i32:
 ; RV64I: # %bb.0:
-; RV64I-NEXT: srliw a1, a0, 8
+; RV64I-NEXT: srli a1, a0, 8
 ; RV64I-NEXT: lui a2, 16
 ; RV64I-NEXT: addiw a2, a2, -256
 ; RV64I-NEXT: and a1, a1, a2
@@ -1106,7 +1106,7 @@
 define void @bswap_i32_nosext(i32 signext %a, i32* %x) nounwind {
 ; RV64I-LABEL: bswap_i32_nosext:
 ; RV64I: # %bb.0:
-; RV64I-NEXT: srliw a2, a0, 8
+; RV64I-NEXT: srli a2, a0, 8
 ; RV64I-NEXT: lui a3, 16
 ; RV64I-NEXT: addiw a3, a3, -256
 ; RV64I-NEXT: and a2, a2, a3
diff --git a/llvm/test/CodeGen/RISCV/rv64zbp.ll b/llvm/test/CodeGen/RISCV/rv64zbp.ll
--- a/llvm/test/CodeGen/RISCV/rv64zbp.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zbp.ll
@@ -885,18 +885,30 @@
 ; RV64I-NEXT: addiw a4, a4, 819
 ; RV64I-NEXT: and a3, a3, a4
 ; RV64I-NEXT: or a0, a3, a0
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: slliw a1, a0, 2
-; RV64I-NEXT: and a1, a1, a2
-; RV64I-NEXT: srli a2, a0, 2
+; RV64I-NEXT: or a1, a0, a1
+; RV64I-NEXT: slliw a0, a0, 2
+; RV64I-NEXT: and a0, a0, a2
+; RV64I-NEXT: srli a2, a1, 2
 ; RV64I-NEXT: and a2, a2, a4
-; RV64I-NEXT: or a0, a2, a0
-; RV64I-NEXT: or a0, a0, a1
+; RV64I-NEXT: or a1, a2, a1
+; RV64I-NEXT: or a0, a1, a0
 ; RV64I-NEXT: ret
 ;
 ; RV64ZBP-LABEL: gorc2b_i32:
 ; RV64ZBP: # %bb.0:
+; RV64ZBP-NEXT: srli a1, a0, 2
+; RV64ZBP-NEXT: or a1, a1, a0
 ; RV64ZBP-NEXT: gorciw a0, a0, 2
+; RV64ZBP-NEXT: slliw a1, a1, 2
+; RV64ZBP-NEXT: lui a2, 838861
+; RV64ZBP-NEXT: addiw a2, a2, -820
+; RV64ZBP-NEXT: and a1, a1, a2
+; RV64ZBP-NEXT: srli a2, a0, 2
+; RV64ZBP-NEXT: lui a3, 209715
+; RV64ZBP-NEXT: addiw a3, a3, 819
+; RV64ZBP-NEXT: and a2, a2, a3
+; RV64ZBP-NEXT: or a0, a2, a0
+; RV64ZBP-NEXT: or a0, a0, a1
 ; RV64ZBP-NEXT: ret
 %and1 = shl i32 %a, 2
 %shl1 = and i32 %and1, -858993460
@@ -938,18 +950,42 @@
 ; RV64I-NEXT: addi a4, a4, 819
 ; RV64I-NEXT: and a3, a3, a4
 ; RV64I-NEXT: or a0, a3, a0
-; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: slli a1, a0, 2
-; RV64I-NEXT: and a1, a1, a2
-; RV64I-NEXT: srli a2, a0, 2
+; RV64I-NEXT: or a1, a0, a1
+; RV64I-NEXT: slli a0, a0, 2
+; RV64I-NEXT: and a0, a0, a2
+; RV64I-NEXT: srli a2, a1, 2
 ; RV64I-NEXT: and a2, a2, a4
-; RV64I-NEXT: or a0, a2, a0
-; RV64I-NEXT: or a0, a0, a1
+; RV64I-NEXT: or a1, a2, a1
+; RV64I-NEXT: or a0, a1, a0
 ; RV64I-NEXT: ret
 ;
 ; RV64ZBP-LABEL: gorc2b_i64:
 ; RV64ZBP: # %bb.0:
+; RV64ZBP-NEXT: srli a1, a0, 2
+; RV64ZBP-NEXT: or a1, a1, a0
 ; RV64ZBP-NEXT: orc2.n a0, a0
+; RV64ZBP-NEXT: slli a1, a1, 2
+; RV64ZBP-NEXT: lui a2, 1035469
+; RV64ZBP-NEXT: addiw a2, a2, -819
+; RV64ZBP-NEXT: slli a2, a2, 12
+; RV64ZBP-NEXT: addi a2, a2, -819
+; RV64ZBP-NEXT: slli a2, a2, 12
+; RV64ZBP-NEXT: addi a2, a2, -819
+; RV64ZBP-NEXT: slli a2, a2, 12
+; RV64ZBP-NEXT: addi a2, a2, -820
+; RV64ZBP-NEXT: and a1, a1, a2
+; RV64ZBP-NEXT: srli a2, a0, 2
+; RV64ZBP-NEXT: lui a3, 13107
+; RV64ZBP-NEXT: addiw a3, a3, 819
+; RV64ZBP-NEXT: slli a3, a3, 12
+; RV64ZBP-NEXT: addi a3, a3, 819
+; RV64ZBP-NEXT: slli a3, a3, 12
+; RV64ZBP-NEXT: addi a3, a3, 819
+; RV64ZBP-NEXT: slli a3, a3, 12
+; RV64ZBP-NEXT: addi a3, a3, 819
+; RV64ZBP-NEXT: and a2, a2, a3
+; RV64ZBP-NEXT: or a0, a2, a0
+; RV64ZBP-NEXT: or a0, a0, a1
 ; RV64ZBP-NEXT: ret
 %and1 = shl i64 %a, 2
 %shl1 = and i64 %and1, -3689348814741910324
@@ -2385,7 +2421,7 @@
 define signext i32 @bswap_i32(i32 signext %a) nounwind {
 ; RV64I-LABEL: bswap_i32:
 ; RV64I: # %bb.0:
-; RV64I-NEXT: srliw a1, a0, 8
+; RV64I-NEXT: srli a1, a0, 8
 ; RV64I-NEXT: lui a2, 16
 ; RV64I-NEXT: addiw a2, a2, -256
 ; RV64I-NEXT: and a1, a1, a2
@@ -2411,7 +2447,7 @@
 define void @bswap_i32_nosext(i32 signext %a, i32* %x) nounwind {
 ; RV64I-LABEL: bswap_i32_nosext:
 ; RV64I: # %bb.0:
-; RV64I-NEXT: srliw a2, a0, 8
+; RV64I-NEXT: srli a2, a0, 8
 ; RV64I-NEXT: lui a3, 16
 ; RV64I-NEXT: addiw a3, a3, -256
 ; RV64I-NEXT: and a2, a2, a3
@@ -2552,7 +2588,7 @@
 define signext i32 @bitreverse_i32(i32 signext %a) nounwind {
 ; RV64I-LABEL: bitreverse_i32:
 ; RV64I: # %bb.0:
-; RV64I-NEXT: srliw a1, a0, 8
+; RV64I-NEXT: srli a1, a0, 8
 ; RV64I-NEXT: lui a2, 16
 ; RV64I-NEXT: addiw a2, a2, -256
 ; RV64I-NEXT: and a1, a1, a2
@@ -2599,7 +2635,7 @@
 define void @bitreverse_i32_nosext(i32 signext %a, i32* %x) nounwind {
 ; RV64I-LABEL: bitreverse_i32_nosext:
 ; RV64I: # %bb.0:
-; RV64I-NEXT: srliw a2, a0, 8
+; RV64I-NEXT: srli a2, a0, 8
 ; RV64I-NEXT: lui a3, 16
 ; RV64I-NEXT: addiw a3, a3, -256
 ; RV64I-NEXT: and a2, a2, a3
@@ -2736,7 +2772,7 @@
 ; RV64I-NEXT: slli a2, a0, 24
 ; RV64I-NEXT: or a1, a2, a1
 ; RV64I-NEXT: srliw a2, a0, 24
-; RV64I-NEXT: srliw a0, a0, 8
+; RV64I-NEXT: srli a0, a0, 8
 ; RV64I-NEXT: andi a0, a0, -256
 ; RV64I-NEXT: or a0, a0, a2
 ; RV64I-NEXT: slliw a0, a0, 16
@@ -2757,7 +2793,7 @@
 ; RV64I-LABEL: bswap_rotl_i32:
 ; RV64I: # %bb.0:
 ; RV64I-NEXT: srliw a1, a0, 24
-; RV64I-NEXT: srliw a2, a0, 8
+; RV64I-NEXT: srli a2, a0, 8
 ; RV64I-NEXT: andi a2, a2, -256
 ; RV64I-NEXT: or a1, a2, a1
 ; RV64I-NEXT: slli a2, a0, 8
@@ -2782,7 +2818,7 @@
 define i32 @bitreverse_bswap_i32(i32 %a) {
 ; RV64I-LABEL: bitreverse_bswap_i32:
 ; RV64I: # %bb.0:
-; RV64I-NEXT: srliw a1, a0, 8
+; RV64I-NEXT: srli a1, a0, 8
 ; RV64I-NEXT: lui a2, 16
 ; RV64I-NEXT: addiw a2, a2, -256
 ; RV64I-NEXT: and a1, a1, a2
@@ -2815,7 +2851,7 @@
 ; RV64I-NEXT: and a0, a0, a3
 ; RV64I-NEXT: slliw a0, a0, 1
 ; RV64I-NEXT: or a0, a1, a0
-; RV64I-NEXT: srliw a1, a0, 8
+; RV64I-NEXT: srli a1, a0, 8
 ; RV64I-NEXT: and a1, a1, a2
 ; RV64I-NEXT: srliw a2, a0, 24
 ; RV64I-NEXT: or a1, a1, a2
diff --git a/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll b/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
--- a/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
+++ b/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
@@ -321,7 +321,7 @@
 ; RV32-NEXT: sw s6, 0(sp) # 4-byte Folded Spill
 ; RV32-NEXT: mv s0, a0
 ; RV32-NEXT: lw a0, 4(a0)
-; RV32-NEXT: lbu a1, 12(s0)
+; RV32-NEXT: lb a1, 12(s0)
 ; RV32-NEXT: lw a2, 8(s0)
 ; RV32-NEXT: andi a3, a0, 1
 ; RV32-NEXT: neg s2, a3
@@ -392,34 +392,30 @@
 ;
 ; RV64-LABEL: test_srem_vec:
 ; RV64: # %bb.0:
-; RV64-NEXT: addi sp, sp, -64
-; RV64-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s1, 40(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s2, 32(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s3, 24(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s4, 16(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s5, 8(sp) # 8-byte Folded Spill
+; RV64-NEXT: addi sp, sp, -48
+; RV64-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s2, 16(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s3, 8(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s4, 0(sp) # 8-byte Folded Spill
 ; RV64-NEXT: mv s0, a0
 ; RV64-NEXT: lb a0, 12(a0)
 ; RV64-NEXT: lwu a1, 8(s0)
 ; RV64-NEXT: slli a0, a0, 32
 ; RV64-NEXT: or a0, a1, a0
-; RV64-NEXT: li s4, -1
-; RV64-NEXT: srli a1, s4, 24
-; RV64-NEXT: and a0, a0, a1
-; RV64-NEXT: ld a1, 0(s0)
-; RV64-NEXT: slli a2, a0, 29
-; RV64-NEXT: srai s1, a2, 31
-; RV64-NEXT: slli a0, a0, 31
-; RV64-NEXT: srli a2, a1, 33
-; RV64-NEXT: or a0, a2, a0
+; RV64-NEXT: ld a2, 0(s0)
+; RV64-NEXT: slli a0, a0, 29
+; RV64-NEXT: srai s1, a0, 31
+; RV64-NEXT: slli a0, a1, 31
+; RV64-NEXT: srli a1, a2, 33
+; RV64-NEXT: or a0, a1, a0
 ; RV64-NEXT: slli a0, a0, 31
 ; RV64-NEXT: srai a0, a0, 31
-; RV64-NEXT: slli a1, a1, 31
+; RV64-NEXT: slli a1, a2, 31
 ; RV64-NEXT: srai s2, a1, 31
 ; RV64-NEXT: li a1, 7
-; RV64-NEXT: li s5, 7
+; RV64-NEXT: li s4, 7
 ; RV64-NEXT: call __moddi3@plt
 ; RV64-NEXT: mv s3, a0
 ; RV64-NEXT: li a1, -5
@@ -454,30 +450,32 @@
 ; RV64-NEXT: addi a2, s3, -1
 ; RV64-NEXT: snez a2, a2
 ; RV64-NEXT: neg a0, a0
-; RV64-NEXT: neg a2, a2
-; RV64-NEXT: neg a3, a1
-; RV64-NEXT: slli a4, s5, 32
-; RV64-NEXT: and a3, a3, a4
-; RV64-NEXT: srli a3, a3, 32
-; RV64-NEXT: sb a3, 12(s0)
+; RV64-NEXT: neg a3, a2
+; RV64-NEXT: neg a4, a1
+; RV64-NEXT: slli a5, s4, 32
+; RV64-NEXT: and a4, a4, a5
+; RV64-NEXT: srli a4, a4, 32
+; RV64-NEXT: sb a4, 12(s0)
 ; RV64-NEXT: slliw a1, a1, 2
-; RV64-NEXT: srli a3, s4, 31
-; RV64-NEXT: and a2, a2, a3
-; RV64-NEXT: srli a4, a2, 31
-; RV64-NEXT: subw a1, a4, a1
+; RV64-NEXT: li a4, 3
+; RV64-NEXT: slli a4, a4, 31
+; RV64-NEXT: and a3, a3, a4
+; RV64-NEXT: srli a3, a3, 31
+; RV64-NEXT: subw a1, a3, a1
 ; RV64-NEXT: sw a1, 8(s0)
-; RV64-NEXT: and a0, a0, a3
+; RV64-NEXT: li a1, -1
+; RV64-NEXT: srli a1, a1, 31
+; RV64-NEXT: and a0, a0, a1
 ; RV64-NEXT: slli a1, a2, 33
-; RV64-NEXT: or a0, a0, a1
+; RV64-NEXT: sub a0, a0, a1
 ; RV64-NEXT: sd a0, 0(s0)
-; RV64-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s2, 32(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s3, 24(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s4, 16(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s5, 8(sp) # 8-byte
Folded Reload -; RV64-NEXT: addi sp, sp, 64 +; RV64-NEXT: ld ra, 40(sp) # 8-byte Folded Reload +; RV64-NEXT: ld s0, 32(sp) # 8-byte Folded Reload +; RV64-NEXT: ld s1, 24(sp) # 8-byte Folded Reload +; RV64-NEXT: ld s2, 16(sp) # 8-byte Folded Reload +; RV64-NEXT: ld s3, 8(sp) # 8-byte Folded Reload +; RV64-NEXT: ld s4, 0(sp) # 8-byte Folded Reload +; RV64-NEXT: addi sp, sp, 48 ; RV64-NEXT: ret ; ; RV32M-LABEL: test_srem_vec: @@ -493,7 +491,7 @@ ; RV32M-NEXT: sw s6, 0(sp) # 4-byte Folded Spill ; RV32M-NEXT: mv s0, a0 ; RV32M-NEXT: lw a0, 4(a0) -; RV32M-NEXT: lbu a1, 12(s0) +; RV32M-NEXT: lb a1, 12(s0) ; RV32M-NEXT: lw a2, 8(s0) ; RV32M-NEXT: andi a3, a0, 1 ; RV32M-NEXT: neg s2, a3 @@ -567,54 +565,51 @@ ; RV64M-NEXT: lb a1, 12(a0) ; RV64M-NEXT: lwu a2, 8(a0) ; RV64M-NEXT: slli a1, a1, 32 -; RV64M-NEXT: or a2, a2, a1 -; RV64M-NEXT: li a6, -1 -; RV64M-NEXT: srli a3, a6, 24 -; RV64M-NEXT: and a2, a2, a3 +; RV64M-NEXT: or a1, a2, a1 ; RV64M-NEXT: ld a3, 0(a0) -; RV64M-NEXT: slli a4, a2, 29 -; RV64M-NEXT: srai a4, a4, 31 +; RV64M-NEXT: slli a1, a1, 29 +; RV64M-NEXT: srai a1, a1, 31 ; RV64M-NEXT: slli a2, a2, 31 -; RV64M-NEXT: srli a5, a3, 33 -; RV64M-NEXT: or a2, a5, a2 +; RV64M-NEXT: srli a4, a3, 33 +; RV64M-NEXT: or a2, a4, a2 ; RV64M-NEXT: slli a2, a2, 31 ; RV64M-NEXT: srai a2, a2, 31 ; RV64M-NEXT: slli a3, a3, 31 ; RV64M-NEXT: srai a3, a3, 31 -; RV64M-NEXT: lui a5, 18725 -; RV64M-NEXT: addiw a5, a5, -1755 -; RV64M-NEXT: slli a5, a5, 12 -; RV64M-NEXT: addi a5, a5, -1755 -; RV64M-NEXT: slli a5, a5, 12 -; RV64M-NEXT: addi a5, a5, -1755 -; RV64M-NEXT: slli a5, a5, 12 -; RV64M-NEXT: addi a5, a5, -1755 -; RV64M-NEXT: mulh a5, a2, a5 -; RV64M-NEXT: srli a1, a5, 63 -; RV64M-NEXT: srai a5, a5, 1 -; RV64M-NEXT: add a1, a5, a1 -; RV64M-NEXT: slli a5, a1, 3 -; RV64M-NEXT: sub a1, a1, a5 -; RV64M-NEXT: add a1, a2, a1 -; RV64M-NEXT: lui a2, 1035469 -; RV64M-NEXT: addiw a2, a2, -819 -; RV64M-NEXT: slli a2, a2, 12 -; RV64M-NEXT: addi a2, a2, -819 -; RV64M-NEXT: slli a2, a2, 12 -; RV64M-NEXT: addi a2, a2, -819 -; RV64M-NEXT: slli a2, a2, 13 -; RV64M-NEXT: addi a2, a2, -1639 -; RV64M-NEXT: mulh a2, a4, a2 -; RV64M-NEXT: srli a5, a2, 63 -; RV64M-NEXT: srai a2, a2, 1 -; RV64M-NEXT: add a2, a2, a5 -; RV64M-NEXT: slli a5, a2, 2 -; RV64M-NEXT: add a2, a5, a2 -; RV64M-NEXT: add a2, a4, a2 -; RV64M-NEXT: addi a2, a2, -2 +; RV64M-NEXT: lui a4, 18725 +; RV64M-NEXT: addiw a4, a4, -1755 +; RV64M-NEXT: slli a4, a4, 12 +; RV64M-NEXT: addi a4, a4, -1755 +; RV64M-NEXT: slli a4, a4, 12 +; RV64M-NEXT: addi a4, a4, -1755 +; RV64M-NEXT: slli a4, a4, 12 +; RV64M-NEXT: addi a4, a4, -1755 +; RV64M-NEXT: mulh a4, a2, a4 +; RV64M-NEXT: srli a5, a4, 63 +; RV64M-NEXT: srai a4, a4, 1 +; RV64M-NEXT: add a4, a4, a5 +; RV64M-NEXT: slli a5, a4, 3 +; RV64M-NEXT: sub a4, a4, a5 +; RV64M-NEXT: add a2, a2, a4 +; RV64M-NEXT: lui a4, 1035469 +; RV64M-NEXT: addiw a4, a4, -819 +; RV64M-NEXT: slli a4, a4, 12 +; RV64M-NEXT: addi a4, a4, -819 +; RV64M-NEXT: slli a4, a4, 12 +; RV64M-NEXT: addi a4, a4, -819 +; RV64M-NEXT: slli a4, a4, 13 +; RV64M-NEXT: addi a4, a4, -1639 +; RV64M-NEXT: mulh a4, a1, a4 +; RV64M-NEXT: srli a5, a4, 63 +; RV64M-NEXT: srai a4, a4, 1 +; RV64M-NEXT: add a4, a4, a5 +; RV64M-NEXT: slli a5, a4, 2 +; RV64M-NEXT: add a4, a5, a4 +; RV64M-NEXT: add a1, a1, a4 +; RV64M-NEXT: addi a1, a1, -2 +; RV64M-NEXT: snez a6, a1 +; RV64M-NEXT: addi a2, a2, -1 ; RV64M-NEXT: snez a2, a2 -; RV64M-NEXT: addi a1, a1, -1 -; RV64M-NEXT: snez a1, a1 ; RV64M-NEXT: lui a4, 1026731 ; RV64M-NEXT: addiw a4, a4, -1365 ; RV64M-NEXT: slli a4, a4, 12 @@ -637,24 
+632,27 @@ ; RV64M-NEXT: srli a3, a3, 1 ; RV64M-NEXT: or a3, a3, a5 ; RV64M-NEXT: sltu a3, a4, a3 -; RV64M-NEXT: neg a1, a1 ; RV64M-NEXT: neg a4, a2 +; RV64M-NEXT: neg a5, a6 ; RV64M-NEXT: neg a3, a3 -; RV64M-NEXT: li a5, 7 -; RV64M-NEXT: slli a5, a5, 32 -; RV64M-NEXT: and a4, a4, a5 -; RV64M-NEXT: srli a4, a4, 32 -; RV64M-NEXT: sb a4, 12(a0) -; RV64M-NEXT: slliw a2, a2, 2 -; RV64M-NEXT: srli a4, a6, 31 -; RV64M-NEXT: and a1, a1, a4 -; RV64M-NEXT: srli a5, a1, 31 -; RV64M-NEXT: subw a2, a5, a2 -; RV64M-NEXT: sw a2, 8(a0) -; RV64M-NEXT: slli a1, a1, 33 -; RV64M-NEXT: and a2, a3, a4 -; RV64M-NEXT: or a1, a2, a1 +; RV64M-NEXT: slli a2, a2, 33 +; RV64M-NEXT: li a1, -1 +; RV64M-NEXT: srli a1, a1, 31 +; RV64M-NEXT: and a1, a3, a1 +; RV64M-NEXT: sub a1, a1, a2 ; RV64M-NEXT: sd a1, 0(a0) +; RV64M-NEXT: li a1, 7 +; RV64M-NEXT: slli a1, a1, 32 +; RV64M-NEXT: and a1, a5, a1 +; RV64M-NEXT: srli a1, a1, 32 +; RV64M-NEXT: sb a1, 12(a0) +; RV64M-NEXT: slliw a1, a6, 2 +; RV64M-NEXT: li a2, 3 +; RV64M-NEXT: slli a2, a2, 31 +; RV64M-NEXT: and a2, a4, a2 +; RV64M-NEXT: srli a2, a2, 31 +; RV64M-NEXT: subw a1, a2, a1 +; RV64M-NEXT: sw a1, 8(a0) ; RV64M-NEXT: ret ; ; RV32MV-LABEL: test_srem_vec: @@ -675,7 +673,7 @@ ; RV32MV-NEXT: slli a2, a0, 31 ; RV32MV-NEXT: srli a3, a1, 1 ; RV32MV-NEXT: or s2, a3, a2 -; RV32MV-NEXT: lbu a2, 12(s1) +; RV32MV-NEXT: lb a2, 12(s1) ; RV32MV-NEXT: srli a3, a0, 1 ; RV32MV-NEXT: andi a3, a3, 1 ; RV32MV-NEXT: neg s3, a3 @@ -768,53 +766,50 @@ ; RV64MV-NEXT: sd s0, 80(sp) # 8-byte Folded Spill ; RV64MV-NEXT: addi s0, sp, 96 ; RV64MV-NEXT: andi sp, sp, -32 -; RV64MV-NEXT: lb a1, 12(a0) -; RV64MV-NEXT: lwu a2, 8(a0) -; RV64MV-NEXT: slli a1, a1, 32 -; RV64MV-NEXT: or a2, a2, a1 -; RV64MV-NEXT: li a6, -1 -; RV64MV-NEXT: ld a3, 0(a0) -; RV64MV-NEXT: srli a4, a6, 24 -; RV64MV-NEXT: and a2, a2, a4 -; RV64MV-NEXT: slli a4, a2, 31 -; RV64MV-NEXT: srli a5, a3, 33 -; RV64MV-NEXT: or a4, a5, a4 -; RV64MV-NEXT: slli a4, a4, 31 -; RV64MV-NEXT: srai a4, a4, 31 -; RV64MV-NEXT: slli a2, a2, 29 -; RV64MV-NEXT: srai a2, a2, 31 +; RV64MV-NEXT: lwu a1, 8(a0) +; RV64MV-NEXT: ld a2, 0(a0) +; RV64MV-NEXT: slli a3, a1, 31 +; RV64MV-NEXT: srli a4, a2, 33 +; RV64MV-NEXT: lb a5, 12(a0) +; RV64MV-NEXT: or a3, a4, a3 ; RV64MV-NEXT: slli a3, a3, 31 ; RV64MV-NEXT: srai a3, a3, 31 -; RV64MV-NEXT: lui a5, 10923 -; RV64MV-NEXT: addiw a5, a5, -1365 -; RV64MV-NEXT: slli a5, a5, 12 -; RV64MV-NEXT: addi a5, a5, -1365 -; RV64MV-NEXT: slli a5, a5, 12 -; RV64MV-NEXT: addi a5, a5, -1365 -; RV64MV-NEXT: slli a5, a5, 12 -; RV64MV-NEXT: addi a5, a5, -1365 -; RV64MV-NEXT: mulh a5, a3, a5 -; RV64MV-NEXT: srli a1, a5, 63 -; RV64MV-NEXT: add a1, a5, a1 +; RV64MV-NEXT: slli a4, a5, 32 +; RV64MV-NEXT: or a1, a1, a4 +; RV64MV-NEXT: slli a1, a1, 29 +; RV64MV-NEXT: srai a1, a1, 31 +; RV64MV-NEXT: slli a2, a2, 31 +; RV64MV-NEXT: srai a2, a2, 31 +; RV64MV-NEXT: lui a4, 10923 +; RV64MV-NEXT: addiw a4, a4, -1365 +; RV64MV-NEXT: slli a4, a4, 12 +; RV64MV-NEXT: addi a4, a4, -1365 +; RV64MV-NEXT: slli a4, a4, 12 +; RV64MV-NEXT: addi a4, a4, -1365 +; RV64MV-NEXT: slli a4, a4, 12 +; RV64MV-NEXT: addi a4, a4, -1365 +; RV64MV-NEXT: mulh a4, a2, a4 +; RV64MV-NEXT: srli a5, a4, 63 +; RV64MV-NEXT: add a4, a4, a5 ; RV64MV-NEXT: li a5, 6 -; RV64MV-NEXT: mul a1, a1, a5 -; RV64MV-NEXT: sub a1, a3, a1 -; RV64MV-NEXT: sd a1, 32(sp) -; RV64MV-NEXT: lui a1, 1035469 -; RV64MV-NEXT: addiw a1, a1, -819 -; RV64MV-NEXT: slli a1, a1, 12 -; RV64MV-NEXT: addi a1, a1, -819 -; RV64MV-NEXT: slli a1, a1, 12 -; RV64MV-NEXT: addi a1, a1, -819 -; RV64MV-NEXT: slli a1, a1, 13 
-; RV64MV-NEXT: addi a1, a1, -1639 -; RV64MV-NEXT: mulh a1, a2, a1 -; RV64MV-NEXT: srli a3, a1, 63 -; RV64MV-NEXT: srai a1, a1, 1 -; RV64MV-NEXT: add a1, a1, a3 -; RV64MV-NEXT: slli a3, a1, 2 -; RV64MV-NEXT: add a1, a3, a1 -; RV64MV-NEXT: add a1, a2, a1 +; RV64MV-NEXT: mul a4, a4, a5 +; RV64MV-NEXT: sub a2, a2, a4 +; RV64MV-NEXT: sd a2, 32(sp) +; RV64MV-NEXT: lui a2, 1035469 +; RV64MV-NEXT: addiw a2, a2, -819 +; RV64MV-NEXT: slli a2, a2, 12 +; RV64MV-NEXT: addi a2, a2, -819 +; RV64MV-NEXT: slli a2, a2, 12 +; RV64MV-NEXT: addi a2, a2, -819 +; RV64MV-NEXT: slli a2, a2, 13 +; RV64MV-NEXT: addi a2, a2, -1639 +; RV64MV-NEXT: mulh a2, a1, a2 +; RV64MV-NEXT: srli a4, a2, 63 +; RV64MV-NEXT: srai a2, a2, 1 +; RV64MV-NEXT: add a2, a2, a4 +; RV64MV-NEXT: slli a4, a2, 2 +; RV64MV-NEXT: add a2, a4, a2 +; RV64MV-NEXT: add a1, a1, a2 ; RV64MV-NEXT: sd a1, 48(sp) ; RV64MV-NEXT: lui a1, 18725 ; RV64MV-NEXT: addiw a1, a1, -1755 @@ -824,13 +819,13 @@ ; RV64MV-NEXT: addi a1, a1, -1755 ; RV64MV-NEXT: slli a1, a1, 12 ; RV64MV-NEXT: addi a1, a1, -1755 -; RV64MV-NEXT: mulh a1, a4, a1 +; RV64MV-NEXT: mulh a1, a3, a1 ; RV64MV-NEXT: srli a2, a1, 63 ; RV64MV-NEXT: srai a1, a1, 1 ; RV64MV-NEXT: add a1, a1, a2 ; RV64MV-NEXT: slli a2, a1, 3 ; RV64MV-NEXT: sub a1, a1, a2 -; RV64MV-NEXT: add a1, a4, a1 +; RV64MV-NEXT: add a1, a3, a1 ; RV64MV-NEXT: sd a1, 40(sp) ; RV64MV-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV64MV-NEXT: addi a1, sp, 32 @@ -838,7 +833,8 @@ ; RV64MV-NEXT: lui a1, %hi(.LCPI3_0) ; RV64MV-NEXT: addi a1, a1, %lo(.LCPI3_0) ; RV64MV-NEXT: vle64.v v10, (a1) -; RV64MV-NEXT: srli a1, a6, 31 +; RV64MV-NEXT: li a1, -1 +; RV64MV-NEXT: srli a1, a1, 31 ; RV64MV-NEXT: vand.vx v8, v8, a1 ; RV64MV-NEXT: vmsne.vv v0, v8, v10 ; RV64MV-NEXT: vmv.v.i v8, 0 @@ -849,18 +845,20 @@ ; RV64MV-NEXT: srli a3, a2, 30 ; RV64MV-NEXT: andi a3, a3, 7 ; RV64MV-NEXT: sb a3, 12(a0) -; RV64MV-NEXT: slli a2, a2, 2 -; RV64MV-NEXT: vslidedown.vi v10, v8, 1 -; RV64MV-NEXT: vmv.x.s a3, v10 -; RV64MV-NEXT: and a3, a3, a1 -; RV64MV-NEXT: srli a4, a3, 31 -; RV64MV-NEXT: or a2, a4, a2 -; RV64MV-NEXT: sw a2, 8(a0) -; RV64MV-NEXT: vmv.x.s a2, v8 -; RV64MV-NEXT: and a1, a2, a1 -; RV64MV-NEXT: slli a2, a3, 33 -; RV64MV-NEXT: or a1, a1, a2 +; RV64MV-NEXT: vmv.x.s a3, v8 +; RV64MV-NEXT: and a1, a3, a1 +; RV64MV-NEXT: vslidedown.vi v8, v8, 1 +; RV64MV-NEXT: vmv.x.s a3, v8 +; RV64MV-NEXT: slli a4, a3, 33 +; RV64MV-NEXT: or a1, a1, a4 ; RV64MV-NEXT: sd a1, 0(a0) +; RV64MV-NEXT: slli a1, a2, 2 +; RV64MV-NEXT: li a2, 3 +; RV64MV-NEXT: slli a2, a2, 31 +; RV64MV-NEXT: and a2, a3, a2 +; RV64MV-NEXT: srli a2, a2, 31 +; RV64MV-NEXT: or a1, a2, a1 +; RV64MV-NEXT: sw a1, 8(a0) ; RV64MV-NEXT: addi sp, s0, -96 ; RV64MV-NEXT: ld ra, 88(sp) # 8-byte Folded Reload ; RV64MV-NEXT: ld s0, 80(sp) # 8-byte Folded Reload diff --git a/llvm/test/CodeGen/RISCV/vec3-setcc-crash.ll b/llvm/test/CodeGen/RISCV/vec3-setcc-crash.ll --- a/llvm/test/CodeGen/RISCV/vec3-setcc-crash.ll +++ b/llvm/test/CodeGen/RISCV/vec3-setcc-crash.ll @@ -12,30 +12,27 @@ ; RV32-LABEL: vec3_setcc_crash: ; RV32: # %bb.0: ; RV32-NEXT: lw a0, 0(a0) -; RV32-NEXT: lui a2, 16 -; RV32-NEXT: addi a2, a2, -256 -; RV32-NEXT: and a2, a0, a2 -; RV32-NEXT: slli a3, a2, 16 -; RV32-NEXT: srai a6, a3, 24 -; RV32-NEXT: slli a4, a0, 24 -; RV32-NEXT: srai a3, a4, 24 -; RV32-NEXT: slli a4, a0, 8 -; RV32-NEXT: mv a5, a0 -; RV32-NEXT: bgtz a3, .LBB0_2 +; RV32-NEXT: slli a2, a0, 8 +; RV32-NEXT: slli a3, a0, 24 +; RV32-NEXT: slli a4, a0, 16 +; RV32-NEXT: srai a5, a4, 24 +; RV32-NEXT: srai a3, a3, 24 +; RV32-NEXT: bgtz a5, .LBB0_2 ; 
RV32-NEXT: # %bb.1: ; RV32-NEXT: li a5, 0 +; RV32-NEXT: j .LBB0_3 ; RV32-NEXT: .LBB0_2: -; RV32-NEXT: srai a4, a4, 24 -; RV32-NEXT: andi a5, a5, 255 -; RV32-NEXT: bgtz a6, .LBB0_4 -; RV32-NEXT: # %bb.3: -; RV32-NEXT: li a2, 0 -; RV32-NEXT: j .LBB0_5 -; RV32-NEXT: .LBB0_4: -; RV32-NEXT: srli a2, a2, 8 +; RV32-NEXT: srli a5, a4, 24 +; RV32-NEXT: .LBB0_3: +; RV32-NEXT: srai a4, a2, 24 +; RV32-NEXT: slli a2, a5, 8 +; RV32-NEXT: mv a5, a0 +; RV32-NEXT: bgtz a3, .LBB0_5 +; RV32-NEXT: # %bb.4: +; RV32-NEXT: li a5, 0 ; RV32-NEXT: .LBB0_5: -; RV32-NEXT: slli a2, a2, 8 -; RV32-NEXT: or a2, a5, a2 +; RV32-NEXT: andi a3, a5, 255 +; RV32-NEXT: or a2, a3, a2 ; RV32-NEXT: bgtz a4, .LBB0_7 ; RV32-NEXT: # %bb.6: ; RV32-NEXT: li a0, 0 @@ -50,30 +47,27 @@ ; RV64-LABEL: vec3_setcc_crash: ; RV64: # %bb.0: ; RV64-NEXT: lwu a0, 0(a0) -; RV64-NEXT: lui a2, 16 -; RV64-NEXT: addiw a2, a2, -256 -; RV64-NEXT: and a2, a0, a2 -; RV64-NEXT: slli a3, a2, 48 -; RV64-NEXT: srai a6, a3, 56 -; RV64-NEXT: slli a4, a0, 56 -; RV64-NEXT: srai a3, a4, 56 -; RV64-NEXT: slli a4, a0, 40 -; RV64-NEXT: mv a5, a0 -; RV64-NEXT: bgtz a3, .LBB0_2 +; RV64-NEXT: slli a2, a0, 40 +; RV64-NEXT: slli a3, a0, 56 +; RV64-NEXT: slli a4, a0, 48 +; RV64-NEXT: srai a5, a4, 56 +; RV64-NEXT: srai a3, a3, 56 +; RV64-NEXT: bgtz a5, .LBB0_2 ; RV64-NEXT: # %bb.1: ; RV64-NEXT: li a5, 0 +; RV64-NEXT: j .LBB0_3 ; RV64-NEXT: .LBB0_2: -; RV64-NEXT: srai a4, a4, 56 -; RV64-NEXT: andi a5, a5, 255 -; RV64-NEXT: bgtz a6, .LBB0_4 -; RV64-NEXT: # %bb.3: -; RV64-NEXT: li a2, 0 -; RV64-NEXT: j .LBB0_5 -; RV64-NEXT: .LBB0_4: -; RV64-NEXT: srli a2, a2, 8 +; RV64-NEXT: srli a5, a4, 56 +; RV64-NEXT: .LBB0_3: +; RV64-NEXT: srai a4, a2, 56 +; RV64-NEXT: slli a2, a5, 8 +; RV64-NEXT: mv a5, a0 +; RV64-NEXT: bgtz a3, .LBB0_5 +; RV64-NEXT: # %bb.4: +; RV64-NEXT: li a5, 0 ; RV64-NEXT: .LBB0_5: -; RV64-NEXT: slli a2, a2, 8 -; RV64-NEXT: or a2, a5, a2 +; RV64-NEXT: andi a3, a5, 255 +; RV64-NEXT: or a2, a3, a2 ; RV64-NEXT: bgtz a4, .LBB0_7 ; RV64-NEXT: # %bb.6: ; RV64-NEXT: li a0, 0 diff --git a/llvm/test/CodeGen/SystemZ/store_nonbytesized_vecs.ll b/llvm/test/CodeGen/SystemZ/store_nonbytesized_vecs.ll --- a/llvm/test/CodeGen/SystemZ/store_nonbytesized_vecs.ll +++ b/llvm/test/CodeGen/SystemZ/store_nonbytesized_vecs.ll @@ -12,7 +12,7 @@ ; CHECK-NEXT: nihh %r1, 4095 ; CHECK-NEXT: stg %r1, 0(%r2) ; CHECK-NEXT: vlgvf %r1, %v24, 2 -; CHECK-NEXT: risbgn %r0, %r0, 0, 129, 62 +; CHECK-NEXT: sllg %r0, %r0, 62 ; CHECK-NEXT: rosbg %r0, %r1, 2, 32, 31 ; CHECK-NEXT: vlgvf %r1, %v24, 3 ; CHECK-NEXT: rosbg %r0, %r1, 33, 63, 0 @@ -76,38 +76,41 @@ ; CHECK-NEXT: stmg %r14, %r15, 112(%r15) ; CHECK-NEXT: .cfi_offset %r14, -48 ; CHECK-NEXT: .cfi_offset %r15, -40 -; CHECK-NEXT: vlgvf %r0, %v26, 3 -; CHECK-NEXT: vlgvf %r4, %v24, 1 -; CHECK-NEXT: vlgvf %r3, %v24, 2 -; CHECK-NEXT: srlk %r1, %r0, 8 +; CHECK-NEXT: vlgvf %r1, %v26, 3 +; CHECK-NEXT: vlgvf %r0, %v26, 2 +; CHECK-NEXT: stc %r1, 30(%r2) +; CHECK-NEXT: srlk %r3, %r1, 8 +; CHECK-NEXT: risbgn %r1, %r1, 33, 167, 0 +; CHECK-NEXT: vlgvf %r5, %v24, 2 +; CHECK-NEXT: rosbg %r1, %r0, 2, 32, 31 +; CHECK-NEXT: sth %r3, 28(%r2) +; CHECK-NEXT: srlg %r1, %r1, 24 +; CHECK-NEXT: vlgvf %r3, %v24, 3 +; CHECK-NEXT: st %r1, 24(%r2) +; CHECK-NEXT: vlgvf %r1, %v26, 0 +; CHECK-NEXT: risbgn %r14, %r5, 6, 164, 27 +; CHECK-NEXT: sllg %r4, %r3, 60 +; CHECK-NEXT: rosbg %r14, %r3, 37, 63, 60 +; CHECK-NEXT: sllg %r3, %r14, 8 +; CHECK-NEXT: rosbg %r4, %r1, 4, 34, 29 +; CHECK-NEXT: rosbg %r3, %r4, 56, 63, 8 +; CHECK-NEXT: stg %r3, 8(%r2) +; CHECK-NEXT: vlgvf %r3, %v24, 1 +; 
CHECK-NEXT: sllg %r4, %r3, 58 +; CHECK-NEXT: rosbg %r4, %r5, 6, 36, 27 ; CHECK-NEXT: vlgvf %r5, %v24, 0 -; CHECK-NEXT: sth %r1, 28(%r2) -; CHECK-NEXT: risbgn %r1, %r4, 0, 133, 58 ; CHECK-NEXT: sllg %r5, %r5, 25 -; CHECK-NEXT: stc %r0, 30(%r2) -; CHECK-NEXT: rosbg %r1, %r3, 6, 36, 27 -; CHECK-NEXT: vlgvf %r3, %v24, 3 -; CHECK-NEXT: rosbg %r5, %r4, 39, 63, 58 -; CHECK-NEXT: sllg %r4, %r5, 8 -; CHECK-NEXT: rosbg %r1, %r3, 37, 63, 60 -; CHECK-NEXT: vlgvf %r5, %v26, 1 -; CHECK-NEXT: rosbg %r4, %r1, 56, 63, 8 -; CHECK-NEXT: stg %r4, 0(%r2) -; CHECK-NEXT: vlgvf %r4, %v26, 2 -; CHECK-NEXT: risbgn %r14, %r5, 0, 129, 62 -; CHECK-NEXT: risbgn %r3, %r3, 0, 131, 60 -; CHECK-NEXT: rosbg %r14, %r4, 2, 32, 31 -; CHECK-NEXT: rosbg %r14, %r0, 33, 63, 0 -; CHECK-NEXT: srlg %r0, %r14, 24 -; CHECK-NEXT: st %r0, 24(%r2) -; CHECK-NEXT: vlgvf %r0, %v26, 0 -; CHECK-NEXT: rosbg %r3, %r0, 4, 34, 29 -; CHECK-NEXT: sllg %r0, %r1, 8 -; CHECK-NEXT: rosbg %r3, %r5, 35, 63, 62 -; CHECK-NEXT: rosbg %r0, %r3, 56, 63, 8 -; CHECK-NEXT: stg %r0, 8(%r2) -; CHECK-NEXT: sllg %r0, %r3, 8 -; CHECK-NEXT: rosbg %r0, %r14, 56, 63, 8 +; CHECK-NEXT: rosbg %r5, %r3, 39, 63, 58 +; CHECK-NEXT: sllg %r3, %r5, 8 +; CHECK-NEXT: rosbg %r3, %r4, 56, 63, 8 +; CHECK-NEXT: stg %r3, 0(%r2) +; CHECK-NEXT: vlgvf %r3, %v26, 1 +; CHECK-NEXT: sllg %r4, %r3, 62 +; CHECK-NEXT: rosbg %r4, %r0, 2, 32, 31 +; CHECK-NEXT: risbgn %r0, %r1, 4, 162, 29 +; CHECK-NEXT: rosbg %r0, %r3, 35, 63, 62 +; CHECK-NEXT: sllg %r0, %r0, 8 +; CHECK-NEXT: rosbg %r0, %r4, 56, 63, 8 ; CHECK-NEXT: stg %r0, 16(%r2) ; CHECK-NEXT: lmg %r14, %r15, 112(%r15) ; CHECK-NEXT: br %r14 @@ -121,20 +124,19 @@ define void @fun3(<3 x i31>* %src, <3 x i31>* %p) ; CHECK-LABEL: fun3: ; CHECK: # %bb.0: -; CHECK-NEXT: l %r0, 8(%r2) +; CHECK-NEXT: llgf %r0, 8(%r2) ; CHECK-NEXT: lg %r1, 0(%r2) ; CHECK-NEXT: sllg %r2, %r1, 32 ; CHECK-NEXT: lr %r2, %r0 -; CHECK-NEXT: srlg %r0, %r2, 62 -; CHECK-NEXT: st %r2, 8(%r3) -; CHECK-NEXT: rosbg %r0, %r1, 33, 61, 34 -; CHECK-NEXT: sllg %r1, %r0, 62 -; CHECK-NEXT: rosbg %r1, %r2, 2, 32, 0 -; CHECK-NEXT: srlg %r1, %r1, 32 -; CHECK-NEXT: sllg %r0, %r0, 30 -; CHECK-NEXT: lr %r0, %r1 -; CHECK-NEXT: nihh %r0, 8191 -; CHECK-NEXT: stg %r0, 0(%r3) +; CHECK-NEXT: risbgn %r2, %r2, 2, 160, 0 +; CHECK-NEXT: lgr %r4, %r2 +; CHECK-NEXT: rosbg %r2, %r1, 0, 1, 32 +; CHECK-NEXT: rosbg %r4, %r0, 33, 63, 0 +; CHECK-NEXT: srlg %r0, %r2, 32 +; CHECK-NEXT: lr %r1, %r0 +; CHECK-NEXT: nihh %r1, 8191 +; CHECK-NEXT: st %r4, 8(%r3) +; CHECK-NEXT: stg %r1, 0(%r3) ; CHECK-NEXT: br %r14 { %tmp = load <3 x i31>, <3 x i31>* %src diff --git a/llvm/test/CodeGen/Thumb2/mve-fptosi-sat-vector.ll b/llvm/test/CodeGen/Thumb2/mve-fptosi-sat-vector.ll --- a/llvm/test/CodeGen/Thumb2/mve-fptosi-sat-vector.ll +++ b/llvm/test/CodeGen/Thumb2/mve-fptosi-sat-vector.ll @@ -1851,32 +1851,33 @@ ; CHECK-NEXT: vpush {d8, d9, d10, d11} ; CHECK-NEXT: vmov q4, q0 ; CHECK-NEXT: mov r8, r0 -; CHECK-NEXT: vmov r0, s18 +; CHECK-NEXT: vmov r0, s17 ; CHECK-NEXT: bl __aeabi_f2lz ; CHECK-NEXT: mov r9, r0 ; CHECK-NEXT: vmov r0, s19 ; CHECK-NEXT: vldr s20, .LCPI28_0 -; CHECK-NEXT: mov r5, r1 -; CHECK-NEXT: vcmp.f32 s18, s20 +; CHECK-NEXT: mov r7, r1 +; CHECK-NEXT: vmov r4, s16 +; CHECK-NEXT: vcmp.f32 s17, s20 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: itt lt -; CHECK-NEXT: movlt r5, #0 -; CHECK-NEXT: movtlt r5, #65534 +; CHECK-NEXT: movlt r7, #0 +; CHECK-NEXT: movtlt r7, #65534 ; CHECK-NEXT: bl __aeabi_f2lz -; CHECK-NEXT: mov r7, r0 -; CHECK-NEXT: vmov r0, s16 ; CHECK-NEXT: vldr s22, .LCPI28_1 ; CHECK-NEXT: vcmp.f32 s19, s20 
+; CHECK-NEXT: mov r6, r0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-NEXT: mov r6, r1 -; CHECK-NEXT: vcmp.f32 s18, s22 +; CHECK-NEXT: mov r5, r1 +; CHECK-NEXT: mov r0, r4 +; CHECK-NEXT: vcmp.f32 s17, s22 ; CHECK-NEXT: itt lt -; CHECK-NEXT: movlt r6, #0 -; CHECK-NEXT: movtlt r6, #65534 +; CHECK-NEXT: movlt r5, #0 +; CHECK-NEXT: movtlt r5, #65534 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: itt gt -; CHECK-NEXT: movwgt r5, #65535 -; CHECK-NEXT: movtgt r5, #1 +; CHECK-NEXT: movwgt r7, #65535 +; CHECK-NEXT: movtgt r7, #1 ; CHECK-NEXT: bl __aeabi_f2lz ; CHECK-NEXT: vcmp.f32 s16, s20 ; CHECK-NEXT: mov r4, r1 @@ -1887,8 +1888,8 @@ ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s16, s22 ; CHECK-NEXT: itt gt -; CHECK-NEXT: movwgt r6, #65535 -; CHECK-NEXT: movtgt r6, #1 +; CHECK-NEXT: movwgt r5, #65535 +; CHECK-NEXT: movtgt r5, #1 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s16, s16 ; CHECK-NEXT: it gt @@ -1898,27 +1899,46 @@ ; CHECK-NEXT: it vs ; CHECK-NEXT: movvs r0, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr +; CHECK-NEXT: vcmp.f32 s19, s22 ; CHECK-NEXT: str.w r0, [r8] ; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r7, #0 -; CHECK-NEXT: vcmp.f32 s19, s22 +; CHECK-NEXT: movlt r6, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-NEXT: it gt -; CHECK-NEXT: movgt.w r7, #-1 ; CHECK-NEXT: vcmp.f32 s19, s19 +; CHECK-NEXT: it gt +; CHECK-NEXT: movgt.w r6, #-1 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr +; CHECK-NEXT: vcmp.f32 s17, s17 ; CHECK-NEXT: itt vs -; CHECK-NEXT: movvs r7, #0 ; CHECK-NEXT: movvs r6, #0 -; CHECK-NEXT: lsls r0, r6, #22 -; CHECK-NEXT: orr.w r1, r0, r7, lsr #10 -; CHECK-NEXT: vmov r0, s17 -; CHECK-NEXT: str.w r1, [r8, #20] -; CHECK-NEXT: bl __aeabi_f2lz +; CHECK-NEXT: movvs r5, #0 +; CHECK-NEXT: lsls r0, r5, #22 +; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s17, s20 -; CHECK-NEXT: lsrs r2, r6, #10 +; CHECK-NEXT: orr.w r0, r0, r6, lsr #10 +; CHECK-NEXT: str.w r0, [r8, #20] +; CHECK-NEXT: it vs +; CHECK-NEXT: movvs r7, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr +; CHECK-NEXT: it lt +; CHECK-NEXT: movlt.w r9, #0 ; CHECK-NEXT: vcmp.f32 s17, s22 +; CHECK-NEXT: vmrs APSR_nzcv, fpscr +; CHECK-NEXT: it gt +; CHECK-NEXT: movgt.w r9, #-1 +; CHECK-NEXT: vcmp.f32 s17, s17 +; CHECK-NEXT: vmrs APSR_nzcv, fpscr +; CHECK-NEXT: it vs +; CHECK-NEXT: movvs.w r9, #0 +; CHECK-NEXT: lsr.w r0, r9, #14 +; CHECK-NEXT: orr.w r1, r0, r7, lsl #18 +; CHECK-NEXT: vmov r0, s18 +; CHECK-NEXT: str.w r1, [r8, #8] +; CHECK-NEXT: bl __aeabi_f2lz +; CHECK-NEXT: vcmp.f32 s18, s20 +; CHECK-NEXT: lsrs r2, r5, #10 +; CHECK-NEXT: vmrs APSR_nzcv, fpscr +; CHECK-NEXT: vcmp.f32 s18, s22 ; CHECK-NEXT: itt lt ; CHECK-NEXT: movlt r1, #0 ; CHECK-NEXT: movtlt r1, #65534 @@ -1933,60 +1953,40 @@ ; CHECK-NEXT: movlt r4, #0 ; CHECK-NEXT: movtlt r4, #65534 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-NEXT: vcmp.f32 s17, s20 +; CHECK-NEXT: vcmp.f32 s18, s20 ; CHECK-NEXT: itt gt ; CHECK-NEXT: movwgt r4, #65535 ; CHECK-NEXT: movtgt r4, #1 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-NEXT: vcmp.f32 s17, s22 +; CHECK-NEXT: vcmp.f32 s18, s22 ; CHECK-NEXT: strb.w r2, [r8, #24] ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r0, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-NEXT: vcmp.f32 s17, s17 +; CHECK-NEXT: vcmp.f32 s18, s18 ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r0, #-1 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr +; CHECK-NEXT: ubfx r2, r7, #14, #4 ; CHECK-NEXT: vcmp.f32 s16, s16 ; CHECK-NEXT: it vs ; CHECK-NEXT: movvs r0, #0 +; CHECK-NEXT: orr.w r2, r2, r0, lsl #4 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr +; CHECK-NEXT: str.w r2, 
[r8, #12] ; CHECK-NEXT: it vs ; CHECK-NEXT: movvs r4, #0 -; CHECK-NEXT: vcmp.f32 s18, s20 +; CHECK-NEXT: vcmp.f32 s18, s18 ; CHECK-NEXT: bfc r4, #18, #14 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-NEXT: vcmp.f32 s18, s22 -; CHECK-NEXT: orr.w r2, r4, r0, lsl #18 +; CHECK-NEXT: orr.w r2, r4, r9, lsl #18 ; CHECK-NEXT: str.w r2, [r8, #4] -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt.w r9, #0 -; CHECK-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-NEXT: vcmp.f32 s18, s18 -; CHECK-NEXT: it gt -; CHECK-NEXT: movgt.w r9, #-1 -; CHECK-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-NEXT: vcmp.f32 s17, s17 -; CHECK-NEXT: it vs -; CHECK-NEXT: movvs.w r9, #0 -; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: it vs ; CHECK-NEXT: movvs r1, #0 -; CHECK-NEXT: vcmp.f32 s18, s18 +; CHECK-NEXT: lsrs r0, r0, #28 ; CHECK-NEXT: bfc r1, #18, #14 -; CHECK-NEXT: lsrs r0, r0, #14 -; CHECK-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-NEXT: lsr.w r2, r1, #14 -; CHECK-NEXT: orr.w r0, r0, r1, lsl #18 -; CHECK-NEXT: orr.w r2, r2, r9, lsl #4 -; CHECK-NEXT: str.w r2, [r8, #12] -; CHECK-NEXT: str.w r0, [r8, #8] -; CHECK-NEXT: it vs -; CHECK-NEXT: movvs r5, #0 -; CHECK-NEXT: bfc r5, #18, #14 -; CHECK-NEXT: lsr.w r0, r9, #28 -; CHECK-NEXT: orr.w r0, r0, r5, lsl #4 -; CHECK-NEXT: orr.w r0, r0, r7, lsl #22 +; CHECK-NEXT: orr.w r0, r0, r1, lsl #4 +; CHECK-NEXT: orr.w r0, r0, r6, lsl #22 ; CHECK-NEXT: str.w r0, [r8, #16] ; CHECK-NEXT: vpop {d8, d9, d10, d11} ; CHECK-NEXT: add sp, #4 @@ -4938,104 +4938,100 @@ ; CHECK-NEXT: push {r7, lr} ; CHECK-NEXT: .vsave {d8} ; CHECK-NEXT: vpush {d8} -; CHECK-NEXT: vldr s5, .LCPI46_0 +; CHECK-NEXT: vldr s12, .LCPI46_0 ; CHECK-NEXT: vcvtt.f32.f16 s15, s3 -; CHECK-NEXT: vldr s7, .LCPI46_1 -; CHECK-NEXT: vcvtb.f32.f16 s8, s2 -; CHECK-NEXT: vmaxnm.f32 s16, s15, s5 +; CHECK-NEXT: vldr s14, .LCPI46_1 +; CHECK-NEXT: vcvtb.f32.f16 s7, s0 +; CHECK-NEXT: vmaxnm.f32 s16, s15, s12 ; CHECK-NEXT: vcvtb.f32.f16 s4, s1 -; CHECK-NEXT: vcvtt.f32.f16 s12, s1 -; CHECK-NEXT: vcvtb.f32.f16 s1, s0 -; CHECK-NEXT: vminnm.f32 s16, s16, s7 +; CHECK-NEXT: vcvtt.f32.f16 s8, s1 +; CHECK-NEXT: vcvtb.f32.f16 s1, s2 ; CHECK-NEXT: vcvtt.f32.f16 s0, s0 ; CHECK-NEXT: vcvtt.f32.f16 s2, s2 ; CHECK-NEXT: vcvtb.f32.f16 s3, s3 -; CHECK-NEXT: vmaxnm.f32 s6, s4, s5 -; CHECK-NEXT: vmaxnm.f32 s10, s8, s5 -; CHECK-NEXT: vmaxnm.f32 s14, s12, s5 -; CHECK-NEXT: vmaxnm.f32 s9, s1, s5 -; CHECK-NEXT: vmaxnm.f32 s11, s0, s5 -; CHECK-NEXT: vmaxnm.f32 s13, s2, s5 -; CHECK-NEXT: vmaxnm.f32 s5, s3, s5 +; CHECK-NEXT: vmaxnm.f32 s6, s4, s12 +; CHECK-NEXT: vmaxnm.f32 s10, s8, s12 +; CHECK-NEXT: vmaxnm.f32 s5, s1, s12 +; CHECK-NEXT: vmaxnm.f32 s9, s7, s12 +; CHECK-NEXT: vmaxnm.f32 s11, s0, s12 +; CHECK-NEXT: vmaxnm.f32 s13, s2, s12 +; CHECK-NEXT: vminnm.f32 s16, s16, s14 +; CHECK-NEXT: vmaxnm.f32 s12, s3, s12 ; CHECK-NEXT: vcvt.s32.f32 s16, s16 -; CHECK-NEXT: vminnm.f32 s5, s5, s7 -; CHECK-NEXT: vminnm.f32 s13, s13, s7 -; CHECK-NEXT: vcvt.s32.f32 s5, s5 -; CHECK-NEXT: vminnm.f32 s11, s11, s7 +; CHECK-NEXT: vminnm.f32 s12, s12, s14 +; CHECK-NEXT: vminnm.f32 s13, s13, s14 +; CHECK-NEXT: vcvt.s32.f32 s12, s12 +; CHECK-NEXT: vminnm.f32 s9, s9, s14 ; CHECK-NEXT: vcvt.s32.f32 s13, s13 -; CHECK-NEXT: vminnm.f32 s9, s9, s7 -; CHECK-NEXT: vcmp.f32 s15, s15 -; CHECK-NEXT: vminnm.f32 s10, s10, s7 -; CHECK-NEXT: vmrs APSR_nzcv, fpscr +; CHECK-NEXT: vminnm.f32 s11, s11, s14 ; CHECK-NEXT: vcvt.s32.f32 s11, s11 -; CHECK-NEXT: vcmp.f32 s3, s3 -; CHECK-NEXT: vminnm.f32 s14, s14, s7 +; CHECK-NEXT: vminnm.f32 s5, s5, s14 +; CHECK-NEXT: vcvt.s32.f32 s9, s9 +; CHECK-NEXT: vminnm.f32 s10, s10, s14 +; 
CHECK-NEXT: vcmp.f32 s15, s15 +; CHECK-NEXT: vminnm.f32 s6, s6, s14 ; CHECK-NEXT: vmov r1, s16 -; CHECK-NEXT: vminnm.f32 s6, s6, s7 +; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: it vs ; CHECK-NEXT: movvs r1, #0 ; CHECK-NEXT: lsrs r2, r1, #11 +; CHECK-NEXT: vcmp.f32 s3, s3 ; CHECK-NEXT: strb r2, [r0, #18] -; CHECK-NEXT: vmov r2, s5 +; CHECK-NEXT: vmov r3, s12 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: it vs -; CHECK-NEXT: movvs r2, #0 -; CHECK-NEXT: vcvt.s32.f32 s9, s9 -; CHECK-NEXT: bfc r2, #19, #13 +; CHECK-NEXT: movvs r3, #0 +; CHECK-NEXT: ubfx r2, r3, #14, #5 +; CHECK-NEXT: vcvt.s32.f32 s5, s5 +; CHECK-NEXT: orr.w r1, r2, r1, lsl #5 ; CHECK-NEXT: vcmp.f32 s2, s2 -; CHECK-NEXT: vmov r12, s13 -; CHECK-NEXT: lsrs r3, r2, #14 -; CHECK-NEXT: orr.w r1, r3, r1, lsl #5 -; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: strh r1, [r0, #16] -; CHECK-NEXT: it vs -; CHECK-NEXT: movvs.w r12, #0 -; CHECK-NEXT: vcvt.s32.f32 s10, s10 -; CHECK-NEXT: bfc r12, #19, #13 -; CHECK-NEXT: vcvt.s32.f32 s14, s14 -; CHECK-NEXT: lsr.w r3, r12, #1 -; CHECK-NEXT: vcmp.f32 s0, s0 -; CHECK-NEXT: vmov lr, s11 -; CHECK-NEXT: orr.w r2, r3, r2, lsl #18 +; CHECK-NEXT: vmov lr, s13 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-NEXT: str r2, [r0, #12] ; CHECK-NEXT: it vs ; CHECK-NEXT: movvs.w lr, #0 -; CHECK-NEXT: vcmp.f32 s1, s1 +; CHECK-NEXT: ubfx r1, lr, #1, #18 +; CHECK-NEXT: vcmp.f32 s0, s0 +; CHECK-NEXT: orr.w r1, r1, r3, lsl #18 +; CHECK-NEXT: vcvt.s32.f32 s10, s10 +; CHECK-NEXT: vmrs APSR_nzcv, fpscr +; CHECK-NEXT: vmov r12, s11 +; CHECK-NEXT: str r1, [r0, #12] ; CHECK-NEXT: vmov r3, s9 +; CHECK-NEXT: it vs +; CHECK-NEXT: movvs.w r12, #0 +; CHECK-NEXT: vcmp.f32 s7, s7 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: it vs ; CHECK-NEXT: movvs r3, #0 -; CHECK-NEXT: bfc lr, #19, #13 ; CHECK-NEXT: bfc r3, #19, #13 -; CHECK-NEXT: vcmp.f32 s12, s12 -; CHECK-NEXT: orr.w r3, r3, lr, lsl #19 ; CHECK-NEXT: vcvt.s32.f32 s6, s6 +; CHECK-NEXT: orr.w r3, r3, r12, lsl #19 ; CHECK-NEXT: str r3, [r0] +; CHECK-NEXT: vcmp.f32 s1, s1 +; CHECK-NEXT: vmov r3, s5 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-NEXT: vmov r3, s14 -; CHECK-NEXT: vmov r1, s10 ; CHECK-NEXT: it vs ; CHECK-NEXT: movvs r3, #0 ; CHECK-NEXT: vcmp.f32 s8, s8 ; CHECK-NEXT: bfc r3, #19, #13 +; CHECK-NEXT: vmov r1, s10 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: it vs ; CHECK-NEXT: movvs r1, #0 -; CHECK-NEXT: bfc r1, #19, #13 -; CHECK-NEXT: lsrs r2, r3, #7 +; CHECK-NEXT: ubfx r2, r1, #7, #12 ; CHECK-NEXT: vcmp.f32 s4, s4 -; CHECK-NEXT: orr.w r1, r2, r1, lsl #12 +; CHECK-NEXT: orr.w r2, r2, r3, lsl #12 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-NEXT: orr.w r1, r1, r12, lsl #31 -; CHECK-NEXT: str r1, [r0, #8] -; CHECK-NEXT: vmov r1, s6 -; CHECK-NEXT: lsr.w r2, lr, #13 +; CHECK-NEXT: orr.w r2, r2, lr, lsl #31 +; CHECK-NEXT: str r2, [r0, #8] +; CHECK-NEXT: vmov r2, s6 +; CHECK-NEXT: ubfx r3, r12, #13, #6 ; CHECK-NEXT: it vs -; CHECK-NEXT: movvs r1, #0 -; CHECK-NEXT: bfc r1, #19, #13 -; CHECK-NEXT: orr.w r1, r2, r1, lsl #6 -; CHECK-NEXT: orr.w r1, r1, r3, lsl #25 +; CHECK-NEXT: movvs r2, #0 +; CHECK-NEXT: bfc r2, #19, #13 +; CHECK-NEXT: orr.w r2, r3, r2, lsl #6 +; CHECK-NEXT: orr.w r1, r2, r1, lsl #25 ; CHECK-NEXT: str r1, [r0, #4] ; CHECK-NEXT: vpop {d8} ; CHECK-NEXT: pop {r7, pc} @@ -5088,42 +5084,42 @@ ; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr} ; CHECK-NEXT: .pad #4 ; CHECK-NEXT: sub sp, #4 -; CHECK-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15} -; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15} -; CHECK-NEXT: .pad #8 -; 
CHECK-NEXT: sub sp, #8 +; CHECK-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14} +; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14} +; CHECK-NEXT: .pad #16 +; CHECK-NEXT: sub sp, #16 ; CHECK-NEXT: vmov q4, q0 -; CHECK-NEXT: mov r10, r0 -; CHECK-NEXT: vcvtt.f32.f16 s30, s19 -; CHECK-NEXT: vmov r0, s30 +; CHECK-NEXT: mov r11, r0 +; CHECK-NEXT: vcvtt.f32.f16 s28, s19 +; CHECK-NEXT: vmov r0, s28 ; CHECK-NEXT: bl __aeabi_f2lz ; CHECK-NEXT: vcvtb.f32.f16 s26, s18 -; CHECK-NEXT: mov r4, r0 +; CHECK-NEXT: mov r7, r0 ; CHECK-NEXT: vmov r0, s26 ; CHECK-NEXT: vldr s22, .LCPI48_1 ; CHECK-NEXT: vcvtb.f32.f16 s24, s16 -; CHECK-NEXT: vcvtt.f32.f16 s28, s17 -; CHECK-NEXT: vcmp.f32 s30, s22 -; CHECK-NEXT: mov r6, r1 +; CHECK-NEXT: vcvtt.f32.f16 s18, s18 +; CHECK-NEXT: vcmp.f32 s28, s22 +; CHECK-NEXT: mov r4, r1 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-NEXT: vmov r7, s24 +; CHECK-NEXT: vmov r6, s24 ; CHECK-NEXT: vldr s20, .LCPI48_0 -; CHECK-NEXT: vmov r8, s28 +; CHECK-NEXT: vmov r5, s18 ; CHECK-NEXT: itt lt -; CHECK-NEXT: movlt r6, #0 -; CHECK-NEXT: movtlt r6, #65534 +; CHECK-NEXT: movlt r4, #0 +; CHECK-NEXT: movtlt r4, #65534 ; CHECK-NEXT: bl __aeabi_f2lz ; CHECK-NEXT: vcmp.f32 s26, s22 -; CHECK-NEXT: mov r5, r1 +; CHECK-NEXT: str r1, [sp, #4] @ 4-byte Spill ; CHECK-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-NEXT: vcmp.f32 s30, s20 +; CHECK-NEXT: vcmp.f32 s28, s20 ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r0, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s26, s20 ; CHECK-NEXT: itt gt -; CHECK-NEXT: movwgt r6, #65535 -; CHECK-NEXT: movtgt r6, #1 +; CHECK-NEXT: movwgt r4, #65535 +; CHECK-NEXT: movtgt r4, #1 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r0, #-1 @@ -5131,11 +5127,11 @@ ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: it vs ; CHECK-NEXT: movvs r0, #0 -; CHECK-NEXT: str.w r0, [r10, #25] -; CHECK-NEXT: mov r0, r7 +; CHECK-NEXT: str.w r0, [r11, #25] +; CHECK-NEXT: mov r0, r6 ; CHECK-NEXT: bl __aeabi_f2lz ; CHECK-NEXT: vcmp.f32 s24, s22 -; CHECK-NEXT: mov r11, r1 +; CHECK-NEXT: str r1, [sp, #8] @ 4-byte Spill ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s24, s20 ; CHECK-NEXT: it lt @@ -5145,99 +5141,97 @@ ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r0, #-1 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-NEXT: vcmp.f32 s30, s22 +; CHECK-NEXT: vcmp.f32 s28, s22 ; CHECK-NEXT: it vs ; CHECK-NEXT: movvs r0, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-NEXT: str.w r0, [r10] +; CHECK-NEXT: str.w r0, [r11] ; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r4, #0 -; CHECK-NEXT: vcmp.f32 s30, s20 +; CHECK-NEXT: movlt r7, #0 +; CHECK-NEXT: vcmp.f32 s28, s20 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: it gt -; CHECK-NEXT: movgt.w r4, #-1 -; CHECK-NEXT: vcmp.f32 s30, s30 +; CHECK-NEXT: movgt.w r7, #-1 +; CHECK-NEXT: vcmp.f32 s28, s28 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: it vs -; CHECK-NEXT: movvs r4, #0 -; CHECK-NEXT: str r4, [sp, #4] @ 4-byte Spill +; CHECK-NEXT: movvs r7, #0 +; CHECK-NEXT: str r7, [sp, #12] @ 4-byte Spill ; CHECK-NEXT: it vs -; CHECK-NEXT: movvs r6, #0 -; CHECK-NEXT: lsls r0, r6, #22 -; CHECK-NEXT: orr.w r7, r0, r4, lsr #10 -; CHECK-NEXT: mov r0, r8 +; CHECK-NEXT: movvs r4, #0 +; CHECK-NEXT: lsls r0, r4, #22 +; CHECK-NEXT: orr.w r7, r0, r7, lsr #10 +; CHECK-NEXT: mov r0, r5 ; CHECK-NEXT: bl __aeabi_f2lz -; CHECK-NEXT: vcmp.f32 s28, s22 -; CHECK-NEXT: mov r4, r1 +; CHECK-NEXT: vcmp.f32 s18, s22 +; CHECK-NEXT: mov r6, r1 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-NEXT: vcmp.f32 s28, s20 +; CHECK-NEXT: vcmp.f32 s18, s20 ; 
CHECK-NEXT: itt lt -; CHECK-NEXT: movlt r4, #0 -; CHECK-NEXT: movtlt r4, #65534 +; CHECK-NEXT: movlt r6, #0 +; CHECK-NEXT: movtlt r6, #65534 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-NEXT: vcmp.f32 s28, s22 -; CHECK-NEXT: mov r1, r0 +; CHECK-NEXT: vcmp.f32 s18, s18 ; CHECK-NEXT: itt gt -; CHECK-NEXT: movwgt r4, #65535 -; CHECK-NEXT: movtgt r4, #1 +; CHECK-NEXT: movwgt r6, #65535 +; CHECK-NEXT: movtgt r6, #1 +; CHECK-NEXT: vmrs APSR_nzcv, fpscr +; CHECK-NEXT: mov r5, r0 +; CHECK-NEXT: vcmp.f32 s18, s22 +; CHECK-NEXT: str.w r7, [r11, #45] +; CHECK-NEXT: it vs +; CHECK-NEXT: movvs r6, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-NEXT: str.w r7, [r10, #45] ; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r1, #0 -; CHECK-NEXT: vcmp.f32 s28, s20 -; CHECK-NEXT: vcvtt.f32.f16 s18, s18 +; CHECK-NEXT: movlt r5, #0 +; CHECK-NEXT: vcmp.f32 s18, s20 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr +; CHECK-NEXT: vcmp.f32 s18, s18 ; CHECK-NEXT: it gt -; CHECK-NEXT: movgt.w r1, #-1 -; CHECK-NEXT: vcmp.f32 s28, s28 +; CHECK-NEXT: movgt.w r5, #-1 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: it vs -; CHECK-NEXT: movvs r1, #0 -; CHECK-NEXT: str r1, [sp] @ 4-byte Spill -; CHECK-NEXT: it vs -; CHECK-NEXT: movvs r4, #0 -; CHECK-NEXT: lsls r0, r4, #22 -; CHECK-NEXT: orr.w r0, r0, r1, lsr #10 -; CHECK-NEXT: str.w r0, [r10, #20] +; CHECK-NEXT: movvs r5, #0 +; CHECK-NEXT: lsrs r0, r5, #14 +; CHECK-NEXT: orr.w r0, r0, r6, lsl #18 +; CHECK-NEXT: vcvtt.f32.f16 s18, s17 +; CHECK-NEXT: str.w r0, [r11, #33] ; CHECK-NEXT: vmov r0, s18 -; CHECK-NEXT: lsrs r1, r6, #10 -; CHECK-NEXT: strb.w r1, [r10, #49] ; CHECK-NEXT: bl __aeabi_f2lz ; CHECK-NEXT: vcmp.f32 s18, s22 -; CHECK-NEXT: mov r9, r0 +; CHECK-NEXT: mov r9, r1 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s18, s20 ; CHECK-NEXT: it lt -; CHECK-NEXT: movlt.w r9, #0 +; CHECK-NEXT: movlt r0, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s18, s18 ; CHECK-NEXT: it gt -; CHECK-NEXT: movgt.w r9, #-1 +; CHECK-NEXT: movgt.w r0, #-1 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-NEXT: vcmp.f32 s26, s22 +; CHECK-NEXT: vcmp.f32 s18, s22 ; CHECK-NEXT: it vs -; CHECK-NEXT: movvs.w r9, #0 +; CHECK-NEXT: movvs r0, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr +; CHECK-NEXT: str r0, [sp] @ 4-byte Spill ; CHECK-NEXT: itt lt -; CHECK-NEXT: movlt r5, #0 -; CHECK-NEXT: movtlt r5, #65534 -; CHECK-NEXT: vcmp.f32 s26, s20 +; CHECK-NEXT: movwlt r9, #0 +; CHECK-NEXT: movtlt r9, #65534 +; CHECK-NEXT: vcmp.f32 s18, s20 +; CHECK-NEXT: mov r1, r0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: itt gt -; CHECK-NEXT: movwgt r5, #65535 -; CHECK-NEXT: movtgt r5, #1 -; CHECK-NEXT: vcmp.f32 s26, s26 +; CHECK-NEXT: movwgt r9, #65535 +; CHECK-NEXT: movtgt r9, #1 +; CHECK-NEXT: vcmp.f32 s18, s18 ; CHECK-NEXT: vcvtt.f32.f16 s16, s16 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: it vs -; CHECK-NEXT: movvs r5, #0 -; CHECK-NEXT: bfc r5, #18, #14 -; CHECK-NEXT: mov r7, r1 -; CHECK-NEXT: orr.w r0, r5, r9, lsl #18 -; CHECK-NEXT: str.w r0, [r10, #29] +; CHECK-NEXT: movvs.w r9, #0 +; CHECK-NEXT: lsl.w r0, r9, #22 +; CHECK-NEXT: orr.w r0, r0, r1, lsr #10 +; CHECK-NEXT: str.w r0, [r11, #20] ; CHECK-NEXT: vmov r0, s16 -; CHECK-NEXT: lsrs r1, r4, #10 -; CHECK-NEXT: strb.w r1, [r10, #24] ; CHECK-NEXT: bl __aeabi_f2lz ; CHECK-NEXT: vcmp.f32 s16, s22 ; CHECK-NEXT: mov r8, r0 @@ -5250,105 +5244,109 @@ ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r8, #-1 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-NEXT: vcmp.f32 s24, s22 +; CHECK-NEXT: mov r10, r1 +; CHECK-NEXT: vcmp.f32 s16, s22 ; CHECK-NEXT: it vs ; 
CHECK-NEXT: movvs.w r8, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: itt lt -; CHECK-NEXT: movwlt r11, #0 -; CHECK-NEXT: movtlt r11, #65534 -; CHECK-NEXT: vcmp.f32 s24, s20 +; CHECK-NEXT: movwlt r10, #0 +; CHECK-NEXT: movtlt r10, #65534 +; CHECK-NEXT: vcmp.f32 s16, s20 +; CHECK-NEXT: lsr.w r0, r8, #14 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-NEXT: vcmp.f32 s24, s24 +; CHECK-NEXT: vcmp.f32 s16, s16 ; CHECK-NEXT: itt gt -; CHECK-NEXT: movwgt r11, #65535 -; CHECK-NEXT: movtgt r11, #1 +; CHECK-NEXT: movwgt r10, #65535 +; CHECK-NEXT: movtgt r10, #1 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: it vs -; CHECK-NEXT: movvs.w r11, #0 -; CHECK-NEXT: vcvtb.f32.f16 s24, s19 -; CHECK-NEXT: bfc r11, #18, #14 -; CHECK-NEXT: mov r6, r1 -; CHECK-NEXT: orr.w r0, r11, r8, lsl #18 -; CHECK-NEXT: str.w r0, [r10, #4] -; CHECK-NEXT: vmov r0, s24 +; CHECK-NEXT: movvs.w r10, #0 +; CHECK-NEXT: orr.w r0, r0, r10, lsl #18 +; CHECK-NEXT: str.w r0, [r11, #8] +; CHECK-NEXT: lsrs r0, r4, #10 +; CHECK-NEXT: vcvtb.f32.f16 s16, s19 +; CHECK-NEXT: strb.w r0, [r11, #49] +; CHECK-NEXT: vmov r0, s16 ; CHECK-NEXT: bl __aeabi_f2lz -; CHECK-NEXT: vcmp.f32 s24, s22 -; CHECK-NEXT: mov r5, r0 +; CHECK-NEXT: mov r7, r0 +; CHECK-NEXT: vcmp.f32 s16, s22 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-NEXT: vcmp.f32 s24, s20 ; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r5, #0 +; CHECK-NEXT: movlt r7, #0 +; CHECK-NEXT: vcmp.f32 s16, s20 +; CHECK-NEXT: ubfx r0, r6, #14, #4 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-NEXT: vcmp.f32 s24, s24 ; CHECK-NEXT: it gt -; CHECK-NEXT: movgt.w r5, #-1 +; CHECK-NEXT: movgt.w r7, #-1 +; CHECK-NEXT: vcmp.f32 s16, s16 +; CHECK-NEXT: vcvtb.f32.f16 s18, s17 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-NEXT: vcmp.f32 s18, s22 ; CHECK-NEXT: it vs -; CHECK-NEXT: movvs r5, #0 +; CHECK-NEXT: movvs r7, #0 +; CHECK-NEXT: orr.w r0, r0, r7, lsl #4 +; CHECK-NEXT: str.w r0, [r11, #37] +; CHECK-NEXT: vcmp.f32 s26, s22 +; CHECK-NEXT: ldr r0, [sp, #4] @ 4-byte Reload ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: itt lt -; CHECK-NEXT: movlt r7, #0 -; CHECK-NEXT: movtlt r7, #65534 -; CHECK-NEXT: vcmp.f32 s18, s20 +; CHECK-NEXT: movlt r0, #0 +; CHECK-NEXT: movtlt r0, #65534 +; CHECK-NEXT: vcmp.f32 s26, s20 +; CHECK-NEXT: mov r4, r1 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-NEXT: vcmp.f32 s18, s18 ; CHECK-NEXT: itt gt -; CHECK-NEXT: movwgt r7, #65535 -; CHECK-NEXT: movtgt r7, #1 +; CHECK-NEXT: movwgt r0, #65535 +; CHECK-NEXT: movtgt r0, #1 +; CHECK-NEXT: vcmp.f32 s26, s26 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: it vs -; CHECK-NEXT: movvs r7, #0 -; CHECK-NEXT: vcvtb.f32.f16 s18, s17 -; CHECK-NEXT: bfc r7, #18, #14 -; CHECK-NEXT: mov r4, r1 -; CHECK-NEXT: lsrs r0, r7, #14 -; CHECK-NEXT: orr.w r0, r0, r5, lsl #4 -; CHECK-NEXT: str.w r0, [r10, #37] -; CHECK-NEXT: lsr.w r0, r9, #14 -; CHECK-NEXT: orr.w r0, r0, r7, lsl #18 -; CHECK-NEXT: str.w r0, [r10, #33] +; CHECK-NEXT: movvs r0, #0 +; CHECK-NEXT: bfc r0, #18, #14 +; CHECK-NEXT: orr.w r0, r0, r5, lsl #18 +; CHECK-NEXT: str.w r0, [r11, #29] +; CHECK-NEXT: lsr.w r0, r9, #10 +; CHECK-NEXT: strb.w r0, [r11, #24] ; CHECK-NEXT: vmov r0, s18 ; CHECK-NEXT: bl __aeabi_f2lz ; CHECK-NEXT: vcmp.f32 s18, s22 +; CHECK-NEXT: ubfx r2, r10, #14, #4 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-NEXT: vcmp.f32 s18, s20 ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r0, #0 +; CHECK-NEXT: vcmp.f32 s18, s20 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-NEXT: vcmp.f32 s18, s18 ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r0, #-1 +; CHECK-NEXT: vcmp.f32 s18, s18 ; CHECK-NEXT: vmrs 
APSR_nzcv, fpscr -; CHECK-NEXT: vcmp.f32 s16, s22 ; CHECK-NEXT: it vs ; CHECK-NEXT: movvs r0, #0 +; CHECK-NEXT: orr.w r2, r2, r0, lsl #4 +; CHECK-NEXT: str.w r2, [r11, #12] +; CHECK-NEXT: vcmp.f32 s24, s22 +; CHECK-NEXT: ldr r2, [sp, #8] @ 4-byte Reload ; CHECK-NEXT: vmrs APSR_nzcv, fpscr +; CHECK-NEXT: vcmp.f32 s24, s20 ; CHECK-NEXT: itt lt -; CHECK-NEXT: movlt r6, #0 -; CHECK-NEXT: movtlt r6, #65534 -; CHECK-NEXT: vcmp.f32 s16, s20 +; CHECK-NEXT: movlt r2, #0 +; CHECK-NEXT: movtlt r2, #65534 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr +; CHECK-NEXT: vcmp.f32 s24, s24 ; CHECK-NEXT: itt gt -; CHECK-NEXT: movwgt r6, #65535 -; CHECK-NEXT: movtgt r6, #1 -; CHECK-NEXT: vcmp.f32 s16, s16 +; CHECK-NEXT: movwgt r2, #65535 +; CHECK-NEXT: movtgt r2, #1 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-NEXT: it vs -; CHECK-NEXT: movvs r6, #0 -; CHECK-NEXT: bfc r6, #18, #14 ; CHECK-NEXT: vcmp.f32 s18, s22 +; CHECK-NEXT: it vs +; CHECK-NEXT: movvs r2, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s18, s20 -; CHECK-NEXT: lsr.w r2, r6, #14 -; CHECK-NEXT: orr.w r2, r2, r0, lsl #4 -; CHECK-NEXT: str.w r2, [r10, #12] ; CHECK-NEXT: itt lt ; CHECK-NEXT: movlt r1, #0 ; CHECK-NEXT: movtlt r1, #65534 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-NEXT: vcmp.f32 s24, s22 +; CHECK-NEXT: vcmp.f32 s16, s22 ; CHECK-NEXT: itt gt ; CHECK-NEXT: movwgt r1, #65535 ; CHECK-NEXT: movtgt r1, #1 @@ -5356,36 +5354,36 @@ ; CHECK-NEXT: itt lt ; CHECK-NEXT: movlt r4, #0 ; CHECK-NEXT: movtlt r4, #65534 -; CHECK-NEXT: vcmp.f32 s24, s20 -; CHECK-NEXT: lsr.w r2, r8, #14 +; CHECK-NEXT: vcmp.f32 s16, s20 +; CHECK-NEXT: bfc r2, #18, #14 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: itt gt ; CHECK-NEXT: movwgt r4, #65535 ; CHECK-NEXT: movtgt r4, #1 -; CHECK-NEXT: vcmp.f32 s24, s24 -; CHECK-NEXT: orr.w r2, r2, r6, lsl #18 -; CHECK-NEXT: str.w r2, [r10, #8] +; CHECK-NEXT: vcmp.f32 s16, s16 +; CHECK-NEXT: orr.w r2, r2, r8, lsl #18 +; CHECK-NEXT: str.w r2, [r11, #4] ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: it vs ; CHECK-NEXT: movvs r4, #0 -; CHECK-NEXT: ldr r3, [sp, #4] @ 4-byte Reload ; CHECK-NEXT: bfc r4, #18, #14 -; CHECK-NEXT: lsrs r2, r5, #28 +; CHECK-NEXT: ldr r3, [sp, #12] @ 4-byte Reload +; CHECK-NEXT: lsrs r2, r7, #28 ; CHECK-NEXT: vcmp.f32 s18, s18 -; CHECK-NEXT: lsrs r0, r0, #28 ; CHECK-NEXT: orr.w r2, r2, r4, lsl #4 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: orr.w r2, r2, r3, lsl #22 -; CHECK-NEXT: str.w r2, [r10, #41] +; CHECK-NEXT: str.w r2, [r11, #41] ; CHECK-NEXT: it vs ; CHECK-NEXT: movvs r1, #0 +; CHECK-NEXT: lsrs r0, r0, #28 ; CHECK-NEXT: bfc r1, #18, #14 ; CHECK-NEXT: orr.w r0, r0, r1, lsl #4 ; CHECK-NEXT: ldr r1, [sp] @ 4-byte Reload ; CHECK-NEXT: orr.w r0, r0, r1, lsl #22 -; CHECK-NEXT: str.w r0, [r10, #16] -; CHECK-NEXT: add sp, #8 -; CHECK-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15} +; CHECK-NEXT: str.w r0, [r11, #16] +; CHECK-NEXT: add sp, #16 +; CHECK-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14} ; CHECK-NEXT: add sp, #4 ; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc} ; CHECK-NEXT: .p2align 2 diff --git a/llvm/test/CodeGen/Thumb2/mve-fptoui-sat-vector.ll b/llvm/test/CodeGen/Thumb2/mve-fptoui-sat-vector.ll --- a/llvm/test/CodeGen/Thumb2/mve-fptoui-sat-vector.ll +++ b/llvm/test/CodeGen/Thumb2/mve-fptoui-sat-vector.ll @@ -1526,29 +1526,29 @@ ; CHECK-NEXT: mov r8, r0 ; CHECK-NEXT: vmov r0, s16 ; CHECK-NEXT: vldr s20, .LCPI28_0 -; CHECK-NEXT: vmov r5, s18 +; CHECK-NEXT: vmov r4, s17 ; CHECK-NEXT: vmov r6, s19 ; CHECK-NEXT: bl __aeabi_f2ulz -; CHECK-NEXT: mov r4, r0 +; CHECK-NEXT: mov 
r7, r0 ; CHECK-NEXT: vcmp.f32 s16, #0 -; CHECK-NEXT: mov r0, r5 +; CHECK-NEXT: mov r0, r4 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-NEXT: mov r7, r1 +; CHECK-NEXT: mov r5, r1 ; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r4, #0 +; CHECK-NEXT: movlt r7, #0 ; CHECK-NEXT: bl __aeabi_f2ulz -; CHECK-NEXT: vcmp.f32 s18, #0 -; CHECK-NEXT: mov r5, r1 +; CHECK-NEXT: vcmp.f32 s17, #0 +; CHECK-NEXT: mov r10, r1 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: mov r9, r0 ; CHECK-NEXT: mov r0, r6 ; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r5, #0 -; CHECK-NEXT: vcmp.f32 s18, s20 +; CHECK-NEXT: movlt.w r10, #0 +; CHECK-NEXT: vcmp.f32 s17, s20 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: itt gt -; CHECK-NEXT: movwgt r5, #65535 -; CHECK-NEXT: movtgt r5, #3 +; CHECK-NEXT: movwgt r10, #65535 +; CHECK-NEXT: movtgt r10, #3 ; CHECK-NEXT: bl __aeabi_f2ulz ; CHECK-NEXT: vcmp.f32 s19, #0 ; CHECK-NEXT: mov r6, r1 @@ -1562,27 +1562,38 @@ ; CHECK-NEXT: movwgt r6, #65535 ; CHECK-NEXT: movtgt r6, #3 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-NEXT: mov r10, r0 ; CHECK-NEXT: vcmp.f32 s19, #0 ; CHECK-NEXT: it gt +; CHECK-NEXT: movgt.w r7, #-1 +; CHECK-NEXT: mov r4, r0 +; CHECK-NEXT: vmrs APSR_nzcv, fpscr +; CHECK-NEXT: vcmp.f32 s19, s20 +; CHECK-NEXT: str.w r7, [r8] +; CHECK-NEXT: it lt +; CHECK-NEXT: movlt r4, #0 +; CHECK-NEXT: vmrs APSR_nzcv, fpscr +; CHECK-NEXT: lsl.w r0, r6, #22 +; CHECK-NEXT: vcmp.f32 s17, #0 +; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r4, #-1 +; CHECK-NEXT: orr.w r0, r0, r4, lsr #10 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-NEXT: str.w r4, [r8] +; CHECK-NEXT: str.w r0, [r8, #20] ; CHECK-NEXT: it lt -; CHECK-NEXT: movlt.w r10, #0 -; CHECK-NEXT: vcmp.f32 s19, s20 -; CHECK-NEXT: lsls r0, r6, #22 +; CHECK-NEXT: movlt.w r9, #0 +; CHECK-NEXT: vcmp.f32 s17, s20 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: it gt -; CHECK-NEXT: movgt.w r10, #-1 -; CHECK-NEXT: orr.w r1, r0, r10, lsr #10 -; CHECK-NEXT: vmov r0, s17 -; CHECK-NEXT: str.w r1, [r8, #20] +; CHECK-NEXT: movgt.w r9, #-1 +; CHECK-NEXT: lsr.w r0, r9, #14 +; CHECK-NEXT: orr.w r1, r0, r10, lsl #18 +; CHECK-NEXT: vmov r0, s18 +; CHECK-NEXT: str.w r1, [r8, #8] ; CHECK-NEXT: bl __aeabi_f2ulz -; CHECK-NEXT: vcmp.f32 s17, #0 +; CHECK-NEXT: vcmp.f32 s18, #0 ; CHECK-NEXT: lsrs r2, r6, #10 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-NEXT: vcmp.f32 s17, s20 +; CHECK-NEXT: vcmp.f32 s18, s20 ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r1, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr @@ -1593,42 +1604,30 @@ ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vcmp.f32 s16, s20 ; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r7, #0 +; CHECK-NEXT: movlt r5, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-NEXT: vcmp.f32 s17, #0 +; CHECK-NEXT: vcmp.f32 s18, #0 ; CHECK-NEXT: itt gt -; CHECK-NEXT: movwgt r7, #65535 -; CHECK-NEXT: movtgt r7, #3 -; CHECK-NEXT: vmrs APSR_nzcv, fpscr +; CHECK-NEXT: movwgt r5, #65535 +; CHECK-NEXT: movtgt r5, #3 ; CHECK-NEXT: strb.w r2, [r8, #24] +; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r0, #0 -; CHECK-NEXT: vcmp.f32 s17, s20 +; CHECK-NEXT: vcmp.f32 s18, s20 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r0, #-1 -; CHECK-NEXT: bfc r7, #18, #14 +; CHECK-NEXT: ubfx r2, r10, #14, #4 ; CHECK-NEXT: bfc r1, #18, #14 -; CHECK-NEXT: orr.w r2, r7, r0, lsl #18 -; CHECK-NEXT: lsrs r0, r0, #14 -; CHECK-NEXT: vcmp.f32 s18, #0 -; CHECK-NEXT: str.w r2, [r8, #4] -; CHECK-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt.w r9, #0 -; CHECK-NEXT: vcmp.f32 s18, s20 -; CHECK-NEXT: lsrs 
r2, r1, #14 -; CHECK-NEXT: orr.w r0, r0, r1, lsl #18 +; CHECK-NEXT: orr.w r2, r2, r0, lsl #4 +; CHECK-NEXT: lsrs r0, r0, #28 +; CHECK-NEXT: orr.w r0, r0, r1, lsl #4 ; CHECK-NEXT: bfc r5, #18, #14 -; CHECK-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-NEXT: it gt -; CHECK-NEXT: movgt.w r9, #-1 -; CHECK-NEXT: orr.w r2, r2, r9, lsl #4 ; CHECK-NEXT: str.w r2, [r8, #12] -; CHECK-NEXT: str.w r0, [r8, #8] -; CHECK-NEXT: lsr.w r0, r9, #28 -; CHECK-NEXT: orr.w r0, r0, r5, lsl #4 -; CHECK-NEXT: orr.w r0, r0, r10, lsl #22 +; CHECK-NEXT: orr.w r2, r5, r9, lsl #18 +; CHECK-NEXT: str.w r2, [r8, #4] +; CHECK-NEXT: orr.w r0, r0, r4, lsl #22 ; CHECK-NEXT: str.w r0, [r8, #16] ; CHECK-NEXT: vpop {d8, d9, d10} ; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, pc} @@ -3837,14 +3836,14 @@ ; CHECK-NEXT: push {r7, lr} ; CHECK-NEXT: vldr s4, .LCPI46_0 ; CHECK-NEXT: vcvtb.f32.f16 s8, s1 -; CHECK-NEXT: vcvtt.f32.f16 s12, s1 +; CHECK-NEXT: vcvtt.f32.f16 s10, s1 ; CHECK-NEXT: vcvtt.f32.f16 s1, s3 ; CHECK-NEXT: vldr s6, .LCPI46_1 ; CHECK-NEXT: vmaxnm.f32 s1, s1, s4 -; CHECK-NEXT: vcvtb.f32.f16 s10, s2 -; CHECK-NEXT: vcvtb.f32.f16 s14, s0 +; CHECK-NEXT: vcvtb.f32.f16 s12, s2 +; CHECK-NEXT: vcvtt.f32.f16 s14, s0 ; CHECK-NEXT: vminnm.f32 s1, s1, s6 -; CHECK-NEXT: vcvtt.f32.f16 s0, s0 +; CHECK-NEXT: vcvtb.f32.f16 s0, s0 ; CHECK-NEXT: vcvt.u32.f32 s1, s1 ; CHECK-NEXT: vcvtt.f32.f16 s2, s2 ; CHECK-NEXT: vcvtb.f32.f16 s3, s3 @@ -3862,45 +3861,41 @@ ; CHECK-NEXT: vmov r1, s1 ; CHECK-NEXT: vminnm.f32 s14, s14, s6 ; CHECK-NEXT: vcvt.u32.f32 s2, s2 -; CHECK-NEXT: vminnm.f32 s10, s10, s6 -; CHECK-NEXT: vcvt.u32.f32 s0, s0 ; CHECK-NEXT: vminnm.f32 s12, s12, s6 +; CHECK-NEXT: vcvt.u32.f32 s0, s0 +; CHECK-NEXT: vminnm.f32 s10, s10, s6 ; CHECK-NEXT: vcvt.u32.f32 s14, s14 ; CHECK-NEXT: vminnm.f32 s8, s8, s6 -; CHECK-NEXT: vcvt.u32.f32 s10, s10 ; CHECK-NEXT: vcvt.u32.f32 s12, s12 +; CHECK-NEXT: vcvt.u32.f32 s10, s10 ; CHECK-NEXT: vcvt.u32.f32 s8, s8 ; CHECK-NEXT: vmov r12, s2 -; CHECK-NEXT: vmov lr, s0 +; CHECK-NEXT: vmov lr, s14 ; CHECK-NEXT: lsrs r2, r1, #11 ; CHECK-NEXT: strb r2, [r0, #18] ; CHECK-NEXT: vmov r2, s4 -; CHECK-NEXT: bfc r12, #19, #13 -; CHECK-NEXT: bfc lr, #19, #13 -; CHECK-NEXT: bfc r2, #19, #13 -; CHECK-NEXT: lsrs r3, r2, #14 +; CHECK-NEXT: ubfx r3, r2, #14, #5 ; CHECK-NEXT: orr.w r1, r3, r1, lsl #5 -; CHECK-NEXT: lsr.w r3, r12, #1 +; CHECK-NEXT: ubfx r3, r12, #1, #18 ; CHECK-NEXT: orr.w r2, r3, r2, lsl #18 -; CHECK-NEXT: vmov r3, s14 ; CHECK-NEXT: strh r1, [r0, #16] -; CHECK-NEXT: vmov r1, s10 ; CHECK-NEXT: str r2, [r0, #12] -; CHECK-NEXT: bfc r3, #19, #13 -; CHECK-NEXT: orr.w r3, r3, lr, lsl #19 -; CHECK-NEXT: str r3, [r0] -; CHECK-NEXT: vmov r3, s12 -; CHECK-NEXT: bfc r1, #19, #13 -; CHECK-NEXT: bfc r3, #19, #13 -; CHECK-NEXT: lsrs r2, r3, #7 -; CHECK-NEXT: orr.w r1, r2, r1, lsl #12 -; CHECK-NEXT: orr.w r1, r1, r12, lsl #31 -; CHECK-NEXT: str r1, [r0, #8] -; CHECK-NEXT: vmov r1, s8 -; CHECK-NEXT: lsr.w r2, lr, #13 -; CHECK-NEXT: bfc r1, #19, #13 -; CHECK-NEXT: orr.w r1, r2, r1, lsl #6 -; CHECK-NEXT: orr.w r1, r1, r3, lsl #25 +; CHECK-NEXT: vmov r2, s0 +; CHECK-NEXT: vmov r1, s10 +; CHECK-NEXT: bfc r2, #19, #13 +; CHECK-NEXT: orr.w r2, r2, lr, lsl #19 +; CHECK-NEXT: str r2, [r0] +; CHECK-NEXT: vmov r2, s12 +; CHECK-NEXT: ubfx r3, r1, #7, #12 +; CHECK-NEXT: bfc r2, #19, #13 +; CHECK-NEXT: orr.w r2, r3, r2, lsl #12 +; CHECK-NEXT: ubfx r3, lr, #13, #6 +; CHECK-NEXT: orr.w r2, r2, r12, lsl #31 +; CHECK-NEXT: str r2, [r0, #8] +; CHECK-NEXT: vmov r2, s8 +; CHECK-NEXT: bfc r2, #19, #13 +; CHECK-NEXT: orr.w r2, r3, r2, lsl #6 +; 
CHECK-NEXT: orr.w r1, r2, r1, lsl #25 ; CHECK-NEXT: str r1, [r0, #4] ; CHECK-NEXT: pop {r7, pc} ; CHECK-NEXT: .p2align 2 @@ -3952,236 +3947,236 @@ ; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr} ; CHECK-NEXT: .pad #4 ; CHECK-NEXT: sub sp, #4 -; CHECK-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14} -; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14} -; CHECK-NEXT: .pad #8 -; CHECK-NEXT: sub sp, #8 +; CHECK-NEXT: .vsave {d8, d9, d10, d11, d12, d13} +; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13} +; CHECK-NEXT: .pad #16 +; CHECK-NEXT: sub sp, #16 ; CHECK-NEXT: vmov q4, q0 -; CHECK-NEXT: mov r10, r0 +; CHECK-NEXT: mov r4, r0 ; CHECK-NEXT: vcvtb.f32.f16 s24, s18 ; CHECK-NEXT: vmov r0, s24 ; CHECK-NEXT: bl __aeabi_f2ulz -; CHECK-NEXT: vcvtt.f32.f16 s28, s19 -; CHECK-NEXT: mov r6, r0 -; CHECK-NEXT: vmov r0, s28 +; CHECK-NEXT: vcvtt.f32.f16 s26, s19 +; CHECK-NEXT: mov r7, r0 +; CHECK-NEXT: vmov r0, s26 ; CHECK-NEXT: vcvtb.f32.f16 s22, s16 -; CHECK-NEXT: vcvtt.f32.f16 s26, s17 +; CHECK-NEXT: vcvtt.f32.f16 s18, s18 ; CHECK-NEXT: vcmp.f32 s24, #0 +; CHECK-NEXT: str r1, [sp, #8] @ 4-byte Spill ; CHECK-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-NEXT: mov r7, r1 ; CHECK-NEXT: vmov r5, s22 ; CHECK-NEXT: vldr s20, .LCPI48_0 -; CHECK-NEXT: vmov r8, s26 +; CHECK-NEXT: vmov r8, s18 ; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r6, #0 +; CHECK-NEXT: movlt r7, #0 ; CHECK-NEXT: bl __aeabi_f2ulz -; CHECK-NEXT: vcmp.f32 s28, #0 -; CHECK-NEXT: mov r4, r1 +; CHECK-NEXT: vcmp.f32 s26, #0 +; CHECK-NEXT: mov r10, r1 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-NEXT: vcmp.f32 s28, s20 -; CHECK-NEXT: mov r9, r0 +; CHECK-NEXT: vcmp.f32 s26, s20 +; CHECK-NEXT: mov r6, r0 ; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r4, #0 +; CHECK-NEXT: movlt.w r10, #0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: mov r0, r5 ; CHECK-NEXT: vcmp.f32 s24, s20 ; CHECK-NEXT: itt gt -; CHECK-NEXT: movwgt r4, #65535 -; CHECK-NEXT: movtgt r4, #3 +; CHECK-NEXT: movwgt r10, #65535 +; CHECK-NEXT: movtgt r10, #3 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: it gt -; CHECK-NEXT: movgt.w r6, #-1 -; CHECK-NEXT: str.w r6, [r10, #25] +; CHECK-NEXT: movgt.w r7, #-1 +; CHECK-NEXT: str.w r7, [r4, #25] ; CHECK-NEXT: bl __aeabi_f2ulz ; CHECK-NEXT: vcmp.f32 s22, #0 -; CHECK-NEXT: mov r11, r1 +; CHECK-NEXT: str r1, [sp, #4] @ 4-byte Spill ; CHECK-NEXT: vmrs APSR_nzcv, fpscr +; CHECK-NEXT: vcmp.f32 s22, s20 ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r0, #0 -; CHECK-NEXT: vcmp.f32 s22, s20 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr +; CHECK-NEXT: vcmp.f32 s26, #0 ; CHECK-NEXT: it gt ; CHECK-NEXT: movgt.w r0, #-1 -; CHECK-NEXT: vcmp.f32 s28, #0 -; CHECK-NEXT: str.w r0, [r10] +; CHECK-NEXT: str r0, [r4] ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: it lt -; CHECK-NEXT: movlt.w r9, #0 -; CHECK-NEXT: vcmp.f32 s28, s20 -; CHECK-NEXT: lsls r0, r4, #22 +; CHECK-NEXT: movlt r6, #0 +; CHECK-NEXT: vcmp.f32 s26, s20 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: it gt -; CHECK-NEXT: movgt.w r9, #-1 -; CHECK-NEXT: orr.w r5, r0, r9, lsr #10 +; CHECK-NEXT: movgt.w r6, #-1 +; CHECK-NEXT: lsl.w r0, r10, #22 +; CHECK-NEXT: str r6, [sp, #12] @ 4-byte Spill +; CHECK-NEXT: orr.w r6, r0, r6, lsr #10 ; CHECK-NEXT: mov r0, r8 -; CHECK-NEXT: str.w r9, [sp, #4] @ 4-byte Spill ; CHECK-NEXT: bl __aeabi_f2ulz -; CHECK-NEXT: vcmp.f32 s26, #0 -; CHECK-NEXT: mov r6, r1 +; CHECK-NEXT: vcmp.f32 s18, #0 +; CHECK-NEXT: mov r5, r1 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-NEXT: vcmp.f32 s26, s20 +; CHECK-NEXT: vcmp.f32 s18, s20 ; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r6, #0 +; CHECK-NEXT: 
movlt r5, #0
 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s26, #0
-; CHECK-NEXT: mov r1, r0
+; CHECK-NEXT: mov r7, r0
+; CHECK-NEXT: vcmp.f32 s18, #0
 ; CHECK-NEXT: itt gt
-; CHECK-NEXT: movwgt r6, #65535
-; CHECK-NEXT: movtgt r6, #3
+; CHECK-NEXT: movwgt r5, #65535
+; CHECK-NEXT: movtgt r5, #3
 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: str.w r5, [r10, #45]
+; CHECK-NEXT: str.w r6, [r4, #45]
+; CHECK-NEXT: vcmp.f32 s18, s20
 ; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt r1, #0
-; CHECK-NEXT: vcmp.f32 s26, s20
-; CHECK-NEXT: lsls r0, r6, #22
+; CHECK-NEXT: movlt r7, #0
 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr
 ; CHECK-NEXT: it gt
-; CHECK-NEXT: movgt.w r1, #-1
-; CHECK-NEXT: orr.w r0, r0, r1, lsr #10
-; CHECK-NEXT: vcvtt.f32.f16 s18, s18
-; CHECK-NEXT: str r1, [sp] @ 4-byte Spill
-; CHECK-NEXT: lsrs r1, r4, #10
-; CHECK-NEXT: str.w r0, [r10, #20]
+; CHECK-NEXT: movgt.w r7, #-1
+; CHECK-NEXT: lsrs r0, r7, #14
+; CHECK-NEXT: orr.w r0, r0, r5, lsl #18
+; CHECK-NEXT: vcvtt.f32.f16 s18, s17
+; CHECK-NEXT: str.w r0, [r4, #33]
 ; CHECK-NEXT: vmov r0, s18
-; CHECK-NEXT: strb.w r1, [r10, #49]
 ; CHECK-NEXT: bl __aeabi_f2ulz
 ; CHECK-NEXT: vcmp.f32 s18, #0
-; CHECK-NEXT: mov r9, r0
+; CHECK-NEXT: mov r9, r1
 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr
 ; CHECK-NEXT: vcmp.f32 s18, s20
 ; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt.w r9, #0
+; CHECK-NEXT: movlt r0, #0
 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s24, #0
+; CHECK-NEXT: vcmp.f32 s18, #0
 ; CHECK-NEXT: it gt
-; CHECK-NEXT: movgt.w r9, #-1
+; CHECK-NEXT: movgt.w r0, #-1
 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NEXT: str r0, [sp] @ 4-byte Spill
 ; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt r7, #0
-; CHECK-NEXT: vcmp.f32 s24, s20
-; CHECK-NEXT: vcvtt.f32.f16 s16, s16
+; CHECK-NEXT: movlt.w r9, #0
+; CHECK-NEXT: vcmp.f32 s18, s20
 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NEXT: mov r1, r0
 ; CHECK-NEXT: itt gt
-; CHECK-NEXT: movwgt r7, #65535
-; CHECK-NEXT: movtgt r7, #3
-; CHECK-NEXT: bfc r7, #18, #14
-; CHECK-NEXT: mov r5, r1
-; CHECK-NEXT: orr.w r0, r7, r9, lsl #18
-; CHECK-NEXT: str.w r0, [r10, #29]
+; CHECK-NEXT: movwgt r9, #65535
+; CHECK-NEXT: movtgt r9, #3
+; CHECK-NEXT: lsl.w r0, r9, #22
+; CHECK-NEXT: orr.w r0, r0, r1, lsr #10
+; CHECK-NEXT: vcvtt.f32.f16 s16, s16
+; CHECK-NEXT: str r0, [r4, #20]
 ; CHECK-NEXT: vmov r0, s16
-; CHECK-NEXT: lsrs r1, r6, #10
-; CHECK-NEXT: strb.w r1, [r10, #24]
 ; CHECK-NEXT: bl __aeabi_f2ulz
 ; CHECK-NEXT: vcmp.f32 s16, #0
-; CHECK-NEXT: mov r8, r0
+; CHECK-NEXT: mov r11, r1
 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr
 ; CHECK-NEXT: vcmp.f32 s16, s20
 ; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt.w r8, #0
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s22, #0
-; CHECK-NEXT: it gt
-; CHECK-NEXT: movgt.w r8, #-1
-; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s22, s20
-; CHECK-NEXT: it lt
 ; CHECK-NEXT: movlt.w r11, #0
 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NEXT: mov r8, r0
+; CHECK-NEXT: vcmp.f32 s16, #0
 ; CHECK-NEXT: itt gt
 ; CHECK-NEXT: movwgt r11, #65535
 ; CHECK-NEXT: movtgt r11, #3
-; CHECK-NEXT: vcvtb.f32.f16 s22, s19
-; CHECK-NEXT: bfc r11, #18, #14
-; CHECK-NEXT: mov r6, r1
-; CHECK-NEXT: orr.w r0, r11, r8, lsl #18
-; CHECK-NEXT: str.w r0, [r10, #4]
-; CHECK-NEXT: vmov r0, s22
+; CHECK-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NEXT: vcmp.f32 s16, s20
+; CHECK-NEXT: it lt
+; CHECK-NEXT: movlt.w r8, #0
+; CHECK-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NEXT: it gt
+; CHECK-NEXT: movgt.w r8, #-1
+; CHECK-NEXT: lsr.w r0, r8, #14
+; CHECK-NEXT: vcvtb.f32.f16 s16, s19
+; CHECK-NEXT: orr.w r0, r0, r11, lsl #18
+; CHECK-NEXT: str r0, [r4, #8]
+; CHECK-NEXT: lsr.w r0, r10, #10
+; CHECK-NEXT: strb.w r0, [r4, #49]
+; CHECK-NEXT: vmov r0, s16
 ; CHECK-NEXT: bl __aeabi_f2ulz
-; CHECK-NEXT: vcmp.f32 s22, #0
-; CHECK-NEXT: mov r7, r0
+; CHECK-NEXT: mov r6, r0
+; CHECK-NEXT: vcmp.f32 s16, #0
 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s22, s20
 ; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt r7, #0
+; CHECK-NEXT: movlt r6, #0
+; CHECK-NEXT: vcmp.f32 s16, s20
+; CHECK-NEXT: ubfx r0, r5, #14, #4
 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s18, #0
 ; CHECK-NEXT: it gt
-; CHECK-NEXT: movgt.w r7, #-1
+; CHECK-NEXT: movgt.w r6, #-1
+; CHECK-NEXT: orr.w r0, r0, r6, lsl #4
+; CHECK-NEXT: str.w r0, [r4, #37]
+; CHECK-NEXT: vcmp.f32 s24, #0
+; CHECK-NEXT: ldr r0, [sp, #8] @ 4-byte Reload
 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s18, s20
 ; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt r5, #0
+; CHECK-NEXT: movlt r0, #0
+; CHECK-NEXT: vcmp.f32 s24, s20
+; CHECK-NEXT: vcvtb.f32.f16 s18, s17
 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr
 ; CHECK-NEXT: itt gt
-; CHECK-NEXT: movwgt r5, #65535
-; CHECK-NEXT: movtgt r5, #3
-; CHECK-NEXT: vcvtb.f32.f16 s18, s17
-; CHECK-NEXT: bfc r5, #18, #14
-; CHECK-NEXT: mov r4, r1
-; CHECK-NEXT: lsrs r0, r5, #14
-; CHECK-NEXT: orr.w r0, r0, r7, lsl #4
-; CHECK-NEXT: str.w r0, [r10, #37]
-; CHECK-NEXT: lsr.w r0, r9, #14
-; CHECK-NEXT: orr.w r0, r0, r5, lsl #18
-; CHECK-NEXT: str.w r0, [r10, #33]
+; CHECK-NEXT: movwgt r0, #65535
+; CHECK-NEXT: movtgt r0, #3
+; CHECK-NEXT: bfc r0, #18, #14
+; CHECK-NEXT: mov r10, r1
+; CHECK-NEXT: orr.w r0, r0, r7, lsl #18
+; CHECK-NEXT: str.w r0, [r4, #29]
+; CHECK-NEXT: lsr.w r0, r9, #10
+; CHECK-NEXT: strb r0, [r4, #24]
 ; CHECK-NEXT: vmov r0, s18
 ; CHECK-NEXT: bl __aeabi_f2ulz
 ; CHECK-NEXT: vcmp.f32 s18, #0
+; CHECK-NEXT: ubfx r2, r11, #14, #4
 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s18, s20
 ; CHECK-NEXT: it lt
 ; CHECK-NEXT: movlt r0, #0
+; CHECK-NEXT: vcmp.f32 s18, s20
 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s16, #0
 ; CHECK-NEXT: it gt
 ; CHECK-NEXT: movgt.w r0, #-1
+; CHECK-NEXT: orr.w r2, r2, r0, lsl #4
+; CHECK-NEXT: str r2, [r4, #12]
+; CHECK-NEXT: vcmp.f32 s22, #0
+; CHECK-NEXT: ldr r2, [sp, #4] @ 4-byte Reload
 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr
+; CHECK-NEXT: vcmp.f32 s22, s20
 ; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt r6, #0
-; CHECK-NEXT: vcmp.f32 s16, s20
+; CHECK-NEXT: movlt r2, #0
 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: itt gt
-; CHECK-NEXT: movwgt r6, #65535
-; CHECK-NEXT: movtgt r6, #3
-; CHECK-NEXT: bfc r6, #18, #14
 ; CHECK-NEXT: vcmp.f32 s18, #0
+; CHECK-NEXT: itt gt
+; CHECK-NEXT: movwgt r2, #65535
+; CHECK-NEXT: movtgt r2, #3
 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr
 ; CHECK-NEXT: vcmp.f32 s18, s20
-; CHECK-NEXT: lsr.w r2, r6, #14
-; CHECK-NEXT: orr.w r2, r2, r0, lsl #4
-; CHECK-NEXT: str.w r2, [r10, #12]
 ; CHECK-NEXT: it lt
 ; CHECK-NEXT: movlt r1, #0
 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr
-; CHECK-NEXT: vcmp.f32 s22, #0
+; CHECK-NEXT: vcmp.f32 s16, #0
 ; CHECK-NEXT: itt gt
 ; CHECK-NEXT: movwgt r1, #65535
 ; CHECK-NEXT: movtgt r1, #3
-; CHECK-NEXT: lsr.w r2, r8, #14
+; CHECK-NEXT: bfc r2, #18, #14
 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr
 ; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt r4, #0
-; CHECK-NEXT: vcmp.f32 s22, s20
+; CHECK-NEXT: movlt.w r10, #0
+; CHECK-NEXT: vcmp.f32 s16, s20
 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr
 ; CHECK-NEXT: itt gt
-; CHECK-NEXT: movwgt r4, #65535
-; CHECK-NEXT: movtgt r4, #3
-; CHECK-NEXT: orr.w r2, r2, r6, lsl #18
-; CHECK-NEXT: str.w r2, [r10, #8]
-; CHECK-NEXT: bfc r4, #18, #14
-; CHECK-NEXT: ldr r3, [sp, #4] @ 4-byte Reload
-; CHECK-NEXT: lsrs r2, r7, #28
+; CHECK-NEXT: movwgt r10, #65535
+; CHECK-NEXT: movtgt r10, #3
+; CHECK-NEXT: orr.w r2, r2, r8, lsl #18
+; CHECK-NEXT: str r2, [r4, #4]
+; CHECK-NEXT: bfc r10, #18, #14
+; CHECK-NEXT: ldr r3, [sp, #12] @ 4-byte Reload
+; CHECK-NEXT: lsrs r2, r6, #28
 ; CHECK-NEXT: bfc r1, #18, #14
-; CHECK-NEXT: orr.w r2, r2, r4, lsl #4
+; CHECK-NEXT: orr.w r2, r2, r10, lsl #4
 ; CHECK-NEXT: lsrs r0, r0, #28
 ; CHECK-NEXT: orr.w r2, r2, r3, lsl #22
-; CHECK-NEXT: str.w r2, [r10, #41]
+; CHECK-NEXT: str.w r2, [r4, #41]
 ; CHECK-NEXT: orr.w r0, r0, r1, lsl #4
 ; CHECK-NEXT: ldr r1, [sp] @ 4-byte Reload
 ; CHECK-NEXT: orr.w r0, r0, r1, lsl #22
-; CHECK-NEXT: str.w r0, [r10, #16]
-; CHECK-NEXT: add sp, #8
-; CHECK-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14}
+; CHECK-NEXT: str r0, [r4, #16]
+; CHECK-NEXT: add sp, #16
+; CHECK-NEXT: vpop {d8, d9, d10, d11, d12, d13}
 ; CHECK-NEXT: add sp, #4
 ; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
 ; CHECK-NEXT: .p2align 2
diff --git a/llvm/test/CodeGen/Thumb2/thumb2-uxtb.ll b/llvm/test/CodeGen/Thumb2/thumb2-uxtb.ll
--- a/llvm/test/CodeGen/Thumb2/thumb2-uxtb.ll
+++ b/llvm/test/CodeGen/Thumb2/thumb2-uxtb.ll
@@ -162,18 +162,18 @@
 ; CHECK-DSP-LABEL: test10:
 ; CHECK-DSP: @ %bb.0:
 ; CHECK-DSP-NEXT: mov.w r1, #16253176
-; CHECK-DSP-NEXT: and.w r0, r1, r0, lsr #7
-; CHECK-DSP-NEXT: lsrs r1, r0, #5
-; CHECK-DSP-NEXT: uxtb16 r1, r1
+; CHECK-DSP-NEXT: mov.w r2, #458759
+; CHECK-DSP-NEXT: and.w r1, r1, r0, lsr #7
+; CHECK-DSP-NEXT: and.w r0, r2, r0, lsr #12
 ; CHECK-DSP-NEXT: add r0, r1
 ; CHECK-DSP-NEXT: bx lr
 ;
 ; CHECK-NO-DSP-LABEL: test10:
 ; CHECK-NO-DSP: @ %bb.0:
 ; CHECK-NO-DSP-NEXT: mov.w r1, #16253176
-; CHECK-NO-DSP-NEXT: and.w r0, r1, r0, lsr #7
-; CHECK-NO-DSP-NEXT: mov.w r1, #458759
-; CHECK-NO-DSP-NEXT: and.w r1, r1, r0, lsr #5
+; CHECK-NO-DSP-NEXT: mov.w r2, #458759
+; CHECK-NO-DSP-NEXT: and.w r1, r1, r0, lsr #7
+; CHECK-NO-DSP-NEXT: and.w r0, r2, r0, lsr #12
 ; CHECK-NO-DSP-NEXT: add r0, r1
 ; CHECK-NO-DSP-NEXT: bx lr
diff --git a/llvm/test/CodeGen/X86/ctpop-combine.ll b/llvm/test/CodeGen/X86/ctpop-combine.ll
--- a/llvm/test/CodeGen/X86/ctpop-combine.ll
+++ b/llvm/test/CodeGen/X86/ctpop-combine.ll
@@ -88,16 +88,16 @@
 ;
 ; NO-POPCOUNT-LABEL: test4:
 ; NO-POPCOUNT: # %bb.0:
-; NO-POPCOUNT-NEXT: andb $127, %dil
-; NO-POPCOUNT-NEXT: movl %edi, %eax
-; NO-POPCOUNT-NEXT: shrb %al
-; NO-POPCOUNT-NEXT: andb $21, %al
-; NO-POPCOUNT-NEXT: subb %al, %dil
 ; NO-POPCOUNT-NEXT: movl %edi, %ecx
+; NO-POPCOUNT-NEXT: andb $127, %cl
+; NO-POPCOUNT-NEXT: shrb %dil
+; NO-POPCOUNT-NEXT: andb $21, %dil
+; NO-POPCOUNT-NEXT: subb %dil, %cl
+; NO-POPCOUNT-NEXT: movl %ecx, %eax
+; NO-POPCOUNT-NEXT: andb $51, %al
+; NO-POPCOUNT-NEXT: shrb $2, %cl
 ; NO-POPCOUNT-NEXT: andb $51, %cl
-; NO-POPCOUNT-NEXT: shrb $2, %dil
-; NO-POPCOUNT-NEXT: andb $51, %dil
-; NO-POPCOUNT-NEXT: addb %dil, %cl
+; NO-POPCOUNT-NEXT: addb %al, %cl
 ; NO-POPCOUNT-NEXT: movl %ecx, %eax
 ; NO-POPCOUNT-NEXT: shrb $4, %al
 ; NO-POPCOUNT-NEXT: addb %cl, %al
diff --git a/llvm/test/CodeGen/X86/illegal-bitfield-loadstore.ll b/llvm/test/CodeGen/X86/illegal-bitfield-loadstore.ll
--- a/llvm/test/CodeGen/X86/illegal-bitfield-loadstore.ll
+++ b/llvm/test/CodeGen/X86/illegal-bitfield-loadstore.ll
@@ -41,7 +41,7 @@
 ; X86-NEXT: shll $16, %ecx
 ; X86-NEXT: orl %edx, %ecx
 ; X86-NEXT: orl $384, %ecx # imm = 0x180
-; X86-NEXT: andl $16777088, %ecx # imm = 0xFFFF80
+; X86-NEXT: andl $-128, %ecx
 ; X86-NEXT: movw %cx, (%eax)
 ; X86-NEXT: retl
 ;
@@ -53,7 +53,7 @@
 ; X64-NEXT: shll $16, %ecx
 ; X64-NEXT: orl %eax, %ecx
 ; X64-NEXT: orl $384, %ecx # imm = 0x180
-; X64-NEXT: andl $16777088, %ecx # imm = 0xFFFF80
+; X64-NEXT: andl $-128, %ecx
 ; X64-NEXT: movw %cx, (%rdi)
 ; X64-NEXT: retq
 %b = load i24, i24* %a, align 1
@@ -121,12 +121,11 @@
 ; X64-NEXT: shll $16, %ecx
 ; X64-NEXT: orl %eax, %ecx
 ; X64-NEXT: shlq $32, %rcx
-; X64-NEXT: movl (%rdi), %eax
-; X64-NEXT: orq %rcx, %rax
-; X64-NEXT: orq $384, %rax # imm = 0x180
-; X64-NEXT: movl %eax, (%rdi)
-; X64-NEXT: shrq $32, %rax
+; X64-NEXT: movl (%rdi), %edx
+; X64-NEXT: orq %rcx, %rdx
+; X64-NEXT: orq $384, %rdx # imm = 0x180
 ; X64-NEXT: movw %ax, 4(%rdi)
+; X64-NEXT: movl %edx, (%rdi)
 ; X64-NEXT: retq
 %aa = load i56, i56* %a, align 1
 %b = or i56 %aa, 384
@@ -191,15 +190,14 @@
 ; X64-NEXT: shll $16, %edx
 ; X64-NEXT: orl %ecx, %edx
 ; X64-NEXT: shlq $32, %rdx
-; X64-NEXT: movl (%rdi), %ecx
-; X64-NEXT: orq %rdx, %rcx
+; X64-NEXT: movl (%rdi), %esi
+; X64-NEXT: orq %rdx, %rsi
 ; X64-NEXT: shlq $13, %rax
 ; X64-NEXT: movabsq $72057594037919743, %rdx # imm = 0xFFFFFFFFFFDFFF
-; X64-NEXT: andq %rcx, %rdx
+; X64-NEXT: andq %rsi, %rdx
 ; X64-NEXT: orq %rax, %rdx
+; X64-NEXT: movw %cx, 4(%rdi)
 ; X64-NEXT: movl %edx, (%rdi)
-; X64-NEXT: shrq $32, %rdx
-; X64-NEXT: movw %dx, 4(%rdi)
 ; X64-NEXT: retq
 %extbit = zext i1 %bit to i56
 %b = load i56, i56* %a, align 1
diff --git a/llvm/test/CodeGen/X86/ins_subreg_coalesce-1.ll b/llvm/test/CodeGen/X86/ins_subreg_coalesce-1.ll
--- a/llvm/test/CodeGen/X86/ins_subreg_coalesce-1.ll
+++ b/llvm/test/CodeGen/X86/ins_subreg_coalesce-1.ll
@@ -5,8 +5,9 @@
 ; CHECK-LABEL: t:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: movzwl 0, %eax
-; CHECK-NEXT: orl $2, %eax
-; CHECK-NEXT: movw %ax, 0
+; CHECK-NEXT: movl %eax, %ecx
+; CHECK-NEXT: orl $2, %ecx
+; CHECK-NEXT: movw %cx, 0
 ; CHECK-NEXT: shrl $3, %eax
 ; CHECK-NEXT: andl $1, %eax
 ; CHECK-NEXT: retl
diff --git a/llvm/test/CodeGen/X86/load-local-v3i129.ll b/llvm/test/CodeGen/X86/load-local-v3i129.ll
--- a/llvm/test/CodeGen/X86/load-local-v3i129.ll
+++ b/llvm/test/CodeGen/X86/load-local-v3i129.ll
@@ -6,12 +6,14 @@
 ; CHECK: # %bb.0: # %Entry
 ; CHECK-NEXT: movq -{{[0-9]+}}(%rsp), %rax
 ; CHECK-NEXT: movq -{{[0-9]+}}(%rsp), %rcx
-; CHECK-NEXT: shrdq $2, %rcx, %rax
+; CHECK-NEXT: movq %rcx, %rdx
+; CHECK-NEXT: shlq $62, %rdx
 ; CHECK-NEXT: shrq $2, %rcx
-; CHECK-NEXT: leaq 1(,%rax,4), %rdx
-; CHECK-NEXT: movq %rdx, -{{[0-9]+}}(%rsp)
-; CHECK-NEXT: shrdq $62, %rcx, %rax
+; CHECK-NEXT: shldq $2, %rdx, %rcx
+; CHECK-NEXT: andq $-4, %rax
+; CHECK-NEXT: orq $1, %rax
 ; CHECK-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
 ; CHECK-NEXT: orq $-2, -{{[0-9]+}}(%rsp)
 ; CHECK-NEXT: movq $-1, -{{[0-9]+}}(%rsp)
 ; CHECK-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/load-local-v4i5.ll b/llvm/test/CodeGen/X86/load-local-v4i5.ll
--- a/llvm/test/CodeGen/X86/load-local-v4i5.ll
+++ b/llvm/test/CodeGen/X86/load-local-v4i5.ll
@@ -11,6 +11,9 @@
 ; CHECK-NEXT: movb -{{[0-9]+}}(%rsp), %cl
 ; CHECK-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
 ; CHECK-NEXT: movzbl -{{[0-9]+}}(%rsp), %esi
+; CHECK-NEXT: movzbl %cl, %edi
+; CHECK-NEXT: shrb %cl
+; CHECK-NEXT: movb %cl, -{{[0-9]+}}(%rsp)
 ; CHECK-NEXT: andl $31, %eax
 ; CHECK-NEXT: andl $31, %esi
 ; CHECK-NEXT: shll $5, %esi
@@ -18,16 +21,12 @@
 ; CHECK-NEXT: andl $31, %edx
 ; CHECK-NEXT: shll $10, %edx
 ; CHECK-NEXT: orl %esi, %edx
-; CHECK-NEXT: movzbl %cl, %eax
-; CHECK-NEXT: movl %eax, %ecx
-; CHECK-NEXT: shll $15, %ecx
-; CHECK-NEXT: orl %edx, %ecx
-; CHECK-NEXT: movw %cx, -{{[0-9]+}}(%rsp)
-; CHECK-NEXT: shrl $16, %ecx
-; CHECK-NEXT: andl $15, %ecx
-; CHECK-NEXT: movb %cl, -{{[0-9]+}}(%rsp)
-; CHECK-NEXT: movb %al, -{{[0-9]+}}(%rsp)
-; CHECK-NEXT: cmpb $31, %al
+; CHECK-NEXT: movl %edi, %eax
+; CHECK-NEXT: shll $15, %eax
+; CHECK-NEXT: orl %edx, %eax
+; CHECK-NEXT: movw %ax, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: cmpb $31, %dil
 ; CHECK-NEXT: je .LBB0_2
 ; CHECK-NEXT: # %bb.1: # %Then
 ; CHECK-NEXT: int3
diff --git a/llvm/test/CodeGen/X86/masked_compressstore.ll b/llvm/test/CodeGen/X86/masked_compressstore.ll
--- a/llvm/test/CodeGen/X86/masked_compressstore.ll
+++ b/llvm/test/CodeGen/X86/masked_compressstore.ll
@@ -517,21 +517,20 @@
 ; AVX512F-NEXT: vpslld $31, %zmm2, %zmm2
 ; AVX512F-NEXT: vptestmd %zmm2, %zmm2, %k1
 ; AVX512F-NEXT: kmovw %k1, %eax
-; AVX512F-NEXT: movzbl %al, %eax
-; AVX512F-NEXT: movl %eax, %ecx
-; AVX512F-NEXT: shrl %ecx
-; AVX512F-NEXT: andl $-43, %ecx
-; AVX512F-NEXT: subl %ecx, %eax
-; AVX512F-NEXT: movl %eax, %ecx
-; AVX512F-NEXT: andl $858993459, %ecx ## imm = 0x33333333
-; AVX512F-NEXT: shrl $2, %eax
+; AVX512F-NEXT: movzbl %al, %ecx
+; AVX512F-NEXT: shrl %eax
+; AVX512F-NEXT: andl $85, %eax
+; AVX512F-NEXT: subl %eax, %ecx
+; AVX512F-NEXT: movl %ecx, %eax
 ; AVX512F-NEXT: andl $858993459, %eax ## imm = 0x33333333
-; AVX512F-NEXT: addl %ecx, %eax
-; AVX512F-NEXT: movl %eax, %ecx
-; AVX512F-NEXT: shrl $4, %ecx
+; AVX512F-NEXT: shrl $2, %ecx
+; AVX512F-NEXT: andl $858993459, %ecx ## imm = 0x33333333
 ; AVX512F-NEXT: addl %eax, %ecx
-; AVX512F-NEXT: andl $252645135, %ecx ## imm = 0xF0F0F0F
-; AVX512F-NEXT: imull $16843009, %ecx, %eax ## imm = 0x1010101
+; AVX512F-NEXT: movl %ecx, %eax
+; AVX512F-NEXT: shrl $4, %eax
+; AVX512F-NEXT: addl %ecx, %eax
+; AVX512F-NEXT: andl $252645135, %eax ## imm = 0xF0F0F0F
+; AVX512F-NEXT: imull $16843009, %eax, %eax ## imm = 0x1010101
 ; AVX512F-NEXT: shrl $24, %eax
 ; AVX512F-NEXT: kshiftrw $8, %k1, %k2
 ; AVX512F-NEXT: vcompresspd %zmm1, (%rdi,%rax,8) {%k2}
@@ -571,21 +570,20 @@
 ; AVX512VLBW-NEXT: vpsllw $7, %xmm2, %xmm2
 ; AVX512VLBW-NEXT: vpmovb2m %xmm2, %k1
 ; AVX512VLBW-NEXT: kmovd %k1, %eax
-; AVX512VLBW-NEXT: movzbl %al, %eax
-; AVX512VLBW-NEXT: movl %eax, %ecx
-; AVX512VLBW-NEXT: shrl %ecx
-; AVX512VLBW-NEXT: andl $-43, %ecx
-; AVX512VLBW-NEXT: subl %ecx, %eax
-; AVX512VLBW-NEXT: movl %eax, %ecx
-; AVX512VLBW-NEXT: andl $858993459, %ecx ## imm = 0x33333333
-; AVX512VLBW-NEXT: shrl $2, %eax
+; AVX512VLBW-NEXT: movzbl %al, %ecx
+; AVX512VLBW-NEXT: shrl %eax
+; AVX512VLBW-NEXT: andl $85, %eax
+; AVX512VLBW-NEXT: subl %eax, %ecx
+; AVX512VLBW-NEXT: movl %ecx, %eax
 ; AVX512VLBW-NEXT: andl $858993459, %eax ## imm = 0x33333333
-; AVX512VLBW-NEXT: addl %ecx, %eax
-; AVX512VLBW-NEXT: movl %eax, %ecx
-; AVX512VLBW-NEXT: shrl $4, %ecx
+; AVX512VLBW-NEXT: shrl $2, %ecx
+; AVX512VLBW-NEXT: andl $858993459, %ecx ## imm = 0x33333333
 ; AVX512VLBW-NEXT: addl %eax, %ecx
-; AVX512VLBW-NEXT: andl $252645135, %ecx ## imm = 0xF0F0F0F
-; AVX512VLBW-NEXT: imull $16843009, %ecx, %eax ## imm = 0x1010101
+; AVX512VLBW-NEXT: movl %ecx, %eax
+; AVX512VLBW-NEXT: shrl $4, %eax
+; AVX512VLBW-NEXT: addl %ecx, %eax
+; AVX512VLBW-NEXT: andl $252645135, %eax ## imm = 0xF0F0F0F
+; AVX512VLBW-NEXT: imull $16843009, %eax, %eax ## imm = 0x1010101
 ; AVX512VLBW-NEXT: shrl $24, %eax
 ; AVX512VLBW-NEXT: kshiftrw $8, %k1, %k2
 ; AVX512VLBW-NEXT: vcompresspd %zmm1, (%rdi,%rax,8) {%k2}
diff --git a/llvm/test/CodeGen/X86/mul128.ll b/llvm/test/CodeGen/X86/mul128.ll
--- a/llvm/test/CodeGen/X86/mul128.ll
+++ b/llvm/test/CodeGen/X86/mul128.ll
@@ -107,13 +107,14 @@
 define void @PR13897() nounwind {
 ; X64-LABEL: PR13897:
 ; X64: # %bb.0: # %"0x0"
-; X64-NEXT: movl bbb(%rip), %ecx
+; X64-NEXT: movq bbb(%rip), %rsi
+; X64-NEXT: movl %esi, %ecx
+; X64-NEXT: shlq $32, %rsi
 ; X64-NEXT: movabsq $4294967297, %rdx # imm = 0x100000001
 ; X64-NEXT: movq %rcx, %rax
 ; X64-NEXT: mulq %rdx
 ; X64-NEXT: addq %rcx, %rdx
-; X64-NEXT: shlq $32, %rcx
-; X64-NEXT: addq %rcx, %rdx
+; X64-NEXT: addq %rsi, %rdx
 ; X64-NEXT: movq %rax, aaa(%rip)
 ; X64-NEXT: movq %rdx, aaa+8(%rip)
 ; X64-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/shift-mask.ll b/llvm/test/CodeGen/X86/shift-mask.ll
--- a/llvm/test/CodeGen/X86/shift-mask.ll
+++ b/llvm/test/CodeGen/X86/shift-mask.ll
@@ -555,10 +555,11 @@
 ; X86-LABEL: test_i64_lshr_lshr_1:
 ; X86: # %bb.0:
 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X86-NEXT: shldl $3, %eax, %edx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: leal (,%ecx,8), %edx
+; X86-NEXT: shldl $3, %eax, %ecx
 ; X86-NEXT: shll $3, %eax
-; X86-NEXT: shrdl $5, %edx, %eax
+; X86-NEXT: shrdl $5, %ecx, %eax
 ; X86-NEXT: shrl $5, %edx
 ; X86-NEXT: retl
 ;
diff --git a/llvm/test/CodeGen/X86/udiv_fix_sat.ll b/llvm/test/CodeGen/X86/udiv_fix_sat.ll
--- a/llvm/test/CodeGen/X86/udiv_fix_sat.ll
+++ b/llvm/test/CodeGen/X86/udiv_fix_sat.ll
@@ -285,15 +285,14 @@
 ; X86: # %bb.0:
 ; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: movzwl %cx, %ecx
-; X86-NEXT: addl %ecx, %ecx
 ; X86-NEXT: movl %ecx, %edx
-; X86-NEXT: shrl $16, %edx
-; X86-NEXT: shll $16, %ecx
+; X86-NEXT: shll $17, %edx
+; X86-NEXT: shrl $15, %ecx
+; X86-NEXT: andl $1, %ecx
 ; X86-NEXT: pushl $0
 ; X86-NEXT: pushl %eax
-; X86-NEXT: pushl %edx
 ; X86-NEXT: pushl %ecx
+; X86-NEXT: pushl %edx
 ; X86-NEXT: calll __udivdi3
 ; X86-NEXT: addl $16, %esp
 ; X86-NEXT: cmpl $131071, %eax # imm = 0x1FFFF
diff --git a/llvm/test/CodeGen/X86/vector-mulfix-legalize.ll b/llvm/test/CodeGen/X86/vector-mulfix-legalize.ll
--- a/llvm/test/CodeGen/X86/vector-mulfix-legalize.ll
+++ b/llvm/test/CodeGen/X86/vector-mulfix-legalize.ll
@@ -57,15 +57,15 @@
 ; CHECK-NEXT: movl $32768, %ecx # imm = 0x8000
 ; CHECK-NEXT: cmovll %ecx, %edx
 ; CHECK-NEXT: pextrw $1, %xmm0, %esi
-; CHECK-NEXT: movswl %si, %edi
-; CHECK-NEXT: movl %edi, %eax
-; CHECK-NEXT: shrl $16, %eax
-; CHECK-NEXT: leal (%rdi,%rdi), %esi
-; CHECK-NEXT: shrdw $15, %ax, %si
-; CHECK-NEXT: sarl $15, %edi
-; CHECK-NEXT: cmpl $16384, %edi # imm = 0x4000
+; CHECK-NEXT: leal (%rsi,%rsi), %edi
+; CHECK-NEXT: movswl %si, %eax
+; CHECK-NEXT: movl %eax, %esi
+; CHECK-NEXT: shrl $16, %esi
+; CHECK-NEXT: shldw $1, %di, %si
+; CHECK-NEXT: sarl $15, %eax
+; CHECK-NEXT: cmpl $16384, %eax # imm = 0x4000
 ; CHECK-NEXT: cmovgel %r8d, %esi
-; CHECK-NEXT: cmpl $-16384, %edi # imm = 0xC000
+; CHECK-NEXT: cmpl $-16384, %eax # imm = 0xC000
 ; CHECK-NEXT: cmovll %ecx, %esi
 ; CHECK-NEXT: movd %xmm0, %eax
 ; CHECK-NEXT: cwtl
@@ -82,11 +82,11 @@
 ; CHECK-NEXT: pinsrw $1, %esi, %xmm1
 ; CHECK-NEXT: pinsrw $2, %edx, %xmm1
 ; CHECK-NEXT: pextrw $3, %xmm0, %eax
+; CHECK-NEXT: leal (,%rax,4), %edx
 ; CHECK-NEXT: cwtl
-; CHECK-NEXT: movl %eax, %edx
-; CHECK-NEXT: shrl $14, %edx
-; CHECK-NEXT: leal (,%rax,4), %esi
-; CHECK-NEXT: shrdw $15, %dx, %si
+; CHECK-NEXT: movl %eax, %esi
+; CHECK-NEXT: shrl $14, %esi
+; CHECK-NEXT: shldw $1, %dx, %si
 ; CHECK-NEXT: sarl $14, %eax
 ; CHECK-NEXT: cmpl $16384, %eax # imm = 0x4000
 ; CHECK-NEXT: cmovgel %r8d, %esi