diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -333,6 +333,9 @@
     setOperationAction(ISD::SUBE, VT, Legal);
   }
 
+  // The hardware supports 32-bit FSHR, but not FSHL.
+  setOperationAction(ISD::FSHR, MVT::i32, Legal);
+
   // The hardware supports 32-bit ROTR, but not ROTL.
   setOperationAction(ISD::ROTL, MVT::i32, Expand);
   setOperationAction(ISD::ROTL, MVT::i64, Expand);
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstructions.td b/llvm/lib/Target/AMDGPU/AMDGPUInstructions.td
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstructions.td
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstructions.td
@@ -736,6 +736,12 @@
 >;
 }
 
+// fshr pattern
+class FSHRPattern <Instruction BIT_ALIGN> : AMDGPUPat <
+  (fshr i32:$src0, i32:$src1, i32:$src2),
+  (BIT_ALIGN $src0, $src1, $src2)
+>;
+
 // rotr pattern
 class ROTRPattern <Instruction BIT_ALIGN> : AMDGPUPat <
   (rotr i32:$src0, i32:$src1),
diff --git a/llvm/lib/Target/AMDGPU/EvergreenInstructions.td b/llvm/lib/Target/AMDGPU/EvergreenInstructions.td
--- a/llvm/lib/Target/AMDGPU/EvergreenInstructions.td
+++ b/llvm/lib/Target/AMDGPU/EvergreenInstructions.td
@@ -422,6 +422,7 @@
 def : UMad24Pat<MULADD_UINT24_eg>;
 
 def BIT_ALIGN_INT_eg : R600_3OP <0xC, "BIT_ALIGN_INT", [], VecALU>;
+def : FSHRPattern <BIT_ALIGN_INT_eg>;
 def : ROTRPattern <BIT_ALIGN_INT_eg>;
 def MULADD_eg : MULADD_Common<0x14>;
 def MULADD_IEEE_eg : MULADD_IEEE_Common<0x18>;
diff --git a/llvm/lib/Target/AMDGPU/SIInstructions.td b/llvm/lib/Target/AMDGPU/SIInstructions.td
--- a/llvm/lib/Target/AMDGPU/SIInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SIInstructions.td
@@ -1484,6 +1484,7 @@
 
 // FIXME: This should only be done for VALU inputs
 defm : BFIPatterns <V_BFI_B32, S_MOV_B32, SReg_64>;
+def : FSHRPattern <V_ALIGNBIT_B32>;
 def : ROTRPattern <V_ALIGNBIT_B32>;
 
 def : GCNPat<(i32 (trunc (srl i64:$src0, (and i32:$src1, (i32 31))))),
diff --git a/llvm/test/CodeGen/AMDGPU/build-vector-packed-partial-undef.ll b/llvm/test/CodeGen/AMDGPU/build-vector-packed-partial-undef.ll
--- a/llvm/test/CodeGen/AMDGPU/build-vector-packed-partial-undef.ll
+++ b/llvm/test/CodeGen/AMDGPU/build-vector-packed-partial-undef.ll
@@ -163,9 +163,8 @@
 ; GFX8-LABEL: undef_lo2_v4i16:
 ; GFX8: ; %bb.0:
 ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX8-NEXT: v_lshlrev_b32_e32 v1, 16, v0
-; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX8-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX8-NEXT: v_alignbit_b32 v0, v1, v0, 16
 ; GFX8-NEXT: ;;#ASMSTART
 ; GFX8-NEXT: ; use v[0:1]
 ; GFX8-NEXT: ;;#ASMEND
@@ -190,9 +189,8 @@
 ; GFX8-LABEL: undef_lo2_v4f16:
 ; GFX8: ; %bb.0:
 ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX8-NEXT: v_lshlrev_b32_e32 v1, 16, v0
-; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX8-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX8-NEXT: v_alignbit_b32 v0, v1, v0, 16
 ; GFX8-NEXT: ;;#ASMSTART
 ; GFX8-NEXT: ; use v[0:1]
 ; GFX8-NEXT: ;;#ASMEND
diff --git a/llvm/test/CodeGen/AMDGPU/fshl.ll b/llvm/test/CodeGen/AMDGPU/fshl.ll
--- a/llvm/test/CodeGen/AMDGPU/fshl.ll
+++ b/llvm/test/CodeGen/AMDGPU/fshl.ll
@@ -97,10 +97,8 @@
 ; SI-NEXT: s_mov_b32 s7, 0xf000
 ; SI-NEXT: s_mov_b32 s6, -1
 ; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: s_lshr_b32 s1, s1, 25
-; SI-NEXT: s_lshl_b32 s0, s0, 7
-; SI-NEXT: s_or_b32 s0, s0, s1
-; SI-NEXT: v_mov_b32_e32 v0, s0
+; SI-NEXT: v_mov_b32_e32 v0, s1
+; SI-NEXT: v_alignbit_b32 v0, s0, v0, 25
 ; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
 ; SI-NEXT: s_endpgm
 ;
@@ -109,12 +107,10 @@
 ; VI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24
 ; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x2c
 ; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v0, s1
+; VI-NEXT: v_alignbit_b32 v2, s0, v0, 25
 ; VI-NEXT: v_mov_b32_e32 v0, s2
-; VI-NEXT: s_lshr_b32 s1, s1, 25
-; VI-NEXT: s_lshl_b32 s0, s0, 7
-; VI-NEXT: s_or_b32 s0, s0, s1
 ; VI-NEXT: v_mov_b32_e32 v1, s3
-; VI-NEXT: v_mov_b32_e32 v2, s0
 ; VI-NEXT: flat_store_dword v[0:1], v2
 ; VI-NEXT: s_endpgm
 ;
@@ -123,28 +119,24 @@
 ; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24
 ; GFX9-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x2c
 ; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v0, s1
+; GFX9-NEXT: v_alignbit_b32 v2, s0, v0, 25
 ; GFX9-NEXT: v_mov_b32_e32 v0, s2
-; GFX9-NEXT: s_lshr_b32 s1, s1, 25
-; GFX9-NEXT: s_lshl_b32 s0, s0, 7
-; GFX9-NEXT: s_or_b32 s0, s0, s1
 ; GFX9-NEXT: v_mov_b32_e32 v1, s3
-; GFX9-NEXT: v_mov_b32_e32 v2, s0
 ; GFX9-NEXT: global_store_dword v[0:1], v2, off
 ; GFX9-NEXT: s_endpgm
 ;
 ; R600-LABEL: fshl_i32_imm:
 ; R600: ; %bb.0: ; %entry
-; R600-NEXT: ALU 5, @4, KC0[CB0:0-32], KC1[]
-; R600-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
+; R600-NEXT: ALU 3, @4, KC0[CB0:0-32], KC1[]
+; R600-NEXT: MEM_RAT_CACHELESS STORE_RAW T1.X, T0.X, 1
 ; R600-NEXT: CF_END
 ; R600-NEXT: PAD
 ; R600-NEXT: ALU clause starting at 4:
-; R600-NEXT: LSHL T0.W, KC0[2].Z, literal.x,
-; R600-NEXT: LSHR * T1.W, KC0[2].W, literal.y,
-; R600-NEXT: 7(9.809089e-45), 25(3.503246e-44)
-; R600-NEXT: OR_INT T0.X, PV.W, PS,
-; R600-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
+; R600-NEXT: LSHR * T0.X, KC0[2].Y, literal.x,
 ; R600-NEXT: 2(2.802597e-45), 0(0.000000e+00)
+; R600-NEXT: BIT_ALIGN_INT * T1.X, KC0[2].Z, KC0[2].W, literal.x,
+; R600-NEXT: 25(3.503246e-44), 0(0.000000e+00)
 entry:
   %0 = call i32 @llvm.fshl.i32(i32 %x, i32 %y, i32 7)
   store i32 %0, i32 addrspace(1)* %in
@@ -283,14 +275,10 @@
 ; SI-NEXT: s_mov_b32 s7, 0xf000
 ; SI-NEXT: s_mov_b32 s6, -1
 ; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: s_lshl_b32 s3, s3, 9
-; SI-NEXT: s_lshr_b32 s1, s1, 23
-; SI-NEXT: s_lshr_b32 s0, s0, 25
-; SI-NEXT: s_lshl_b32 s2, s2, 7
-; SI-NEXT: s_or_b32 s1, s3, s1
-; SI-NEXT: s_or_b32 s0, s2, s0
+; SI-NEXT: v_mov_b32_e32 v0, s1
+; SI-NEXT: v_alignbit_b32 v1, s3, v0, 23
 ; SI-NEXT: v_mov_b32_e32 v0, s0
-; SI-NEXT: v_mov_b32_e32 v1, s1
+; SI-NEXT: v_alignbit_b32 v0, s2, v0, 25
 ; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; SI-NEXT: s_endpgm
 ;
@@ -300,15 +288,11 @@
 ; VI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x2c
 ; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
 ; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v0, s1
+; VI-NEXT: v_mov_b32_e32 v2, s0
+; VI-NEXT: v_alignbit_b32 v1, s5, v0, 23
+; VI-NEXT: v_alignbit_b32 v0, s4, v2, 25
 ; VI-NEXT: v_mov_b32_e32 v2, s2
-; VI-NEXT: s_lshl_b32 s5, s5, 9
-; VI-NEXT: s_lshr_b32 s1, s1, 23
-; VI-NEXT: s_lshr_b32 s0, s0, 25
-; VI-NEXT: s_lshl_b32 s4, s4, 7
-; VI-NEXT: s_or_b32 s1, s5, s1
-; VI-NEXT: s_or_b32 s0, s4, s0
-; VI-NEXT: v_mov_b32_e32 v0, s0
-; VI-NEXT: v_mov_b32_e32 v1, s1
 ; VI-NEXT: v_mov_b32_e32 v3, s3
 ; VI-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
 ; VI-NEXT: s_endpgm
 ;
@@ -319,34 +303,26 @@
 ; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x2c
 ; GFX9-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
 ; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v0, s1
+; GFX9-NEXT: v_mov_b32_e32 v2, s0
+; GFX9-NEXT: v_alignbit_b32 v1, s5, v0, 23
+; GFX9-NEXT: v_alignbit_b32 v0, s4, v2, 25
 ; GFX9-NEXT: v_mov_b32_e32 v2, s2
-; GFX9-NEXT: s_lshl_b32 s5, s5, 9
-; GFX9-NEXT: s_lshr_b32 s1, s1, 23
-; GFX9-NEXT: s_lshr_b32 s0, s0, 25
-; GFX9-NEXT: s_lshl_b32 s4, s4, 7
-; GFX9-NEXT: s_or_b32 s1, s5, s1
-; GFX9-NEXT: s_or_b32 s0, s4, s0
-; GFX9-NEXT: v_mov_b32_e32 v0, s0
-; GFX9-NEXT: v_mov_b32_e32 v1, s1
 ; GFX9-NEXT: v_mov_b32_e32 v3, s3
 ; GFX9-NEXT: global_store_dwordx2 v[2:3], v[0:1], off
 ; GFX9-NEXT: s_endpgm
 ;
 ; R600-LABEL: fshl_v2i32_imm:
 ; R600: ; %bb.0: ; %entry
-; R600-NEXT: ALU 9, @4, KC0[CB0:0-32], KC1[]
+; R600-NEXT: ALU 5, @4, KC0[CB0:0-32], KC1[]
 ; R600-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.XY, T1.X, 1
 ; R600-NEXT: CF_END
 ; R600-NEXT: PAD
 ; R600-NEXT: ALU clause starting at 4:
-; R600-NEXT: LSHL T0.W, KC0[3].X, literal.x,
-; R600-NEXT: LSHR * T1.W, KC0[3].Z, literal.y,
-; R600-NEXT: 9(1.261169e-44), 23(3.222986e-44)
-; R600-NEXT: OR_INT T0.Y, PV.W, PS,
-; R600-NEXT: LSHL T0.W, KC0[2].W, literal.x,
-; R600-NEXT: LSHR * T1.W, KC0[3].Y, literal.y,
-; R600-NEXT: 7(9.809089e-45), 25(3.503246e-44)
-; R600-NEXT: OR_INT T0.X, PV.W, PS,
+; R600-NEXT: BIT_ALIGN_INT * T0.Y, KC0[3].X, KC0[3].Z, literal.x,
+; R600-NEXT: 23(3.222986e-44), 0(0.000000e+00)
+; R600-NEXT: BIT_ALIGN_INT * T0.X, KC0[2].W, KC0[3].Y, literal.x,
+; R600-NEXT: 25(3.503246e-44), 0(0.000000e+00)
 ; R600-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
 ; R600-NEXT: 2(2.802597e-45), 0(0.000000e+00)
 entry:
@@ -557,22 +533,14 @@
 ; SI-NEXT: s_mov_b32 s7, 0xf000
 ; SI-NEXT: s_mov_b32 s6, -1
 ; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: s_lshl_b32 s11, s11, 1
-; SI-NEXT: s_lshr_b32 s3, s3, 31
-; SI-NEXT: s_lshr_b32 s2, s2, 23
-; SI-NEXT: s_lshl_b32 s10, s10, 9
-; SI-NEXT: s_lshr_b32 s1, s1, 25
-; SI-NEXT: s_lshl_b32 s9, s9, 7
-; SI-NEXT: s_lshr_b32 s0, s0, 31
-; SI-NEXT: s_lshl_b32 s8, s8, 1
-; SI-NEXT: s_or_b32 s3, s11, s3
-; SI-NEXT: s_or_b32 s2, s10, s2
-; SI-NEXT: s_or_b32 s1, s9, s1
-; SI-NEXT: s_or_b32 s0, s8, s0
+; SI-NEXT: v_mov_b32_e32 v0, s3
+; SI-NEXT: v_alignbit_b32 v3, s11, v0, 31
+; SI-NEXT: v_mov_b32_e32 v0, s2
+; SI-NEXT: v_alignbit_b32 v2, s10, v0, 23
+; SI-NEXT: v_mov_b32_e32 v0, s1
+; SI-NEXT: v_alignbit_b32 v1, s9, v0, 25
 ; SI-NEXT: v_mov_b32_e32 v0, s0
-; SI-NEXT: v_mov_b32_e32 v1, s1
-; SI-NEXT: v_mov_b32_e32 v2, s2
-; SI-NEXT: v_mov_b32_e32 v3, s3
+; SI-NEXT: v_alignbit_b32 v0, s8, v0, 31
 ; SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[4:7], 0
 ; SI-NEXT: s_endpgm
 ;
@@ -583,23 +551,15 @@
 ; VI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x44
 ; VI-NEXT: s_waitcnt lgkmcnt(0)
 ; VI-NEXT: v_mov_b32_e32 v4, s8
-; VI-NEXT: s_lshl_b32 s7, s7, 1
-; VI-NEXT: s_lshr_b32 s3, s3, 31
-; VI-NEXT: s_lshr_b32 s2, s2, 23
-; VI-NEXT: s_lshl_b32 s6, s6, 9
-; VI-NEXT: s_lshr_b32 s1, s1, 25
-; VI-NEXT: s_lshl_b32 s5, s5, 7
-; VI-NEXT: s_lshr_b32 s0, s0, 31
-; VI-NEXT: s_lshl_b32 s4, s4, 1
-; VI-NEXT: s_or_b32 s3, s7, s3
-; VI-NEXT: s_or_b32 s2, s6, s2
-; VI-NEXT: s_or_b32 s1, s5, s1
-; VI-NEXT: s_or_b32 s0, s4, s0
-; VI-NEXT: v_mov_b32_e32 v0, s0
-; VI-NEXT: v_mov_b32_e32 v1, s1
-; VI-NEXT: v_mov_b32_e32 v2, s2
-; VI-NEXT: v_mov_b32_e32 v3, s3
 ; VI-NEXT: v_mov_b32_e32 v5, s9
+; VI-NEXT: v_mov_b32_e32 v0, s3
+; VI-NEXT: v_mov_b32_e32 v1, s2
+; VI-NEXT: v_alignbit_b32 v3, s7, v0, 31
+; VI-NEXT: v_mov_b32_e32 v0, s1
+; VI-NEXT: v_alignbit_b32 v2, s6, v1, 23
+; VI-NEXT: v_alignbit_b32 v1, s5, v0, 25
+; VI-NEXT: v_mov_b32_e32 v0, s0
+; VI-NEXT: v_alignbit_b32 v0, s4, v0, 31
 ; VI-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
 ; VI-NEXT: s_endpgm
 ;
@@ -610,49 +570,33 @@
 ; GFX9-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x44
 ; GFX9-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT: v_mov_b32_e32 v4, s8
-; GFX9-NEXT: s_lshl_b32 s7, s7, 1
-; GFX9-NEXT: s_lshr_b32 s3, s3, 31
-; GFX9-NEXT: s_lshr_b32 s2, s2, 23
-; GFX9-NEXT: s_lshl_b32 s6, s6, 9
-; GFX9-NEXT: s_lshr_b32 s1, s1, 25
-; GFX9-NEXT: s_lshl_b32 s5, s5, 7
-; GFX9-NEXT: s_lshr_b32 s0, s0, 31
-; GFX9-NEXT: s_lshl_b32 s4, s4, 1
-; GFX9-NEXT: s_or_b32 s3, s7, s3
-; GFX9-NEXT: s_or_b32 s2, s6, s2
-; GFX9-NEXT: s_or_b32 s1, s5, s1
-; GFX9-NEXT: s_or_b32 s0, s4, s0
-; GFX9-NEXT: v_mov_b32_e32 v0, s0
-; GFX9-NEXT: v_mov_b32_e32 v1, s1
-; GFX9-NEXT: v_mov_b32_e32 v2, s2
-; GFX9-NEXT: v_mov_b32_e32 v3, s3
 ; GFX9-NEXT: v_mov_b32_e32 v5, s9
+; GFX9-NEXT: v_mov_b32_e32 v0, s3
+; GFX9-NEXT: v_mov_b32_e32 v1, s2
+; GFX9-NEXT: v_alignbit_b32 v3, s7, v0, 31
+; GFX9-NEXT: v_mov_b32_e32 v0, s1
+; GFX9-NEXT: v_alignbit_b32 v2, s6, v1, 23
+; GFX9-NEXT: v_alignbit_b32 v1, s5, v0, 25
+; GFX9-NEXT: v_mov_b32_e32 v0, s0
+; GFX9-NEXT: v_alignbit_b32 v0, s4, v0, 31
 ; GFX9-NEXT: global_store_dwordx4 v[4:5], v[0:3], off
 ; GFX9-NEXT: s_endpgm
 ;
 ; R600-LABEL: fshl_v4i32_imm:
 ; R600: ; %bb.0: ; %entry
-; R600-NEXT: ALU 17, @4, KC0[CB0:0-32], KC1[]
+; R600-NEXT: ALU 9, @4, KC0[CB0:0-32], KC1[]
 ; R600-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.XYZW, T1.X, 1
 ; R600-NEXT: CF_END
 ; R600-NEXT: PAD
 ; R600-NEXT: ALU clause starting at 4:
-; R600-NEXT: LSHL T0.W, KC0[4].X, 1,
-; R600-NEXT: LSHR * T1.W, KC0[5].X, literal.x,
+; R600-NEXT: BIT_ALIGN_INT * T0.W, KC0[4].X, KC0[5].X, literal.x,
 ; R600-NEXT: 31(4.344025e-44), 0(0.000000e+00)
-; R600-NEXT: LSHL T0.Z, KC0[3].W, literal.x,
-; R600-NEXT: LSHR T2.W, KC0[4].W, literal.y,
-; R600-NEXT: OR_INT * T0.W, PV.W, PS,
-; R600-NEXT: 9(1.261169e-44), 23(3.222986e-44)
-; R600-NEXT: OR_INT T0.Z, PV.Z, PV.W,
-; R600-NEXT: LSHL T1.W, KC0[3].Z, literal.x,
-; R600-NEXT: LSHR * T2.W, KC0[4].Z, literal.y,
-; R600-NEXT: 7(9.809089e-45), 25(3.503246e-44)
-; R600-NEXT: OR_INT T0.Y, PV.W, PS,
-; R600-NEXT: LSHL T1.W, KC0[3].Y, 1,
-; R600-NEXT: LSHR * T2.W, KC0[4].Y, literal.x,
+; R600-NEXT: BIT_ALIGN_INT * T0.Z, KC0[3].W, KC0[4].W, literal.x,
+; R600-NEXT: 23(3.222986e-44), 0(0.000000e+00)
+; R600-NEXT: BIT_ALIGN_INT * T0.Y, KC0[3].Z, KC0[4].Z, literal.x,
+; R600-NEXT: 25(3.503246e-44), 0(0.000000e+00)
+; R600-NEXT: BIT_ALIGN_INT * T0.X, KC0[3].Y, KC0[4].Y, literal.x,
 ; R600-NEXT: 31(4.344025e-44), 0(0.000000e+00)
-; R600-NEXT: OR_INT T0.X, PV.W, PS,
 ; R600-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
 ; R600-NEXT: 2(2.802597e-45), 0(0.000000e+00)
 entry:
diff --git a/llvm/test/CodeGen/AMDGPU/fshr.ll b/llvm/test/CodeGen/AMDGPU/fshr.ll
--- a/llvm/test/CodeGen/AMDGPU/fshr.ll
+++ b/llvm/test/CodeGen/AMDGPU/fshr.ll
@@ -16,15 +16,9 @@
 ; SI-NEXT: s_mov_b32 s7, 0xf000
 ; SI-NEXT: s_mov_b32 s6, -1
 ; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: s_and_b32 s2, s2, 31
-; SI-NEXT: s_sub_i32 s8, 32, s2
-; SI-NEXT: s_lshr_b32 s3, s1, s2
-; SI-NEXT: s_lshl_b32 s0, s0, s8
-; SI-NEXT: s_or_b32 s0, s0, s3
-; SI-NEXT: v_mov_b32_e32 v0, s0
-; SI-NEXT: v_mov_b32_e32 v1, s1
-; SI-NEXT: v_cmp_eq_u32_e64 vcc, s2, 0
-; SI-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
+; SI-NEXT: v_mov_b32_e32 v0, s1
+; SI-NEXT: v_mov_b32_e32 v1, s2
+; SI-NEXT: v_alignbit_b32 v0, s0, v0, v1
 ; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
 ; SI-NEXT: s_endpgm
 ;
@@ -33,15 +27,9 @@
 ; VI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x24
 ; VI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x2c
 ; VI-NEXT: s_waitcnt lgkmcnt(0)
-; VI-NEXT: s_and_b32 s2, s2, 31
-; VI-NEXT: s_sub_i32 s3, 32, s2
 ; VI-NEXT: v_mov_b32_e32 v0, s1
-; VI-NEXT: s_lshr_b32 s1, s1, s2
-; VI-NEXT: s_lshl_b32 s0, s0, s3
-; VI-NEXT: s_or_b32 s0, s0, s1
-; VI-NEXT: v_mov_b32_e32 v1, s0
-; VI-NEXT: v_cmp_eq_u32_e64 vcc, s2, 0
-; VI-NEXT: v_cndmask_b32_e32 v2, v1, v0, vcc
+; VI-NEXT: v_mov_b32_e32 v1, s2
+; VI-NEXT: v_alignbit_b32 v2, s0, v0, v1
 ; VI-NEXT: v_mov_b32_e32 v0, s4
 ; VI-NEXT: v_mov_b32_e32 v1, s5
 ; VI-NEXT: flat_store_dword v[0:1], v2
@@ -52,15 +40,9 @@
 ; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x24
 ; GFX9-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x2c
 ; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_and_b32 s2, s2, 31
-; GFX9-NEXT: s_sub_i32 s3, 32, s2
 ; GFX9-NEXT: v_mov_b32_e32 v0, s1
-; GFX9-NEXT: s_lshr_b32 s1, s1, s2
-; GFX9-NEXT: s_lshl_b32 s0, s0, s3
-; GFX9-NEXT: s_or_b32 s0, s0, s1
-; GFX9-NEXT: v_mov_b32_e32 v1, s0
-; GFX9-NEXT: v_cmp_eq_u32_e64 vcc, s2, 0
-; GFX9-NEXT: v_cndmask_b32_e32 v2, v1, v0, vcc
+; GFX9-NEXT: v_mov_b32_e32 v1, s2
+; GFX9-NEXT: v_alignbit_b32 v2, s0, v0, v1
 ; GFX9-NEXT: v_mov_b32_e32 v0, s4
 ; GFX9-NEXT: v_mov_b32_e32 v1, s5
 ; GFX9-NEXT: global_store_dword v[0:1], v2, off
@@ -68,21 +50,14 @@
 ;
 ; R600-LABEL: fshr_i32:
 ; R600: ; %bb.0: ; %entry
-; R600-NEXT: ALU 9, @4, KC0[CB0:0-32], KC1[]
-; R600-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
+; R600-NEXT: ALU 2, @4, KC0[CB0:0-32], KC1[]
+; R600-NEXT: MEM_RAT_CACHELESS STORE_RAW T1.X, T0.X, 1
 ; R600-NEXT: CF_END
 ; R600-NEXT: PAD
 ; R600-NEXT: ALU clause starting at 4:
-; R600-NEXT: AND_INT * T0.W, KC0[3].X, literal.x,
-; R600-NEXT: 31(4.344025e-44), 0(0.000000e+00)
-; R600-NEXT: SUB_INT * T1.W, literal.x, PV.W,
-; R600-NEXT: 32(4.484155e-44), 0(0.000000e+00)
-; R600-NEXT: LSHL T1.W, KC0[2].Z, PV.W,
-; R600-NEXT: LSHR * T2.W, KC0[2].W, T0.W,
-; R600-NEXT: OR_INT * T1.W, PV.W, PS,
-; R600-NEXT: CNDE_INT T0.X, T0.W, KC0[2].W, PV.W,
-; R600-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
+; R600-NEXT: LSHR * T0.X, KC0[2].Y, literal.x,
 ; R600-NEXT: 2(2.802597e-45), 0(0.000000e+00)
+; R600-NEXT: BIT_ALIGN_INT * T1.X, KC0[2].Z, KC0[2].W, KC0[3].X,
 entry:
   %0 = call i32 @llvm.fshr.i32(i32 %x, i32 %y, i32 %z)
   store i32 %0, i32 addrspace(1)* %in
@@ -97,10 +72,8 @@
 ; SI-NEXT: s_mov_b32 s7, 0xf000
 ; SI-NEXT: s_mov_b32 s6, -1
 ; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: s_lshr_b32 s1, s1, 7
-; SI-NEXT: s_lshl_b32 s0, s0, 25
-; SI-NEXT: s_or_b32 s0, s0, s1
-; SI-NEXT: v_mov_b32_e32 v0, s0
+; SI-NEXT: v_mov_b32_e32 v0, s1
+; SI-NEXT: v_alignbit_b32 v0, s0, v0, 7
 ; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
 ; SI-NEXT: s_endpgm
 ;
@@ -109,12 +82,10 @@
 ; VI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24
 ; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x2c
 ; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v0, s1
+; VI-NEXT: v_alignbit_b32 v2, s0, v0, 7
 ; VI-NEXT: v_mov_b32_e32 v0, s2
-; VI-NEXT: s_lshr_b32 s1, s1, 7
-; VI-NEXT: s_lshl_b32 s0, s0, 25
-; VI-NEXT: s_or_b32 s0, s0, s1
 ; VI-NEXT: v_mov_b32_e32 v1, s3
-; VI-NEXT: v_mov_b32_e32 v2, s0
 ; VI-NEXT: flat_store_dword v[0:1], v2
 ; VI-NEXT: s_endpgm
 ;
@@ -123,28 +94,24 @@
 ; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24
 ; GFX9-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x2c
 ; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v0, s1
+; GFX9-NEXT: v_alignbit_b32 v2, s0, v0, 7
 ; GFX9-NEXT: v_mov_b32_e32 v0, s2
-; GFX9-NEXT: s_lshr_b32 s1, s1, 7
-; GFX9-NEXT: s_lshl_b32 s0, s0, 25
-; GFX9-NEXT: s_or_b32 s0, s0, s1
 ; GFX9-NEXT: v_mov_b32_e32 v1, s3
-; GFX9-NEXT: v_mov_b32_e32 v2, s0
 ; GFX9-NEXT: global_store_dword v[0:1], v2, off
 ; GFX9-NEXT: s_endpgm
 ;
 ; R600-LABEL: fshr_i32_imm:
 ; R600: ; %bb.0: ; %entry
-; R600-NEXT: ALU 5, @4, KC0[CB0:0-32], KC1[]
-; R600-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
+; R600-NEXT: ALU 3, @4, KC0[CB0:0-32], KC1[]
+; R600-NEXT: MEM_RAT_CACHELESS STORE_RAW T1.X, T0.X, 1
 ; R600-NEXT: CF_END
 ; R600-NEXT: PAD
 ; R600-NEXT: ALU clause starting at 4:
-; R600-NEXT: LSHL T0.W, KC0[2].Z, literal.x,
-; R600-NEXT: LSHR * T1.W, KC0[2].W, literal.y,
-; R600-NEXT: 25(3.503246e-44), 7(9.809089e-45)
-; R600-NEXT: OR_INT T0.X, PV.W, PS,
-; R600-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
+; R600-NEXT: LSHR * T0.X, KC0[2].Y, literal.x,
 ; R600-NEXT: 2(2.802597e-45), 0(0.000000e+00)
+; R600-NEXT: BIT_ALIGN_INT * T1.X, KC0[2].Z, KC0[2].W, literal.x,
+; R600-NEXT: 7(9.809089e-45), 0(0.000000e+00)
 entry:
   %0 = call i32 @llvm.fshr.i32(i32 %x, i32 %y, i32 7)
   store i32 %0, i32 addrspace(1)* %in
@@ -283,14 +250,10 @@
 ; SI-NEXT: s_mov_b32 s7, 0xf000
 ; SI-NEXT: s_mov_b32 s6, -1
 ; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: s_lshl_b32 s3, s3, 23
-; SI-NEXT: s_lshr_b32 s1, s1, 9
-; SI-NEXT: s_lshr_b32 s0, s0, 7
-; SI-NEXT: s_lshl_b32 s2, s2, 25
-; SI-NEXT: s_or_b32 s1, s3, s1
-; SI-NEXT: s_or_b32 s0, s2, s0
+; SI-NEXT: v_mov_b32_e32 v0, s1
+; SI-NEXT: v_alignbit_b32 v1, s3, v0, 9
 ; SI-NEXT: v_mov_b32_e32 v0, s0
-; SI-NEXT: v_mov_b32_e32 v1, s1
+; SI-NEXT: v_alignbit_b32 v0, s2, v0, 7
 ; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; SI-NEXT: s_endpgm
 ;
@@ -300,15 +263,11 @@
 ; VI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x2c
 ; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
 ; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v0, s1
+; VI-NEXT: v_mov_b32_e32 v2, s0
+; VI-NEXT: v_alignbit_b32 v1, s5, v0, 9
+; VI-NEXT: v_alignbit_b32 v0, s4, v2, 7
 ; VI-NEXT: v_mov_b32_e32 v2, s2
-; VI-NEXT: s_lshl_b32 s5, s5, 23
-; VI-NEXT: s_lshr_b32 s1, s1, 9
-; VI-NEXT: s_lshr_b32 s0, s0, 7
-; VI-NEXT: s_lshl_b32 s4, s4, 25
-; VI-NEXT: s_or_b32 s1, s5, s1
-; VI-NEXT: s_or_b32 s0, s4, s0
-; VI-NEXT: v_mov_b32_e32 v0, s0
-; VI-NEXT: v_mov_b32_e32 v1, s1
 ; VI-NEXT: v_mov_b32_e32 v3, s3
 ; VI-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
 ; VI-NEXT: s_endpgm
 ;
@@ -319,34 +278,26 @@
 ; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x2c
 ; GFX9-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
 ; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v0, s1
+; GFX9-NEXT: v_mov_b32_e32 v2, s0
+; GFX9-NEXT: v_alignbit_b32 v1, s5, v0, 9
+; GFX9-NEXT: v_alignbit_b32 v0, s4, v2, 7
 ; GFX9-NEXT: v_mov_b32_e32 v2, s2
-; GFX9-NEXT: s_lshl_b32 s5, s5, 23
-; GFX9-NEXT: s_lshr_b32 s1, s1, 9
-; GFX9-NEXT: s_lshr_b32 s0, s0, 7
-; GFX9-NEXT: s_lshl_b32 s4, s4, 25
-; GFX9-NEXT: s_or_b32 s1, s5, s1
-; GFX9-NEXT: s_or_b32 s0, s4, s0
-; GFX9-NEXT: v_mov_b32_e32 v0, s0
-; GFX9-NEXT: v_mov_b32_e32 v1, s1
 ; GFX9-NEXT: v_mov_b32_e32 v3, s3
 ; GFX9-NEXT: global_store_dwordx2 v[2:3], v[0:1], off
 ; GFX9-NEXT: s_endpgm
 ;
 ; R600-LABEL: fshr_v2i32_imm:
 ; R600: ; %bb.0: ; %entry
-; R600-NEXT: ALU 9, @4, KC0[CB0:0-32], KC1[]
+; R600-NEXT: ALU 5, @4, KC0[CB0:0-32], KC1[]
 ; R600-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.XY, T1.X, 1
 ; R600-NEXT: CF_END
 ; R600-NEXT: PAD
 ; R600-NEXT: ALU clause starting at 4:
-; R600-NEXT: LSHL T0.W, KC0[3].X, literal.x,
-; R600-NEXT: LSHR * T1.W, KC0[3].Z, literal.y,
-; R600-NEXT: 23(3.222986e-44), 9(1.261169e-44)
-; R600-NEXT: OR_INT T0.Y, PV.W, PS,
-; R600-NEXT: LSHL T0.W, KC0[2].W, literal.x,
-; R600-NEXT: LSHR * T1.W, KC0[3].Y, literal.y,
-; R600-NEXT: 25(3.503246e-44), 7(9.809089e-45)
-; R600-NEXT: OR_INT T0.X, PV.W, PS,
+; R600-NEXT: BIT_ALIGN_INT * T0.Y, KC0[3].X, KC0[3].Z, literal.x,
+; R600-NEXT: 9(1.261169e-44), 0(0.000000e+00)
+; R600-NEXT: BIT_ALIGN_INT * T0.X, KC0[2].W, KC0[3].Y, literal.x,
+; R600-NEXT: 7(9.809089e-45), 0(0.000000e+00)
 ; R600-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
 ; R600-NEXT: 2(2.802597e-45), 0(0.000000e+00)
 entry:
@@ -557,22 +508,14 @@
 ; SI-NEXT: s_mov_b32 s7, 0xf000
 ; SI-NEXT: s_mov_b32 s6, -1
 ; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: s_lshl_b32 s11, s11, 31
-; SI-NEXT: s_lshr_b32 s3, s3, 1
-; SI-NEXT: s_lshr_b32 s2, s2, 9
-; SI-NEXT: s_lshl_b32 s10, s10, 23
-; SI-NEXT: s_lshr_b32 s1, s1, 7
-; SI-NEXT: s_lshl_b32 s9, s9, 25
-; SI-NEXT: s_lshr_b32 s0, s0, 1
-; SI-NEXT: s_lshl_b32 s8, s8, 31
-; SI-NEXT: s_or_b32 s3, s11, s3
-; SI-NEXT: s_or_b32 s2, s10, s2
-; SI-NEXT: s_or_b32 s1, s9, s1
-; SI-NEXT: s_or_b32 s0, s8, s0
+; SI-NEXT: v_mov_b32_e32 v0, s3
+; SI-NEXT: v_alignbit_b32 v3, s11, v0, 1
+; SI-NEXT: v_mov_b32_e32 v0, s2
+; SI-NEXT: v_alignbit_b32 v2, s10, v0, 9
+; SI-NEXT: v_mov_b32_e32 v0, s1
+; SI-NEXT: v_alignbit_b32 v1, s9, v0, 7
 ; SI-NEXT: v_mov_b32_e32 v0, s0
-; SI-NEXT: v_mov_b32_e32 v1, s1
-; SI-NEXT: v_mov_b32_e32 v2, s2
-; SI-NEXT: v_mov_b32_e32 v3, s3
+; SI-NEXT: v_alignbit_b32 v0, s8, v0, 1
 ; SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[4:7], 0
 ; SI-NEXT: s_endpgm
 ;
@@ -583,23 +526,15 @@
 ; VI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x44
 ; VI-NEXT: s_waitcnt lgkmcnt(0)
 ; VI-NEXT: v_mov_b32_e32 v4, s8
-; VI-NEXT: s_lshl_b32 s7, s7, 31
-; VI-NEXT: s_lshr_b32 s3, s3, 1
-; VI-NEXT: s_lshr_b32 s2, s2, 9
-; VI-NEXT: s_lshl_b32 s6, s6, 23
-; VI-NEXT: s_lshr_b32 s1, s1, 7
-; VI-NEXT: s_lshl_b32 s5, s5, 25
-; VI-NEXT: s_lshr_b32 s0, s0, 1
-; VI-NEXT: s_lshl_b32 s4, s4, 31
-; VI-NEXT: s_or_b32 s3, s7, s3
-; VI-NEXT: s_or_b32 s2, s6, s2
-; VI-NEXT: s_or_b32 s1, s5, s1
-; VI-NEXT: s_or_b32 s0, s4, s0
-; VI-NEXT: v_mov_b32_e32 v0, s0
-; VI-NEXT: v_mov_b32_e32 v1, s1
-; VI-NEXT: v_mov_b32_e32 v2, s2
-; VI-NEXT: v_mov_b32_e32 v3, s3
 ; VI-NEXT: v_mov_b32_e32 v5, s9
+; VI-NEXT: v_mov_b32_e32 v0, s3
+; VI-NEXT: v_mov_b32_e32 v1, s2
+; VI-NEXT: v_alignbit_b32 v3, s7, v0, 1
+; VI-NEXT: v_mov_b32_e32 v0, s1
+; VI-NEXT: v_alignbit_b32 v2, s6, v1, 9
+; VI-NEXT: v_alignbit_b32 v1, s5, v0, 7
+; VI-NEXT: v_mov_b32_e32 v0, s0
+; VI-NEXT: v_alignbit_b32 v0, s4, v0, 1
 ; VI-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
 ; VI-NEXT: s_endpgm
 ;
@@ -610,49 +545,31 @@
 ; GFX9-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x44
 ; GFX9-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT: v_mov_b32_e32 v4, s8
-; GFX9-NEXT: s_lshl_b32 s7, s7, 31
-; GFX9-NEXT: s_lshr_b32 s3, s3, 1
-; GFX9-NEXT: s_lshr_b32 s2, s2, 9
-; GFX9-NEXT: s_lshl_b32 s6, s6, 23
-; GFX9-NEXT: s_lshr_b32 s1, s1, 7
-; GFX9-NEXT: s_lshl_b32 s5, s5, 25
-; GFX9-NEXT: s_lshr_b32 s0, s0, 1
-; GFX9-NEXT: s_lshl_b32 s4, s4, 31
-; GFX9-NEXT: s_or_b32 s3, s7, s3
-; GFX9-NEXT: s_or_b32 s2, s6, s2
-; GFX9-NEXT: s_or_b32 s1, s5, s1
-; GFX9-NEXT: s_or_b32 s0, s4, s0
-; GFX9-NEXT: v_mov_b32_e32 v0, s0
-; GFX9-NEXT: v_mov_b32_e32 v1, s1
-; GFX9-NEXT: v_mov_b32_e32 v2, s2
-; GFX9-NEXT: v_mov_b32_e32 v3, s3
 ; GFX9-NEXT: v_mov_b32_e32 v5, s9
+; GFX9-NEXT: v_mov_b32_e32 v0, s3
+; GFX9-NEXT: v_mov_b32_e32 v1, s2
+; GFX9-NEXT: v_alignbit_b32 v3, s7, v0, 1
+; GFX9-NEXT: v_mov_b32_e32 v0, s1
+; GFX9-NEXT: v_alignbit_b32 v2, s6, v1, 9
+; GFX9-NEXT: v_alignbit_b32 v1, s5, v0, 7
+; GFX9-NEXT: v_mov_b32_e32 v0, s0
+; GFX9-NEXT: v_alignbit_b32 v0, s4, v0, 1
 ; GFX9-NEXT: global_store_dwordx4 v[4:5], v[0:3], off
 ; GFX9-NEXT: s_endpgm
 ;
 ; R600-LABEL: fshr_v4i32_imm:
 ; R600: ; %bb.0: ; %entry
-; R600-NEXT: ALU 17, @4, KC0[CB0:0-32], KC1[]
+; R600-NEXT: ALU 7, @4, KC0[CB0:0-32], KC1[]
 ; R600-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.XYZW, T1.X, 1
 ; R600-NEXT: CF_END
 ; R600-NEXT: PAD
 ; R600-NEXT: ALU clause starting at 4:
-; R600-NEXT: LSHL T0.W, KC0[4].X, literal.x,
-; R600-NEXT: LSHR * T1.W, KC0[5].X, 1,
-; R600-NEXT: 31(4.344025e-44), 0(0.000000e+00)
-; R600-NEXT: LSHL T0.Z, KC0[3].W, literal.x,
-; R600-NEXT: LSHR T2.W, KC0[4].W, literal.y,
-; R600-NEXT: OR_INT * T0.W, PV.W, PS,
-; R600-NEXT: 23(3.222986e-44), 9(1.261169e-44)
-; R600-NEXT: OR_INT T0.Z, PV.Z, PV.W,
-; R600-NEXT: LSHL T1.W, KC0[3].Z, literal.x,
-; R600-NEXT: LSHR * T2.W, KC0[4].Z, literal.y,
-; R600-NEXT: 25(3.503246e-44), 7(9.809089e-45)
-; R600-NEXT: OR_INT T0.Y, PV.W, PS,
-; R600-NEXT: LSHL T1.W, KC0[3].Y, literal.x,
-; R600-NEXT: LSHR * T2.W, KC0[4].Y, 1,
-; R600-NEXT: 31(4.344025e-44), 0(0.000000e+00)
-; R600-NEXT: OR_INT T0.X, PV.W, PS,
+; R600-NEXT: BIT_ALIGN_INT * T0.W, KC0[4].X, KC0[5].X, 1,
+; R600-NEXT: BIT_ALIGN_INT * T0.Z, KC0[3].W, KC0[4].W, literal.x,
+; R600-NEXT: 9(1.261169e-44), 0(0.000000e+00)
+; R600-NEXT: BIT_ALIGN_INT * T0.Y, KC0[3].Z, KC0[4].Z, literal.x,
+; R600-NEXT: 7(9.809089e-45), 0(0.000000e+00)
+; R600-NEXT: BIT_ALIGN_INT * T0.X, KC0[3].Y, KC0[4].Y, 1,
 ; R600-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
 ; R600-NEXT: 2(2.802597e-45), 0(0.000000e+00)
 entry:
diff --git a/llvm/test/CodeGen/AMDGPU/insert_vector_elt.v2i16.ll b/llvm/test/CodeGen/AMDGPU/insert_vector_elt.v2i16.ll
--- a/llvm/test/CodeGen/AMDGPU/insert_vector_elt.v2i16.ll
+++ b/llvm/test/CodeGen/AMDGPU/insert_vector_elt.v2i16.ll
@@ -329,13 +329,12 @@
 ; CI-NEXT: s_waitcnt lgkmcnt(0)
 ; CI-NEXT: v_mov_b32_e32 v0, s0
 ; CI-NEXT: s_load_dword s0, s[2:3], 0x0
+; CI-NEXT: v_mov_b32_e32 v2, s4
 ; CI-NEXT: v_mov_b32_e32 v1, s1
 ; CI-NEXT: s_lshr_b32 s1, s4, 16
 ; CI-NEXT: s_waitcnt lgkmcnt(0)
 ; CI-NEXT: s_lshr_b32 s0, s0, 16
-; CI-NEXT: s_lshl_b32 s2, s0, 16
-; CI-NEXT: s_or_b32 s2, s1, s2
-; CI-NEXT: v_mov_b32_e32 v2, s2
+; CI-NEXT: v_alignbit_b32 v2, s0, v2, 16
 ; CI-NEXT: flat_store_dword v[0:1], v2
 ; CI-NEXT: ;;#ASMSTART
 ; CI-NEXT: ; use s1
diff --git a/llvm/test/CodeGen/AMDGPU/permute.ll b/llvm/test/CodeGen/AMDGPU/permute.ll
--- a/llvm/test/CodeGen/AMDGPU/permute.ll
+++ b/llvm/test/CodeGen/AMDGPU/permute.ll
@@ -62,8 +62,7 @@
 }
 
 ; GCN-LABEL: {{^}}lsh8_or_lsr24:
-; GCN: v_mov_b32_e32 [[MASK:v[0-9]+]], 0x6050403
-; GCN: v_perm_b32 v{{[0-9]+}}, {{[vs][0-9]+}}, {{[vs][0-9]+}}, [[MASK]]
+; GCN: v_alignbit_b32 v{{[0-9]+}}, {{[vs][0-9]+}}, {{[vs][0-9]+}}, 24
 define amdgpu_kernel void @lsh8_or_lsr24(i32 addrspace(1)* nocapture %arg, i32 %arg1) {
 bb:
   %id = tail call i32 @llvm.amdgcn.workitem.id.x()
diff --git a/llvm/test/CodeGen/AMDGPU/scalar_to_vector.ll b/llvm/test/CodeGen/AMDGPU/scalar_to_vector.ll
--- a/llvm/test/CodeGen/AMDGPU/scalar_to_vector.ll
+++ b/llvm/test/CodeGen/AMDGPU/scalar_to_vector.ll
@@ -16,9 +16,8 @@
 ; SI-NEXT: s_mov_b32 s9, s3
 ; SI-NEXT: buffer_load_dword v0, off, s[8:11], 0
 ; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v0
-; SI-NEXT: v_or_b32_e32 v0, v0, v1
+; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; SI-NEXT: v_alignbit_b32 v0, v1, v0, 16
 ; SI-NEXT: s_mov_b32 s4, s0
 ; SI-NEXT: s_mov_b32 s5, s1
 ; SI-NEXT: v_mov_b32_e32 v1, v0
@@ -39,9 +38,8 @@
 ; VI-NEXT: s_mov_b32 s7, s3
 ; VI-NEXT: buffer_load_dword v0, off, s[4:7], 0
 ; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; VI-NEXT: v_lshlrev_b32_e32 v1, 16, v0
-; VI-NEXT: v_or_b32_e32 v0, v0, v1
+; VI-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; VI-NEXT: v_alignbit_b32 v0, v1, v0, 16
 ; VI-NEXT: v_mov_b32_e32 v1, v0
 ; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
 ; VI-NEXT: s_endpgm
@@ -65,9 +63,8 @@
 ; SI-NEXT: s_mov_b32 s9, s3
 ; SI-NEXT: buffer_load_dword v0, off, s[8:11], 0
 ; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v0
-; SI-NEXT: v_or_b32_e32 v0, v0, v1
+; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; SI-NEXT: v_alignbit_b32 v0, v1, v0, 16
 ; SI-NEXT: s_mov_b32 s4, s0
 ; SI-NEXT: s_mov_b32 s5, s1
 ; SI-NEXT: v_mov_b32_e32 v1, v0
@@ -88,9 +85,8 @@
 ; VI-NEXT: s_mov_b32 s7, s3
 ; VI-NEXT: buffer_load_dword v0, off, s[4:7], 0
 ; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; VI-NEXT: v_lshlrev_b32_e32 v1, 16, v0
-; VI-NEXT: v_or_b32_e32 v0, v0, v1
+; VI-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; VI-NEXT: v_alignbit_b32 v0, v1, v0, 16
 ; VI-NEXT: v_mov_b32_e32 v1, v0
 ; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
 ; VI-NEXT: s_endpgm
diff --git a/llvm/test/CodeGen/AMDGPU/shift-i128.ll b/llvm/test/CodeGen/AMDGPU/shift-i128.ll
--- a/llvm/test/CodeGen/AMDGPU/shift-i128.ll
+++ b/llvm/test/CodeGen/AMDGPU/shift-i128.ll
@@ -83,9 +83,7 @@
 ; GCN-LABEL: v_shl_i128_vk:
 ; GCN: ; %bb.0:
 ; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_lshrrev_b32_e32 v4, 15, v1
-; GCN-NEXT: v_lshlrev_b32_e32 v5, 17, v2
-; GCN-NEXT: v_or_b32_e32 v4, v5, v4
+; GCN-NEXT: v_alignbit_b32 v4, v2, v1, 15
 ; GCN-NEXT: v_alignbit_b32 v1, v1, v0, 15
 ; GCN-NEXT: v_alignbit_b32 v3, v3, v2, 15
 ; GCN-NEXT: v_lshlrev_b32_e32 v0, 17, v0
@@ -113,9 +111,7 @@
 ; GCN: ; %bb.0:
 ; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GCN-NEXT: v_ashr_i64 v[4:5], v[2:3], 33
-; GCN-NEXT: v_lshlrev_b32_e32 v0, 31, v2
-; GCN-NEXT: v_lshrrev_b32_e32 v1, 1, v1
-; GCN-NEXT: v_or_b32_e32 v0, v1, v0
+; GCN-NEXT: v_alignbit_b32 v0, v2, v1, 1
 ; GCN-NEXT: v_alignbit_b32 v1, v3, v2, 1
 ; GCN-NEXT: v_mov_b32_e32 v2, v4
 ; GCN-NEXT: v_mov_b32_e32 v3, v5