diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -546,8 +546,8 @@
     setOperationAction(ISD::SIGN_EXTEND, MVT::i16, Promote);
     AddPromotedToType(ISD::SIGN_EXTEND, MVT::i16, MVT::i32);
 
-    setOperationAction(ISD::ROTR, MVT::i16, Promote);
-    setOperationAction(ISD::ROTL, MVT::i16, Promote);
+    setOperationAction(ISD::ROTR, MVT::i16, Expand);
+    setOperationAction(ISD::ROTL, MVT::i16, Expand);
 
     setOperationAction(ISD::SDIV, MVT::i16, Promote);
     setOperationAction(ISD::UDIV, MVT::i16, Promote);
diff --git a/llvm/test/CodeGen/AMDGPU/rotl.ll b/llvm/test/CodeGen/AMDGPU/rotl.ll
--- a/llvm/test/CodeGen/AMDGPU/rotl.ll
+++ b/llvm/test/CodeGen/AMDGPU/rotl.ll
@@ -55,3 +55,28 @@
   store <4 x i32> %3, <4 x i32> addrspace(1)* %in
   ret void
 }
+
+; GCN-LABEL: @test_rotl_i16
+; GCN: global_load_ushort [[X:v[0-9]+]]
+; GCN: global_load_ushort [[D:v[0-9]+]]
+; GCN: v_sub_nc_u16_e64 [[NX:v[0-9]+]], 0, [[X]]
+; GCN: v_and_b32_e32 [[XAND:v[0-9]+]], 15, [[X]]
+; GCN: v_and_b32_e32 [[NXAND:v[0-9]+]], 15, [[NX]]
+; GCN: v_lshlrev_b16_e64 [[LO:v[0-9]+]], [[XAND]], [[D]]
+; GCN: v_lshrrev_b16_e64 [[HI:v[0-9]+]], [[NXAND]], [[D]]
+; GCN: v_or_b32_e32 [[RES:v[0-9]+]], [[LO]], [[HI]]
+; GCN: global_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RES]]
+
+declare i16 @llvm.fshl.i16(i16, i16, i16)
+
+define void @test_rotl_i16(i16 addrspace(1)* nocapture readonly %sourceA, i16 addrspace(1)* nocapture readonly %sourceB, i16 addrspace(1)* nocapture %destValues) {
+entry:
+  %arrayidx = getelementptr inbounds i16, i16 addrspace(1)* %sourceA, i64 16
+  %a = load i16, i16 addrspace(1)* %arrayidx
+  %arrayidx2 = getelementptr inbounds i16, i16 addrspace(1)* %sourceB, i64 24
+  %b = load i16, i16 addrspace(1)* %arrayidx2
+  %c = tail call i16 @llvm.fshl.i16(i16 %a, i16 %a, i16 %b)
+  %arrayidx5 = getelementptr inbounds i16, i16 addrspace(1)* %destValues, i64 4
+  store i16 %c, i16 addrspace(1)* %arrayidx5
+  ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/rotr.ll b/llvm/test/CodeGen/AMDGPU/rotr.ll
--- a/llvm/test/CodeGen/AMDGPU/rotr.ll
+++ b/llvm/test/CodeGen/AMDGPU/rotr.ll
@@ -51,3 +51,28 @@
   store <4 x i32> %tmp3, <4 x i32> addrspace(1)* %in
   ret void
 }
+
+; GCN-LABEL: @test_rotr_i16
+; GCN: global_load_ushort [[X:v[0-9]+]]
+; GCN: global_load_ushort [[D:v[0-9]+]]
+; GCN: v_sub_nc_u16_e64 [[NX:v[0-9]+]], 0, [[X]]
+; GCN: v_and_b32_e32 [[XAND:v[0-9]+]], 15, [[X]]
+; GCN: v_and_b32_e32 [[NXAND:v[0-9]+]], 15, [[NX]]
+; GCN: v_lshrrev_b16_e64 [[LO:v[0-9]+]], [[XAND]], [[D]]
+; GCN: v_lshlrev_b16_e64 [[HI:v[0-9]+]], [[NXAND]], [[D]]
+; GCN: v_or_b32_e32 [[RES:v[0-9]+]], [[LO]], [[HI]]
+; GCN: global_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RES]]
+
+declare i16 @llvm.fshr.i16(i16, i16, i16)
+
+define void @test_rotr_i16(i16 addrspace(1)* nocapture readonly %sourceA, i16 addrspace(1)* nocapture readonly %sourceB, i16 addrspace(1)* nocapture %destValues) {
+entry:
+  %arrayidx = getelementptr inbounds i16, i16 addrspace(1)* %sourceA, i64 16
+  %a = load i16, i16 addrspace(1)* %arrayidx
+  %arrayidx2 = getelementptr inbounds i16, i16 addrspace(1)* %sourceB, i64 24
+  %b = load i16, i16 addrspace(1)* %arrayidx2
+  %c = tail call i16 @llvm.fshr.i16(i16 %a, i16 %a, i16 %b)
+  %arrayidx5 = getelementptr inbounds i16, i16 addrspace(1)* %destValues, i64 4
+  store i16 %c, i16 addrspace(1)* %arrayidx5
+  ret void
+}