Index: lib/Target/AMDGPU/SIISelLowering.h
===================================================================
--- lib/Target/AMDGPU/SIISelLowering.h
+++ lib/Target/AMDGPU/SIISelLowering.h
@@ -59,6 +59,8 @@
   SDValue getSegmentAperture(unsigned AS, SelectionDAG &DAG) const;
 
   SDValue lowerADDRSPACECAST(SDValue Op, SelectionDAG &DAG) const;
+  SDValue lowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
+  SDValue lowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
   SDValue lowerTRAP(SDValue Op, SelectionDAG &DAG) const;
 
   void adjustWritemask(MachineSDNode *&N, SelectionDAG &DAG) const;
@@ -167,6 +169,9 @@
   MVT getScalarShiftAmountTy(const DataLayout &, EVT) const override;
   bool isFMAFasterThanFMulAndFAdd(EVT VT) const override;
   SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
+  void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
+                          SelectionDAG &DAG) const override;
+
   SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;
   SDNode *PostISelFolding(MachineSDNode *N, SelectionDAG &DAG) const override;
   void AdjustInstrPostInstrSelection(MachineInstr &MI,
Index: lib/Target/AMDGPU/SIISelLowering.cpp
===================================================================
--- lib/Target/AMDGPU/SIISelLowering.cpp
+++ lib/Target/AMDGPU/SIISelLowering.cpp
@@ -189,6 +189,13 @@
   setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i32, Expand);
   setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16f32, Expand);
 
+  // Avoid stack access for these.
+  // TODO: Generalize to more vector types.
+  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i16, Custom);
+  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f16, Custom);
+  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i16, Custom);
+  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f16, Custom);
+
   // BUFFER/FLAT_ATOMIC_CMP_SWAP on GCN GPUs needs input marshalling,
   // and output demarshalling
   setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom);
@@ -1872,6 +1879,10 @@
   case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, DAG);
   case ISD::INTRINSIC_VOID: return LowerINTRINSIC_VOID(Op, DAG);
   case ISD::ADDRSPACECAST: return lowerADDRSPACECAST(Op, DAG);
+  case ISD::INSERT_VECTOR_ELT:
+    return lowerINSERT_VECTOR_ELT(Op, DAG);
+  case ISD::EXTRACT_VECTOR_ELT:
+    return lowerEXTRACT_VECTOR_ELT(Op, DAG);
   case ISD::TRAP: return lowerTRAP(Op, DAG);
   case ISD::FP_ROUND:
     return lowerFP_ROUND(Op, DAG);
@@ -1879,6 +1890,25 @@
   return SDValue();
 }
 
+void SITargetLowering::ReplaceNodeResults(SDNode *N,
+                                          SmallVectorImpl<SDValue> &Results,
+                                          SelectionDAG &DAG) const {
+  switch (N->getOpcode()) {
+  case ISD::INSERT_VECTOR_ELT: {
+    if (SDValue Res = lowerINSERT_VECTOR_ELT(SDValue(N, 0), DAG))
+      Results.push_back(Res);
+    return;
+  }
+  case ISD::EXTRACT_VECTOR_ELT: {
+    if (SDValue Res = lowerEXTRACT_VECTOR_ELT(SDValue(N, 0), DAG))
+      Results.push_back(Res);
+    return;
+  }
+  default:
+    break;
+  }
+}
+
 /// \brief Helper function for LowerBRCOND
 static SDNode *findUser(SDValue Value, unsigned Opcode) {
@@ -2174,6 +2204,76 @@
   return DAG.getUNDEF(ASC->getValueType(0));
 }
 
+SDValue SITargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
+                                                 SelectionDAG &DAG) const {
+  SDValue Idx = Op.getOperand(2);
+  if (isa<ConstantSDNode>(Idx))
+    return SDValue();
+
+  // Avoid stack access for dynamic indexing.
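+  // The whole v2i16/v2f16 vector lives in a single 32-bit register, so
+  // instead of pushing the vector through a stack slot, build a 16-bit wide
+  // mask at the selected element's bit position (the shl of 0xffff below,
+  // i.e. v_bfm_b32) and blend the new value into the old vector with
+  // and/not/or, which instruction selection matches as v_bfi_b32.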
+  SDLoc SL(Op);
+  SDValue Vec = Op.getOperand(0);
+  SDValue Val = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Op.getOperand(1));
+
+  // v_bfi_b32 (v_bfm_b32 16, (shl idx, 16)), val, vec
+  SDValue ExtVal = DAG.getNode(ISD::ZERO_EXTEND, SL, MVT::i32, Val);
+
+  // Convert vector index to bit-index.
+  SDValue ScaledIdx = DAG.getNode(ISD::SHL, SL, MVT::i32, Idx,
+                                  DAG.getConstant(16, SL, MVT::i32));
+
+  SDValue BCVec = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Vec);
+
+  SDValue BFM = DAG.getNode(ISD::SHL, SL, MVT::i32,
+                            DAG.getConstant(0xffff, SL, MVT::i32),
+                            ScaledIdx);
+
+  SDValue LHS = DAG.getNode(ISD::AND, SL, MVT::i32, BFM, ExtVal);
+  SDValue RHS = DAG.getNode(ISD::AND, SL, MVT::i32,
+                            DAG.getNOT(SL, BFM, MVT::i32), BCVec);
+
+  SDValue BFI = DAG.getNode(ISD::OR, SL, MVT::i32, LHS, RHS);
+  return DAG.getNode(ISD::BITCAST, SL, Op.getValueType(), BFI);
+}
+
+SDValue SITargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
+                                                  SelectionDAG &DAG) const {
+  SDLoc SL(Op);
+
+  EVT ResultVT = Op.getValueType();
+  SDValue Vec = Op.getOperand(0);
+  SDValue Idx = Op.getOperand(1);
+
+  if (const ConstantSDNode *CIdx = dyn_cast<ConstantSDNode>(Idx)) {
+    SDValue Result = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Vec);
+
+    if (CIdx->getZExtValue() == 1) {
+      Result = DAG.getNode(ISD::SRL, SL, MVT::i32, Result,
+                           DAG.getConstant(16, SL, MVT::i32));
+    } else {
+      assert(CIdx->getZExtValue() == 0);
+    }
+
+    if (ResultVT.bitsLT(MVT::i32))
+      Result = DAG.getNode(ISD::TRUNCATE, SL, MVT::i16, Result);
+    return DAG.getNode(ISD::BITCAST, SL, ResultVT, Result);
+  }
+
+  SDValue Sixteen = DAG.getConstant(16, SL, MVT::i32);
+
+  // Convert vector index to bit-index.
+  SDValue ScaledIdx = DAG.getNode(ISD::SHL, SL, MVT::i32, Idx, Sixteen);
+
+  SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Vec);
+  SDValue Elt = DAG.getNode(ISD::SRL, SL, MVT::i32, BC, ScaledIdx);
+
+  SDValue Result = Elt;
+  if (ResultVT.bitsLT(MVT::i32))
+    Result = DAG.getNode(ISD::TRUNCATE, SL, MVT::i16, Result);
+
+  return DAG.getNode(ISD::BITCAST, SL, ResultVT, Result);
+}
+
 bool SITargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
   // We can fold offsets for anything that doesn't require a GOT relocation.
Index: test/CodeGen/AMDGPU/amdgpu.private-memory.ll
===================================================================
--- test/CodeGen/AMDGPU/amdgpu.private-memory.ll
+++ test/CodeGen/AMDGPU/amdgpu.private-memory.ll
@@ -227,10 +227,14 @@
 ; R600: MOVA_INT
 
-; SI-PROMOTE-DAG: buffer_store_short v{{[0-9]+}}, off, s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}} ; encoding: [0x00,0x00,0x68,0xe0
-; SI-PROMOTE-DAG: buffer_store_short v{{[0-9]+}}, off, s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}} offset:2 ; encoding: [0x02,0x00,0x68,0xe0
+; SI-ALLOCA-DAG: buffer_store_short v{{[0-9]+}}, off, s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}} ; encoding: [0x00,0x00,0x68,0xe0
+; SI-ALLOCA-DAG: buffer_store_short v{{[0-9]+}}, off, s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}} offset:2 ; encoding: [0x02,0x00,0x68,0xe0
 
 ; Loaded value is 0 or 1, so sext will become zext, so we get buffer_load_ushort instead of buffer_load_sshort.
-; SI-PROMOTE: buffer_load_ushort v{{[0-9]+}}, v{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}}
+; SI-ALLOCA: buffer_load_sshort v{{[0-9]+}}, v{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}}
+
+; SI-PROMOTE: s_load_dword [[IDX:s[0-9]+]]
+; SI-PROMOTE: s_lshl_b32 [[SCALED_IDX:s[0-9]+]], [[IDX]], 16
+; SI-PROMOTE: v_bfe_u32 v{{[0-9]+}}, v{{[0-9]+}}, [[SCALED_IDX]], 16
 define void @short_array(i32 addrspace(1)* %out, i32 %index) #0 {
 entry:
   %0 = alloca [2 x i16]
Index: test/CodeGen/AMDGPU/extract_vector_elt-f16.ll
===================================================================
--- /dev/null
+++ test/CodeGen/AMDGPU/extract_vector_elt-f16.ll
@@ -0,0 +1,128 @@
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+
+; GCN-LABEL: {{^}}extract_vector_elt_v2f16:
+; GCN: s_load_dword [[VEC:s[0-9]+]]
+; GCN: s_lshr_b32 [[ELT1:s[0-9]+]], [[VEC]], 16
+; GCN-DAG: v_mov_b32_e32 [[VELT0:v[0-9]+]], [[VEC]]
+; GCN-DAG: v_mov_b32_e32 [[VELT1:v[0-9]+]], [[ELT1]]
+; GCN-DAG: buffer_store_short [[VELT0]]
+; GCN-DAG: buffer_store_short [[VELT1]]
+define void @extract_vector_elt_v2f16(half addrspace(1)* %out, <2 x half> addrspace(2)* %vec.ptr) #0 {
+  %vec = load <2 x half>, <2 x half> addrspace(2)* %vec.ptr
+  %p0 = extractelement <2 x half> %vec, i32 0
+  %p1 = extractelement <2 x half> %vec, i32 1
+  %out1 = getelementptr half, half addrspace(1)* %out, i32 10
+  store half %p1, half addrspace(1)* %out, align 2
+  store half %p0, half addrspace(1)* %out1, align 2
+  ret void
+}
+
+; GCN-LABEL: {{^}}extract_vector_elt_v2f16_dynamic_sgpr:
+; GCN: s_load_dword [[IDX:s[0-9]+]]
+; GCN: s_load_dword [[VEC:s[0-9]+]]
+; GCN: s_lshl_b32 [[IDX_SCALED:s[0-9]+]], [[IDX]], 16
+; GCN: s_lshr_b32 [[ELT1:s[0-9]+]], [[VEC]], [[IDX_SCALED]]
+; GCN: v_mov_b32_e32 [[VELT1:v[0-9]+]], [[ELT1]]
+; GCN: buffer_store_short [[VELT1]]
+; GCN: ScratchSize: 0
+define void @extract_vector_elt_v2f16_dynamic_sgpr(half addrspace(1)* %out, <2 x half> addrspace(2)* %vec.ptr, i32 %idx) #0 {
+  %vec = load <2 x half>, <2 x half> addrspace(2)* %vec.ptr
+  %elt = extractelement <2 x half> %vec, i32 %idx
+  store half %elt, half addrspace(1)* %out, align 2
+  ret void
+}
+
+; GCN-LABEL: {{^}}extract_vector_elt_v2f16_dynamic_vgpr:
+; GCN-DAG: s_load_dword [[VEC:s[0-9]+]]
+; GCN-DAG: {{flat|buffer}}_load_dword [[IDX:v[0-9]+]]
+; GCN: v_lshlrev_b32_e32 [[IDX_SCALED:v[0-9]+]], 16, [[IDX]]
+
+; SI: v_lshr_b32_e32 [[ELT:v[0-9]+]], [[VEC]], [[IDX_SCALED]]
+; VI: v_lshrrev_b32_e64 [[ELT:v[0-9]+]], [[IDX_SCALED]], [[VEC]]
+
+
+; SI: buffer_store_short [[ELT]]
+; VI: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[ELT]]
+; GCN: ScratchSize: 0{{$}}
+define void @extract_vector_elt_v2f16_dynamic_vgpr(half addrspace(1)* %out, <2 x half> addrspace(2)* %vec.ptr, i32 addrspace(1)* %idx.ptr) #0 {
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %tid.ext = sext i32 %tid to i64
+  %gep = getelementptr inbounds i32, i32 addrspace(1)* %idx.ptr, i64 %tid.ext
+  %out.gep = getelementptr inbounds half, half addrspace(1)* %out, i64 %tid.ext
+  %vec = load <2 x half>, <2 x half> addrspace(2)* %vec.ptr
+  %idx = load i32, i32 addrspace(1)* %gep
+  %elt = extractelement <2 x half> %vec, i32 %idx
+  store half %elt, half addrspace(1)* %out.gep, align 2
+  ret void
+}
+
+; GCN-LABEL: {{^}}extract_vector_elt_v3f16:
+; GCN: buffer_load_ushort
+; GCN: buffer_store_short
+; GCN: buffer_store_short
+define void @extract_vector_elt_v3f16(half addrspace(1)* %out, <3 x half> %foo) #0 {
+  %p0 = extractelement <3 x half> %foo, i32 0
+  %p1 = extractelement <3 x half> %foo, i32 2
+  %out1 = getelementptr half, half addrspace(1)* %out, i32 1
+  store half %p1, half addrspace(1)* %out, align 2
+  store half %p0, half addrspace(1)* %out1, align 2
+  ret void
+}
+
+; GCN-LABEL: {{^}}extract_vector_elt_v4f16:
+; GCN: buffer_load_ushort
+; GCN: buffer_load_ushort
+; GCN: buffer_store_short
+; GCN: buffer_store_short
+define void @extract_vector_elt_v4f16(half addrspace(1)* %out, <4 x half> %foo) #0 {
+  %p0 = extractelement <4 x half> %foo, i32 0
+  %p1 = extractelement <4 x half> %foo, i32 2
+  %out1 = getelementptr half, half addrspace(1)* %out, i32 10
+  store half %p1, half addrspace(1)* %out, align 2
+  store half %p0, half addrspace(1)* %out1, align 2
+  ret void
+}
+
+; GCN-LABEL: {{^}}dynamic_extract_vector_elt_v3f16:
+; GCN: buffer_load_ushort
+; GCN: buffer_load_ushort
+; GCN: buffer_load_ushort
+
+; GCN: buffer_store_short
+; GCN: buffer_store_short
+; GCN: buffer_store_short
+
+; GCN: buffer_load_ushort
+; GCN: buffer_store_short
+define void @dynamic_extract_vector_elt_v3f16(half addrspace(1)* %out, <3 x half> %foo, i32 %idx) #0 {
+  %p0 = extractelement <3 x half> %foo, i32 %idx
+  %out1 = getelementptr half, half addrspace(1)* %out, i32 1
+  store half %p0, half addrspace(1)* %out
+  ret void
+}
+
+; GCN-LABEL: {{^}}dynamic_extract_vector_elt_v4f16:
+; GCN: buffer_load_ushort
+; GCN: buffer_load_ushort
+; GCN: buffer_load_ushort
+; GCN: buffer_load_ushort
+
+; GCN: buffer_store_short
+; GCN: buffer_store_short
+; GCN: buffer_store_short
+; GCN: buffer_store_short
+
+; GCN: buffer_load_ushort
+; GCN: buffer_store_short
+define void @dynamic_extract_vector_elt_v4f16(half addrspace(1)* %out, <4 x half> %foo, i32 %idx) #0 {
+  %p0 = extractelement <4 x half> %foo, i32 %idx
+  %out1 = getelementptr half, half addrspace(1)* %out, i32 1
+  store half %p0, half addrspace(1)* %out
+  ret void
+}
+
+declare i32 @llvm.amdgcn.workitem.id.x() #1
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }
Index: test/CodeGen/AMDGPU/extract_vector_elt-i16.ll
===================================================================
--- test/CodeGen/AMDGPU/extract_vector_elt-i16.ll
+++ test/CodeGen/AMDGPU/extract_vector_elt-i16.ll
@@ -2,13 +2,16 @@
 ; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
 
 ; GCN-LABEL: {{^}}extract_vector_elt_v2i16:
-; GCN: buffer_load_ushort
-; GCN: buffer_load_ushort
-; GCN: buffer_store_short
-; GCN: buffer_store_short
-define void @extract_vector_elt_v2i16(i16 addrspace(1)* %out, <2 x i16> %foo) #0 {
-  %p0 = extractelement <2 x i16> %foo, i32 0
-  %p1 = extractelement <2 x i16> %foo, i32 1
+; GCN: s_load_dword [[VEC:s[0-9]+]]
+; GCN: s_lshr_b32 [[ELT1:s[0-9]+]], [[VEC]], 16
+; GCN-DAG: v_mov_b32_e32 [[VELT0:v[0-9]+]], [[VEC]]
+; GCN-DAG: v_mov_b32_e32 [[VELT1:v[0-9]+]], [[ELT1]]
+; GCN-DAG: buffer_store_short [[VELT0]]
+; GCN-DAG: buffer_store_short [[VELT1]]
+define void @extract_vector_elt_v2i16(i16 addrspace(1)* %out, <2 x i16> addrspace(2)* %vec.ptr) #0 {
+  %vec = load <2 x i16>, <2 x i16> addrspace(2)* %vec.ptr
+  %p0 = extractelement <2 x i16> %vec, i32 0
+  %p1 = extractelement <2 x i16> %vec, i32 1
   %out1 = getelementptr i16, i16 addrspace(1)* %out, i32 10
   store i16 %p1, i16 addrspace(1)* %out, align 2
   store i16 %p0, i16 addrspace(1)* %out1, align 2
@@ -16,10 +19,13 @@
 }
 
 ; GCN-LABEL: {{^}}extract_vector_elt_v2i16_dynamic_sgpr:
-; GCN: s_load_dword [[VEC:s[0-9]+]]
 ; GCN: s_load_dword [[IDX:s[0-9]+]]
-; GCN: s_lshr_b32 s{{[0-9]+}}, [[IDX]], 16
-; GCN: v_mov_b32_e32 [[VVEC:v[0-9]+]], [[VEC]]
+; GCN: s_load_dword [[VEC:s[0-9]+]]
+; GCN: s_lshl_b32 [[IDX_SCALED:s[0-9]+]], [[IDX]], 16
+; GCN: s_lshr_b32 [[ELT1:s[0-9]+]], [[VEC]], [[IDX_SCALED]]
+; GCN: v_mov_b32_e32 [[VELT1:v[0-9]+]], [[ELT1]]
+; GCN: buffer_store_short [[VELT1]]
+; GCN: ScratchSize: 0
 define void @extract_vector_elt_v2i16_dynamic_sgpr(i16 addrspace(1)* %out, <2 x i16> addrspace(2)* %vec.ptr, i32 %idx) #0 {
   %vec = load <2 x i16>, <2 x i16> addrspace(2)* %vec.ptr
   %elt = extractelement <2 x i16> %vec, i32 %idx
@@ -28,16 +34,23 @@
 }
 
 ; GCN-LABEL: {{^}}extract_vector_elt_v2i16_dynamic_vgpr:
-; GCN: {{buffer|flat}}_load_dword [[IDX:v[0-9]+]]
-; GCN: buffer_load_dword [[VEC:v[0-9]+]]
-; GCN: v_lshrrev_b32_e32 [[ELT:v[0-9]+]], 16, [[VEC]]
-define void @extract_vector_elt_v2i16_dynamic_vgpr(i16 addrspace(1)* %out, <2 x i16> addrspace(1)* %vec.ptr, i32 addrspace(1)* %idx.ptr) #0 {
+; GCN-DAG: s_load_dword [[VEC:s[0-9]+]]
+; GCN-DAG: {{flat|buffer}}_load_dword [[IDX:v[0-9]+]]
+; GCN: v_lshlrev_b32_e32 [[IDX_SCALED:v[0-9]+]], 16, [[IDX]]
+
+; SI: v_lshr_b32_e32 [[ELT:v[0-9]+]], [[VEC]], [[IDX_SCALED]]
+; VI: v_lshrrev_b32_e64 [[ELT:v[0-9]+]], [[IDX_SCALED]], [[VEC]]
+
+; SI: buffer_store_short [[ELT]]
+; VI: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[ELT]]
+; GCN: ScratchSize: 0{{$}}
+define void @extract_vector_elt_v2i16_dynamic_vgpr(i16 addrspace(1)* %out, <2 x i16> addrspace(2)* %vec.ptr, i32 addrspace(1)* %idx.ptr) #0 {
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %gep = getelementptr inbounds i32, i32 addrspace(1)* %idx.ptr, i64 %tid.ext
   %out.gep = getelementptr inbounds i16, i16 addrspace(1)* %out, i64 %tid.ext
   %idx = load volatile i32, i32 addrspace(1)* %gep
-  %vec = load <2 x i16>, <2 x i16> addrspace(1)* %vec.ptr
+  %vec = load <2 x i16>, <2 x i16> addrspace(2)* %vec.ptr
   %elt = extractelement <2 x i16> %vec, i32 %idx
   store i16 %elt, i16 addrspace(1)* %out.gep, align 2
   ret void
Index: test/CodeGen/AMDGPU/insert_vector_elt.v2i16.ll
===================================================================
--- /dev/null
+++ test/CodeGen/AMDGPU/insert_vector_elt.v2i16.ll
@@ -0,0 +1,350 @@
+; RUN: llc -verify-machineinstrs -march=amdgcn -mcpu=fiji -mattr=+flat-for-global < %s | FileCheck -check-prefix=GCN -check-prefix=CIVI -check-prefix=VI %s
+; RUN: llc -verify-machineinstrs -march=amdgcn -mcpu=hawaii -mattr=+flat-for-global < %s | FileCheck -check-prefix=GCN -check-prefix=CIVI -check-prefix=CI %s
+
+; GCN-LABEL: {{^}}s_insertelement_v2i16_0:
+; GCN: s_load_dword [[VEC:s[0-9]+]]
+
+; CIVI: s_and_b32 [[ELT1:s[0-9]+]], [[VEC]], 0xffff0000{{$}}
+; CIVI: s_or_b32 s{{[0-9]+}}, [[ELT1]], 0x3e7{{$}}
+define void @s_insertelement_v2i16_0(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(2)* %vec.ptr) #0 {
+  %vec = load <2 x i16>, <2 x i16> addrspace(2)* %vec.ptr
+  %vecins = insertelement <2 x i16> %vec, i16 999, i32 0
+  store <2 x i16> %vecins, <2 x i16> addrspace(1)* %out
+  ret void
+}
+
+; GCN-LABEL: {{^}}s_insertelement_v2i16_0_reg:
+; GCN: s_load_dword [[ELT0:s[0-9]+]]
+; GCN: s_load_dword [[VEC:s[0-9]+]]
+
+; CIVI-DAG: s_and_b32 [[ELT0]], [[ELT0]], 0xffff{{$}}
+; CIVI-DAG: s_and_b32 [[ELT1:s[0-9]+]], [[VEC]], 0xffff0000{{$}}
+; CIVI: s_or_b32 s{{[0-9]+}}, [[ELT0]], [[ELT1]]
+define void @s_insertelement_v2i16_0_reg(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(2)* %vec.ptr, i16 %elt) #0 {
+  %vec = load <2 x i16>, <2 x i16> addrspace(2)* %vec.ptr
+  %vecins = insertelement <2 x i16> %vec, i16 %elt, i32 0
+  store <2 x i16> %vecins, <2 x i16> addrspace(1)* %out
+  ret void
+}
+
+; GCN-LABEL: {{^}}s_insertelement_v2i16_0_reghi:
+; GCN: s_load_dword [[ELT0:s[0-9]+]]
+; GCN: s_load_dword [[VEC:s[0-9]+]]
+
+; CIVI-DAG: s_and_b32 [[ELT1:s[0-9]+]], [[VEC]], 0xffff0000{{$}}
+; CIVI: s_or_b32 s{{[0-9]+}}, [[ELT0]], [[ELT1]]
+define void @s_insertelement_v2i16_0_reghi(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(2)* %vec.ptr, i32 %elt.arg) #0 {
+  %vec = load <2 x i16>, <2 x i16> addrspace(2)* %vec.ptr
+  %elt.hi = lshr i32 %elt.arg, 16
+  %elt = trunc i32 %elt.hi to i16
+  %vecins = insertelement <2 x i16> %vec, i16 %elt, i32 0
+  store <2 x i16> %vecins, <2 x i16> addrspace(1)* %out
+  ret void
+}
+
+; GCN-LABEL: {{^}}s_insertelement_v2i16_1:
+; GCN: s_load_dword [[VEC:s[0-9]+]]
+
+; GCN-NOT: s_lshr
+; GCN: s_and_b32 [[ELT0:s[0-9]+]], [[VEC]], 0xffff{{$}}
+; GCN: s_or_b32 [[INS:s[0-9]+]], [[ELT0]], 0x3e70000
+define void @s_insertelement_v2i16_1(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(2)* %vec.ptr) #0 {
+  %vec = load <2 x i16>, <2 x i16> addrspace(2)* %vec.ptr
+  %vecins = insertelement <2 x i16> %vec, i16 999, i32 1
+  store <2 x i16> %vecins, <2 x i16> addrspace(1)* %out
+  ret void
+}
+
+; GCN-LABEL: {{^}}s_insertelement_v2i16_1_reg:
+; GCN: s_load_dword [[ELT1:s[0-9]+]]
+; GCN: s_load_dword [[VEC:s[0-9]+]]
+
+; CIVI: s_and_b32 [[ELT0:s[0-9]+]], [[VEC]], 0xffff{{$}}
+; CIVI: s_or_b32 s{{[0-9]+}}, [[ELT0]], [[ELT1]]
+
+; GCN-NOT: shlr
+define void @s_insertelement_v2i16_1_reg(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(2)* %vec.ptr, i16 %elt) #0 {
+  %vec = load <2 x i16>, <2 x i16> addrspace(2)* %vec.ptr
+  %vecins = insertelement <2 x i16> %vec, i16 %elt, i32 1
+  store <2 x i16> %vecins, <2 x i16> addrspace(1)* %out
+  ret void
+}
+
+; GCN-LABEL: {{^}}s_insertelement_v2f16_0:
+; GCN: s_load_dword [[VEC:s[0-9]+]]
+; CIVI: s_and_b32 [[ELT1:s[0-9]+]], [[VEC:s[0-9]+]], 0xffff0000
+; CIVI: s_or_b32 s{{[0-9]+}}, [[ELT1]], 0x4500
+define void @s_insertelement_v2f16_0(<2 x half> addrspace(1)* %out, <2 x half> addrspace(2)* %vec.ptr) #0 {
+  %vec = load <2 x half>, <2 x half> addrspace(2)* %vec.ptr
+  %vecins = insertelement <2 x half> %vec, half 5.000000e+00, i32 0
+  store <2 x half> %vecins, <2 x half> addrspace(1)* %out
+  ret void
+}
+
+; GCN-LABEL: {{^}}s_insertelement_v2f16_1:
+; GCN: s_load_dword [[VEC:s[0-9]+]]
+; GCN-NOT: s_lshr
+; GCN: s_and_b32 [[ELT0:s[0-9]+]], [[VEC]], 0xffff{{$}}
+; GCN: s_or_b32 [[INS:s[0-9]+]], [[ELT0]], 0x45000000
+define void @s_insertelement_v2f16_1(<2 x half> addrspace(1)* %out, <2 x half> addrspace(2)* %vec.ptr) #0 {
+  %vec = load <2 x half>, <2 x half> addrspace(2)* %vec.ptr
+  %vecins = insertelement <2 x half> %vec, half 5.000000e+00, i32 1
+  store <2 x half> %vecins, <2 x half> addrspace(1)* %out
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_insertelement_v2i16_0:
+; GCN: flat_load_dword [[VEC:v[0-9]+]]
+; CIVI: v_and_b32_e32 [[ELT1:v[0-9]+]], 0xffff0000, [[VEC]]
+; CIVI: v_or_b32_e32 [[RES:v[0-9]+]], 0x3e7, [[ELT1]]
+; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RES]]
+define void @v_insertelement_v2i16_0(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %in) #0 {
+  %tid = call i32 @llvm.amdgcn.workitem.id.x() #1
+  %tid.ext = sext i32 %tid to i64
+  %in.gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %in, i64 %tid.ext
+  %out.gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %out, i64 %tid.ext
+  %vec = load <2 x i16>, <2 x i16> addrspace(1)* %in.gep
+  %vecins = insertelement <2 x i16> %vec, i16 999, i32 0
+  store <2 x i16> %vecins, <2 x i16> addrspace(1)* %out.gep
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_insertelement_v2i16_0_reghi:
+; GCN-DAG: flat_load_dword [[VEC:v[0-9]+]]
+; GCN-DAG: s_load_dword [[ELT0:s[0-9]+]]
+
+; CIVI-DAG: s_lshr_b32 [[ELT0_SHIFT:s[0-9]+]], [[ELT0]], 16
+; CIVI-DAG: v_and_b32_e32 [[ELT1:v[0-9]+]], 0xffff0000, [[VEC]]
+; CIVI: v_or_b32_e32 [[RES:v[0-9]+]], [[ELT0_SHIFT]], [[ELT1]]
+
+; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RES]]
+define void @v_insertelement_v2i16_0_reghi(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %in, i32 %elt.arg) #0 {
+  %tid = call i32 @llvm.amdgcn.workitem.id.x() #1
+  %tid.ext = sext i32 %tid to i64
+  %in.gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %in, i64 %tid.ext
+  %out.gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %out, i64 %tid.ext
+  %vec = load <2 x i16>, <2 x i16> addrspace(1)* %in.gep
+  %elt.hi = lshr i32 %elt.arg, 16
+  %elt = trunc i32 %elt.hi to i16
+  %vecins = insertelement <2 x i16> %vec, i16 %elt, i32 0
+  store <2 x i16> %vecins, <2 x i16> addrspace(1)* %out.gep
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_insertelement_v2i16_0_inlineimm:
+; GCN-DAG: flat_load_dword [[VEC:v[0-9]+]]
+
+; CIVI: v_and_b32_e32 [[ELT1:v[0-9]+]], 0xffff0000, [[VEC]]
+; CIVI: v_or_b32_e32 [[RES:v[0-9]+]], 53, [[ELT1]]
+
+; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RES]]
+define void @v_insertelement_v2i16_0_inlineimm(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %in) #0 {
+  %tid = call i32 @llvm.amdgcn.workitem.id.x() #1
+  %tid.ext = sext i32 %tid to i64
+  %in.gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %in, i64 %tid.ext
+  %out.gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %out, i64 %tid.ext
+  %vec = load <2 x i16>, <2 x i16> addrspace(1)* %in.gep
+  %vecins = insertelement <2 x i16> %vec, i16 53, i32 0
+  store <2 x i16> %vecins, <2 x i16> addrspace(1)* %out.gep
+  ret void
+}
+
+; FIXME: fold lshl_or c0, c1, v0 -> or (c0 << c1), v0
+
+; GCN-LABEL: {{^}}v_insertelement_v2i16_1:
+; GCN: flat_load_dword [[VEC:v[0-9]+]]
+; CIVI: v_or_b32_e32 [[RES:v[0-9]+]], 0x3e70000, [[VEC]]
+
+; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RES]]
+define void @v_insertelement_v2i16_1(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %in) #0 {
+  %tid = call i32 @llvm.amdgcn.workitem.id.x() #1
+  %tid.ext = sext i32 %tid to i64
+  %in.gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %in, i64 %tid.ext
+  %out.gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %out, i64 %tid.ext
+  %vec = load <2 x i16>, <2 x i16> addrspace(1)* %in.gep
+  %vecins = insertelement <2 x i16> %vec, i16 999, i32 1
+  store <2 x i16> %vecins, <2 x i16> addrspace(1)* %out.gep
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_insertelement_v2i16_1_inlineimm:
+; GCN: flat_load_dword [[VEC:v[0-9]+]]
+; GCN: v_and_b32_e32 [[ELT0:v[0-9]+]], 0xffff, [[VEC]]
+; CIVI: v_or_b32_e32 [[RES:v[0-9]+]], 0xfff10000, [[ELT0]]
+
+; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RES]]
+define void @v_insertelement_v2i16_1_inlineimm(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %in) #0 {
+  %tid = call i32 @llvm.amdgcn.workitem.id.x() #1
+  %tid.ext = sext i32 %tid to i64
+  %in.gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %in, i64 %tid.ext
+  %out.gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %out, i64 %tid.ext
+  %vec = load <2 x i16>, <2 x i16> addrspace(1)* %in.gep
+  %vecins = insertelement <2 x i16> %vec, i16 -15, i32 1
+  store <2 x i16> %vecins, <2 x i16> addrspace(1)* %out.gep
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_insertelement_v2f16_0:
+; GCN: flat_load_dword [[VEC:v[0-9]+]]
+
+; CIVI: v_and_b32_e32 [[ELT1:v[0-9]+]], 0xffff0000, [[VEC]]
+; CIVI: v_or_b32_e32 [[RES:v[0-9]+]], 0x4500, [[ELT1]]
+
+; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RES]]
+define void @v_insertelement_v2f16_0(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* %in) #0 {
+  %tid = call i32 @llvm.amdgcn.workitem.id.x() #1
+  %tid.ext = sext i32 %tid to i64
+  %in.gep = getelementptr inbounds <2 x half>, <2 x half> addrspace(1)* %in, i64 %tid.ext
+  %out.gep = getelementptr inbounds <2 x half>, <2 x half> addrspace(1)* %out, i64 %tid.ext
+  %vec = load <2 x half>, <2 x half> addrspace(1)* %in.gep
+  %vecins = insertelement <2 x half> %vec, half 5.000000e+00, i32 0
+  store <2 x half> %vecins, <2 x half> addrspace(1)* %out.gep
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_insertelement_v2f16_0_inlineimm:
+; GCN: flat_load_dword [[VEC:v[0-9]+]]
+
+; CIVI: v_and_b32_e32 [[ELT1:v[0-9]+]], 0xffff0000, [[VEC]]
+; CIVI: v_or_b32_e32 [[RES:v[0-9]+]], 53, [[ELT1]]
+
+; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RES]]
+define void @v_insertelement_v2f16_0_inlineimm(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* %in) #0 {
+  %tid = call i32 @llvm.amdgcn.workitem.id.x() #1
+  %tid.ext = sext i32 %tid to i64
+  %in.gep = getelementptr inbounds <2 x half>, <2 x half> addrspace(1)* %in, i64 %tid.ext
+  %out.gep = getelementptr inbounds <2 x half>, <2 x half> addrspace(1)* %out, i64 %tid.ext
+  %vec = load <2 x half>, <2 x half> addrspace(1)* %in.gep
+  %vecins = insertelement <2 x half> %vec, half 0xH0035, i32 0
+  store <2 x half> %vecins, <2 x half> addrspace(1)* %out.gep
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_insertelement_v2f16_1:
+; GCN: flat_load_dword [[VEC:v[0-9]+]]
+; CIVI: v_or_b32_e32 [[RES:v[0-9]+]], 0x45000000, [[VEC]]
+
+; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RES]]
+define void @v_insertelement_v2f16_1(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* %in) #0 {
+  %tid = call i32 @llvm.amdgcn.workitem.id.x() #1
+  %tid.ext = sext i32 %tid to i64
+  %in.gep = getelementptr inbounds <2 x half>, <2 x half> addrspace(1)* %in, i64 %tid.ext
+  %out.gep = getelementptr inbounds <2 x half>, <2 x half> addrspace(1)* %out, i64 %tid.ext
+  %vec = load <2 x half>, <2 x half> addrspace(1)* %in.gep
+  %vecins = insertelement <2 x half> %vec, half 5.000000e+00, i32 1
+  store <2 x half> %vecins, <2 x half> addrspace(1)* %out.gep
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_insertelement_v2f16_1_inlineimm:
+; GCN: flat_load_dword [[VEC:v[0-9]+]]
+; GCN: v_and_b32_e32 [[ELT0:v[0-9]+]], 0xffff, [[VEC]]
+; CIVI: v_or_b32_e32 [[RES:v[0-9]+]], 0x230000, [[ELT0]]
+
+; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RES]]
+define void @v_insertelement_v2f16_1_inlineimm(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* %in) #0 {
+  %tid = call i32 @llvm.amdgcn.workitem.id.x() #1
+  %tid.ext = sext i32 %tid to i64
+  %in.gep = getelementptr inbounds <2 x half>, <2 x half> addrspace(1)* %in, i64 %tid.ext
+  %out.gep = getelementptr inbounds <2 x half>, <2 x half> addrspace(1)* %out, i64 %tid.ext
+  %vec = load <2 x half>, <2 x half> addrspace(1)* %in.gep
+  %vecins = insertelement <2 x half> %vec, half 0xH0023, i32 1
+  store <2 x half> %vecins, <2 x half> addrspace(1)* %out.gep
+  ret void
+}
+
+; FIXME: Enable for others when argument load not split
+; GCN-LABEL: {{^}}s_insertelement_v2i16_dynamic:
+; GCN-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x3e7
+; GCN: s_load_dword [[IDX:s[0-9]+]]
+; GCN: s_load_dword [[VEC:s[0-9]+]]
+; GCN-DAG: v_mov_b32_e32 [[VVEC:v[0-9]+]], [[VEC]]
+; GCN-DAG: s_lshl_b32 [[SCALED_IDX:s[0-9]+]], [[IDX]], 16
+; GCN-DAG: s_lshl_b32 [[MASK:s[0-9]+]], 0xffff, [[SCALED_IDX]]
+; GCN: v_bfi_b32 [[RESULT:v[0-9]+]], [[MASK]], [[K]], [[VVEC]]
+; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
+define void @s_insertelement_v2i16_dynamic(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(2)* %vec.ptr, i32 addrspace(2)* %idx.ptr) #0 {
+  %idx = load volatile i32, i32 addrspace(2)* %idx.ptr
+  %vec = load <2 x i16>, <2 x i16> addrspace(2)* %vec.ptr
+  %vecins = insertelement <2 x i16> %vec, i16 999, i32 %idx
+  store <2 x i16> %vecins, <2 x i16> addrspace(1)* %out
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_insertelement_v2i16_dynamic_sgpr:
+; GCN-DAG: flat_load_dword [[VEC:v[0-9]+]]
+; GCN-DAG: s_load_dword [[IDX:s[0-9]+]]
+; GCN-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x3e7
+; GCN-DAG: s_lshl_b32 [[SCALED_IDX:s[0-9]+]], [[IDX]], 16
+; GCN-DAG: s_lshl_b32 [[MASK:s[0-9]+]], 0xffff, [[SCALED_IDX]]
+; GCN: v_bfi_b32 [[RESULT:v[0-9]+]], [[MASK]], [[K]], [[VEC]]
+; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
+define void @v_insertelement_v2i16_dynamic_sgpr(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %in, i32 %idx) #0 {
+  %tid = call i32 @llvm.amdgcn.workitem.id.x() #1
+  %tid.ext = sext i32 %tid to i64
+  %in.gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %in, i64 %tid.ext
+  %out.gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %out, i64 %tid.ext
+  %vec = load <2 x i16>, <2 x i16> addrspace(1)* %in.gep
+  %vecins = insertelement <2 x i16> %vec, i16 999, i32 %idx
+  store <2 x i16> %vecins, <2 x i16> addrspace(1)* %out.gep
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_insertelement_v2i16_dynamic_vgpr:
+; GCN: flat_load_dword [[IDX:v[0-9]+]]
+; GCN: flat_load_dword [[VEC:v[0-9]+]]
+; GCN-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x3e7
+
+; VI-DAG: s_mov_b32 [[MASKK:s[0-9]+]], 0xffff{{$}}
+; VI-DAG: v_lshlrev_b32_e32 [[SCALED_IDX:v[0-9]+]], 16, [[IDX]]
+; VI: v_lshlrev_b32_e64 [[MASK:v[0-9]+]], [[SCALED_IDX]], [[MASKK]]
+
+; CI: v_lshlrev_b32_e32 [[SCALED_IDX:v[0-9]+]], 16, [[IDX]]
+; CI: v_lshl_b32_e32 [[MASK:v[0-9]+]], 0xffff, [[SCALED_IDX]]
+
+; GCN: v_bfi_b32 [[RESULT:v[0-9]+]], [[MASK]], [[K]], [[VEC]]
+; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
+define void @v_insertelement_v2i16_dynamic_vgpr(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %in, i32 addrspace(1)* %idx.ptr) #0 {
+  %tid = call i32 @llvm.amdgcn.workitem.id.x() #1
+  %tid.ext = sext i32 %tid to i64
+  %in.gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %in, i64 %tid.ext
+  %idx.gep = getelementptr inbounds i32, i32 addrspace(1)* %idx.ptr, i64 %tid.ext
+  %out.gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %out, i64 %tid.ext
+  %idx = load i32, i32 addrspace(1)* %idx.gep
+  %vec = load <2 x i16>, <2 x i16> addrspace(1)* %in.gep
+  %vecins = insertelement <2 x i16> %vec, i16 999, i32 %idx
+  store <2 x i16> %vecins, <2 x i16> addrspace(1)* %out.gep
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_insertelement_v2f16_dynamic_vgpr:
+; GCN: flat_load_dword [[IDX:v[0-9]+]]
+; GCN: flat_load_dword [[VEC:v[0-9]+]]
+; GCN-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x1234
+
+; VI-DAG: s_mov_b32 [[MASKK:s[0-9]+]], 0xffff{{$}}
+; VI-DAG: v_lshlrev_b32_e32 [[SCALED_IDX:v[0-9]+]], 16, [[IDX]]
+; VI: v_lshlrev_b32_e64 [[MASK:v[0-9]+]], [[SCALED_IDX]], [[MASKK]]
+
+; CI: v_lshlrev_b32_e32 [[SCALED_IDX:v[0-9]+]], 16, [[IDX]]
+; CI: v_lshl_b32_e32 [[MASK:v[0-9]+]], 0xffff, [[SCALED_IDX]]
+
+; GCN: v_bfi_b32 [[RESULT:v[0-9]+]], [[MASK]], [[K]], [[VEC]]
+; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
+define void @v_insertelement_v2f16_dynamic_vgpr(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* %in, i32 addrspace(1)* %idx.ptr) #0 {
+  %tid = call i32 @llvm.amdgcn.workitem.id.x() #1
+  %tid.ext = sext i32 %tid to i64
+  %in.gep = getelementptr inbounds <2 x half>, <2 x half> addrspace(1)* %in, i64 %tid.ext
+  %idx.gep = getelementptr inbounds i32, i32 addrspace(1)* %idx.ptr, i64 %tid.ext
+  %out.gep = getelementptr inbounds <2 x half>, <2 x half> addrspace(1)* %out, i64 %tid.ext
+  %idx = load i32, i32 addrspace(1)* %idx.gep
+  %vec = load <2 x half>, <2 x half> addrspace(1)* %in.gep
+  %vecins = insertelement <2 x half> %vec, half 0xH1234, i32 %idx
+  store <2 x half> %vecins, <2 x half> addrspace(1)* %out.gep
+  ret void
+}
+
+declare i32 @llvm.amdgcn.workitem.id.x() #1
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }
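
For readers following the lowering: both elements of a <2 x i16> / <2 x half>
vector occupy one 32-bit register, so the dynamic-index paths reduce to plain
bit manipulation. Below is a minimal scalar sketch of that idea in C++ (the
helper names are mine, not part of the patch; the sketch scales the index to a
bit offset of idx * 16 and positions the inserted value at that offset before
masking):

#include <cassert>
#include <cstdint>

// Extract: shift the selected 16-bit element down to bit 0 and truncate,
// mirroring the srl + truncate sequence built by lowerEXTRACT_VECTOR_ELT.
static uint16_t extractElt(uint32_t Vec, uint32_t Idx) {
  assert(Idx < 2 && "v2i16 has two elements");
  uint32_t BitIdx = Idx * 16; // convert vector index to bit-index
  return static_cast<uint16_t>(Vec >> BitIdx);
}

// Insert: build a 16-bit-wide mask at the element's position (v_bfm_b32)
// and blend value and vector (v_bfi_b32 computes (Mask & X) | (~Mask & Y)).
static uint32_t insertElt(uint32_t Vec, uint16_t Val, uint32_t Idx) {
  assert(Idx < 2 && "v2i16 has two elements");
  uint32_t BitIdx = Idx * 16;
  uint32_t Mask = 0xffffu << BitIdx;                   // v_bfm_b32 16, BitIdx
  uint32_t Ext = static_cast<uint32_t>(Val) << BitIdx; // value at bit offset
  return (Mask & Ext) | (~Mask & Vec);                 // v_bfi_b32
}

For example, insertElt(0x00020001, 0x3e7, 1) yields 0x03e70001, and
extractElt(0x03e70001, 1) gives back 0x3e7, with no scratch traffic involved;
that absence of stack access is what the "ScratchSize: 0" checks above assert.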