Index: lib/Target/AMDGPU/AMDGPUISelLowering.h
===================================================================
--- lib/Target/AMDGPU/AMDGPUISelLowering.h
+++ lib/Target/AMDGPU/AMDGPUISelLowering.h
@@ -488,7 +488,6 @@
   STORE_MSKOR,
   LOAD_CONSTANT,
   TBUFFER_STORE_FORMAT,
-  TBUFFER_STORE_FORMAT_X3,
   TBUFFER_STORE_FORMAT_D16,
   TBUFFER_LOAD_FORMAT,
   TBUFFER_LOAD_FORMAT_D16,
Index: lib/Target/AMDGPU/AMDGPUISelLowering.cpp
===================================================================
--- lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -4302,7 +4302,6 @@
   NODE_NAME_CASE(STORE_MSKOR)
   NODE_NAME_CASE(LOAD_CONSTANT)
   NODE_NAME_CASE(TBUFFER_STORE_FORMAT)
-  NODE_NAME_CASE(TBUFFER_STORE_FORMAT_X3)
   NODE_NAME_CASE(TBUFFER_STORE_FORMAT_D16)
   NODE_NAME_CASE(TBUFFER_LOAD_FORMAT)
   NODE_NAME_CASE(TBUFFER_LOAD_FORMAT_D16)
Index: lib/Target/AMDGPU/BUFInstructions.td
===================================================================
--- lib/Target/AMDGPU/BUFInstructions.td
+++ lib/Target/AMDGPU/BUFInstructions.td
@@ -1013,11 +1013,11 @@
 defm TBUFFER_LOAD_FORMAT_X : MTBUF_Pseudo_Loads <"tbuffer_load_format_x", VGPR_32>;
 defm TBUFFER_LOAD_FORMAT_XY : MTBUF_Pseudo_Loads <"tbuffer_load_format_xy", VReg_64>;
-defm TBUFFER_LOAD_FORMAT_XYZ : MTBUF_Pseudo_Loads <"tbuffer_load_format_xyz", VReg_128>;
+defm TBUFFER_LOAD_FORMAT_XYZ : MTBUF_Pseudo_Loads <"tbuffer_load_format_xyz", VReg_96>;
 defm TBUFFER_LOAD_FORMAT_XYZW : MTBUF_Pseudo_Loads <"tbuffer_load_format_xyzw", VReg_128>;
 defm TBUFFER_STORE_FORMAT_X : MTBUF_Pseudo_Stores <"tbuffer_store_format_x", VGPR_32>;
 defm TBUFFER_STORE_FORMAT_XY : MTBUF_Pseudo_Stores <"tbuffer_store_format_xy", VReg_64>;
-defm TBUFFER_STORE_FORMAT_XYZ : MTBUF_Pseudo_Stores <"tbuffer_store_format_xyz", VReg_128>;
+defm TBUFFER_STORE_FORMAT_XYZ : MTBUF_Pseudo_Stores <"tbuffer_store_format_xyz", VReg_96>;
 defm TBUFFER_STORE_FORMAT_XYZW : MTBUF_Pseudo_Stores <"tbuffer_store_format_xyzw", VReg_128>;
 
 let SubtargetPredicate = HasUnpackedD16VMem, D16Buf = 1 in {
@@ -1106,6 +1106,8 @@
 defm : MUBUF_LoadIntrinsicPat;
 defm : MUBUF_LoadIntrinsicPat;
 defm : MUBUF_LoadIntrinsicPat;
+defm : MUBUF_LoadIntrinsicPat;
+defm : MUBUF_LoadIntrinsicPat;
 defm : MUBUF_LoadIntrinsicPat;
 defm : MUBUF_LoadIntrinsicPat;
@@ -1129,6 +1131,8 @@
 defm : MUBUF_LoadIntrinsicPat;
 defm : MUBUF_LoadIntrinsicPat;
 defm : MUBUF_LoadIntrinsicPat;
+defm : MUBUF_LoadIntrinsicPat;
+defm : MUBUF_LoadIntrinsicPat;
 defm : MUBUF_LoadIntrinsicPat;
 defm : MUBUF_LoadIntrinsicPat;
 defm : MUBUF_LoadIntrinsicPat;
@@ -1172,6 +1176,8 @@
 defm : MUBUF_StoreIntrinsicPat;
 defm : MUBUF_StoreIntrinsicPat;
 defm : MUBUF_StoreIntrinsicPat;
+defm : MUBUF_StoreIntrinsicPat;
+defm : MUBUF_StoreIntrinsicPat;
 defm : MUBUF_StoreIntrinsicPat;
 defm : MUBUF_StoreIntrinsicPat;
@@ -1195,6 +1201,8 @@
 defm : MUBUF_StoreIntrinsicPat;
 defm : MUBUF_StoreIntrinsicPat;
 defm : MUBUF_StoreIntrinsicPat;
+defm : MUBUF_StoreIntrinsicPat;
+defm : MUBUF_StoreIntrinsicPat;
 defm : MUBUF_StoreIntrinsicPat;
 defm : MUBUF_StoreIntrinsicPat;
 defm : MUBUF_StoreIntrinsicPat;
@@ -1562,9 +1570,11 @@
 defm : MTBUF_LoadIntrinsicPat;
 defm : MTBUF_LoadIntrinsicPat;
+defm : MTBUF_LoadIntrinsicPat;
 defm : MTBUF_LoadIntrinsicPat;
 defm : MTBUF_LoadIntrinsicPat;
 defm : MTBUF_LoadIntrinsicPat;
+defm : MTBUF_LoadIntrinsicPat;
 defm : MTBUF_LoadIntrinsicPat;
 
 let SubtargetPredicate = HasUnpackedD16VMem in {
@@ -1618,11 +1628,11 @@
 defm : MTBUF_StoreIntrinsicPat;
 defm : MTBUF_StoreIntrinsicPat;
-defm : MTBUF_StoreIntrinsicPat;
+defm : MTBUF_StoreIntrinsicPat;
 defm : MTBUF_StoreIntrinsicPat;
 defm : MTBUF_StoreIntrinsicPat;
 defm : MTBUF_StoreIntrinsicPat;
-defm : MTBUF_StoreIntrinsicPat;
+defm : MTBUF_StoreIntrinsicPat;
 defm : MTBUF_StoreIntrinsicPat;
 
 let SubtargetPredicate = HasUnpackedD16VMem in {
Index: lib/Target/AMDGPU/SIISelLowering.h
===================================================================
--- lib/Target/AMDGPU/SIISelLowering.h
+++ lib/Target/AMDGPU/SIISelLowering.h
@@ -94,6 +94,12 @@
                               SelectionDAG &DAG, ArrayRef<SDValue> Ops,
                               bool IsIntrinsic = false) const;
 
+  // Call DAG.getMemIntrinsicNode for a load, but first widen a dwordx3 type to
+  // dwordx4 if on SI.
+  SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
+                              ArrayRef<SDValue> Ops, EVT MemVT,
+                              MachineMemOperand *MMO, SelectionDAG &DAG) const;
+
   SDValue handleD16VData(SDValue VData, SelectionDAG &DAG) const;
 
   /// Converts \p Op, which must be of floating point type, to the
Index: lib/Target/AMDGPU/SIISelLowering.cpp
===================================================================
--- lib/Target/AMDGPU/SIISelLowering.cpp
+++ lib/Target/AMDGPU/SIISelLowering.cpp
@@ -5663,8 +5663,8 @@
     if (LoadVT.getScalarType() == MVT::f16)
       return adjustLoadValueType(AMDGPUISD::BUFFER_LOAD_FORMAT_D16,
                                  M, DAG, Ops);
-    return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, IntVT,
-                                   M->getMemOperand());
+    return getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, IntVT,
+                               M->getMemOperand(), DAG);
   }
   case Intrinsic::amdgcn_raw_buffer_load:
   case Intrinsic::amdgcn_raw_buffer_load_format: {
@@ -5691,8 +5691,8 @@
     if (LoadVT.getScalarType() == MVT::f16)
       return adjustLoadValueType(AMDGPUISD::BUFFER_LOAD_FORMAT_D16,
                                  M, DAG, Ops);
-    return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, IntVT,
-                                   M->getMemOperand());
+    return getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, IntVT,
+                               M->getMemOperand(), DAG);
   }
   case Intrinsic::amdgcn_struct_buffer_load:
   case Intrinsic::amdgcn_struct_buffer_load_format: {
@@ -5719,8 +5719,8 @@
     if (LoadVT.getScalarType() == MVT::f16)
       return adjustLoadValueType(AMDGPUISD::BUFFER_LOAD_FORMAT_D16,
                                  M, DAG, Ops);
-    return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, IntVT,
-                                   M->getMemOperand());
+    return getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, IntVT,
+                               M->getMemOperand(), DAG);
   }
   case Intrinsic::amdgcn_tbuffer_load: {
     MemSDNode *M = cast<MemSDNode>(Op);
@@ -5748,9 +5748,9 @@
     if (LoadVT.getScalarType() == MVT::f16)
       return adjustLoadValueType(AMDGPUISD::TBUFFER_LOAD_FORMAT_D16,
                                  M, DAG, Ops);
-    return DAG.getMemIntrinsicNode(AMDGPUISD::TBUFFER_LOAD_FORMAT, DL,
-                                   Op->getVTList(), Ops, LoadVT,
-                                   M->getMemOperand());
+    return getMemIntrinsicNode(AMDGPUISD::TBUFFER_LOAD_FORMAT, DL,
+                               Op->getVTList(), Ops, LoadVT, M->getMemOperand(),
+                               DAG);
   }
   case Intrinsic::amdgcn_raw_tbuffer_load: {
     MemSDNode *M = cast<MemSDNode>(Op);
@@ -5772,9 +5772,9 @@
     if (LoadVT.getScalarType() == MVT::f16)
       return adjustLoadValueType(AMDGPUISD::TBUFFER_LOAD_FORMAT_D16,
                                  M, DAG, Ops);
-    return DAG.getMemIntrinsicNode(AMDGPUISD::TBUFFER_LOAD_FORMAT, DL,
-                                   Op->getVTList(), Ops, LoadVT,
-                                   M->getMemOperand());
+    return getMemIntrinsicNode(AMDGPUISD::TBUFFER_LOAD_FORMAT, DL,
+                               Op->getVTList(), Ops, LoadVT, M->getMemOperand(),
+                               DAG);
   }
   case Intrinsic::amdgcn_struct_tbuffer_load: {
     MemSDNode *M = cast<MemSDNode>(Op);
@@ -5796,9 +5796,9 @@
     if (LoadVT.getScalarType() == MVT::f16)
      return adjustLoadValueType(AMDGPUISD::TBUFFER_LOAD_FORMAT_D16,
                                 M, DAG, Ops);
-    return DAG.getMemIntrinsicNode(AMDGPUISD::TBUFFER_LOAD_FORMAT, DL,
-                                   Op->getVTList(), Ops, LoadVT,
-                                   M->getMemOperand());
+    return getMemIntrinsicNode(AMDGPUISD::TBUFFER_LOAD_FORMAT, DL,
+                               Op->getVTList(), Ops, LoadVT, M->getMemOperand(),
+                               DAG);
   }
   case Intrinsic::amdgcn_buffer_atomic_swap:
   case Intrinsic::amdgcn_buffer_atomic_add:
@@ -6073,6 +6073,39 @@
   }
 }
 
+// Call DAG.getMemIntrinsicNode for a load, but first widen a dwordx3 type to
+// dwordx4 if on SI.
+SDValue SITargetLowering::getMemIntrinsicNode(unsigned Opcode, const SDLoc &DL,
+                                              SDVTList VTList,
+                                              ArrayRef<SDValue> Ops, EVT MemVT,
+                                              MachineMemOperand *MMO,
+                                              SelectionDAG &DAG) const {
+  EVT VT = VTList.VTs[0];
+  EVT WidenedVT = VT;
+  EVT WidenedMemVT = MemVT;
+  if (!Subtarget->hasDwordx3LoadStores() &&
+      (WidenedVT == MVT::v3i32 || WidenedVT == MVT::v3f32)) {
+    WidenedVT = EVT::getVectorVT(*DAG.getContext(),
+                                 WidenedVT.getVectorElementType(), 4);
+    WidenedMemVT = EVT::getVectorVT(*DAG.getContext(),
+                                    WidenedMemVT.getVectorElementType(), 4);
+    MMO = DAG.getMachineFunction().getMachineMemOperand(MMO, 0, 16);
+  }
+
+  assert(VTList.NumVTs == 2);
+  SDVTList WidenedVTList = DAG.getVTList(WidenedVT, VTList.VTs[1]);
+
+  auto NewOp = DAG.getMemIntrinsicNode(Opcode, DL, WidenedVTList, Ops,
+                                       WidenedMemVT, MMO);
+  if (WidenedVT != VT) {
+    auto Extract = DAG.getNode(
+        ISD::EXTRACT_SUBVECTOR, DL, VT, NewOp,
+        DAG.getConstant(0, DL, getVectorIdxTy(DAG.getDataLayout())));
+    NewOp = DAG.getMergeValues({ Extract, SDValue(NewOp.getNode(), 1) }, DL);
+  }
+  return NewOp;
+}
+
 SDValue SITargetLowering::handleD16VData(SDValue VData,
                                          SelectionDAG &DAG) const {
   EVT StoreVT = VData.getValueType();
Index: lib/Target/AMDGPU/SIInstrInfo.td
===================================================================
--- lib/Target/AMDGPU/SIInstrInfo.td
+++ lib/Target/AMDGPU/SIInstrInfo.td
@@ -103,9 +103,6 @@
 def SItbuffer_store : SDNode<"AMDGPUISD::TBUFFER_STORE_FORMAT",
                              SDTtbuffer_store,
                              [SDNPMayStore, SDNPMemOperand, SDNPHasChain]>;
-def SItbuffer_store_x3 : SDNode<"AMDGPUISD::TBUFFER_STORE_FORMAT_X3",
-                                SDTtbuffer_store,
-                                [SDNPMayStore, SDNPMemOperand, SDNPHasChain]>;
 def SItbuffer_store_d16 : SDNode<"AMDGPUISD::TBUFFER_STORE_FORMAT_D16",
                                  SDTtbuffer_store,
                                  [SDNPMayStore, SDNPMemOperand, SDNPHasChain]>;
Index: test/CodeGen/AMDGPU/llvm.amdgcn.buffer.load.dwordx3.ll
===================================================================
--- /dev/null
+++ test/CodeGen/AMDGPU/llvm.amdgcn.buffer.load.dwordx3.ll
@@ -0,0 +1,60 @@
+;RUN: llc < %s -march=amdgcn -mcpu=gfx600 -verify-machineinstrs | FileCheck %s -check-prefixes=CHECK,SI
+;RUN: llc < %s -march=amdgcn -mcpu=gfx700 -verify-machineinstrs | FileCheck %s -check-prefixes=CHECK,GCNX3
+
+;CHECK-LABEL: {{^}}buffer_load_format_immoffs_x3:
+;SI: buffer_load_format_xyzw v[0:3], off, s[0:3], 0 offset:42
+;GCNX3: buffer_load_format_xyz v[0:2], off, s[0:3], 0 offset:42
+;CHECK: s_waitcnt
+define amdgpu_ps <3 x float> @buffer_load_format_immoffs_x3(<4 x i32> inreg) {
+main_body:
+  %data = call <3 x float> @llvm.amdgcn.buffer.load.format.v3f32(<4 x i32> %0, i32 0, i32 42, i1 0, i1 0)
+  ret <3 x float> %data
+}
+
+;CHECK-LABEL: {{^}}buffer_load_immoffs_x3:
+;SI: buffer_load_dwordx4 v[0:3], off, s[0:3], 0 offset:40
+;GCNX3: buffer_load_dwordx3 v[0:2], off, s[0:3], 0 offset:40
+;CHECK: s_waitcnt
+define amdgpu_ps <3 x float> @buffer_load_immoffs_x3(<4 x i32> inreg) {
+main_body:
+  %data = call <3 x float> @llvm.amdgcn.buffer.load.v3f32(<4 x i32> %0, i32 0, i32 40, i1 0, i1 0)
+  ret <3 x float> %data
+}
+
+;CHECK-LABEL: {{^}}buffer_raw_load_immoffs_x3:
+;SI: buffer_load_dwordx4 v[0:3], off, s[0:3], 0 offset:40
+;GCNX3: buffer_load_dwordx3 v[0:2], off, s[0:3], 0 offset:40
+;CHECK: s_waitcnt
+define amdgpu_ps <3 x float> @buffer_raw_load_immoffs_x3(<4 x i32> inreg) {
+main_body:
+  %data = call <3 x float> @llvm.amdgcn.raw.buffer.load.v3f32(<4 x i32> %0, i32 40, i32 0, i32 0)
+  ret <3 x float> %data
+}
+
+;CHECK-LABEL: {{^}}buffer_struct_load_format_immoffs_x3:
+;SI: buffer_load_format_xyzw v[0:3], {{v[0-9]+}}, s[0:3], 0 idxen offset:42
+;GCNX3: buffer_load_format_xyz v[0:2], {{v[0-9]+}}, s[0:3], 0 idxen offset:42
+;CHECK: s_waitcnt
+define amdgpu_ps <3 x float> @buffer_struct_load_format_immoffs_x3(<4 x i32> inreg) {
+main_body:
+  %data = call <3 x float> @llvm.amdgcn.struct.buffer.load.format.v3f32(<4 x i32> %0, i32 0, i32 42, i32 0, i32 0)
+  ret <3 x float> %data
+}
+
+;CHECK-LABEL: {{^}}struct_buffer_load_immoffs_x3:
+;SI: buffer_load_dwordx4 v[0:3], {{v[0-9]+}}, s[0:3], 0 idxen offset:40
+;GCNX3: buffer_load_dwordx3 v[0:2], {{v[0-9]+}}, s[0:3], 0 idxen offset:40
+;CHECK: s_waitcnt
+define amdgpu_ps <3 x float> @struct_buffer_load_immoffs_x3(<4 x i32> inreg) {
+main_body:
+  %data = call <3 x float> @llvm.amdgcn.struct.buffer.load.v3f32(<4 x i32> %0, i32 0, i32 40, i32 0, i32 0)
+  ret <3 x float> %data
+}
+
+declare <3 x float> @llvm.amdgcn.buffer.load.format.v3f32(<4 x i32>, i32, i32, i1, i1) #0
+declare <3 x float> @llvm.amdgcn.buffer.load.v3f32(<4 x i32>, i32, i32, i1, i1) #0
+declare <3 x float> @llvm.amdgcn.raw.buffer.load.format.v3f32(<4 x i32>, i32, i32, i32) #0
+declare <3 x float> @llvm.amdgcn.raw.buffer.load.v3f32(<4 x i32>, i32, i32, i32) #0
+declare <3 x float> @llvm.amdgcn.struct.buffer.load.format.v3f32(<4 x i32>, i32, i32, i32, i32) #0
+declare <3 x float> @llvm.amdgcn.struct.buffer.load.v3f32(<4 x i32>, i32, i32, i32, i32) #0
+
Index: test/CodeGen/AMDGPU/llvm.amdgcn.buffer.store.dwordx3.ll
===================================================================
--- /dev/null
+++ test/CodeGen/AMDGPU/llvm.amdgcn.buffer.store.dwordx3.ll
@@ -0,0 +1,53 @@
+;RUN: llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck %s -check-prefix=CHECK
+
+;CHECK-LABEL: {{^}}buffer_store_format_immoffs_x3:
+;CHECK-NOT: s_waitcnt
+;CHECK: buffer_store_format_xyz v[0:2], off, s[0:3], 0 offset:42
+define amdgpu_ps void @buffer_store_format_immoffs_x3(<4 x i32> inreg, <3 x float>) {
+main_body:
+  call void @llvm.amdgcn.buffer.store.format.v3f32(<3 x float> %1, <4 x i32> %0, i32 0, i32 42, i1 0, i1 0)
+  ret void
+}
+
+;CHECK-LABEL: {{^}}buffer_store_immoffs_x3:
+;CHECK-NOT: s_waitcnt
+;CHECK: buffer_store_dwordx3 v[0:2], off, s[0:3], 0 offset:42
+define amdgpu_ps void @buffer_store_immoffs_x3(<4 x i32> inreg, <3 x float>) {
+main_body:
+  call void @llvm.amdgcn.buffer.store.v3f32(<3 x float> %1, <4 x i32> %0, i32 0, i32 42, i1 0, i1 0)
+  ret void
+}
+
+;CHECK-LABEL: {{^}}raw_buffer_store_format_immoffs_x3:
+;CHECK-NOT: s_waitcnt
+;CHECK: buffer_store_format_xyz v[0:2], off, s[0:3], 0 offset:42
+define amdgpu_ps void @raw_buffer_store_format_immoffs_x3(<4 x i32> inreg, <3 x float>) {
+main_body:
+  call void @llvm.amdgcn.raw.buffer.store.format.v3f32(<3 x float> %1, <4 x i32> %0, i32 42, i32 0, i32 0)
+  ret void
+}
+
+;CHECK-LABEL: {{^}}raw_buffer_store_immoffs_x3:
+;CHECK-NOT: s_waitcnt
+;CHECK: buffer_store_dwordx3 v[0:2], off, s[0:3], 0 offset:42
+define amdgpu_ps void @raw_buffer_store_immoffs_x3(<4 x i32> inreg, <3 x float>) {
+main_body:
+  call void @llvm.amdgcn.raw.buffer.store.v3f32(<3 x float> %1, <4 x i32> %0, i32 42, i32 0, i32 0)
+  ret void
+}
+
+;CHECK-LABEL: {{^}}struct_buffer_store_immoffs_x3:
+;CHECK-NOT: s_waitcnt
+;CHECK: buffer_store_dwordx3 v[0:2], {{v[0-9]+}}, s[0:3], 0 idxen offset:42
+define amdgpu_ps void @struct_buffer_store_immoffs_x3(<4 x i32> inreg, <3 x float>) {
+main_body:
+  call void @llvm.amdgcn.struct.buffer.store.v3f32(<3 x float> %1, <4 x i32> %0, i32 0, i32 42, i32 0, i32 0)
+  ret void
+}
+
+declare void @llvm.amdgcn.buffer.store.v3f32(<3 x float>, <4 x i32>, i32, i32, i1, i1) #0
+declare void @llvm.amdgcn.buffer.store.format.v3f32(<3 x float>, <4 x i32>, i32, i32, i1, i1) #0
+declare void @llvm.amdgcn.raw.buffer.store.format.v3f32(<3 x float>, <4 x i32>, i32, i32, i32) #0
+declare void @llvm.amdgcn.raw.buffer.store.v3f32(<3 x float>, <4 x i32>, i32, i32, i32) #0
+declare void @llvm.amdgcn.struct.buffer.store.format.v3f32(<3 x float>, <4 x i32>, i32, i32, i32, i32) #0
+declare void @llvm.amdgcn.struct.buffer.store.v3f32(<3 x float>, <4 x i32>, i32, i32, i32, i32) #0
Index: test/CodeGen/AMDGPU/llvm.amdgcn.tbuffer.load.dwordx3.ll
===================================================================
--- /dev/null
+++ test/CodeGen/AMDGPU/llvm.amdgcn.tbuffer.load.dwordx3.ll
@@ -0,0 +1,40 @@
+;RUN: llc < %s -march=amdgcn -mcpu=gfx600 -verify-machineinstrs | FileCheck %s -check-prefixes=GCN,SI
+;RUN: llc < %s -march=amdgcn -mcpu=gfx700 -verify-machineinstrs | FileCheck %s -check-prefixes=GCN,GCNX3
+
+; GCN-LABEL: {{^}}tbuffer_raw_load_immoffs_x3:
+; SI: tbuffer_load_format_xyzw {{v\[[0-9]+:[0-9]+\]}}, off, {{s\[[0-9]+:[0-9]+\]}}, dfmt:14, nfmt:4, 0 offset:42
+; GCNX3: tbuffer_load_format_xyz {{v\[[0-9]+:[0-9]+\]}}, off, {{s\[[0-9]+:[0-9]+\]}}, dfmt:14, nfmt:4, 0 offset:42
+define amdgpu_vs <3 x float> @tbuffer_raw_load_immoffs_x3(<4 x i32> inreg) {
+main_body:
+  %vdata = call <3 x i32> @llvm.amdgcn.raw.tbuffer.load.v3i32(<4 x i32> %0, i32 42, i32 0, i32 78, i32 0)
+  %vdata.f = bitcast <3 x i32> %vdata to <3 x float>
+  ret <3 x float> %vdata.f
+}
+
+
+; GCN-LABEL: {{^}}tbuffer_struct_load_immoffs_x3:
+; GCN: v_mov_b32_e32 [[ZEROREG:v[0-9]+]], 0
+; SI: tbuffer_load_format_xyzw {{v\[[0-9]+:[0-9]+\]}}, [[ZEROREG]], {{s\[[0-9]+:[0-9]+\]}}, dfmt:14, nfmt:4, 0 idxen offset:42
+; GCNX3: tbuffer_load_format_xyz {{v\[[0-9]+:[0-9]+\]}}, [[ZEROREG]], {{s\[[0-9]+:[0-9]+\]}}, dfmt:14, nfmt:4, 0 idxen offset:42
+define amdgpu_vs <3 x float> @tbuffer_struct_load_immoffs_x3(<4 x i32> inreg) {
+main_body:
+  %vdata = call <3 x i32> @llvm.amdgcn.struct.tbuffer.load.v3i32(<4 x i32> %0, i32 0, i32 42, i32 0, i32 78, i32 0)
+  %vdata.f = bitcast <3 x i32> %vdata to <3 x float>
+  ret <3 x float> %vdata.f
+}
+
+
+; GCN-LABEL: {{^}}tbuffer_load_format_immoffs_x3:
+; SI: tbuffer_load_format_xyzw {{v\[[0-9]+:[0-9]+\]}}, off, {{s\[[0-9]+:[0-9]+\]}}, dfmt:14, nfmt:4, 0 offset:42
+; GCNX3: tbuffer_load_format_xyz {{v\[[0-9]+:[0-9]+\]}}, off, {{s\[[0-9]+:[0-9]+\]}}, dfmt:14, nfmt:4, 0 offset:42
+define amdgpu_vs <3 x float> @tbuffer_load_format_immoffs_x3(<4 x i32> inreg) {
+main_body:
+  %vdata = call <3 x i32> @llvm.amdgcn.tbuffer.load.v3i32(<4 x i32> %0, i32 0, i32 0, i32 0, i32 42, i32 14, i32 4, i1 0, i1 0)
+  %vdata.f = bitcast <3 x i32> %vdata to <3 x float>
+  ret <3 x float> %vdata.f
+}
+
+declare <3 x i32> @llvm.amdgcn.raw.tbuffer.load.v3i32(<4 x i32>, i32, i32, i32, i32)
+declare <3 x i32> @llvm.amdgcn.struct.tbuffer.load.v3i32(<4 x i32>, i32, i32, i32, i32, i32)
+declare <3 x i32> @llvm.amdgcn.tbuffer.load.v3i32(<4 x i32>, i32, i32, i32, i32, i32, i32, i1, i1)
+
Index: test/CodeGen/AMDGPU/llvm.amdgcn.tbuffer.store.dwordx3.ll
===================================================================
--- /dev/null
+++ test/CodeGen/AMDGPU/llvm.amdgcn.tbuffer.store.dwordx3.ll
@@ -0,0 +1,35 @@
+;RUN: llc < %s -march=amdgcn -mcpu=gfx700 -verify-machineinstrs | FileCheck %s -check-prefixes=GCN
+
+; GCN-LABEL: {{^}}tbuffer_raw_store_immoffs_x3:
+; GCN: tbuffer_store_format_xyz v[0:2], off, s[0:3], dfmt:5, nfmt:7, 0 offset:42
+define amdgpu_ps void @tbuffer_raw_store_immoffs_x3(<4 x i32> inreg, <3 x float>) {
+main_body:
+  %in1 = bitcast <3 x float> %1 to <3 x i32>
+  call void @llvm.amdgcn.raw.tbuffer.store.v3i32(<3 x i32> %in1, <4 x i32> %0, i32 42, i32 0, i32 117, i32 0)
+  ret void
+}
+
+
+; GCN-LABEL: {{^}}tbuffer_struct_store_immoffs_x3:
+; GCN: v_mov_b32_e32 [[ZEROREG:v[0-9]+]], 0
+; GCN: tbuffer_store_format_xyz v[0:2], [[ZEROREG]], s[0:3], dfmt:5, nfmt:7, 0 idxen offset:42
+define amdgpu_ps void @tbuffer_struct_store_immoffs_x3(<4 x i32> inreg, <3 x float>) {
+main_body:
+  %in1 = bitcast <3 x float> %1 to <3 x i32>
+  call void @llvm.amdgcn.struct.tbuffer.store.v3i32(<3 x i32> %in1, <4 x i32> %0, i32 0, i32 42, i32 0, i32 117, i32 0)
+  ret void
+}
+
+; GCN-LABEL: {{^}}tbuffer_store_immoffs_x3:
+; GCN: tbuffer_store_format_xyz v[0:2], off, s[0:3], dfmt:5, nfmt:7, 0 offset:42
+define amdgpu_ps void @tbuffer_store_immoffs_x3(<4 x i32> inreg, <3 x float>) {
+main_body:
+  %in1 = bitcast <3 x float> %1 to <3 x i32>
+  call void @llvm.amdgcn.tbuffer.store.v3i32(<3 x i32> %in1, <4 x i32> %0, i32 0, i32 0, i32 0, i32 42, i32 5, i32 7, i1 0, i1 0)
+  ret void
+}
+
+declare void @llvm.amdgcn.raw.tbuffer.store.v3i32(<3 x i32>, <4 x i32>, i32, i32, i32, i32) #0
+declare void @llvm.amdgcn.struct.tbuffer.store.v3i32(<3 x i32>, <4 x i32>, i32, i32, i32, i32, i32) #0
+declare void @llvm.amdgcn.tbuffer.store.v3i32(<3 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i1, i1) #0