diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -5659,11 +5659,16 @@
                                        SDValue Offset, SDValue GLC, SDValue DLC,
                                        SelectionDAG &DAG) const {
   MachineFunction &MF = DAG.getMachineFunction();
+
+  const DataLayout &DataLayout = DAG.getDataLayout();
+  unsigned Align =
+      DataLayout.getABITypeAlignment(VT.getTypeForEVT(*DAG.getContext()));
+
   MachineMemOperand *MMO = MF.getMachineMemOperand(
       MachinePointerInfo(),
       MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
           MachineMemOperand::MOInvariant,
-      VT.getStoreSize(), VT.getStoreSize());
+      VT.getStoreSize(), Align);
 
   if (!Offset->isDivergent()) {
     SDValue Ops[] = {
@@ -5672,6 +5677,20 @@
         GLC,
         DLC,
     };
+
+    // Widen vec3 load to vec4.
+    if (VT.isVector() && VT.getVectorNumElements() == 3) {
+      EVT WidenedVT =
+          EVT::getVectorVT(*DAG.getContext(), VT.getVectorElementType(), 4);
+      auto WidenedOp = DAG.getMemIntrinsicNode(
+          AMDGPUISD::SBUFFER_LOAD, DL, DAG.getVTList(WidenedVT), Ops, WidenedVT,
+          MF.getMachineMemOperand(MMO, 0, WidenedVT.getStoreSize()));
+      auto Subvector = DAG.getNode(
+          ISD::EXTRACT_SUBVECTOR, DL, VT, WidenedOp,
+          DAG.getConstant(0, DL, getVectorIdxTy(DAG.getDataLayout())));
+      return Subvector;
+    }
+
     return DAG.getMemIntrinsicNode(AMDGPUISD::SBUFFER_LOAD, DL,
                                    DAG.getVTList(VT), Ops, VT, MMO);
   }
@@ -5683,11 +5702,10 @@
   MVT LoadVT = VT.getSimpleVT();
   unsigned NumElts = LoadVT.isVector() ? LoadVT.getVectorNumElements() : 1;
   assert((LoadVT.getScalarType() == MVT::i32 ||
-          LoadVT.getScalarType() == MVT::f32) &&
-         isPowerOf2_32(NumElts));
+          LoadVT.getScalarType() == MVT::f32));
 
   if (NumElts == 8 || NumElts == 16) {
-    NumLoads = NumElts == 16 ? 4 : 2;
+    NumLoads = NumElts / 4;
     LoadVT = MVT::v4i32;
   }
 
@@ -5711,8 +5729,8 @@
   uint64_t InstOffset = cast<ConstantSDNode>(Ops[5])->getZExtValue();
   for (unsigned i = 0; i < NumLoads; ++i) {
     Ops[5] = DAG.getTargetConstant(InstOffset + 16 * i, DL, MVT::i32);
-    Loads.push_back(DAG.getMemIntrinsicNode(AMDGPUISD::BUFFER_LOAD, DL, VTList,
-                                            Ops, LoadVT, MMO));
+    Loads.push_back(getMemIntrinsicNode(AMDGPUISD::BUFFER_LOAD, DL, VTList, Ops,
+                                        LoadVT, MMO, DAG));
   }
 
   if (VT == MVT::v8i32 || VT == MVT::v16i32)
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.s.buffer.load.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.s.buffer.load.ll
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.s.buffer.load.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.s.buffer.load.ll
@@ -1,8 +1,10 @@
-;RUN: llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck %s
+;RUN: llc < %s -march=amdgcn -mcpu=tahiti -verify-machineinstrs | FileCheck %s -check-prefixes=GCN,SI
+;RUN: llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck %s -check-prefixes=GCN,VI
 
-;CHECK-LABEL: {{^}}s_buffer_load_imm:
-;CHECK-NOT: s_waitcnt;
-;CHECK: s_buffer_load_dword s{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}], 0x4
+;GCN-LABEL: {{^}}s_buffer_load_imm:
+;GCN-NOT: s_waitcnt;
+;SI: s_buffer_load_dword s{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}], 0x1
+;VI: s_buffer_load_dword s{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}], 0x4
 define amdgpu_ps void @s_buffer_load_imm(<4 x i32> inreg %desc) {
 main_body:
   %load = call i32 @llvm.amdgcn.s.buffer.load.i32(<4 x i32> %desc, i32 4, i32 0)
@@ -11,9 +13,9 @@
   ret void
 }
 
-;CHECK-LABEL: {{^}}s_buffer_load_index:
-;CHECK-NOT: s_waitcnt;
-;CHECK: s_buffer_load_dword s{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}}
+;GCN-LABEL: {{^}}s_buffer_load_index:
+;GCN-NOT: s_waitcnt;
+;GCN: s_buffer_load_dword s{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}}
 define amdgpu_ps void @s_buffer_load_index(<4 x i32> inreg %desc, i32 inreg %index) {
 main_body:
   %load = call i32 @llvm.amdgcn.s.buffer.load.i32(<4 x i32> %desc, i32 %index, i32 0)
@@ -22,9 +24,21 @@
   ret void
 }
 
-;CHECK-LABEL: {{^}}s_buffer_loadx2_imm:
-;CHECK-NOT: s_waitcnt;
-;CHECK: s_buffer_load_dwordx2 s[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}], 0x40
+;GCN-LABEL: {{^}}s_buffer_load_index_divergent:
+;GCN-NOT: s_waitcnt;
+;GCN: buffer_load_dword v{{[0-9]+}}, v{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}], 0 offen
+define amdgpu_ps void @s_buffer_load_index_divergent(<4 x i32> inreg %desc, i32 %index) {
+main_body:
+  %load = call i32 @llvm.amdgcn.s.buffer.load.i32(<4 x i32> %desc, i32 %index, i32 0)
+  %bitcast = bitcast i32 %load to float
+  call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float %bitcast, float undef, float undef, float undef, i1 true, i1 true)
+  ret void
+}
+
+;GCN-LABEL: {{^}}s_buffer_loadx2_imm:
+;GCN-NOT: s_waitcnt;
+;SI: s_buffer_load_dwordx2 s[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}], 0x10
+;VI: s_buffer_load_dwordx2 s[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}], 0x40
 define amdgpu_ps void @s_buffer_loadx2_imm(<4 x i32> inreg %desc) {
 main_body:
   %load = call <2 x i32> @llvm.amdgcn.s.buffer.load.v2i32(<4 x i32> %desc, i32 64, i32 0)
@@ -35,9 +49,9 @@
   ret void
 }
 
-;CHECK-LABEL: {{^}}s_buffer_loadx2_index:
-;CHECK-NOT: s_waitcnt;
-;CHECK: s_buffer_load_dwordx2 s[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}}
+;GCN-LABEL: {{^}}s_buffer_loadx2_index:
+;GCN-NOT: s_waitcnt;
+;GCN: s_buffer_load_dwordx2 s[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}}
 define amdgpu_ps void @s_buffer_loadx2_index(<4 x i32> inreg %desc, i32 inreg %index) {
 main_body:
   %load = call <2 x i32> @llvm.amdgcn.s.buffer.load.v2i32(<4 x i32> %desc, i32 %index, i32 0)
@@ -48,9 +62,67 @@
   ret void
 }
 
-;CHECK-LABEL: {{^}}s_buffer_loadx4_imm:
-;CHECK-NOT: s_waitcnt;
-;CHECK: s_buffer_load_dwordx4 s[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}], 0xc8
+;GCN-LABEL: {{^}}s_buffer_loadx2_index_divergent:
+;GCN-NOT: s_waitcnt;
+;GCN: buffer_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}], 0 offen
+define amdgpu_ps void @s_buffer_loadx2_index_divergent(<4 x i32> inreg %desc, i32 %index) {
+main_body:
+  %load = call <2 x i32> @llvm.amdgcn.s.buffer.load.v2i32(<4 x i32> %desc, i32 %index, i32 0)
+  %bitcast = bitcast <2 x i32> %load to <2 x float>
+  %x = extractelement <2 x float> %bitcast, i32 0
+  %y = extractelement <2 x float> %bitcast, i32 1
+  call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float %x, float %y, float undef, float undef, i1 true, i1 true)
+  ret void
+}
+
+;GCN-LABEL: {{^}}s_buffer_loadx3_imm:
+;GCN-NOT: s_waitcnt;
+;SI: s_buffer_load_dwordx4 s[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}], 0x10
+;VI: s_buffer_load_dwordx4 s[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}], 0x40
+define amdgpu_ps void @s_buffer_loadx3_imm(<4 x i32> inreg %desc) {
+main_body:
+  %load = call <3 x i32> @llvm.amdgcn.s.buffer.load.v3i32(<4 x i32> %desc, i32 64, i32 0)
+  %bitcast = bitcast <3 x i32> %load to <3 x float>
+  %x = extractelement <3 x float> %bitcast, i32 0
+  %y = extractelement <3 x float> %bitcast, i32 1
+  %z = extractelement <3 x float> %bitcast, i32 2
+  call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float %x, float %y, float %z, float undef, i1 true, i1 true)
+  ret void
+}
+
+;GCN-LABEL: {{^}}s_buffer_loadx3_index:
+;GCN-NOT: s_waitcnt;
+;GCN: s_buffer_load_dwordx4 s[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}}
+define amdgpu_ps void @s_buffer_loadx3_index(<4 x i32> inreg %desc, i32 inreg %index) {
+main_body:
+  %load = call <3 x i32> @llvm.amdgcn.s.buffer.load.v3i32(<4 x i32> %desc, i32 %index, i32 0)
+  %bitcast = bitcast <3 x i32> %load to <3 x float>
+  %x = extractelement <3 x float> %bitcast, i32 0
+  %y = extractelement <3 x float> %bitcast, i32 1
+  %z = extractelement <3 x float> %bitcast, i32 2
+  call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float %x, float %y, float %z, float undef, i1 true, i1 true)
+  ret void
+}
+
+;GCN-LABEL: {{^}}s_buffer_loadx3_index_divergent:
+;GCN-NOT: s_waitcnt;
+;SI: buffer_load_dwordx4 v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}], 0 offen
+;VI: buffer_load_dwordx3 v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}], 0 offen
+define amdgpu_ps void @s_buffer_loadx3_index_divergent(<4 x i32> inreg %desc, i32 %index) {
+main_body:
+  %load = call <3 x i32> @llvm.amdgcn.s.buffer.load.v3i32(<4 x i32> %desc, i32 %index, i32 0)
+  %bitcast = bitcast <3 x i32> %load to <3 x float>
+  %x = extractelement <3 x float> %bitcast, i32 0
+  %y = extractelement <3 x float> %bitcast, i32 1
+  %z = extractelement <3 x float> %bitcast, i32 2
+  call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float %x, float %y, float %z, float undef, i1 true, i1 true)
+  ret void
+}
+
+;GCN-LABEL: {{^}}s_buffer_loadx4_imm:
+;GCN-NOT: s_waitcnt;
+;SI: s_buffer_load_dwordx4 s[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}], 0x32
+;VI: s_buffer_load_dwordx4 s[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}], 0xc8
 define amdgpu_ps void @s_buffer_loadx4_imm(<4 x i32> inreg %desc) {
 main_body:
   %load = call <4 x i32> @llvm.amdgcn.s.buffer.load.v4i32(<4 x i32> %desc, i32 200, i32 0)
@@ -63,9 +135,9 @@
   ret void
 }
 
-;CHECK-LABEL: {{^}}s_buffer_loadx4_index:
-;CHECK-NOT: s_waitcnt;
-;CHECK: s_buffer_load_dwordx4 s[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}}
+;GCN-LABEL: {{^}}s_buffer_loadx4_index:
+;GCN-NOT: s_waitcnt;
+;GCN: buffer_load_dwordx4 s[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}}
 define amdgpu_ps void @s_buffer_loadx4_index(<4 x i32> inreg %desc, i32 inreg %index) {
 main_body:
   %load = call <4 x i32> @llvm.amdgcn.s.buffer.load.v4i32(<4 x i32> %desc, i32 %index, i32 0)
@@ -78,9 +150,25 @@
   ret void
 }
 
-;CHECK-LABEL: {{^}}s_buffer_load_imm_mergex2:
-;CHECK-NOT: s_waitcnt;
-;CHECK: s_buffer_load_dwordx2 s[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}], 0x4
+;GCN-LABEL: {{^}}s_buffer_loadx4_index_divergent:
+;GCN-NOT: s_waitcnt;
+;GCN: buffer_load_dwordx4 v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}], 0 offen
+define amdgpu_ps void @s_buffer_loadx4_index_divergent(<4 x i32> inreg %desc, i32 %index) {
+main_body:
+  %load = call <4 x i32> @llvm.amdgcn.s.buffer.load.v4i32(<4 x i32> %desc, i32 %index, i32 0)
+  %bitcast = bitcast <4 x i32> %load to <4 x float>
+  %x = extractelement <4 x float> %bitcast, i32 0
+  %y = extractelement <4 x float> %bitcast, i32 1
+  %z = extractelement <4 x float> %bitcast, i32 2
+  %w = extractelement <4 x float> %bitcast, i32 3
+  call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float %x, float %y, float %z, float %w, i1 true, i1 true)
+  ret void
+}
+
+;GCN-LABEL: {{^}}s_buffer_load_imm_mergex2:
+;GCN-NOT: s_waitcnt;
+;SI: s_buffer_load_dwordx2 s[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}], 0x1
+;VI: s_buffer_load_dwordx2 s[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}], 0x4
 define amdgpu_ps void @s_buffer_load_imm_mergex2(<4 x i32> inreg %desc) {
 main_body:
   %load0 = call i32 @llvm.amdgcn.s.buffer.load.i32(<4 x i32> %desc, i32 4, i32 0)
@@ -91,9 +179,10 @@
   ret void
 }
 
-;CHECK-LABEL: {{^}}s_buffer_load_imm_mergex4:
-;CHECK-NOT: s_waitcnt;
-;CHECK: s_buffer_load_dwordx4 s[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}], 0x8
+;GCN-LABEL: {{^}}s_buffer_load_imm_mergex4:
+;GCN-NOT: s_waitcnt;
+;SI: s_buffer_load_dwordx4 s[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}], 0x2
+;VI: s_buffer_load_dwordx4 s[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}], 0x8
 define amdgpu_ps void @s_buffer_load_imm_mergex4(<4 x i32> inreg %desc) {
 main_body:
   %load0 = call i32 @llvm.amdgcn.s.buffer.load.i32(<4 x i32> %desc, i32 8, i32 0)
@@ -108,10 +197,10 @@
   ret void
 }
 
-;CHECK-LABEL: {{^}}s_buffer_load_index_across_bb:
-;CHECK-NOT: s_waitcnt;
-;CHECK: v_or_b32
-;CHECK: buffer_load_dword v{{[0-9]+}}, v{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}], 0 offen
+;GCN-LABEL: {{^}}s_buffer_load_index_across_bb:
+;GCN-NOT: s_waitcnt;
+;GCN: v_or_b32
+;GCN: buffer_load_dword v{{[0-9]+}}, v{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}], 0 offen
 define amdgpu_ps void @s_buffer_load_index_across_bb(<4 x i32> inreg %desc, i32 %index) {
 main_body:
   %tmp = shl i32 %index, 4
@@ -125,12 +214,12 @@
   ret void
 }
 
-;CHECK-LABEL: {{^}}s_buffer_load_index_across_bb_merged:
-;CHECK-NOT: s_waitcnt;
-;CHECK: v_or_b32
-;CHECK: v_or_b32
-;CHECK: buffer_load_dword v{{[0-9]+}}, v{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}], 0 offen
-;CHECK: buffer_load_dword v{{[0-9]+}}, v{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}], 0 offen
+;GCN-LABEL: {{^}}s_buffer_load_index_across_bb_merged:
+;GCN-NOT: s_waitcnt;
+;GCN: v_or_b32
+;GCN: v_or_b32
+;GCN: buffer_load_dword v{{[0-9]+}}, v{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}], 0 offen
+;GCN: buffer_load_dword v{{[0-9]+}}, v{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}], 0 offen
 define amdgpu_ps void @s_buffer_load_index_across_bb_merged(<4 x i32> inreg %desc, i32 %index) {
 main_body:
   %tmp = shl i32 %index, 4
@@ -150,4 +239,5 @@
 declare void @llvm.amdgcn.exp.f32(i32, i32, float, float, float, float, i1, i1)
 declare i32 @llvm.amdgcn.s.buffer.load.i32(<4 x i32>, i32, i32)
 declare <2 x i32> @llvm.amdgcn.s.buffer.load.v2i32(<4 x i32>, i32, i32)
+declare <3 x i32> @llvm.amdgcn.s.buffer.load.v3i32(<4 x i32>, i32, i32)
 declare <4 x i32> @llvm.amdgcn.s.buffer.load.v4i32(<4 x i32>, i32, i32)
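
The split SI/VI check prefixes introduced above expect different immediates for the same byte offset because the scalar buffer load instruction encodes its immediate offset in dwords on SI (tahiti) but in bytes on VI (tonga). A minimal sketch of that relationship, using a hypothetical helper name that is not part of LLVM:

```cpp
#include <cassert>
#include <cstdint>

// Hypothetical helper (not an LLVM API) illustrating why the same byte offset
// appears differently in the SI and VI check lines: SI's SMRD immediate is a
// dword count, while VI's SMEM immediate is a byte count.
static uint32_t encodeSBufferImmOffset(uint32_t ByteOffset, bool IsVI) {
  if (IsVI)
    return ByteOffset;        // tonga: 4 -> 0x4, 64 -> 0x40, 200 -> 0xc8
  assert(ByteOffset % 4 == 0 && "SI immediate offsets are dword granular");
  return ByteOffset / 4;      // tahiti: 4 -> 0x1, 64 -> 0x10, 200 -> 0x32
}
```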