Index: llvm/trunk/include/llvm/IR/IntrinsicsAMDGPU.td
===================================================================
--- llvm/trunk/include/llvm/IR/IntrinsicsAMDGPU.td
+++ llvm/trunk/include/llvm/IR/IntrinsicsAMDGPU.td
@@ -205,6 +205,28 @@
      llvm_i1_ty],     // slc(imm)
   []>;
 
+def int_amdgcn_buffer_load_format : Intrinsic <
+  [llvm_v4f32_ty],
+  [llvm_v4i32_ty,     // rsrc(SGPR)
+   llvm_i32_ty,       // soffset(SGPR)
+   llvm_i32_ty,       // offset(imm)
+   llvm_i32_ty,       // vindex(VGPR)
+   llvm_i32_ty,       // voffset(VGPR)
+   llvm_i1_ty,        // glc(imm)
+   llvm_i1_ty],       // slc(imm)
+  [IntrReadMem]>;
+
+def int_amdgcn_buffer_store_format : Intrinsic <
+  [],
+  [llvm_anyfloat_ty,  // vdata(VGPR) -- can currently only select v4f32
+   llvm_v4i32_ty,     // rsrc(SGPR)
+   llvm_i32_ty,       // soffset(SGPR)
+   llvm_i32_ty,       // offset(imm)
+   llvm_i32_ty,       // vindex(VGPR)
+   llvm_i32_ty,       // voffset(VGPR)
+   llvm_i1_ty,        // glc(imm)
+   llvm_i1_ty],       // slc(imm)
+  []>;
+
 def int_amdgcn_read_workdim : AMDGPUReadPreloadRegisterIntrinsic <
   "__builtin_amdgcn_read_workdim">;
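For reference, a call to the new intrinsics from LLVM IR looks roughly as follows (a minimal sketch following the operand comments above; %rsrc, %soffset and %v are placeholders). Note that the store intrinsic is overloaded on its data type (llvm_anyfloat_ty), so its name carries a type suffix, while the load intrinsic is not:

    declare <4 x float> @llvm.amdgcn.buffer.load.format(<4 x i32>, i32, i32, i32, i32, i1, i1)
    declare void @llvm.amdgcn.buffer.store.format.v4f32(<4 x float>, <4 x i32>, i32, i32, i32, i32, i1, i1)

    ; rsrc, soffset, offset(imm)=16, vindex=0, voffset=0, glc=0, slc=0
    %v = call <4 x float> @llvm.amdgcn.buffer.load.format(<4 x i32> %rsrc, i32 %soffset, i32 16, i32 0, i32 0, i1 0, i1 0)
    call void @llvm.amdgcn.buffer.store.format.v4f32(<4 x float> %v, <4 x i32> %rsrc, i32 %soffset, i32 16, i32 0, i32 0, i1 0, i1 0)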
Index: llvm/trunk/lib/Target/AMDGPU/SIInstructions.td
===================================================================
--- llvm/trunk/lib/Target/AMDGPU/SIInstructions.td
+++ llvm/trunk/lib/Target/AMDGPU/SIInstructions.td
@@ -949,18 +949,23 @@
 defm BUFFER_LOAD_FORMAT_XYZW : MUBUF_Load_Helper <
   mubuf<0x03>, "buffer_load_format_xyzw", VReg_128
 >;
-defm BUFFER_STORE_FORMAT_X : MUBUF_Store_Helper <
-  mubuf<0x04>, "buffer_store_format_x", VGPR_32
->;
-defm BUFFER_STORE_FORMAT_XY : MUBUF_Store_Helper <
-  mubuf<0x05>, "buffer_store_format_xy", VReg_64
->;
-defm BUFFER_STORE_FORMAT_XYZ : MUBUF_Store_Helper <
-  mubuf<0x06>, "buffer_store_format_xyz", VReg_96
->;
-defm BUFFER_STORE_FORMAT_XYZW : MUBUF_Store_Helper <
-  mubuf<0x07>, "buffer_store_format_xyzw", VReg_128
->;
+// Without mayLoad and hasSideEffects, TableGen complains about the pattern
+// matching llvm.amdgcn.buffer.store.format. Eventually, we'll need a way
+// to express the effects of the intrinsic more precisely.
+let mayLoad = 1, hasSideEffects = 1 in {
+  defm BUFFER_STORE_FORMAT_X : MUBUF_Store_Helper <
+    mubuf<0x04>, "buffer_store_format_x", VGPR_32
+  >;
+  defm BUFFER_STORE_FORMAT_XY : MUBUF_Store_Helper <
+    mubuf<0x05>, "buffer_store_format_xy", VReg_64
+  >;
+  defm BUFFER_STORE_FORMAT_XYZ : MUBUF_Store_Helper <
+    mubuf<0x06>, "buffer_store_format_xyz", VReg_96
+  >;
+  defm BUFFER_STORE_FORMAT_XYZW : MUBUF_Store_Helper <
+    mubuf<0x07>, "buffer_store_format_xyzw", VReg_128
+  >;
+}
 defm BUFFER_LOAD_UBYTE : MUBUF_Load_Helper <
   mubuf<0x08, 0x10>, "buffer_load_ubyte", VGPR_32, i32, az_extloadi8_global
 >;
@@ -2094,6 +2099,74 @@
 >;
 
 //===----------------------------------------------------------------------===//
+// buffer_load/store_format patterns
+//===----------------------------------------------------------------------===//
+def : Pat<
+  (int_amdgcn_buffer_load_format v4i32:$rsrc, i32:$soffset, imm:$offset, 0, 0,
+                                 imm:$glc, imm:$slc),
+  (BUFFER_LOAD_FORMAT_XYZW_OFFSET $rsrc, $soffset, (as_i16imm $offset),
+                                  (as_i1imm $glc), (as_i1imm $slc), 0)
+>;
+
+def : Pat<
+  (int_amdgcn_buffer_load_format v4i32:$rsrc, i32:$soffset, imm:$offset, i32:$vindex, 0,
+                                 imm:$glc, imm:$slc),
+  (BUFFER_LOAD_FORMAT_XYZW_IDXEN $vindex, $rsrc, $soffset, (as_i16imm $offset),
+                                 (as_i1imm $glc), (as_i1imm $slc), 0)
+>;
+
+def : Pat<
+  (int_amdgcn_buffer_load_format v4i32:$rsrc, i32:$soffset, imm:$offset, 0, i32:$voffset,
+                                 imm:$glc, imm:$slc),
+  (BUFFER_LOAD_FORMAT_XYZW_OFFEN $voffset, $rsrc, $soffset, (as_i16imm $offset),
+                                 (as_i1imm $glc), (as_i1imm $slc), 0)
+>;
+
+def : Pat<
+  (int_amdgcn_buffer_load_format v4i32:$rsrc, i32:$soffset, imm:$offset, i32:$vindex, i32:$voffset,
+                                 imm:$glc, imm:$slc),
+  (BUFFER_LOAD_FORMAT_XYZW_BOTHEN
+      (REG_SEQUENCE VReg_64, $vindex, sub0, $voffset, sub1),
+      $rsrc, $soffset, (as_i16imm $offset),
+      (as_i1imm $glc), (as_i1imm $slc), 0)
+>;
+
+def : Pat<
+  (int_amdgcn_buffer_store_format v4f32:$vdata, v4i32:$rsrc,
+                                  i32:$soffset, imm:$offset, 0, 0,
+                                  imm:$glc, imm:$slc),
+  (BUFFER_STORE_FORMAT_XYZW_OFFSET $vdata, $rsrc, $soffset, (as_i16imm $offset),
+                                   (as_i1imm $glc), (as_i1imm $slc), 0)
+>;
+
+def : Pat<
+  (int_amdgcn_buffer_store_format v4f32:$vdata, v4i32:$rsrc,
+                                  i32:$soffset, imm:$offset, i32:$vindex, 0,
+                                  imm:$glc, imm:$slc),
+  (BUFFER_STORE_FORMAT_XYZW_IDXEN $vdata, $vindex, $rsrc, $soffset,
+                                  (as_i16imm $offset), (as_i1imm $glc), (as_i1imm $slc), 0)
+>;
+
+def : Pat<
+  (int_amdgcn_buffer_store_format v4f32:$vdata, v4i32:$rsrc,
+                                  i32:$soffset, imm:$offset, 0, i32:$voffset,
+                                  imm:$glc, imm:$slc),
+  (BUFFER_STORE_FORMAT_XYZW_OFFEN $vdata, $voffset, $rsrc, $soffset,
+                                  (as_i16imm $offset), (as_i1imm $glc), (as_i1imm $slc), 0)
+>;
+
+def : Pat<
+  (int_amdgcn_buffer_store_format v4f32:$vdata, v4i32:$rsrc, i32:$soffset,
+                                  imm:$offset, i32:$vindex, i32:$voffset,
+                                  imm:$glc, imm:$slc),
+  (BUFFER_STORE_FORMAT_XYZW_BOTHEN
+      $vdata,
+      (REG_SEQUENCE VReg_64, $vindex, sub0, $voffset, sub1),
+      $rsrc, $soffset, (as_i16imm $offset),
+      (as_i1imm $glc), (as_i1imm $slc), 0)
+>;
+
+//===----------------------------------------------------------------------===//
 // S_GETREG_B32 Intrinsic Pattern.
 //===----------------------------------------------------------------------===//
 def : Pat <
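In these patterns, a constant-zero vindex/voffset pair selects the plain OFFSET form, a register vindex selects IDXEN, a register voffset selects OFFEN, and two registers select BOTHEN, where REG_SEQUENCE packs the pair into one 64-bit register. Sketched in IR (%rsrc, %idx and %ofs are placeholders):

    ; vindex = voffset = 0          -> BUFFER_LOAD_FORMAT_XYZW_OFFSET
    %a = call <4 x float> @llvm.amdgcn.buffer.load.format(<4 x i32> %rsrc, i32 0, i32 0, i32 0, i32 0, i1 0, i1 0)
    ; register vindex, zero voffset -> BUFFER_LOAD_FORMAT_XYZW_IDXEN
    %b = call <4 x float> @llvm.amdgcn.buffer.load.format(<4 x i32> %rsrc, i32 0, i32 0, i32 %idx, i32 0, i1 0, i1 0)
    ; zero vindex, register voffset -> BUFFER_LOAD_FORMAT_XYZW_OFFEN
    %c = call <4 x float> @llvm.amdgcn.buffer.load.format(<4 x i32> %rsrc, i32 0, i32 0, i32 0, i32 %ofs, i1 0, i1 0)
    ; two registers                 -> BUFFER_LOAD_FORMAT_XYZW_BOTHEN
    %d = call <4 x float> @llvm.amdgcn.buffer.load.format(<4 x i32> %rsrc, i32 0, i32 0, i32 %idx, i32 %ofs, i1 0, i1 0)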
Index: llvm/trunk/lib/Target/AMDGPU/SIRegisterInfo.cpp
===================================================================
--- llvm/trunk/lib/Target/AMDGPU/SIRegisterInfo.cpp
+++ llvm/trunk/lib/Target/AMDGPU/SIRegisterInfo.cpp
@@ -236,7 +236,7 @@
       static_cast<const SIInstrInfo *>(MF->getSubtarget().getInstrInfo());
   LLVMContext &Ctx = MF->getFunction()->getContext();
   DebugLoc DL = MI->getDebugLoc();
-  bool IsLoad = TII->get(LoadStoreOp).mayLoad();
+  bool IsStore = TII->get(LoadStoreOp).mayStore();
 
   bool RanOutOfSGPRs = false;
   bool Scavenged = false;
@@ -272,14 +272,14 @@
       SOffsetRegState |= RegState::Kill;
 
     BuildMI(*MBB, MI, DL, TII->get(LoadStoreOp))
-      .addReg(SubReg, getDefRegState(IsLoad))
+      .addReg(SubReg, getDefRegState(!IsStore))
       .addReg(ScratchRsrcReg)
       .addReg(SOffset, SOffsetRegState)
       .addImm(Offset)
       .addImm(0) // glc
       .addImm(0) // slc
       .addImm(0) // tfe
-      .addReg(Value, RegState::Implicit | getDefRegState(IsLoad))
+      .addReg(Value, RegState::Implicit | getDefRegState(!IsStore))
       .setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
   }
 }
Index: llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.load.format.ll
===================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.load.format.ll
+++ llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.load.format.ll
@@ -0,0 +1,69 @@
+;RUN: llc < %s -march=amdgcn -mcpu=verde -verify-machineinstrs | FileCheck %s
+;RUN: llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck %s
+
+;CHECK-LABEL: {{^}}buffer_load:
+;CHECK: buffer_load_format_xyzw v[0:3], s[0:3], s4
+;CHECK: buffer_load_format_xyzw v[4:7], s[0:3], s4 glc
+;CHECK: buffer_load_format_xyzw v[8:11], s[0:3], s4 slc
+;CHECK: s_waitcnt
+define {<4 x float>, <4 x float>, <4 x float>} @buffer_load(<4 x i32> inreg, i32 inreg) #0 {
+main_body:
+  %data = call <4 x float> @llvm.amdgcn.buffer.load.format(<4 x i32> %0, i32 %1, i32 0, i32 0, i32 0, i1 0, i1 0)
+  %data_glc = call <4 x float> @llvm.amdgcn.buffer.load.format(<4 x i32> %0, i32 %1, i32 0, i32 0, i32 0, i1 1, i1 0)
+  %data_slc = call <4 x float> @llvm.amdgcn.buffer.load.format(<4 x i32> %0, i32 %1, i32 0, i32 0, i32 0, i1 0, i1 1)
+  %r0 = insertvalue {<4 x float>, <4 x float>, <4 x float>} undef, <4 x float> %data, 0
+  %r1 = insertvalue {<4 x float>, <4 x float>, <4 x float>} %r0, <4 x float> %data_glc, 1
+  %r2 = insertvalue {<4 x float>, <4 x float>, <4 x float>} %r1, <4 x float> %data_slc, 2
+  ret {<4 x float>, <4 x float>, <4 x float>} %r2
+}
+
+;CHECK-LABEL: {{^}}buffer_load_immoffs:
+;CHECK: buffer_load_format_xyzw v[0:3], s[0:3], s4 offset:42
+;CHECK: s_waitcnt
+define <4 x float> @buffer_load_immoffs(<4 x i32> inreg, i32 inreg) #0 {
+main_body:
+  %data = call <4 x float> @llvm.amdgcn.buffer.load.format(<4 x i32> %0, i32 %1, i32 42, i32 0, i32 0, i1 0, i1 0)
+  ret <4 x float> %data
+}
+
+;CHECK-LABEL: {{^}}buffer_load_idx:
+;CHECK: buffer_load_format_xyzw v[0:3], v0, s[0:3], 0 idxen
+;CHECK: s_waitcnt
+define <4 x float> @buffer_load_idx(<4 x i32> inreg, i32) #0 {
+main_body:
+  %data = call <4 x float> @llvm.amdgcn.buffer.load.format(<4 x i32> %0, i32 0, i32 0, i32 %1, i32 0, i1 0, i1 0)
+  ret <4 x float> %data
+}
+
+;CHECK-LABEL: {{^}}buffer_load_ofs:
+;CHECK: buffer_load_format_xyzw v[0:3], v0, s[0:3], 0 offen
+;CHECK: s_waitcnt
+define <4 x float> @buffer_load_ofs(<4 x i32> inreg, i32) #0 {
+main_body:
+  %data = call <4 x float> @llvm.amdgcn.buffer.load.format(<4 x i32> %0, i32 0, i32 0, i32 0, i32 %1, i1 0, i1 0)
+  ret <4 x float> %data
+}
+
+;CHECK-LABEL: {{^}}buffer_load_both:
+;CHECK: buffer_load_format_xyzw v[0:3], v[0:1], s[0:3], 0 idxen offen
+;CHECK: s_waitcnt
+define <4 x float> @buffer_load_both(<4 x i32> inreg, i32, i32) #0 {
+main_body:
+  %data = call <4 x float> @llvm.amdgcn.buffer.load.format(<4 x i32> %0, i32 0, i32 0, i32 %1, i32 %2, i1 0, i1 0)
+  ret <4 x float> %data
+}
+
+;CHECK-LABEL: {{^}}buffer_load_both_reversed:
+;CHECK: v_mov_b32_e32 v2, v0
+;CHECK: buffer_load_format_xyzw v[0:3], v[1:2], s[0:3], 0 idxen offen
+;CHECK: s_waitcnt
+define <4 x float> @buffer_load_both_reversed(<4 x i32> inreg, i32, i32) #0 {
+main_body:
+  %data = call <4 x float> @llvm.amdgcn.buffer.load.format(<4 x i32> %0, i32 0, i32 0, i32 %2, i32 %1, i1 0, i1 0)
+  ret <4 x float> %data
+}
+
+declare <4 x float> @llvm.amdgcn.buffer.load.format(<4 x i32>, i32, i32, i32, i32, i1, i1) #1
+
+attributes #0 = { "ShaderType"="0" }
+attributes #1 = { nounwind readonly }
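The BOTHEN forms consume vindex and voffset as a single 64-bit register pair (vindex in sub0, voffset in sub1), so when the two values do not already sit in adjacent registers in that order the compiler must insert a copy first; that is what the *_reversed tests check. A sketch of the reversed case, with placeholder values:

    ; %idx and %ofs arrive in the wrong order for the pair, so a v_mov_b32
    ; precedes the idxen offen load, as in buffer_load_both_reversed above.
    %data = call <4 x float> @llvm.amdgcn.buffer.load.format(<4 x i32> %rsrc, i32 0, i32 0, i32 %idx, i32 %ofs, i1 0, i1 0)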
Index: llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.store.format.ll
===================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.store.format.ll
+++ llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.store.format.ll
@@ -0,0 +1,78 @@
+;RUN: llc < %s -march=amdgcn -mcpu=verde -verify-machineinstrs | FileCheck %s
+;RUN: llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck %s
+
+;CHECK-LABEL: {{^}}buffer_store:
+;CHECK: buffer_store_format_xyzw v[0:3], s[0:3], s4
+;CHECK: buffer_store_format_xyzw v[4:7], s[0:3], s4 glc
+;CHECK: buffer_store_format_xyzw v[8:11], s[0:3], s4 slc
+define void @buffer_store(<4 x i32> inreg, i32 inreg, <4 x float>, <4 x float>, <4 x float>) #0 {
+main_body:
+  call void @llvm.amdgcn.buffer.store.format.v4f32(<4 x float> %2, <4 x i32> %0, i32 %1, i32 0, i32 0, i32 0, i1 0, i1 0)
+  call void @llvm.amdgcn.buffer.store.format.v4f32(<4 x float> %3, <4 x i32> %0, i32 %1, i32 0, i32 0, i32 0, i1 1, i1 0)
+  call void @llvm.amdgcn.buffer.store.format.v4f32(<4 x float> %4, <4 x i32> %0, i32 %1, i32 0, i32 0, i32 0, i1 0, i1 1)
+  ret void
+}
+
+;CHECK-LABEL: {{^}}buffer_store_immoffs:
+;CHECK: buffer_store_format_xyzw v[0:3], s[0:3], s4 offset:42
+define void @buffer_store_immoffs(<4 x i32> inreg, i32 inreg, <4 x float>) #0 {
+main_body:
+  call void @llvm.amdgcn.buffer.store.format.v4f32(<4 x float> %2, <4 x i32> %0, i32 %1, i32 42, i32 0, i32 0, i1 0, i1 0)
+  ret void
+}
+
+;CHECK-LABEL: {{^}}buffer_store_idx:
+;CHECK: buffer_store_format_xyzw v[0:3], v4, s[0:3], 0 idxen
+define void @buffer_store_idx(<4 x i32> inreg, i32 inreg, <4 x float>, i32) #0 {
+main_body:
+  call void @llvm.amdgcn.buffer.store.format.v4f32(<4 x float> %2, <4 x i32> %0, i32 0, i32 0, i32 %3, i32 0, i1 0, i1 0)
+  ret void
+}
+
+;CHECK-LABEL: {{^}}buffer_store_ofs:
+;CHECK: buffer_store_format_xyzw v[0:3], v4, s[0:3], 0 offen
+define void @buffer_store_ofs(<4 x i32> inreg, i32 inreg, <4 x float>, i32) #0 {
+main_body:
+  call void @llvm.amdgcn.buffer.store.format.v4f32(<4 x float> %2, <4 x i32> %0, i32 0, i32 0, i32 0, i32 %3, i1 0, i1 0)
+  ret void
+}
+
+;CHECK-LABEL: {{^}}buffer_store_both:
+;CHECK: buffer_store_format_xyzw v[0:3], v[4:5], s[0:3], 0 idxen offen
+define void @buffer_store_both(<4 x i32> inreg, i32 inreg, <4 x float>, i32, i32) #0 {
+main_body:
+  call void @llvm.amdgcn.buffer.store.format.v4f32(<4 x float> %2, <4 x i32> %0, i32 0, i32 0, i32 %3, i32 %4, i1 0, i1 0)
+  ret void
+}
+
+;CHECK-LABEL: {{^}}buffer_store_both_reversed:
+;CHECK: v_mov_b32_e32 v6, v4
+;CHECK: buffer_store_format_xyzw v[0:3], v[5:6], s[0:3], 0 idxen offen
+define void @buffer_store_both_reversed(<4 x i32> inreg, i32 inreg, <4 x float>, i32, i32) #0 {
+main_body:
+  call void @llvm.amdgcn.buffer.store.format.v4f32(<4 x float> %2, <4 x i32> %0, i32 0, i32 0, i32 %4, i32 %3, i1 0, i1 0)
+  ret void
+}
+
+; Ideally, the register allocator would avoid the wait here
+;
+;CHECK-LABEL: {{^}}buffer_store_wait:
+;CHECK: buffer_store_format_xyzw v[0:3], v4, s[0:3], 0 idxen
+;CHECK: s_waitcnt vmcnt(0) expcnt(0)
+;CHECK: buffer_load_format_xyzw v[0:3], v5, s[0:3], 0 idxen
+;CHECK: s_waitcnt vmcnt(0)
+;CHECK: buffer_store_format_xyzw v[0:3], v6, s[0:3], 0 idxen
+define void @buffer_store_wait(<4 x i32> inreg, i32 inreg, <4 x float>, i32, i32, i32) #0 {
+main_body:
+  call void @llvm.amdgcn.buffer.store.format.v4f32(<4 x float> %2, <4 x i32> %0, i32 0, i32 0, i32 %3, i32 0, i1 0, i1 0)
+  %data = call <4 x float> @llvm.amdgcn.buffer.load.format(<4 x i32> %0, i32 0, i32 0, i32 %4, i32 0, i1 0, i1 0)
+  call void @llvm.amdgcn.buffer.store.format.v4f32(<4 x float> %data, <4 x i32> %0, i32 0, i32 0, i32 %5, i32 0, i1 0, i1 0)
+  ret void
+}
+
+declare void @llvm.amdgcn.buffer.store.format.v4f32(<4 x float>, <4 x i32>, i32, i32, i32, i32, i1, i1) #1
+declare <4 x float> @llvm.amdgcn.buffer.load.format(<4 x i32>, i32, i32, i32, i32, i1, i1) #2
+
+attributes #0 = { "ShaderType"="0" }
+attributes #1 = { nounwind }
+attributes #2 = { nounwind readonly }
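The buffer_store_wait test above pins down the wait handling: when a load's result registers reuse the registers of an earlier store's data, an s_waitcnt must separate the operations. A minimal IR round trip of that shape (placeholder %rsrc, %data, %i0, %i1):

    ; Store through the descriptor, then load back through it; the CHECK lines
    ; in buffer_store_wait expect s_waitcnt between the resulting instructions.
    call void @llvm.amdgcn.buffer.store.format.v4f32(<4 x float> %data, <4 x i32> %rsrc, i32 0, i32 0, i32 %i0, i32 0, i1 0, i1 0)
    %out = call <4 x float> @llvm.amdgcn.buffer.load.format(<4 x i32> %rsrc, i32 0, i32 0, i32 %i1, i32 0, i1 0, i1 0)

As the test's own comment notes, this wait stems from register reuse, and ideally the register allocator would avoid it.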