Index: lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
===================================================================
--- lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
+++ lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
@@ -113,8 +113,11 @@
   bool SelectMUBUFAddr64(SDValue Addr, SDValue &SRsrc, SDValue &VAddr,
                          SDValue &SOffset, SDValue &Offset, SDValue &SLC) const;
-  bool SelectMUBUFScratch(SDValue Addr, SDValue &RSrc, SDValue &VAddr,
-                          SDValue &SOffset, SDValue &ImmOffset) const;
+  bool SelectMUBUFScratchOffen(SDValue Addr, SDValue &RSrc, SDValue &VAddr,
+                               SDValue &SOffset, SDValue &ImmOffset) const;
+  bool SelectMUBUFScratchOffset(SDValue Addr, SDValue &SRsrc, SDValue &Soffset,
+                                SDValue &Offset) const;
+
   bool SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc, SDValue &SOffset,
                          SDValue &Offset, SDValue &GLC, SDValue &SLC,
                          SDValue &TFE) const;
@@ -947,8 +950,12 @@
   return true;
 }
 
+static bool isLegalMUBUFImmOffset(unsigned Imm) {
+  return isUInt<12>(Imm);
+}
+
 static bool isLegalMUBUFImmOffset(const ConstantSDNode *Imm) {
-  return isUInt<12>(Imm->getZExtValue());
+  return isLegalMUBUFImmOffset(Imm->getZExtValue());
 }
 
 bool AMDGPUDAGToDAGISel::SelectMUBUF(SDValue Addr, SDValue &Ptr,
@@ -1070,9 +1077,9 @@
   return N;
 }
 
-bool AMDGPUDAGToDAGISel::SelectMUBUFScratch(SDValue Addr, SDValue &Rsrc,
-                                            SDValue &VAddr, SDValue &SOffset,
-                                            SDValue &ImmOffset) const {
+bool AMDGPUDAGToDAGISel::SelectMUBUFScratchOffen(SDValue Addr, SDValue &Rsrc,
+                                                 SDValue &VAddr, SDValue &SOffset,
+                                                 SDValue &ImmOffset) const {
   SDLoc DL(Addr);
   MachineFunction &MF = CurDAG->getMachineFunction();
@@ -1081,8 +1088,22 @@
   Rsrc = CurDAG->getRegister(Info->getScratchRSrcReg(), MVT::v4i32);
   SOffset = CurDAG->getRegister(Info->getScratchWaveOffsetReg(), MVT::i32);
 
-  // (add n0, c1)
+  if (ConstantSDNode *CAddr = dyn_cast<ConstantSDNode>(Addr)) {
+    unsigned Imm = CAddr->getZExtValue();
+    assert(!isLegalMUBUFImmOffset(Imm) &&
+           "should have been selected by other pattern");
+
+    SDValue HighBits = CurDAG->getTargetConstant(Imm & ~4095, DL, MVT::i32);
+    MachineSDNode *MovHighBits = CurDAG->getMachineNode(AMDGPU::V_MOV_B32_e32,
+                                                        DL, MVT::i32, HighBits);
+    VAddr = SDValue(MovHighBits, 0);
+    ImmOffset = CurDAG->getTargetConstant(Imm & 4095, DL, MVT::i16);
+    return true;
+  }
+
+  if (CurDAG->isBaseWithConstantOffset(Addr)) {
+    // (add n0, c1)
+
     SDValue N0 = Addr.getOperand(0);
     SDValue N1 = Addr.getOperand(1);
@@ -1101,6 +1122,24 @@
   return true;
 }
 
+bool AMDGPUDAGToDAGISel::SelectMUBUFScratchOffset(SDValue Addr,
+                                                  SDValue &SRsrc,
+                                                  SDValue &SOffset,
+                                                  SDValue &Offset) const {
+  ConstantSDNode *CAddr = dyn_cast<ConstantSDNode>(Addr);
+  if (!CAddr || !isLegalMUBUFImmOffset(CAddr))
+    return false;
+
+  SDLoc DL(Addr);
+  MachineFunction &MF = CurDAG->getMachineFunction();
+  const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
+
+  SRsrc = CurDAG->getRegister(Info->getScratchRSrcReg(), MVT::v4i32);
+  SOffset = CurDAG->getRegister(Info->getScratchWaveOffsetReg(), MVT::i32);
+  Offset = CurDAG->getTargetConstant(CAddr->getZExtValue(), DL, MVT::i16);
+  return true;
+}
+
 bool AMDGPUDAGToDAGISel::SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc,
                                            SDValue &SOffset, SDValue &Offset,
                                            SDValue &GLC, SDValue &SLC,
Index: lib/Target/AMDGPU/BUFInstructions.td
===================================================================
--- lib/Target/AMDGPU/BUFInstructions.td
+++ lib/Target/AMDGPU/BUFInstructions.td
@@ -11,7 +11,9 @@
 def MUBUFAddr64 : ComplexPattern;
 def MUBUFAddr64Atomic : ComplexPattern;
 
-def MUBUFScratch : ComplexPattern;
+def MUBUFScratchOffen : ComplexPattern;
+def MUBUFScratchOffset : ComplexPattern;
+
 def MUBUFOffset : ComplexPattern;
 def MUBUFOffsetNoGLC : ComplexPattern;
 def MUBUFOffsetAtomic : ComplexPattern;
@@ -964,21 +966,30 @@
 } // End Predicates = [Has16BitInsts]
 
-class MUBUFScratchLoadPat <MUBUF_Pseudo Instr, ValueType vt, PatFrag ld> : Pat <
-  (vt (ld (MUBUFScratch v4i32:$srsrc, i32:$vaddr,
-                        i32:$soffset, u16imm:$offset))),
-  (Instr $vaddr, $srsrc, $soffset, $offset, 0, 0, 0)
->;
+multiclass MUBUFScratchLoadPat <MUBUF_Pseudo InstrOffen, MUBUF_Pseudo InstrOffset,
+                                ValueType vt, PatFrag ld> {
+  def : Pat <
+    (vt (ld (MUBUFScratchOffen v4i32:$srsrc, i32:$vaddr,
+                               i32:$soffset, u16imm:$offset))),
+    (InstrOffen $vaddr, $srsrc, $soffset, $offset, 0, 0, 0)
+  >;
+
+  def : Pat <
+    (vt (ld (MUBUFScratchOffset v4i32:$srsrc, i32:$soffset, u16imm:$offset))),
+    (InstrOffset $srsrc, $soffset, $offset, 0, 0, 0)
+  >;
+}
 
-def : MUBUFScratchLoadPat ;
-def : MUBUFScratchLoadPat ;
-def : MUBUFScratchLoadPat ;
-def : MUBUFScratchLoadPat ;
-def : MUBUFScratchLoadPat ;
-def : MUBUFScratchLoadPat ;
-def : MUBUFScratchLoadPat ;
-def : MUBUFScratchLoadPat ;
-def : MUBUFScratchLoadPat ;
+defm : MUBUFScratchLoadPat ;
+defm : MUBUFScratchLoadPat ;
+defm : MUBUFScratchLoadPat ;
+defm : MUBUFScratchLoadPat ;
+defm : MUBUFScratchLoadPat ;
+defm : MUBUFScratchLoadPat ;
+defm : MUBUFScratchLoadPat ;
+defm : MUBUFScratchLoadPat ;
+defm : MUBUFScratchLoadPat ;
 
 // BUFFER_LOAD_DWORD*, addr64=0
 multiclass MUBUF_Load_Dword ;
 defm : MUBUFStore_Pattern ;
 
-class MUBUFScratchStorePat <MUBUF_Pseudo Instr, ValueType vt, PatFrag st> : Pat <
-  (st vt:$value, (MUBUFScratch v4i32:$srsrc, i32:$vaddr, i32:$soffset,
-                               u16imm:$offset)),
-  (Instr $value, $vaddr, $srsrc, $soffset, $offset, 0, 0, 0)
->;
+multiclass MUBUFScratchStorePat <MUBUF_Pseudo InstrOffen, MUBUF_Pseudo InstrOffset,
+                                 ValueType vt, PatFrag st> {
+  def : Pat <
+    (st vt:$value, (MUBUFScratchOffen v4i32:$srsrc, i32:$vaddr,
+                                      i32:$soffset, u16imm:$offset)),
+    (InstrOffen $value, $vaddr, $srsrc, $soffset, $offset, 0, 0, 0)
+  >;
+
+  def : Pat <
+    (st vt:$value, (MUBUFScratchOffset v4i32:$srsrc, i32:$soffset,
+                                       u16imm:$offset)),
+    (InstrOffset $value, $srsrc, $soffset, $offset, 0, 0, 0)
+  >;
+}
 
-def : MUBUFScratchStorePat ;
-def : MUBUFScratchStorePat ;
-def : MUBUFScratchStorePat ;
-def : MUBUFScratchStorePat ;
-def : MUBUFScratchStorePat ;
-def : MUBUFScratchStorePat ;
-def : MUBUFScratchStorePat ;
+defm : MUBUFScratchStorePat ;
+defm : MUBUFScratchStorePat ;
+defm : MUBUFScratchStorePat ;
+defm : MUBUFScratchStorePat ;
+defm : MUBUFScratchStorePat ;
+defm : MUBUFScratchStorePat ;
+defm : MUBUFScratchStorePat ;
 
 //===----------------------------------------------------------------------===//
 // MTBUF Patterns
Index: test/CodeGen/AMDGPU/mubuf-offset-private.ll
===================================================================
--- /dev/null
+++ test/CodeGen/AMDGPU/mubuf-offset-private.ll
@@ -0,0 +1,136 @@
+; RUN: llc -march=amdgcn -mattr=+max-private-element-size-16 < %s | FileCheck -enable-var-scope -check-prefix=GCN -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=fiji -mattr=+max-private-element-size-16 < %s | FileCheck -enable-var-scope -check-prefix=GCN -check-prefix=VI %s
+; RUN: llc -march=amdgcn -mcpu=gfx900 -mattr=+max-private-element-size-16 < %s | FileCheck -enable-var-scope -check-prefix=GCN -check-prefix=VI %s
+
+; Test addressing modes when the scratch base is not a frame index.
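The functions below use inttoptr constants as private pointers, so the address reaching instruction selection is a plain constant rather than a frame index. As a reader's aid, here is a minimal standalone C++ sketch of the decision the new SelectMUBUFScratchOffen / SelectMUBUFScratchOffset selectors make for such constants; apart from isLegalMUBUFImmOffset, the struct and helper names are illustrative and not part of the patch.

// Standalone sketch (not part of the patch): how a constant private address is
// mapped onto the two MUBUF scratch forms added here.
#include <cassert>
#include <cstdio>
#include <initializer_list>

// Same check as isLegalMUBUFImmOffset in the patch: the MUBUF immediate
// offset field is 12 bits wide.
static bool isLegalMUBUFImmOffset(unsigned Imm) {
  return Imm < 4096; // isUInt<12>(Imm)
}

struct ScratchAddr {
  bool Offen;         // true -> a VGPR address is required (offen form)
  unsigned VAddrBits; // high bits materialized into the VGPR by v_mov_b32
  unsigned ImmOffset; // 12-bit immediate encoded in the instruction
};

// Mirrors SelectMUBUFScratchOffset (in-range constants) and the new constant
// case in SelectMUBUFScratchOffen (out-of-range constants).
static ScratchAddr splitScratchOffset(unsigned Addr) {
  if (isLegalMUBUFImmOffset(Addr))
    return {false, 0, Addr};                  // off ..., offset:Addr
  return {true, Addr & ~4095u, Addr & 4095u}; // v_mov_b32 of high bits, offen
}

int main() {
  // The boundary cases exercised by the tests in this file.
  for (unsigned Addr : {8u, 4095u, 4096u, 4097u}) {
    ScratchAddr S = splitScratchOffset(Addr);
    assert(S.VAddrBits + S.ImmOffset == Addr && "split must be lossless");
    std::printf("addr %u -> %s vaddr=0x%x offset:%u\n", Addr,
                S.Offen ? "offen" : "off", S.VAddrBits, S.ImmOffset);
  }
}

Addresses 8 and 4095 fit the 12-bit immediate and select the "off" forms checked below; 4096 and 4097 fall back to the offen form with 0x1000 materialized by v_mov_b32.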
+
+; GCN-LABEL: {{^}}store_private_offset_i8:
+; GCN: buffer_store_byte v{{[0-9]+}}, off, s[4:7], s8 offset:8
+define amdgpu_kernel void @store_private_offset_i8() #0 {
+  store volatile i8 5, i8* inttoptr (i32 8 to i8*)
+  ret void
+}
+
+; GCN-LABEL: {{^}}store_private_offset_i16:
+; GCN: buffer_store_short v{{[0-9]+}}, off, s[4:7], s8 offset:8
+define amdgpu_kernel void @store_private_offset_i16() #0 {
+  store volatile i16 5, i16* inttoptr (i32 8 to i16*)
+  ret void
+}
+
+; GCN-LABEL: {{^}}store_private_offset_i32:
+; GCN: buffer_store_dword v{{[0-9]+}}, off, s[4:7], s8 offset:8
+define amdgpu_kernel void @store_private_offset_i32() #0 {
+  store volatile i32 5, i32* inttoptr (i32 8 to i32*)
+  ret void
+}
+
+; GCN-LABEL: {{^}}store_private_offset_v2i32:
+; GCN: buffer_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, off, s[4:7], s8 offset:8
+define amdgpu_kernel void @store_private_offset_v2i32() #0 {
+  store volatile <2 x i32> <i32 5, i32 10>, <2 x i32>* inttoptr (i32 8 to <2 x i32>*)
+  ret void
+}
+
+; GCN-LABEL: {{^}}store_private_offset_v4i32:
+; GCN: buffer_store_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, off, s[4:7], s8 offset:8
+define amdgpu_kernel void @store_private_offset_v4i32() #0 {
+  store volatile <4 x i32> <i32 5, i32 10, i32 15, i32 20>, <4 x i32>* inttoptr (i32 8 to <4 x i32>*)
+  ret void
+}
+
+; GCN-LABEL: {{^}}load_private_offset_i8:
+; GCN: buffer_load_ubyte v{{[0-9]+}}, off, s[4:7], s8 offset:8
+define amdgpu_kernel void @load_private_offset_i8() #0 {
+  %load = load volatile i8, i8* inttoptr (i32 8 to i8*)
+  ret void
+}
+
+; GCN-LABEL: {{^}}sextload_private_offset_i8:
+; GCN: buffer_load_sbyte v{{[0-9]+}}, off, s[4:7], s8 offset:8
+define amdgpu_kernel void @sextload_private_offset_i8(i32 addrspace(1)* %out) #0 {
+  %load = load volatile i8, i8* inttoptr (i32 8 to i8*)
+  %sextload = sext i8 %load to i32
+  store i32 %sextload, i32 addrspace(1)* undef
+  ret void
+}
+
+; GCN-LABEL: {{^}}zextload_private_offset_i8:
+; GCN: buffer_load_ubyte v{{[0-9]+}}, off, s[4:7], s8 offset:8
+define amdgpu_kernel void @zextload_private_offset_i8(i32 addrspace(1)* %out) #0 {
+  %load = load volatile i8, i8* inttoptr (i32 8 to i8*)
+  %zextload = zext i8 %load to i32
+  store i32 %zextload, i32 addrspace(1)* undef
+  ret void
+}
+
+; GCN-LABEL: {{^}}load_private_offset_i16:
+; GCN: buffer_load_ushort v{{[0-9]+}}, off, s[4:7], s8 offset:8
+define amdgpu_kernel void @load_private_offset_i16() #0 {
+  %load = load volatile i16, i16* inttoptr (i32 8 to i16*)
+  ret void
+}
+
+; GCN-LABEL: {{^}}sextload_private_offset_i16:
+; GCN: buffer_load_sshort v{{[0-9]+}}, off, s[4:7], s8 offset:8
+define amdgpu_kernel void @sextload_private_offset_i16(i32 addrspace(1)* %out) #0 {
+  %load = load volatile i16, i16* inttoptr (i32 8 to i16*)
+  %sextload = sext i16 %load to i32
+  store i32 %sextload, i32 addrspace(1)* undef
+  ret void
+}
+
+; GCN-LABEL: {{^}}zextload_private_offset_i16:
+; GCN: buffer_load_ushort v{{[0-9]+}}, off, s[4:7], s8 offset:8
+define amdgpu_kernel void @zextload_private_offset_i16(i32 addrspace(1)* %out) #0 {
+  %load = load volatile i16, i16* inttoptr (i32 8 to i16*)
+  %zextload = zext i16 %load to i32
+  store i32 %zextload, i32 addrspace(1)* undef
+  ret void
+}
+
+; GCN-LABEL: {{^}}load_private_offset_i32:
+; GCN: buffer_load_dword v{{[0-9]+}}, off, s[4:7], s8 offset:8
+define amdgpu_kernel void @load_private_offset_i32() #0 {
+  %load = load volatile i32, i32* inttoptr (i32 8 to i32*)
+  ret void
+}
+
+; GCN-LABEL: {{^}}load_private_offset_v2i32:
+; GCN: buffer_load_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, off, s[4:7], s8 offset:8
+define amdgpu_kernel void @load_private_offset_v2i32() #0 {
+  %load = load volatile <2 x i32>, <2 x i32>* inttoptr (i32 8 to <2 x i32>*)
+  ret void
+}
+
+; GCN-LABEL: {{^}}load_private_offset_v4i32:
+; GCN: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, off, s[4:7], s8 offset:8
+define amdgpu_kernel void @load_private_offset_v4i32() #0 {
+  %load = load volatile <4 x i32>, <4 x i32>* inttoptr (i32 8 to <4 x i32>*)
+  ret void
+}
+
+; GCN-LABEL: {{^}}store_private_offset_i8_max_offset:
+; GCN: buffer_store_byte v{{[0-9]+}}, off, s[4:7], s8 offset:4095
+define amdgpu_kernel void @store_private_offset_i8_max_offset() #0 {
+  store volatile i8 5, i8* inttoptr (i32 4095 to i8*)
+  ret void
+}
+
+; GCN-LABEL: {{^}}store_private_offset_i8_max_offset_plus1:
+; GCN: v_mov_b32_e32 [[OFFSET:v[0-9]+]], 0x1000
+; GCN: buffer_store_byte v{{[0-9]+}}, [[OFFSET]], s[4:7], s8 offen{{$}}
+define amdgpu_kernel void @store_private_offset_i8_max_offset_plus1() #0 {
+  store volatile i8 5, i8* inttoptr (i32 4096 to i8*)
+  ret void
+}
+
+; GCN-LABEL: {{^}}store_private_offset_i8_max_offset_plus2:
+; GCN: v_mov_b32_e32 [[OFFSET:v[0-9]+]], 0x1000
+; GCN: buffer_store_byte v{{[0-9]+}}, [[OFFSET]], s[4:7], s8 offen offset:1{{$}}
+define amdgpu_kernel void @store_private_offset_i8_max_offset_plus2() #0 {
+  store volatile i8 5, i8* inttoptr (i32 4097 to i8*)
+  ret void
+}
+
+attributes #0 = { nounwind }
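A closing note on how the checked operands combine: for these MUBUF scratch accesses the resource descriptor in s[4:7] supplies the buffer base, while the SGPR offset (s8, the scratch wave offset) and, in the offen forms, the VGPR address are added on top of the 12-bit immediate. Below is a rough model of that composition, not taken from the patch and ignoring the per-lane swizzling scratch buffers use; the zero wave offset is just for simplicity.

// Rough model of the effective scratch offset for the forms checked above:
// soffset + (offen ? vaddr : 0) + immediate offset, relative to the base held
// in the scratch resource descriptor.
#include <cstdio>

static unsigned effectiveScratchOffset(unsigned SOffset, bool Offen,
                                       unsigned VAddr, unsigned ImmOffset) {
  return SOffset + (Offen ? VAddr : 0) + ImmOffset;
}

int main() {
  // store_private_offset_i8:                  off, offset:8         -> 8
  // store_private_offset_i8_max_offset_plus2: offen 0x1000 offset:1 -> 4097
  std::printf("%u\n", effectiveScratchOffset(0, false, 0, 8));
  std::printf("%u\n", effectiveScratchOffset(0, true, 0x1000, 1));
}

Either way the three max_offset tests pin the split exactly at the 4095/4096 boundary enforced by isLegalMUBUFImmOffset.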