Index: llvm/lib/Target/AMDGPU/AMDGPU.td =================================================================== --- llvm/lib/Target/AMDGPU/AMDGPU.td +++ llvm/lib/Target/AMDGPU/AMDGPU.td @@ -1060,6 +1060,11 @@ "Subtarget->getGeneration() == AMDGPUSubtarget::GFX10">, AssemblerPredicate<(all_of (not FeatureGCN3Encoding), FeatureCIInsts)>; +def isGFX7GFX8 : + Predicate<"Subtarget->getGeneration() == AMDGPUSubtarget::SEA_ISLANDS ||" + "Subtarget->getGeneration() == AMDGPUSubtarget::VOLCANIC_ISLANDS">, + AssemblerPredicate<(all_of FeatureSouthernIslands, FeatureCIInsts)>; + def isGFX7GFX8GFX9 : Predicate<"Subtarget->getGeneration() == AMDGPUSubtarget::SEA_ISLANDS ||" "Subtarget->getGeneration() == AMDGPUSubtarget::VOLCANIC_ISLANDS ||" Index: llvm/lib/Target/AMDGPU/AMDGPUGISel.td =================================================================== --- llvm/lib/Target/AMDGPU/AMDGPUGISel.td +++ llvm/lib/Target/AMDGPU/AMDGPUGISel.td @@ -92,6 +92,10 @@ GIComplexOperandMatcher, GIComplexPatternEquiv; +def gi_ds_128bit_8byte_aligned : + GIComplexOperandMatcher, + GIComplexPatternEquiv; + def gi_mubuf_addr64 : GIComplexOperandMatcher, GIComplexPatternEquiv; Index: llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp =================================================================== --- llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp +++ llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp @@ -205,6 +205,8 @@ bool SelectDS1Addr1Offset(SDValue Ptr, SDValue &Base, SDValue &Offset) const; bool SelectDS64Bit4ByteAligned(SDValue Ptr, SDValue &Base, SDValue &Offset0, SDValue &Offset1) const; + bool SelectDS128Bit8ByteAligned(SDValue Ptr, SDValue &Base, SDValue &Offset0, + SDValue &Offset1) const; bool SelectMUBUF(SDValue Addr, SDValue &SRsrc, SDValue &VAddr, SDValue &SOffset, SDValue &Offset, SDValue &Offen, SDValue &Idxen, SDValue &Addr64, SDValue &GLC, SDValue &SLC, @@ -1311,6 +1313,86 @@ return true; } +bool AMDGPUDAGToDAGISel::SelectDS128Bit8ByteAligned(SDValue Addr, SDValue &Base, + SDValue &Offset0, 
+ SDValue &Offset1) const { + SDLoc DL(Addr); + + if (CurDAG->isBaseWithConstantOffset(Addr)) { + SDValue N0 = Addr.getOperand(0); + SDValue N1 = Addr.getOperand(1); + ConstantSDNode *C1 = cast<ConstantSDNode>(N1); + unsigned DWordX2Offset0 = C1->getZExtValue() / 8; + unsigned DWordX2Offset1 = DWordX2Offset0 + 1; + // (add n0, c0) + if (isDSOffsetLegal(N0, DWordX2Offset1, 8)) { + Base = N0; + Offset0 = CurDAG->getTargetConstant(DWordX2Offset0, DL, MVT::i8); + Offset1 = CurDAG->getTargetConstant(DWordX2Offset1, DL, MVT::i8); + return true; + } + } else if (Addr.getOpcode() == ISD::SUB) { + // sub C, x -> add (sub 0, x), C + if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(Addr.getOperand(0))) { + unsigned DWordX2Offset0 = C->getZExtValue() / 8; + unsigned DWordX2Offset1 = DWordX2Offset0 + 1; + + if (isUInt<8>(DWordX2Offset0)) { + SDLoc DL(Addr); + SDValue Zero = CurDAG->getTargetConstant(0, DL, MVT::i32); + + // XXX - This is kind of hacky. Create a dummy sub node so we can check + // the known bits in isDSOffsetLegal. We need to emit the selected node + // here, so this is thrown away. 
+ SDValue Sub = CurDAG->getNode(ISD::SUB, DL, MVT::i32, + Zero, Addr.getOperand(1)); + + if (isDSOffsetLegal(Sub, DWordX2Offset1, 8)) { + SmallVector<SDValue, 3> Opnds; + Opnds.push_back(Zero); + Opnds.push_back(Addr.getOperand(1)); + unsigned SubOp = AMDGPU::V_SUB_I32_e32; + if (Subtarget->hasAddNoCarry()) { + SubOp = AMDGPU::V_SUB_U32_e64; + Opnds.push_back( + CurDAG->getTargetConstant(0, {}, MVT::i1)); // clamp bit + } + + MachineSDNode *MachineSub + = CurDAG->getMachineNode(SubOp, DL, MVT::i32, Opnds); + + Base = SDValue(MachineSub, 0); + Offset0 = CurDAG->getTargetConstant(DWordX2Offset0, DL, MVT::i8); + Offset1 = CurDAG->getTargetConstant(DWordX2Offset1, DL, MVT::i8); + + return true; + } + } + } else if (const ConstantSDNode *CAddr = dyn_cast<ConstantSDNode>(Addr)) { + unsigned DWordX2Offset0 = CAddr->getZExtValue() / 8; + unsigned DWordX2Offset1 = DWordX2Offset0 + 1; + assert(8 * DWordX2Offset0 == CAddr->getZExtValue()); + + if (isUInt<8>(DWordX2Offset0) && isUInt<8>(DWordX2Offset1)) { + SDValue Zero = CurDAG->getTargetConstant(0, DL, MVT::i32); + MachineSDNode *MovZero + = CurDAG->getMachineNode(AMDGPU::V_MOV_B32_e32, + DL, MVT::i32, Zero); + Base = SDValue(MovZero, 0); + Offset0 = CurDAG->getTargetConstant(DWordX2Offset0, DL, MVT::i8); + Offset1 = CurDAG->getTargetConstant(DWordX2Offset1, DL, MVT::i8); + return true; + } + } + + // default case + Base = Addr; + Offset0 = CurDAG->getTargetConstant(0, DL, MVT::i8); + Offset1 = CurDAG->getTargetConstant(1, DL, MVT::i8); + return true; +} + bool AMDGPUDAGToDAGISel::SelectMUBUF(SDValue Addr, SDValue &Ptr, SDValue &VAddr, SDValue &SOffset, SDValue &Offset, SDValue &Offen, Index: llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.h =================================================================== --- llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.h +++ llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.h @@ -199,6 +199,11 @@ InstructionSelector::ComplexRendererFns selectDS64Bit4ByteAligned(MachineOperand &Root) const; + std::pair<Register, unsigned> + 
selectDS128Bit8ByteAlignedImpl(MachineOperand &Root) const; + InstructionSelector::ComplexRendererFns + selectDS128Bit8ByteAligned(MachineOperand &Root) const; + + std::pair<Register, int64_t> getPtrBaseWithConstantOffset(Register Root, const MachineRegisterInfo &MRI) const; Index: llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp =================================================================== --- llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp +++ llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp @@ -3320,6 +3320,50 @@ return std::make_pair(Root.getReg(), 0); } +InstructionSelector::ComplexRendererFns +AMDGPUInstructionSelector::selectDS128Bit8ByteAligned(MachineOperand &Root) const { + Register Reg; + unsigned Offset; + std::tie(Reg, Offset) = selectDS128Bit8ByteAlignedImpl(Root); + return {{ + [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); }, + [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }, + [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset+1); } + }}; +} + +std::pair<Register, unsigned> +AMDGPUInstructionSelector::selectDS128Bit8ByteAlignedImpl(MachineOperand &Root) const { + const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg()); + if (!RootDef) + return std::make_pair(Root.getReg(), 0); + + int64_t ConstAddr = 0; + + Register PtrBase; + int64_t Offset; + std::tie(PtrBase, Offset) = + getPtrBaseWithConstantOffset(Root.getReg(), *MRI); + + + if (Offset) { + int64_t DWordX2Offset0 = Offset / 8; + int64_t DWordX2Offset1 = DWordX2Offset0 + 1; + if (isDSOffsetLegal(PtrBase, DWordX2Offset1, 8)) { + // (add n0, c0) + return std::make_pair(PtrBase, DWordX2Offset0); + } + } else if (RootDef->getOpcode() == AMDGPU::G_SUB) { + // TODO + + } else if (mi_match(Root.getReg(), *MRI, m_ICst(ConstAddr))) { + // TODO + + } + + return std::make_pair(Root.getReg(), 0); +} + /// If \p Root is a G_PTR_ADD with a G_CONSTANT on the right hand side, return /// the base value with the constant offset. There may be intervening copies /// between \p Root and the identified constant. 
Returns \p Root, 0 if this does Index: llvm/lib/Target/AMDGPU/AMDGPUInstructions.td =================================================================== --- llvm/lib/Target/AMDGPU/AMDGPUInstructions.td +++ llvm/lib/Target/AMDGPU/AMDGPUInstructions.td @@ -485,17 +485,28 @@ defm atomic_load_fadd : ret_noret_binary_atomic_op; defm AMDGPUatomic_cmp_swap : ret_noret_binary_atomic_op; +def load_align4_local : PatFrag<(ops node:$ptr), (load_local node:$ptr)>, + Aligned<4> { + let IsLoad = 1; + let IsNonExtLoad = 1; +} -def load_align8_local : PatFrag <(ops node:$ptr), (load_local node:$ptr)> { +def load_align8_local : PatFrag<(ops node:$ptr), (load_local node:$ptr)>, + Aligned<8> { let IsLoad = 1; let IsNonExtLoad = 1; - let MinAlignment = 8; } -def load_align16_local : PatFrag <(ops node:$ptr), (load_local node:$ptr)> { +def load_align16_local : PatFrag<(ops node:$ptr), (load_local node:$ptr)>, + Aligned<16> { let IsLoad = 1; let IsNonExtLoad = 1; - let MinAlignment = 16; +} + +def store_align4_local: PatFrag<(ops node:$val, node:$ptr), + (store_local node:$val, node:$ptr)>, Aligned<4> { + let IsStore = 1; + let IsTruncStore = 0; } def store_align8_local: PatFrag<(ops node:$val, node:$ptr), Index: llvm/lib/Target/AMDGPU/DSInstructions.td =================================================================== --- llvm/lib/Target/AMDGPU/DSInstructions.td +++ llvm/lib/Target/AMDGPU/DSInstructions.td @@ -680,7 +680,29 @@ defm : DSReadPat_mc ; } -defm : DSReadPat_mc ; +let SubtargetPredicate = isGFX7GFX8 in { + +foreach vt = VReg_96.RegTypes in { +defm : DSReadPat_mc ; +} + +foreach vt = VReg_128.RegTypes in { +defm : DSReadPat_mc ; +} + +} + +let SubtargetPredicate = isGFX9Plus in { + +foreach vt = VReg_96.RegTypes in { +defm : DSReadPat_mc ; +} + +foreach vt = VReg_128.RegTypes in { +defm : DSReadPat_mc ; +} + +} } // End AddedComplexity = 100 @@ -761,6 +783,18 @@ (i1 0)) >; +class DS128Bit8ByteAlignedReadPat : GCNPat < + (vt:$value (frag (DS128Bit8ByteAligned i32:$ptr, 
i8:$offset0, i8:$offset1))), + (inst $ptr, $offset0, $offset1, (i1 0)) +>; + +class DS128Bit8ByteAlignedWritePat : GCNPat< + (frag vt:$value, (DS128Bit8ByteAligned i32:$ptr, i8:$offset0, i8:$offset1)), + (inst $ptr, (i64 (EXTRACT_SUBREG VReg_128:$value, sub0_sub1)), + (i64 (EXTRACT_SUBREG VReg_128:$value, sub2_sub3)), $offset0, $offset1, + (i1 0)) +>; + multiclass DS64Bit4ByteAlignedPat_mc { let OtherPredicates = [LDSRequiresM0Init, isGFX7Plus] in { def : DS64Bit4ByteAlignedReadPat; @@ -773,19 +807,57 @@ } } +multiclass DS128Bit8ByteAlignedPat_mc { + let OtherPredicates = [LDSRequiresM0Init, isGFX7Plus] in { + def : DS128Bit8ByteAlignedReadPat; + def : DS128Bit8ByteAlignedWritePat; + } + + let OtherPredicates = [NotLDSRequiresM0Init] in { + def : DS128Bit8ByteAlignedReadPat; + def : DS128Bit8ByteAlignedWritePat; + } +} + // v2i32 loads are split into i32 loads on SI during lowering, due to a bug // related to bounds checking. foreach vt = VReg_64.RegTypes in { defm : DS64Bit4ByteAlignedPat_mc; } +foreach vt = VReg_128.RegTypes in { +defm : DS128Bit8ByteAlignedPat_mc; +} + let AddedComplexity = 100 in { foreach vt = VReg_64.RegTypes in { defm : DSWritePat_mc ; } -defm : DSWritePat_mc ; +let SubtargetPredicate = isGFX7GFX8 in { + +foreach vt = VReg_96.RegTypes in { +defm : DSWritePat_mc ; +} + +foreach vt = VReg_128.RegTypes in { +defm : DSWritePat_mc ; +} + +} + +let SubtargetPredicate = isGFX9Plus in { + +foreach vt = VReg_96.RegTypes in { +defm : DSWritePat_mc ; +} + +foreach vt = VReg_128.RegTypes in { +defm : DSWritePat_mc ; +} + +} } // End AddedComplexity = 100 class DSAtomicRetPat : GCNPat < Index: llvm/lib/Target/AMDGPU/SIInstrInfo.td =================================================================== --- llvm/lib/Target/AMDGPU/SIInstrInfo.td +++ llvm/lib/Target/AMDGPU/SIInstrInfo.td @@ -449,17 +449,22 @@ def zextloadi16_local_m0 : PatFrag<(ops node:$ptr), (zextloadi16_glue node:$ptr)>; } +def load_align4_local_m0 : PatFrag<(ops node:$ptr), + (load_local_m0 
node:$ptr)>, Aligned<4> { + let IsLoad = 1; + let IsNonExtLoad = 1; +} + def load_align8_local_m0 : PatFrag<(ops node:$ptr), - (load_local_m0 node:$ptr)> { + (load_local_m0 node:$ptr)>, Aligned<8> { let IsLoad = 1; let IsNonExtLoad = 1; - let MinAlignment = 8; } + def load_align16_local_m0 : PatFrag<(ops node:$ptr), - (load_local_m0 node:$ptr)> { + (load_local_m0 node:$ptr)>, Aligned<16> { let IsLoad = 1; let IsNonExtLoad = 1; - let MinAlignment = 16; } } // End IsLoad = 1 @@ -535,20 +540,25 @@ } } -def store_align16_local_m0 : PatFrag < - (ops node:$value, node:$ptr), - (store_local_m0 node:$value, node:$ptr)> { +def store_align4_local_m0 : PatFrag <(ops node:$value, node:$ptr), + (store_local_m0 node:$value, node:$ptr)>, + Aligned<4> { let IsStore = 1; let IsTruncStore = 0; - let MinAlignment = 16; } -def store_align8_local_m0 : PatFrag < - (ops node:$value, node:$ptr), - (store_local_m0 node:$value, node:$ptr)> { +def store_align8_local_m0 : PatFrag <(ops node:$value, node:$ptr), + (store_local_m0 node:$value, node:$ptr)>, + Aligned<8> { + let IsStore = 1; + let IsTruncStore = 0; +} + +def store_align16_local_m0 : PatFrag <(ops node:$value, node:$ptr), + (store_local_m0 node:$value, node:$ptr)>, + Aligned<16> { let IsStore = 1; let IsTruncStore = 0; - let MinAlignment = 8; } let AddressSpaces = StoreAddress_local.AddrSpaces in { @@ -1308,6 +1318,7 @@ def DS1Addr1Offset : ComplexPattern; def DS64Bit4ByteAligned : ComplexPattern; +def DS128Bit8ByteAligned : ComplexPattern; def MOVRELOffset : ComplexPattern; Index: llvm/test/CodeGen/AMDGPU/GlobalISel/load-local.128.ll =================================================================== --- /dev/null +++ llvm/test/CodeGen/AMDGPU/GlobalISel/load-local.128.ll @@ -0,0 +1,304 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=gfx900 -mattr=+unaligned-buffer-access < %s | FileCheck -check-prefixes=GCN,GFX9,GFX9-UNALIGNED %s +; RUN: 
llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=gfx900 -mattr=-unaligned-buffer-access < %s | FileCheck -check-prefixes=GCN,GFX9,GFX9-NOUNALIGNED %s +; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=hawaii -mattr=+unaligned-buffer-access < %s | FileCheck -check-prefixes=GCN,GFX7,GFX7-UNALIGNED %s +; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=hawaii -mattr=-unaligned-buffer-access < %s | FileCheck -check-prefixes=GCN,GFX7,GFX7-NOUNALIGNED %s + +; FIXME: +; XUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=tahiti < %s | FileCheck -check-prefixes=GCN,GFX6 %s + +define <4 x i32> @v_load_lds_v3i32(<4 x i32> addrspace(3)* %ptr) { +; GFX9-LABEL: v_load_lds_v3i32: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: ds_read_b128 v[0:3], v0 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; GFX7-LABEL: v_load_lds_v3i32: +; GFX7: ; %bb.0: +; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX7-NEXT: s_mov_b32 m0, -1 +; GFX7-NEXT: ds_read_b128 v[0:3], v0 +; GFX7-NEXT: s_waitcnt lgkmcnt(0) +; GFX7-NEXT: s_setpc_b64 s[30:31] + %load = load <4 x i32>, <4 x i32> addrspace(3)* %ptr + ret <4 x i32> %load +} + +define <4 x i32> @v_load_lds_v3i32_align1(<4 x i32> addrspace(3)* %ptr) { +; GFX9-LABEL: v_load_lds_v3i32_align1: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: ds_read_u8 v1, v0 +; GFX9-NEXT: ds_read_u8 v2, v0 offset:1 +; GFX9-NEXT: ds_read_u8 v4, v0 offset:2 +; GFX9-NEXT: ds_read_u8 v5, v0 offset:3 +; GFX9-NEXT: ds_read_u8 v6, v0 offset:4 +; GFX9-NEXT: s_mov_b32 s5, 8 +; GFX9-NEXT: s_movk_i32 s4, 0xff +; GFX9-NEXT: s_waitcnt lgkmcnt(3) +; GFX9-NEXT: v_lshlrev_b32_sdwa v2, s5, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_and_or_b32 v1, v1, s4, v2 +; GFX9-NEXT: s_waitcnt lgkmcnt(2) +; GFX9-NEXT: v_and_b32_e32 v2, s4, v4 +; GFX9-NEXT: s_waitcnt lgkmcnt(1) +; GFX9-NEXT: v_and_b32_e32 v4, s4, v5 +; GFX9-NEXT: 
v_lshlrev_b32_e32 v2, 16, v2 +; GFX9-NEXT: v_lshlrev_b32_e32 v4, 24, v4 +; GFX9-NEXT: v_or3_b32 v4, v1, v2, v4 +; GFX9-NEXT: ds_read_u8 v1, v0 offset:5 +; GFX9-NEXT: ds_read_u8 v2, v0 offset:6 +; GFX9-NEXT: ds_read_u8 v5, v0 offset:7 +; GFX9-NEXT: ds_read_u8 v7, v0 offset:8 +; GFX9-NEXT: ds_read_u8 v8, v0 offset:9 +; GFX9-NEXT: v_mov_b32_e32 v3, 0xff +; GFX9-NEXT: s_waitcnt lgkmcnt(4) +; GFX9-NEXT: v_lshlrev_b32_sdwa v1, s5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: s_waitcnt lgkmcnt(3) +; GFX9-NEXT: v_and_b32_e32 v2, v2, v3 +; GFX9-NEXT: s_waitcnt lgkmcnt(2) +; GFX9-NEXT: v_and_b32_e32 v5, v5, v3 +; GFX9-NEXT: v_and_or_b32 v1, v6, s4, v1 +; GFX9-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; GFX9-NEXT: v_lshlrev_b32_e32 v5, 24, v5 +; GFX9-NEXT: v_or3_b32 v1, v1, v2, v5 +; GFX9-NEXT: v_mov_b32_e32 v5, 8 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: v_lshlrev_b32_sdwa v2, v5, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_and_or_b32 v2, v7, v3, v2 +; GFX9-NEXT: ds_read_u8 v6, v0 offset:10 +; GFX9-NEXT: ds_read_u8 v7, v0 offset:11 +; GFX9-NEXT: ds_read_u8 v8, v0 offset:12 +; GFX9-NEXT: ds_read_u8 v9, v0 offset:13 +; GFX9-NEXT: ds_read_u8 v10, v0 offset:14 +; GFX9-NEXT: ds_read_u8 v0, v0 offset:15 +; GFX9-NEXT: s_waitcnt lgkmcnt(5) +; GFX9-NEXT: v_and_b32_e32 v6, v6, v3 +; GFX9-NEXT: s_waitcnt lgkmcnt(4) +; GFX9-NEXT: v_and_b32_e32 v7, v7, v3 +; GFX9-NEXT: v_lshlrev_b32_e32 v6, 16, v6 +; GFX9-NEXT: v_lshlrev_b32_e32 v7, 24, v7 +; GFX9-NEXT: s_waitcnt lgkmcnt(2) +; GFX9-NEXT: v_lshlrev_b32_sdwa v5, v5, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: v_and_b32_e32 v0, v0, v3 +; GFX9-NEXT: v_or3_b32 v2, v2, v6, v7 +; GFX9-NEXT: v_and_b32_e32 v6, v10, v3 +; GFX9-NEXT: v_and_or_b32 v5, v8, v3, v5 +; GFX9-NEXT: v_lshlrev_b32_e32 v6, 16, v6 +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 24, v0 +; GFX9-NEXT: v_or3_b32 v3, v5, v6, v0 +; 
GFX9-NEXT: v_mov_b32_e32 v0, v4 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; GFX7-LABEL: v_load_lds_v3i32_align1: +; GFX7: ; %bb.0: +; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX7-NEXT: s_mov_b32 m0, -1 +; GFX7-NEXT: ds_read_u8 v1, v0 +; GFX7-NEXT: ds_read_u8 v2, v0 offset:1 +; GFX7-NEXT: ds_read_u8 v4, v0 offset:2 +; GFX7-NEXT: ds_read_u8 v5, v0 offset:3 +; GFX7-NEXT: ds_read_u8 v6, v0 offset:4 +; GFX7-NEXT: s_movk_i32 s4, 0xff +; GFX7-NEXT: s_waitcnt lgkmcnt(3) +; GFX7-NEXT: v_and_b32_e32 v2, s4, v2 +; GFX7-NEXT: v_and_b32_e32 v1, s4, v1 +; GFX7-NEXT: v_lshlrev_b32_e32 v2, 8, v2 +; GFX7-NEXT: v_or_b32_e32 v1, v1, v2 +; GFX7-NEXT: s_waitcnt lgkmcnt(2) +; GFX7-NEXT: v_and_b32_e32 v2, s4, v4 +; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; GFX7-NEXT: v_or_b32_e32 v1, v1, v2 +; GFX7-NEXT: s_waitcnt lgkmcnt(1) +; GFX7-NEXT: v_and_b32_e32 v2, s4, v5 +; GFX7-NEXT: v_lshlrev_b32_e32 v2, 24, v2 +; GFX7-NEXT: v_or_b32_e32 v4, v1, v2 +; GFX7-NEXT: s_waitcnt lgkmcnt(0) +; GFX7-NEXT: v_and_b32_e32 v1, s4, v6 +; GFX7-NEXT: v_mov_b32_e32 v3, 0xff +; GFX7-NEXT: ds_read_u8 v2, v0 offset:5 +; GFX7-NEXT: ds_read_u8 v5, v0 offset:6 +; GFX7-NEXT: ds_read_u8 v6, v0 offset:7 +; GFX7-NEXT: ds_read_u8 v7, v0 offset:8 +; GFX7-NEXT: ds_read_u8 v8, v0 offset:9 +; GFX7-NEXT: s_waitcnt lgkmcnt(4) +; GFX7-NEXT: v_and_b32_e32 v2, v2, v3 +; GFX7-NEXT: v_lshlrev_b32_e32 v2, 8, v2 +; GFX7-NEXT: v_or_b32_e32 v1, v1, v2 +; GFX7-NEXT: s_waitcnt lgkmcnt(3) +; GFX7-NEXT: v_and_b32_e32 v2, v5, v3 +; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; GFX7-NEXT: v_or_b32_e32 v1, v1, v2 +; GFX7-NEXT: s_waitcnt lgkmcnt(2) +; GFX7-NEXT: v_and_b32_e32 v2, v6, v3 +; GFX7-NEXT: s_waitcnt lgkmcnt(0) +; GFX7-NEXT: v_and_b32_e32 v5, v8, v3 +; GFX7-NEXT: v_lshlrev_b32_e32 v2, 24, v2 +; GFX7-NEXT: v_or_b32_e32 v1, v1, v2 +; GFX7-NEXT: v_and_b32_e32 v2, v7, v3 +; GFX7-NEXT: v_lshlrev_b32_e32 v5, 8, v5 +; GFX7-NEXT: v_or_b32_e32 v2, v2, v5 +; GFX7-NEXT: ds_read_u8 v5, v0 offset:10 +; GFX7-NEXT: ds_read_u8 v6, v0 
offset:11 +; GFX7-NEXT: ds_read_u8 v7, v0 offset:12 +; GFX7-NEXT: ds_read_u8 v8, v0 offset:13 +; GFX7-NEXT: ds_read_u8 v9, v0 offset:14 +; GFX7-NEXT: s_waitcnt lgkmcnt(4) +; GFX7-NEXT: v_and_b32_e32 v5, v5, v3 +; GFX7-NEXT: ds_read_u8 v0, v0 offset:15 +; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; GFX7-NEXT: v_or_b32_e32 v2, v2, v5 +; GFX7-NEXT: s_waitcnt lgkmcnt(4) +; GFX7-NEXT: v_and_b32_e32 v5, v6, v3 +; GFX7-NEXT: s_waitcnt lgkmcnt(2) +; GFX7-NEXT: v_and_b32_e32 v6, v8, v3 +; GFX7-NEXT: v_lshlrev_b32_e32 v5, 24, v5 +; GFX7-NEXT: v_or_b32_e32 v2, v2, v5 +; GFX7-NEXT: v_and_b32_e32 v5, v7, v3 +; GFX7-NEXT: v_lshlrev_b32_e32 v6, 8, v6 +; GFX7-NEXT: v_or_b32_e32 v5, v5, v6 +; GFX7-NEXT: s_waitcnt lgkmcnt(1) +; GFX7-NEXT: v_and_b32_e32 v6, v9, v3 +; GFX7-NEXT: s_waitcnt lgkmcnt(0) +; GFX7-NEXT: v_and_b32_e32 v0, v0, v3 +; GFX7-NEXT: v_lshlrev_b32_e32 v6, 16, v6 +; GFX7-NEXT: v_or_b32_e32 v5, v5, v6 +; GFX7-NEXT: v_lshlrev_b32_e32 v0, 24, v0 +; GFX7-NEXT: v_or_b32_e32 v3, v5, v0 +; GFX7-NEXT: v_mov_b32_e32 v0, v4 +; GFX7-NEXT: s_setpc_b64 s[30:31] + %load = load <4 x i32>, <4 x i32> addrspace(3)* %ptr, align 1 + ret <4 x i32> %load +} + +define <4 x i32> @v_load_lds_v3i32_align2(<4 x i32> addrspace(3)* %ptr) { +; GFX9-LABEL: v_load_lds_v3i32_align2: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: ds_read_u16 v1, v0 +; GFX9-NEXT: ds_read_u16 v2, v0 offset:2 +; GFX9-NEXT: ds_read_u16 v3, v0 offset:4 +; GFX9-NEXT: ds_read_u16 v5, v0 offset:6 +; GFX9-NEXT: ds_read_u16 v6, v0 offset:8 +; GFX9-NEXT: s_mov_b32 s4, 0xffff +; GFX9-NEXT: s_waitcnt lgkmcnt(3) +; GFX9-NEXT: v_and_b32_e32 v2, s4, v2 +; GFX9-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; GFX9-NEXT: v_and_or_b32 v4, v1, s4, v2 +; GFX9-NEXT: s_waitcnt lgkmcnt(1) +; GFX9-NEXT: v_and_b32_e32 v1, s4, v5 +; GFX9-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; GFX9-NEXT: v_and_or_b32 v1, v3, s4, v1 +; GFX9-NEXT: ds_read_u16 v2, v0 offset:10 +; GFX9-NEXT: ds_read_u16 v3, v0 offset:12 +; GFX9-NEXT: 
ds_read_u16 v0, v0 offset:14 +; GFX9-NEXT: s_waitcnt lgkmcnt(2) +; GFX9-NEXT: v_and_b32_e32 v2, s4, v2 +; GFX9-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: v_and_b32_e32 v0, s4, v0 +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX9-NEXT: v_and_or_b32 v3, v3, s4, v0 +; GFX9-NEXT: v_and_or_b32 v2, v6, s4, v2 +; GFX9-NEXT: v_mov_b32_e32 v0, v4 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; GFX7-LABEL: v_load_lds_v3i32_align2: +; GFX7: ; %bb.0: +; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX7-NEXT: s_mov_b32 m0, -1 +; GFX7-NEXT: ds_read_u16 v1, v0 +; GFX7-NEXT: ds_read_u16 v2, v0 offset:2 +; GFX7-NEXT: ds_read_u16 v3, v0 offset:4 +; GFX7-NEXT: ds_read_u16 v5, v0 offset:6 +; GFX7-NEXT: ds_read_u16 v6, v0 offset:8 +; GFX7-NEXT: s_mov_b32 s4, 0xffff +; GFX7-NEXT: s_waitcnt lgkmcnt(3) +; GFX7-NEXT: v_and_b32_e32 v2, s4, v2 +; GFX7-NEXT: v_and_b32_e32 v1, s4, v1 +; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; GFX7-NEXT: v_or_b32_e32 v4, v1, v2 +; GFX7-NEXT: s_waitcnt lgkmcnt(1) +; GFX7-NEXT: v_and_b32_e32 v2, s4, v5 +; GFX7-NEXT: v_and_b32_e32 v1, s4, v3 +; GFX7-NEXT: ds_read_u16 v3, v0 offset:10 +; GFX7-NEXT: ds_read_u16 v5, v0 offset:12 +; GFX7-NEXT: ds_read_u16 v0, v0 offset:14 +; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; GFX7-NEXT: v_or_b32_e32 v1, v1, v2 +; GFX7-NEXT: s_waitcnt lgkmcnt(2) +; GFX7-NEXT: v_and_b32_e32 v3, s4, v3 +; GFX7-NEXT: v_and_b32_e32 v2, s4, v6 +; GFX7-NEXT: s_waitcnt lgkmcnt(0) +; GFX7-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; GFX7-NEXT: v_or_b32_e32 v2, v2, v3 +; GFX7-NEXT: v_and_b32_e32 v3, s4, v5 +; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX7-NEXT: v_or_b32_e32 v3, v3, v0 +; GFX7-NEXT: v_mov_b32_e32 v0, v4 +; GFX7-NEXT: s_setpc_b64 s[30:31] + %load = load <4 x i32>, <4 x i32> addrspace(3)* %ptr, align 2 + ret <4 x i32> %load +} + +define <4 x i32> @v_load_lds_v3i32_align4(<4 x i32> addrspace(3)* %ptr) { +; GFX9-LABEL: v_load_lds_v3i32_align4: +; GFX9: ; 
%bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: ds_read_b128 v[0:3], v0 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; GFX7-LABEL: v_load_lds_v3i32_align4: +; GFX7: ; %bb.0: +; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX7-NEXT: v_mov_b32_e32 v2, v0 +; GFX7-NEXT: s_mov_b32 m0, -1 +; GFX7-NEXT: ds_read2_b32 v[0:1], v0 offset1:1 +; GFX7-NEXT: ds_read2_b32 v[2:3], v2 offset0:2 offset1:3 +; GFX7-NEXT: s_waitcnt lgkmcnt(0) +; GFX7-NEXT: s_setpc_b64 s[30:31] + %load = load <4 x i32>, <4 x i32> addrspace(3)* %ptr, align 4 + ret <4 x i32> %load +} + +define <4 x i32> @v_load_lds_v3i32_align8(<4 x i32> addrspace(3)* %ptr) { +; GFX9-LABEL: v_load_lds_v3i32_align8: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: ds_read_b128 v[0:3], v0 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; GFX7-LABEL: v_load_lds_v3i32_align8: +; GFX7: ; %bb.0: +; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX7-NEXT: s_mov_b32 m0, -1 +; GFX7-NEXT: ds_read2_b64 v[0:3], v0 offset1:1 +; GFX7-NEXT: s_waitcnt lgkmcnt(0) +; GFX7-NEXT: s_setpc_b64 s[30:31] + %load = load <4 x i32>, <4 x i32> addrspace(3)* %ptr, align 8 + ret <4 x i32> %load +} + +define <4 x i32> @v_load_lds_v3i32_align16(<4 x i32> addrspace(3)* %ptr) { +; GFX9-LABEL: v_load_lds_v3i32_align16: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: ds_read_b128 v[0:3], v0 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; GFX7-LABEL: v_load_lds_v3i32_align16: +; GFX7: ; %bb.0: +; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX7-NEXT: s_mov_b32 m0, -1 +; GFX7-NEXT: ds_read_b128 v[0:3], v0 +; GFX7-NEXT: s_waitcnt lgkmcnt(0) +; GFX7-NEXT: s_setpc_b64 s[30:31] + %load = load <4 x i32>, <4 x i32> addrspace(3)* %ptr, align 16 + ret <4 x i32> %load +} Index: llvm/test/CodeGen/AMDGPU/GlobalISel/load-local.96.ll 
=================================================================== --- /dev/null +++ llvm/test/CodeGen/AMDGPU/GlobalISel/load-local.96.ll @@ -0,0 +1,264 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=gfx900 -mattr=+unaligned-buffer-access < %s | FileCheck -check-prefixes=GCN,GFX9,GFX9-UNALIGNED %s +; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=gfx900 -mattr=-unaligned-buffer-access < %s | FileCheck -check-prefixes=GCN,GFX9,GFX9-NOUNALIGNED %s +; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=hawaii -mattr=+unaligned-buffer-access < %s | FileCheck -check-prefixes=GCN,GFX7,GFX7-UNALIGNED %s +; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=hawaii -mattr=-unaligned-buffer-access < %s | FileCheck -check-prefixes=GCN,GFX7,GFX7-NOUNALIGNED %s + +; FIXME: +; XUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=tahiti < %s | FileCheck -check-prefixes=GCN,GFX6 %s + +define <3 x i32> @load_lds_v3i32(<3 x i32> addrspace(3)* %ptr) { +; GFX9-LABEL: load_lds_v3i32: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: ds_read_b96 v[0:2], v0 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; GFX7-LABEL: load_lds_v3i32: +; GFX7: ; %bb.0: +; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX7-NEXT: s_mov_b32 m0, -1 +; GFX7-NEXT: ds_read_b96 v[0:2], v0 +; GFX7-NEXT: s_waitcnt lgkmcnt(0) +; GFX7-NEXT: s_setpc_b64 s[30:31] + %load = load <3 x i32>, <3 x i32> addrspace(3)* %ptr + ret <3 x i32> %load +} + +define <3 x i32> @load_lds_v3i32_align1(<3 x i32> addrspace(3)* %ptr) { +; GFX9-LABEL: load_lds_v3i32_align1: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_mov_b32_e32 v2, v0 +; GFX9-NEXT: ds_read_u8 v0, v0 +; GFX9-NEXT: ds_read_u8 v1, v2 offset:1 +; GFX9-NEXT: ds_read_u8 v4, v2 offset:2 +; GFX9-NEXT: ds_read_u8 v5, v2 offset:3 +; GFX9-NEXT: ds_read_u8 v6, v2 
offset:4 +; GFX9-NEXT: s_mov_b32 s5, 8 +; GFX9-NEXT: s_movk_i32 s4, 0xff +; GFX9-NEXT: s_waitcnt lgkmcnt(3) +; GFX9-NEXT: v_lshlrev_b32_sdwa v1, s5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_and_or_b32 v0, v0, s4, v1 +; GFX9-NEXT: s_waitcnt lgkmcnt(2) +; GFX9-NEXT: v_and_b32_e32 v1, s4, v4 +; GFX9-NEXT: s_waitcnt lgkmcnt(1) +; GFX9-NEXT: v_and_b32_e32 v4, s4, v5 +; GFX9-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; GFX9-NEXT: v_lshlrev_b32_e32 v4, 24, v4 +; GFX9-NEXT: v_or3_b32 v0, v0, v1, v4 +; GFX9-NEXT: ds_read_u8 v1, v2 offset:5 +; GFX9-NEXT: ds_read_u8 v4, v2 offset:6 +; GFX9-NEXT: ds_read_u8 v5, v2 offset:7 +; GFX9-NEXT: ds_read_u8 v7, v2 offset:8 +; GFX9-NEXT: ds_read_u8 v8, v2 offset:9 +; GFX9-NEXT: v_mov_b32_e32 v3, 0xff +; GFX9-NEXT: s_waitcnt lgkmcnt(4) +; GFX9-NEXT: v_lshlrev_b32_sdwa v1, s5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: s_waitcnt lgkmcnt(3) +; GFX9-NEXT: v_and_b32_e32 v4, v4, v3 +; GFX9-NEXT: s_waitcnt lgkmcnt(2) +; GFX9-NEXT: v_and_b32_e32 v5, v5, v3 +; GFX9-NEXT: v_and_or_b32 v1, v6, s4, v1 +; GFX9-NEXT: v_lshlrev_b32_e32 v4, 16, v4 +; GFX9-NEXT: v_lshlrev_b32_e32 v5, 24, v5 +; GFX9-NEXT: v_or3_b32 v1, v1, v4, v5 +; GFX9-NEXT: ds_read_u8 v4, v2 offset:10 +; GFX9-NEXT: ds_read_u8 v2, v2 offset:11 +; GFX9-NEXT: v_mov_b32_e32 v5, 8 +; GFX9-NEXT: s_waitcnt lgkmcnt(2) +; GFX9-NEXT: v_lshlrev_b32_sdwa v5, v5, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_and_or_b32 v5, v7, v3, v5 +; GFX9-NEXT: s_waitcnt lgkmcnt(1) +; GFX9-NEXT: v_and_b32_e32 v4, v4, v3 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: v_and_b32_e32 v2, v2, v3 +; GFX9-NEXT: v_lshlrev_b32_e32 v4, 16, v4 +; GFX9-NEXT: v_lshlrev_b32_e32 v2, 24, v2 +; GFX9-NEXT: v_or3_b32 v2, v5, v4, v2 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; GFX7-LABEL: load_lds_v3i32_align1: +; GFX7: ; %bb.0: +; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX7-NEXT: s_mov_b32 m0, -1 +; 
GFX7-NEXT: v_mov_b32_e32 v2, v0 +; GFX7-NEXT: ds_read_u8 v0, v0 +; GFX7-NEXT: ds_read_u8 v1, v2 offset:1 +; GFX7-NEXT: ds_read_u8 v4, v2 offset:2 +; GFX7-NEXT: ds_read_u8 v5, v2 offset:3 +; GFX7-NEXT: ds_read_u8 v6, v2 offset:4 +; GFX7-NEXT: s_movk_i32 s4, 0xff +; GFX7-NEXT: s_waitcnt lgkmcnt(3) +; GFX7-NEXT: v_and_b32_e32 v1, s4, v1 +; GFX7-NEXT: v_and_b32_e32 v0, s4, v0 +; GFX7-NEXT: v_lshlrev_b32_e32 v1, 8, v1 +; GFX7-NEXT: v_or_b32_e32 v0, v0, v1 +; GFX7-NEXT: s_waitcnt lgkmcnt(2) +; GFX7-NEXT: v_and_b32_e32 v1, s4, v4 +; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; GFX7-NEXT: v_or_b32_e32 v0, v0, v1 +; GFX7-NEXT: s_waitcnt lgkmcnt(1) +; GFX7-NEXT: v_and_b32_e32 v1, s4, v5 +; GFX7-NEXT: v_lshlrev_b32_e32 v1, 24, v1 +; GFX7-NEXT: v_or_b32_e32 v0, v0, v1 +; GFX7-NEXT: s_waitcnt lgkmcnt(0) +; GFX7-NEXT: v_and_b32_e32 v1, s4, v6 +; GFX7-NEXT: v_mov_b32_e32 v3, 0xff +; GFX7-NEXT: ds_read_u8 v4, v2 offset:5 +; GFX7-NEXT: ds_read_u8 v5, v2 offset:6 +; GFX7-NEXT: ds_read_u8 v6, v2 offset:7 +; GFX7-NEXT: ds_read_u8 v7, v2 offset:8 +; GFX7-NEXT: ds_read_u8 v8, v2 offset:9 +; GFX7-NEXT: s_waitcnt lgkmcnt(4) +; GFX7-NEXT: v_and_b32_e32 v4, v4, v3 +; GFX7-NEXT: v_lshlrev_b32_e32 v4, 8, v4 +; GFX7-NEXT: v_or_b32_e32 v1, v1, v4 +; GFX7-NEXT: s_waitcnt lgkmcnt(3) +; GFX7-NEXT: v_and_b32_e32 v4, v5, v3 +; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v4 +; GFX7-NEXT: v_or_b32_e32 v1, v1, v4 +; GFX7-NEXT: s_waitcnt lgkmcnt(2) +; GFX7-NEXT: v_and_b32_e32 v4, v6, v3 +; GFX7-NEXT: v_lshlrev_b32_e32 v4, 24, v4 +; GFX7-NEXT: v_or_b32_e32 v1, v1, v4 +; GFX7-NEXT: ds_read_u8 v4, v2 offset:10 +; GFX7-NEXT: ds_read_u8 v2, v2 offset:11 +; GFX7-NEXT: s_waitcnt lgkmcnt(2) +; GFX7-NEXT: v_and_b32_e32 v6, v8, v3 +; GFX7-NEXT: v_and_b32_e32 v5, v7, v3 +; GFX7-NEXT: v_lshlrev_b32_e32 v6, 8, v6 +; GFX7-NEXT: s_waitcnt lgkmcnt(1) +; GFX7-NEXT: v_and_b32_e32 v4, v4, v3 +; GFX7-NEXT: s_waitcnt lgkmcnt(0) +; GFX7-NEXT: v_and_b32_e32 v2, v2, v3 +; GFX7-NEXT: v_or_b32_e32 v5, v5, v6 +; GFX7-NEXT: 
v_lshlrev_b32_e32 v4, 16, v4 +; GFX7-NEXT: v_or_b32_e32 v4, v5, v4 +; GFX7-NEXT: v_lshlrev_b32_e32 v2, 24, v2 +; GFX7-NEXT: v_or_b32_e32 v2, v4, v2 +; GFX7-NEXT: s_setpc_b64 s[30:31] + %load = load <3 x i32>, <3 x i32> addrspace(3)* %ptr, align 1 + ret <3 x i32> %load +} + +define <3 x i32> @load_lds_v3i32_align2(<3 x i32> addrspace(3)* %ptr) { +; GFX9-LABEL: load_lds_v3i32_align2: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_mov_b32_e32 v2, v0 +; GFX9-NEXT: ds_read_u16 v0, v0 +; GFX9-NEXT: ds_read_u16 v1, v2 offset:2 +; GFX9-NEXT: ds_read_u16 v3, v2 offset:4 +; GFX9-NEXT: ds_read_u16 v4, v2 offset:6 +; GFX9-NEXT: ds_read_u16 v5, v2 offset:8 +; GFX9-NEXT: ds_read_u16 v2, v2 offset:10 +; GFX9-NEXT: s_mov_b32 s4, 0xffff +; GFX9-NEXT: s_waitcnt lgkmcnt(4) +; GFX9-NEXT: v_and_b32_e32 v1, s4, v1 +; GFX9-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; GFX9-NEXT: v_and_or_b32 v0, v0, s4, v1 +; GFX9-NEXT: s_waitcnt lgkmcnt(2) +; GFX9-NEXT: v_and_b32_e32 v1, s4, v4 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: v_and_b32_e32 v2, s4, v2 +; GFX9-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; GFX9-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; GFX9-NEXT: v_and_or_b32 v1, v3, s4, v1 +; GFX9-NEXT: v_and_or_b32 v2, v5, s4, v2 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; GFX7-LABEL: load_lds_v3i32_align2: +; GFX7: ; %bb.0: +; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX7-NEXT: v_mov_b32_e32 v2, v0 +; GFX7-NEXT: s_mov_b32 m0, -1 +; GFX7-NEXT: ds_read_u16 v0, v0 +; GFX7-NEXT: ds_read_u16 v1, v2 offset:2 +; GFX7-NEXT: ds_read_u16 v3, v2 offset:4 +; GFX7-NEXT: ds_read_u16 v4, v2 offset:6 +; GFX7-NEXT: ds_read_u16 v5, v2 offset:8 +; GFX7-NEXT: ds_read_u16 v2, v2 offset:10 +; GFX7-NEXT: s_mov_b32 s4, 0xffff +; GFX7-NEXT: s_waitcnt lgkmcnt(4) +; GFX7-NEXT: v_and_b32_e32 v1, s4, v1 +; GFX7-NEXT: v_and_b32_e32 v0, s4, v0 +; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; GFX7-NEXT: v_or_b32_e32 v0, v0, v1 +; GFX7-NEXT: s_waitcnt lgkmcnt(3) +; GFX7-NEXT: v_and_b32_e32 
v1, s4, v3 +; GFX7-NEXT: s_waitcnt lgkmcnt(2) +; GFX7-NEXT: v_and_b32_e32 v3, s4, v4 +; GFX7-NEXT: s_waitcnt lgkmcnt(0) +; GFX7-NEXT: v_and_b32_e32 v2, s4, v2 +; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; GFX7-NEXT: v_or_b32_e32 v1, v1, v3 +; GFX7-NEXT: v_and_b32_e32 v3, s4, v5 +; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; GFX7-NEXT: v_or_b32_e32 v2, v3, v2 +; GFX7-NEXT: s_setpc_b64 s[30:31] + %load = load <3 x i32>, <3 x i32> addrspace(3)* %ptr, align 2 + ret <3 x i32> %load +} + +define <3 x i32> @load_lds_v3i32_align4(<3 x i32> addrspace(3)* %ptr) { +; GFX9-LABEL: load_lds_v3i32_align4: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: ds_read_b96 v[0:2], v0 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; GFX7-LABEL: load_lds_v3i32_align4: +; GFX7: ; %bb.0: +; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX7-NEXT: v_mov_b32_e32 v2, v0 +; GFX7-NEXT: s_mov_b32 m0, -1 +; GFX7-NEXT: ds_read2_b32 v[0:1], v0 offset1:1 +; GFX7-NEXT: ds_read_b32 v2, v2 offset:8 +; GFX7-NEXT: s_waitcnt lgkmcnt(0) +; GFX7-NEXT: s_setpc_b64 s[30:31] + %load = load <3 x i32>, <3 x i32> addrspace(3)* %ptr, align 4 + ret <3 x i32> %load +} + +define <3 x i32> @load_lds_v3i32_align8(<3 x i32> addrspace(3)* %ptr) { +; GFX9-LABEL: load_lds_v3i32_align8: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: ds_read_b96 v[0:2], v0 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; GFX7-LABEL: load_lds_v3i32_align8: +; GFX7: ; %bb.0: +; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX7-NEXT: v_mov_b32_e32 v2, v0 +; GFX7-NEXT: s_mov_b32 m0, -1 +; GFX7-NEXT: ds_read_b64 v[0:1], v0 +; GFX7-NEXT: ds_read_b32 v2, v2 offset:8 +; GFX7-NEXT: s_waitcnt lgkmcnt(0) +; GFX7-NEXT: s_setpc_b64 s[30:31] + %load = load <3 x i32>, <3 x i32> addrspace(3)* %ptr, align 8 + ret <3 x i32> %load +} + +define <3 x i32> @load_lds_v3i32_align16(<3 x i32> addrspace(3)* %ptr) { +; 
GFX9-LABEL: load_lds_v3i32_align16: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: ds_read_b96 v[0:2], v0 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; GFX7-LABEL: load_lds_v3i32_align16: +; GFX7: ; %bb.0: +; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX7-NEXT: s_mov_b32 m0, -1 +; GFX7-NEXT: ds_read_b96 v[0:2], v0 +; GFX7-NEXT: s_waitcnt lgkmcnt(0) +; GFX7-NEXT: s_setpc_b64 s[30:31] + %load = load <3 x i32>, <3 x i32> addrspace(3)* %ptr, align 16 + ret <3 x i32> %load +} Index: llvm/test/CodeGen/AMDGPU/GlobalISel/store-local.128.ll =================================================================== --- /dev/null +++ llvm/test/CodeGen/AMDGPU/GlobalISel/store-local.128.ll @@ -0,0 +1,399 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=gfx900 -mattr=+unaligned-buffer-access < %s | FileCheck -check-prefixes=GCN,GFX9,GFX9-UNALIGNED %s +; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=gfx900 -mattr=-unaligned-buffer-access < %s | FileCheck -check-prefixes=GCN,GFX9,GFX9-NOUNALIGNED %s +; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=hawaii -mattr=+unaligned-buffer-access < %s | FileCheck -check-prefixes=GCN,GFX7,GFX7-UNALIGNED %s +; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=hawaii -mattr=-unaligned-buffer-access < %s | FileCheck -check-prefixes=GCN,GFX7,GFX7-NOUNALIGNED %s + +; FIXME: +; XUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=tahiti < %s | FileCheck -check-prefixes=GCN,GFX6 %s + +define amdgpu_kernel void @store_lds_v3i32(<4 x i32> addrspace(3)* %out, <4 x i32> %x) { +; GFX9-LABEL: store_lds_v3i32: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_load_dword s4, s[0:1], 0x24 +; GFX9-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x34 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: v_mov_b32_e32 v4, s4 +; GFX9-NEXT: v_mov_b32_e32 v0, s0 +; GFX9-NEXT: v_mov_b32_e32 v1, s1 +; 
GFX9-NEXT: v_mov_b32_e32 v2, s2 +; GFX9-NEXT: v_mov_b32_e32 v3, s3 +; GFX9-NEXT: ds_write_b128 v4, v[0:3] +; GFX9-NEXT: s_endpgm +; +; GFX7-LABEL: store_lds_v3i32: +; GFX7: ; %bb.0: +; GFX7-NEXT: s_load_dword s4, s[0:1], 0x9 +; GFX7-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0xd +; GFX7-NEXT: s_mov_b32 m0, -1 +; GFX7-NEXT: s_waitcnt lgkmcnt(0) +; GFX7-NEXT: v_mov_b32_e32 v4, s4 +; GFX7-NEXT: v_mov_b32_e32 v0, s0 +; GFX7-NEXT: v_mov_b32_e32 v1, s1 +; GFX7-NEXT: v_mov_b32_e32 v2, s2 +; GFX7-NEXT: v_mov_b32_e32 v3, s3 +; GFX7-NEXT: ds_write_b128 v4, v[0:3] +; GFX7-NEXT: s_endpgm + store <4 x i32> %x, <4 x i32> addrspace(3)* %out + ret void +} + +define amdgpu_kernel void @store_lds_v3i32_align1(<4 x i32> addrspace(3)* %out, <4 x i32> %x) { +; GFX9-LABEL: store_lds_v3i32_align1: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_load_dword s4, s[0:1], 0x24 +; GFX9-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x34 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: v_mov_b32_e32 v1, s4 +; GFX9-NEXT: s_lshr_b32 s5, s0, 8 +; GFX9-NEXT: s_lshr_b32 s6, s0, 16 +; GFX9-NEXT: s_lshr_b32 s7, s0, 24 +; GFX9-NEXT: v_mov_b32_e32 v0, s0 +; GFX9-NEXT: s_add_u32 s0, s4, 1 +; GFX9-NEXT: v_mov_b32_e32 v3, s0 +; GFX9-NEXT: s_add_u32 s0, s4, 2 +; GFX9-NEXT: v_mov_b32_e32 v5, s0 +; GFX9-NEXT: s_add_u32 s0, s4, 3 +; GFX9-NEXT: v_mov_b32_e32 v7, s0 +; GFX9-NEXT: s_add_u32 s0, s4, 4 +; GFX9-NEXT: v_mov_b32_e32 v9, s0 +; GFX9-NEXT: s_add_u32 s0, s4, 5 +; GFX9-NEXT: v_mov_b32_e32 v2, s5 +; GFX9-NEXT: v_mov_b32_e32 v4, s6 +; GFX9-NEXT: v_mov_b32_e32 v6, s7 +; GFX9-NEXT: v_mov_b32_e32 v8, s1 +; GFX9-NEXT: ds_write_b8 v1, v0 +; GFX9-NEXT: ds_write_b8 v3, v2 +; GFX9-NEXT: ds_write_b8 v5, v4 +; GFX9-NEXT: ds_write_b8 v7, v6 +; GFX9-NEXT: ds_write_b8 v9, v8 +; GFX9-NEXT: v_mov_b32_e32 v1, s0 +; GFX9-NEXT: s_add_u32 s0, s4, 6 +; GFX9-NEXT: v_mov_b32_e32 v3, s0 +; GFX9-NEXT: s_add_u32 s0, s4, 7 +; GFX9-NEXT: v_mov_b32_e32 v5, s0 +; GFX9-NEXT: s_add_u32 s0, s4, 8 +; GFX9-NEXT: s_lshr_b32 s5, s1, 8 +; GFX9-NEXT: v_mov_b32_e32 v7, s0 +; 
GFX9-NEXT: s_add_u32 s0, s4, 9 +; GFX9-NEXT: s_lshr_b32 s6, s1, 16 +; GFX9-NEXT: s_lshr_b32 s7, s1, 24 +; GFX9-NEXT: v_mov_b32_e32 v0, s5 +; GFX9-NEXT: s_lshr_b32 s1, s2, 8 +; GFX9-NEXT: v_mov_b32_e32 v9, s0 +; GFX9-NEXT: s_add_u32 s0, s4, 10 +; GFX9-NEXT: v_mov_b32_e32 v2, s6 +; GFX9-NEXT: v_mov_b32_e32 v8, s1 +; GFX9-NEXT: v_mov_b32_e32 v4, s7 +; GFX9-NEXT: v_mov_b32_e32 v6, s2 +; GFX9-NEXT: ds_write_b8 v1, v0 +; GFX9-NEXT: ds_write_b8 v3, v2 +; GFX9-NEXT: ds_write_b8 v5, v4 +; GFX9-NEXT: ds_write_b8 v7, v6 +; GFX9-NEXT: ds_write_b8 v9, v8 +; GFX9-NEXT: v_mov_b32_e32 v1, s0 +; GFX9-NEXT: s_add_u32 s0, s4, 11 +; GFX9-NEXT: v_mov_b32_e32 v3, s0 +; GFX9-NEXT: s_add_u32 s0, s4, 12 +; GFX9-NEXT: v_mov_b32_e32 v5, s0 +; GFX9-NEXT: s_add_u32 s0, s4, 13 +; GFX9-NEXT: s_lshr_b32 s5, s2, 16 +; GFX9-NEXT: v_mov_b32_e32 v7, s0 +; GFX9-NEXT: s_add_u32 s0, s4, 14 +; GFX9-NEXT: s_lshr_b32 s6, s2, 24 +; GFX9-NEXT: v_mov_b32_e32 v0, s5 +; GFX9-NEXT: s_lshr_b32 s1, s3, 8 +; GFX9-NEXT: s_lshr_b32 s2, s3, 16 +; GFX9-NEXT: v_mov_b32_e32 v9, s0 +; GFX9-NEXT: s_lshr_b32 s5, s3, 24 +; GFX9-NEXT: s_add_u32 s0, s4, 15 +; GFX9-NEXT: v_mov_b32_e32 v2, s6 +; GFX9-NEXT: v_mov_b32_e32 v4, s3 +; GFX9-NEXT: v_mov_b32_e32 v6, s1 +; GFX9-NEXT: v_mov_b32_e32 v8, s2 +; GFX9-NEXT: ds_write_b8 v1, v0 +; GFX9-NEXT: ds_write_b8 v3, v2 +; GFX9-NEXT: ds_write_b8 v5, v4 +; GFX9-NEXT: ds_write_b8 v7, v6 +; GFX9-NEXT: ds_write_b8 v9, v8 +; GFX9-NEXT: v_mov_b32_e32 v0, s5 +; GFX9-NEXT: v_mov_b32_e32 v1, s0 +; GFX9-NEXT: ds_write_b8 v1, v0 +; GFX9-NEXT: s_endpgm +; +; GFX7-LABEL: store_lds_v3i32_align1: +; GFX7: ; %bb.0: +; GFX7-NEXT: s_load_dword s4, s[0:1], 0x9 +; GFX7-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0xd +; GFX7-NEXT: s_mov_b32 m0, -1 +; GFX7-NEXT: s_waitcnt lgkmcnt(0) +; GFX7-NEXT: v_mov_b32_e32 v1, s4 +; GFX7-NEXT: s_lshr_b32 s5, s0, 8 +; GFX7-NEXT: s_lshr_b32 s6, s0, 16 +; GFX7-NEXT: s_lshr_b32 s7, s0, 24 +; GFX7-NEXT: v_mov_b32_e32 v0, s0 +; GFX7-NEXT: s_add_u32 s0, s4, 1 +; GFX7-NEXT: v_mov_b32_e32 
v3, s0 +; GFX7-NEXT: s_add_u32 s0, s4, 2 +; GFX7-NEXT: v_mov_b32_e32 v5, s0 +; GFX7-NEXT: s_add_u32 s0, s4, 3 +; GFX7-NEXT: v_mov_b32_e32 v7, s0 +; GFX7-NEXT: s_add_u32 s0, s4, 4 +; GFX7-NEXT: v_mov_b32_e32 v9, s0 +; GFX7-NEXT: s_add_u32 s0, s4, 5 +; GFX7-NEXT: v_mov_b32_e32 v2, s5 +; GFX7-NEXT: v_mov_b32_e32 v4, s6 +; GFX7-NEXT: v_mov_b32_e32 v6, s7 +; GFX7-NEXT: v_mov_b32_e32 v8, s1 +; GFX7-NEXT: ds_write_b8 v1, v0 +; GFX7-NEXT: ds_write_b8 v3, v2 +; GFX7-NEXT: ds_write_b8 v5, v4 +; GFX7-NEXT: ds_write_b8 v7, v6 +; GFX7-NEXT: ds_write_b8 v9, v8 +; GFX7-NEXT: v_mov_b32_e32 v1, s0 +; GFX7-NEXT: s_add_u32 s0, s4, 6 +; GFX7-NEXT: v_mov_b32_e32 v3, s0 +; GFX7-NEXT: s_add_u32 s0, s4, 7 +; GFX7-NEXT: v_mov_b32_e32 v5, s0 +; GFX7-NEXT: s_add_u32 s0, s4, 8 +; GFX7-NEXT: s_lshr_b32 s5, s1, 8 +; GFX7-NEXT: v_mov_b32_e32 v7, s0 +; GFX7-NEXT: s_add_u32 s0, s4, 9 +; GFX7-NEXT: s_lshr_b32 s6, s1, 16 +; GFX7-NEXT: s_lshr_b32 s7, s1, 24 +; GFX7-NEXT: v_mov_b32_e32 v0, s5 +; GFX7-NEXT: s_lshr_b32 s1, s2, 8 +; GFX7-NEXT: v_mov_b32_e32 v9, s0 +; GFX7-NEXT: s_add_u32 s0, s4, 10 +; GFX7-NEXT: v_mov_b32_e32 v2, s6 +; GFX7-NEXT: v_mov_b32_e32 v8, s1 +; GFX7-NEXT: v_mov_b32_e32 v4, s7 +; GFX7-NEXT: v_mov_b32_e32 v6, s2 +; GFX7-NEXT: ds_write_b8 v1, v0 +; GFX7-NEXT: ds_write_b8 v3, v2 +; GFX7-NEXT: ds_write_b8 v5, v4 +; GFX7-NEXT: ds_write_b8 v7, v6 +; GFX7-NEXT: ds_write_b8 v9, v8 +; GFX7-NEXT: v_mov_b32_e32 v1, s0 +; GFX7-NEXT: s_add_u32 s0, s4, 11 +; GFX7-NEXT: v_mov_b32_e32 v3, s0 +; GFX7-NEXT: s_add_u32 s0, s4, 12 +; GFX7-NEXT: v_mov_b32_e32 v5, s0 +; GFX7-NEXT: s_add_u32 s0, s4, 13 +; GFX7-NEXT: s_lshr_b32 s5, s2, 16 +; GFX7-NEXT: v_mov_b32_e32 v7, s0 +; GFX7-NEXT: s_add_u32 s0, s4, 14 +; GFX7-NEXT: s_lshr_b32 s6, s2, 24 +; GFX7-NEXT: v_mov_b32_e32 v0, s5 +; GFX7-NEXT: s_lshr_b32 s1, s3, 8 +; GFX7-NEXT: s_lshr_b32 s2, s3, 16 +; GFX7-NEXT: v_mov_b32_e32 v9, s0 +; GFX7-NEXT: s_lshr_b32 s5, s3, 24 +; GFX7-NEXT: s_add_u32 s0, s4, 15 +; GFX7-NEXT: v_mov_b32_e32 v2, s6 +; GFX7-NEXT: 
v_mov_b32_e32 v4, s3 +; GFX7-NEXT: v_mov_b32_e32 v6, s1 +; GFX7-NEXT: v_mov_b32_e32 v8, s2 +; GFX7-NEXT: ds_write_b8 v1, v0 +; GFX7-NEXT: ds_write_b8 v3, v2 +; GFX7-NEXT: ds_write_b8 v5, v4 +; GFX7-NEXT: ds_write_b8 v7, v6 +; GFX7-NEXT: ds_write_b8 v9, v8 +; GFX7-NEXT: v_mov_b32_e32 v0, s5 +; GFX7-NEXT: v_mov_b32_e32 v1, s0 +; GFX7-NEXT: ds_write_b8 v1, v0 +; GFX7-NEXT: s_endpgm + store <4 x i32> %x, <4 x i32> addrspace(3)* %out, align 1 + ret void +} + +define amdgpu_kernel void @store_lds_v3i32_align2(<4 x i32> addrspace(3)* %out, <4 x i32> %x) { +; GFX9-LABEL: store_lds_v3i32_align2: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_load_dword s4, s[0:1], 0x24 +; GFX9-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x34 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: v_mov_b32_e32 v1, s4 +; GFX9-NEXT: s_lshr_b32 s5, s0, 16 +; GFX9-NEXT: v_mov_b32_e32 v0, s0 +; GFX9-NEXT: s_add_u32 s0, s4, 2 +; GFX9-NEXT: v_mov_b32_e32 v3, s0 +; GFX9-NEXT: s_add_u32 s0, s4, 4 +; GFX9-NEXT: v_mov_b32_e32 v5, s0 +; GFX9-NEXT: s_add_u32 s0, s4, 6 +; GFX9-NEXT: v_mov_b32_e32 v7, s0 +; GFX9-NEXT: s_add_u32 s0, s4, 8 +; GFX9-NEXT: v_mov_b32_e32 v2, s5 +; GFX9-NEXT: s_lshr_b32 s5, s1, 16 +; GFX9-NEXT: v_mov_b32_e32 v9, s0 +; GFX9-NEXT: s_add_u32 s0, s4, 10 +; GFX9-NEXT: v_mov_b32_e32 v4, s1 +; GFX9-NEXT: v_mov_b32_e32 v6, s5 +; GFX9-NEXT: v_mov_b32_e32 v8, s2 +; GFX9-NEXT: ds_write_b16 v1, v0 +; GFX9-NEXT: ds_write_b16 v3, v2 +; GFX9-NEXT: ds_write_b16 v5, v4 +; GFX9-NEXT: ds_write_b16 v7, v6 +; GFX9-NEXT: ds_write_b16 v9, v8 +; GFX9-NEXT: s_lshr_b32 s1, s2, 16 +; GFX9-NEXT: v_mov_b32_e32 v1, s0 +; GFX9-NEXT: s_add_u32 s0, s4, 12 +; GFX9-NEXT: v_mov_b32_e32 v0, s1 +; GFX9-NEXT: s_lshr_b32 s1, s3, 16 +; GFX9-NEXT: v_mov_b32_e32 v3, s0 +; GFX9-NEXT: s_add_u32 s0, s4, 14 +; GFX9-NEXT: v_mov_b32_e32 v2, s3 +; GFX9-NEXT: v_mov_b32_e32 v4, s1 +; GFX9-NEXT: v_mov_b32_e32 v5, s0 +; GFX9-NEXT: ds_write_b16 v1, v0 +; GFX9-NEXT: ds_write_b16 v3, v2 +; GFX9-NEXT: ds_write_b16 v5, v4 +; GFX9-NEXT: s_endpgm +; +; GFX7-LABEL: 
store_lds_v3i32_align2: +; GFX7: ; %bb.0: +; GFX7-NEXT: s_load_dword s4, s[0:1], 0x9 +; GFX7-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0xd +; GFX7-NEXT: s_mov_b32 m0, -1 +; GFX7-NEXT: s_waitcnt lgkmcnt(0) +; GFX7-NEXT: v_mov_b32_e32 v1, s4 +; GFX7-NEXT: s_lshr_b32 s5, s0, 16 +; GFX7-NEXT: v_mov_b32_e32 v0, s0 +; GFX7-NEXT: s_add_u32 s0, s4, 2 +; GFX7-NEXT: v_mov_b32_e32 v3, s0 +; GFX7-NEXT: s_add_u32 s0, s4, 4 +; GFX7-NEXT: v_mov_b32_e32 v5, s0 +; GFX7-NEXT: s_add_u32 s0, s4, 6 +; GFX7-NEXT: v_mov_b32_e32 v7, s0 +; GFX7-NEXT: s_add_u32 s0, s4, 8 +; GFX7-NEXT: v_mov_b32_e32 v2, s5 +; GFX7-NEXT: s_lshr_b32 s5, s1, 16 +; GFX7-NEXT: v_mov_b32_e32 v9, s0 +; GFX7-NEXT: s_add_u32 s0, s4, 10 +; GFX7-NEXT: v_mov_b32_e32 v4, s1 +; GFX7-NEXT: v_mov_b32_e32 v6, s5 +; GFX7-NEXT: v_mov_b32_e32 v8, s2 +; GFX7-NEXT: ds_write_b16 v1, v0 +; GFX7-NEXT: ds_write_b16 v3, v2 +; GFX7-NEXT: ds_write_b16 v5, v4 +; GFX7-NEXT: ds_write_b16 v7, v6 +; GFX7-NEXT: ds_write_b16 v9, v8 +; GFX7-NEXT: s_lshr_b32 s1, s2, 16 +; GFX7-NEXT: v_mov_b32_e32 v1, s0 +; GFX7-NEXT: s_add_u32 s0, s4, 12 +; GFX7-NEXT: v_mov_b32_e32 v0, s1 +; GFX7-NEXT: s_lshr_b32 s1, s3, 16 +; GFX7-NEXT: v_mov_b32_e32 v3, s0 +; GFX7-NEXT: s_add_u32 s0, s4, 14 +; GFX7-NEXT: v_mov_b32_e32 v2, s3 +; GFX7-NEXT: v_mov_b32_e32 v4, s1 +; GFX7-NEXT: v_mov_b32_e32 v5, s0 +; GFX7-NEXT: ds_write_b16 v1, v0 +; GFX7-NEXT: ds_write_b16 v3, v2 +; GFX7-NEXT: ds_write_b16 v5, v4 +; GFX7-NEXT: s_endpgm + store <4 x i32> %x, <4 x i32> addrspace(3)* %out, align 2 + ret void +} + +define amdgpu_kernel void @store_lds_v3i32_align4(<4 x i32> addrspace(3)* %out, <4 x i32> %x) { +; GFX9-LABEL: store_lds_v3i32_align4: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_load_dword s4, s[0:1], 0x24 +; GFX9-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x34 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: v_mov_b32_e32 v4, s4 +; GFX9-NEXT: v_mov_b32_e32 v0, s0 +; GFX9-NEXT: v_mov_b32_e32 v1, s1 +; GFX9-NEXT: v_mov_b32_e32 v2, s2 +; GFX9-NEXT: v_mov_b32_e32 v3, s3 +; GFX9-NEXT: 
ds_write_b128 v4, v[0:3] +; GFX9-NEXT: s_endpgm +; +; GFX7-LABEL: store_lds_v3i32_align4: +; GFX7: ; %bb.0: +; GFX7-NEXT: s_load_dword s4, s[0:1], 0x9 +; GFX7-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0xd +; GFX7-NEXT: s_mov_b32 m0, -1 +; GFX7-NEXT: s_waitcnt lgkmcnt(0) +; GFX7-NEXT: v_mov_b32_e32 v1, s4 +; GFX7-NEXT: v_mov_b32_e32 v0, s0 +; GFX7-NEXT: s_add_u32 s0, s4, 4 +; GFX7-NEXT: v_mov_b32_e32 v3, s0 +; GFX7-NEXT: s_add_u32 s0, s4, 8 +; GFX7-NEXT: v_mov_b32_e32 v5, s0 +; GFX7-NEXT: s_add_u32 s0, s4, 12 +; GFX7-NEXT: v_mov_b32_e32 v2, s1 +; GFX7-NEXT: v_mov_b32_e32 v4, s2 +; GFX7-NEXT: v_mov_b32_e32 v6, s3 +; GFX7-NEXT: v_mov_b32_e32 v7, s0 +; GFX7-NEXT: ds_write_b32 v1, v0 +; GFX7-NEXT: ds_write_b32 v3, v2 +; GFX7-NEXT: ds_write_b32 v5, v4 +; GFX7-NEXT: ds_write_b32 v7, v6 +; GFX7-NEXT: s_endpgm + store <4 x i32> %x, <4 x i32> addrspace(3)* %out, align 4 + ret void +} + +define amdgpu_kernel void @store_lds_v3i32_align8(<4 x i32> addrspace(3)* %out, <4 x i32> %x) { +; GFX9-LABEL: store_lds_v3i32_align8: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_load_dword s4, s[0:1], 0x24 +; GFX9-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x34 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: v_mov_b32_e32 v4, s4 +; GFX9-NEXT: v_mov_b32_e32 v0, s0 +; GFX9-NEXT: v_mov_b32_e32 v1, s1 +; GFX9-NEXT: v_mov_b32_e32 v2, s2 +; GFX9-NEXT: v_mov_b32_e32 v3, s3 +; GFX9-NEXT: ds_write_b128 v4, v[0:3] +; GFX9-NEXT: s_endpgm +; +; GFX7-LABEL: store_lds_v3i32_align8: +; GFX7: ; %bb.0: +; GFX7-NEXT: s_load_dword s4, s[0:1], 0x9 +; GFX7-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0xd +; GFX7-NEXT: s_mov_b32 m0, -1 +; GFX7-NEXT: s_waitcnt lgkmcnt(0) +; GFX7-NEXT: v_mov_b32_e32 v4, s4 +; GFX7-NEXT: v_mov_b32_e32 v0, s0 +; GFX7-NEXT: v_mov_b32_e32 v1, s1 +; GFX7-NEXT: v_mov_b32_e32 v2, s2 +; GFX7-NEXT: v_mov_b32_e32 v3, s3 +; GFX7-NEXT: ds_write2_b64 v4, v[0:1], v[2:3] offset1:1 +; GFX7-NEXT: s_endpgm + store <4 x i32> %x, <4 x i32> addrspace(3)* %out, align 8 + ret void +} + +define amdgpu_kernel void 
@store_lds_v3i32_align16(<4 x i32> addrspace(3)* %out, <4 x i32> %x) { +; GFX9-LABEL: store_lds_v3i32_align16: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_load_dword s4, s[0:1], 0x24 +; GFX9-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x34 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: v_mov_b32_e32 v4, s4 +; GFX9-NEXT: v_mov_b32_e32 v0, s0 +; GFX9-NEXT: v_mov_b32_e32 v1, s1 +; GFX9-NEXT: v_mov_b32_e32 v2, s2 +; GFX9-NEXT: v_mov_b32_e32 v3, s3 +; GFX9-NEXT: ds_write_b128 v4, v[0:3] +; GFX9-NEXT: s_endpgm +; +; GFX7-LABEL: store_lds_v3i32_align16: +; GFX7: ; %bb.0: +; GFX7-NEXT: s_load_dword s4, s[0:1], 0x9 +; GFX7-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0xd +; GFX7-NEXT: s_mov_b32 m0, -1 +; GFX7-NEXT: s_waitcnt lgkmcnt(0) +; GFX7-NEXT: v_mov_b32_e32 v4, s4 +; GFX7-NEXT: v_mov_b32_e32 v0, s0 +; GFX7-NEXT: v_mov_b32_e32 v1, s1 +; GFX7-NEXT: v_mov_b32_e32 v2, s2 +; GFX7-NEXT: v_mov_b32_e32 v3, s3 +; GFX7-NEXT: ds_write_b128 v4, v[0:3] +; GFX7-NEXT: s_endpgm + store <4 x i32> %x, <4 x i32> addrspace(3)* %out, align 16 + ret void +} Index: llvm/test/CodeGen/AMDGPU/GlobalISel/store-local.96.ll =================================================================== --- /dev/null +++ llvm/test/CodeGen/AMDGPU/GlobalISel/store-local.96.ll @@ -0,0 +1,332 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=gfx900 -mattr=+unaligned-buffer-access < %s | FileCheck -check-prefixes=GCN,GFX9,GFX9-UNALIGNED %s +; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=gfx900 -mattr=-unaligned-buffer-access < %s | FileCheck -check-prefixes=GCN,GFX9,GFX9-NOUNALIGNED %s +; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=hawaii -mattr=+unaligned-buffer-access < %s | FileCheck -check-prefixes=GCN,GFX7,GFX7-UNALIGNED %s +; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=hawaii -mattr=-unaligned-buffer-access < %s | FileCheck -check-prefixes=GCN,GFX7,GFX7-NOUNALIGNED %s + +; FIXME: +; XUN: llc -global-isel 
-mtriple=amdgcn-amd-amdpal -mcpu=tahiti < %s | FileCheck -check-prefixes=GCN,GFX6 %s + +define amdgpu_kernel void @store_lds_v3i32(<3 x i32> addrspace(3)* %out, <3 x i32> %x) { +; GFX9-LABEL: store_lds_v3i32: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_load_dword s4, s[0:1], 0x24 +; GFX9-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x34 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: v_mov_b32_e32 v3, s4 +; GFX9-NEXT: v_mov_b32_e32 v0, s0 +; GFX9-NEXT: v_mov_b32_e32 v1, s1 +; GFX9-NEXT: v_mov_b32_e32 v2, s2 +; GFX9-NEXT: ds_write_b96 v3, v[0:2] +; GFX9-NEXT: s_endpgm +; +; GFX7-LABEL: store_lds_v3i32: +; GFX7: ; %bb.0: +; GFX7-NEXT: s_load_dword s4, s[0:1], 0x9 +; GFX7-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0xd +; GFX7-NEXT: s_mov_b32 m0, -1 +; GFX7-NEXT: s_waitcnt lgkmcnt(0) +; GFX7-NEXT: v_mov_b32_e32 v3, s4 +; GFX7-NEXT: v_mov_b32_e32 v0, s0 +; GFX7-NEXT: v_mov_b32_e32 v1, s1 +; GFX7-NEXT: v_mov_b32_e32 v2, s2 +; GFX7-NEXT: ds_write_b96 v3, v[0:2] +; GFX7-NEXT: s_endpgm + store <3 x i32> %x, <3 x i32> addrspace(3)* %out + ret void +} + +define amdgpu_kernel void @store_lds_v3i32_align1(<3 x i32> addrspace(3)* %out, <3 x i32> %x) { +; GFX9-LABEL: store_lds_v3i32_align1: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_load_dword s4, s[0:1], 0x24 +; GFX9-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x34 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: v_mov_b32_e32 v1, s4 +; GFX9-NEXT: s_lshr_b32 s3, s0, 8 +; GFX9-NEXT: s_lshr_b32 s5, s0, 16 +; GFX9-NEXT: s_lshr_b32 s6, s0, 24 +; GFX9-NEXT: v_mov_b32_e32 v0, s0 +; GFX9-NEXT: s_add_u32 s0, s4, 1 +; GFX9-NEXT: v_mov_b32_e32 v3, s0 +; GFX9-NEXT: s_add_u32 s0, s4, 2 +; GFX9-NEXT: v_mov_b32_e32 v5, s0 +; GFX9-NEXT: s_add_u32 s0, s4, 3 +; GFX9-NEXT: v_mov_b32_e32 v7, s0 +; GFX9-NEXT: s_add_u32 s0, s4, 4 +; GFX9-NEXT: v_mov_b32_e32 v9, s0 +; GFX9-NEXT: s_add_u32 s0, s4, 5 +; GFX9-NEXT: v_mov_b32_e32 v2, s3 +; GFX9-NEXT: v_mov_b32_e32 v4, s5 +; GFX9-NEXT: v_mov_b32_e32 v6, s6 +; GFX9-NEXT: v_mov_b32_e32 v8, s1 +; GFX9-NEXT: ds_write_b8 v1, v0 +; GFX9-NEXT: 
ds_write_b8 v3, v2 +; GFX9-NEXT: ds_write_b8 v5, v4 +; GFX9-NEXT: ds_write_b8 v7, v6 +; GFX9-NEXT: ds_write_b8 v9, v8 +; GFX9-NEXT: v_mov_b32_e32 v1, s0 +; GFX9-NEXT: s_add_u32 s0, s4, 6 +; GFX9-NEXT: v_mov_b32_e32 v3, s0 +; GFX9-NEXT: s_add_u32 s0, s4, 7 +; GFX9-NEXT: v_mov_b32_e32 v5, s0 +; GFX9-NEXT: s_add_u32 s0, s4, 8 +; GFX9-NEXT: s_lshr_b32 s3, s1, 8 +; GFX9-NEXT: v_mov_b32_e32 v7, s0 +; GFX9-NEXT: s_add_u32 s0, s4, 9 +; GFX9-NEXT: s_lshr_b32 s5, s1, 16 +; GFX9-NEXT: s_lshr_b32 s6, s1, 24 +; GFX9-NEXT: v_mov_b32_e32 v0, s3 +; GFX9-NEXT: s_lshr_b32 s1, s2, 8 +; GFX9-NEXT: v_mov_b32_e32 v9, s0 +; GFX9-NEXT: s_lshr_b32 s3, s2, 16 +; GFX9-NEXT: s_add_u32 s0, s4, 10 +; GFX9-NEXT: v_mov_b32_e32 v2, s5 +; GFX9-NEXT: v_mov_b32_e32 v4, s6 +; GFX9-NEXT: s_lshr_b32 s5, s2, 24 +; GFX9-NEXT: v_mov_b32_e32 v6, s2 +; GFX9-NEXT: v_mov_b32_e32 v8, s1 +; GFX9-NEXT: ds_write_b8 v1, v0 +; GFX9-NEXT: ds_write_b8 v3, v2 +; GFX9-NEXT: ds_write_b8 v5, v4 +; GFX9-NEXT: ds_write_b8 v7, v6 +; GFX9-NEXT: ds_write_b8 v9, v8 +; GFX9-NEXT: v_mov_b32_e32 v1, s0 +; GFX9-NEXT: s_add_u32 s0, s4, 11 +; GFX9-NEXT: v_mov_b32_e32 v0, s3 +; GFX9-NEXT: v_mov_b32_e32 v2, s5 +; GFX9-NEXT: v_mov_b32_e32 v3, s0 +; GFX9-NEXT: ds_write_b8 v1, v0 +; GFX9-NEXT: ds_write_b8 v3, v2 +; GFX9-NEXT: s_endpgm +; +; GFX7-LABEL: store_lds_v3i32_align1: +; GFX7: ; %bb.0: +; GFX7-NEXT: s_load_dword s4, s[0:1], 0x9 +; GFX7-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0xd +; GFX7-NEXT: s_mov_b32 m0, -1 +; GFX7-NEXT: s_waitcnt lgkmcnt(0) +; GFX7-NEXT: v_mov_b32_e32 v1, s4 +; GFX7-NEXT: s_lshr_b32 s3, s0, 8 +; GFX7-NEXT: s_lshr_b32 s5, s0, 16 +; GFX7-NEXT: s_lshr_b32 s6, s0, 24 +; GFX7-NEXT: v_mov_b32_e32 v0, s0 +; GFX7-NEXT: s_add_u32 s0, s4, 1 +; GFX7-NEXT: v_mov_b32_e32 v3, s0 +; GFX7-NEXT: s_add_u32 s0, s4, 2 +; GFX7-NEXT: v_mov_b32_e32 v5, s0 +; GFX7-NEXT: s_add_u32 s0, s4, 3 +; GFX7-NEXT: v_mov_b32_e32 v7, s0 +; GFX7-NEXT: s_add_u32 s0, s4, 4 +; GFX7-NEXT: v_mov_b32_e32 v9, s0 +; GFX7-NEXT: s_add_u32 s0, s4, 5 +; 
GFX7-NEXT: v_mov_b32_e32 v2, s3 +; GFX7-NEXT: v_mov_b32_e32 v4, s5 +; GFX7-NEXT: v_mov_b32_e32 v6, s6 +; GFX7-NEXT: v_mov_b32_e32 v8, s1 +; GFX7-NEXT: ds_write_b8 v1, v0 +; GFX7-NEXT: ds_write_b8 v3, v2 +; GFX7-NEXT: ds_write_b8 v5, v4 +; GFX7-NEXT: ds_write_b8 v7, v6 +; GFX7-NEXT: ds_write_b8 v9, v8 +; GFX7-NEXT: v_mov_b32_e32 v1, s0 +; GFX7-NEXT: s_add_u32 s0, s4, 6 +; GFX7-NEXT: v_mov_b32_e32 v3, s0 +; GFX7-NEXT: s_add_u32 s0, s4, 7 +; GFX7-NEXT: v_mov_b32_e32 v5, s0 +; GFX7-NEXT: s_add_u32 s0, s4, 8 +; GFX7-NEXT: s_lshr_b32 s3, s1, 8 +; GFX7-NEXT: v_mov_b32_e32 v7, s0 +; GFX7-NEXT: s_add_u32 s0, s4, 9 +; GFX7-NEXT: s_lshr_b32 s5, s1, 16 +; GFX7-NEXT: s_lshr_b32 s6, s1, 24 +; GFX7-NEXT: v_mov_b32_e32 v0, s3 +; GFX7-NEXT: s_lshr_b32 s1, s2, 8 +; GFX7-NEXT: v_mov_b32_e32 v9, s0 +; GFX7-NEXT: s_lshr_b32 s3, s2, 16 +; GFX7-NEXT: s_add_u32 s0, s4, 10 +; GFX7-NEXT: v_mov_b32_e32 v2, s5 +; GFX7-NEXT: v_mov_b32_e32 v4, s6 +; GFX7-NEXT: s_lshr_b32 s5, s2, 24 +; GFX7-NEXT: v_mov_b32_e32 v6, s2 +; GFX7-NEXT: v_mov_b32_e32 v8, s1 +; GFX7-NEXT: ds_write_b8 v1, v0 +; GFX7-NEXT: ds_write_b8 v3, v2 +; GFX7-NEXT: ds_write_b8 v5, v4 +; GFX7-NEXT: ds_write_b8 v7, v6 +; GFX7-NEXT: ds_write_b8 v9, v8 +; GFX7-NEXT: v_mov_b32_e32 v1, s0 +; GFX7-NEXT: s_add_u32 s0, s4, 11 +; GFX7-NEXT: v_mov_b32_e32 v0, s3 +; GFX7-NEXT: v_mov_b32_e32 v2, s5 +; GFX7-NEXT: v_mov_b32_e32 v3, s0 +; GFX7-NEXT: ds_write_b8 v1, v0 +; GFX7-NEXT: ds_write_b8 v3, v2 +; GFX7-NEXT: s_endpgm + store <3 x i32> %x, <3 x i32> addrspace(3)* %out, align 1 + ret void +} + +define amdgpu_kernel void @store_lds_v3i32_align2(<3 x i32> addrspace(3)* %out, <3 x i32> %x) { +; GFX9-LABEL: store_lds_v3i32_align2: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_load_dword s4, s[0:1], 0x24 +; GFX9-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x34 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: v_mov_b32_e32 v1, s4 +; GFX9-NEXT: s_lshr_b32 s3, s0, 16 +; GFX9-NEXT: v_mov_b32_e32 v0, s0 +; GFX9-NEXT: s_add_u32 s0, s4, 2 +; GFX9-NEXT: v_mov_b32_e32 v3, s0 
+; GFX9-NEXT: s_add_u32 s0, s4, 4 +; GFX9-NEXT: v_mov_b32_e32 v5, s0 +; GFX9-NEXT: s_add_u32 s0, s4, 6 +; GFX9-NEXT: v_mov_b32_e32 v7, s0 +; GFX9-NEXT: s_add_u32 s0, s4, 8 +; GFX9-NEXT: v_mov_b32_e32 v2, s3 +; GFX9-NEXT: s_lshr_b32 s3, s1, 16 +; GFX9-NEXT: v_mov_b32_e32 v4, s1 +; GFX9-NEXT: v_mov_b32_e32 v9, s0 +; GFX9-NEXT: s_lshr_b32 s1, s2, 16 +; GFX9-NEXT: s_add_u32 s0, s4, 10 +; GFX9-NEXT: v_mov_b32_e32 v6, s3 +; GFX9-NEXT: v_mov_b32_e32 v8, s2 +; GFX9-NEXT: ds_write_b16 v1, v0 +; GFX9-NEXT: ds_write_b16 v3, v2 +; GFX9-NEXT: ds_write_b16 v5, v4 +; GFX9-NEXT: ds_write_b16 v7, v6 +; GFX9-NEXT: ds_write_b16 v9, v8 +; GFX9-NEXT: v_mov_b32_e32 v0, s1 +; GFX9-NEXT: v_mov_b32_e32 v1, s0 +; GFX9-NEXT: ds_write_b16 v1, v0 +; GFX9-NEXT: s_endpgm +; +; GFX7-LABEL: store_lds_v3i32_align2: +; GFX7: ; %bb.0: +; GFX7-NEXT: s_load_dword s4, s[0:1], 0x9 +; GFX7-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0xd +; GFX7-NEXT: s_mov_b32 m0, -1 +; GFX7-NEXT: s_waitcnt lgkmcnt(0) +; GFX7-NEXT: v_mov_b32_e32 v1, s4 +; GFX7-NEXT: s_lshr_b32 s3, s0, 16 +; GFX7-NEXT: v_mov_b32_e32 v0, s0 +; GFX7-NEXT: s_add_u32 s0, s4, 2 +; GFX7-NEXT: v_mov_b32_e32 v3, s0 +; GFX7-NEXT: s_add_u32 s0, s4, 4 +; GFX7-NEXT: v_mov_b32_e32 v5, s0 +; GFX7-NEXT: s_add_u32 s0, s4, 6 +; GFX7-NEXT: v_mov_b32_e32 v7, s0 +; GFX7-NEXT: s_add_u32 s0, s4, 8 +; GFX7-NEXT: v_mov_b32_e32 v2, s3 +; GFX7-NEXT: s_lshr_b32 s3, s1, 16 +; GFX7-NEXT: v_mov_b32_e32 v4, s1 +; GFX7-NEXT: v_mov_b32_e32 v9, s0 +; GFX7-NEXT: s_lshr_b32 s1, s2, 16 +; GFX7-NEXT: s_add_u32 s0, s4, 10 +; GFX7-NEXT: v_mov_b32_e32 v6, s3 +; GFX7-NEXT: v_mov_b32_e32 v8, s2 +; GFX7-NEXT: ds_write_b16 v1, v0 +; GFX7-NEXT: ds_write_b16 v3, v2 +; GFX7-NEXT: ds_write_b16 v5, v4 +; GFX7-NEXT: ds_write_b16 v7, v6 +; GFX7-NEXT: ds_write_b16 v9, v8 +; GFX7-NEXT: v_mov_b32_e32 v0, s1 +; GFX7-NEXT: v_mov_b32_e32 v1, s0 +; GFX7-NEXT: ds_write_b16 v1, v0 +; GFX7-NEXT: s_endpgm + store <3 x i32> %x, <3 x i32> addrspace(3)* %out, align 2 + ret void +} + +define amdgpu_kernel void 
@store_lds_v3i32_align4(<3 x i32> addrspace(3)* %out, <3 x i32> %x) { +; GFX9-LABEL: store_lds_v3i32_align4: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_load_dword s4, s[0:1], 0x24 +; GFX9-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x34 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: v_mov_b32_e32 v3, s4 +; GFX9-NEXT: v_mov_b32_e32 v0, s0 +; GFX9-NEXT: v_mov_b32_e32 v1, s1 +; GFX9-NEXT: v_mov_b32_e32 v2, s2 +; GFX9-NEXT: ds_write_b96 v3, v[0:2] +; GFX9-NEXT: s_endpgm +; +; GFX7-LABEL: store_lds_v3i32_align4: +; GFX7: ; %bb.0: +; GFX7-NEXT: s_load_dword s4, s[0:1], 0x9 +; GFX7-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0xd +; GFX7-NEXT: s_mov_b32 m0, -1 +; GFX7-NEXT: s_waitcnt lgkmcnt(0) +; GFX7-NEXT: v_mov_b32_e32 v2, s4 +; GFX7-NEXT: v_mov_b32_e32 v0, s0 +; GFX7-NEXT: v_mov_b32_e32 v1, s1 +; GFX7-NEXT: s_add_u32 s0, s4, 8 +; GFX7-NEXT: v_mov_b32_e32 v3, s2 +; GFX7-NEXT: v_mov_b32_e32 v4, s0 +; GFX7-NEXT: ds_write2_b32 v2, v0, v1 offset1:1 +; GFX7-NEXT: ds_write_b32 v4, v3 +; GFX7-NEXT: s_endpgm + store <3 x i32> %x, <3 x i32> addrspace(3)* %out, align 4 + ret void +} + +define amdgpu_kernel void @store_lds_v3i32_align8(<3 x i32> addrspace(3)* %out, <3 x i32> %x) { +; GFX9-LABEL: store_lds_v3i32_align8: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_load_dword s4, s[0:1], 0x24 +; GFX9-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x34 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: v_mov_b32_e32 v3, s4 +; GFX9-NEXT: v_mov_b32_e32 v0, s0 +; GFX9-NEXT: v_mov_b32_e32 v1, s1 +; GFX9-NEXT: v_mov_b32_e32 v2, s2 +; GFX9-NEXT: ds_write_b96 v3, v[0:2] +; GFX9-NEXT: s_endpgm +; +; GFX7-LABEL: store_lds_v3i32_align8: +; GFX7: ; %bb.0: +; GFX7-NEXT: s_load_dword s4, s[0:1], 0x9 +; GFX7-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0xd +; GFX7-NEXT: s_mov_b32 m0, -1 +; GFX7-NEXT: s_waitcnt lgkmcnt(0) +; GFX7-NEXT: v_mov_b32_e32 v2, s4 +; GFX7-NEXT: v_mov_b32_e32 v0, s0 +; GFX7-NEXT: v_mov_b32_e32 v1, s1 +; GFX7-NEXT: s_add_u32 s0, s4, 8 +; GFX7-NEXT: v_mov_b32_e32 v3, s2 +; GFX7-NEXT: v_mov_b32_e32 v4, s0 +; GFX7-NEXT: 
ds_write_b64 v2, v[0:1] +; GFX7-NEXT: ds_write_b32 v4, v3 +; GFX7-NEXT: s_endpgm + store <3 x i32> %x, <3 x i32> addrspace(3)* %out, align 8 + ret void +} + +define amdgpu_kernel void @store_lds_v3i32_align16(<3 x i32> addrspace(3)* %out, <3 x i32> %x) { +; GFX9-LABEL: store_lds_v3i32_align16: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_load_dword s4, s[0:1], 0x24 +; GFX9-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x34 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: v_mov_b32_e32 v3, s4 +; GFX9-NEXT: v_mov_b32_e32 v0, s0 +; GFX9-NEXT: v_mov_b32_e32 v1, s1 +; GFX9-NEXT: v_mov_b32_e32 v2, s2 +; GFX9-NEXT: ds_write_b96 v3, v[0:2] +; GFX9-NEXT: s_endpgm +; +; GFX7-LABEL: store_lds_v3i32_align16: +; GFX7: ; %bb.0: +; GFX7-NEXT: s_load_dword s4, s[0:1], 0x9 +; GFX7-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0xd +; GFX7-NEXT: s_mov_b32 m0, -1 +; GFX7-NEXT: s_waitcnt lgkmcnt(0) +; GFX7-NEXT: v_mov_b32_e32 v3, s4 +; GFX7-NEXT: v_mov_b32_e32 v0, s0 +; GFX7-NEXT: v_mov_b32_e32 v1, s1 +; GFX7-NEXT: v_mov_b32_e32 v2, s2 +; GFX7-NEXT: ds_write_b96 v3, v[0:2] +; GFX7-NEXT: s_endpgm + store <3 x i32> %x, <3 x i32> addrspace(3)* %out, align 16 + ret void +}