Index: lib/Target/AMDGPU/AMDGPUGISel.td =================================================================== --- lib/Target/AMDGPU/AMDGPUGISel.td +++ lib/Target/AMDGPU/AMDGPUGISel.td @@ -69,8 +69,14 @@ // SelectionDAG. The GISel selector can just insert m0 initialization // directly before before selecting a glue-less load, so hide this // distinction. -def : GINodeEquiv<G_LOAD, AMDGPUld_glue>; -def : GINodeEquiv<G_STORE, AMDGPUst_glue>; +def : GINodeEquiv<G_LOAD, AMDGPUld_glue> { + let CheckMMOIsNonAtomic = 1; +} + +def : GINodeEquiv<G_STORE, AMDGPUst_glue> { + let CheckMMOIsNonAtomic = 1; +} + def : GINodeEquiv<G_ATOMICRMW_XCHG, atomic_swap_glue>; def : GINodeEquiv<G_ATOMICRMW_ADD, atomic_load_add_glue>; Index: lib/Target/AMDGPU/DSInstructions.td =================================================================== --- lib/Target/AMDGPU/DSInstructions.td +++ lib/Target/AMDGPU/DSInstructions.td @@ -714,8 +714,8 @@ defm : DSWritePat_mc <DS_WRITE_B32, vt, "store_local">; } -defm : DSAtomicWritePat_mc <DS_WRITE_B32, i32, "atomic_store_local">; -defm : DSAtomicWritePat_mc <DS_WRITE_B64, i64, "atomic_store_local">; +defm : DSAtomicWritePat_mc <DS_WRITE_B32, i32, "atomic_store_local_32">; +defm : DSAtomicWritePat_mc <DS_WRITE_B64, i64, "atomic_store_local_64">; let OtherPredicates = [D16PreservesUnusedBits] in { def : DSWritePat <DS_WRITE_B16_D16_HI, i32, store_hi16_local>; Index: lib/Target/AMDGPU/SIInstrInfo.td =================================================================== --- lib/Target/AMDGPU/SIInstrInfo.td +++ lib/Target/AMDGPU/SIInstrInfo.td @@ -441,19 +441,32 @@ def zextloadi16_local_m0 : PatFrag<(ops node:$ptr), (zextloadi16_glue node:$ptr)>; } -def load_align8_local_m0 : LoadFrag <load_glue>, LocalAddress { - let MinAlignment = 8; +def load_align8_local_m0 : PatFrag<(ops node:$ptr), + (load_local_m0 node:$ptr)> { + let IsLoad = 1; let IsNonExtLoad = 1; + let MinAlignment = 8; } -def load_align16_local_m0 : LoadFrag <load_glue>, LocalAddress { - let MinAlignment = 16; +def load_align16_local_m0 : PatFrag<(ops node:$ptr), + (load_local_m0 node:$ptr)> { + let IsLoad = 1; let IsNonExtLoad = 1; + let MinAlignment = 16; } } // End IsLoad = 1 -def atomic_load_32_local_m0 : LoadFrag<atomic_load_32_glue>, LocalAddress; -def atomic_load_64_local_m0 : LoadFrag<atomic_load_64_glue>, LocalAddress; +let AddressSpaces = LoadAddress_local.AddrSpaces in { + +def atomic_load_32_local_m0 : PatFrag<(ops node:$ptr), + (atomic_load_32_glue node:$ptr)> 
{ + let IsAtomic = 1; +} +def atomic_load_64_local_m0 : PatFrag<(ops node:$ptr), + (atomic_load_64_glue node:$ptr)> { + let IsAtomic = 1; +} +} // End let AddressSpaces = LoadAddress_local.AddrSpaces def AMDGPUst_glue : SDNode <"ISD::STORE", SDTStore, @@ -464,10 +477,6 @@ [SDNPHasChain, SDNPMayStore, SDNPMemOperand, SDNPInGlue] >; -def atomic_store_glue : PatFrag<(ops node:$ptr, node:$val), - (AMDGPUatomic_st_glue node:$ptr, node:$val)> { -} - def unindexedstore_glue : PatFrag<(ops node:$val, node:$ptr), (AMDGPUst_glue node:$val, node:$ptr)> { let IsStore = 1; @@ -499,13 +508,8 @@ } let IsStore = 1, AddressSpaces = StoreAddress_local.AddrSpaces in { -def store_glue_align8 : PatFrag<(ops node:$val, node:$ptr), - (store_glue node:$val, node:$ptr)>, Aligned<8>; -def store_glue_align16 : PatFrag<(ops node:$val, node:$ptr), - (store_glue node:$val, node:$ptr)>, Aligned<16>; - def store_local_m0 : PatFrag<(ops node:$val, node:$ptr), - (unindexedstore_glue node:$val, node:$ptr)> { + (store_glue node:$val, node:$ptr)> { let IsStore = 1; let IsTruncStore = 0; } @@ -516,23 +520,45 @@ let MemoryVT = i8; } - def truncstorei16_local_m0 : PatFrag<(ops node:$val, node:$ptr), (unindexedstore_glue node:$val, node:$ptr)> { let IsStore = 1; let MemoryVT = i16; } +} -// FIXME: atomic store doesn't work. 
-def atomic_store_local_m0 : StoreFrag<atomic_store_glue>, LocalAddress; -def store_align8_local_m0 : StoreFrag <store_local_m0>, LocalAddress { +def store_align16_local_m0 : PatFrag < + (ops node:$value, node:$ptr), + (store_local_m0 node:$value, node:$ptr)> { + let IsStore = 1; let IsTruncStore = 0; + let MinAlignment = 16; } -def store_align16_local_m0 : StoreFrag <store_local_m0>, LocalAddress { +def store_align8_local_m0 : PatFrag < + (ops node:$value, node:$ptr), + (store_local_m0 node:$value, node:$ptr)> { + let IsStore = 1; let IsTruncStore = 0; + let MinAlignment = 8; +} + +let AddressSpaces = StoreAddress_local.AddrSpaces in { + +def atomic_store_local_32_m0 : PatFrag < + (ops node:$value, node:$ptr), + (AMDGPUatomic_st_glue node:$value, node:$ptr)> { + let IsAtomic = 1; + let MemoryVT = i32; } +def atomic_store_local_64_m0 : PatFrag < + (ops node:$value, node:$ptr), + (AMDGPUatomic_st_glue node:$value, node:$ptr)> { + let IsAtomic = 1; + let MemoryVT = i64; } +} // End let AddressSpaces = StoreAddress_local.AddrSpaces + def si_setcc_uniform : PatFrag < (ops node:$lhs, node:$rhs, node:$cond), Index: test/CodeGen/AMDGPU/GlobalISel/inst-select-load-local.mir =================================================================== --- test/CodeGen/AMDGPU/GlobalISel/inst-select-load-local.mir +++ test/CodeGen/AMDGPU/GlobalISel/inst-select-load-local.mir @@ -215,7 +215,7 @@ --- -name: load_local_v4s32 +name: load_local_v4s32_align16 legalized: true regBankSelected: true tracksRegLiveness: true @@ -224,19 +224,53 @@ bb.0: liveins: $vgpr0 - ; GFX6-LABEL: name: load_local_v4s32 + ; GFX6-LABEL: name: load_local_v4s32_align16 + ; GFX6: liveins: $vgpr0 + ; GFX6: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX6: $m0 = S_MOV_B32 -1 + ; GFX6: [[DS_READ_B128_:%[0-9]+]]:vreg_128 = DS_READ_B128 [[COPY]], 0, 0, implicit $m0, implicit $exec :: (load 16, addrspace 3) + ; GFX6: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[DS_READ_B128_]] + ; GFX7-LABEL: name: load_local_v4s32_align16 + ; GFX7: liveins: $vgpr0 + ; GFX7: [[COPY:%[0-9]+]]:vgpr_32 
= COPY $vgpr0 + ; GFX7: $m0 = S_MOV_B32 -1 + ; GFX7: [[DS_READ_B128_:%[0-9]+]]:vreg_128 = DS_READ_B128 [[COPY]], 0, 0, implicit $m0, implicit $exec :: (load 16, addrspace 3) + ; GFX7: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[DS_READ_B128_]] + ; GFX9-LABEL: name: load_local_v4s32_align16 + ; GFX9: liveins: $vgpr0 + ; GFX9: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX9: [[DS_READ_B128_gfx9_:%[0-9]+]]:vreg_128 = DS_READ_B128_gfx9 [[COPY]], 0, 0, implicit $exec :: (load 16, addrspace 3) + ; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[DS_READ_B128_gfx9_]] + %0:vgpr(p3) = COPY $vgpr0 + %1:vgpr(<4 x s32>) = G_LOAD %0 :: (load 16, align 16, addrspace 3) + $vgpr0_vgpr1_vgpr2_vgpr3 = COPY %1 + +... + +--- + +name: load_local_v4s32_align_4 +legalized: true +regBankSelected: true +tracksRegLiveness: true + +body: | + bb.0: + liveins: $vgpr0 + + ; GFX6-LABEL: name: load_local_v4s32_align_4 ; GFX6: liveins: $vgpr0 ; GFX6: [[COPY:%[0-9]+]]:vgpr(p3) = COPY $vgpr0 ; GFX6: $m0 = S_MOV_B32 -1 ; GFX6: [[LOAD:%[0-9]+]]:vreg_128(<4 x s32>) = G_LOAD [[COPY]](p3) :: (load 16, align 4, addrspace 3) ; GFX6: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[LOAD]](<4 x s32>) - ; GFX7-LABEL: name: load_local_v4s32 + ; GFX7-LABEL: name: load_local_v4s32_align_4 ; GFX7: liveins: $vgpr0 ; GFX7: [[COPY:%[0-9]+]]:vgpr(p3) = COPY $vgpr0 ; GFX7: $m0 = S_MOV_B32 -1 ; GFX7: [[LOAD:%[0-9]+]]:vreg_128(<4 x s32>) = G_LOAD [[COPY]](p3) :: (load 16, align 4, addrspace 3) ; GFX7: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[LOAD]](<4 x s32>) - ; GFX9-LABEL: name: load_local_v4s32 + ; GFX9-LABEL: name: load_local_v4s32_align_4 ; GFX9: liveins: $vgpr0 ; GFX9: [[COPY:%[0-9]+]]:vgpr(p3) = COPY $vgpr0 ; GFX9: [[LOAD:%[0-9]+]]:vreg_128(<4 x s32>) = G_LOAD [[COPY]](p3) :: (load 16, align 4, addrspace 3) Index: test/CodeGen/AMDGPU/GlobalISel/inst-select-store-local.mir =================================================================== --- test/CodeGen/AMDGPU/GlobalISel/inst-select-store-local.mir +++ 
test/CodeGen/AMDGPU/GlobalISel/inst-select-store-local.mir @@ -260,3 +260,307 @@ G_STORE %1, %0 :: (store 1, align 1, addrspace 3) ... + +--- + +name: store_local_s64_align4 +legalized: true +regBankSelected: true +tracksRegLiveness: true +machineFunctionInfo: + scratchRSrcReg: $sgpr0_sgpr1_sgpr2_sgpr3 + scratchWaveOffsetReg: $sgpr4 + stackPtrOffsetReg: $sgpr32 + +body: | + bb.0: + liveins: $vgpr0_vgpr1, $vgpr2 + + ; GFX6-LABEL: name: store_local_s64_align4 + ; GFX6: liveins: $vgpr0_vgpr1, $vgpr2 + ; GFX6: [[COPY:%[0-9]+]]:vgpr(s64) = COPY $vgpr0_vgpr1 + ; GFX6: [[COPY1:%[0-9]+]]:vgpr(p3) = COPY $vgpr2 + ; GFX6: $m0 = S_MOV_B32 -1 + ; GFX6: G_STORE [[COPY]](s64), [[COPY1]](p3) :: (store 8, align 4, addrspace 3) + ; GFX7-LABEL: name: store_local_s64_align4 + ; GFX7: liveins: $vgpr0_vgpr1, $vgpr2 + ; GFX7: [[COPY:%[0-9]+]]:vgpr(s64) = COPY $vgpr0_vgpr1 + ; GFX7: [[COPY1:%[0-9]+]]:vgpr(p3) = COPY $vgpr2 + ; GFX7: $m0 = S_MOV_B32 -1 + ; GFX7: G_STORE [[COPY]](s64), [[COPY1]](p3) :: (store 8, align 4, addrspace 3) + ; GFX9-LABEL: name: store_local_s64_align4 + ; GFX9: liveins: $vgpr0_vgpr1, $vgpr2 + ; GFX9: [[COPY:%[0-9]+]]:vgpr(s64) = COPY $vgpr0_vgpr1 + ; GFX9: [[COPY1:%[0-9]+]]:vgpr(p3) = COPY $vgpr2 + ; GFX9: G_STORE [[COPY]](s64), [[COPY1]](p3) :: (store 8, align 4, addrspace 3) + %0:vgpr(s64) = COPY $vgpr0_vgpr1 + %1:vgpr(p3) = COPY $vgpr2 + G_STORE %0, %1 :: (store 8, align 4, addrspace 3) + +... 
+ +--- + +name: store_local_p1_align4 +legalized: true +regBankSelected: true +tracksRegLiveness: true +machineFunctionInfo: + scratchRSrcReg: $sgpr0_sgpr1_sgpr2_sgpr3 + scratchWaveOffsetReg: $sgpr4 + stackPtrOffsetReg: $sgpr32 + +body: | + bb.0: + liveins: $vgpr0_vgpr1, $vgpr2 + + ; GFX6-LABEL: name: store_local_p1_align4 + ; GFX6: liveins: $vgpr0_vgpr1, $vgpr2 + ; GFX6: [[COPY:%[0-9]+]]:vgpr(p1) = COPY $vgpr0_vgpr1 + ; GFX6: [[COPY1:%[0-9]+]]:vgpr(p3) = COPY $vgpr2 + ; GFX6: $m0 = S_MOV_B32 -1 + ; GFX6: G_STORE [[COPY]](p1), [[COPY1]](p3) :: (store 8, align 4, addrspace 3) + ; GFX7-LABEL: name: store_local_p1_align4 + ; GFX7: liveins: $vgpr0_vgpr1, $vgpr2 + ; GFX7: [[COPY:%[0-9]+]]:vgpr(p1) = COPY $vgpr0_vgpr1 + ; GFX7: [[COPY1:%[0-9]+]]:vgpr(p3) = COPY $vgpr2 + ; GFX7: $m0 = S_MOV_B32 -1 + ; GFX7: G_STORE [[COPY]](p1), [[COPY1]](p3) :: (store 8, align 4, addrspace 3) + ; GFX9-LABEL: name: store_local_p1_align4 + ; GFX9: liveins: $vgpr0_vgpr1, $vgpr2 + ; GFX9: [[COPY:%[0-9]+]]:vgpr(p1) = COPY $vgpr0_vgpr1 + ; GFX9: [[COPY1:%[0-9]+]]:vgpr(p3) = COPY $vgpr2 + ; GFX9: G_STORE [[COPY]](p1), [[COPY1]](p3) :: (store 8, align 4, addrspace 3) + %0:vgpr(p1) = COPY $vgpr0_vgpr1 + %1:vgpr(p3) = COPY $vgpr2 + G_STORE %0, %1 :: (store 8, align 4, addrspace 3) + +... 
+ +--- + +name: store_local_v2s32_align4 +legalized: true +regBankSelected: true +tracksRegLiveness: true +machineFunctionInfo: + scratchRSrcReg: $sgpr0_sgpr1_sgpr2_sgpr3 + scratchWaveOffsetReg: $sgpr4 + stackPtrOffsetReg: $sgpr32 + +body: | + bb.0: + liveins: $vgpr0_vgpr1, $vgpr2 + + ; GFX6-LABEL: name: store_local_v2s32_align4 + ; GFX6: liveins: $vgpr0_vgpr1, $vgpr2 + ; GFX6: [[COPY:%[0-9]+]]:vgpr(<2 x s32>) = COPY $vgpr0_vgpr1 + ; GFX6: [[COPY1:%[0-9]+]]:vgpr(p3) = COPY $vgpr2 + ; GFX6: $m0 = S_MOV_B32 -1 + ; GFX6: G_STORE [[COPY]](<2 x s32>), [[COPY1]](p3) :: (store 8, align 4, addrspace 3) + ; GFX7-LABEL: name: store_local_v2s32_align4 + ; GFX7: liveins: $vgpr0_vgpr1, $vgpr2 + ; GFX7: [[COPY:%[0-9]+]]:vgpr(<2 x s32>) = COPY $vgpr0_vgpr1 + ; GFX7: [[COPY1:%[0-9]+]]:vgpr(p3) = COPY $vgpr2 + ; GFX7: $m0 = S_MOV_B32 -1 + ; GFX7: G_STORE [[COPY]](<2 x s32>), [[COPY1]](p3) :: (store 8, align 4, addrspace 3) + ; GFX9-LABEL: name: store_local_v2s32_align4 + ; GFX9: liveins: $vgpr0_vgpr1, $vgpr2 + ; GFX9: [[COPY:%[0-9]+]]:vgpr(<2 x s32>) = COPY $vgpr0_vgpr1 + ; GFX9: [[COPY1:%[0-9]+]]:vgpr(p3) = COPY $vgpr2 + ; GFX9: G_STORE [[COPY]](<2 x s32>), [[COPY1]](p3) :: (store 8, align 4, addrspace 3) + %0:vgpr(<2 x s32>) = COPY $vgpr0_vgpr1 + %1:vgpr(p3) = COPY $vgpr2 + G_STORE %0, %1 :: (store 8, align 4, addrspace 3) + +... 
+ +--- + +name: store_local_v4s16_align4 +legalized: true +regBankSelected: true +tracksRegLiveness: true +machineFunctionInfo: + scratchRSrcReg: $sgpr0_sgpr1_sgpr2_sgpr3 + scratchWaveOffsetReg: $sgpr4 + stackPtrOffsetReg: $sgpr32 + +body: | + bb.0: + liveins: $vgpr0_vgpr1, $vgpr2 + + ; GFX6-LABEL: name: store_local_v4s16_align4 + ; GFX6: liveins: $vgpr0_vgpr1, $vgpr2 + ; GFX6: [[COPY:%[0-9]+]]:vgpr(<4 x s16>) = COPY $vgpr0_vgpr1 + ; GFX6: [[COPY1:%[0-9]+]]:vgpr(p3) = COPY $vgpr2 + ; GFX6: $m0 = S_MOV_B32 -1 + ; GFX6: G_STORE [[COPY]](<4 x s16>), [[COPY1]](p3) :: (store 8, align 4, addrspace 3) + ; GFX7-LABEL: name: store_local_v4s16_align4 + ; GFX7: liveins: $vgpr0_vgpr1, $vgpr2 + ; GFX7: [[COPY:%[0-9]+]]:vgpr(<4 x s16>) = COPY $vgpr0_vgpr1 + ; GFX7: [[COPY1:%[0-9]+]]:vgpr(p3) = COPY $vgpr2 + ; GFX7: $m0 = S_MOV_B32 -1 + ; GFX7: G_STORE [[COPY]](<4 x s16>), [[COPY1]](p3) :: (store 8, align 4, addrspace 3) + ; GFX9-LABEL: name: store_local_v4s16_align4 + ; GFX9: liveins: $vgpr0_vgpr1, $vgpr2 + ; GFX9: [[COPY:%[0-9]+]]:vgpr(<4 x s16>) = COPY $vgpr0_vgpr1 + ; GFX9: [[COPY1:%[0-9]+]]:vgpr(p3) = COPY $vgpr2 + ; GFX9: G_STORE [[COPY]](<4 x s16>), [[COPY1]](p3) :: (store 8, align 4, addrspace 3) + %0:vgpr(<4 x s16>) = COPY $vgpr0_vgpr1 + %1:vgpr(p3) = COPY $vgpr2 + G_STORE %0, %1 :: (store 8, align 4, addrspace 3) + +... 
+ +--- + +name: store_local_s64_align8 +legalized: true +regBankSelected: true +tracksRegLiveness: true +machineFunctionInfo: + scratchRSrcReg: $sgpr0_sgpr1_sgpr2_sgpr3 + scratchWaveOffsetReg: $sgpr4 + stackPtrOffsetReg: $sgpr32 + +body: | + bb.0: + liveins: $vgpr0_vgpr1, $vgpr2 + + ; GFX6-LABEL: name: store_local_s64_align8 + ; GFX6: liveins: $vgpr0_vgpr1, $vgpr2 + ; GFX6: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1 + ; GFX6: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX6: $m0 = S_MOV_B32 -1 + ; GFX6: DS_WRITE_B64 [[COPY1]], [[COPY]], 0, 0, implicit $m0, implicit $exec :: (store 8, addrspace 3) + ; GFX7-LABEL: name: store_local_s64_align8 + ; GFX7: liveins: $vgpr0_vgpr1, $vgpr2 + ; GFX7: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1 + ; GFX7: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX7: $m0 = S_MOV_B32 -1 + ; GFX7: DS_WRITE_B64 [[COPY1]], [[COPY]], 0, 0, implicit $m0, implicit $exec :: (store 8, addrspace 3) + ; GFX9-LABEL: name: store_local_s64_align8 + ; GFX9: liveins: $vgpr0_vgpr1, $vgpr2 + ; GFX9: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1 + ; GFX9: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX9: DS_WRITE_B64_gfx9 [[COPY1]], [[COPY]], 0, 0, implicit $exec :: (store 8, addrspace 3) + %0:vgpr(s64) = COPY $vgpr0_vgpr1 + %1:vgpr(p3) = COPY $vgpr2 + G_STORE %0, %1 :: (store 8, align 8, addrspace 3) + +... 
+ +--- + +name: store_local_p1_align8 +legalized: true +regBankSelected: true +tracksRegLiveness: true +machineFunctionInfo: + scratchRSrcReg: $sgpr0_sgpr1_sgpr2_sgpr3 + scratchWaveOffsetReg: $sgpr4 + stackPtrOffsetReg: $sgpr32 + +body: | + bb.0: + liveins: $vgpr0_vgpr1, $vgpr2 + + ; GFX6-LABEL: name: store_local_p1_align8 + ; GFX6: liveins: $vgpr0_vgpr1, $vgpr2 + ; GFX6: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1 + ; GFX6: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX6: $m0 = S_MOV_B32 -1 + ; GFX6: DS_WRITE_B64 [[COPY1]], [[COPY]], 0, 0, implicit $m0, implicit $exec :: (store 8, addrspace 3) + ; GFX7-LABEL: name: store_local_p1_align8 + ; GFX7: liveins: $vgpr0_vgpr1, $vgpr2 + ; GFX7: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1 + ; GFX7: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX7: $m0 = S_MOV_B32 -1 + ; GFX7: DS_WRITE_B64 [[COPY1]], [[COPY]], 0, 0, implicit $m0, implicit $exec :: (store 8, addrspace 3) + ; GFX9-LABEL: name: store_local_p1_align8 + ; GFX9: liveins: $vgpr0_vgpr1, $vgpr2 + ; GFX9: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1 + ; GFX9: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX9: DS_WRITE_B64_gfx9 [[COPY1]], [[COPY]], 0, 0, implicit $exec :: (store 8, addrspace 3) + %0:vgpr(p1) = COPY $vgpr0_vgpr1 + %1:vgpr(p3) = COPY $vgpr2 + G_STORE %0, %1 :: (store 8, align 8, addrspace 3) + +... 
+ +--- + +name: store_local_v2s32_align8 +legalized: true +regBankSelected: true +tracksRegLiveness: true +machineFunctionInfo: + scratchRSrcReg: $sgpr0_sgpr1_sgpr2_sgpr3 + scratchWaveOffsetReg: $sgpr4 + stackPtrOffsetReg: $sgpr32 + +body: | + bb.0: + liveins: $vgpr0_vgpr1, $vgpr2 + + ; GFX6-LABEL: name: store_local_v2s32_align8 + ; GFX6: liveins: $vgpr0_vgpr1, $vgpr2 + ; GFX6: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1 + ; GFX6: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX6: $m0 = S_MOV_B32 -1 + ; GFX6: DS_WRITE_B64 [[COPY1]], [[COPY]], 0, 0, implicit $m0, implicit $exec :: (store 8, addrspace 3) + ; GFX7-LABEL: name: store_local_v2s32_align8 + ; GFX7: liveins: $vgpr0_vgpr1, $vgpr2 + ; GFX7: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1 + ; GFX7: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX7: $m0 = S_MOV_B32 -1 + ; GFX7: DS_WRITE_B64 [[COPY1]], [[COPY]], 0, 0, implicit $m0, implicit $exec :: (store 8, addrspace 3) + ; GFX9-LABEL: name: store_local_v2s32_align8 + ; GFX9: liveins: $vgpr0_vgpr1, $vgpr2 + ; GFX9: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1 + ; GFX9: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX9: DS_WRITE_B64_gfx9 [[COPY1]], [[COPY]], 0, 0, implicit $exec :: (store 8, addrspace 3) + %0:vgpr(<2 x s32>) = COPY $vgpr0_vgpr1 + %1:vgpr(p3) = COPY $vgpr2 + G_STORE %0, %1 :: (store 8, align 8, addrspace 3) + +... 
+ +--- + +name: store_local_v4s16_align8 +legalized: true +regBankSelected: true +tracksRegLiveness: true +machineFunctionInfo: + scratchRSrcReg: $sgpr0_sgpr1_sgpr2_sgpr3 + scratchWaveOffsetReg: $sgpr4 + stackPtrOffsetReg: $sgpr32 + +body: | + bb.0: + liveins: $vgpr0_vgpr1, $vgpr2 + + ; GFX6-LABEL: name: store_local_v4s16_align8 + ; GFX6: liveins: $vgpr0_vgpr1, $vgpr2 + ; GFX6: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1 + ; GFX6: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX6: $m0 = S_MOV_B32 -1 + ; GFX6: DS_WRITE_B64 [[COPY1]], [[COPY]], 0, 0, implicit $m0, implicit $exec :: (store 8, addrspace 3) + ; GFX7-LABEL: name: store_local_v4s16_align8 + ; GFX7: liveins: $vgpr0_vgpr1, $vgpr2 + ; GFX7: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1 + ; GFX7: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX7: $m0 = S_MOV_B32 -1 + ; GFX7: DS_WRITE_B64 [[COPY1]], [[COPY]], 0, 0, implicit $m0, implicit $exec :: (store 8, addrspace 3) + ; GFX9-LABEL: name: store_local_v4s16_align8 + ; GFX9: liveins: $vgpr0_vgpr1, $vgpr2 + ; GFX9: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1 + ; GFX9: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX9: DS_WRITE_B64_gfx9 [[COPY1]], [[COPY]], 0, 0, implicit $exec :: (store 8, addrspace 3) + %0:vgpr(<4 x s16>) = COPY $vgpr0_vgpr1 + %1:vgpr(p3) = COPY $vgpr2 + G_STORE %0, %1 :: (store 8, align 8, addrspace 3) + +... Index: utils/TableGen/CodeGenDAGPatterns.cpp =================================================================== --- utils/TableGen/CodeGenDAGPatterns.cpp +++ utils/TableGen/CodeGenDAGPatterns.cpp @@ -919,6 +919,7 @@ if (isAtomic()) { if (getMemoryVT() == nullptr && !isAtomicOrderingMonotonic() && + getAddressSpaces() == nullptr && !isAtomicOrderingAcquire() && !isAtomicOrderingRelease() && !isAtomicOrderingAcquireRelease() && !isAtomicOrderingSequentiallyConsistent() &&