Index: llvm/trunk/lib/Target/AMDGPU/SIMemoryLegalizer.cpp =================================================================== --- llvm/trunk/lib/Target/AMDGPU/SIMemoryLegalizer.cpp +++ llvm/trunk/lib/Target/AMDGPU/SIMemoryLegalizer.cpp @@ -52,13 +52,15 @@ SyncScope::ID SSID = SyncScope::System; AtomicOrdering Ordering = AtomicOrdering::NotAtomic; AtomicOrdering FailureOrdering = AtomicOrdering::NotAtomic; + bool IsNonTemporal = false; SIMemOpInfo(SyncScope::ID SSID, AtomicOrdering Ordering) : SSID(SSID), Ordering(Ordering) {} SIMemOpInfo(SyncScope::ID SSID, AtomicOrdering Ordering, - AtomicOrdering FailureOrdering) - : SSID(SSID), Ordering(Ordering), FailureOrdering(FailureOrdering) {} + AtomicOrdering FailureOrdering, bool IsNonTemporal = false) + : SSID(SSID), Ordering(Ordering), FailureOrdering(FailureOrdering), + IsNonTemporal(IsNonTemporal) {} /// \returns Info constructed from \p MI, which has at least machine memory /// operand. @@ -81,6 +83,11 @@ AtomicOrdering getFailureOrdering() const { return FailureOrdering; } + /// \returns True if memory access of the machine instruction used to + /// create this SIMemOpInfo is non-temporal, false otherwise. + bool isNonTemporal() const { + return IsNonTemporal; + } /// \returns True if ordering constraint of the machine instruction used to /// create this SIMemOpInfo is unordered or higher, false otherwise. @@ -130,6 +137,34 @@ /// \brief List of atomic pseudo instructions. std::list<MachineBasicBlock::iterator> AtomicPseudoMIs; + /// \brief Sets named bit (BitName) to "true" if present in \p MI. Returns + /// true if \p MI is modified, false otherwise. + template <uint16_t BitName> + bool enableNamedBit(const MachineBasicBlock::iterator &MI) const { + int BitIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(), BitName); + if (BitIdx == -1) + return false; + + MachineOperand &Bit = MI->getOperand(BitIdx); + if (Bit.getImm() != 0) + return false; + + Bit.setImm(1); + return true; + } + + /// \brief Sets GLC bit to "true" if present in \p MI. 
Returns true if \p MI + /// is modified, false otherwise. + bool enableGLCBit(const MachineBasicBlock::iterator &MI) const { + return enableNamedBit<AMDGPU::OpName::glc>(MI); + } + + /// \brief Sets SLC bit to "true" if present in \p MI. Returns true if \p MI + /// is modified, false otherwise. + bool enableSLCBit(const MachineBasicBlock::iterator &MI) const { + return enableNamedBit<AMDGPU::OpName::slc>(MI); + } + /// \brief Inserts "buffer_wbinvl1_vol" instruction \p Before or after \p MI. /// Always returns true. bool insertBufferWbinvl1Vol(MachineBasicBlock::iterator &MI, @@ -139,10 +174,6 @@ bool insertWaitcntVmcnt0(MachineBasicBlock::iterator &MI, bool Before = true) const; - /// \brief Sets GLC bit if present in \p MI. Returns true if \p MI is - /// modified, false otherwise. - bool setGLC(const MachineBasicBlock::iterator &MI) const; - /// \brief Removes all processed atomic pseudo instructions from the current /// function. Returns true if current function is modified, false otherwise. bool removeAtomicPseudoMIs(); @@ -199,6 +230,7 @@ SyncScope::ID SSID = SyncScope::SingleThread; AtomicOrdering Ordering = AtomicOrdering::NotAtomic; AtomicOrdering FailureOrdering = AtomicOrdering::NotAtomic; + bool IsNonTemporal = true; // Validator should check whether or not MMOs cover the entire set of // locations accessed by the memory instruction. @@ -217,9 +249,12 @@ FailureOrdering = isStrongerThan(FailureOrdering, MMO->getFailureOrdering()) ? 
FailureOrdering : MMO->getFailureOrdering(); + + if (!(MMO->getFlags() & MachineMemOperand::MONonTemporal)) + IsNonTemporal = false; } - return SIMemOpInfo(SSID, Ordering, FailureOrdering); + return SIMemOpInfo(SSID, Ordering, FailureOrdering, IsNonTemporal); } /* static */ @@ -343,19 +378,6 @@ return true; } -bool SIMemoryLegalizer::setGLC(const MachineBasicBlock::iterator &MI) const { - int GLCIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(), AMDGPU::OpName::glc); - if (GLCIdx == -1) - return false; - - MachineOperand &GLC = MI->getOperand(GLCIdx); - if (GLC.getImm() == 1) - return false; - - GLC.setImm(1); - return true; -} - bool SIMemoryLegalizer::removeAtomicPseudoMIs() { if (AtomicPseudoMIs.empty()) return false; @@ -378,7 +400,7 @@ MOI.getSSID() == MMI->getAgentSSID()) { if (MOI.getOrdering() == AtomicOrdering::Acquire || MOI.getOrdering() == AtomicOrdering::SequentiallyConsistent) - Changed |= setGLC(MI); + Changed |= enableGLCBit(MI); if (MOI.getOrdering() == AtomicOrdering::SequentiallyConsistent) Changed |= insertWaitcntVmcnt0(MI); @@ -401,6 +423,13 @@ llvm_unreachable("Unsupported synchronization scope"); } + // Atomic instructions do not have the nontemporal attribute. + if (MOI.isNonTemporal()) { + Changed |= enableGLCBit(MI); + Changed |= enableSLCBit(MI); + return Changed; + } + return Changed; } @@ -429,6 +458,13 @@ llvm_unreachable("Unsupported synchronization scope"); } + // Atomic instructions do not have the nontemporal attribute. 
+ if (MOI.isNonTemporal()) { + Changed |= enableGLCBit(MI); + Changed |= enableSLCBit(MI); + return Changed; + } + return Changed; } @@ -499,7 +535,7 @@ if (MOI.getSSID() == SyncScope::SingleThread || MOI.getSSID() == MMI->getWorkgroupSSID() || MOI.getSSID() == MMI->getWavefrontSSID()) { - Changed |= setGLC(MI); + Changed |= enableGLCBit(MI); return Changed; } @@ -536,7 +572,7 @@ if (MOI.getSSID() == SyncScope::SingleThread || MOI.getSSID() == MMI->getWorkgroupSSID() || MOI.getSSID() == MMI->getWavefrontSSID()) { - Changed |= setGLC(MI); + Changed |= enableGLCBit(MI); return Changed; } Index: llvm/trunk/test/CodeGen/AMDGPU/memory-legalizer-nontemporal-load.ll =================================================================== --- llvm/trunk/test/CodeGen/AMDGPU/memory-legalizer-nontemporal-load.ll +++ llvm/trunk/test/CodeGen/AMDGPU/memory-legalizer-nontemporal-load.ll @@ -0,0 +1,97 @@ +; RUN: llc -mtriple=amdgcn-amd- -mcpu=gfx800 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck --check-prefix=GCN --check-prefix=GFX8 %s +; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx800 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck --check-prefix=GCN --check-prefix=GFX8 %s +; RUN: llc -mtriple=amdgcn-amd- -mcpu=gfx900 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck --check-prefix=GCN --check-prefix=GFX9 %s +; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck --check-prefix=GCN --check-prefix=GFX9 %s + +declare i32 @llvm.amdgcn.workitem.id.x() + +; GCN-LABEL: {{^}}nontemporal_load_private_0 +; GCN: buffer_load_dword v{{[0-9]+}}, v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], s{{[0-9]+}} offen glc slc{{$}} +define amdgpu_kernel void @nontemporal_load_private_0( + i32* %in, i32 addrspace(4)* %out) { +entry: + %val = load i32, i32* %in, align 4, !nontemporal !0 + store i32 %val, i32 addrspace(4)* %out + ret void +} + +; GCN-LABEL: {{^}}nontemporal_load_private_1 +; GCN: buffer_load_dword 
v{{[0-9]+}}, v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], s{{[0-9]+}} offen glc slc{{$}} +define amdgpu_kernel void @nontemporal_load_private_1( + i32* %in, i32 addrspace(4)* %out) { +entry: + %tid = call i32 @llvm.amdgcn.workitem.id.x() + %val.gep = getelementptr inbounds i32, i32* %in, i32 %tid + %val = load i32, i32* %val.gep, align 4, !nontemporal !0 + store i32 %val, i32 addrspace(4)* %out + ret void +} + +; GCN-LABEL: {{^}}nontemporal_load_global_0 +; GCN: s_load_dword s{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 0x0{{$}} +define amdgpu_kernel void @nontemporal_load_global_0( + i32 addrspace(1)* %in, i32 addrspace(4)* %out) { +entry: + %val = load i32, i32 addrspace(1)* %in, align 4, !nontemporal !0 + store i32 %val, i32 addrspace(4)* %out + ret void +} + +; GCN-LABEL: {{^}}nontemporal_load_global_1 +; GFX8: flat_load_dword v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}] glc slc{{$}} +; GFX9: global_load_dword v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], off glc slc{{$}} +define amdgpu_kernel void @nontemporal_load_global_1( + i32 addrspace(1)* %in, i32 addrspace(4)* %out) { +entry: + %tid = call i32 @llvm.amdgcn.workitem.id.x() + %val.gep = getelementptr inbounds i32, i32 addrspace(1)* %in, i32 %tid + %val = load i32, i32 addrspace(1)* %val.gep, align 4, !nontemporal !0 + store i32 %val, i32 addrspace(4)* %out + ret void +} + +; GCN-LABEL: {{^}}nontemporal_load_local_0 +; GCN: ds_read_b32 v{{[0-9]+}}, v{{[0-9]+}}{{$}} +define amdgpu_kernel void @nontemporal_load_local_0( + i32 addrspace(3)* %in, i32 addrspace(4)* %out) { +entry: + %val = load i32, i32 addrspace(3)* %in, align 4, !nontemporal !0 + store i32 %val, i32 addrspace(4)* %out + ret void +} + +; GCN-LABEL: {{^}}nontemporal_load_local_1 +; GCN: ds_read_b32 v{{[0-9]+}}, v{{[0-9]+}}{{$}} +define amdgpu_kernel void @nontemporal_load_local_1( + i32 addrspace(3)* %in, i32 addrspace(4)* %out) { +entry: + %tid = call i32 @llvm.amdgcn.workitem.id.x() + %val.gep = getelementptr inbounds i32, i32 addrspace(3)* %in, i32 %tid + %val = load 
i32, i32 addrspace(3)* %val.gep, align 4, !nontemporal !0 + store i32 %val, i32 addrspace(4)* %out + ret void +} + +; GCN-LABEL: {{^}}nontemporal_load_flat_0 +; GCN: flat_load_dword v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}] glc slc{{$}} +define amdgpu_kernel void @nontemporal_load_flat_0( + i32 addrspace(4)* %in, i32 addrspace(4)* %out) { +entry: + %val = load i32, i32 addrspace(4)* %in, align 4, !nontemporal !0 + store i32 %val, i32 addrspace(4)* %out + ret void +} + +; GCN-LABEL: {{^}}nontemporal_load_flat_1 +; GCN: flat_load_dword v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}] glc slc{{$}} +define amdgpu_kernel void @nontemporal_load_flat_1( + i32 addrspace(4)* %in, i32 addrspace(4)* %out) { +entry: + %tid = call i32 @llvm.amdgcn.workitem.id.x() + %val.gep = getelementptr inbounds i32, i32 addrspace(4)* %in, i32 %tid + %val = load i32, i32 addrspace(4)* %val.gep, align 4, !nontemporal !0 + store i32 %val, i32 addrspace(4)* %out + ret void +} + +!0 = !{i32 1} Index: llvm/trunk/test/CodeGen/AMDGPU/memory-legalizer-nontemporal-store.ll =================================================================== --- llvm/trunk/test/CodeGen/AMDGPU/memory-legalizer-nontemporal-store.ll +++ llvm/trunk/test/CodeGen/AMDGPU/memory-legalizer-nontemporal-store.ll @@ -0,0 +1,97 @@ +; RUN: llc -mtriple=amdgcn-amd- -mcpu=gfx800 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck --check-prefix=GCN --check-prefix=GFX8 %s +; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx800 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck --check-prefix=GCN --check-prefix=GFX8 %s +; RUN: llc -mtriple=amdgcn-amd- -mcpu=gfx900 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck --check-prefix=GCN --check-prefix=GFX9 %s +; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck --check-prefix=GCN --check-prefix=GFX9 %s + +declare i32 @llvm.amdgcn.workitem.id.x() + +; GCN-LABEL: {{^}}nontemporal_store_private_0 +; GCN: 
buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], s{{[0-9]+}} offen glc slc{{$}} +define amdgpu_kernel void @nontemporal_store_private_0( + i32 addrspace(4)* %in, i32* %out) { +entry: + %val = load i32, i32 addrspace(4)* %in, align 4 + store i32 %val, i32* %out, !nontemporal !0 + ret void +} + +; GCN-LABEL: {{^}}nontemporal_store_private_1 +; GCN: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], s{{[0-9]+}} offen glc slc{{$}} +define amdgpu_kernel void @nontemporal_store_private_1( + i32 addrspace(4)* %in, i32* %out) { +entry: + %tid = call i32 @llvm.amdgcn.workitem.id.x() + %val = load i32, i32 addrspace(4)* %in, align 4 + %out.gep = getelementptr inbounds i32, i32* %out, i32 %tid + store i32 %val, i32* %out.gep, !nontemporal !0 + ret void +} + +; GCN-LABEL: {{^}}nontemporal_store_global_0 +; GCN: buffer_store_dword v{{[0-9]+}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0 glc slc{{$}} +define amdgpu_kernel void @nontemporal_store_global_0( + i32 addrspace(4)* %in, i32 addrspace(1)* %out) { +entry: + %val = load i32, i32 addrspace(4)* %in, align 4 + store i32 %val, i32 addrspace(1)* %out, !nontemporal !0 + ret void +} + +; GCN-LABEL: {{^}}nontemporal_store_global_1 +; GFX8: flat_store_dword v[{{[0-9]+}}:{{[0-9]+}}], v{{[0-9]+}} glc slc{{$}} +; GFX9: global_store_dword v[{{[0-9]+}}:{{[0-9]+}}], v{{[0-9]+}}, off glc slc{{$}} +define amdgpu_kernel void @nontemporal_store_global_1( + i32 addrspace(4)* %in, i32 addrspace(1)* %out) { +entry: + %tid = call i32 @llvm.amdgcn.workitem.id.x() + %val = load i32, i32 addrspace(4)* %in, align 4 + %out.gep = getelementptr inbounds i32, i32 addrspace(1)* %out, i32 %tid + store i32 %val, i32 addrspace(1)* %out.gep, !nontemporal !0 + ret void +} + +; GCN-LABEL: {{^}}nontemporal_store_local_0 +; GCN: ds_write_b32 v{{[0-9]+}}, v{{[0-9]+}}{{$}} +define amdgpu_kernel void @nontemporal_store_local_0( + i32 addrspace(4)* %in, i32 addrspace(3)* %out) { +entry: + %val = load i32, i32 addrspace(4)* %in, align 
4 + store i32 %val, i32 addrspace(3)* %out, !nontemporal !0 + ret void +} + +; GCN-LABEL: {{^}}nontemporal_store_local_1 +; GCN: ds_write_b32 v{{[0-9]+}}, v{{[0-9]+}}{{$}} +define amdgpu_kernel void @nontemporal_store_local_1( + i32 addrspace(4)* %in, i32 addrspace(3)* %out) { +entry: + %tid = call i32 @llvm.amdgcn.workitem.id.x() + %val = load i32, i32 addrspace(4)* %in, align 4 + %out.gep = getelementptr inbounds i32, i32 addrspace(3)* %out, i32 %tid + store i32 %val, i32 addrspace(3)* %out.gep, !nontemporal !0 + ret void +} + +; GCN-LABEL: {{^}}nontemporal_store_flat_0 +; GCN: flat_store_dword v[{{[0-9]+}}:{{[0-9]+}}], v{{[0-9]+}} glc slc{{$}} +define amdgpu_kernel void @nontemporal_store_flat_0( + i32 addrspace(4)* %in, i32 addrspace(4)* %out) { +entry: + %val = load i32, i32 addrspace(4)* %in, align 4 + store i32 %val, i32 addrspace(4)* %out, !nontemporal !0 + ret void +} + +; GCN-LABEL: {{^}}nontemporal_store_flat_1 +; GCN: flat_store_dword v[{{[0-9]+}}:{{[0-9]+}}], v{{[0-9]+}} glc slc{{$}} +define amdgpu_kernel void @nontemporal_store_flat_1( + i32 addrspace(4)* %in, i32 addrspace(4)* %out) { +entry: + %tid = call i32 @llvm.amdgcn.workitem.id.x() + %val = load i32, i32 addrspace(4)* %in, align 4 + %out.gep = getelementptr inbounds i32, i32 addrspace(4)* %out, i32 %tid + store i32 %val, i32 addrspace(4)* %out.gep, !nontemporal !0 + ret void +} + +!0 = !{i32 1} Index: llvm/trunk/test/CodeGen/MIR/AMDGPU/memory-legalizer-multiple-mem-operands-nontemporal-1.mir =================================================================== --- llvm/trunk/test/CodeGen/MIR/AMDGPU/memory-legalizer-multiple-mem-operands-nontemporal-1.mir +++ llvm/trunk/test/CodeGen/MIR/AMDGPU/memory-legalizer-multiple-mem-operands-nontemporal-1.mir @@ -0,0 +1,161 @@ +# RUN: llc -march=amdgcn -mcpu=gfx803 -run-pass si-memory-legalizer %s -o - | FileCheck %s + +--- | + ; ModuleID = 'memory-legalizer-multiple-mem-operands.ll' + source_filename = "memory-legalizer-multiple-mem-operands.ll" + target 
datalayout = "e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64" + + define amdgpu_kernel void @multiple_mem_operands(i32 addrspace(1)* %out, i32 %cond, i32 %if_offset, i32 %else_offset) #0 { + entry: + %scratch0 = alloca [8192 x i32] + %scratch1 = alloca [8192 x i32] + %scratchptr01 = bitcast [8192 x i32]* %scratch0 to i32* + store i32 1, i32* %scratchptr01 + %scratchptr12 = bitcast [8192 x i32]* %scratch1 to i32* + store i32 2, i32* %scratchptr12 + %cmp = icmp eq i32 %cond, 0 + br i1 %cmp, label %if, label %else, !structurizecfg.uniform !0, !amdgpu.uniform !0 + + if: ; preds = %entry + %if_ptr = getelementptr [8192 x i32], [8192 x i32]* %scratch0, i32 0, i32 %if_offset, !amdgpu.uniform !0 + %if_value = load i32, i32* %if_ptr, align 4, !nontemporal !1 + br label %done, !structurizecfg.uniform !0 + + else: ; preds = %entry + %else_ptr = getelementptr [8192 x i32], [8192 x i32]* %scratch1, i32 0, i32 %else_offset, !amdgpu.uniform !0 + %else_value = load i32, i32* %else_ptr, align 4, !nontemporal !1 + br label %done, !structurizecfg.uniform !0 + + done: ; preds = %else, %if + %value = phi i32 [ %if_value, %if ], [ %else_value, %else ] + store i32 %value, i32 addrspace(1)* %out + ret void + } + + ; Function Attrs: convergent nounwind + declare { i1, i64 } @llvm.amdgcn.if(i1) #1 + + ; Function Attrs: convergent nounwind + declare { i1, i64 } @llvm.amdgcn.else(i64) #1 + + ; Function Attrs: convergent nounwind readnone + declare i64 @llvm.amdgcn.break(i64) #2 + + ; Function Attrs: convergent nounwind readnone + declare i64 @llvm.amdgcn.if.break(i1, i64) #2 + + ; Function Attrs: convergent nounwind readnone + declare i64 @llvm.amdgcn.else.break(i64, i64) #2 + + ; Function Attrs: convergent nounwind + declare i1 @llvm.amdgcn.loop(i64) #1 + + ; Function Attrs: convergent nounwind + declare void @llvm.amdgcn.end.cf(i64) #1 + + attributes #0 = { "target-cpu"="gfx803" } + 
attributes #1 = { convergent nounwind } + attributes #2 = { convergent nounwind readnone } + + !0 = !{} + !1 = !{i32 1} + +... +--- + +# CHECK-LABEL: name: multiple_mem_operands + +# CHECK-LABEL: bb.3.done: +# CHECK: BUFFER_LOAD_DWORD_OFFEN killed %vgpr0, killed %sgpr8_sgpr9_sgpr10_sgpr11, %sgpr3, 0, 1, 1, 0 + +name: multiple_mem_operands +alignment: 0 +exposesReturnsTwice: false +legalized: false +regBankSelected: false +selected: false +tracksRegLiveness: true +registers: +liveins: + - { reg: '%sgpr0_sgpr1', virtual-reg: '' } + - { reg: '%sgpr3', virtual-reg: '' } +frameInfo: + isFrameAddressTaken: false + isReturnAddressTaken: false + hasStackMap: false + hasPatchPoint: false + stackSize: 65540 + offsetAdjustment: 0 + maxAlignment: 4 + adjustsStack: false + hasCalls: false + stackProtector: '' + maxCallFrameSize: 0 + hasOpaqueSPAdjustment: false + hasVAStart: false + hasMustTailInVarArgFunc: false + savePoint: '' + restorePoint: '' +fixedStack: + - { id: 0, type: default, offset: 0, size: 4, alignment: 4, stack-id: 0, + isImmutable: false, isAliased: false, callee-saved-register: '' } +stack: + - { id: 0, name: scratch0, type: default, offset: 4, size: 32768, alignment: 4, + stack-id: 0, callee-saved-register: '', local-offset: 0, di-variable: '', + di-expression: '', di-location: '' } + - { id: 1, name: scratch1, type: default, offset: 32772, size: 32768, + alignment: 4, stack-id: 0, callee-saved-register: '', local-offset: 32768, + di-variable: '', di-expression: '', di-location: '' } +constants: +body: | + bb.0.entry: + successors: %bb.1.if(0x30000000), %bb.2.else(0x50000000) + liveins: %sgpr0_sgpr1, %sgpr3 + + %sgpr2 = S_LOAD_DWORD_IMM %sgpr0_sgpr1, 44, 0 :: (non-temporal dereferenceable invariant load 4 from `i32 addrspace(2)* undef`) + %sgpr8 = S_MOV_B32 $SCRATCH_RSRC_DWORD0, implicit-def %sgpr8_sgpr9_sgpr10_sgpr11 + %sgpr4_sgpr5 = S_LOAD_DWORDX2_IMM %sgpr0_sgpr1, 36, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`) + 
%sgpr9 = S_MOV_B32 $SCRATCH_RSRC_DWORD1, implicit-def %sgpr8_sgpr9_sgpr10_sgpr11 + %sgpr10 = S_MOV_B32 4294967295, implicit-def %sgpr8_sgpr9_sgpr10_sgpr11 + %sgpr11 = S_MOV_B32 15204352, implicit-def %sgpr8_sgpr9_sgpr10_sgpr11 + %vgpr0 = V_MOV_B32_e32 1, implicit %exec + BUFFER_STORE_DWORD_OFFSET killed %vgpr0, %sgpr8_sgpr9_sgpr10_sgpr11, %sgpr3, 4, 0, 0, 0, implicit %exec :: (store 4 into %ir.scratchptr01) + S_WAITCNT 127 + S_CMP_LG_U32 killed %sgpr2, 0, implicit-def %scc + S_WAITCNT 3855 + %vgpr0 = V_MOV_B32_e32 2, implicit %exec + %vgpr1 = V_MOV_B32_e32 32772, implicit %exec + BUFFER_STORE_DWORD_OFFEN killed %vgpr0, killed %vgpr1, %sgpr8_sgpr9_sgpr10_sgpr11, %sgpr3, 0, 0, 0, 0, implicit %exec :: (store 4 into %ir.scratchptr12) + S_CBRANCH_SCC0 %bb.1.if, implicit killed %scc + + bb.2.else: + successors: %bb.3.done(0x80000000) + liveins: %sgpr0_sgpr1, %sgpr4_sgpr5, %sgpr3, %sgpr8_sgpr9_sgpr10_sgpr11 + + %sgpr0 = S_LOAD_DWORD_IMM killed %sgpr0_sgpr1, 52, 0 :: (non-temporal dereferenceable invariant load 4 from `i32 addrspace(2)* undef`) + S_WAITCNT 3855 + %vgpr0 = V_MOV_B32_e32 32772, implicit %exec + S_BRANCH %bb.3.done + + bb.1.if: + successors: %bb.3.done(0x80000000) + liveins: %sgpr0_sgpr1, %sgpr4_sgpr5, %sgpr3, %sgpr8_sgpr9_sgpr10_sgpr11 + + %sgpr0 = S_LOAD_DWORD_IMM killed %sgpr0_sgpr1, 48, 0 :: (non-temporal dereferenceable invariant load 4 from `i32 addrspace(2)* undef`) + S_WAITCNT 3855 + %vgpr0 = V_MOV_B32_e32 4, implicit %exec + + bb.3.done: + liveins: %sgpr3, %sgpr4_sgpr5, %sgpr8_sgpr9_sgpr10_sgpr11, %vgpr0, %sgpr0 + + S_WAITCNT 127 + %sgpr0 = S_LSHL_B32 killed %sgpr0, 2, implicit-def dead %scc + %vgpr0 = V_ADD_I32_e32 killed %sgpr0, killed %vgpr0, implicit-def dead %vcc, implicit %exec + %vgpr0 = BUFFER_LOAD_DWORD_OFFEN killed %vgpr0, killed %sgpr8_sgpr9_sgpr10_sgpr11, %sgpr3, 0, 0, 0, 0, implicit %exec :: (non-temporal load 4 from %ir.else_ptr), (non-temporal load 4 from %ir.if_ptr) + %vgpr1 = V_MOV_B32_e32 %sgpr4, implicit %exec, implicit-def 
%vgpr1_vgpr2, implicit %sgpr4_sgpr5 + %vgpr2 = V_MOV_B32_e32 killed %sgpr5, implicit %exec, implicit %sgpr4_sgpr5, implicit %exec + S_WAITCNT 3952 + FLAT_STORE_DWORD killed %vgpr1_vgpr2, killed %vgpr0, 0, 0, 0, implicit %exec, implicit %flat_scr :: (store 4 into %ir.out) + S_ENDPGM + +... Index: llvm/trunk/test/CodeGen/MIR/AMDGPU/memory-legalizer-multiple-mem-operands-nontemporal-2.mir =================================================================== --- llvm/trunk/test/CodeGen/MIR/AMDGPU/memory-legalizer-multiple-mem-operands-nontemporal-2.mir +++ llvm/trunk/test/CodeGen/MIR/AMDGPU/memory-legalizer-multiple-mem-operands-nontemporal-2.mir @@ -0,0 +1,161 @@ +# RUN: llc -march=amdgcn -mcpu=gfx803 -run-pass si-memory-legalizer %s -o - | FileCheck %s + +--- | + ; ModuleID = 'memory-legalizer-multiple-mem-operands.ll' + source_filename = "memory-legalizer-multiple-mem-operands.ll" + target datalayout = "e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64" + + define amdgpu_kernel void @multiple_mem_operands(i32 addrspace(1)* %out, i32 %cond, i32 %if_offset, i32 %else_offset) #0 { + entry: + %scratch0 = alloca [8192 x i32] + %scratch1 = alloca [8192 x i32] + %scratchptr01 = bitcast [8192 x i32]* %scratch0 to i32* + store i32 1, i32* %scratchptr01 + %scratchptr12 = bitcast [8192 x i32]* %scratch1 to i32* + store i32 2, i32* %scratchptr12 + %cmp = icmp eq i32 %cond, 0 + br i1 %cmp, label %if, label %else, !structurizecfg.uniform !0, !amdgpu.uniform !0 + + if: ; preds = %entry + %if_ptr = getelementptr [8192 x i32], [8192 x i32]* %scratch0, i32 0, i32 %if_offset, !amdgpu.uniform !0 + %if_value = load i32, i32* %if_ptr, align 4, !nontemporal !1 + br label %done, !structurizecfg.uniform !0 + + else: ; preds = %entry + %else_ptr = getelementptr [8192 x i32], [8192 x i32]* %scratch1, i32 0, i32 %else_offset, !amdgpu.uniform !0 + %else_value = load i32, i32* %else_ptr, align 4 
+ br label %done, !structurizecfg.uniform !0 + + done: ; preds = %else, %if + %value = phi i32 [ %if_value, %if ], [ %else_value, %else ] + store i32 %value, i32 addrspace(1)* %out + ret void + } + + ; Function Attrs: convergent nounwind + declare { i1, i64 } @llvm.amdgcn.if(i1) #1 + + ; Function Attrs: convergent nounwind + declare { i1, i64 } @llvm.amdgcn.else(i64) #1 + + ; Function Attrs: convergent nounwind readnone + declare i64 @llvm.amdgcn.break(i64) #2 + + ; Function Attrs: convergent nounwind readnone + declare i64 @llvm.amdgcn.if.break(i1, i64) #2 + + ; Function Attrs: convergent nounwind readnone + declare i64 @llvm.amdgcn.else.break(i64, i64) #2 + + ; Function Attrs: convergent nounwind + declare i1 @llvm.amdgcn.loop(i64) #1 + + ; Function Attrs: convergent nounwind + declare void @llvm.amdgcn.end.cf(i64) #1 + + attributes #0 = { "target-cpu"="gfx803" } + attributes #1 = { convergent nounwind } + attributes #2 = { convergent nounwind readnone } + + !0 = !{} + !1 = !{i32 1} + +... 
+--- + +# CHECK-LABEL: name: multiple_mem_operands + +# CHECK-LABEL: bb.3.done: +# CHECK: BUFFER_LOAD_DWORD_OFFEN killed %vgpr0, killed %sgpr8_sgpr9_sgpr10_sgpr11, %sgpr3, 0, 0, 0, 0 + +name: multiple_mem_operands +alignment: 0 +exposesReturnsTwice: false +legalized: false +regBankSelected: false +selected: false +tracksRegLiveness: true +registers: +liveins: + - { reg: '%sgpr0_sgpr1', virtual-reg: '' } + - { reg: '%sgpr3', virtual-reg: '' } +frameInfo: + isFrameAddressTaken: false + isReturnAddressTaken: false + hasStackMap: false + hasPatchPoint: false + stackSize: 65540 + offsetAdjustment: 0 + maxAlignment: 4 + adjustsStack: false + hasCalls: false + stackProtector: '' + maxCallFrameSize: 0 + hasOpaqueSPAdjustment: false + hasVAStart: false + hasMustTailInVarArgFunc: false + savePoint: '' + restorePoint: '' +fixedStack: + - { id: 0, type: default, offset: 0, size: 4, alignment: 4, stack-id: 0, + isImmutable: false, isAliased: false, callee-saved-register: '' } +stack: + - { id: 0, name: scratch0, type: default, offset: 4, size: 32768, alignment: 4, + stack-id: 0, callee-saved-register: '', local-offset: 0, di-variable: '', + di-expression: '', di-location: '' } + - { id: 1, name: scratch1, type: default, offset: 32772, size: 32768, + alignment: 4, stack-id: 0, callee-saved-register: '', local-offset: 32768, + di-variable: '', di-expression: '', di-location: '' } +constants: +body: | + bb.0.entry: + successors: %bb.1.if(0x30000000), %bb.2.else(0x50000000) + liveins: %sgpr0_sgpr1, %sgpr3 + + %sgpr2 = S_LOAD_DWORD_IMM %sgpr0_sgpr1, 44, 0 :: (non-temporal dereferenceable invariant load 4 from `i32 addrspace(2)* undef`) + %sgpr8 = S_MOV_B32 $SCRATCH_RSRC_DWORD0, implicit-def %sgpr8_sgpr9_sgpr10_sgpr11 + %sgpr4_sgpr5 = S_LOAD_DWORDX2_IMM %sgpr0_sgpr1, 36, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`) + %sgpr9 = S_MOV_B32 $SCRATCH_RSRC_DWORD1, implicit-def %sgpr8_sgpr9_sgpr10_sgpr11 + %sgpr10 = S_MOV_B32 4294967295, implicit-def 
%sgpr8_sgpr9_sgpr10_sgpr11 + %sgpr11 = S_MOV_B32 15204352, implicit-def %sgpr8_sgpr9_sgpr10_sgpr11 + %vgpr0 = V_MOV_B32_e32 1, implicit %exec + BUFFER_STORE_DWORD_OFFSET killed %vgpr0, %sgpr8_sgpr9_sgpr10_sgpr11, %sgpr3, 4, 0, 0, 0, implicit %exec :: (store 4 into %ir.scratchptr01) + S_WAITCNT 127 + S_CMP_LG_U32 killed %sgpr2, 0, implicit-def %scc + S_WAITCNT 3855 + %vgpr0 = V_MOV_B32_e32 2, implicit %exec + %vgpr1 = V_MOV_B32_e32 32772, implicit %exec + BUFFER_STORE_DWORD_OFFEN killed %vgpr0, killed %vgpr1, %sgpr8_sgpr9_sgpr10_sgpr11, %sgpr3, 0, 0, 0, 0, implicit %exec :: (store 4 into %ir.scratchptr12) + S_CBRANCH_SCC0 %bb.1.if, implicit killed %scc + + bb.2.else: + successors: %bb.3.done(0x80000000) + liveins: %sgpr0_sgpr1, %sgpr4_sgpr5, %sgpr3, %sgpr8_sgpr9_sgpr10_sgpr11 + + %sgpr0 = S_LOAD_DWORD_IMM killed %sgpr0_sgpr1, 52, 0 :: (non-temporal dereferenceable invariant load 4 from `i32 addrspace(2)* undef`) + S_WAITCNT 3855 + %vgpr0 = V_MOV_B32_e32 32772, implicit %exec + S_BRANCH %bb.3.done + + bb.1.if: + successors: %bb.3.done(0x80000000) + liveins: %sgpr0_sgpr1, %sgpr4_sgpr5, %sgpr3, %sgpr8_sgpr9_sgpr10_sgpr11 + + %sgpr0 = S_LOAD_DWORD_IMM killed %sgpr0_sgpr1, 48, 0 :: (non-temporal dereferenceable invariant load 4 from `i32 addrspace(2)* undef`) + S_WAITCNT 3855 + %vgpr0 = V_MOV_B32_e32 4, implicit %exec + + bb.3.done: + liveins: %sgpr3, %sgpr4_sgpr5, %sgpr8_sgpr9_sgpr10_sgpr11, %vgpr0, %sgpr0 + + S_WAITCNT 127 + %sgpr0 = S_LSHL_B32 killed %sgpr0, 2, implicit-def dead %scc + %vgpr0 = V_ADD_I32_e32 killed %sgpr0, killed %vgpr0, implicit-def dead %vcc, implicit %exec + %vgpr0 = BUFFER_LOAD_DWORD_OFFEN killed %vgpr0, killed %sgpr8_sgpr9_sgpr10_sgpr11, %sgpr3, 0, 0, 0, 0, implicit %exec :: (load 4 from %ir.else_ptr), (non-temporal load 4 from %ir.if_ptr) + %vgpr1 = V_MOV_B32_e32 %sgpr4, implicit %exec, implicit-def %vgpr1_vgpr2, implicit %sgpr4_sgpr5 + %vgpr2 = V_MOV_B32_e32 killed %sgpr5, implicit %exec, implicit %sgpr4_sgpr5, implicit %exec + S_WAITCNT 3952 + 
FLAT_STORE_DWORD killed %vgpr1_vgpr2, killed %vgpr0, 0, 0, 0, implicit %exec, implicit %flat_scr :: (store 4 into %ir.out) + S_ENDPGM + +... Index: llvm/trunk/test/CodeGen/MIR/AMDGPU/syncscopes.mir =================================================================== --- llvm/trunk/test/CodeGen/MIR/AMDGPU/syncscopes.mir +++ llvm/trunk/test/CodeGen/MIR/AMDGPU/syncscopes.mir @@ -8,9 +8,9 @@ define void @syncscopes(i32 %agent, i32 addrspace(4)* %agent_out, i32 %workgroup, i32 addrspace(4)* %workgroup_out, i32 %wavefront, i32 addrspace(4)* %wavefront_out) #0 { entry: - store atomic i32 %agent, i32 addrspace(4)* %agent_out syncscope("agent") seq_cst, align 4 - store atomic i32 %workgroup, i32 addrspace(4)* %workgroup_out syncscope("workgroup") seq_cst, align 4 - store atomic i32 %wavefront, i32 addrspace(4)* %wavefront_out syncscope("wavefront") seq_cst, align 4 + store atomic i32 %agent, i32 addrspace(4)* %agent_out syncscope("agent") seq_cst, align 4, !nontemporal !0 + store atomic i32 %workgroup, i32 addrspace(4)* %workgroup_out syncscope("workgroup") seq_cst, align 4, !nontemporal !0 + store atomic i32 %wavefront, i32 addrspace(4)* %wavefront_out syncscope("wavefront") seq_cst, align 4, !nontemporal !0 ret void } @@ -39,10 +39,12 @@ attributes #1 = { convergent nounwind } attributes #2 = { convergent nounwind readnone } + !0 = !{i32 1} + # GCN-LABEL: name: syncscopes -# GCN: FLAT_STORE_DWORD killed %vgpr0_vgpr1, killed %vgpr2, 0, -1, 0, implicit %exec, implicit %flat_scr :: (volatile store syncscope("agent") seq_cst 4 into %ir.agent_out) -# GCN: FLAT_STORE_DWORD killed %vgpr0_vgpr1, killed %vgpr2, 0, -1, 0, implicit %exec, implicit %flat_scr :: (volatile store syncscope("workgroup") seq_cst 4 into %ir.workgroup_out) -# GCN: FLAT_STORE_DWORD killed %vgpr0_vgpr1, killed %vgpr2, 0, -1, 0, implicit %exec, implicit %flat_scr :: (volatile store syncscope("wavefront") seq_cst 4 into %ir.wavefront_out) +# GCN: FLAT_STORE_DWORD killed %vgpr0_vgpr1, killed %vgpr2, 0, -1, 0, 
implicit %exec, implicit %flat_scr :: (volatile non-temporal store syncscope("agent") seq_cst 4 into %ir.agent_out) +# GCN: FLAT_STORE_DWORD killed %vgpr0_vgpr1, killed %vgpr2, 0, -1, 0, implicit %exec, implicit %flat_scr :: (volatile non-temporal store syncscope("workgroup") seq_cst 4 into %ir.workgroup_out) +# GCN: FLAT_STORE_DWORD killed %vgpr0_vgpr1, killed %vgpr2, 0, -1, 0, implicit %exec, implicit %flat_scr :: (volatile non-temporal store syncscope("wavefront") seq_cst 4 into %ir.wavefront_out) ... --- name: syncscopes @@ -82,17 +84,17 @@ %sgpr4_sgpr5 = S_LOAD_DWORDX2_IMM killed %sgpr4_sgpr5, 40, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`) %vgpr1 = V_MOV_B32_e32 killed %sgpr1, implicit %exec, implicit killed %sgpr0_sgpr1, implicit %sgpr0_sgpr1, implicit %exec %vgpr2 = V_MOV_B32_e32 killed %sgpr6, implicit %exec, implicit %exec - FLAT_STORE_DWORD killed %vgpr0_vgpr1, killed %vgpr2, 0, -1, 0, implicit %exec, implicit %flat_scr :: (volatile store syncscope("agent") seq_cst 4 into %ir.agent_out) + FLAT_STORE_DWORD killed %vgpr0_vgpr1, killed %vgpr2, 0, -1, 0, implicit %exec, implicit %flat_scr :: (volatile non-temporal store syncscope("agent") seq_cst 4 into %ir.agent_out) S_WAITCNT 112 %vgpr0 = V_MOV_B32_e32 %sgpr2, implicit %exec, implicit-def %vgpr0_vgpr1, implicit %sgpr2_sgpr3 %vgpr1 = V_MOV_B32_e32 killed %sgpr3, implicit %exec, implicit killed %sgpr2_sgpr3, implicit %sgpr2_sgpr3, implicit %exec %vgpr2 = V_MOV_B32_e32 killed %sgpr7, implicit %exec, implicit %exec - FLAT_STORE_DWORD killed %vgpr0_vgpr1, killed %vgpr2, 0, -1, 0, implicit %exec, implicit %flat_scr :: (volatile store syncscope("workgroup") seq_cst 4 into %ir.workgroup_out) + FLAT_STORE_DWORD killed %vgpr0_vgpr1, killed %vgpr2, 0, -1, 0, implicit %exec, implicit %flat_scr :: (volatile non-temporal store syncscope("workgroup") seq_cst 4 into %ir.workgroup_out) S_WAITCNT 112 %vgpr0 = V_MOV_B32_e32 %sgpr4, implicit %exec, implicit-def %vgpr0_vgpr1, implicit 
%sgpr4_sgpr5 %vgpr1 = V_MOV_B32_e32 killed %sgpr5, implicit %exec, implicit killed %sgpr4_sgpr5, implicit %sgpr4_sgpr5, implicit %exec %vgpr2 = V_MOV_B32_e32 killed %sgpr8, implicit %exec, implicit %exec - FLAT_STORE_DWORD killed %vgpr0_vgpr1, killed %vgpr2, 0, -1, 0, implicit %exec, implicit %flat_scr :: (volatile store syncscope("wavefront") seq_cst 4 into %ir.wavefront_out) + FLAT_STORE_DWORD killed %vgpr0_vgpr1, killed %vgpr2, 0, -1, 0, implicit %exec, implicit %flat_scr :: (volatile non-temporal store syncscope("wavefront") seq_cst 4 into %ir.wavefront_out) S_ENDPGM ...