Index: llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
===================================================================
--- llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
+++ llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
@@ -417,7 +417,7 @@
     return !SIInstrInfo::isLegalMUBUFImmOffset(FullOffset);
 
   const SIInstrInfo *TII = ST.getInstrInfo();
-  return TII->isLegalFLATOffset(FullOffset, AMDGPUAS::PRIVATE_ADDRESS, true);
+  return !TII->isLegalFLATOffset(FullOffset, AMDGPUAS::PRIVATE_ADDRESS, true);
 }
 
 Register SIRegisterInfo::materializeFrameBaseRegister(MachineBasicBlock *MBB,
@@ -496,7 +496,6 @@
   MachineOperand *OffsetOp = TII->getNamedOperand(MI, AMDGPU::OpName::offset);
   int64_t NewOffset = OffsetOp->getImm() + Offset;
 
-#ifndef NDEBUG
   assert(FIOp && FIOp->isFI() && "frame index must be address operand");
   assert(TII->isMUBUF(MI) || TII->isFLATScratch(MI));
 
@@ -508,6 +507,7 @@
     return;
   }
 
+#ifndef NDEBUG
   MachineOperand *SOffset = TII->getNamedOperand(MI, AMDGPU::OpName::soffset);
   assert(SOffset->isImm() && SOffset->getImm() == 0);
 #endif
@@ -522,7 +522,7 @@
 bool SIRegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
                                         Register BaseReg,
                                         int64_t Offset) const {
-  if (!SIInstrInfo::isMUBUF(*MI) && !!SIInstrInfo::isFLATScratch(*MI))
+  if (!SIInstrInfo::isMUBUF(*MI) && !SIInstrInfo::isFLATScratch(*MI))
     return false;
 
   int64_t NewOffset = Offset + getScratchInstrOffset(MI);
Index: llvm/test/CodeGen/AMDGPU/flat-scratch.ll
===================================================================
--- llvm/test/CodeGen/AMDGPU/flat-scratch.ll
+++ llvm/test/CodeGen/AMDGPU/flat-scratch.ll
@@ -1185,7 +1185,7 @@
 ; GFX9-NEXT:    s_add_u32 flat_scratch_lo, s0, s3
 ; GFX9-NEXT:    s_addc_u32 flat_scratch_hi, s1, 0
 ; GFX9-NEXT:    s_mov_b32 vcc_hi, 0
-; GFX9-NEXT:    scratch_load_dword v0, off, vcc_hi offset:4 glc
+; GFX9-NEXT:    scratch_load_dword v0, off, vcc_hi offset:16 glc
 ; GFX9-NEXT:    s_waitcnt vmcnt(0)
 ; GFX9-NEXT:    s_mov_b32 s0, 0
 ; GFX9-NEXT:    s_mov_b32 s1, s0
@@ -1211,7 +1211,7 @@
 ; GFX10-NEXT:    s_addc_u32 s1, s1, 0
 ; GFX10-NEXT:    s_setreg_b32 hwreg(HW_REG_FLAT_SCR_LO), s0
 ; GFX10-NEXT:    s_setreg_b32 hwreg(HW_REG_FLAT_SCR_HI), s1
-; GFX10-NEXT:    scratch_load_dword v0, off, off offset:4 glc dlc
+; GFX10-NEXT:    scratch_load_dword v0, off, off offset:16 glc dlc
 ; GFX10-NEXT:    s_waitcnt vmcnt(0)
 ; GFX10-NEXT:    s_mov_b32 s0, 0
 ; GFX10-NEXT:    s_movk_i32 vcc_lo, 0x4010
@@ -1242,7 +1242,7 @@
 ; GFX9-PAL-NEXT:    s_and_b32 s3, s3, 0xffff
 ; GFX9-PAL-NEXT:    s_add_u32 flat_scratch_lo, s2, s1
 ; GFX9-PAL-NEXT:    s_addc_u32 flat_scratch_hi, s3, 0
-; GFX9-PAL-NEXT:    scratch_load_dword v0, off, vcc_hi offset:4 glc
+; GFX9-PAL-NEXT:    scratch_load_dword v0, off, vcc_hi offset:16 glc
 ; GFX9-PAL-NEXT:    s_waitcnt vmcnt(0)
 ; GFX9-PAL-NEXT:    s_mov_b32 s1, s0
 ; GFX9-PAL-NEXT:    s_mov_b32 s2, s0
@@ -1272,7 +1272,7 @@
 ; GFX10-PAL-NEXT:    s_addc_u32 s3, s3, 0
 ; GFX10-PAL-NEXT:    s_setreg_b32 hwreg(HW_REG_FLAT_SCR_LO), s2
 ; GFX10-PAL-NEXT:    s_setreg_b32 hwreg(HW_REG_FLAT_SCR_HI), s3
-; GFX10-PAL-NEXT:    scratch_load_dword v0, off, off offset:4 glc dlc
+; GFX10-PAL-NEXT:    scratch_load_dword v0, off, off offset:16 glc dlc
 ; GFX10-PAL-NEXT:    s_waitcnt vmcnt(0)
 ; GFX10-PAL-NEXT:    s_mov_b32 s0, 0
 ; GFX10-PAL-NEXT:    s_movk_i32 vcc_lo, 0x4010
Index: llvm/test/CodeGen/AMDGPU/local-stack-alloc-block-sp-reference.ll
===================================================================
--- llvm/test/CodeGen/AMDGPU/local-stack-alloc-block-sp-reference.ll
+++ llvm/test/CodeGen/AMDGPU/local-stack-alloc-block-sp-reference.ll
@@ -200,6 +200,138 @@
   ret void
 }
 
+define amdgpu_kernel void @local_stack_offset_uses_sp_flat(<3 x i64> addrspace(1)* %out) {
+; MUBUF-LABEL: local_stack_offset_uses_sp_flat:
+; MUBUF:       ; %bb.0: ; %entry
+; MUBUF-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x0
+; MUBUF-NEXT:    s_add_u32 flat_scratch_lo, s6, s9
+; MUBUF-NEXT:    s_addc_u32 flat_scratch_hi, s7, 0
+; MUBUF-NEXT:    s_add_u32 s0, s0, s9
+; MUBUF-NEXT:    s_addc_u32 s1, s1, 0
+; MUBUF-NEXT:    v_mov_b32_e32 v0, 0x4000
+; MUBUF-NEXT:    v_mov_b32_e32 v1, 0
+; MUBUF-NEXT:    v_mov_b32_e32 v2, 0x2000
+; MUBUF-NEXT:    s_mov_b32 s6, 0
+; MUBUF-NEXT:    buffer_store_dword v1, v2, s[0:3], 0 offen
+; MUBUF-NEXT:    s_waitcnt vmcnt(0)
+; MUBUF-NEXT:  BB2_1: ; %loadstoreloop
+; MUBUF-NEXT:    ; =>This Inner Loop Header: Depth=1
+; MUBUF-NEXT:    v_add_u32_e32 v2, s6, v0
+; MUBUF-NEXT:    s_add_i32 s6, s6, 1
+; MUBUF-NEXT:    s_cmpk_lt_u32 s6, 0x2120
+; MUBUF-NEXT:    buffer_store_byte v1, v2, s[0:3], 0 offen
+; MUBUF-NEXT:    s_waitcnt vmcnt(0)
+; MUBUF-NEXT:    s_cbranch_scc1 BB2_1
+; MUBUF-NEXT:  ; %bb.2: ; %split
+; MUBUF-NEXT:    v_mov_b32_e32 v0, 0x4000
+; MUBUF-NEXT:    v_or_b32_e32 v2, 0x12d4, v0
+; MUBUF-NEXT:    buffer_load_dword v5, v2, s[0:3], 0 offen glc
+; MUBUF-NEXT:    s_waitcnt vmcnt(0)
+; MUBUF-NEXT:    v_or_b32_e32 v2, 0x12d0, v0
+; MUBUF-NEXT:    buffer_load_dword v4, v2, s[0:3], 0 offen glc
+; MUBUF-NEXT:    s_waitcnt vmcnt(0)
+; MUBUF-NEXT:    v_or_b32_e32 v1, 0x12c0, v0
+; MUBUF-NEXT:    v_or_b32_e32 v2, 0x12c4, v0
+; MUBUF-NEXT:    buffer_load_dword v6, v2, s[0:3], 0 offen glc
+; MUBUF-NEXT:    s_waitcnt vmcnt(0)
+; MUBUF-NEXT:    buffer_load_dword v1, v1, s[0:3], 0 offen glc
+; MUBUF-NEXT:    s_waitcnt vmcnt(0)
+; MUBUF-NEXT:    v_or_b32_e32 v2, 0x12cc, v0
+; MUBUF-NEXT:    v_or_b32_e32 v0, 0x12c8, v0
+; MUBUF-NEXT:    v_mov_b32_e32 v13, 0x4000
+; MUBUF-NEXT:    buffer_load_dword v3, v2, s[0:3], 0 offen glc
+; MUBUF-NEXT:    s_waitcnt vmcnt(0)
+; MUBUF-NEXT:    buffer_load_dword v0, v0, s[0:3], 0 offen glc
+; MUBUF-NEXT:    s_waitcnt vmcnt(0)
+; MUBUF-NEXT:    buffer_load_dword v7, v13, s[0:3], 0 offen glc
+; MUBUF-NEXT:    s_waitcnt vmcnt(0)
+; MUBUF-NEXT:    v_mov_b32_e32 v13, 0x4000
+; MUBUF-NEXT:    buffer_load_dword v8, v13, s[0:3], 0 offen offset:4 glc
+; MUBUF-NEXT:    s_waitcnt vmcnt(0)
+; MUBUF-NEXT:    v_mov_b32_e32 v13, 0x4000
+; MUBUF-NEXT:    buffer_load_dword v2, v13, s[0:3], 0 offen offset:8 glc
+; MUBUF-NEXT:    s_waitcnt vmcnt(0)
+; MUBUF-NEXT:    v_mov_b32_e32 v13, 0x4000
+; MUBUF-NEXT:    buffer_load_dword v9, v13, s[0:3], 0 offen offset:12 glc
+; MUBUF-NEXT:    s_waitcnt vmcnt(0)
+; MUBUF-NEXT:    v_mov_b32_e32 v13, 0x4000
+; MUBUF-NEXT:    buffer_load_dword v10, v13, s[0:3], 0 offen offset:16 glc
+; MUBUF-NEXT:    s_waitcnt vmcnt(0)
+; MUBUF-NEXT:    v_mov_b32_e32 v13, 0x4000
+; MUBUF-NEXT:    buffer_load_dword v11, v13, s[0:3], 0 offen offset:20 glc
+; MUBUF-NEXT:    s_waitcnt vmcnt(0)
+; MUBUF-NEXT:    v_mov_b32_e32 v12, 0
+; MUBUF-NEXT:    v_add_co_u32_e32 v2, vcc, v0, v2
+; MUBUF-NEXT:    v_addc_co_u32_e32 v3, vcc, v3, v9, vcc
+; MUBUF-NEXT:    v_add_co_u32_e32 v0, vcc, v1, v7
+; MUBUF-NEXT:    v_addc_co_u32_e32 v1, vcc, v6, v8, vcc
+; MUBUF-NEXT:    v_add_co_u32_e32 v4, vcc, v4, v10
+; MUBUF-NEXT:    v_addc_co_u32_e32 v5, vcc, v5, v11, vcc
+; MUBUF-NEXT:    s_waitcnt lgkmcnt(0)
+; MUBUF-NEXT:    global_store_dwordx2 v12, v[4:5], s[4:5] offset:16
+; MUBUF-NEXT:    s_waitcnt vmcnt(0)
+; MUBUF-NEXT:    global_store_dwordx4 v12, v[0:3], s[4:5]
+; MUBUF-NEXT:    s_waitcnt vmcnt(0)
+; MUBUF-NEXT:    s_endpgm
+;
+; FLATSCR-LABEL: local_stack_offset_uses_sp_flat:
+; FLATSCR:       ; %bb.0: ; %entry
+; FLATSCR-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x0
+; FLATSCR-NEXT:    s_add_u32 flat_scratch_lo, s2, s5
+; FLATSCR-NEXT:    s_addc_u32 flat_scratch_hi, s3, 0
+; FLATSCR-NEXT:    s_add_u32 s2, 16, 0x4000
+; FLATSCR-NEXT:    v_mov_b32_e32 v0, 0
+; FLATSCR-NEXT:    s_movk_i32 vcc_hi, 0x2000
+; FLATSCR-NEXT:    s_mov_b32 s3, 0
+; FLATSCR-NEXT:    scratch_store_dword off, v0, vcc_hi
+; FLATSCR-NEXT:    s_waitcnt vmcnt(0)
+; FLATSCR-NEXT:  BB2_1: ; %loadstoreloop
+; FLATSCR-NEXT:    ; =>This Inner Loop Header: Depth=1
+; FLATSCR-NEXT:    s_add_u32 s4, 0x4000, s3
+; FLATSCR-NEXT:    s_add_i32 s3, s3, 1
+; FLATSCR-NEXT:    s_cmpk_lt_u32 s3, 0x2120
+; FLATSCR-NEXT:    scratch_store_byte off, v0, s4
+; FLATSCR-NEXT:    s_waitcnt vmcnt(0)
+; FLATSCR-NEXT:    s_cbranch_scc1 BB2_1
+; FLATSCR-NEXT:  ; %bb.2: ; %split
+; FLATSCR-NEXT:    s_movk_i32 s3, 0x1000
+; FLATSCR-NEXT:    s_add_u32 s3, 0x4000, s3
+; FLATSCR-NEXT:    scratch_load_dwordx2 v[8:9], off, s3 offset:720 glc
+; FLATSCR-NEXT:    s_waitcnt vmcnt(0)
+; FLATSCR-NEXT:    scratch_load_dwordx4 v[0:3], off, s3 offset:704 glc
+; FLATSCR-NEXT:    s_waitcnt vmcnt(0)
+; FLATSCR-NEXT:    scratch_load_dwordx2 v[10:11], off, s2 glc
+; FLATSCR-NEXT:    s_waitcnt vmcnt(0)
+; FLATSCR-NEXT:    scratch_load_dwordx4 v[4:7], off, s2 offset:-16 glc
+; FLATSCR-NEXT:    s_waitcnt vmcnt(0)
+; FLATSCR-NEXT:    v_mov_b32_e32 v12, 0
+; FLATSCR-NEXT:    v_add_co_u32_e32 v2, vcc, v2, v6
+; FLATSCR-NEXT:    v_addc_co_u32_e32 v3, vcc, v3, v7, vcc
+; FLATSCR-NEXT:    v_add_co_u32_e32 v0, vcc, v0, v4
+; FLATSCR-NEXT:    v_addc_co_u32_e32 v1, vcc, v1, v5, vcc
+; FLATSCR-NEXT:    v_add_co_u32_e32 v4, vcc, v8, v10
+; FLATSCR-NEXT:    v_addc_co_u32_e32 v5, vcc, v9, v11, vcc
+; FLATSCR-NEXT:    s_waitcnt lgkmcnt(0)
+; FLATSCR-NEXT:    global_store_dwordx2 v12, v[4:5], s[0:1] offset:16
+; FLATSCR-NEXT:    s_waitcnt vmcnt(0)
+; FLATSCR-NEXT:    global_store_dwordx4 v12, v[0:3], s[0:1]
+; FLATSCR-NEXT:    s_waitcnt vmcnt(0)
+; FLATSCR-NEXT:    s_endpgm
+entry:
+  %pin.low = alloca i32, align 1024, addrspace(5)
+  %local.area = alloca [160 x <3 x i64>], align 8192, addrspace(5)
+  store volatile i32 0, i32 addrspace(5)* %pin.low
+  %local.area.cast = bitcast [160 x <3 x i64>] addrspace(5)* %local.area to i8 addrspace(5)*
+  call void @llvm.memset.p5i8.i32(i8 addrspace(5)* align 4 %local.area.cast, i8 0, i32 8480, i1 true)
+  %gep.large.offset = getelementptr inbounds [160 x <3 x i64>], [160 x <3 x i64>] addrspace(5)* %local.area, i64 0, i64 150
+  %gep.small.offset = getelementptr inbounds [160 x <3 x i64>], [160 x <3 x i64>] addrspace(5)* %local.area, i64 0, i64 0
+  %load0 = load volatile <3 x i64>, <3 x i64> addrspace(5)* %gep.large.offset
+  %load1 = load volatile <3 x i64>, <3 x i64> addrspace(5)* %gep.small.offset
+  %add0 = add <3 x i64> %load0, %load1
+  store volatile <3 x i64> %add0, <3 x i64> addrspace(1)* %out
+  ret void
+}
+
 declare void @llvm.memset.p5i8.i32(i8 addrspace(5)* nocapture writeonly, i8, i32, i1 immarg) #0
 
 attributes #0 = { argmemonly nounwind willreturn writeonly }
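
For reference, the functional core of this patch is the inverted predicate in SIRegisterInfo::needsFrameBaseReg: the hook answers "does this access need a frame base register materialized?", which is true exactly when the combined offset is NOT directly encodable in the flat-scratch instruction. The standalone C++ sketch below illustrates that contract; the helper names and the 13-bit signed offset range (gfx9 flat-scratch) are illustrative assumptions here, not the real LLVM API.

// Minimal standalone sketch (hypothetical names, not the real LLVM API)
// of the contract restored in needsFrameBaseReg above: a frame base
// register is needed exactly when the combined offset is NOT encodable.
#include <cassert>
#include <cstdint>

// Stand-in for TII->isLegalFLATOffset(Offset, PRIVATE_ADDRESS, true),
// assuming gfx9's 13-bit signed flat-scratch offset range.
static bool isLegalFlatScratchOffset(int64_t Offset) {
  return Offset >= -4096 && Offset <= 4095;
}

// Stand-in for SIRegisterInfo::needsFrameBaseReg. Before the fix it
// returned isLegalFlatScratchOffset(FullOffset), i.e. the opposite answer.
static bool needsFrameBaseReg(int64_t InstrOffset, int64_t FrameOffset) {
  const int64_t FullOffset = InstrOffset + FrameOffset;
  return !isLegalFlatScratchOffset(FullOffset);
}

int main() {
  assert(!needsFrameBaseReg(16, 0x100));  // 0x110 fits: no base register
  assert(needsFrameBaseReg(16, 0x4000));  // 0x4010 out of range: base register
  return 0;
}

The 0x4010 case mirrors the s_movk_i32 vcc_lo, 0x4010 and s_add_u32 s2, 16, 0x4000 sequences in the updated checks, where a base register takes over once the offset leaves the encodable range.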