diff --git a/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp b/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp
--- a/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp
+++ b/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp
@@ -67,7 +67,14 @@
 }
 
 static bool isSSetReg(unsigned Opcode) {
-  return Opcode == AMDGPU::S_SETREG_B32 || Opcode == AMDGPU::S_SETREG_IMM32_B32;
+  switch (Opcode) {
+  case AMDGPU::S_SETREG_B32:
+  case AMDGPU::S_SETREG_B32_mode:
+  case AMDGPU::S_SETREG_IMM32_B32:
+  case AMDGPU::S_SETREG_IMM32_B32_mode:
+    return true;
+  }
+  return false;
 }
 
 static bool isRWLane(unsigned Opcode) {
diff --git a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
--- a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
@@ -355,10 +355,17 @@
   }
 
   // Special case for s_setreg_b32
-  if (Opc == AMDGPU::S_SETREG_B32 && OpToFold->isImm()) {
-    MI->setDesc(TII->get(AMDGPU::S_SETREG_IMM32_B32));
-    appendFoldCandidate(FoldList, MI, OpNo, OpToFold);
-    return true;
+  if (OpToFold->isImm()) {
+    unsigned ImmOpc = 0;
+    if (Opc == AMDGPU::S_SETREG_B32)
+      ImmOpc = AMDGPU::S_SETREG_IMM32_B32;
+    else if (Opc == AMDGPU::S_SETREG_B32_mode)
+      ImmOpc = AMDGPU::S_SETREG_IMM32_B32_mode;
+    if (ImmOpc) {
+      MI->setDesc(TII->get(ImmOpc));
+      appendFoldCandidate(FoldList, MI, OpNo, OpToFold);
+      return true;
+    }
   }
 
   // If we are already folding into another operand of MI, then
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -4236,9 +4236,6 @@
 
     return emitGWSMemViolTestLoop(MI, BB);
   case AMDGPU::S_SETREG_B32: {
-    if (!getSubtarget()->hasDenormModeInst())
-      return BB;
-
     // Try to optimize cases that only set the denormal mode or rounding mode.
     //
     // If the s_setreg_b32 fully sets all of the bits in the rounding mode or
@@ -4248,9 +4245,6 @@
     // FIXME: This could be predicates on the immediate, but tablegen doesn't
     // allow you to have a no side effect instruction in the output of a
     // sideeffecting pattern.
-
-    // TODO: Should also emit a no side effects pseudo if only FP bits are
-    // touched, even if not all of them or to a variable.
     unsigned ID, Offset, Width;
     AMDGPU::Hwreg::decodeHwreg(MI.getOperand(1).getImm(), ID, Offset, Width);
     if (ID != AMDGPU::Hwreg::ID_MODE)
@@ -4258,45 +4252,54 @@
 
     const unsigned WidthMask = maskTrailingOnes<unsigned>(Width);
     const unsigned SetMask = WidthMask << Offset;
-    unsigned SetDenormOp = 0;
-    unsigned SetRoundOp = 0;
-
-    // The dedicated instructions can only set the whole denorm or round mode at
-    // once, not a subset of bits in either.
-    if (SetMask ==
-        (AMDGPU::Hwreg::FP_ROUND_MASK | AMDGPU::Hwreg::FP_DENORM_MASK)) {
-      // If this fully sets both the round and denorm mode, emit the two
-      // dedicated instructions for these.
-      SetRoundOp = AMDGPU::S_ROUND_MODE;
-      SetDenormOp = AMDGPU::S_DENORM_MODE;
-    } else if (SetMask == AMDGPU::Hwreg::FP_ROUND_MASK) {
-      SetRoundOp = AMDGPU::S_ROUND_MODE;
-    } else if (SetMask == AMDGPU::Hwreg::FP_DENORM_MASK) {
-      SetDenormOp = AMDGPU::S_DENORM_MODE;
-    }
-
-    if (SetRoundOp || SetDenormOp) {
-      MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
-      MachineInstr *Def = MRI.getVRegDef(MI.getOperand(0).getReg());
-      if (Def && Def->isMoveImmediate() && Def->getOperand(1).isImm()) {
-        unsigned ImmVal = Def->getOperand(1).getImm();
-        if (SetRoundOp) {
-          BuildMI(*BB, MI, MI.getDebugLoc(), TII->get(SetRoundOp))
-            .addImm(ImmVal & 0xf);
-
-          // If we also have the denorm mode, get just the denorm mode bits.
-          ImmVal >>= 4;
-        }
 
-        if (SetDenormOp) {
-          BuildMI(*BB, MI, MI.getDebugLoc(), TII->get(SetDenormOp))
-            .addImm(ImmVal & 0xf);
-        }
+    if (getSubtarget()->hasDenormModeInst()) {
+      unsigned SetDenormOp = 0;
+      unsigned SetRoundOp = 0;
+
+      // The dedicated instructions can only set the whole denorm or round mode
+      // at once, not a subset of bits in either.
+      if (SetMask ==
+          (AMDGPU::Hwreg::FP_ROUND_MASK | AMDGPU::Hwreg::FP_DENORM_MASK)) {
+        // If this fully sets both the round and denorm mode, emit the two
+        // dedicated instructions for these.
+        SetRoundOp = AMDGPU::S_ROUND_MODE;
+        SetDenormOp = AMDGPU::S_DENORM_MODE;
+      } else if (SetMask == AMDGPU::Hwreg::FP_ROUND_MASK) {
+        SetRoundOp = AMDGPU::S_ROUND_MODE;
+      } else if (SetMask == AMDGPU::Hwreg::FP_DENORM_MASK) {
+        SetDenormOp = AMDGPU::S_DENORM_MODE;
+      }
 
-        MI.eraseFromParent();
+      if (SetRoundOp || SetDenormOp) {
+        MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
+        MachineInstr *Def = MRI.getVRegDef(MI.getOperand(0).getReg());
+        if (Def && Def->isMoveImmediate() && Def->getOperand(1).isImm()) {
+          unsigned ImmVal = Def->getOperand(1).getImm();
+          if (SetRoundOp) {
+            BuildMI(*BB, MI, MI.getDebugLoc(), TII->get(SetRoundOp))
+                .addImm(ImmVal & 0xf);
+
+            // If we also have the denorm mode, get just the denorm mode bits.
+            ImmVal >>= 4;
+          }
+
+          if (SetDenormOp) {
+            BuildMI(*BB, MI, MI.getDebugLoc(), TII->get(SetDenormOp))
+                .addImm(ImmVal & 0xf);
+          }
+
+          MI.eraseFromParent();
+          return BB;
+        }
       }
     }
 
+    // If only FP bits are touched, use the no side effects pseudo.
+    if ((SetMask & (AMDGPU::Hwreg::FP_ROUND_MASK |
+                    AMDGPU::Hwreg::FP_DENORM_MASK)) == SetMask)
+      MI.setDesc(TII->get(AMDGPU::S_SETREG_B32_mode));
+
     return BB;
   }
   default:
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -3070,9 +3070,6 @@
   // Target-independent instructions do not have an implicit-use of EXEC, even
   // when they operate on VGPRs. Treating EXEC modifications as scheduling
   // boundaries prevents incorrect movements of such instructions.
-
-  // TODO: Don't treat setreg with known constant that only changes MODE as
-  // barrier.
   return MI.modifiesRegister(AMDGPU::EXEC, &RI) ||
          MI.getOpcode() == AMDGPU::S_SETREG_IMM32_B32 ||
          MI.getOpcode() == AMDGPU::S_SETREG_B32 ||
diff --git a/llvm/lib/Target/AMDGPU/SIModeRegister.cpp b/llvm/lib/Target/AMDGPU/SIModeRegister.cpp
--- a/llvm/lib/Target/AMDGPU/SIModeRegister.cpp
+++ b/llvm/lib/Target/AMDGPU/SIModeRegister.cpp
@@ -242,8 +242,10 @@
   Status IPChange;
   for (MachineInstr &MI : MBB) {
     Status InstrMode = getInstructionMode(MI, TII);
-    if ((MI.getOpcode() == AMDGPU::S_SETREG_B32) ||
-        (MI.getOpcode() == AMDGPU::S_SETREG_IMM32_B32)) {
+    if (MI.getOpcode() == AMDGPU::S_SETREG_B32 ||
+        MI.getOpcode() == AMDGPU::S_SETREG_B32_mode ||
+        MI.getOpcode() == AMDGPU::S_SETREG_IMM32_B32 ||
+        MI.getOpcode() == AMDGPU::S_SETREG_IMM32_B32_mode) {
       // We preserve any explicit mode register setreg instruction we encounter,
       // as we assume it has been inserted by a higher authority (this is
       // likely to be a very rare occurrence).
@@ -267,7 +269,8 @@
       // If this is an immediate then we know the value being set, but if it is
       // not an immediate then we treat the modified bits of the mode register
      // as unknown.
-      if (MI.getOpcode() == AMDGPU::S_SETREG_IMM32_B32) {
+      if (MI.getOpcode() == AMDGPU::S_SETREG_IMM32_B32 ||
+          MI.getOpcode() == AMDGPU::S_SETREG_IMM32_B32_mode) {
         unsigned Val = TII->getNamedOperand(MI, AMDGPU::OpName::imm)->getImm();
         unsigned Mode = (Val << Offset) & Mask;
         Status Setreg = Status(Mask, Mode);
diff --git a/llvm/lib/Target/AMDGPU/SOPInstructions.td b/llvm/lib/Target/AMDGPU/SOPInstructions.td
--- a/llvm/lib/Target/AMDGPU/SOPInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SOPInstructions.td
@@ -813,8 +813,6 @@
   "$sdst, $simm16"
 >;
 
-let hasSideEffects = 1 in {
-
 let mayLoad = 1 in {
 // s_getreg_b32 should use hasSideEffects = 1 for tablegen to allow
 // its use in the readcyclecounter selection.
@@ -825,10 +823,11 @@
   "$sdst, $simm16",
   [(set i32:$sdst, (int_amdgcn_s_getreg (i32 timm:$simm16)))]> {
   let SOPKZext = 1;
+  let hasSideEffects = 1;
 }
-}
+} // End mayLoad = 1
 
-let mayLoad = 0, mayStore =0 in {
+let mayLoad = 0, mayStore = 0, Defs = [MODE], Uses = [MODE] in {
 
 // FIXME: Need to truncate immediate to 16-bits.
 def S_SETREG_B32 : SOPK_Pseudo <
@@ -838,10 +837,19 @@
   [(int_amdgcn_s_setreg (i32 timm:$simm16), i32:$sdst)]> {
 
   // Use custom inserter to optimize some cases to
-  // S_DENORM_MODE/S_ROUND_MODE.
+  // S_DENORM_MODE/S_ROUND_MODE/S_SETREG_B32_mode.
   let usesCustomInserter = 1;
-  let Defs = [MODE];
-  let Uses = [MODE];
+  let hasSideEffects = 1;
+}
+
+// Variant of SETREG that is guaranteed to only touch FP bits in the MODE
+// register, so doesn't have unmodeled side effects.
+def S_SETREG_B32_mode : SOPK_Pseudo <
+  "s_setreg_b32",
+  (outs), (ins SReg_32:$sdst, hwreg:$simm16),
+  "$simm16, $sdst"> {
+
+  let hasSideEffects = 0;
 }
 
 // FIXME: Not on SI?
@@ -853,12 +861,21 @@
   "$simm16, $imm"> {
   let Size = 8; // Unlike every other SOPK instruction.
   let has_sdst = 0;
-  let Defs = [MODE];
-  let Uses = [MODE];
+  let hasSideEffects = 1;
 }
 
+// Variant of SETREG_IMM32 that is guaranteed to only touch FP bits in the MODE
+// register, so doesn't have unmodeled side effects.
+def S_SETREG_IMM32_B32_mode : SOPK_Pseudo <
+  "s_setreg_imm32_b32",
+  (outs), (ins i32imm:$imm, hwreg:$simm16),
+  "$simm16, $imm"> {
+  let Size = 8; // Unlike every other SOPK instruction.
+  let has_sdst = 0;
+  let hasSideEffects = 0;
 }
-} // End hasSideEffects = 1
+
+} // End mayLoad = 0, mayStore = 0, Defs = [MODE], Uses = [MODE]
 
 class SOPK_WAITCNT<string opName, list<dag> pat=[]> :
     SOPK_Pseudo<
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.s.setreg.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.s.setreg.ll
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.s.setreg.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.s.setreg.ll
@@ -891,9 +891,9 @@
 ; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
 ; GFX10-NEXT:    v_readfirstlane_b32 s4, v0
 ; GFX10-NEXT:    ; implicit-def: $vcc_hi
-; GFX10-NEXT:    s_setreg_b32 hwreg(HW_REG_MODE, 0, 3), s4
 ; GFX10-NEXT:    ;;#ASMSTART
 ; GFX10-NEXT:    ;;#ASMEND
+; GFX10-NEXT:    s_setreg_b32 hwreg(HW_REG_MODE, 0, 3), s4
 ; GFX10-NEXT:    s_setpc_b64 s[30:31]
   call void @llvm.amdgcn.s.setreg(i32 4097, i32 %var.mode)
   call void asm sideeffect "", ""()
diff --git a/llvm/test/CodeGen/AMDGPU/fdiv-nofpexcept.ll b/llvm/test/CodeGen/AMDGPU/fdiv-nofpexcept.ll
--- a/llvm/test/CodeGen/AMDGPU/fdiv-nofpexcept.ll
+++ b/llvm/test/CodeGen/AMDGPU/fdiv-nofpexcept.ll
@@ -17,14 +17,14 @@
   ; GCN: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 3
   ; GCN: [[S_MOV_B32_1:%[0-9]+]]:sgpr_32 = S_MOV_B32 1065353216
   ; GCN: [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 0
-  ; GCN: S_SETREG_B32 killed [[S_MOV_B32_]], 2305, implicit-def $mode, implicit $mode
+  ; GCN: S_SETREG_B32_mode killed [[S_MOV_B32_]], 2305, implicit-def $mode, implicit $mode
   ; GCN: %14:vgpr_32 = nofpexcept V_FMA_F32 1, %8, 0, %10, 0, killed [[S_MOV_B32_1]], 0, 0, implicit $mode, implicit $exec
   ; GCN: %15:vgpr_32 = nofpexcept V_FMA_F32 0, killed %14, 0, %10, 0, %10, 0, 0, implicit $mode, implicit $exec
   ; GCN: %16:vgpr_32 = nofpexcept V_MUL_F32_e64 0, %6, 0, %15, 0, 0, implicit $mode, implicit $exec
   ; GCN: %17:vgpr_32 = nofpexcept V_FMA_F32 1, %8, 0, %16, 0, %6, 0, 0, implicit $mode, implicit $exec
   ; GCN: %18:vgpr_32 = nofpexcept V_FMA_F32 0, killed %17, 0, %15, 0, %16, 0, 0, implicit $mode, implicit $exec
   ; GCN: %19:vgpr_32 = nofpexcept V_FMA_F32 1, %8, 0, %18, 0, %6, 0, 0, implicit $mode, implicit $exec
-  ; GCN: S_SETREG_B32 killed [[S_MOV_B32_2]], 2305, implicit-def dead $mode, implicit $mode
+  ; GCN: S_SETREG_B32_mode killed [[S_MOV_B32_2]], 2305, implicit-def dead $mode, implicit $mode
   ; GCN: $vcc = COPY %7
   ; GCN: %20:vgpr_32 = nofpexcept V_DIV_FMAS_F32 0, killed %19, 0, %15, 0, %18, 0, 0, implicit $mode, implicit $vcc, implicit $exec
   ; GCN: %21:vgpr_32 = nofpexcept V_DIV_FIXUP_F32 0, killed %20, 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
@@ -50,14 +50,14 @@
   ; GCN: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 3
   ; GCN: [[S_MOV_B32_1:%[0-9]+]]:sgpr_32 = S_MOV_B32 1065353216
   ; GCN: [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 0
-  ; GCN: S_SETREG_B32 killed [[S_MOV_B32_]], 2305, implicit-def $mode, implicit $mode
+  ; GCN: S_SETREG_B32_mode killed [[S_MOV_B32_]], 2305, implicit-def $mode, implicit $mode
   ; GCN: %14:vgpr_32 = nnan nofpexcept V_FMA_F32 1, %8, 0, %10, 0, killed [[S_MOV_B32_1]], 0, 0, implicit $mode, implicit $exec
   ; GCN: %15:vgpr_32 = nnan nofpexcept V_FMA_F32 0, killed %14, 0, %10, 0, %10, 0, 0, implicit $mode, implicit $exec
   ; GCN: %16:vgpr_32 = nnan nofpexcept V_MUL_F32_e64 0, %6, 0, %15, 0, 0, implicit $mode, implicit $exec
   ; GCN: %17:vgpr_32 = nnan nofpexcept V_FMA_F32 1, %8, 0, %16, 0, %6, 0, 0, implicit $mode, implicit $exec
   ; GCN: %18:vgpr_32 = nnan nofpexcept V_FMA_F32 0, killed %17, 0, %15, 0, %16, 0, 0, implicit $mode, implicit $exec
   ; GCN: %19:vgpr_32 = nnan nofpexcept V_FMA_F32 1, %8, 0, %18, 0, %6, 0, 0, implicit $mode, implicit $exec
-  ; GCN: S_SETREG_B32 killed [[S_MOV_B32_2]], 2305, implicit-def dead $mode, implicit $mode
+  ; GCN: S_SETREG_B32_mode killed [[S_MOV_B32_2]], 2305, implicit-def dead $mode, implicit $mode
   ; GCN: $vcc = COPY %7
   ; GCN: %20:vgpr_32 = nnan nofpexcept V_DIV_FMAS_F32 0, killed %19, 0, %15, 0, %18, 0, 0, implicit $mode, implicit $vcc, implicit $exec
   ; GCN: %21:vgpr_32 = nnan nofpexcept V_DIV_FIXUP_F32 0, killed %20, 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
diff --git a/llvm/test/CodeGen/AMDGPU/frem.ll b/llvm/test/CodeGen/AMDGPU/frem.ll
--- a/llvm/test/CodeGen/AMDGPU/frem.ll
+++ b/llvm/test/CodeGen/AMDGPU/frem.ll
@@ -1040,9 +1040,9 @@
 ; CI-NEXT:    v_trunc_f32_e32 v4, v4
 ; CI-NEXT:    v_fma_f32 v0, -v4, v2, v0
 ; CI-NEXT:    v_div_scale_f32 v4, s[4:5], v3, v3, v1
+; CI-NEXT:    v_div_scale_f32 v2, vcc, v1, v3, v1
 ; CI-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_MODE, 2, 2), 0
 ; CI-NEXT:    v_cvt_f16_f32_e32 v0, v0
-; CI-NEXT:    v_div_scale_f32 v2, vcc, v1, v3, v1
 ; CI-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
 ; CI-NEXT:    v_rcp_f32_e32 v5, v4
 ; CI-NEXT:    s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s6
@@ -1265,9 +1265,9 @@
 ; CI-NEXT:    v_trunc_f32_e32 v8, v8
 ; CI-NEXT:    v_fma_f32 v1, -v8, v1, v5
 ; CI-NEXT:    v_div_scale_f32 v8, s[4:5], v7, v7, v4
+; CI-NEXT:    v_div_scale_f32 v5, vcc, v4, v7, v4
 ; CI-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_MODE, 2, 2), 0
 ; CI-NEXT:    v_cvt_f16_f32_e32 v1, v1
-; CI-NEXT:    v_div_scale_f32 v5, vcc, v4, v7, v4
 ; CI-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
 ; CI-NEXT:    v_rcp_f32_e32 v9, v8
 ; CI-NEXT:    s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s6
@@ -1300,8 +1300,8 @@
 ; CI-NEXT:    v_trunc_f32_e32 v4, v4
 ; CI-NEXT:    v_fma_f32 v0, -v4, v0, v3
 ; CI-NEXT:    v_div_scale_f32 v4, s[4:5], v6, v6, v2
-; CI-NEXT:    v_cvt_f16_f32_e32 v0, v0
 ; CI-NEXT:    v_div_scale_f32 v3, vcc, v2, v6, v2
+; CI-NEXT:    v_cvt_f16_f32_e32 v0, v0
 ; CI-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
 ; CI-NEXT:    v_rcp_f32_e32 v5, v4
 ; CI-NEXT:    s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s6