Index: lib/Target/AMDGPU/AMDGPU.td =================================================================== --- lib/Target/AMDGPU/AMDGPU.td +++ lib/Target/AMDGPU/AMDGPU.td @@ -777,7 +777,7 @@ FeatureLDSBankCount32, FeatureDLInsts, FeatureNSAEncoding, - FeatureWavefrontSize64, + FeatureWavefrontSize32, FeatureScalarStores, FeatureScalarAtomics, FeatureScalarFlatScratchInsts, @@ -795,7 +795,7 @@ FeatureDot5Insts, FeatureDot6Insts, FeatureNSAEncoding, - FeatureWavefrontSize64, + FeatureWavefrontSize32, FeatureScalarStores, FeatureScalarAtomics, FeatureScalarFlatScratchInsts, @@ -812,7 +812,7 @@ FeatureDot5Insts, FeatureDot6Insts, FeatureNSAEncoding, - FeatureWavefrontSize64, + FeatureWavefrontSize32, FeatureScalarStores, FeatureScalarAtomics, FeatureScalarFlatScratchInsts, Index: lib/Target/AMDGPU/AMDGPUInstrInfo.td =================================================================== --- lib/Target/AMDGPU/AMDGPUInstrInfo.td +++ lib/Target/AMDGPU/AMDGPUInstrInfo.td @@ -50,19 +50,19 @@ def AMDGPUKillSDT : SDTypeProfile<0, 1, [SDTCisInt<0>]>; def AMDGPUIfOp : SDTypeProfile<1, 2, - [SDTCisVT<0, i64>, SDTCisVT<1, i1>, SDTCisVT<2, OtherVT>] + [SDTCisVT<0, i1>, SDTCisVT<1, i1>, SDTCisVT<2, OtherVT>] >; def AMDGPUElseOp : SDTypeProfile<1, 2, - [SDTCisVT<0, i64>, SDTCisVT<1, i64>, SDTCisVT<2, OtherVT>] + [SDTCisVT<0, i1>, SDTCisVT<1, i1>, SDTCisVT<2, OtherVT>] >; def AMDGPULoopOp : SDTypeProfile<0, 2, - [SDTCisVT<0, i64>, SDTCisVT<1, OtherVT>] + [SDTCisVT<0, i1>, SDTCisVT<1, OtherVT>] >; def AMDGPUIfBreakOp : SDTypeProfile<1, 2, - [SDTCisVT<0, i64>, SDTCisVT<1, i1>, SDTCisVT<2, i64>] + [SDTCisVT<0, i1>, SDTCisVT<1, i1>, SDTCisVT<2, i1>] >; //===----------------------------------------------------------------------===// Index: lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.h =================================================================== --- lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.h +++ lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.h @@ -123,6 +123,8 @@ MCOperand 
decodeSDWASrc32(unsigned Val) const; MCOperand decodeSDWAVopcDst(unsigned Val) const; + MCOperand decodeBoolReg(unsigned Val) const; + int getTTmpIdx(unsigned Val) const; bool isVI() const; Index: lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp =================================================================== --- lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp +++ lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp @@ -101,6 +101,12 @@ return addOperand(Inst, MCOperand::createImm(Imm)); } +static DecodeStatus decodeBoolReg(MCInst &Inst, unsigned Val, + uint64_t Addr, const void *Decoder) { + auto DAsm = static_cast<const AMDGPUDisassembler *>(Decoder); + return addOperand(Inst, DAsm->decodeBoolReg(Val)); +} + #define DECODE_OPERAND(StaticDecoderName, DecoderName) \ static DecodeStatus StaticDecoderName(MCInst &Inst, \ unsigned Imm, \ @@ -1039,6 +1045,8 @@ STI.getFeatureBits()[AMDGPU::FeatureGFX10]) && "SDWAVopcDst should be present only on GFX9+"); + bool IsWave64 = STI.getFeatureBits()[AMDGPU::FeatureWavefrontSize64]; + if (Val & SDWA9EncValues::VOPC_DST_VCC_MASK) { Val &= SDWA9EncValues::VOPC_DST_SGPR_MASK; @@ -1046,15 +1054,21 @@ if (TTmpIdx >= 0) { return createSRegOperand(getTtmpClassId(OPW64), TTmpIdx); } else if (Val > SGPR_MAX) { - return decodeSpecialReg64(Val); + return IsWave64 ? decodeSpecialReg64(Val) + : decodeSpecialReg32(Val); } else { - return createSRegOperand(getSgprClassId(OPW64), Val); + return createSRegOperand(getSgprClassId(IsWave64 ? OPW64 : OPW32), Val); } } else { - return createRegOperand(AMDGPU::VCC); + return createRegOperand(IsWave64 ? AMDGPU::VCC : AMDGPU::VCC_LO); } } +MCOperand AMDGPUDisassembler::decodeBoolReg(unsigned Val) const { + return STI.getFeatureBits()[AMDGPU::FeatureWavefrontSize64] ? 
+ decodeOperand_SReg_64(Val) : decodeOperand_SReg_32(Val); +} + bool AMDGPUDisassembler::isVI() const { return STI.getFeatureBits()[AMDGPU::FeatureVolcanicIslands]; } Index: lib/Target/AMDGPU/SIInstrInfo.h =================================================================== --- lib/Target/AMDGPU/SIInstrInfo.h +++ lib/Target/AMDGPU/SIInstrInfo.h @@ -943,6 +943,15 @@ /// not exist. If Opcode is not a pseudo instruction, this is identity. int pseudoToMCOpcode(int Opcode) const; + const TargetRegisterClass *getRegClass(const MCInstrDesc &TID, unsigned OpNum, + const TargetRegisterInfo *TRI, + const MachineFunction &MF) + const override { + if (OpNum >= TID.getNumOperands()) + return nullptr; + return RI.getRegClass(TID.OpInfo[OpNum].RegClass); + } + void fixImplicitOperands(MachineInstr &MI) const; }; Index: lib/Target/AMDGPU/SIInstrInfo.td =================================================================== --- lib/Target/AMDGPU/SIInstrInfo.td +++ lib/Target/AMDGPU/SIInstrInfo.td @@ -747,6 +747,15 @@ let PrintMethod = "printVOPDst"; } +// SCSrc_i1 is the operand for pseudo instructions only. +// Boolean immediates shall not be exposed to codegen instructions. +def SCSrc_i1 : RegisterOperand { + let OperandNamespace = "AMDGPU"; + let OperandType = "OPERAND_REG_IMM_INT32"; + let ParserMatchClass = BoolReg; + let DecoderMethod = "decodeBoolReg"; +} + // ===----------------------------------------------------------------------===// // ExpSrc* Special cases for exp src operands which are printed as // "off" depending on en operand. 
@@ -785,11 +794,12 @@ def SDWASrc_f32 : SDWASrc; def SDWASrc_f16 : SDWASrc; -def SDWAVopcDst : VOPDstOperand { +def SDWAVopcDst : BoolRC { let OperandNamespace = "AMDGPU"; let OperandType = "OPERAND_SDWA_VOPC_DST"; let EncoderMethod = "getSDWAVopcDstEncoding"; let DecoderMethod = "decodeSDWAVopcDst"; + let PrintMethod = "printVOPDst"; } class NamedMatchClass : AsmOperandClass { @@ -921,11 +931,6 @@ def KImmFP16MatchClass : KImmMatchClass<16>; def f16kimm : kimmOperand; - -def VOPDstS64 : VOPDstOperand { - let PrintMethod = "printVOPDst"; -} - class FPInputModsMatchClass : AsmOperandClass { let Name = "RegOrImmWithFP"#opSize#"InputMods"; let ParserMethod = "parseRegOrImmWithFPInputMods"; @@ -1218,7 +1223,7 @@ !if(!eq(VT.Size, 128), VOPDstOperand, !if(!eq(VT.Size, 64), VOPDstOperand, !if(!eq(VT.Size, 16), VOPDstOperand, - VOPDstOperand)))); // else VT == i1 + VOPDstS64orS32)))); // else VT == i1 } // Returns the register class to use for the destination of VOP[12C] @@ -1294,7 +1299,7 @@ VSrc_f64, VSrc_b64), !if(!eq(VT.Value, i1.Value), - SCSrc_i1, + SSrc_i1, !if(isFP, !if(!eq(VT.Value, f16.Value), VSrc_f16, Index: lib/Target/AMDGPU/SIInstructions.td =================================================================== --- lib/Target/AMDGPU/SIInstructions.td +++ lib/Target/AMDGPU/SIInstructions.td @@ -121,14 +121,14 @@ } // End let hasSideEffects = 0, mayLoad = 0, mayStore = 0, Uses = [EXEC] -def ENTER_WWM : SPseudoInstSI <(outs SReg_64:$sdst), (ins i64imm:$src0)> { +def ENTER_WWM : SPseudoInstSI <(outs SReg_1:$sdst), (ins i64imm:$src0)> { let Defs = [EXEC]; let hasSideEffects = 0; let mayLoad = 0; let mayStore = 0; } -def EXIT_WWM : SPseudoInstSI <(outs SReg_64:$sdst), (ins SReg_64:$src0)> { +def EXIT_WWM : SPseudoInstSI <(outs SReg_1:$sdst), (ins SReg_1:$src0)> { let hasSideEffects = 0; let mayLoad = 0; let mayStore = 0; @@ -161,13 +161,18 @@ >; def S_ADD_U64_CO_PSEUDO : SPseudoInstSI < - (outs SReg_64:$vdst, VOPDstS64:$sdst), (ins SSrc_b64:$src0, SSrc_b64:$src1) + 
(outs SReg_64:$vdst, VOPDstS64orS32:$sdst), (ins SSrc_b64:$src0, SSrc_b64:$src1) >; def S_SUB_U64_CO_PSEUDO : SPseudoInstSI < - (outs SReg_64:$vdst, VOPDstS64:$sdst), (ins SSrc_b64:$src0, SSrc_b64:$src1) + (outs SReg_64:$vdst, VOPDstS64orS32:$sdst), (ins SSrc_b64:$src0, SSrc_b64:$src1) >; +def S_ADDC_U64_PSEUDO : SPseudoInstSI <(outs SReg_64:$vdst, SReg_1:$sdst), + (ins SSrc_b64:$src0, SSrc_b64:$src1)>; +def S_SUBC_U64_PSEUDO : SPseudoInstSI <(outs SReg_64:$vdst, SReg_1:$sdst), + (ins SSrc_b64:$src0, SSrc_b64:$src1)>; + } // End usesCustomInserter = 1, Defs = [SCC] let usesCustomInserter = 1 in { @@ -234,30 +239,30 @@ let OtherPredicates = [EnableLateCFGStructurize] in { def SI_NON_UNIFORM_BRCOND_PSEUDO : CFPseudoInstSI < (outs), - (ins SReg_64:$vcc, brtarget:$target), + (ins SReg_1:$vcc, brtarget:$target), [(brcond i1:$vcc, bb:$target)]> { let Size = 12; } } def SI_IF: CFPseudoInstSI < - (outs SReg_64:$dst), (ins SReg_64:$vcc, brtarget:$target), - [(set i64:$dst, (AMDGPUif i1:$vcc, bb:$target))], 1, 1> { + (outs SReg_1:$dst), (ins SReg_1:$vcc, brtarget:$target), + [(set i1:$dst, (AMDGPUif i1:$vcc, bb:$target))], 1, 1> { let Constraints = ""; let Size = 12; let hasSideEffects = 1; } def SI_ELSE : CFPseudoInstSI < - (outs SReg_64:$dst), - (ins SReg_64:$src, brtarget:$target, i1imm:$execfix), [], 1, 1> { + (outs SReg_1:$dst), + (ins SReg_1:$src, brtarget:$target, i1imm:$execfix), [], 1, 1> { let Size = 12; let hasSideEffects = 1; } def SI_LOOP : CFPseudoInstSI < - (outs), (ins SReg_64:$saved, brtarget:$target), - [(AMDGPUloop i64:$saved, bb:$target)], 1, 1> { + (outs), (ins SReg_1:$saved, brtarget:$target), + [(AMDGPUloop i1:$saved, bb:$target)], 1, 1> { let Size = 8; let isBranch = 1; let hasSideEffects = 1; @@ -266,8 +271,7 @@ } // End isTerminator = 1 def SI_END_CF : CFPseudoInstSI < - (outs), (ins SReg_64:$saved), - [(int_amdgcn_end_cf i64:$saved)], 1, 1> { + (outs), (ins SReg_1:$saved), [], 1, 1> { let Size = 4; let isAsCheapAsAMove = 1; let isReMaterializable = 
1; @@ -277,8 +281,7 @@ } def SI_IF_BREAK : CFPseudoInstSI < - (outs SReg_64:$dst), (ins SReg_64:$vcc, SReg_64:$src), - [(set i64:$dst, (int_amdgcn_if_break i1:$vcc, i64:$src))]> { + (outs SReg_1:$dst), (ins SReg_1:$vcc, SReg_1:$src), []> { let Size = 4; let isAsCheapAsAMove = 1; let isReMaterializable = 1; @@ -304,7 +307,7 @@ } } -defm SI_KILL_I1 : PseudoInstKill <(ins SSrc_b64:$src, i1imm:$killvalue)>; +defm SI_KILL_I1 : PseudoInstKill <(ins SCSrc_i1:$src, i1imm:$killvalue)>; defm SI_KILL_F32_COND_IMM : PseudoInstKill <(ins VSrc_b32:$src0, i32imm:$src1, i32imm:$cond)>; let Defs = [EXEC,VCC] in @@ -323,7 +326,7 @@ } def SI_PS_LIVE : PseudoInstSI < - (outs SReg_64:$dst), (ins), + (outs SReg_1:$dst), (ins), [(set i1:$dst, (int_amdgcn_ps_live))]> { let SALU = 1; } @@ -558,7 +561,16 @@ def : GCNPat < (AMDGPUinit_exec i64:$src), (SI_INIT_EXEC (as_i64imm $src)) ->; +> { + let WaveSizePredicate = isWave64; +} + +def : GCNPat < + (AMDGPUinit_exec i64:$src), + (SI_INIT_EXEC_LO (as_i32imm $src)) +> { + let WaveSizePredicate = isWave32; +} def : GCNPat < (AMDGPUinit_exec_from_input i32:$input, i32:$shift), @@ -571,7 +583,7 @@ >; def : GCNPat< - (AMDGPUelse i64:$src, bb:$target), + (AMDGPUelse i1:$src, bb:$target), (SI_ELSE $src, $target, 0) >; @@ -1165,7 +1177,16 @@ def : GCNPat < (i1 imm:$imm), (S_MOV_B64 (i64 (as_i64imm $imm))) ->; +> { + let WaveSizePredicate = isWave64; +} + +def : GCNPat < + (i1 imm:$imm), + (S_MOV_B32 (i32 (as_i32imm $imm))) +> { + let WaveSizePredicate = isWave32; +} def : GCNPat < (f64 InlineFPImm:$imm), @@ -1356,10 +1377,12 @@ // If we need to perform a logical operation on i1 values, we need to // use vector comparisons since there is only one SCC register. Vector -// comparisons still write to a pair of SGPRs, so treat these as -// 64-bit comparisons. When legalizing SGPR copies, instructions -// resulting in the copies from SCC to these instructions will be -// moved to the VALU. 
+// comparisons may write to a pair of SGPRs or a single SGPR, so treat +// these as 32 or 64-bit comparisons. When legalizing SGPR copies, +// instructions resulting in the copies from SCC to these instructions +// will be moved to the VALU. + +let WaveSizePredicate = isWave64 in { def : GCNPat < (i1 (and i1:$src0, i1:$src1)), (S_AND_B64 $src0, $src1) @@ -1396,6 +1419,46 @@ (S_NOT_B64 $src0) >; } +} // end isWave64 + +let WaveSizePredicate = isWave32 in { +def : GCNPat < + (i1 (and i1:$src0, i1:$src1)), + (S_AND_B32 $src0, $src1) +>; + +def : GCNPat < + (i1 (or i1:$src0, i1:$src1)), + (S_OR_B32 $src0, $src1) +>; + +def : GCNPat < + (i1 (xor i1:$src0, i1:$src1)), + (S_XOR_B32 $src0, $src1) +>; + +def : GCNPat < + (i1 (add i1:$src0, i1:$src1)), + (S_XOR_B32 $src0, $src1) +>; + +def : GCNPat < + (i1 (sub i1:$src0, i1:$src1)), + (S_XOR_B32 $src0, $src1) +>; + +let AddedComplexity = 1 in { +def : GCNPat < + (i1 (add i1:$src0, (i1 -1))), + (S_NOT_B32 $src0) +>; + +def : GCNPat < + (i1 (sub i1:$src0, (i1 -1))), + (S_NOT_B32 $src0) +>; +} +} // end isWave32 def : GCNPat < (f16 (sint_to_fp i1:$src)), Index: lib/Target/AMDGPU/SIRegisterInfo.td =================================================================== --- lib/Target/AMDGPU/SIRegisterInfo.td +++ lib/Target/AMDGPU/SIRegisterInfo.td @@ -454,7 +454,7 @@ // Subset of SReg_32 without M0 for SMRD instructions and alike. // See comments in SIInstructions.td for more info. 
-def SReg_32_XM0_XEXEC : RegisterClass<"AMDGPU", [i32, f32, i16, f16, v2i16, v2f16], 32, +def SReg_32_XM0_XEXEC : RegisterClass<"AMDGPU", [i32, f32, i16, f16, v2i16, v2f16, i1], 32, (add SGPR_32, VCC_LO, VCC_HI, FLAT_SCR_LO, FLAT_SCR_HI, XNACK_MASK_LO, XNACK_MASK_HI, SGPR_NULL, TTMP_32, TMA_LO, TMA_HI, TBA_LO, TBA_HI, SRC_SHARED_BASE, SRC_SHARED_LIMIT, SRC_PRIVATE_BASE, SRC_PRIVATE_LIMIT, SRC_POPS_EXITING_WAVE_ID, @@ -462,23 +462,23 @@ let AllocationPriority = 8; } -def SReg_32_XEXEC_HI : RegisterClass<"AMDGPU", [i32, f32, i16, f16, v2i16, v2f16], 32, +def SReg_32_XEXEC_HI : RegisterClass<"AMDGPU", [i32, f32, i16, f16, v2i16, v2f16, i1], 32, (add SReg_32_XM0_XEXEC, EXEC_LO, M0_CLASS)> { let AllocationPriority = 8; } -def SReg_32_XM0 : RegisterClass<"AMDGPU", [i32, f32, i16, f16, v2i16, v2f16], 32, +def SReg_32_XM0 : RegisterClass<"AMDGPU", [i32, f32, i16, f16, v2i16, v2f16, i1], 32, (add SReg_32_XM0_XEXEC, EXEC_LO, EXEC_HI)> { let AllocationPriority = 8; } // Register class for all scalar registers (SGPRs + Special Registers) -def SReg_32 : RegisterClass<"AMDGPU", [i32, f32, i16, f16, v2i16, v2f16], 32, +def SReg_32 : RegisterClass<"AMDGPU", [i32, f32, i16, f16, v2i16, v2f16, i1], 32, (add SReg_32_XM0, M0_CLASS, EXEC_LO, EXEC_HI, SReg_32_XEXEC_HI)> { let AllocationPriority = 8; } -def SRegOrLds_32 : RegisterClass<"AMDGPU", [i32, f32, i16, f16, v2i16, v2f16], 32, +def SRegOrLds_32 : RegisterClass<"AMDGPU", [i32, f32, i16, f16, v2i16, v2f16, i1], 32, (add SReg_32_XM0, M0_CLASS, EXEC_LO, EXEC_HI, SReg_32_XEXEC_HI, LDS_DIRECT_CLASS)> { let isAllocatable = 0; } @@ -733,8 +733,6 @@ defm SCSrc : RegInlineOperand<"SReg", "SCSrc"> ; -def SCSrc_i1 : RegisterOperand; - //===----------------------------------------------------------------------===// // VSrc_* Operands with an SGPR, VGPR or a 32-bit immediate //===----------------------------------------------------------------------===// Index: lib/Target/AMDGPU/SOPInstructions.td 
=================================================================== --- lib/Target/AMDGPU/SOPInstructions.td +++ lib/Target/AMDGPU/SOPInstructions.td @@ -152,12 +152,24 @@ [(set i64:$sdst, (not i64:$src0))] >; def S_WQM_B32 : SOP1_32 <"s_wqm_b32">; - def S_WQM_B64 : SOP1_64 <"s_wqm_b64", - [(set i1:$sdst, (int_amdgcn_wqm_vote i1:$src0))] - >; + def S_WQM_B64 : SOP1_64 <"s_wqm_b64">; } // End Defs = [SCC] +let WaveSizePredicate = isWave32 in { +def : GCNPat < + (int_amdgcn_wqm_vote i1:$src0), + (S_WQM_B32 $src0) +>; +} + +let WaveSizePredicate = isWave64 in { +def : GCNPat < + (int_amdgcn_wqm_vote i1:$src0), + (S_WQM_B64 $src0) +>; +} + def S_BREV_B32 : SOP1_32 <"s_brev_b32", [(set i32:$sdst, (bitreverse i32:$src0))] >; Index: lib/Target/AMDGPU/VOP2Instructions.td =================================================================== --- lib/Target/AMDGPU/VOP2Instructions.td +++ lib/Target/AMDGPU/VOP2Instructions.td @@ -175,7 +175,9 @@ let SchedRW = [Write32Bit, WriteSALU] in { let Uses = !if(useSGPRInput, [VCC, EXEC], [EXEC]), Defs = [VCC] in { def _e32 : VOP2_Pseudo .ret>, - Commutable_REV; + Commutable_REV { + let usesCustomInserter = !eq(P.NumSrcArgs, 2); + } def _sdwa : VOP2_SDWA_Pseudo { let AsmMatchConverter = "cvtSdwaVOP2b"; @@ -342,7 +344,7 @@ let AsmDPP8 = "$vdst, vcc, $src0, $src1 $dpp8$fi"; let AsmDPP16 = AsmDPP#"$fi"; let Outs32 = (outs DstRC:$vdst); - let Outs64 = (outs DstRC:$vdst, SReg_64:$sdst); + let Outs64 = (outs DstRC:$vdst, VOPDstS64orS32:$sdst); } // Write out to vcc or arbitrary SGPR and read in from vcc or @@ -356,7 +358,7 @@ let AsmDPP8 = "$vdst, vcc, $src0, $src1, vcc $dpp8$fi"; let AsmDPP16 = AsmDPP#"$fi"; let Outs32 = (outs DstRC:$vdst); - let Outs64 = (outs DstRC:$vdst, SReg_64:$sdst); + let Outs64 = (outs DstRC:$vdst, VOPDstS64orS32:$sdst); // Suppress src2 implied by type since the 32-bit encoding uses an // implicit VCC use. 
Index: lib/Target/AMDGPU/VOP3Instructions.td =================================================================== --- lib/Target/AMDGPU/VOP3Instructions.td +++ lib/Target/AMDGPU/VOP3Instructions.td @@ -183,7 +183,7 @@ let HasModifiers = 0; let HasClamp = 0; let HasOMod = 0; - let Outs64 = (outs DstRC:$vdst, SReg_64:$sdst); + let Outs64 = (outs DstRC:$vdst, VOPDstS64orS32:$sdst); let Asm64 = " $vdst, $sdst, $src0, $src1, $src2"; } @@ -203,7 +203,7 @@ // FIXME: Hack to stop printing _e64 let DstRC = RegisterOperand; - let Outs64 = (outs DstRC:$vdst, SReg_64:$sdst); + let Outs64 = (outs DstRC:$vdst, VOPDstS64orS32:$sdst); let Asm64 = " $vdst, $sdst, $src0, $src1, $src2$clamp"; } Index: lib/Target/AMDGPU/VOPCInstructions.td =================================================================== --- lib/Target/AMDGPU/VOPCInstructions.td +++ lib/Target/AMDGPU/VOPCInstructions.td @@ -56,7 +56,7 @@ let Asm32 = "$src0, $src1"; // The destination for 32-bit encoding is implicit. let HasDst32 = 0; - let Outs64 = (outs VOPDstS64:$sdst); + let Outs64 = (outs VOPDstS64orS32:$sdst); list Schedule = sched; } Index: test/CodeGen/AMDGPU/add3.ll =================================================================== --- test/CodeGen/AMDGPU/add3.ll +++ test/CodeGen/AMDGPU/add3.ll @@ -22,6 +22,7 @@ ; GFX10-LABEL: add3: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_add3_u32 v0, v0, v1, v2 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: ; return to shader part epilog %x = add i32 %a, %b %result = add i32 %x, %c @@ -46,6 +47,7 @@ ; GFX10-LABEL: mad_no_add3: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_mad_u32_u24 v0, v0, v1, v4 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: v_mad_u32_u24 v0, v2, v3, v0 ; GFX10-NEXT: ; return to shader part epilog %a0 = shl i32 %a, 8 @@ -85,6 +87,7 @@ ; GFX10-LABEL: add3_vgpr_b: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_add3_u32 v0, s3, s2, v0 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: ; return to shader part epilog %x = add i32 %a, %b %result = add i32 %x, %c @@ -107,6 
+110,7 @@ ; GFX10-LABEL: add3_vgpr_all2: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_add3_u32 v0, v1, v2, v0 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: ; return to shader part epilog %x = add i32 %b, %c %result = add i32 %a, %x @@ -129,6 +133,7 @@ ; GFX10-LABEL: add3_vgpr_bc: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_add3_u32 v0, s2, v0, v1 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: ; return to shader part epilog %x = add i32 %a, %b %result = add i32 %x, %c @@ -151,6 +156,7 @@ ; GFX10-LABEL: add3_vgpr_const: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_add3_u32 v0, v0, v1, 16 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: ; return to shader part epilog %x = add i32 %a, %b %result = add i32 %x, 16 @@ -175,6 +181,7 @@ ; GFX10-LABEL: add3_multiuse_outer: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_add3_u32 v0, v0, v1, v2 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: v_mul_lo_u32 v1, v0, v3 ; GFX10-NEXT: ; return to shader part epilog %inner = add i32 %a, %b @@ -202,6 +209,7 @@ ; GFX10-LABEL: add3_multiuse_inner: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_add_nc_u32_e32 v0, v0, v1 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: v_add_nc_u32_e32 v1, v0, v2 ; GFX10-NEXT: ; return to shader part epilog %inner = add i32 %a, %b @@ -240,6 +248,7 @@ ; GFX10-NEXT: v_add_f32_e64 v1, s3, 2.0 ; GFX10-NEXT: v_add_f32_e64 v2, s2, 1.0 ; GFX10-NEXT: v_add_f32_e64 v0, 0x40400000, s4 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: v_add_nc_u32_e32 v1, v2, v1 ; GFX10-NEXT: v_add_nc_u32_e32 v0, v1, v0 ; GFX10-NEXT: ; return to shader part epilog Index: test/CodeGen/AMDGPU/add_i1.ll =================================================================== --- test/CodeGen/AMDGPU/add_i1.ll +++ test/CodeGen/AMDGPU/add_i1.ll @@ -1,8 +1,10 @@ -; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s +; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX9 %s +; RUN: llc -march=amdgcn -mcpu=gfx1010 
-verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX10 %s ; GCN-LABEL: {{^}}add_var_var_i1: -; GCN: s_xor_b64 +; GFX9: s_xor_b64 +; GFX10: s_xor_b32 define amdgpu_kernel void @add_var_var_i1(i1 addrspace(1)* %out, i1 addrspace(1)* %in0, i1 addrspace(1)* %in1) { %a = load volatile i1, i1 addrspace(1)* %in0 %b = load volatile i1, i1 addrspace(1)* %in1 @@ -12,7 +14,8 @@ } ; GCN-LABEL: {{^}}add_var_imm_i1: -; GCN: s_not_b64 +; GFX9: s_not_b64 +; GFX10: s_not_b32 define amdgpu_kernel void @add_var_imm_i1(i1 addrspace(1)* %out, i1 addrspace(1)* %in) { %a = load volatile i1, i1 addrspace(1)* %in %add = add i1 %a, 1 @@ -22,7 +25,8 @@ ; GCN-LABEL: {{^}}add_i1_cf: ; GCN: ; %endif -; GCN: s_not_b64 +; GFX9: s_not_b64 +; GFX10: s_not_b32 define amdgpu_kernel void @add_i1_cf(i1 addrspace(1)* %out, i1 addrspace(1)* %a, i1 addrspace(1)* %b) { entry: %tid = call i32 @llvm.amdgcn.workitem.id.x() Index: test/CodeGen/AMDGPU/add_shl.ll =================================================================== --- test/CodeGen/AMDGPU/add_shl.ll +++ test/CodeGen/AMDGPU/add_shl.ll @@ -22,6 +22,7 @@ ; GFX10-LABEL: add_shl: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_add_lshl_u32 v0, v0, v1, v2 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: ; return to shader part epilog %x = add i32 %a, %b %result = shl i32 %x, %c @@ -45,6 +46,7 @@ ; GFX10-LABEL: add_shl_vgpr_c: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_add_lshl_u32 v0, s2, s3, v0 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: ; return to shader part epilog %x = add i32 %a, %b %result = shl i32 %x, %c @@ -67,6 +69,7 @@ ; GFX10-LABEL: add_shl_vgpr_ac: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_add_lshl_u32 v0, v0, s2, v1 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: ; return to shader part epilog %x = add i32 %a, %b %result = shl i32 %x, %c @@ -89,6 +92,7 @@ ; GFX10-LABEL: add_shl_vgpr_const: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_add_lshl_u32 v0, v0, v1, 9 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: ; return to shader part epilog %x = add 
i32 %a, %b %result = shl i32 %x, 9 @@ -112,6 +116,7 @@ ; GFX10-LABEL: add_shl_vgpr_const_inline_const: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_lshl_add_u32 v0, v0, 9, 0x7e800 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: ; return to shader part epilog %x = add i32 %a, 1012 %result = shl i32 %x, 9 @@ -138,6 +143,7 @@ ; GFX10-LABEL: add_shl_vgpr_inline_const_x2: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_lshl_add_u32 v0, v0, 9, 0x600 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: ; return to shader part epilog %x = add i32 %a, 3 %result = shl i32 %x, 9 Index: test/CodeGen/AMDGPU/and_or.ll =================================================================== --- test/CodeGen/AMDGPU/and_or.ll +++ test/CodeGen/AMDGPU/and_or.ll @@ -22,6 +22,7 @@ ; GFX10-LABEL: and_or: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_and_or_b32 v0, v0, v1, v2 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: ; return to shader part epilog %x = and i32 %a, %b %result = or i32 %x, %c @@ -46,6 +47,7 @@ ; GFX10-LABEL: and_or_vgpr_b: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_and_or_b32 v0, s2, v0, s3 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: ; return to shader part epilog %x = and i32 %a, %b %result = or i32 %x, %c @@ -68,6 +70,7 @@ ; GFX10-LABEL: and_or_vgpr_ab: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_and_or_b32 v0, v0, v1, s2 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: ; return to shader part epilog %x = and i32 %a, %b %result = or i32 %x, %c @@ -90,6 +93,7 @@ ; GFX10-LABEL: and_or_vgpr_const: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_and_or_b32 v0, v0, 4, v1 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: ; return to shader part epilog %x = and i32 4, %a %result = or i32 %x, %b @@ -113,6 +117,7 @@ ; GFX10-LABEL: and_or_vgpr_const_inline_const: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_and_or_b32 v0, v0, 20, 0x808 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: ; return to shader part epilog %x = and i32 20, %a %result = or i32 %x, 2056 @@ -135,6 +140,7 @@ ; GFX10-LABEL: and_or_vgpr_inline_const_x2: ; 
GFX10: ; %bb.0: ; GFX10-NEXT: v_and_or_b32 v0, v0, 4, 1 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: ; return to shader part epilog %x = and i32 4, %a %result = or i32 %x, 1 Index: test/CodeGen/AMDGPU/huge-private-buffer.ll =================================================================== --- test/CodeGen/AMDGPU/huge-private-buffer.ll +++ test/CodeGen/AMDGPU/huge-private-buffer.ll @@ -1,9 +1,23 @@ -; RUN: llc -mtriple=amdgcn-amd-amdhsa -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GCN %s +; RUN: llc -mtriple=amdgcn-amd-amdhsa -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,WAVE64 %s +; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,WAVE32 %s + +; GCN-LABEL: {{^}}scratch_buffer_known_high_masklo14: +; GCN: v_mov_b32_e32 [[FI:v[0-9]+]], 4 +; GCN: v_and_b32_e32 [[MASKED:v[0-9]+]], 0x3ffc, [[FI]] +; GCN: {{flat|global}}_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[MASKED]] +define amdgpu_kernel void @scratch_buffer_known_high_masklo14() #0 { + %alloca = alloca i32, align 4, addrspace(5) + store volatile i32 0, i32 addrspace(5)* %alloca + %toint = ptrtoint i32 addrspace(5)* %alloca to i32 + %masked = and i32 %toint, 16383 + store volatile i32 %masked, i32 addrspace(1)* undef + ret void +} ; GCN-LABEL: {{^}}scratch_buffer_known_high_masklo16: ; GCN: v_mov_b32_e32 [[FI:v[0-9]+]], 4 ; GCN: v_and_b32_e32 [[MASKED:v[0-9]+]], 0xfffc, [[FI]] -; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[MASKED]] +; GCN: {{flat|global}}_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[MASKED]] define amdgpu_kernel void @scratch_buffer_known_high_masklo16() #0 { %alloca = alloca i32, align 4, addrspace(5) store volatile i32 0, i32 addrspace(5)* %alloca @@ -15,8 +29,11 @@ ; GCN-LABEL: {{^}}scratch_buffer_known_high_masklo17: ; GCN: v_mov_b32_e32 [[FI:v[0-9]+]], 4 -; GCN-NOT: [[FI]] -; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[FI]] +; WAVE64-NOT: [[FI]] +; 
WAVE64: {{flat|global}}_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[FI]] + +; WAVE32: v_and_b32_e32 [[MASKED:v[0-9]+]], 0x1fffc, [[FI]] +; WAVE32: {{flat|global}}_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[MASKED]] define amdgpu_kernel void @scratch_buffer_known_high_masklo17() #0 { %alloca = alloca i32, align 4, addrspace(5) store volatile i32 0, i32 addrspace(5)* %alloca @@ -29,7 +46,7 @@ ; GCN-LABEL: {{^}}scratch_buffer_known_high_mask18: ; GCN: v_mov_b32_e32 [[FI:v[0-9]+]], 4 ; GCN-NOT: [[FI]] -; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[FI]] +; GCN: {{flat|global}}_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[FI]] define amdgpu_kernel void @scratch_buffer_known_high_mask18() #0 { %alloca = alloca i32, align 4, addrspace(5) store volatile i32 0, i32 addrspace(5)* %alloca Index: test/CodeGen/AMDGPU/insert-skip-from-vcc.mir =================================================================== --- test/CodeGen/AMDGPU/insert-skip-from-vcc.mir +++ test/CodeGen/AMDGPU/insert-skip-from-vcc.mir @@ -1,4 +1,5 @@ # RUN: llc -march=amdgcn -mcpu=fiji -run-pass si-insert-skips -verify-machineinstrs -o - %s | FileCheck -check-prefix=GCN %s +# RUN: llc -march=amdgcn -mcpu=gfx1010 -run-pass si-insert-skips -verify-machineinstrs -o - %s | FileCheck -check-prefix=W32 %s --- # GCN-LABEL: name: and_execz_mov_vccz @@ -318,3 +319,22 @@ S_CBRANCH_VCCZ %bb.1, implicit killed $vcc S_ENDPGM 0, implicit $scc ... +--- +# W32-LABEL: name: and_execz_mov_vccz_w32 +# W32-NOT: S_MOV_ +# W32-NOT: S_AND_ +# W32: S_CBRANCH_EXECZ %bb.1, implicit $exec +name: and_execz_mov_vccz_w32 +body: | + bb.0: + S_NOP 0 + + bb.1: + S_NOP 0 + + bb.2: + $sgpr0 = S_MOV_B32 -1 + $vcc_lo = S_AND_B32 $exec_lo, killed $sgpr0, implicit-def dead $scc + S_CBRANCH_VCCZ %bb.1, implicit killed $vcc + S_ENDPGM 0 +... 
Index: test/CodeGen/AMDGPU/large-work-group-promote-alloca.ll =================================================================== --- test/CodeGen/AMDGPU/large-work-group-promote-alloca.ll +++ test/CodeGen/AMDGPU/large-work-group-promote-alloca.ll @@ -1,5 +1,6 @@ -; RUN: opt -S -mtriple=amdgcn-unknown-unknown -amdgpu-promote-alloca -disable-promote-alloca-to-vector < %s | FileCheck --check-prefix=SI --check-prefix=ALL %s -; RUN: opt -S -mcpu=tonga -mtriple=amdgcn-unknown-unknown -amdgpu-promote-alloca -disable-promote-alloca-to-vector < %s | FileCheck --check-prefix=CI --check-prefix=ALL %s +; RUN: opt -S -mtriple=amdgcn-unknown-unknown -amdgpu-promote-alloca -disable-promote-alloca-to-vector < %s | FileCheck --check-prefixes=SI,SICI,ALL %s +; RUN: opt -S -mcpu=tonga -mtriple=amdgcn-unknown-unknown -amdgpu-promote-alloca -disable-promote-alloca-to-vector < %s | FileCheck --check-prefixes=CI,SICI,ALL %s +; RUN: opt -S -mcpu=gfx1010 -mtriple=amdgcn-unknown-unknown -amdgpu-promote-alloca -disable-promote-alloca-to-vector < %s | FileCheck --check-prefixes=GFX10,ALL %s ; SI-NOT: @promote_alloca_size_63.stack = internal unnamed_addr addrspace(3) global [63 x [5 x i32]] undef, align 4 ; CI: @promote_alloca_size_63.stack = internal unnamed_addr addrspace(3) global [63 x [5 x i32]] undef, align 4 @@ -46,7 +47,8 @@ ret void } -; ALL: @promote_alloca_size_1600.stack = internal unnamed_addr addrspace(3) global [1600 x [5 x i32]] undef, align 4 +; SICI: @promote_alloca_size_1600.stack = internal unnamed_addr addrspace(3) global [1600 x [5 x i32]] undef, align 4 +; GFX10: alloca [5 x i32] define amdgpu_kernel void @promote_alloca_size_1600(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* nocapture %in) #2 { entry: @@ -141,7 +143,9 @@ } ; ALL-LABEL: @occupancy_6_over( -; ALL: alloca [43 x i8] +; SICI: alloca [43 x i8] +; GFX10-NOT: alloca + define amdgpu_kernel void @occupancy_6_over(i8 addrspace(1)* nocapture %out, i8 addrspace(1)* nocapture %in) #5 { entry: %stack = alloca 
[43 x i8], align 4 @@ -191,7 +195,9 @@ } ; ALL-LABEL: @occupancy_8_over( -; ALL: alloca [33 x i8] +; SICI: alloca [33 x i8] +; GFX10-NOT: alloca + define amdgpu_kernel void @occupancy_8_over(i8 addrspace(1)* nocapture %out, i8 addrspace(1)* nocapture %in) #6 { entry: %stack = alloca [33 x i8], align 4 @@ -241,7 +247,9 @@ } ; ALL-LABEL: @occupancy_9_over( -; ALL: alloca [29 x i8] +; SICI: alloca [29 x i8] +; GFX10-NOT: alloca + define amdgpu_kernel void @occupancy_9_over(i8 addrspace(1)* nocapture %out, i8 addrspace(1)* nocapture %in) #7 { entry: %stack = alloca [29 x i8], align 4 Index: test/CodeGen/AMDGPU/mubuf-legalize-operands.mir =================================================================== --- test/CodeGen/AMDGPU/mubuf-legalize-operands.mir +++ test/CodeGen/AMDGPU/mubuf-legalize-operands.mir @@ -1,6 +1,7 @@ # RUN: llc -march=amdgcn -mcpu=gfx700 -verify-machineinstrs -verify-machine-dom-info --run-pass=si-fix-sgpr-copies -o - %s | FileCheck %s --check-prefixes=W64,ADDR64 # RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs -verify-machine-dom-info --run-pass=si-fix-sgpr-copies -o - %s | FileCheck %s --check-prefixes=W64,W64-NO-ADDR64 # RUN: llc -march=amdgcn -mcpu=gfx1010 -mattr=-wavefrontsize32,+wavefrontsize64 -verify-machineinstrs -verify-machine-dom-info --run-pass=si-fix-sgpr-copies -o - %s | FileCheck %s --check-prefixes=W64,W64-NO-ADDR64 +# RUN: llc -march=amdgcn -mcpu=gfx1010 -mattr=+wavefrontsize32,-wavefrontsize64 -verify-machineinstrs -verify-machine-dom-info --run-pass=si-fix-sgpr-copies -o - %s | FileCheck %s --check-prefixes=W32 # Test that we correctly legalize VGPR Rsrc operands in MUBUF instructions. 
# Index: test/CodeGen/AMDGPU/or3.ll =================================================================== --- test/CodeGen/AMDGPU/or3.ll +++ test/CodeGen/AMDGPU/or3.ll @@ -22,6 +22,7 @@ ; GFX10-LABEL: or3: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_or3_b32 v0, v0, v1, v2 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: ; return to shader part epilog %x = or i32 %a, %b %result = or i32 %x, %c @@ -47,6 +48,7 @@ ; GFX10-LABEL: or3_vgpr_a: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_or3_b32 v0, v0, s2, s3 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: ; return to shader part epilog %x = or i32 %a, %b %result = or i32 %x, %c @@ -69,6 +71,7 @@ ; GFX10-LABEL: or3_vgpr_all2: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_or3_b32 v0, v1, v2, v0 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: ; return to shader part epilog %x = or i32 %b, %c %result = or i32 %a, %x @@ -91,6 +94,7 @@ ; GFX10-LABEL: or3_vgpr_bc: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_or3_b32 v0, s2, v0, v1 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: ; return to shader part epilog %x = or i32 %a, %b %result = or i32 %x, %c @@ -113,6 +117,7 @@ ; GFX10-LABEL: or3_vgpr_const: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_or3_b32 v0, v1, v0, 64 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: ; return to shader part epilog %x = or i32 64, %b %result = or i32 %x, %a Index: test/CodeGen/AMDGPU/regbank-reassign.mir =================================================================== --- test/CodeGen/AMDGPU/regbank-reassign.mir +++ test/CodeGen/AMDGPU/regbank-reassign.mir @@ -49,6 +49,24 @@ S_ENDPGM 0 ... 
+# GCN-LABEL: s11_vs_vcc{{$}} +# GCN: $vgpr0, $vcc_lo = V_ADDC_U32_e64 killed $sgpr14, killed $vgpr0, killed $vcc_lo, 0 +--- +name: s11_vs_vcc +tracksRegLiveness: true +registers: + - { id: 0, class: sgpr_32, preferred-register: '$sgpr11' } + - { id: 1, class: vgpr_32 } + - { id: 2, class: vgpr_32 } +body: | + bb.0: + %0 = IMPLICIT_DEF + %1 = IMPLICIT_DEF + $vcc_lo = IMPLICIT_DEF + %2, $vcc_lo = V_ADDC_U32_e64 killed %0, killed %1, killed $vcc_lo, 0, implicit $exec + S_ENDPGM 0 +... + # GCN-LABEL: s0_vs_s16{{$}} # GCN: S_AND_B32 killed renamable $sgpr14, $sgpr0, --- Index: test/CodeGen/AMDGPU/shl_add.ll =================================================================== --- test/CodeGen/AMDGPU/shl_add.ll +++ test/CodeGen/AMDGPU/shl_add.ll @@ -22,6 +22,7 @@ ; GFX10-LABEL: shl_add: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_lshl_add_u32 v0, v0, v1, v2 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: ; return to shader part epilog %x = shl i32 %a, %b %result = add i32 %x, %c @@ -46,6 +47,7 @@ ; GFX10-LABEL: shl_add_vgpr_a: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_lshl_add_u32 v0, v0, s2, s3 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: ; return to shader part epilog %x = shl i32 %a, %b %result = add i32 %x, %c @@ -68,6 +70,7 @@ ; GFX10-LABEL: shl_add_vgpr_all: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_lshl_add_u32 v0, v0, v1, v2 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: ; return to shader part epilog %x = shl i32 %a, %b %result = add i32 %x, %c @@ -90,6 +93,7 @@ ; GFX10-LABEL: shl_add_vgpr_ab: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_lshl_add_u32 v0, v0, v1, s2 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: ; return to shader part epilog %x = shl i32 %a, %b %result = add i32 %x, %c @@ -112,6 +116,7 @@ ; GFX10-LABEL: shl_add_vgpr_const: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_lshl_add_u32 v0, v0, 3, v1 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: ; return to shader part epilog %x = shl i32 %a, 3 %result = add i32 %x, %b Index: test/CodeGen/AMDGPU/shl_or.ll 
=================================================================== --- test/CodeGen/AMDGPU/shl_or.ll +++ test/CodeGen/AMDGPU/shl_or.ll @@ -22,6 +22,7 @@ ; GFX10-LABEL: shl_or: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_lshl_or_b32 v0, v0, v1, v2 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: ; return to shader part epilog %x = shl i32 %a, %b %result = or i32 %x, %c @@ -45,6 +46,7 @@ ; GFX10-LABEL: shl_or_vgpr_c: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_lshl_or_b32 v0, s2, s3, v0 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: ; return to shader part epilog %x = shl i32 %a, %b %result = or i32 %x, %c @@ -67,6 +69,7 @@ ; GFX10-LABEL: shl_or_vgpr_all2: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_lshl_or_b32 v0, v0, v1, v2 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: ; return to shader part epilog %x = shl i32 %a, %b %result = or i32 %c, %x @@ -89,6 +92,7 @@ ; GFX10-LABEL: shl_or_vgpr_ac: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_lshl_or_b32 v0, v0, s2, v1 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: ; return to shader part epilog %x = shl i32 %a, %b %result = or i32 %x, %c @@ -111,6 +115,7 @@ ; GFX10-LABEL: shl_or_vgpr_const: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_lshl_or_b32 v0, v0, v1, 6 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: ; return to shader part epilog %x = shl i32 %a, %b %result = or i32 %x, 6 @@ -133,6 +138,7 @@ ; GFX10-LABEL: shl_or_vgpr_const2: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_lshl_or_b32 v0, v0, 6, v1 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: ; return to shader part epilog %x = shl i32 %a, 6 %result = or i32 %x, %b @@ -155,6 +161,7 @@ ; GFX10-LABEL: shl_or_vgpr_const_scalar1: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_lshl_or_b32 v0, s2, 6, v0 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: ; return to shader part epilog %x = shl i32 %a, 6 %result = or i32 %x, %b @@ -177,6 +184,7 @@ ; GFX10-LABEL: shl_or_vgpr_const_scalar2: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_lshl_or_b32 v0, v0, 6, s2 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: ; return to 
shader part epilog %x = shl i32 %a, 6 %result = or i32 %x, %b Index: test/CodeGen/AMDGPU/sub_i1.ll =================================================================== --- test/CodeGen/AMDGPU/sub_i1.ll +++ test/CodeGen/AMDGPU/sub_i1.ll @@ -1,8 +1,10 @@ -; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s +; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,WAVE64 %s +; RUN: llc -march=amdgcn -mcpu=gfx1010 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,WAVE32 %s ; GCN-LABEL: {{^}}sub_var_var_i1: -; GCN: s_xor_b64 +; WAVE32: s_xor_b32 +; WAVE64: s_xor_b64 define amdgpu_kernel void @sub_var_var_i1(i1 addrspace(1)* %out, i1 addrspace(1)* %in0, i1 addrspace(1)* %in1) { %a = load volatile i1, i1 addrspace(1)* %in0 %b = load volatile i1, i1 addrspace(1)* %in1 @@ -12,7 +14,8 @@ } ; GCN-LABEL: {{^}}sub_var_imm_i1: -; GCN: s_not_b64 +; WAVE32: s_not_b32 +; WAVE64: s_not_b64 define amdgpu_kernel void @sub_var_imm_i1(i1 addrspace(1)* %out, i1 addrspace(1)* %in) { %a = load volatile i1, i1 addrspace(1)* %in %sub = sub i1 %a, 1 @@ -22,7 +25,8 @@ ; GCN-LABEL: {{^}}sub_i1_cf: ; GCN: ; %endif -; GCN: s_not_b64 +; WAVE32: s_not_b32 +; WAVE64: s_not_b64 define amdgpu_kernel void @sub_i1_cf(i1 addrspace(1)* %out, i1 addrspace(1)* %a, i1 addrspace(1)* %b) { entry: %tid = call i32 @llvm.amdgcn.workitem.id.x() Index: test/CodeGen/AMDGPU/wave32.ll =================================================================== --- /dev/null +++ test/CodeGen/AMDGPU/wave32.ll @@ -0,0 +1,1140 @@ +; RUN: llc -march=amdgcn -mcpu=gfx1010 -mattr=+wavefrontsize32,-wavefrontsize64 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX1032 %s +; RUN: llc -march=amdgcn -mcpu=gfx1010 -mattr=-wavefrontsize32,+wavefrontsize64 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX1064 %s +; RUN: llc -march=amdgcn -mcpu=gfx1010 -mattr=+wavefrontsize32,-wavefrontsize64 -amdgpu-early-ifcvt=1 
-verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX1032 %s +; RUN: llc -march=amdgcn -mcpu=gfx1010 -mattr=-wavefrontsize32,+wavefrontsize64 -amdgpu-early-ifcvt=1 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX1064 %s +; RUN: llc -march=amdgcn -mcpu=gfx1010 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX1032,GFX10DEFWAVE %s + +; GCN-LABEL: {{^}}test_vopc_i32: +; GFX1032: v_cmp_lt_i32_e32 vcc_lo, 0, v{{[0-9]+}} +; GFX1032: v_cndmask_b32_e64 v{{[0-9]+}}, 2, 1, vcc_lo +; GFX1064: v_cmp_lt_i32_e32 vcc, 0, v{{[0-9]+}} +; GFX1064: v_cndmask_b32_e64 v{{[0-9]+}}, 2, 1, vcc{{$}} +define amdgpu_kernel void @test_vopc_i32(i32 addrspace(1)* %arg) { + %lid = tail call i32 @llvm.amdgcn.workitem.id.x() + %gep = getelementptr inbounds i32, i32 addrspace(1)* %arg, i32 %lid + %load = load i32, i32 addrspace(1)* %gep, align 4 + %cmp = icmp sgt i32 %load, 0 + %sel = select i1 %cmp, i32 1, i32 2 + store i32 %sel, i32 addrspace(1)* %gep, align 4 + ret void +} + +; GCN-LABEL: {{^}}test_vopc_f32: +; GFX1032: v_cmp_nge_f32_e32 vcc_lo, 0, v{{[0-9]+}} +; GFX1032: v_cndmask_b32_e64 v{{[0-9]+}}, 2.0, 1.0, vcc_lo +; GFX1064: v_cmp_nge_f32_e32 vcc, 0, v{{[0-9]+}} +; GFX1064: v_cndmask_b32_e64 v{{[0-9]+}}, 2.0, 1.0, vcc{{$}} +define amdgpu_kernel void @test_vopc_f32(float addrspace(1)* %arg) { + %lid = tail call i32 @llvm.amdgcn.workitem.id.x() + %gep = getelementptr inbounds float, float addrspace(1)* %arg, i32 %lid + %load = load float, float addrspace(1)* %gep, align 4 + %cmp = fcmp ugt float %load, 0.0 + %sel = select i1 %cmp, float 1.0, float 2.0 + store float %sel, float addrspace(1)* %gep, align 4 + ret void +} + +; GCN-LABEL: {{^}}test_vopc_vcmpx: +; GFX1032: v_cmpx_le_f32_e32 0, v{{[0-9]+}} +; GFX1064: v_cmpx_le_f32_e32 0, v{{[0-9]+}} +define amdgpu_ps void @test_vopc_vcmpx(float %x) { + %cmp = fcmp oge float %x, 0.0 + call void @llvm.amdgcn.kill(i1 %cmp) + ret void +} + +; GCN-LABEL: {{^}}test_vopc_2xf16: +; GFX1032: v_cmp_le_f16_sdwa 
[[SC:s[0-9]+]], {{[vs][0-9]+}}, v{{[0-9]+}} src0_sel:WORD_1 src1_sel:DWORD +; GFX1032: v_cndmask_b32_e64 v{{[0-9]+}}, 0x3c003c00, v{{[0-9]+}}, [[SC]] +; GFX1064: v_cmp_le_f16_sdwa [[SC:s\[[0-9:]+\]]], {{[vs][0-9]+}}, v{{[0-9]+}} src0_sel:WORD_1 src1_sel:DWORD +; GFX1064: v_cndmask_b32_e64 v{{[0-9]+}}, 0x3c003c00, v{{[0-9]+}}, [[SC]] +define amdgpu_kernel void @test_vopc_2xf16(<2 x half> addrspace(1)* %arg) { + %lid = tail call i32 @llvm.amdgcn.workitem.id.x() + %gep = getelementptr inbounds <2 x half>, <2 x half> addrspace(1)* %arg, i32 %lid + %load = load <2 x half>, <2 x half> addrspace(1)* %gep, align 4 + %elt = extractelement <2 x half> %load, i32 1 + %cmp = fcmp ugt half %elt, 0.0 + %sel = select i1 %cmp, <2 x half> <half 0xH3C00, half 0xH3C00>, <2 x half> %load + store <2 x half> %sel, <2 x half> addrspace(1)* %gep, align 4 + ret void +} + +; GCN-LABEL: {{^}}test_vopc_class: +; GFX1032: v_cmp_class_f32_e64 [[C:vcc_lo|s[0-9:]+]], s{{[0-9]+}}, 0x204 +; GFX1032: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 1, [[C]] +; GFX1064: v_cmp_class_f32_e64 [[C:vcc|s\[[0-9:]+\]]], s{{[0-9]+}}, 0x204 +; GFX1064: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 1, [[C]]{{$}} +define amdgpu_kernel void @test_vopc_class(i32 addrspace(1)* %out, float %x) #0 { + %fabs = tail call float @llvm.fabs.f32(float %x) + %cmp = fcmp oeq float %fabs, 0x7FF0000000000000 + %ext = zext i1 %cmp to i32 + store i32 %ext, i32 addrspace(1)* %out, align 4 + ret void +} + +; GCN-LABEL: {{^}}test_vcmp_vcnd_f16: +; GFX1032: v_cmp_neq_f16_e64 [[C:vcc_lo|s\[[0-9:]+\]]], 0x7c00, s{{[0-9]+}} +; GFX1032: v_cndmask_b32_e32 v{{[0-9]+}}, 0x3c00, v{{[0-9]+}}, [[C]] + +; GFX1064: v_cmp_neq_f16_e64 [[C:vcc|s\[[0-9:]+\]]], 0x7c00, s{{[0-9]+}} +; GFX1064: v_cndmask_b32_e32 v{{[0-9]+}}, 0x3c00, v{{[0-9]+}}, [[C]]{{$}} +define amdgpu_kernel void @test_vcmp_vcnd_f16(half addrspace(1)* %out, half %x) #0 { + %cmp = fcmp oeq half %x, 0x7FF0000000000000 + %sel = select i1 %cmp, half 1.0, half %x + store half %sel, half addrspace(1)* %out, align 2 + ret void +} + +; 
GCN-LABEL: {{^}}test_vop3_cmp_f32_sop_and: +; GFX1032: v_cmp_nge_f32_e32 vcc_lo, 0, v{{[0-9]+}} +; GFX1032: v_cmp_nle_f32_e64 [[C2:s[0-9]+]], 1.0, v{{[0-9]+}} +; GFX1032: s_and_b32 [[AND:s[0-9]+]], vcc_lo, [[C2]] +; GFX1032: v_cndmask_b32_e64 v{{[0-9]+}}, 2.0, 1.0, [[AND]] +; GFX1064: v_cmp_nge_f32_e32 vcc, 0, v{{[0-9]+}} +; GFX1064: v_cmp_nle_f32_e64 [[C2:s\[[0-9:]+\]]], 1.0, v{{[0-9]+}} +; GFX1064: s_and_b64 [[AND:s\[[0-9:]+\]]], vcc, [[C2]] +; GFX1064: v_cndmask_b32_e64 v{{[0-9]+}}, 2.0, 1.0, [[AND]] +define amdgpu_kernel void @test_vop3_cmp_f32_sop_and(float addrspace(1)* %arg) { + %lid = tail call i32 @llvm.amdgcn.workitem.id.x() + %gep = getelementptr inbounds float, float addrspace(1)* %arg, i32 %lid + %load = load float, float addrspace(1)* %gep, align 4 + %cmp = fcmp ugt float %load, 0.0 + %cmp2 = fcmp ult float %load, 1.0 + %and = and i1 %cmp, %cmp2 + %sel = select i1 %and, float 1.0, float 2.0 + store float %sel, float addrspace(1)* %gep, align 4 + ret void +} + +; GCN-LABEL: {{^}}test_vop3_cmp_i32_sop_xor: +; GFX1032: v_cmp_lt_i32_e32 vcc_lo, 0, v{{[0-9]+}} +; GFX1032: v_cmp_gt_i32_e64 [[C2:s[0-9]+]], 1, v{{[0-9]+}} +; GFX1032: s_xor_b32 [[AND:s[0-9]+]], vcc_lo, [[C2]] +; GFX1032: v_cndmask_b32_e64 v{{[0-9]+}}, 2, 1, [[AND]] +; GFX1064: v_cmp_lt_i32_e32 vcc, 0, v{{[0-9]+}} +; GFX1064: v_cmp_gt_i32_e64 [[C2:s\[[0-9:]+\]]], 1, v{{[0-9]+}} +; GFX1064: s_xor_b64 [[AND:s\[[0-9:]+\]]], vcc, [[C2]] +; GFX1064: v_cndmask_b32_e64 v{{[0-9]+}}, 2, 1, [[AND]] +define amdgpu_kernel void @test_vop3_cmp_i32_sop_xor(i32 addrspace(1)* %arg) { + %lid = tail call i32 @llvm.amdgcn.workitem.id.x() + %gep = getelementptr inbounds i32, i32 addrspace(1)* %arg, i32 %lid + %load = load i32, i32 addrspace(1)* %gep, align 4 + %cmp = icmp sgt i32 %load, 0 + %cmp2 = icmp slt i32 %load, 1 + %xor = xor i1 %cmp, %cmp2 + %sel = select i1 %xor, i32 1, i32 2 + store i32 %sel, i32 addrspace(1)* %gep, align 4 + ret void +} + +; GCN-LABEL: {{^}}test_vop3_cmp_u32_sop_or: +; GFX1032: 
v_cmp_lt_u32_e32 vcc_lo, 3, v{{[0-9]+}} +; GFX1032: v_cmp_gt_u32_e64 [[C2:s[0-9]+]], 2, v{{[0-9]+}} +; GFX1032: s_or_b32 [[AND:s[0-9]+]], vcc_lo, [[C2]] +; GFX1032: v_cndmask_b32_e64 v{{[0-9]+}}, 2, 1, [[AND]] +; GFX1064: v_cmp_lt_u32_e32 vcc, 3, v{{[0-9]+}} +; GFX1064: v_cmp_gt_u32_e64 [[C2:s\[[0-9:]+\]]], 2, v{{[0-9]+}} +; GFX1064: s_or_b64 [[AND:s\[[0-9:]+\]]], vcc, [[C2]] +; GFX1064: v_cndmask_b32_e64 v{{[0-9]+}}, 2, 1, [[AND]] +define amdgpu_kernel void @test_vop3_cmp_u32_sop_or(i32 addrspace(1)* %arg) { + %lid = tail call i32 @llvm.amdgcn.workitem.id.x() + %gep = getelementptr inbounds i32, i32 addrspace(1)* %arg, i32 %lid + %load = load i32, i32 addrspace(1)* %gep, align 4 + %cmp = icmp ugt i32 %load, 3 + %cmp2 = icmp ult i32 %load, 2 + %or = or i1 %cmp, %cmp2 + %sel = select i1 %or, i32 1, i32 2 + store i32 %sel, i32 addrspace(1)* %gep, align 4 + ret void +} + +; GCN-LABEL: {{^}}test_mask_if: +; GFX1032: s_and_saveexec_b32 s{{[0-9]+}}, vcc_lo +; GFX1064: s_and_saveexec_b64 s[{{[0-9:]+}}], vcc{{$}} +; GCN: ; mask branch +define amdgpu_kernel void @test_mask_if(i32 addrspace(1)* %arg) #0 { + %lid = tail call i32 @llvm.amdgcn.workitem.id.x() + %cmp = icmp ugt i32 %lid, 10 + br i1 %cmp, label %if, label %endif + +if: + store i32 0, i32 addrspace(1)* %arg, align 4 + br label %endif + +endif: + ret void +} + +; GCN-LABEL: {{^}}test_loop_with_if: +; GFX1032: s_or_b32 s{{[0-9]+}}, vcc_lo, s{{[0-9]+}} +; GFX1032: s_andn2_b32 exec_lo, exec_lo, s{{[0-9]+}} +; GFX1064: s_or_b64 s[{{[0-9:]+}}], vcc, s[{{[0-9:]+}}] +; GFX1064: s_andn2_b64 exec, exec, s[{{[0-9:]+}}] +; GCN: s_cbranch_execz +; GCN: BB{{.*}}: +; GFX1032: s_and_saveexec_b32 s{{[0-9]+}}, vcc_lo +; GFX1064: s_and_saveexec_b64 s[{{[0-9:]+}}], vcc{{$}} +; GCN: s_cbranch_execz +; GCN: BB{{.*}}: +; GCN: BB{{.*}}: +; GFX1032: s_xor_b32 s{{[0-9]+}}, exec_lo, s{{[0-9]+}} +; GFX1064: s_xor_b64 s[{{[0-9:]+}}], exec, s[{{[0-9:]+}}] +; GCN: ; mask branch BB +; GCN: BB{{.*}}: +; GCN: BB{{.*}}: +; GFX1032: s_or_b32 
exec_lo, exec_lo, s{{[0-9]+}} +; GFX1032: s_and_saveexec_b32 s{{[0-9]+}}, s{{[0-9]+}} +; GFX1064: s_or_b64 exec, exec, s[{{[0-9:]+}}] +; GFX1064: s_and_saveexec_b64 s[{{[0-9:]+}}], s[{{[0-9:]+}}]{{$}} +; GCN: ; mask branch BB +; GCN: BB{{.*}}: +; GCN: BB{{.*}}: +; GCN: s_endpgm +define amdgpu_kernel void @test_loop_with_if(i32 addrspace(1)* %arg) #0 { +bb: + %tmp = tail call i32 @llvm.amdgcn.workitem.id.x() + br label %bb2 + +bb1: + ret void + +bb2: + %tmp3 = phi i32 [ 0, %bb ], [ %tmp15, %bb13 ] + %tmp4 = icmp slt i32 %tmp3, %tmp + br i1 %tmp4, label %bb5, label %bb11 + +bb5: + %tmp6 = sext i32 %tmp3 to i64 + %tmp7 = getelementptr inbounds i32, i32 addrspace(1)* %arg, i64 %tmp6 + %tmp8 = load i32, i32 addrspace(1)* %tmp7, align 4 + %tmp9 = icmp sgt i32 %tmp8, 10 + br i1 %tmp9, label %bb10, label %bb11 + +bb10: + store i32 %tmp, i32 addrspace(1)* %tmp7, align 4 + br label %bb13 + +bb11: + %tmp12 = sdiv i32 %tmp3, 2 + br label %bb13 + +bb13: + %tmp14 = phi i32 [ %tmp3, %bb10 ], [ %tmp12, %bb11 ] + %tmp15 = add nsw i32 %tmp14, 1 + %tmp16 = icmp slt i32 %tmp14, 255 + br i1 %tmp16, label %bb2, label %bb1 +} + +; GCN-LABEL: {{^}}test_loop_with_if_else_break: +; GFX1032: s_and_saveexec_b32 s{{[0-9]+}}, vcc_lo +; GFX1064: s_and_saveexec_b64 s[{{[0-9:]+}}], vcc{{$}} +; GCN: ; mask branch +; GCN: s_cbranch_execz +; GCN: BB{{.*}}: +; GCN: BB{{.*}}: +; GFX1032: s_andn2_b32 s{{[0-9]+}}, s{{[0-9]+}}, exec_lo +; GFX1064: s_andn2_b64 s[{{[0-9:]+}}], s[{{[0-9:]+}}], exec +; GFX1032: s_or_b32 s{{[0-9]+}}, vcc_lo, s{{[0-9]+}} +; GFX1032: s_or_b32 s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}} +; GFX1064: s_or_b64 s[{{[0-9:]+}}], vcc, s[{{[0-9:]+}}] +; GFX1064: s_or_b64 s[{{[0-9:]+}}], s[{{[0-9:]+}}], s[{{[0-9:]+}}] +; GCN: s_cbranch_execz +; GCN: BB{{.*}}: +define amdgpu_kernel void @test_loop_with_if_else_break(i32 addrspace(1)* %arg) #0 { +bb: + %tmp = tail call i32 @llvm.amdgcn.workitem.id.x() + %tmp1 = icmp eq i32 %tmp, 0 + br i1 %tmp1, label %.loopexit, label %.preheader + +.preheader: 
+ br label %bb2 + +bb2: + %tmp3 = phi i32 [ %tmp9, %bb8 ], [ 0, %.preheader ] + %tmp4 = zext i32 %tmp3 to i64 + %tmp5 = getelementptr inbounds i32, i32 addrspace(1)* %arg, i64 %tmp4 + %tmp6 = load i32, i32 addrspace(1)* %tmp5, align 4 + %tmp7 = icmp sgt i32 %tmp6, 10 + br i1 %tmp7, label %bb8, label %.loopexit + +bb8: + store i32 %tmp, i32 addrspace(1)* %tmp5, align 4 + %tmp9 = add nuw nsw i32 %tmp3, 1 + %tmp10 = icmp ult i32 %tmp9, 256 + %tmp11 = icmp ult i32 %tmp9, %tmp + %tmp12 = and i1 %tmp10, %tmp11 + br i1 %tmp12, label %bb2, label %.loopexit + +.loopexit: + ret void +} + +; GCN-LABEL: {{^}}test_addc_vop2b: +; GFX1032: v_add_co_u32_e64 v{{[0-9]+}}, vcc_lo, v{{[0-9]+}}, s{{[0-9]+}} +; GFX1032: v_add_co_ci_u32_e32 v{{[0-9]+}}, vcc_lo, s{{[0-9]+}}, v{{[0-9]+}}, vcc_lo +; GFX1064: v_add_co_u32_e64 v{{[0-9]+}}, vcc, v{{[0-9]+}}, s{{[0-9]+}} +; GFX1064: v_add_co_ci_u32_e32 v{{[0-9]+}}, vcc, s{{[0-9]+}}, v{{[0-9]+}}, vcc{{$}} +define amdgpu_kernel void @test_addc_vop2b(i64 addrspace(1)* %arg, i64 %arg1) #0 { +bb: + %tmp = tail call i32 @llvm.amdgcn.workitem.id.x() + %tmp3 = getelementptr inbounds i64, i64 addrspace(1)* %arg, i32 %tmp + %tmp4 = load i64, i64 addrspace(1)* %tmp3, align 8 + %tmp5 = add nsw i64 %tmp4, %arg1 + store i64 %tmp5, i64 addrspace(1)* %tmp3, align 8 + ret void +} + +; GCN-LABEL: {{^}}test_subbrev_vop2b: +; GFX1032: v_sub_co_u32_e64 v{{[0-9]+}}, [[A0:s[0-9]+|vcc_lo]], v{{[0-9]+}}, s{{[0-9]+}}{{$}} +; GFX1032: v_subrev_co_ci_u32_e32 v{{[0-9]+}}, vcc_lo, {{[vs][0-9]+}}, {{[vs][0-9]+}}, [[A0]]{{$}} +; GFX1064: v_sub_co_u32_e64 v{{[0-9]+}}, [[A0:s\[[0-9:]+\]|vcc]], v{{[0-9]+}}, s{{[0-9]+}}{{$}} +; GFX1064: v_subrev_co_ci_u32_e32 v{{[0-9]+}}, vcc, {{[vs][0-9]+}}, {{[vs][0-9]+}}, [[A0]]{{$}} +define amdgpu_kernel void @test_subbrev_vop2b(i64 addrspace(1)* %arg, i64 %arg1) #0 { +bb: + %tmp = tail call i32 @llvm.amdgcn.workitem.id.x() + %tmp3 = getelementptr inbounds i64, i64 addrspace(1)* %arg, i32 %tmp + %tmp4 = load i64, i64 addrspace(1)* %tmp3, 
align 8 + %tmp5 = sub nsw i64 %tmp4, %arg1 + store i64 %tmp5, i64 addrspace(1)* %tmp3, align 8 + ret void +} + +; GCN-LABEL: {{^}}test_subb_vop2b: +; GFX1032: v_sub_co_u32_e64 v{{[0-9]+}}, [[A0:s[0-9]+|vcc_lo]], s{{[0-9]+}}, v{{[0-9]+}}{{$}} +; GFX1032: v_sub_co_ci_u32_e32 v{{[0-9]+}}, vcc_lo, {{[vs][0-9]+}}, v{{[0-9]+}}, [[A0]]{{$}} +; GFX1064: v_sub_co_u32_e64 v{{[0-9]+}}, [[A0:s\[[0-9:]+\]|vcc]], s{{[0-9]+}}, v{{[0-9]+}}{{$}} +; GFX1064: v_sub_co_ci_u32_e32 v{{[0-9]+}}, vcc, {{[vs][0-9]+}}, v{{[0-9]+}}, [[A0]]{{$}} +define amdgpu_kernel void @test_subb_vop2b(i64 addrspace(1)* %arg, i64 %arg1) #0 { +bb: + %tmp = tail call i32 @llvm.amdgcn.workitem.id.x() + %tmp3 = getelementptr inbounds i64, i64 addrspace(1)* %arg, i32 %tmp + %tmp4 = load i64, i64 addrspace(1)* %tmp3, align 8 + %tmp5 = sub nsw i64 %arg1, %tmp4 + store i64 %tmp5, i64 addrspace(1)* %tmp3, align 8 + ret void +} + +; GCN-LABEL: {{^}}test_udiv64: +; GFX1032: v_add_co_u32_e64 v{{[0-9]+}}, [[SDST:s[0-9]+]], v{{[0-9]+}}, v{{[0-9]+}} +; GFX1032: v_add_co_ci_u32_e32 v{{[0-9]+}}, vcc_lo, 0, v{{[0-9]+}}, vcc_lo +; GFX1032: v_add_co_ci_u32_e64 v{{[0-9]+}}, vcc_lo, v{{[0-9]+}}, v{{[0-9]+}}, [[SDST]] +; GFX1032: v_add_co_u32_e64 v{{[0-9]+}}, vcc_lo, v{{[0-9]+}}, v{{[0-9]+}} +; GFX1032: v_add_co_u32_e64 v{{[0-9]+}}, vcc_lo, v{{[0-9]+}}, v{{[0-9]+}} +; GFX1032: v_add_co_u32_e64 v{{[0-9]+}}, vcc_lo, v{{[0-9]+}}, v{{[0-9]+}} +; GFX1032: v_add_co_ci_u32_e32 v{{[0-9]+}}, vcc_lo, 0, v{{[0-9]+}}, vcc_lo +; GFX1032: v_sub_co_u32_e64 v{{[0-9]+}}, vcc_lo, s{{[0-9]+}}, v{{[0-9]+}} +; GFX1032: v_sub_co_ci_u32_e64 v{{[0-9]+}}, s{{[0-9]+}}, {{[vs][0-9]+}}, v{{[0-9]+}}, vcc_lo +; GFX1032: v_subrev_co_ci_u32_e32 v{{[0-9]+}}, vcc_lo, {{[vs][0-9]+}}, v{{[0-9]+}}, vcc_lo +; GFX1064: v_add_co_u32_e64 v{{[0-9]+}}, [[SDST:s\[[0-9:]+\]]], v{{[0-9]+}}, v{{[0-9]+}} +; GFX1064: v_add_co_ci_u32_e32 v{{[0-9]+}}, vcc, 0, v{{[0-9]+}}, vcc{{$}} +; GFX1064: v_add_co_ci_u32_e64 v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}}, [[SDST]] +; GFX1064: 
v_add_co_u32_e64 v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}} +; GFX1064: v_add_co_u32_e64 v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}} +; GFX1064: v_add_co_u32_e64 v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}} +; GFX1064: v_add_co_ci_u32_e32 v{{[0-9]+}}, vcc, 0, v{{[0-9]+}}, vcc{{$}} +; GFX1064: v_sub_co_u32_e64 v{{[0-9]+}}, vcc, s{{[0-9]+}}, v{{[0-9]+}} +; GFX1064: v_sub_co_ci_u32_e64 v{{[0-9]+}}, s[{{[0-9:]+}}], {{[vs][0-9]+}}, v{{[0-9]+}}, vcc{{$}} +; GFX1064: v_subrev_co_ci_u32_e32 v{{[0-9]+}}, vcc, {{[vs][0-9]+}}, v{{[0-9]+}}, vcc{{$}} +define amdgpu_kernel void @test_udiv64(i64 addrspace(1)* %arg) #0 { +bb: + %tmp = getelementptr inbounds i64, i64 addrspace(1)* %arg, i64 1 + %tmp1 = load i64, i64 addrspace(1)* %tmp, align 8 + %tmp2 = load i64, i64 addrspace(1)* %arg, align 8 + %tmp3 = udiv i64 %tmp1, %tmp2 + %tmp4 = getelementptr inbounds i64, i64 addrspace(1)* %arg, i64 2 + store i64 %tmp3, i64 addrspace(1)* %tmp4, align 8 + ret void +} + +; GCN-LABEL: {{^}}test_div_scale_f32: +; GFX1032: v_div_scale_f32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} +; GFX1064: v_div_scale_f32 v{{[0-9]+}}, s[{{[0-9:]+}}], v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} +define amdgpu_kernel void @test_div_scale_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 { + %tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone + %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid + %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1 + + %a = load volatile float, float addrspace(1)* %gep.0, align 4 + %b = load volatile float, float addrspace(1)* %gep.1, align 4 + + %result = call { float, i1 } @llvm.amdgcn.div.scale.f32(float %a, float %b, i1 false) nounwind readnone + %result0 = extractvalue { float, i1 } %result, 0 + store float %result0, float addrspace(1)* %out, align 4 + ret void +} + +; GCN-LABEL: {{^}}test_div_scale_f64: +; GFX1032: v_div_scale_f64 v[{{[0-9:]+}}], s{{[0-9]+}}, v[{{[0-9:]+}}], v[{{[0-9:]+}}], v[{{[0-9:]+}}] +; GFX1064: 
v_div_scale_f64 v[{{[0-9:]+}}], s[{{[0-9:]+}}], v[{{[0-9:]+}}], v[{{[0-9:]+}}], v[{{[0-9:]+}}] +define amdgpu_kernel void @test_div_scale_f64(double addrspace(1)* %out, double addrspace(1)* %aptr, double addrspace(1)* %in) #0 { + %tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone + %gep.0 = getelementptr double, double addrspace(1)* %in, i32 %tid + %gep.1 = getelementptr double, double addrspace(1)* %gep.0, i32 1 + + %a = load volatile double, double addrspace(1)* %gep.0, align 8 + %b = load volatile double, double addrspace(1)* %gep.1, align 8 + + %result = call { double, i1 } @llvm.amdgcn.div.scale.f64(double %a, double %b, i1 true) nounwind readnone + %result0 = extractvalue { double, i1 } %result, 0 + store double %result0, double addrspace(1)* %out, align 8 + ret void +} + +; GCN-LABEL: {{^}}test_mad_i64_i32: +; GFX1032: v_mad_i64_i32 v[{{[0-9:]+}}], s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, v[{{[0-9:]+}}] +; GFX1064: v_mad_i64_i32 v[{{[0-9:]+}}], s[{{[0-9:]+}}], v{{[0-9]+}}, v{{[0-9]+}}, v[{{[0-9:]+}}] +define i64 @test_mad_i64_i32(i32 %arg0, i32 %arg1, i64 %arg2) #0 { + %sext0 = sext i32 %arg0 to i64 + %sext1 = sext i32 %arg1 to i64 + %mul = mul i64 %sext0, %sext1 + %mad = add i64 %mul, %arg2 + ret i64 %mad +} + +; GCN-LABEL: {{^}}test_mad_u64_u32: +; GFX1032: v_mad_u64_u32 v[{{[0-9:]+}}], s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, v[{{[0-9:]+}}] +; GFX1064: v_mad_u64_u32 v[{{[0-9:]+}}], s[{{[0-9:]+}}], v{{[0-9]+}}, v{{[0-9]+}}, v[{{[0-9:]+}}] +define i64 @test_mad_u64_u32(i32 %arg0, i32 %arg1, i64 %arg2) #0 { + %sext0 = zext i32 %arg0 to i64 + %sext1 = zext i32 %arg1 to i64 + %mul = mul i64 %sext0, %sext1 + %mad = add i64 %mul, %arg2 + ret i64 %mad +} + +; GCN-LABEL: {{^}}test_div_fmas_f32: +; GFX1032: v_cmp_eq_u32_e64 vcc_lo, +; GFX1064: v_cmp_eq_u32_e64 vcc, +; GCN: v_div_fmas_f32 v{{[0-9]+}}, {{[vs][0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} +define amdgpu_kernel void @test_div_fmas_f32(float addrspace(1)* %out, float %a, float %b, float %c, i1 %d) nounwind 
{ + %result = call float @llvm.amdgcn.div.fmas.f32(float %a, float %b, float %c, i1 %d) nounwind readnone + store float %result, float addrspace(1)* %out, align 4 + ret void +} + +; GCN-LABEL: {{^}}test_div_fmas_f64: +; GFX1032: v_cmp_eq_u32_e64 vcc_lo, +; GFX1064: v_cmp_eq_u32_e64 vcc, +; GCN-DAG: v_div_fmas_f64 v[{{[0-9:]+}}], {{[vs]}}[{{[0-9:]+}}], v[{{[0-9:]+}}], v[{{[0-9:]+}}] +define amdgpu_kernel void @test_div_fmas_f64(double addrspace(1)* %out, double %a, double %b, double %c, i1 %d) nounwind { + %result = call double @llvm.amdgcn.div.fmas.f64(double %a, double %b, double %c, i1 %d) nounwind readnone + store double %result, double addrspace(1)* %out, align 8 + ret void +} + +; GCN-LABEL: {{^}}test_div_fmas_f32_i1_phi_vcc: +; GFX1032: s_mov_b32 [[VCC:vcc_lo]], 0{{$}} +; GFX1064: s_mov_b64 [[VCC:vcc]], 0{{$}} +; GFX1032: s_and_saveexec_b32 [[SAVE:s[0-9]+]], s{{[0-9]+}}{{$}} +; GFX1064: s_and_saveexec_b64 [[SAVE:s\[[0-9]+:[0-9]+\]]], s[{{[0-9:]+}}]{{$}} + +; GCN: load_dword [[LOAD:v[0-9]+]] +; GCN: v_cmp_ne_u32_e32 [[VCC]], 0, [[LOAD]] + +; GCN: BB{{[0-9_]+}}: +; GFX1032: s_or_b32 exec_lo, exec_lo, [[SAVE]] +; GFX1064: s_or_b64 exec, exec, [[SAVE]] +; GCN: v_div_fmas_f32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} +define amdgpu_kernel void @test_div_fmas_f32_i1_phi_vcc(float addrspace(1)* %out, float addrspace(1)* %in, i32 addrspace(1)* %dummy) #0 { +entry: + %tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone + %gep.out = getelementptr float, float addrspace(1)* %out, i32 2 + %gep.a = getelementptr float, float addrspace(1)* %in, i32 %tid + %gep.b = getelementptr float, float addrspace(1)* %gep.a, i32 1 + %gep.c = getelementptr float, float addrspace(1)* %gep.a, i32 2 + + %a = load float, float addrspace(1)* %gep.a + %b = load float, float addrspace(1)* %gep.b + %c = load float, float addrspace(1)* %gep.c + + %cmp0 = icmp eq i32 %tid, 0 + br i1 %cmp0, label %bb, label %exit + +bb: + %val = load volatile i32, i32 addrspace(1)* %dummy + 
%cmp1 = icmp ne i32 %val, 0 + br label %exit + +exit: + %cond = phi i1 [false, %entry], [%cmp1, %bb] + %result = call float @llvm.amdgcn.div.fmas.f32(float %a, float %b, float %c, i1 %cond) nounwind readnone + store float %result, float addrspace(1)* %gep.out, align 4 + ret void +} + +; GCN-LABEL: {{^}}fdiv_f32: +; GFX1032: v_div_scale_f32 v{{[0-9]+}}, vcc_lo, s{{[0-9]+}}, v{{[0-9]+}}, s{{[0-9]+}} +; GFX1064: v_div_scale_f32 v{{[0-9]+}}, vcc, s{{[0-9]+}}, v{{[0-9]+}}, s{{[0-9]+}} +; GCN: v_rcp_f32_e32 v{{[0-9]+}}, v{{[0-9]+}} +; GCN-NOT: vcc +; GCN: v_div_fmas_f32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} +define amdgpu_kernel void @fdiv_f32(float addrspace(1)* %out, float %a, float %b) #0 { +entry: + %fdiv = fdiv float %a, %b + store float %fdiv, float addrspace(1)* %out + ret void +} + +; GCN-LABEL: {{^}}test_br_cc_f16: +; GFX1032: v_cmp_nlt_f16_e32 vcc_lo, +; GFX1032-NEXT: s_and_b32 vcc_lo, exec_lo, vcc_lo +; GFX1064: v_cmp_nlt_f16_e32 vcc, +; GFX1064-NEXT: s_and_b64 vcc, exec, vcc{{$}} +; GCN-NEXT: s_cbranch_vccnz +define amdgpu_kernel void @test_br_cc_f16( + half addrspace(1)* %r, + half addrspace(1)* %a, + half addrspace(1)* %b) { +entry: + %a.val = load half, half addrspace(1)* %a + %b.val = load half, half addrspace(1)* %b + %fcmp = fcmp olt half %a.val, %b.val + br i1 %fcmp, label %one, label %two + +one: + store half %a.val, half addrspace(1)* %r + ret void + +two: + store half %b.val, half addrspace(1)* %r + ret void +} + +; GCN-LABEL: {{^}}test_brcc_i1: +; GCN: s_cmp_eq_u32 s{{[0-9]+}}, 0 +; GCN-NEXT: s_cbranch_scc1 +define amdgpu_kernel void @test_brcc_i1(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in, i1 %val) #0 { + %cmp0 = icmp ne i1 %val, 0 + br i1 %cmp0, label %store, label %end + +store: + store i32 222, i32 addrspace(1)* %out + ret void + +end: + ret void +} + +; GCN-LABEL: {{^}}test_preserve_condition_undef_flag: +; GFX1032: v_cmp_nlt_f32_e64 s{{[0-9]+}}, s{{[0-9]+}}, 1.0 +; GFX1032: v_cmp_ngt_f32_e64 s{{[0-9]+}}, 
s{{[0-9]+}}, 0 +; GFX1032: v_cmp_nlt_f32_e64 s{{[0-9]+}}, s{{[0-9]+}}, 1.0 +; GFX1032: s_or_b32 [[OR1:s[0-9]+]], s{{[0-9]+}}, s{{[0-9]+}} +; GFX1032: s_or_b32 [[OR2:s[0-9]+]], [[OR1]], s{{[0-9]+}} +; GFX1032: s_and_b32 vcc_lo, exec_lo, [[OR2]] +; GFX1064: v_cmp_nlt_f32_e64 s[{{[0-9:]+}}], s{{[0-9]+}}, 1.0 +; GFX1064: v_cmp_ngt_f32_e64 s[{{[0-9:]+}}], s{{[0-9]+}}, 0 +; GFX1064: v_cmp_nlt_f32_e64 s[{{[0-9:]+}}], s{{[0-9]+}}, 1.0 +; GFX1064: s_or_b64 [[OR1:s\[[0-9:]+\]]], s[{{[0-9:]+}}], s[{{[0-9:]+}}] +; GFX1064: s_or_b64 [[OR2:s\[[0-9:]+\]]], [[OR1]], s[{{[0-9:]+}}] +; GFX1064: s_and_b64 vcc, exec, [[OR2]] +; GCN: s_cbranch_vccnz +define amdgpu_kernel void @test_preserve_condition_undef_flag(float %arg, i32 %arg1, float %arg2) #0 { +bb0: + %tmp = icmp sgt i32 %arg1, 4 + %undef = call i1 @llvm.amdgcn.class.f32(float undef, i32 undef) + %tmp4 = select i1 %undef, float %arg, float 1.000000e+00 + %tmp5 = fcmp ogt float %arg2, 0.000000e+00 + %tmp6 = fcmp olt float %arg2, 1.000000e+00 + %tmp7 = fcmp olt float %arg, %tmp4 + %tmp8 = and i1 %tmp5, %tmp6 + %tmp9 = and i1 %tmp8, %tmp7 + br i1 %tmp9, label %bb1, label %bb2 + +bb1: + store volatile i32 0, i32 addrspace(1)* undef + br label %bb2 + +bb2: + ret void +} + +; GCN-LABEL: {{^}}test_invert_true_phi_cond_break_loop: +; GFX1032: s_xor_b32 s{{[0-9]+}}, s{{[0-9]+}}, -1 +; GFX1032: s_or_b32 s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}} +; GFX1064: s_xor_b64 s[{{[0-9:]+}}], s[{{[0-9:]+}}], -1 +; GFX1064: s_or_b64 s[{{[0-9:]+}}], s[{{[0-9:]+}}], s[{{[0-9:]+}}] +define amdgpu_kernel void @test_invert_true_phi_cond_break_loop(i32 %arg) #0 { +bb: + %id = call i32 @llvm.amdgcn.workitem.id.x() + %tmp = sub i32 %id, %arg + br label %bb1 + +bb1: ; preds = %Flow, %bb + %lsr.iv = phi i32 [ undef, %bb ], [ %tmp2, %Flow ] + %lsr.iv.next = add i32 %lsr.iv, 1 + %cmp0 = icmp slt i32 %lsr.iv.next, 0 + br i1 %cmp0, label %bb4, label %Flow + +bb4: ; preds = %bb1 + %load = load volatile i32, i32 addrspace(1)* undef, align 4 + %cmp1 = icmp sge i32 %tmp, 
%load + br label %Flow + +Flow: ; preds = %bb4, %bb1 + %tmp2 = phi i32 [ %lsr.iv.next, %bb4 ], [ undef, %bb1 ] + %tmp3 = phi i1 [ %cmp1, %bb4 ], [ true, %bb1 ] + br i1 %tmp3, label %bb1, label %bb9 + +bb9: ; preds = %Flow + store volatile i32 7, i32 addrspace(3)* undef + ret void +} + +; GCN-LABEL: {{^}}test_movrels_extract_neg_offset_vgpr: +; GFX1032: v_cmp_eq_u32_e32 vcc_lo, 1, v{{[0-9]+}} +; GFX1032: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 1, vcc_lo +; GFX1032: v_cmp_ne_u32_e32 vcc_lo, 2, v{{[0-9]+}} +; GFX1032: v_cndmask_b32_e32 v{{[0-9]+}}, 2, v{{[0-9]+}}, vcc_lo +; GFX1032: v_cmp_ne_u32_e32 vcc_lo, 3, v{{[0-9]+}} +; GFX1032: v_cndmask_b32_e32 v{{[0-9]+}}, 3, v{{[0-9]+}}, vcc_lo +; GFX1064: v_cmp_eq_u32_e32 vcc, 1, v{{[0-9]+}} +; GFX1064: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 1, vcc +; GFX1064: v_cmp_ne_u32_e32 vcc, 2, v{{[0-9]+}} +; GFX1064: v_cndmask_b32_e32 v{{[0-9]+}}, 2, v{{[0-9]+}}, vcc +; GFX1064: v_cmp_ne_u32_e32 vcc, 3, v{{[0-9]+}} +; GFX1064: v_cndmask_b32_e32 v{{[0-9]+}}, 3, v{{[0-9]+}}, vcc +define amdgpu_kernel void @test_movrels_extract_neg_offset_vgpr(i32 addrspace(1)* %out) #0 { +entry: + %id = call i32 @llvm.amdgcn.workitem.id.x() #1 + %index = add i32 %id, -512 + %value = extractelement <4 x i32> <i32 0, i32 1, i32 2, i32 3>, i32 %index + store i32 %value, i32 addrspace(1)* %out + ret void +} + +; GCN-LABEL: {{^}}test_set_inactive: +; GFX1032: s_not_b32 exec_lo, exec_lo +; GFX1032: v_mov_b32_e32 {{v[0-9]+}}, 42 +; GFX1032: s_not_b32 exec_lo, exec_lo +; GFX1064: s_not_b64 exec, exec{{$}} +; GFX1064: v_mov_b32_e32 {{v[0-9]+}}, 42 +; GFX1064: s_not_b64 exec, exec{{$}} +define amdgpu_kernel void @test_set_inactive(i32 addrspace(1)* %out, i32 %in) #0 { + %tmp = call i32 @llvm.amdgcn.set.inactive.i32(i32 %in, i32 42) + store i32 %tmp, i32 addrspace(1)* %out + ret void +} + +; GCN-LABEL: {{^}}test_set_inactive_64: +; GFX1032: s_not_b32 exec_lo, exec_lo +; GFX1032: v_mov_b32_e32 {{v[0-9]+}}, 0 +; GFX1032: v_mov_b32_e32 {{v[0-9]+}}, 0 +; GFX1032: s_not_b32 exec_lo, exec_lo +; GFX1064: 
s_not_b64 exec, exec{{$}} +; GFX1064: v_mov_b32_e32 {{v[0-9]+}}, 0 +; GFX1064: v_mov_b32_e32 {{v[0-9]+}}, 0 +; GFX1064: s_not_b64 exec, exec{{$}} +define amdgpu_kernel void @test_set_inactive_64(i64 addrspace(1)* %out, i64 %in) #0 { + %tmp = call i64 @llvm.amdgcn.set.inactive.i64(i64 %in, i64 0) + store i64 %tmp, i64 addrspace(1)* %out + ret void +} + +; GCN-LABEL: {{^}}test_kill_i1_terminator_float: +; GFX1032: s_mov_b32 exec_lo, 0 +; GFX1064: s_mov_b64 exec, 0 +define amdgpu_ps void @test_kill_i1_terminator_float() #0 { + call void @llvm.amdgcn.kill(i1 false) + ret void +} + +; GCN-LABEL: {{^}}test_kill_i1_terminator_i1: +; GFX1032: s_or_b32 [[OR:s[0-9]+]], +; GFX1032: s_and_b32 exec_lo, exec_lo, [[OR]] +; GFX1064: s_or_b64 [[OR:s\[[0-9:]+\]]], +; GFX1064: s_and_b64 exec, exec, [[OR]] +define amdgpu_gs void @test_kill_i1_terminator_i1(i32 %a, i32 %b, i32 %c, i32 %d) #0 { + %c1 = icmp slt i32 %a, %b + %c2 = icmp slt i32 %c, %d + %x = or i1 %c1, %c2 + call void @llvm.amdgcn.kill(i1 %x) + ret void +} + +; GCN-LABEL: {{^}}test_loop_vcc: +; GFX1032: v_cmp_lt_f32_e32 vcc_lo, +; GFX1064: v_cmp_lt_f32_e32 vcc, +; GCN: s_cbranch_vccnz +define amdgpu_ps <4 x float> @test_loop_vcc(<4 x float> %in) #0 { +entry: + br label %loop + +loop: + %ctr.iv = phi float [ 0.0, %entry ], [ %ctr.next, %body ] + %c.iv = phi <4 x float> [ %in, %entry ], [ %c.next, %body ] + %cc = fcmp ogt float %ctr.iv, 7.0 + br i1 %cc, label %break, label %body + +body: + %c.iv0 = extractelement <4 x float> %c.iv, i32 0 + %c.next = call <4 x float> @llvm.amdgcn.image.sample.1d.v4f32.f32(i32 15, float %c.iv0, <8 x i32> undef, <4 x i32> undef, i1 0, i32 0, i32 0) + %ctr.next = fadd float %ctr.iv, 2.0 + br label %loop + +break: + ret <4 x float> %c.iv +} + +; GCN-LABEL: {{^}}test_wwm1: +; GFX1032: s_or_saveexec_b32 [[SAVE:s[0-9]+]], -1 +; GFX1032: s_mov_b32 exec_lo, [[SAVE]] +; GFX1064: s_or_saveexec_b64 [[SAVE:s\[[0-9]+:[0-9]+\]]], -1 +; GFX1064: s_mov_b64 exec, [[SAVE]] +define amdgpu_ps float 
@test_wwm1(i32 inreg %idx0, i32 inreg %idx1, float %src0, float %src1) { +main_body: + %out = fadd float %src0, %src1 + %out.0 = call float @llvm.amdgcn.wwm.f32(float %out) + ret float %out.0 +} + +; GCN-LABEL: {{^}}test_wwm2: +; GFX1032: v_cmp_gt_u32_e32 vcc_lo, 32, v{{[0-9]+}} +; GFX1032: s_and_saveexec_b32 [[SAVE1:s[0-9]+]], vcc_lo +; GFX1032: s_or_saveexec_b32 [[SAVE2:s[0-9]+]], -1 +; GFX1032: s_mov_b32 exec_lo, [[SAVE2]] +; GFX1032: s_or_b32 exec_lo, exec_lo, [[SAVE1]] +; GFX1064: v_cmp_gt_u32_e32 vcc, 32, v{{[0-9]+}} +; GFX1064: s_and_saveexec_b64 [[SAVE1:s\[[0-9:]+\]]], vcc{{$}} +; GFX1064: s_or_saveexec_b64 [[SAVE2:s\[[0-9:]+\]]], -1 +; GFX1064: s_mov_b64 exec, [[SAVE2]] +; GFX1064: s_or_b64 exec, exec, [[SAVE1]] +define amdgpu_ps float @test_wwm2(i32 inreg %idx) { +main_body: + ; use mbcnt to make sure the branch is divergent + %lo = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0) + %hi = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 %lo) + %cc = icmp uge i32 %hi, 32 + br i1 %cc, label %endif, label %if + +if: + %src = call float @llvm.amdgcn.buffer.load.f32(<4 x i32> undef, i32 %idx, i32 0, i1 0, i1 0) + %out = fadd float %src, %src + %out.0 = call float @llvm.amdgcn.wwm.f32(float %out) + %out.1 = fadd float %src, %out.0 + br label %endif + +endif: + %out.2 = phi float [ %out.1, %if ], [ 0.0, %main_body ] + ret float %out.2 +} + +; GCN-LABEL: {{^}}test_wqm1: +; GFX1032: s_mov_b32 [[ORIG:s[0-9]+]], exec_lo +; GFX1032: s_wqm_b32 exec_lo, exec_lo +; GFX1032: s_and_b32 exec_lo, exec_lo, [[ORIG]] +; GFX1064: s_mov_b64 [[ORIG:s\[[0-9]+:[0-9]+\]]], exec{{$}} +; GFX1064: s_wqm_b64 exec, exec{{$}} +; GFX1064: s_and_b64 exec, exec, [[ORIG]] +define amdgpu_ps <4 x float> @test_wqm1(i32 inreg, i32 inreg, i32 inreg, i32 inreg %m0, <8 x i32> inreg %rsrc, <4 x i32> inreg %sampler, <2 x float> %pos) #0 { +main_body: + %inst23 = extractelement <2 x float> %pos, i32 0 + %inst24 = extractelement <2 x float> %pos, i32 1 + %inst25 = tail call float @llvm.amdgcn.interp.p1(float 
%inst23, i32 0, i32 0, i32 %m0) + %inst26 = tail call float @llvm.amdgcn.interp.p2(float %inst25, float %inst24, i32 0, i32 0, i32 %m0) + %inst28 = tail call float @llvm.amdgcn.interp.p1(float %inst23, i32 1, i32 0, i32 %m0) + %inst29 = tail call float @llvm.amdgcn.interp.p2(float %inst28, float %inst24, i32 1, i32 0, i32 %m0) + %tex = call <4 x float> @llvm.amdgcn.image.sample.2d.v4f32.f32(i32 15, float %inst26, float %inst29, <8 x i32> %rsrc, <4 x i32> %sampler, i1 0, i32 0, i32 0) + ret <4 x float> %tex +} + +; GCN-LABEL: {{^}}test_wqm2: +; GFX1032: s_wqm_b32 exec_lo, exec_lo +; GFX1032: s_and_b32 exec_lo, exec_lo, s{{[0-9+]}} +; GFX1064: s_wqm_b64 exec, exec{{$}} +; GFX1064: s_and_b64 exec, exec, s[{{[0-9:]+}}] +define amdgpu_ps float @test_wqm2(i32 inreg %idx0, i32 inreg %idx1) #0 { +main_body: + %src0 = call float @llvm.amdgcn.buffer.load.f32(<4 x i32> undef, i32 %idx0, i32 0, i1 0, i1 0) + %src1 = call float @llvm.amdgcn.buffer.load.f32(<4 x i32> undef, i32 %idx1, i32 0, i1 0, i1 0) + %out = fadd float %src0, %src1 + %out.0 = bitcast float %out to i32 + %out.1 = call i32 @llvm.amdgcn.wqm.i32(i32 %out.0) + %out.2 = bitcast i32 %out.1 to float + ret float %out.2 +} + +; GCN-LABEL: {{^}}test_intr_fcmp_i64: +; GFX1032-DAG: v_mov_b32_e32 v[[V_HI:[0-9]+]], 0{{$}} +; GFX1032-DAG: v_cmp_eq_f32_e64 s[[C_LO:[0-9]+]], {{s[0-9]+}}, |{{[vs][0-9]+}}| +; GFX1032-DAG: v_mov_b32_e32 v[[V_LO:[0-9]+]], s[[C_LO]] +; GFX1064: v_cmp_eq_f32_e64 s{{\[}}[[C_LO:[0-9]+]]:[[C_HI:[0-9]+]]], {{s[0-9]+}}, |{{[vs][0-9]+}}| +; GFX1064-DAG: v_mov_b32_e32 v[[V_LO:[0-9]+]], s[[C_LO]] +; GFX1064-DAG: v_mov_b32_e32 v[[V_HI:[0-9]+]], s[[C_HI]] +; GCN: store_dwordx2 v[{{[0-9:]+}}], v{{\[}}[[V_LO]]:[[V_HI]]], +define amdgpu_kernel void @test_intr_fcmp_i64(i64 addrspace(1)* %out, float %src, float %a) { + %temp = call float @llvm.fabs.f32(float %a) + %result = call i64 @llvm.amdgcn.fcmp.i64.f32(float %src, float %temp, i32 1) + store i64 %result, i64 addrspace(1)* %out + ret void +} + +; GCN-LABEL: 
{{^}}test_intr_icmp_i64: +; GFX1032-DAG: v_mov_b32_e32 v[[V_HI:[0-9]+]], 0{{$}} +; GFX1032-DAG: v_cmp_eq_u32_e64 [[C_LO:vcc_lo|s[0-9]+]], 0x64, {{s[0-9]+}} +; GFX1032-DAG: v_mov_b32_e32 v[[V_LO:[0-9]+]], [[C_LO]] +; GFX1064: v_cmp_eq_u32_e64 s{{\[}}[[C_LO:[0-9]+]]:[[C_HI:[0-9]+]]], 0x64, {{s[0-9]+}} +; GFX1064-DAG: v_mov_b32_e32 v[[V_LO:[0-9]+]], s[[C_LO]] +; GFX1064-DAG: v_mov_b32_e32 v[[V_HI:[0-9]+]], s[[C_HI]] +; GCN: store_dwordx2 v[{{[0-9:]+}}], v{{\[}}[[V_LO]]:[[V_HI]]], +define amdgpu_kernel void @test_intr_icmp_i64(i64 addrspace(1)* %out, i32 %src) { + %result = call i64 @llvm.amdgcn.icmp.i64.i32(i32 %src, i32 100, i32 32) + store i64 %result, i64 addrspace(1)* %out + ret void +} + +; GCN-LABEL: {{^}}test_intr_fcmp_i32: +; GFX1032-DAG: v_cmp_eq_f32_e64 s[[C_LO:[0-9]+]], {{s[0-9]+}}, |{{[vs][0-9]+}}| +; GFX1032-DAG: v_mov_b32_e32 v[[V_LO:[0-9]+]], s[[C_LO]] +; GFX1064: v_cmp_eq_f32_e64 s{{\[}}[[C_LO:[0-9]+]]:[[C_HI:[0-9]+]]], {{s[0-9]+}}, |{{[vs][0-9]+}}| +; GFX1064-DAG: v_mov_b32_e32 v[[V_LO:[0-9]+]], s[[C_LO]] +; GCN: store_dword v[{{[0-9:]+}}], v[[V_LO]], +define amdgpu_kernel void @test_intr_fcmp_i32(i32 addrspace(1)* %out, float %src, float %a) { + %temp = call float @llvm.fabs.f32(float %a) + %result = call i32 @llvm.amdgcn.fcmp.i32.f32(float %src, float %temp, i32 1) + store i32 %result, i32 addrspace(1)* %out + ret void +} + +; GCN-LABEL: {{^}}test_intr_icmp_i32: +; GFX1032-DAG: v_cmp_eq_u32_e64 s[[C_LO:[0-9]+]], 0x64, {{s[0-9]+}} +; GFX1032-DAG: v_mov_b32_e32 v[[V_LO:[0-9]+]], s[[C_LO]]{{$}} +; GFX1064: v_cmp_eq_u32_e64 s{{\[}}[[C_LO:[0-9]+]]:{{[0-9]+}}], 0x64, {{s[0-9]+}} +; GFX1064-DAG: v_mov_b32_e32 v[[V_LO:[0-9]+]], s[[C_LO]]{{$}} +; GCN: store_dword v[{{[0-9:]+}}], v[[V_LO]], +define amdgpu_kernel void @test_intr_icmp_i32(i32 addrspace(1)* %out, i32 %src) { + %result = call i32 @llvm.amdgcn.icmp.i32.i32(i32 %src, i32 100, i32 32) + store i32 %result, i32 addrspace(1)* %out + ret void +} + +; GCN-LABEL: {{^}}test_wqm_vote: +; GFX1032: 
v_cmp_neq_f32_e32 vcc_lo, 0 +; GFX1032: s_wqm_b32 [[WQM:s[0-9]+]], vcc_lo +; GFX1032: s_and_b32 exec_lo, exec_lo, [[WQM]] +; GFX1064: v_cmp_neq_f32_e32 vcc, 0 +; GFX1064: s_wqm_b64 [[WQM:s\[[0-9:]+\]]], vcc{{$}} +; GFX1064: s_and_b64 exec, exec, [[WQM]] +define amdgpu_ps void @test_wqm_vote(float %a) { + %c1 = fcmp une float %a, 0.0 + %c2 = call i1 @llvm.amdgcn.wqm.vote(i1 %c1) + call void @llvm.amdgcn.kill(i1 %c2) + ret void +} + +; GCN-LABEL: {{^}}test_branch_true: +; GFX1032: s_and_b32 vcc_lo, exec_lo, -1 +; GFX1064: s_and_b64 vcc, exec, -1 +define amdgpu_kernel void @test_branch_true() #2 { +entry: + br i1 true, label %for.end, label %for.body.lr.ph + +for.body.lr.ph: ; preds = %entry + br label %for.body + +for.body: ; preds = %for.body, %for.body.lr.ph + br i1 undef, label %for.end, label %for.body + +for.end: ; preds = %for.body, %entry + ret void +} + +; GCN-LABEL: {{^}}test_ps_live: +; GFX1032: s_mov_b32 [[C:s[0-9]+]], exec_lo +; GFX1064: s_mov_b64 [[C:s\[[0-9:]+\]]], exec{{$}} +; GCN: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 1, [[C]] +define amdgpu_ps float @test_ps_live() #0 { + %live = call i1 @llvm.amdgcn.ps.live() + %live.32 = zext i1 %live to i32 + %r = bitcast i32 %live.32 to float + ret float %r +} + +; GCN-LABEL: {{^}}test_vccnz_ifcvt_triangle64: +; GFX1032: v_cmp_neq_f64_e64 [[C:s[0-9]+]], s[{{[0-9:]+}}], 1.0 +; GFX1032: s_and_b32 vcc_lo, exec_lo, [[C]] +; GFX1064: v_cmp_neq_f64_e64 [[C:s\[[0-9:]+\]]], s[{{[0-9:]+}}], 1.0 +; GFX1064: s_and_b64 vcc, exec, [[C]] +define amdgpu_kernel void @test_vccnz_ifcvt_triangle64(double addrspace(1)* %out, double addrspace(1)* %in) #0 { +entry: + %v = load double, double addrspace(1)* %in + %cc = fcmp oeq double %v, 1.000000e+00 + br i1 %cc, label %if, label %endif + +if: + %u = fadd double %v, %v + br label %endif + +endif: + %r = phi double [ %v, %entry ], [ %u, %if ] + store double %r, double addrspace(1)* %out + ret void +} + +; GCN-LABEL: {{^}}test_init_exec: +; GFX1032: s_mov_b32 exec_lo, 0x12345 +; GFX1064: 
s_mov_b64 exec, 0x12345 +; GCN: v_add_f32_e32 v0, +define amdgpu_ps float @test_init_exec(float %a, float %b) { +main_body: + %s = fadd float %a, %b + call void @llvm.amdgcn.init.exec(i64 74565) + ret float %s +} + +; GCN-LABEL: {{^}}test_init_exec_from_input: +; GCN: s_bfe_u32 s0, s3, 0x70008 +; GFX1032: s_bfm_b32 exec_lo, s0, 0 +; GFX1032: s_cmp_eq_u32 s0, 32 +; GFX1032: s_cmov_b32 exec_lo, -1 +; GFX1064: s_bfm_b64 exec, s0, 0 +; GFX1064: s_cmp_eq_u32 s0, 64 +; GFX1064: s_cmov_b64 exec, -1 +; GCN: v_add_f32_e32 v0, +define amdgpu_ps float @test_init_exec_from_input(i32 inreg, i32 inreg, i32 inreg, i32 inreg %count, float %a, float %b) { +main_body: + %s = fadd float %a, %b + call void @llvm.amdgcn.init.exec.from.input(i32 %count, i32 8) + ret float %s +} + +; GCN-LABEL: {{^}}test_vgprblocks_w32_attr: +; Test that the wave size can be overridden in function attributes and that the block size is correct as a result +; GFX10DEFWAVE: ; VGPRBlocks: 1 +define amdgpu_gs float @test_vgprblocks_w32_attr(float %a, float %b, float %c, float %d, float %e, + float %f, float %g, float %h, float %i, float %j, float %k, float %l) #3 { +main_body: + %s = fadd float %a, %b + %s.1 = fadd float %s, %c + %s.2 = fadd float %s.1, %d + %s.3 = fadd float %s.2, %e + %s.4 = fadd float %s.3, %f + %s.5 = fadd float %s.4, %g + %s.6 = fadd float %s.5, %h + %s.7 = fadd float %s.6, %i + %s.8 = fadd float %s.7, %j + %s.9 = fadd float %s.8, %k + %s.10 = fadd float %s.9, %l + ret float %s.10 +} + +; GCN-LABEL: {{^}}test_vgprblocks_w64_attr: +; Test that the wave size can be overridden in function attributes and that the block size is correct as a result +; GFX10DEFWAVE: ; VGPRBlocks: 2 +define amdgpu_gs float @test_vgprblocks_w64_attr(float %a, float %b, float %c, float %d, float %e, + float %f, float %g, float %h, float %i, float %j, float %k, float %l) #4 { +main_body: + %s = fadd float %a, %b + %s.1 = fadd float %s, %c + %s.2 = fadd float %s.1, %d + %s.3 = fadd float %s.2, %e + %s.4 = fadd float 
%s.3, %f + %s.5 = fadd float %s.4, %g + %s.6 = fadd float %s.5, %h + %s.7 = fadd float %s.6, %i + %s.8 = fadd float %s.7, %j + %s.9 = fadd float %s.8, %k + %s.10 = fadd float %s.9, %l + ret float %s.10 +} + +; GCN-LABEL: {{^}}icmp64: +; GFX1032: v_cmp_eq_u32_e32 vcc_lo, 0, v +; GFX1064: v_cmp_eq_u32_e32 vcc, 0, v +define amdgpu_kernel void @icmp64(i32 %n, i32 %s) { +entry: + %id = tail call i32 @llvm.amdgcn.workitem.id.x() + %mul4 = mul nsw i32 %s, %n + %cmp = icmp slt i32 0, %mul4 + br label %if.end + +if.end: ; preds = %entry + %rem = urem i32 %id, %s + %icmp = tail call i64 @llvm.amdgcn.icmp.i64.i32(i32 %rem, i32 0, i32 32) + %shr = lshr i64 %icmp, 1 + %notmask = shl nsw i64 -1, 0 + %and = and i64 %notmask, %shr + %or = or i64 %and, -9223372036854775808 + %cttz = tail call i64 @llvm.cttz.i64(i64 %or, i1 true) + %cast = trunc i64 %cttz to i32 + %cmp3 = icmp ugt i32 10, %cast + %cmp6 = icmp ne i32 %rem, 0 + %brmerge = or i1 %cmp6, %cmp3 + br i1 %brmerge, label %if.end2, label %if.then + +if.then: ; preds = %if.end + unreachable + +if.end2: ; preds = %if.end + ret void +} + +; GCN-LABEL: {{^}}fcmp64: +; GFX1032: v_cmp_eq_f32_e32 vcc_lo, 0, v +; GFX1064: v_cmp_eq_f32_e32 vcc, 0, v +define amdgpu_kernel void @fcmp64(float %n, float %s) { +entry: + %id = tail call i32 @llvm.amdgcn.workitem.id.x() + %id.f = uitofp i32 %id to float + %mul4 = fmul float %s, %n + %cmp = fcmp ult float 0.0, %mul4 + br label %if.end + +if.end: ; preds = %entry + %rem.f = frem float %id.f, %s + %fcmp = tail call i64 @llvm.amdgcn.fcmp.i64.f32(float %rem.f, float 0.0, i32 1) + %shr = lshr i64 %fcmp, 1 + %notmask = shl nsw i64 -1, 0 + %and = and i64 %notmask, %shr + %or = or i64 %and, -9223372036854775808 + %cttz = tail call i64 @llvm.cttz.i64(i64 %or, i1 true) + %cast = trunc i64 %cttz to i32 + %cmp3 = icmp ugt i32 10, %cast + %cmp6 = fcmp one float %rem.f, 0.0 + %brmerge = or i1 %cmp6, %cmp3 + br i1 %brmerge, label %if.end2, label %if.then + +if.then: ; preds = %if.end + unreachable + 
+if.end2: ; preds = %if.end + ret void +} + +; GCN-LABEL: {{^}}icmp32: +; GFX1032: v_cmp_eq_u32_e32 vcc_lo, 0, v +; GFX1064: v_cmp_eq_u32_e32 vcc, 0, v +define amdgpu_kernel void @icmp32(i32 %n, i32 %s) { +entry: + %id = tail call i32 @llvm.amdgcn.workitem.id.x() + %mul4 = mul nsw i32 %s, %n + %cmp = icmp slt i32 0, %mul4 + br label %if.end + +if.end: ; preds = %entry + %rem = urem i32 %id, %s + %icmp = tail call i32 @llvm.amdgcn.icmp.i32.i32(i32 %rem, i32 0, i32 32) + %shr = lshr i32 %icmp, 1 + %notmask = shl nsw i32 -1, 0 + %and = and i32 %notmask, %shr + %or = or i32 %and, 2147483648 + %cttz = tail call i32 @llvm.cttz.i32(i32 %or, i1 true) + %cmp3 = icmp ugt i32 10, %cttz + %cmp6 = icmp ne i32 %rem, 0 + %brmerge = or i1 %cmp6, %cmp3 + br i1 %brmerge, label %if.end2, label %if.then + +if.then: ; preds = %if.end + unreachable + +if.end2: ; preds = %if.end + ret void +} + +; GCN-LABEL: {{^}}fcmp32: +; GFX1032: v_cmp_eq_f32_e32 vcc_lo, 0, v +; GFX1064: v_cmp_eq_f32_e32 vcc, 0, v +define amdgpu_kernel void @fcmp32(float %n, float %s) { +entry: + %id = tail call i32 @llvm.amdgcn.workitem.id.x() + %id.f = uitofp i32 %id to float + %mul4 = fmul float %s, %n + %cmp = fcmp ult float 0.0, %mul4 + br label %if.end + +if.end: ; preds = %entry + %rem.f = frem float %id.f, %s + %fcmp = tail call i32 @llvm.amdgcn.fcmp.i32.f32(float %rem.f, float 0.0, i32 1) + %shr = lshr i32 %fcmp, 1 + %notmask = shl nsw i32 -1, 0 + %and = and i32 %notmask, %shr + %or = or i32 %and, 2147483648 + %cttz = tail call i32 @llvm.cttz.i32(i32 %or, i1 true) + %cmp3 = icmp ugt i32 10, %cttz + %cmp6 = fcmp one float %rem.f, 0.0 + %brmerge = or i1 %cmp6, %cmp3 + br i1 %brmerge, label %if.end2, label %if.then + +if.then: ; preds = %if.end + unreachable + +if.end2: ; preds = %if.end + ret void +} + +declare void @external_void_func_void() #1 + +; Test save/restore of VGPR needed for SGPR spilling. 
+ +; GCN-LABEL: {{^}}callee_no_stack_with_call: +; GCN: s_waitcnt +; GCN: s_mov_b32 s5, s32 +; GFX1064: s_add_u32 s32, s32, 0x400 +; GFX1032: s_add_u32 s32, s32, 0x200 + +; GFX1064: s_or_saveexec_b64 [[COPY_EXEC0:s\[[0-9]+:[0-9]+\]]], -1{{$}} +; GFX1032: s_or_saveexec_b32 [[COPY_EXEC0:s[0-9]]], -1{{$}} + +; GCN-NEXT: buffer_store_dword v32, off, s[0:3], s5 ; 4-byte Folded Spill + +; GFX1064-NEXT: s_mov_b64 exec, [[COPY_EXEC0]] +; GFX1032-NEXT: s_mov_b32 exec_lo, [[COPY_EXEC0]] + +; GCN-DAG: v_writelane_b32 v32, s33, 0 +; GCN-DAG: v_writelane_b32 v32, s34, 1 +; GCN-DAG: s_mov_b32 s33, s5 +; GCN: s_swappc_b64 +; GCN-DAG: s_mov_b32 s5, s33 +; GCN-DAG: v_readlane_b32 s34, v32, 1 +; GCN-DAG: v_readlane_b32 s33, v32, 0 + +; GFX1064: s_or_saveexec_b64 [[COPY_EXEC1:s\[[0-9]+:[0-9]+\]]], -1{{$}} +; GFX1032: s_or_saveexec_b32 [[COPY_EXEC1:s[0-9]]], -1{{$}} +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s5 ; 4-byte Folded Reload +; GFX1064-NEXT: s_mov_b64 exec, [[COPY_EXEC1]] +; GFX1032-NEXT: s_mov_b32 exec_lo, [[COPY_EXEC1]] + +; GFX1064: s_sub_u32 s32, s32, 0x400 +; GFX1032: s_sub_u32 s32, s32, 0x200 +; GCN: s_setpc_b64 +define void @callee_no_stack_with_call() #1 { + call void @external_void_func_void() + ret void +} + + +declare i32 @llvm.amdgcn.workitem.id.x() +declare float @llvm.fabs.f32(float) +declare { float, i1 } @llvm.amdgcn.div.scale.f32(float, float, i1) +declare { double, i1 } @llvm.amdgcn.div.scale.f64(double, double, i1) +declare float @llvm.amdgcn.div.fmas.f32(float, float, float, i1) +declare double @llvm.amdgcn.div.fmas.f64(double, double, double, i1) +declare i1 @llvm.amdgcn.class.f32(float, i32) +declare i32 @llvm.amdgcn.set.inactive.i32(i32, i32) +declare i64 @llvm.amdgcn.set.inactive.i64(i64, i64) +declare <4 x float> @llvm.amdgcn.image.sample.1d.v4f32.f32(i32, float, <8 x i32>, <4 x i32>, i1, i32, i32) +declare <4 x float> @llvm.amdgcn.image.sample.2d.v4f32.f32(i32, float, float, <8 x i32>, <4 x i32>, i1, i32, i32) +declare float 
@llvm.amdgcn.wwm.f32(float) +declare i32 @llvm.amdgcn.wqm.i32(i32) +declare float @llvm.amdgcn.interp.p1(float, i32, i32, i32) +declare float @llvm.amdgcn.interp.p2(float, float, i32, i32, i32) +declare float @llvm.amdgcn.buffer.load.f32(<4 x i32>, i32, i32, i1, i1) +declare i32 @llvm.amdgcn.mbcnt.lo(i32, i32) +declare i32 @llvm.amdgcn.mbcnt.hi(i32, i32) +declare i64 @llvm.amdgcn.fcmp.i64.f32(float, float, i32) +declare i64 @llvm.amdgcn.icmp.i64.i32(i32, i32, i32) +declare i32 @llvm.amdgcn.fcmp.i32.f32(float, float, i32) +declare i32 @llvm.amdgcn.icmp.i32.i32(i32, i32, i32) +declare void @llvm.amdgcn.kill(i1) +declare i1 @llvm.amdgcn.wqm.vote(i1) +declare i1 @llvm.amdgcn.ps.live() +declare void @llvm.amdgcn.init.exec(i64) +declare void @llvm.amdgcn.init.exec.from.input(i32, i32) +declare i64 @llvm.cttz.i64(i64, i1) +declare i32 @llvm.cttz.i32(i32, i1) + +attributes #0 = { nounwind readnone speculatable } +attributes #1 = { nounwind } +attributes #2 = { nounwind readnone optnone noinline } +attributes #3 = { "target-features"="+wavefrontsize32" } +attributes #4 = { "target-features"="+wavefrontsize64" } Index: test/CodeGen/AMDGPU/xor3.ll =================================================================== --- test/CodeGen/AMDGPU/xor3.ll +++ test/CodeGen/AMDGPU/xor3.ll @@ -16,6 +16,7 @@ ; GFX10-LABEL: xor3: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_xor3_b32 v0, v0, v1, v2 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: ; return to shader part epilog %x = xor i32 %a, %b %result = xor i32 %x, %c @@ -33,6 +34,7 @@ ; GFX10-LABEL: xor3_vgpr_b: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_xor3_b32 v0, s2, v0, s3 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: ; return to shader part epilog %x = xor i32 %a, %b %result = xor i32 %x, %c @@ -50,6 +52,7 @@ ; GFX10-LABEL: xor3_vgpr_all2: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_xor3_b32 v0, v1, v2, v0 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: ; return to shader part epilog %x = xor i32 %b, %c %result = xor i32 %a, %x @@ -67,6 +70,7 @@ 
; GFX10-LABEL: xor3_vgpr_bc: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_xor3_b32 v0, s2, v0, v1 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: ; return to shader part epilog %x = xor i32 %a, %b %result = xor i32 %x, %c @@ -84,6 +88,7 @@ ; GFX10-LABEL: xor3_vgpr_const: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_xor3_b32 v0, v0, v1, 16 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: ; return to shader part epilog %x = xor i32 %a, %b %result = xor i32 %x, 16 @@ -102,6 +107,7 @@ ; GFX10-LABEL: xor3_multiuse_outer: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_xor3_b32 v0, v0, v1, v2 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: v_mul_lo_u32 v1, v0, v3 ; GFX10-NEXT: ; return to shader part epilog %inner = xor i32 %a, %b @@ -123,6 +129,7 @@ ; GFX10-LABEL: xor3_multiuse_inner: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_xor_b32_e32 v0, v0, v1 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: v_xor_b32_e32 v1, v0, v2 ; GFX10-NEXT: ; return to shader part epilog %inner = xor i32 %a, %b @@ -151,6 +158,7 @@ ; GFX10-NEXT: v_add_f32_e64 v1, s3, 2.0 ; GFX10-NEXT: v_add_f32_e64 v2, s2, 1.0 ; GFX10-NEXT: v_add_f32_e64 v0, 0x40400000, s4 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: v_xor_b32_e32 v1, v2, v1 ; GFX10-NEXT: v_xor_b32_e32 v0, v1, v0 ; GFX10-NEXT: ; return to shader part epilog Index: test/CodeGen/AMDGPU/xor_add.ll =================================================================== --- test/CodeGen/AMDGPU/xor_add.ll +++ test/CodeGen/AMDGPU/xor_add.ll @@ -22,6 +22,7 @@ ; GFX10-LABEL: xor_add: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_xad_u32 v0, v0, v1, v2 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: ; return to shader part epilog %x = xor i32 %a, %b %result = add i32 %x, %c @@ -46,6 +47,7 @@ ; GFX10-LABEL: xor_add_vgpr_a: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_xad_u32 v0, v0, s2, s3 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: ; return to shader part epilog %x = xor i32 %a, %b %result = add i32 %x, %c @@ -68,6 +70,7 @@ ; GFX10-LABEL: xor_add_vgpr_all: ; GFX10: ; %bb.0: ; 
GFX10-NEXT: v_xad_u32 v0, v0, v1, v2 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: ; return to shader part epilog %x = xor i32 %a, %b %result = add i32 %x, %c @@ -90,6 +93,7 @@ ; GFX10-LABEL: xor_add_vgpr_ab: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_xad_u32 v0, v0, v1, s2 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: ; return to shader part epilog %x = xor i32 %a, %b %result = add i32 %x, %c @@ -112,6 +116,7 @@ ; GFX10-LABEL: xor_add_vgpr_const: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_xad_u32 v0, v0, 3, v1 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: ; return to shader part epilog %x = xor i32 %a, 3 %result = add i32 %x, %b Index: test/MC/AMDGPU/gfx10-constant-bus.s =================================================================== --- test/MC/AMDGPU/gfx10-constant-bus.s +++ test/MC/AMDGPU/gfx10-constant-bus.s @@ -33,3 +33,13 @@ v_div_fmas_f64 v[5:6], v[1:2], s[2:3], 0x123456 // GFX10-ERR: error: invalid operand (violates constant bus restrictions) + +//----------------------------------------------------------------------------------------- +// v_mad_u64_u32 has operands of different sizes. +// When these operands are literals, they are counted as 2 scalar values even if literals are identical. 
+ +v_mad_u64_u32 v[5:6], s12, v1, 0x12345678, 0x12345678 +// GFX10: v_mad_u64_u32 v[5:6], s12, v1, 0x12345678, 0x12345678 ; encoding: [0x05,0x0c,0x76,0xd5,0x01,0xff,0xfd,0x03,0x78,0x56,0x34,0x12] + +v_mad_u64_u32 v[5:6], s12, s1, 0x12345678, 0x12345678 +// GFX10-ERR: error: invalid operand (violates constant bus restrictions) Index: test/MC/AMDGPU/wave32.s =================================================================== --- /dev/null +++ test/MC/AMDGPU/wave32.s @@ -0,0 +1,412 @@ +// RUN: not llvm-mc -arch=amdgcn -mcpu=gfx1010 -mattr=+wavefrontsize32,-wavefrontsize64 -show-encoding %s | FileCheck -check-prefix=GFX1032 %s +// RUN: not llvm-mc -arch=amdgcn -mcpu=gfx1010 -mattr=-wavefrontsize32,+wavefrontsize64 -show-encoding %s | FileCheck -check-prefix=GFX1064 %s +// RUN: not llvm-mc -arch=amdgcn -mcpu=gfx1010 -mattr=+wavefrontsize32,-wavefrontsize64 -show-encoding %s 2>&1 | FileCheck -check-prefix=GFX1032-ERR %s +// RUN: not llvm-mc -arch=amdgcn -mcpu=gfx1010 -mattr=-wavefrontsize32,+wavefrontsize64 -show-encoding %s 2>&1 | FileCheck -check-prefix=GFX1064-ERR %s + +v_cmp_ge_i32_e32 s0, v0 +// GFX1032: v_cmp_ge_i32_e32 vcc_lo, s0, v0 ; encoding: [0x00,0x00,0x0c,0x7d] +// GFX1064: v_cmp_ge_i32_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x0c,0x7d] + +v_cmp_ge_i32_e32 vcc_lo, s0, v1 +// GFX1032: v_cmp_ge_i32_e32 vcc_lo, s0, v1 ; encoding: [0x00,0x02,0x0c,0x7d] +// GFX1064-ERR: error: instruction not supported on this GPU + +v_cmp_ge_i32_e32 vcc, s0, v2 +// GFX1032-ERR: error: instruction not supported on this GPU +// GFX1064: v_cmp_ge_i32_e32 vcc, s0, v2 ; encoding: [0x00,0x04,0x0c,0x7d] + +v_cmp_le_f16_sdwa s0, v3, v4 src0_sel:WORD_1 src1_sel:DWORD +// GFX1032: v_cmp_le_f16_sdwa s0, v3, v4 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x08,0x96,0x7d,0x03,0x80,0x05,0x06] +// GFX1064-ERR: error: invalid operand for instruction + +v_cmp_le_f16_sdwa s[0:1], v3, v4 src0_sel:WORD_1 src1_sel:DWORD +// GFX1032-ERR: error: invalid operand for instruction +// GFX1064: 
v_cmp_le_f16_sdwa s[0:1], v3, v4 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x08,0x96,0x7d,0x03,0x80,0x05,0x06] + +v_cmp_class_f32_e32 vcc_lo, s0, v0 +// GFX1032: v_cmp_class_f32_e32 vcc_lo, s0, v0 ; encoding: [0x00,0x00,0x10,0x7d] +// GFX1064-ERR: error: instruction not supported on this GPU + +v_cmp_class_f32_e32 vcc, s0, v0 +// GFX1032-ERR: error: instruction not supported on this GPU +// GFX1064: v_cmp_class_f32_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x10,0x7d] + +// TODO-GFX10: The following encoding does not match SP3's encoding, which is: +// [0xf9,0x04,0x1e,0x7d,0x01,0x06,0x06,0x06] +v_cmp_class_f16_sdwa vcc_lo, v1, v2 src0_sel:DWORD src1_sel:DWORD +// GFX1032: v_cmp_class_f16_sdwa vcc_lo, v1, v2 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x04,0x1e,0x7d,0x01,0x00,0x06,0x06] +// GFX1064-ERR: error: invalid operand for instruction + +// TODO-GFX10: The following encoding does not match SP3's encoding, which is: +// [0xf9,0x04,0x1e,0x7d,0x01,0x06,0x06,0x06] +v_cmp_class_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD +// GFX1032-ERR: error: instruction not supported on this GPU +// GFX1064: v_cmp_class_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x04,0x1e,0x7d,0x01,0x00,0x06,0x06] + +v_cmp_class_f16_sdwa s0, v1, v2 src0_sel:DWORD src1_sel:DWORD +// GFX1032: v_cmp_class_f16_sdwa s0, v1, v2 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x04,0x1e,0x7d,0x01,0x80,0x06,0x06] +// GFX1064-ERR: error: invalid operand for instruction + +v_cmp_class_f16_sdwa s[0:1], v1, v2 src0_sel:DWORD src1_sel:DWORD +// GFX1032-ERR: error: invalid operand for instruction +// GFX1064: v_cmp_class_f16_sdwa s[0:1], v1, v2 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x04,0x1e,0x7d,0x01,0x80,0x06,0x06] + +v_cndmask_b32_e32 v1, v2, v3, +// GFX1032: v_cndmask_b32_e32 v1, v2, v3, vcc_lo ; encoding: [0x02,0x07,0x02,0x02] +// GFX1064: v_cndmask_b32_e32 v1, v2, v3, vcc ; encoding: [0x02,0x07,0x02,0x02] + +v_cndmask_b32_e32 v1, v2, v3, vcc_lo +// 
GFX1032: v_cndmask_b32_e32 v1, v2, v3, vcc_lo ; encoding: [0x02,0x07,0x02,0x02] +// GFX1064-ERR: error: instruction not supported on this GPU + +v_cndmask_b32_e32 v1, v2, v3, vcc +// GFX1032-ERR: error: instruction not supported on this GPU +// GFX1064: v_cndmask_b32_e32 v1, v2, v3, vcc ; encoding: [0x02,0x07,0x02,0x02] + +v_add_co_u32_e32 v2, vcc_lo, s0, v2 +// GFX1032-ERR: error: instruction not supported on this GPU +// GFX1064-ERR: error: instruction not supported on this GPU + +v_add_co_u32_e32 v2, vcc, s0, v2 +// GFX1032-ERR: error: instruction not supported on this GPU +// GFX1064-ERR: error: instruction not supported on this GPU + +v_add_co_ci_u32_e32 v3, vcc_lo, v3, v4, vcc_lo +// GFX1032: v_add_co_ci_u32_e32 v3, vcc_lo, v3, v4, vcc_lo ; encoding: [0x03,0x09,0x06,0x50] +// GFX1064-ERR: error: instruction not supported on this GPU + +v_add_co_ci_u32_e32 v3, vcc, v3, v4, vcc +// GFX1032-ERR: error: instruction not supported on this GPU +// GFX1064: v_add_co_ci_u32_e32 v3, vcc, v3, v4, vcc ; encoding: [0x03,0x09,0x06,0x50] + +v_add_co_ci_u32_e32 v3, v3, v4 +// GFX1032: v_add_co_ci_u32_e32 v3, vcc_lo, v3, v4, vcc_lo ; encoding: [0x03,0x09,0x06,0x50] +// GFX1064: v_add_co_ci_u32_e32 v3, vcc, v3, v4, vcc ; encoding: [0x03,0x09,0x06,0x50] + +v_sub_co_u32_e32 v2, vcc_lo, s0, v2 +// GFX1032-ERR: error: instruction not supported on this GPU +// GFX1064-ERR: error: instruction not supported on this GPU + +v_sub_co_u32_e32 v2, vcc, s0, v2 +// GFX1032-ERR: error: instruction not supported on this GPU +// GFX1064-ERR: error: instruction not supported on this GPU + +v_subrev_co_u32_e32 v2, vcc_lo, s0, v2 +// GFX1032-ERR: error: instruction not supported on this GPU +// GFX1064-ERR: error: instruction not supported on this GPU + +v_subrev_co_u32_e32 v2, vcc, s0, v2 +// GFX1032-ERR: error: instruction not supported on this GPU +// GFX1064-ERR: error: instruction not supported on this GPU + +v_sub_co_ci_u32_e32 v3, vcc_lo, v3, v4, vcc_lo +// GFX1032: v_sub_co_ci_u32_e32 v3, 
vcc_lo, v3, v4, vcc_lo ; encoding: [0x03,0x09,0x06,0x52] +// GFX1064-ERR: error: instruction not supported on this GPU + +v_sub_co_ci_u32_e32 v3, vcc, v3, v4, vcc +// GFX1032-ERR: error: instruction not supported on this GPU +// GFX1064: v_sub_co_ci_u32_e32 v3, vcc, v3, v4, vcc ; encoding: [0x03,0x09,0x06,0x52] + +v_sub_co_ci_u32_e32 v3, v3, v4 +// GFX1032: v_sub_co_ci_u32_e32 v3, vcc_lo, v3, v4, vcc_lo ; encoding: [0x03,0x09,0x06,0x52] +// GFX1064: v_sub_co_ci_u32_e32 v3, vcc, v3, v4, vcc ; encoding: [0x03,0x09,0x06,0x52] + +v_subrev_co_ci_u32_e32 v1, vcc_lo, 0, v1, vcc_lo +// GFX1032: v_subrev_co_ci_u32_e32 v1, vcc_lo, 0, v1, vcc_lo ; encoding: [0x80,0x02,0x02,0x54] +// GFX1064-ERR: error: instruction not supported on this GPU + +v_subrev_co_ci_u32_e32 v1, vcc, 0, v1, vcc +// GFX1032-ERR: error: instruction not supported on this GPU +// GFX1064: v_subrev_co_ci_u32_e32 v1, vcc, 0, v1, vcc ; encoding: [0x80,0x02,0x02,0x54] + +v_subrev_co_ci_u32_e32 v1, 0, v1 +// GFX1032: v_subrev_co_ci_u32_e32 v1, vcc_lo, 0, v1, vcc_lo ; encoding: [0x80,0x02,0x02,0x54] +// GFX1064: v_subrev_co_ci_u32_e32 v1, vcc, 0, v1, vcc ; encoding: [0x80,0x02,0x02,0x54] + +v_add_co_u32_sdwa v0, vcc_lo, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +// GFX1032-ERR: error: invalid operand +// GFX1064-ERR: error: invalid operand + +v_add_co_u32_sdwa v0, vcc, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +// GFX1032-ERR: error: instruction not supported +// GFX1064-ERR: error: instruction not supported + +v_add_co_u32_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +// GFX1032-ERR: error: not a valid operand +// GFX1064-ERR: error: not a valid operand + +v_add_co_ci_u32_sdwa v1, vcc_lo, v1, v4, vcc_lo dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +// GFX1032: v_add_co_ci_u32_sdwa v1, vcc_lo, v1, v4, vcc_lo dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; encoding: 
[0xf9,0x08,0x02,0x50,0x01,0x06,0x00,0x06] +// GFX1064-ERR: error: instruction not supported on this GPU + +v_add_co_ci_u32_sdwa v1, vcc, v1, v4, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +// GFX1032-ERR: error: instruction not supported on this GPU +// GFX1064: v_add_co_ci_u32_sdwa v1, vcc, v1, v4, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x08,0x02,0x50,0x01,0x06,0x00,0x06] + +v_add_co_ci_u32_sdwa v1, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +// GFX1032: v_add_co_ci_u32_sdwa v1, vcc_lo, v1, v4, vcc_lo dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x08,0x02,0x50,0x01,0x06,0x00,0x06] +// GFX1064: v_add_co_ci_u32_sdwa v1, vcc, v1, v4, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x08,0x02,0x50,0x01,0x06,0x00,0x06] + +v_sub_co_u32_sdwa v0, vcc_lo, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +// GFX1032-ERR: error: invalid operand +// GFX1064-ERR: error: invalid operand + +v_sub_co_u32_sdwa v0, vcc, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +// GFX1032-ERR: error: instruction not supported +// GFX1064-ERR: error: instruction not supported + +v_sub_co_u32_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +// GFX1032-ERR: error: not a valid operand +// GFX1064-ERR: error: not a valid operand + +v_subrev_co_u32_sdwa v0, vcc_lo, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +// GFX1032-ERR: error: invalid operand +// GFX1064-ERR: error: invalid operand + +v_subrev_co_u32_sdwa v0, vcc, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +// GFX1032-ERR: error: instruction not supported +// GFX1064-ERR: error: instruction not supported + +v_subrev_co_u32_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +// 
GFX1032-ERR: error: not a valid operand +// GFX1064-ERR: error: not a valid operand + +v_sub_co_ci_u32_sdwa v1, vcc_lo, v1, v4, vcc_lo dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +// GFX1032: v_sub_co_ci_u32_sdwa v1, vcc_lo, v1, v4, vcc_lo dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x08,0x02,0x52,0x01,0x06,0x00,0x06] +// GFX1064-ERR: error: instruction not supported on this GPU + +v_sub_co_ci_u32_sdwa v1, vcc, v1, v4, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +// GFX1032-ERR: error: instruction not supported on this GPU +// GFX1064: v_sub_co_ci_u32_sdwa v1, vcc, v1, v4, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x08,0x02,0x52,0x01,0x06,0x00,0x06] + +v_sub_co_ci_u32_sdwa v1, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +// GFX1032: v_sub_co_ci_u32_sdwa v1, vcc_lo, v1, v4, vcc_lo dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x08,0x02,0x52,0x01,0x06,0x00,0x06] +// GFX1064: v_sub_co_ci_u32_sdwa v1, vcc, v1, v4, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x08,0x02,0x52,0x01,0x06,0x00,0x06] + +v_subrev_co_ci_u32_sdwa v1, vcc_lo, v1, v4, vcc_lo dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +// GFX1032: v_subrev_co_ci_u32_sdwa v1, vcc_lo, v1, v4, vcc_lo dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x08,0x02,0x54,0x01,0x06,0x00,0x06] +// GFX1064-ERR: error: instruction not supported on this GPU + +v_subrev_co_ci_u32_sdwa v1, vcc, v1, v4, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +// GFX1032-ERR: error: instruction not supported on this GPU +// GFX1064: v_subrev_co_ci_u32_sdwa v1, vcc, v1, v4, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x08,0x02,0x54,0x01,0x06,0x00,0x06] + 
+v_subrev_co_ci_u32_sdwa v1, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +// GFX1032: v_subrev_co_ci_u32_sdwa v1, vcc_lo, v1, v4, vcc_lo dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x08,0x02,0x54,0x01,0x06,0x00,0x06] +// GFX1064: v_subrev_co_ci_u32_sdwa v1, vcc, v1, v4, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x08,0x02,0x54,0x01,0x06,0x00,0x06] + +v_add_co_ci_u32 v1, sext(v1), sext(v4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +// GFX1032: v_add_co_ci_u32_sdwa v1, vcc_lo, sext(v1), sext(v4), vcc_lo dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x08,0x02,0x50,0x01,0x06,0x08,0x0e] +// GFX1064: v_add_co_ci_u32_sdwa v1, vcc, sext(v1), sext(v4), vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x08,0x02,0x50,0x01,0x06,0x08,0x0e] + +v_add_co_ci_u32_sdwa v1, vcc_lo, sext(v1), sext(v4), vcc_lo dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +// GFX1032: v_add_co_ci_u32_sdwa v1, vcc_lo, sext(v1), sext(v4), vcc_lo dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x08,0x02,0x50,0x01,0x06,0x08,0x0e] +// GFX1064-ERR: error: instruction not supported on this GPU + +v_add_co_ci_u32_sdwa v1, vcc, sext(v1), sext(v4), vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +// GFX1032-ERR: error: instruction not supported on this GPU +// GFX1064: v_add_co_ci_u32_sdwa v1, vcc, sext(v1), sext(v4), vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x08,0x02,0x50,0x01,0x06,0x08,0x0e] + +v_add_co_u32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 +// GFX1032-ERR: error: not a valid operand +// GFX1064-ERR: error: not a valid operand + +v_add_co_u32_dpp v5, vcc_lo, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 +// GFX1032-ERR: error: not a 
valid operand +// GFX1064-ERR: error: not a valid operand + +v_add_co_u32_dpp v5, vcc, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 +// GFX1032-ERR: error: not a valid operand +// GFX1064-ERR: error: not a valid operand + +v_add_co_ci_u32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 +// GFX1032: v_add_co_ci_u32_dpp v5, vcc_lo, v1, v2, vcc_lo quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x04,0x0a,0x50,0x01,0xe4,0x00,0x00] +// GFX1064: v_add_co_ci_u32_dpp v5, vcc, v1, v2, vcc quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x04,0x0a,0x50,0x01,0xe4,0x00,0x00] + +v_add_co_ci_u32_dpp v5, vcc_lo, v1, v2, vcc_lo quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 +// GFX1032: v_add_co_ci_u32_dpp v5, vcc_lo, v1, v2, vcc_lo quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x04,0x0a,0x50,0x01,0xe4,0x00,0x00] +// GFX1064-ERR: error: instruction not supported on this GPU + +v_add_co_ci_u32_dpp v5, vcc, v1, v2, vcc quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 +// GFX1032-ERR: error: instruction not supported on this GPU +// GFX1064: v_add_co_ci_u32_dpp v5, vcc, v1, v2, vcc quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x04,0x0a,0x50,0x01,0xe4,0x00,0x00] + +v_sub_co_u32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 +// GFX1032-ERR: error: not a valid operand +// GFX1064-ERR: error: not a valid operand + +v_sub_co_u32_dpp v5, vcc_lo, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 +// GFX1032-ERR: error: not a valid operand +// GFX1064-ERR: error: not a valid operand + +v_sub_co_u32_dpp v5, vcc, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 +// GFX1032-ERR: error: not a valid operand +// GFX1064-ERR: error: not a valid operand + +v_sub_co_ci_u32_dpp v5, vcc_lo, v1, v2, vcc_lo quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 +// GFX1032: v_sub_co_ci_u32_dpp v5, vcc_lo, v1, v2, vcc_lo quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: 
[0xfa,0x04,0x0a,0x52,0x01,0xe4,0x00,0x00] +// GFX1064-ERR: error: instruction not supported on this GPU + +v_sub_co_ci_u32_dpp v5, vcc, v1, v2, vcc quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 +// GFX1032-ERR: error: instruction not supported on this GPU +// GFX1064: v_sub_co_ci_u32_dpp v5, vcc, v1, v2, vcc quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x04,0x0a,0x52,0x01,0xe4,0x00,0x00] + +v_subrev_co_u32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 +// GFX1032-ERR: error: not a valid operand +// GFX1064-ERR: error: not a valid operand + +v_subrev_co_u32_dpp v5, vcc_lo, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 +// GFX1032-ERR: error: not a valid operand +// GFX1064-ERR: error: not a valid operand + +v_subrev_co_u32_dpp v5, vcc, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 +// GFX1032-ERR: error: not a valid operand +// GFX1064-ERR: error: not a valid operand + +v_subrev_co_ci_u32_dpp v5, vcc_lo, v1, v2, vcc_lo quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 +// GFX1032: v_subrev_co_ci_u32_dpp v5, vcc_lo, v1, v2, vcc_lo quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x04,0x0a,0x54,0x01,0xe4,0x00,0x00] +// GFX1064-ERR: error: instruction not supported on this GPU + +v_subrev_co_ci_u32_dpp v5, vcc, v1, v2, vcc quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 +// GFX1032-ERR: error: instruction not supported on this GPU +// GFX1064: v_subrev_co_ci_u32_dpp v5, vcc, v1, v2, vcc quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x04,0x0a,0x54,0x01,0xe4,0x00,0x00] + +v_add_co_u32 v0, s0, v0, v2 +// GFX1032: v_add_co_u32_e64 v0, s0, v0, v2 ; encoding: [0x00,0x00,0x0f,0xd7,0x00,0x05,0x02,0x00] +// GFX1064-ERR: error: invalid operand for instruction + +v_add_co_u32_e64 v0, s0, v0, v2 +// GFX1032: v_add_co_u32_e64 v0, s0, v0, v2 ; encoding: [0x00,0x00,0x0f,0xd7,0x00,0x05,0x02,0x00] +// GFX1064-ERR: error: invalid operand for instruction + +v_add_co_ci_u32_e64 v4, s0, v1, v5, s2 +// 
GFX1032: v_add_co_ci_u32_e64 v4, s0, v1, v5, s2 ; encoding: [0x04,0x00,0x28,0xd5,0x01,0x0b,0x0a,0x00] +// GFX1064-ERR: error: invalid operand for instruction + +v_sub_co_u32 v0, s0, v0, v2 +// GFX1032: v_sub_co_u32_e64 v0, s0, v0, v2 ; encoding: [0x00,0x00,0x10,0xd7,0x00,0x05,0x02,0x00] +// GFX1064-ERR: error: invalid operand for instruction + +v_sub_co_u32_e64 v0, s0, v0, v2 +// GFX1032: v_sub_co_u32_e64 v0, s0, v0, v2 ; encoding: [0x00,0x00,0x10,0xd7,0x00,0x05,0x02,0x00] +// GFX1064-ERR: error: invalid operand for instruction + +v_sub_co_ci_u32_e64 v4, s0, v1, v5, s2 +// GFX1032: v_sub_co_ci_u32_e64 v4, s0, v1, v5, s2 ; encoding: [0x04,0x00,0x29,0xd5,0x01,0x0b,0x0a,0x00] +// GFX1064-ERR: error: invalid operand for instruction + +v_subrev_co_u32 v0, s0, v0, v2 +// GFX1032: v_subrev_co_u32_e64 v0, s0, v0, v2 ; encoding: [0x00,0x00,0x19,0xd7,0x00,0x05,0x02,0x00] +// GFX1064-ERR: error: invalid operand for instruction + +v_subrev_co_u32_e64 v0, s0, v0, v2 +// GFX1032: v_subrev_co_u32_e64 v0, s0, v0, v2 ; encoding: [0x00,0x00,0x19,0xd7,0x00,0x05,0x02,0x00] +// GFX1064-ERR: error: invalid operand for instruction + +v_subrev_co_ci_u32_e64 v4, s0, v1, v5, s2 +// GFX1032: v_subrev_co_ci_u32_e64 v4, s0, v1, v5, s2 ; encoding: [0x04,0x00,0x2a,0xd5,0x01,0x0b,0x0a,0x00] +// GFX1064-ERR: error: invalid operand for instruction + +v_add_co_u32 v0, s[0:1], v0, v2 +// GFX1032-ERR: error: invalid operand for instruction +// GFX1064: v_add_co_u32_e64 v0, s[0:1], v0, v2 ; encoding: [0x00,0x00,0x0f,0xd7,0x00,0x05,0x02,0x00] + +v_add_co_u32_e64 v0, s[0:1], v0, v2 +// GFX1032-ERR: error: invalid operand for instruction +// GFX1064: v_add_co_u32_e64 v0, s[0:1], v0, v2 ; encoding: [0x00,0x00,0x0f,0xd7,0x00,0x05,0x02,0x00] + +v_add_co_ci_u32_e64 v4, s[0:1], v1, v5, s[2:3] +// GFX1032-ERR: error: invalid operand for instruction +// GFX1064: v_add_co_ci_u32_e64 v4, s[0:1], v1, v5, s[2:3] ; encoding: [0x04,0x00,0x28,0xd5,0x01,0x0b,0x0a,0x00] + +v_sub_co_u32 v0, s[0:1], v0, v2 +// GFX1032-ERR: 
error: invalid operand for instruction +// GFX1064: v_sub_co_u32_e64 v0, s[0:1], v0, v2 ; encoding: [0x00,0x00,0x10,0xd7,0x00,0x05,0x02,0x00] + +v_sub_co_u32_e64 v0, s[0:1], v0, v2 +// GFX1032-ERR: error: invalid operand for instruction +// GFX1064: v_sub_co_u32_e64 v0, s[0:1], v0, v2 ; encoding: [0x00,0x00,0x10,0xd7,0x00,0x05,0x02,0x00] + +v_sub_co_ci_u32_e64 v4, s[0:1], v1, v5, s[2:3] +// GFX1032-ERR: error: invalid operand for instruction +// GFX1064: v_sub_co_ci_u32_e64 v4, s[0:1], v1, v5, s[2:3] ; encoding: [0x04,0x00,0x29,0xd5,0x01,0x0b,0x0a,0x00] + +v_subrev_co_u32 v0, s[0:1], v0, v2 +// GFX1032-ERR: error: invalid operand for instruction +// GFX1064: v_subrev_co_u32_e64 v0, s[0:1], v0, v2 ; encoding: [0x00,0x00,0x19,0xd7,0x00,0x05,0x02,0x00] + +v_subrev_co_u32_e64 v0, s[0:1], v0, v2 +// GFX1032-ERR: error: invalid operand for instruction +// GFX1064: v_subrev_co_u32_e64 v0, s[0:1], v0, v2 ; encoding: [0x00,0x00,0x19,0xd7,0x00,0x05,0x02,0x00] + +v_subrev_co_ci_u32_e64 v4, s[0:1], v1, v5, s[2:3] +// GFX1032-ERR: error: invalid operand for instruction +// GFX1064: v_subrev_co_ci_u32_e64 v4, s[0:1], v1, v5, s[2:3] ; encoding: [0x04,0x00,0x2a,0xd5,0x01,0x0b,0x0a,0x00] + +v_add_co_ci_u32_e64 v4, vcc_lo, v1, v5, s2 +// GFX1032: v_add_co_ci_u32_e64 v4, vcc_lo, v1, v5, s2 ; encoding: [0x04,0x6a,0x28,0xd5,0x01,0x0b,0x0a,0x00] +// GFX1064-ERR: error: invalid operand for instruction + +v_add_co_ci_u32_e64 v4, vcc, v1, v5, s[2:3] +// GFX1032-ERR: error: invalid operand for instruction +// GFX1064: v_add_co_ci_u32_e64 v4, vcc, v1, v5, s[2:3] ; encoding: [0x04,0x6a,0x28,0xd5,0x01,0x0b,0x0a,0x00] + +v_add_co_ci_u32_e64 v4, s0, v1, v5, vcc_lo +// GFX1032: v_add_co_ci_u32_e64 v4, s0, v1, v5, vcc_lo ; encoding: [0x04,0x00,0x28,0xd5,0x01,0x0b,0xaa,0x01] +// GFX1064-ERR: error: invalid operand for instruction + +v_add_co_ci_u32_e64 v4, s[0:1], v1, v5, vcc +// GFX1032-ERR: error: invalid operand for instruction +// GFX1064: v_add_co_ci_u32_e64 v4, s[0:1], v1, v5, vcc ; encoding: 
[0x04,0x00,0x28,0xd5,0x01,0x0b,0xaa,0x01] + +v_div_scale_f32 v2, s2, v0, v0, v2 +// GFX1032: v_div_scale_f32 v2, s2, v0, v0, v2 ; encoding: [0x02,0x02,0x6d,0xd5,0x00,0x01,0x0a,0x04] +// GFX1064-ERR: error: invalid operand for instruction + +v_div_scale_f32 v2, s[2:3], v0, v0, v2 +// GFX1032-ERR: error: invalid operand for instruction +// GFX1064: v_div_scale_f32 v2, s[2:3], v0, v0, v2 ; encoding: [0x02,0x02,0x6d,0xd5,0x00,0x01,0x0a,0x04] + +v_div_scale_f64 v[2:3], s2, v[0:1], v[0:1], v[2:3] +// GFX1032: v_div_scale_f64 v[2:3], s2, v[0:1], v[0:1], v[2:3] ; encoding: [0x02,0x02,0x6e,0xd5,0x00,0x01,0x0a,0x04] +// GFX1064-ERR: error: invalid operand for instruction + +v_div_scale_f64 v[2:3], s[2:3], v[0:1], v[0:1], v[2:3] +// GFX1032-ERR: error: invalid operand for instruction +// GFX1064: v_div_scale_f64 v[2:3], s[2:3], v[0:1], v[0:1], v[2:3] ; encoding: [0x02,0x02,0x6e,0xd5,0x00,0x01,0x0a,0x04] + +v_mad_i64_i32 v[0:1], s6, v0, v1, v[2:3] +// GFX1032: v_mad_i64_i32 v[0:1], s6, v0, v1, v[2:3] ; encoding: [0x00,0x06,0x77,0xd5,0x00,0x03,0x0a,0x04] +// GFX1064-ERR: error: invalid operand for instruction + +v_mad_i64_i32 v[0:1], s[6:7], v0, v1, v[2:3] +// GFX1032-ERR: error: invalid operand for instruction +// GFX1064: v_mad_i64_i32 v[0:1], s[6:7], v0, v1, v[2:3] ; encoding: [0x00,0x06,0x77,0xd5,0x00,0x03,0x0a,0x04] + +v_mad_u64_u32 v[0:1], s6, v0, v1, v[2:3] +// GFX1032: v_mad_u64_u32 v[0:1], s6, v0, v1, v[2:3] ; encoding: [0x00,0x06,0x76,0xd5,0x00,0x03,0x0a,0x04] +// GFX1064-ERR: error: invalid operand for instruction + +v_mad_u64_u32 v[0:1], s[6:7], v0, v1, v[2:3] +// GFX1032-ERR: error: invalid operand for instruction +// GFX1064: v_mad_u64_u32 v[0:1], s[6:7], v0, v1, v[2:3] ; encoding: [0x00,0x06,0x76,0xd5,0x00,0x03,0x0a,0x04] + +v_cmpx_neq_f32_e32 v0, v1 +// GFX1032: v_cmpx_neq_f32_e32 v0, v1 ; encoding: [0x00,0x03,0x3a,0x7c] +// GFX1064: v_cmpx_neq_f32_e32 v0, v1 ; encoding: [0x00,0x03,0x3a,0x7c] + +v_cmpx_neq_f32_sdwa v0, v1 src0_sel:WORD_1 src1_sel:DWORD +// 
GFX1032: v_cmpx_neq_f32_sdwa v0, v1 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x02,0x3a,0x7c,0x00,0x00,0x05,0x06] +// GFX1064: v_cmpx_neq_f32_sdwa v0, v1 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x02,0x3a,0x7c,0x00,0x00,0x05,0x06] + +v_cmpx_eq_u32_sdwa v0, 1 src0_sel:WORD_1 src1_sel:DWORD +// GFX1032: v_cmpx_eq_u32_sdwa v0, 1 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x02,0xa5,0x7d,0x00,0x00,0x05,0x86] +// GFX1064: v_cmpx_eq_u32_sdwa v0, 1 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x02,0xa5,0x7d,0x00,0x00,0x05,0x86] + +v_cmpx_class_f32_e64 v0, 1 +// GFX1032: v_cmpx_class_f32_e64 v0, 1 ; encoding: [0x00,0x00,0x98,0xd4,0x00,0x03,0x01,0x00] +// GFX1064: v_cmpx_class_f32_e64 v0, 1 ; encoding: [0x00,0x00,0x98,0xd4,0x00,0x03,0x01,0x00] + +v_cmpx_class_f32_sdwa v0, 1 src0_sel:WORD_1 src1_sel:DWORD +// GFX1032: v_cmpx_class_f32_sdwa v0, 1 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x02,0x31,0x7d,0x00,0x00,0x05,0x86] +// GFX1064: v_cmpx_class_f32_sdwa v0, 1 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x02,0x31,0x7d,0x00,0x00,0x05,0x86] Index: test/MC/Disassembler/AMDGPU/gfx10-sgpr-max.txt =================================================================== --- test/MC/Disassembler/AMDGPU/gfx10-sgpr-max.txt +++ test/MC/Disassembler/AMDGPU/gfx10-sgpr-max.txt @@ -8,3 +8,9 @@ # GFX10: s_mov_b32 s105, s104 ; encoding: [0x68,0x03,0xe9,0xbe] 0x68,0x03,0xe9,0xbe + +# GFX10: v_cmp_eq_f32_e64 s105, v0, s105 +0x69,0x00,0x02,0xd4,0x00,0xd3,0x00,0x00 + +# GFX10: v_cmp_eq_f32_sdwa s105, v0, s105 src0_sel:DWORD src1_sel:DWORD +0xf9,0xd2,0x04,0x7c,0x00,0xe9,0x06,0x86 Index: test/MC/Disassembler/AMDGPU/wave32.txt =================================================================== --- /dev/null +++ test/MC/Disassembler/AMDGPU/wave32.txt @@ -0,0 +1,164 @@ +# RUN: llvm-mc -arch=amdgcn -mcpu=gfx1010 -mattr=+wavefrontsize32,-wavefrontsize64 -disassemble -show-encoding < %s | FileCheck -check-prefix=GFX1032 %s +# RUN: llvm-mc -arch=amdgcn -mcpu=gfx1010 
-mattr=+wavefrontsize64,-wavefrontsize32 -disassemble -show-encoding < %s | FileCheck -check-prefix=GFX1064 %s + +# GFX1032: v_cmp_lt_f32_e32 vcc_lo, s2, v4 +# GFX1064: v_cmp_lt_f32_e32 vcc, s2, v4 +0x02,0x08,0x02,0x7c + +# GFX1032: v_cmp_ge_i32_e64 s2, s0, v2 +# GFX1064: v_cmp_ge_i32_e64 s[2:3], s0, v2 +0x02,0x00,0x86,0xd4,0x00,0x04,0x02,0x00 + +# GFX1032: v_cmp_ge_i32_sdwa vcc_lo, v0, v2 src0_sel:WORD_1 src1_sel:DWORD +# GFX1064: v_cmp_ge_i32_sdwa vcc, v0, v2 src0_sel:WORD_1 src1_sel:DWORD +0xf9,0x04,0x0c,0x7d,0x00,0x00,0x05,0x06 + +# GFX1032: v_cmp_le_f16_sdwa s0, v3, v4 src0_sel:WORD_1 src1_sel:DWORD +# GFX1064: v_cmp_le_f16_sdwa s[0:1], v3, v4 src0_sel:WORD_1 src1_sel:DWORD +0xf9,0x08,0x96,0x7d,0x03,0x80,0x05,0x06 + +# GFX1032: v_cmp_class_f32_e32 vcc_lo, s0, v0 +# GFX1064: v_cmp_class_f32_e32 vcc, s0, v0 +0x00,0x00,0x10,0x7d + +# GFX1032: v_cmp_class_f16_sdwa vcc_lo, v1, v2 src0_sel:DWORD src1_sel:DWORD +# GFX1064: v_cmp_class_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD +0xf9,0x04,0x1e,0x7d,0x01,0x00,0x06,0x06 + +# GFX1032: v_cmp_class_f16_sdwa s0, v1, v2 src0_sel:DWORD src1_sel:DWORD +# GFX1064: v_cmp_class_f16_sdwa s[0:1], v1, v2 src0_sel:DWORD src1_sel:DWORD +0xf9,0x04,0x1e,0x7d,0x01,0x80,0x06,0x06 + +# GFX1032: v_cndmask_b32_e32 v5, 0, v2, vcc_lo +# GFX1064: v_cndmask_b32_e32 v5, 0, v2, vcc ; +0x80,0x04,0x0a,0x02 + +# GFX1032: v_cndmask_b32_e32 v1, v2, v3, vcc_lo +# GFX1064: v_cndmask_b32_e32 v1, v2, v3, vcc ; +0x02,0x07,0x02,0x02 + +# GFX1032: v_add_co_u32_e64 v2, vcc_lo, s0, v2 +# GFX1064: v_add_co_u32_e64 v2, vcc, s0, v2 +0x02,0x6a,0x0f,0xd7,0x00,0x04,0x02,0x00 + +# GFX1032: v_add_co_ci_u32_e32 v3, vcc_lo, v3, v4, vcc_lo +# GFX1064: v_add_co_ci_u32_e32 v3, vcc, v3, v4, vcc ; +0x03,0x09,0x06,0x50 + +# GFX1032: v_sub_co_u32_e64 v2, vcc_lo, s0, v2 +# GFX1064: v_sub_co_u32_e64 v2, vcc, s0, v2 +0x02,0x6a,0x10,0xd7,0x00,0x04,0x02,0x00 + +# GFX1032: v_subrev_co_u32_e64 v2, vcc_lo, s0, v2 +# GFX1064: v_subrev_co_u32_e64 v2, vcc, s0, v2 
+0x02,0x6a,0x19,0xd7,0x00,0x04,0x02,0x00 + +# GFX1032: v_sub_co_ci_u32_e32 v3, vcc_lo, v3, v4, vcc_lo +# GFX1064: v_sub_co_ci_u32_e32 v3, vcc, v3, v4, vcc ; +0x03,0x09,0x06,0x52 + +# GFX1032: v_subrev_co_ci_u32_e32 v1, vcc_lo, 0, v1, vcc_lo +# GFX1064: v_subrev_co_ci_u32_e32 v1, vcc, 0, v1, vcc ; +0x80,0x02,0x02,0x54 + +# GFX1032: v_add_co_ci_u32_sdwa v1, vcc_lo, v1, v4, vcc_lo dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +# GFX1064: v_add_co_ci_u32_sdwa v1, vcc, v1, v4, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +0xf9,0x08,0x02,0x50,0x01,0x06,0x00,0x06 + +# GFX1032: v_sub_co_ci_u32_sdwa v1, vcc_lo, v1, v4, vcc_lo dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +# GFX1064: v_sub_co_ci_u32_sdwa v1, vcc, v1, v4, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +0xf9,0x08,0x02,0x52,0x01,0x06,0x00,0x06 + +# GFX1032: v_subrev_co_ci_u32_sdwa v1, vcc_lo, v1, v4, vcc_lo dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +# GFX1064: v_subrev_co_ci_u32_sdwa v1, vcc, v1, v4, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +0xf9,0x08,0x02,0x54,0x01,0x06,0x00,0x06 + +# GFX1032: v_add_co_ci_u32_sdwa v1, vcc_lo, sext(v1), sext(v4), vcc_lo dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +# GFX1064: v_add_co_ci_u32_sdwa v1, vcc, sext(v1), sext(v4), vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +0xf9,0x08,0x02,0x50,0x01,0x06,0x08,0x0e + +# GFX1032: v_add_nc_u32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 +# GFX1064: v_add_nc_u32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 +0xfa,0x04,0x0a,0x4a,0x01,0xe4,0x00,0x00 + +# FIXME: Results in invalid v_subrev_u16_dpp which apparently has the same encoding but does not exist in GFX10 + +# gfx1032: v_add_co_ci_u32_dpp v5, vcc_lo, v1, v2, vcc_lo quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 +# gfx1064: v_add_co_ci_u32_dpp v5, vcc, v1, 
v2, vcc quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 +# 0xfa,0x04,0x0a,0x50,0x01,0xe4,0x00,0x00 + +# FIXME: Results in v_mul_lo_u16_dpp + +# gfx1032: v_sub_co_ci_u32_dpp v5, vcc_lo, v1, v2, vcc_lo quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 +# gfx1064: v_sub_co_ci_u32_dpp v5, vcc, v1, v2, vcc quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 +# 0xfa,0x04,0x0a,0x52,0x01,0xe4,0x00,0x00 + +# FIXME: gives v_lshlrev_b16_dpp + +# gfx1032: v_subrev_co_ci_u32_dpp v5, vcc_lo, v1, v2, vcc_lo quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 +# gfx1064: v_subrev_co_ci_u32_dpp v5, vcc, v1, v2, vcc quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 +# 0xfa,0x04,0x0a,0x54,0x01,0xe4,0x00,0x00 + +# GFX1032: v_add_co_u32_e64 v0, s0, v0, v2 +# GFX1064: v_add_co_u32_e64 v0, s[0:1], v0, v2 +0x00,0x00,0x0f,0xd7,0x00,0x05,0x02,0x00 + +# GFX1032: v_add_co_ci_u32_e64 v4, s0, v1, v5, s2 +# GFX1064: v_add_co_ci_u32_e64 v4, s[0:1], v1, v5, s[2:3] +0x04,0x00,0x28,0xd5,0x01,0x0b,0x0a,0x00 + +# GFX1032: v_sub_co_u32_e64 v0, s0, v0, v2 +# GFX1064: v_sub_co_u32_e64 v0, s[0:1], v0, v2 +0x00,0x00,0x10,0xd7,0x00,0x05,0x02,0x00 + +# GFX1032: v_sub_co_ci_u32_e64 v4, s0, v1, v5, s2 +# GFX1064: v_sub_co_ci_u32_e64 v4, s[0:1], v1, v5, s[2:3] +0x04,0x00,0x29,0xd5,0x01,0x0b,0x0a,0x00 + +# GFX1032: v_subrev_co_u32_e64 v0, s0, v0, v2 +# GFX1064: v_subrev_co_u32_e64 v0, s[0:1], v0, v2 +0x00,0x00,0x19,0xd7,0x00,0x05,0x02,0x00 + +# GFX1032: v_subrev_co_ci_u32_e64 v4, s0, v1, v5, s2 +# GFX1064: v_subrev_co_ci_u32_e64 v4, s[0:1], v1, v5, s[2:3] +0x04,0x00,0x2a,0xd5,0x01,0x0b,0x0a,0x00 + +# GFX1032: v_add_co_ci_u32_e64 v4, vcc_lo, v1, v5, s2 +# GFX1064: v_add_co_ci_u32_e64 v4, vcc, v1, v5, s[2:3] +0x04,0x6a,0x28,0xd5,0x01,0x0b,0x0a,0x00 + +# GFX1032: v_add_co_ci_u32_e64 v4, s0, v1, v5, vcc_lo +# GFX1064: v_add_co_ci_u32_e64 v4, s[0:1], v1, v5, vcc ; +0x04,0x00,0x28,0xd5,0x01,0x0b,0xaa,0x01 + +# GFX1032: v_div_scale_f32 v2, s2, v0, v0, v2 +# GFX1064: v_div_scale_f32 v2, s[2:3], v0, v0, v2 
+0x02,0x02,0x6d,0xd5,0x00,0x01,0x0a,0x04 + +# GFX1032: v_div_scale_f64 v[2:3], s2, v[0:1], v[0:1], v[2:3] +# GFX1064: v_div_scale_f64 v[2:3], s[2:3], v[0:1], v[0:1], v[2:3] +0x02,0x02,0x6e,0xd5,0x00,0x01,0x0a,0x04 + +# GFX1032: v_mad_i64_i32 v[0:1], s6, v0, v1, v[2:3] +# GFX1064: v_mad_i64_i32 v[0:1], s[6:7], v0, v1, v[2:3] +0x00,0x06,0x77,0xd5,0x00,0x03,0x0a,0x04 + +# GFX1032: v_mad_u64_u32 v[0:1], s6, v0, v1, v[2:3] +# GFX1064: v_mad_u64_u32 v[0:1], s[6:7], v0, v1, v[2:3] +0x00,0x06,0x76,0xd5,0x00,0x03,0x0a,0x04 + +# GFX1032: v_cmpx_neq_f32_e32 v0, v1 +# GFX1064: v_cmpx_neq_f32_e32 v0, v1 +0x00,0x03,0x3a,0x7c + +# GFX1032: v_cmpx_neq_f32_sdwa v0, v1 src0_sel:WORD_1 src1_sel:DWORD +# GFX1064: v_cmpx_neq_f32_sdwa v0, v1 src0_sel:WORD_1 src1_sel:DWORD +0xf9,0x02,0x3a,0x7c,0x00,0x00,0x05,0x06 + +# GFX1032: v_cmpx_class_f32_e64 v0, 1 +# GFX1064: v_cmpx_class_f32_e64 v0, 1 +0x00,0x00,0x98,0xd4,0x00,0x03,0x01,0x00 + +# GFX1032: v_cmpx_class_f32_sdwa v0, 1 src0_sel:WORD_1 src1_sel:DWORD +# GFX1064: v_cmpx_class_f32_sdwa v0, 1 src0_sel:WORD_1 src1_sel:DWORD +0xf9,0x02,0x31,0x7d,0x00,0x00,0x05,0x86