Index: include/llvm/IR/IntrinsicsAMDGPU.td =================================================================== --- include/llvm/IR/IntrinsicsAMDGPU.td +++ include/llvm/IR/IntrinsicsAMDGPU.td @@ -187,6 +187,10 @@ llvm_i32_ty], // bit offset of the thread count [IntrConvergent]>; +def int_amdgcn_wavefrontsize : + GCCBuiltin<"__builtin_amdgcn_wavefrontsize">, + Intrinsic<[llvm_i32_ty], [], [IntrNoMem, IntrSpeculatable]>; + //===----------------------------------------------------------------------===// // Instruction Intrinsics @@ -1302,11 +1306,11 @@ >; def int_amdgcn_icmp : - Intrinsic<[llvm_i64_ty], [llvm_anyint_ty, LLVMMatchType<0>, llvm_i32_ty], + Intrinsic<[llvm_anyint_ty], [llvm_anyint_ty, LLVMMatchType<1>, llvm_i32_ty], [IntrNoMem, IntrConvergent, ImmArg<2>]>; def int_amdgcn_fcmp : - Intrinsic<[llvm_i64_ty], [llvm_anyfloat_ty, LLVMMatchType<0>, llvm_i32_ty], + Intrinsic<[llvm_anyint_ty], [llvm_anyfloat_ty, LLVMMatchType<1>, llvm_i32_ty], [IntrNoMem, IntrConvergent, ImmArg<2>]>; def int_amdgcn_readfirstlane : @@ -1576,23 +1580,23 @@ // Special Intrinsics for backend internal use only. No frontend // should emit calls to these. // ===----------------------------------------------------------------------===// -def int_amdgcn_if : Intrinsic<[llvm_i1_ty, llvm_i64_ty], +def int_amdgcn_if : Intrinsic<[llvm_i1_ty, llvm_anyint_ty], [llvm_i1_ty], [IntrConvergent] >; -def int_amdgcn_else : Intrinsic<[llvm_i1_ty, llvm_i64_ty], - [llvm_i64_ty], [IntrConvergent] +def int_amdgcn_else : Intrinsic<[llvm_i1_ty, llvm_anyint_ty], + [llvm_anyint_ty], [IntrConvergent] >; -def int_amdgcn_if_break : Intrinsic<[llvm_i64_ty], - [llvm_i1_ty, llvm_i64_ty], [IntrNoMem, IntrConvergent] +def int_amdgcn_if_break : Intrinsic<[llvm_anyint_ty], + [llvm_i1_ty, llvm_anyint_ty], [IntrNoMem, IntrConvergent] >; def int_amdgcn_loop : Intrinsic<[llvm_i1_ty], - [llvm_i64_ty], [IntrConvergent] + [llvm_anyint_ty], [IntrConvergent] >; -def int_amdgcn_end_cf : Intrinsic<[], [llvm_i64_ty], [IntrConvergent]>; +def int_amdgcn_end_cf : Intrinsic<[], [llvm_anyint_ty], [IntrConvergent]>; // Represent unreachable in a divergent region. def int_amdgcn_unreachable : Intrinsic<[], [], [IntrConvergent]>; Index: lib/Target/AMDGPU/AMDGPU.td =================================================================== --- lib/Target/AMDGPU/AMDGPU.td +++ lib/Target/AMDGPU/AMDGPU.td @@ -765,7 +765,7 @@ FeatureLDSBankCount32, FeatureDLInsts, FeatureNSAEncoding, - FeatureWavefrontSize64, + FeatureWavefrontSize32, FeatureScalarStores, FeatureScalarAtomics, FeatureScalarFlatScratchInsts, Index: lib/Target/AMDGPU/AMDGPUAtomicOptimizer.cpp =================================================================== --- lib/Target/AMDGPU/AMDGPUAtomicOptimizer.cpp +++ lib/Target/AMDGPU/AMDGPUAtomicOptimizer.cpp @@ -249,7 +249,8 @@ // We need to know how many lanes are active within the wavefront, and we do // this by doing a ballot of active lanes. 
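For reference, once int_amdgcn_icmp is overloaded on its result type, a caller can request either mask width by passing the desired integer type as the first overload parameter. A minimal IRBuilder sketch, assuming an IRBuilder B and a boolean IsWave32 are in scope (names illustrative; the atomic-optimizer call below still requests the i64 form explicitly):

  // Ballot of all active lanes: icmp ne (i32 1, i32 0) with predicate 33
  // (ICMP_NE) is true in every active lane, so the result is the exec mask.
  Type *MaskTy = IsWave32 ? B.getInt32Ty() : B.getInt64Ty();
  CallInst *Ballot =
      B.CreateIntrinsic(Intrinsic::amdgcn_icmp,
                        {MaskTy, B.getInt32Ty()},  // {result, operand} overloads
                        {B.getInt32(1), B.getInt32(0), B.getInt32(33)});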
CallInst *const Ballot = - B.CreateIntrinsic(Intrinsic::amdgcn_icmp, {B.getInt32Ty()}, + B.CreateIntrinsic(Intrinsic::amdgcn_icmp, + {B.getInt64Ty(), B.getInt32Ty()}, {B.getInt32(1), B.getInt32(0), B.getInt32(33)}); // We need to know how many lanes are active within the wavefront that are Index: lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp =================================================================== --- lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp +++ lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp @@ -1010,6 +1010,9 @@ } void AMDGPUDAGToDAGISel::SelectDIV_FMAS(SDNode *N) { + const GCNSubtarget *ST = static_cast(Subtarget); + const SIRegisterInfo *TRI = ST->getRegisterInfo(); + SDLoc SL(N); EVT VT = N->getValueType(0); @@ -1021,7 +1024,7 @@ SDValue CarryIn = N->getOperand(3); // V_DIV_FMAS implicitly reads VCC. SDValue VCC = CurDAG->getCopyToReg(CurDAG->getEntryNode(), SL, - AMDGPU::VCC, CarryIn, SDValue()); + TRI->getVCC(), CarryIn, SDValue()); SDValue Ops[10]; @@ -1838,9 +1841,12 @@ return; } + const GCNSubtarget *ST = static_cast(Subtarget); + const SIRegisterInfo *TRI = ST->getRegisterInfo(); + bool UseSCCBr = isCBranchSCC(N) && isUniformBr(N); unsigned BrOp = UseSCCBr ? AMDGPU::S_CBRANCH_SCC1 : AMDGPU::S_CBRANCH_VCCNZ; - unsigned CondReg = UseSCCBr ? AMDGPU::SCC : AMDGPU::VCC; + unsigned CondReg = UseSCCBr ? (unsigned)AMDGPU::SCC : TRI->getVCC(); SDLoc SL(N); if (!UseSCCBr) { @@ -1857,9 +1863,13 @@ // the S_AND when is unnecessary. But it would be better to add a separate // pass after SIFixSGPRCopies to do the unnecessary S_AND removal, so it // catches both cases. - Cond = SDValue(CurDAG->getMachineNode(AMDGPU::S_AND_B64, SL, MVT::i1, - CurDAG->getRegister(AMDGPU::EXEC, MVT::i1), - Cond), + Cond = SDValue(CurDAG->getMachineNode(ST->isWave32() ? AMDGPU::S_AND_B32 + : AMDGPU::S_AND_B64, + SL, MVT::i1, + CurDAG->getRegister(ST->isWave32() ? 
AMDGPU::EXEC_LO + : AMDGPU::EXEC, + MVT::i1), + Cond), 0); } Index: lib/Target/AMDGPU/AMDGPUInstrInfo.td =================================================================== --- lib/Target/AMDGPU/AMDGPUInstrInfo.td +++ lib/Target/AMDGPU/AMDGPUInstrInfo.td @@ -50,19 +50,19 @@ def AMDGPUKillSDT : SDTypeProfile<0, 1, [SDTCisInt<0>]>; def AMDGPUIfOp : SDTypeProfile<1, 2, - [SDTCisVT<0, i64>, SDTCisVT<1, i1>, SDTCisVT<2, OtherVT>] + [SDTCisVT<0, i1>, SDTCisVT<1, i1>, SDTCisVT<2, OtherVT>] >; def AMDGPUElseOp : SDTypeProfile<1, 2, - [SDTCisVT<0, i64>, SDTCisVT<1, i64>, SDTCisVT<2, OtherVT>] + [SDTCisVT<0, i1>, SDTCisVT<1, i1>, SDTCisVT<2, OtherVT>] >; def AMDGPULoopOp : SDTypeProfile<0, 2, - [SDTCisVT<0, i64>, SDTCisVT<1, OtherVT>] + [SDTCisVT<0, i1>, SDTCisVT<1, OtherVT>] >; def AMDGPUIfBreakOp : SDTypeProfile<1, 2, - [SDTCisVT<0, i64>, SDTCisVT<1, i1>, SDTCisVT<2, i64>] + [SDTCisVT<0, i1>, SDTCisVT<1, i1>, SDTCisVT<2, i1>] >; //===----------------------------------------------------------------------===// @@ -200,7 +200,7 @@ def AMDGPUborrow : SDNode<"AMDGPUISD::BORROW", SDTIntBinOp, []>; def AMDGPUSetCCOp : SDTypeProfile<1, 3, [ // setcc - SDTCisVT<0, i64>, SDTCisSameAs<1, 2>, SDTCisVT<3, OtherVT> + SDTCisInt<0>, SDTCisSameAs<1, 2>, SDTCisVT<3, OtherVT> ]>; def AMDGPUsetcc : SDNode<"AMDGPUISD::SETCC", AMDGPUSetCCOp>; Index: lib/Target/AMDGPU/AMDGPUSubtarget.h =================================================================== --- lib/Target/AMDGPU/AMDGPUSubtarget.h +++ lib/Target/AMDGPU/AMDGPUSubtarget.h @@ -1028,6 +1028,14 @@ std::vector> &Mutations) const override; + bool isWave32() const { + return WavefrontSize == 32; + } + + const TargetRegisterClass *getBoolRC() const { + return getRegisterInfo()->getBoolRC(); + } + /// \returns Maximum number of work groups per compute unit supported by the /// subtarget and limited by given \p FlatWorkGroupSize. unsigned getMaxWorkGroupsPerCU(unsigned FlatWorkGroupSize) const override { Index: lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.h =================================================================== --- lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.h +++ lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.h @@ -123,6 +123,8 @@ MCOperand decodeSDWASrc32(unsigned Val) const; MCOperand decodeSDWAVopcDst(unsigned Val) const; + MCOperand decodeBoolReg(unsigned Val) const; + int getTTmpIdx(unsigned Val) const; bool isVI() const; Index: lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp =================================================================== --- lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp +++ lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp @@ -101,6 +101,12 @@ return addOperand(Inst, MCOperand::createImm(Imm)); } +static DecodeStatus decodeBoolReg(MCInst &Inst, unsigned Val, + uint64_t Addr, const void *Decoder) { + auto DAsm = static_cast(Decoder); + return addOperand(Inst, DAsm->decodeBoolReg(Val)); +} + #define DECODE_OPERAND(StaticDecoderName, DecoderName) \ static DecodeStatus StaticDecoderName(MCInst &Inst, \ unsigned Imm, \ @@ -1039,6 +1045,8 @@ STI.getFeatureBits()[AMDGPU::FeatureGFX10]) && "SDWAVopcDst should be present only on GFX9+"); + bool IsWave64 = STI.getFeatureBits()[AMDGPU::FeatureWavefrontSize64]; + if (Val & SDWA9EncValues::VOPC_DST_VCC_MASK) { Val &= SDWA9EncValues::VOPC_DST_SGPR_MASK; @@ -1046,15 +1054,21 @@ if (TTmpIdx >= 0) { return createSRegOperand(getTtmpClassId(OPW64), TTmpIdx); } else if (Val > SGPR_MAX) { - return decodeSpecialReg64(Val); + return IsWave64 ? 
decodeSpecialReg64(Val) + : decodeSpecialReg32(Val); } else { - return createSRegOperand(getSgprClassId(OPW64), Val); + return createSRegOperand(getSgprClassId(IsWave64 ? OPW64 : OPW32), Val); } } else { - return createRegOperand(AMDGPU::VCC); + return createRegOperand(IsWave64 ? AMDGPU::VCC : AMDGPU::VCC_LO); } } +MCOperand AMDGPUDisassembler::decodeBoolReg(unsigned Val) const { + return STI.getFeatureBits()[AMDGPU::FeatureWavefrontSize64] ? + decodeOperand_SReg_64(Val) : decodeOperand_SReg_32(Val); +} + bool AMDGPUDisassembler::isVI() const { return STI.getFeatureBits()[AMDGPU::FeatureVolcanicIslands]; } Index: lib/Target/AMDGPU/SIAnnotateControlFlow.cpp =================================================================== --- lib/Target/AMDGPU/SIAnnotateControlFlow.cpp +++ lib/Target/AMDGPU/SIAnnotateControlFlow.cpp @@ -12,11 +12,13 @@ //===----------------------------------------------------------------------===// #include "AMDGPU.h" +#include "AMDGPUSubtarget.h" #include "llvm/ADT/DepthFirstIterator.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/SmallVector.h" #include "llvm/Analysis/LegacyDivergenceAnalysis.h" #include "llvm/Analysis/LoopInfo.h" +#include "llvm/CodeGen/TargetPassConfig.h" #include "llvm/IR/BasicBlock.h" #include "llvm/IR/CFG.h" #include "llvm/IR/Constant.h" @@ -55,13 +57,13 @@ Type *Boolean; Type *Void; - Type *Int64; + Type *IntMask; Type *ReturnStruct; ConstantInt *BoolTrue; ConstantInt *BoolFalse; UndefValue *BoolUndef; - Constant *Int64Zero; + Constant *IntMaskZero; Function *If; Function *Else; @@ -74,6 +76,8 @@ LoopInfo *LI; + void initialize(Module &M, const GCNSubtarget &ST); + bool isUniform(BranchInst *T); bool isTopOfStack(BasicBlock *BB); @@ -103,8 +107,6 @@ SIAnnotateControlFlow() : FunctionPass(ID) {} - bool doInitialization(Module &M) override; - bool runOnFunction(Function &F) override; StringRef getPassName() const override { return "SI annotate control flow"; } @@ -114,6 +116,7 @@ AU.addRequired(); AU.addRequired(); AU.addPreserved(); + AU.addRequired(); FunctionPass::getAnalysisUsage(AU); } }; @@ -124,31 +127,34 @@ "Annotate SI Control Flow", false, false) INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) INITIALIZE_PASS_DEPENDENCY(LegacyDivergenceAnalysis) +INITIALIZE_PASS_DEPENDENCY(TargetPassConfig) INITIALIZE_PASS_END(SIAnnotateControlFlow, DEBUG_TYPE, "Annotate SI Control Flow", false, false) char SIAnnotateControlFlow::ID = 0; /// Initialize all the types and constants used in the pass -bool SIAnnotateControlFlow::doInitialization(Module &M) { +void SIAnnotateControlFlow::initialize(Module &M, const GCNSubtarget &ST) { LLVMContext &Context = M.getContext(); Void = Type::getVoidTy(Context); Boolean = Type::getInt1Ty(Context); - Int64 = Type::getInt64Ty(Context); - ReturnStruct = StructType::get(Boolean, Int64); + IntMask = ST.isWave32() ? 
Type::getInt32Ty(Context) + : Type::getInt64Ty(Context); + ReturnStruct = StructType::get(Boolean, IntMask); BoolTrue = ConstantInt::getTrue(Context); BoolFalse = ConstantInt::getFalse(Context); BoolUndef = UndefValue::get(Boolean); - Int64Zero = ConstantInt::get(Int64, 0); - - If = Intrinsic::getDeclaration(&M, Intrinsic::amdgcn_if); - Else = Intrinsic::getDeclaration(&M, Intrinsic::amdgcn_else); - IfBreak = Intrinsic::getDeclaration(&M, Intrinsic::amdgcn_if_break); - Loop = Intrinsic::getDeclaration(&M, Intrinsic::amdgcn_loop); - EndCf = Intrinsic::getDeclaration(&M, Intrinsic::amdgcn_end_cf); - return false; + IntMaskZero = ConstantInt::get(IntMask, 0); + + If = Intrinsic::getDeclaration(&M, Intrinsic::amdgcn_if, { IntMask }); + Else = Intrinsic::getDeclaration(&M, Intrinsic::amdgcn_else, + { IntMask, IntMask }); + IfBreak = Intrinsic::getDeclaration(&M, Intrinsic::amdgcn_if_break, + { IntMask, IntMask }); + Loop = Intrinsic::getDeclaration(&M, Intrinsic::amdgcn_loop, { IntMask }); + EndCf = Intrinsic::getDeclaration(&M, Intrinsic::amdgcn_end_cf, { IntMask }); } /// Is the branch condition uniform or did the StructurizeCFG pass @@ -258,14 +264,14 @@ return; BasicBlock *Target = Term->getSuccessor(1); - PHINode *Broken = PHINode::Create(Int64, 0, "phi.broken", &Target->front()); + PHINode *Broken = PHINode::Create(IntMask, 0, "phi.broken", &Target->front()); Value *Cond = Term->getCondition(); Term->setCondition(BoolTrue); Value *Arg = handleLoopCondition(Cond, Broken, L, Term); for (BasicBlock *Pred : predecessors(Target)) { - Value *PHIValue = Int64Zero; + Value *PHIValue = IntMaskZero; if (Pred == BB) // Remember the value of the previous iteration. PHIValue = Arg; // If the backedge from Pred to Target could be executed before the exit @@ -316,6 +322,10 @@ DT = &getAnalysis().getDomTree(); LI = &getAnalysis().getLoopInfo(); DA = &getAnalysis(); + TargetPassConfig &TPC = getAnalysis(); + const TargetMachine &TM = TPC.getTM(); + + initialize(*F.getParent(), TM.getSubtarget(F)); for (df_iterator I = df_begin(&F.getEntryBlock()), E = df_end(&F.getEntryBlock()); I != E; ++I) { Index: lib/Target/AMDGPU/SIFrameLowering.cpp =================================================================== --- lib/Target/AMDGPU/SIFrameLowering.cpp +++ lib/Target/AMDGPU/SIFrameLowering.cpp @@ -627,9 +627,11 @@ ScratchExecCopy = findScratchNonCalleeSaveRegister(MF, LiveRegs, - AMDGPU::SReg_64_XEXECRegClass); + *TRI.getWaveMaskRegClass()); - BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_OR_SAVEEXEC_B64), + const unsigned OrSaveExec = ST.isWave32() ? + AMDGPU::S_OR_SAVEEXEC_B32 : AMDGPU::S_OR_SAVEEXEC_B64; + BuildMI(MBB, MBBI, DL, TII->get(OrSaveExec), ScratchExecCopy) .addImm(-1); } @@ -641,7 +643,9 @@ if (ScratchExecCopy != AMDGPU::NoRegister) { // FIXME: Split block and make terminator. - BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_MOV_B64), AMDGPU::EXEC) + unsigned ExecMov = ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64; + unsigned Exec = ST.isWave32() ? 
AMDGPU::EXEC_LO : AMDGPU::EXEC; + BuildMI(MBB, MBBI, DL, TII->get(ExecMov), Exec) .addReg(ScratchExecCopy); } } @@ -663,6 +667,7 @@ if (!Reg.FI.hasValue()) continue; + const SIRegisterInfo &TRI = TII->getRegisterInfo(); if (ScratchExecCopy == AMDGPU::NoRegister) { // See emitPrologue LivePhysRegs LiveRegs(*ST.getRegisterInfo()); @@ -670,9 +675,12 @@ ScratchExecCopy = findScratchNonCalleeSaveRegister(MF, LiveRegs, - AMDGPU::SReg_64_XEXECRegClass); + *TRI.getWaveMaskRegClass()); - BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_OR_SAVEEXEC_B64), ScratchExecCopy) + const unsigned OrSaveExec = ST.isWave32() ? + AMDGPU::S_OR_SAVEEXEC_B32 : AMDGPU::S_OR_SAVEEXEC_B64; + + BuildMI(MBB, MBBI, DL, TII->get(OrSaveExec), ScratchExecCopy) .addImm(-1); } @@ -683,7 +691,9 @@ if (ScratchExecCopy != AMDGPU::NoRegister) { // FIXME: Split block and make terminator. - BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_MOV_B64), AMDGPU::EXEC) + unsigned ExecMov = ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64; + unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC; + BuildMI(MBB, MBBI, DL, TII->get(ExecMov), Exec) .addReg(ScratchExecCopy); } Index: lib/Target/AMDGPU/SIISelLowering.cpp =================================================================== --- lib/Target/AMDGPU/SIISelLowering.cpp +++ lib/Target/AMDGPU/SIISelLowering.cpp @@ -2924,12 +2924,16 @@ int Offset, bool UseGPRIdxMode, bool IsIndirectSrc) { + MachineFunction *MF = OrigBB.getParent(); + const GCNSubtarget &ST = MF->getSubtarget(); + const SIRegisterInfo *TRI = ST.getRegisterInfo(); MachineBasicBlock::iterator I = LoopBB.begin(); - unsigned PhiExec = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); - unsigned NewExec = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); + const TargetRegisterClass *BoolRC = TRI->getBoolRC(); + unsigned PhiExec = MRI.createVirtualRegister(BoolRC); + unsigned NewExec = MRI.createVirtualRegister(BoolRC); unsigned CurrentIdxReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); - unsigned CondReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); + unsigned CondReg = MRI.createVirtualRegister(BoolRC); BuildMI(LoopBB, I, DL, TII->get(TargetOpcode::PHI), PhiReg) .addReg(InitReg) @@ -2953,7 +2957,9 @@ .addReg(IdxReg.getReg(), 0, IdxReg.getSubReg()); // Update EXEC, save the original EXEC value to VCC. - BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_AND_SAVEEXEC_B64), NewExec) + BuildMI(LoopBB, I, DL, TII->get(ST.isWave32() ? AMDGPU::S_AND_SAVEEXEC_B32 + : AMDGPU::S_AND_SAVEEXEC_B64), + NewExec) .addReg(CondReg, RegState::Kill); MRI.setSimpleHint(NewExec, CondReg); @@ -2988,10 +2994,12 @@ } // Update EXEC, switch all done bits to 0 and all todo bits to 1. + unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC; MachineInstr *InsertPt = - BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_XOR_B64_term), AMDGPU::EXEC) - .addReg(AMDGPU::EXEC) - .addReg(NewExec); + BuildMI(LoopBB, I, DL, TII->get(ST.isWave32() ? AMDGPU::S_XOR_B32_term + : AMDGPU::S_XOR_B64_term), Exec) + .addReg(Exec) + .addReg(NewExec); // XXX - s_xor_b64 sets scc to 1 if the result is nonzero, so can we use // s_cbranch_scc0? 
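The hunk above is representative of the mechanical change applied throughout this patch: code that used to hard-code the 64-bit exec mask and 64-bit SALU opcodes now dispatches on the subtarget. A condensed sketch of that dispatch, assuming a MachineFunction MF is in scope (variable names illustrative):

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  unsigned Exec        = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
  unsigned SaveExecOpc = ST.isWave32() ? AMDGPU::S_AND_SAVEEXEC_B32
                                       : AMDGPU::S_AND_SAVEEXEC_B64;
  unsigned XorTermOpc  = ST.isWave32() ? AMDGPU::S_XOR_B32_term
                                       : AMDGPU::S_XOR_B64_term;
  // Mask-sized virtual registers come from the subtarget's boolean class.
  const TargetRegisterClass *BoolRC = ST.getRegisterInfo()->getBoolRC();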
@@ -3017,19 +3025,24 @@ bool UseGPRIdxMode, bool IsIndirectSrc) { MachineFunction *MF = MBB.getParent(); + const GCNSubtarget &ST = MF->getSubtarget(); + const SIRegisterInfo *TRI = ST.getRegisterInfo(); MachineRegisterInfo &MRI = MF->getRegInfo(); const DebugLoc &DL = MI.getDebugLoc(); MachineBasicBlock::iterator I(&MI); + const auto *BoolXExecRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID); unsigned DstReg = MI.getOperand(0).getReg(); - unsigned SaveExec = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass); - unsigned TmpExec = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass); + unsigned SaveExec = MRI.createVirtualRegister(BoolXExecRC); + unsigned TmpExec = MRI.createVirtualRegister(BoolXExecRC); + unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC; + unsigned MovExecOpc = ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64; BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), TmpExec); // Save the EXEC mask - BuildMI(MBB, I, DL, TII->get(AMDGPU::S_MOV_B64), SaveExec) - .addReg(AMDGPU::EXEC); + BuildMI(MBB, I, DL, TII->get(MovExecOpc), SaveExec) + .addReg(Exec); // To insert the loop we need to split the block. Move everything after this // point to a new block, and insert a new empty block between the two. @@ -3057,7 +3070,7 @@ Offset, UseGPRIdxMode, IsIndirectSrc); MachineBasicBlock::iterator First = RemainderBB->begin(); - BuildMI(*RemainderBB, First, DL, TII->get(AMDGPU::S_MOV_B64), AMDGPU::EXEC) + BuildMI(*RemainderBB, First, DL, TII->get(MovExecOpc), Exec) .addReg(SaveExec); return InsPt; @@ -3349,6 +3362,9 @@ case AMDGPU::S_ADD_U64_PSEUDO: case AMDGPU::S_SUB_U64_PSEUDO: { MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); + const GCNSubtarget &ST = MF->getSubtarget(); + const SIRegisterInfo *TRI = ST.getRegisterInfo(); + const TargetRegisterClass *BoolRC = TRI->getBoolRC(); const DebugLoc &DL = MI.getDebugLoc(); MachineOperand &Dest = MI.getOperand(0); @@ -3359,17 +3375,17 @@ unsigned DestSub1 = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); MachineOperand Src0Sub0 = TII->buildExtractSubRegOrImm(MI, MRI, - Src0, &AMDGPU::SReg_64RegClass, AMDGPU::sub0, + Src0, BoolRC, AMDGPU::sub0, &AMDGPU::SReg_32_XM0RegClass); MachineOperand Src0Sub1 = TII->buildExtractSubRegOrImm(MI, MRI, - Src0, &AMDGPU::SReg_64RegClass, AMDGPU::sub1, + Src0, BoolRC, AMDGPU::sub1, &AMDGPU::SReg_32_XM0RegClass); MachineOperand Src1Sub0 = TII->buildExtractSubRegOrImm(MI, MRI, - Src1, &AMDGPU::SReg_64RegClass, AMDGPU::sub0, + Src1, BoolRC, AMDGPU::sub0, &AMDGPU::SReg_32_XM0RegClass); MachineOperand Src1Sub1 = TII->buildExtractSubRegOrImm(MI, MRI, - Src1, &AMDGPU::SReg_64RegClass, AMDGPU::sub1, + Src1, BoolRC, AMDGPU::sub1, &AMDGPU::SReg_32_XM0RegClass); bool IsAdd = (MI.getOpcode() == AMDGPU::S_ADD_U64_PSEUDO); @@ -3405,6 +3421,14 @@ MI.eraseFromParent(); return BB; + case AMDGPU::SI_INIT_EXEC_LO: + // This should be before all vector instructions. + BuildMI(*BB, &*BB->begin(), MI.getDebugLoc(), TII->get(AMDGPU::S_MOV_B32), + AMDGPU::EXEC_LO) + .addImm(MI.getOperand(0).getImm()); + MI.eraseFromParent(); + return BB; + case AMDGPU::SI_INIT_EXEC_FROM_INPUT: { // Extract the thread count from an SGPR input and set EXEC accordingly. // Since BFM can't shift by 64, handle that case with CMP + CMOV. @@ -3438,18 +3462,23 @@ (void)Found; // This should be before all vector instructions. + unsigned Mask = (getSubtarget()->getWavefrontSize() << 1) - 1; + bool isWave32 = getSubtarget()->isWave32(); + unsigned Exec = isWave32 ? 
AMDGPU::EXEC_LO : AMDGPU::EXEC; BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_BFE_U32), CountReg) .addReg(InputReg) - .addImm((MI.getOperand(1).getImm() & 0x7f) | 0x70000); - BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_BFM_B64), - AMDGPU::EXEC) + .addImm((MI.getOperand(1).getImm() & Mask) | 0x70000); + BuildMI(*BB, FirstMI, DebugLoc(), + TII->get(isWave32 ? AMDGPU::S_BFM_B32 : AMDGPU::S_BFM_B64), + Exec) .addReg(CountReg) .addImm(0); BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_CMP_EQ_U32)) .addReg(CountReg, RegState::Kill) - .addImm(64); - BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_CMOV_B64), - AMDGPU::EXEC) + .addImm(getSubtarget()->getWavefrontSize()); + BuildMI(*BB, FirstMI, DebugLoc(), + TII->get(isWave32 ? AMDGPU::S_CMOV_B32 : AMDGPU::S_CMOV_B64), + Exec) .addImm(-1); MI.eraseFromParent(); return BB; @@ -3480,6 +3509,8 @@ return splitKillBlock(MI, BB); case AMDGPU::V_CNDMASK_B64_PSEUDO: { MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); + const GCNSubtarget &ST = MF->getSubtarget(); + const SIRegisterInfo *TRI = ST.getRegisterInfo(); unsigned Dst = MI.getOperand(0).getReg(); unsigned Src0 = MI.getOperand(1).getReg(); @@ -3489,7 +3520,8 @@ unsigned DstLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); unsigned DstHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); - unsigned SrcCondCopy = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass); + const auto *CondRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID); + unsigned SrcCondCopy = MRI.createVirtualRegister(CondRC); BuildMI(*BB, MI, DL, TII->get(AMDGPU::COPY), SrcCondCopy) .addReg(SrcCond); @@ -3567,7 +3599,9 @@ auto I = BuildMI(*BB, MI, DL, TII->get(Opc), MI.getOperand(0).getReg()); if (TII->isVOP3(*I)) { - I.addReg(AMDGPU::VCC, RegState::Define); + const GCNSubtarget &ST = MF->getSubtarget(); + const SIRegisterInfo *TRI = ST.getRegisterInfo(); + I.addReg(TRI->getVCC(), RegState::Define); } I.add(MI.getOperand(1)) .add(MI.getOperand(2)); @@ -3839,7 +3873,6 @@ ICmpInst::Predicate IcInput = static_cast(CondCode); - SDValue LHS = N->getOperand(1); SDValue RHS = N->getOperand(2); @@ -3855,8 +3888,14 @@ ISD::CondCode CCOpcode = getICmpCondCode(IcInput); - return DAG.getNode(AMDGPUISD::SETCC, DL, VT, LHS, RHS, - DAG.getCondCode(CCOpcode)); + unsigned WavefrontSize = TLI.getSubtarget()->getWavefrontSize(); + EVT CCVT = EVT::getIntegerVT(*DAG.getContext(), WavefrontSize); + + SDValue SetCC = DAG.getNode(AMDGPUISD::SETCC, DL, CCVT, LHS, RHS, + DAG.getCondCode(CCOpcode)); + if (VT.bitsEq(CCVT)) + return SetCC; + return DAG.getZExtOrTrunc(SetCC, DL, VT); } static SDValue lowerFCMPIntrinsic(const SITargetLowering &TLI, @@ -3882,8 +3921,13 @@ FCmpInst::Predicate IcInput = static_cast(CondCode); ISD::CondCode CCOpcode = getFCmpCondCode(IcInput); - return DAG.getNode(AMDGPUISD::SETCC, SL, VT, Src0, - Src1, DAG.getCondCode(CCOpcode)); + unsigned WavefrontSize = TLI.getSubtarget()->getWavefrontSize(); + EVT CCVT = EVT::getIntegerVT(*DAG.getContext(), WavefrontSize); + SDValue SetCC = DAG.getNode(AMDGPUISD::SETCC, SL, CCVT, Src0, + Src1, DAG.getCondCode(CCOpcode)); + if (VT.bitsEq(CCVT)) + return SetCC; + return DAG.getZExtOrTrunc(SetCC, SL, VT); } void SITargetLowering::ReplaceNodeResults(SDNode *N, @@ -5394,6 +5438,9 @@ return loadInputValue(DAG, &AMDGPU::VGPR_32RegClass, MVT::i32, SDLoc(DAG.getEntryNode()), MFI->getArgInfo().WorkItemIDZ); + case Intrinsic::amdgcn_wavefrontsize: + return DAG.getConstant(MF.getSubtarget().getWavefrontSize(), + SDLoc(Op), MVT::i32); case 
Intrinsic::amdgcn_s_buffer_load: { unsigned Cache = cast(Op.getOperand(3))->getZExtValue(); return lowerSBuffer(VT, DL, Op.getOperand(1), Op.getOperand(2), @@ -5598,6 +5645,11 @@ case Intrinsic::amdgcn_fmad_ftz: return DAG.getNode(AMDGPUISD::FMAD_FTZ, DL, VT, Op.getOperand(1), Op.getOperand(2), Op.getOperand(3)); + + case Intrinsic::amdgcn_if_break: + return SDValue(DAG.getMachineNode(AMDGPU::SI_IF_BREAK, DL, VT, + Op->getOperand(1), Op->getOperand(2)), 0); + default: if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr = AMDGPU::getImageDimIntrinsicInfo(IntrinsicID)) @@ -6495,6 +6547,10 @@ M->getMemoryVT(), M->getMemOperand()); } + case Intrinsic::amdgcn_end_cf: + return SDValue(DAG.getMachineNode(AMDGPU::SI_END_CF, DL, MVT::Other, + Op->getOperand(2), Chain), 0); + default: { if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr = AMDGPU::getImageDimIntrinsicInfo(IntrinsicID)) @@ -9981,6 +10037,7 @@ void SITargetLowering::finalizeLowering(MachineFunction &MF) const { MachineRegisterInfo &MRI = MF.getRegInfo(); SIMachineFunctionInfo *Info = MF.getInfo(); + const GCNSubtarget &ST = MF.getSubtarget(); const SIRegisterInfo *TRI = Subtarget->getRegisterInfo(); if (Info->isEntryFunction()) { @@ -10008,6 +10065,25 @@ Info->limitOccupancy(MF); + if (ST.isWave32() && !MF.empty()) { + // Add VCC_HI def because many instructions marked as imp-use VCC where + // we may only define VCC_LO. If nothing defines VCC_HI we may end up + // having a use of undef. + + const SIInstrInfo *TII = ST.getInstrInfo(); + DebugLoc DL; + + MachineBasicBlock &MBB = MF.front(); + MachineBasicBlock::iterator I = MBB.getFirstNonDebugInstr(); + BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), AMDGPU::VCC_HI); + + for (auto &MBB : MF) { + for (auto &MI : MBB) { + TII->fixImplicitOperands(MI); + } + } + } + TargetLoweringBase::finalizeLowering(MF); } Index: lib/Target/AMDGPU/SIInsertSkips.cpp =================================================================== --- lib/Target/AMDGPU/SIInsertSkips.cpp +++ lib/Target/AMDGPU/SIInsertSkips.cpp @@ -271,6 +271,9 @@ break; } case AMDGPU::SI_KILL_I1_TERMINATOR: { + const MachineFunction *MF = MI.getParent()->getParent(); + const GCNSubtarget &ST = MF->getSubtarget(); + unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC; const MachineOperand &Op = MI.getOperand(0); int64_t KillVal = MI.getOperand(1).getImm(); assert(KillVal == 0 || KillVal == -1); @@ -281,14 +284,17 @@ assert(Imm == 0 || Imm == -1); if (Imm == KillVal) - BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B64), AMDGPU::EXEC) + BuildMI(MBB, &MI, DL, TII->get(ST.isWave32() ? AMDGPU::S_MOV_B32 + : AMDGPU::S_MOV_B64), Exec) .addImm(0); break; } unsigned Opcode = KillVal ? AMDGPU::S_ANDN2_B64 : AMDGPU::S_AND_B64; - BuildMI(MBB, &MI, DL, TII->get(Opcode), AMDGPU::EXEC) - .addReg(AMDGPU::EXEC) + if (ST.isWave32()) + Opcode = KillVal ? AMDGPU::S_ANDN2_B32 : AMDGPU::S_AND_B32; + BuildMI(MBB, &MI, DL, TII->get(Opcode), Exec) + .addReg(Exec) .add(Op); break; } @@ -337,9 +343,11 @@ // S_CBRANCH_EXEC[N]Z bool Changed = false; MachineBasicBlock &MBB = *MI.getParent(); - const unsigned CondReg = AMDGPU::VCC; - const unsigned ExecReg = AMDGPU::EXEC; - const unsigned And = AMDGPU::S_AND_B64; + const GCNSubtarget &ST = MBB.getParent()->getSubtarget(); + const bool IsWave32 = ST.isWave32(); + const unsigned CondReg = TRI->getVCC(); + const unsigned ExecReg = IsWave32 ? AMDGPU::EXEC_LO : AMDGPU::EXEC; + const unsigned And = IsWave32 ? 
AMDGPU::S_AND_B32 : AMDGPU::S_AND_B64; MachineBasicBlock::reverse_iterator A = MI.getReverseIterator(), E = MBB.rend(); Index: lib/Target/AMDGPU/SIInsertWaitcnts.cpp =================================================================== --- lib/Target/AMDGPU/SIInsertWaitcnts.cpp +++ lib/Target/AMDGPU/SIInsertWaitcnts.cpp @@ -1402,9 +1402,9 @@ // bit is updated, so we can restore the bit by reading the value of // vcc and then writing it back to the register. BuildMI(Block, Inst, Inst.getDebugLoc(), - TII->get(AMDGPU::S_MOV_B64), - AMDGPU::VCC) - .addReg(AMDGPU::VCC); + TII->get(ST->isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64), + TRI->getVCC()) + .addReg(TRI->getVCC()); VCCZBugHandledSet.insert(&Inst); Modified = true; } Index: lib/Target/AMDGPU/SIInstrInfo.h =================================================================== --- lib/Target/AMDGPU/SIInstrInfo.h +++ lib/Target/AMDGPU/SIInstrInfo.h @@ -942,6 +942,17 @@ /// Return -1 if the target-specific opcode for the pseudo instruction does /// not exist. If Opcode is not a pseudo instruction, this is identity. int pseudoToMCOpcode(int Opcode) const; + + const TargetRegisterClass *getRegClass(const MCInstrDesc &TID, unsigned OpNum, + const TargetRegisterInfo *TRI, + const MachineFunction &MF) + const override { + if (OpNum >= TID.getNumOperands()) + return nullptr; + return RI.getRegClass(TID.OpInfo[OpNum].RegClass); + } + + void fixImplicitOperands(MachineInstr &MI) const; }; /// \brief Returns true if a reg:subreg pair P has a TRC class Index: lib/Target/AMDGPU/SIInstrInfo.cpp =================================================================== --- lib/Target/AMDGPU/SIInstrInfo.cpp +++ lib/Target/AMDGPU/SIInstrInfo.cpp @@ -527,6 +527,21 @@ return; } + if (DestReg == AMDGPU::VCC_LO) { + if (AMDGPU::SReg_32RegClass.contains(SrcReg)) { + BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), AMDGPU::VCC_LO) + .addReg(SrcReg, getKillRegState(KillSrc)); + } else { + // FIXME: Hack until VReg_1 removed. + assert(AMDGPU::VGPR_32RegClass.contains(SrcReg)); + BuildMI(MBB, MI, DL, get(AMDGPU::V_CMP_NE_U32_e32)) + .addImm(0) + .addReg(SrcReg, getKillRegState(KillSrc)); + } + + return; + } + if (!AMDGPU::SReg_32RegClass.contains(SrcReg)) { reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc); return; @@ -698,11 +713,15 @@ unsigned TrueReg, unsigned FalseReg) const { MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); + MachineFunction *MF = MBB.getParent(); + const GCNSubtarget &ST = MF->getSubtarget(); + const TargetRegisterClass *BoolXExecRC = + RI.getRegClass(AMDGPU::SReg_1_XEXECRegClassID); assert(MRI.getRegClass(DstReg) == &AMDGPU::VGPR_32RegClass && "Not a VGPR32 reg"); if (Cond.size() == 1) { - unsigned SReg = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass); + unsigned SReg = MRI.createVirtualRegister(BoolXExecRC); BuildMI(MBB, I, DL, get(AMDGPU::COPY), SReg) .add(Cond[0]); BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg) @@ -715,8 +734,9 @@ assert(Cond[0].isImm() && "Cond[0] is not an immediate"); switch (Cond[0].getImm()) { case SIInstrInfo::SCC_TRUE: { - unsigned SReg = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass); - BuildMI(MBB, I, DL, get(AMDGPU::S_CSELECT_B64), SReg) + unsigned SReg = MRI.createVirtualRegister(BoolXExecRC); + BuildMI(MBB, I, DL, get(ST.isWave32() ? 
AMDGPU::S_CSELECT_B32 + : AMDGPU::S_CSELECT_B64), SReg) .addImm(-1) .addImm(0); BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg) @@ -728,8 +748,9 @@ break; } case SIInstrInfo::SCC_FALSE: { - unsigned SReg = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass); - BuildMI(MBB, I, DL, get(AMDGPU::S_CSELECT_B64), SReg) + unsigned SReg = MRI.createVirtualRegister(BoolXExecRC); + BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_CSELECT_B32 + : AMDGPU::S_CSELECT_B64), SReg) .addImm(0) .addImm(-1); BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg) @@ -743,7 +764,7 @@ case SIInstrInfo::VCCNZ: { MachineOperand RegOp = Cond[1]; RegOp.setImplicit(false); - unsigned SReg = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass); + unsigned SReg = MRI.createVirtualRegister(BoolXExecRC); BuildMI(MBB, I, DL, get(AMDGPU::COPY), SReg) .add(RegOp); BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg) @@ -757,7 +778,7 @@ case SIInstrInfo::VCCZ: { MachineOperand RegOp = Cond[1]; RegOp.setImplicit(false); - unsigned SReg = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass); + unsigned SReg = MRI.createVirtualRegister(BoolXExecRC); BuildMI(MBB, I, DL, get(AMDGPU::COPY), SReg) .add(RegOp); BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg) @@ -769,11 +790,13 @@ break; } case SIInstrInfo::EXECNZ: { - unsigned SReg = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass); - unsigned SReg2 = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); - BuildMI(MBB, I, DL, get(AMDGPU::S_OR_SAVEEXEC_B64), SReg2) + unsigned SReg = MRI.createVirtualRegister(BoolXExecRC); + unsigned SReg2 = MRI.createVirtualRegister(RI.getBoolRC()); + BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_OR_SAVEEXEC_B32 + : AMDGPU::S_OR_SAVEEXEC_B64), SReg2) .addImm(0); - BuildMI(MBB, I, DL, get(AMDGPU::S_CSELECT_B64), SReg) + BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_CSELECT_B32 + : AMDGPU::S_CSELECT_B64), SReg) .addImm(-1) .addImm(0); BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg) @@ -785,11 +808,13 @@ break; } case SIInstrInfo::EXECZ: { - unsigned SReg = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass); - unsigned SReg2 = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); - BuildMI(MBB, I, DL, get(AMDGPU::S_OR_SAVEEXEC_B64), SReg2) + unsigned SReg = MRI.createVirtualRegister(BoolXExecRC); + unsigned SReg2 = MRI.createVirtualRegister(RI.getBoolRC()); + BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_OR_SAVEEXEC_B32 + : AMDGPU::S_OR_SAVEEXEC_B64), SReg2) .addImm(0); - BuildMI(MBB, I, DL, get(AMDGPU::S_CSELECT_B64), SReg) + BuildMI(MBB, I, DL, get(ST.isWave32() ? 
AMDGPU::S_CSELECT_B32 + : AMDGPU::S_CSELECT_B64), SReg) .addImm(0) .addImm(-1); BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg) @@ -814,7 +839,7 @@ const DebugLoc &DL, unsigned SrcReg, int Value) const { MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo(); - unsigned Reg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); + unsigned Reg = MRI.createVirtualRegister(RI.getBoolRC()); BuildMI(*MBB, I, DL, get(AMDGPU::V_CMP_EQ_I32_e64), Reg) .addImm(Value) .addReg(SrcReg); @@ -827,7 +852,7 @@ const DebugLoc &DL, unsigned SrcReg, int Value) const { MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo(); - unsigned Reg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); + unsigned Reg = MRI.createVirtualRegister(RI.getBoolRC()); BuildMI(*MBB, I, DL, get(AMDGPU::V_CMP_NE_I32_e64), Reg) .addImm(Value) .addReg(SrcReg); @@ -1208,18 +1233,42 @@ MI.setDesc(get(AMDGPU::S_MOV_B64)); break; + case AMDGPU::S_MOV_B32_term: + // This is only a terminator to get the correct spill code placement during + // register allocation. + MI.setDesc(get(AMDGPU::S_MOV_B32)); + break; + case AMDGPU::S_XOR_B64_term: // This is only a terminator to get the correct spill code placement during // register allocation. MI.setDesc(get(AMDGPU::S_XOR_B64)); break; + case AMDGPU::S_XOR_B32_term: + // This is only a terminator to get the correct spill code placement during + // register allocation. + MI.setDesc(get(AMDGPU::S_XOR_B32)); + break; + + case AMDGPU::S_OR_B32_term: + // This is only a terminator to get the correct spill code placement during + // register allocation. + MI.setDesc(get(AMDGPU::S_OR_B32)); + break; + case AMDGPU::S_ANDN2_B64_term: // This is only a terminator to get the correct spill code placement during // register allocation. MI.setDesc(get(AMDGPU::S_ANDN2_B64)); break; + case AMDGPU::S_ANDN2_B32_term: + // This is only a terminator to get the correct spill code placement during + // register allocation. + MI.setDesc(get(AMDGPU::S_ANDN2_B32)); + break; + case AMDGPU::V_MOV_B64_PSEUDO: { unsigned Dst = MI.getOperand(0).getReg(); unsigned DstLo = RI.getSubReg(Dst, AMDGPU::sub0); @@ -1249,24 +1298,28 @@ break; } case AMDGPU::V_SET_INACTIVE_B32: { - BuildMI(MBB, MI, DL, get(AMDGPU::S_NOT_B64), AMDGPU::EXEC) - .addReg(AMDGPU::EXEC); + unsigned NotOpc = ST.isWave32() ? AMDGPU::S_NOT_B32 : AMDGPU::S_NOT_B64; + unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC; + BuildMI(MBB, MI, DL, get(NotOpc), Exec) + .addReg(Exec); BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), MI.getOperand(0).getReg()) .add(MI.getOperand(2)); - BuildMI(MBB, MI, DL, get(AMDGPU::S_NOT_B64), AMDGPU::EXEC) - .addReg(AMDGPU::EXEC); + BuildMI(MBB, MI, DL, get(NotOpc), Exec) + .addReg(Exec); MI.eraseFromParent(); break; } case AMDGPU::V_SET_INACTIVE_B64: { - BuildMI(MBB, MI, DL, get(AMDGPU::S_NOT_B64), AMDGPU::EXEC) - .addReg(AMDGPU::EXEC); + unsigned NotOpc = ST.isWave32() ? AMDGPU::S_NOT_B32 : AMDGPU::S_NOT_B64; + unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC; + BuildMI(MBB, MI, DL, get(NotOpc), Exec) + .addReg(Exec); MachineInstr *Copy = BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B64_PSEUDO), MI.getOperand(0).getReg()) .add(MI.getOperand(2)); expandPostRAPseudo(*Copy); - BuildMI(MBB, MI, DL, get(AMDGPU::S_NOT_B64), AMDGPU::EXEC) - .addReg(AMDGPU::EXEC); + BuildMI(MBB, MI, DL, get(NotOpc), Exec) + .addReg(Exec); MI.eraseFromParent(); break; } @@ -1330,13 +1383,14 @@ case AMDGPU::ENTER_WWM: { // This only gets its own opcode so that SIPreAllocateWWMRegs can tell when // WWM is entered. 
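As with the existing 64-bit forms, the new *_B32_term opcodes are only markers: a control-flow pass emits them as block terminators so that register-allocator spill code lands before the exec update, and expandPostRAPseudo above simply swaps in the real opcode. A sketch of the producing side, assuming ST, MBB, DL, TII and SaveExecReg are in scope (names illustrative):

  unsigned Exec       = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
  unsigned XorTermOpc = ST.isWave32() ? AMDGPU::S_XOR_B32_term
                                      : AMDGPU::S_XOR_B64_term;
  // Flip exec to the not-yet-handled lanes; stays a terminator until after RA.
  BuildMI(MBB, MBB.end(), DL, TII->get(XorTermOpc), Exec)
      .addReg(Exec)
      .addReg(SaveExecReg);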
- MI.setDesc(get(AMDGPU::S_OR_SAVEEXEC_B64)); + MI.setDesc(get(ST.isWave32() ? AMDGPU::S_OR_SAVEEXEC_B32 + : AMDGPU::S_OR_SAVEEXEC_B64)); break; } case AMDGPU::EXIT_WWM: { // This only gets its own opcode so that SIPreAllocateWWMRegs can tell when // WWM is exited. - MI.setDesc(get(AMDGPU::S_MOV_B64)); + MI.setDesc(get(ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64)); break; } case TargetOpcode::BUNDLE: { @@ -1699,6 +1753,10 @@ case AMDGPU::S_MOV_B64_term: case AMDGPU::S_XOR_B64_term: case AMDGPU::S_ANDN2_B64_term: + case AMDGPU::S_MOV_B32_term: + case AMDGPU::S_XOR_B32_term: + case AMDGPU::S_OR_B32_term: + case AMDGPU::S_ANDN2_B32_term: break; case AMDGPU::SI_IF: case AMDGPU::SI_ELSE: @@ -1978,6 +2036,7 @@ .addReg(FalseReg, 0, SubIdx) .addReg(TrueReg, 0, SubIdx); preserveCondRegFlags(Select->getOperand(3), Cond[1]); + fixImplicitOperands(*Select); MIB.addReg(DstElt) .addImm(SubIdx); @@ -2782,7 +2841,8 @@ // dst Inst32.add(MI.getOperand(0)); } else { - assert(MI.getOperand(0).getReg() == AMDGPU::VCC && + assert(((MI.getOperand(0).getReg() == AMDGPU::VCC) || + (MI.getOperand(0).getReg() == AMDGPU::VCC_LO)) && "Unexpected case"); } @@ -2850,6 +2910,8 @@ switch (MO.getReg()) { case AMDGPU::VCC: + case AMDGPU::VCC_LO: + case AMDGPU::VCC_HI: case AMDGPU::M0: case AMDGPU::FLAT_SCR: return MO.getReg(); @@ -3795,6 +3857,7 @@ Src1.ChangeToRegister(Src0Reg, false, false, Src0Kill); Src1.setSubReg(Src0SubReg); + fixImplicitOperands(MI); } // Legalize VOP3 operands. All operand types are supported for any operand @@ -3971,15 +4034,27 @@ emitLoadSRsrcFromVGPRLoop(const SIInstrInfo &TII, MachineRegisterInfo &MRI, MachineBasicBlock &OrigBB, MachineBasicBlock &LoopBB, const DebugLoc &DL, MachineOperand &Rsrc) { + MachineFunction &MF = *OrigBB.getParent(); + const GCNSubtarget &ST = MF.getSubtarget(); + const SIRegisterInfo *TRI = ST.getRegisterInfo(); + unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC; + unsigned SaveExecOpc = + ST.isWave32() ? AMDGPU::S_AND_SAVEEXEC_B32 : AMDGPU::S_AND_SAVEEXEC_B64; + unsigned XorTermOpc = + ST.isWave32() ? AMDGPU::S_XOR_B32_term : AMDGPU::S_XOR_B64_term; + unsigned AndOpc = + ST.isWave32() ? 
AMDGPU::S_AND_B32 : AMDGPU::S_AND_B64; + const auto *BoolXExecRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID); + MachineBasicBlock::iterator I = LoopBB.begin(); unsigned VRsrc = Rsrc.getReg(); unsigned VRsrcUndef = getUndefRegState(Rsrc.isUndef()); - unsigned SaveExec = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); - unsigned CondReg0 = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); - unsigned CondReg1 = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); - unsigned AndCond = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); + unsigned SaveExec = MRI.createVirtualRegister(BoolXExecRC); + unsigned CondReg0 = MRI.createVirtualRegister(BoolXExecRC); + unsigned CondReg1 = MRI.createVirtualRegister(BoolXExecRC); + unsigned AndCond = MRI.createVirtualRegister(BoolXExecRC); unsigned SRsrcSub0 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); unsigned SRsrcSub1 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); unsigned SRsrcSub2 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); @@ -4017,22 +4092,22 @@ BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_CMP_EQ_U64_e64), CondReg1) .addReg(SRsrc, 0, AMDGPU::sub2_sub3) .addReg(VRsrc, 0, AMDGPU::sub2_sub3); - BuildMI(LoopBB, I, DL, TII.get(AMDGPU::S_AND_B64), AndCond) + BuildMI(LoopBB, I, DL, TII.get(AndOpc), AndCond) .addReg(CondReg0) .addReg(CondReg1); MRI.setSimpleHint(SaveExec, AndCond); // Update EXEC to matching lanes, saving original to SaveExec. - BuildMI(LoopBB, I, DL, TII.get(AMDGPU::S_AND_SAVEEXEC_B64), SaveExec) + BuildMI(LoopBB, I, DL, TII.get(SaveExecOpc), SaveExec) .addReg(AndCond, RegState::Kill); // The original instruction is here; we insert the terminators after it. I = LoopBB.end(); // Update EXEC, switch all done bits to 0 and all todo bits to 1. - BuildMI(LoopBB, I, DL, TII.get(AMDGPU::S_XOR_B64_term), AMDGPU::EXEC) - .addReg(AMDGPU::EXEC) + BuildMI(LoopBB, I, DL, TII.get(XorTermOpc), Exec) + .addReg(Exec) .addReg(SaveExec); BuildMI(LoopBB, I, DL, TII.get(AMDGPU::S_CBRANCH_EXECNZ)).addMBB(&LoopBB); } @@ -4043,15 +4118,19 @@ MachineOperand &Rsrc, MachineDominatorTree *MDT) { MachineBasicBlock &MBB = *MI.getParent(); MachineFunction &MF = *MBB.getParent(); + const GCNSubtarget &ST = MF.getSubtarget(); + const SIRegisterInfo *TRI = ST.getRegisterInfo(); MachineRegisterInfo &MRI = MF.getRegInfo(); MachineBasicBlock::iterator I(&MI); const DebugLoc &DL = MI.getDebugLoc(); + unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC; + unsigned MovExecOpc = ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64; + const auto *BoolXExecRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID); - unsigned SaveExec = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass); + unsigned SaveExec = MRI.createVirtualRegister(BoolXExecRC); // Save the EXEC mask - BuildMI(MBB, I, DL, TII.get(AMDGPU::S_MOV_B64), SaveExec) - .addReg(AMDGPU::EXEC); + BuildMI(MBB, I, DL, TII.get(MovExecOpc), SaveExec).addReg(Exec); // Killed uses in the instruction we are waterfalling around will be // incorrect due to the added control-flow. @@ -4100,8 +4179,7 @@ // Restore the EXEC mask MachineBasicBlock::iterator First = RemainderBB->begin(); - BuildMI(*RemainderBB, First, DL, TII.get(AMDGPU::S_MOV_B64), AMDGPU::EXEC) - .addReg(SaveExec); + BuildMI(*RemainderBB, First, DL, TII.get(MovExecOpc), Exec).addReg(SaveExec); } // Extract pointer from Rsrc and return a zero-value Rsrc replacement. 
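Stripped of the bookkeeping, the waterfall loops above share a single exec save/restore skeleton in which only the opcode width and the class of the saved mask change. Roughly, as a sketch rather than the literal code, with ST, TRI, TII (a reference, as in the loop above), MRI, MBB, I, DL and RemainderBB assumed in scope:

  unsigned Exec       = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
  unsigned MovExecOpc = ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
  const TargetRegisterClass *BoolXExecRC =
      TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID);
  unsigned SaveExec = MRI.createVirtualRegister(BoolXExecRC);

  // Save the full mask, run the loop (S_AND_SAVEEXEC_B32/_B64 plus
  // S_XOR_B32/_B64_term inside), then restore it in the remainder block.
  BuildMI(MBB, I, DL, TII.get(MovExecOpc), SaveExec).addReg(Exec);
  // ... waterfall loop body ...
  BuildMI(*RemainderBB, RemainderBB->begin(), DL, TII.get(MovExecOpc), Exec)
      .addReg(SaveExec);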
@@ -4332,14 +4410,16 @@ // NewVaddrLo = RsrcPtr:sub0 + VAddr:sub0 DebugLoc DL = MI.getDebugLoc(); - BuildMI(MBB, MI, DL, get(AMDGPU::V_ADD_I32_e32), NewVAddrLo) + fixImplicitOperands(* + BuildMI(MBB, MI, DL, get(AMDGPU::V_ADD_I32_e32), NewVAddrLo) .addReg(RsrcPtr, 0, AMDGPU::sub0) - .addReg(VAddr->getReg(), 0, AMDGPU::sub0); + .addReg(VAddr->getReg(), 0, AMDGPU::sub0)); // NewVaddrHi = RsrcPtr:sub1 + VAddr:sub1 - BuildMI(MBB, MI, DL, get(AMDGPU::V_ADDC_U32_e32), NewVAddrHi) + fixImplicitOperands(* + BuildMI(MBB, MI, DL, get(AMDGPU::V_ADDC_U32_e32), NewVAddrHi) .addReg(RsrcPtr, 0, AMDGPU::sub1) - .addReg(VAddr->getReg(), 0, AMDGPU::sub1); + .addReg(VAddr->getReg(), 0, AMDGPU::sub1)); // NewVaddr = {NewVaddrHi, NewVaddrLo} BuildMI(MBB, MI, MI.getDebugLoc(), get(AMDGPU::REG_SEQUENCE), NewVAddr) @@ -4563,10 +4643,16 @@ case AMDGPU::S_CBRANCH_SCC0: case AMDGPU::S_CBRANCH_SCC1: // Clear unused bits of vcc - BuildMI(*MBB, Inst, Inst.getDebugLoc(), get(AMDGPU::S_AND_B64), - AMDGPU::VCC) - .addReg(AMDGPU::EXEC) - .addReg(AMDGPU::VCC); + if (ST.isWave32()) + BuildMI(*MBB, Inst, Inst.getDebugLoc(), get(AMDGPU::S_AND_B32), + AMDGPU::VCC_LO) + .addReg(AMDGPU::EXEC_LO) + .addReg(AMDGPU::VCC_LO); + else + BuildMI(*MBB, Inst, Inst.getDebugLoc(), get(AMDGPU::S_AND_B64), + AMDGPU::VCC) + .addReg(AMDGPU::EXEC) + .addReg(AMDGPU::VCC); break; case AMDGPU::S_BFE_U64: @@ -4644,6 +4730,7 @@ } Inst.addImplicitDefUseOperands(*Inst.getParent()->getParent()); + fixImplicitOperands(Inst); if (Opcode == AMDGPU::S_BFE_I32 || Opcode == AMDGPU::S_BFE_U32) { const MachineOperand &OffsetWidthOp = Inst.getOperand(2); @@ -4957,13 +5044,14 @@ MachineBasicBlock &MBB = *Inst.getParent(); MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); + const auto *CarryRC = RI.getRegClass(AMDGPU::SReg_1_XEXECRegClassID); unsigned FullDestReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass); unsigned DestSub0 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); unsigned DestSub1 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); - unsigned CarryReg = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass); - unsigned DeadCarryReg = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass); + unsigned CarryReg = MRI.createVirtualRegister(CarryRC); + unsigned DeadCarryReg = MRI.createVirtualRegister(CarryRC); MachineOperand &Dest = Inst.getOperand(0); MachineOperand &Src0 = Inst.getOperand(1); @@ -5661,7 +5749,7 @@ MachineRegisterInfo &MRI = IfEntry->getParent()->getRegInfo(); if (Branch->getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO) { - unsigned DstReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); + unsigned DstReg = MRI.createVirtualRegister(RI.getBoolRC()); MachineInstr *SIIF = BuildMI(*MF, Branch->getDebugLoc(), get(AMDGPU::SI_IF), DstReg) .add(Branch->getOperand(0)) @@ -5688,8 +5776,8 @@ if (Branch->getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO) { - unsigned DstReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); - unsigned BackEdgeReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); + unsigned DstReg = MRI.createVirtualRegister(RI.getBoolRC()); + unsigned BackEdgeReg = MRI.createVirtualRegister(RI.getBoolRC()); MachineInstrBuilder HeaderPHIBuilder = BuildMI(*(MF), Branch->getDebugLoc(), get(TargetOpcode::PHI), DstReg); for (MachineBasicBlock::pred_iterator PI = LoopEntry->pred_begin(), @@ -5699,7 +5787,7 @@ HeaderPHIBuilder.addReg(BackEdgeReg); } else { MachineBasicBlock *PMBB = *PI; - unsigned ZeroReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); + unsigned ZeroReg = 
MRI.createVirtualRegister(RI.getBoolRC()); materializeImmediate(*PMBB, PMBB->getFirstTerminator(), DebugLoc(), ZeroReg, 0); HeaderPHIBuilder.addReg(ZeroReg); @@ -5781,8 +5869,8 @@ return BuildMI(MBB, I, DL, get(AMDGPU::V_ADD_U32_e64), DestReg); MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); - unsigned UnusedCarry = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); - MRI.setRegAllocationHint(UnusedCarry, 0, AMDGPU::VCC); + unsigned UnusedCarry = MRI.createVirtualRegister(RI.getBoolRC()); + MRI.setRegAllocationHint(UnusedCarry, 0, RI.getVCC()); return BuildMI(MBB, I, DL, get(AMDGPU::V_ADD_I32_e64), DestReg) .addReg(UnusedCarry, RegState::Define | RegState::Dead); @@ -5809,6 +5897,20 @@ } } +void SIInstrInfo::fixImplicitOperands(MachineInstr &MI) const { + MachineBasicBlock *MBB = MI.getParent(); + MachineFunction *MF = MBB->getParent(); + const GCNSubtarget &ST = MF->getSubtarget(); + + if (!ST.isWave32()) + return; + + for (auto &Op : MI.implicit_operands()) { + if (Op.isReg() && Op.getReg() == AMDGPU::VCC) + Op.setReg(AMDGPU::VCC_LO); + } +} + bool SIInstrInfo::isBufferSMRD(const MachineInstr &MI) const { if (!isSMRD(MI)) return false; Index: lib/Target/AMDGPU/SIInstrInfo.td =================================================================== --- lib/Target/AMDGPU/SIInstrInfo.td +++ lib/Target/AMDGPU/SIInstrInfo.td @@ -727,6 +727,35 @@ include "SIInstrFormats.td" include "VIInstrFormats.td" +def BoolReg : AsmOperandClass { + let Name = "BoolReg"; + let ParserMethod = "parseBoolReg"; + let RenderMethod = "addRegOperands"; +} + +class BoolRC : RegisterOperand { + let ParserMatchClass = BoolReg; + let DecoderMethod = "decodeBoolReg"; +} + +def SSrc_i1 : RegisterOperand { + let ParserMatchClass = BoolReg; + let DecoderMethod = "decodeBoolReg"; +} + +def VOPDstS64orS32 : BoolRC { + let PrintMethod = "printVOPDst"; +} + +// SCSrc_i1 is the operand for pseudo instructions only. +// Boolean immeadiates shall not be exposed to codegen instructions. +def SCSrc_i1 : RegisterOperand { + let OperandNamespace = "AMDGPU"; + let OperandType = "OPERAND_REG_IMM_INT32"; + let ParserMatchClass = BoolReg; + let DecoderMethod = "decodeBoolReg"; +} + // ===----------------------------------------------------------------------===// // ExpSrc* Special cases for exp src operands which are printed as // "off" depending on en operand. 
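One consequence of keeping the instruction definitions wave-size-agnostic is that implicit operands added from the MCInstrDesc still name the 64-bit VCC; the fixImplicitOperands() helper added above narrows them after the fact. A typical call site, mirroring the wrapped BuildMI calls earlier in this file and assuming MBB, I, DL, TII and the registers are in scope (names illustrative):

  // V_ADDC_U32_e32 implicitly reads and writes VCC; on wave32 targets the
  // implicit operands must be rewritten to VCC_LO after the instruction is
  // built.
  MachineInstr *Add =
      BuildMI(MBB, I, DL, TII->get(AMDGPU::V_ADDC_U32_e32), DstReg)
          .addReg(Src0Reg)
          .addReg(Src1Reg);
  TII->fixImplicitOperands(*Add);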
@@ -765,11 +794,12 @@ def SDWASrc_f32 : SDWASrc; def SDWASrc_f16 : SDWASrc; -def SDWAVopcDst : VOPDstOperand { +def SDWAVopcDst : BoolRC { let OperandNamespace = "AMDGPU"; let OperandType = "OPERAND_SDWA_VOPC_DST"; let EncoderMethod = "getSDWAVopcDstEncoding"; let DecoderMethod = "decodeSDWAVopcDst"; + let PrintMethod = "printVOPDst"; } class NamedMatchClass : AsmOperandClass { @@ -901,11 +931,6 @@ def KImmFP16MatchClass : KImmMatchClass<16>; def f16kimm : kimmOperand; - -def VOPDstS64 : VOPDstOperand { - let PrintMethod = "printVOPDst"; -} - class FPInputModsMatchClass : AsmOperandClass { let Name = "RegOrImmWithFP"#opSize#"InputMods"; let ParserMethod = "parseRegOrImmWithFPInputMods"; @@ -1198,7 +1223,7 @@ !if(!eq(VT.Size, 128), VOPDstOperand, !if(!eq(VT.Size, 64), VOPDstOperand, !if(!eq(VT.Size, 16), VOPDstOperand, - VOPDstOperand)))); // else VT == i1 + VOPDstS64orS32)))); // else VT == i1 } // Returns the register class to use for the destination of VOP[12C] @@ -1274,7 +1299,7 @@ VSrc_f64, VSrc_b64), !if(!eq(VT.Value, i1.Value), - SCSrc_i1, + SSrc_i1, !if(isFP, !if(!eq(VT.Value, f16.Value), VSrc_f16, Index: lib/Target/AMDGPU/SIInstructions.td =================================================================== --- lib/Target/AMDGPU/SIInstructions.td +++ lib/Target/AMDGPU/SIInstructions.td @@ -121,14 +121,14 @@ } // End let hasSideEffects = 0, mayLoad = 0, mayStore = 0, Uses = [EXEC] -def ENTER_WWM : SPseudoInstSI <(outs SReg_64:$sdst), (ins i64imm:$src0)> { +def ENTER_WWM : SPseudoInstSI <(outs SReg_1:$sdst), (ins i64imm:$src0)> { let Defs = [EXEC]; let hasSideEffects = 0; let mayLoad = 0; let mayStore = 0; } -def EXIT_WWM : SPseudoInstSI <(outs SReg_64:$sdst), (ins SReg_64:$src0)> { +def EXIT_WWM : SPseudoInstSI <(outs SReg_1:$sdst), (ins SReg_1:$src0)> { let hasSideEffects = 0; let mayLoad = 0; let mayStore = 0; @@ -161,13 +161,18 @@ >; def S_ADD_U64_CO_PSEUDO : SPseudoInstSI < - (outs SReg_64:$vdst, VOPDstS64:$sdst), (ins SSrc_b64:$src0, SSrc_b64:$src1) + (outs SReg_64:$vdst, VOPDstS64orS32:$sdst), (ins SSrc_b64:$src0, SSrc_b64:$src1) >; def S_SUB_U64_CO_PSEUDO : SPseudoInstSI < - (outs SReg_64:$vdst, VOPDstS64:$sdst), (ins SSrc_b64:$src0, SSrc_b64:$src1) + (outs SReg_64:$vdst, VOPDstS64orS32:$sdst), (ins SSrc_b64:$src0, SSrc_b64:$src1) >; +def S_ADDC_U64_PSEUDO : SPseudoInstSI <(outs SReg_64:$vdst, SReg_1:$sdst), + (ins SSrc_b64:$src0, SSrc_b64:$src1)>; +def S_SUBC_U64_PSEUDO : SPseudoInstSI <(outs SReg_64:$vdst, SReg_1:$sdst), + (ins SSrc_b64:$src0, SSrc_b64:$src1)>; + } // End usesCustomInserter = 1, Defs = [SCC] let usesCustomInserter = 1 in { @@ -234,30 +239,30 @@ let OtherPredicates = [EnableLateCFGStructurize] in { def SI_NON_UNIFORM_BRCOND_PSEUDO : CFPseudoInstSI < (outs), - (ins SReg_64:$vcc, brtarget:$target), + (ins SReg_1:$vcc, brtarget:$target), [(brcond i1:$vcc, bb:$target)]> { let Size = 12; } } def SI_IF: CFPseudoInstSI < - (outs SReg_64:$dst), (ins SReg_64:$vcc, brtarget:$target), - [(set i64:$dst, (AMDGPUif i1:$vcc, bb:$target))], 1, 1> { + (outs SReg_1:$dst), (ins SReg_1:$vcc, brtarget:$target), + [(set i1:$dst, (AMDGPUif i1:$vcc, bb:$target))], 1, 1> { let Constraints = ""; let Size = 12; let hasSideEffects = 1; } def SI_ELSE : CFPseudoInstSI < - (outs SReg_64:$dst), - (ins SReg_64:$src, brtarget:$target, i1imm:$execfix), [], 1, 1> { + (outs SReg_1:$dst), + (ins SReg_1:$src, brtarget:$target, i1imm:$execfix), [], 1, 1> { let Size = 12; let hasSideEffects = 1; } def SI_LOOP : CFPseudoInstSI < - (outs), (ins SReg_64:$saved, brtarget:$target), - [(AMDGPUloop 
i64:$saved, bb:$target)], 1, 1> { + (outs), (ins SReg_1:$saved, brtarget:$target), + [(AMDGPUloop i1:$saved, bb:$target)], 1, 1> { let Size = 8; let isBranch = 1; let hasSideEffects = 1; @@ -266,8 +271,7 @@ } // End isTerminator = 1 def SI_END_CF : CFPseudoInstSI < - (outs), (ins SReg_64:$saved), - [(int_amdgcn_end_cf i64:$saved)], 1, 1> { + (outs), (ins SReg_1:$saved), [], 1, 1> { let Size = 4; let isAsCheapAsAMove = 1; let isReMaterializable = 1; @@ -277,8 +281,7 @@ } def SI_IF_BREAK : CFPseudoInstSI < - (outs SReg_64:$dst), (ins SReg_64:$vcc, SReg_64:$src), - [(set i64:$dst, (int_amdgcn_if_break i1:$vcc, i64:$src))]> { + (outs SReg_1:$dst), (ins SReg_1:$vcc, SReg_1:$src), []> { let Size = 4; let isAsCheapAsAMove = 1; let isReMaterializable = 1; @@ -304,7 +307,7 @@ } } -defm SI_KILL_I1 : PseudoInstKill <(ins SSrc_b64:$src, i1imm:$killvalue)>; +defm SI_KILL_I1 : PseudoInstKill <(ins SCSrc_i1:$src, i1imm:$killvalue)>; defm SI_KILL_F32_COND_IMM : PseudoInstKill <(ins VSrc_b32:$src0, i32imm:$src1, i32imm:$cond)>; let Defs = [EXEC,VCC] in @@ -323,7 +326,7 @@ } def SI_PS_LIVE : PseudoInstSI < - (outs SReg_64:$dst), (ins), + (outs SReg_1:$dst), (ins), [(set i1:$dst, (int_amdgcn_ps_live))]> { let SALU = 1; } @@ -558,7 +561,16 @@ def : GCNPat < (AMDGPUinit_exec i64:$src), (SI_INIT_EXEC (as_i64imm $src)) ->; +> { + let WaveSizePredicate = isWave64; +} + +def : GCNPat < + (AMDGPUinit_exec i64:$src), + (SI_INIT_EXEC_LO (as_i32imm $src)) +> { + let WaveSizePredicate = isWave32; +} def : GCNPat < (AMDGPUinit_exec_from_input i32:$input, i32:$shift), @@ -571,7 +583,7 @@ >; def : GCNPat< - (AMDGPUelse i64:$src, bb:$target), + (AMDGPUelse i1:$src, bb:$target), (SI_ELSE $src, $target, 0) >; @@ -604,7 +616,12 @@ // TODO: we could add more variants for other types of conditionals def : Pat < - (int_amdgcn_icmp i1:$src, (i1 0), (i32 33)), + (i64 (int_amdgcn_icmp i1:$src, (i1 0), (i32 33))), + (COPY $src) // Return the SGPRs representing i1 src +>; + +def : Pat < + (i32 (int_amdgcn_icmp i1:$src, (i1 0), (i32 33))), (COPY $src) // Return the SGPRs representing i1 src >; @@ -1160,7 +1177,16 @@ def : GCNPat < (i1 imm:$imm), (S_MOV_B64 (i64 (as_i64imm $imm))) ->; +> { + let WaveSizePredicate = isWave64; +} + +def : GCNPat < + (i1 imm:$imm), + (S_MOV_B32 (i32 (as_i32imm $imm))) +> { + let WaveSizePredicate = isWave32; +} def : GCNPat < (f64 InlineFPImm:$imm), @@ -1351,10 +1377,12 @@ // If we need to perform a logical operation on i1 values, we need to // use vector comparisons since there is only one SCC register. Vector -// comparisons still write to a pair of SGPRs, so treat these as -// 64-bit comparisons. When legalizing SGPR copies, instructions -// resulting in the copies from SCC to these instructions will be -// moved to the VALU. +// comparisons may write to a pair of SGPRs or a single SGPR, so treat +// these as 32 or 64-bit comparisons. When legalizing SGPR copies, +// instructions resulting in the copies from SCC to these instructions +// will be moved to the VALU. 
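At the MI level, the wave32 patterns above and in the blocks that follow amount to treating an i1 value as a 32-bit lane mask throughout: constants, logical operations and copies all stay in 32-bit SALU form. As a rough illustration of what the isWave32 i1-immediate pattern selects to, assuming ST, TRI, MRI, MBB, I, DL and TII in scope (names illustrative):

  // i1 'true' is an all-ones lane mask: S_MOV_B32 -1 on wave32,
  // S_MOV_B64 -1 on wave64, into the subtarget's boolean register class.
  unsigned MovOpc  = ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
  unsigned MaskReg = MRI.createVirtualRegister(TRI->getBoolRC());
  BuildMI(MBB, I, DL, TII->get(MovOpc), MaskReg)
      .addImm(-1);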
+ +let WaveSizePredicate = isWave64 in { def : GCNPat < (i1 (and i1:$src0, i1:$src1)), (S_AND_B64 $src0, $src1) @@ -1391,6 +1419,46 @@ (S_NOT_B64 $src0) >; } +} // end isWave64 + +let WaveSizePredicate = isWave32 in { +def : GCNPat < + (i1 (and i1:$src0, i1:$src1)), + (S_AND_B32 $src0, $src1) +>; + +def : GCNPat < + (i1 (or i1:$src0, i1:$src1)), + (S_OR_B32 $src0, $src1) +>; + +def : GCNPat < + (i1 (xor i1:$src0, i1:$src1)), + (S_XOR_B32 $src0, $src1) +>; + +def : GCNPat < + (i1 (add i1:$src0, i1:$src1)), + (S_XOR_B32 $src0, $src1) +>; + +def : GCNPat < + (i1 (sub i1:$src0, i1:$src1)), + (S_XOR_B32 $src0, $src1) +>; + +let AddedComplexity = 1 in { +def : GCNPat < + (i1 (add i1:$src0, (i1 -1))), + (S_NOT_B32 $src0) +>; + +def : GCNPat < + (i1 (sub i1:$src0, (i1 -1))), + (S_NOT_B32 $src0) +>; +} +} // end isWave32 def : GCNPat < (f16 (sint_to_fp i1:$src)), Index: lib/Target/AMDGPU/SILoadStoreOptimizer.cpp =================================================================== --- lib/Target/AMDGPU/SILoadStoreOptimizer.cpp +++ lib/Target/AMDGPU/SILoadStoreOptimizer.cpp @@ -1144,9 +1144,10 @@ MachineOperand OffsetLo = createRegOrImm(static_cast(Addr.Offset), MI); MachineOperand OffsetHi = createRegOrImm(static_cast(Addr.Offset >> 32), MI); - unsigned CarryReg = MRI->createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass); - unsigned DeadCarryReg = - MRI->createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass); + + const auto *CarryRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID); + unsigned CarryReg = MRI->createVirtualRegister(CarryRC); + unsigned DeadCarryReg = MRI->createVirtualRegister(CarryRC); unsigned DestSub0 = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass); unsigned DestSub1 = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass); Index: lib/Target/AMDGPU/SILowerControlFlow.cpp =================================================================== --- lib/Target/AMDGPU/SILowerControlFlow.cpp +++ lib/Target/AMDGPU/SILowerControlFlow.cpp @@ -82,6 +82,16 @@ LiveIntervals *LIS = nullptr; MachineRegisterInfo *MRI = nullptr; + const TargetRegisterClass *BoolRC = nullptr; + unsigned AndOpc; + unsigned OrOpc; + unsigned XorOpc; + unsigned MovTermOpc; + unsigned Andn2TermOpc; + unsigned XorTermrOpc; + unsigned OrSaveExecOpc; + unsigned Exec; + void emitIf(MachineInstr &MI); void emitElse(MachineInstr &MI); void emitIfBreak(MachineInstr &MI); @@ -188,16 +198,16 @@ // Add an implicit def of exec to discourage scheduling VALU after this which // will interfere with trying to form s_and_saveexec_b64 later. unsigned CopyReg = SimpleIf ? SaveExecReg - : MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass); + : MRI->createVirtualRegister(BoolRC); MachineInstr *CopyExec = BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), CopyReg) - .addReg(AMDGPU::EXEC) - .addReg(AMDGPU::EXEC, RegState::ImplicitDefine); + .addReg(Exec) + .addReg(Exec, RegState::ImplicitDefine); - unsigned Tmp = MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass); + unsigned Tmp = MRI->createVirtualRegister(BoolRC); MachineInstr *And = - BuildMI(MBB, I, DL, TII->get(AMDGPU::S_AND_B64), Tmp) + BuildMI(MBB, I, DL, TII->get(AndOpc), Tmp) .addReg(CopyReg) .add(Cond); @@ -206,7 +216,7 @@ MachineInstr *Xor = nullptr; if (!SimpleIf) { Xor = - BuildMI(MBB, I, DL, TII->get(AMDGPU::S_XOR_B64), SaveExecReg) + BuildMI(MBB, I, DL, TII->get(XorOpc), SaveExecReg) .addReg(Tmp) .addReg(CopyReg); setImpSCCDefDead(*Xor, ImpDefSCC.isDead()); @@ -215,7 +225,7 @@ // Use a copy that is a terminator to get correct spill code placement it with // fast regalloc. 
MachineInstr *SetExec = - BuildMI(MBB, I, DL, TII->get(AMDGPU::S_MOV_B64_term), AMDGPU::EXEC) + BuildMI(MBB, I, DL, TII->get(MovTermOpc), Exec) .addReg(Tmp, RegState::Kill); // Insert a pseudo terminator to help keep the verifier happy. This will also @@ -265,7 +275,7 @@ // We are running before TwoAddressInstructions, and si_else's operands are // tied. In order to correctly tie the registers, split this into a copy of // the src like it does. - unsigned CopyReg = MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass); + unsigned CopyReg = MRI->createVirtualRegister(BoolRC); MachineInstr *CopyExec = BuildMI(MBB, Start, DL, TII->get(AMDGPU::COPY), CopyReg) .add(MI.getOperand(1)); // Saved EXEC @@ -273,9 +283,9 @@ // This must be inserted before phis and any spill code inserted before the // else. unsigned SaveReg = ExecModified ? - MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass) : DstReg; + MRI->createVirtualRegister(BoolRC) : DstReg; MachineInstr *OrSaveExec = - BuildMI(MBB, Start, DL, TII->get(AMDGPU::S_OR_SAVEEXEC_B64), SaveReg) + BuildMI(MBB, Start, DL, TII->get(OrSaveExecOpc), SaveReg) .addReg(CopyReg); MachineBasicBlock *DestBB = MI.getOperand(2).getMBB(); @@ -284,8 +294,8 @@ if (ExecModified) { MachineInstr *And = - BuildMI(MBB, ElsePt, DL, TII->get(AMDGPU::S_AND_B64), DstReg) - .addReg(AMDGPU::EXEC) + BuildMI(MBB, ElsePt, DL, TII->get(AndOpc), DstReg) + .addReg(Exec) .addReg(SaveReg); if (LIS) @@ -293,8 +303,8 @@ } MachineInstr *Xor = - BuildMI(MBB, ElsePt, DL, TII->get(AMDGPU::S_XOR_B64_term), AMDGPU::EXEC) - .addReg(AMDGPU::EXEC) + BuildMI(MBB, ElsePt, DL, TII->get(XorTermrOpc), Exec) + .addReg(Exec) .addReg(DstReg); MachineInstr *Branch = @@ -347,14 +357,14 @@ // exit" mask. MachineInstr *And = nullptr, *Or = nullptr; if (!SkipAnding) { - And = BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_AND_B64), Dst) - .addReg(AMDGPU::EXEC) + And = BuildMI(MBB, &MI, DL, TII->get(AndOpc), Dst) + .addReg(Exec) .add(MI.getOperand(1)); - Or = BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_OR_B64), Dst) + Or = BuildMI(MBB, &MI, DL, TII->get(OrOpc), Dst) .addReg(Dst) .add(MI.getOperand(2)); } else - Or = BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_OR_B64), Dst) + Or = BuildMI(MBB, &MI, DL, TII->get(OrOpc), Dst) .add(MI.getOperand(1)) .add(MI.getOperand(2)); @@ -372,8 +382,8 @@ const DebugLoc &DL = MI.getDebugLoc(); MachineInstr *AndN2 = - BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_ANDN2_B64_term), AMDGPU::EXEC) - .addReg(AMDGPU::EXEC) + BuildMI(MBB, &MI, DL, TII->get(Andn2TermOpc), Exec) + .addReg(Exec) .add(MI.getOperand(0)); MachineInstr *Branch = @@ -394,8 +404,8 @@ MachineBasicBlock::iterator InsPt = MBB.begin(); MachineInstr *NewMI = - BuildMI(MBB, InsPt, DL, TII->get(AMDGPU::S_OR_B64), AMDGPU::EXEC) - .addReg(AMDGPU::EXEC) + BuildMI(MBB, InsPt, DL, TII->get(OrOpc), Exec) + .addReg(Exec) .add(MI.getOperand(0)); if (LIS) @@ -427,13 +437,13 @@ // does not really modify exec. for (auto I = Def->getIterator(); I != MI.getIterator(); ++I) if (I->modifiesRegister(AMDGPU::EXEC, TRI) && - !(I->isCopy() && I->getOperand(0).getReg() != AMDGPU::EXEC)) + !(I->isCopy() && I->getOperand(0).getReg() != Exec)) return; for (const auto &SrcOp : Def->explicit_operands()) if (SrcOp.isReg() && SrcOp.isUse() && (TargetRegisterInfo::isVirtualRegister(SrcOp.getReg()) || - SrcOp.getReg() == AMDGPU::EXEC)) + SrcOp.getReg() == Exec)) Src.push_back(SrcOp); } @@ -471,6 +481,27 @@ // This doesn't actually need LiveIntervals, but we can preserve them. 
LIS = getAnalysisIfAvailable(); MRI = &MF.getRegInfo(); + BoolRC = TRI->getBoolRC(); + + if (ST.isWave32()) { + AndOpc = AMDGPU::S_AND_B32; + OrOpc = AMDGPU::S_OR_B32; + XorOpc = AMDGPU::S_XOR_B32; + MovTermOpc = AMDGPU::S_MOV_B32_term; + Andn2TermOpc = AMDGPU::S_ANDN2_B32_term; + XorTermrOpc = AMDGPU::S_XOR_B32_term; + OrSaveExecOpc = AMDGPU::S_OR_SAVEEXEC_B32; + Exec = AMDGPU::EXEC_LO; + } else { + AndOpc = AMDGPU::S_AND_B64; + OrOpc = AMDGPU::S_OR_B64; + XorOpc = AMDGPU::S_XOR_B64; + MovTermOpc = AMDGPU::S_MOV_B64_term; + Andn2TermOpc = AMDGPU::S_ANDN2_B64_term; + XorTermrOpc = AMDGPU::S_XOR_B64_term; + OrSaveExecOpc = AMDGPU::S_OR_SAVEEXEC_B64; + Exec = AMDGPU::EXEC; + } MachineFunction::iterator NextBB; for (MachineFunction::iterator BI = MF.begin(), BE = MF.end(); @@ -507,6 +538,8 @@ case AMDGPU::S_AND_B64: case AMDGPU::S_OR_B64: + case AMDGPU::S_AND_B32: + case AMDGPU::S_OR_B32: // Cleanup bit manipulations on exec mask combineMasks(MI); Last = I; Index: lib/Target/AMDGPU/SILowerI1Copies.cpp =================================================================== --- lib/Target/AMDGPU/SILowerI1Copies.cpp +++ lib/Target/AMDGPU/SILowerI1Copies.cpp @@ -7,8 +7,8 @@ //===----------------------------------------------------------------------===// // // This pass lowers all occurrences of i1 values (with a vreg_1 register class) -// to lane masks (64-bit scalar registers). The pass assumes machine SSA form -// and a wave-level control flow graph. +// to lane masks (32 / 64-bit scalar registers). The pass assumes machine SSA +// form and a wave-level control flow graph. // // Before this pass, values that are semantically i1 and are defined and used // within the same basic block are already represented as lane masks in scalar @@ -50,6 +50,7 @@ static char ID; private: + bool IsWave32 = false; MachineFunction *MF = nullptr; MachineDominatorTree *DT = nullptr; MachinePostDominatorTree *PDT = nullptr; @@ -57,6 +58,14 @@ const GCNSubtarget *ST = nullptr; const SIInstrInfo *TII = nullptr; + unsigned ExecReg; + unsigned MovOp; + unsigned AndOp; + unsigned OrOp; + unsigned XorOp; + unsigned AndN2Op; + unsigned OrN2Op; + DenseSet ConstrainRegs; public: @@ -411,8 +420,10 @@ } static unsigned createLaneMaskReg(MachineFunction &MF) { + const GCNSubtarget &ST = MF.getSubtarget(); MachineRegisterInfo &MRI = MF.getRegInfo(); - return MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); + return MRI.createVirtualRegister(ST.isWave32() ? &AMDGPU::SReg_32RegClass + : &AMDGPU::SReg_64RegClass); } static unsigned insertUndefLaneMask(MachineBasicBlock &MBB) { @@ -442,13 +453,32 @@ ST = &MF->getSubtarget(); TII = ST->getInstrInfo(); + IsWave32 = ST->isWave32(); + + if (IsWave32) { + ExecReg = AMDGPU::EXEC_LO; + MovOp = AMDGPU::S_MOV_B32; + AndOp = AMDGPU::S_AND_B32; + OrOp = AMDGPU::S_OR_B32; + XorOp = AMDGPU::S_XOR_B32; + AndN2Op = AMDGPU::S_ANDN2_B32; + OrN2Op = AMDGPU::S_ORN2_B32; + } else { + ExecReg = AMDGPU::EXEC; + MovOp = AMDGPU::S_MOV_B64; + AndOp = AMDGPU::S_AND_B64; + OrOp = AMDGPU::S_OR_B64; + XorOp = AMDGPU::S_XOR_B64; + AndN2Op = AMDGPU::S_ANDN2_B64; + OrN2Op = AMDGPU::S_ORN2_B64; + } lowerCopiesFromI1(); lowerPhis(); lowerCopiesToI1(); for (unsigned Reg : ConstrainRegs) - MRI->constrainRegClass(Reg, &AMDGPU::SReg_64_XEXECRegClass); + MRI->constrainRegClass(Reg, &AMDGPU::SReg_1_XEXECRegClass); ConstrainRegs.clear(); return true; @@ -518,7 +548,8 @@ LLVM_DEBUG(dbgs() << "Lower PHI: " << MI); - MRI->setRegClass(DstReg, &AMDGPU::SReg_64RegClass); + MRI->setRegClass(DstReg, IsWave32 ? 
&AMDGPU::SReg_32RegClass + : &AMDGPU::SReg_64RegClass); // Collect incoming values. for (unsigned i = 1; i < MI.getNumOperands(); i += 2) { @@ -648,7 +679,8 @@ LLVM_DEBUG(dbgs() << "Lower Other: " << MI); - MRI->setRegClass(DstReg, &AMDGPU::SReg_64RegClass); + MRI->setRegClass(DstReg, IsWave32 ? &AMDGPU::SReg_32RegClass + : &AMDGPU::SReg_64RegClass); if (MI.getOpcode() == AMDGPU::IMPLICIT_DEF) continue; @@ -707,7 +739,7 @@ return false; } - if (MI->getOpcode() != AMDGPU::S_MOV_B64) + if (MI->getOpcode() != MovOp) return false; if (!MI->getOperand(1).isImm()) @@ -782,10 +814,10 @@ if (PrevVal == CurVal) { BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), DstReg).addReg(CurReg); } else if (CurVal) { - BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), DstReg).addReg(AMDGPU::EXEC); + BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), DstReg).addReg(ExecReg); } else { - BuildMI(MBB, I, DL, TII->get(AMDGPU::S_XOR_B64), DstReg) - .addReg(AMDGPU::EXEC) + BuildMI(MBB, I, DL, TII->get(XorOp), DstReg) + .addReg(ExecReg) .addImm(-1); } return; @@ -798,9 +830,9 @@ PrevMaskedReg = PrevReg; } else { PrevMaskedReg = createLaneMaskReg(*MF); - BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ANDN2_B64), PrevMaskedReg) + BuildMI(MBB, I, DL, TII->get(AndN2Op), PrevMaskedReg) .addReg(PrevReg) - .addReg(AMDGPU::EXEC); + .addReg(ExecReg); } } if (!CurConstant) { @@ -809,9 +841,9 @@ CurMaskedReg = CurReg; } else { CurMaskedReg = createLaneMaskReg(*MF); - BuildMI(MBB, I, DL, TII->get(AMDGPU::S_AND_B64), CurMaskedReg) + BuildMI(MBB, I, DL, TII->get(AndOp), CurMaskedReg) .addReg(CurReg) - .addReg(AMDGPU::EXEC); + .addReg(ExecReg); } } @@ -822,12 +854,12 @@ BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), DstReg) .addReg(PrevMaskedReg); } else if (PrevConstant && PrevVal) { - BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ORN2_B64), DstReg) + BuildMI(MBB, I, DL, TII->get(OrN2Op), DstReg) .addReg(CurMaskedReg) - .addReg(AMDGPU::EXEC); + .addReg(ExecReg); } else { - BuildMI(MBB, I, DL, TII->get(AMDGPU::S_OR_B64), DstReg) + BuildMI(MBB, I, DL, TII->get(OrOp), DstReg) .addReg(PrevMaskedReg) - .addReg(CurMaskedReg ? CurMaskedReg : (unsigned)AMDGPU::EXEC); + .addReg(CurMaskedReg ? CurMaskedReg : ExecReg); } } Index: lib/Target/AMDGPU/SIOptimizeExecMasking.cpp =================================================================== --- lib/Target/AMDGPU/SIOptimizeExecMasking.cpp +++ lib/Target/AMDGPU/SIOptimizeExecMasking.cpp @@ -56,13 +56,16 @@ char &llvm::SIOptimizeExecMaskingID = SIOptimizeExecMasking::ID; /// If \p MI is a copy from exec, return the register copied to. -static unsigned isCopyFromExec(const MachineInstr &MI) { +static unsigned isCopyFromExec(const MachineInstr &MI, const GCNSubtarget &ST) { switch (MI.getOpcode()) { case AMDGPU::COPY: case AMDGPU::S_MOV_B64: - case AMDGPU::S_MOV_B64_term: { + case AMDGPU::S_MOV_B64_term: + case AMDGPU::S_MOV_B32: + case AMDGPU::S_MOV_B32_term: { const MachineOperand &Src = MI.getOperand(1); - if (Src.isReg() && Src.getReg() == AMDGPU::EXEC) + if (Src.isReg() && + Src.getReg() == (ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC)) return MI.getOperand(0).getReg(); } } @@ -71,16 +74,20 @@ } /// If \p MI is a copy to exec, return the register copied from. 
-static unsigned isCopyToExec(const MachineInstr &MI) { +static unsigned isCopyToExec(const MachineInstr &MI, const GCNSubtarget &ST) { switch (MI.getOpcode()) { case AMDGPU::COPY: - case AMDGPU::S_MOV_B64: { + case AMDGPU::S_MOV_B64: + case AMDGPU::S_MOV_B32: { const MachineOperand &Dst = MI.getOperand(0); - if (Dst.isReg() && Dst.getReg() == AMDGPU::EXEC && MI.getOperand(1).isReg()) + if (Dst.isReg() && + Dst.getReg() == (ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC) && + MI.getOperand(1).isReg()) return MI.getOperand(1).getReg(); break; } case AMDGPU::S_MOV_B64_term: + case AMDGPU::S_MOV_B32_term: llvm_unreachable("should have been replaced"); } @@ -105,6 +112,23 @@ const MachineOperand &Src2 = MI.getOperand(2); if (Src2.isReg() && Src2.getReg() == AMDGPU::EXEC) return MI.getOperand(0).getReg(); + break; + } + case AMDGPU::S_AND_B32: + case AMDGPU::S_OR_B32: + case AMDGPU::S_XOR_B32: + case AMDGPU::S_ANDN2_B32: + case AMDGPU::S_ORN2_B32: + case AMDGPU::S_NAND_B32: + case AMDGPU::S_NOR_B32: + case AMDGPU::S_XNOR_B32: { + const MachineOperand &Src1 = MI.getOperand(1); + if (Src1.isReg() && Src1.getReg() == AMDGPU::EXEC_LO) + return MI.getOperand(0).getReg(); + const MachineOperand &Src2 = MI.getOperand(2); + if (Src2.isReg() && Src2.getReg() == AMDGPU::EXEC_LO) + return MI.getOperand(0).getReg(); + break; } } @@ -129,6 +153,22 @@ return AMDGPU::S_NOR_SAVEEXEC_B64; case AMDGPU::S_XNOR_B64: return AMDGPU::S_XNOR_SAVEEXEC_B64; + case AMDGPU::S_AND_B32: + return AMDGPU::S_AND_SAVEEXEC_B32; + case AMDGPU::S_OR_B32: + return AMDGPU::S_OR_SAVEEXEC_B32; + case AMDGPU::S_XOR_B32: + return AMDGPU::S_XOR_SAVEEXEC_B32; + case AMDGPU::S_ANDN2_B32: + return AMDGPU::S_ANDN2_SAVEEXEC_B32; + case AMDGPU::S_ORN2_B32: + return AMDGPU::S_ORN2_SAVEEXEC_B32; + case AMDGPU::S_NAND_B32: + return AMDGPU::S_NAND_SAVEEXEC_B32; + case AMDGPU::S_NOR_B32: + return AMDGPU::S_NOR_SAVEEXEC_B32; + case AMDGPU::S_XNOR_B32: + return AMDGPU::S_XNOR_SAVEEXEC_B32; default: return AMDGPU::INSTRUCTION_LIST_END; } @@ -139,7 +179,8 @@ // these is expected per block. static bool removeTerminatorBit(const SIInstrInfo &TII, MachineInstr &MI) { switch (MI.getOpcode()) { - case AMDGPU::S_MOV_B64_term: { + case AMDGPU::S_MOV_B64_term: + case AMDGPU::S_MOV_B32_term: { MI.setDesc(TII.get(AMDGPU::COPY)); return true; } @@ -149,12 +190,30 @@ MI.setDesc(TII.get(AMDGPU::S_XOR_B64)); return true; } + case AMDGPU::S_XOR_B32_term: { + // This is only a terminator to get the correct spill code placement during + // register allocation. + MI.setDesc(TII.get(AMDGPU::S_XOR_B32)); + return true; + } + case AMDGPU::S_OR_B32_term: { + // This is only a terminator to get the correct spill code placement during + // register allocation. + MI.setDesc(TII.get(AMDGPU::S_OR_B32)); + return true; + } case AMDGPU::S_ANDN2_B64_term: { // This is only a terminator to get the correct spill code placement during // register allocation. MI.setDesc(TII.get(AMDGPU::S_ANDN2_B64)); return true; } + case AMDGPU::S_ANDN2_B32_term: { + // This is only a terminator to get the correct spill code placement during + // register allocation. 
+ MI.setDesc(TII.get(AMDGPU::S_ANDN2_B32)); + return true; + } default: return false; } @@ -177,6 +236,7 @@ static MachineBasicBlock::reverse_iterator findExecCopy( const SIInstrInfo &TII, + const GCNSubtarget &ST, MachineBasicBlock &MBB, MachineBasicBlock::reverse_iterator I, unsigned CopyToExec) { @@ -184,7 +244,7 @@ auto E = MBB.rend(); for (unsigned N = 0; N <= InstLimit && I != E; ++I, ++N) { - unsigned CopyFromExec = isCopyFromExec(*I); + unsigned CopyFromExec = isCopyFromExec(*I, ST); if (CopyFromExec != AMDGPU::NoRegister) return I; } @@ -211,6 +271,7 @@ const GCNSubtarget &ST = MF.getSubtarget(); const SIRegisterInfo *TRI = ST.getRegisterInfo(); const SIInstrInfo *TII = ST.getInstrInfo(); + unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC; // Optimize sequences emitted for control flow lowering. They are originally // emitted as the separate operations because spill code may need to be @@ -229,13 +290,13 @@ if (I == E) continue; - unsigned CopyToExec = isCopyToExec(*I); + unsigned CopyToExec = isCopyToExec(*I, ST); if (CopyToExec == AMDGPU::NoRegister) continue; // Scan backwards to find the def. auto CopyToExecInst = &*I; - auto CopyFromExecInst = findExecCopy(*TII, MBB, I, CopyToExec); + auto CopyFromExecInst = findExecCopy(*TII, ST, MBB, I, CopyToExec); if (CopyFromExecInst == E) { auto PrepareExecInst = std::next(I); if (PrepareExecInst == E) @@ -245,7 +306,7 @@ isLogicalOpOnExec(*PrepareExecInst) == CopyToExec) { LLVM_DEBUG(dbgs() << "Fold exec copy: " << *PrepareExecInst); - PrepareExecInst->getOperand(0).setReg(AMDGPU::EXEC); + PrepareExecInst->getOperand(0).setReg(Exec); LLVM_DEBUG(dbgs() << "into: " << *PrepareExecInst << '\n'); @@ -268,7 +329,7 @@ for (MachineBasicBlock::iterator J = std::next(CopyFromExecInst->getIterator()), JE = I->getIterator(); J != JE; ++J) { - if (SaveExecInst && J->readsRegister(AMDGPU::EXEC, TRI)) { + if (SaveExecInst && J->readsRegister(Exec, TRI)) { LLVM_DEBUG(dbgs() << "exec read prevents saveexec: " << *J << '\n'); // Make sure this is inserted after any VALU ops that may have been // scheduled in between. @@ -352,7 +413,7 @@ CopyToExecInst->eraseFromParent(); for (MachineInstr *OtherInst : OtherUseInsts) { - OtherInst->substituteRegister(CopyToExec, AMDGPU::EXEC, + OtherInst->substituteRegister(CopyToExec, Exec, AMDGPU::NoSubRegister, *TRI); } } Index: lib/Target/AMDGPU/SIOptimizeExecMaskingPreRA.cpp =================================================================== --- lib/Target/AMDGPU/SIOptimizeExecMaskingPreRA.cpp +++ lib/Target/AMDGPU/SIOptimizeExecMaskingPreRA.cpp @@ -82,13 +82,21 @@ return new SIOptimizeExecMaskingPreRA(); } -static bool isEndCF(const MachineInstr& MI, const SIRegisterInfo* TRI) { +static bool isEndCF(const MachineInstr &MI, const SIRegisterInfo *TRI, + const GCNSubtarget &ST) { + if (ST.isWave32()) { + return MI.getOpcode() == AMDGPU::S_OR_B32 && + MI.modifiesRegister(AMDGPU::EXEC_LO, TRI); + } + return MI.getOpcode() == AMDGPU::S_OR_B64 && MI.modifiesRegister(AMDGPU::EXEC, TRI); } -static bool isFullExecCopy(const MachineInstr& MI) { - if (MI.isCopy() && MI.getOperand(1).getReg() == AMDGPU::EXEC) { +static bool isFullExecCopy(const MachineInstr& MI, const GCNSubtarget& ST) { + unsigned Exec = ST.isWave32() ? 
AMDGPU::EXEC_LO : AMDGPU::EXEC; + + if (MI.isCopy() && MI.getOperand(1).getReg() == Exec) { assert(MI.isFullCopy()); return true; } @@ -97,24 +105,27 @@ } static unsigned getOrNonExecReg(const MachineInstr &MI, - const SIInstrInfo &TII) { + const SIInstrInfo &TII, + const GCNSubtarget& ST) { + unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC; auto Op = TII.getNamedOperand(MI, AMDGPU::OpName::src1); - if (Op->isReg() && Op->getReg() != AMDGPU::EXEC) + if (Op->isReg() && Op->getReg() != Exec) return Op->getReg(); Op = TII.getNamedOperand(MI, AMDGPU::OpName::src0); - if (Op->isReg() && Op->getReg() != AMDGPU::EXEC) + if (Op->isReg() && Op->getReg() != Exec) return Op->getReg(); return AMDGPU::NoRegister; } static MachineInstr* getOrExecSource(const MachineInstr &MI, const SIInstrInfo &TII, - const MachineRegisterInfo &MRI) { - auto SavedExec = getOrNonExecReg(MI, TII); + const MachineRegisterInfo &MRI, + const GCNSubtarget& ST) { + auto SavedExec = getOrNonExecReg(MI, TII, ST); if (SavedExec == AMDGPU::NoRegister) return nullptr; auto SaveExecInst = MRI.getUniqueVRegDef(SavedExec); - if (!SaveExecInst || !isFullExecCopy(*SaveExecInst)) + if (!SaveExecInst || !isFullExecCopy(*SaveExecInst, ST)) return nullptr; return SaveExecInst; } @@ -180,10 +191,11 @@ LiveIntervals *LIS) { const SIRegisterInfo *TRI = ST.getRegisterInfo(); const SIInstrInfo *TII = ST.getInstrInfo(); - const unsigned AndOpc = AMDGPU::S_AND_B64; - const unsigned Andn2Opc = AMDGPU::S_ANDN2_B64; - const unsigned CondReg = AMDGPU::VCC; - const unsigned ExecReg = AMDGPU::EXEC; + bool Wave32 = ST.isWave32(); + const unsigned AndOpc = Wave32 ? AMDGPU::S_AND_B32 : AMDGPU::S_AND_B64; + const unsigned Andn2Opc = Wave32 ? AMDGPU::S_ANDN2_B32 : AMDGPU::S_ANDN2_B64; + const unsigned CondReg = Wave32 ? AMDGPU::VCC_LO : AMDGPU::VCC; + const unsigned ExecReg = Wave32 ? AMDGPU::EXEC_LO : AMDGPU::EXEC; auto I = llvm::find_if(MBB.terminators(), [](const MachineInstr &MI) { unsigned Opc = MI.getOpcode(); @@ -290,6 +302,7 @@ MachineRegisterInfo &MRI = MF.getRegInfo(); LiveIntervals *LIS = &getAnalysis(); DenseSet RecalcRegs({AMDGPU::EXEC_LO, AMDGPU::EXEC_HI}); + unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC; bool Changed = false; for (MachineBasicBlock &MBB : MF) { @@ -368,19 +381,19 @@ // Try to collapse adjacent endifs. 
auto E = MBB.end(); auto Lead = skipDebugInstructionsForward(MBB.begin(), E); - if (MBB.succ_size() != 1 || Lead == E || !isEndCF(*Lead, TRI)) + if (MBB.succ_size() != 1 || Lead == E || !isEndCF(*Lead, TRI, ST)) continue; MachineBasicBlock *TmpMBB = &MBB; auto NextLead = skipIgnoreExecInstsTrivialSucc(TmpMBB, std::next(Lead)); - if (NextLead == TmpMBB->end() || !isEndCF(*NextLead, TRI) || - !getOrExecSource(*NextLead, *TII, MRI)) + if (NextLead == TmpMBB->end() || !isEndCF(*NextLead, TRI, ST) || + !getOrExecSource(*NextLead, *TII, MRI, ST)) continue; LLVM_DEBUG(dbgs() << "Redundant EXEC = S_OR_B64 found: " << *Lead << '\n'); - auto SaveExec = getOrExecSource(*Lead, *TII, MRI); - unsigned SaveExecReg = getOrNonExecReg(*Lead, *TII); + auto SaveExec = getOrExecSource(*Lead, *TII, MRI, ST); + unsigned SaveExecReg = getOrNonExecReg(*Lead, *TII, ST); for (auto &Op : Lead->operands()) { if (Op.isReg()) RecalcRegs.insert(Op.getReg()); @@ -414,7 +427,7 @@ if (SafeToReplace) { LIS->RemoveMachineInstrFromMaps(*SaveExec); SaveExec->eraseFromParent(); - MRI.replaceRegWith(SavedExec, AMDGPU::EXEC); + MRI.replaceRegWith(SavedExec, Exec); LIS->removeInterval(SavedExec); } } Index: lib/Target/AMDGPU/SIPeepholeSDWA.cpp =================================================================== --- lib/Target/AMDGPU/SIPeepholeSDWA.cpp +++ lib/Target/AMDGPU/SIPeepholeSDWA.cpp @@ -954,7 +954,8 @@ if (TII->isVOPC(Opc)) { if (!ST.hasSDWASdst()) { const MachineOperand *SDst = TII->getNamedOperand(MI, AMDGPU::OpName::sdst); - if (SDst && SDst->getReg() != AMDGPU::VCC) + if (SDst && (SDst->getReg() != AMDGPU::VCC && + SDst->getReg() != AMDGPU::VCC_LO)) return false; } @@ -1019,7 +1020,7 @@ SDWAInst.add(*Dst); } else { assert(AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::sdst) != -1); - SDWAInst.addReg(AMDGPU::VCC, RegState::Define); + SDWAInst.addReg(TRI->getVCC(), RegState::Define); } // Copy src0, initialize src0_modifiers. All sdwa instructions has src0 and Index: lib/Target/AMDGPU/SIRegisterInfo.h =================================================================== --- lib/Target/AMDGPU/SIRegisterInfo.h +++ lib/Target/AMDGPU/SIRegisterInfo.h @@ -33,6 +33,7 @@ BitVector VGPRPressureSets; bool SpillSGPRToVGPR; bool SpillSGPRToSMEM; + bool isWave32; void classifyPressureSet(unsigned PSetID, unsigned Reg, BitVector &PressureSets) const; @@ -231,6 +232,20 @@ getConstrainedRegClassForOperand(const MachineOperand &MO, const MachineRegisterInfo &MRI) const override; + const TargetRegisterClass *getBoolRC() const { + return isWave32 ? &AMDGPU::SReg_32_XM0RegClass + : &AMDGPU::SReg_64RegClass; + } + + const TargetRegisterClass *getWaveMaskRegClass() const { + return isWave32 ? 
&AMDGPU::SReg_32_XM0_XEXECRegClass + : &AMDGPU::SReg_64_XEXECRegClass; + } + + unsigned getVCC() const; + + const TargetRegisterClass *getRegClass(unsigned RCID) const; + // Find reaching register definition MachineInstr *findReachingDef(unsigned Reg, unsigned SubReg, MachineInstr &Use, Index: lib/Target/AMDGPU/SIRegisterInfo.cpp =================================================================== --- lib/Target/AMDGPU/SIRegisterInfo.cpp +++ lib/Target/AMDGPU/SIRegisterInfo.cpp @@ -63,7 +63,8 @@ SGPRPressureSets(getNumRegPressureSets()), VGPRPressureSets(getNumRegPressureSets()), SpillSGPRToVGPR(false), - SpillSGPRToSMEM(false) { + SpillSGPRToSMEM(false), + isWave32(ST.isWave32()) { if (EnableSpillSGPRToSMEM && ST.hasScalarStores()) SpillSGPRToSMEM = true; else if (EnableSpillSGPRToVGPR) @@ -184,6 +185,13 @@ // Reserve null register - it shall never be allocated reserveRegisterTuples(Reserved, AMDGPU::SGPR_NULL); + // Disallow vcc_hi allocation in wave32. It may be allocated but most likely + // will result in bugs. + if (isWave32) { + Reserved.set(AMDGPU::VCC); + Reserved.set(AMDGPU::VCC_HI); + } + const GCNSubtarget &ST = MF.getSubtarget(); unsigned MaxNumSGPRs = ST.getMaxNumSGPRs(MF); @@ -1706,6 +1714,25 @@ } } +unsigned SIRegisterInfo::getVCC() const { + return isWave32 ? AMDGPU::VCC_LO : AMDGPU::VCC; +} + +const TargetRegisterClass * +SIRegisterInfo::getRegClass(unsigned RCID) const { + switch ((int)RCID) { + case AMDGPU::SReg_1RegClassID: + return getBoolRC(); + case AMDGPU::SReg_1_XEXECRegClassID: + return isWave32 ? &AMDGPU::SReg_32_XM0_XEXECRegClass + : &AMDGPU::SReg_64_XEXECRegClass; + case -1: + return nullptr; + default: + return AMDGPURegisterInfo::getRegClass(RCID); + } +} + // Find reaching register definition MachineInstr *SIRegisterInfo::findReachingDef(unsigned Reg, unsigned SubReg, MachineInstr &Use, Index: lib/Target/AMDGPU/SIRegisterInfo.td =================================================================== --- lib/Target/AMDGPU/SIRegisterInfo.td +++ lib/Target/AMDGPU/SIRegisterInfo.td @@ -454,7 +454,7 @@ // Subset of SReg_32 without M0 for SMRD instructions and alike. // See comments in SIInstructions.td for more info. 
-def SReg_32_XM0_XEXEC : RegisterClass<"AMDGPU", [i32, f32, i16, f16, v2i16, v2f16], 32, +def SReg_32_XM0_XEXEC : RegisterClass<"AMDGPU", [i32, f32, i16, f16, v2i16, v2f16, i1], 32, (add SGPR_32, VCC_LO, VCC_HI, FLAT_SCR_LO, FLAT_SCR_HI, XNACK_MASK_LO, XNACK_MASK_HI, SGPR_NULL, TTMP_32, TMA_LO, TMA_HI, TBA_LO, TBA_HI, SRC_SHARED_BASE, SRC_SHARED_LIMIT, SRC_PRIVATE_BASE, SRC_PRIVATE_LIMIT, SRC_POPS_EXITING_WAVE_ID, @@ -462,23 +462,23 @@ let AllocationPriority = 8; } -def SReg_32_XEXEC_HI : RegisterClass<"AMDGPU", [i32, f32, i16, f16, v2i16, v2f16], 32, +def SReg_32_XEXEC_HI : RegisterClass<"AMDGPU", [i32, f32, i16, f16, v2i16, v2f16, i1], 32, (add SReg_32_XM0_XEXEC, EXEC_LO, M0_CLASS)> { let AllocationPriority = 8; } -def SReg_32_XM0 : RegisterClass<"AMDGPU", [i32, f32, i16, f16, v2i16, v2f16], 32, +def SReg_32_XM0 : RegisterClass<"AMDGPU", [i32, f32, i16, f16, v2i16, v2f16, i1], 32, (add SReg_32_XM0_XEXEC, EXEC_LO, EXEC_HI)> { let AllocationPriority = 8; } // Register class for all scalar registers (SGPRs + Special Registers) -def SReg_32 : RegisterClass<"AMDGPU", [i32, f32, i16, f16, v2i16, v2f16], 32, +def SReg_32 : RegisterClass<"AMDGPU", [i32, f32, i16, f16, v2i16, v2f16, i1], 32, (add SReg_32_XM0, M0_CLASS, EXEC_LO, EXEC_HI, SReg_32_XEXEC_HI)> { let AllocationPriority = 8; } -def SRegOrLds_32 : RegisterClass<"AMDGPU", [i32, f32, i16, f16, v2i16, v2f16], 32, +def SRegOrLds_32 : RegisterClass<"AMDGPU", [i32, f32, i16, f16, v2i16, v2f16, i1], 32, (add SReg_32_XM0, M0_CLASS, EXEC_LO, EXEC_HI, SReg_32_XEXEC_HI, LDS_DIRECT_CLASS)> { let isAllocatable = 0; } @@ -504,6 +504,18 @@ let AllocationPriority = 9; } +def SReg_1_XEXEC : RegisterClass<"AMDGPU", [i1], 32, + (add SReg_64_XEXEC, SReg_32_XM0_XEXEC)> { + let CopyCost = 1; + let isAllocatable = 0; +} + +def SReg_1 : RegisterClass<"AMDGPU", [i1], 32, + (add SReg_1_XEXEC, EXEC, EXEC_LO)> { + let CopyCost = 1; + let isAllocatable = 0; +} + // Requires 2 s_mov_b64 to copy let CopyCost = 2 in { @@ -721,8 +733,6 @@ defm SCSrc : RegInlineOperand<"SReg", "SCSrc"> ; -def SCSrc_i1 : RegisterOperand; - //===----------------------------------------------------------------------===// // VSrc_* Operands with an SGPR, VGPR or a 32-bit immediate //===----------------------------------------------------------------------===// Index: lib/Target/AMDGPU/SIShrinkInstructions.cpp =================================================================== --- lib/Target/AMDGPU/SIShrinkInstructions.cpp +++ lib/Target/AMDGPU/SIShrinkInstructions.cpp @@ -551,6 +551,7 @@ MachineRegisterInfo &MRI = MF.getRegInfo(); const GCNSubtarget &ST = MF.getSubtarget(); const SIInstrInfo *TII = ST.getInstrInfo(); + unsigned VCCReg = ST.isWave32() ? AMDGPU::VCC_LO : AMDGPU::VCC; std::vector I1Defs; @@ -726,10 +727,10 @@ // So, instead of forcing the instruction to write to VCC, we provide // a hint to the register allocator to use VCC and then we will run // this pass again after RA and shrink it if it outputs to VCC. 
- MRI.setRegAllocationHint(MI.getOperand(0).getReg(), 0, AMDGPU::VCC); + MRI.setRegAllocationHint(MI.getOperand(0).getReg(), 0, VCCReg); continue; } - if (DstReg != AMDGPU::VCC) + if (DstReg != VCCReg) continue; } @@ -742,10 +743,10 @@ continue; unsigned SReg = Src2->getReg(); if (TargetRegisterInfo::isVirtualRegister(SReg)) { - MRI.setRegAllocationHint(SReg, 0, AMDGPU::VCC); + MRI.setRegAllocationHint(SReg, 0, VCCReg); continue; } - if (SReg != AMDGPU::VCC) + if (SReg != VCCReg) continue; } @@ -758,20 +759,24 @@ AMDGPU::OpName::src2); if (SDst) { - if (SDst->getReg() != AMDGPU::VCC) { + bool Next = false; + + if (SDst->getReg() != VCCReg) { if (TargetRegisterInfo::isVirtualRegister(SDst->getReg())) - MRI.setRegAllocationHint(SDst->getReg(), 0, AMDGPU::VCC); - continue; + MRI.setRegAllocationHint(SDst->getReg(), 0, VCCReg); + Next = true; } // All of the instructions with carry outs also have an SGPR input in // src2. - if (Src2 && Src2->getReg() != AMDGPU::VCC) { + if (Src2 && Src2->getReg() != VCCReg) { if (TargetRegisterInfo::isVirtualRegister(Src2->getReg())) - MRI.setRegAllocationHint(Src2->getReg(), 0, AMDGPU::VCC); + MRI.setRegAllocationHint(Src2->getReg(), 0, VCCReg); + Next = true; + } + if (Next) continue; - } } // We can shrink this instruction Index: lib/Target/AMDGPU/SIWholeQuadMode.cpp =================================================================== --- lib/Target/AMDGPU/SIWholeQuadMode.cpp +++ lib/Target/AMDGPU/SIWholeQuadMode.cpp @@ -148,6 +148,7 @@ CallingConv::ID CallingConv; const SIInstrInfo *TII; const SIRegisterInfo *TRI; + const GCNSubtarget *ST; MachineRegisterInfo *MRI; LiveIntervals *LIS; @@ -278,7 +279,7 @@ // for VCC, which can appear as the (implicit) input of a uniform branch, // e.g. when a loop counter is stored in a VGPR. if (!TargetRegisterInfo::isVirtualRegister(Reg)) { - if (Reg == AMDGPU::EXEC) + if (Reg == AMDGPU::EXEC || Reg == AMDGPU::EXEC_LO) continue; for (MCRegUnitIterator RegUnit(Reg, TRI); RegUnit.isValid(); ++RegUnit) { @@ -620,13 +621,16 @@ MachineInstr *MI; if (SaveWQM) { - MI = BuildMI(MBB, Before, DebugLoc(), TII->get(AMDGPU::S_AND_SAVEEXEC_B64), + MI = BuildMI(MBB, Before, DebugLoc(), TII->get(ST->isWave32() ? + AMDGPU::S_AND_SAVEEXEC_B32 : AMDGPU::S_AND_SAVEEXEC_B64), SaveWQM) .addReg(LiveMaskReg); } else { - MI = BuildMI(MBB, Before, DebugLoc(), TII->get(AMDGPU::S_AND_B64), - AMDGPU::EXEC) - .addReg(AMDGPU::EXEC) + unsigned Exec = ST->isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC; + MI = BuildMI(MBB, Before, DebugLoc(), TII->get(ST->isWave32() ? + AMDGPU::S_AND_B32 : AMDGPU::S_AND_B64), + Exec) + .addReg(Exec) .addReg(LiveMaskReg); } @@ -638,13 +642,15 @@ unsigned SavedWQM) { MachineInstr *MI; + unsigned Exec = ST->isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC; if (SavedWQM) { - MI = BuildMI(MBB, Before, DebugLoc(), TII->get(AMDGPU::COPY), AMDGPU::EXEC) + MI = BuildMI(MBB, Before, DebugLoc(), TII->get(AMDGPU::COPY), Exec) .addReg(SavedWQM); } else { - MI = BuildMI(MBB, Before, DebugLoc(), TII->get(AMDGPU::S_WQM_B64), - AMDGPU::EXEC) - .addReg(AMDGPU::EXEC); + MI = BuildMI(MBB, Before, DebugLoc(), TII->get(ST->isWave32() ? + AMDGPU::S_WQM_B32 : AMDGPU::S_WQM_B64), + Exec) + .addReg(Exec); } LIS->InsertMachineInstrInMaps(*MI); @@ -667,7 +673,8 @@ MachineInstr *MI; assert(SavedOrig); - MI = BuildMI(MBB, Before, DebugLoc(), TII->get(AMDGPU::EXIT_WWM), AMDGPU::EXEC) + MI = BuildMI(MBB, Before, DebugLoc(), TII->get(AMDGPU::EXIT_WWM), + ST->isWave32() ? 
AMDGPU::EXEC_LO : AMDGPU::EXEC) .addReg(SavedOrig); LIS->InsertMachineInstrInMaps(*MI); } @@ -693,6 +700,7 @@ bool WQMFromExec = isEntry; char State = (isEntry || !(BI.InNeeds & StateWQM)) ? StateExact : StateWQM; char NonWWMState = 0; + const TargetRegisterClass *BoolRC = TRI->getBoolRC(); auto II = MBB.getFirstNonPHI(), IE = MBB.end(); if (isEntry) @@ -780,13 +788,13 @@ if (Needs == StateWWM) { NonWWMState = State; - SavedNonWWMReg = MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass); + SavedNonWWMReg = MRI->createVirtualRegister(BoolRC); toWWM(MBB, Before, SavedNonWWMReg); State = StateWWM; } else { if (State == StateWQM && (Needs & StateExact) && !(Needs & StateWQM)) { if (!WQMFromExec && (OutNeeds & StateWQM)) - SavedWQMReg = MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass); + SavedWQMReg = MRI->createVirtualRegister(BoolRC); toExact(MBB, Before, SavedWQMReg, LiveMaskReg); State = StateExact; @@ -865,17 +873,18 @@ LowerToCopyInstrs.clear(); CallingConv = MF.getFunction().getCallingConv(); - const GCNSubtarget &ST = MF.getSubtarget(); + ST = &MF.getSubtarget(); - TII = ST.getInstrInfo(); + TII = ST->getInstrInfo(); TRI = &TII->getRegisterInfo(); MRI = &MF.getRegInfo(); LIS = &getAnalysis(); char GlobalFlags = analyzeFunction(MF); unsigned LiveMaskReg = 0; + unsigned Exec = ST->isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC; if (!(GlobalFlags & StateWQM)) { - lowerLiveMaskQueries(AMDGPU::EXEC); + lowerLiveMaskQueries(Exec); if (!(GlobalFlags & StateWWM)) return !LiveMaskQueries.empty(); } else { @@ -884,10 +893,10 @@ MachineBasicBlock::iterator EntryMI = Entry.getFirstNonPHI(); if (GlobalFlags & StateExact || !LiveMaskQueries.empty()) { - LiveMaskReg = MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass); + LiveMaskReg = MRI->createVirtualRegister(TRI->getBoolRC()); MachineInstr *MI = BuildMI(Entry, EntryMI, DebugLoc(), TII->get(AMDGPU::COPY), LiveMaskReg) - .addReg(AMDGPU::EXEC); + .addReg(Exec); LIS->InsertMachineInstrInMaps(*MI); } @@ -895,9 +904,10 @@ if (GlobalFlags == StateWQM) { // For a shader that needs only WQM, we can just set it once. - BuildMI(Entry, EntryMI, DebugLoc(), TII->get(AMDGPU::S_WQM_B64), - AMDGPU::EXEC) - .addReg(AMDGPU::EXEC); + BuildMI(Entry, EntryMI, DebugLoc(), TII->get(ST->isWave32() ? 
+ AMDGPU::S_WQM_B32 : AMDGPU::S_WQM_B64), + Exec) + .addReg(Exec); lowerCopyInstrs(); // EntryMI may become invalid here Index: lib/Target/AMDGPU/SOPInstructions.td =================================================================== --- lib/Target/AMDGPU/SOPInstructions.td +++ lib/Target/AMDGPU/SOPInstructions.td @@ -152,12 +152,24 @@ [(set i64:$sdst, (not i64:$src0))] >; def S_WQM_B32 : SOP1_32 <"s_wqm_b32">; - def S_WQM_B64 : SOP1_64 <"s_wqm_b64", - [(set i1:$sdst, (int_amdgcn_wqm_vote i1:$src0))] - >; + def S_WQM_B64 : SOP1_64 <"s_wqm_b64">; } // End Defs = [SCC] +let WaveSizePredicate = isWave32 in { +def : GCNPat < + (int_amdgcn_wqm_vote i1:$src0), + (S_WQM_B32 $src0) +>; +} + +let WaveSizePredicate = isWave64 in { +def : GCNPat < + (int_amdgcn_wqm_vote i1:$src0), + (S_WQM_B64 $src0) +>; +} + def S_BREV_B32 : SOP1_32 <"s_brev_b32", [(set i32:$sdst, (bitreverse i32:$src0))] >; Index: lib/Target/AMDGPU/VOP2Instructions.td =================================================================== --- lib/Target/AMDGPU/VOP2Instructions.td +++ lib/Target/AMDGPU/VOP2Instructions.td @@ -175,7 +175,9 @@ let SchedRW = [Write32Bit, WriteSALU] in { let Uses = !if(useSGPRInput, [VCC, EXEC], [EXEC]), Defs = [VCC] in { def _e32 : VOP2_Pseudo .ret>, - Commutable_REV; + Commutable_REV { + let usesCustomInserter = !eq(P.NumSrcArgs, 2); + } def _sdwa : VOP2_SDWA_Pseudo { let AsmMatchConverter = "cvtSdwaVOP2b"; @@ -328,7 +330,7 @@ let AsmDPP8 = "$vdst, vcc, $src0, $src1 $dpp8$fi"; let AsmDPP16 = AsmDPP#"$fi"; let Outs32 = (outs DstRC:$vdst); - let Outs64 = (outs DstRC:$vdst, SReg_64:$sdst); + let Outs64 = (outs DstRC:$vdst, VOPDstS64orS32:$sdst); } // Write out to vcc or arbitrary SGPR and read in from vcc or @@ -342,7 +344,7 @@ let AsmDPP8 = "$vdst, vcc, $src0, $src1, vcc $dpp8$fi"; let AsmDPP16 = AsmDPP#"$fi"; let Outs32 = (outs DstRC:$vdst); - let Outs64 = (outs DstRC:$vdst, SReg_64:$sdst); + let Outs64 = (outs DstRC:$vdst, VOPDstS64orS32:$sdst); // Suppress src2 implied by type since the 32-bit encoding uses an // implicit VCC use. Index: lib/Target/AMDGPU/VOP3Instructions.td =================================================================== --- lib/Target/AMDGPU/VOP3Instructions.td +++ lib/Target/AMDGPU/VOP3Instructions.td @@ -183,7 +183,7 @@ let HasModifiers = 0; let HasClamp = 0; let HasOMod = 0; - let Outs64 = (outs DstRC:$vdst, SReg_64:$sdst); + let Outs64 = (outs DstRC:$vdst, VOPDstS64orS32:$sdst); let Asm64 = " $vdst, $sdst, $src0, $src1, $src2"; } @@ -203,7 +203,7 @@ // FIXME: Hack to stop printing _e64 let DstRC = RegisterOperand; - let Outs64 = (outs DstRC:$vdst, SReg_64:$sdst); + let Outs64 = (outs DstRC:$vdst, VOPDstS64orS32:$sdst); let Asm64 = " $vdst, $sdst, $src0, $src1, $src2$clamp"; } Index: lib/Target/AMDGPU/VOPCInstructions.td =================================================================== --- lib/Target/AMDGPU/VOPCInstructions.td +++ lib/Target/AMDGPU/VOPCInstructions.td @@ -56,7 +56,7 @@ let Asm32 = "$src0, $src1"; // The destination for 32-bit encoding is implicit. 
let HasDst32 = 0; - let Outs64 = (outs VOPDstS64:$sdst); + let Outs64 = (outs VOPDstS64orS32:$sdst); list Schedule = sched; } Index: lib/Transforms/InstCombine/InstCombineCalls.cpp =================================================================== --- lib/Transforms/InstCombine/InstCombineCalls.cpp +++ lib/Transforms/InstCombine/InstCombineCalls.cpp @@ -3733,7 +3733,9 @@ break; Function *NewF = - Intrinsic::getDeclaration(II->getModule(), NewIID, SrcLHS->getType()); + Intrinsic::getDeclaration(II->getModule(), NewIID, + { II->getType(), + SrcLHS->getType() }); Value *Args[] = { SrcLHS, SrcRHS, ConstantInt::get(CC->getType(), SrcPred) }; CallInst *NewCall = Builder.CreateCall(NewF, Args); Index: test/CodeGen/AMDGPU/add3.ll =================================================================== --- test/CodeGen/AMDGPU/add3.ll +++ test/CodeGen/AMDGPU/add3.ll @@ -22,6 +22,7 @@ ; GFX10-LABEL: add3: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_add3_u32 v0, v0, v1, v2 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: ; return to shader part epilog %x = add i32 %a, %b %result = add i32 %x, %c @@ -46,6 +47,7 @@ ; GFX10-LABEL: mad_no_add3: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_mad_u32_u24 v0, v0, v1, v4 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: v_mad_u32_u24 v0, v2, v3, v0 ; GFX10-NEXT: ; return to shader part epilog %a0 = shl i32 %a, 8 @@ -85,6 +87,7 @@ ; GFX10-LABEL: add3_vgpr_b: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_add3_u32 v0, s3, s2, v0 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: ; return to shader part epilog %x = add i32 %a, %b %result = add i32 %x, %c @@ -107,6 +110,7 @@ ; GFX10-LABEL: add3_vgpr_all2: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_add3_u32 v0, v1, v2, v0 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: ; return to shader part epilog %x = add i32 %b, %c %result = add i32 %a, %x @@ -129,6 +133,7 @@ ; GFX10-LABEL: add3_vgpr_bc: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_add3_u32 v0, s2, v0, v1 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: ; return to shader part epilog %x = add i32 %a, %b %result = add i32 %x, %c @@ -151,6 +156,7 @@ ; GFX10-LABEL: add3_vgpr_const: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_add3_u32 v0, v0, v1, 16 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: ; return to shader part epilog %x = add i32 %a, %b %result = add i32 %x, 16 @@ -175,6 +181,7 @@ ; GFX10-LABEL: add3_multiuse_outer: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_add3_u32 v0, v0, v1, v2 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: v_mul_lo_u32 v1, v0, v3 ; GFX10-NEXT: ; return to shader part epilog %inner = add i32 %a, %b @@ -202,6 +209,7 @@ ; GFX10-LABEL: add3_multiuse_inner: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_add_nc_u32_e32 v0, v0, v1 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: v_add_nc_u32_e32 v1, v0, v2 ; GFX10-NEXT: ; return to shader part epilog %inner = add i32 %a, %b @@ -240,6 +248,7 @@ ; GFX10-NEXT: v_add_f32_e64 v1, s3, 2.0 ; GFX10-NEXT: v_add_f32_e64 v2, s2, 1.0 ; GFX10-NEXT: v_add_f32_e64 v0, 0x40400000, s4 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: v_add_nc_u32_e32 v1, v2, v1 ; GFX10-NEXT: v_add_nc_u32_e32 v0, v1, v0 ; GFX10-NEXT: ; return to shader part epilog Index: test/CodeGen/AMDGPU/add_i1.ll =================================================================== --- test/CodeGen/AMDGPU/add_i1.ll +++ test/CodeGen/AMDGPU/add_i1.ll @@ -1,8 +1,10 @@ -; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s +; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX9 %s +; RUN: llc -march=amdgcn 
-mcpu=gfx1010 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX10 %s ; GCN-LABEL: {{^}}add_var_var_i1: -; GCN: s_xor_b64 +; GFX9: s_xor_b64 +; GFX10: s_xor_b32 define amdgpu_kernel void @add_var_var_i1(i1 addrspace(1)* %out, i1 addrspace(1)* %in0, i1 addrspace(1)* %in1) { %a = load volatile i1, i1 addrspace(1)* %in0 %b = load volatile i1, i1 addrspace(1)* %in1 @@ -12,7 +14,8 @@ } ; GCN-LABEL: {{^}}add_var_imm_i1: -; GCN: s_not_b64 +; GFX9: s_not_b64 +; GFX10: s_not_b32 define amdgpu_kernel void @add_var_imm_i1(i1 addrspace(1)* %out, i1 addrspace(1)* %in) { %a = load volatile i1, i1 addrspace(1)* %in %add = add i1 %a, 1 @@ -22,7 +25,8 @@ ; GCN-LABEL: {{^}}add_i1_cf: ; GCN: ; %endif -; GCN: s_not_b64 +; GFX9: s_not_b64 +; GFX10: s_not_b32 define amdgpu_kernel void @add_i1_cf(i1 addrspace(1)* %out, i1 addrspace(1)* %a, i1 addrspace(1)* %b) { entry: %tid = call i32 @llvm.amdgcn.workitem.id.x() Index: test/CodeGen/AMDGPU/add_shl.ll =================================================================== --- test/CodeGen/AMDGPU/add_shl.ll +++ test/CodeGen/AMDGPU/add_shl.ll @@ -22,6 +22,7 @@ ; GFX10-LABEL: add_shl: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_add_lshl_u32 v0, v0, v1, v2 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: ; return to shader part epilog %x = add i32 %a, %b %result = shl i32 %x, %c @@ -45,6 +46,7 @@ ; GFX10-LABEL: add_shl_vgpr_c: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_add_lshl_u32 v0, s2, s3, v0 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: ; return to shader part epilog %x = add i32 %a, %b %result = shl i32 %x, %c @@ -67,6 +69,7 @@ ; GFX10-LABEL: add_shl_vgpr_ac: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_add_lshl_u32 v0, v0, s2, v1 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: ; return to shader part epilog %x = add i32 %a, %b %result = shl i32 %x, %c @@ -89,6 +92,7 @@ ; GFX10-LABEL: add_shl_vgpr_const: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_add_lshl_u32 v0, v0, v1, 9 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: ; return to shader part epilog %x = add i32 %a, %b %result = shl i32 %x, 9 @@ -112,6 +116,7 @@ ; GFX10-LABEL: add_shl_vgpr_const_inline_const: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_lshl_add_u32 v0, v0, 9, 0x7e800 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: ; return to shader part epilog %x = add i32 %a, 1012 %result = shl i32 %x, 9 @@ -138,6 +143,7 @@ ; GFX10-LABEL: add_shl_vgpr_inline_const_x2: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_lshl_add_u32 v0, v0, 9, 0x600 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: ; return to shader part epilog %x = add i32 %a, 3 %result = shl i32 %x, 9 Index: test/CodeGen/AMDGPU/and_or.ll =================================================================== --- test/CodeGen/AMDGPU/and_or.ll +++ test/CodeGen/AMDGPU/and_or.ll @@ -22,6 +22,7 @@ ; GFX10-LABEL: and_or: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_and_or_b32 v0, v0, v1, v2 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: ; return to shader part epilog %x = and i32 %a, %b %result = or i32 %x, %c @@ -46,6 +47,7 @@ ; GFX10-LABEL: and_or_vgpr_b: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_and_or_b32 v0, s2, v0, s3 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: ; return to shader part epilog %x = and i32 %a, %b %result = or i32 %x, %c @@ -68,6 +70,7 @@ ; GFX10-LABEL: and_or_vgpr_ab: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_and_or_b32 v0, v0, v1, s2 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: ; return to shader part epilog %x = and i32 %a, %b %result = or i32 %x, %c @@ -90,6 +93,7 @@ ; GFX10-LABEL: and_or_vgpr_const: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_and_or_b32 v0, v0, 4, v1 
+; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: ; return to shader part epilog %x = and i32 4, %a %result = or i32 %x, %b @@ -113,6 +117,7 @@ ; GFX10-LABEL: and_or_vgpr_const_inline_const: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_and_or_b32 v0, v0, 20, 0x808 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: ; return to shader part epilog %x = and i32 20, %a %result = or i32 %x, 2056 @@ -135,6 +140,7 @@ ; GFX10-LABEL: and_or_vgpr_inline_const_x2: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_and_or_b32 v0, v0, 4, 1 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: ; return to shader part epilog %x = and i32 4, %a %result = or i32 %x, 1 Index: test/CodeGen/AMDGPU/diverge-switch-default.ll =================================================================== --- test/CodeGen/AMDGPU/diverge-switch-default.ll +++ test/CodeGen/AMDGPU/diverge-switch-default.ll @@ -38,8 +38,8 @@ ; CHECK: load i8 ; CHECK-NOT: {{ br }} ; CHECK: [[ICMP:%[a-zA-Z0-9._]+]] = icmp eq -; CHECK: [[IF:%[a-zA-Z0-9._]+]] = call i64 @llvm.amdgcn.if.break(i1 [[ICMP]], i64 [[PHI]]) -; CHECK: [[LOOP:%[a-zA-Z0-9._]+]] = call i1 @llvm.amdgcn.loop(i64 [[IF]]) +; CHECK: [[IF:%[a-zA-Z0-9._]+]] = call i64 @llvm.amdgcn.if.break.i64.i64(i1 [[ICMP]], i64 [[PHI]]) +; CHECK: [[LOOP:%[a-zA-Z0-9._]+]] = call i1 @llvm.amdgcn.loop.i64(i64 [[IF]]) ; CHECK: br i1 [[LOOP]] sw.while: Index: test/CodeGen/AMDGPU/huge-private-buffer.ll =================================================================== --- test/CodeGen/AMDGPU/huge-private-buffer.ll +++ test/CodeGen/AMDGPU/huge-private-buffer.ll @@ -1,9 +1,23 @@ -; RUN: llc -mtriple=amdgcn-amd-amdhsa -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GCN %s +; RUN: llc -mtriple=amdgcn-amd-amdhsa -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,WAVE64 %s +; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,WAVE32 %s + +; GCN-LABEL: {{^}}scratch_buffer_known_high_masklo14: +; GCN: v_mov_b32_e32 [[FI:v[0-9]+]], 4 +; GCN: v_and_b32_e32 [[MASKED:v[0-9]+]], 0x3ffc, [[FI]] +; GCN: {{flat|global}}_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[MASKED]] +define amdgpu_kernel void @scratch_buffer_known_high_masklo14() #0 { + %alloca = alloca i32, align 4, addrspace(5) + store volatile i32 0, i32 addrspace(5)* %alloca + %toint = ptrtoint i32 addrspace(5)* %alloca to i32 + %masked = and i32 %toint, 16383 + store volatile i32 %masked, i32 addrspace(1)* undef + ret void +} ; GCN-LABEL: {{^}}scratch_buffer_known_high_masklo16: ; GCN: v_mov_b32_e32 [[FI:v[0-9]+]], 4 ; GCN: v_and_b32_e32 [[MASKED:v[0-9]+]], 0xfffc, [[FI]] -; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[MASKED]] +; GCN: {{flat|global}}_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[MASKED]] define amdgpu_kernel void @scratch_buffer_known_high_masklo16() #0 { %alloca = alloca i32, align 4, addrspace(5) store volatile i32 0, i32 addrspace(5)* %alloca @@ -15,8 +29,11 @@ ; GCN-LABEL: {{^}}scratch_buffer_known_high_masklo17: ; GCN: v_mov_b32_e32 [[FI:v[0-9]+]], 4 -; GCN-NOT: [[FI]] -; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[FI]] +; WAVE64-NOT: [[FI]] +; WAVE64: {{flat|global}}_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[FI]] + +; WAVE32: v_and_b32_e32 [[MASKED:v[0-9]+]], 0x1fffc, [[FI]] +; WAVE32: {{flat|global}}_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[MASKED]] define amdgpu_kernel void @scratch_buffer_known_high_masklo17() #0 { %alloca = alloca i32, align 4, addrspace(5) store volatile i32 0, i32 addrspace(5)* %alloca @@ -29,7 +46,7 @@ ; GCN-LABEL: 
{{^}}scratch_buffer_known_high_mask18: ; GCN: v_mov_b32_e32 [[FI:v[0-9]+]], 4 ; GCN-NOT: [[FI]] -; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[FI]] +; GCN: {{flat|global}}_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[FI]] define amdgpu_kernel void @scratch_buffer_known_high_mask18() #0 { %alloca = alloca i32, align 4, addrspace(5) store volatile i32 0, i32 addrspace(5)* %alloca Index: test/CodeGen/AMDGPU/insert-skip-from-vcc.mir =================================================================== --- test/CodeGen/AMDGPU/insert-skip-from-vcc.mir +++ test/CodeGen/AMDGPU/insert-skip-from-vcc.mir @@ -1,4 +1,5 @@ # RUN: llc -march=amdgcn -mcpu=fiji -run-pass si-insert-skips -verify-machineinstrs -o - %s | FileCheck -check-prefix=GCN %s +# RUN: llc -march=amdgcn -mcpu=gfx1010 -run-pass si-insert-skips -verify-machineinstrs -o - %s | FileCheck -check-prefix=W32 %s --- # GCN-LABEL: name: and_execz_mov_vccz @@ -318,3 +319,22 @@ S_CBRANCH_VCCZ %bb.1, implicit killed $vcc S_ENDPGM 0, implicit $scc ... +--- +# W32-LABEL: name: and_execz_mov_vccz_w32 +# W32-NOT: S_MOV_ +# W32-NOT: S_AND_ +# W32: S_CBRANCH_EXECZ %bb.1, implicit $exec +name: and_execz_mov_vccz_w32 +body: | + bb.0: + S_NOP 0 + + bb.1: + S_NOP 0 + + bb.2: + $sgpr0 = S_MOV_B32 -1 + $vcc_lo = S_AND_B32 $exec_lo, killed $sgpr0, implicit-def dead $scc + S_CBRANCH_VCCZ %bb.1, implicit killed $vcc + S_ENDPGM 0 +... Index: test/CodeGen/AMDGPU/large-work-group-promote-alloca.ll =================================================================== --- test/CodeGen/AMDGPU/large-work-group-promote-alloca.ll +++ test/CodeGen/AMDGPU/large-work-group-promote-alloca.ll @@ -1,5 +1,6 @@ -; RUN: opt -S -mtriple=amdgcn-unknown-unknown -amdgpu-promote-alloca -disable-promote-alloca-to-vector < %s | FileCheck --check-prefix=SI --check-prefix=ALL %s -; RUN: opt -S -mcpu=tonga -mtriple=amdgcn-unknown-unknown -amdgpu-promote-alloca -disable-promote-alloca-to-vector < %s | FileCheck --check-prefix=CI --check-prefix=ALL %s +; RUN: opt -S -mtriple=amdgcn-unknown-unknown -amdgpu-promote-alloca -disable-promote-alloca-to-vector < %s | FileCheck --check-prefixes=SI,SICI,ALL %s +; RUN: opt -S -mcpu=tonga -mtriple=amdgcn-unknown-unknown -amdgpu-promote-alloca -disable-promote-alloca-to-vector < %s | FileCheck --check-prefixes=CI,SICI,ALL %s +; RUN: opt -S -mcpu=gfx1010 -mtriple=amdgcn-unknown-unknown -amdgpu-promote-alloca -disable-promote-alloca-to-vector < %s | FileCheck --check-prefixes=GFX10,ALL %s ; SI-NOT: @promote_alloca_size_63.stack = internal unnamed_addr addrspace(3) global [63 x [5 x i32]] undef, align 4 ; CI: @promote_alloca_size_63.stack = internal unnamed_addr addrspace(3) global [63 x [5 x i32]] undef, align 4 @@ -46,7 +47,8 @@ ret void } -; ALL: @promote_alloca_size_1600.stack = internal unnamed_addr addrspace(3) global [1600 x [5 x i32]] undef, align 4 +; SICI: @promote_alloca_size_1600.stack = internal unnamed_addr addrspace(3) global [1600 x [5 x i32]] undef, align 4 +; GFX10: alloca [5 x i32] define amdgpu_kernel void @promote_alloca_size_1600(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* nocapture %in) #2 { entry: @@ -141,7 +143,9 @@ } ; ALL-LABEL: @occupancy_6_over( -; ALL: alloca [43 x i8] +; SICI: alloca [43 x i8] +; GFX10-NOT: alloca + define amdgpu_kernel void @occupancy_6_over(i8 addrspace(1)* nocapture %out, i8 addrspace(1)* nocapture %in) #5 { entry: %stack = alloca [43 x i8], align 4 @@ -191,7 +195,9 @@ } ; ALL-LABEL: @occupancy_8_over( -; ALL: alloca [33 x i8] +; SICI: alloca [33 x i8] +; GFX10-NOT: alloca + define amdgpu_kernel void 
@occupancy_8_over(i8 addrspace(1)* nocapture %out, i8 addrspace(1)* nocapture %in) #6 { entry: %stack = alloca [33 x i8], align 4 @@ -241,7 +247,9 @@ } ; ALL-LABEL: @occupancy_9_over( -; ALL: alloca [29 x i8] +; SICI: alloca [29 x i8] +; GFX10-NOT: alloca + define amdgpu_kernel void @occupancy_9_over(i8 addrspace(1)* nocapture %out, i8 addrspace(1)* nocapture %in) #7 { entry: %stack = alloca [29 x i8], align 4 Index: test/CodeGen/AMDGPU/loop_break.ll =================================================================== --- test/CodeGen/AMDGPU/loop_break.ll +++ test/CodeGen/AMDGPU/loop_break.ll @@ -15,12 +15,12 @@ ; OPT: br label %Flow ; OPT: Flow: -; OPT: call i64 @llvm.amdgcn.if.break( -; OPT: call i1 @llvm.amdgcn.loop(i64 +; OPT: call i64 @llvm.amdgcn.if.break.i64.i64( +; OPT: call i1 @llvm.amdgcn.loop.i64(i64 ; OPT: br i1 %{{[0-9]+}}, label %bb9, label %bb1 ; OPT: bb9: -; OPT: call void @llvm.amdgcn.end.cf(i64 +; OPT: call void @llvm.amdgcn.end.cf.i64(i64 ; GCN-LABEL: {{^}}break_loop: ; GCN: s_mov_b64 [[OUTER_MASK:s\[[0-9]+:[0-9]+\]]], 0{{$}} @@ -84,12 +84,12 @@ ; OPT: Flow: ; OPT-NEXT: %tmp2 = phi i32 [ %lsr.iv.next, %bb4 ], [ undef, %bb1 ] ; OPT-NEXT: %tmp3 = phi i1 [ %cmp1, %bb4 ], [ undef, %bb1 ] -; OPT-NEXT: %0 = call i64 @llvm.amdgcn.if.break(i1 %tmp3, i64 %phi.broken) -; OPT-NEXT: %1 = call i1 @llvm.amdgcn.loop(i64 %0) +; OPT-NEXT: %0 = call i64 @llvm.amdgcn.if.break.i64.i64(i1 %tmp3, i64 %phi.broken) +; OPT-NEXT: %1 = call i1 @llvm.amdgcn.loop.i64(i64 %0) ; OPT-NEXT: br i1 %1, label %bb9, label %bb1 ; OPT: bb9: ; preds = %Flow -; OPT-NEXT: call void @llvm.amdgcn.end.cf(i64 %0) +; OPT-NEXT: call void @llvm.amdgcn.end.cf.i64(i64 %0) ; OPT-NEXT: store volatile i32 7 ; OPT-NEXT: ret void define amdgpu_kernel void @undef_phi_cond_break_loop(i32 %arg) #0 { @@ -138,12 +138,12 @@ ; OPT: Flow: ; OPT-NEXT: %tmp2 = phi i32 [ %lsr.iv.next, %bb4 ], [ undef, %bb1 ] ; OPT-NEXT: %tmp3 = phi i1 [ %cmp1, %bb4 ], [ icmp ne (i32 addrspace(3)* inttoptr (i32 4 to i32 addrspace(3)*), i32 addrspace(3)* @lds), %bb1 ] -; OPT-NEXT: %0 = call i64 @llvm.amdgcn.if.break(i1 %tmp3, i64 %phi.broken) -; OPT-NEXT: %1 = call i1 @llvm.amdgcn.loop(i64 %0) +; OPT-NEXT: %0 = call i64 @llvm.amdgcn.if.break.i64.i64(i1 %tmp3, i64 %phi.broken) +; OPT-NEXT: %1 = call i1 @llvm.amdgcn.loop.i64(i64 %0) ; OPT-NEXT: br i1 %1, label %bb9, label %bb1 ; OPT: bb9: ; preds = %Flow -; OPT-NEXT: call void @llvm.amdgcn.end.cf(i64 %0) +; OPT-NEXT: call void @llvm.amdgcn.end.cf.i64(i64 %0) ; OPT-NEXT: store volatile i32 7 ; OPT-NEXT: ret void define amdgpu_kernel void @constexpr_phi_cond_break_loop(i32 %arg) #0 { @@ -189,12 +189,12 @@ ; OPT: Flow: ; OPT-NEXT: %tmp2 = phi i32 [ %lsr.iv.next, %bb4 ], [ undef, %bb1 ] ; OPT-NEXT: %tmp3 = phi i1 [ %cmp1, %bb4 ], [ true, %bb1 ] -; OPT-NEXT: %0 = call i64 @llvm.amdgcn.if.break(i1 %tmp3, i64 %phi.broken) -; OPT-NEXT: %1 = call i1 @llvm.amdgcn.loop(i64 %0) +; OPT-NEXT: %0 = call i64 @llvm.amdgcn.if.break.i64.i64(i1 %tmp3, i64 %phi.broken) +; OPT-NEXT: %1 = call i1 @llvm.amdgcn.loop.i64(i64 %0) ; OPT-NEXT: br i1 %1, label %bb9, label %bb1 ; OPT: bb9: ; preds = %Flow -; OPT-NEXT: call void @llvm.amdgcn.end.cf(i64 %0) +; OPT-NEXT: call void @llvm.amdgcn.end.cf.i64(i64 %0) ; OPT-NEXT: store volatile i32 7 ; OPT-NEXT: ret void define amdgpu_kernel void @true_phi_cond_break_loop(i32 %arg) #0 { @@ -239,12 +239,12 @@ ; OPT: Flow: ; OPT-NEXT: %tmp2 = phi i32 [ %lsr.iv.next, %bb4 ], [ undef, %bb1 ] ; OPT-NEXT: %tmp3 = phi i1 [ %cmp1, %bb4 ], [ false, %bb1 ] -; OPT-NEXT: %0 = call i64 @llvm.amdgcn.if.break(i1 
%tmp3, i64 %phi.broken) -; OPT-NEXT: %1 = call i1 @llvm.amdgcn.loop(i64 %0) +; OPT-NEXT: %0 = call i64 @llvm.amdgcn.if.break.i64.i64(i1 %tmp3, i64 %phi.broken) +; OPT-NEXT: %1 = call i1 @llvm.amdgcn.loop.i64(i64 %0) ; OPT-NEXT: br i1 %1, label %bb9, label %bb1 ; OPT: bb9: ; preds = %Flow -; OPT-NEXT: call void @llvm.amdgcn.end.cf(i64 %0) +; OPT-NEXT: call void @llvm.amdgcn.end.cf.i64(i64 %0) ; OPT-NEXT: store volatile i32 7 ; OPT-NEXT: ret void define amdgpu_kernel void @false_phi_cond_break_loop(i32 %arg) #0 { @@ -294,12 +294,12 @@ ; OPT-NEXT: %tmp2 = phi i32 [ %lsr.iv.next, %bb4 ], [ undef, %bb1 ] ; OPT-NEXT: %tmp3 = phi i1 [ %cmp1, %bb4 ], [ true, %bb1 ] ; OPT-NEXT: %0 = xor i1 %tmp3, true -; OPT-NEXT: %1 = call i64 @llvm.amdgcn.if.break(i1 %0, i64 %phi.broken) -; OPT-NEXT: %2 = call i1 @llvm.amdgcn.loop(i64 %1) +; OPT-NEXT: %1 = call i64 @llvm.amdgcn.if.break.i64.i64(i1 %0, i64 %phi.broken) +; OPT-NEXT: %2 = call i1 @llvm.amdgcn.loop.i64(i64 %1) ; OPT-NEXT: br i1 %2, label %bb9, label %bb1 ; OPT: bb9: -; OPT-NEXT: call void @llvm.amdgcn.end.cf(i64 %1) +; OPT-NEXT: call void @llvm.amdgcn.end.cf.i64(i64 %1) ; OPT-NEXT: store volatile i32 7, i32 addrspace(3)* undef ; OPT-NEXT: ret void define amdgpu_kernel void @invert_true_phi_cond_break_loop(i32 %arg) #0 { Index: test/CodeGen/AMDGPU/mubuf-legalize-operands.mir =================================================================== --- test/CodeGen/AMDGPU/mubuf-legalize-operands.mir +++ test/CodeGen/AMDGPU/mubuf-legalize-operands.mir @@ -1,5 +1,7 @@ -# RUN: llc -march=amdgcn -mcpu=gfx700 -verify-machineinstrs -verify-machine-dom-info --run-pass=si-fix-sgpr-copies -o - %s | FileCheck %s --check-prefixes=COMMON,ADDR64 -# RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs -verify-machine-dom-info --run-pass=si-fix-sgpr-copies -o - %s | FileCheck %s --check-prefixes=COMMON,NO-ADDR64 +# RUN: llc -march=amdgcn -mcpu=gfx700 -verify-machineinstrs -verify-machine-dom-info --run-pass=si-fix-sgpr-copies -o - %s | FileCheck %s --check-prefixes=W64,ADDR64 +# RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs -verify-machine-dom-info --run-pass=si-fix-sgpr-copies -o - %s | FileCheck %s --check-prefixes=W64,W64-NO-ADDR64 +# RUN: llc -march=amdgcn -mcpu=gfx1010 -mattr=-wavefrontsize32,+wavefrontsize64 -verify-machineinstrs -verify-machine-dom-info --run-pass=si-fix-sgpr-copies -o - %s | FileCheck %s --check-prefixes=W64,W64-NO-ADDR64 +# RUN: llc -march=amdgcn -mcpu=gfx1010 -mattr=+wavefrontsize32,-wavefrontsize64 -verify-machineinstrs -verify-machine-dom-info --run-pass=si-fix-sgpr-copies -o - %s | FileCheck %s --check-prefixes=W32 # Test that we correctly legalize VGPR Rsrc operands in MUBUF instructions. # @@ -7,27 +9,50 @@ # needing a waterfall. For all other instruction variants, and when we are # on non-ADDR64 hardware, we emit a waterfall loop. 
-# COMMON-LABEL: name: idxen -# COMMON-LABEL: bb.0: -# COMMON-NEXT: successors: %bb.1({{.*}}) -# COMMON: [[VRSRC:%[0-9]+]]:vreg_128 = REG_SEQUENCE %0, %subreg.sub0, %1, %subreg.sub1, %2, %subreg.sub2, %3, %subreg.sub3 -# COMMON: [[SAVEEXEC:%[0-9]+]]:sreg_64_xexec = S_MOV_B64 $exec -# COMMON-LABEL: bb.1: -# COMMON-NEXT: successors: %bb.1({{.*}}), %bb.2({{.*}}) -# COMMON: [[SRSRC0:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub0, implicit $exec -# COMMON: [[SRSRC1:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub1, implicit $exec -# COMMON: [[SRSRC2:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub2, implicit $exec -# COMMON: [[SRSRC3:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub3, implicit $exec -# COMMON: [[SRSRC:%[0-9]+]]:sreg_128 = REG_SEQUENCE [[SRSRC0]], %subreg.sub0, [[SRSRC1]], %subreg.sub1, [[SRSRC2]], %subreg.sub2, [[SRSRC3]], %subreg.sub3 -# COMMON: [[CMP0:%[0-9]+]]:sreg_64 = V_CMP_EQ_U64_e64 [[SRSRC]].sub0_sub1, [[VRSRC]].sub0_sub1, implicit $exec -# COMMON: [[CMP1:%[0-9]+]]:sreg_64 = V_CMP_EQ_U64_e64 [[SRSRC]].sub2_sub3, [[VRSRC]].sub2_sub3, implicit $exec -# COMMON: [[CMP:%[0-9]+]]:sreg_64 = S_AND_B64 [[CMP0]], [[CMP1]], implicit-def $scc -# COMMON: [[TMPEXEC:%[0-9]+]]:sreg_64 = S_AND_SAVEEXEC_B64 killed [[CMP]], implicit-def $exec, implicit-def $scc, implicit $exec -# COMMON: {{[0-9]+}}:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN %4, killed [[SRSRC]], 0, 0, 0, 0, 0, 0, implicit $exec -# COMMON: $exec = S_XOR_B64_term $exec, [[TMPEXEC]], implicit-def $scc -# COMMON: S_CBRANCH_EXECNZ %bb.1, implicit $exec -# COMMON-LABEL bb.2: -# COMMON: $exec = S_MOV_B64 [[SAVEEXEC]] +# W64-LABEL: name: idxen +# W64-LABEL: bb.0: +# W64-NEXT: successors: %bb.1({{.*}}) +# W64: [[VRSRC:%[0-9]+]]:vreg_128 = REG_SEQUENCE %0, %subreg.sub0, %1, %subreg.sub1, %2, %subreg.sub2, %3, %subreg.sub3 +# W64: [[SAVEEXEC:%[0-9]+]]:sreg_64_xexec = S_MOV_B64 $exec +# W64-LABEL: bb.1: +# W64-NEXT: successors: %bb.1({{.*}}), %bb.2({{.*}}) +# W64: [[SRSRC0:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub0, implicit $exec +# W64: [[SRSRC1:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub1, implicit $exec +# W64: [[SRSRC2:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub2, implicit $exec +# W64: [[SRSRC3:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub3, implicit $exec +# W64: [[SRSRC:%[0-9]+]]:sreg_128 = REG_SEQUENCE [[SRSRC0]], %subreg.sub0, [[SRSRC1]], %subreg.sub1, [[SRSRC2]], %subreg.sub2, [[SRSRC3]], %subreg.sub3 +# W64: [[CMP0:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U64_e64 [[SRSRC]].sub0_sub1, [[VRSRC]].sub0_sub1, implicit $exec +# W64: [[CMP1:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U64_e64 [[SRSRC]].sub2_sub3, [[VRSRC]].sub2_sub3, implicit $exec +# W64: [[CMP:%[0-9]+]]:sreg_64_xexec = S_AND_B64 [[CMP0]], [[CMP1]], implicit-def $scc +# W64: [[TMPEXEC:%[0-9]+]]:sreg_64_xexec = S_AND_SAVEEXEC_B64 killed [[CMP]], implicit-def $exec, implicit-def $scc, implicit $exec +# W64: {{[0-9]+}}:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN %4, killed [[SRSRC]], 0, 0, 0, 0, 0, 0, implicit $exec +# W64: $exec = S_XOR_B64_term $exec, [[TMPEXEC]], implicit-def $scc +# W64: S_CBRANCH_EXECNZ %bb.1, implicit $exec +# W64-LABEL bb.2: +# W64: $exec = S_MOV_B64 [[SAVEEXEC]] + +# W32-LABEL: name: idxen +# W32-LABEL: bb.0: +# W32-NEXT: successors: %bb.1({{.*}}) +# W32: [[VRSRC:%[0-9]+]]:vreg_128 = REG_SEQUENCE %0, %subreg.sub0, %1, %subreg.sub1, %2, %subreg.sub2, %3, %subreg.sub3 +# W32: [[SAVEEXEC:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo +# W32-LABEL: bb.1: +# W32-NEXT: successors: %bb.1({{.*}}), %bb.2({{.*}}) 
+# W32: [[SRSRC0:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub0, implicit $exec +# W32: [[SRSRC1:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub1, implicit $exec +# W32: [[SRSRC2:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub2, implicit $exec +# W32: [[SRSRC3:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub3, implicit $exec +# W32: [[SRSRC:%[0-9]+]]:sreg_128 = REG_SEQUENCE [[SRSRC0]], %subreg.sub0, [[SRSRC1]], %subreg.sub1, [[SRSRC2]], %subreg.sub2, [[SRSRC3]], %subreg.sub3 +# W32: [[CMP0:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[SRSRC]].sub0_sub1, [[VRSRC]].sub0_sub1, implicit $exec +# W32: [[CMP1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[SRSRC]].sub2_sub3, [[VRSRC]].sub2_sub3, implicit $exec +# W32: [[CMP:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[CMP0]], [[CMP1]], implicit-def $scc +# W32: [[TMPEXEC:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[CMP]], implicit-def $exec, implicit-def $scc, implicit $exec +# W32: {{[0-9]+}}:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN %4, killed [[SRSRC]], 0, 0, 0, 0, 0, 0, implicit $exec +# TODO: S_XOR_B32_term should be `implicit-def $scc` +# W32: $exec_lo = S_XOR_B32_term $exec_lo, [[TMPEXEC]] +# W32: S_CBRANCH_EXECNZ %bb.1, implicit $exec +# W32-LABEL bb.2: +# W32: $exec_lo = S_MOV_B32 [[SAVEEXEC]] --- name: idxen liveins: @@ -53,27 +78,50 @@ S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0 ... -# COMMON-LABEL: name: offen -# COMMON-LABEL: bb.0: -# COMMON-NEXT: successors: %bb.1({{.*}}) -# COMMON: [[VRSRC:%[0-9]+]]:vreg_128 = REG_SEQUENCE %0, %subreg.sub0, %1, %subreg.sub1, %2, %subreg.sub2, %3, %subreg.sub3 -# COMMON: [[SAVEEXEC:%[0-9]+]]:sreg_64_xexec = S_MOV_B64 $exec -# COMMON-LABEL: bb.1: -# COMMON-NEXT: successors: %bb.1({{.*}}), %bb.2({{.*}}) -# COMMON: [[SRSRC0:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub0, implicit $exec -# COMMON: [[SRSRC1:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub1, implicit $exec -# COMMON: [[SRSRC2:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub2, implicit $exec -# COMMON: [[SRSRC3:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub3, implicit $exec -# COMMON: [[SRSRC:%[0-9]+]]:sreg_128 = REG_SEQUENCE [[SRSRC0]], %subreg.sub0, [[SRSRC1]], %subreg.sub1, [[SRSRC2]], %subreg.sub2, [[SRSRC3]], %subreg.sub3 -# COMMON: [[CMP0:%[0-9]+]]:sreg_64 = V_CMP_EQ_U64_e64 [[SRSRC]].sub0_sub1, [[VRSRC]].sub0_sub1, implicit $exec -# COMMON: [[CMP1:%[0-9]+]]:sreg_64 = V_CMP_EQ_U64_e64 [[SRSRC]].sub2_sub3, [[VRSRC]].sub2_sub3, implicit $exec -# COMMON: [[CMP:%[0-9]+]]:sreg_64 = S_AND_B64 [[CMP0]], [[CMP1]], implicit-def $scc -# COMMON: [[TMPEXEC:%[0-9]+]]:sreg_64 = S_AND_SAVEEXEC_B64 killed [[CMP]], implicit-def $exec, implicit-def $scc, implicit $exec -# COMMON: {{[0-9]+}}:vgpr_32 = BUFFER_LOAD_FORMAT_X_OFFEN %4, killed [[SRSRC]], 0, 0, 0, 0, 0, 0, implicit $exec -# COMMON: $exec = S_XOR_B64_term $exec, [[TMPEXEC]], implicit-def $scc -# COMMON: S_CBRANCH_EXECNZ %bb.1, implicit $exec -# COMMON-LABEL bb.2: -# COMMON: $exec = S_MOV_B64 [[SAVEEXEC]] +# W64-LABEL: name: offen +# W64-LABEL: bb.0: +# W64-NEXT: successors: %bb.1({{.*}}) +# W64: [[VRSRC:%[0-9]+]]:vreg_128 = REG_SEQUENCE %0, %subreg.sub0, %1, %subreg.sub1, %2, %subreg.sub2, %3, %subreg.sub3 +# W64: [[SAVEEXEC:%[0-9]+]]:sreg_64_xexec = S_MOV_B64 $exec +# W64-LABEL: bb.1: +# W64-NEXT: successors: %bb.1({{.*}}), %bb.2({{.*}}) +# W64: [[SRSRC0:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub0, implicit $exec +# W64: [[SRSRC1:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub1, implicit $exec +# W64: 
[[SRSRC2:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub2, implicit $exec +# W64: [[SRSRC3:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub3, implicit $exec +# W64: [[SRSRC:%[0-9]+]]:sreg_128 = REG_SEQUENCE [[SRSRC0]], %subreg.sub0, [[SRSRC1]], %subreg.sub1, [[SRSRC2]], %subreg.sub2, [[SRSRC3]], %subreg.sub3 +# W64: [[CMP0:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U64_e64 [[SRSRC]].sub0_sub1, [[VRSRC]].sub0_sub1, implicit $exec +# W64: [[CMP1:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U64_e64 [[SRSRC]].sub2_sub3, [[VRSRC]].sub2_sub3, implicit $exec +# W64: [[CMP:%[0-9]+]]:sreg_64_xexec = S_AND_B64 [[CMP0]], [[CMP1]], implicit-def $scc +# W64: [[TMPEXEC:%[0-9]+]]:sreg_64_xexec = S_AND_SAVEEXEC_B64 killed [[CMP]], implicit-def $exec, implicit-def $scc, implicit $exec +# W64: {{[0-9]+}}:vgpr_32 = BUFFER_LOAD_FORMAT_X_OFFEN %4, killed [[SRSRC]], 0, 0, 0, 0, 0, 0, implicit $exec +# W64: $exec = S_XOR_B64_term $exec, [[TMPEXEC]], implicit-def $scc +# W64: S_CBRANCH_EXECNZ %bb.1, implicit $exec +# W64-LABEL bb.2: +# W64: $exec = S_MOV_B64 [[SAVEEXEC]] + +# W32-LABEL: name: offen +# W32-LABEL: bb.0: +# W32-NEXT: successors: %bb.1({{.*}}) +# W32: [[VRSRC:%[0-9]+]]:vreg_128 = REG_SEQUENCE %0, %subreg.sub0, %1, %subreg.sub1, %2, %subreg.sub2, %3, %subreg.sub3 +# W32: [[SAVEEXEC:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo +# W32-LABEL: bb.1: +# W32-NEXT: successors: %bb.1({{.*}}), %bb.2({{.*}}) +# W32: [[SRSRC0:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub0, implicit $exec +# W32: [[SRSRC1:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub1, implicit $exec +# W32: [[SRSRC2:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub2, implicit $exec +# W32: [[SRSRC3:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub3, implicit $exec +# W32: [[SRSRC:%[0-9]+]]:sreg_128 = REG_SEQUENCE [[SRSRC0]], %subreg.sub0, [[SRSRC1]], %subreg.sub1, [[SRSRC2]], %subreg.sub2, [[SRSRC3]], %subreg.sub3 +# W32: [[CMP0:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[SRSRC]].sub0_sub1, [[VRSRC]].sub0_sub1, implicit $exec +# W32: [[CMP1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[SRSRC]].sub2_sub3, [[VRSRC]].sub2_sub3, implicit $exec +# W32: [[CMP:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[CMP0]], [[CMP1]], implicit-def $scc +# W32: [[TMPEXEC:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[CMP]], implicit-def $exec, implicit-def $scc, implicit $exec +# W32: {{[0-9]+}}:vgpr_32 = BUFFER_LOAD_FORMAT_X_OFFEN %4, killed [[SRSRC]], 0, 0, 0, 0, 0, 0, implicit $exec +# TODO: S_XOR_B32_term should be `implicit-def $scc` +# W32: $exec_lo = S_XOR_B32_term $exec_lo, [[TMPEXEC]] +# W32: S_CBRANCH_EXECNZ %bb.1, implicit $exec +# W32-LABEL bb.2: +# W32: $exec_lo = S_MOV_B32 [[SAVEEXEC]] --- name: offen liveins: @@ -99,27 +147,50 @@ S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0 ... 
-# COMMON-LABEL: name: bothen -# COMMON-LABEL: bb.0: -# COMMON-NEXT: successors: %bb.1({{.*}}) -# COMMON: [[VRSRC:%[0-9]+]]:vreg_128 = REG_SEQUENCE %0, %subreg.sub0, %1, %subreg.sub1, %2, %subreg.sub2, %3, %subreg.sub3 -# COMMON: [[SAVEEXEC:%[0-9]+]]:sreg_64_xexec = S_MOV_B64 $exec -# COMMON-LABEL: bb.1: -# COMMON-NEXT: successors: %bb.1({{.*}}), %bb.2({{.*}}) -# COMMON: [[SRSRC0:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub0, implicit $exec -# COMMON: [[SRSRC1:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub1, implicit $exec -# COMMON: [[SRSRC2:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub2, implicit $exec -# COMMON: [[SRSRC3:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub3, implicit $exec -# COMMON: [[SRSRC:%[0-9]+]]:sreg_128 = REG_SEQUENCE [[SRSRC0]], %subreg.sub0, [[SRSRC1]], %subreg.sub1, [[SRSRC2]], %subreg.sub2, [[SRSRC3]], %subreg.sub3 -# COMMON: [[CMP0:%[0-9]+]]:sreg_64 = V_CMP_EQ_U64_e64 [[SRSRC]].sub0_sub1, [[VRSRC]].sub0_sub1, implicit $exec -# COMMON: [[CMP1:%[0-9]+]]:sreg_64 = V_CMP_EQ_U64_e64 [[SRSRC]].sub2_sub3, [[VRSRC]].sub2_sub3, implicit $exec -# COMMON: [[CMP:%[0-9]+]]:sreg_64 = S_AND_B64 [[CMP0]], [[CMP1]], implicit-def $scc -# COMMON: [[TMPEXEC:%[0-9]+]]:sreg_64 = S_AND_SAVEEXEC_B64 killed [[CMP]], implicit-def $exec, implicit-def $scc, implicit $exec -# COMMON: {{[0-9]+}}:vgpr_32 = BUFFER_LOAD_FORMAT_X_BOTHEN %4, killed [[SRSRC]], 0, 0, 0, 0, 0, 0, implicit $exec -# COMMON: $exec = S_XOR_B64_term $exec, [[TMPEXEC]], implicit-def $scc -# COMMON: S_CBRANCH_EXECNZ %bb.1, implicit $exec -# COMMON-LABEL bb.2: -# COMMON: $exec = S_MOV_B64 [[SAVEEXEC]] +# W64-LABEL: name: bothen +# W64-LABEL: bb.0: +# W64-NEXT: successors: %bb.1({{.*}}) +# W64: [[VRSRC:%[0-9]+]]:vreg_128 = REG_SEQUENCE %0, %subreg.sub0, %1, %subreg.sub1, %2, %subreg.sub2, %3, %subreg.sub3 +# W64: [[SAVEEXEC:%[0-9]+]]:sreg_64_xexec = S_MOV_B64 $exec +# W64-LABEL: bb.1: +# W64-NEXT: successors: %bb.1({{.*}}), %bb.2({{.*}}) +# W64: [[SRSRC0:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub0, implicit $exec +# W64: [[SRSRC1:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub1, implicit $exec +# W64: [[SRSRC2:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub2, implicit $exec +# W64: [[SRSRC3:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub3, implicit $exec +# W64: [[SRSRC:%[0-9]+]]:sreg_128 = REG_SEQUENCE [[SRSRC0]], %subreg.sub0, [[SRSRC1]], %subreg.sub1, [[SRSRC2]], %subreg.sub2, [[SRSRC3]], %subreg.sub3 +# W64: [[CMP0:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U64_e64 [[SRSRC]].sub0_sub1, [[VRSRC]].sub0_sub1, implicit $exec +# W64: [[CMP1:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U64_e64 [[SRSRC]].sub2_sub3, [[VRSRC]].sub2_sub3, implicit $exec +# W64: [[CMP:%[0-9]+]]:sreg_64_xexec = S_AND_B64 [[CMP0]], [[CMP1]], implicit-def $scc +# W64: [[TMPEXEC:%[0-9]+]]:sreg_64_xexec = S_AND_SAVEEXEC_B64 killed [[CMP]], implicit-def $exec, implicit-def $scc, implicit $exec +# W64: {{[0-9]+}}:vgpr_32 = BUFFER_LOAD_FORMAT_X_BOTHEN %4, killed [[SRSRC]], 0, 0, 0, 0, 0, 0, implicit $exec +# W64: $exec = S_XOR_B64_term $exec, [[TMPEXEC]], implicit-def $scc +# W64: S_CBRANCH_EXECNZ %bb.1, implicit $exec +# W64-LABEL bb.2: +# W64: $exec = S_MOV_B64 [[SAVEEXEC]] + +# W32-LABEL: name: bothen +# W32-LABEL: bb.0: +# W32-NEXT: successors: %bb.1({{.*}}) +# W32: [[VRSRC:%[0-9]+]]:vreg_128 = REG_SEQUENCE %0, %subreg.sub0, %1, %subreg.sub1, %2, %subreg.sub2, %3, %subreg.sub3 +# W32: [[SAVEEXEC:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo +# W32-LABEL: bb.1: +# W32-NEXT: successors: %bb.1({{.*}}), 
%bb.2({{.*}}) +# W32: [[SRSRC0:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub0, implicit $exec +# W32: [[SRSRC1:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub1, implicit $exec +# W32: [[SRSRC2:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub2, implicit $exec +# W32: [[SRSRC3:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub3, implicit $exec +# W32: [[SRSRC:%[0-9]+]]:sreg_128 = REG_SEQUENCE [[SRSRC0]], %subreg.sub0, [[SRSRC1]], %subreg.sub1, [[SRSRC2]], %subreg.sub2, [[SRSRC3]], %subreg.sub3 +# W32: [[CMP0:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[SRSRC]].sub0_sub1, [[VRSRC]].sub0_sub1, implicit $exec +# W32: [[CMP1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[SRSRC]].sub2_sub3, [[VRSRC]].sub2_sub3, implicit $exec +# W32: [[CMP:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[CMP0]], [[CMP1]], implicit-def $scc +# W32: [[TMPEXEC:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[CMP]], implicit-def $exec, implicit-def $scc, implicit $exec +# W32: {{[0-9]+}}:vgpr_32 = BUFFER_LOAD_FORMAT_X_BOTHEN %4, killed [[SRSRC]], 0, 0, 0, 0, 0, 0, implicit $exec +# TODO: S_XOR_B32_term should be `implicit-def $scc` +# W32: $exec_lo = S_XOR_B32_term $exec_lo, [[TMPEXEC]] +# W32: S_CBRANCH_EXECNZ %bb.1, implicit $exec +# W32-LABEL bb.2: +# W32: $exec_lo = S_MOV_B32 [[SAVEEXEC]] --- name: bothen liveins: @@ -145,17 +216,17 @@ S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0 ... -# COMMON-LABEL: name: addr64 -# COMMON-LABEL: bb.0: -# COMMON: %12:vreg_64 = COPY %8.sub0_sub1 -# COMMON: %13:sreg_64 = S_MOV_B64 0 -# COMMON: %14:sgpr_32 = S_MOV_B32 0 -# COMMON: %15:sgpr_32 = S_MOV_B32 61440 -# COMMON: %16:sreg_128 = REG_SEQUENCE %13, %subreg.sub0_sub1, %14, %subreg.sub2, %15, %subreg.sub3 -# COMMON: %9:vgpr_32 = V_ADD_I32_e32 %12.sub0, %4.sub0, implicit-def $vcc, implicit $exec -# COMMON: %10:vgpr_32 = V_ADDC_U32_e32 %12.sub1, %4.sub1, implicit-def $vcc, implicit $vcc, implicit $exec -# COMMON: %11:vreg_64 = REG_SEQUENCE %9, %subreg.sub0, %10, %subreg.sub1 -# COMMON: {{[0-9]+}}:vgpr_32 = BUFFER_LOAD_FORMAT_X_ADDR64 %11, killed %16, 0, 0, 0, 0, 0, 0, implicit $exec +# ADDR64-LABEL: name: addr64 +# ADDR64-LABEL: bb.0: +# ADDR64: %12:vreg_64 = COPY %8.sub0_sub1 +# ADDR64: %13:sreg_64 = S_MOV_B64 0 +# ADDR64: %14:sgpr_32 = S_MOV_B32 0 +# ADDR64: %15:sgpr_32 = S_MOV_B32 61440 +# ADDR64: %16:sreg_128 = REG_SEQUENCE %13, %subreg.sub0_sub1, %14, %subreg.sub2, %15, %subreg.sub3 +# ADDR64: %9:vgpr_32 = V_ADD_I32_e32 %12.sub0, %4.sub0, implicit-def $vcc, implicit $exec +# ADDR64: %10:vgpr_32 = V_ADDC_U32_e32 %12.sub1, %4.sub1, implicit-def $vcc, implicit $vcc, implicit $exec +# ADDR64: %11:vreg_64 = REG_SEQUENCE %9, %subreg.sub0, %10, %subreg.sub1 +# ADDR64: {{[0-9]+}}:vgpr_32 = BUFFER_LOAD_FORMAT_X_ADDR64 %11, killed %16, 0, 0, 0, 0, 0, 0, implicit $exec --- name: addr64 liveins: @@ -181,28 +252,49 @@ S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0 ... 
-# COMMON-LABEL: name: offset -# COMMON-LABEL: bb.0: +# W64-LABEL: name: offset +# W64-LABEL: bb.0: + +# W64-NO-ADDR64: successors: %bb.1({{.*}}) +# W64-NO-ADDR64: [[VRSRC:%[0-9]+]]:vreg_128 = REG_SEQUENCE %0, %subreg.sub0, %1, %subreg.sub1, %2, %subreg.sub2, %3, %subreg.sub3 +# W64-NO-ADDR64: [[SAVEEXEC:%[0-9]+]]:sreg_64_xexec = S_MOV_B64 $exec +# W64-NO-ADDR64-LABEL: bb.1: +# W64-NO-ADDR64-NEXT: successors: %bb.1({{.*}}), %bb.2({{.*}}) +# W64-NO-ADDR64: [[SRSRC0:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub0, implicit $exec +# W64-NO-ADDR64: [[SRSRC1:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub1, implicit $exec +# W64-NO-ADDR64: [[SRSRC2:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub2, implicit $exec +# W64-NO-ADDR64: [[SRSRC3:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub3, implicit $exec +# W64-NO-ADDR64: [[SRSRC:%[0-9]+]]:sreg_128 = REG_SEQUENCE [[SRSRC0]], %subreg.sub0, [[SRSRC1]], %subreg.sub1, [[SRSRC2]], %subreg.sub2, [[SRSRC3]], %subreg.sub3 +# W64-NO-ADDR64: [[CMP0:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U64_e64 [[SRSRC]].sub0_sub1, [[VRSRC]].sub0_sub1, implicit $exec +# W64-NO-ADDR64: [[CMP1:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U64_e64 [[SRSRC]].sub2_sub3, [[VRSRC]].sub2_sub3, implicit $exec +# W64-NO-ADDR64: [[CMP:%[0-9]+]]:sreg_64_xexec = S_AND_B64 [[CMP0]], [[CMP1]], implicit-def $scc +# W64-NO-ADDR64: [[TMPEXEC:%[0-9]+]]:sreg_64_xexec = S_AND_SAVEEXEC_B64 killed [[CMP]], implicit-def $exec, implicit-def $scc, implicit $exec +# W64-NO-ADDR64: {{[0-9]+}}:vgpr_32 = BUFFER_LOAD_FORMAT_X_OFFSET killed [[SRSRC]], 0, 0, 0, 0, 0, 0, implicit $exec +# W64-NO-ADDR64: $exec = S_XOR_B64_term $exec, [[TMPEXEC]], implicit-def $scc +# W64-NO-ADDR64: S_CBRANCH_EXECNZ %bb.1, implicit $exec +# W64-NO-ADDR64-LABEL bb.2: +# W64-NO-ADDR64: $exec = S_MOV_B64 [[SAVEEXEC]] -# NO-ADDR64-NEXT: successors: %bb.1({{.*}}) -# NO-ADDR64: [[VRSRC:%[0-9]+]]:vreg_128 = REG_SEQUENCE %0, %subreg.sub0, %1, %subreg.sub1, %2, %subreg.sub2, %3, %subreg.sub3 -# NO-ADDR64: [[SAVEEXEC:%[0-9]+]]:sreg_64_xexec = S_MOV_B64 $exec -# NO-ADDR64-LABEL: bb.1: -# NO-ADDR64-NEXT: successors: %bb.1({{.*}}), %bb.2({{.*}}) -# NO-ADDR64: [[SRSRC0:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub0, implicit $exec -# NO-ADDR64: [[SRSRC1:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub1, implicit $exec -# NO-ADDR64: [[SRSRC2:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub2, implicit $exec -# NO-ADDR64: [[SRSRC3:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub3, implicit $exec -# NO-ADDR64: [[SRSRC:%[0-9]+]]:sreg_128 = REG_SEQUENCE [[SRSRC0]], %subreg.sub0, [[SRSRC1]], %subreg.sub1, [[SRSRC2]], %subreg.sub2, [[SRSRC3]], %subreg.sub3 -# NO-ADDR64: [[CMP0:%[0-9]+]]:sreg_64 = V_CMP_EQ_U64_e64 [[SRSRC]].sub0_sub1, [[VRSRC]].sub0_sub1, implicit $exec -# NO-ADDR64: [[CMP1:%[0-9]+]]:sreg_64 = V_CMP_EQ_U64_e64 [[SRSRC]].sub2_sub3, [[VRSRC]].sub2_sub3, implicit $exec -# NO-ADDR64: [[CMP:%[0-9]+]]:sreg_64 = S_AND_B64 [[CMP0]], [[CMP1]], implicit-def $scc -# NO-ADDR64: [[TMPEXEC:%[0-9]+]]:sreg_64 = S_AND_SAVEEXEC_B64 killed [[CMP]], implicit-def $exec, implicit-def $scc, implicit $exec -# NO-ADDR64: {{[0-9]+}}:vgpr_32 = BUFFER_LOAD_FORMAT_X_OFFSET killed [[SRSRC]], 0, 0, 0, 0, 0, 0, implicit $exec -# NO-ADDR64: $exec = S_XOR_B64_term $exec, [[TMPEXEC]], implicit-def $scc -# NO-ADDR64: S_CBRANCH_EXECNZ %bb.1, implicit $exec -# NO-ADDR64-LABEL bb.2: -# NO-ADDR64: $exec = S_MOV_B64 [[SAVEEXEC]] +# W32: successors: %bb.1({{.*}}) +# W32: [[VRSRC:%[0-9]+]]:vreg_128 = REG_SEQUENCE %0, %subreg.sub0, %1, 
%subreg.sub1, %2, %subreg.sub2, %3, %subreg.sub3 +# W32: [[SAVEEXEC:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo +# W32-LABEL: bb.1: +# W32-NEXT: successors: %bb.1({{.*}}), %bb.2({{.*}}) +# W32: [[SRSRC0:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub0, implicit $exec +# W32: [[SRSRC1:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub1, implicit $exec +# W32: [[SRSRC2:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub2, implicit $exec +# W32: [[SRSRC3:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub3, implicit $exec +# W32: [[SRSRC:%[0-9]+]]:sreg_128 = REG_SEQUENCE [[SRSRC0]], %subreg.sub0, [[SRSRC1]], %subreg.sub1, [[SRSRC2]], %subreg.sub2, [[SRSRC3]], %subreg.sub3 +# W32: [[CMP0:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[SRSRC]].sub0_sub1, [[VRSRC]].sub0_sub1, implicit $exec +# W32: [[CMP1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[SRSRC]].sub2_sub3, [[VRSRC]].sub2_sub3, implicit $exec +# W32: [[CMP:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[CMP0]], [[CMP1]], implicit-def $scc +# W32: [[TMPEXEC:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[CMP]], implicit-def $exec, implicit-def $scc, implicit $exec +# W32: {{[0-9]+}}:vgpr_32 = BUFFER_LOAD_FORMAT_X_OFFSET killed [[SRSRC]], 0, 0, 0, 0, 0, 0, implicit $exec +# TODO: S_XOR_B32_term should be `implicit-def $scc` +# W32: $exec_lo = S_XOR_B32_term $exec_lo, [[TMPEXEC]] +# W32: S_CBRANCH_EXECNZ %bb.1, implicit $exec +# W32-LABEL bb.2: +# W32: $exec_lo = S_MOV_B32 [[SAVEEXEC]] # ADDR64: [[VRSRC:%[0-9]+]]:vreg_128 = REG_SEQUENCE %0, %subreg.sub0, %1, %subreg.sub1, %2, %subreg.sub2, %3, %subreg.sub3 # ADDR64: [[RSRCPTR:%[0-9]+]]:vreg_64 = COPY [[VRSRC]].sub0_sub1 Index: test/CodeGen/AMDGPU/multi-divergent-exit-region.ll =================================================================== --- test/CodeGen/AMDGPU/multi-divergent-exit-region.ll +++ test/CodeGen/AMDGPU/multi-divergent-exit-region.ll @@ -9,7 +9,7 @@ ; StructurizeCFG. 
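The checks in this test now carry the mask type in the control-flow intrinsic names (for example @llvm.amdgcn.if.i64 and @llvm.amdgcn.else.i64.i64). As a purely illustrative aid, the small standalone C++ snippet below prints the mangled names the wave64 checks expect, next to the i32-suffixed forms a wave32 configuration would presumably use; the ".i32" variants are an assumption added here for contrast and are not exercised by this test.

#include <cstdio>

// Illustration only: print the type-mangled control-flow intrinsic names used
// by the updated checks. ".i64" matches the wave64 checks in this file; the
// ".i32" forms are an assumed wave32 counterpart, not shown by this test.
int main() {
  const char *Suffixes[] = {".i64", ".i32"};
  for (const char *S : Suffixes) {
    std::printf("llvm.amdgcn.if%s\n", S);         // overloaded on the mask type
    std::printf("llvm.amdgcn.else%s%s\n", S, S);  // result and operand overloaded
    std::printf("llvm.amdgcn.if.break%s%s\n", S, S);
    std::printf("llvm.amdgcn.loop%s\n", S);
    std::printf("llvm.amdgcn.end.cf%s\n", S);
  }
  return 0;
}

The double suffix on else and if.break simply reflects that two positions in their signatures are overloaded, while if, loop and end.cf overload only one, which is why the check strings below differ in shape.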
; IR-LABEL: @multi_divergent_region_exit_ret_ret( -; IR: %1 = call { i1, i64 } @llvm.amdgcn.if(i1 %0) +; IR: %1 = call { i1, i64 } @llvm.amdgcn.if.i64(i1 %0) ; IR: %2 = extractvalue { i1, i64 } %1, 0 ; IR: %3 = extractvalue { i1, i64 } %1, 1 ; IR: br i1 %2, label %LeafBlock1, label %Flow @@ -17,7 +17,7 @@ ; IR: Flow: ; IR: %4 = phi i1 [ true, %LeafBlock1 ], [ false, %entry ] ; IR: %5 = phi i1 [ %10, %LeafBlock1 ], [ false, %entry ] -; IR: %6 = call { i1, i64 } @llvm.amdgcn.else(i64 %3) +; IR: %6 = call { i1, i64 } @llvm.amdgcn.else.i64.i64(i64 %3) ; IR: %7 = extractvalue { i1, i64 } %6, 0 ; IR: %8 = extractvalue { i1, i64 } %6, 1 ; IR: br i1 %7, label %LeafBlock, label %Flow1 @@ -30,8 +30,8 @@ ; IR: Flow2: ; IR: %11 = phi i1 [ false, %exit1 ], [ %15, %Flow1 ] -; IR: call void @llvm.amdgcn.end.cf(i64 %19) -; IR: %12 = call { i1, i64 } @llvm.amdgcn.if(i1 %11) +; IR: call void @llvm.amdgcn.end.cf.i64(i64 %19) +; IR: %12 = call { i1, i64 } @llvm.amdgcn.if.i64(i1 %11) ; IR: %13 = extractvalue { i1, i64 } %12, 0 ; IR: %14 = extractvalue { i1, i64 } %12, 1 ; IR: br i1 %13, label %exit0, label %UnifiedReturnBlock @@ -43,8 +43,8 @@ ; IR: Flow1: ; IR: %15 = phi i1 [ %SwitchLeaf, %LeafBlock ], [ %4, %Flow ] ; IR: %16 = phi i1 [ %9, %LeafBlock ], [ %5, %Flow ] -; IR: call void @llvm.amdgcn.end.cf(i64 %8) -; IR: %17 = call { i1, i64 } @llvm.amdgcn.if(i1 %16) +; IR: call void @llvm.amdgcn.end.cf.i64(i64 %8) +; IR: %17 = call { i1, i64 } @llvm.amdgcn.if.i64(i1 %16) ; IR: %18 = extractvalue { i1, i64 } %17, 0 ; IR: %19 = extractvalue { i1, i64 } %17, 1 ; IR: br i1 %18, label %exit1, label %Flow2 @@ -54,7 +54,7 @@ ; IR: br label %Flow2 ; IR: UnifiedReturnBlock: -; IR: call void @llvm.amdgcn.end.cf(i64 %14) +; IR: call void @llvm.amdgcn.end.cf.i64(i64 %14) ; IR: ret void @@ -141,13 +141,13 @@ } ; IR-LABEL: @multi_divergent_region_exit_unreachable_unreachable( -; IR: %1 = call { i1, i64 } @llvm.amdgcn.if(i1 %0) +; IR: %1 = call { i1, i64 } @llvm.amdgcn.if.i64(i1 %0) -; IR: %6 = call { i1, i64 } @llvm.amdgcn.else(i64 %3) +; IR: %6 = call { i1, i64 } @llvm.amdgcn.else.i64.i64(i64 %3) ; IR: %11 = phi i1 [ false, %exit1 ], [ %15, %Flow1 ] -; IR: call void @llvm.amdgcn.end.cf(i64 %19) -; IR: %12 = call { i1, i64 } @llvm.amdgcn.if(i1 %11) +; IR: call void @llvm.amdgcn.end.cf.i64(i64 %19) +; IR: %12 = call { i1, i64 } @llvm.amdgcn.if.i64(i1 %11) ; IR: br i1 %13, label %exit0, label %UnifiedUnreachableBlock @@ -203,7 +203,7 @@ ; IR: {{^}}Flow: ; IR: %4 = phi i1 [ true, %LeafBlock1 ], [ false, %entry ] ; IR: %5 = phi i1 [ %10, %LeafBlock1 ], [ false, %entry ] -; IR: %6 = call { i1, i64 } @llvm.amdgcn.else(i64 %3) +; IR: %6 = call { i1, i64 } @llvm.amdgcn.else.i64.i64(i64 %3) ; IR: br i1 %7, label %LeafBlock, label %Flow1 ; IR: {{^}}LeafBlock: @@ -218,8 +218,8 @@ ; IR: Flow2: ; IR: %11 = phi i1 [ false, %exit1 ], [ %15, %Flow1 ] -; IR: call void @llvm.amdgcn.end.cf(i64 %19) -; IR: %12 = call { i1, i64 } @llvm.amdgcn.if(i1 %11) +; IR: call void @llvm.amdgcn.end.cf.i64(i64 %19) +; IR: %12 = call { i1, i64 } @llvm.amdgcn.if.i64(i1 %11) ; IR: br i1 %13, label %exit0, label %UnifiedReturnBlock ; IR: exit0: @@ -229,8 +229,8 @@ ; IR: {{^}}Flow1: ; IR: %15 = phi i1 [ %divergent.cond1, %LeafBlock ], [ %4, %Flow ] ; IR: %16 = phi i1 [ %9, %LeafBlock ], [ %5, %Flow ] -; IR: call void @llvm.amdgcn.end.cf(i64 %8) -; IR: %17 = call { i1, i64 } @llvm.amdgcn.if(i1 %16) +; IR: call void @llvm.amdgcn.end.cf.i64(i64 %8) +; IR: %17 = call { i1, i64 } @llvm.amdgcn.if.i64(i1 %16) ; IR: %18 = extractvalue { i1, i64 } %17, 0 ; IR: %19 = 
extractvalue { i1, i64 } %17, 1 ; IR: br i1 %18, label %exit1, label %Flow2 @@ -240,7 +240,7 @@ ; IR: br label %Flow2 ; IR: UnifiedReturnBlock: -; IR: call void @llvm.amdgcn.end.cf(i64 %14) +; IR: call void @llvm.amdgcn.end.cf.i64(i64 %14) ; IR: ret void define amdgpu_kernel void @multi_exit_region_divergent_ret_uniform_ret(i32 addrspace(1)* nocapture %arg0, i32 addrspace(1)* nocapture %arg1, i32 addrspace(1)* nocapture %arg2, i32 %arg3) #0 { entry: @@ -279,17 +279,17 @@ } ; IR-LABEL: @multi_exit_region_uniform_ret_divergent_ret( -; IR: %1 = call { i1, i64 } @llvm.amdgcn.if(i1 %0) +; IR: %1 = call { i1, i64 } @llvm.amdgcn.if.i64(i1 %0) ; IR: br i1 %2, label %LeafBlock1, label %Flow ; IR: Flow: ; IR: %4 = phi i1 [ true, %LeafBlock1 ], [ false, %entry ] ; IR: %5 = phi i1 [ %10, %LeafBlock1 ], [ false, %entry ] -; IR: %6 = call { i1, i64 } @llvm.amdgcn.else(i64 %3) +; IR: %6 = call { i1, i64 } @llvm.amdgcn.else.i64.i64(i64 %3) ; IR: %11 = phi i1 [ false, %exit1 ], [ %15, %Flow1 ] -; IR: call void @llvm.amdgcn.end.cf(i64 %19) -; IR: %12 = call { i1, i64 } @llvm.amdgcn.if(i1 %11) +; IR: call void @llvm.amdgcn.end.cf.i64(i64 %19) +; IR: %12 = call { i1, i64 } @llvm.amdgcn.if.i64(i1 %11) define amdgpu_kernel void @multi_exit_region_uniform_ret_divergent_ret(i32 addrspace(1)* nocapture %arg0, i32 addrspace(1)* nocapture %arg1, i32 addrspace(1)* nocapture %arg2, i32 %arg3) #0 { entry: @@ -330,11 +330,11 @@ ; IR-LABEL: @multi_divergent_region_exit_ret_ret_return_value( ; IR: Flow2: ; IR: %11 = phi i1 [ false, %exit1 ], [ %15, %Flow1 ] -; IR: call void @llvm.amdgcn.end.cf(i64 %19) +; IR: call void @llvm.amdgcn.end.cf.i64(i64 %19) ; IR: UnifiedReturnBlock: ; IR: %UnifiedRetVal = phi float [ 2.000000e+00, %Flow2 ], [ 1.000000e+00, %exit0 ] -; IR: call void @llvm.amdgcn.end.cf(i64 %14) +; IR: call void @llvm.amdgcn.end.cf.i64(i64 %14) ; IR: ret float %UnifiedRetVal define amdgpu_ps float @multi_divergent_region_exit_ret_ret_return_value(i32 %vgpr) #0 { entry: @@ -402,17 +402,17 @@ } ; IR-LABEL: @multi_divergent_region_exit_ret_unreachable( -; IR: %1 = call { i1, i64 } @llvm.amdgcn.if(i1 %0) +; IR: %1 = call { i1, i64 } @llvm.amdgcn.if.i64(i1 %0) ; IR: Flow: ; IR: %4 = phi i1 [ true, %LeafBlock1 ], [ false, %entry ] ; IR: %5 = phi i1 [ %10, %LeafBlock1 ], [ false, %entry ] -; IR: %6 = call { i1, i64 } @llvm.amdgcn.else(i64 %3) +; IR: %6 = call { i1, i64 } @llvm.amdgcn.else.i64.i64(i64 %3) ; IR: Flow2: ; IR: %11 = phi i1 [ false, %exit1 ], [ %15, %Flow1 ] -; IR: call void @llvm.amdgcn.end.cf(i64 %19) -; IR: %12 = call { i1, i64 } @llvm.amdgcn.if(i1 %11) +; IR: call void @llvm.amdgcn.end.cf.i64(i64 %19) +; IR: %12 = call { i1, i64 } @llvm.amdgcn.if.i64(i1 %11) ; IR: br i1 %13, label %exit0, label %UnifiedReturnBlock ; IR: exit0: @@ -422,8 +422,8 @@ ; IR: Flow1: ; IR: %15 = phi i1 [ %SwitchLeaf, %LeafBlock ], [ %4, %Flow ] ; IR: %16 = phi i1 [ %9, %LeafBlock ], [ %5, %Flow ] -; IR: call void @llvm.amdgcn.end.cf(i64 %8) -; IR: %17 = call { i1, i64 } @llvm.amdgcn.if(i1 %16) +; IR: call void @llvm.amdgcn.end.cf.i64(i64 %8) +; IR: %17 = call { i1, i64 } @llvm.amdgcn.if.i64(i1 %16) ; IR: %18 = extractvalue { i1, i64 } %17, 0 ; IR: %19 = extractvalue { i1, i64 } %17, 1 ; IR: br i1 %18, label %exit1, label %Flow2 @@ -434,7 +434,7 @@ ; IR-NEXT: br label %Flow2 ; IR: UnifiedReturnBlock: -; IR-NEXT: call void @llvm.amdgcn.end.cf(i64 %14) +; IR-NEXT: call void @llvm.amdgcn.end.cf.i64(i64 %14) ; IR-NEXT: ret void define amdgpu_kernel void @multi_divergent_region_exit_ret_unreachable(i32 addrspace(1)* nocapture %arg0, 
i32 addrspace(1)* nocapture %arg1, i32 addrspace(1)* nocapture %arg2) #0 { entry: @@ -490,7 +490,7 @@ ; IR-NEXT: br label %Flow2 ; IR: UnifiedReturnBlock: ; preds = %exit0, %Flow2 -; IR-NEXT: call void @llvm.amdgcn.end.cf(i64 %14) +; IR-NEXT: call void @llvm.amdgcn.end.cf.i64(i64 %14) ; IR-NEXT: ret void define amdgpu_kernel void @indirect_multi_divergent_region_exit_ret_unreachable(i32 addrspace(1)* nocapture %arg0, i32 addrspace(1)* nocapture %arg1, i32 addrspace(1)* nocapture %arg2) #0 { entry: @@ -645,7 +645,7 @@ ; IR: br i1 %11, label %uniform.endif, label %uniform.ret0 ; IR: UnifiedReturnBlock: ; preds = %Flow3, %Flow2 -; IR-NEXT: call void @llvm.amdgcn.end.cf(i64 %6) +; IR-NEXT: call void @llvm.amdgcn.end.cf.i64(i64 %6) ; IR-NEXT: ret void define amdgpu_kernel void @uniform_complex_multi_ret_nest_in_divergent_triangle(i32 %arg0) #0 { entry: @@ -691,7 +691,7 @@ ; IR-NEXT: br label %UnifiedReturnBlock ; IR: UnifiedReturnBlock: -; IR-NEXT: call void @llvm.amdgcn.end.cf(i64 +; IR-NEXT: call void @llvm.amdgcn.end.cf.i64(i64 ; IR-NEXT: ret void define amdgpu_kernel void @multi_divergent_unreachable_exit() #0 { bb: Index: test/CodeGen/AMDGPU/multilevel-break.ll =================================================================== --- test/CodeGen/AMDGPU/multilevel-break.ll +++ test/CodeGen/AMDGPU/multilevel-break.ll @@ -5,7 +5,7 @@ ; OPT: main_body: ; OPT: LOOP.outer: ; OPT: LOOP: -; OPT: [[if:%[0-9]+]] = call { i1, i64 } @llvm.amdgcn.if( +; OPT: [[if:%[0-9]+]] = call { i1, i64 } @llvm.amdgcn.if.i64( ; OPT: [[if_exec:%[0-9]+]] = extractvalue { i1, i64 } [[if]], 1 ; ; OPT: Flow: @@ -13,9 +13,9 @@ ; Ensure two if.break calls, for both the inner and outer loops ; OPT: call void @llvm.amdgcn.end.cf -; OPT-NEXT: call i64 @llvm.amdgcn.if.break(i1 -; OPT-NEXT: call i1 @llvm.amdgcn.loop(i64 -; OPT-NEXT: call i64 @llvm.amdgcn.if.break(i1 +; OPT-NEXT: call i64 @llvm.amdgcn.if.break.i64.i64(i1 +; OPT-NEXT: call i1 @llvm.amdgcn.loop.i64(i64 +; OPT-NEXT: call i64 @llvm.amdgcn.if.break.i64.i64(i1 ; ; OPT: Flow1: Index: test/CodeGen/AMDGPU/nested-loop-conditions.ll =================================================================== --- test/CodeGen/AMDGPU/nested-loop-conditions.ll +++ test/CodeGen/AMDGPU/nested-loop-conditions.ll @@ -13,7 +13,7 @@ ; IR-NEXT: %phi.broken = phi i64 [ %3, %bb10 ], [ 0, %bb ] ; IR-NEXT: %tmp6 = phi i32 [ 0, %bb ], [ %tmp11, %bb10 ] ; IR-NEXT: %tmp7 = icmp eq i32 %tmp6, 1 -; IR-NEXT: %0 = call { i1, i64 } @llvm.amdgcn.if(i1 %tmp7) +; IR-NEXT: %0 = call { i1, i64 } @llvm.amdgcn.if.i64(i1 %tmp7) ; IR-NEXT: %1 = extractvalue { i1, i64 } %0, 0 ; IR-NEXT: %2 = extractvalue { i1, i64 } %0, 1 ; IR-NEXT: br i1 %1, label %bb8, label %Flow @@ -24,14 +24,14 @@ ; IR: bb10: ; IR-NEXT: %tmp11 = phi i32 [ %6, %Flow ] ; IR-NEXT: %tmp12 = phi i1 [ %5, %Flow ] -; IR-NEXT: %3 = call i64 @llvm.amdgcn.if.break(i1 %tmp12, i64 %phi.broken) -; IR-NEXT: %4 = call i1 @llvm.amdgcn.loop(i64 %3) +; IR-NEXT: %3 = call i64 @llvm.amdgcn.if.break.i64.i64(i1 %tmp12, i64 %phi.broken) +; IR-NEXT: %4 = call i1 @llvm.amdgcn.loop.i64(i64 %3) ; IR-NEXT: br i1 %4, label %bb23, label %bb5 ; IR: Flow: ; IR-NEXT: %5 = phi i1 [ %tmp22, %bb4 ], [ true, %bb5 ] ; IR-NEXT: %6 = phi i32 [ %tmp21, %bb4 ], [ undef, %bb5 ] -; IR-NEXT: call void @llvm.amdgcn.end.cf(i64 %2) +; IR-NEXT: call void @llvm.amdgcn.end.cf.i64(i64 %2) ; IR-NEXT: br label %bb10 ; IR: bb13: @@ -51,7 +51,7 @@ ; IR-NEXT: br label %bb9 ; IR: bb23: -; IR-NEXT: call void @llvm.amdgcn.end.cf(i64 %3) +; IR-NEXT: call void @llvm.amdgcn.end.cf.i64(i64 %3) ; IR-NEXT: 
ret void ; GCN-LABEL: {{^}}reduced_nested_loop_conditions: @@ -121,27 +121,27 @@ ; IR-LABEL: @nested_loop_conditions( ; IR: Flow3: -; IR-NEXT: call void @llvm.amdgcn.end.cf(i64 %21) -; IR-NEXT: %0 = call { i1, i64 } @llvm.amdgcn.if(i1 %14) +; IR-NEXT: call void @llvm.amdgcn.end.cf.i64(i64 %21) +; IR-NEXT: %0 = call { i1, i64 } @llvm.amdgcn.if.i64(i1 %14) ; IR-NEXT: %1 = extractvalue { i1, i64 } %0, 0 ; IR-NEXT: %2 = extractvalue { i1, i64 } %0, 1 ; IR-NEXT: br i1 %1, label %bb4.bb13_crit_edge, label %Flow4 ; IR: Flow4: ; IR-NEXT: %3 = phi i1 [ true, %bb4.bb13_crit_edge ], [ false, %Flow3 ] -; IR-NEXT: call void @llvm.amdgcn.end.cf(i64 %2) +; IR-NEXT: call void @llvm.amdgcn.end.cf.i64(i64 %2) ; IR-NEXT: br label %Flow ; IR: Flow: ; IR-NEXT: %4 = phi i1 [ %3, %Flow4 ], [ true, %bb ] -; IR-NEXT: %5 = call { i1, i64 } @llvm.amdgcn.if(i1 %4) +; IR-NEXT: %5 = call { i1, i64 } @llvm.amdgcn.if.i64(i1 %4) ; IR-NEXT: %6 = extractvalue { i1, i64 } %5, 0 ; IR-NEXT: %7 = extractvalue { i1, i64 } %5, 1 ; IR-NEXT: br i1 %6, label %bb13, label %bb31 ; IR: bb14: ; IR: %tmp15 = icmp eq i32 %tmp1037, 1 -; IR-NEXT: %8 = call { i1, i64 } @llvm.amdgcn.if(i1 %tmp15) +; IR-NEXT: %8 = call { i1, i64 } @llvm.amdgcn.if.i64(i1 %tmp15) ; IR: Flow1: ; IR-NEXT: %11 = phi <4 x i32> [ %tmp9, %bb21 ], [ undef, %bb14 ] @@ -149,9 +149,9 @@ ; IR-NEXT: %13 = phi i1 [ %18, %bb21 ], [ true, %bb14 ] ; IR-NEXT: %14 = phi i1 [ %18, %bb21 ], [ false, %bb14 ] ; IR-NEXT: %15 = phi i1 [ false, %bb21 ], [ true, %bb14 ] -; IR-NEXT: call void @llvm.amdgcn.end.cf(i64 %10) -; IR-NEXT: %16 = call i64 @llvm.amdgcn.if.break(i1 %13, i64 %phi.broken) -; IR-NEXT: %17 = call i1 @llvm.amdgcn.loop(i64 %16) +; IR-NEXT: call void @llvm.amdgcn.end.cf.i64(i64 %10) +; IR-NEXT: %16 = call i64 @llvm.amdgcn.if.break.i64.i64(i1 %13, i64 %phi.broken) +; IR-NEXT: %17 = call i1 @llvm.amdgcn.loop.i64(i64 %16) ; IR-NEXT: br i1 %17, label %Flow2, label %bb14 ; IR: bb21: @@ -160,14 +160,14 @@ ; IR-NEXT: br label %Flow1 ; IR: Flow2: -; IR-NEXT: call void @llvm.amdgcn.end.cf(i64 %16) -; IR-NEXT: %19 = call { i1, i64 } @llvm.amdgcn.if(i1 %15) +; IR-NEXT: call void @llvm.amdgcn.end.cf.i64(i64 %16) +; IR-NEXT: %19 = call { i1, i64 } @llvm.amdgcn.if.i64(i1 %15) ; IR-NEXT: %20 = extractvalue { i1, i64 } %19, 0 ; IR-NEXT: %21 = extractvalue { i1, i64 } %19, 1 ; IR-NEXT: br i1 %20, label %bb31.loopexit, label %Flow3 ; IR: bb31: -; IR-NEXT: call void @llvm.amdgcn.end.cf(i64 %7) +; IR-NEXT: call void @llvm.amdgcn.end.cf.i64(i64 %7) ; IR-NEXT: store volatile i32 0, i32 addrspace(1)* undef ; IR-NEXT: ret void Index: test/CodeGen/AMDGPU/or3.ll =================================================================== --- test/CodeGen/AMDGPU/or3.ll +++ test/CodeGen/AMDGPU/or3.ll @@ -22,6 +22,7 @@ ; GFX10-LABEL: or3: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_or3_b32 v0, v0, v1, v2 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: ; return to shader part epilog %x = or i32 %a, %b %result = or i32 %x, %c @@ -47,6 +48,7 @@ ; GFX10-LABEL: or3_vgpr_a: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_or3_b32 v0, v0, s2, s3 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: ; return to shader part epilog %x = or i32 %a, %b %result = or i32 %x, %c @@ -69,6 +71,7 @@ ; GFX10-LABEL: or3_vgpr_all2: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_or3_b32 v0, v1, v2, v0 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: ; return to shader part epilog %x = or i32 %b, %c %result = or i32 %a, %x @@ -91,6 +94,7 @@ ; GFX10-LABEL: or3_vgpr_bc: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_or3_b32 v0, s2, v0, v1 +; GFX10-NEXT: ; implicit-def: 
$vcc_hi ; GFX10-NEXT: ; return to shader part epilog %x = or i32 %a, %b %result = or i32 %x, %c @@ -113,6 +117,7 @@ ; GFX10-LABEL: or3_vgpr_const: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_or3_b32 v0, v1, v0, 64 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: ; return to shader part epilog %x = or i32 64, %b %result = or i32 %x, %a Index: test/CodeGen/AMDGPU/regbank-reassign.mir =================================================================== --- test/CodeGen/AMDGPU/regbank-reassign.mir +++ test/CodeGen/AMDGPU/regbank-reassign.mir @@ -49,6 +49,24 @@ S_ENDPGM 0 ... +# GCN-LABEL: s11_vs_vcc{{$}} +# GCN: $vgpr0, $vcc_lo = V_ADDC_U32_e64 killed $sgpr14, killed $vgpr0, killed $vcc_lo, 0 +--- +name: s11_vs_vcc +tracksRegLiveness: true +registers: + - { id: 0, class: sgpr_32, preferred-register: '$sgpr11' } + - { id: 1, class: vgpr_32 } + - { id: 2, class: vgpr_32 } +body: | + bb.0: + %0 = IMPLICIT_DEF + %1 = IMPLICIT_DEF + $vcc_lo = IMPLICIT_DEF + %2, $vcc_lo = V_ADDC_U32_e64 killed %0, killed %1, killed $vcc_lo, 0, implicit $exec + S_ENDPGM 0 +... + # GCN-LABEL: s0_vs_s16{{$}} # GCN: S_AND_B32 killed renamable $sgpr14, $sgpr0, --- Index: test/CodeGen/AMDGPU/shl_add.ll =================================================================== --- test/CodeGen/AMDGPU/shl_add.ll +++ test/CodeGen/AMDGPU/shl_add.ll @@ -22,6 +22,7 @@ ; GFX10-LABEL: shl_add: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_lshl_add_u32 v0, v0, v1, v2 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: ; return to shader part epilog %x = shl i32 %a, %b %result = add i32 %x, %c @@ -46,6 +47,7 @@ ; GFX10-LABEL: shl_add_vgpr_a: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_lshl_add_u32 v0, v0, s2, s3 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: ; return to shader part epilog %x = shl i32 %a, %b %result = add i32 %x, %c @@ -68,6 +70,7 @@ ; GFX10-LABEL: shl_add_vgpr_all: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_lshl_add_u32 v0, v0, v1, v2 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: ; return to shader part epilog %x = shl i32 %a, %b %result = add i32 %x, %c @@ -90,6 +93,7 @@ ; GFX10-LABEL: shl_add_vgpr_ab: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_lshl_add_u32 v0, v0, v1, s2 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: ; return to shader part epilog %x = shl i32 %a, %b %result = add i32 %x, %c @@ -112,6 +116,7 @@ ; GFX10-LABEL: shl_add_vgpr_const: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_lshl_add_u32 v0, v0, 3, v1 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: ; return to shader part epilog %x = shl i32 %a, 3 %result = add i32 %x, %b Index: test/CodeGen/AMDGPU/shl_or.ll =================================================================== --- test/CodeGen/AMDGPU/shl_or.ll +++ test/CodeGen/AMDGPU/shl_or.ll @@ -22,6 +22,7 @@ ; GFX10-LABEL: shl_or: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_lshl_or_b32 v0, v0, v1, v2 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: ; return to shader part epilog %x = shl i32 %a, %b %result = or i32 %x, %c @@ -45,6 +46,7 @@ ; GFX10-LABEL: shl_or_vgpr_c: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_lshl_or_b32 v0, s2, s3, v0 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: ; return to shader part epilog %x = shl i32 %a, %b %result = or i32 %x, %c @@ -67,6 +69,7 @@ ; GFX10-LABEL: shl_or_vgpr_all2: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_lshl_or_b32 v0, v0, v1, v2 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: ; return to shader part epilog %x = shl i32 %a, %b %result = or i32 %c, %x @@ -89,6 +92,7 @@ ; GFX10-LABEL: shl_or_vgpr_ac: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_lshl_or_b32 v0, v0, s2, v1 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; 
GFX10-NEXT: ; return to shader part epilog %x = shl i32 %a, %b %result = or i32 %x, %c @@ -111,6 +115,7 @@ ; GFX10-LABEL: shl_or_vgpr_const: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_lshl_or_b32 v0, v0, v1, 6 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: ; return to shader part epilog %x = shl i32 %a, %b %result = or i32 %x, 6 @@ -133,6 +138,7 @@ ; GFX10-LABEL: shl_or_vgpr_const2: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_lshl_or_b32 v0, v0, 6, v1 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: ; return to shader part epilog %x = shl i32 %a, 6 %result = or i32 %x, %b @@ -155,6 +161,7 @@ ; GFX10-LABEL: shl_or_vgpr_const_scalar1: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_lshl_or_b32 v0, s2, 6, v0 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: ; return to shader part epilog %x = shl i32 %a, 6 %result = or i32 %x, %b @@ -177,6 +184,7 @@ ; GFX10-LABEL: shl_or_vgpr_const_scalar2: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_lshl_or_b32 v0, v0, 6, s2 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: ; return to shader part epilog %x = shl i32 %a, 6 %result = or i32 %x, %b Index: test/CodeGen/AMDGPU/si-annotate-cf-unreachable.ll =================================================================== --- test/CodeGen/AMDGPU/si-annotate-cf-unreachable.ll +++ test/CodeGen/AMDGPU/si-annotate-cf-unreachable.ll @@ -3,8 +3,8 @@ ; OPT-LABEL: @annotate_unreachable( -; OPT: call { i1, i64 } @llvm.amdgcn.if( -; OPT-NOT: call void @llvm.amdgcn.end.cf( +; OPT: call { i1, i64 } @llvm.amdgcn.if.i64( +; OPT-NOT: call void @llvm.amdgcn.end.cf ; GCN-LABEL: {{^}}annotate_unreachable: Index: test/CodeGen/AMDGPU/si-annotatecfg-multiple-backedges.ll =================================================================== --- test/CodeGen/AMDGPU/si-annotatecfg-multiple-backedges.ll +++ test/CodeGen/AMDGPU/si-annotatecfg-multiple-backedges.ll @@ -17,17 +17,17 @@ ; OPT-NEXT: [[TMP4:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[TMP5:%.*]], [[LOOP]] ], [ 0, [[LOOP_END]] ] ; OPT-NEXT: [[TMP5]] = add nsw i32 [[TMP4]], [[TMP]] ; OPT-NEXT: [[TMP6:%.*]] = icmp slt i32 [[ARG]], [[TMP5]] -; OPT-NEXT: [[TMP0]] = call i64 @llvm.amdgcn.if.break(i1 [[TMP6]], i64 [[PHI_BROKEN]]) -; OPT-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.loop(i64 [[TMP0]]) +; OPT-NEXT: [[TMP0]] = call i64 @llvm.amdgcn.if.break.i64.i64(i1 [[TMP6]], i64 [[PHI_BROKEN]]) +; OPT-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.loop.i64(i64 [[TMP0]]) ; OPT-NEXT: br i1 [[TMP1]], label [[LOOP_END]], label [[LOOP]] ; OPT: loop_end: -; OPT-NEXT: call void @llvm.amdgcn.end.cf(i64 [[TMP0]]) +; OPT-NEXT: call void @llvm.amdgcn.end.cf.i64(i64 [[TMP0]]) ; OPT-NEXT: [[EXIT:%.*]] = icmp sgt i32 [[TMP5]], [[TMP2]] -; OPT-NEXT: [[TMP7]] = call i64 @llvm.amdgcn.if.break(i1 [[EXIT]], i64 [[PHI_BROKEN1]]) -; OPT-NEXT: [[TMP3:%.*]] = call i1 @llvm.amdgcn.loop(i64 [[TMP7]]) +; OPT-NEXT: [[TMP7]] = call i64 @llvm.amdgcn.if.break.i64.i64(i1 [[EXIT]], i64 [[PHI_BROKEN1]]) +; OPT-NEXT: [[TMP3:%.*]] = call i1 @llvm.amdgcn.loop.i64(i64 [[TMP7]]) ; OPT-NEXT: br i1 [[TMP3]], label [[LOOP_EXIT:%.*]], label [[LOOP]] ; OPT: loop_exit: -; OPT-NEXT: call void @llvm.amdgcn.end.cf(i64 [[TMP7]]) +; OPT-NEXT: call void @llvm.amdgcn.end.cf.i64(i64 [[TMP7]]) ; OPT-NEXT: [[TMP12:%.*]] = zext i32 [[TMP]] to i64 ; OPT-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, i32* [[ARG1:%.*]], i64 [[TMP12]] ; OPT-NEXT: [[TMP14:%.*]] = addrspacecast i32* [[TMP13]] to i32 addrspace(1)* Index: test/CodeGen/AMDGPU/sub_i1.ll =================================================================== --- test/CodeGen/AMDGPU/sub_i1.ll +++ test/CodeGen/AMDGPU/sub_i1.ll 
@@ -1,8 +1,10 @@ -; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s +; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,WAVE64 %s +; RUN: llc -march=amdgcn -mcpu=gfx1010 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,WAVE32 %s ; GCN-LABEL: {{^}}sub_var_var_i1: -; GCN: s_xor_b64 +; WAVE32: s_xor_b32 +; WAVE64: s_xor_b64 define amdgpu_kernel void @sub_var_var_i1(i1 addrspace(1)* %out, i1 addrspace(1)* %in0, i1 addrspace(1)* %in1) { %a = load volatile i1, i1 addrspace(1)* %in0 %b = load volatile i1, i1 addrspace(1)* %in1 @@ -12,7 +14,8 @@ } ; GCN-LABEL: {{^}}sub_var_imm_i1: -; GCN: s_not_b64 +; WAVE32: s_not_b32 +; WAVE64: s_not_b64 define amdgpu_kernel void @sub_var_imm_i1(i1 addrspace(1)* %out, i1 addrspace(1)* %in) { %a = load volatile i1, i1 addrspace(1)* %in %sub = sub i1 %a, 1 @@ -22,7 +25,8 @@ ; GCN-LABEL: {{^}}sub_i1_cf: ; GCN: ; %endif -; GCN: s_not_b64 +; WAVE32: s_not_b32 +; WAVE64: s_not_b64 define amdgpu_kernel void @sub_i1_cf(i1 addrspace(1)* %out, i1 addrspace(1)* %a, i1 addrspace(1)* %b) { entry: %tid = call i32 @llvm.amdgcn.workitem.id.x() Index: test/CodeGen/AMDGPU/wave32.ll =================================================================== --- /dev/null +++ test/CodeGen/AMDGPU/wave32.ll @@ -0,0 +1,1154 @@ +; RUN: llc -march=amdgcn -mcpu=gfx1010 -mattr=+wavefrontsize32,-wavefrontsize64 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX1032 %s +; RUN: llc -march=amdgcn -mcpu=gfx1010 -mattr=-wavefrontsize32,+wavefrontsize64 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX1064 %s +; RUN: llc -march=amdgcn -mcpu=gfx1010 -mattr=+wavefrontsize32,-wavefrontsize64 -amdgpu-early-ifcvt=1 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX1032 %s +; RUN: llc -march=amdgcn -mcpu=gfx1010 -mattr=-wavefrontsize32,+wavefrontsize64 -amdgpu-early-ifcvt=1 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX1064 %s +; RUN: llc -march=amdgcn -mcpu=gfx1010 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX1032,GFX10DEFWAVE %s + +; GCN-LABEL: {{^}}test_vopc_i32: +; GFX1032: v_cmp_lt_i32_e32 vcc_lo, 0, v{{[0-9]+}} +; GFX1032: v_cndmask_b32_e64 v{{[0-9]+}}, 2, 1, vcc_lo +; GFX1064: v_cmp_lt_i32_e32 vcc, 0, v{{[0-9]+}} +; GFX1064: v_cndmask_b32_e64 v{{[0-9]+}}, 2, 1, vcc{{$}} +define amdgpu_kernel void @test_vopc_i32(i32 addrspace(1)* %arg) { + %lid = tail call i32 @llvm.amdgcn.workitem.id.x() + %gep = getelementptr inbounds i32, i32 addrspace(1)* %arg, i32 %lid + %load = load i32, i32 addrspace(1)* %gep, align 4 + %cmp = icmp sgt i32 %load, 0 + %sel = select i1 %cmp, i32 1, i32 2 + store i32 %sel, i32 addrspace(1)* %gep, align 4 + ret void +} + +; GCN-LABEL: {{^}}test_vopc_f32: +; GFX1032: v_cmp_nge_f32_e32 vcc_lo, 0, v{{[0-9]+}} +; GFX1032: v_cndmask_b32_e64 v{{[0-9]+}}, 2.0, 1.0, vcc_lo +; GFX1064: v_cmp_nge_f32_e32 vcc, 0, v{{[0-9]+}} +; GFX1064: v_cndmask_b32_e64 v{{[0-9]+}}, 2.0, 1.0, vcc{{$}} +define amdgpu_kernel void @test_vopc_f32(float addrspace(1)* %arg) { + %lid = tail call i32 @llvm.amdgcn.workitem.id.x() + %gep = getelementptr inbounds float, float addrspace(1)* %arg, i32 %lid + %load = load float, float addrspace(1)* %gep, align 4 + %cmp = fcmp ugt float %load, 0.0 + %sel = select i1 %cmp, float 1.0, float 2.0 + store float %sel, float addrspace(1)* %gep, align 4 + ret void +} + +; GCN-LABEL: {{^}}test_vopc_vcmpx: +; GFX1032: v_cmpx_le_f32_e32 0, v{{[0-9]+}} +; GFX1064: v_cmpx_le_f32_e32 0, v{{[0-9]+}} +define amdgpu_ps 
void @test_vopc_vcmpx(float %x) { + %cmp = fcmp oge float %x, 0.0 + call void @llvm.amdgcn.kill(i1 %cmp) + ret void +} + +; GCN-LABEL: {{^}}test_vopc_2xf16: +; GFX1032: v_cmp_le_f16_sdwa [[SC:s[0-9]+]], {{[vs][0-9]+}}, v{{[0-9]+}} src0_sel:WORD_1 src1_sel:DWORD +; GFX1032: v_cndmask_b32_e64 v{{[0-9]+}}, 0x3c003c00, v{{[0-9]+}}, [[SC]] +; GFX1064: v_cmp_le_f16_sdwa [[SC:s\[[0-9:]+\]]], {{[vs][0-9]+}}, v{{[0-9]+}} src0_sel:WORD_1 src1_sel:DWORD +; GFX1064: v_cndmask_b32_e64 v{{[0-9]+}}, 0x3c003c00, v{{[0-9]+}}, [[SC]] +define amdgpu_kernel void @test_vopc_2xf16(<2 x half> addrspace(1)* %arg) { + %lid = tail call i32 @llvm.amdgcn.workitem.id.x() + %gep = getelementptr inbounds <2 x half>, <2 x half> addrspace(1)* %arg, i32 %lid + %load = load <2 x half>, <2 x half> addrspace(1)* %gep, align 4 + %elt = extractelement <2 x half> %load, i32 1 + %cmp = fcmp ugt half %elt, 0.0 + %sel = select i1 %cmp, <2 x half> , <2 x half> %load + store <2 x half> %sel, <2 x half> addrspace(1)* %gep, align 4 + ret void +} + +; GCN-LABEL: {{^}}test_vopc_class: +; GFX1032: v_cmp_class_f32_e64 [[C:vcc_lo|s[0-9:]+]], s{{[0-9]+}}, 0x204 +; GFX1032: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 1, [[C]] +; GFX1064: v_cmp_class_f32_e64 [[C:vcc|s\[[0-9:]+\]]], s{{[0-9]+}}, 0x204 +; GFX1064: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 1, [[C]]{{$}} +define amdgpu_kernel void @test_vopc_class(i32 addrspace(1)* %out, float %x) #0 { + %fabs = tail call float @llvm.fabs.f32(float %x) + %cmp = fcmp oeq float %fabs, 0x7FF0000000000000 + %ext = zext i1 %cmp to i32 + store i32 %ext, i32 addrspace(1)* %out, align 4 + ret void +} + +; GCN-LABEL: {{^}}test_vcmp_vcnd_f16: +; GFX1032: v_cmp_neq_f16_e64 [[C:vcc_lo|s\[[0-9:]+\]]], 0x7c00, s{{[0-9]+}} +; GFX1032: v_cndmask_b32_e32 v{{[0-9]+}}, 0x3c00, v{{[0-9]+}}, [[C]] + +; GFX1064: v_cmp_neq_f16_e64 [[C:vcc|s\[[0-9:]+\]]], 0x7c00, s{{[0-9]+}} +; GFX1064: v_cndmask_b32_e32 v{{[0-9]+}}, 0x3c00, v{{[0-9]+}}, [[C]]{{$}} +define amdgpu_kernel void @test_vcmp_vcnd_f16(half addrspace(1)* %out, half %x) #0 { + %cmp = fcmp oeq half %x, 0x7FF0000000000000 + %sel = select i1 %cmp, half 1.0, half %x + store half %sel, half addrspace(1)* %out, align 2 + ret void +} + +; GCN-LABEL: {{^}}test_vop3_cmp_f32_sop_and: +; GFX1032: v_cmp_nge_f32_e32 vcc_lo, 0, v{{[0-9]+}} +; GFX1032: v_cmp_nle_f32_e64 [[C2:s[0-9]+]], 1.0, v{{[0-9]+}} +; GFX1032: s_and_b32 [[AND:s[0-9]+]], vcc_lo, [[C2]] +; GFX1032: v_cndmask_b32_e64 v{{[0-9]+}}, 2.0, 1.0, [[AND]] +; GFX1064: v_cmp_nge_f32_e32 vcc, 0, v{{[0-9]+}} +; GFX1064: v_cmp_nle_f32_e64 [[C2:s\[[0-9:]+\]]], 1.0, v{{[0-9]+}} +; GFX1064: s_and_b64 [[AND:s\[[0-9:]+\]]], vcc, [[C2]] +; GFX1064: v_cndmask_b32_e64 v{{[0-9]+}}, 2.0, 1.0, [[AND]] +define amdgpu_kernel void @test_vop3_cmp_f32_sop_and(float addrspace(1)* %arg) { + %lid = tail call i32 @llvm.amdgcn.workitem.id.x() + %gep = getelementptr inbounds float, float addrspace(1)* %arg, i32 %lid + %load = load float, float addrspace(1)* %gep, align 4 + %cmp = fcmp ugt float %load, 0.0 + %cmp2 = fcmp ult float %load, 1.0 + %and = and i1 %cmp, %cmp2 + %sel = select i1 %and, float 1.0, float 2.0 + store float %sel, float addrspace(1)* %gep, align 4 + ret void +} + +; GCN-LABEL: {{^}}test_vop3_cmp_i32_sop_xor: +; GFX1032: v_cmp_lt_i32_e32 vcc_lo, 0, v{{[0-9]+}} +; GFX1032: v_cmp_gt_i32_e64 [[C2:s[0-9]+]], 1, v{{[0-9]+}} +; GFX1032: s_xor_b32 [[AND:s[0-9]+]], vcc_lo, [[C2]] +; GFX1032: v_cndmask_b32_e64 v{{[0-9]+}}, 2, 1, [[AND]] +; GFX1064: v_cmp_lt_i32_e32 vcc, 0, v{{[0-9]+}} +; GFX1064: v_cmp_gt_i32_e64 [[C2:s\[[0-9:]+\]]], 1, v{{[0-9]+}} +; 
GFX1064: s_xor_b64 [[AND:s\[[0-9:]+\]]], vcc, [[C2]] +; GFX1064: v_cndmask_b32_e64 v{{[0-9]+}}, 2, 1, [[AND]] +define amdgpu_kernel void @test_vop3_cmp_i32_sop_xor(i32 addrspace(1)* %arg) { + %lid = tail call i32 @llvm.amdgcn.workitem.id.x() + %gep = getelementptr inbounds i32, i32 addrspace(1)* %arg, i32 %lid + %load = load i32, i32 addrspace(1)* %gep, align 4 + %cmp = icmp sgt i32 %load, 0 + %cmp2 = icmp slt i32 %load, 1 + %xor = xor i1 %cmp, %cmp2 + %sel = select i1 %xor, i32 1, i32 2 + store i32 %sel, i32 addrspace(1)* %gep, align 4 + ret void +} + +; GCN-LABEL: {{^}}test_vop3_cmp_u32_sop_or: +; GFX1032: v_cmp_lt_u32_e32 vcc_lo, 3, v{{[0-9]+}} +; GFX1032: v_cmp_gt_u32_e64 [[C2:s[0-9]+]], 2, v{{[0-9]+}} +; GFX1032: s_or_b32 [[AND:s[0-9]+]], vcc_lo, [[C2]] +; GFX1032: v_cndmask_b32_e64 v{{[0-9]+}}, 2, 1, [[AND]] +; GFX1064: v_cmp_lt_u32_e32 vcc, 3, v{{[0-9]+}} +; GFX1064: v_cmp_gt_u32_e64 [[C2:s\[[0-9:]+\]]], 2, v{{[0-9]+}} +; GFX1064: s_or_b64 [[AND:s\[[0-9:]+\]]], vcc, [[C2]] +; GFX1064: v_cndmask_b32_e64 v{{[0-9]+}}, 2, 1, [[AND]] +define amdgpu_kernel void @test_vop3_cmp_u32_sop_or(i32 addrspace(1)* %arg) { + %lid = tail call i32 @llvm.amdgcn.workitem.id.x() + %gep = getelementptr inbounds i32, i32 addrspace(1)* %arg, i32 %lid + %load = load i32, i32 addrspace(1)* %gep, align 4 + %cmp = icmp ugt i32 %load, 3 + %cmp2 = icmp ult i32 %load, 2 + %or = or i1 %cmp, %cmp2 + %sel = select i1 %or, i32 1, i32 2 + store i32 %sel, i32 addrspace(1)* %gep, align 4 + ret void +} + +; GCN-LABEL: {{^}}test_mask_if: +; GFX1032: s_and_saveexec_b32 s{{[0-9]+}}, vcc_lo +; GFX1064: s_and_saveexec_b64 s[{{[0-9:]+}}], vcc{{$}} +; GCN: ; mask branch +define amdgpu_kernel void @test_mask_if(i32 addrspace(1)* %arg) #0 { + %lid = tail call i32 @llvm.amdgcn.workitem.id.x() + %cmp = icmp ugt i32 %lid, 10 + br i1 %cmp, label %if, label %endif + +if: + store i32 0, i32 addrspace(1)* %arg, align 4 + br label %endif + +endif: + ret void +} + +; GCN-LABEL: {{^}}test_loop_with_if: +; GFX1032: s_and_saveexec_b32 s{{[0-9]+}}, vcc_lo +; GFX1064: s_and_saveexec_b64 s[{{[0-9:]+}}], vcc{{$}} +; GCN: ; mask branch BB10_3 +; GCN: BB{{.*}}: +; GCN: BB{{.*}}: +; GFX1032: s_or_b32 exec_lo, exec_lo, s{{[0-9]+}} +; GFX1032: s_and_saveexec_b32 s{{[0-9]+}}, s{{[0-9]+}} +; GFX1032: s_xor_b32 s{{[0-9]+}}, exec_lo, s{{[0-9]+}} +; GFX1064: s_or_b64 exec, exec, s[{{[0-9:]+}}] +; GFX1064: s_and_saveexec_b64 s[{{[0-9:]+}}], s[{{[0-9:]+}}]{{$}} +; GFX1064: s_xor_b64 s[{{[0-9:]+}}], exec, s[{{[0-9:]+}}] +; GCN: ; mask branch BB +; GCN: BB{{.*}}: +; GCN: BB{{.*}}: +; GFX1032: s_or_b32 exec_lo, exec_lo, s{{[0-9]+}} +; GFX1032: s_and_saveexec_b32 s{{[0-9]+}}, s{{[0-9]+}} +; GFX1064: s_or_b64 exec, exec, s[{{[0-9:]+}}] +; GFX1064: s_and_saveexec_b64 s[{{[0-9:]+}}], s[{{[0-9:]+}}]{{$}} +; GCN: ; mask branch BB +; GCN: BB{{.*}}: +; GCN: BB{{.*}}: +; GFX1032: s_or_b32 exec_lo, exec_lo, s{{[0-9]+}} +; GFX1032: s_or_b32 s{{[0-9]+}}, vcc_lo, s{{[0-9]+}} +; GFX1032: s_andn2_b32 exec_lo, exec_lo, s{{[0-9]+}} +; GFX1064: s_or_b64 exec, exec, s[{{[0-9:]+}}] +; GFX1064: s_or_b64 s[{{[0-9:]+}}], vcc, s[{{[0-9:]+}}] +; GFX1064: s_andn2_b64 exec, exec, s[{{[0-9:]+}}] +; GCN: s_cbranch_execnz +define amdgpu_kernel void @test_loop_with_if(i32 addrspace(1)* %arg) #0 { +bb: + %tmp = tail call i32 @llvm.amdgcn.workitem.id.x() + br label %bb2 + +bb1: + ret void + +bb2: + %tmp3 = phi i32 [ 0, %bb ], [ %tmp15, %bb13 ] + %tmp4 = icmp slt i32 %tmp3, %tmp + br i1 %tmp4, label %bb5, label %bb11 + +bb5: + %tmp6 = sext i32 %tmp3 to i64 + %tmp7 = getelementptr inbounds i32, 
i32 addrspace(1)* %arg, i64 %tmp6 + %tmp8 = load i32, i32 addrspace(1)* %tmp7, align 4 + %tmp9 = icmp sgt i32 %tmp8, 10 + br i1 %tmp9, label %bb10, label %bb11 + +bb10: + store i32 %tmp, i32 addrspace(1)* %tmp7, align 4 + br label %bb13 + +bb11: + %tmp12 = sdiv i32 %tmp3, 2 + br label %bb13 + +bb13: + %tmp14 = phi i32 [ %tmp3, %bb10 ], [ %tmp12, %bb11 ] + %tmp15 = add nsw i32 %tmp14, 1 + %tmp16 = icmp slt i32 %tmp14, 255 + br i1 %tmp16, label %bb2, label %bb1 +} + +; GCN-LABEL: {{^}}test_loop_with_if_else_break: +; GFX1032: s_and_saveexec_b32 s{{[0-9]+}}, vcc_lo +; GFX1064: s_and_saveexec_b64 s[{{[0-9:]+}}], vcc{{$}} +; GCN: ; mask branch +; GCN: s_cbranch_execz +; GCN: BB{{.*}}: +; GCN: BB{{.*}}: +; GFX1032: s_or_b32 s{{[0-9]+}}, s{{[0-9]+}}, exec_lo +; GFX1064: s_or_b64 s[{{[0-9:]+}}], s[{{[0-9:]+}}], exec +; GCN: s_cbranch_scc1 +; GCN: ; {{BB|%bb}}{{.*}}: +; GFX1032: s_or_b32 s{{[0-9]+}}, vcc_lo, s{{[0-9]+}} +; GFX1032: s_or_b32 s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}} +; GFX1064: s_or_b64 s[{{[0-9:]+}}], vcc, s[{{[0-9:]+}}] +; GFX1064: s_or_b64 s[{{[0-9:]+}}], s[{{[0-9:]+}}], s[{{[0-9:]+}}] +; GCN: s_branch +; GCN: BB{{.*}}: +; GFX1032: s_or_b32 s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}} +; GFX1032: s_mov_b32 s{{[0-9]+}}, s{{[0-9]+}} +; GFX1032: s_andn2_b32 exec_lo, exec_lo, s{{[0-9]+}} +; GFX1064: s_or_b64 s[{{[0-9:]+}}], s[{{[0-9:]+}}], s[{{[0-9:]+}}] +; GFX1064: s_mov_b64 s[{{[0-9:]+}}], s[{{[0-9:]+}}] +; GFX1064: s_andn2_b64 exec, exec, s[{{[0-9:]+}}] +; GCN: s_cbranch_execnz +; GCN: BB{{.*}}: +define amdgpu_kernel void @test_loop_with_if_else_break(i32 addrspace(1)* %arg) #0 { +bb: + %tmp = tail call i32 @llvm.amdgcn.workitem.id.x() + %tmp1 = icmp eq i32 %tmp, 0 + br i1 %tmp1, label %.loopexit, label %.preheader + +.preheader: + br label %bb2 + +bb2: + %tmp3 = phi i32 [ %tmp9, %bb8 ], [ 0, %.preheader ] + %tmp4 = zext i32 %tmp3 to i64 + %tmp5 = getelementptr inbounds i32, i32 addrspace(1)* %arg, i64 %tmp4 + %tmp6 = load i32, i32 addrspace(1)* %tmp5, align 4 + %tmp7 = icmp sgt i32 %tmp6, 10 + br i1 %tmp7, label %bb8, label %.loopexit + +bb8: + store i32 %tmp, i32 addrspace(1)* %tmp5, align 4 + %tmp9 = add nuw nsw i32 %tmp3, 1 + %tmp10 = icmp ult i32 %tmp9, 256 + %tmp11 = icmp ult i32 %tmp9, %tmp + %tmp12 = and i1 %tmp10, %tmp11 + br i1 %tmp12, label %bb2, label %.loopexit + +.loopexit: + ret void +} + +; GCN-LABEL: {{^}}test_addc_vop2b: +; GFX1032: v_add_co_u32_e64 v{{[0-9]+}}, vcc_lo, v{{[0-9]+}}, s{{[0-9]+}} +; GFX1032: v_add_co_ci_u32_e32 v{{[0-9]+}}, vcc_lo, s{{[0-9]+}}, v{{[0-9]+}}, vcc_lo +; GFX1064: v_add_co_u32_e64 v{{[0-9]+}}, vcc, v{{[0-9]+}}, s{{[0-9]+}} +; GFX1064: v_add_co_ci_u32_e32 v{{[0-9]+}}, vcc, s{{[0-9]+}}, v{{[0-9]+}}, vcc{{$}} +define amdgpu_kernel void @test_addc_vop2b(i64 addrspace(1)* %arg, i64 %arg1) #0 { +bb: + %tmp = tail call i32 @llvm.amdgcn.workitem.id.x() + %tmp3 = getelementptr inbounds i64, i64 addrspace(1)* %arg, i32 %tmp + %tmp4 = load i64, i64 addrspace(1)* %tmp3, align 8 + %tmp5 = add nsw i64 %tmp4, %arg1 + store i64 %tmp5, i64 addrspace(1)* %tmp3, align 8 + ret void +} + +; GCN-LABEL: {{^}}test_subbrev_vop2b: +; GFX1032: v_sub_co_u32_e64 v{{[0-9]+}}, [[A0:s[0-9]+|vcc_lo]], v{{[0-9]+}}, s{{[0-9]+}}{{$}} +; GFX1032: v_subrev_co_ci_u32_e32 v{{[0-9]+}}, vcc_lo, {{[vs][0-9]+}}, {{[vs][0-9]+}}, [[A0]]{{$}} +; GFX1064: v_sub_co_u32_e64 v{{[0-9]+}}, [[A0:s\[[0-9:]+\]|vcc]], v{{[0-9]+}}, s{{[0-9]+}}{{$}} +; GFX1064: v_subrev_co_ci_u32_e32 v{{[0-9]+}}, vcc, {{[vs][0-9]+}}, {{[vs][0-9]+}}, [[A0]]{{$}} +define amdgpu_kernel void @test_subbrev_vop2b(i64 
addrspace(1)* %arg, i64 %arg1) #0 { +bb: + %tmp = tail call i32 @llvm.amdgcn.workitem.id.x() + %tmp3 = getelementptr inbounds i64, i64 addrspace(1)* %arg, i32 %tmp + %tmp4 = load i64, i64 addrspace(1)* %tmp3, align 8 + %tmp5 = sub nsw i64 %tmp4, %arg1 + store i64 %tmp5, i64 addrspace(1)* %tmp3, align 8 + ret void +} + +; GCN-LABEL: {{^}}test_subb_vop2b: +; GFX1032: v_sub_co_u32_e64 v{{[0-9]+}}, [[A0:s[0-9]+|vcc_lo]], s{{[0-9]+}}, v{{[0-9]+}}{{$}} +; GFX1032: v_sub_co_ci_u32_e32 v{{[0-9]+}}, vcc_lo, {{[vs][0-9]+}}, v{{[0-9]+}}, [[A0]]{{$}} +; GFX1064: v_sub_co_u32_e64 v{{[0-9]+}}, [[A0:s\[[0-9:]+\]|vcc]], s{{[0-9]+}}, v{{[0-9]+}}{{$}} +; GFX1064: v_sub_co_ci_u32_e32 v{{[0-9]+}}, vcc, {{[vs][0-9]+}}, v{{[0-9]+}}, [[A0]]{{$}} +define amdgpu_kernel void @test_subb_vop2b(i64 addrspace(1)* %arg, i64 %arg1) #0 { +bb: + %tmp = tail call i32 @llvm.amdgcn.workitem.id.x() + %tmp3 = getelementptr inbounds i64, i64 addrspace(1)* %arg, i32 %tmp + %tmp4 = load i64, i64 addrspace(1)* %tmp3, align 8 + %tmp5 = sub nsw i64 %arg1, %tmp4 + store i64 %tmp5, i64 addrspace(1)* %tmp3, align 8 + ret void +} + +; GCN-LABEL: {{^}}test_udiv64: +; GFX1032: v_add_co_u32_e64 v{{[0-9]+}}, [[SDST:s[0-9]+]], v{{[0-9]+}}, v{{[0-9]+}} +; GFX1032: v_add_co_ci_u32_e32 v{{[0-9]+}}, vcc_lo, 0, v{{[0-9]+}}, vcc_lo +; GFX1032: v_add_co_ci_u32_e64 v{{[0-9]+}}, vcc_lo, v{{[0-9]+}}, v{{[0-9]+}}, [[SDST]] +; GFX1032: v_add_co_u32_e64 v{{[0-9]+}}, vcc_lo, v{{[0-9]+}}, v{{[0-9]+}} +; GFX1032: v_add_co_u32_e64 v{{[0-9]+}}, vcc_lo, v{{[0-9]+}}, v{{[0-9]+}} +; GFX1032: v_add_co_u32_e64 v{{[0-9]+}}, vcc_lo, v{{[0-9]+}}, v{{[0-9]+}} +; GFX1032: v_add_co_ci_u32_e32 v{{[0-9]+}}, vcc_lo, 0, v{{[0-9]+}}, vcc_lo +; GFX1032: v_sub_co_u32_e64 v{{[0-9]+}}, vcc_lo, s{{[0-9]+}}, v{{[0-9]+}} +; GFX1032: v_sub_co_ci_u32_e64 v{{[0-9]+}}, s{{[0-9]+}}, {{[vs][0-9]+}}, v{{[0-9]+}}, vcc_lo +; GFX1032: v_subrev_co_ci_u32_e32 v{{[0-9]+}}, vcc_lo, {{[vs][0-9]+}}, v{{[0-9]+}}, vcc_lo +; GFX1064: v_add_co_u32_e64 v{{[0-9]+}}, [[SDST:s\[[0-9:]+\]]], v{{[0-9]+}}, v{{[0-9]+}} +; GFX1064: v_add_co_ci_u32_e32 v{{[0-9]+}}, vcc, 0, v{{[0-9]+}}, vcc{{$}} +; GFX1064: v_add_co_ci_u32_e64 v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}}, [[SDST]] +; GFX1064: v_add_co_u32_e64 v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}} +; GFX1064: v_add_co_u32_e64 v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}} +; GFX1064: v_add_co_u32_e64 v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}} +; GFX1064: v_add_co_ci_u32_e32 v{{[0-9]+}}, vcc, 0, v{{[0-9]+}}, vcc{{$}} +; GFX1064: v_sub_co_u32_e64 v{{[0-9]+}}, vcc, s{{[0-9]+}}, v{{[0-9]+}} +; GFX1064: v_sub_co_ci_u32_e64 v{{[0-9]+}}, s[{{[0-9:]+}}], {{[vs][0-9]+}}, v{{[0-9]+}}, vcc{{$}} +; GFX1064: v_subrev_co_ci_u32_e32 v{{[0-9]+}}, vcc, {{[vs][0-9]+}}, v{{[0-9]+}}, vcc{{$}} +define amdgpu_kernel void @test_udiv64(i64 addrspace(1)* %arg) #0 { +bb: + %tmp = getelementptr inbounds i64, i64 addrspace(1)* %arg, i64 1 + %tmp1 = load i64, i64 addrspace(1)* %tmp, align 8 + %tmp2 = load i64, i64 addrspace(1)* %arg, align 8 + %tmp3 = udiv i64 %tmp1, %tmp2 + %tmp4 = getelementptr inbounds i64, i64 addrspace(1)* %arg, i64 2 + store i64 %tmp3, i64 addrspace(1)* %tmp4, align 8 + ret void +} + +; GCN-LABEL: {{^}}test_div_scale_f32: +; GFX1032: v_div_scale_f32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} +; GFX1064: v_div_scale_f32 v{{[0-9]+}}, s[{{[0-9:]+}}], v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} +define amdgpu_kernel void @test_div_scale_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 { + %tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone 
+ %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid + %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1 + + %a = load volatile float, float addrspace(1)* %gep.0, align 4 + %b = load volatile float, float addrspace(1)* %gep.1, align 4 + + %result = call { float, i1 } @llvm.amdgcn.div.scale.f32(float %a, float %b, i1 false) nounwind readnone + %result0 = extractvalue { float, i1 } %result, 0 + store float %result0, float addrspace(1)* %out, align 4 + ret void +} + +; GCN-LABEL: {{^}}test_div_scale_f64: +; GFX1032: v_div_scale_f64 v[{{[0-9:]+}}], s{{[0-9]+}}, v[{{[0-9:]+}}], v[{{[0-9:]+}}], v[{{[0-9:]+}}] +; GFX1064: v_div_scale_f64 v[{{[0-9:]+}}], s[{{[0-9:]+}}], v[{{[0-9:]+}}], v[{{[0-9:]+}}], v[{{[0-9:]+}}] +define amdgpu_kernel void @test_div_scale_f64(double addrspace(1)* %out, double addrspace(1)* %aptr, double addrspace(1)* %in) #0 { + %tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone + %gep.0 = getelementptr double, double addrspace(1)* %in, i32 %tid + %gep.1 = getelementptr double, double addrspace(1)* %gep.0, i32 1 + + %a = load volatile double, double addrspace(1)* %gep.0, align 8 + %b = load volatile double, double addrspace(1)* %gep.1, align 8 + + %result = call { double, i1 } @llvm.amdgcn.div.scale.f64(double %a, double %b, i1 true) nounwind readnone + %result0 = extractvalue { double, i1 } %result, 0 + store double %result0, double addrspace(1)* %out, align 8 + ret void +} + +; GCN-LABEL: {{^}}test_mad_i64_i32: +; GFX1032: v_mad_i64_i32 v[{{[0-9:]+}}], s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, v[{{[0-9:]+}}] +; GFX1064: v_mad_i64_i32 v[{{[0-9:]+}}], s[{{[0-9:]+}}], v{{[0-9]+}}, v{{[0-9]+}}, v[{{[0-9:]+}}] +define i64 @test_mad_i64_i32(i32 %arg0, i32 %arg1, i64 %arg2) #0 { + %sext0 = sext i32 %arg0 to i64 + %sext1 = sext i32 %arg1 to i64 + %mul = mul i64 %sext0, %sext1 + %mad = add i64 %mul, %arg2 + ret i64 %mad +} + +; GCN-LABEL: {{^}}test_mad_u64_u32: +; GFX1032: v_mad_u64_u32 v[{{[0-9:]+}}], s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, v[{{[0-9:]+}}] +; GFX1064: v_mad_u64_u32 v[{{[0-9:]+}}], s[{{[0-9:]+}}], v{{[0-9]+}}, v{{[0-9]+}}, v[{{[0-9:]+}}] +define i64 @test_mad_u64_u32(i32 %arg0, i32 %arg1, i64 %arg2) #0 { + %sext0 = zext i32 %arg0 to i64 + %sext1 = zext i32 %arg1 to i64 + %mul = mul i64 %sext0, %sext1 + %mad = add i64 %mul, %arg2 + ret i64 %mad +} + +; GCN-LABEL: {{^}}test_div_fmas_f32: +; GFX1032: v_cmp_eq_u32_e64 vcc_lo, +; GFX1064: v_cmp_eq_u32_e64 vcc, +; GCN: v_div_fmas_f32 v{{[0-9]+}}, {{[vs][0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} +define amdgpu_kernel void @test_div_fmas_f32(float addrspace(1)* %out, float %a, float %b, float %c, i1 %d) nounwind { + %result = call float @llvm.amdgcn.div.fmas.f32(float %a, float %b, float %c, i1 %d) nounwind readnone + store float %result, float addrspace(1)* %out, align 4 + ret void +} + +; GCN-LABEL: {{^}}test_div_fmas_f64: +; GFX1032: v_cmp_eq_u32_e64 vcc_lo, +; GFX1064: v_cmp_eq_u32_e64 vcc, +; GCN-DAG: v_div_fmas_f64 v[{{[0-9:]+}}], {{[vs]}}[{{[0-9:]+}}], v[{{[0-9:]+}}], v[{{[0-9:]+}}] +define amdgpu_kernel void @test_div_fmas_f64(double addrspace(1)* %out, double %a, double %b, double %c, i1 %d) nounwind { + %result = call double @llvm.amdgcn.div.fmas.f64(double %a, double %b, double %c, i1 %d) nounwind readnone + store double %result, double addrspace(1)* %out, align 8 + ret void +} + +; GCN-LABEL: {{^}}test_div_fmas_f32_i1_phi_vcc: +; GFX1032: s_mov_b32 [[VCC:vcc_lo]], 0{{$}} +; GFX1064: s_mov_b64 [[VCC:vcc]], 0{{$}} +; GFX1032: s_and_saveexec_b32 [[SAVE:s[0-9]+]], s{{[0-9]+}}{{$}} +; GFX1064: 
s_and_saveexec_b64 [[SAVE:s\[[0-9]+:[0-9]+\]]], s[{{[0-9:]+}}]{{$}} + +; GCN: load_dword [[LOAD:v[0-9]+]] +; GCN: v_cmp_ne_u32_e32 [[VCC]], 0, [[LOAD]] + +; GCN: BB{{[0-9_]+}}: +; GFX1032: s_or_b32 exec_lo, exec_lo, [[SAVE]] +; GFX1064: s_or_b64 exec, exec, [[SAVE]] +; GCN: v_div_fmas_f32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} +define amdgpu_kernel void @test_div_fmas_f32_i1_phi_vcc(float addrspace(1)* %out, float addrspace(1)* %in, i32 addrspace(1)* %dummy) #0 { +entry: + %tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone + %gep.out = getelementptr float, float addrspace(1)* %out, i32 2 + %gep.a = getelementptr float, float addrspace(1)* %in, i32 %tid + %gep.b = getelementptr float, float addrspace(1)* %gep.a, i32 1 + %gep.c = getelementptr float, float addrspace(1)* %gep.a, i32 2 + + %a = load float, float addrspace(1)* %gep.a + %b = load float, float addrspace(1)* %gep.b + %c = load float, float addrspace(1)* %gep.c + + %cmp0 = icmp eq i32 %tid, 0 + br i1 %cmp0, label %bb, label %exit + +bb: + %val = load volatile i32, i32 addrspace(1)* %dummy + %cmp1 = icmp ne i32 %val, 0 + br label %exit + +exit: + %cond = phi i1 [false, %entry], [%cmp1, %bb] + %result = call float @llvm.amdgcn.div.fmas.f32(float %a, float %b, float %c, i1 %cond) nounwind readnone + store float %result, float addrspace(1)* %gep.out, align 4 + ret void +} + +; GCN-LABEL: {{^}}fdiv_f32: +; GFX1032: v_div_scale_f32 v{{[0-9]+}}, vcc_lo, s{{[0-9]+}}, v{{[0-9]+}}, s{{[0-9]+}} +; GFX1064: v_div_scale_f32 v{{[0-9]+}}, vcc, s{{[0-9]+}}, v{{[0-9]+}}, s{{[0-9]+}} +; GCN: v_rcp_f32_e32 v{{[0-9]+}}, v{{[0-9]+}} +; GCN-NOT: vcc +; GCN: v_div_fmas_f32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} +define amdgpu_kernel void @fdiv_f32(float addrspace(1)* %out, float %a, float %b) #0 { +entry: + %fdiv = fdiv float %a, %b + store float %fdiv, float addrspace(1)* %out + ret void +} + +; GCN-LABEL: {{^}}test_br_cc_f16: +; GFX1032: v_cmp_nlt_f16_e32 vcc_lo, +; GFX1032-NEXT: s_and_b32 vcc_lo, exec_lo, vcc_lo +; GFX1064: v_cmp_nlt_f16_e32 vcc, +; GFX1064-NEXT: s_and_b64 vcc, exec, vcc{{$}} +; GCN-NEXT: s_cbranch_vccnz +define amdgpu_kernel void @test_br_cc_f16( + half addrspace(1)* %r, + half addrspace(1)* %a, + half addrspace(1)* %b) { +entry: + %a.val = load half, half addrspace(1)* %a + %b.val = load half, half addrspace(1)* %b + %fcmp = fcmp olt half %a.val, %b.val + br i1 %fcmp, label %one, label %two + +one: + store half %a.val, half addrspace(1)* %r + ret void + +two: + store half %b.val, half addrspace(1)* %r + ret void +} + +; GCN-LABEL: {{^}}test_brcc_i1: +; GCN: s_cmp_eq_u32 s{{[0-9]+}}, 0 +; GCN-NEXT: s_cbranch_scc1 +define amdgpu_kernel void @test_brcc_i1(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in, i1 %val) #0 { + %cmp0 = icmp ne i1 %val, 0 + br i1 %cmp0, label %store, label %end + +store: + store i32 222, i32 addrspace(1)* %out + ret void + +end: + ret void +} + +; GCN-LABEL: {{^}}test_preserve_condition_undef_flag: +; GFX1032: v_cmp_nlt_f32_e64 s{{[0-9]+}}, s{{[0-9]+}}, 1.0 +; GFX1032: v_cmp_ngt_f32_e64 s{{[0-9]+}}, s{{[0-9]+}}, 0 +; GFX1032: v_cmp_nlt_f32_e64 s{{[0-9]+}}, s{{[0-9]+}}, 1.0 +; GFX1032: s_or_b32 [[OR1:s[0-9]+]], s{{[0-9]+}}, s{{[0-9]+}} +; GFX1032: s_or_b32 [[OR2:s[0-9]+]], [[OR1]], s{{[0-9]+}} +; GFX1032: s_and_b32 vcc_lo, exec_lo, [[OR2]] +; GFX1064: v_cmp_nlt_f32_e64 s[{{[0-9:]+}}], s{{[0-9]+}}, 1.0 +; GFX1064: v_cmp_ngt_f32_e64 s[{{[0-9:]+}}], s{{[0-9]+}}, 0 +; GFX1064: v_cmp_nlt_f32_e64 s[{{[0-9:]+}}], s{{[0-9]+}}, 1.0 +; GFX1064: s_or_b64
[[OR1:s\[[0-9:]+\]]], s[{{[0-9:]+}}], s[{{[0-9:]+}}] +; GFX1064: s_or_b64 [[OR2:s\[[0-9:]+\]]], [[OR1]], s[{{[0-9:]+}}] +; GFX1064: s_and_b64 vcc, exec, [[OR2]] +; GCN: s_cbranch_vccnz +define amdgpu_kernel void @test_preserve_condition_undef_flag(float %arg, i32 %arg1, float %arg2) #0 { +bb0: + %tmp = icmp sgt i32 %arg1, 4 + %undef = call i1 @llvm.amdgcn.class.f32(float undef, i32 undef) + %tmp4 = select i1 %undef, float %arg, float 1.000000e+00 + %tmp5 = fcmp ogt float %arg2, 0.000000e+00 + %tmp6 = fcmp olt float %arg2, 1.000000e+00 + %tmp7 = fcmp olt float %arg, %tmp4 + %tmp8 = and i1 %tmp5, %tmp6 + %tmp9 = and i1 %tmp8, %tmp7 + br i1 %tmp9, label %bb1, label %bb2 + +bb1: + store volatile i32 0, i32 addrspace(1)* undef + br label %bb2 + +bb2: + ret void +} + +; GCN-LABEL: {{^}}test_invert_true_phi_cond_break_loop: +; GFX1032: s_xor_b32 s{{[0-9]+}}, s{{[0-9]+}}, -1 +; GFX1032: s_or_b32 s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}} +; GFX1064: s_xor_b64 s[{{[0-9:]+}}], s[{{[0-9:]+}}], -1 +; GFX1064: s_or_b64 s[{{[0-9:]+}}], s[{{[0-9:]+}}], s[{{[0-9:]+}}] +define amdgpu_kernel void @test_invert_true_phi_cond_break_loop(i32 %arg) #0 { +bb: + %id = call i32 @llvm.amdgcn.workitem.id.x() + %tmp = sub i32 %id, %arg + br label %bb1 + +bb1: ; preds = %Flow, %bb + %lsr.iv = phi i32 [ undef, %bb ], [ %tmp2, %Flow ] + %lsr.iv.next = add i32 %lsr.iv, 1 + %cmp0 = icmp slt i32 %lsr.iv.next, 0 + br i1 %cmp0, label %bb4, label %Flow + +bb4: ; preds = %bb1 + %load = load volatile i32, i32 addrspace(1)* undef, align 4 + %cmp1 = icmp sge i32 %tmp, %load + br label %Flow + +Flow: ; preds = %bb4, %bb1 + %tmp2 = phi i32 [ %lsr.iv.next, %bb4 ], [ undef, %bb1 ] + %tmp3 = phi i1 [ %cmp1, %bb4 ], [ true, %bb1 ] + br i1 %tmp3, label %bb1, label %bb9 + +bb9: ; preds = %Flow + store volatile i32 7, i32 addrspace(3)* undef + ret void +} + +; GCN-LABEL: {{^}}test_movrels_extract_neg_offset_vgpr: +; GFX1032: v_cmp_eq_u32_e32 vcc_lo, 1, v{{[0-9]+}} +; GFX1032: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 1, vcc_lo +; GFX1032: v_cmp_ne_u32_e32 vcc_lo, 2, v{{[0-9]+}} +; GFX1032: v_cndmask_b32_e32 v{{[0-9]+}}, 2, v{{[0-9]+}}, vcc_lo +; GFX1032: v_cmp_ne_u32_e32 vcc_lo, 3, v{{[0-9]+}} +; GFX1032: v_cndmask_b32_e32 v{{[0-9]+}}, 3, v{{[0-9]+}}, vcc_lo +; GFX1064: v_cmp_eq_u32_e32 vcc, 1, v{{[0-9]+}} +; GFX1064: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 1, vcc +; GFX1064: v_cmp_ne_u32_e32 vcc, 2, v{{[0-9]+}} +; GFX1064: v_cndmask_b32_e32 v{{[0-9]+}}, 2, v{{[0-9]+}}, vcc +; GFX1064: v_cmp_ne_u32_e32 vcc, 3, v{{[0-9]+}} +; GFX1064: v_cndmask_b32_e32 v{{[0-9]+}}, 3, v{{[0-9]+}}, vcc +define amdgpu_kernel void @test_movrels_extract_neg_offset_vgpr(i32 addrspace(1)* %out) #0 { +entry: + %id = call i32 @llvm.amdgcn.workitem.id.x() #1 + %index = add i32 %id, -512 + %value = extractelement <4 x i32> <i32 0, i32 1, i32 2, i32 3>, i32 %index + store i32 %value, i32 addrspace(1)* %out + ret void +} + +; GCN-LABEL: {{^}}test_set_inactive: +; GFX1032: s_not_b32 exec_lo, exec_lo +; GFX1032: v_mov_b32_e32 {{v[0-9]+}}, 42 +; GFX1032: s_not_b32 exec_lo, exec_lo +; GFX1064: s_not_b64 exec, exec{{$}} +; GFX1064: v_mov_b32_e32 {{v[0-9]+}}, 42 +; GFX1064: s_not_b64 exec, exec{{$}} +define amdgpu_kernel void @test_set_inactive(i32 addrspace(1)* %out, i32 %in) #0 { + %tmp = call i32 @llvm.amdgcn.set.inactive.i32(i32 %in, i32 42) + store i32 %tmp, i32 addrspace(1)* %out + ret void +} + +; GCN-LABEL: {{^}}test_set_inactive_64: +; GFX1032: s_not_b32 exec_lo, exec_lo +; GFX1032: v_mov_b32_e32 {{v[0-9]+}}, 0 +; GFX1032: v_mov_b32_e32 {{v[0-9]+}}, 0 +; GFX1032: s_not_b32 exec_lo, exec_lo +; GFX1064: s_not_b64
exec, exec{{$}} +; GFX1064: v_mov_b32_e32 {{v[0-9]+}}, 0 +; GFX1064: v_mov_b32_e32 {{v[0-9]+}}, 0 +; GFX1064: s_not_b64 exec, exec{{$}} +define amdgpu_kernel void @test_set_inactive_64(i64 addrspace(1)* %out, i64 %in) #0 { + %tmp = call i64 @llvm.amdgcn.set.inactive.i64(i64 %in, i64 0) + store i64 %tmp, i64 addrspace(1)* %out + ret void +} + +; GCN-LABEL: {{^}}test_kill_i1_terminator_float: +; GFX1032: s_mov_b32 exec_lo, 0 +; GFX1064: s_mov_b64 exec, 0 +define amdgpu_ps void @test_kill_i1_terminator_float() #0 { + call void @llvm.amdgcn.kill(i1 false) + ret void +} + +; GCN-LABEL: {{^}}test_kill_i1_terminator_i1: +; GFX1032: s_or_b32 [[OR:s[0-9]+]], +; GFX1032: s_and_b32 exec_lo, exec_lo, [[OR]] +; GFX1064: s_or_b64 [[OR:s\[[0-9:]+\]]], +; GFX1064: s_and_b64 exec, exec, [[OR]] +define amdgpu_gs void @test_kill_i1_terminator_i1(i32 %a, i32 %b, i32 %c, i32 %d) #0 { + %c1 = icmp slt i32 %a, %b + %c2 = icmp slt i32 %c, %d + %x = or i1 %c1, %c2 + call void @llvm.amdgcn.kill(i1 %x) + ret void +} + +; GCN-LABEL: {{^}}test_loop_vcc: +; GFX1032: v_cmp_lt_f32_e32 vcc_lo, +; GFX1064: v_cmp_lt_f32_e32 vcc, +; GCN: s_cbranch_vccz +define amdgpu_ps <4 x float> @test_loop_vcc(<4 x float> %in) #0 { +entry: + br label %loop + +loop: + %ctr.iv = phi float [ 0.0, %entry ], [ %ctr.next, %body ] + %c.iv = phi <4 x float> [ %in, %entry ], [ %c.next, %body ] + %cc = fcmp ogt float %ctr.iv, 7.0 + br i1 %cc, label %break, label %body + +body: + %c.iv0 = extractelement <4 x float> %c.iv, i32 0 + %c.next = call <4 x float> @llvm.amdgcn.image.sample.1d.v4f32.f32(i32 15, float %c.iv0, <8 x i32> undef, <4 x i32> undef, i1 0, i32 0, i32 0) + %ctr.next = fadd float %ctr.iv, 2.0 + br label %loop + +break: + ret <4 x float> %c.iv +} + +; GCN-LABEL: {{^}}test_wwm1: +; GFX1032: s_or_saveexec_b32 [[SAVE:s[0-9]+]], -1 +; GFX1032: s_mov_b32 exec_lo, [[SAVE]] +; GFX1064: s_or_saveexec_b64 [[SAVE:s\[[0-9]+:[0-9]+\]]], -1 +; GFX1064: s_mov_b64 exec, [[SAVE]] +define amdgpu_ps float @test_wwm1(i32 inreg %idx0, i32 inreg %idx1, float %src0, float %src1) { +main_body: + %out = fadd float %src0, %src1 + %out.0 = call float @llvm.amdgcn.wwm.f32(float %out) + ret float %out.0 +} + +; GCN-LABEL: {{^}}test_wwm2: +; GFX1032: v_cmp_gt_u32_e32 vcc_lo, 32, v{{[0-9]+}} +; GFX1032: s_and_saveexec_b32 [[SAVE1:s[0-9]+]], vcc_lo +; GFX1032: s_or_saveexec_b32 [[SAVE2:s[0-9]+]], -1 +; GFX1032: s_mov_b32 exec_lo, [[SAVE2]] +; GFX1032: s_or_b32 exec_lo, exec_lo, [[SAVE1]] +; GFX1064: v_cmp_gt_u32_e32 vcc, 32, v{{[0-9]+}} +; GFX1064: s_and_saveexec_b64 [[SAVE1:s\[[0-9:]+\]]], vcc{{$}} +; GFX1064: s_or_saveexec_b64 [[SAVE2:s\[[0-9:]+\]]], -1 +; GFX1064: s_mov_b64 exec, [[SAVE2]] +; GFX1064: s_or_b64 exec, exec, [[SAVE1]] +define amdgpu_ps float @test_wwm2(i32 inreg %idx) { +main_body: + ; use mbcnt to make sure the branch is divergent + %lo = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0) + %hi = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 %lo) + %cc = icmp uge i32 %hi, 32 + br i1 %cc, label %endif, label %if + +if: + %src = call float @llvm.amdgcn.buffer.load.f32(<4 x i32> undef, i32 %idx, i32 0, i1 0, i1 0) + %out = fadd float %src, %src + %out.0 = call float @llvm.amdgcn.wwm.f32(float %out) + %out.1 = fadd float %src, %out.0 + br label %endif + +endif: + %out.2 = phi float [ %out.1, %if ], [ 0.0, %main_body ] + ret float %out.2 +} + +; GCN-LABEL: {{^}}test_wqm1: +; GFX1032: s_mov_b32 [[ORIG:s[0-9]+]], exec_lo +; GFX1032: s_wqm_b32 exec_lo, exec_lo +; GFX1032: s_and_b32 exec_lo, exec_lo, [[ORIG]] +; GFX1064: s_mov_b64 [[ORIG:s\[[0-9]+:[0-9]+\]]], 
exec{{$}} +; GFX1064: s_wqm_b64 exec, exec{{$}} +; GFX1064: s_and_b64 exec, exec, [[ORIG]] +define amdgpu_ps <4 x float> @test_wqm1(i32 inreg, i32 inreg, i32 inreg, i32 inreg %m0, <8 x i32> inreg %rsrc, <4 x i32> inreg %sampler, <2 x float> %pos) #0 { +main_body: + %inst23 = extractelement <2 x float> %pos, i32 0 + %inst24 = extractelement <2 x float> %pos, i32 1 + %inst25 = tail call float @llvm.amdgcn.interp.p1(float %inst23, i32 0, i32 0, i32 %m0) + %inst26 = tail call float @llvm.amdgcn.interp.p2(float %inst25, float %inst24, i32 0, i32 0, i32 %m0) + %inst28 = tail call float @llvm.amdgcn.interp.p1(float %inst23, i32 1, i32 0, i32 %m0) + %inst29 = tail call float @llvm.amdgcn.interp.p2(float %inst28, float %inst24, i32 1, i32 0, i32 %m0) + %tex = call <4 x float> @llvm.amdgcn.image.sample.2d.v4f32.f32(i32 15, float %inst26, float %inst29, <8 x i32> %rsrc, <4 x i32> %sampler, i1 0, i32 0, i32 0) + ret <4 x float> %tex +} + +; GCN-LABEL: {{^}}test_wqm2: +; GFX1032: s_wqm_b32 exec_lo, exec_lo +; GFX1032: s_and_b32 exec_lo, exec_lo, s{{[0-9+]}} +; GFX1064: s_wqm_b64 exec, exec{{$}} +; GFX1064: s_and_b64 exec, exec, s[{{[0-9:]+}}] +define amdgpu_ps float @test_wqm2(i32 inreg %idx0, i32 inreg %idx1) #0 { +main_body: + %src0 = call float @llvm.amdgcn.buffer.load.f32(<4 x i32> undef, i32 %idx0, i32 0, i1 0, i1 0) + %src1 = call float @llvm.amdgcn.buffer.load.f32(<4 x i32> undef, i32 %idx1, i32 0, i1 0, i1 0) + %out = fadd float %src0, %src1 + %out.0 = bitcast float %out to i32 + %out.1 = call i32 @llvm.amdgcn.wqm.i32(i32 %out.0) + %out.2 = bitcast i32 %out.1 to float + ret float %out.2 +} + +; GCN-LABEL: {{^}}test_intr_fcmp_i64: +; GFX1032-DAG: v_mov_b32_e32 v[[V_HI:[0-9]+]], 0{{$}} +; GFX1032-DAG: v_cmp_eq_f32_e64 s[[C_LO:[0-9]+]], {{s[0-9]+}}, |{{[vs][0-9]+}}| +; GFX1032-DAG: v_mov_b32_e32 v[[V_LO:[0-9]+]], s[[C_LO]] +; GFX1064: v_cmp_eq_f32_e64 s{{\[}}[[C_LO:[0-9]+]]:[[C_HI:[0-9]+]]], {{s[0-9]+}}, |{{[vs][0-9]+}}| +; GFX1064-DAG: v_mov_b32_e32 v[[V_LO:[0-9]+]], s[[C_LO]] +; GFX1064-DAG: v_mov_b32_e32 v[[V_HI:[0-9]+]], s[[C_HI]] +; GCN: store_dwordx2 v[{{[0-9:]+}}], v{{\[}}[[V_LO]]:[[V_HI]]], +define amdgpu_kernel void @test_intr_fcmp_i64(i64 addrspace(1)* %out, float %src, float %a) { + %temp = call float @llvm.fabs.f32(float %a) + %result = call i64 @llvm.amdgcn.fcmp.i64.f32(float %src, float %temp, i32 1) + store i64 %result, i64 addrspace(1)* %out + ret void +} + +; GCN-LABEL: {{^}}test_intr_icmp_i64: +; GFX1032-DAG: v_mov_b32_e32 v[[V_HI:[0-9]+]], 0{{$}} +; GFX1032-DAG: v_cmp_eq_u32_e64 [[C_LO:vcc_lo|s[0-9]+]], 0x64, {{s[0-9]+}} +; GFX1032-DAG: v_mov_b32_e32 v[[V_LO:[0-9]+]], [[C_LO]] +; GFX1064: v_cmp_eq_u32_e64 s{{\[}}[[C_LO:[0-9]+]]:[[C_HI:[0-9]+]]], 0x64, {{s[0-9]+}} +; GFX1064-DAG: v_mov_b32_e32 v[[V_LO:[0-9]+]], s[[C_LO]] +; GFX1064-DAG: v_mov_b32_e32 v[[V_HI:[0-9]+]], s[[C_HI]] +; GCN: store_dwordx2 v[{{[0-9:]+}}], v{{\[}}[[V_LO]]:[[V_HI]]], +define amdgpu_kernel void @test_intr_icmp_i64(i64 addrspace(1)* %out, i32 %src) { + %result = call i64 @llvm.amdgcn.icmp.i64.i32(i32 %src, i32 100, i32 32) + store i64 %result, i64 addrspace(1)* %out + ret void +} + +; GCN-LABEL: {{^}}test_intr_fcmp_i32: +; GFX1032-DAG: v_cmp_eq_f32_e64 s[[C_LO:[0-9]+]], {{s[0-9]+}}, |{{[vs][0-9]+}}| +; GFX1032-DAG: v_mov_b32_e32 v[[V_LO:[0-9]+]], s[[C_LO]] +; GFX1064: v_cmp_eq_f32_e64 s{{\[}}[[C_LO:[0-9]+]]:[[C_HI:[0-9]+]]], {{s[0-9]+}}, |{{[vs][0-9]+}}| +; GFX1064-DAG: v_mov_b32_e32 v[[V_LO:[0-9]+]], s[[C_LO]] +; GCN: store_dword v[{{[0-9:]+}}], v[[V_LO]], +define amdgpu_kernel void @test_intr_fcmp_i32(i32 
addrspace(1)* %out, float %src, float %a) { + %temp = call float @llvm.fabs.f32(float %a) + %result = call i32 @llvm.amdgcn.fcmp.i32.f32(float %src, float %temp, i32 1) + store i32 %result, i32 addrspace(1)* %out + ret void +} + +; GCN-LABEL: {{^}}test_intr_icmp_i32: +; GFX1032-DAG: v_cmp_eq_u32_e64 s[[C_LO:[0-9]+]], 0x64, {{s[0-9]+}} +; GFX1032-DAG: v_mov_b32_e32 v[[V_LO:[0-9]+]], s[[C_LO]]{{$}} +; GFX1064: v_cmp_eq_u32_e64 s{{\[}}[[C_LO:[0-9]+]]:{{[0-9]+}}], 0x64, {{s[0-9]+}} +; GFX1064-DAG: v_mov_b32_e32 v[[V_LO:[0-9]+]], s[[C_LO]]{{$}} +; GCN: store_dword v[{{[0-9:]+}}], v[[V_LO]], +define amdgpu_kernel void @test_intr_icmp_i32(i32 addrspace(1)* %out, i32 %src) { + %result = call i32 @llvm.amdgcn.icmp.i32.i32(i32 %src, i32 100, i32 32) + store i32 %result, i32 addrspace(1)* %out + ret void +} + +; GCN-LABEL: {{^}}test_wqm_vote: +; GFX1032: v_cmp_neq_f32_e32 vcc_lo, 0 +; GFX1032: s_wqm_b32 [[WQM:s[0-9]+]], vcc_lo +; GFX1032: s_and_b32 exec_lo, exec_lo, [[WQM]] +; GFX1064: v_cmp_neq_f32_e32 vcc, 0 +; GFX1064: s_wqm_b64 [[WQM:s\[[0-9:]+\]]], vcc{{$}} +; GFX1064: s_and_b64 exec, exec, [[WQM]] +define amdgpu_ps void @test_wqm_vote(float %a) { + %c1 = fcmp une float %a, 0.0 + %c2 = call i1 @llvm.amdgcn.wqm.vote(i1 %c1) + call void @llvm.amdgcn.kill(i1 %c2) + ret void +} + +; GCN-LABEL: {{^}}test_branch_true: +; GFX1032: s_and_b32 vcc_lo, exec_lo, -1 +; GFX1064: s_and_b64 vcc, exec, -1 +define amdgpu_kernel void @test_branch_true() #2 { +entry: + br i1 true, label %for.end, label %for.body.lr.ph + +for.body.lr.ph: ; preds = %entry + br label %for.body + +for.body: ; preds = %for.body, %for.body.lr.ph + br i1 undef, label %for.end, label %for.body + +for.end: ; preds = %for.body, %entry + ret void +} + +; GCN-LABEL: {{^}}test_ps_live: +; GFX1032: s_mov_b32 [[C:s[0-9]+]], exec_lo +; GFX1064: s_mov_b64 [[C:s\[[0-9:]+\]]], exec{{$}} +; GCN: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 1, [[C]] +define amdgpu_ps float @test_ps_live() #0 { + %live = call i1 @llvm.amdgcn.ps.live() + %live.32 = zext i1 %live to i32 + %r = bitcast i32 %live.32 to float + ret float %r +} + +; GCN-LABEL: {{^}}test_vccnz_ifcvt_triangle64: +; GFX1032: v_cmp_neq_f64_e64 [[C:s[0-9]+]], s[{{[0-9:]+}}], 1.0 +; GFX1032: s_and_b32 vcc_lo, exec_lo, [[C]] +; GFX1064: v_cmp_neq_f64_e64 [[C:s\[[0-9:]+\]]], s[{{[0-9:]+}}], 1.0 +; GFX1064: s_and_b64 vcc, exec, [[C]] +define amdgpu_kernel void @test_vccnz_ifcvt_triangle64(double addrspace(1)* %out, double addrspace(1)* %in) #0 { +entry: + %v = load double, double addrspace(1)* %in + %cc = fcmp oeq double %v, 1.000000e+00 + br i1 %cc, label %if, label %endif + +if: + %u = fadd double %v, %v + br label %endif + +endif: + %r = phi double [ %v, %entry ], [ %u, %if ] + store double %r, double addrspace(1)* %out + ret void +} + +; GCN-LABEL: {{^}}test_init_exec: +; GFX1032: s_mov_b32 exec_lo, 0x12345 +; GFX1064: s_mov_b64 exec, 0x12345 +; GCN: v_add_f32_e32 v0, +define amdgpu_ps float @test_init_exec(float %a, float %b) { +main_body: + %s = fadd float %a, %b + call void @llvm.amdgcn.init.exec(i64 74565) + ret float %s +} + +; GCN-LABEL: {{^}}test_init_exec_from_input: +; GCN: s_bfe_u32 s0, s3, 0x70008 +; GFX1032: s_bfm_b32 exec_lo, s0, 0 +; GFX1032: s_cmp_eq_u32 s0, 32 +; GFX1032: s_cmov_b32 exec_lo, -1 +; GFX1064: s_bfm_b64 exec, s0, 0 +; GFX1064: s_cmp_eq_u32 s0, 64 +; GFX1064: s_cmov_b64 exec, -1 +; GCN: v_add_f32_e32 v0, +define amdgpu_ps float @test_init_exec_from_input(i32 inreg, i32 inreg, i32 inreg, i32 inreg %count, float %a, float %b) { +main_body: + %s = fadd float %a, %b + call void 
@llvm.amdgcn.init.exec.from.input(i32 %count, i32 8) + ret float %s +} + +; GCN-LABEL: {{^}}test_vgprblocks_w32_attr: +; Test that the wave size can be overridden in function attributes and that the block size is correct as a result +; GFX10DEFWAVE: ; VGPRBlocks: 1 +define amdgpu_gs float @test_vgprblocks_w32_attr(float %a, float %b, float %c, float %d, float %e, + float %f, float %g, float %h, float %i, float %j, float %k, float %l) #3 { +main_body: + %s = fadd float %a, %b + %s.1 = fadd float %s, %c + %s.2 = fadd float %s.1, %d + %s.3 = fadd float %s.2, %e + %s.4 = fadd float %s.3, %f + %s.5 = fadd float %s.4, %g + %s.6 = fadd float %s.5, %h + %s.7 = fadd float %s.6, %i + %s.8 = fadd float %s.7, %j + %s.9 = fadd float %s.8, %k + %s.10 = fadd float %s.9, %l + ret float %s.10 +} + +; GCN-LABEL: {{^}}test_vgprblocks_w64_attr: +; Test that the wave size can be overridden in function attributes and that the block size is correct as a result +; GFX10DEFWAVE: ; VGPRBlocks: 2 +define amdgpu_gs float @test_vgprblocks_w64_attr(float %a, float %b, float %c, float %d, float %e, + float %f, float %g, float %h, float %i, float %j, float %k, float %l) #4 { +main_body: + %s = fadd float %a, %b + %s.1 = fadd float %s, %c + %s.2 = fadd float %s.1, %d + %s.3 = fadd float %s.2, %e + %s.4 = fadd float %s.3, %f + %s.5 = fadd float %s.4, %g + %s.6 = fadd float %s.5, %h + %s.7 = fadd float %s.6, %i + %s.8 = fadd float %s.7, %j + %s.9 = fadd float %s.8, %k + %s.10 = fadd float %s.9, %l + ret float %s.10 +} + +; GCN-LABEL: {{^}}icmp64: +; GFX1032: v_cmp_eq_u32_e32 vcc_lo, 0, v +; GFX1064: v_cmp_eq_u32_e32 vcc, 0, v +define amdgpu_kernel void @icmp64(i32 %n, i32 %s) { +entry: + %id = tail call i32 @llvm.amdgcn.workitem.id.x() + %mul4 = mul nsw i32 %s, %n + %cmp = icmp slt i32 0, %mul4 + br label %if.end + +if.end: ; preds = %entry + %rem = urem i32 %id, %s + %icmp = tail call i64 @llvm.amdgcn.icmp.i64.i32(i32 %rem, i32 0, i32 32) + %shr = lshr i64 %icmp, 1 + %notmask = shl nsw i64 -1, 0 + %and = and i64 %notmask, %shr + %or = or i64 %and, -9223372036854775808 + %cttz = tail call i64 @llvm.cttz.i64(i64 %or, i1 true) + %cast = trunc i64 %cttz to i32 + %cmp3 = icmp ugt i32 10, %cast + %cmp6 = icmp ne i32 %rem, 0 + %brmerge = or i1 %cmp6, %cmp3 + br i1 %brmerge, label %if.end2, label %if.then + +if.then: ; preds = %if.end + unreachable + +if.end2: ; preds = %if.end + ret void +} + +; GCN-LABEL: {{^}}fcmp64: +; GFX1032: v_cmp_eq_f32_e32 vcc_lo, 0, v +; GFX1064: v_cmp_eq_f32_e32 vcc, 0, v +define amdgpu_kernel void @fcmp64(float %n, float %s) { +entry: + %id = tail call i32 @llvm.amdgcn.workitem.id.x() + %id.f = uitofp i32 %id to float + %mul4 = fmul float %s, %n + %cmp = fcmp ult float 0.0, %mul4 + br label %if.end + +if.end: ; preds = %entry + %rem.f = frem float %id.f, %s + %fcmp = tail call i64 @llvm.amdgcn.fcmp.i64.f32(float %rem.f, float 0.0, i32 1) + %shr = lshr i64 %fcmp, 1 + %notmask = shl nsw i64 -1, 0 + %and = and i64 %notmask, %shr + %or = or i64 %and, -9223372036854775808 + %cttz = tail call i64 @llvm.cttz.i64(i64 %or, i1 true) + %cast = trunc i64 %cttz to i32 + %cmp3 = icmp ugt i32 10, %cast + %cmp6 = fcmp one float %rem.f, 0.0 + %brmerge = or i1 %cmp6, %cmp3 + br i1 %brmerge, label %if.end2, label %if.then + +if.then: ; preds = %if.end + unreachable + +if.end2: ; preds = %if.end + ret void +} + +; GCN-LABEL: {{^}}icmp32: +; GFX1032: v_cmp_eq_u32_e32 vcc_lo, 0, v +; GFX1064: v_cmp_eq_u32_e32 vcc, 0, v +define amdgpu_kernel void @icmp32(i32 %n, i32 %s) { +entry: + %id = tail call i32 
@llvm.amdgcn.workitem.id.x() + %mul4 = mul nsw i32 %s, %n + %cmp = icmp slt i32 0, %mul4 + br label %if.end + +if.end: ; preds = %entry + %rem = urem i32 %id, %s + %icmp = tail call i32 @llvm.amdgcn.icmp.i32.i32(i32 %rem, i32 0, i32 32) + %shr = lshr i32 %icmp, 1 + %notmask = shl nsw i32 -1, 0 + %and = and i32 %notmask, %shr + %or = or i32 %and, 2147483648 + %cttz = tail call i32 @llvm.cttz.i32(i32 %or, i1 true) + %cmp3 = icmp ugt i32 10, %cttz + %cmp6 = icmp ne i32 %rem, 0 + %brmerge = or i1 %cmp6, %cmp3 + br i1 %brmerge, label %if.end2, label %if.then + +if.then: ; preds = %if.end + unreachable + +if.end2: ; preds = %if.end + ret void +} + +; GCN-LABEL: {{^}}fcmp32: +; GFX1032: v_cmp_eq_f32_e32 vcc_lo, 0, v +; GFX1064: v_cmp_eq_f32_e32 vcc, 0, v +define amdgpu_kernel void @fcmp32(float %n, float %s) { +entry: + %id = tail call i32 @llvm.amdgcn.workitem.id.x() + %id.f = uitofp i32 %id to float + %mul4 = fmul float %s, %n + %cmp = fcmp ult float 0.0, %mul4 + br label %if.end + +if.end: ; preds = %entry + %rem.f = frem float %id.f, %s + %fcmp = tail call i32 @llvm.amdgcn.fcmp.i32.f32(float %rem.f, float 0.0, i32 1) + %shr = lshr i32 %fcmp, 1 + %notmask = shl nsw i32 -1, 0 + %and = and i32 %notmask, %shr + %or = or i32 %and, 2147483648 + %cttz = tail call i32 @llvm.cttz.i32(i32 %or, i1 true) + %cmp3 = icmp ugt i32 10, %cttz + %cmp6 = fcmp one float %rem.f, 0.0 + %brmerge = or i1 %cmp6, %cmp3 + br i1 %brmerge, label %if.end2, label %if.then + +if.then: ; preds = %if.end + unreachable + +if.end2: ; preds = %if.end + ret void +} + +declare void @external_void_func_void() #1 + +; Test save/restore of VGPR needed for SGPR spilling. + +; GCN-LABEL: {{^}}callee_no_stack_with_call: +; GCN: s_waitcnt +; GCN: s_mov_b32 s5, s32 +; GFX1064: s_add_u32 s32, s32, 0x400 +; GFX1032: s_add_u32 s32, s32, 0x200 + +; GFX1064: s_or_saveexec_b64 [[COPY_EXEC0:s\[[0-9]+:[0-9]+\]]], -1{{$}} +; GFX1032: s_or_saveexec_b32 [[COPY_EXEC0:s[0-9]]], -1{{$}} + +; GCN-NEXT: buffer_store_dword v32, off, s[0:3], s5 ; 4-byte Folded Spill + +; GFX1064-NEXT: s_mov_b64 exec, [[COPY_EXEC0]] +; GFX1032-NEXT: s_mov_b32 exec_lo, [[COPY_EXEC0]] + +; GCN-DAG: v_writelane_b32 v32, s33, 0 +; GCN-DAG: v_writelane_b32 v32, s34, 1 +; GCN-DAG: s_mov_b32 s33, s5 +; GCN: s_swappc_b64 +; GCN-DAG: s_mov_b32 s5, s33 +; GCN-DAG: v_readlane_b32 s34, v32, 1 +; GCN-DAG: v_readlane_b32 s33, v32, 0 + +; GFX1064: s_or_saveexec_b64 [[COPY_EXEC1:s\[[0-9]+:[0-9]+\]]], -1{{$}} +; GFX1032: s_or_saveexec_b32 [[COPY_EXEC1:s[0-9]]], -1{{$}} +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s5 ; 4-byte Folded Reload +; GFX1064-NEXT: s_mov_b64 exec, [[COPY_EXEC1]] +; GFX1032-NEXT: s_mov_b32 exec_lo, [[COPY_EXEC1]] + +; GFX1064: s_sub_u32 s32, s32, 0x400 +; GFX1032: s_sub_u32 s32, s32, 0x200 +; GCN: s_setpc_b64 +define void @callee_no_stack_with_call() #1 { + call void @external_void_func_void() + ret void +} + + +declare i32 @llvm.amdgcn.workitem.id.x() +declare float @llvm.fabs.f32(float) +declare { float, i1 } @llvm.amdgcn.div.scale.f32(float, float, i1) +declare { double, i1 } @llvm.amdgcn.div.scale.f64(double, double, i1) +declare float @llvm.amdgcn.div.fmas.f32(float, float, float, i1) +declare double @llvm.amdgcn.div.fmas.f64(double, double, double, i1) +declare i1 @llvm.amdgcn.class.f32(float, i32) +declare i32 @llvm.amdgcn.set.inactive.i32(i32, i32) +declare i64 @llvm.amdgcn.set.inactive.i64(i64, i64) +declare <4 x float> @llvm.amdgcn.image.sample.1d.v4f32.f32(i32, float, <8 x i32>, <4 x i32>, i1, i32, i32) +declare <4 x float> 
@llvm.amdgcn.image.sample.2d.v4f32.f32(i32, float, float, <8 x i32>, <4 x i32>, i1, i32, i32) +declare float @llvm.amdgcn.wwm.f32(float) +declare i32 @llvm.amdgcn.wqm.i32(i32) +declare float @llvm.amdgcn.interp.p1(float, i32, i32, i32) +declare float @llvm.amdgcn.interp.p2(float, float, i32, i32, i32) +declare float @llvm.amdgcn.buffer.load.f32(<4 x i32>, i32, i32, i1, i1) +declare i32 @llvm.amdgcn.mbcnt.lo(i32, i32) +declare i32 @llvm.amdgcn.mbcnt.hi(i32, i32) +declare i64 @llvm.amdgcn.fcmp.i64.f32(float, float, i32) +declare i64 @llvm.amdgcn.icmp.i64.i32(i32, i32, i32) +declare i32 @llvm.amdgcn.fcmp.i32.f32(float, float, i32) +declare i32 @llvm.amdgcn.icmp.i32.i32(i32, i32, i32) +declare void @llvm.amdgcn.kill(i1) +declare i1 @llvm.amdgcn.wqm.vote(i1) +declare i1 @llvm.amdgcn.ps.live() +declare void @llvm.amdgcn.init.exec(i64) +declare void @llvm.amdgcn.init.exec.from.input(i32, i32) +declare i64 @llvm.cttz.i64(i64, i1) +declare i32 @llvm.cttz.i32(i32, i1) + +attributes #0 = { nounwind readnone speculatable } +attributes #1 = { nounwind } +attributes #2 = { nounwind readnone optnone noinline } +attributes #3 = { "target-features"="+wavefrontsize32" } +attributes #4 = { "target-features"="+wavefrontsize64" } Index: test/CodeGen/AMDGPU/xor3.ll =================================================================== --- test/CodeGen/AMDGPU/xor3.ll +++ test/CodeGen/AMDGPU/xor3.ll @@ -16,6 +16,7 @@ ; GFX10-LABEL: xor3: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_xor3_b32 v0, v0, v1, v2 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: ; return to shader part epilog %x = xor i32 %a, %b %result = xor i32 %x, %c @@ -33,6 +34,7 @@ ; GFX10-LABEL: xor3_vgpr_b: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_xor3_b32 v0, s2, v0, s3 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: ; return to shader part epilog %x = xor i32 %a, %b %result = xor i32 %x, %c @@ -50,6 +52,7 @@ ; GFX10-LABEL: xor3_vgpr_all2: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_xor3_b32 v0, v1, v2, v0 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: ; return to shader part epilog %x = xor i32 %b, %c %result = xor i32 %a, %x @@ -67,6 +70,7 @@ ; GFX10-LABEL: xor3_vgpr_bc: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_xor3_b32 v0, s2, v0, v1 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: ; return to shader part epilog %x = xor i32 %a, %b %result = xor i32 %x, %c @@ -84,6 +88,7 @@ ; GFX10-LABEL: xor3_vgpr_const: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_xor3_b32 v0, v0, v1, 16 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: ; return to shader part epilog %x = xor i32 %a, %b %result = xor i32 %x, 16 @@ -102,6 +107,7 @@ ; GFX10-LABEL: xor3_multiuse_outer: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_xor3_b32 v0, v0, v1, v2 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: v_mul_lo_u32 v1, v0, v3 ; GFX10-NEXT: ; return to shader part epilog %inner = xor i32 %a, %b @@ -123,6 +129,7 @@ ; GFX10-LABEL: xor3_multiuse_inner: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_xor_b32_e32 v0, v0, v1 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: v_xor_b32_e32 v1, v0, v2 ; GFX10-NEXT: ; return to shader part epilog %inner = xor i32 %a, %b @@ -151,6 +158,7 @@ ; GFX10-NEXT: v_add_f32_e64 v1, s3, 2.0 ; GFX10-NEXT: v_add_f32_e64 v2, s2, 1.0 ; GFX10-NEXT: v_add_f32_e64 v0, 0x40400000, s4 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: v_xor_b32_e32 v1, v2, v1 ; GFX10-NEXT: v_xor_b32_e32 v0, v1, v0 ; GFX10-NEXT: ; return to shader part epilog Index: test/CodeGen/AMDGPU/xor_add.ll =================================================================== --- test/CodeGen/AMDGPU/xor_add.ll +++ 
test/CodeGen/AMDGPU/xor_add.ll @@ -22,6 +22,7 @@ ; GFX10-LABEL: xor_add: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_xad_u32 v0, v0, v1, v2 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: ; return to shader part epilog %x = xor i32 %a, %b %result = add i32 %x, %c @@ -46,6 +47,7 @@ ; GFX10-LABEL: xor_add_vgpr_a: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_xad_u32 v0, v0, s2, s3 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: ; return to shader part epilog %x = xor i32 %a, %b %result = add i32 %x, %c @@ -68,6 +70,7 @@ ; GFX10-LABEL: xor_add_vgpr_all: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_xad_u32 v0, v0, v1, v2 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: ; return to shader part epilog %x = xor i32 %a, %b %result = add i32 %x, %c @@ -90,6 +93,7 @@ ; GFX10-LABEL: xor_add_vgpr_ab: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_xad_u32 v0, v0, v1, s2 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: ; return to shader part epilog %x = xor i32 %a, %b %result = add i32 %x, %c @@ -112,6 +116,7 @@ ; GFX10-LABEL: xor_add_vgpr_const: ; GFX10: ; %bb.0: ; GFX10-NEXT: v_xad_u32 v0, v0, 3, v1 +; GFX10-NEXT: ; implicit-def: $vcc_hi ; GFX10-NEXT: ; return to shader part epilog %x = xor i32 %a, 3 %result = add i32 %x, %b Index: test/MC/AMDGPU/gfx10-constant-bus.s =================================================================== --- test/MC/AMDGPU/gfx10-constant-bus.s +++ test/MC/AMDGPU/gfx10-constant-bus.s @@ -33,3 +33,13 @@ v_div_fmas_f64 v[5:6], v[1:2], s[2:3], 0x123456 // GFX10-ERR: error: invalid operand (violates constant bus restrictions) + +//----------------------------------------------------------------------------------------- +// v_mad_u64_u32 has operands of different sizes. +// When these operands are literals, they are counted as 2 scalar values even if literals are identical. 
+ +v_mad_u64_u32 v[5:6], s12, v1, 0x12345678, 0x12345678 +// GFX10: v_mad_u64_u32 v[5:6], s12, v1, 0x12345678, 0x12345678 ; encoding: [0x05,0x0c,0x76,0xd5,0x01,0xff,0xfd,0x03,0x78,0x56,0x34,0x12] + +v_mad_u64_u32 v[5:6], s12, s1, 0x12345678, 0x12345678 +// GFX10-ERR: error: invalid operand (violates constant bus restrictions) Index: test/MC/AMDGPU/wave32.s =================================================================== --- /dev/null +++ test/MC/AMDGPU/wave32.s @@ -0,0 +1,412 @@ +// RUN: not llvm-mc -arch=amdgcn -mcpu=gfx1010 -mattr=+wavefrontsize32,-wavefrontsize64 -show-encoding %s | FileCheck -check-prefix=GFX1032 %s +// RUN: not llvm-mc -arch=amdgcn -mcpu=gfx1010 -mattr=-wavefrontsize32,+wavefrontsize64 -show-encoding %s | FileCheck -check-prefix=GFX1064 %s +// RUN: not llvm-mc -arch=amdgcn -mcpu=gfx1010 -mattr=+wavefrontsize32,-wavefrontsize64 -show-encoding %s 2>&1 | FileCheck -check-prefix=GFX1032-ERR %s +// RUN: not llvm-mc -arch=amdgcn -mcpu=gfx1010 -mattr=-wavefrontsize32,+wavefrontsize64 -show-encoding %s 2>&1 | FileCheck -check-prefix=GFX1064-ERR %s + +v_cmp_ge_i32_e32 s0, v0 +// GFX1032: v_cmp_ge_i32_e32 vcc_lo, s0, v0 ; encoding: [0x00,0x00,0x0c,0x7d] +// GFX1064: v_cmp_ge_i32_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x0c,0x7d] + +v_cmp_ge_i32_e32 vcc_lo, s0, v1 +// GFX1032: v_cmp_ge_i32_e32 vcc_lo, s0, v1 ; encoding: [0x00,0x02,0x0c,0x7d] +// GFX1064-ERR: error: instruction not supported on this GPU + +v_cmp_ge_i32_e32 vcc, s0, v2 +// GFX1032-ERR: error: instruction not supported on this GPU +// GFX1064: v_cmp_ge_i32_e32 vcc, s0, v2 ; encoding: [0x00,0x04,0x0c,0x7d] + +v_cmp_le_f16_sdwa s0, v3, v4 src0_sel:WORD_1 src1_sel:DWORD +// GFX1032: v_cmp_le_f16_sdwa s0, v3, v4 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x08,0x96,0x7d,0x03,0x80,0x05,0x06] +// GFX1064-ERR: error: invalid operand for instruction + +v_cmp_le_f16_sdwa s[0:1], v3, v4 src0_sel:WORD_1 src1_sel:DWORD +// GFX1032-ERR: error: invalid operand for instruction +// GFX1064: v_cmp_le_f16_sdwa s[0:1], v3, v4 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x08,0x96,0x7d,0x03,0x80,0x05,0x06] + +v_cmp_class_f32_e32 vcc_lo, s0, v0 +// GFX1032: v_cmp_class_f32_e32 vcc_lo, s0, v0 ; encoding: [0x00,0x00,0x10,0x7d] +// GFX1064-ERR: error: instruction not supported on this GPU + +v_cmp_class_f32_e32 vcc, s0, v0 +// GFX1032-ERR: error: instruction not supported on this GPU +// GFX1064: v_cmp_class_f32_e32 vcc, s0, v0 ; encoding: [0x00,0x00,0x10,0x7d] + +// TODO-GFX10: The following encoding does not match SP3's encoding, which is: +// [0xf9,0x04,0x1e,0x7d,0x01,0x06,0x06,0x06] +v_cmp_class_f16_sdwa vcc_lo, v1, v2 src0_sel:DWORD src1_sel:DWORD +// GFX1032: v_cmp_class_f16_sdwa vcc_lo, v1, v2 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x04,0x1e,0x7d,0x01,0x00,0x06,0x06] +// GFX1064-ERR: error: invalid operand for instruction + +// TODO-GFX10: The following encoding does not match SP3's encoding, which is: +// [0xf9,0x04,0x1e,0x7d,0x01,0x06,0x06,0x06] +v_cmp_class_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD +// GFX1032-ERR: error: instruction not supported on this GPU +// GFX1064: v_cmp_class_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x04,0x1e,0x7d,0x01,0x00,0x06,0x06] + +v_cmp_class_f16_sdwa s0, v1, v2 src0_sel:DWORD src1_sel:DWORD +// GFX1032: v_cmp_class_f16_sdwa s0, v1, v2 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x04,0x1e,0x7d,0x01,0x80,0x06,0x06] +// GFX1064-ERR: error: invalid operand for instruction + +v_cmp_class_f16_sdwa s[0:1], v1, v2 src0_sel:DWORD src1_sel:DWORD 
+// GFX1032-ERR: error: invalid operand for instruction +// GFX1064: v_cmp_class_f16_sdwa s[0:1], v1, v2 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x04,0x1e,0x7d,0x01,0x80,0x06,0x06] + +v_cndmask_b32_e32 v1, v2, v3, +// GFX1032: v_cndmask_b32_e32 v1, v2, v3, vcc_lo ; encoding: [0x02,0x07,0x02,0x02] +// GFX1064: v_cndmask_b32_e32 v1, v2, v3, vcc ; encoding: [0x02,0x07,0x02,0x02] + +v_cndmask_b32_e32 v1, v2, v3, vcc_lo +// GFX1032: v_cndmask_b32_e32 v1, v2, v3, vcc_lo ; encoding: [0x02,0x07,0x02,0x02] +// GFX1064-ERR: error: instruction not supported on this GPU + +v_cndmask_b32_e32 v1, v2, v3, vcc +// GFX1032-ERR: error: instruction not supported on this GPU +// GFX1064: v_cndmask_b32_e32 v1, v2, v3, vcc ; encoding: [0x02,0x07,0x02,0x02] + +v_add_co_u32_e32 v2, vcc_lo, s0, v2 +// GFX1032-ERR: error: instruction not supported on this GPU +// GFX1064-ERR: error: instruction not supported on this GPU + +v_add_co_u32_e32 v2, vcc, s0, v2 +// GFX1032-ERR: error: instruction not supported on this GPU +// GFX1064-ERR: error: instruction not supported on this GPU + +v_add_co_ci_u32_e32 v3, vcc_lo, v3, v4, vcc_lo +// GFX1032: v_add_co_ci_u32_e32 v3, vcc_lo, v3, v4, vcc_lo ; encoding: [0x03,0x09,0x06,0x50] +// GFX1064-ERR: error: instruction not supported on this GPU + +v_add_co_ci_u32_e32 v3, vcc, v3, v4, vcc +// GFX1032-ERR: error: instruction not supported on this GPU +// GFX1064: v_add_co_ci_u32_e32 v3, vcc, v3, v4, vcc ; encoding: [0x03,0x09,0x06,0x50] + +v_add_co_ci_u32_e32 v3, v3, v4 +// GFX1032: v_add_co_ci_u32_e32 v3, vcc_lo, v3, v4, vcc_lo ; encoding: [0x03,0x09,0x06,0x50] +// GFX1064: v_add_co_ci_u32_e32 v3, vcc, v3, v4, vcc ; encoding: [0x03,0x09,0x06,0x50] + +v_sub_co_u32_e32 v2, vcc_lo, s0, v2 +// GFX1032-ERR: error: instruction not supported on this GPU +// GFX1064-ERR: error: instruction not supported on this GPU + +v_sub_co_u32_e32 v2, vcc, s0, v2 +// GFX1032-ERR: error: instruction not supported on this GPU +// GFX1064-ERR: error: instruction not supported on this GPU + +v_subrev_co_u32_e32 v2, vcc_lo, s0, v2 +// GFX1032-ERR: error: instruction not supported on this GPU +// GFX1064-ERR: error: instruction not supported on this GPU + +v_subrev_co_u32_e32 v2, vcc, s0, v2 +// GFX1032-ERR: error: instruction not supported on this GPU +// GFX1064-ERR: error: instruction not supported on this GPU + +v_sub_co_ci_u32_e32 v3, vcc_lo, v3, v4, vcc_lo +// GFX1032: v_sub_co_ci_u32_e32 v3, vcc_lo, v3, v4, vcc_lo ; encoding: [0x03,0x09,0x06,0x52] +// GFX1064-ERR: error: instruction not supported on this GPU + +v_sub_co_ci_u32_e32 v3, vcc, v3, v4, vcc +// GFX1032-ERR: error: instruction not supported on this GPU +// GFX1064: v_sub_co_ci_u32_e32 v3, vcc, v3, v4, vcc ; encoding: [0x03,0x09,0x06,0x52] + +v_sub_co_ci_u32_e32 v3, v3, v4 +// GFX1032: v_sub_co_ci_u32_e32 v3, vcc_lo, v3, v4, vcc_lo ; encoding: [0x03,0x09,0x06,0x52] +// GFX1064: v_sub_co_ci_u32_e32 v3, vcc, v3, v4, vcc ; encoding: [0x03,0x09,0x06,0x52] + +v_subrev_co_ci_u32_e32 v1, vcc_lo, 0, v1, vcc_lo +// GFX1032: v_subrev_co_ci_u32_e32 v1, vcc_lo, 0, v1, vcc_lo ; encoding: [0x80,0x02,0x02,0x54] +// GFX1064-ERR: error: instruction not supported on this GPU + +v_subrev_co_ci_u32_e32 v1, vcc, 0, v1, vcc +// GFX1032-ERR: error: instruction not supported on this GPU +// GFX1064: v_subrev_co_ci_u32_e32 v1, vcc, 0, v1, vcc ; encoding: [0x80,0x02,0x02,0x54] + +v_subrev_co_ci_u32_e32 v1, 0, v1 +// GFX1032: v_subrev_co_ci_u32_e32 v1, vcc_lo, 0, v1, vcc_lo ; encoding: [0x80,0x02,0x02,0x54] +// GFX1064: v_subrev_co_ci_u32_e32 v1, vcc, 0, v1, 
vcc ; encoding: [0x80,0x02,0x02,0x54] + +v_add_co_u32_sdwa v0, vcc_lo, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +// GFX1032-ERR: error: invalid operand +// GFX1064-ERR: error: invalid operand + +v_add_co_u32_sdwa v0, vcc, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +// GFX1032-ERR: error: instruction not supported +// GFX1064-ERR: error: instruction not supported + +v_add_co_u32_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +// GFX1032-ERR: error: not a valid operand +// GFX1064-ERR: error: not a valid operand + +v_add_co_ci_u32_sdwa v1, vcc_lo, v1, v4, vcc_lo dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +// GFX1032: v_add_co_ci_u32_sdwa v1, vcc_lo, v1, v4, vcc_lo dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x08,0x02,0x50,0x01,0x06,0x00,0x06] +// GFX1064-ERR: error: instruction not supported on this GPU + +v_add_co_ci_u32_sdwa v1, vcc, v1, v4, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +// GFX1032-ERR: error: instruction not supported on this GPU +// GFX1064: v_add_co_ci_u32_sdwa v1, vcc, v1, v4, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x08,0x02,0x50,0x01,0x06,0x00,0x06] + +v_add_co_ci_u32_sdwa v1, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +// GFX1032: v_add_co_ci_u32_sdwa v1, vcc_lo, v1, v4, vcc_lo dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x08,0x02,0x50,0x01,0x06,0x00,0x06] +// GFX1064: v_add_co_ci_u32_sdwa v1, vcc, v1, v4, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x08,0x02,0x50,0x01,0x06,0x00,0x06] + +v_sub_co_u32_sdwa v0, vcc_lo, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +// GFX1032-ERR: error: invalid operand +// GFX1064-ERR: error: invalid operand + +v_sub_co_u32_sdwa v0, vcc, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +// GFX1032-ERR: error: instruction not supported +// GFX1064-ERR: error: instruction not supported + +v_sub_co_u32_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +// GFX1032-ERR: error: not a valid operand +// GFX1064-ERR: error: not a valid operand + +v_subrev_co_u32_sdwa v0, vcc_lo, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +// GFX1032-ERR: error: invalid operand +// GFX1064-ERR: error: invalid operand + +v_subrev_co_u32_sdwa v0, vcc, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +// GFX1032-ERR: error: instruction not supported +// GFX1064-ERR: error: instruction not supported + +v_subrev_co_u32_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +// GFX1032-ERR: error: not a valid operand +// GFX1064-ERR: error: not a valid operand + +v_sub_co_ci_u32_sdwa v1, vcc_lo, v1, v4, vcc_lo dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +// GFX1032: v_sub_co_ci_u32_sdwa v1, vcc_lo, v1, v4, vcc_lo dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x08,0x02,0x52,0x01,0x06,0x00,0x06] +// GFX1064-ERR: error: instruction not supported on this GPU + +v_sub_co_ci_u32_sdwa v1, vcc, v1, v4, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +// GFX1032-ERR: error: instruction not supported on this GPU +// GFX1064: v_sub_co_ci_u32_sdwa v1, vcc, v1, v4, vcc 
dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x08,0x02,0x52,0x01,0x06,0x00,0x06] + +v_sub_co_ci_u32_sdwa v1, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +// GFX1032: v_sub_co_ci_u32_sdwa v1, vcc_lo, v1, v4, vcc_lo dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x08,0x02,0x52,0x01,0x06,0x00,0x06] +// GFX1064: v_sub_co_ci_u32_sdwa v1, vcc, v1, v4, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x08,0x02,0x52,0x01,0x06,0x00,0x06] + +v_subrev_co_ci_u32_sdwa v1, vcc_lo, v1, v4, vcc_lo dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +// GFX1032: v_subrev_co_ci_u32_sdwa v1, vcc_lo, v1, v4, vcc_lo dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x08,0x02,0x54,0x01,0x06,0x00,0x06] +// GFX1064-ERR: error: instruction not supported on this GPU + +v_subrev_co_ci_u32_sdwa v1, vcc, v1, v4, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +// GFX1032-ERR: error: instruction not supported on this GPU +// GFX1064: v_subrev_co_ci_u32_sdwa v1, vcc, v1, v4, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x08,0x02,0x54,0x01,0x06,0x00,0x06] + +v_subrev_co_ci_u32_sdwa v1, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +// GFX1032: v_subrev_co_ci_u32_sdwa v1, vcc_lo, v1, v4, vcc_lo dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x08,0x02,0x54,0x01,0x06,0x00,0x06] +// GFX1064: v_subrev_co_ci_u32_sdwa v1, vcc, v1, v4, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x08,0x02,0x54,0x01,0x06,0x00,0x06] + +v_add_co_ci_u32 v1, sext(v1), sext(v4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +// GFX1032: v_add_co_ci_u32_sdwa v1, vcc_lo, sext(v1), sext(v4), vcc_lo dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x08,0x02,0x50,0x01,0x06,0x08,0x0e] +// GFX1064: v_add_co_ci_u32_sdwa v1, vcc, sext(v1), sext(v4), vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x08,0x02,0x50,0x01,0x06,0x08,0x0e] + +v_add_co_ci_u32_sdwa v1, vcc_lo, sext(v1), sext(v4), vcc_lo dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +// GFX1032: v_add_co_ci_u32_sdwa v1, vcc_lo, sext(v1), sext(v4), vcc_lo dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x08,0x02,0x50,0x01,0x06,0x08,0x0e] +// GFX1064-ERR: error: instruction not supported on this GPU + +v_add_co_ci_u32_sdwa v1, vcc, sext(v1), sext(v4), vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +// GFX1032-ERR: error: instruction not supported on this GPU +// GFX1064: v_add_co_ci_u32_sdwa v1, vcc, sext(v1), sext(v4), vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; encoding: [0xf9,0x08,0x02,0x50,0x01,0x06,0x08,0x0e] + +v_add_co_u32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 +// GFX1032-ERR: error: not a valid operand +// GFX1064-ERR: error: not a valid operand + +v_add_co_u32_dpp v5, vcc_lo, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 +// GFX1032-ERR: error: not a valid operand +// GFX1064-ERR: error: not a valid operand + +v_add_co_u32_dpp v5, vcc, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 +// GFX1032-ERR: error: not a valid operand +// GFX1064-ERR: error: not a valid operand + 
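+// Note on the checks in this group (descriptive comment, not new test input): the wave32 and +// wave64 forms of these carry-in/carry-out add/sub instructions share the same VOP2, SDWA, and +// DPP opcode bytes; only the carry operand printed by the assembler differs, vcc_lo under +// +wavefrontsize32 versus the 64-bit vcc pair under +wavefrontsize64, which is what the paired +// GFX1032/GFX1064 encoding checks in this file verify. +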
+v_add_co_ci_u32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 +// GFX1032: v_add_co_ci_u32_dpp v5, vcc_lo, v1, v2, vcc_lo quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x04,0x0a,0x50,0x01,0xe4,0x00,0x00] +// GFX1064: v_add_co_ci_u32_dpp v5, vcc, v1, v2, vcc quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x04,0x0a,0x50,0x01,0xe4,0x00,0x00] + +v_add_co_ci_u32_dpp v5, vcc_lo, v1, v2, vcc_lo quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 +// GFX1032: v_add_co_ci_u32_dpp v5, vcc_lo, v1, v2, vcc_lo quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x04,0x0a,0x50,0x01,0xe4,0x00,0x00] +// GFX1064-ERR: error: instruction not supported on this GPU + +v_add_co_ci_u32_dpp v5, vcc, v1, v2, vcc quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 +// GFX1032-ERR: error: instruction not supported on this GPU +// GFX1064: v_add_co_ci_u32_dpp v5, vcc, v1, v2, vcc quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x04,0x0a,0x50,0x01,0xe4,0x00,0x00] + +v_sub_co_u32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 +// GFX1032-ERR: error: not a valid operand +// GFX1064-ERR: error: not a valid operand + +v_sub_co_u32_dpp v5, vcc_lo, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 +// GFX1032-ERR: error: not a valid operand +// GFX1064-ERR: error: not a valid operand + +v_sub_co_u32_dpp v5, vcc, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 +// GFX1032-ERR: error: not a valid operand +// GFX1064-ERR: error: not a valid operand + +v_sub_co_ci_u32_dpp v5, vcc_lo, v1, v2, vcc_lo quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 +// GFX1032: v_sub_co_ci_u32_dpp v5, vcc_lo, v1, v2, vcc_lo quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x04,0x0a,0x52,0x01,0xe4,0x00,0x00] +// GFX1064-ERR: error: instruction not supported on this GPU + +v_sub_co_ci_u32_dpp v5, vcc, v1, v2, vcc quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 +// GFX1032-ERR: error: instruction not supported on this GPU +// GFX1064: v_sub_co_ci_u32_dpp v5, vcc, v1, v2, vcc quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x04,0x0a,0x52,0x01,0xe4,0x00,0x00] + +v_subrev_co_u32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 +// GFX1032-ERR: error: not a valid operand +// GFX1064-ERR: error: not a valid operand + +v_subrev_co_u32_dpp v5, vcc_lo, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 +// GFX1032-ERR: error: not a valid operand +// GFX1064-ERR: error: not a valid operand + +v_subrev_co_u32_dpp v5, vcc, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 +// GFX1032-ERR: error: not a valid operand +// GFX1064-ERR: error: not a valid operand + +v_subrev_co_ci_u32_dpp v5, vcc_lo, v1, v2, vcc_lo quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 +// GFX1032: v_subrev_co_ci_u32_dpp v5, vcc_lo, v1, v2, vcc_lo quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x04,0x0a,0x54,0x01,0xe4,0x00,0x00] +// GFX1064-ERR: error: instruction not supported on this GPU + +v_subrev_co_ci_u32_dpp v5, vcc, v1, v2, vcc quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 +// GFX1032-ERR: error: instruction not supported on this GPU +// GFX1064: v_subrev_co_ci_u32_dpp v5, vcc, v1, v2, vcc quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x04,0x0a,0x54,0x01,0xe4,0x00,0x00] + +v_add_co_u32 v0, s0, v0, v2 +// GFX1032: v_add_co_u32_e64 v0, s0, v0, v2 ; encoding: [0x00,0x00,0x0f,0xd7,0x00,0x05,0x02,0x00] +// GFX1064-ERR: error: invalid operand for instruction + +v_add_co_u32_e64 v0, s0, v0, v2 +// 
GFX1032: v_add_co_u32_e64 v0, s0, v0, v2 ; encoding: [0x00,0x00,0x0f,0xd7,0x00,0x05,0x02,0x00] +// GFX1064-ERR: error: invalid operand for instruction + +v_add_co_ci_u32_e64 v4, s0, v1, v5, s2 +// GFX1032: v_add_co_ci_u32_e64 v4, s0, v1, v5, s2 ; encoding: [0x04,0x00,0x28,0xd5,0x01,0x0b,0x0a,0x00] +// GFX1064-ERR: error: invalid operand for instruction + +v_sub_co_u32 v0, s0, v0, v2 +// GFX1032: v_sub_co_u32_e64 v0, s0, v0, v2 ; encoding: [0x00,0x00,0x10,0xd7,0x00,0x05,0x02,0x00] +// GFX1064-ERR: error: invalid operand for instruction + +v_sub_co_u32_e64 v0, s0, v0, v2 +// GFX1032: v_sub_co_u32_e64 v0, s0, v0, v2 ; encoding: [0x00,0x00,0x10,0xd7,0x00,0x05,0x02,0x00] +// GFX1064-ERR: error: invalid operand for instruction + +v_sub_co_ci_u32_e64 v4, s0, v1, v5, s2 +// GFX1032: v_sub_co_ci_u32_e64 v4, s0, v1, v5, s2 ; encoding: [0x04,0x00,0x29,0xd5,0x01,0x0b,0x0a,0x00] +// GFX1064-ERR: error: invalid operand for instruction + +v_subrev_co_u32 v0, s0, v0, v2 +// GFX1032: v_subrev_co_u32_e64 v0, s0, v0, v2 ; encoding: [0x00,0x00,0x19,0xd7,0x00,0x05,0x02,0x00] +// GFX1064-ERR: error: invalid operand for instruction + +v_subrev_co_u32_e64 v0, s0, v0, v2 +// GFX1032: v_subrev_co_u32_e64 v0, s0, v0, v2 ; encoding: [0x00,0x00,0x19,0xd7,0x00,0x05,0x02,0x00] +// GFX1064-ERR: error: invalid operand for instruction + +v_subrev_co_ci_u32_e64 v4, s0, v1, v5, s2 +// GFX1032: v_subrev_co_ci_u32_e64 v4, s0, v1, v5, s2 ; encoding: [0x04,0x00,0x2a,0xd5,0x01,0x0b,0x0a,0x00] +// GFX1064-ERR: error: invalid operand for instruction + +v_add_co_u32 v0, s[0:1], v0, v2 +// GFX1032-ERR: error: invalid operand for instruction +// GFX1064: v_add_co_u32_e64 v0, s[0:1], v0, v2 ; encoding: [0x00,0x00,0x0f,0xd7,0x00,0x05,0x02,0x00] + +v_add_co_u32_e64 v0, s[0:1], v0, v2 +// GFX1032-ERR: error: invalid operand for instruction +// GFX1064: v_add_co_u32_e64 v0, s[0:1], v0, v2 ; encoding: [0x00,0x00,0x0f,0xd7,0x00,0x05,0x02,0x00] + +v_add_co_ci_u32_e64 v4, s[0:1], v1, v5, s[2:3] +// GFX1032-ERR: error: invalid operand for instruction +// GFX1064: v_add_co_ci_u32_e64 v4, s[0:1], v1, v5, s[2:3] ; encoding: [0x04,0x00,0x28,0xd5,0x01,0x0b,0x0a,0x00] + +v_sub_co_u32 v0, s[0:1], v0, v2 +// GFX1032-ERR: error: invalid operand for instruction +// GFX1064: v_sub_co_u32_e64 v0, s[0:1], v0, v2 ; encoding: [0x00,0x00,0x10,0xd7,0x00,0x05,0x02,0x00] + +v_sub_co_u32_e64 v0, s[0:1], v0, v2 +// GFX1032-ERR: error: invalid operand for instruction +// GFX1064: v_sub_co_u32_e64 v0, s[0:1], v0, v2 ; encoding: [0x00,0x00,0x10,0xd7,0x00,0x05,0x02,0x00] + +v_sub_co_ci_u32_e64 v4, s[0:1], v1, v5, s[2:3] +// GFX1032-ERR: error: invalid operand for instruction +// GFX1064: v_sub_co_ci_u32_e64 v4, s[0:1], v1, v5, s[2:3] ; encoding: [0x04,0x00,0x29,0xd5,0x01,0x0b,0x0a,0x00] + +v_subrev_co_u32 v0, s[0:1], v0, v2 +// GFX1032-ERR: error: invalid operand for instruction +// GFX1064: v_subrev_co_u32_e64 v0, s[0:1], v0, v2 ; encoding: [0x00,0x00,0x19,0xd7,0x00,0x05,0x02,0x00] + +v_subrev_co_u32_e64 v0, s[0:1], v0, v2 +// GFX1032-ERR: error: invalid operand for instruction +// GFX1064: v_subrev_co_u32_e64 v0, s[0:1], v0, v2 ; encoding: [0x00,0x00,0x19,0xd7,0x00,0x05,0x02,0x00] + +v_subrev_co_ci_u32_e64 v4, s[0:1], v1, v5, s[2:3] +// GFX1032-ERR: error: invalid operand for instruction +// GFX1064: v_subrev_co_ci_u32_e64 v4, s[0:1], v1, v5, s[2:3] ; encoding: [0x04,0x00,0x2a,0xd5,0x01,0x0b,0x0a,0x00] + +v_add_co_ci_u32_e64 v4, vcc_lo, v1, v5, s2 +// GFX1032: v_add_co_ci_u32_e64 v4, vcc_lo, v1, v5, s2 ; encoding: [0x04,0x6a,0x28,0xd5,0x01,0x0b,0x0a,0x00] +// 
GFX1064-ERR: error: invalid operand for instruction + +v_add_co_ci_u32_e64 v4, vcc, v1, v5, s[2:3] +// GFX1032-ERR: error: invalid operand for instruction +// GFX1064: v_add_co_ci_u32_e64 v4, vcc, v1, v5, s[2:3] ; encoding: [0x04,0x6a,0x28,0xd5,0x01,0x0b,0x0a,0x00] + +v_add_co_ci_u32_e64 v4, s0, v1, v5, vcc_lo +// GFX1032: v_add_co_ci_u32_e64 v4, s0, v1, v5, vcc_lo ; encoding: [0x04,0x00,0x28,0xd5,0x01,0x0b,0xaa,0x01] +// GFX1064-ERR: error: invalid operand for instruction + +v_add_co_ci_u32_e64 v4, s[0:1], v1, v5, vcc +// GFX1032-ERR: error: invalid operand for instruction +// GFX1064: v_add_co_ci_u32_e64 v4, s[0:1], v1, v5, vcc ; encoding: [0x04,0x00,0x28,0xd5,0x01,0x0b,0xaa,0x01] + +v_div_scale_f32 v2, s2, v0, v0, v2 +// GFX1032: v_div_scale_f32 v2, s2, v0, v0, v2 ; encoding: [0x02,0x02,0x6d,0xd5,0x00,0x01,0x0a,0x04] +// GFX1064-ERR: error: invalid operand for instruction + +v_div_scale_f32 v2, s[2:3], v0, v0, v2 +// GFX1032-ERR: error: invalid operand for instruction +// GFX1064: v_div_scale_f32 v2, s[2:3], v0, v0, v2 ; encoding: [0x02,0x02,0x6d,0xd5,0x00,0x01,0x0a,0x04] + +v_div_scale_f64 v[2:3], s2, v[0:1], v[0:1], v[2:3] +// GFX1032: v_div_scale_f64 v[2:3], s2, v[0:1], v[0:1], v[2:3] ; encoding: [0x02,0x02,0x6e,0xd5,0x00,0x01,0x0a,0x04] +// GFX1064-ERR: error: invalid operand for instruction + +v_div_scale_f64 v[2:3], s[2:3], v[0:1], v[0:1], v[2:3] +// GFX1032-ERR: error: invalid operand for instruction +// GFX1064: v_div_scale_f64 v[2:3], s[2:3], v[0:1], v[0:1], v[2:3] ; encoding: [0x02,0x02,0x6e,0xd5,0x00,0x01,0x0a,0x04] + +v_mad_i64_i32 v[0:1], s6, v0, v1, v[2:3] +// GFX1032: v_mad_i64_i32 v[0:1], s6, v0, v1, v[2:3] ; encoding: [0x00,0x06,0x77,0xd5,0x00,0x03,0x0a,0x04] +// GFX1064-ERR: error: invalid operand for instruction + +v_mad_i64_i32 v[0:1], s[6:7], v0, v1, v[2:3] +// GFX1032-ERR: error: invalid operand for instruction +// GFX1064: v_mad_i64_i32 v[0:1], s[6:7], v0, v1, v[2:3] ; encoding: [0x00,0x06,0x77,0xd5,0x00,0x03,0x0a,0x04] + +v_mad_u64_u32 v[0:1], s6, v0, v1, v[2:3] +// GFX1032: v_mad_u64_u32 v[0:1], s6, v0, v1, v[2:3] ; encoding: [0x00,0x06,0x76,0xd5,0x00,0x03,0x0a,0x04] +// GFX1064-ERR: error: invalid operand for instruction + +v_mad_u64_u32 v[0:1], s[6:7], v0, v1, v[2:3] +// GFX1032-ERR: error: invalid operand for instruction +// GFX1064: v_mad_u64_u32 v[0:1], s[6:7], v0, v1, v[2:3] ; encoding: [0x00,0x06,0x76,0xd5,0x00,0x03,0x0a,0x04] + +v_cmpx_neq_f32_e32 v0, v1 +// GFX1032: v_cmpx_neq_f32_e32 v0, v1 ; encoding: [0x00,0x03,0x3a,0x7c] +// GFX1064: v_cmpx_neq_f32_e32 v0, v1 ; encoding: [0x00,0x03,0x3a,0x7c] + +v_cmpx_neq_f32_sdwa v0, v1 src0_sel:WORD_1 src1_sel:DWORD +// GFX1032: v_cmpx_neq_f32_sdwa v0, v1 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x02,0x3a,0x7c,0x00,0x00,0x05,0x06] +// GFX1064: v_cmpx_neq_f32_sdwa v0, v1 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x02,0x3a,0x7c,0x00,0x00,0x05,0x06] + +v_cmpx_eq_u32_sdwa v0, 1 src0_sel:WORD_1 src1_sel:DWORD +// GFX1032: v_cmpx_eq_u32_sdwa v0, 1 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x02,0xa5,0x7d,0x00,0x00,0x05,0x86] +// GFX1064: v_cmpx_eq_u32_sdwa v0, 1 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x02,0xa5,0x7d,0x00,0x00,0x05,0x86] + +v_cmpx_class_f32_e64 v0, 1 +// GFX1032: v_cmpx_class_f32_e64 v0, 1 ; encoding: [0x00,0x00,0x98,0xd4,0x00,0x03,0x01,0x00] +// GFX1064: v_cmpx_class_f32_e64 v0, 1 ; encoding: [0x00,0x00,0x98,0xd4,0x00,0x03,0x01,0x00] + +v_cmpx_class_f32_sdwa v0, 1 src0_sel:WORD_1 src1_sel:DWORD +// GFX1032: v_cmpx_class_f32_sdwa v0, 1 src0_sel:WORD_1 src1_sel:DWORD ; 
encoding: [0xf9,0x02,0x31,0x7d,0x00,0x00,0x05,0x86] +// GFX1064: v_cmpx_class_f32_sdwa v0, 1 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x02,0x31,0x7d,0x00,0x00,0x05,0x86] Index: test/MC/Disassembler/AMDGPU/gfx10-sgpr-max.txt =================================================================== --- test/MC/Disassembler/AMDGPU/gfx10-sgpr-max.txt +++ test/MC/Disassembler/AMDGPU/gfx10-sgpr-max.txt @@ -8,3 +8,9 @@ # GFX10: s_mov_b32 s105, s104 ; encoding: [0x68,0x03,0xe9,0xbe] 0x68,0x03,0xe9,0xbe + +# GFX10: v_cmp_eq_f32_e64 s105, v0, s105 +0x69,0x00,0x02,0xd4,0x00,0xd3,0x00,0x00 + +# GFX10: v_cmp_eq_f32_sdwa s105, v0, s105 src0_sel:DWORD src1_sel:DWORD +0xf9,0xd2,0x04,0x7c,0x00,0xe9,0x06,0x86 Index: test/MC/Disassembler/AMDGPU/wave32.txt =================================================================== --- /dev/null +++ test/MC/Disassembler/AMDGPU/wave32.txt @@ -0,0 +1,164 @@ +# RUN: llvm-mc -arch=amdgcn -mcpu=gfx1010 -mattr=+wavefrontsize32,-wavefrontsize64 -disassemble -show-encoding < %s | FileCheck -check-prefix=GFX1032 %s +# RUN: llvm-mc -arch=amdgcn -mcpu=gfx1010 -mattr=+wavefrontsize64,-wavefrontsize32 -disassemble -show-encoding < %s | FileCheck -check-prefix=GFX1064 %s + +# GFX1032: v_cmp_lt_f32_e32 vcc_lo, s2, v4 +# GFX1064: v_cmp_lt_f32_e32 vcc, s2, v4 +0x02,0x08,0x02,0x7c + +# GFX1032: v_cmp_ge_i32_e64 s2, s0, v2 +# GFX1064: v_cmp_ge_i32_e64 s[2:3], s0, v2 +0x02,0x00,0x86,0xd4,0x00,0x04,0x02,0x00 + +# GFX1032: v_cmp_ge_i32_sdwa vcc_lo, v0, v2 src0_sel:WORD_1 src1_sel:DWORD +# GFX1064: v_cmp_ge_i32_sdwa vcc, v0, v2 src0_sel:WORD_1 src1_sel:DWORD +0xf9,0x04,0x0c,0x7d,0x00,0x00,0x05,0x06 + +# GFX1032: v_cmp_le_f16_sdwa s0, v3, v4 src0_sel:WORD_1 src1_sel:DWORD +# GFX1064: v_cmp_le_f16_sdwa s[0:1], v3, v4 src0_sel:WORD_1 src1_sel:DWORD +0xf9,0x08,0x96,0x7d,0x03,0x80,0x05,0x06 + +# GFX1032: v_cmp_class_f32_e32 vcc_lo, s0, v0 +# GFX1064: v_cmp_class_f32_e32 vcc, s0, v0 +0x00,0x00,0x10,0x7d + +# GFX1032: v_cmp_class_f16_sdwa vcc_lo, v1, v2 src0_sel:DWORD src1_sel:DWORD +# GFX1064: v_cmp_class_f16_sdwa vcc, v1, v2 src0_sel:DWORD src1_sel:DWORD +0xf9,0x04,0x1e,0x7d,0x01,0x00,0x06,0x06 + +# GFX1032: v_cmp_class_f16_sdwa s0, v1, v2 src0_sel:DWORD src1_sel:DWORD +# GFX1064: v_cmp_class_f16_sdwa s[0:1], v1, v2 src0_sel:DWORD src1_sel:DWORD +0xf9,0x04,0x1e,0x7d,0x01,0x80,0x06,0x06 + +# GFX1032: v_cndmask_b32_e32 v5, 0, v2, vcc_lo +# GFX1064: v_cndmask_b32_e32 v5, 0, v2, vcc ; +0x80,0x04,0x0a,0x02 + +# GFX1032: v_cndmask_b32_e32 v1, v2, v3, vcc_lo +# GFX1064: v_cndmask_b32_e32 v1, v2, v3, vcc ; +0x02,0x07,0x02,0x02 + +# GFX1032: v_add_co_u32_e64 v2, vcc_lo, s0, v2 +# GFX1064: v_add_co_u32_e64 v2, vcc, s0, v2 +0x02,0x6a,0x0f,0xd7,0x00,0x04,0x02,0x00 + +# GFX1032: v_add_co_ci_u32_e32 v3, vcc_lo, v3, v4, vcc_lo +# GFX1064: v_add_co_ci_u32_e32 v3, vcc, v3, v4, vcc ; +0x03,0x09,0x06,0x50 + +# GFX1032: v_sub_co_u32_e64 v2, vcc_lo, s0, v2 +# GFX1064: v_sub_co_u32_e64 v2, vcc, s0, v2 +0x02,0x6a,0x10,0xd7,0x00,0x04,0x02,0x00 + +# GFX1032: v_subrev_co_u32_e64 v2, vcc_lo, s0, v2 +# GFX1064: v_subrev_co_u32_e64 v2, vcc, s0, v2 +0x02,0x6a,0x19,0xd7,0x00,0x04,0x02,0x00 + +# GFX1032: v_sub_co_ci_u32_e32 v3, vcc_lo, v3, v4, vcc_lo +# GFX1064: v_sub_co_ci_u32_e32 v3, vcc, v3, v4, vcc ; +0x03,0x09,0x06,0x52 + +# GFX1032: v_subrev_co_ci_u32_e32 v1, vcc_lo, 0, v1, vcc_lo +# GFX1064: v_subrev_co_ci_u32_e32 v1, vcc, 0, v1, vcc ; +0x80,0x02,0x02,0x54 + +# GFX1032: v_add_co_ci_u32_sdwa v1, vcc_lo, v1, v4, vcc_lo dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +# GFX1064: v_add_co_ci_u32_sdwa 
v1, vcc, v1, v4, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +0xf9,0x08,0x02,0x50,0x01,0x06,0x00,0x06 + +# GFX1032: v_sub_co_ci_u32_sdwa v1, vcc_lo, v1, v4, vcc_lo dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +# GFX1064: v_sub_co_ci_u32_sdwa v1, vcc, v1, v4, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +0xf9,0x08,0x02,0x52,0x01,0x06,0x00,0x06 + +# GFX1032: v_subrev_co_ci_u32_sdwa v1, vcc_lo, v1, v4, vcc_lo dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +# GFX1064: v_subrev_co_ci_u32_sdwa v1, vcc, v1, v4, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +0xf9,0x08,0x02,0x54,0x01,0x06,0x00,0x06 + +# GFX1032: v_add_co_ci_u32_sdwa v1, vcc_lo, sext(v1), sext(v4), vcc_lo dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +# GFX1064: v_add_co_ci_u32_sdwa v1, vcc, sext(v1), sext(v4), vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +0xf9,0x08,0x02,0x50,0x01,0x06,0x08,0x0e + +# GFX1032: v_add_nc_u32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 +# GFX1064: v_add_nc_u32_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 +0xfa,0x04,0x0a,0x4a,0x01,0xe4,0x00,0x00 + +# FIXME: Results in invalid v_subrev_u16_dpp which apparently has the same encoding but does not exist in GFX10 + +# gfx1032: v_add_co_ci_u32_dpp v5, vcc_lo, v1, v2, vcc_lo quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 +# gfx1064: v_add_co_ci_u32_dpp v5, vcc, v1, v2, vcc quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 +# 0xfa,0x04,0x0a,0x50,0x01,0xe4,0x00,0x00 + +# FIXME: Results in v_mul_lo_u16_dpp + +# gfx1032: v_sub_co_ci_u32_dpp v5, vcc_lo, v1, v2, vcc_lo quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 +# gfx1064: v_sub_co_ci_u32_dpp v5, vcc, v1, v2, vcc quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 +# 0xfa,0x04,0x0a,0x52,0x01,0xe4,0x00,0x00 + +# FIXME: gives v_lshlrev_b16_dpp + +# gfx1032: v_subrev_co_ci_u32_dpp v5, vcc_lo, v1, v2, vcc_lo quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 +# gfx1064: v_subrev_co_ci_u32_dpp v5, vcc, v1, v2, vcc quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 +# 0xfa,0x04,0x0a,0x54,0x01,0xe4,0x00,0x00 + +# GFX1032: v_add_co_u32_e64 v0, s0, v0, v2 +# GFX1064: v_add_co_u32_e64 v0, s[0:1], v0, v2 +0x00,0x00,0x0f,0xd7,0x00,0x05,0x02,0x00 + +# GFX1032: v_add_co_ci_u32_e64 v4, s0, v1, v5, s2 +# GFX1064: v_add_co_ci_u32_e64 v4, s[0:1], v1, v5, s[2:3] +0x04,0x00,0x28,0xd5,0x01,0x0b,0x0a,0x00 + +# GFX1032: v_sub_co_u32_e64 v0, s0, v0, v2 +# GFX1064: v_sub_co_u32_e64 v0, s[0:1], v0, v2 +0x00,0x00,0x10,0xd7,0x00,0x05,0x02,0x00 + +# GFX1032: v_sub_co_ci_u32_e64 v4, s0, v1, v5, s2 +# GFX1064: v_sub_co_ci_u32_e64 v4, s[0:1], v1, v5, s[2:3] +0x04,0x00,0x29,0xd5,0x01,0x0b,0x0a,0x00 + +# GFX1032: v_subrev_co_u32_e64 v0, s0, v0, v2 +# GFX1064: v_subrev_co_u32_e64 v0, s[0:1], v0, v2 +0x00,0x00,0x19,0xd7,0x00,0x05,0x02,0x00 + +# GFX1032: v_subrev_co_ci_u32_e64 v4, s0, v1, v5, s2 +# GFX1064: v_subrev_co_ci_u32_e64 v4, s[0:1], v1, v5, s[2:3] +0x04,0x00,0x2a,0xd5,0x01,0x0b,0x0a,0x00 + +# GFX1032: v_add_co_ci_u32_e64 v4, vcc_lo, v1, v5, s2 +# GFX1064: v_add_co_ci_u32_e64 v4, vcc, v1, v5, s[2:3] +0x04,0x6a,0x28,0xd5,0x01,0x0b,0x0a,0x00 + +# GFX1032: v_add_co_ci_u32_e64 v4, s0, v1, v5, vcc_lo +# GFX1064: v_add_co_ci_u32_e64 v4, s[0:1], v1, v5, vcc ; +0x04,0x00,0x28,0xd5,0x01,0x0b,0xaa,0x01 + +# GFX1032: v_div_scale_f32 v2, s2, v0, v0, v2 +# GFX1064: v_div_scale_f32 v2, s[2:3], v0, v0, v2 +0x02,0x02,0x6d,0xd5,0x00,0x01,0x0a,0x04 + +# GFX1032: 
v_div_scale_f64 v[2:3], s2, v[0:1], v[0:1], v[2:3] +# GFX1064: v_div_scale_f64 v[2:3], s[2:3], v[0:1], v[0:1], v[2:3] +0x02,0x02,0x6e,0xd5,0x00,0x01,0x0a,0x04 + +# GFX1032: v_mad_i64_i32 v[0:1], s6, v0, v1, v[2:3] +# GFX1064: v_mad_i64_i32 v[0:1], s[6:7], v0, v1, v[2:3] +0x00,0x06,0x77,0xd5,0x00,0x03,0x0a,0x04 + +# GFX1032: v_mad_u64_u32 v[0:1], s6, v0, v1, v[2:3] +# GFX1064: v_mad_u64_u32 v[0:1], s[6:7], v0, v1, v[2:3] +0x00,0x06,0x76,0xd5,0x00,0x03,0x0a,0x04 + +# GFX1032: v_cmpx_neq_f32_e32 v0, v1 +# GFX1064: v_cmpx_neq_f32_e32 v0, v1 +0x00,0x03,0x3a,0x7c + +# GFX1032: v_cmpx_neq_f32_sdwa v0, v1 src0_sel:WORD_1 src1_sel:DWORD +# GFX1064: v_cmpx_neq_f32_sdwa v0, v1 src0_sel:WORD_1 src1_sel:DWORD +0xf9,0x02,0x3a,0x7c,0x00,0x00,0x05,0x06 + +# GFX1032: v_cmpx_class_f32_e64 v0, 1 +# GFX1064: v_cmpx_class_f32_e64 v0, 1 +0x00,0x00,0x98,0xd4,0x00,0x03,0x01,0x00 + +# GFX1032: v_cmpx_class_f32_sdwa v0, 1 src0_sel:WORD_1 src1_sel:DWORD +# GFX1064: v_cmpx_class_f32_sdwa v0, 1 src0_sel:WORD_1 src1_sel:DWORD +0xf9,0x02,0x31,0x7d,0x00,0x00,0x05,0x86 Index: test/Transforms/InstCombine/AMDGPU/amdgcn-intrinsics.ll =================================================================== --- test/Transforms/InstCombine/AMDGPU/amdgcn-intrinsics.ll +++ test/Transforms/InstCombine/AMDGPU/amdgcn-intrinsics.ll @@ -87,7 +87,6 @@ declare float @llvm.amdgcn.frexp.mant.f32(float) nounwind readnone declare double @llvm.amdgcn.frexp.mant.f64(double) nounwind readnone - define float @test_constant_fold_frexp_mant_f32_undef() nounwind { ; CHECK-LABEL: @test_constant_fold_frexp_mant_f32_undef( ; CHECK-NEXT: ret float undef @@ -248,7 +247,6 @@ ret double %val } - ; -------------------------------------------------------------------- ; llvm.amdgcn.frexp.exp ; -------------------------------------------------------------------- @@ -886,7 +884,6 @@ ret <2 x half> %cvt } -; Test constant values where rtz changes result define <2 x half> @constant_rtz_pkrtz() { ; CHECK-LABEL: @constant_rtz_pkrtz( ; CHECK-NEXT: ret <2 x half> @@ -1273,11 +1270,7 @@ declare void @llvm.amdgcn.exp.f32(i32 immarg, i32 immarg, float, float, float, float, i1 immarg, i1 immarg) nounwind inaccessiblememonly - - - define void @exp_disabled_inputs_to_undef(float %x, float %y, float %z, float %w) { - ; enable src0..src3 constants ; CHECK-LABEL: @exp_disabled_inputs_to_undef( ; CHECK-NEXT: call void @llvm.amdgcn.exp.f32(i32 0, i32 1, float 1.000000e+00, float undef, float undef, float undef, i1 true, i1 false) ; CHECK-NEXT: call void @llvm.amdgcn.exp.f32(i32 0, i32 2, float undef, float 2.000000e+00, float undef, float undef, i1 true, i1 false) @@ -1323,8 +1316,6 @@ declare void @llvm.amdgcn.exp.compr.v2f16(i32 immarg, i32 immarg, <2 x half>, <2 x half>, i1 immarg, i1 immarg) nounwind inaccessiblememonly - - define void @exp_compr_disabled_inputs_to_undef(<2 x half> %xy, <2 x half> %zw) { ; CHECK-LABEL: @exp_compr_disabled_inputs_to_undef( ; CHECK-NEXT: call void @llvm.amdgcn.exp.compr.v2f16(i32 0, i32 0, <2 x half> undef, <2 x half> undef, i1 true, i1 false) @@ -1495,7 +1486,6 @@ ret float %med3 } -; This can return any of the qnans. 
define float @fmed3_qnan0_qnan1_qnan2_f32(float %x, float %y) { ; CHECK-LABEL: @fmed3_qnan0_qnan1_qnan2_f32( ; CHECK-NEXT: ret float 0x7FF8030000000000 @@ -1628,19 +1618,19 @@ ; llvm.amdgcn.icmp ; -------------------------------------------------------------------- -declare i64 @llvm.amdgcn.icmp.i32(i32, i32, i32 immarg) nounwind readnone convergent -declare i64 @llvm.amdgcn.icmp.i64(i64, i64, i32 immarg) nounwind readnone convergent -declare i64 @llvm.amdgcn.icmp.i1(i1, i1, i32 immarg) nounwind readnone convergent +declare i64 @llvm.amdgcn.icmp.i64.i32(i32, i32, i32 immarg) nounwind readnone convergent +declare i64 @llvm.amdgcn.icmp.i64.i64(i64, i64, i32 immarg) nounwind readnone convergent +declare i64 @llvm.amdgcn.icmp.i64.i1(i1, i1, i32 immarg) nounwind readnone convergent define i64 @invalid_icmp_code(i32 %a, i32 %b) { ; CHECK-LABEL: @invalid_icmp_code( -; CHECK-NEXT: [[UNDER:%.*]] = call i64 @llvm.amdgcn.icmp.i32(i32 [[A:%.*]], i32 [[B:%.*]], i32 31) -; CHECK-NEXT: [[OVER:%.*]] = call i64 @llvm.amdgcn.icmp.i32(i32 [[A]], i32 [[B]], i32 42) +; CHECK-NEXT: [[UNDER:%.*]] = call i64 @llvm.amdgcn.icmp.i64.i32(i32 [[A:%.*]], i32 [[B:%.*]], i32 31) +; CHECK-NEXT: [[OVER:%.*]] = call i64 @llvm.amdgcn.icmp.i64.i32(i32 [[A]], i32 [[B]], i32 42) ; CHECK-NEXT: [[OR:%.*]] = or i64 [[UNDER]], [[OVER]] ; CHECK-NEXT: ret i64 [[OR]] ; - %under = call i64 @llvm.amdgcn.icmp.i32(i32 %a, i32 %b, i32 31) - %over = call i64 @llvm.amdgcn.icmp.i32(i32 %a, i32 %b, i32 42) + %under = call i64 @llvm.amdgcn.icmp.i64.i32(i32 %a, i32 %b, i32 31) + %over = call i64 @llvm.amdgcn.icmp.i64.i32(i32 %a, i32 %b, i32 42) %or = or i64 %under, %over ret i64 %or } @@ -1649,7 +1639,7 @@ ; CHECK-LABEL: @icmp_constant_inputs_false( ; CHECK-NEXT: ret i64 0 ; - %result = call i64 @llvm.amdgcn.icmp.i32(i32 9, i32 8, i32 32) + %result = call i64 @llvm.amdgcn.icmp.i64.i32(i32 9, i32 8, i32 32) ret i64 %result } @@ -1658,284 +1648,283 @@ ; CHECK-NEXT: [[RESULT:%.*]] = call i64 @llvm.read_register.i64(metadata !0) #5 ; CHECK-NEXT: ret i64 [[RESULT]] ; - %result = call i64 @llvm.amdgcn.icmp.i32(i32 9, i32 8, i32 34) + %result = call i64 @llvm.amdgcn.icmp.i64.i32(i32 9, i32 8, i32 34) ret i64 %result } define i64 @icmp_constant_to_rhs_slt(i32 %x) { ; CHECK-LABEL: @icmp_constant_to_rhs_slt( -; CHECK-NEXT: [[RESULT:%.*]] = call i64 @llvm.amdgcn.icmp.i32(i32 [[X:%.*]], i32 9, i32 38) +; CHECK-NEXT: [[RESULT:%.*]] = call i64 @llvm.amdgcn.icmp.i64.i32(i32 [[X:%.*]], i32 9, i32 38) ; CHECK-NEXT: ret i64 [[RESULT]] ; - %result = call i64 @llvm.amdgcn.icmp.i32(i32 9, i32 %x, i32 40) + %result = call i64 @llvm.amdgcn.icmp.i64.i32(i32 9, i32 %x, i32 40) ret i64 %result } define i64 @fold_icmp_ne_0_zext_icmp_eq_i32(i32 %a, i32 %b) { ; CHECK-LABEL: @fold_icmp_ne_0_zext_icmp_eq_i32( -; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i32(i32 [[A:%.*]], i32 [[B:%.*]], i32 32) +; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i64.i32(i32 [[A:%.*]], i32 [[B:%.*]], i32 32) ; CHECK-NEXT: ret i64 [[MASK]] ; %cmp = icmp eq i32 %a, %b %zext.cmp = zext i1 %cmp to i32 - %mask = call i64 @llvm.amdgcn.icmp.i32(i32 %zext.cmp, i32 0, i32 33) + %mask = call i64 @llvm.amdgcn.icmp.i64.i32(i32 %zext.cmp, i32 0, i32 33) ret i64 %mask } define i64 @fold_icmp_ne_0_zext_icmp_ne_i32(i32 %a, i32 %b) { ; CHECK-LABEL: @fold_icmp_ne_0_zext_icmp_ne_i32( -; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i32(i32 [[A:%.*]], i32 [[B:%.*]], i32 33) +; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i64.i32(i32 [[A:%.*]], i32 [[B:%.*]], i32 33) ; CHECK-NEXT: 
ret i64 [[MASK]] ; %cmp = icmp ne i32 %a, %b %zext.cmp = zext i1 %cmp to i32 - %mask = call i64 @llvm.amdgcn.icmp.i32(i32 %zext.cmp, i32 0, i32 33) + %mask = call i64 @llvm.amdgcn.icmp.i64.i32(i32 %zext.cmp, i32 0, i32 33) ret i64 %mask } define i64 @fold_icmp_ne_0_zext_icmp_sle_i32(i32 %a, i32 %b) { ; CHECK-LABEL: @fold_icmp_ne_0_zext_icmp_sle_i32( -; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i32(i32 [[A:%.*]], i32 [[B:%.*]], i32 41) +; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i64.i32(i32 [[A:%.*]], i32 [[B:%.*]], i32 41) ; CHECK-NEXT: ret i64 [[MASK]] ; %cmp = icmp sle i32 %a, %b %zext.cmp = zext i1 %cmp to i32 - %mask = call i64 @llvm.amdgcn.icmp.i32(i32 %zext.cmp, i32 0, i32 33) + %mask = call i64 @llvm.amdgcn.icmp.i64.i32(i32 %zext.cmp, i32 0, i32 33) ret i64 %mask } define i64 @fold_icmp_ne_0_zext_icmp_ugt_i64(i64 %a, i64 %b) { ; CHECK-LABEL: @fold_icmp_ne_0_zext_icmp_ugt_i64( -; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i64(i64 [[A:%.*]], i64 [[B:%.*]], i32 34) +; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i64.i64(i64 [[A:%.*]], i64 [[B:%.*]], i32 34) ; CHECK-NEXT: ret i64 [[MASK]] ; %cmp = icmp ugt i64 %a, %b %zext.cmp = zext i1 %cmp to i32 - %mask = call i64 @llvm.amdgcn.icmp.i32(i32 %zext.cmp, i32 0, i32 33) + %mask = call i64 @llvm.amdgcn.icmp.i64.i32(i32 %zext.cmp, i32 0, i32 33) ret i64 %mask } define i64 @fold_icmp_ne_0_zext_icmp_ult_swap_i64(i64 %a, i64 %b) { ; CHECK-LABEL: @fold_icmp_ne_0_zext_icmp_ult_swap_i64( -; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i64(i64 [[A:%.*]], i64 [[B:%.*]], i32 34) +; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i64.i64(i64 [[A:%.*]], i64 [[B:%.*]], i32 34) ; CHECK-NEXT: ret i64 [[MASK]] ; %cmp = icmp ugt i64 %a, %b %zext.cmp = zext i1 %cmp to i32 - %mask = call i64 @llvm.amdgcn.icmp.i32(i32 0, i32 %zext.cmp, i32 33) + %mask = call i64 @llvm.amdgcn.icmp.i64.i32(i32 0, i32 %zext.cmp, i32 33) ret i64 %mask } define i64 @fold_icmp_ne_0_zext_fcmp_oeq_f32(float %a, float %b) { ; CHECK-LABEL: @fold_icmp_ne_0_zext_fcmp_oeq_f32( -; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.fcmp.f32(float [[A:%.*]], float [[B:%.*]], i32 1) +; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.fcmp.i64.f32(float [[A:%.*]], float [[B:%.*]], i32 1) ; CHECK-NEXT: ret i64 [[MASK]] ; %cmp = fcmp oeq float %a, %b %zext.cmp = zext i1 %cmp to i32 - %mask = call i64 @llvm.amdgcn.icmp.i32(i32 %zext.cmp, i32 0, i32 33) + %mask = call i64 @llvm.amdgcn.icmp.i64.i32(i32 %zext.cmp, i32 0, i32 33) ret i64 %mask } define i64 @fold_icmp_ne_0_zext_fcmp_une_f32(float %a, float %b) { ; CHECK-LABEL: @fold_icmp_ne_0_zext_fcmp_une_f32( -; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.fcmp.f32(float [[A:%.*]], float [[B:%.*]], i32 14) +; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.fcmp.i64.f32(float [[A:%.*]], float [[B:%.*]], i32 14) ; CHECK-NEXT: ret i64 [[MASK]] ; %cmp = fcmp une float %a, %b %zext.cmp = zext i1 %cmp to i32 - %mask = call i64 @llvm.amdgcn.icmp.i32(i32 %zext.cmp, i32 0, i32 33) + %mask = call i64 @llvm.amdgcn.icmp.i64.i32(i32 %zext.cmp, i32 0, i32 33) ret i64 %mask } define i64 @fold_icmp_ne_0_zext_fcmp_olt_f64(double %a, double %b) { ; CHECK-LABEL: @fold_icmp_ne_0_zext_fcmp_olt_f64( -; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.fcmp.f64(double [[A:%.*]], double [[B:%.*]], i32 4) +; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.fcmp.i64.f64(double [[A:%.*]], double [[B:%.*]], i32 4) ; CHECK-NEXT: ret i64 [[MASK]] ; %cmp = fcmp olt double %a, %b %zext.cmp = zext i1 %cmp to i32 - %mask 
= call i64 @llvm.amdgcn.icmp.i32(i32 %zext.cmp, i32 0, i32 33) + %mask = call i64 @llvm.amdgcn.icmp.i64.i32(i32 %zext.cmp, i32 0, i32 33) ret i64 %mask } define i64 @fold_icmp_sext_icmp_ne_0_i32(i32 %a, i32 %b) { ; CHECK-LABEL: @fold_icmp_sext_icmp_ne_0_i32( -; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i32(i32 [[A:%.*]], i32 [[B:%.*]], i32 32) +; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i64.i32(i32 [[A:%.*]], i32 [[B:%.*]], i32 32) ; CHECK-NEXT: ret i64 [[MASK]] ; %cmp = icmp eq i32 %a, %b %sext.cmp = sext i1 %cmp to i32 - %mask = call i64 @llvm.amdgcn.icmp.i32(i32 %sext.cmp, i32 0, i32 33) + %mask = call i64 @llvm.amdgcn.icmp.i64.i32(i32 %sext.cmp, i32 0, i32 33) ret i64 %mask } define i64 @fold_icmp_eq_0_zext_icmp_eq_i32(i32 %a, i32 %b) { ; CHECK-LABEL: @fold_icmp_eq_0_zext_icmp_eq_i32( -; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i32(i32 [[A:%.*]], i32 [[B:%.*]], i32 33) +; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i64.i32(i32 [[A:%.*]], i32 [[B:%.*]], i32 33) ; CHECK-NEXT: ret i64 [[MASK]] ; %cmp = icmp eq i32 %a, %b %zext.cmp = zext i1 %cmp to i32 - %mask = call i64 @llvm.amdgcn.icmp.i32(i32 %zext.cmp, i32 0, i32 32) + %mask = call i64 @llvm.amdgcn.icmp.i64.i32(i32 %zext.cmp, i32 0, i32 32) ret i64 %mask } define i64 @fold_icmp_eq_0_zext_icmp_slt_i32(i32 %a, i32 %b) { ; CHECK-LABEL: @fold_icmp_eq_0_zext_icmp_slt_i32( -; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i32(i32 [[A:%.*]], i32 [[B:%.*]], i32 39) +; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i64.i32(i32 [[A:%.*]], i32 [[B:%.*]], i32 39) ; CHECK-NEXT: ret i64 [[MASK]] ; %cmp = icmp slt i32 %a, %b %zext.cmp = zext i1 %cmp to i32 - %mask = call i64 @llvm.amdgcn.icmp.i32(i32 %zext.cmp, i32 0, i32 32) + %mask = call i64 @llvm.amdgcn.icmp.i64.i32(i32 %zext.cmp, i32 0, i32 32) ret i64 %mask } define i64 @fold_icmp_eq_0_zext_fcmp_oeq_f32(float %a, float %b) { ; CHECK-LABEL: @fold_icmp_eq_0_zext_fcmp_oeq_f32( -; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.fcmp.f32(float [[A:%.*]], float [[B:%.*]], i32 14) +; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.fcmp.i64.f32(float [[A:%.*]], float [[B:%.*]], i32 14) ; CHECK-NEXT: ret i64 [[MASK]] ; %cmp = fcmp oeq float %a, %b %zext.cmp = zext i1 %cmp to i32 - %mask = call i64 @llvm.amdgcn.icmp.i32(i32 %zext.cmp, i32 0, i32 32) + %mask = call i64 @llvm.amdgcn.icmp.i64.i32(i32 %zext.cmp, i32 0, i32 32) ret i64 %mask } define i64 @fold_icmp_eq_0_zext_fcmp_ule_f32(float %a, float %b) { ; CHECK-LABEL: @fold_icmp_eq_0_zext_fcmp_ule_f32( -; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.fcmp.f32(float [[A:%.*]], float [[B:%.*]], i32 2) +; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.fcmp.i64.f32(float [[A:%.*]], float [[B:%.*]], i32 2) ; CHECK-NEXT: ret i64 [[MASK]] ; %cmp = fcmp ule float %a, %b %zext.cmp = zext i1 %cmp to i32 - %mask = call i64 @llvm.amdgcn.icmp.i32(i32 %zext.cmp, i32 0, i32 32) + %mask = call i64 @llvm.amdgcn.icmp.i64.i32(i32 %zext.cmp, i32 0, i32 32) ret i64 %mask } define i64 @fold_icmp_eq_0_zext_fcmp_ogt_f32(float %a, float %b) { ; CHECK-LABEL: @fold_icmp_eq_0_zext_fcmp_ogt_f32( -; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.fcmp.f32(float [[A:%.*]], float [[B:%.*]], i32 13) +; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.fcmp.i64.f32(float [[A:%.*]], float [[B:%.*]], i32 13) ; CHECK-NEXT: ret i64 [[MASK]] ; %cmp = fcmp ogt float %a, %b %zext.cmp = zext i1 %cmp to i32 - %mask = call i64 @llvm.amdgcn.icmp.i32(i32 %zext.cmp, i32 0, i32 32) + %mask = call i64 @llvm.amdgcn.icmp.i64.i32(i32 
%zext.cmp, i32 0, i32 32) ret i64 %mask } define i64 @fold_icmp_zext_icmp_eq_1_i32(i32 %a, i32 %b) { ; CHECK-LABEL: @fold_icmp_zext_icmp_eq_1_i32( -; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i32(i32 [[A:%.*]], i32 [[B:%.*]], i32 32) +; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i64.i32(i32 [[A:%.*]], i32 [[B:%.*]], i32 32) ; CHECK-NEXT: ret i64 [[MASK]] ; %cmp = icmp eq i32 %a, %b %zext.cmp = zext i1 %cmp to i32 - %mask = call i64 @llvm.amdgcn.icmp.i32(i32 %zext.cmp, i32 1, i32 32) + %mask = call i64 @llvm.amdgcn.icmp.i64.i32(i32 %zext.cmp, i32 1, i32 32) ret i64 %mask } define i64 @fold_icmp_zext_argi1_eq_1_i32(i1 %cond) { ; CHECK-LABEL: @fold_icmp_zext_argi1_eq_1_i32( ; CHECK-NEXT: [[ZEXT_COND:%.*]] = zext i1 [[COND:%.*]] to i32 -; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i32(i32 [[ZEXT_COND]], i32 0, i32 33) +; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i64.i32(i32 [[ZEXT_COND]], i32 0, i32 33) ; CHECK-NEXT: ret i64 [[MASK]] ; %zext.cond = zext i1 %cond to i32 - %mask = call i64 @llvm.amdgcn.icmp.i32(i32 %zext.cond, i32 1, i32 32) + %mask = call i64 @llvm.amdgcn.icmp.i64.i32(i32 %zext.cond, i32 1, i32 32) ret i64 %mask } define i64 @fold_icmp_zext_argi1_eq_neg1_i32(i1 %cond) { ; CHECK-LABEL: @fold_icmp_zext_argi1_eq_neg1_i32( ; CHECK-NEXT: [[ZEXT_COND:%.*]] = zext i1 [[COND:%.*]] to i32 -; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i32(i32 [[ZEXT_COND]], i32 -1, i32 32) +; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i64.i32(i32 [[ZEXT_COND]], i32 -1, i32 32) ; CHECK-NEXT: ret i64 [[MASK]] ; %zext.cond = zext i1 %cond to i32 - %mask = call i64 @llvm.amdgcn.icmp.i32(i32 %zext.cond, i32 -1, i32 32) + %mask = call i64 @llvm.amdgcn.icmp.i64.i32(i32 %zext.cond, i32 -1, i32 32) ret i64 %mask } define i64 @fold_icmp_sext_argi1_eq_1_i32(i1 %cond) { ; CHECK-LABEL: @fold_icmp_sext_argi1_eq_1_i32( ; CHECK-NEXT: [[SEXT_COND:%.*]] = sext i1 [[COND:%.*]] to i32 -; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i32(i32 [[SEXT_COND]], i32 1, i32 32) +; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i64.i32(i32 [[SEXT_COND]], i32 1, i32 32) ; CHECK-NEXT: ret i64 [[MASK]] ; %sext.cond = sext i1 %cond to i32 - %mask = call i64 @llvm.amdgcn.icmp.i32(i32 %sext.cond, i32 1, i32 32) + %mask = call i64 @llvm.amdgcn.icmp.i64.i32(i32 %sext.cond, i32 1, i32 32) ret i64 %mask } define i64 @fold_icmp_sext_argi1_eq_neg1_i32(i1 %cond) { ; CHECK-LABEL: @fold_icmp_sext_argi1_eq_neg1_i32( ; CHECK-NEXT: [[SEXT_COND:%.*]] = sext i1 [[COND:%.*]] to i32 -; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i32(i32 [[SEXT_COND]], i32 0, i32 33) +; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i64.i32(i32 [[SEXT_COND]], i32 0, i32 33) ; CHECK-NEXT: ret i64 [[MASK]] ; %sext.cond = sext i1 %cond to i32 - %mask = call i64 @llvm.amdgcn.icmp.i32(i32 %sext.cond, i32 -1, i32 32) + %mask = call i64 @llvm.amdgcn.icmp.i64.i32(i32 %sext.cond, i32 -1, i32 32) ret i64 %mask } define i64 @fold_icmp_sext_argi1_eq_neg1_i64(i1 %cond) { ; CHECK-LABEL: @fold_icmp_sext_argi1_eq_neg1_i64( ; CHECK-NEXT: [[SEXT_COND:%.*]] = sext i1 [[COND:%.*]] to i64 -; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i64(i64 [[SEXT_COND]], i64 0, i32 33) +; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i64.i64(i64 [[SEXT_COND]], i64 0, i32 33) ; CHECK-NEXT: ret i64 [[MASK]] ; %sext.cond = sext i1 %cond to i64 - %mask = call i64 @llvm.amdgcn.icmp.i64(i64 %sext.cond, i64 -1, i32 32) + %mask = call i64 @llvm.amdgcn.icmp.i64.i64(i64 %sext.cond, i64 -1, i32 32) 
ret i64 %mask } -; TODO: Should be able to fold to false define i64 @fold_icmp_sext_icmp_eq_1_i32(i32 %a, i32 %b) { ; CHECK-LABEL: @fold_icmp_sext_icmp_eq_1_i32( ; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[A:%.*]], [[B:%.*]] ; CHECK-NEXT: [[SEXT_CMP:%.*]] = sext i1 [[CMP]] to i32 -; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i32(i32 [[SEXT_CMP]], i32 1, i32 32) +; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i64.i32(i32 [[SEXT_CMP]], i32 1, i32 32) ; CHECK-NEXT: ret i64 [[MASK]] ; %cmp = icmp eq i32 %a, %b %sext.cmp = sext i1 %cmp to i32 - %mask = call i64 @llvm.amdgcn.icmp.i32(i32 %sext.cmp, i32 1, i32 32) + %mask = call i64 @llvm.amdgcn.icmp.i64.i32(i32 %sext.cmp, i32 1, i32 32) ret i64 %mask } define i64 @fold_icmp_sext_icmp_eq_neg1_i32(i32 %a, i32 %b) { ; CHECK-LABEL: @fold_icmp_sext_icmp_eq_neg1_i32( -; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i32(i32 [[A:%.*]], i32 [[B:%.*]], i32 32) +; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i64.i32(i32 [[A:%.*]], i32 [[B:%.*]], i32 32) ; CHECK-NEXT: ret i64 [[MASK]] ; %cmp = icmp eq i32 %a, %b %sext.cmp = sext i1 %cmp to i32 - %mask = call i64 @llvm.amdgcn.icmp.i32(i32 %sext.cmp, i32 -1, i32 32) + %mask = call i64 @llvm.amdgcn.icmp.i64.i32(i32 %sext.cmp, i32 -1, i32 32) ret i64 %mask } define i64 @fold_icmp_sext_icmp_sge_neg1_i32(i32 %a, i32 %b) { ; CHECK-LABEL: @fold_icmp_sext_icmp_sge_neg1_i32( -; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i32(i32 [[A:%.*]], i32 [[B:%.*]], i32 39) +; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i64.i32(i32 [[A:%.*]], i32 [[B:%.*]], i32 39) ; CHECK-NEXT: ret i64 [[MASK]] ; %cmp = icmp sge i32 %a, %b %sext.cmp = sext i1 %cmp to i32 - %mask = call i64 @llvm.amdgcn.icmp.i32(i32 %sext.cmp, i32 -1, i32 32) + %mask = call i64 @llvm.amdgcn.icmp.i64.i32(i32 %sext.cmp, i32 -1, i32 32) ret i64 %mask } define i64 @fold_not_icmp_ne_0_zext_icmp_sle_i32(i32 %a, i32 %b) { ; CHECK-LABEL: @fold_not_icmp_ne_0_zext_icmp_sle_i32( -; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i32(i32 [[A:%.*]], i32 [[B:%.*]], i32 38) +; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i64.i32(i32 [[A:%.*]], i32 [[B:%.*]], i32 38) ; CHECK-NEXT: ret i64 [[MASK]] ; %cmp = icmp sle i32 %a, %b %not = xor i1 %cmp, true %zext.cmp = zext i1 %not to i32 - %mask = call i64 @llvm.amdgcn.icmp.i32(i32 %zext.cmp, i32 0, i32 33) + %mask = call i64 @llvm.amdgcn.icmp.i64.i32(i32 %zext.cmp, i32 0, i32 33) ret i64 %mask } @@ -1943,12 +1932,12 @@ ; CHECK-LABEL: @fold_icmp_ne_0_zext_icmp_eq_i4( ; CHECK-NEXT: [[TMP1:%.*]] = zext i4 [[A:%.*]] to i16 ; CHECK-NEXT: [[TMP2:%.*]] = zext i4 [[B:%.*]] to i16 -; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i16(i16 [[TMP1]], i16 [[TMP2]], i32 32) +; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i64.i16(i16 [[TMP1]], i16 [[TMP2]], i32 32) ; CHECK-NEXT: ret i64 [[MASK]] ; %cmp = icmp eq i4 %a, %b %zext.cmp = zext i1 %cmp to i32 - %mask = call i64 @llvm.amdgcn.icmp.i32(i32 %zext.cmp, i32 0, i32 33) + %mask = call i64 @llvm.amdgcn.icmp.i64.i32(i32 %zext.cmp, i32 0, i32 33) ret i64 %mask } @@ -1956,23 +1945,23 @@ ; CHECK-LABEL: @fold_icmp_ne_0_zext_icmp_eq_i8( ; CHECK-NEXT: [[TMP1:%.*]] = zext i8 [[A:%.*]] to i16 ; CHECK-NEXT: [[TMP2:%.*]] = zext i8 [[B:%.*]] to i16 -; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i16(i16 [[TMP1]], i16 [[TMP2]], i32 32) +; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i64.i16(i16 [[TMP1]], i16 [[TMP2]], i32 32) ; CHECK-NEXT: ret i64 [[MASK]] ; %cmp = icmp eq i8 %a, %b %zext.cmp = zext i1 
%cmp to i32 - %mask = call i64 @llvm.amdgcn.icmp.i32(i32 %zext.cmp, i32 0, i32 33) + %mask = call i64 @llvm.amdgcn.icmp.i64.i32(i32 %zext.cmp, i32 0, i32 33) ret i64 %mask } define i64 @fold_icmp_ne_0_zext_icmp_eq_i16(i16 %a, i16 %b) { ; CHECK-LABEL: @fold_icmp_ne_0_zext_icmp_eq_i16( -; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i16(i16 [[A:%.*]], i16 [[B:%.*]], i32 32) +; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i64.i16(i16 [[A:%.*]], i16 [[B:%.*]], i32 32) ; CHECK-NEXT: ret i64 [[MASK]] ; %cmp = icmp eq i16 %a, %b %zext.cmp = zext i1 %cmp to i32 - %mask = call i64 @llvm.amdgcn.icmp.i32(i32 %zext.cmp, i32 0, i32 33) + %mask = call i64 @llvm.amdgcn.icmp.i64.i32(i32 %zext.cmp, i32 0, i32 33) ret i64 %mask } @@ -1980,12 +1969,12 @@ ; CHECK-LABEL: @fold_icmp_ne_0_zext_icmp_eq_i36( ; CHECK-NEXT: [[TMP1:%.*]] = zext i36 [[A:%.*]] to i64 ; CHECK-NEXT: [[TMP2:%.*]] = zext i36 [[B:%.*]] to i64 -; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i64(i64 [[TMP1]], i64 [[TMP2]], i32 32) +; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i64.i64(i64 [[TMP1]], i64 [[TMP2]], i32 32) ; CHECK-NEXT: ret i64 [[MASK]] ; %cmp = icmp eq i36 %a, %b %zext.cmp = zext i1 %cmp to i32 - %mask = call i64 @llvm.amdgcn.icmp.i32(i32 %zext.cmp, i32 0, i32 33) + %mask = call i64 @llvm.amdgcn.icmp.i64.i32(i32 %zext.cmp, i32 0, i32 33) ret i64 %mask } @@ -1993,37 +1982,36 @@ ; CHECK-LABEL: @fold_icmp_ne_0_zext_icmp_eq_i128( ; CHECK-NEXT: [[CMP:%.*]] = icmp eq i128 [[A:%.*]], [[B:%.*]] ; CHECK-NEXT: [[ZEXT_CMP:%.*]] = zext i1 [[CMP]] to i32 -; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i32(i32 [[ZEXT_CMP]], i32 0, i32 33) +; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i64.i32(i32 [[ZEXT_CMP]], i32 0, i32 33) ; CHECK-NEXT: ret i64 [[MASK]] ; %cmp = icmp eq i128 %a, %b %zext.cmp = zext i1 %cmp to i32 - %mask = call i64 @llvm.amdgcn.icmp.i32(i32 %zext.cmp, i32 0, i32 33) + %mask = call i64 @llvm.amdgcn.icmp.i64.i32(i32 %zext.cmp, i32 0, i32 33) ret i64 %mask } define i64 @fold_icmp_ne_0_zext_fcmp_oeq_f16(half %a, half %b) { ; CHECK-LABEL: @fold_icmp_ne_0_zext_fcmp_oeq_f16( -; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.fcmp.f16(half [[A:%.*]], half [[B:%.*]], i32 1) +; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.fcmp.i64.f16(half [[A:%.*]], half [[B:%.*]], i32 1) ; CHECK-NEXT: ret i64 [[MASK]] ; %cmp = fcmp oeq half %a, %b %zext.cmp = zext i1 %cmp to i32 - %mask = call i64 @llvm.amdgcn.icmp.i32(i32 %zext.cmp, i32 0, i32 33) + %mask = call i64 @llvm.amdgcn.icmp.i64.i32(i32 %zext.cmp, i32 0, i32 33) ret i64 %mask } define i64 @fold_icmp_ne_0_zext_fcmp_oeq_f128(fp128 %a, fp128 %b) { -; ; CHECK-LABEL: @fold_icmp_ne_0_zext_fcmp_oeq_f128( ; CHECK-NEXT: [[CMP:%.*]] = fcmp oeq fp128 [[A:%.*]], [[B:%.*]] ; CHECK-NEXT: [[ZEXT_CMP:%.*]] = zext i1 [[CMP]] to i32 -; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i32(i32 [[ZEXT_CMP]], i32 0, i32 33) +; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i64.i32(i32 [[ZEXT_CMP]], i32 0, i32 33) ; CHECK-NEXT: ret i64 [[MASK]] ; %cmp = fcmp oeq fp128 %a, %b %zext.cmp = zext i1 %cmp to i32 - %mask = call i64 @llvm.amdgcn.icmp.i32(i32 %zext.cmp, i32 0, i32 33) + %mask = call i64 @llvm.amdgcn.icmp.i64.i32(i32 %zext.cmp, i32 0, i32 33) ret i64 %mask } @@ -2031,12 +2019,12 @@ ; CHECK-LABEL: @fold_icmp_ne_0_zext_icmp_slt_i4( ; CHECK-NEXT: [[TMP1:%.*]] = sext i4 [[A:%.*]] to i16 ; CHECK-NEXT: [[TMP2:%.*]] = sext i4 [[B:%.*]] to i16 -; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i16(i16 [[TMP1]], i16 [[TMP2]], i32 40) 
+; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i64.i16(i16 [[TMP1]], i16 [[TMP2]], i32 40) ; CHECK-NEXT: ret i64 [[MASK]] ; %cmp = icmp slt i4 %a, %b %zext.cmp = zext i1 %cmp to i32 - %mask = call i64 @llvm.amdgcn.icmp.i32(i32 %zext.cmp, i32 0, i32 33) + %mask = call i64 @llvm.amdgcn.icmp.i64.i32(i32 %zext.cmp, i32 0, i32 33) ret i64 %mask } @@ -2044,23 +2032,23 @@ ; CHECK-LABEL: @fold_icmp_ne_0_zext_icmp_slt_i8( ; CHECK-NEXT: [[TMP1:%.*]] = sext i8 [[A:%.*]] to i16 ; CHECK-NEXT: [[TMP2:%.*]] = sext i8 [[B:%.*]] to i16 -; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i16(i16 [[TMP1]], i16 [[TMP2]], i32 40) +; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i64.i16(i16 [[TMP1]], i16 [[TMP2]], i32 40) ; CHECK-NEXT: ret i64 [[MASK]] ; %cmp = icmp slt i8 %a, %b %zext.cmp = zext i1 %cmp to i32 - %mask = call i64 @llvm.amdgcn.icmp.i32(i32 %zext.cmp, i32 0, i32 33) + %mask = call i64 @llvm.amdgcn.icmp.i64.i32(i32 %zext.cmp, i32 0, i32 33) ret i64 %mask } define i64 @fold_icmp_ne_0_zext_icmp_slt_i16(i16 %a, i16 %b) { ; CHECK-LABEL: @fold_icmp_ne_0_zext_icmp_slt_i16( -; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i16(i16 [[A:%.*]], i16 [[B:%.*]], i32 40) +; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i64.i16(i16 [[A:%.*]], i16 [[B:%.*]], i32 40) ; CHECK-NEXT: ret i64 [[MASK]] ; %cmp = icmp slt i16 %a, %b %zext.cmp = zext i1 %cmp to i32 - %mask = call i64 @llvm.amdgcn.icmp.i32(i32 %zext.cmp, i32 0, i32 33) + %mask = call i64 @llvm.amdgcn.icmp.i64.i32(i32 %zext.cmp, i32 0, i32 33) ret i64 %mask } @@ -2068,12 +2056,12 @@ ; CHECK-LABEL: @fold_icmp_ne_0_zext_icmp_ult_i4( ; CHECK-NEXT: [[TMP1:%.*]] = zext i4 [[A:%.*]] to i16 ; CHECK-NEXT: [[TMP2:%.*]] = zext i4 [[B:%.*]] to i16 -; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i16(i16 [[TMP1]], i16 [[TMP2]], i32 36) +; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i64.i16(i16 [[TMP1]], i16 [[TMP2]], i32 36) ; CHECK-NEXT: ret i64 [[MASK]] ; %cmp = icmp ult i4 %a, %b %zext.cmp = zext i1 %cmp to i32 - %mask = call i64 @llvm.amdgcn.icmp.i32(i32 %zext.cmp, i32 0, i32 33) + %mask = call i64 @llvm.amdgcn.icmp.i64.i32(i32 %zext.cmp, i32 0, i32 33) ret i64 %mask } @@ -2081,257 +2069,254 @@ ; CHECK-LABEL: @fold_icmp_ne_0_zext_icmp_ult_i8( ; CHECK-NEXT: [[TMP1:%.*]] = zext i8 [[A:%.*]] to i16 ; CHECK-NEXT: [[TMP2:%.*]] = zext i8 [[B:%.*]] to i16 -; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i16(i16 [[TMP1]], i16 [[TMP2]], i32 36) +; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i64.i16(i16 [[TMP1]], i16 [[TMP2]], i32 36) ; CHECK-NEXT: ret i64 [[MASK]] ; %cmp = icmp ult i8 %a, %b %zext.cmp = zext i1 %cmp to i32 - %mask = call i64 @llvm.amdgcn.icmp.i32(i32 %zext.cmp, i32 0, i32 33) + %mask = call i64 @llvm.amdgcn.icmp.i64.i32(i32 %zext.cmp, i32 0, i32 33) ret i64 %mask } define i64 @fold_icmp_ne_0_zext_icmp_ult_i16(i16 %a, i16 %b) { ; CHECK-LABEL: @fold_icmp_ne_0_zext_icmp_ult_i16( -; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i16(i16 [[A:%.*]], i16 [[B:%.*]], i32 36) +; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i64.i16(i16 [[A:%.*]], i16 [[B:%.*]], i32 36) ; CHECK-NEXT: ret i64 [[MASK]] ; %cmp = icmp ult i16 %a, %b %zext.cmp = zext i1 %cmp to i32 - %mask = call i64 @llvm.amdgcn.icmp.i32(i32 %zext.cmp, i32 0, i32 33) + %mask = call i64 @llvm.amdgcn.icmp.i64.i32(i32 %zext.cmp, i32 0, i32 33) ret i64 %mask } -; 1-bit NE comparisons - define i64 @fold_icmp_i1_ne_0_icmp_eq_i1(i32 %a, i32 %b) { ; CHECK-LABEL: @fold_icmp_i1_ne_0_icmp_eq_i1( ; CHECK-NEXT: [[CMP:%.*]] = icmp 
eq i32 [[A:%.*]], [[B:%.*]] -; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i1(i1 [[CMP]], i1 false, i32 33) +; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i64.i1(i1 [[CMP]], i1 false, i32 33) ; CHECK-NEXT: ret i64 [[MASK]] ; %cmp = icmp eq i32 %a, %b - %mask = call i64 @llvm.amdgcn.icmp.i1(i1 %cmp, i1 false, i32 33) + %mask = call i64 @llvm.amdgcn.icmp.i64.i1(i1 %cmp, i1 false, i32 33) ret i64 %mask } define i64 @fold_icmp_i1_ne_0_icmp_ne_i1(i32 %a, i32 %b) { ; CHECK-LABEL: @fold_icmp_i1_ne_0_icmp_ne_i1( ; CHECK-NEXT: [[CMP:%.*]] = icmp ne i32 [[A:%.*]], [[B:%.*]] -; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i1(i1 [[CMP]], i1 false, i32 33) +; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i64.i1(i1 [[CMP]], i1 false, i32 33) ; CHECK-NEXT: ret i64 [[MASK]] ; %cmp = icmp ne i32 %a, %b - %mask = call i64 @llvm.amdgcn.icmp.i1(i1 %cmp, i1 false, i32 33) + %mask = call i64 @llvm.amdgcn.icmp.i64.i1(i1 %cmp, i1 false, i32 33) ret i64 %mask } define i64 @fold_icmp_i1_ne_0_icmp_sle_i1(i32 %a, i32 %b) { ; CHECK-LABEL: @fold_icmp_i1_ne_0_icmp_sle_i1( ; CHECK-NEXT: [[CMP:%.*]] = icmp sle i32 [[A:%.*]], [[B:%.*]] -; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i1(i1 [[CMP]], i1 false, i32 33) +; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i64.i1(i1 [[CMP]], i1 false, i32 33) ; CHECK-NEXT: ret i64 [[MASK]] ; %cmp = icmp sle i32 %a, %b - %mask = call i64 @llvm.amdgcn.icmp.i1(i1 %cmp, i1 false, i32 33) + %mask = call i64 @llvm.amdgcn.icmp.i64.i1(i1 %cmp, i1 false, i32 33) ret i64 %mask } define i64 @fold_icmp_i1_ne_0_icmp_ugt_i64(i64 %a, i64 %b) { ; CHECK-LABEL: @fold_icmp_i1_ne_0_icmp_ugt_i64( ; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i64 [[A:%.*]], [[B:%.*]] -; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i1(i1 [[CMP]], i1 false, i32 33) +; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i64.i1(i1 [[CMP]], i1 false, i32 33) ; CHECK-NEXT: ret i64 [[MASK]] ; %cmp = icmp ugt i64 %a, %b - %mask = call i64 @llvm.amdgcn.icmp.i1(i1 %cmp, i1 false, i32 33) + %mask = call i64 @llvm.amdgcn.icmp.i64.i1(i1 %cmp, i1 false, i32 33) ret i64 %mask } define i64 @fold_icmp_i1_ne_0_icmp_ult_swap_i64(i64 %a, i64 %b) { ; CHECK-LABEL: @fold_icmp_i1_ne_0_icmp_ult_swap_i64( ; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i64 [[A:%.*]], [[B:%.*]] -; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i1(i1 [[CMP]], i1 false, i32 33) +; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i64.i1(i1 [[CMP]], i1 false, i32 33) ; CHECK-NEXT: ret i64 [[MASK]] ; %cmp = icmp ugt i64 %a, %b - %mask = call i64 @llvm.amdgcn.icmp.i1(i1 false, i1 %cmp, i32 33) + %mask = call i64 @llvm.amdgcn.icmp.i64.i1(i1 false, i1 %cmp, i32 33) ret i64 %mask } define i64 @fold_icmp_i1_ne_0_fcmp_oeq_f32(float %a, float %b) { ; CHECK-LABEL: @fold_icmp_i1_ne_0_fcmp_oeq_f32( ; CHECK-NEXT: [[CMP:%.*]] = fcmp oeq float [[A:%.*]], [[B:%.*]] -; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i1(i1 [[CMP]], i1 false, i32 33) +; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i64.i1(i1 [[CMP]], i1 false, i32 33) ; CHECK-NEXT: ret i64 [[MASK]] ; %cmp = fcmp oeq float %a, %b - %mask = call i64 @llvm.amdgcn.icmp.i1(i1 %cmp, i1 false, i32 33) + %mask = call i64 @llvm.amdgcn.icmp.i64.i1(i1 %cmp, i1 false, i32 33) ret i64 %mask } define i64 @fold_icmp_i1_ne_0_fcmp_une_f32(float %a, float %b) { ; CHECK-LABEL: @fold_icmp_i1_ne_0_fcmp_une_f32( ; CHECK-NEXT: [[CMP:%.*]] = fcmp une float [[A:%.*]], [[B:%.*]] -; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i1(i1 [[CMP]], i1 false, i32 33) +; 
CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i64.i1(i1 [[CMP]], i1 false, i32 33) ; CHECK-NEXT: ret i64 [[MASK]] ; %cmp = fcmp une float %a, %b - %mask = call i64 @llvm.amdgcn.icmp.i1(i1 %cmp, i1 false, i32 33) + %mask = call i64 @llvm.amdgcn.icmp.i64.i1(i1 %cmp, i1 false, i32 33) ret i64 %mask } define i64 @fold_icmp_i1_ne_0_fcmp_olt_f64(double %a, double %b) { ; CHECK-LABEL: @fold_icmp_i1_ne_0_fcmp_olt_f64( ; CHECK-NEXT: [[CMP:%.*]] = fcmp olt double [[A:%.*]], [[B:%.*]] -; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i1(i1 [[CMP]], i1 false, i32 33) +; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i64.i1(i1 [[CMP]], i1 false, i32 33) ; CHECK-NEXT: ret i64 [[MASK]] ; %cmp = fcmp olt double %a, %b - %mask = call i64 @llvm.amdgcn.icmp.i1(i1 %cmp, i1 false, i32 33) + %mask = call i64 @llvm.amdgcn.icmp.i64.i1(i1 %cmp, i1 false, i32 33) ret i64 %mask } define i64 @fold_icmp_i1_ne_0_icmp_eq_i4(i4 %a, i4 %b) { ; CHECK-LABEL: @fold_icmp_i1_ne_0_icmp_eq_i4( ; CHECK-NEXT: [[CMP:%.*]] = icmp eq i4 [[A:%.*]], [[B:%.*]] -; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i1(i1 [[CMP]], i1 false, i32 33) +; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i64.i1(i1 [[CMP]], i1 false, i32 33) ; CHECK-NEXT: ret i64 [[MASK]] ; %cmp = icmp eq i4 %a, %b - %mask = call i64 @llvm.amdgcn.icmp.i1(i1 %cmp, i1 false, i32 33) + %mask = call i64 @llvm.amdgcn.icmp.i64.i1(i1 %cmp, i1 false, i32 33) ret i64 %mask } define i64 @fold_icmp_i1_ne_0_icmp_eq_i8(i8 %a, i8 %b) { ; CHECK-LABEL: @fold_icmp_i1_ne_0_icmp_eq_i8( ; CHECK-NEXT: [[CMP:%.*]] = icmp eq i8 [[A:%.*]], [[B:%.*]] -; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i1(i1 [[CMP]], i1 false, i32 33) +; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i64.i1(i1 [[CMP]], i1 false, i32 33) ; CHECK-NEXT: ret i64 [[MASK]] ; %cmp = icmp eq i8 %a, %b - %mask = call i64 @llvm.amdgcn.icmp.i1(i1 %cmp, i1 false, i32 33) + %mask = call i64 @llvm.amdgcn.icmp.i64.i1(i1 %cmp, i1 false, i32 33) ret i64 %mask } define i64 @fold_icmp_i1_ne_0_icmp_eq_i16(i16 %a, i16 %b) { ; CHECK-LABEL: @fold_icmp_i1_ne_0_icmp_eq_i16( ; CHECK-NEXT: [[CMP:%.*]] = icmp eq i16 [[A:%.*]], [[B:%.*]] -; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i1(i1 [[CMP]], i1 false, i32 33) +; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i64.i1(i1 [[CMP]], i1 false, i32 33) ; CHECK-NEXT: ret i64 [[MASK]] ; %cmp = icmp eq i16 %a, %b - %mask = call i64 @llvm.amdgcn.icmp.i1(i1 %cmp, i1 false, i32 33) + %mask = call i64 @llvm.amdgcn.icmp.i64.i1(i1 %cmp, i1 false, i32 33) ret i64 %mask } define i64 @fold_icmp_i1_ne_0_icmp_eq_i36(i36 %a, i36 %b) { ; CHECK-LABEL: @fold_icmp_i1_ne_0_icmp_eq_i36( ; CHECK-NEXT: [[CMP:%.*]] = icmp eq i36 [[A:%.*]], [[B:%.*]] -; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i1(i1 [[CMP]], i1 false, i32 33) +; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i64.i1(i1 [[CMP]], i1 false, i32 33) ; CHECK-NEXT: ret i64 [[MASK]] ; %cmp = icmp eq i36 %a, %b - %mask = call i64 @llvm.amdgcn.icmp.i1(i1 %cmp, i1 false, i32 33) + %mask = call i64 @llvm.amdgcn.icmp.i64.i1(i1 %cmp, i1 false, i32 33) ret i64 %mask } define i64 @fold_icmp_i1_ne_0_icmp_eq_i128(i128 %a, i128 %b) { ; CHECK-LABEL: @fold_icmp_i1_ne_0_icmp_eq_i128( ; CHECK-NEXT: [[CMP:%.*]] = icmp eq i128 [[A:%.*]], [[B:%.*]] -; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i1(i1 [[CMP]], i1 false, i32 33) +; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i64.i1(i1 [[CMP]], i1 false, i32 33) ; CHECK-NEXT: ret i64 [[MASK]] ; %cmp = icmp eq i128 %a, %b - 
%mask = call i64 @llvm.amdgcn.icmp.i1(i1 %cmp, i1 false, i32 33) + %mask = call i64 @llvm.amdgcn.icmp.i64.i1(i1 %cmp, i1 false, i32 33) ret i64 %mask } define i64 @fold_icmp_i1_ne_0_fcmp_oeq_f16(half %a, half %b) { ; CHECK-LABEL: @fold_icmp_i1_ne_0_fcmp_oeq_f16( ; CHECK-NEXT: [[CMP:%.*]] = fcmp oeq half [[A:%.*]], [[B:%.*]] -; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i1(i1 [[CMP]], i1 false, i32 33) +; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i64.i1(i1 [[CMP]], i1 false, i32 33) ; CHECK-NEXT: ret i64 [[MASK]] ; %cmp = fcmp oeq half %a, %b - %mask = call i64 @llvm.amdgcn.icmp.i1(i1 %cmp, i1 false, i32 33) + %mask = call i64 @llvm.amdgcn.icmp.i64.i1(i1 %cmp, i1 false, i32 33) ret i64 %mask } define i64 @fold_icmp_i1_ne_0_fcmp_oeq_f128(fp128 %a, fp128 %b) { -; ; CHECK-LABEL: @fold_icmp_i1_ne_0_fcmp_oeq_f128( ; CHECK-NEXT: [[CMP:%.*]] = fcmp oeq fp128 [[A:%.*]], [[B:%.*]] -; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i1(i1 [[CMP]], i1 false, i32 33) +; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i64.i1(i1 [[CMP]], i1 false, i32 33) ; CHECK-NEXT: ret i64 [[MASK]] ; %cmp = fcmp oeq fp128 %a, %b - %mask = call i64 @llvm.amdgcn.icmp.i1(i1 %cmp, i1 false, i32 33) + %mask = call i64 @llvm.amdgcn.icmp.i64.i1(i1 %cmp, i1 false, i32 33) ret i64 %mask } define i64 @fold_icmp_i1_ne_0_icmp_slt_i4(i4 %a, i4 %b) { ; CHECK-LABEL: @fold_icmp_i1_ne_0_icmp_slt_i4( ; CHECK-NEXT: [[CMP:%.*]] = icmp slt i4 [[A:%.*]], [[B:%.*]] -; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i1(i1 [[CMP]], i1 false, i32 33) +; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i64.i1(i1 [[CMP]], i1 false, i32 33) ; CHECK-NEXT: ret i64 [[MASK]] ; %cmp = icmp slt i4 %a, %b - %mask = call i64 @llvm.amdgcn.icmp.i1(i1 %cmp, i1 false, i32 33) + %mask = call i64 @llvm.amdgcn.icmp.i64.i1(i1 %cmp, i1 false, i32 33) ret i64 %mask } define i64 @fold_icmp_i1_ne_0_icmp_slt_i8(i8 %a, i8 %b) { ; CHECK-LABEL: @fold_icmp_i1_ne_0_icmp_slt_i8( ; CHECK-NEXT: [[CMP:%.*]] = icmp slt i8 [[A:%.*]], [[B:%.*]] -; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i1(i1 [[CMP]], i1 false, i32 33) +; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i64.i1(i1 [[CMP]], i1 false, i32 33) ; CHECK-NEXT: ret i64 [[MASK]] ; %cmp = icmp slt i8 %a, %b - %mask = call i64 @llvm.amdgcn.icmp.i1(i1 %cmp, i1 false, i32 33) + %mask = call i64 @llvm.amdgcn.icmp.i64.i1(i1 %cmp, i1 false, i32 33) ret i64 %mask } define i64 @fold_icmp_i1_ne_0_icmp_slt_i16(i16 %a, i16 %b) { ; CHECK-LABEL: @fold_icmp_i1_ne_0_icmp_slt_i16( ; CHECK-NEXT: [[CMP:%.*]] = icmp slt i16 [[A:%.*]], [[B:%.*]] -; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i1(i1 [[CMP]], i1 false, i32 33) +; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i64.i1(i1 [[CMP]], i1 false, i32 33) ; CHECK-NEXT: ret i64 [[MASK]] ; %cmp = icmp slt i16 %a, %b - %mask = call i64 @llvm.amdgcn.icmp.i1(i1 %cmp, i1 false, i32 33) + %mask = call i64 @llvm.amdgcn.icmp.i64.i1(i1 %cmp, i1 false, i32 33) ret i64 %mask } define i64 @fold_icmp_i1_ne_0_icmp_ult_i4(i4 %a, i4 %b) { ; CHECK-LABEL: @fold_icmp_i1_ne_0_icmp_ult_i4( ; CHECK-NEXT: [[CMP:%.*]] = icmp ult i4 [[A:%.*]], [[B:%.*]] -; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i1(i1 [[CMP]], i1 false, i32 33) +; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i64.i1(i1 [[CMP]], i1 false, i32 33) ; CHECK-NEXT: ret i64 [[MASK]] ; %cmp = icmp ult i4 %a, %b - %mask = call i64 @llvm.amdgcn.icmp.i1(i1 %cmp, i1 false, i32 33) + %mask = call i64 @llvm.amdgcn.icmp.i64.i1(i1 %cmp, i1 false, i32 33) ret 
i64 %mask } define i64 @fold_icmp_i1_ne_0_icmp_ult_i8(i8 %a, i8 %b) { ; CHECK-LABEL: @fold_icmp_i1_ne_0_icmp_ult_i8( ; CHECK-NEXT: [[CMP:%.*]] = icmp ult i8 [[A:%.*]], [[B:%.*]] -; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i1(i1 [[CMP]], i1 false, i32 33) +; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i64.i1(i1 [[CMP]], i1 false, i32 33) ; CHECK-NEXT: ret i64 [[MASK]] ; %cmp = icmp ult i8 %a, %b - %mask = call i64 @llvm.amdgcn.icmp.i1(i1 %cmp, i1 false, i32 33) + %mask = call i64 @llvm.amdgcn.icmp.i64.i1(i1 %cmp, i1 false, i32 33) ret i64 %mask } define i64 @fold_icmp_i1_ne_0_icmp_ult_i16(i16 %a, i16 %b) { ; CHECK-LABEL: @fold_icmp_i1_ne_0_icmp_ult_i16( ; CHECK-NEXT: [[CMP:%.*]] = icmp ult i16 [[A:%.*]], [[B:%.*]] -; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i1(i1 [[CMP]], i1 false, i32 33) +; CHECK-NEXT: [[MASK:%.*]] = call i64 @llvm.amdgcn.icmp.i64.i1(i1 [[CMP]], i1 false, i32 33) ; CHECK-NEXT: ret i64 [[MASK]] ; %cmp = icmp ult i16 %a, %b - %mask = call i64 @llvm.amdgcn.icmp.i1(i1 %cmp, i1 false, i32 33) + %mask = call i64 @llvm.amdgcn.icmp.i64.i1(i1 %cmp, i1 false, i32 33) ret i64 %mask } @@ -2339,17 +2324,17 @@ ; llvm.amdgcn.fcmp ; -------------------------------------------------------------------- -declare i64 @llvm.amdgcn.fcmp.f32(float, float, i32 immarg) nounwind readnone convergent +declare i64 @llvm.amdgcn.fcmp.i64.f32(float, float, i32 immarg) nounwind readnone convergent define i64 @invalid_fcmp_code(float %a, float %b) { ; CHECK-LABEL: @invalid_fcmp_code( -; CHECK-NEXT: [[UNDER:%.*]] = call i64 @llvm.amdgcn.fcmp.f32(float [[A:%.*]], float [[B:%.*]], i32 -1) -; CHECK-NEXT: [[OVER:%.*]] = call i64 @llvm.amdgcn.fcmp.f32(float [[A]], float [[B]], i32 16) +; CHECK-NEXT: [[UNDER:%.*]] = call i64 @llvm.amdgcn.fcmp.i64.f32(float [[A:%.*]], float [[B:%.*]], i32 -1) +; CHECK-NEXT: [[OVER:%.*]] = call i64 @llvm.amdgcn.fcmp.i64.f32(float [[A]], float [[B]], i32 16) ; CHECK-NEXT: [[OR:%.*]] = or i64 [[UNDER]], [[OVER]] ; CHECK-NEXT: ret i64 [[OR]] ; - %under = call i64 @llvm.amdgcn.fcmp.f32(float %a, float %b, i32 -1) - %over = call i64 @llvm.amdgcn.fcmp.f32(float %a, float %b, i32 16) + %under = call i64 @llvm.amdgcn.fcmp.i64.f32(float %a, float %b, i32 -1) + %over = call i64 @llvm.amdgcn.fcmp.i64.f32(float %a, float %b, i32 16) %or = or i64 %under, %over ret i64 %or } @@ -2358,7 +2343,7 @@ ; CHECK-LABEL: @fcmp_constant_inputs_false( ; CHECK-NEXT: ret i64 0 ; - %result = call i64 @llvm.amdgcn.fcmp.f32(float 2.0, float 4.0, i32 1) + %result = call i64 @llvm.amdgcn.fcmp.i64.f32(float 2.0, float 4.0, i32 1) ret i64 %result } @@ -2367,16 +2352,16 @@ ; CHECK-NEXT: [[RESULT:%.*]] = call i64 @llvm.read_register.i64(metadata !0) #5 ; CHECK-NEXT: ret i64 [[RESULT]] ; - %result = call i64 @llvm.amdgcn.fcmp.f32(float 2.0, float 4.0, i32 4) + %result = call i64 @llvm.amdgcn.fcmp.i64.f32(float 2.0, float 4.0, i32 4) ret i64 %result } define i64 @fcmp_constant_to_rhs_olt(float %x) { ; CHECK-LABEL: @fcmp_constant_to_rhs_olt( -; CHECK-NEXT: [[RESULT:%.*]] = call i64 @llvm.amdgcn.fcmp.f32(float [[X:%.*]], float 4.000000e+00, i32 2) +; CHECK-NEXT: [[RESULT:%.*]] = call i64 @llvm.amdgcn.fcmp.i64.f32(float [[X:%.*]], float 4.000000e+00, i32 2) ; CHECK-NEXT: ret i64 [[RESULT]] ; - %result = call i64 @llvm.amdgcn.fcmp.f32(float 4.0, float %x, i32 4) + %result = call i64 @llvm.amdgcn.fcmp.i64.f32(float 4.0, float %x, i32 4) ret i64 %result } @@ -2473,4 +2458,3 @@ } ; CHECK: attributes #5 = { convergent } - Index: test/Verifier/AMDGPU/intrinsic-immarg.ll 
===================================================================
--- test/Verifier/AMDGPU/intrinsic-immarg.ll
+++ test/Verifier/AMDGPU/intrinsic-immarg.ll
@@ -123,22 +123,22 @@
   ret void
 }
 
-declare i64 @llvm.amdgcn.icmp.i32(i32, i32, i32)
+declare i64 @llvm.amdgcn.icmp.i64.i32(i32, i32, i32)
 
 define i64 @invalid_nonconstant_icmp_code(i32 %a, i32 %b, i32 %c) {
   ; CHECK: immarg operand has non-immediate parameter
   ; CHECK-NEXT: i32 %c
-  ; CHECK-NEXT: %result = call i64 @llvm.amdgcn.icmp.i32(i32 %a, i32 %b, i32 %c)
-  %result = call i64 @llvm.amdgcn.icmp.i32(i32 %a, i32 %b, i32 %c)
+  ; CHECK-NEXT: %result = call i64 @llvm.amdgcn.icmp.i64.i32(i32 %a, i32 %b, i32 %c)
+  %result = call i64 @llvm.amdgcn.icmp.i64.i32(i32 %a, i32 %b, i32 %c)
   ret i64 %result
 }
 
-declare i64 @llvm.amdgcn.fcmp.f32(float, float, i32)
+declare i64 @llvm.amdgcn.fcmp.i64.f32(float, float, i32)
 
 define i64 @invalid_nonconstant_fcmp_code(float %a, float %b, i32 %c) {
   ; CHECK: immarg operand has non-immediate parameter
   ; CHECK-NEXT: i32 %c
-  ; CHECK-NEXT: %result = call i64 @llvm.amdgcn.fcmp.f32(float %a, float %b, i32 %c)
-  %result = call i64 @llvm.amdgcn.fcmp.f32(float %a, float %b, i32 %c)
+  ; CHECK-NEXT: %result = call i64 @llvm.amdgcn.fcmp.i64.f32(float %a, float %b, i32 %c)
+  %result = call i64 @llvm.amdgcn.fcmp.i64.f32(float %a, float %b, i32 %c)
   ret i64 %result
 }
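
Note on the renamed compare intrinsics exercised by the tests above: the result type is now part of the mangled name (result suffix first, then operand suffix). A minimal IR sketch of what the updated tests expect follows. The i64-result declarations are taken from the tests in this patch; the i32-result variant for a wave32 target is an assumption based on the same mangling scheme and does not appear in this patch, and the condition-code immediates (32 = eq, 33 = ne) follow the values used in the folds above.

; Overloaded lane-mask compares: @llvm.amdgcn.icmp.<result>.<operand>.
declare i64 @llvm.amdgcn.icmp.i64.i32(i32, i32, i32 immarg) #0
declare i64 @llvm.amdgcn.fcmp.i64.f32(float, float, i32 immarg) #0

; Assumed wave32 counterpart returning a 32-bit lane mask (not part of this patch).
declare i32 @llvm.amdgcn.icmp.i32.i32(i32, i32, i32 immarg) #0

; Ballot of "%a == %b" on a wave64 target: each active lane contributes one bit of the mask.
define i64 @ballot_eq_example(i32 %a, i32 %b) {
  %mask = call i64 @llvm.amdgcn.icmp.i64.i32(i32 %a, i32 %b, i32 32) ; 32 = eq
  ret i64 %mask
}

attributes #0 = { nounwind readnone convergent }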