diff --git a/llvm/include/llvm/CodeGen/GlobalISel/GenericMachineInstrs.h b/llvm/include/llvm/CodeGen/GlobalISel/GenericMachineInstrs.h --- a/llvm/include/llvm/CodeGen/GlobalISel/GenericMachineInstrs.h +++ b/llvm/include/llvm/CodeGen/GlobalISel/GenericMachineInstrs.h @@ -358,6 +358,29 @@ } }; +/// Represents a call to an intrinsic. +class GIntrinsic final : public GenericMachineInstr { +public: + Intrinsic::ID getIntrinsicID() const { + return getOperand(getNumExplicitDefs()).getIntrinsicID(); + } + + bool is(Intrinsic::ID ID) const { return getIntrinsicID() == ID; } + bool hasSideEffects() const { + return getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS; + } + + static bool classof(const MachineInstr *MI) { + switch (MI->getOpcode()) { + case TargetOpcode::G_INTRINSIC: + case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS: + return true; + default: + return false; + } + } +}; + } // namespace llvm #endif // LLVM_CODEGEN_GLOBALISEL_GENERICMACHINEINSTRS_H diff --git a/llvm/include/llvm/CodeGen/MachineInstr.h b/llvm/include/llvm/CodeGen/MachineInstr.h --- a/llvm/include/llvm/CodeGen/MachineInstr.h +++ b/llvm/include/llvm/CodeGen/MachineInstr.h @@ -1930,12 +1930,6 @@ /// and point them to \p Reg instead. void changeDebugValuesDefReg(Register Reg); - /// Returns the Intrinsic::ID for this instruction. - /// \pre Must have an intrinsic ID operand. - unsigned getIntrinsicID() const { - return getOperand(getNumExplicitDefs()).getIntrinsicID(); - } - /// Sets all register debug operands in this debug value instruction to be /// undef. 
void setDebugValueUndef() { diff --git a/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp b/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp --- a/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp +++ b/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp @@ -5742,7 +5742,7 @@ bool AArch64InstructionSelector::selectIntrinsicWithSideEffects( MachineInstr &I, MachineRegisterInfo &MRI) { // Find the intrinsic ID. - unsigned IntrinID = I.getIntrinsicID(); + unsigned IntrinID = cast<GIntrinsic>(I).getIntrinsicID(); const LLT S8 = LLT::scalar(8); const LLT S16 = LLT::scalar(16); @@ -5891,7 +5891,7 @@ bool AArch64InstructionSelector::selectIntrinsic(MachineInstr &I, MachineRegisterInfo &MRI) { - unsigned IntrinID = I.getIntrinsicID(); + unsigned IntrinID = cast<GIntrinsic>(I).getIntrinsicID(); switch (IntrinID) { default: diff --git a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp --- a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp +++ b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp @@ -14,6 +14,7 @@ #include "AArch64LegalizerInfo.h" #include "AArch64RegisterBankInfo.h" #include "AArch64Subtarget.h" +#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h" #include "llvm/CodeGen/GlobalISel/LegalizerHelper.h" #include "llvm/CodeGen/GlobalISel/LegalizerInfo.h" #include "llvm/CodeGen/GlobalISel/MIPatternMatch.h" @@ -1119,7 +1120,7 @@ bool AArch64LegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper, MachineInstr &MI) const { - switch (MI.getIntrinsicID()) { + switch (cast<GIntrinsic>(MI).getIntrinsicID()) { case Intrinsic::vacopy: { unsigned PtrSize = ST->isTargetILP32() ?
4 : 8; unsigned VaListSize = diff --git a/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp b/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp --- a/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp +++ b/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp @@ -486,9 +486,8 @@ /// \returns true if a given intrinsic only uses and defines FPRs. static bool isFPIntrinsic(const MachineRegisterInfo &MRI, const MachineInstr &MI) { - assert(MI.getOpcode() == TargetOpcode::G_INTRINSIC); // TODO: Add more intrinsics. - switch (MI.getIntrinsicID()) { + switch (cast<GIntrinsic>(MI).getIntrinsicID()) { default: return false; case Intrinsic::aarch64_neon_uaddlv: diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCombinerHelper.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCombinerHelper.cpp --- a/llvm/lib/Target/AMDGPU/AMDGPUCombinerHelper.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUCombinerHelper.cpp @@ -9,6 +9,7 @@ #include "AMDGPUCombinerHelper.h" #include "GCNSubtarget.h" #include "MCTargetDesc/AMDGPUMCTargetDesc.h" +#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h" #include "llvm/CodeGen/GlobalISel/MIPatternMatch.h" #include "llvm/IR/IntrinsicsAMDGPU.h" #include "llvm/Target/TargetMachine.h" @@ -42,7 +43,7 @@ case AMDGPU::G_AMDGPU_FMAX_LEGACY: return true; case AMDGPU::G_INTRINSIC: { - unsigned IntrinsicID = MI.getIntrinsicID(); + unsigned IntrinsicID = cast<GIntrinsic>(MI).getIntrinsicID(); switch (IntrinsicID) { case Intrinsic::amdgcn_rcp: case Intrinsic::amdgcn_rcp_legacy: @@ -92,7 +93,7 @@ case AMDGPU::G_PHI: return false; case AMDGPU::G_INTRINSIC: { - unsigned IntrinsicID = MI.getIntrinsicID(); + unsigned IntrinsicID = cast<GIntrinsic>(MI).getIntrinsicID(); switch (IntrinsicID) { case Intrinsic::amdgcn_interp_p1: case Intrinsic::amdgcn_interp_p2: @@ -228,7 +229,7 @@ case AMDGPU::G_AMDGPU_RCP_IFLAG: return true; case AMDGPU::G_INTRINSIC: { - unsigned IntrinsicID = MatchInfo->getIntrinsicID(); + unsigned IntrinsicID = cast<GIntrinsic>(MatchInfo)->getIntrinsicID(); switch (IntrinsicID) { case
Intrinsic::amdgcn_rcp: case Intrinsic::amdgcn_rcp_legacy: @@ -327,7 +328,7 @@ NegateOperand(MatchInfo->getOperand(1)); break; case AMDGPU::G_INTRINSIC: { - unsigned IntrinsicID = MatchInfo->getIntrinsicID(); + unsigned IntrinsicID = cast<GIntrinsic>(MatchInfo)->getIntrinsicID(); switch (IntrinsicID) { case Intrinsic::amdgcn_rcp: case Intrinsic::amdgcn_rcp_legacy: diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.h b/llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.h --- a/llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.h +++ b/llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.h @@ -21,6 +21,7 @@ class GCNSubtarget; class MachineMemOperand; +class MachineInstr; class AMDGPUInstrInfo { public: @@ -31,6 +32,13 @@ namespace AMDGPU { +/// Return the intrinsic ID for opcodes with the G_AMDGPU_INTRIN_ prefix. +/// +/// These opcodes have an Intrinsic::ID operand similar to a GIntrinsic. But +/// they are not actual instances of GIntrinsics, so we cannot use +/// GIntrinsic::getIntrinsicID() on them. +unsigned getIntrinsicID(const MachineInstr &I); + struct RsrcIntrinsic { unsigned Intr; uint8_t RsrcArg; diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.cpp --- a/llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.cpp @@ -14,6 +14,7 @@ #include "AMDGPUInstrInfo.h" #include "AMDGPU.h" +#include "llvm/CodeGen/MachineInstr.h" #include "llvm/CodeGen/MachineMemOperand.h" #include "llvm/IR/Constants.h" #include "llvm/IR/Instruction.h" @@ -26,6 +27,9 @@ AMDGPUInstrInfo::AMDGPUInstrInfo(const GCNSubtarget &ST) { } +Intrinsic::ID AMDGPU::getIntrinsicID(const MachineInstr &I) { + return I.getOperand(I.getNumExplicitDefs()).getIntrinsicID(); +} // TODO: Should largely merge with AMDGPUTTIImpl::isSourceOfDivergence.
bool AMDGPUInstrInfo::isUniformMMO(const MachineMemOperand *MMO) { diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp --- a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp @@ -21,6 +21,7 @@ #include "Utils/AMDGPUBaseInfo.h" #include "llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h" #include "llvm/CodeGen/GlobalISel/GISelKnownBits.h" +#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h" #include "llvm/CodeGen/GlobalISel/MIPatternMatch.h" #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h" #include "llvm/CodeGen/MachineFrameInfo.h" @@ -1001,7 +1002,7 @@ } bool AMDGPUInstructionSelector::selectG_INTRINSIC(MachineInstr &I) const { - unsigned IntrinsicID = I.getIntrinsicID(); + unsigned IntrinsicID = cast<GIntrinsic>(I).getIntrinsicID(); switch (IntrinsicID) { case Intrinsic::amdgcn_if_break: { MachineBasicBlock *BB = I.getParent(); @@ -2008,7 +2009,7 @@ bool AMDGPUInstructionSelector::selectG_INTRINSIC_W_SIDE_EFFECTS( MachineInstr &I) const { - unsigned IntrinsicID = I.getIntrinsicID(); + unsigned IntrinsicID = cast<GIntrinsic>(I).getIntrinsicID(); switch (IntrinsicID) { case Intrinsic::amdgcn_end_cf: return selectEndCfIntrinsic(I); @@ -2689,8 +2690,8 @@ return isVCmpResult(MI.getOperand(1).getReg(), MRI) && isVCmpResult(MI.getOperand(2).getReg(), MRI); - if (Opcode == TargetOpcode::G_INTRINSIC) - return MI.getIntrinsicID() == Intrinsic::amdgcn_class; + if (auto *GI = dyn_cast<GIntrinsic>(&MI)) + return GI->is(Intrinsic::amdgcn_class); return Opcode == AMDGPU::G_ICMP || Opcode == AMDGPU::G_FCMP; } @@ -3252,7 +3253,7 @@ bool AMDGPUInstructionSelector::selectSMFMACIntrin(MachineInstr &MI) const { unsigned Opc; - switch (MI.getIntrinsicID()) { + switch (cast<GIntrinsic>(MI).getIntrinsicID()) { case Intrinsic::amdgcn_smfmac_f32_16x16x32_f16: Opc = AMDGPU::V_SMFMAC_F32_16X16X32_F16_e64; break; @@ -3457,8 +3458,8 @@ case AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD_D16: case
AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE: case AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE_D16: { - const AMDGPU::ImageDimIntrinsicInfo *Intr - = AMDGPU::getImageDimIntrinsicInfo(I.getIntrinsicID()); + const AMDGPU::ImageDimIntrinsicInfo *Intr = + AMDGPU::getImageDimIntrinsicInfo(AMDGPU::getIntrinsicID(I)); assert(Intr && "not an image intrinsic with image pseudo"); return selectImageIntrinsic(I, Intr); } diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp --- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp @@ -21,6 +21,7 @@ #include "Utils/AMDGPUBaseInfo.h" #include "llvm/ADT/ScopeExit.h" #include "llvm/BinaryFormat/ELF.h" +#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h" #include "llvm/CodeGen/GlobalISel/LegalizerHelper.h" #include "llvm/CodeGen/GlobalISel/MIPatternMatch.h" #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h" @@ -6524,7 +6525,7 @@ MachineRegisterInfo &MRI = *B.getMRI(); // Replace the use G_BRCOND with the exec manipulate and branch pseudos. 
- auto IntrID = MI.getIntrinsicID(); + auto IntrID = cast<GIntrinsic>(MI).getIntrinsicID(); switch (IntrID) { case Intrinsic::amdgcn_if: case Intrinsic::amdgcn_else: { diff --git a/llvm/lib/Target/AMDGPU/AMDGPUPostLegalizerCombiner.cpp b/llvm/lib/Target/AMDGPU/AMDGPUPostLegalizerCombiner.cpp --- a/llvm/lib/Target/AMDGPU/AMDGPUPostLegalizerCombiner.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUPostLegalizerCombiner.cpp @@ -22,6 +22,7 @@ #include "llvm/CodeGen/GlobalISel/GIMatchTableExecutor.h" #include "llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h" #include "llvm/CodeGen/GlobalISel/GISelKnownBits.h" +#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h" #include "llvm/CodeGen/GlobalISel/MIPatternMatch.h" #include "llvm/CodeGen/MachineDominators.h" #include "llvm/CodeGen/TargetPassConfig.h" @@ -268,10 +269,10 @@ auto getRcpSrc = [=](const MachineInstr &MI) { MachineInstr *ResMI = nullptr; - if (MI.getOpcode() == TargetOpcode::G_INTRINSIC && - MI.getIntrinsicID() == Intrinsic::amdgcn_rcp) - ResMI = MRI.getVRegDef(MI.getOperand(2).getReg()); - + if (auto *GI = dyn_cast<GIntrinsic>(&MI)) { + if (GI->is(Intrinsic::amdgcn_rcp)) + ResMI = MRI.getVRegDef(MI.getOperand(2).getReg()); + } return ResMI; }; diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp --- a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp @@ -337,7 +337,7 @@ RegisterBankInfo::InstructionMappings AMDGPURegisterBankInfo::getInstrAlternativeMappingsIntrinsic( const MachineInstr &MI, const MachineRegisterInfo &MRI) const { - switch (MI.getIntrinsicID()) { + switch (cast<GIntrinsic>(MI).getIntrinsicID()) { case Intrinsic::amdgcn_readlane: { static const OpRegBankEntry<3> Table[2] = { // Perfectly legal.
@@ -378,7 +378,7 @@ AMDGPURegisterBankInfo::getInstrAlternativeMappingsIntrinsicWSideEffects( const MachineInstr &MI, const MachineRegisterInfo &MRI) const { - switch (MI.getIntrinsicID()) { + switch (cast<GIntrinsic>(MI).getIntrinsicID()) { case Intrinsic::amdgcn_s_buffer_load: { static const OpRegBankEntry<2> Table[4] = { // Perfectly legal. @@ -2949,7 +2949,7 @@ return; } case AMDGPU::G_INTRINSIC: { - switch (MI.getIntrinsicID()) { + switch (cast<GIntrinsic>(MI).getIntrinsicID()) { case Intrinsic::amdgcn_readlane: { substituteSimpleCopyRegs(OpdMapper, 2); @@ -3019,8 +3019,8 @@ case AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD_D16: case AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE: case AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE_D16: { - const AMDGPU::RsrcIntrinsic *RSrcIntrin - = AMDGPU::lookupRsrcIntrinsic(MI.getIntrinsicID()); + const AMDGPU::RsrcIntrinsic *RSrcIntrin = + AMDGPU::lookupRsrcIntrinsic(AMDGPU::getIntrinsicID(MI)); assert(RSrcIntrin && RSrcIntrin->IsImage); // Non-images can have complications from operands that allow both SGPR // and VGPR.
For now it's too complicated to figure out the final opcode @@ -3035,7 +3035,7 @@ return; } case AMDGPU::G_INTRINSIC_W_SIDE_EFFECTS: { - auto IntrID = MI.getIntrinsicID(); + auto IntrID = cast<GIntrinsic>(MI).getIntrinsicID(); switch (IntrID) { case Intrinsic::amdgcn_ds_ordered_add: case Intrinsic::amdgcn_ds_ordered_swap: { @@ -4198,7 +4198,7 @@ break; } case AMDGPU::G_INTRINSIC: { - switch (MI.getIntrinsicID()) { + switch (cast<GIntrinsic>(MI).getIntrinsicID()) { default: return getInvalidInstructionMapping(); case Intrinsic::amdgcn_div_fmas: @@ -4531,7 +4531,7 @@ case AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD_D16: case AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE: case AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE_D16: { - auto IntrID = MI.getIntrinsicID(); + auto IntrID = AMDGPU::getIntrinsicID(MI); const AMDGPU::RsrcIntrinsic *RSrcIntrin = AMDGPU::lookupRsrcIntrinsic(IntrID); assert(RSrcIntrin && "missing RsrcIntrinsic for image intrinsic"); // Non-images can have complications from operands that allow both SGPR @@ -4560,7 +4560,7 @@ break; } case AMDGPU::G_INTRINSIC_W_SIDE_EFFECTS: { - auto IntrID = MI.getIntrinsicID(); + auto IntrID = cast<GIntrinsic>(MI).getIntrinsicID(); switch (IntrID) { case Intrinsic::amdgcn_s_getreg: case Intrinsic::amdgcn_s_memtime: diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp --- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp +++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp @@ -28,6 +28,7 @@ #include "llvm/CodeGen/ByteProvider.h" #include "llvm/CodeGen/FunctionLoweringInfo.h" #include "llvm/CodeGen/GlobalISel/GISelKnownBits.h" +#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h" #include "llvm/CodeGen/GlobalISel/MIPatternMatch.h" #include "llvm/CodeGen/MachineFrameInfo.h" #include "llvm/CodeGen/MachineFunction.h" @@ -11302,7 +11303,7 @@ return false; return true; case AMDGPU::G_INTRINSIC: - switch (MI->getIntrinsicID()) { + switch (cast<GIntrinsic>(MI)->getIntrinsicID()) { case Intrinsic::amdgcn_fmul_legacy: case Intrinsic::amdgcn_fmad_ftz: case
Intrinsic::amdgcn_sqrt: @@ -13736,7 +13737,7 @@ const MachineInstr *MI = MRI.getVRegDef(R); switch (MI->getOpcode()) { case AMDGPU::G_INTRINSIC: { - switch (MI->getIntrinsicID()) { + switch (cast<GIntrinsic>(MI)->getIntrinsicID()) { case Intrinsic::amdgcn_workitem_id_x: knownBitsForWorkitemID(*getSubtarget(), KB, Known, 0); break; @@ -13801,21 +13802,17 @@ GISelKnownBits &KB, Register R, const MachineRegisterInfo &MRI, unsigned Depth) const { const MachineInstr *MI = MRI.getVRegDef(R); - switch (MI->getOpcode()) { - case AMDGPU::G_INTRINSIC: - case AMDGPU::G_INTRINSIC_W_SIDE_EFFECTS: { + if (auto *GI = dyn_cast<GIntrinsic>(MI)) { // FIXME: Can this move to generic code? What about the case where the call // site specifies a lower alignment? - Intrinsic::ID IID = MI->getIntrinsicID(); + Intrinsic::ID IID = GI->getIntrinsicID(); LLVMContext &Ctx = KB.getMachineFunction().getFunction().getContext(); AttributeList Attrs = Intrinsic::getAttributes(Ctx, IID); if (MaybeAlign RetAlign = Attrs.getRetAlignment()) return *RetAlign; return Align(1); } - default: - return Align(1); - } + return Align(1); } Align SITargetLowering::getPrefLoopAlignment(MachineLoop *ML) const { diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp --- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp +++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp @@ -18,6 +18,7 @@ #include "GCNSubtarget.h" #include "SIMachineFunctionInfo.h" #include "llvm/Analysis/ValueTracking.h" +#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h" #include "llvm/CodeGen/LiveIntervals.h" #include "llvm/CodeGen/LiveVariables.h" #include "llvm/CodeGen/MachineDominators.h" @@ -8572,9 +8573,8 @@ InstructionUniformity SIInstrInfo::getGenericInstructionUniformity(const MachineInstr &MI) const { unsigned opcode = MI.getOpcode(); - if (opcode == AMDGPU::G_INTRINSIC || - opcode == AMDGPU::G_INTRINSIC_W_SIDE_EFFECTS) { - auto IID = static_cast<Intrinsic::ID>(MI.getIntrinsicID()); + if (auto *GI = dyn_cast<GIntrinsic>(&MI)) { + auto IID =
GI->getIntrinsicID(); if (AMDGPU::isIntrinsicSourceOfDivergence(IID)) return InstructionUniformity::NeverUniform; if (AMDGPU::isIntrinsicAlwaysUniform(IID)) diff --git a/llvm/lib/Target/Mips/MipsLegalizerInfo.cpp b/llvm/lib/Target/Mips/MipsLegalizerInfo.cpp --- a/llvm/lib/Target/Mips/MipsLegalizerInfo.cpp +++ b/llvm/lib/Target/Mips/MipsLegalizerInfo.cpp @@ -12,6 +12,7 @@ #include "MipsLegalizerInfo.h" #include "MipsTargetMachine.h" +#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h" #include "llvm/CodeGen/GlobalISel/LegalizerHelper.h" #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h" #include "llvm/IR/IntrinsicsMips.h" @@ -510,7 +511,7 @@ const MipsRegisterInfo &TRI = *ST.getRegisterInfo(); const RegisterBankInfo &RBI = *ST.getRegBankInfo(); - switch (MI.getIntrinsicID()) { + switch (cast<GIntrinsic>(MI).getIntrinsicID()) { case Intrinsic::trap: { MachineInstr *Trap = MIRBuilder.buildInstr(Mips::TRAP); MI.eraseFromParent(); diff --git a/llvm/lib/Target/PowerPC/GISel/PPCRegisterBankInfo.cpp b/llvm/lib/Target/PowerPC/GISel/PPCRegisterBankInfo.cpp --- a/llvm/lib/Target/PowerPC/GISel/PPCRegisterBankInfo.cpp +++ b/llvm/lib/Target/PowerPC/GISel/PPCRegisterBankInfo.cpp @@ -12,6 +12,7 @@ #include "PPCRegisterBankInfo.h" #include "PPCRegisterInfo.h" +#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h" #include "llvm/CodeGen/MachineFunction.h" #include "llvm/CodeGen/MachineRegisterInfo.h" #include "llvm/Support/Debug.h" @@ -289,8 +290,11 @@ const TargetRegisterInfo &TRI, unsigned Depth) const { unsigned Op = MI.getOpcode(); - if (Op == TargetOpcode::G_INTRINSIC && isFPIntrinsic(MI.getIntrinsicID())) - return true; + + if (auto *GI = dyn_cast<GIntrinsic>(&MI)) { + if (isFPIntrinsic(GI->getIntrinsicID())) + return true; + } // Do we have an explicit floating point instruction?
if (isPreISelGenericFloatingPointOpcode(Op)) diff --git a/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp b/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp --- a/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp +++ b/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp @@ -21,6 +21,7 @@ #include "SPIRVUtils.h" #include "llvm/ADT/APFloat.h" #include "llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h" +#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h" #include "llvm/CodeGen/GlobalISel/InstructionSelector.h" #include "llvm/CodeGen/MachineInstrBuilder.h" #include "llvm/CodeGen/MachineRegisterInfo.h" @@ -1314,7 +1315,7 @@ const SPIRVType *ResType, MachineInstr &I) const { MachineBasicBlock &BB = *I.getParent(); - switch (I.getIntrinsicID()) { + switch (cast<GIntrinsic>(I).getIntrinsicID()) { case Intrinsic::spv_load: return selectLoad(ResVReg, ResType, I); case Intrinsic::spv_store: diff --git a/llvm/lib/Target/SPIRV/SPIRVUtils.cpp b/llvm/lib/Target/SPIRV/SPIRVUtils.cpp --- a/llvm/lib/Target/SPIRV/SPIRVUtils.cpp +++ b/llvm/lib/Target/SPIRV/SPIRVUtils.cpp @@ -15,6 +15,7 @@ #include "SPIRV.h" #include "SPIRVInstrInfo.h" #include "llvm/ADT/StringRef.h" +#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h" #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h" #include "llvm/CodeGen/MachineInstr.h" #include "llvm/CodeGen/MachineInstrBuilder.h" @@ -209,13 +210,14 @@ MachineInstr *getDefInstrMaybeConstant(Register &ConstReg, const MachineRegisterInfo *MRI) { MachineInstr *ConstInstr = MRI->getVRegDef(ConstReg); - if (ConstInstr->getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS && - ConstInstr->getIntrinsicID() == Intrinsic::spv_track_constant) { - ConstReg = ConstInstr->getOperand(2).getReg(); - ConstInstr = MRI->getVRegDef(ConstReg); + if (auto *GI = dyn_cast<GIntrinsic>(ConstInstr)) { + if (GI->is(Intrinsic::spv_track_constant)) { + ConstReg = ConstInstr->getOperand(2).getReg(); + return MRI->getVRegDef(ConstReg); + } } else if (ConstInstr->getOpcode() ==
SPIRV::ASSIGN_TYPE) { ConstReg = ConstInstr->getOperand(1).getReg(); - ConstInstr = MRI->getVRegDef(ConstReg); + return MRI->getVRegDef(ConstReg); } return ConstInstr; } @@ -227,8 +229,9 @@ } bool isSpvIntrinsic(MachineInstr &MI, Intrinsic::ID IntrinsicID) { - return MI.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS && - MI.getIntrinsicID() == IntrinsicID; + if (auto *GI = dyn_cast<GIntrinsic>(&MI)) + return GI->is(IntrinsicID); + return false; } Type *getMDOperandAsType(const MDNode *N, unsigned I) {