diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h
--- a/llvm/include/llvm/CodeGen/TargetLowering.h
+++ b/llvm/include/llvm/CodeGen/TargetLowering.h
@@ -1967,12 +1967,6 @@
   /// Should be used only when getIRStackGuard returns nullptr.
   virtual Function *getSSPStackGuardCheck(const Module &M) const;
 
-  /// \returns true if a constant G_UBFX is legal on the target.
-  virtual bool isConstantUnsignedBitfieldExtractLegal(unsigned Opc, LLT Ty1,
-                                                      LLT Ty2) const {
-    return false;
-  }
-
 protected:
   Value *getDefaultSafeStackPointerLocation(IRBuilderBase &IRB,
                                             bool UseTLS) const;
diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -4156,8 +4156,7 @@
   Register Dst = MI.getOperand(0).getReg();
   LLT Ty = MRI.getType(Dst);
   LLT ExtractTy = getTargetLowering().getPreferredShiftAmountTy(Ty);
-  if (!getTargetLowering().isConstantUnsignedBitfieldExtractLegal(
-          TargetOpcode::G_UBFX, Ty, ExtractTy))
+  if (LI && !LI->isLegalOrCustom({TargetOpcode::G_UBFX, {Ty, ExtractTy}}))
     return false;
 
   int64_t AndImm, LSBImm;
@@ -4243,8 +4242,7 @@
   const Register Dst = MI.getOperand(0).getReg();
   LLT Ty = MRI.getType(Dst);
   LLT ExtractTy = getTargetLowering().getPreferredShiftAmountTy(Ty);
-  if (!getTargetLowering().isConstantUnsignedBitfieldExtractLegal(
-          TargetOpcode::G_UBFX, Ty, ExtractTy))
+  if (LI && !LI->isLegalOrCustom({TargetOpcode::G_UBFX, {Ty, ExtractTy}}))
     return false;
 
   // Try to match shr (and x, c1), c2
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.h
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -1243,9 +1243,6 @@
   SDValue getPStateSM(SelectionDAG &DAG, SDValue Chain, SMEAttrs Attrs,
                       SDLoc DL, EVT VT) const;
 
-  bool isConstantUnsignedBitfieldExtractLegal(unsigned Opc, LLT Ty1,
-                                              LLT Ty2) const override;
-
   bool preferScalarizeSplat(SDNode *N) const override;
 };
 
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -26096,11 +26096,6 @@
          TargetLowering::isTargetCanonicalConstantNode(Op);
 }
 
-bool AArch64TargetLowering::isConstantUnsignedBitfieldExtractLegal(
-    unsigned Opc, LLT Ty1, LLT Ty2) const {
-  return Ty1 == Ty2 && (Ty1 == LLT::scalar(32) || Ty1 == LLT::scalar(64));
-}
-
 bool AArch64TargetLowering::isComplexDeinterleavingSupported() const {
   return Subtarget->hasSVE() || Subtarget->hasSVE2() ||
          Subtarget->hasComplxNum();
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h
@@ -372,9 +372,6 @@
 
   AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *) const override;
 
-  bool isConstantUnsignedBitfieldExtractLegal(unsigned Opc, LLT Ty1,
-                                              LLT Ty2) const override;
-
   bool shouldSinkOperands(Instruction *I,
                           SmallVectorImpl<Use *> &Ops) const override;
 };
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -5777,12 +5777,6 @@
   }
 }
 
-bool AMDGPUTargetLowering::isConstantUnsignedBitfieldExtractLegal(
-    unsigned Opc, LLT Ty1, LLT Ty2) const {
-  return (Ty1 == LLT::scalar(32) || Ty1 == LLT::scalar(64)) &&
-         Ty2 == LLT::scalar(32);
-}
-
 /// Whether it is profitable to sink the operands of an
 /// Instruction I to the basic block of I.
 /// This helps using several modifiers (like abs and neg) more often.
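For context, not part of the patch: with the target hook gone, the G_UBFX form-combine is gated purely by the legalizer ruleset. CombinerHelper builds a LegalityQuery from the opcode and the {result, shift-amount} type pair and asks LegalizerInfo::isLegalOrCustom, so a target opts in by marking G_UBFX Legal or Custom for those types. The snippet below is a minimal sketch of both sides of that contract; the type pairs and the helper names (addExampleUBFXRules, ubfxCombineEnabled) are illustrative assumptions, not the actual in-tree AArch64/AMDGPU rules.

// Sketch only: shows the kind of rule a target declares and the query the
// combine now performs. Type pairs below are illustrative, not the real rules.
#include "llvm/CodeGen/GlobalISel/LegalizerInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"

using namespace llvm;

// Target side (normally in a <Target>LegalizerInfo constructor): declare the
// {result, shift-amount} type pairs for which G_UBFX is Legal.
static void addExampleUBFXRules(LegalizerInfo &Info) {
  const LLT S32 = LLT::scalar(32);
  const LLT S64 = LLT::scalar(64);
  Info.getActionDefinitionsBuilder(TargetOpcode::G_UBFX)
      .legalFor({{S32, S32}, {S64, S64}}); // illustrative type pairs
  Info.getLegacyLegalizerInfo().computeTables();
}

// Combine side: mirrors the guard added in CombinerHelper. With no hook to
// override, the answer comes straight from the ruleset above.
static bool ubfxCombineEnabled(const LegalizerInfo &Info, LLT Ty, LLT ExtractTy) {
  return Info.isLegalOrCustom({TargetOpcode::G_UBFX, {Ty, ExtractTy}});
}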