diff --git a/llvm/include/llvm/CodeGen/SelectionDAG.h b/llvm/include/llvm/CodeGen/SelectionDAG.h
--- a/llvm/include/llvm/CodeGen/SelectionDAG.h
+++ b/llvm/include/llvm/CodeGen/SelectionDAG.h
@@ -452,6 +452,9 @@
   const DataLayout &getDataLayout() const { return MF->getDataLayout(); }
   const TargetMachine &getTarget() const { return TM; }
   const TargetSubtargetInfo &getSubtarget() const { return MF->getSubtarget(); }
+  template <typename STC> const STC &getSubtarget() const {
+    return MF->getSubtarget<STC>();
+  }
   const TargetLowering &getTargetLoweringInfo() const { return *TLI; }
   const TargetLibraryInfo &getLibInfo() const { return *LibInfo; }
   const SelectionDAGTargetInfo &getSelectionDAGInfo() const { return *TSI; }
diff --git a/llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp b/llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp
--- a/llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp
+++ b/llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp
@@ -132,7 +132,7 @@
   bool runOnMachineFunction(MachineFunction &MF) override {
     AArch64FI = MF.getInfo<AArch64FunctionInfo>();
-    STI = static_cast<const AArch64Subtarget *>(&MF.getSubtarget());
+    STI = &MF.getSubtarget<AArch64Subtarget>();
 
     SetupMachineFunction(MF);
diff --git a/llvm/lib/Target/AArch64/AArch64FalkorHWPFFix.cpp b/llvm/lib/Target/AArch64/AArch64FalkorHWPFFix.cpp
--- a/llvm/lib/Target/AArch64/AArch64FalkorHWPFFix.cpp
+++ b/llvm/lib/Target/AArch64/AArch64FalkorHWPFFix.cpp
@@ -813,7 +813,7 @@
 }
 
 bool FalkorHWPFFix::runOnMachineFunction(MachineFunction &Fn) {
-  auto &ST = static_cast<const AArch64Subtarget &>(Fn.getSubtarget());
+  auto &ST = Fn.getSubtarget<AArch64Subtarget>();
   if (ST.getProcFamily() != AArch64Subtarget::Falkor)
     return false;
diff --git a/llvm/lib/Target/AArch64/AArch64FastISel.cpp b/llvm/lib/Target/AArch64/AArch64FastISel.cpp
--- a/llvm/lib/Target/AArch64/AArch64FastISel.cpp
+++ b/llvm/lib/Target/AArch64/AArch64FastISel.cpp
@@ -283,8 +283,7 @@
   explicit AArch64FastISel(FunctionLoweringInfo &FuncInfo,
                            const TargetLibraryInfo *LibInfo)
       : FastISel(FuncInfo, LibInfo, /*SkipTargetIndependentISel=*/true) {
-    Subtarget =
-        &static_cast<const AArch64Subtarget &>(FuncInfo.MF->getSubtarget());
+    Subtarget = &FuncInfo.MF->getSubtarget<AArch64Subtarget>();
     Context = &FuncInfo.Fn->getContext();
   }
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -2576,8 +2576,7 @@
   EVT VT = LHS.getValueType();
   assert(VT != MVT::f128);
 
-  const bool FullFP16 =
-      static_cast<const AArch64Subtarget &>(DAG.getSubtarget()).hasFullFP16();
+  const bool FullFP16 = DAG.getSubtarget<AArch64Subtarget>().hasFullFP16();
 
   if (VT == MVT::f16 && !FullFP16) {
     LHS = DAG.getNode(ISD::STRICT_FP_EXTEND, dl, {MVT::f32, MVT::Other},
@@ -2595,8 +2594,7 @@
 static SDValue emitComparison(SDValue LHS, SDValue RHS, ISD::CondCode CC,
                               const SDLoc &dl, SelectionDAG &DAG) {
   EVT VT = LHS.getValueType();
-  const bool FullFP16 =
-      static_cast<const AArch64Subtarget &>(DAG.getSubtarget()).hasFullFP16();
+  const bool FullFP16 = DAG.getSubtarget<AArch64Subtarget>().hasFullFP16();
 
   if (VT.isFloatingPoint()) {
     assert(VT != MVT::f128);
@@ -2704,8 +2702,7 @@
                                       AArch64CC::CondCode OutCC, const SDLoc &DL,
                                       SelectionDAG &DAG) {
   unsigned Opcode = 0;
-  const bool FullFP16 =
-      static_cast<const AArch64Subtarget &>(DAG.getSubtarget()).hasFullFP16();
+  const bool FullFP16 = DAG.getSubtarget<AArch64Subtarget>().hasFullFP16();
   if (LHS.getValueType().isFloatingPoint()) {
     assert(LHS.getValueType() != MVT::f128);
@@ -11823,8 +11820,7 @@
     return DAG.getSExtOrTrunc(Cmp, dl, Op.getValueType());
   }
 
-  const bool FullFP16 =
-      static_cast<const AArch64Subtarget &>(DAG.getSubtarget()).hasFullFP16();
+  const bool FullFP16 = DAG.getSubtarget<AArch64Subtarget>().hasFullFP16();
 
   // Make v4f16 (only) fcmp operations utilise vector instructions
   // v8f16 support will be a litle more complicated
@@ -11958,7 +11954,7 @@
 
 SDValue AArch64TargetLowering::LowerATOMIC_LOAD_SUB(SDValue Op,
                                                     SelectionDAG &DAG) const {
-  auto &Subtarget = static_cast<const AArch64Subtarget &>(DAG.getSubtarget());
+  auto &Subtarget = DAG.getSubtarget<AArch64Subtarget>();
   if (!Subtarget.hasLSE() && !Subtarget.outlineAtomics())
     return SDValue();
@@ -11975,7 +11971,7 @@
 
 SDValue AArch64TargetLowering::LowerATOMIC_LOAD_AND(SDValue Op,
                                                     SelectionDAG &DAG) const {
-  auto &Subtarget = static_cast<const AArch64Subtarget &>(DAG.getSubtarget());
+  auto &Subtarget = DAG.getSubtarget<AArch64Subtarget>();
   if (!Subtarget.hasLSE() && !Subtarget.outlineAtomics())
     return SDValue();
@@ -14732,8 +14728,7 @@
   ConstantSDNode *ConstantN1 = dyn_cast<ConstantSDNode>(N1);
 
   EVT VT = N->getValueType(0);
-  const bool FullFP16 =
-      static_cast<const AArch64Subtarget &>(DAG.getSubtarget()).hasFullFP16();
+  const bool FullFP16 = DAG.getSubtarget<AArch64Subtarget>().hasFullFP16();
   bool IsStrict = N0->isStrictFPOpcode();
 
   // Rewrite for pairwise fadd pattern
@@ -15918,8 +15913,7 @@
   // If we're compiling for a specific vector-length, we can check if the
   // pattern's VL equals that of the scalable vector at runtime.
   if (N.getOpcode() == AArch64ISD::PTRUE) {
-    const auto &Subtarget =
-        static_cast<const AArch64Subtarget &>(DAG.getSubtarget());
+    const auto &Subtarget = DAG.getSubtarget<AArch64Subtarget>();
     unsigned MinSVESize = Subtarget.getMinSVEVectorSizeInBits();
     unsigned MaxSVESize = Subtarget.getMaxSVEVectorSizeInBits();
     if (MaxSVESize && MinSVESize == MaxSVESize) {
@@ -17140,8 +17134,7 @@
       Stride > std::numeric_limits<int32_t>::max())
     return Changed;
 
-  const auto &Subtarget =
-      static_cast<const AArch64Subtarget &>(DAG.getSubtarget());
+  const auto &Subtarget = DAG.getSubtarget<AArch64Subtarget>();
   unsigned MaxVScale =
       Subtarget.getMaxSVEVectorSizeInBits() / AArch64::SVEBitsPerBlock;
   int64_t LastElementOffset =
@@ -20074,8 +20067,7 @@
   // For vectors that are exactly getMaxSVEVectorSizeInBits big, we can use
   // AArch64SVEPredPattern::all, which can enable the use of unpredicated
   // variants of instructions when available.
-  const auto &Subtarget =
-      static_cast<const AArch64Subtarget &>(DAG.getSubtarget());
+  const auto &Subtarget = DAG.getSubtarget<AArch64Subtarget>();
   unsigned MinSVESize = Subtarget.getMinSVEVectorSizeInBits();
   unsigned MaxSVESize = Subtarget.getMaxSVEVectorSizeInBits();
   if (MaxSVESize && MinSVESize == MaxSVESize &&
diff --git a/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp b/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
--- a/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
+++ b/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
@@ -2308,7 +2308,7 @@
   if (skipFunction(Fn.getFunction()))
     return false;
 
-  Subtarget = &static_cast<const AArch64Subtarget &>(Fn.getSubtarget());
+  Subtarget = &Fn.getSubtarget<AArch64Subtarget>();
   TII = static_cast<const AArch64InstrInfo *>(Subtarget->getInstrInfo());
   TRI = Subtarget->getRegisterInfo();
   AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp b/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
--- a/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
@@ -2325,8 +2325,7 @@
   MachineFunction &MF = *MBB.getParent();
   MachineRegisterInfo &MRI = MF.getRegInfo();
 
-  const AArch64Subtarget *Subtarget =
-      &static_cast<const AArch64Subtarget &>(MF.getSubtarget());
+  const AArch64Subtarget *Subtarget = &MF.getSubtarget<AArch64Subtarget>();
   if (Subtarget->requiresStrictAlign()) {
     // We don't support this feature yet.
     LLVM_DEBUG(dbgs() << "AArch64 GISel does not support strict-align yet\n");
diff --git a/llvm/lib/Target/ARM/ARMBlockPlacement.cpp b/llvm/lib/Target/ARM/ARMBlockPlacement.cpp
--- a/llvm/lib/Target/ARM/ARMBlockPlacement.cpp
+++ b/llvm/lib/Target/ARM/ARMBlockPlacement.cpp
@@ -213,7 +213,7 @@
 bool ARMBlockPlacement::runOnMachineFunction(MachineFunction &MF) {
   if (skipFunction(MF.getFunction()))
     return false;
-  const ARMSubtarget &ST = static_cast<const ARMSubtarget &>(MF.getSubtarget());
+  const ARMSubtarget &ST = MF.getSubtarget<ARMSubtarget>();
   if (!ST.hasLOB())
     return false;
   LLVM_DEBUG(dbgs() << DEBUG_PREFIX << "Running on " << MF.getName() << "\n");
diff --git a/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp b/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp
--- a/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp
+++ b/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp
@@ -396,7 +396,7 @@
                     << MCP->getConstants().size() << " CP entries, aligned to "
                     << MCP->getConstantPoolAlign().value() << " bytes *****\n");
 
-  STI = &static_cast<const ARMSubtarget &>(MF->getSubtarget());
+  STI = &MF->getSubtarget<ARMSubtarget>();
   TII = STI->getInstrInfo();
   isPositionIndependentOrROPI =
       STI->getTargetLowering()->isPositionIndependent() || STI->isROPI();
diff --git a/llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp b/llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp
--- a/llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp
+++ b/llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp
@@ -3132,7 +3132,7 @@
 }
 
 bool ARMExpandPseudo::runOnMachineFunction(MachineFunction &MF) {
-  STI = &static_cast<const ARMSubtarget &>(MF.getSubtarget());
+  STI = &MF.getSubtarget<ARMSubtarget>();
   TII = STI->getInstrInfo();
   TRI = STI->getRegisterInfo();
   AFI = MF.getInfo<ARMFunctionInfo>();
diff --git a/llvm/lib/Target/ARM/ARMFastISel.cpp b/llvm/lib/Target/ARM/ARMFastISel.cpp
--- a/llvm/lib/Target/ARM/ARMFastISel.cpp
+++ b/llvm/lib/Target/ARM/ARMFastISel.cpp
@@ -122,8 +122,7 @@
     explicit ARMFastISel(FunctionLoweringInfo &funcInfo,
                          const TargetLibraryInfo *libInfo)
         : FastISel(funcInfo, libInfo),
-          Subtarget(
-              &static_cast<const ARMSubtarget &>(funcInfo.MF->getSubtarget())),
+          Subtarget(&funcInfo.MF->getSubtarget<ARMSubtarget>()),
           M(const_cast<Module &>(*funcInfo.Fn->getParent())),
           TM(funcInfo.MF->getTarget()), TII(*Subtarget->getInstrInfo()),
           TLI(*Subtarget->getTargetLowering()) {
@@ -156,7 +155,7 @@
                              const LoadInst *LI) override;
     bool fastLowerArguments() override;
 
-  #include "ARMGenFastISel.inc"
+#include "ARMGenFastISel.inc"
 
     // Instruction selection routines.
diff --git a/llvm/lib/Target/ARM/ARMFrameLowering.cpp b/llvm/lib/Target/ARM/ARMFrameLowering.cpp
--- a/llvm/lib/Target/ARM/ARMFrameLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMFrameLowering.cpp
@@ -392,8 +392,7 @@
                                  const DebugLoc &DL, const unsigned Reg,
                                  const Align Alignment,
                                  const bool MustBeSingleInstruction) {
-  const ARMSubtarget &AST =
-      static_cast<const ARMSubtarget &>(MF.getSubtarget());
+  const ARMSubtarget &AST = MF.getSubtarget<ARMSubtarget>();
   const bool CanUseBFC = AST.hasV6T2Ops() || AST.hasV7Ops();
   const unsigned AlignMask = Alignment.value() - 1U;
   const unsigned NrBitsToZero = Log2(Alignment);
@@ -1768,7 +1767,7 @@
     return;
 
   // We are planning to use NEON instructions vst1 / vld1.
-  if (!static_cast<const ARMSubtarget &>(MF.getSubtarget()).hasNEON())
+  if (!MF.getSubtarget<ARMSubtarget>().hasNEON())
     return;
 
   // Don't bother if the default stack alignment is sufficiently high.
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -5809,8 +5809,7 @@
     return DAG.UnrollVectorOp(Op.getNode());
   }
 
-  const bool HasFullFP16 =
-      static_cast<const ARMSubtarget &>(DAG.getSubtarget()).hasFullFP16();
+  const bool HasFullFP16 = DAG.getSubtarget<ARMSubtarget>().hasFullFP16();
 
   EVT NewTy;
   const EVT OpTy = Op.getOperand(0).getValueType();
@@ -5920,8 +5919,7 @@
           Op.getOperand(0).getValueType() == MVT::v8i16) &&
          "Invalid type for custom lowering!");
 
-  const bool HasFullFP16 =
-      static_cast<const ARMSubtarget &>(DAG.getSubtarget()).hasFullFP16();
+  const bool HasFullFP16 = DAG.getSubtarget<ARMSubtarget>().hasFullFP16();
 
   EVT DestVecType;
   if (VT == MVT::v4f32)
@@ -9884,7 +9882,7 @@
   if (N->getOpcode() != ISD::SDIV)
     return SDValue();
 
-  const auto &ST = static_cast<const ARMSubtarget &>(DAG.getSubtarget());
+  const auto &ST = DAG.getSubtarget<ARMSubtarget>();
   const bool MinSize = ST.hasMinSize();
   const bool HasDivide = ST.isThumb() ? ST.hasDivideInThumbMode()
                                       : ST.hasDivideInARMMode();
diff --git a/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp b/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
--- a/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
+++ b/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
@@ -2110,7 +2110,7 @@
     return false;
 
   MF = &Fn;
-  STI = &static_cast<const ARMSubtarget &>(Fn.getSubtarget());
+  STI = &Fn.getSubtarget<ARMSubtarget>();
   TL = STI->getTargetLowering();
   AFI = Fn.getInfo<ARMFunctionInfo>();
   TII = STI->getInstrInfo();
@@ -2201,7 +2201,7 @@
     return false;
 
   TD = &Fn.getDataLayout();
-  STI = &static_cast<const ARMSubtarget &>(Fn.getSubtarget());
+  STI = &Fn.getSubtarget<ARMSubtarget>();
   TII = STI->getInstrInfo();
   TRI = STI->getRegisterInfo();
   MRI = &Fn.getRegInfo();
diff --git a/llvm/lib/Target/ARM/ARMLowOverheadLoops.cpp b/llvm/lib/Target/ARM/ARMLowOverheadLoops.cpp
--- a/llvm/lib/Target/ARM/ARMLowOverheadLoops.cpp
+++ b/llvm/lib/Target/ARM/ARMLowOverheadLoops.cpp
@@ -1299,7 +1299,7 @@
 }
 
 bool ARMLowOverheadLoops::runOnMachineFunction(MachineFunction &mf) {
-  const ARMSubtarget &ST = static_cast<const ARMSubtarget &>(mf.getSubtarget());
+  const ARMSubtarget &ST = mf.getSubtarget<ARMSubtarget>();
   if (!ST.hasLOB())
     return false;
diff --git a/llvm/lib/Target/ARM/MVETPAndVPTOptimisationsPass.cpp b/llvm/lib/Target/ARM/MVETPAndVPTOptimisationsPass.cpp
--- a/llvm/lib/Target/ARM/MVETPAndVPTOptimisationsPass.cpp
+++ b/llvm/lib/Target/ARM/MVETPAndVPTOptimisationsPass.cpp
@@ -1041,8 +1041,7 @@
 }
 
 bool MVETPAndVPTOptimisations::runOnMachineFunction(MachineFunction &Fn) {
-  const ARMSubtarget &STI =
-      static_cast<const ARMSubtarget &>(Fn.getSubtarget());
+  const ARMSubtarget &STI = Fn.getSubtarget<ARMSubtarget>();
   if (!STI.isThumb2() || !STI.hasLOB())
     return false;
diff --git a/llvm/lib/Target/ARM/MVEVPTBlockPass.cpp b/llvm/lib/Target/ARM/MVEVPTBlockPass.cpp
--- a/llvm/lib/Target/ARM/MVEVPTBlockPass.cpp
+++ b/llvm/lib/Target/ARM/MVEVPTBlockPass.cpp
@@ -312,8 +312,7 @@
 }
 
 bool MVEVPTBlock::runOnMachineFunction(MachineFunction &Fn) {
-  const ARMSubtarget &STI =
-      static_cast<const ARMSubtarget &>(Fn.getSubtarget());
+  const ARMSubtarget &STI = Fn.getSubtarget<ARMSubtarget>();
 
   if (!STI.isThumb2() || !STI.hasMVEIntegerOps())
     return false;
diff --git a/llvm/lib/Target/ARM/Thumb2ITBlockPass.cpp b/llvm/lib/Target/ARM/Thumb2ITBlockPass.cpp
--- a/llvm/lib/Target/ARM/Thumb2ITBlockPass.cpp
+++ b/llvm/lib/Target/ARM/Thumb2ITBlockPass.cpp
@@ -284,8 +284,7 @@
 }
 
 bool Thumb2ITBlock::runOnMachineFunction(MachineFunction &Fn) {
-  const ARMSubtarget &STI =
-      static_cast<const ARMSubtarget &>(Fn.getSubtarget());
+  const ARMSubtarget &STI = Fn.getSubtarget<ARMSubtarget>();
   if (!STI.isThumb2())
     return false;
   AFI = Fn.getInfo<ARMFunctionInfo>();
diff --git a/llvm/lib/Target/ARM/Thumb2SizeReduction.cpp b/llvm/lib/Target/ARM/Thumb2SizeReduction.cpp
--- a/llvm/lib/Target/ARM/Thumb2SizeReduction.cpp
+++ b/llvm/lib/Target/ARM/Thumb2SizeReduction.cpp
@@ -1130,7 +1130,7 @@
   if (PredicateFtor && !PredicateFtor(MF.getFunction()))
     return false;
 
-  STI = &static_cast<const ARMSubtarget &>(MF.getSubtarget());
+  STI = &MF.getSubtarget<ARMSubtarget>();
   if (STI->isThumb1Only() || STI->prefers32BitThumb())
     return false;
diff --git a/llvm/lib/Target/CSKY/CSKYConstantIslandPass.cpp b/llvm/lib/Target/CSKY/CSKYConstantIslandPass.cpp
--- a/llvm/lib/Target/CSKY/CSKYConstantIslandPass.cpp
+++ b/llvm/lib/Target/CSKY/CSKYConstantIslandPass.cpp
@@ -287,7 +287,7 @@
 bool CSKYConstantIslands::runOnMachineFunction(MachineFunction &Mf) {
   MF = &Mf;
   MCP = Mf.getConstantPool();
-  STI = &static_cast<const CSKYSubtarget &>(Mf.getSubtarget());
+  STI = &Mf.getSubtarget<CSKYSubtarget>();
 
   LLVM_DEBUG(dbgs() << "***** CSKYConstantIslands: "
                     << MCP->getConstants().size() << " CP entries, aligned to "
diff --git a/llvm/lib/Target/Hexagon/HexagonISelDAGToDAGHVX.cpp b/llvm/lib/Target/Hexagon/HexagonISelDAGToDAGHVX.cpp
--- a/llvm/lib/Target/Hexagon/HexagonISelDAGToDAGHVX.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonISelDAGToDAGHVX.cpp
@@ -801,7 +801,7 @@
   return static_cast<const HexagonTargetLowering &>(G.getTargetLoweringInfo());
 }
 static const HexagonSubtarget &getHexagonSubtarget(SelectionDAG &G) {
-  return static_cast<const HexagonSubtarget &>(G.getSubtarget());
+  return G.getSubtarget<HexagonSubtarget>();
 }
 
 namespace llvm {
diff --git a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
--- a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
@@ -1396,10 +1396,9 @@
   Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, Hexagon::R0, Chain, InFlag);
   InFlag = Chain.getValue(1);
 
-  unsigned Flags =
-      static_cast<const HexagonSubtarget &>(DAG.getSubtarget()).useLongCalls()
-          ? HexagonII::MO_GDPLT | HexagonII::HMOTF_ConstExtended
-          : HexagonII::MO_GDPLT;
+  unsigned Flags = DAG.getSubtarget<HexagonSubtarget>().useLongCalls()
+                       ? HexagonII::MO_GDPLT | HexagonII::HMOTF_ConstExtended
+                       : HexagonII::MO_GDPLT;
 
   return GetDynamicTLSAddr(DAG, Chain, GA, InFlag, PtrVT, Hexagon::R0, Flags);
diff --git a/llvm/lib/Target/Hexagon/HexagonPatternsHVX.td b/llvm/lib/Target/Hexagon/HexagonPatternsHVX.td
--- a/llvm/lib/Target/Hexagon/HexagonPatternsHVX.td
+++ b/llvm/lib/Target/Hexagon/HexagonPatternsHVX.td
@@ -37,7 +37,7 @@
 def HexagonVINSERTW0: SDNode<"HexagonISD::VINSERTW0", SDTHexagonVINSERTW0>;
 
 def HwLen2: SDNodeXForm<imm, [{
-  const auto &ST = static_cast<const HexagonSubtarget&>(CurDAG->getSubtarget());
+  const auto &ST = CurDAG->getSubtarget<HexagonSubtarget>();
   return CurDAG->getTargetConstant(ST.getVectorLength()/2, SDLoc(N), MVT::i32);
 }]>;
diff --git a/llvm/lib/Target/M68k/M68kCollapseMOVEMPass.cpp b/llvm/lib/Target/M68k/M68kCollapseMOVEMPass.cpp
--- a/llvm/lib/Target/M68k/M68kCollapseMOVEMPass.cpp
+++ b/llvm/lib/Target/M68k/M68kCollapseMOVEMPass.cpp
@@ -231,7 +231,7 @@
   }
 
   bool runOnMachineFunction(MachineFunction &MF) override {
-    STI = &static_cast<const M68kSubtarget &>(MF.getSubtarget());
+    STI = &MF.getSubtarget<M68kSubtarget>();
     TII = STI->getInstrInfo();
     TRI = STI->getRegisterInfo();
     MFI = MF.getInfo<M68kMachineFunctionInfo>();
diff --git a/llvm/lib/Target/M68k/M68kExpandPseudo.cpp b/llvm/lib/Target/M68k/M68kExpandPseudo.cpp
--- a/llvm/lib/Target/M68k/M68kExpandPseudo.cpp
+++ b/llvm/lib/Target/M68k/M68kExpandPseudo.cpp
@@ -302,7 +302,7 @@
 }
 
 bool M68kExpandPseudo::runOnMachineFunction(MachineFunction &MF) {
-  STI = &static_cast<const M68kSubtarget &>(MF.getSubtarget());
+  STI = &MF.getSubtarget<M68kSubtarget>();
   TII = STI->getInstrInfo();
   TRI = STI->getRegisterInfo();
   MFI = MF.getInfo<M68kMachineFunctionInfo>();
diff --git a/llvm/lib/Target/M68k/M68kISelDAGToDAG.cpp b/llvm/lib/Target/M68k/M68kISelDAGToDAG.cpp
--- a/llvm/lib/Target/M68k/M68kISelDAGToDAG.cpp
+++ b/llvm/lib/Target/M68k/M68kISelDAGToDAG.cpp
@@ -312,7 +312,7 @@
 } // namespace
 
 bool M68kDAGToDAGISel::runOnMachineFunction(MachineFunction &MF) {
-  Subtarget = &static_cast<const M68kSubtarget &>(MF.getSubtarget());
+  Subtarget = &MF.getSubtarget<M68kSubtarget>();
   return SelectionDAGISel::runOnMachineFunction(MF);
 }
diff --git a/llvm/lib/Target/Mips/MicroMipsSizeReduction.cpp b/llvm/lib/Target/Mips/MicroMipsSizeReduction.cpp
--- a/llvm/lib/Target/Mips/MicroMipsSizeReduction.cpp
+++ b/llvm/lib/Target/Mips/MicroMipsSizeReduction.cpp
@@ -774,7 +774,7 @@
 
 bool MicroMipsSizeReduce::runOnMachineFunction(MachineFunction &MF) {
 
-  Subtarget = &static_cast<const MipsSubtarget &>(MF.getSubtarget());
+  Subtarget = &MF.getSubtarget<MipsSubtarget>();
 
   // TODO: Add support for the subtarget microMIPS32R6.
   if (!Subtarget->inMicroMipsMode() || !Subtarget->hasMips32r2() ||
diff --git a/llvm/lib/Target/Mips/Mips16ISelDAGToDAG.cpp b/llvm/lib/Target/Mips/Mips16ISelDAGToDAG.cpp
--- a/llvm/lib/Target/Mips/Mips16ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/Mips/Mips16ISelDAGToDAG.cpp
@@ -35,7 +35,7 @@
 #define DEBUG_TYPE "mips-isel"
 
 bool Mips16DAGToDAGISel::runOnMachineFunction(MachineFunction &MF) {
-  Subtarget = &static_cast<const MipsSubtarget &>(MF.getSubtarget());
+  Subtarget = &MF.getSubtarget<MipsSubtarget>();
   if (!Subtarget->inMips16Mode())
     return false;
   return MipsDAGToDAGISel::runOnMachineFunction(MF);
diff --git a/llvm/lib/Target/Mips/MipsBranchExpansion.cpp b/llvm/lib/Target/Mips/MipsBranchExpansion.cpp
--- a/llvm/lib/Target/Mips/MipsBranchExpansion.cpp
+++ b/llvm/lib/Target/Mips/MipsBranchExpansion.cpp
@@ -880,7 +880,7 @@
   const TargetMachine &TM = MF.getTarget();
   IsPIC = TM.isPositionIndependent();
   ABI = static_cast<const MipsTargetMachine &>(TM).getABI();
-  STI = &static_cast<const MipsSubtarget &>(MF.getSubtarget());
+  STI = &MF.getSubtarget<MipsSubtarget>();
   TII = static_cast<const MipsInstrInfo *>(STI->getInstrInfo());
 
   if (IsPIC && ABI.IsO32() &&
diff --git a/llvm/lib/Target/Mips/MipsCallLowering.cpp b/llvm/lib/Target/Mips/MipsCallLowering.cpp
--- a/llvm/lib/Target/Mips/MipsCallLowering.cpp
+++ b/llvm/lib/Target/Mips/MipsCallLowering.cpp
@@ -541,8 +541,7 @@
   }
   MIRBuilder.insertInstr(MIB);
   if (MIB->getOpcode() == Mips::JALRPseudo) {
-    const MipsSubtarget &STI =
-        static_cast<const MipsSubtarget &>(MIRBuilder.getMF().getSubtarget());
+    const MipsSubtarget &STI = MIRBuilder.getMF().getSubtarget<MipsSubtarget>();
     MIB.constrainAllUses(MIRBuilder.getTII(), *STI.getRegisterInfo(),
                          *STI.getRegBankInfo());
   }
diff --git a/llvm/lib/Target/Mips/MipsConstantIslandPass.cpp b/llvm/lib/Target/Mips/MipsConstantIslandPass.cpp
--- a/llvm/lib/Target/Mips/MipsConstantIslandPass.cpp
+++ b/llvm/lib/Target/Mips/MipsConstantIslandPass.cpp
@@ -436,7 +436,7 @@
   // FIXME:
   MF = &mf;
   MCP = mf.getConstantPool();
-  STI = &static_cast<const MipsSubtarget &>(mf.getSubtarget());
+  STI = &mf.getSubtarget<MipsSubtarget>();
   LLVM_DEBUG(dbgs() << "constant island machine function "
                     << "\n");
   if (!STI->inMips16Mode() || !MipsSubtarget::useConstantIslands()) {
diff --git a/llvm/lib/Target/Mips/MipsExpandPseudo.cpp b/llvm/lib/Target/Mips/MipsExpandPseudo.cpp
--- a/llvm/lib/Target/Mips/MipsExpandPseudo.cpp
+++ b/llvm/lib/Target/Mips/MipsExpandPseudo.cpp
@@ -892,7 +892,7 @@
 }
 
 bool MipsExpandPseudo::runOnMachineFunction(MachineFunction &MF) {
-  STI = &static_cast<const MipsSubtarget &>(MF.getSubtarget());
+  STI = &MF.getSubtarget<MipsSubtarget>();
   TII = STI->getInstrInfo();
 
   bool Modified = false;
diff --git a/llvm/lib/Target/Mips/MipsISelDAGToDAG.cpp b/llvm/lib/Target/Mips/MipsISelDAGToDAG.cpp
--- a/llvm/lib/Target/Mips/MipsISelDAGToDAG.cpp
+++ b/llvm/lib/Target/Mips/MipsISelDAGToDAG.cpp
@@ -54,7 +54,7 @@
 }
 
 bool MipsDAGToDAGISel::runOnMachineFunction(MachineFunction &MF) {
-  Subtarget = &static_cast<const MipsSubtarget &>(MF.getSubtarget());
+  Subtarget = &MF.getSubtarget<MipsSubtarget>();
   bool Ret = SelectionDAGISel::runOnMachineFunction(MF);
 
   processFunctionAfterISel(MF);
diff --git a/llvm/lib/Target/Mips/MipsLegalizerInfo.cpp b/llvm/lib/Target/Mips/MipsLegalizerInfo.cpp
--- a/llvm/lib/Target/Mips/MipsLegalizerInfo.cpp
+++ b/llvm/lib/Target/Mips/MipsLegalizerInfo.cpp
@@ -503,8 +503,7 @@
 bool MipsLegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper,
                                           MachineInstr &MI) const {
   MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
-  const MipsSubtarget &ST =
-      static_cast<const MipsSubtarget &>(MI.getMF()->getSubtarget());
+  const MipsSubtarget &ST = MI.getMF()->getSubtarget<MipsSubtarget>();
   const MipsInstrInfo &TII = *ST.getInstrInfo();
   const MipsRegisterInfo &TRI = *ST.getRegisterInfo();
   const RegisterBankInfo &RBI = *ST.getRegBankInfo();
diff --git a/llvm/lib/Target/Mips/MipsMachineFunction.cpp b/llvm/lib/Target/Mips/MipsMachineFunction.cpp
--- a/llvm/lib/Target/Mips/MipsMachineFunction.cpp
+++ b/llvm/lib/Target/Mips/MipsMachineFunction.cpp
@@ -29,7 +29,7 @@
 }
 
 static const TargetRegisterClass &getGlobalBaseRegClass(MachineFunction &MF) {
-  auto &STI = static_cast<const MipsSubtarget &>(MF.getSubtarget());
+  auto &STI = MF.getSubtarget<MipsSubtarget>();
   auto &TM = static_cast<const MipsTargetMachine &>(MF.getTarget());
 
   if (STI.inMips16Mode())
diff --git a/llvm/lib/Target/Mips/MipsOptimizePICCall.cpp b/llvm/lib/Target/Mips/MipsOptimizePICCall.cpp
--- a/llvm/lib/Target/Mips/MipsOptimizePICCall.cpp
+++ b/llvm/lib/Target/Mips/MipsOptimizePICCall.cpp
@@ -194,7 +194,7 @@
 // OptimizePICCall methods.
 bool OptimizePICCall::runOnMachineFunction(MachineFunction &F) {
-  if (static_cast<const MipsSubtarget &>(F.getSubtarget()).inMips16Mode())
+  if (F.getSubtarget<MipsSubtarget>().inMips16Mode())
     return false;
 
   // Do a pre-order traversal of the dominator tree.
diff --git a/llvm/lib/Target/Mips/MipsPreLegalizerCombiner.cpp b/llvm/lib/Target/Mips/MipsPreLegalizerCombiner.cpp
--- a/llvm/lib/Target/Mips/MipsPreLegalizerCombiner.cpp
+++ b/llvm/lib/Target/Mips/MipsPreLegalizerCombiner.cpp
@@ -51,8 +51,7 @@
     // Don't attempt to combine non power of 2 loads or unaligned loads when
    // subtarget doesn't support them.
    auto MMO = *MI.memoperands_begin();
-    const MipsSubtarget &STI =
-        static_cast<const MipsSubtarget &>(MI.getMF()->getSubtarget());
+    const MipsSubtarget &STI = MI.getMF()->getSubtarget<MipsSubtarget>();
    if (!isPowerOf2_64(MMO->getSize()))
      return false;
    bool isUnaligned = MMO->getAlign() < MMO->getSize();
diff --git a/llvm/lib/Target/Mips/MipsRegisterBankInfo.cpp b/llvm/lib/Target/Mips/MipsRegisterBankInfo.cpp
--- a/llvm/lib/Target/Mips/MipsRegisterBankInfo.cpp
+++ b/llvm/lib/Target/Mips/MipsRegisterBankInfo.cpp
@@ -153,8 +153,7 @@
   if (MI->getOpcode() == TargetOpcode::G_LOAD ||
       MI->getOpcode() == TargetOpcode::G_STORE) {
     auto MMO = *MI->memoperands_begin();
-    const MipsSubtarget &STI =
-        static_cast<const MipsSubtarget &>(MI->getMF()->getSubtarget());
+    const MipsSubtarget &STI = MI->getMF()->getSubtarget<MipsSubtarget>();
     if (MMO->getSize() == 4 && (!STI.systemSupportsUnalignedAccess() &&
                                 MMO->getAlign() < MMO->getSize()))
       return true;
@@ -398,7 +397,7 @@
 static const MipsRegisterBankInfo::ValueMapping *
 getMSAMapping(const MachineFunction &MF) {
-  assert(static_cast<const MipsSubtarget &>(MF.getSubtarget()).hasMSA() &&
+  assert(MF.getSubtarget<MipsSubtarget>().hasMSA() &&
          "MSA mapping not available on target without MSA.");
   return &Mips::ValueMappings[Mips::MSAIdx];
 }
diff --git a/llvm/lib/Target/Mips/MipsSEFrameLowering.cpp b/llvm/lib/Target/Mips/MipsSEFrameLowering.cpp
--- a/llvm/lib/Target/Mips/MipsSEFrameLowering.cpp
+++ b/llvm/lib/Target/Mips/MipsSEFrameLowering.cpp
@@ -97,7 +97,7 @@
 ExpandPseudo::ExpandPseudo(MachineFunction &MF_)
     : MF(MF_), MRI(MF.getRegInfo()),
-      Subtarget(static_cast<const MipsSubtarget &>(MF.getSubtarget())),
+      Subtarget(MF.getSubtarget<MipsSubtarget>()),
       TII(*static_cast<const MipsSEInstrInfo *>(Subtarget.getInstrInfo())),
       RegInfo(*Subtarget.getRegisterInfo()) {}
diff --git a/llvm/lib/Target/Mips/MipsSEISelDAGToDAG.cpp b/llvm/lib/Target/Mips/MipsSEISelDAGToDAG.cpp
--- a/llvm/lib/Target/Mips/MipsSEISelDAGToDAG.cpp
+++ b/llvm/lib/Target/Mips/MipsSEISelDAGToDAG.cpp
@@ -38,7 +38,7 @@
 #define DEBUG_TYPE "mips-isel"
 
 bool MipsSEDAGToDAGISel::runOnMachineFunction(MachineFunction &MF) {
-  Subtarget = &static_cast<const MipsSubtarget &>(MF.getSubtarget());
+  Subtarget = &MF.getSubtarget<MipsSubtarget>();
   if (Subtarget->inMips16Mode())
     return false;
   return MipsDAGToDAGISel::runOnMachineFunction(MF);
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
--- a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
@@ -42,7 +42,7 @@
 }
 
 bool NVPTXDAGToDAGISel::runOnMachineFunction(MachineFunction &MF) {
-  Subtarget = &static_cast<const NVPTXSubtarget &>(MF.getSubtarget());
+  Subtarget = &MF.getSubtarget<NVPTXSubtarget>();
   return SelectionDAGISel::runOnMachineFunction(MF);
 }
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -1883,8 +1883,7 @@
 /// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
 bool PPC::isVPKUDUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                                SelectionDAG &DAG) {
-  const PPCSubtarget& Subtarget =
-      static_cast<const PPCSubtarget&>(DAG.getSubtarget());
+  const PPCSubtarget &Subtarget = DAG.getSubtarget<PPCSubtarget>();
   if (!Subtarget.hasP8Vector())
     return false;
@@ -6912,8 +6911,7 @@
   if (useSoftFloat())
     report_fatal_error("Soft float support is unimplemented on AIX.");
 
-  const PPCSubtarget &Subtarget =
-      static_cast<const PPCSubtarget &>(DAG.getSubtarget());
+  const PPCSubtarget &Subtarget = DAG.getSubtarget<PPCSubtarget>();
 
   const bool IsPPC64 = Subtarget.isPPC64();
   const unsigned PtrByteSize = IsPPC64 ? 8 : 4;
@@ -7218,8 +7216,7 @@
   if (CFlags.IsPatchPoint)
     report_fatal_error("This call type is unimplemented on AIX.");
 
-  const PPCSubtarget& Subtarget =
-      static_cast<const PPCSubtarget&>(DAG.getSubtarget());
+  const PPCSubtarget &Subtarget = DAG.getSubtarget<PPCSubtarget>();
 
   MachineFunction &MF = DAG.getMachineFunction();
   SmallVector<CCValAssign, 16> ArgLocs;
diff --git a/llvm/lib/Target/SystemZ/SystemZAsmPrinter.cpp b/llvm/lib/Target/SystemZ/SystemZAsmPrinter.cpp
--- a/llvm/lib/Target/SystemZ/SystemZAsmPrinter.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZAsmPrinter.cpp
@@ -820,8 +820,7 @@
 }
 
 void SystemZAsmPrinter::emitFunctionEntryLabel() {
-  const SystemZSubtarget &Subtarget =
-      static_cast<const SystemZSubtarget &>(MF->getSubtarget());
+  const SystemZSubtarget &Subtarget = MF->getSubtarget<SystemZSubtarget>();
 
   if (Subtarget.getTargetTriple().isOSzOS()) {
     MCContext &OutContext = OutStreamer->getContext();
diff --git a/llvm/lib/Target/X86/X86CallingConv.cpp b/llvm/lib/Target/X86/X86CallingConv.cpp
--- a/llvm/lib/Target/X86/X86CallingConv.cpp
+++ b/llvm/lib/Target/X86/X86CallingConv.cpp
@@ -299,7 +299,7 @@
                                ISD::ArgFlagsTy &ArgFlags, CCState &State) {
   const MachineFunction &MF = State.getMachineFunction();
   size_t ArgCount = State.getMachineFunction().getFunction().arg_size();
-  bool Is64Bit = static_cast<const X86Subtarget &>(MF.getSubtarget()).is64Bit();
+  bool Is64Bit = MF.getSubtarget<X86Subtarget>().is64Bit();
   unsigned SlotSize = Is64Bit ? 8 : 4;
   unsigned Offset;
   if (ArgCount == 1 && ValNo == 0) {
diff --git a/llvm/lib/Target/X86/X86ExpandPseudo.cpp b/llvm/lib/Target/X86/X86ExpandPseudo.cpp
--- a/llvm/lib/Target/X86/X86ExpandPseudo.cpp
+++ b/llvm/lib/Target/X86/X86ExpandPseudo.cpp
@@ -730,7 +730,7 @@
 }
 
 bool X86ExpandPseudo::runOnMachineFunction(MachineFunction &MF) {
-  STI = &static_cast<const X86Subtarget &>(MF.getSubtarget());
+  STI = &MF.getSubtarget<X86Subtarget>();
   TII = STI->getInstrInfo();
   TRI = STI->getRegisterInfo();
   X86FI = MF.getInfo<X86MachineFunctionInfo>();
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -26073,8 +26073,7 @@
   // Return EntryEBP + ParentFrameOffset for x64. This adjusts from RSP after
   // prologue to RBP in the parent function.
-  const X86Subtarget &Subtarget =
-      static_cast<const X86Subtarget &>(DAG.getSubtarget());
+  const X86Subtarget &Subtarget = DAG.getSubtarget<X86Subtarget>();
   if (Subtarget.is64Bit())
     return DAG.getNode(ISD::ADD, dl, PtrVT, EntryEBP, ParentFrameOffset);
@@ -39107,8 +39106,7 @@
   SDValue N0 = V.getOperand(0);
   SDValue N1 = V.getOperand(1);
   unsigned Imm = V.getConstantOperandVal(2);
-  const X86Subtarget &Subtarget =
-      static_cast<const X86Subtarget &>(DAG.getSubtarget());
+  const X86Subtarget &Subtarget = DAG.getSubtarget<X86Subtarget>();
   if (!X86::mayFoldLoad(peekThroughOneUseBitcasts(N0), Subtarget) ||
       X86::mayFoldLoad(peekThroughOneUseBitcasts(N1), Subtarget))
     return SDValue();
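
For context on the pattern this patch applies everywhere: the new templated SelectionDAG::getSubtarget<STC>() mirrors the existing MachineFunction::getSubtarget<STC>(), so call sites ask for the concrete subtarget type directly instead of spelling out a static_cast. The following is a minimal standalone sketch of that accessor pattern; TargetSubtargetInfoLike, DemoSubtarget, and DemoMachineFunction are invented stand-ins for illustration, not LLVM classes.

#include <cassert>

// Stand-in for TargetSubtargetInfo: a polymorphic base the target subclasses.
struct TargetSubtargetInfoLike {
  virtual ~TargetSubtargetInfoLike() = default;
};

// Stand-in for a concrete target subtarget with a feature query.
struct DemoSubtarget : TargetSubtargetInfoLike {
  bool hasFullFP16() const { return true; }
};

struct DemoMachineFunction {
  const TargetSubtargetInfoLike *STI;

  // Untyped accessor, as before the patch: callers get the base class.
  const TargetSubtargetInfoLike &getSubtarget() const { return *STI; }

  // Templated accessor, as added by the patch: the downcast is centralized
  // here, so call sites no longer write static_cast themselves.
  template <typename STC> const STC &getSubtarget() const {
    return static_cast<const STC &>(*STI);
  }
};

int main() {
  DemoSubtarget ST;
  DemoMachineFunction MF{&ST};
  // Pre-patch style:
  //   static_cast<const DemoSubtarget &>(MF.getSubtarget()).hasFullFP16()
  // Post-patch style:
  assert(MF.getSubtarget<DemoSubtarget>().hasFullFP16());
  return 0;
}

As in LLVM, the cast is unchecked: the caller is responsible for requesting only the subtarget type its target actually constructed, which is why the change is behavior-preserving (NFC) at every converted call site.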