diff --git a/llvm/lib/Target/AMDGPU/AMDGPUArgumentUsageInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPUArgumentUsageInfo.cpp
--- a/llvm/lib/Target/AMDGPU/AMDGPUArgumentUsageInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUArgumentUsageInfo.cpp
@@ -91,61 +91,59 @@
     AMDGPUFunctionArgInfo::PreloadedValue Value) const {
   switch (Value) {
   case AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_BUFFER: {
-    return std::make_tuple(PrivateSegmentBuffer ? &PrivateSegmentBuffer
-                               : nullptr,
-                           &AMDGPU::SGPR_128RegClass, LLT::fixed_vector(4, 32));
+    return std::tuple(PrivateSegmentBuffer ? &PrivateSegmentBuffer : nullptr,
+                      &AMDGPU::SGPR_128RegClass, LLT::fixed_vector(4, 32));
   }
   case AMDGPUFunctionArgInfo::IMPLICIT_BUFFER_PTR:
-    return std::make_tuple(ImplicitBufferPtr ? &ImplicitBufferPtr : nullptr,
-                           &AMDGPU::SGPR_64RegClass,
-                           LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64));
+    return std::tuple(ImplicitBufferPtr ? &ImplicitBufferPtr : nullptr,
+                      &AMDGPU::SGPR_64RegClass,
+                      LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64));
   case AMDGPUFunctionArgInfo::WORKGROUP_ID_X:
-    return std::make_tuple(WorkGroupIDX ? &WorkGroupIDX : nullptr,
-                           &AMDGPU::SGPR_32RegClass, LLT::scalar(32));
+    return std::tuple(WorkGroupIDX ? &WorkGroupIDX : nullptr,
+                      &AMDGPU::SGPR_32RegClass, LLT::scalar(32));
   case AMDGPUFunctionArgInfo::WORKGROUP_ID_Y:
-    return std::make_tuple(WorkGroupIDY ? &WorkGroupIDY : nullptr,
-                           &AMDGPU::SGPR_32RegClass, LLT::scalar(32));
+    return std::tuple(WorkGroupIDY ? &WorkGroupIDY : nullptr,
+                      &AMDGPU::SGPR_32RegClass, LLT::scalar(32));
   case AMDGPUFunctionArgInfo::WORKGROUP_ID_Z:
-    return std::make_tuple(WorkGroupIDZ ? &WorkGroupIDZ : nullptr,
-                           &AMDGPU::SGPR_32RegClass, LLT::scalar(32));
+    return std::tuple(WorkGroupIDZ ? &WorkGroupIDZ : nullptr,
+                      &AMDGPU::SGPR_32RegClass, LLT::scalar(32));
   case AMDGPUFunctionArgInfo::LDS_KERNEL_ID:
-    return std::make_tuple(LDSKernelId ? &LDSKernelId : nullptr,
-                           &AMDGPU::SGPR_32RegClass, LLT::scalar(32));
+    return std::tuple(LDSKernelId ? &LDSKernelId : nullptr,
+                      &AMDGPU::SGPR_32RegClass, LLT::scalar(32));
   case AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET:
-    return std::make_tuple(
+    return std::tuple(
         PrivateSegmentWaveByteOffset ? &PrivateSegmentWaveByteOffset : nullptr,
         &AMDGPU::SGPR_32RegClass, LLT::scalar(32));
   case AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR:
-    return std::make_tuple(KernargSegmentPtr ? &KernargSegmentPtr : nullptr,
-                           &AMDGPU::SGPR_64RegClass,
-                           LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64));
+    return std::tuple(KernargSegmentPtr ? &KernargSegmentPtr : nullptr,
+                      &AMDGPU::SGPR_64RegClass,
+                      LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64));
   case AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR:
-    return std::make_tuple(ImplicitArgPtr ? &ImplicitArgPtr : nullptr,
-                           &AMDGPU::SGPR_64RegClass,
-                           LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64));
+    return std::tuple(ImplicitArgPtr ? &ImplicitArgPtr : nullptr,
+                      &AMDGPU::SGPR_64RegClass,
+                      LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64));
   case AMDGPUFunctionArgInfo::DISPATCH_ID:
-    return std::make_tuple(DispatchID ? &DispatchID : nullptr,
-                           &AMDGPU::SGPR_64RegClass, LLT::scalar(64));
+    return std::tuple(DispatchID ? &DispatchID : nullptr,
+                      &AMDGPU::SGPR_64RegClass, LLT::scalar(64));
   case AMDGPUFunctionArgInfo::FLAT_SCRATCH_INIT:
-    return std::make_tuple(FlatScratchInit ? &FlatScratchInit : nullptr,
-                           &AMDGPU::SGPR_64RegClass, LLT::scalar(64));
+    return std::tuple(FlatScratchInit ? &FlatScratchInit : nullptr,
+                      &AMDGPU::SGPR_64RegClass, LLT::scalar(64));
   case AMDGPUFunctionArgInfo::DISPATCH_PTR:
-    return std::make_tuple(DispatchPtr ? &DispatchPtr : nullptr,
-                           &AMDGPU::SGPR_64RegClass,
-                           LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64));
+    return std::tuple(DispatchPtr ? &DispatchPtr : nullptr,
+                      &AMDGPU::SGPR_64RegClass,
+                      LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64));
   case AMDGPUFunctionArgInfo::QUEUE_PTR:
-    return std::make_tuple(QueuePtr ? &QueuePtr : nullptr,
-                           &AMDGPU::SGPR_64RegClass,
-                           LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64));
+    return std::tuple(QueuePtr ? &QueuePtr : nullptr, &AMDGPU::SGPR_64RegClass,
+                      LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64));
   case AMDGPUFunctionArgInfo::WORKITEM_ID_X:
-    return std::make_tuple(WorkItemIDX ? &WorkItemIDX : nullptr,
-                           &AMDGPU::VGPR_32RegClass, LLT::scalar(32));
+    return std::tuple(WorkItemIDX ? &WorkItemIDX : nullptr,
+                      &AMDGPU::VGPR_32RegClass, LLT::scalar(32));
   case AMDGPUFunctionArgInfo::WORKITEM_ID_Y:
-    return std::make_tuple(WorkItemIDY ? &WorkItemIDY : nullptr,
-                           &AMDGPU::VGPR_32RegClass, LLT::scalar(32));
+    return std::tuple(WorkItemIDY ? &WorkItemIDY : nullptr,
+                      &AMDGPU::VGPR_32RegClass, LLT::scalar(32));
   case AMDGPUFunctionArgInfo::WORKITEM_ID_Z:
-    return std::make_tuple(WorkItemIDZ ? &WorkItemIDZ : nullptr,
-                           &AMDGPU::VGPR_32RegClass, LLT::scalar(32));
+    return std::tuple(WorkItemIDZ ? &WorkItemIDZ : nullptr,
+                      &AMDGPU::VGPR_32RegClass, LLT::scalar(32));
   }
   llvm_unreachable("unexpected preloaded value type");
 }
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
--- a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
@@ -872,7 +872,7 @@
   Value *Lo = Builder.CreateTrunc(MUL64, I32Ty);
   Value *Hi = Builder.CreateLShr(MUL64, Builder.getInt64(32));
   Hi = Builder.CreateTrunc(Hi, I32Ty);
-  return std::make_pair(Lo, Hi);
+  return std::pair(Lo, Hi);
 }
 
 static Value* getMulHu(IRBuilder<> &Builder, Value *LHS, Value *RHS) {
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUGlobalISelUtils.cpp b/llvm/lib/Target/AMDGPU/AMDGPUGlobalISelUtils.cpp
--- a/llvm/lib/Target/AMDGPU/AMDGPUGlobalISelUtils.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUGlobalISelUtils.cpp
@@ -28,24 +28,24 @@
     else
       Offset = Op.getCImm()->getZExtValue();
 
-    return std::make_pair(Register(), Offset);
+    return std::pair(Register(), Offset);
   }
 
   int64_t Offset;
   if (Def->getOpcode() == TargetOpcode::G_ADD) {
     // TODO: Handle G_OR used for add case
     if (mi_match(Def->getOperand(2).getReg(), MRI, m_ICst(Offset)))
-      return std::make_pair(Def->getOperand(1).getReg(), Offset);
+      return std::pair(Def->getOperand(1).getReg(), Offset);
 
     // FIXME: matcher should ignore copies
     if (mi_match(Def->getOperand(2).getReg(), MRI, m_Copy(m_ICst(Offset))))
-      return std::make_pair(Def->getOperand(1).getReg(), Offset);
+      return std::pair(Def->getOperand(1).getReg(), Offset);
   }
 
   Register Base;
   if (KnownBits && mi_match(Reg, MRI, m_GOr(m_Reg(Base), m_ICst(Offset))) &&
       KnownBits->maskedValueIsZero(Base, APInt(32, Offset)))
-    return std::make_pair(Base, Offset);
+    return std::pair(Base, Offset);
 
   // Handle G_PTRTOINT (G_PTR_ADD base, const) case
   if (Def->getOpcode() == TargetOpcode::G_PTRTOINT) {
@@ -54,14 +54,14 @@
                  m_GPtrAdd(m_MInstr(Base), m_ICst(Offset)))) {
       // If Base was int converted to pointer, simply return int and offset.
       if (Base->getOpcode() == TargetOpcode::G_INTTOPTR)
-        return std::make_pair(Base->getOperand(1).getReg(), Offset);
+        return std::pair(Base->getOperand(1).getReg(), Offset);
 
       // Register returned here will be of pointer type.
-      return std::make_pair(Base->getOperand(0).getReg(), Offset);
+      return std::pair(Base->getOperand(0).getReg(), Offset);
     }
   }
 
-  return std::make_pair(Reg, 0);
+  return std::pair(Reg, 0);
 }
 
 bool AMDGPU::hasAtomicFaddRtnForTy(const GCNSubtarget &Subtarget,
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUHSAMetadataStreamer.cpp b/llvm/lib/Target/AMDGPU/AMDGPUHSAMetadataStreamer.cpp
--- a/llvm/lib/Target/AMDGPU/AMDGPUHSAMetadataStreamer.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUHSAMetadataStreamer.cpp
@@ -33,7 +33,7 @@
   if (!ArgAlign)
     ArgAlign = DL.getABITypeAlign(Ty);
 
-  return std::make_pair(Ty, *ArgAlign);
+  return std::pair(Ty, *ArgAlign);
 }
 
 namespace llvm {
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUIGroupLP.cpp b/llvm/lib/Target/AMDGPU/AMDGPUIGroupLP.cpp
--- a/llvm/lib/Target/AMDGPU/AMDGPUIGroupLP.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUIGroupLP.cpp
@@ -347,7 +347,7 @@
     for (auto &SUsToCandSGs : SyncInstrMap.second) {
       if (PipelineInstrs[PipelineIDx].size() == 0) {
         PipelineInstrs[PipelineIDx].push_back(
-            std::make_pair(SUsToCandSGs.first, SUsToCandSGs.second));
+            std::pair(SUsToCandSGs.first, SUsToCandSGs.second));
         continue;
       }
       auto SortPosition = PipelineInstrs[PipelineIDx].begin();
@@ -357,8 +357,7 @@
              SUsToCandSGs.first->NodeNum > SortPosition->first->NodeNum)
         ++SortPosition;
       PipelineInstrs[PipelineIDx].insert(
-          SortPosition,
-          std::make_pair(SUsToCandSGs.first, SUsToCandSGs.second));
+          SortPosition, std::pair(SUsToCandSGs.first, SUsToCandSGs.second));
     }
     --PipelineIDx;
   }
@@ -508,15 +507,15 @@
 
       if (UseCostHeur) {
         if (Match->isFull()) {
-          ReadyList.push_back(std::make_pair(*I, MissPenalty));
+          ReadyList.push_back(std::pair(*I, MissPenalty));
           continue;
         }
 
         int TempCost = addEdges(SyncPipeline, CurrSU.first, CandSGID, AddedEdges);
-        ReadyList.push_back(std::make_pair(*I, TempCost));
+        ReadyList.push_back(std::pair(*I, TempCost));
         removeEdges(AddedEdges);
       } else
-        ReadyList.push_back(std::make_pair(*I, -1));
+        ReadyList.push_back(std::pair(*I, -1));
     }
 
     if (UseCostHeur) {
@@ -913,7 +912,7 @@
   // the A->B edge impossible, otherwise it returns true;
   bool Added = tryAddEdge(A, B);
   if (Added)
-    AddedEdges.push_back(std::make_pair(A, B));
+    AddedEdges.push_back(std::pair(A, B));
   else
     ++MissedEdges;
 }
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
@@ -1358,7 +1358,7 @@
   // use constant 0 for soffset. This value must be retained until
   // frame elimination and eliminateFrameIndex will choose the appropriate
   // frame register if need be.
-  return std::make_pair(TFI, CurDAG->getTargetConstant(0, DL, MVT::i32));
+  return std::pair(TFI, CurDAG->getTargetConstant(0, DL, MVT::i32));
 }
 
 bool AMDGPUDAGToDAGISel::SelectMUBUFScratchOffen(SDNode *Parent,
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -1473,7 +1473,7 @@
   SDValue Lo = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, Zero);
   SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, One);
 
-  return std::make_pair(Lo, Hi);
+  return std::pair(Lo, Hi);
 }
 
 SDValue AMDGPUTargetLowering::getLoHalf64(SDValue Op, SelectionDAG &DAG) const {
@@ -1505,7 +1505,7 @@
   HiVT = NumElts - LoNumElts == 1
              ? EltVT
             : EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts - LoNumElts);
-  return std::make_pair(LoVT, HiVT);
+  return std::pair(LoVT, HiVT);
 }
 
 // Split a vector value into two parts of types LoVT and HiVT. HiVT could be
@@ -1523,7 +1523,7 @@
   SDValue Hi = DAG.getNode(
       HiVT.isVector() ? ISD::EXTRACT_SUBVECTOR : ISD::EXTRACT_VECTOR_ELT, DL,
       HiVT, N, DAG.getVectorIdxConstant(LoVT.getVectorNumElements(), DL));
 
-  return std::make_pair(Lo, Hi);
+  return std::pair(Lo, Hi);
 }
 
 SDValue AMDGPUTargetLowering::SplitVectorLoad(const SDValue Op,
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
@@ -2914,8 +2914,8 @@
   // Skip out of bounds offsets, or else we would end up using an undefined
   // register.
   if (static_cast<unsigned>(Offset) >= SubRegs.size())
-    return std::make_pair(IdxReg, SubRegs[0]);
-  return std::make_pair(IdxBaseReg, SubRegs[Offset]);
+    return std::pair(IdxReg, SubRegs[0]);
+  return std::pair(IdxBaseReg, SubRegs[Offset]);
 }
 
 bool AMDGPUInstructionSelector::selectG_EXTRACT_VECTOR_ELT(
@@ -3526,7 +3526,7 @@
   if (OpSel)
     Mods |= SISrcMods::OP_SEL_0;
 
-  return std::make_pair(Src, Mods);
+  return std::pair(Src, Mods);
 }
 
 Register AMDGPUInstructionSelector::copyToVGPRIfSrcFolded(
@@ -3659,7 +3659,7 @@
   // Packed instructions do not have abs modifiers.
   Mods |= SISrcMods::OP_SEL_1;
 
-  return std::make_pair(Src, Mods);
+  return std::pair(Src, Mods);
 }
 
 InstructionSelector::ComplexRendererFns
@@ -3894,7 +3894,7 @@
                                               uint64_t FlatVariant) const {
   MachineInstr *MI = Root.getParent();
 
-  auto Default = std::make_pair(Root.getReg(), 0);
+  auto Default = std::pair(Root.getReg(), 0);
   if (!STI.hasFlatInstOffsets())
     return Default;
@@ -3910,7 +3910,7 @@
   if (!TII.isLegalFLATOffset(ConstOffset, AddrSpace, FlatVariant))
     return Default;
 
-  return std::make_pair(PtrBase, ConstOffset);
+  return std::pair(PtrBase, ConstOffset);
 }
 
 InstructionSelector::ComplexRendererFns
@@ -4374,7 +4374,7 @@
 AMDGPUInstructionSelector::selectDS1Addr1OffsetImpl(MachineOperand &Root) const {
   const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
   if (!RootDef)
-    return std::make_pair(Root.getReg(), 0);
+    return std::pair(Root.getReg(), 0);
 
   int64_t ConstAddr = 0;
@@ -4386,7 +4386,7 @@
     if (Offset) {
       if (isDSOffsetLegal(PtrBase, Offset)) {
         // (add n0, c0)
-        return std::make_pair(PtrBase, Offset);
+        return std::pair(PtrBase, Offset);
       }
     } else if (RootDef->getOpcode() == AMDGPU::G_SUB) {
       // TODO
@@ -4397,7 +4397,7 @@
   }
 
-  return std::make_pair(Root.getReg(), 0);
+  return std::pair(Root.getReg(), 0);
 }
 
 InstructionSelector::ComplexRendererFns
@@ -4439,7 +4439,7 @@
                                                   unsigned Size) const {
   const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
   if (!RootDef)
-    return std::make_pair(Root.getReg(), 0);
+    return std::pair(Root.getReg(), 0);
 
   int64_t ConstAddr = 0;
@@ -4453,7 +4453,7 @@
       int64_t OffsetValue1 = Offset + Size;
       if (isDSOffset2Legal(PtrBase, OffsetValue0, OffsetValue1, Size)) {
         // (add n0, c0)
-        return std::make_pair(PtrBase, OffsetValue0 / Size);
+        return std::pair(PtrBase, OffsetValue0 / Size);
       }
     } else if (RootDef->getOpcode() == AMDGPU::G_SUB) {
       // TODO
@@ -4463,7 +4463,7 @@
     }
   }
 
-  return std::make_pair(Root.getReg(), 0);
+  return std::pair(Root.getReg(), 0);
 }
 
 /// If \p Root is a G_PTR_ADD with a G_CONSTANT on the right hand side, return
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -96,8 +96,8 @@
   return [=](const LegalityQuery &Query) {
     const LLT Ty = Query.Types[TypeIdx];
     const LLT EltTy = Ty.getElementType();
-    return std::make_pair(TypeIdx,
-                          LLT::fixed_vector(Ty.getNumElements() + 1, EltTy));
+    return std::pair(TypeIdx,
+                     LLT::fixed_vector(Ty.getNumElements() + 1, EltTy));
   };
 }
@@ -108,9 +108,8 @@
     unsigned Size = Ty.getSizeInBits();
     unsigned Pieces = (Size + 63) / 64;
     unsigned NewNumElts = (Ty.getNumElements() + 1) / Pieces;
-    return std::make_pair(
-        TypeIdx,
-        LLT::scalarOrVector(ElementCount::getFixed(NewNumElts), EltTy));
+    return std::pair(TypeIdx, LLT::scalarOrVector(
+                                  ElementCount::getFixed(NewNumElts), EltTy));
   };
 }
@@ -128,7 +127,7 @@
     assert(EltSize < 32);
 
     const int NewNumElts = (32 * NextMul32 + EltSize - 1) / EltSize;
-    return std::make_pair(TypeIdx, LLT::fixed_vector(NewNumElts, EltTy));
+    return std::pair(TypeIdx, LLT::fixed_vector(NewNumElts, EltTy));
   };
 }
@@ -147,7 +146,7 @@
 static LegalizeMutation bitcastToRegisterType(unsigned TypeIdx) {
   return [=](const LegalityQuery &Query) {
     const LLT Ty = Query.Types[TypeIdx];
-    return std::make_pair(TypeIdx, getBitcastRegisterType(Ty));
+    return std::pair(TypeIdx, getBitcastRegisterType(Ty));
   };
 }
@@ -156,7 +155,7 @@
     const LLT Ty = Query.Types[TypeIdx];
     unsigned Size = Ty.getSizeInBits();
     assert(Size % 32 == 0);
-    return std::make_pair(
+    return std::pair(
         TypeIdx, LLT::scalarOrVector(ElementCount::getFixed(Size / 32), 32));
   };
 }
@@ -1069,36 +1068,35 @@
   }
 
   getActionDefinitionsBuilder(G_INTTOPTR)
-    // List the common cases
-    .legalForCartesianProduct(AddrSpaces64, {S64})
-    .legalForCartesianProduct(AddrSpaces32, {S32})
-    .scalarize(0)
-    // Accept any address space as long as the size matches
-    .legalIf(sameSize(0, 1))
-    .widenScalarIf(smallerThan(1, 0),
-      [](const LegalityQuery &Query) {
-        return std::make_pair(1, LLT::scalar(Query.Types[0].getSizeInBits()));
-      })
-    .narrowScalarIf(largerThan(1, 0),
-      [](const LegalityQuery &Query) {
-        return std::make_pair(1, LLT::scalar(Query.Types[0].getSizeInBits()));
+      // List the common cases
+      .legalForCartesianProduct(AddrSpaces64, {S64})
+      .legalForCartesianProduct(AddrSpaces32, {S32})
+      .scalarize(0)
+      // Accept any address space as long as the size matches
+      .legalIf(sameSize(0, 1))
+      .widenScalarIf(smallerThan(1, 0),
+                     [](const LegalityQuery &Query) {
+                       return std::pair(
+                           1, LLT::scalar(Query.Types[0].getSizeInBits()));
+                     })
+      .narrowScalarIf(largerThan(1, 0), [](const LegalityQuery &Query) {
+        return std::pair(1, LLT::scalar(Query.Types[0].getSizeInBits()));
      });
 
   getActionDefinitionsBuilder(G_PTRTOINT)
-    // List the common cases
-    .legalForCartesianProduct(AddrSpaces64, {S64})
-    .legalForCartesianProduct(AddrSpaces32, {S32})
-    .scalarize(0)
-    // Accept any address space as long as the size matches
-    .legalIf(sameSize(0, 1))
-    .widenScalarIf(smallerThan(0, 1),
-      [](const LegalityQuery &Query) {
-        return std::make_pair(0, LLT::scalar(Query.Types[1].getSizeInBits()));
-      })
-    .narrowScalarIf(
-      largerThan(0, 1),
-      [](const LegalityQuery &Query) {
-        return std::make_pair(0, LLT::scalar(Query.Types[1].getSizeInBits()));
+      // List the common cases
+      .legalForCartesianProduct(AddrSpaces64, {S64})
+      .legalForCartesianProduct(AddrSpaces32, {S32})
+      .scalarize(0)
+      // Accept any address space as long as the size matches
+      .legalIf(sameSize(0, 1))
+      .widenScalarIf(smallerThan(0, 1),
+                     [](const LegalityQuery &Query) {
+                       return std::pair(
+                           0, LLT::scalar(Query.Types[1].getSizeInBits()));
+                     })
+      .narrowScalarIf(largerThan(0, 1), [](const LegalityQuery &Query) {
+        return std::pair(0, LLT::scalar(Query.Types[1].getSizeInBits()));
      });
 
   getActionDefinitionsBuilder(G_ADDRSPACE_CAST)
@@ -1223,16 +1221,16 @@
 
           // Split extloads.
           if (DstSize > MemSize)
-            return std::make_pair(0, LLT::scalar(MemSize));
+            return std::pair(0, LLT::scalar(MemSize));
 
           unsigned MaxSize = maxSizeForAddrSpace(ST, PtrTy.getAddressSpace(),
                                                  Op == G_LOAD);
           if (MemSize > MaxSize)
-            return std::make_pair(0, LLT::scalar(MaxSize));
+            return std::pair(0, LLT::scalar(MaxSize));
 
           uint64_t Align = Query.MMODescrs[0].AlignInBits;
-          return std::make_pair(0, LLT::scalar(Align));
+          return std::pair(0, LLT::scalar(Align));
         })
       .fewerElementsIf(
           [=](const LegalityQuery &Query) -> bool {
@@ -1259,7 +1257,7 @@
             unsigned EltSize = EltTy.getSizeInBits();
 
             if (MaxSize % EltSize == 0) {
-              return std::make_pair(
+              return std::pair(
                   0, LLT::scalarOrVector(
                          ElementCount::getFixed(MaxSize / EltSize), EltTy));
             }
@@ -1270,15 +1268,15 @@
             // The scalars will need to be re-legalized.
            if (NumPieces == 1 || NumPieces >= NumElts ||
                NumElts % NumPieces != 0)
-              return std::make_pair(0, EltTy);
+              return std::pair(0, EltTy);
 
-            return std::make_pair(
-                0, LLT::fixed_vector(NumElts / NumPieces, EltTy));
+            return std::pair(0,
+                             LLT::fixed_vector(NumElts / NumPieces, EltTy));
           }
 
           // FIXME: We could probably handle weird extending loads better.
           if (DstTy.getSizeInBits() > MemSize)
-            return std::make_pair(0, EltTy);
+            return std::pair(0, EltTy);
 
           unsigned EltSize = EltTy.getSizeInBits();
           unsigned DstSize = DstTy.getSizeInBits();
@@ -1287,13 +1285,13 @@
            // to the widest type. TODO: Account for alignment. As-is it
            // should be OK, since the new parts will be further legalized.
            unsigned FloorSize = PowerOf2Floor(DstSize);
-            return std::make_pair(
+            return std::pair(
                 0, LLT::scalarOrVector(
                        ElementCount::getFixed(FloorSize / EltSize), EltTy));
           }
 
           // May need relegalization for the scalars.
-          return std::make_pair(0, EltTy);
+          return std::pair(0, EltTy);
         })
       .minScalar(0, S32)
      .narrowScalarIf(isWideScalarExtLoadTruncStore(0), changeTo(0, S32))
@@ -1472,7 +1470,7 @@
         const unsigned VecSize = VecTy.getSizeInBits();
 
         const unsigned TargetEltSize = DstEltSize % 64 == 0 ? 64 : 32;
-        return std::make_pair(
+        return std::pair(
             VecTypeIdx,
             LLT::fixed_vector(VecSize / TargetEltSize, TargetEltSize));
       })
@@ -1638,7 +1636,7 @@
           if (RoundedTo < NewSizeInBits)
             NewSizeInBits = RoundedTo;
         }
-        return std::make_pair(BigTyIdx, LLT::scalar(NewSizeInBits));
+        return std::pair(BigTyIdx, LLT::scalar(NewSizeInBits));
       })
       // Any vectors left are the wrong size. Scalarize them.
       .scalarize(0)
@@ -4274,7 +4272,7 @@
   if (!BaseReg)
     BaseReg = B.buildConstant(S32, 0).getReg(0);
 
-  return std::make_pair(BaseReg, ImmOffset);
+  return std::pair(BaseReg, ImmOffset);
 }
 
 /// Update \p MMO based on the offset inputs to a raw/struct buffer intrinsic.
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUMachineCFGStructurizer.cpp b/llvm/lib/Target/AMDGPU/AMDGPUMachineCFGStructurizer.cpp
--- a/llvm/lib/Target/AMDGPU/AMDGPUMachineCFGStructurizer.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUMachineCFGStructurizer.cpp
@@ -1776,7 +1776,7 @@
 
   for (MachineBasicBlock *Pred : StartMBB->predecessors())
     if (Pred != EndMBB)
-      Succs.insert(std::make_pair(Pred, StartMBB));
+      Succs.insert(std::pair(Pred, StartMBB));
 
   for (auto SI : Succs) {
     std::pair<MachineBasicBlock *, MachineBasicBlock *> Edge = SI;
@@ -2072,8 +2072,7 @@
       MachineBasicBlock *SourceMBB = Source.second;
       MachineOperand *Def = &(*(MRI->def_begin(SourceReg)));
       if (Def->getParent()->getParent() != MBB) {
-        ElimiatedSources.push_back(
-            std::make_tuple(DestReg, SourceReg, SourceMBB));
+        ElimiatedSources.push_back(std::tuple(DestReg, SourceReg, SourceMBB));
       }
     }
   }
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUMachineFunction.cpp b/llvm/lib/Target/AMDGPU/AMDGPUMachineFunction.cpp
--- a/llvm/lib/Target/AMDGPU/AMDGPUMachineFunction.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUMachineFunction.cpp
@@ -51,7 +51,7 @@
 unsigned AMDGPUMachineFunction::allocateLDSGlobal(const DataLayout &DL,
                                                   const GlobalVariable &GV,
                                                   Align Trailing) {
-  auto Entry = LocalMemoryObjects.insert(std::make_pair(&GV, 0));
+  auto Entry = LocalMemoryObjects.insert(std::pair(&GV, 0));
   if (!Entry.second)
     return Entry.first->second;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp b/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
--- a/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
@@ -221,7 +221,7 @@
     ST.makeLIDRangeMetadata(LocalSizeY);
     ST.makeLIDRangeMetadata(LocalSizeZ);
 
-    return std::make_pair(LocalSizeY, LocalSizeZ);
+    return std::pair(LocalSizeY, LocalSizeZ);
   }
 
   // We must read the size out of the dispatch pointer.
@@ -290,7 +290,7 @@
   // Extract y component. Upper half of LoadZU should be zero already.
   Value *Y = Builder.CreateLShr(LoadXY, 16);
 
-  return std::make_pair(Y, LoadZU);
+  return std::pair(Y, LoadZU);
 }
 
 Value *AMDGPUPromoteAllocaImpl::getWorkitemID(IRBuilder<> &Builder,
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUPropagateAttributes.cpp b/llvm/lib/Target/AMDGPU/AMDGPUPropagateAttributes.cpp
--- a/llvm/lib/Target/AMDGPU/AMDGPUPropagateAttributes.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUPropagateAttributes.cpp
@@ -285,7 +285,7 @@
       NewRoots.insert(NewF);
     }
 
-    ToReplace.push_back(std::make_pair(CI, NewF));
+    ToReplace.push_back(std::pair(CI, NewF));
     Replaced.insert(&F);
 
     Changed = true;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
--- a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
@@ -914,7 +914,7 @@
       Op.setReg(CurrentLaneReg);
 
      // Make sure we don't re-process this register again.
-      WaterfalledRegMap.insert(std::make_pair(OldReg, Op.getReg()));
+      WaterfalledRegMap.insert(std::pair(OldReg, Op.getReg()));
     }
   }
@@ -1733,17 +1733,17 @@
   if (ExtOpcode == TargetOpcode::G_SEXT) {
     auto ExtLo = B.buildSExtInReg(S32, Bitcast, 16);
     auto ShiftHi = B.buildAShr(S32, Bitcast, B.buildConstant(S32, 16));
-    return std::make_pair(ExtLo.getReg(0), ShiftHi.getReg(0));
+    return std::pair(ExtLo.getReg(0), ShiftHi.getReg(0));
   }
 
   auto ShiftHi = B.buildLShr(S32, Bitcast, B.buildConstant(S32, 16));
   if (ExtOpcode == TargetOpcode::G_ZEXT) {
     auto ExtLo = B.buildAnd(S32, Bitcast, B.buildConstant(S32, 0xffff));
-    return std::make_pair(ExtLo.getReg(0), ShiftHi.getReg(0));
+    return std::pair(ExtLo.getReg(0), ShiftHi.getReg(0));
   }
 
   assert(ExtOpcode == TargetOpcode::G_ANYEXT);
-  return std::make_pair(Bitcast.getReg(0), ShiftHi.getReg(0));
+  return std::pair(Bitcast.getReg(0), ShiftHi.getReg(0));
 }
 
 // For cases where only a single copy is inserted for matching register banks.
@@ -1789,14 +1789,14 @@
 getBaseWithConstantOffset(MachineRegisterInfo &MRI, Register Reg) {
   int64_t Const;
   if (mi_match(Reg, MRI, m_ICst(Const)))
-    return std::make_pair(Register(), Const);
+    return std::pair(Register(), Const);
 
   Register Base;
   if (mi_match(Reg, MRI, m_GAdd(m_Reg(Base), m_ICst(Const))))
-    return std::make_pair(Base, Const);
+    return std::pair(Base, Const);
 
   // TODO: Handle G_OR used for add case
-  return std::make_pair(Reg, 0);
+  return std::pair(Reg, 0);
 }
 
 std::pair<Register, unsigned>
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUReplaceLDSUseWithPointer.cpp b/llvm/lib/Target/AMDGPU/AMDGPUReplaceLDSUseWithPointer.cpp
--- a/llvm/lib/Target/AMDGPU/AMDGPUReplaceLDSUseWithPointer.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUReplaceLDSUseWithPointer.cpp
@@ -171,7 +171,7 @@
   // Insert new global LDS pointer which points to LDS.
   GlobalVariable *createLDSPointer(GlobalVariable *GV) {
     // LDS pointer which points to LDS is already created? Return it.
-    auto PointerEntry = LDSToPointer.insert(std::make_pair(GV, nullptr));
+    auto PointerEntry = LDSToPointer.insert(std::pair(GV, nullptr));
     if (!PointerEntry.second)
       return PointerEntry.first->second;
@@ -199,7 +199,7 @@
   BasicBlock *activateLaneZero(Function *K) {
     // If the entry basic block of kernel K is already split, then return
     // newly created basic block.
-    auto BasicBlockEntry = KernelToInitBB.insert(std::make_pair(K, nullptr));
+    auto BasicBlockEntry = KernelToInitBB.insert(std::pair(K, nullptr));
     if (!BasicBlockEntry.second)
       return BasicBlockEntry.first->second;
@@ -227,7 +227,7 @@
                          GlobalVariable *LDSPointer) {
     // If LDS pointer is already initialized within K, then nothing to do.
     auto PointerEntry = KernelToLDSPointers.insert(
-        std::make_pair(K, SmallPtrSet<GlobalVariable *, 8>()));
+        std::pair(K, SmallPtrSet<GlobalVariable *, 8>()));
     if (!PointerEntry.second)
       if (PointerEntry.first->second.contains(LDSPointer))
         return;
@@ -297,10 +297,10 @@
    // If the instruction which replaces LDS within F is already created, then
    // return it.
     auto LDSEntry = FunctionToLDSToReplaceInst.insert(
-        std::make_pair(F, DenseMap<GlobalVariable *, Instruction *>()));
+        std::pair(F, DenseMap<GlobalVariable *, Instruction *>()));
     if (!LDSEntry.second) {
       auto ReplaceInstEntry =
-          LDSEntry.first->second.insert(std::make_pair(GV, nullptr));
+          LDSEntry.first->second.insert(std::pair(GV, nullptr));
       if (!ReplaceInstEntry.second)
         return ReplaceInstEntry.first->second;
     }
@@ -559,7 +559,7 @@
       }
     }
 
-    FunctionToInsts.insert(std::make_pair(F, SmallPtrSet<Instruction *, 8>()));
+    FunctionToInsts.insert(std::pair(F, SmallPtrSet<Instruction *, 8>()));
     FunctionToInsts[F].insert(I);
   }
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUResourceUsageAnalysis.cpp b/llvm/lib/Target/AMDGPU/AMDGPUResourceUsageAnalysis.cpp
--- a/llvm/lib/Target/AMDGPU/AMDGPUResourceUsageAnalysis.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUResourceUsageAnalysis.cpp
@@ -126,8 +126,8 @@
     MachineFunction *MF = MMI.getMachineFunction(*F);
     assert(MF && "function must have been generated already");
 
-    auto CI = CallGraphResourceInfo.insert(
-        std::make_pair(F, SIFunctionResourceInfo()));
+    auto CI =
+        CallGraphResourceInfo.insert(std::pair(F, SIFunctionResourceInfo()));
     SIFunctionResourceInfo &Info = CI.first->second;
     assert(CI.second && "should only be called once per function");
     Info = analyzeResourceUsage(*MF, TM);
@@ -142,8 +142,8 @@
     if (!F || F->isDeclaration())
       continue;
 
-    auto CI = CallGraphResourceInfo.insert(
-        std::make_pair(F, SIFunctionResourceInfo()));
+    auto CI =
+        CallGraphResourceInfo.insert(std::pair(F, SIFunctionResourceInfo()));
     if (!CI.second) // Skip already visited functions
       continue;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURewriteOutArguments.cpp b/llvm/lib/Target/AMDGPU/AMDGPURewriteOutArguments.cpp
--- a/llvm/lib/Target/AMDGPU/AMDGPURewriteOutArguments.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURewriteOutArguments.cpp
@@ -28,7 +28,7 @@
 /// into something like this:
 ///
 /// std::pair<int, int> foo(int a, int b) {
-///   return std::make_pair(a + b, bar());
+///   return std::pair(a + b, bar());
 /// }
 ///
 /// Typically the incoming pointer is a simple alloca for a temporary variable
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUSubtarget.cpp b/llvm/lib/Target/AMDGPU/AMDGPUSubtarget.cpp
--- a/llvm/lib/Target/AMDGPU/AMDGPUSubtarget.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUSubtarget.cpp
@@ -367,9 +367,9 @@
   case CallingConv::AMDGPU_ES:
   case CallingConv::AMDGPU_GS:
   case CallingConv::AMDGPU_PS:
-    return std::make_pair(1, getWavefrontSize());
+    return std::pair(1, getWavefrontSize());
   default:
-    return std::make_pair(1u, getMaxFlatWorkGroupSize());
+    return std::pair(1u, getMaxFlatWorkGroupSize());
   }
 }
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
@@ -761,13 +761,13 @@
   if (auto *II = dyn_cast<IntrinsicInst>(V)) {
     switch (II->getIntrinsicID()) {
     case Intrinsic::amdgcn_is_shared:
-      return std::make_pair(II->getArgOperand(0), AMDGPUAS::LOCAL_ADDRESS);
+      return std::pair(II->getArgOperand(0), AMDGPUAS::LOCAL_ADDRESS);
     case Intrinsic::amdgcn_is_private:
-      return std::make_pair(II->getArgOperand(0), AMDGPUAS::PRIVATE_ADDRESS);
+      return std::pair(II->getArgOperand(0), AMDGPUAS::PRIVATE_ADDRESS);
     default:
       break;
     }
-    return std::make_pair(nullptr, -1);
+    return std::pair(nullptr, -1);
   }
 
   // Check the global pointer predication based on
   // (!is_share(p) && !is_private(p)). Note that logic 'and' is commutative and
@@ -778,9 +778,9 @@
           m_c_And(m_Not(m_Intrinsic<Intrinsic::amdgcn_is_shared>(m_Value(Ptr))),
                   m_Not(m_Intrinsic<Intrinsic::amdgcn_is_private>(
                       m_Deferred(Ptr))))))
-    return std::make_pair(Ptr, AMDGPUAS::GLOBAL_ADDRESS);
+    return std::pair(Ptr, AMDGPUAS::GLOBAL_ADDRESS);
 
-  return std::make_pair(nullptr, -1);
+  return std::pair(nullptr, -1);
 }
 
 unsigned
diff --git a/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp b/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
--- a/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
+++ b/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
@@ -5492,10 +5492,10 @@
   const char *AssemblerDirectiveEnd;
   std::tie(AssemblerDirectiveBegin, AssemblerDirectiveEnd) =
       isHsaAbiVersion3AndAbove(&getSTI())
-          ? std::make_tuple(HSAMD::V3::AssemblerDirectiveBegin,
-                            HSAMD::V3::AssemblerDirectiveEnd)
-          : std::make_tuple(HSAMD::AssemblerDirectiveBegin,
-                            HSAMD::AssemblerDirectiveEnd);
+          ? std::tuple(HSAMD::V3::AssemblerDirectiveBegin,
+                       HSAMD::V3::AssemblerDirectiveEnd)
+          : std::tuple(HSAMD::AssemblerDirectiveBegin,
+                       HSAMD::AssemblerDirectiveEnd);
 
   if (getSTI().getTargetTriple().getOS() != Triple::AMDHSA) {
     return Error(getLoc(),
diff --git a/llvm/lib/Target/AMDGPU/GCNNSAReassign.cpp b/llvm/lib/Target/AMDGPU/GCNNSAReassign.cpp
--- a/llvm/lib/Target/AMDGPU/GCNNSAReassign.cpp
+++ b/llvm/lib/Target/AMDGPU/GCNNSAReassign.cpp
@@ -259,10 +259,10 @@
     default:
       continue;
     case NSA_Status::CONTIGUOUS:
-      Candidates.push_back(std::make_pair(&MI, true));
+      Candidates.push_back(std::pair(&MI, true));
      break;
    case NSA_Status::NON_CONTIGUOUS:
-      Candidates.push_back(std::make_pair(&MI, false));
+      Candidates.push_back(std::pair(&MI, false));
      ++NumNSAInstructions;
      break;
    }
diff --git a/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp b/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp
--- a/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp
+++ b/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp
@@ -468,7 +468,7 @@
 void GCNScheduleDAGMILive::schedule() {
   // Collect all scheduling regions. The actual scheduling is performed in
   // GCNScheduleDAGMILive::finalizeSchedule.
-  Regions.push_back(std::make_pair(RegionBegin, RegionEnd));
+  Regions.push_back(std::pair(RegionBegin, RegionEnd));
 }
 
 GCNRegPressure
@@ -841,7 +841,7 @@
 }
 
 void GCNSchedStage::finalizeGCNRegion() {
-  DAG.Regions[RegionIdx] = std::make_pair(DAG.RegionBegin, DAG.RegionEnd);
+  DAG.Regions[RegionIdx] = std::pair(DAG.RegionBegin, DAG.RegionEnd);
   DAG.RescheduleRegions[RegionIdx] = false;
   if (S.HasHighPressure)
     DAG.RegionsWithHighRP[RegionIdx] = true;
@@ -1065,7 +1065,7 @@
   // RegionBegin and RegionEnd if needed.
   DAG.placeDebugValues();
 
-  DAG.Regions[RegionIdx] = std::make_pair(DAG.RegionBegin, DAG.RegionEnd);
+  DAG.Regions[RegionIdx] = std::pair(DAG.RegionBegin, DAG.RegionEnd);
 }
 
 void PreRARematStage::collectRematerializableInstructions() {
@@ -1326,22 +1326,21 @@
      // MI is in a region with size 1, after removing, the region will be
      // size 0, set RegionBegin and RegionEnd to pass end of block iterator.
       RegionBoundaries[I] =
-          std::make_pair(MI->getParent()->end(), MI->getParent()->end());
+          std::pair(MI->getParent()->end(), MI->getParent()->end());
       return;
     }
     if (MI == RegionBoundaries[I].first) {
       if (Removing)
         RegionBoundaries[I] =
-            std::make_pair(std::next(MI), RegionBoundaries[I].second);
+            std::pair(std::next(MI), RegionBoundaries[I].second);
       else
         // Inserted NewMI in front of region, set new RegionBegin to NewMI
-        RegionBoundaries[I] = std::make_pair(MachineBasicBlock::iterator(NewMI),
-                                             RegionBoundaries[I].second);
+        RegionBoundaries[I] = std::pair(MachineBasicBlock::iterator(NewMI),
+                                        RegionBoundaries[I].second);
       return;
     }
     if (Removing && MI == RegionBoundaries[I].second) {
-      RegionBoundaries[I] =
-          std::make_pair(RegionBoundaries[I].first, std::prev(MI));
+      RegionBoundaries[I] = std::pair(RegionBoundaries[I].first, std::prev(MI));
       return;
     }
   }
diff --git a/llvm/lib/Target/AMDGPU/R600InstrInfo.cpp b/llvm/lib/Target/AMDGPU/R600InstrInfo.cpp
--- a/llvm/lib/Target/AMDGPU/R600InstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/R600InstrInfo.cpp
@@ -275,7 +275,7 @@
     if (Reg == R600::ALU_CONST) {
       MachineOperand &Sel =
           MI.getOperand(getOperandIdx(MI.getOpcode(), Op[1]));
-      Result.push_back(std::make_pair(&MO, Sel.getImm()));
+      Result.push_back(std::pair(&MO, Sel.getImm()));
       continue;
     }
   }
@@ -296,19 +296,19 @@
     Register Reg = MO.getReg();
     if (Reg == R600::ALU_CONST) {
       MachineOperand &Sel =
          MI.getOperand(getOperandIdx(MI.getOpcode(), Op[1]));
-      Result.push_back(std::make_pair(&MO, Sel.getImm()));
+      Result.push_back(std::pair(&MO, Sel.getImm()));
       continue;
     }
     if (Reg == R600::ALU_LITERAL_X) {
       MachineOperand &Operand =
          MI.getOperand(getOperandIdx(MI.getOpcode(), R600::OpName::literal));
       if (Operand.isImm()) {
-        Result.push_back(std::make_pair(&MO, Operand.getImm()));
+        Result.push_back(std::pair(&MO, Operand.getImm()));
         continue;
       }
       assert(Operand.isGlobal());
     }
-    Result.push_back(std::make_pair(&MO, 0));
+    Result.push_back(std::pair(&MO, 0));
   }
 
   return Result;
 }
@@ -326,11 +326,11 @@
     Register Reg = Src.first->getReg();
     int Index = RI.getEncodingValue(Reg) & 0xff;
     if (Reg == R600::OQAP) {
-      Result.push_back(std::make_pair(Index, 0U));
+      Result.push_back(std::pair(Index, 0U));
     }
     if (PV.find(Reg) != PV.end()) {
       // 255 is used to tells its a PS/PV reg
-      Result.push_back(std::make_pair(255, 0U));
+      Result.push_back(std::pair(255, 0U));
       continue;
     }
     if (Index > 127) {
@@ -339,7 +339,7 @@
       continue;
     }
     unsigned Chan = RI.getHWRegChan(Reg);
-    Result.push_back(std::make_pair(Index, Chan));
+    Result.push_back(std::pair(Index, Chan));
   }
   for (; i < 3; ++i)
     Result.push_back(DummyPair);
diff --git a/llvm/lib/Target/AMDGPU/R600OpenCLImageTypeLoweringPass.cpp b/llvm/lib/Target/AMDGPU/R600OpenCLImageTypeLoweringPass.cpp
--- a/llvm/lib/Target/AMDGPU/R600OpenCLImageTypeLoweringPass.cpp
+++ b/llvm/lib/Target/AMDGPU/R600OpenCLImageTypeLoweringPass.cpp
@@ -283,7 +283,7 @@
       Modified = true;
     }
     if (!Modified) {
-      return std::make_tuple(nullptr, nullptr);
+      return std::tuple(nullptr, nullptr);
     }
 
    // Create function with new signature and clone the old body into it.
@@ -311,7 +311,7 @@
     KernelMDArgs.push_back(MDNode::get(*Context, MDV));
 
     MDNode *NewMDNode = MDNode::get(*Context, KernelMDArgs);
 
-    return std::make_tuple(NewF, NewMDNode);
+    return std::tuple(NewF, NewMDNode);
   }
 
   bool transformKernels(Module &M) {
diff --git a/llvm/lib/Target/AMDGPU/SIAnnotateControlFlow.cpp b/llvm/lib/Target/AMDGPU/SIAnnotateControlFlow.cpp
--- a/llvm/lib/Target/AMDGPU/SIAnnotateControlFlow.cpp
+++ b/llvm/lib/Target/AMDGPU/SIAnnotateControlFlow.cpp
@@ -162,7 +162,7 @@
 
 /// Push a BB and saved value to the control flow stack
 void SIAnnotateControlFlow::push(BasicBlock *BB, Value *Saved) {
-  Stack.push_back(std::make_pair(BB, Saved));
+  Stack.push_back(std::pair(BB, Saved));
 }
 
 /// Can the condition represented by this PHI node treated like
diff --git a/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp b/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp
--- a/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp
@@ -196,7 +196,7 @@
                   ? MRI.getRegClass(DstReg)
                   : TRI.getPhysRegClass(DstReg);
 
-  return std::make_pair(SrcRC, DstRC);
+  return std::pair(SrcRC, DstRC);
 }
 
 static bool isVGPRToSGPRCopy(const TargetRegisterClass *SrcRC,
@@ -949,8 +949,8 @@
           // the COPY has already been MoveToVALUed
           continue;
 
-        SrcRegs.insert(std::make_pair(SiblingCopy->getOperand(1).getReg(),
-                                      SiblingCopy->getOperand(1).getSubReg()));
+        SrcRegs.insert(std::pair(SiblingCopy->getOperand(1).getReg(),
+                                 SiblingCopy->getOperand(1).getSubReg()));
       }
     }
     Info->SiblingPenalty = SrcRegs.size();
diff --git a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
--- a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
@@ -1457,7 +1457,7 @@
         ((Op == AMDGPU::V_MUL_F64_e64 || Op == AMDGPU::V_MUL_F16_e64 ||
           Op == AMDGPU::V_MUL_F16_t16_e64) &&
          MFI->getMode().FP64FP16OutputDenormals))
-      return std::make_pair(nullptr, SIOutMods::NONE);
+      return std::pair(nullptr, SIOutMods::NONE);
 
     const MachineOperand *RegOp = nullptr;
     const MachineOperand *ImmOp = nullptr;
@@ -1470,7 +1470,7 @@
       ImmOp = Src1;
       RegOp = Src0;
     } else
-      return std::make_pair(nullptr, SIOutMods::NONE);
+      return std::pair(nullptr, SIOutMods::NONE);
 
     int OMod = getOModValue(Op, ImmOp->getImm());
     if (OMod == SIOutMods::NONE ||
@@ -1478,9 +1478,9 @@
         TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) ||
         TII->hasModifiersSet(MI, AMDGPU::OpName::omod) ||
         TII->hasModifiersSet(MI, AMDGPU::OpName::clamp))
-      return std::make_pair(nullptr, SIOutMods::NONE);
+      return std::pair(nullptr, SIOutMods::NONE);
 
-    return std::make_pair(RegOp, OMod);
+    return std::pair(RegOp, OMod);
   }
   case AMDGPU::V_ADD_F64_e64:
   case AMDGPU::V_ADD_F32_e64:
@@ -1491,7 +1491,7 @@
        ((Op == AMDGPU::V_ADD_F64_e64 || Op == AMDGPU::V_ADD_F16_e64 ||
          Op == AMDGPU::V_ADD_F16_t16_e64) &&
         MFI->getMode().FP64FP16OutputDenormals))
-      return std::make_pair(nullptr, SIOutMods::NONE);
+      return std::pair(nullptr, SIOutMods::NONE);
 
     // Look through the DAGCombiner canonicalization fmul x, 2 -> fadd x, x
     const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
@@ -1503,12 +1503,12 @@
         !TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::clamp) &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
-      return std::make_pair(Src0, SIOutMods::MUL2);
+      return std::pair(Src0, SIOutMods::MUL2);
 
-    return std::make_pair(nullptr, SIOutMods::NONE);
+    return std::pair(nullptr, SIOutMods::NONE);
   }
   default:
-    return std::make_pair(nullptr, SIOutMods::NONE);
+    return std::pair(nullptr, SIOutMods::NONE);
   }
 }
diff --git a/llvm/lib/Target/AMDGPU/SIFormMemoryClauses.cpp b/llvm/lib/Target/AMDGPU/SIFormMemoryClauses.cpp
--- a/llvm/lib/Target/AMDGPU/SIFormMemoryClauses.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFormMemoryClauses.cpp
@@ -232,7 +232,7 @@
     auto Loc = Map.find(Reg);
     unsigned State = getMopState(MO);
     if (Loc == Map.end()) {
-      Map[Reg] = std::make_pair(State, Mask);
+      Map[Reg] = std::pair(State, Mask);
     } else {
       Loc->second.first |= State;
       Loc->second.second |= Mask;
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -3240,7 +3240,7 @@
     }
 
     if (VA.isRegLoc()) {
-      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
+      RegsToPass.push_back(std::pair(VA.getLocReg(), Arg));
     } else {
       assert(VA.isMemLoc());
@@ -3551,7 +3551,7 @@
 
   MBB.addSuccessor(LoopBB);
 
-  return std::make_pair(LoopBB, RemainderBB);
+  return std::pair(LoopBB, RemainderBB);
 }
 
 /// Insert \p MI into a BUNDLE with an S_WAITCNT 0 immediately following it.
@@ -3769,9 +3769,9 @@
   // Skip out of bounds offsets, or else we would end up using an undefined
   // register.
   if (Offset >= NumElts || Offset < 0)
-    return std::make_pair(AMDGPU::sub0, Offset);
+    return std::pair(AMDGPU::sub0, Offset);
 
-  return std::make_pair(SIRegisterInfo::getSubRegFromChannel(Offset), 0);
+  return std::pair(SIRegisterInfo::getSubRegFromChannel(Offset), 0);
 }
 
 static void setM0ToIndexFromSGPR(const SIInstrInfo *TII,
@@ -4668,8 +4668,8 @@
   SDValue Lo0, Hi0;
   SDValue Op0 = Op.getOperand(0);
   std::tie(Lo0, Hi0) = Op0.getValueType().isVector()
-                         ? DAG.SplitVectorOperand(Op.getNode(), 0)
-                         : std::make_pair(Op0, Op0);
+                           ? DAG.SplitVectorOperand(Op.getNode(), 0)
+                           : std::pair(Op0, Op0);
   SDValue Lo1, Hi1;
   std::tie(Lo1, Hi1) = DAG.SplitVectorOperand(Op.getNode(), 1);
   SDValue Lo2, Hi2;
@@ -12262,7 +12262,7 @@
     default:
       RC = SIRegisterInfo::getSGPRClassForBitWidth(BitWidth);
      if (!RC)
-        return std::make_pair(0U, nullptr);
+        return std::pair(0U, nullptr);
      break;
    }
    break;
@@ -12274,7 +12274,7 @@
    default:
      RC = TRI->getVGPRClassForBitWidth(BitWidth);
      if (!RC)
-        return std::make_pair(0U, nullptr);
+        return std::pair(0U, nullptr);
      break;
    }
    break;
@@ -12288,7 +12288,7 @@
    default:
      RC = TRI->getAGPRClassForBitWidth(BitWidth);
      if (!RC)
-        return std::make_pair(0U, nullptr);
+        return std::pair(0U, nullptr);
      break;
    }
    break;
@@ -12297,7 +12297,7 @@
     // even if they are not reported as legal
     if (RC && (isTypeLegal(VT) || VT.SimpleTy == MVT::i128 ||
                VT.SimpleTy == MVT::i16 || VT.SimpleTy == MVT::f16))
-      return std::make_pair(0U, RC);
+      return std::pair(0U, RC);
   }
 
   if (Constraint.startswith("{") && Constraint.endswith("}")) {
@@ -12329,13 +12329,13 @@
         RC = TRI->getAGPRClassForBitWidth(Width);
       if (RC) {
         Reg = TRI->getMatchingSuperReg(Reg, AMDGPU::sub0, RC);
-        return std::make_pair(Reg, RC);
+        return std::pair(Reg, RC);
       }
     }
   } else {
     bool Failed = RegName.getAsInteger(10, Idx);
     if (!Failed && Idx < RC->getNumRegs())
-      return std::make_pair(RC->getRegister(Idx), RC);
+      return std::pair(RC->getRegister(Idx), RC);
   }
 }
}
diff --git a/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp b/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp
--- a/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp
@@ -1621,7 +1621,7 @@
      // there cannot be a vector store to the same memory location.
       if (!Memop->isInvariant()) {
         const Value *Ptr = Memop->getValue();
-        SLoadAddresses.insert(std::make_pair(Ptr, Inst.getParent()));
+        SLoadAddresses.insert(std::pair(Ptr, Inst.getParent()));
       }
     }
     if (ST->hasReadVCCZBug()) {
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -2278,7 +2278,7 @@
         AMDGPU::isLegal64BitDPPControl(
             getNamedOperand(MI, AMDGPU::OpName::dpp_ctrl)->getImm())) {
       MI.setDesc(get(AMDGPU::V_MOV_B64_dpp));
-      return std::make_pair(&MI, nullptr);
+      return std::pair(&MI, nullptr);
     }
 
     MachineBasicBlock &MBB = *MI.getParent();
@@ -2331,7 +2331,7 @@
       .addImm(AMDGPU::sub1);
 
   MI.eraseFromParent();
-  return std::make_pair(Split[0], Split[1]);
+  return std::pair(Split[0], Split[1]);
 }
 
 bool SIInstrInfo::swapSourceModifiers(MachineInstr &MI,
@@ -5815,7 +5815,7 @@
       .addReg(SRsrcFormatHi)
       .addImm(AMDGPU::sub3);
 
-  return std::make_tuple(RsrcPtr, NewSRsrc);
+  return std::tuple(RsrcPtr, NewSRsrc);
 }
 
 MachineBasicBlock *
@@ -6591,10 +6591,10 @@
     MachineBasicBlock *NewBB = legalizeOperands(Inst, MDT);
     addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
 
-    return std::make_pair(true, NewBB);
+    return std::pair(true, NewBB);
   }
 
-  return std::make_pair(false, nullptr);
+  return std::pair(false, nullptr);
 }
 
 void SIInstrInfo::lowerSelect(SetVectorType &Worklist, MachineInstr &Inst,
@@ -7775,7 +7775,7 @@
 std::pair<unsigned, unsigned>
 SIInstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const {
-  return std::make_pair(TF & MO_MASK, TF & ~MO_MASK);
+  return std::pair(TF & MO_MASK, TF & ~MO_MASK);
 }
 
 ArrayRef<std::pair<unsigned, const char *>>
diff --git a/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp b/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
--- a/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
+++ b/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
@@ -1819,7 +1819,7 @@
     Idx1 = Idxs[CI.Width][Paired.Width - 1];
   }
 
-  return std::make_pair(Idx0, Idx1);
+  return std::pair(Idx0, Idx1);
 }
 
 const TargetRegisterClass *
@@ -2166,7 +2166,7 @@
         MAddrNext.Base.HiSubReg != MAddr.Base.HiSubReg)
      continue;
 
-    InstsWCommonBase.push_back(std::make_pair(&MINext, MAddrNext.Offset));
+    InstsWCommonBase.push_back(std::pair(&MINext, MAddrNext.Offset));
 
    int64_t Dist = MAddr.Offset - MAddrNext.Offset;
    TargetLoweringBase::AddrMode AM;
@@ -2316,7 +2316,7 @@
     ++I;
   }
 
-  return std::make_pair(BlockI, Modified);
+  return std::pair(BlockI, Modified);
 }
 
 // Scan through looking for adjacent LDS operations with constant offsets from
diff --git a/llvm/lib/Target/AMDGPU/SIMachineScheduler.cpp b/llvm/lib/Target/AMDGPU/SIMachineScheduler.cpp
--- a/llvm/lib/Target/AMDGPU/SIMachineScheduler.cpp
+++ b/llvm/lib/Target/AMDGPU/SIMachineScheduler.cpp
@@ -548,7 +548,7 @@
   }
   if (Succ->isHighLatencyBlock())
     ++NumHighLatencySuccessors;
-  Succs.push_back(std::make_pair(Succ, Kind));
+  Succs.push_back(std::pair(Succ, Kind));
 
   assert(none_of(Preds,
                  [=](SIScheduleBlock *P) { return SuccID == P->getID(); }) &&
diff --git a/llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp b/llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp
--- a/llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp
+++ b/llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp
@@ -626,45 +626,33 @@
 SIMemOpAccess::toSIAtomicScope(SyncScope::ID SSID,
                                SIAtomicAddrSpace InstrAddrSpace) const {
   if (SSID == SyncScope::System)
-    return std::make_tuple(SIAtomicScope::SYSTEM,
-                           SIAtomicAddrSpace::ATOMIC,
-                           true);
+    return std::tuple(SIAtomicScope::SYSTEM, SIAtomicAddrSpace::ATOMIC, true);
   if (SSID == MMI->getAgentSSID())
-    return std::make_tuple(SIAtomicScope::AGENT,
-                           SIAtomicAddrSpace::ATOMIC,
-                           true);
+    return std::tuple(SIAtomicScope::AGENT, SIAtomicAddrSpace::ATOMIC, true);
   if (SSID == MMI->getWorkgroupSSID())
-    return std::make_tuple(SIAtomicScope::WORKGROUP,
-                           SIAtomicAddrSpace::ATOMIC,
-                           true);
+    return std::tuple(SIAtomicScope::WORKGROUP, SIAtomicAddrSpace::ATOMIC,
+                      true);
   if (SSID == MMI->getWavefrontSSID())
-    return std::make_tuple(SIAtomicScope::WAVEFRONT,
-                           SIAtomicAddrSpace::ATOMIC,
-                           true);
+    return std::tuple(SIAtomicScope::WAVEFRONT, SIAtomicAddrSpace::ATOMIC,
+                      true);
   if (SSID == SyncScope::SingleThread)
-    return std::make_tuple(SIAtomicScope::SINGLETHREAD,
-                           SIAtomicAddrSpace::ATOMIC,
-                           true);
+    return std::tuple(SIAtomicScope::SINGLETHREAD, SIAtomicAddrSpace::ATOMIC,
+                      true);
   if (SSID == MMI->getSystemOneAddressSpaceSSID())
-    return std::make_tuple(SIAtomicScope::SYSTEM,
-                           SIAtomicAddrSpace::ATOMIC & InstrAddrSpace,
-                           false);
+    return std::tuple(SIAtomicScope::SYSTEM,
+                      SIAtomicAddrSpace::ATOMIC & InstrAddrSpace, false);
   if (SSID == MMI->getAgentOneAddressSpaceSSID())
-    return std::make_tuple(SIAtomicScope::AGENT,
-                           SIAtomicAddrSpace::ATOMIC & InstrAddrSpace,
-                           false);
+    return std::tuple(SIAtomicScope::AGENT,
+                      SIAtomicAddrSpace::ATOMIC & InstrAddrSpace, false);
   if (SSID == MMI->getWorkgroupOneAddressSpaceSSID())
-    return std::make_tuple(SIAtomicScope::WORKGROUP,
-                           SIAtomicAddrSpace::ATOMIC & InstrAddrSpace,
-                           false);
+    return std::tuple(SIAtomicScope::WORKGROUP,
+                      SIAtomicAddrSpace::ATOMIC & InstrAddrSpace, false);
   if (SSID == MMI->getWavefrontOneAddressSpaceSSID())
-    return std::make_tuple(SIAtomicScope::WAVEFRONT,
-                           SIAtomicAddrSpace::ATOMIC & InstrAddrSpace,
-                           false);
+    return std::tuple(SIAtomicScope::WAVEFRONT,
+                      SIAtomicAddrSpace::ATOMIC & InstrAddrSpace, false);
   if (SSID == MMI->getSingleThreadOneAddressSpaceSSID())
-    return std::make_tuple(SIAtomicScope::SINGLETHREAD,
-                           SIAtomicAddrSpace::ATOMIC & InstrAddrSpace,
-                           false);
+    return std::tuple(SIAtomicScope::SINGLETHREAD,
+                      SIAtomicAddrSpace::ATOMIC & InstrAddrSpace, false);
   return std::nullopt;
 }
diff --git a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
--- a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
+++ b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
@@ -712,7 +712,7 @@
     if (!Op2Def)
       return CheckRetType(std::nullopt);
 
-    return CheckRetType(std::make_pair(Op1Def, Op2Def));
+    return CheckRetType(std::pair(Op1Def, Op2Def));
   };
 
   MachineOperand *OrSDWA = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
diff --git a/llvm/lib/Target/AMDGPU/SIWholeQuadMode.cpp b/llvm/lib/Target/AMDGPU/SIWholeQuadMode.cpp
--- a/llvm/lib/Target/AMDGPU/SIWholeQuadMode.cpp
+++ b/llvm/lib/Target/AMDGPU/SIWholeQuadMode.cpp
@@ -107,10 +107,8 @@
 
 static raw_ostream &operator<<(raw_ostream &OS, const PrintState &PS) {
   static const std::pair<char, const char *> Mapping[] = {
-      std::make_pair(StateWQM, "WQM"),
-      std::make_pair(StateStrictWWM, "StrictWWM"),
-      std::make_pair(StateStrictWQM, "StrictWQM"),
-      std::make_pair(StateExact, "Exact")};
+      std::pair(StateWQM, "WQM"), std::pair(StateStrictWWM, "StrictWWM"),
+      std::pair(StateStrictWQM, "StrictWQM"), std::pair(StateExact, "Exact")};
   char State = PS.State;
   for (auto M : Mapping) {
     if (State & M.first) {
diff --git a/llvm/lib/Target/AMDGPU/Utils/AMDKernelCodeTUtils.cpp b/llvm/lib/Target/AMDGPU/Utils/AMDKernelCodeTUtils.cpp
--- a/llvm/lib/Target/AMDGPU/Utils/AMDKernelCodeTUtils.cpp
+++ b/llvm/lib/Target/AMDGPU/Utils/AMDKernelCodeTUtils.cpp
@@ -47,8 +47,8 @@
   StringMap<int> map;
   assert(names.size() == altNames.size());
   for (unsigned i = 0; i < names.size(); ++i) {
-    map.insert(std::make_pair(names[i], i));
-    map.insert(std::make_pair(altNames[i], i));
+    map.insert(std::pair(names[i], i));
+    map.insert(std::pair(altNames[i], i));
   }
   return map;
 }
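
Note: the whole patch relies on C++17 class template argument deduction (CTAD): a `std::pair` or `std::tuple` constructed directly deduces its template arguments from the constructor arguments, which is exactly what `std::make_pair`/`std::make_tuple` were for, so the helpers add nothing here. A minimal standalone sketch of the deduction behavior being relied on (this snippet is illustrative only and not part of the patch):

#include <tuple>
#include <utility>

int main() {
  // CTAD deduces std::pair<int, const char *>, the same type
  // std::make_pair(1, "x") would have produced.
  std::pair P(1, "x");

  // The same deduction works for std::tuple.
  std::tuple T(P.first, 2.0, P.second);

  // One known behavioral difference: std::make_pair unwraps
  // std::reference_wrapper (std::ref) into a plain reference, while CTAD
  // preserves the wrapper type. The patch only converts plain value
  // arguments, so it is unaffected by this.
  return std::get<0>(T); // returns 1
}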