diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h --- a/llvm/include/llvm/CodeGen/TargetLowering.h +++ b/llvm/include/llvm/CodeGen/TargetLowering.h @@ -2343,6 +2343,11 @@ TargetDAGCombineArray[NT >> 3] |= 1 << (NT&7); } + void setTargetDAGCombine(ArrayRef<ISD::NodeType> NTs) { + for (auto NT : NTs) + setTargetDAGCombine(NT); + } + /// Set the target's minimum function alignment. void setMinFunctionAlignment(Align Alignment) { MinFunctionAlignment = Alignment; diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp --- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp +++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp @@ -847,50 +847,33 @@ // Vector add and sub nodes may conceal a high-half opportunity. // Also, try to fold ADD into CSINC/CSINV.. - setTargetDAGCombine(ISD::ADD); - setTargetDAGCombine(ISD::ABS); - setTargetDAGCombine(ISD::SUB); - setTargetDAGCombine(ISD::XOR); - setTargetDAGCombine(ISD::SINT_TO_FP); - setTargetDAGCombine(ISD::UINT_TO_FP); - - setTargetDAGCombine(ISD::FP_TO_SINT); - setTargetDAGCombine(ISD::FP_TO_UINT); - setTargetDAGCombine(ISD::FP_TO_SINT_SAT); - setTargetDAGCombine(ISD::FP_TO_UINT_SAT); - setTargetDAGCombine(ISD::FDIV); + setTargetDAGCombine({ISD::ADD, ISD::ABS, ISD::SUB, ISD::XOR, ISD::SINT_TO_FP, + ISD::UINT_TO_FP}); + + setTargetDAGCombine({ISD::FP_TO_SINT, ISD::FP_TO_UINT, ISD::FP_TO_SINT_SAT, + ISD::FP_TO_UINT_SAT, ISD::FDIV}); // Try and combine setcc with csel setTargetDAGCombine(ISD::SETCC); setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN); - setTargetDAGCombine(ISD::ANY_EXTEND); - setTargetDAGCombine(ISD::ZERO_EXTEND); - setTargetDAGCombine(ISD::SIGN_EXTEND); - setTargetDAGCombine(ISD::VECTOR_SPLICE); - setTargetDAGCombine(ISD::SIGN_EXTEND_INREG); - setTargetDAGCombine(ISD::CONCAT_VECTORS); - setTargetDAGCombine(ISD::EXTRACT_SUBVECTOR); - setTargetDAGCombine(ISD::INSERT_SUBVECTOR); - setTargetDAGCombine(ISD::STORE); + 
setTargetDAGCombine({ISD::ANY_EXTEND, ISD::ZERO_EXTEND, ISD::SIGN_EXTEND, + ISD::VECTOR_SPLICE, ISD::SIGN_EXTEND_INREG, + ISD::CONCAT_VECTORS, ISD::EXTRACT_SUBVECTOR, + ISD::INSERT_SUBVECTOR, ISD::STORE}); if (Subtarget->supportsAddressTopByteIgnored()) setTargetDAGCombine(ISD::LOAD); setTargetDAGCombine(ISD::MUL); - setTargetDAGCombine(ISD::SELECT); - setTargetDAGCombine(ISD::VSELECT); + setTargetDAGCombine({ISD::SELECT, ISD::VSELECT}); - setTargetDAGCombine(ISD::INTRINSIC_VOID); - setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN); - setTargetDAGCombine(ISD::INSERT_VECTOR_ELT); - setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT); - setTargetDAGCombine(ISD::VECREDUCE_ADD); - setTargetDAGCombine(ISD::STEP_VECTOR); + setTargetDAGCombine({ISD::INTRINSIC_VOID, ISD::INTRINSIC_W_CHAIN, + ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT, + ISD::VECREDUCE_ADD, ISD::STEP_VECTOR}); - setTargetDAGCombine(ISD::MGATHER); - setTargetDAGCombine(ISD::MSCATTER); + setTargetDAGCombine({ISD::MGATHER, ISD::MSCATTER}); setTargetDAGCombine(ISD::FP_EXTEND); diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp --- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp @@ -591,26 +591,16 @@ if (AMDGPUBypassSlowDiv) addBypassSlowDiv(64, 32); - setTargetDAGCombine(ISD::BITCAST); - setTargetDAGCombine(ISD::SHL); - setTargetDAGCombine(ISD::SRA); - setTargetDAGCombine(ISD::SRL); - setTargetDAGCombine(ISD::TRUNCATE); - setTargetDAGCombine(ISD::MUL); - setTargetDAGCombine(ISD::SMUL_LOHI); - setTargetDAGCombine(ISD::UMUL_LOHI); - setTargetDAGCombine(ISD::MULHU); - setTargetDAGCombine(ISD::MULHS); - setTargetDAGCombine(ISD::SELECT); - setTargetDAGCombine(ISD::SELECT_CC); - setTargetDAGCombine(ISD::STORE); - setTargetDAGCombine(ISD::FADD); - setTargetDAGCombine(ISD::FSUB); - setTargetDAGCombine(ISD::FNEG); - setTargetDAGCombine(ISD::FABS); - setTargetDAGCombine(ISD::AssertZext); - 
setTargetDAGCombine(ISD::AssertSext); - setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN); + setTargetDAGCombine({ISD::BITCAST, ISD::SHL, + ISD::SRA, ISD::SRL, + ISD::TRUNCATE, ISD::MUL, + ISD::SMUL_LOHI, ISD::UMUL_LOHI, + ISD::MULHU, ISD::MULHS, + ISD::SELECT, ISD::SELECT_CC, + ISD::STORE, ISD::FADD, + ISD::FSUB, ISD::FNEG, + ISD::FABS, ISD::AssertZext, + ISD::AssertSext, ISD::INTRINSIC_WO_CHAIN}); } bool AMDGPUTargetLowering::mayIgnoreSignedZero(SDValue Op) const { diff --git a/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp b/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp --- a/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp +++ b/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp @@ -247,12 +247,8 @@ setSchedulingPreference(Sched::Source); - setTargetDAGCombine(ISD::FP_ROUND); - setTargetDAGCombine(ISD::FP_TO_SINT); - setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT); - setTargetDAGCombine(ISD::SELECT_CC); - setTargetDAGCombine(ISD::INSERT_VECTOR_ELT); - setTargetDAGCombine(ISD::LOAD); + setTargetDAGCombine({ISD::FP_ROUND, ISD::FP_TO_SINT, ISD::EXTRACT_VECTOR_ELT, + ISD::SELECT_CC, ISD::INSERT_VECTOR_ELT, ISD::LOAD}); } static inline bool isEOP(MachineBasicBlock::iterator I) { diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp --- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp +++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp @@ -888,56 +888,56 @@ setOperationAction(ISD::INTRINSIC_VOID, MVT::i16, Custom); setOperationAction(ISD::INTRINSIC_VOID, MVT::i8, Custom); - setTargetDAGCombine(ISD::ADD); - setTargetDAGCombine(ISD::ADDCARRY); - setTargetDAGCombine(ISD::SUB); - setTargetDAGCombine(ISD::SUBCARRY); - setTargetDAGCombine(ISD::FADD); - setTargetDAGCombine(ISD::FSUB); - setTargetDAGCombine(ISD::FMINNUM); - setTargetDAGCombine(ISD::FMAXNUM); - setTargetDAGCombine(ISD::FMINNUM_IEEE); - setTargetDAGCombine(ISD::FMAXNUM_IEEE); - setTargetDAGCombine(ISD::FMA); - setTargetDAGCombine(ISD::SMIN); - setTargetDAGCombine(ISD::SMAX); - 
setTargetDAGCombine(ISD::UMIN); - setTargetDAGCombine(ISD::UMAX); - setTargetDAGCombine(ISD::SETCC); - setTargetDAGCombine(ISD::AND); - setTargetDAGCombine(ISD::OR); - setTargetDAGCombine(ISD::XOR); - setTargetDAGCombine(ISD::SINT_TO_FP); - setTargetDAGCombine(ISD::UINT_TO_FP); - setTargetDAGCombine(ISD::FCANONICALIZE); - setTargetDAGCombine(ISD::SCALAR_TO_VECTOR); - setTargetDAGCombine(ISD::ZERO_EXTEND); - setTargetDAGCombine(ISD::SIGN_EXTEND_INREG); - setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT); - setTargetDAGCombine(ISD::INSERT_VECTOR_ELT); + setTargetDAGCombine({ISD::ADD, + ISD::ADDCARRY, + ISD::SUB, + ISD::SUBCARRY, + ISD::FADD, + ISD::FSUB, + ISD::FMINNUM, + ISD::FMAXNUM, + ISD::FMINNUM_IEEE, + ISD::FMAXNUM_IEEE, + ISD::FMA, + ISD::SMIN, + ISD::SMAX, + ISD::UMIN, + ISD::UMAX, + ISD::SETCC, + ISD::AND, + ISD::OR, + ISD::XOR, + ISD::SINT_TO_FP, + ISD::UINT_TO_FP, + ISD::FCANONICALIZE, + ISD::SCALAR_TO_VECTOR, + ISD::ZERO_EXTEND, + ISD::SIGN_EXTEND_INREG, + ISD::EXTRACT_VECTOR_ELT, + ISD::INSERT_VECTOR_ELT}); // All memory operations. Some folding on the pointer operand is done to help // matching the constant offsets in the addressing modes. 
- setTargetDAGCombine(ISD::LOAD); - setTargetDAGCombine(ISD::STORE); - setTargetDAGCombine(ISD::ATOMIC_LOAD); - setTargetDAGCombine(ISD::ATOMIC_STORE); - setTargetDAGCombine(ISD::ATOMIC_CMP_SWAP); - setTargetDAGCombine(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS); - setTargetDAGCombine(ISD::ATOMIC_SWAP); - setTargetDAGCombine(ISD::ATOMIC_LOAD_ADD); - setTargetDAGCombine(ISD::ATOMIC_LOAD_SUB); - setTargetDAGCombine(ISD::ATOMIC_LOAD_AND); - setTargetDAGCombine(ISD::ATOMIC_LOAD_OR); - setTargetDAGCombine(ISD::ATOMIC_LOAD_XOR); - setTargetDAGCombine(ISD::ATOMIC_LOAD_NAND); - setTargetDAGCombine(ISD::ATOMIC_LOAD_MIN); - setTargetDAGCombine(ISD::ATOMIC_LOAD_MAX); - setTargetDAGCombine(ISD::ATOMIC_LOAD_UMIN); - setTargetDAGCombine(ISD::ATOMIC_LOAD_UMAX); - setTargetDAGCombine(ISD::ATOMIC_LOAD_FADD); - setTargetDAGCombine(ISD::INTRINSIC_VOID); - setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN); + setTargetDAGCombine({ISD::LOAD, + ISD::STORE, + ISD::ATOMIC_LOAD, + ISD::ATOMIC_STORE, + ISD::ATOMIC_CMP_SWAP, + ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, + ISD::ATOMIC_SWAP, + ISD::ATOMIC_LOAD_ADD, + ISD::ATOMIC_LOAD_SUB, + ISD::ATOMIC_LOAD_AND, + ISD::ATOMIC_LOAD_OR, + ISD::ATOMIC_LOAD_XOR, + ISD::ATOMIC_LOAD_NAND, + ISD::ATOMIC_LOAD_MIN, + ISD::ATOMIC_LOAD_MAX, + ISD::ATOMIC_LOAD_UMIN, + ISD::ATOMIC_LOAD_UMAX, + ISD::ATOMIC_LOAD_FADD, + ISD::INTRINSIC_VOID, + ISD::INTRINSIC_W_CHAIN}); // FIXME: In other contexts we pretend this is a per-function property. setStackPointerRegisterToSaveRestore(AMDGPU::SGPR32); diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp --- a/llvm/lib/Target/ARM/ARMISelLowering.cpp +++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp @@ -814,8 +814,7 @@ // Combine low-overhead loop intrinsics so that we can lower i1 types. 
if (Subtarget->hasLOB()) { - setTargetDAGCombine(ISD::BRCOND); - setTargetDAGCombine(ISD::BR_CC); + setTargetDAGCombine({ISD::BRCOND, ISD::BR_CC}); } if (Subtarget->hasNEON()) { @@ -987,13 +986,8 @@ setOperationAction(ISD::FMA, MVT::v4f32, Expand); } - setTargetDAGCombine(ISD::SHL); - setTargetDAGCombine(ISD::SRL); - setTargetDAGCombine(ISD::SRA); - setTargetDAGCombine(ISD::FP_TO_SINT); - setTargetDAGCombine(ISD::FP_TO_UINT); - setTargetDAGCombine(ISD::FDIV); - setTargetDAGCombine(ISD::LOAD); + setTargetDAGCombine({ISD::SHL, ISD::SRL, ISD::SRA, ISD::FP_TO_SINT, + ISD::FP_TO_UINT, ISD::FDIV, ISD::LOAD}); // It is legal to extload from v4i8 to v4i16 or v4i32. for (MVT Ty : {MVT::v8i8, MVT::v4i8, MVT::v2i8, MVT::v4i16, MVT::v2i16, @@ -1007,32 +1001,17 @@ } if (Subtarget->hasNEON() || Subtarget->hasMVEIntegerOps()) { - setTargetDAGCombine(ISD::BUILD_VECTOR); - setTargetDAGCombine(ISD::VECTOR_SHUFFLE); - setTargetDAGCombine(ISD::INSERT_SUBVECTOR); - setTargetDAGCombine(ISD::INSERT_VECTOR_ELT); - setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT); - setTargetDAGCombine(ISD::SIGN_EXTEND_INREG); - setTargetDAGCombine(ISD::STORE); - setTargetDAGCombine(ISD::SIGN_EXTEND); - setTargetDAGCombine(ISD::ZERO_EXTEND); - setTargetDAGCombine(ISD::ANY_EXTEND); - setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN); - setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN); - setTargetDAGCombine(ISD::INTRINSIC_VOID); - setTargetDAGCombine(ISD::VECREDUCE_ADD); - setTargetDAGCombine(ISD::ADD); - setTargetDAGCombine(ISD::BITCAST); + setTargetDAGCombine( + {ISD::BUILD_VECTOR, ISD::VECTOR_SHUFFLE, ISD::INSERT_SUBVECTOR, + ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT, + ISD::SIGN_EXTEND_INREG, ISD::STORE, ISD::SIGN_EXTEND, ISD::ZERO_EXTEND, + ISD::ANY_EXTEND, ISD::INTRINSIC_WO_CHAIN, ISD::INTRINSIC_W_CHAIN, + ISD::INTRINSIC_VOID, ISD::VECREDUCE_ADD, ISD::ADD, ISD::BITCAST}); } if (Subtarget->hasMVEIntegerOps()) { - setTargetDAGCombine(ISD::SMIN); - setTargetDAGCombine(ISD::UMIN); - setTargetDAGCombine(ISD::SMAX); 
- setTargetDAGCombine(ISD::UMAX); - setTargetDAGCombine(ISD::FP_EXTEND); - setTargetDAGCombine(ISD::SELECT); - setTargetDAGCombine(ISD::SELECT_CC); - setTargetDAGCombine(ISD::SETCC); + setTargetDAGCombine({ISD::SMIN, ISD::UMIN, ISD::SMAX, ISD::UMAX, + ISD::FP_EXTEND, ISD::SELECT, ISD::SELECT_CC, + ISD::SETCC}); } if (Subtarget->hasMVEFloatOps()) { setTargetDAGCombine(ISD::FADD); @@ -1573,12 +1552,8 @@ // We have target-specific dag combine patterns for the following nodes: // ARMISD::VMOVRRD - No need to call setTargetDAGCombine - setTargetDAGCombine(ISD::ADD); - setTargetDAGCombine(ISD::SUB); - setTargetDAGCombine(ISD::MUL); - setTargetDAGCombine(ISD::AND); - setTargetDAGCombine(ISD::OR); - setTargetDAGCombine(ISD::XOR); + setTargetDAGCombine( + {ISD::ADD, ISD::SUB, ISD::MUL, ISD::AND, ISD::OR, ISD::XOR}); if (Subtarget->hasMVEIntegerOps()) setTargetDAGCombine(ISD::VSELECT); @@ -1590,8 +1565,7 @@ // Attempt to lower smin/smax to ssat/usat if ((!Subtarget->isThumb() && Subtarget->hasV6Ops()) || Subtarget->isThumb2()) { - setTargetDAGCombine(ISD::SMIN); - setTargetDAGCombine(ISD::SMAX); + setTargetDAGCombine({ISD::SMIN, ISD::SMAX}); } setStackPointerRegisterToSaveRestore(ARM::SP); diff --git a/llvm/lib/Target/Hexagon/HexagonISelLoweringHVX.cpp b/llvm/lib/Target/Hexagon/HexagonISelLoweringHVX.cpp --- a/llvm/lib/Target/Hexagon/HexagonISelLoweringHVX.cpp +++ b/llvm/lib/Target/Hexagon/HexagonISelLoweringHVX.cpp @@ -386,8 +386,7 @@ } } - setTargetDAGCombine(ISD::SPLAT_VECTOR); - setTargetDAGCombine(ISD::VSELECT); + setTargetDAGCombine({ISD::SPLAT_VECTOR, ISD::VSELECT}); } unsigned diff --git a/llvm/lib/Target/Lanai/LanaiISelLowering.cpp b/llvm/lib/Target/Lanai/LanaiISelLowering.cpp --- a/llvm/lib/Target/Lanai/LanaiISelLowering.cpp +++ b/llvm/lib/Target/Lanai/LanaiISelLowering.cpp @@ -138,11 +138,7 @@ setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote); } - setTargetDAGCombine(ISD::ADD); - setTargetDAGCombine(ISD::SUB); - setTargetDAGCombine(ISD::AND); - 
setTargetDAGCombine(ISD::OR); - setTargetDAGCombine(ISD::XOR); + setTargetDAGCombine({ISD::ADD, ISD::SUB, ISD::AND, ISD::OR, ISD::XOR}); // Function alignments setMinFunctionAlignment(Align(4)); diff --git a/llvm/lib/Target/Mips/MipsISelLowering.cpp b/llvm/lib/Target/Mips/MipsISelLowering.cpp --- a/llvm/lib/Target/Mips/MipsISelLowering.cpp +++ b/llvm/lib/Target/Mips/MipsISelLowering.cpp @@ -482,15 +482,8 @@ setOperationAction(ISD::TRAP, MVT::Other, Legal); - setTargetDAGCombine(ISD::SDIVREM); - setTargetDAGCombine(ISD::UDIVREM); - setTargetDAGCombine(ISD::SELECT); - setTargetDAGCombine(ISD::AND); - setTargetDAGCombine(ISD::OR); - setTargetDAGCombine(ISD::ADD); - setTargetDAGCombine(ISD::SUB); - setTargetDAGCombine(ISD::AssertZext); - setTargetDAGCombine(ISD::SHL); + setTargetDAGCombine({ISD::SDIVREM, ISD::UDIVREM, ISD::SELECT, ISD::AND, + ISD::OR, ISD::ADD, ISD::SUB, ISD::AssertZext, ISD::SHL}); if (ABI.IsO32()) { // These libcalls are not available in 32-bit. diff --git a/llvm/lib/Target/Mips/MipsSEISelLowering.cpp b/llvm/lib/Target/Mips/MipsSEISelLowering.cpp --- a/llvm/lib/Target/Mips/MipsSEISelLowering.cpp +++ b/llvm/lib/Target/Mips/MipsSEISelLowering.cpp @@ -99,11 +99,8 @@ setOperationAction(ISD::BITCAST, VecTy, Legal); } - setTargetDAGCombine(ISD::SHL); - setTargetDAGCombine(ISD::SRA); - setTargetDAGCombine(ISD::SRL); - setTargetDAGCombine(ISD::SETCC); - setTargetDAGCombine(ISD::VSELECT); + setTargetDAGCombine( + {ISD::SHL, ISD::SRA, ISD::SRL, ISD::SETCC, ISD::VSELECT}); if (Subtarget.hasMips32r2()) { setOperationAction(ISD::ADDC, MVT::i32, Legal); @@ -161,11 +158,7 @@ setOperationAction(ISD::FMINIMUM, MVT::f16, Promote); setOperationAction(ISD::FMAXIMUM, MVT::f16, Promote); - setTargetDAGCombine(ISD::AND); - setTargetDAGCombine(ISD::OR); - setTargetDAGCombine(ISD::SRA); - setTargetDAGCombine(ISD::VSELECT); - setTargetDAGCombine(ISD::XOR); + setTargetDAGCombine({ISD::AND, ISD::OR, ISD::SRA, ISD::VSELECT, ISD::XOR}); } if (!Subtarget.useSoftFloat()) { diff 
--git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp --- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp +++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp @@ -499,13 +499,8 @@ setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand); // We have some custom DAG combine patterns for these nodes - setTargetDAGCombine(ISD::ADD); - setTargetDAGCombine(ISD::AND); - setTargetDAGCombine(ISD::FADD); - setTargetDAGCombine(ISD::MUL); - setTargetDAGCombine(ISD::SHL); - setTargetDAGCombine(ISD::SREM); - setTargetDAGCombine(ISD::UREM); + setTargetDAGCombine({ISD::ADD, ISD::AND, ISD::FADD, ISD::MUL, ISD::SHL, + ISD::SREM, ISD::UREM}); // setcc for f16x2 needs special handling to prevent legalizer's // attempt to scalarize it due to v2i1 not being legal. diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp --- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp +++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp @@ -1351,43 +1351,26 @@ setStackPointerRegisterToSaveRestore(isPPC64 ? 
PPC::X1 : PPC::R1); // We have target-specific dag combine patterns for the following nodes: - setTargetDAGCombine(ISD::ADD); - setTargetDAGCombine(ISD::SHL); - setTargetDAGCombine(ISD::SRA); - setTargetDAGCombine(ISD::SRL); - setTargetDAGCombine(ISD::MUL); - setTargetDAGCombine(ISD::FMA); - setTargetDAGCombine(ISD::SINT_TO_FP); - setTargetDAGCombine(ISD::BUILD_VECTOR); + setTargetDAGCombine({ISD::ADD, ISD::SHL, ISD::SRA, ISD::SRL, ISD::MUL, + ISD::FMA, ISD::SINT_TO_FP, ISD::BUILD_VECTOR}); if (Subtarget.hasFPCVT()) setTargetDAGCombine(ISD::UINT_TO_FP); - setTargetDAGCombine(ISD::LOAD); - setTargetDAGCombine(ISD::STORE); - setTargetDAGCombine(ISD::BR_CC); + setTargetDAGCombine({ISD::LOAD, ISD::STORE, ISD::BR_CC}); if (Subtarget.useCRBits()) setTargetDAGCombine(ISD::BRCOND); - setTargetDAGCombine(ISD::BSWAP); - setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN); - setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN); - setTargetDAGCombine(ISD::INTRINSIC_VOID); + setTargetDAGCombine({ISD::BSWAP, ISD::INTRINSIC_WO_CHAIN, + ISD::INTRINSIC_W_CHAIN, ISD::INTRINSIC_VOID}); - setTargetDAGCombine(ISD::SIGN_EXTEND); - setTargetDAGCombine(ISD::ZERO_EXTEND); - setTargetDAGCombine(ISD::ANY_EXTEND); - - setTargetDAGCombine(ISD::TRUNCATE); - setTargetDAGCombine(ISD::VECTOR_SHUFFLE); + setTargetDAGCombine({ISD::SIGN_EXTEND, ISD::ZERO_EXTEND, ISD::ANY_EXTEND}); + setTargetDAGCombine({ISD::TRUNCATE, ISD::VECTOR_SHUFFLE}); if (Subtarget.useCRBits()) { - setTargetDAGCombine(ISD::TRUNCATE); - setTargetDAGCombine(ISD::SETCC); - setTargetDAGCombine(ISD::SELECT_CC); + setTargetDAGCombine({ISD::TRUNCATE, ISD::SETCC, ISD::SELECT_CC}); } if (Subtarget.hasP9Altivec()) { - setTargetDAGCombine(ISD::ABS); - setTargetDAGCombine(ISD::VSELECT); + setTargetDAGCombine({ISD::ABS, ISD::VSELECT}); } setLibcallName(RTLIB::LOG_F128, "logf128"); diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp --- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp +++ 
b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp @@ -1034,39 +1034,22 @@ // Jumps are expensive, compared to logic setJumpIsExpensive(); - setTargetDAGCombine(ISD::ADD); - setTargetDAGCombine(ISD::SUB); - setTargetDAGCombine(ISD::AND); - setTargetDAGCombine(ISD::OR); - setTargetDAGCombine(ISD::XOR); - if (Subtarget.hasStdExtZbp()) { - setTargetDAGCombine(ISD::ROTL); - setTargetDAGCombine(ISD::ROTR); - } + setTargetDAGCombine({ISD::INTRINSIC_WO_CHAIN, ISD::ADD, ISD::SUB, ISD::AND, + ISD::OR, ISD::XOR}); + + if (Subtarget.hasStdExtZbp()) + setTargetDAGCombine({ISD::ROTL, ISD::ROTR}); if (Subtarget.hasStdExtZbkb()) setTargetDAGCombine(ISD::BITREVERSE); - setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN); if (Subtarget.hasStdExtZfh() || Subtarget.hasStdExtZbb()) setTargetDAGCombine(ISD::SIGN_EXTEND_INREG); - if (Subtarget.hasStdExtF()) { - setTargetDAGCombine(ISD::ZERO_EXTEND); - setTargetDAGCombine(ISD::FP_TO_SINT); - setTargetDAGCombine(ISD::FP_TO_UINT); - setTargetDAGCombine(ISD::FP_TO_SINT_SAT); - setTargetDAGCombine(ISD::FP_TO_UINT_SAT); - } - if (Subtarget.hasVInstructions()) { - setTargetDAGCombine(ISD::FCOPYSIGN); - setTargetDAGCombine(ISD::MGATHER); - setTargetDAGCombine(ISD::MSCATTER); - setTargetDAGCombine(ISD::VP_GATHER); - setTargetDAGCombine(ISD::VP_SCATTER); - setTargetDAGCombine(ISD::SRA); - setTargetDAGCombine(ISD::SRL); - setTargetDAGCombine(ISD::SHL); - setTargetDAGCombine(ISD::STORE); - setTargetDAGCombine(ISD::SPLAT_VECTOR); - } + if (Subtarget.hasStdExtF()) + setTargetDAGCombine({ISD::ZERO_EXTEND, ISD::FP_TO_SINT, ISD::FP_TO_UINT, + ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT}); + if (Subtarget.hasVInstructions()) + setTargetDAGCombine({ISD::FCOPYSIGN, ISD::MGATHER, ISD::MSCATTER, + ISD::VP_GATHER, ISD::VP_SCATTER, ISD::SRA, ISD::SRL, + ISD::SHL, ISD::STORE, ISD::SPLAT_VECTOR}); setLibcallName(RTLIB::FPEXT_F16_F32, "__extendhfsf2"); setLibcallName(RTLIB::FPROUND_F32_F16, "__truncsfhf2"); diff --git a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp 
b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp --- a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp +++ b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp @@ -640,26 +640,26 @@ setOperationAction(ISD::VAEND, MVT::Other, Expand); // Codes for which we want to perform some z-specific combinations. - setTargetDAGCombine(ISD::ZERO_EXTEND); - setTargetDAGCombine(ISD::SIGN_EXTEND); - setTargetDAGCombine(ISD::SIGN_EXTEND_INREG); - setTargetDAGCombine(ISD::LOAD); - setTargetDAGCombine(ISD::STORE); - setTargetDAGCombine(ISD::VECTOR_SHUFFLE); - setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT); - setTargetDAGCombine(ISD::FP_ROUND); - setTargetDAGCombine(ISD::STRICT_FP_ROUND); - setTargetDAGCombine(ISD::FP_EXTEND); - setTargetDAGCombine(ISD::SINT_TO_FP); - setTargetDAGCombine(ISD::UINT_TO_FP); - setTargetDAGCombine(ISD::STRICT_FP_EXTEND); - setTargetDAGCombine(ISD::BSWAP); - setTargetDAGCombine(ISD::SDIV); - setTargetDAGCombine(ISD::UDIV); - setTargetDAGCombine(ISD::SREM); - setTargetDAGCombine(ISD::UREM); - setTargetDAGCombine(ISD::INTRINSIC_VOID); - setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN); + setTargetDAGCombine({ISD::ZERO_EXTEND, + ISD::SIGN_EXTEND, + ISD::SIGN_EXTEND_INREG, + ISD::LOAD, + ISD::STORE, + ISD::VECTOR_SHUFFLE, + ISD::EXTRACT_VECTOR_ELT, + ISD::FP_ROUND, + ISD::STRICT_FP_ROUND, + ISD::FP_EXTEND, + ISD::SINT_TO_FP, + ISD::UINT_TO_FP, + ISD::STRICT_FP_EXTEND, + ISD::BSWAP, + ISD::SDIV, + ISD::UDIV, + ISD::SREM, + ISD::UREM, + ISD::INTRINSIC_VOID, + ISD::INTRINSIC_W_CHAIN}); // Handle intrinsics. 
setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom); diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp --- a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp +++ b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp @@ -161,22 +161,17 @@ setTargetDAGCombine(ISD::VECTOR_SHUFFLE); // Combine extends of extract_subvectors into widening ops - setTargetDAGCombine(ISD::SIGN_EXTEND); - setTargetDAGCombine(ISD::ZERO_EXTEND); + setTargetDAGCombine({ISD::SIGN_EXTEND, ISD::ZERO_EXTEND}); // Combine int_to_fp or fp_extend of extract_vectors and vice versa into // conversions ops - setTargetDAGCombine(ISD::SINT_TO_FP); - setTargetDAGCombine(ISD::UINT_TO_FP); - setTargetDAGCombine(ISD::FP_EXTEND); - setTargetDAGCombine(ISD::EXTRACT_SUBVECTOR); + setTargetDAGCombine({ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::FP_EXTEND, + ISD::EXTRACT_SUBVECTOR}); // Combine fp_to_{s,u}int_sat or fp_round of concat_vectors or vice versa // into conversion ops - setTargetDAGCombine(ISD::FP_TO_SINT_SAT); - setTargetDAGCombine(ISD::FP_TO_UINT_SAT); - setTargetDAGCombine(ISD::FP_ROUND); - setTargetDAGCombine(ISD::CONCAT_VECTORS); + setTargetDAGCombine({ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT, + ISD::FP_ROUND, ISD::CONCAT_VECTORS}); setTargetDAGCombine(ISD::TRUNCATE); diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp --- a/llvm/lib/Target/X86/X86ISelLowering.cpp +++ b/llvm/lib/Target/X86/X86ISelLowering.cpp @@ -2210,55 +2210,55 @@ setOperationAction(Op, MVT::f32, Promote); // We have target-specific dag combine patterns for the following nodes: - setTargetDAGCombine(ISD::VECTOR_SHUFFLE); - setTargetDAGCombine(ISD::SCALAR_TO_VECTOR); - setTargetDAGCombine(ISD::INSERT_VECTOR_ELT); - setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT); - setTargetDAGCombine(ISD::CONCAT_VECTORS); - setTargetDAGCombine(ISD::INSERT_SUBVECTOR); - setTargetDAGCombine(ISD::EXTRACT_SUBVECTOR); - 
setTargetDAGCombine(ISD::BITCAST); - setTargetDAGCombine(ISD::VSELECT); - setTargetDAGCombine(ISD::SELECT); - setTargetDAGCombine(ISD::SHL); - setTargetDAGCombine(ISD::SRA); - setTargetDAGCombine(ISD::SRL); - setTargetDAGCombine(ISD::OR); - setTargetDAGCombine(ISD::AND); - setTargetDAGCombine(ISD::ADD); - setTargetDAGCombine(ISD::FADD); - setTargetDAGCombine(ISD::FSUB); - setTargetDAGCombine(ISD::FNEG); - setTargetDAGCombine(ISD::FMA); - setTargetDAGCombine(ISD::STRICT_FMA); - setTargetDAGCombine(ISD::FMINNUM); - setTargetDAGCombine(ISD::FMAXNUM); - setTargetDAGCombine(ISD::SUB); - setTargetDAGCombine(ISD::LOAD); - setTargetDAGCombine(ISD::MLOAD); - setTargetDAGCombine(ISD::STORE); - setTargetDAGCombine(ISD::MSTORE); - setTargetDAGCombine(ISD::TRUNCATE); - setTargetDAGCombine(ISD::ZERO_EXTEND); - setTargetDAGCombine(ISD::ANY_EXTEND); - setTargetDAGCombine(ISD::SIGN_EXTEND); - setTargetDAGCombine(ISD::SIGN_EXTEND_INREG); - setTargetDAGCombine(ISD::ANY_EXTEND_VECTOR_INREG); - setTargetDAGCombine(ISD::SIGN_EXTEND_VECTOR_INREG); - setTargetDAGCombine(ISD::ZERO_EXTEND_VECTOR_INREG); - setTargetDAGCombine(ISD::SINT_TO_FP); - setTargetDAGCombine(ISD::UINT_TO_FP); - setTargetDAGCombine(ISD::STRICT_SINT_TO_FP); - setTargetDAGCombine(ISD::STRICT_UINT_TO_FP); - setTargetDAGCombine(ISD::SETCC); - setTargetDAGCombine(ISD::MUL); - setTargetDAGCombine(ISD::XOR); - setTargetDAGCombine(ISD::MSCATTER); - setTargetDAGCombine(ISD::MGATHER); - setTargetDAGCombine(ISD::FP16_TO_FP); - setTargetDAGCombine(ISD::FP_EXTEND); - setTargetDAGCombine(ISD::STRICT_FP_EXTEND); - setTargetDAGCombine(ISD::FP_ROUND); + setTargetDAGCombine({ISD::VECTOR_SHUFFLE, + ISD::SCALAR_TO_VECTOR, + ISD::INSERT_VECTOR_ELT, + ISD::EXTRACT_VECTOR_ELT, + ISD::CONCAT_VECTORS, + ISD::INSERT_SUBVECTOR, + ISD::EXTRACT_SUBVECTOR, + ISD::BITCAST, + ISD::VSELECT, + ISD::SELECT, + ISD::SHL, + ISD::SRA, + ISD::SRL, + ISD::OR, + ISD::AND, + ISD::ADD, + ISD::FADD, + ISD::FSUB, + ISD::FNEG, + ISD::FMA, + ISD::STRICT_FMA, + 
ISD::FMINNUM, + ISD::FMAXNUM, + ISD::SUB, + ISD::LOAD, + ISD::MLOAD, + ISD::STORE, + ISD::MSTORE, + ISD::TRUNCATE, + ISD::ZERO_EXTEND, + ISD::ANY_EXTEND, + ISD::SIGN_EXTEND, + ISD::SIGN_EXTEND_INREG, + ISD::ANY_EXTEND_VECTOR_INREG, + ISD::SIGN_EXTEND_VECTOR_INREG, + ISD::ZERO_EXTEND_VECTOR_INREG, + ISD::SINT_TO_FP, + ISD::UINT_TO_FP, + ISD::STRICT_SINT_TO_FP, + ISD::STRICT_UINT_TO_FP, + ISD::SETCC, + ISD::MUL, + ISD::XOR, + ISD::MSCATTER, + ISD::MGATHER, + ISD::FP16_TO_FP, + ISD::FP_EXTEND, + ISD::STRICT_FP_EXTEND, + ISD::FP_ROUND}); computeRegisterProperties(Subtarget.getRegisterInfo()); diff --git a/llvm/lib/Target/XCore/XCoreISelLowering.cpp b/llvm/lib/Target/XCore/XCoreISelLowering.cpp --- a/llvm/lib/Target/XCore/XCoreISelLowering.cpp +++ b/llvm/lib/Target/XCore/XCoreISelLowering.cpp @@ -167,10 +167,8 @@ = MaxStoresPerMemcpy = MaxStoresPerMemcpyOptSize = 2; // We have target-specific dag combine patterns for the following nodes: - setTargetDAGCombine(ISD::STORE); - setTargetDAGCombine(ISD::ADD); - setTargetDAGCombine(ISD::INTRINSIC_VOID); - setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN); + setTargetDAGCombine( + {ISD::STORE, ISD::ADD, ISD::INTRINSIC_VOID, ISD::INTRINSIC_W_CHAIN}); setMinFunctionAlignment(Align(2)); setPrefFunctionAlignment(Align(4));