diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h --- a/llvm/include/llvm/CodeGen/TargetLowering.h +++ b/llvm/include/llvm/CodeGen/TargetLowering.h @@ -2253,6 +2253,21 @@ assert(Op < array_lengthof(OpActions[0]) && "Table isn't big enough!"); OpActions[(unsigned)VT.SimpleTy][Op] = Action; } + void setOperationAction(ArrayRef<unsigned> Ops, MVT VT, + LegalizeAction Action) { + for (auto Op : Ops) + setOperationAction(Op, VT, Action); + } + void setOperationAction(ArrayRef<unsigned> Ops, ArrayRef<MVT> VTs, + LegalizeAction Action) { + for (auto VT : VTs) + setOperationAction(Ops, VT, Action); + } + void setOperationAction(unsigned Op, ArrayRef<MVT> VTs, + LegalizeAction Action) { + for (auto VT : VTs) + setOperationAction(Op, VT, Action); + } /// Indicate that the specified load with extension does not work with the /// specified type and indicate what to do about it. @@ -2265,6 +2280,11 @@ LoadExtActions[ValVT.SimpleTy][MemVT.SimpleTy] &= ~((uint16_t)0xF << Shift); LoadExtActions[ValVT.SimpleTy][MemVT.SimpleTy] |= (uint16_t)Action << Shift; } + void setLoadExtAction(ArrayRef<ISD::LoadExtType> ExtTypes, MVT ValVT, MVT MemVT, + LegalizeAction Action) { + for (auto ET : ExtTypes) + setLoadExtAction(ET, ValVT, MemVT, Action); + } /// Indicate that the specified truncating store does not work with the /// specified type and indicate what to do about it. @@ -2325,6 +2345,16 @@ CondCodeActions[CC][VT.SimpleTy >> 3] &= ~((uint32_t)0xF << Shift); CondCodeActions[CC][VT.SimpleTy >> 3] |= (uint32_t)Action << Shift; } + void setCondCodeAction(ArrayRef<ISD::CondCode> CCs, MVT VT, + LegalizeAction Action) { + for (auto CC : CCs) + setCondCodeAction(CC, VT, Action); + } + void setCondCodeAction(ArrayRef<ISD::CondCode> CCs, ArrayRef<MVT> VTs, + LegalizeAction Action) { + for (auto VT : VTs) + setCondCodeAction(CCs, VT, Action); + } /// If Opc/OrigVT is specified as being promoted, the promotion code defaults /// to trying a larger integer/fp until it can find one that works. 
If that @@ -2975,9 +3005,13 @@ // /// Rename the default libcall routine name for the specified libcall. - void setLibcallName(RTLIB::Libcall Call, const char *Name) { + void setLibcallName(RTLIB::Libcall Call, const char *Name = nullptr) { LibcallRoutineNames[Call] = Name; } + void setLibcallName(ArrayRef<RTLIB::Libcall> Calls) { + for (auto Call : Calls) + setLibcallName(Call); + } /// Get the libcall routine name for the specified libcall. const char *getLibcallName(RTLIB::Libcall Call) const { diff --git a/llvm/lib/CodeGen/TargetLoweringBase.cpp b/llvm/lib/CodeGen/TargetLoweringBase.cpp --- a/llvm/lib/CodeGen/TargetLoweringBase.cpp +++ b/llvm/lib/CodeGen/TargetLoweringBase.cpp @@ -207,7 +207,7 @@ } if (TT.isOSOpenBSD()) { - setLibcallName(RTLIB::STACKPROTECTOR_CHECK_FAIL, nullptr); + setLibcallName(RTLIB::STACKPROTECTOR_CHECK_FAIL); } } @@ -762,91 +762,62 @@ setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, VT, Expand); // These operations default to expand. - setOperationAction(ISD::FGETSIGN, VT, Expand); - setOperationAction(ISD::CONCAT_VECTORS, VT, Expand); - setOperationAction(ISD::FMINNUM, VT, Expand); - setOperationAction(ISD::FMAXNUM, VT, Expand); - setOperationAction(ISD::FMINNUM_IEEE, VT, Expand); - setOperationAction(ISD::FMAXNUM_IEEE, VT, Expand); - setOperationAction(ISD::FMINIMUM, VT, Expand); - setOperationAction(ISD::FMAXIMUM, VT, Expand); - setOperationAction(ISD::FMAD, VT, Expand); - setOperationAction(ISD::SMIN, VT, Expand); - setOperationAction(ISD::SMAX, VT, Expand); - setOperationAction(ISD::UMIN, VT, Expand); - setOperationAction(ISD::UMAX, VT, Expand); - setOperationAction(ISD::ABS, VT, Expand); - setOperationAction(ISD::FSHL, VT, Expand); - setOperationAction(ISD::FSHR, VT, Expand); - setOperationAction(ISD::SADDSAT, VT, Expand); - setOperationAction(ISD::UADDSAT, VT, Expand); - setOperationAction(ISD::SSUBSAT, VT, Expand); - setOperationAction(ISD::USUBSAT, VT, Expand); - setOperationAction(ISD::SSHLSAT, VT, Expand); - 
setOperationAction(ISD::USHLSAT, VT, Expand); - setOperationAction(ISD::SMULFIX, VT, Expand); - setOperationAction(ISD::SMULFIXSAT, VT, Expand); - setOperationAction(ISD::UMULFIX, VT, Expand); - setOperationAction(ISD::UMULFIXSAT, VT, Expand); - setOperationAction(ISD::SDIVFIX, VT, Expand); - setOperationAction(ISD::SDIVFIXSAT, VT, Expand); - setOperationAction(ISD::UDIVFIX, VT, Expand); - setOperationAction(ISD::UDIVFIXSAT, VT, Expand); - setOperationAction(ISD::FP_TO_SINT_SAT, VT, Expand); - setOperationAction(ISD::FP_TO_UINT_SAT, VT, Expand); + setOperationAction({ISD::FGETSIGN, ISD::CONCAT_VECTORS, + ISD::FMINNUM, ISD::FMAXNUM, + ISD::FMINNUM_IEEE, ISD::FMAXNUM_IEEE, + ISD::FMINIMUM, ISD::FMAXIMUM, + ISD::FMAD, ISD::SMIN, + ISD::SMAX, ISD::UMIN, + ISD::UMAX, ISD::ABS, + ISD::FSHL, ISD::FSHR, + ISD::SADDSAT, ISD::UADDSAT, + ISD::SSUBSAT, ISD::USUBSAT, + ISD::SSHLSAT, ISD::USHLSAT, + ISD::SMULFIX, ISD::SMULFIXSAT, + ISD::UMULFIX, ISD::UMULFIXSAT, + ISD::SDIVFIX, ISD::SDIVFIXSAT, + ISD::UDIVFIX, ISD::UDIVFIXSAT, + ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT}, + VT, Expand); // Overflow operations default to expand - setOperationAction(ISD::SADDO, VT, Expand); - setOperationAction(ISD::SSUBO, VT, Expand); - setOperationAction(ISD::UADDO, VT, Expand); - setOperationAction(ISD::USUBO, VT, Expand); - setOperationAction(ISD::SMULO, VT, Expand); - setOperationAction(ISD::UMULO, VT, Expand); + setOperationAction({ISD::SADDO, ISD::SSUBO, ISD::UADDO, ISD::USUBO, + ISD::SMULO, ISD::UMULO}, + VT, Expand); // ADDCARRY operations default to expand - setOperationAction(ISD::ADDCARRY, VT, Expand); - setOperationAction(ISD::SUBCARRY, VT, Expand); - setOperationAction(ISD::SETCCCARRY, VT, Expand); - setOperationAction(ISD::SADDO_CARRY, VT, Expand); - setOperationAction(ISD::SSUBO_CARRY, VT, Expand); + setOperationAction({ISD::ADDCARRY, ISD::SUBCARRY, ISD::SETCCCARRY, + ISD::SADDO_CARRY, ISD::SSUBO_CARRY}, + VT, Expand); // ADDC/ADDE/SUBC/SUBE default to expand. 
- setOperationAction(ISD::ADDC, VT, Expand); - setOperationAction(ISD::ADDE, VT, Expand); - setOperationAction(ISD::SUBC, VT, Expand); - setOperationAction(ISD::SUBE, VT, Expand); + setOperationAction({ISD::ADDC, ISD::ADDE, ISD::SUBC, ISD::SUBE}, VT, + Expand); // Halving adds - setOperationAction(ISD::AVGFLOORS, VT, Expand); - setOperationAction(ISD::AVGFLOORU, VT, Expand); - setOperationAction(ISD::AVGCEILS, VT, Expand); - setOperationAction(ISD::AVGCEILU, VT, Expand); + setOperationAction( + {ISD::AVGFLOORS, ISD::AVGFLOORU, ISD::AVGCEILS, ISD::AVGCEILU}, VT, + Expand); // Absolute difference - setOperationAction(ISD::ABDS, VT, Expand); - setOperationAction(ISD::ABDU, VT, Expand); + setOperationAction({ISD::ABDS, ISD::ABDU}, VT, Expand); // These default to Expand so they will be expanded to CTLZ/CTTZ by default. - setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand); - setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand); + setOperationAction({ISD::CTLZ_ZERO_UNDEF, ISD::CTTZ_ZERO_UNDEF}, VT, + Expand); - setOperationAction(ISD::BITREVERSE, VT, Expand); - setOperationAction(ISD::PARITY, VT, Expand); + setOperationAction({ISD::BITREVERSE, ISD::PARITY}, VT, Expand); // These library functions default to expand. - setOperationAction(ISD::FROUND, VT, Expand); - setOperationAction(ISD::FROUNDEVEN, VT, Expand); - setOperationAction(ISD::FPOWI, VT, Expand); + setOperationAction({ISD::FROUND, ISD::FROUNDEVEN, ISD::FPOWI}, VT, Expand); // These operations default to expand for vector types. 
- if (VT.isVector()) { - setOperationAction(ISD::FCOPYSIGN, VT, Expand); - setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand); - setOperationAction(ISD::ANY_EXTEND_VECTOR_INREG, VT, Expand); - setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Expand); - setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Expand); - setOperationAction(ISD::SPLAT_VECTOR, VT, Expand); - } + if (VT.isVector()) + setOperationAction({ISD::FCOPYSIGN, ISD::SIGN_EXTEND_INREG, + ISD::ANY_EXTEND_VECTOR_INREG, + ISD::SIGN_EXTEND_VECTOR_INREG, + ISD::ZERO_EXTEND_VECTOR_INREG, ISD::SPLAT_VECTOR}, + VT, Expand); // Constrained floating-point operations default to expand. #define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \ @@ -857,21 +828,13 @@ setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, VT, Expand); // Vector reduction default to expand. - setOperationAction(ISD::VECREDUCE_FADD, VT, Expand); - setOperationAction(ISD::VECREDUCE_FMUL, VT, Expand); - setOperationAction(ISD::VECREDUCE_ADD, VT, Expand); - setOperationAction(ISD::VECREDUCE_MUL, VT, Expand); - setOperationAction(ISD::VECREDUCE_AND, VT, Expand); - setOperationAction(ISD::VECREDUCE_OR, VT, Expand); - setOperationAction(ISD::VECREDUCE_XOR, VT, Expand); - setOperationAction(ISD::VECREDUCE_SMAX, VT, Expand); - setOperationAction(ISD::VECREDUCE_SMIN, VT, Expand); - setOperationAction(ISD::VECREDUCE_UMAX, VT, Expand); - setOperationAction(ISD::VECREDUCE_UMIN, VT, Expand); - setOperationAction(ISD::VECREDUCE_FMAX, VT, Expand); - setOperationAction(ISD::VECREDUCE_FMIN, VT, Expand); - setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Expand); - setOperationAction(ISD::VECREDUCE_SEQ_FMUL, VT, Expand); + setOperationAction( + {ISD::VECREDUCE_FADD, ISD::VECREDUCE_FMUL, ISD::VECREDUCE_ADD, + ISD::VECREDUCE_MUL, ISD::VECREDUCE_AND, ISD::VECREDUCE_OR, + ISD::VECREDUCE_XOR, ISD::VECREDUCE_SMAX, ISD::VECREDUCE_SMIN, + ISD::VECREDUCE_UMAX, ISD::VECREDUCE_UMIN, ISD::VECREDUCE_FMAX, + ISD::VECREDUCE_FMIN, ISD::VECREDUCE_SEQ_FADD, 
ISD::VECREDUCE_SEQ_FMUL}, + VT, Expand); // Named vector shuffles default to expand. setOperationAction(ISD::VECTOR_SPLICE, VT, Expand); @@ -886,39 +849,23 @@ // ConstantFP nodes default to expand. Targets can either change this to // Legal, in which case all fp constants are legal, or use isFPImmLegal() // to optimize expansions for certain constants. - setOperationAction(ISD::ConstantFP, MVT::f16, Expand); - setOperationAction(ISD::ConstantFP, MVT::f32, Expand); - setOperationAction(ISD::ConstantFP, MVT::f64, Expand); - setOperationAction(ISD::ConstantFP, MVT::f80, Expand); - setOperationAction(ISD::ConstantFP, MVT::f128, Expand); + setOperationAction(ISD::ConstantFP, + {MVT::f16, MVT::f32, MVT::f64, MVT::f80, MVT::f128}, + Expand); // These library functions default to expand. - for (MVT VT : {MVT::f32, MVT::f64, MVT::f128}) { - setOperationAction(ISD::FCBRT, VT, Expand); - setOperationAction(ISD::FLOG , VT, Expand); - setOperationAction(ISD::FLOG2, VT, Expand); - setOperationAction(ISD::FLOG10, VT, Expand); - setOperationAction(ISD::FEXP , VT, Expand); - setOperationAction(ISD::FEXP2, VT, Expand); - setOperationAction(ISD::FFLOOR, VT, Expand); - setOperationAction(ISD::FNEARBYINT, VT, Expand); - setOperationAction(ISD::FCEIL, VT, Expand); - setOperationAction(ISD::FRINT, VT, Expand); - setOperationAction(ISD::FTRUNC, VT, Expand); - setOperationAction(ISD::LROUND, VT, Expand); - setOperationAction(ISD::LLROUND, VT, Expand); - setOperationAction(ISD::LRINT, VT, Expand); - setOperationAction(ISD::LLRINT, VT, Expand); - } + setOperationAction({ISD::FCBRT, ISD::FLOG, ISD::FLOG2, ISD::FLOG10, ISD::FEXP, + ISD::FEXP2, ISD::FFLOOR, ISD::FNEARBYINT, ISD::FCEIL, + ISD::FRINT, ISD::FTRUNC, ISD::LROUND, ISD::LLROUND, + ISD::LRINT, ISD::LLRINT}, + {MVT::f32, MVT::f64, MVT::f128}, Expand); // Default ISD::TRAP to expand (which turns it into abort). setOperationAction(ISD::TRAP, MVT::Other, Expand); // On most systems, DEBUGTRAP and TRAP have no difference. 
The "Expand" // here is to inform DAG Legalizer to replace DEBUGTRAP with TRAP. - setOperationAction(ISD::DEBUGTRAP, MVT::Other, Expand); - - setOperationAction(ISD::UBSANTRAP, MVT::Other, Expand); + setOperationAction({ISD::DEBUGTRAP, ISD::UBSANTRAP}, MVT::Other, Expand); } MVT TargetLoweringBase::getScalarShiftAmountTy(const DataLayout &DL, diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp --- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp +++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp @@ -252,8 +252,7 @@ if (Subtarget->hasLS64()) { addRegisterClass(MVT::i64x8, &AArch64::GPR64x8ClassRegClass); - setOperationAction(ISD::LOAD, MVT::i64x8, Custom); - setOperationAction(ISD::STORE, MVT::i64x8, Custom); + setOperationAction({ISD::LOAD, ISD::STORE}, MVT::i64x8, Custom); } if (Subtarget->hasFPARMv8()) { @@ -330,130 +329,70 @@ computeRegisterProperties(Subtarget->getRegisterInfo()); // Provide all sorts of operation actions - setOperationAction(ISD::GlobalAddress, MVT::i64, Custom); - setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom); - setOperationAction(ISD::SETCC, MVT::i32, Custom); - setOperationAction(ISD::SETCC, MVT::i64, Custom); - setOperationAction(ISD::SETCC, MVT::f16, Custom); - setOperationAction(ISD::SETCC, MVT::f32, Custom); - setOperationAction(ISD::SETCC, MVT::f64, Custom); - setOperationAction(ISD::STRICT_FSETCC, MVT::f16, Custom); - setOperationAction(ISD::STRICT_FSETCC, MVT::f32, Custom); - setOperationAction(ISD::STRICT_FSETCC, MVT::f64, Custom); - setOperationAction(ISD::STRICT_FSETCCS, MVT::f16, Custom); - setOperationAction(ISD::STRICT_FSETCCS, MVT::f32, Custom); - setOperationAction(ISD::STRICT_FSETCCS, MVT::f64, Custom); - setOperationAction(ISD::BITREVERSE, MVT::i32, Legal); - setOperationAction(ISD::BITREVERSE, MVT::i64, Legal); + setOperationAction({ISD::GlobalAddress, ISD::GlobalTLSAddress}, MVT::i64, + Custom); + setOperationAction( + ISD::SETCC, {MVT::i32, 
MVT::i64, MVT::f16, MVT::f32, MVT::f64}, Custom); + setOperationAction({ISD::STRICT_FSETCC, ISD::STRICT_FSETCCS}, + {MVT::f16, MVT::f32, MVT::f64}, Custom); + setOperationAction(ISD::BITREVERSE, {MVT::i32, MVT::i64}, Legal); setOperationAction(ISD::BRCOND, MVT::Other, Custom); - setOperationAction(ISD::BR_CC, MVT::i32, Custom); - setOperationAction(ISD::BR_CC, MVT::i64, Custom); - setOperationAction(ISD::BR_CC, MVT::f16, Custom); - setOperationAction(ISD::BR_CC, MVT::f32, Custom); - setOperationAction(ISD::BR_CC, MVT::f64, Custom); - setOperationAction(ISD::SELECT, MVT::i32, Custom); - setOperationAction(ISD::SELECT, MVT::i64, Custom); - setOperationAction(ISD::SELECT, MVT::f16, Custom); - setOperationAction(ISD::SELECT, MVT::f32, Custom); - setOperationAction(ISD::SELECT, MVT::f64, Custom); - setOperationAction(ISD::SELECT_CC, MVT::i32, Custom); - setOperationAction(ISD::SELECT_CC, MVT::i64, Custom); - setOperationAction(ISD::SELECT_CC, MVT::f16, Custom); - setOperationAction(ISD::SELECT_CC, MVT::f32, Custom); - setOperationAction(ISD::SELECT_CC, MVT::f64, Custom); + setOperationAction({ISD::BR_CC, ISD::SELECT, ISD::SELECT_CC}, + {MVT::i32, MVT::i64, MVT::f16, MVT::f32, MVT::f64}, + Custom); setOperationAction(ISD::BR_JT, MVT::Other, Custom); setOperationAction(ISD::JumpTable, MVT::i64, Custom); - setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom); - setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom); - setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom); + setOperationAction({ISD::SHL_PARTS, ISD::SRA_PARTS, ISD::SRL_PARTS}, MVT::i64, + Custom); - setOperationAction(ISD::FREM, MVT::f32, Expand); - setOperationAction(ISD::FREM, MVT::f64, Expand); - setOperationAction(ISD::FREM, MVT::f80, Expand); + setOperationAction(ISD::FREM, {MVT::f32, MVT::f64, MVT::f80}, Expand); setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand); // Custom lowering hooks are needed for XOR // to fold it into CSINC/CSINV. 
- setOperationAction(ISD::XOR, MVT::i32, Custom); - setOperationAction(ISD::XOR, MVT::i64, Custom); + setOperationAction(ISD::XOR, {MVT::i32, MVT::i64}, Custom); // Virtually no operation on f128 is legal, but LLVM can't expand them when // there's a valid register class, so we need custom operations in most cases. setOperationAction(ISD::FABS, MVT::f128, Expand); setOperationAction(ISD::FADD, MVT::f128, LibCall); - setOperationAction(ISD::FCOPYSIGN, MVT::f128, Expand); - setOperationAction(ISD::FCOS, MVT::f128, Expand); + setOperationAction({ISD::FCOPYSIGN, ISD::FCOS}, MVT::f128, Expand); setOperationAction(ISD::FDIV, MVT::f128, LibCall); setOperationAction(ISD::FMA, MVT::f128, Expand); setOperationAction(ISD::FMUL, MVT::f128, LibCall); - setOperationAction(ISD::FNEG, MVT::f128, Expand); - setOperationAction(ISD::FPOW, MVT::f128, Expand); - setOperationAction(ISD::FREM, MVT::f128, Expand); - setOperationAction(ISD::FRINT, MVT::f128, Expand); - setOperationAction(ISD::FSIN, MVT::f128, Expand); - setOperationAction(ISD::FSINCOS, MVT::f128, Expand); - setOperationAction(ISD::FSQRT, MVT::f128, Expand); + setOperationAction({ISD::FNEG, ISD::FPOW, ISD::FREM, ISD::FRINT, ISD::FSIN, + ISD::FSINCOS, ISD::FSQRT}, + MVT::f128, Expand); setOperationAction(ISD::FSUB, MVT::f128, LibCall); setOperationAction(ISD::FTRUNC, MVT::f128, Expand); - setOperationAction(ISD::SETCC, MVT::f128, Custom); - setOperationAction(ISD::STRICT_FSETCC, MVT::f128, Custom); - setOperationAction(ISD::STRICT_FSETCCS, MVT::f128, Custom); - setOperationAction(ISD::BR_CC, MVT::f128, Custom); - setOperationAction(ISD::SELECT, MVT::f128, Custom); - setOperationAction(ISD::SELECT_CC, MVT::f128, Custom); - setOperationAction(ISD::FP_EXTEND, MVT::f128, Custom); + setOperationAction({ISD::SETCC, ISD::STRICT_FSETCC, ISD::STRICT_FSETCCS, + ISD::BR_CC, ISD::SELECT, ISD::SELECT_CC, ISD::FP_EXTEND}, + MVT::f128, Custom); // FIXME: f128 FMINIMUM and FMAXIMUM (including STRICT versions) currently // aren't handled. 
// Lowering for many of the conversions is actually specified by the non-f128 // type. The LowerXXX function will be trivial when f128 isn't involved. - setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom); - setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom); - setOperationAction(ISD::FP_TO_SINT, MVT::i128, Custom); - setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom); - setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i64, Custom); - setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i128, Custom); - setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom); - setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom); - setOperationAction(ISD::FP_TO_UINT, MVT::i128, Custom); - setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom); - setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i64, Custom); - setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i128, Custom); - setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom); - setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom); - setOperationAction(ISD::SINT_TO_FP, MVT::i128, Custom); - setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Custom); - setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i64, Custom); - setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i128, Custom); - setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom); - setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom); - setOperationAction(ISD::UINT_TO_FP, MVT::i128, Custom); - setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i32, Custom); - setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i64, Custom); - setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i128, Custom); - setOperationAction(ISD::FP_ROUND, MVT::f16, Custom); - setOperationAction(ISD::FP_ROUND, MVT::f32, Custom); - setOperationAction(ISD::FP_ROUND, MVT::f64, Custom); - setOperationAction(ISD::STRICT_FP_ROUND, MVT::f16, Custom); - setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Custom); - setOperationAction(ISD::STRICT_FP_ROUND, MVT::f64, Custom); - - 
setOperationAction(ISD::FP_TO_UINT_SAT, MVT::i32, Custom); - setOperationAction(ISD::FP_TO_UINT_SAT, MVT::i64, Custom); - setOperationAction(ISD::FP_TO_SINT_SAT, MVT::i32, Custom); - setOperationAction(ISD::FP_TO_SINT_SAT, MVT::i64, Custom); + setOperationAction({ISD::FP_TO_SINT, ISD::STRICT_FP_TO_SINT, ISD::FP_TO_UINT, + ISD::STRICT_FP_TO_UINT, ISD::SINT_TO_FP, + ISD::STRICT_SINT_TO_FP, ISD::UINT_TO_FP, + ISD::STRICT_UINT_TO_FP}, + {MVT::i32, MVT::i64, MVT::i128}, Custom); + setOperationAction({ISD::FP_ROUND, ISD::STRICT_FP_ROUND}, + {MVT::f16, MVT::f32, MVT::f64}, Custom); + + setOperationAction({ISD::FP_TO_UINT_SAT, ISD::FP_TO_SINT_SAT}, + {MVT::i32, MVT::i64}, Custom); // Variable arguments. - setOperationAction(ISD::VASTART, MVT::Other, Custom); - setOperationAction(ISD::VAARG, MVT::Other, Custom); - setOperationAction(ISD::VACOPY, MVT::Other, Custom); + setOperationAction({ISD::VASTART, ISD::VAARG, ISD::VACOPY}, MVT::Other, + Custom); setOperationAction(ISD::VAEND, MVT::Other, Expand); // Variable-sized objects. - setOperationAction(ISD::STACKSAVE, MVT::Other, Expand); - setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand); + setOperationAction({ISD::STACKSAVE, ISD::STACKRESTORE}, MVT::Other, Expand); if (Subtarget->isTargetWindows()) setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Custom); @@ -467,185 +406,83 @@ setOperationAction(ISD::BlockAddress, MVT::i64, Custom); // Add/Sub overflow ops with MVT::Glues are lowered to NZCV dependences. 
- setOperationAction(ISD::ADDC, MVT::i32, Custom); - setOperationAction(ISD::ADDE, MVT::i32, Custom); - setOperationAction(ISD::SUBC, MVT::i32, Custom); - setOperationAction(ISD::SUBE, MVT::i32, Custom); - setOperationAction(ISD::ADDC, MVT::i64, Custom); - setOperationAction(ISD::ADDE, MVT::i64, Custom); - setOperationAction(ISD::SUBC, MVT::i64, Custom); - setOperationAction(ISD::SUBE, MVT::i64, Custom); + setOperationAction({ISD::ADDC, ISD::ADDE, ISD::SUBC, ISD::SUBE}, + {MVT::i32, MVT::i64}, Custom); // AArch64 lacks both left-rotate and popcount instructions. - setOperationAction(ISD::ROTL, MVT::i32, Expand); - setOperationAction(ISD::ROTL, MVT::i64, Expand); - for (MVT VT : MVT::fixedlen_vector_valuetypes()) { - setOperationAction(ISD::ROTL, VT, Expand); - setOperationAction(ISD::ROTR, VT, Expand); - } + setOperationAction(ISD::ROTL, {MVT::i32, MVT::i64}, Expand); + for (MVT VT : MVT::fixedlen_vector_valuetypes()) + setOperationAction({ISD::ROTL, ISD::ROTR}, VT, Expand); // AArch64 doesn't have i32 MULH{S|U}. - setOperationAction(ISD::MULHU, MVT::i32, Expand); - setOperationAction(ISD::MULHS, MVT::i32, Expand); + setOperationAction({ISD::MULHU, ISD::MULHS}, MVT::i32, Expand); // AArch64 doesn't have {U|S}MUL_LOHI. 
- setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand); - setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand); + setOperationAction({ISD::UMUL_LOHI, ISD::SMUL_LOHI}, MVT::i64, Expand); - setOperationAction(ISD::CTPOP, MVT::i32, Custom); - setOperationAction(ISD::CTPOP, MVT::i64, Custom); - setOperationAction(ISD::CTPOP, MVT::i128, Custom); + setOperationAction(ISD::CTPOP, {MVT::i32, MVT::i64, MVT::i128}, Custom); - setOperationAction(ISD::ABS, MVT::i32, Custom); - setOperationAction(ISD::ABS, MVT::i64, Custom); + setOperationAction(ISD::ABS, {MVT::i32, MVT::i64}, Custom); - setOperationAction(ISD::SDIVREM, MVT::i32, Expand); - setOperationAction(ISD::SDIVREM, MVT::i64, Expand); - for (MVT VT : MVT::fixedlen_vector_valuetypes()) { - setOperationAction(ISD::SDIVREM, VT, Expand); - setOperationAction(ISD::UDIVREM, VT, Expand); - } - setOperationAction(ISD::SREM, MVT::i32, Expand); - setOperationAction(ISD::SREM, MVT::i64, Expand); - setOperationAction(ISD::UDIVREM, MVT::i32, Expand); - setOperationAction(ISD::UDIVREM, MVT::i64, Expand); - setOperationAction(ISD::UREM, MVT::i32, Expand); - setOperationAction(ISD::UREM, MVT::i64, Expand); + setOperationAction(ISD::SDIVREM, {MVT::i32, MVT::i64}, Expand); + for (MVT VT : MVT::fixedlen_vector_valuetypes()) + setOperationAction({ISD::SDIVREM, ISD::UDIVREM}, VT, Expand); + setOperationAction({ISD::SREM, ISD::UDIVREM, ISD::UREM}, {MVT::i32, MVT::i64}, + Expand); // Custom lower Add/Sub/Mul with overflow. 
- setOperationAction(ISD::SADDO, MVT::i32, Custom); - setOperationAction(ISD::SADDO, MVT::i64, Custom); - setOperationAction(ISD::UADDO, MVT::i32, Custom); - setOperationAction(ISD::UADDO, MVT::i64, Custom); - setOperationAction(ISD::SSUBO, MVT::i32, Custom); - setOperationAction(ISD::SSUBO, MVT::i64, Custom); - setOperationAction(ISD::USUBO, MVT::i32, Custom); - setOperationAction(ISD::USUBO, MVT::i64, Custom); - setOperationAction(ISD::SMULO, MVT::i32, Custom); - setOperationAction(ISD::SMULO, MVT::i64, Custom); - setOperationAction(ISD::UMULO, MVT::i32, Custom); - setOperationAction(ISD::UMULO, MVT::i64, Custom); - - setOperationAction(ISD::FSIN, MVT::f32, Expand); - setOperationAction(ISD::FSIN, MVT::f64, Expand); - setOperationAction(ISD::FCOS, MVT::f32, Expand); - setOperationAction(ISD::FCOS, MVT::f64, Expand); - setOperationAction(ISD::FPOW, MVT::f32, Expand); - setOperationAction(ISD::FPOW, MVT::f64, Expand); - setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom); - setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom); + setOperationAction( + {ISD::SADDO, ISD::UADDO, ISD::SSUBO, ISD::USUBO, ISD::SMULO, ISD::UMULO}, + {MVT::i32, MVT::i64}, Custom); + + setOperationAction({ISD::FSIN, ISD::FCOS, ISD::FPOW}, {MVT::f32, MVT::f64}, + Expand); + setOperationAction(ISD::FCOPYSIGN, {MVT::f64, MVT::f32}, Custom); if (Subtarget->hasFullFP16()) setOperationAction(ISD::FCOPYSIGN, MVT::f16, Custom); else setOperationAction(ISD::FCOPYSIGN, MVT::f16, Promote); - setOperationAction(ISD::FREM, MVT::f16, Promote); - setOperationAction(ISD::FREM, MVT::v4f16, Expand); - setOperationAction(ISD::FREM, MVT::v8f16, Expand); - setOperationAction(ISD::FPOW, MVT::f16, Promote); - setOperationAction(ISD::FPOW, MVT::v4f16, Expand); - setOperationAction(ISD::FPOW, MVT::v8f16, Expand); - setOperationAction(ISD::FPOWI, MVT::f16, Promote); - setOperationAction(ISD::FPOWI, MVT::v4f16, Expand); - setOperationAction(ISD::FPOWI, MVT::v8f16, Expand); - setOperationAction(ISD::FCOS, MVT::f16, 
Promote); - setOperationAction(ISD::FCOS, MVT::v4f16, Expand); - setOperationAction(ISD::FCOS, MVT::v8f16, Expand); - setOperationAction(ISD::FSIN, MVT::f16, Promote); - setOperationAction(ISD::FSIN, MVT::v4f16, Expand); - setOperationAction(ISD::FSIN, MVT::v8f16, Expand); - setOperationAction(ISD::FSINCOS, MVT::f16, Promote); - setOperationAction(ISD::FSINCOS, MVT::v4f16, Expand); - setOperationAction(ISD::FSINCOS, MVT::v8f16, Expand); - setOperationAction(ISD::FEXP, MVT::f16, Promote); - setOperationAction(ISD::FEXP, MVT::v4f16, Expand); - setOperationAction(ISD::FEXP, MVT::v8f16, Expand); - setOperationAction(ISD::FEXP2, MVT::f16, Promote); - setOperationAction(ISD::FEXP2, MVT::v4f16, Expand); - setOperationAction(ISD::FEXP2, MVT::v8f16, Expand); - setOperationAction(ISD::FLOG, MVT::f16, Promote); - setOperationAction(ISD::FLOG, MVT::v4f16, Expand); - setOperationAction(ISD::FLOG, MVT::v8f16, Expand); - setOperationAction(ISD::FLOG2, MVT::f16, Promote); - setOperationAction(ISD::FLOG2, MVT::v4f16, Expand); - setOperationAction(ISD::FLOG2, MVT::v8f16, Expand); - setOperationAction(ISD::FLOG10, MVT::f16, Promote); - setOperationAction(ISD::FLOG10, MVT::v4f16, Expand); - setOperationAction(ISD::FLOG10, MVT::v8f16, Expand); + for (auto VT : + {ISD::FREM, ISD::FPOW, ISD::FPOWI, ISD::FCOS, ISD::FSIN, ISD::FSINCOS, + ISD::FEXP, ISD::FEXP2, ISD::FLOG, ISD::FLOG2, ISD::FLOG10}) { + setOperationAction(VT, MVT::f16, Promote); + setOperationAction(VT, {MVT::v4f16, MVT::v8f16}, Expand); + } if (!Subtarget->hasFullFP16()) { - setOperationAction(ISD::SELECT, MVT::f16, Promote); - setOperationAction(ISD::SELECT_CC, MVT::f16, Promote); - setOperationAction(ISD::SETCC, MVT::f16, Promote); - setOperationAction(ISD::BR_CC, MVT::f16, Promote); - setOperationAction(ISD::FADD, MVT::f16, Promote); - setOperationAction(ISD::FSUB, MVT::f16, Promote); - setOperationAction(ISD::FMUL, MVT::f16, Promote); - setOperationAction(ISD::FDIV, MVT::f16, Promote); - setOperationAction(ISD::FMA, 
MVT::f16, Promote); - setOperationAction(ISD::FNEG, MVT::f16, Promote); - setOperationAction(ISD::FABS, MVT::f16, Promote); - setOperationAction(ISD::FCEIL, MVT::f16, Promote); - setOperationAction(ISD::FSQRT, MVT::f16, Promote); - setOperationAction(ISD::FFLOOR, MVT::f16, Promote); - setOperationAction(ISD::FNEARBYINT, MVT::f16, Promote); - setOperationAction(ISD::FRINT, MVT::f16, Promote); - setOperationAction(ISD::FROUND, MVT::f16, Promote); - setOperationAction(ISD::FROUNDEVEN, MVT::f16, Promote); - setOperationAction(ISD::FTRUNC, MVT::f16, Promote); - setOperationAction(ISD::FMINNUM, MVT::f16, Promote); - setOperationAction(ISD::FMAXNUM, MVT::f16, Promote); - setOperationAction(ISD::FMINIMUM, MVT::f16, Promote); - setOperationAction(ISD::FMAXIMUM, MVT::f16, Promote); + setOperationAction( + {ISD::SELECT, ISD::SELECT_CC, ISD::SETCC, ISD::BR_CC, + ISD::FADD, ISD::FSUB, ISD::FMUL, ISD::FDIV, + ISD::FMA, ISD::FNEG, ISD::FABS, ISD::FCEIL, + ISD::FSQRT, ISD::FFLOOR, ISD::FNEARBYINT, ISD::FRINT, + ISD::FROUND, ISD::FROUNDEVEN, ISD::FTRUNC, ISD::FMINNUM, + ISD::FMAXNUM, ISD::FMINIMUM, ISD::FMAXIMUM}, + MVT::f16, Promote); // promote v4f16 to v4f32 when that is known to be safe. 
- setOperationAction(ISD::FADD, MVT::v4f16, Promote); - setOperationAction(ISD::FSUB, MVT::v4f16, Promote); - setOperationAction(ISD::FMUL, MVT::v4f16, Promote); - setOperationAction(ISD::FDIV, MVT::v4f16, Promote); - AddPromotedToType(ISD::FADD, MVT::v4f16, MVT::v4f32); - AddPromotedToType(ISD::FSUB, MVT::v4f16, MVT::v4f32); - AddPromotedToType(ISD::FMUL, MVT::v4f16, MVT::v4f32); - AddPromotedToType(ISD::FDIV, MVT::v4f16, MVT::v4f32); - - setOperationAction(ISD::FABS, MVT::v4f16, Expand); - setOperationAction(ISD::FNEG, MVT::v4f16, Expand); - setOperationAction(ISD::FROUND, MVT::v4f16, Expand); - setOperationAction(ISD::FROUNDEVEN, MVT::v4f16, Expand); - setOperationAction(ISD::FMA, MVT::v4f16, Expand); - setOperationAction(ISD::SETCC, MVT::v4f16, Expand); - setOperationAction(ISD::BR_CC, MVT::v4f16, Expand); - setOperationAction(ISD::SELECT, MVT::v4f16, Expand); - setOperationAction(ISD::SELECT_CC, MVT::v4f16, Expand); - setOperationAction(ISD::FTRUNC, MVT::v4f16, Expand); - setOperationAction(ISD::FCOPYSIGN, MVT::v4f16, Expand); - setOperationAction(ISD::FFLOOR, MVT::v4f16, Expand); - setOperationAction(ISD::FCEIL, MVT::v4f16, Expand); - setOperationAction(ISD::FRINT, MVT::v4f16, Expand); - setOperationAction(ISD::FNEARBYINT, MVT::v4f16, Expand); - setOperationAction(ISD::FSQRT, MVT::v4f16, Expand); - - setOperationAction(ISD::FABS, MVT::v8f16, Expand); - setOperationAction(ISD::FADD, MVT::v8f16, Expand); - setOperationAction(ISD::FCEIL, MVT::v8f16, Expand); - setOperationAction(ISD::FCOPYSIGN, MVT::v8f16, Expand); - setOperationAction(ISD::FDIV, MVT::v8f16, Expand); - setOperationAction(ISD::FFLOOR, MVT::v8f16, Expand); - setOperationAction(ISD::FMA, MVT::v8f16, Expand); - setOperationAction(ISD::FMUL, MVT::v8f16, Expand); - setOperationAction(ISD::FNEARBYINT, MVT::v8f16, Expand); - setOperationAction(ISD::FNEG, MVT::v8f16, Expand); - setOperationAction(ISD::FROUND, MVT::v8f16, Expand); - setOperationAction(ISD::FROUNDEVEN, MVT::v8f16, Expand); - 
setOperationAction(ISD::FRINT, MVT::v8f16, Expand); - setOperationAction(ISD::FSQRT, MVT::v8f16, Expand); - setOperationAction(ISD::FSUB, MVT::v8f16, Expand); - setOperationAction(ISD::FTRUNC, MVT::v8f16, Expand); - setOperationAction(ISD::SETCC, MVT::v8f16, Expand); - setOperationAction(ISD::BR_CC, MVT::v8f16, Expand); - setOperationAction(ISD::SELECT, MVT::v8f16, Expand); - setOperationAction(ISD::SELECT_CC, MVT::v8f16, Expand); - setOperationAction(ISD::FP_EXTEND, MVT::v8f16, Expand); + setOperationAction({ISD::FADD, ISD::FSUB, ISD::FMUL, ISD::FDIV}, MVT::v4f16, + Promote); + AddPromotedToType(ISD::FADD, MVT::v4f16, MVT::v4f32); + AddPromotedToType(ISD::FSUB, MVT::v4f16, MVT::v4f32); + AddPromotedToType(ISD::FMUL, MVT::v4f16, MVT::v4f32); + AddPromotedToType(ISD::FDIV, MVT::v4f16, MVT::v4f32); + + setOperationAction({ISD::FABS, ISD::FNEG, ISD::FROUND, ISD::FROUNDEVEN, + ISD::FMA, ISD::SETCC, ISD::BR_CC, ISD::SELECT, + ISD::SELECT_CC, ISD::FTRUNC, ISD::FCOPYSIGN, + ISD::FFLOOR, ISD::FCEIL, ISD::FRINT, ISD::FNEARBYINT, + ISD::FSQRT}, + MVT::v4f16, Expand); + + setOperationAction({ISD::FABS, ISD::FADD, ISD::FCEIL, + ISD::FCOPYSIGN, ISD::FDIV, ISD::FFLOOR, + ISD::FMA, ISD::FMUL, ISD::FNEARBYINT, + ISD::FNEG, ISD::FROUND, ISD::FROUNDEVEN, + ISD::FRINT, ISD::FSQRT, ISD::FSUB, + ISD::FTRUNC, ISD::SETCC, ISD::BR_CC, + ISD::SELECT, ISD::SELECT_CC, ISD::FP_EXTEND}, + MVT::v8f16, Expand); } // AArch64 has implementations of a lot of rounding-like FP operations. 
@@ -660,8 +497,7 @@ ISD::STRICT_FROUND, ISD::STRICT_FMINNUM, ISD::STRICT_FMAXNUM, ISD::STRICT_FMINIMUM, ISD::STRICT_FMAXIMUM, ISD::STRICT_LROUND, ISD::STRICT_LLROUND, ISD::STRICT_LRINT, ISD::STRICT_LLRINT}) { - for (MVT Ty : {MVT::f32, MVT::f64}) - setOperationAction(Op, Ty, Legal); + setOperationAction(Op, {MVT::f32, MVT::f64}, Legal); if (Subtarget->hasFullFP16()) setOperationAction(Op, MVT::f16, Legal); } @@ -669,15 +505,13 @@ // Basic strict FP operations are legal for (auto Op : {ISD::STRICT_FADD, ISD::STRICT_FSUB, ISD::STRICT_FMUL, ISD::STRICT_FDIV, ISD::STRICT_FMA, ISD::STRICT_FSQRT}) { - for (MVT Ty : {MVT::f32, MVT::f64}) - setOperationAction(Op, Ty, Legal); + setOperationAction(Op, {MVT::f32, MVT::f64}, Legal); if (Subtarget->hasFullFP16()) setOperationAction(Op, MVT::f16, Legal); } // Strict conversion to a larger type is legal - for (auto VT : {MVT::f32, MVT::f64}) - setOperationAction(ISD::STRICT_FP_EXTEND, VT, Legal); + setOperationAction(ISD::STRICT_FP_EXTEND, {MVT::f32, MVT::f64}, Legal); setOperationAction(ISD::PREFETCH, MVT::Other, Custom); @@ -685,39 +519,17 @@ setOperationAction(ISD::SET_ROUNDING, MVT::Other, Custom); setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i128, Custom); - setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i32, Custom); - setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i64, Custom); - setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i32, Custom); - setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i64, Custom); + setOperationAction({ISD::ATOMIC_LOAD_SUB, ISD::ATOMIC_LOAD_AND}, + {MVT::i32, MVT::i64}, Custom); // Generate outline atomics library calls only if LSE was not specified for // subtarget if (Subtarget->outlineAtomics() && !Subtarget->hasLSE()) { - setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i8, LibCall); - setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i16, LibCall); - setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, LibCall); - setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, LibCall); + 
setOperationAction({ISD::ATOMIC_CMP_SWAP, ISD::ATOMIC_SWAP, + ISD::ATOMIC_LOAD_ADD, ISD::ATOMIC_LOAD_OR, + ISD::ATOMIC_LOAD_CLR, ISD::ATOMIC_LOAD_XOR}, + {MVT::i8, MVT::i16, MVT::i32, MVT::i64}, LibCall); setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i128, LibCall); - setOperationAction(ISD::ATOMIC_SWAP, MVT::i8, LibCall); - setOperationAction(ISD::ATOMIC_SWAP, MVT::i16, LibCall); - setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, LibCall); - setOperationAction(ISD::ATOMIC_SWAP, MVT::i64, LibCall); - setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i8, LibCall); - setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i16, LibCall); - setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i32, LibCall); - setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i64, LibCall); - setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i8, LibCall); - setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i16, LibCall); - setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i32, LibCall); - setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i64, LibCall); - setOperationAction(ISD::ATOMIC_LOAD_CLR, MVT::i8, LibCall); - setOperationAction(ISD::ATOMIC_LOAD_CLR, MVT::i16, LibCall); - setOperationAction(ISD::ATOMIC_LOAD_CLR, MVT::i32, LibCall); - setOperationAction(ISD::ATOMIC_LOAD_CLR, MVT::i64, LibCall); - setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i8, LibCall); - setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i16, LibCall); - setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i32, LibCall); - setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i64, LibCall); #define LCALLNAMES(A, B, N) \ setLibcallName(A##N##_RELAX, #B #N "_relax"); \ setLibcallName(A##N##_ACQ, #B #N "_acq"); \ @@ -742,26 +554,22 @@ } // 128-bit loads and stores can be done without expanding - setOperationAction(ISD::LOAD, MVT::i128, Custom); - setOperationAction(ISD::STORE, MVT::i128, Custom); + setOperationAction({ISD::LOAD, ISD::STORE}, MVT::i128, Custom); // Aligned 128-bit loads and stores are single-copy atomic according to the // v8.4a spec. 
if (Subtarget->hasLSE2()) { - setOperationAction(ISD::ATOMIC_LOAD, MVT::i128, Custom); - setOperationAction(ISD::ATOMIC_STORE, MVT::i128, Custom); + setOperationAction({ISD::ATOMIC_LOAD, ISD::ATOMIC_STORE}, MVT::i128, + Custom); } // 256 bit non-temporal stores can be lowered to STNP. Do this as part of the // custom lowering, as there are no un-paired non-temporal stores and // legalization will break up 256 bit inputs. - setOperationAction(ISD::STORE, MVT::v32i8, Custom); - setOperationAction(ISD::STORE, MVT::v16i16, Custom); - setOperationAction(ISD::STORE, MVT::v16f16, Custom); - setOperationAction(ISD::STORE, MVT::v8i32, Custom); - setOperationAction(ISD::STORE, MVT::v8f32, Custom); - setOperationAction(ISD::STORE, MVT::v4f64, Custom); - setOperationAction(ISD::STORE, MVT::v4i64, Custom); + setOperationAction(ISD::STORE, + {MVT::v32i8, MVT::v16i16, MVT::v16f16, MVT::v8i32, + MVT::v8f32, MVT::v4f64, MVT::v4i64}, + Custom); // Lower READCYCLECOUNTER using an mrs from PMCCNTR_EL0. // This requires the Performance Monitors extension. @@ -771,34 +579,26 @@ if (getLibcallName(RTLIB::SINCOS_STRET_F32) != nullptr && getLibcallName(RTLIB::SINCOS_STRET_F64) != nullptr) { // Issue __sincos_stret if available. - setOperationAction(ISD::FSINCOS, MVT::f64, Custom); - setOperationAction(ISD::FSINCOS, MVT::f32, Custom); + setOperationAction(ISD::FSINCOS, {MVT::f64, MVT::f32}, Custom); } else { - setOperationAction(ISD::FSINCOS, MVT::f64, Expand); - setOperationAction(ISD::FSINCOS, MVT::f32, Expand); + setOperationAction(ISD::FSINCOS, {MVT::f64, MVT::f32}, Expand); } - if (Subtarget->getTargetTriple().isOSMSVCRT()) { + if (Subtarget->getTargetTriple().isOSMSVCRT()) // MSVCRT doesn't have powi; fall back to pow - setLibcallName(RTLIB::POWI_F32, nullptr); - setLibcallName(RTLIB::POWI_F64, nullptr); - } + setLibcallName({RTLIB::POWI_F32, RTLIB::POWI_F64}); // Make floating-point constants legal for the large code model, so they don't // become loads from the constant pool. 
- if (Subtarget->isTargetMachO() && TM.getCodeModel() == CodeModel::Large) { - setOperationAction(ISD::ConstantFP, MVT::f32, Legal); - setOperationAction(ISD::ConstantFP, MVT::f64, Legal); - } + if (Subtarget->isTargetMachO() && TM.getCodeModel() == CodeModel::Large) + setOperationAction(ISD::ConstantFP, {MVT::f32, MVT::f64}, Legal); // AArch64 does not have floating-point extending loads, i1 sign-extending // load, floating-point truncating stores, or v2i32->v2i16 truncating store. - for (MVT VT : MVT::fp_valuetypes()) { - setLoadExtAction(ISD::EXTLOAD, VT, MVT::f16, Expand); - setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand); - setLoadExtAction(ISD::EXTLOAD, VT, MVT::f64, Expand); - setLoadExtAction(ISD::EXTLOAD, VT, MVT::f80, Expand); - } + for (MVT VT : MVT::fp_valuetypes()) + for (auto MemVT : {MVT::f16, MVT::f32, MVT::f64, MVT::f80}) + setLoadExtAction(ISD::EXTLOAD, VT, MemVT, Expand); + for (MVT VT : MVT::integer_valuetypes()) setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Expand); @@ -810,9 +610,7 @@ setTruncStoreAction(MVT::f128, MVT::f32, Expand); setTruncStoreAction(MVT::f128, MVT::f16, Expand); - setOperationAction(ISD::BITCAST, MVT::i16, Custom); - setOperationAction(ISD::BITCAST, MVT::f16, Custom); - setOperationAction(ISD::BITCAST, MVT::bf16, Custom); + setOperationAction(ISD::BITCAST, {MVT::i16, MVT::f16, MVT::bf16}, Custom); // Indexed loads and stores are supported. for (unsigned im = (unsigned)ISD::PRE_INC; @@ -836,9 +634,8 @@ } // Trap. - setOperationAction(ISD::TRAP, MVT::Other, Legal); - setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal); - setOperationAction(ISD::UBSANTRAP, MVT::Other, Legal); + setOperationAction({ISD::TRAP, ISD::DEBUGTRAP, ISD::UBSANTRAP}, MVT::Other, + Legal); // We combine OR nodes for bitfield operations. 
setTargetDAGCombine(ISD::OR); @@ -922,29 +719,29 @@ if (Subtarget->hasNEON()) { // FIXME: v1f64 shouldn't be legal if we can avoid it, because it leads to // silliness like this: - for (auto Op : - {ISD::SELECT, ISD::SELECT_CC, ISD::SETCC, - ISD::BR_CC, ISD::FADD, ISD::FSUB, - ISD::FMUL, ISD::FDIV, ISD::FMA, - ISD::FNEG, ISD::FABS, ISD::FCEIL, - ISD::FSQRT, ISD::FFLOOR, ISD::FNEARBYINT, - ISD::FRINT, ISD::FROUND, ISD::FROUNDEVEN, - ISD::FTRUNC, ISD::FMINNUM, ISD::FMAXNUM, - ISD::FMINIMUM, ISD::FMAXIMUM, ISD::STRICT_FADD, - ISD::STRICT_FSUB, ISD::STRICT_FMUL, ISD::STRICT_FDIV, - ISD::STRICT_FMA, ISD::STRICT_FCEIL, ISD::STRICT_FFLOOR, - ISD::STRICT_FSQRT, ISD::STRICT_FRINT, ISD::STRICT_FNEARBYINT, - ISD::STRICT_FROUND, ISD::STRICT_FTRUNC, ISD::STRICT_FROUNDEVEN, - ISD::STRICT_FMINNUM, ISD::STRICT_FMAXNUM, ISD::STRICT_FMINIMUM, - ISD::STRICT_FMAXIMUM}) - setOperationAction(Op, MVT::v1f64, Expand); - - for (auto Op : - {ISD::FP_TO_SINT, ISD::FP_TO_UINT, ISD::SINT_TO_FP, ISD::UINT_TO_FP, - ISD::FP_ROUND, ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT, ISD::MUL, - ISD::STRICT_FP_TO_SINT, ISD::STRICT_FP_TO_UINT, - ISD::STRICT_SINT_TO_FP, ISD::STRICT_UINT_TO_FP, ISD::STRICT_FP_ROUND}) - setOperationAction(Op, MVT::v1i64, Expand); + setOperationAction( + {ISD::SELECT, ISD::SELECT_CC, ISD::SETCC, + ISD::BR_CC, ISD::FADD, ISD::FSUB, + ISD::FMUL, ISD::FDIV, ISD::FMA, + ISD::FNEG, ISD::FABS, ISD::FCEIL, + ISD::FSQRT, ISD::FFLOOR, ISD::FNEARBYINT, + ISD::FRINT, ISD::FROUND, ISD::FROUNDEVEN, + ISD::FTRUNC, ISD::FMINNUM, ISD::FMAXNUM, + ISD::FMINIMUM, ISD::FMAXIMUM, ISD::STRICT_FADD, + ISD::STRICT_FSUB, ISD::STRICT_FMUL, ISD::STRICT_FDIV, + ISD::STRICT_FMA, ISD::STRICT_FCEIL, ISD::STRICT_FFLOOR, + ISD::STRICT_FSQRT, ISD::STRICT_FRINT, ISD::STRICT_FNEARBYINT, + ISD::STRICT_FROUND, ISD::STRICT_FTRUNC, ISD::STRICT_FROUNDEVEN, + ISD::STRICT_FMINNUM, ISD::STRICT_FMAXNUM, ISD::STRICT_FMINIMUM, + ISD::STRICT_FMAXIMUM}, + MVT::v1f64, Expand); + + setOperationAction({ISD::FP_TO_SINT, 
ISD::FP_TO_UINT, ISD::SINT_TO_FP, + ISD::UINT_TO_FP, ISD::FP_ROUND, ISD::FP_TO_SINT_SAT, + ISD::FP_TO_UINT_SAT, ISD::MUL, ISD::STRICT_FP_TO_SINT, + ISD::STRICT_FP_TO_UINT, ISD::STRICT_SINT_TO_FP, + ISD::STRICT_UINT_TO_FP, ISD::STRICT_FP_ROUND}, + MVT::v1i64, Expand); // AArch64 doesn't have a direct vector ->f32 conversion instructions for // elements smaller than i32, so promote the input to i32 first. @@ -954,21 +751,15 @@ // Similarly, there is no direct i32 -> f64 vector conversion instruction. // Or, direct i32 -> f16 vector conversion. Set it so custom, so the // conversion happens in two steps: v4i32 -> v4f32 -> v4f16 - for (auto Op : {ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::STRICT_SINT_TO_FP, - ISD::STRICT_UINT_TO_FP}) - for (auto VT : {MVT::v2i32, MVT::v2i64, MVT::v4i32}) - setOperationAction(Op, VT, Custom); - - if (Subtarget->hasFullFP16()) { - setOperationAction(ISD::SINT_TO_FP, MVT::v8i8, Custom); - setOperationAction(ISD::UINT_TO_FP, MVT::v8i8, Custom); - setOperationAction(ISD::SINT_TO_FP, MVT::v16i8, Custom); - setOperationAction(ISD::UINT_TO_FP, MVT::v16i8, Custom); - setOperationAction(ISD::SINT_TO_FP, MVT::v4i16, Custom); - setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom); - setOperationAction(ISD::SINT_TO_FP, MVT::v8i16, Custom); - setOperationAction(ISD::UINT_TO_FP, MVT::v8i16, Custom); - } else { + setOperationAction({ISD::SINT_TO_FP, ISD::UINT_TO_FP, + ISD::STRICT_SINT_TO_FP, ISD::STRICT_UINT_TO_FP}, + {MVT::v2i32, MVT::v2i64, MVT::v4i32}, Custom); + + if (Subtarget->hasFullFP16()) + setOperationAction({ISD::SINT_TO_FP, ISD::UINT_TO_FP}, + {MVT::v8i8, MVT::v16i8, MVT::v4i16, MVT::v8i16}, + Custom); + else { // when AArch64 doesn't have fullfp16 support, promote the input // to i32 first. 
setOperationPromotedToType(ISD::SINT_TO_FP, MVT::v8i8, MVT::v8i32); @@ -981,65 +772,44 @@ setOperationPromotedToType(ISD::UINT_TO_FP, MVT::v8i16, MVT::v8i32); } - setOperationAction(ISD::CTLZ, MVT::v1i64, Expand); - setOperationAction(ISD::CTLZ, MVT::v2i64, Expand); - setOperationAction(ISD::BITREVERSE, MVT::v8i8, Legal); - setOperationAction(ISD::BITREVERSE, MVT::v16i8, Legal); - setOperationAction(ISD::BITREVERSE, MVT::v2i32, Custom); - setOperationAction(ISD::BITREVERSE, MVT::v4i32, Custom); - setOperationAction(ISD::BITREVERSE, MVT::v1i64, Custom); - setOperationAction(ISD::BITREVERSE, MVT::v2i64, Custom); - for (auto VT : {MVT::v1i64, MVT::v2i64}) { - setOperationAction(ISD::UMAX, VT, Custom); - setOperationAction(ISD::SMAX, VT, Custom); - setOperationAction(ISD::UMIN, VT, Custom); - setOperationAction(ISD::SMIN, VT, Custom); - } + setOperationAction(ISD::CTLZ, {MVT::v1i64, MVT::v2i64}, Expand); + setOperationAction(ISD::BITREVERSE, {MVT::v8i8, MVT::v16i8}, Legal); + setOperationAction(ISD::BITREVERSE, + {MVT::v2i32, MVT::v4i32, MVT::v1i64, MVT::v2i64}, + Custom); + setOperationAction({ISD::UMAX, ISD::SMAX, ISD::UMIN, ISD::SMIN}, + {MVT::v1i64, MVT::v2i64}, Custom); // AArch64 doesn't have MUL.2d: setOperationAction(ISD::MUL, MVT::v2i64, Expand); // Custom handling for some quad-vector types to detect MULL. 
- setOperationAction(ISD::MUL, MVT::v8i16, Custom); - setOperationAction(ISD::MUL, MVT::v4i32, Custom); - setOperationAction(ISD::MUL, MVT::v2i64, Custom); + setOperationAction(ISD::MUL, {MVT::v8i16, MVT::v4i32, MVT::v2i64}, Custom); // Saturates - for (MVT VT : { MVT::v8i8, MVT::v4i16, MVT::v2i32, - MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) { - setOperationAction(ISD::SADDSAT, VT, Legal); - setOperationAction(ISD::UADDSAT, VT, Legal); - setOperationAction(ISD::SSUBSAT, VT, Legal); - setOperationAction(ISD::USUBSAT, VT, Legal); - } + setOperationAction({ISD::SADDSAT, ISD::UADDSAT, ISD::SSUBSAT, ISD::USUBSAT}, + {MVT::v8i8, MVT::v4i16, MVT::v2i32, MVT::v16i8, + MVT::v8i16, MVT::v4i32, MVT::v2i64}, + Legal); - for (MVT VT : {MVT::v8i8, MVT::v4i16, MVT::v2i32, MVT::v16i8, MVT::v8i16, - MVT::v4i32}) { - setOperationAction(ISD::AVGFLOORS, VT, Legal); - setOperationAction(ISD::AVGFLOORU, VT, Legal); - setOperationAction(ISD::AVGCEILS, VT, Legal); - setOperationAction(ISD::AVGCEILU, VT, Legal); - setOperationAction(ISD::ABDS, VT, Legal); - setOperationAction(ISD::ABDU, VT, Legal); - } + setOperationAction( + {ISD::AVGFLOORS, ISD::AVGFLOORU, ISD::AVGCEILS, ISD::AVGCEILU, + ISD::ABDS, ISD::ABDU}, + {MVT::v8i8, MVT::v4i16, MVT::v2i32, MVT::v16i8, MVT::v8i16, MVT::v4i32}, + Legal); // Vector reductions - for (MVT VT : { MVT::v4f16, MVT::v2f32, - MVT::v8f16, MVT::v4f32, MVT::v2f64 }) { + for (MVT VT : {MVT::v4f16, MVT::v2f32, MVT::v8f16, MVT::v4f32, MVT::v2f64}) if (VT.getVectorElementType() != MVT::f16 || Subtarget->hasFullFP16()) { - setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom); - setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom); + setOperationAction({ISD::VECREDUCE_FMAX, ISD::VECREDUCE_FMIN}, VT, + Custom); setOperationAction(ISD::VECREDUCE_FADD, VT, Legal); } - } - for (MVT VT : { MVT::v8i8, MVT::v4i16, MVT::v2i32, - MVT::v16i8, MVT::v8i16, MVT::v4i32 }) { - setOperationAction(ISD::VECREDUCE_ADD, VT, Custom); - setOperationAction(ISD::VECREDUCE_SMAX, VT, 
Custom); - setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom); - setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom); - setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom); - } + setOperationAction( + {ISD::VECREDUCE_ADD, ISD::VECREDUCE_SMAX, ISD::VECREDUCE_SMIN, + ISD::VECREDUCE_UMAX, ISD::VECREDUCE_UMIN}, + {MVT::v8i8, MVT::v4i16, MVT::v2i32, MVT::v16i8, MVT::v8i16, MVT::v4i32}, + Custom); setOperationAction(ISD::VECREDUCE_ADD, MVT::v2i64, Custom); setOperationAction(ISD::ANY_EXTEND, MVT::v4i32, Legal); @@ -1049,24 +819,19 @@ for (MVT VT : MVT::fixedlen_vector_valuetypes()) { setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand); - if (VT == MVT::v16i8 || VT == MVT::v8i16 || VT == MVT::v4i32) { - setOperationAction(ISD::MULHS, VT, Legal); - setOperationAction(ISD::MULHU, VT, Legal); - } else { - setOperationAction(ISD::MULHS, VT, Expand); - setOperationAction(ISD::MULHU, VT, Expand); - } - setOperationAction(ISD::SMUL_LOHI, VT, Expand); - setOperationAction(ISD::UMUL_LOHI, VT, Expand); + if (VT == MVT::v16i8 || VT == MVT::v8i16 || VT == MVT::v4i32) + setOperationAction({ISD::MULHS, ISD::MULHU}, VT, Legal); + else + setOperationAction({ISD::MULHS, ISD::MULHU}, VT, Expand); - setOperationAction(ISD::BSWAP, VT, Expand); - setOperationAction(ISD::CTTZ, VT, Expand); + setOperationAction({ISD::SMUL_LOHI, ISD::UMUL_LOHI}, VT, Expand); + + setOperationAction({ISD::BSWAP, ISD::CTTZ}, VT, Expand); for (MVT InnerVT : MVT::fixedlen_vector_valuetypes()) { setTruncStoreAction(VT, InnerVT, Expand); - setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand); - setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand); - setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand); + setLoadExtAction({ISD::SEXTLOAD, ISD::ZEXTLOAD, ISD::EXTLOAD}, VT, + InnerVT, Expand); } } @@ -1076,131 +841,115 @@ ISD::FROUND, ISD::FROUNDEVEN, ISD::STRICT_FFLOOR, ISD::STRICT_FNEARBYINT, ISD::STRICT_FCEIL, ISD::STRICT_FRINT, ISD::STRICT_FTRUNC, ISD::STRICT_FROUND, ISD::STRICT_FROUNDEVEN}) { - for (MVT Ty : 
{MVT::v2f32, MVT::v4f32, MVT::v2f64}) - setOperationAction(Op, Ty, Legal); + setOperationAction(Op, {MVT::v2f32, MVT::v4f32, MVT::v2f64}, Legal); if (Subtarget->hasFullFP16()) - for (MVT Ty : {MVT::v4f16, MVT::v8f16}) - setOperationAction(Op, Ty, Legal); + setOperationAction(Op, {MVT::v4f16, MVT::v8f16}, Legal); } setTruncStoreAction(MVT::v4i16, MVT::v4i8, Custom); - setLoadExtAction(ISD::EXTLOAD, MVT::v4i16, MVT::v4i8, Custom); - setLoadExtAction(ISD::SEXTLOAD, MVT::v4i16, MVT::v4i8, Custom); - setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i16, MVT::v4i8, Custom); - setLoadExtAction(ISD::EXTLOAD, MVT::v4i32, MVT::v4i8, Custom); - setLoadExtAction(ISD::SEXTLOAD, MVT::v4i32, MVT::v4i8, Custom); - setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i32, MVT::v4i8, Custom); + for (auto VT : {MVT::v4i16, MVT::v4i32}) + setLoadExtAction({ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}, VT, + MVT::v4i8, Custom); setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i8, Custom); } if (Subtarget->hasSVE()) { for (auto VT : {MVT::nxv16i8, MVT::nxv8i16, MVT::nxv4i32, MVT::nxv2i64}) { - setOperationAction(ISD::BITREVERSE, VT, Custom); - setOperationAction(ISD::BSWAP, VT, Custom); - setOperationAction(ISD::CTLZ, VT, Custom); - setOperationAction(ISD::CTPOP, VT, Custom); - setOperationAction(ISD::CTTZ, VT, Custom); - setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom); - setOperationAction(ISD::UINT_TO_FP, VT, Custom); - setOperationAction(ISD::SINT_TO_FP, VT, Custom); - setOperationAction(ISD::FP_TO_UINT, VT, Custom); - setOperationAction(ISD::FP_TO_SINT, VT, Custom); - setOperationAction(ISD::MGATHER, VT, Custom); - setOperationAction(ISD::MSCATTER, VT, Custom); - setOperationAction(ISD::MLOAD, VT, Custom); - setOperationAction(ISD::MUL, VT, Custom); - setOperationAction(ISD::MULHS, VT, Custom); - setOperationAction(ISD::MULHU, VT, Custom); - setOperationAction(ISD::SPLAT_VECTOR, VT, Custom); - setOperationAction(ISD::VECTOR_SPLICE, VT, Custom); - setOperationAction(ISD::SELECT, VT, Custom); - 
setOperationAction(ISD::SETCC, VT, Custom); - setOperationAction(ISD::SDIV, VT, Custom); - setOperationAction(ISD::UDIV, VT, Custom); - setOperationAction(ISD::SMIN, VT, Custom); - setOperationAction(ISD::UMIN, VT, Custom); - setOperationAction(ISD::SMAX, VT, Custom); - setOperationAction(ISD::UMAX, VT, Custom); - setOperationAction(ISD::SHL, VT, Custom); - setOperationAction(ISD::SRL, VT, Custom); - setOperationAction(ISD::SRA, VT, Custom); - setOperationAction(ISD::ABS, VT, Custom); - setOperationAction(ISD::ABDS, VT, Custom); - setOperationAction(ISD::ABDU, VT, Custom); - setOperationAction(ISD::VECREDUCE_ADD, VT, Custom); - setOperationAction(ISD::VECREDUCE_AND, VT, Custom); - setOperationAction(ISD::VECREDUCE_OR, VT, Custom); - setOperationAction(ISD::VECREDUCE_XOR, VT, Custom); - setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom); - setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom); - setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom); - setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom); - - setOperationAction(ISD::UMUL_LOHI, VT, Expand); - setOperationAction(ISD::SMUL_LOHI, VT, Expand); - setOperationAction(ISD::SELECT_CC, VT, Expand); - setOperationAction(ISD::ROTL, VT, Expand); - setOperationAction(ISD::ROTR, VT, Expand); - - setOperationAction(ISD::SADDSAT, VT, Legal); - setOperationAction(ISD::UADDSAT, VT, Legal); - setOperationAction(ISD::SSUBSAT, VT, Legal); - setOperationAction(ISD::USUBSAT, VT, Legal); - setOperationAction(ISD::UREM, VT, Expand); - setOperationAction(ISD::SREM, VT, Expand); - setOperationAction(ISD::SDIVREM, VT, Expand); - setOperationAction(ISD::UDIVREM, VT, Expand); + setOperationAction({ISD::BITREVERSE, + ISD::BSWAP, + ISD::CTLZ, + ISD::CTPOP, + ISD::CTTZ, + ISD::INSERT_SUBVECTOR, + ISD::UINT_TO_FP, + ISD::SINT_TO_FP, + ISD::FP_TO_UINT, + ISD::FP_TO_SINT, + ISD::MGATHER, + ISD::MSCATTER, + ISD::MLOAD, + ISD::MUL, + ISD::MULHS, + ISD::MULHU, + ISD::SPLAT_VECTOR, + ISD::VECTOR_SPLICE, + ISD::SELECT, + ISD::SETCC, + ISD::SDIV, 
+ ISD::UDIV, + ISD::SMIN, + ISD::UMIN, + ISD::SMAX, + ISD::UMAX, + ISD::SHL, + ISD::SRL, + ISD::SRA, + ISD::ABS, + ISD::ABDS, + ISD::ABDU, + ISD::VECREDUCE_ADD, + ISD::VECREDUCE_AND, + ISD::VECREDUCE_OR, + ISD::VECREDUCE_XOR, + ISD::VECREDUCE_UMIN, + ISD::VECREDUCE_UMAX, + ISD::VECREDUCE_SMIN, + ISD::VECREDUCE_SMAX}, + VT, Custom); + + setOperationAction({ISD::UMUL_LOHI, ISD::SMUL_LOHI, ISD::SELECT_CC, + ISD::ROTL, ISD::ROTR}, + VT, Expand); + + setOperationAction( + {ISD::SADDSAT, ISD::UADDSAT, ISD::SSUBSAT, ISD::USUBSAT}, VT, Legal); + setOperationAction({ISD::UREM, ISD::SREM, ISD::SDIVREM, ISD::UDIVREM}, VT, + Expand); } // Illegal unpacked integer vector types. - for (auto VT : {MVT::nxv8i8, MVT::nxv4i16, MVT::nxv2i32}) { - setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom); - setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom); - } + setOperationAction({ISD::EXTRACT_SUBVECTOR, ISD::INSERT_SUBVECTOR}, + {MVT::nxv8i8, MVT::nxv4i16, MVT::nxv2i32}, Custom); // Legalize unpacked bitcasts to REINTERPRET_CAST. 
- for (auto VT : {MVT::nxv2i16, MVT::nxv4i16, MVT::nxv2i32, MVT::nxv2bf16, - MVT::nxv2f16, MVT::nxv4f16, MVT::nxv2f32}) - setOperationAction(ISD::BITCAST, VT, Custom); + setOperationAction(ISD::BITCAST, + {MVT::nxv2i16, MVT::nxv4i16, MVT::nxv2i32, MVT::nxv2bf16, + MVT::nxv2f16, MVT::nxv4f16, MVT::nxv2f32}, + Custom); - for (auto VT : - { MVT::nxv2i8, MVT::nxv2i16, MVT::nxv2i32, MVT::nxv2i64, MVT::nxv4i8, - MVT::nxv4i16, MVT::nxv4i32, MVT::nxv8i8, MVT::nxv8i16 }) - setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Legal); + setOperationAction(ISD::SIGN_EXTEND_INREG, + {MVT::nxv2i8, MVT::nxv2i16, MVT::nxv2i32, MVT::nxv2i64, + MVT::nxv4i8, MVT::nxv4i16, MVT::nxv4i32, MVT::nxv8i8, + MVT::nxv8i16}, + Legal); for (auto VT : {MVT::nxv16i1, MVT::nxv8i1, MVT::nxv4i1, MVT::nxv2i1}) { - setOperationAction(ISD::CONCAT_VECTORS, VT, Custom); - setOperationAction(ISD::SELECT, VT, Custom); - setOperationAction(ISD::SETCC, VT, Custom); - setOperationAction(ISD::SPLAT_VECTOR, VT, Custom); - setOperationAction(ISD::TRUNCATE, VT, Custom); - setOperationAction(ISD::VECREDUCE_AND, VT, Custom); - setOperationAction(ISD::VECREDUCE_OR, VT, Custom); - setOperationAction(ISD::VECREDUCE_XOR, VT, Custom); + setOperationAction({ISD::CONCAT_VECTORS, ISD::SELECT, ISD::SETCC, + ISD::SPLAT_VECTOR, ISD::TRUNCATE, ISD::VECREDUCE_AND, + ISD::VECREDUCE_OR, ISD::VECREDUCE_XOR}, + VT, Custom); setOperationAction(ISD::SELECT_CC, VT, Expand); - setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom); - setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom); - setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom); + setOperationAction({ISD::EXTRACT_VECTOR_ELT, ISD::INSERT_VECTOR_ELT, + ISD::INSERT_SUBVECTOR}, + VT, Custom); // There are no legal MVT::nxv16f## based types. 
if (VT != MVT::nxv16i1) { - setOperationAction(ISD::SINT_TO_FP, VT, Custom); - setOperationAction(ISD::UINT_TO_FP, VT, Custom); + setOperationAction({ISD::SINT_TO_FP, ISD::UINT_TO_FP}, VT, Custom); } } // NEON doesn't support masked loads/stores/gathers/scatters, but SVE does - for (auto VT : {MVT::v4f16, MVT::v8f16, MVT::v2f32, MVT::v4f32, MVT::v1f64, - MVT::v2f64, MVT::v8i8, MVT::v16i8, MVT::v4i16, MVT::v8i16, - MVT::v2i32, MVT::v4i32, MVT::v1i64, MVT::v2i64}) { - setOperationAction(ISD::MLOAD, VT, Custom); - setOperationAction(ISD::MSTORE, VT, Custom); - setOperationAction(ISD::MGATHER, VT, Custom); - setOperationAction(ISD::MSCATTER, VT, Custom); - } + + setOperationAction({ISD::MLOAD, ISD::MSTORE, ISD::MGATHER, ISD::MSCATTER}, + {MVT::v4f16, MVT::v8f16, MVT::v2f32, MVT::v4f32, + MVT::v1f64, MVT::v2f64, MVT::v8i8, MVT::v16i8, + MVT::v4i16, MVT::v8i16, MVT::v2i32, MVT::v4i32, + MVT::v1i64, MVT::v2i64}, + Custom); for (MVT VT : MVT::fp_scalable_vector_valuetypes()) { for (MVT InnerVT : MVT::fp_scalable_vector_valuetypes()) { @@ -1208,21 +957,17 @@ // DAGCombiner from creating unsupported truncating stores. setTruncStoreAction(VT, InnerVT, Expand); // SVE does not have floating-point extending loads. - setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand); - setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand); - setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand); + setLoadExtAction({ISD::SEXTLOAD, ISD::ZEXTLOAD, ISD::EXTLOAD}, VT, + InnerVT, Expand); } } // Firstly, exclude all scalable vector extending loads/truncating stores. 
- for (MVT VT : MVT::integer_scalable_vector_valuetypes()) { - for (MVT InnerVT : MVT::integer_scalable_vector_valuetypes()) { + for (MVT VT : MVT::integer_scalable_vector_valuetypes()) + for (MVT InnerVT : MVT::integer_scalable_vector_valuetypes()) // TODO: truncating stores should also be exclude - setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand); - setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand); - setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand); - } - } + setLoadExtAction({ISD::SEXTLOAD, ISD::ZEXTLOAD, ISD::EXTLOAD}, VT, + InnerVT, Expand); // Then, selectively enable those which we directly support. for (auto Op : {ISD::ZEXTLOAD, ISD::SEXTLOAD, ISD::EXTLOAD}) { @@ -1243,89 +988,68 @@ for (auto VT : {MVT::nxv2f16, MVT::nxv4f16, MVT::nxv8f16, MVT::nxv2f32, MVT::nxv4f32, MVT::nxv2f64}) { - setOperationAction(ISD::CONCAT_VECTORS, VT, Custom); - setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom); - setOperationAction(ISD::MGATHER, VT, Custom); - setOperationAction(ISD::MSCATTER, VT, Custom); - setOperationAction(ISD::MLOAD, VT, Custom); - setOperationAction(ISD::SPLAT_VECTOR, VT, Custom); - setOperationAction(ISD::SELECT, VT, Custom); - setOperationAction(ISD::FADD, VT, Custom); - setOperationAction(ISD::FCOPYSIGN, VT, Custom); - setOperationAction(ISD::FDIV, VT, Custom); - setOperationAction(ISD::FMA, VT, Custom); - setOperationAction(ISD::FMAXIMUM, VT, Custom); - setOperationAction(ISD::FMAXNUM, VT, Custom); - setOperationAction(ISD::FMINIMUM, VT, Custom); - setOperationAction(ISD::FMINNUM, VT, Custom); - setOperationAction(ISD::FMUL, VT, Custom); - setOperationAction(ISD::FNEG, VT, Custom); - setOperationAction(ISD::FSUB, VT, Custom); - setOperationAction(ISD::FCEIL, VT, Custom); - setOperationAction(ISD::FFLOOR, VT, Custom); - setOperationAction(ISD::FNEARBYINT, VT, Custom); - setOperationAction(ISD::FRINT, VT, Custom); - setOperationAction(ISD::FROUND, VT, Custom); - setOperationAction(ISD::FROUNDEVEN, VT, Custom); - 
setOperationAction(ISD::FTRUNC, VT, Custom); - setOperationAction(ISD::FSQRT, VT, Custom); - setOperationAction(ISD::FABS, VT, Custom); - setOperationAction(ISD::FP_EXTEND, VT, Custom); - setOperationAction(ISD::FP_ROUND, VT, Custom); - setOperationAction(ISD::VECREDUCE_FADD, VT, Custom); - setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom); - setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom); - setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom); - setOperationAction(ISD::VECTOR_SPLICE, VT, Custom); - - setOperationAction(ISD::SELECT_CC, VT, Expand); - setOperationAction(ISD::FREM, VT, Expand); - setOperationAction(ISD::FPOW, VT, Expand); - setOperationAction(ISD::FPOWI, VT, Expand); - setOperationAction(ISD::FCOS, VT, Expand); - setOperationAction(ISD::FSIN, VT, Expand); - setOperationAction(ISD::FSINCOS, VT, Expand); - setOperationAction(ISD::FEXP, VT, Expand); - setOperationAction(ISD::FEXP2, VT, Expand); - setOperationAction(ISD::FLOG, VT, Expand); - setOperationAction(ISD::FLOG2, VT, Expand); - setOperationAction(ISD::FLOG10, VT, Expand); - - setCondCodeAction(ISD::SETO, VT, Expand); - setCondCodeAction(ISD::SETOLT, VT, Expand); - setCondCodeAction(ISD::SETLT, VT, Expand); - setCondCodeAction(ISD::SETOLE, VT, Expand); - setCondCodeAction(ISD::SETLE, VT, Expand); - setCondCodeAction(ISD::SETULT, VT, Expand); - setCondCodeAction(ISD::SETULE, VT, Expand); - setCondCodeAction(ISD::SETUGE, VT, Expand); - setCondCodeAction(ISD::SETUGT, VT, Expand); - setCondCodeAction(ISD::SETUEQ, VT, Expand); - setCondCodeAction(ISD::SETUNE, VT, Expand); + setOperationAction({ISD::CONCAT_VECTORS, + ISD::INSERT_SUBVECTOR, + ISD::MGATHER, + ISD::MSCATTER, + ISD::MLOAD, + ISD::SPLAT_VECTOR, + ISD::SELECT, + ISD::FADD, + ISD::FCOPYSIGN, + ISD::FDIV, + ISD::FMA, + ISD::FMAXIMUM, + ISD::FMAXNUM, + ISD::FMINIMUM, + ISD::FMINNUM, + ISD::FMUL, + ISD::FNEG, + ISD::FSUB, + ISD::FCEIL, + ISD::FFLOOR, + ISD::FNEARBYINT, + ISD::FRINT, + ISD::FROUND, + ISD::FROUNDEVEN, + ISD::FTRUNC, + 
ISD::FSQRT, + ISD::FABS, + ISD::FP_EXTEND, + ISD::FP_ROUND, + ISD::VECREDUCE_FADD, + ISD::VECREDUCE_FMAX, + ISD::VECREDUCE_FMIN, + ISD::VECREDUCE_SEQ_FADD, + ISD::VECTOR_SPLICE}, + VT, Custom); + + setOperationAction({ISD::SELECT_CC, ISD::FREM, ISD::FPOW, ISD::FPOWI, + ISD::FCOS, ISD::FSIN, ISD::FSINCOS, ISD::FEXP, + ISD::FEXP2, ISD::FLOG, ISD::FLOG2, ISD::FLOG10}, + VT, Expand); + + setCondCodeAction({ISD::SETO, ISD::SETOLT, ISD::SETLT, ISD::SETOLE, + ISD::SETLE, ISD::SETULT, ISD::SETULE, ISD::SETUGE, + ISD::SETUGT, ISD::SETUEQ, ISD::SETUNE}, + VT, Expand); } - for (auto VT : {MVT::nxv2bf16, MVT::nxv4bf16, MVT::nxv8bf16}) { - setOperationAction(ISD::CONCAT_VECTORS, VT, Custom); - setOperationAction(ISD::MGATHER, VT, Custom); - setOperationAction(ISD::MSCATTER, VT, Custom); - setOperationAction(ISD::MLOAD, VT, Custom); - setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom); - setOperationAction(ISD::SPLAT_VECTOR, VT, Custom); - } + setOperationAction({ISD::CONCAT_VECTORS, ISD::MGATHER, ISD::MSCATTER, + ISD::MLOAD, ISD::INSERT_SUBVECTOR, ISD::SPLAT_VECTOR}, + {MVT::nxv2bf16, MVT::nxv4bf16, MVT::nxv8bf16}, Custom); - setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i8, Custom); - setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i16, Custom); + setOperationAction(ISD::INTRINSIC_WO_CHAIN, {MVT::i8, MVT::i16}, Custom); // NEON doesn't support integer divides, but SVE does - for (auto VT : {MVT::v8i8, MVT::v16i8, MVT::v4i16, MVT::v8i16, MVT::v2i32, - MVT::v4i32, MVT::v1i64, MVT::v2i64}) { - setOperationAction(ISD::SDIV, VT, Custom); - setOperationAction(ISD::UDIV, VT, Custom); - } + + setOperationAction({ISD::SDIV, ISD::UDIV}, + {MVT::v8i8, MVT::v16i8, MVT::v4i16, MVT::v8i16, + MVT::v2i32, MVT::v4i32, MVT::v1i64, MVT::v2i64}, + Custom); // NEON doesn't support 64-bit vector integer muls, but SVE does. 
- setOperationAction(ISD::MUL, MVT::v1i64, Custom); - setOperationAction(ISD::MUL, MVT::v2i64, Custom); + setOperationAction(ISD::MUL, {MVT::v1i64, MVT::v2i64}, Custom); // NOTE: Currently this has to happen after computeRegisterProperties rather // than the preferred option of combining it with the addRegisterClass call. @@ -1338,56 +1062,39 @@ addTypeForFixedLengthSVE(VT); // 64bit results can mean a bigger than NEON input. - for (auto VT : {MVT::v8i8, MVT::v4i16}) - setOperationAction(ISD::TRUNCATE, VT, Custom); + setOperationAction(ISD::TRUNCATE, {MVT::v8i8, MVT::v4i16}, Custom); setOperationAction(ISD::FP_ROUND, MVT::v4f16, Custom); // 128bit results imply a bigger than NEON input. - for (auto VT : {MVT::v16i8, MVT::v8i16, MVT::v4i32}) - setOperationAction(ISD::TRUNCATE, VT, Custom); - for (auto VT : {MVT::v8f16, MVT::v4f32}) - setOperationAction(ISD::FP_ROUND, VT, Custom); + setOperationAction(ISD::TRUNCATE, {MVT::v16i8, MVT::v8i16, MVT::v4i32}, + Custom); + setOperationAction(ISD::FP_ROUND, {MVT::v8f16, MVT::v4f32}, Custom); // These operations are not supported on NEON but SVE can do them. 
- setOperationAction(ISD::BITREVERSE, MVT::v1i64, Custom); - setOperationAction(ISD::CTLZ, MVT::v1i64, Custom); - setOperationAction(ISD::CTLZ, MVT::v2i64, Custom); - setOperationAction(ISD::CTTZ, MVT::v1i64, Custom); - setOperationAction(ISD::MULHS, MVT::v1i64, Custom); - setOperationAction(ISD::MULHS, MVT::v2i64, Custom); - setOperationAction(ISD::MULHU, MVT::v1i64, Custom); - setOperationAction(ISD::MULHU, MVT::v2i64, Custom); - setOperationAction(ISD::SMAX, MVT::v1i64, Custom); - setOperationAction(ISD::SMAX, MVT::v2i64, Custom); - setOperationAction(ISD::SMIN, MVT::v1i64, Custom); - setOperationAction(ISD::SMIN, MVT::v2i64, Custom); - setOperationAction(ISD::UMAX, MVT::v1i64, Custom); - setOperationAction(ISD::UMAX, MVT::v2i64, Custom); - setOperationAction(ISD::UMIN, MVT::v1i64, Custom); - setOperationAction(ISD::UMIN, MVT::v2i64, Custom); - setOperationAction(ISD::VECREDUCE_SMAX, MVT::v2i64, Custom); - setOperationAction(ISD::VECREDUCE_SMIN, MVT::v2i64, Custom); - setOperationAction(ISD::VECREDUCE_UMAX, MVT::v2i64, Custom); - setOperationAction(ISD::VECREDUCE_UMIN, MVT::v2i64, Custom); + setOperationAction({ISD::BITREVERSE, ISD::CTTZ}, MVT::v1i64, Custom); + setOperationAction({ISD::CTLZ, ISD::MULHS, ISD::MULHU, ISD::SMAX, + ISD::SMIN, ISD::UMAX, ISD::UMIN}, + {MVT::v1i64, MVT::v2i64}, Custom); + setOperationAction({ISD::VECREDUCE_SMAX, ISD::VECREDUCE_SMIN, + ISD::VECREDUCE_UMAX, ISD::VECREDUCE_UMIN}, + MVT::v2i64, Custom); // Int operations with no NEON support. 
- for (auto VT : {MVT::v8i8, MVT::v16i8, MVT::v4i16, MVT::v8i16, - MVT::v2i32, MVT::v4i32, MVT::v2i64}) { - setOperationAction(ISD::BITREVERSE, VT, Custom); - setOperationAction(ISD::CTTZ, VT, Custom); - setOperationAction(ISD::VECREDUCE_AND, VT, Custom); - setOperationAction(ISD::VECREDUCE_OR, VT, Custom); - setOperationAction(ISD::VECREDUCE_XOR, VT, Custom); - } + setOperationAction({ISD::BITREVERSE, ISD::CTTZ, ISD::VECREDUCE_AND, + ISD::VECREDUCE_OR, ISD::VECREDUCE_XOR}, + {MVT::v8i8, MVT::v16i8, MVT::v4i16, MVT::v8i16, + MVT::v2i32, MVT::v4i32, MVT::v2i64}, + Custom); // FP operations with no NEON support. - for (auto VT : {MVT::v4f16, MVT::v8f16, MVT::v2f32, MVT::v4f32, - MVT::v1f64, MVT::v2f64}) - setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom); + setOperationAction(ISD::VECREDUCE_SEQ_FADD, + {MVT::v4f16, MVT::v8f16, MVT::v2f32, MVT::v4f32, + MVT::v1f64, MVT::v2f64}, + Custom); // Use SVE for vectors with more than 2 elements. - for (auto VT : {MVT::v4f16, MVT::v8f16, MVT::v4f32}) - setOperationAction(ISD::VECREDUCE_FADD, VT, Custom); + setOperationAction(ISD::VECREDUCE_FADD, + {MVT::v4f16, MVT::v8f16, MVT::v4f32}, Custom); } setOperationPromotedToType(ISD::VECTOR_SPLICE, MVT::nxv2i1, MVT::nxv2i64); @@ -1416,37 +1123,24 @@ } // Mark vector float intrinsics as expand. - if (VT == MVT::v2f32 || VT == MVT::v4f32 || VT == MVT::v2f64) { - setOperationAction(ISD::FSIN, VT, Expand); - setOperationAction(ISD::FCOS, VT, Expand); - setOperationAction(ISD::FPOW, VT, Expand); - setOperationAction(ISD::FLOG, VT, Expand); - setOperationAction(ISD::FLOG2, VT, Expand); - setOperationAction(ISD::FLOG10, VT, Expand); - setOperationAction(ISD::FEXP, VT, Expand); - setOperationAction(ISD::FEXP2, VT, Expand); - } + if (VT == MVT::v2f32 || VT == MVT::v4f32 || VT == MVT::v2f64) + setOperationAction({ISD::FSIN, ISD::FCOS, ISD::FPOW, ISD::FLOG, ISD::FLOG2, + ISD::FLOG10, ISD::FEXP, ISD::FEXP2}, + VT, Expand); // But we do support custom-lowering for FCOPYSIGN. 
if (VT == MVT::v2f32 || VT == MVT::v4f32 || VT == MVT::v2f64 || ((VT == MVT::v4f16 || VT == MVT::v8f16) && Subtarget->hasFullFP16())) setOperationAction(ISD::FCOPYSIGN, VT, Custom); - setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom); - setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom); - setOperationAction(ISD::BUILD_VECTOR, VT, Custom); - setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom); - setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom); - setOperationAction(ISD::SRA, VT, Custom); - setOperationAction(ISD::SRL, VT, Custom); - setOperationAction(ISD::SHL, VT, Custom); - setOperationAction(ISD::OR, VT, Custom); - setOperationAction(ISD::SETCC, VT, Custom); + setOperationAction({ISD::EXTRACT_VECTOR_ELT, ISD::INSERT_VECTOR_ELT, + ISD::BUILD_VECTOR, ISD::VECTOR_SHUFFLE, + ISD::EXTRACT_SUBVECTOR, ISD::SRA, ISD::SRL, ISD::SHL, + ISD::OR, ISD::SETCC}, + VT, Custom); setOperationAction(ISD::CONCAT_VECTORS, VT, Legal); - setOperationAction(ISD::SELECT, VT, Expand); - setOperationAction(ISD::SELECT_CC, VT, Expand); - setOperationAction(ISD::VSELECT, VT, Expand); + setOperationAction({ISD::SELECT, ISD::SELECT_CC, ISD::VSELECT}, VT, Expand); for (MVT InnerVT : MVT::all_valuetypes()) setLoadExtAction(ISD::EXTLOAD, InnerVT, VT, Expand); @@ -1454,16 +1148,13 @@ if (VT != MVT::v8i8 && VT != MVT::v16i8) setOperationAction(ISD::CTPOP, VT, Custom); - setOperationAction(ISD::UDIV, VT, Expand); - setOperationAction(ISD::SDIV, VT, Expand); - setOperationAction(ISD::UREM, VT, Expand); - setOperationAction(ISD::SREM, VT, Expand); - setOperationAction(ISD::FREM, VT, Expand); + setOperationAction({ISD::UDIV, ISD::SDIV, ISD::UREM, ISD::SREM, ISD::FREM}, + VT, Expand); - for (unsigned Opcode : - {ISD::FP_TO_SINT, ISD::FP_TO_UINT, ISD::FP_TO_SINT_SAT, - ISD::FP_TO_UINT_SAT, ISD::STRICT_FP_TO_SINT, ISD::STRICT_FP_TO_UINT}) - setOperationAction(Opcode, VT, Custom); + setOperationAction({ISD::FP_TO_SINT, ISD::FP_TO_UINT, ISD::FP_TO_SINT_SAT, + ISD::FP_TO_UINT_SAT, 
ISD::STRICT_FP_TO_SINT, + ISD::STRICT_FP_TO_UINT}, + VT, Custom); if (!VT.isFloatingPoint()) setOperationAction(ISD::ABS, VT, Legal); @@ -1478,13 +1169,13 @@ if (VT.isFloatingPoint() && VT.getVectorElementType() != MVT::bf16 && (VT.getVectorElementType() != MVT::f16 || Subtarget->hasFullFP16())) - for (unsigned Opcode : - {ISD::FMINIMUM, ISD::FMAXIMUM, ISD::FMINNUM, ISD::FMAXNUM, - ISD::STRICT_FMINIMUM, ISD::STRICT_FMAXIMUM, ISD::STRICT_FMINNUM, - ISD::STRICT_FMAXNUM, ISD::STRICT_FADD, ISD::STRICT_FSUB, - ISD::STRICT_FMUL, ISD::STRICT_FDIV, ISD::STRICT_FMA, - ISD::STRICT_FSQRT}) - setOperationAction(Opcode, VT, Legal); + setOperationAction({ISD::FMINIMUM, ISD::FMAXIMUM, ISD::FMINNUM, + ISD::FMAXNUM, ISD::STRICT_FMINIMUM, + ISD::STRICT_FMAXIMUM, ISD::STRICT_FMINNUM, + ISD::STRICT_FMAXNUM, ISD::STRICT_FADD, ISD::STRICT_FSUB, + ISD::STRICT_FMUL, ISD::STRICT_FDIV, ISD::STRICT_FMA, + ISD::STRICT_FSQRT}, + VT, Legal); // Strict fp extend and trunc are legal if (VT.isFloatingPoint() && VT.getScalarSizeInBits() != 16) @@ -1503,8 +1194,7 @@ // * The lowering of the non-strict versions involves target-specific ISD // nodes so we would likely need to add strict versions of all of them and // handle them appropriately. - setOperationAction(ISD::STRICT_FSETCC, VT, Expand); - setOperationAction(ISD::STRICT_FSETCCS, VT, Expand); + setOperationAction({ISD::STRICT_FSETCC, ISD::STRICT_FSETCCS}, VT, Expand); if (Subtarget->isLittleEndian()) { for (unsigned im = (unsigned)ISD::PRE_INC; @@ -1545,27 +1235,18 @@ // We use EXTRACT_SUBVECTOR to "cast" a scalable vector to a fixed length one. 
setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom); - if (VT.isFloatingPoint()) { - setCondCodeAction(ISD::SETO, VT, Expand); - setCondCodeAction(ISD::SETOLT, VT, Expand); - setCondCodeAction(ISD::SETLT, VT, Expand); - setCondCodeAction(ISD::SETOLE, VT, Expand); - setCondCodeAction(ISD::SETLE, VT, Expand); - setCondCodeAction(ISD::SETULT, VT, Expand); - setCondCodeAction(ISD::SETULE, VT, Expand); - setCondCodeAction(ISD::SETUGE, VT, Expand); - setCondCodeAction(ISD::SETUGT, VT, Expand); - setCondCodeAction(ISD::SETUEQ, VT, Expand); - setCondCodeAction(ISD::SETUNE, VT, Expand); - } + if (VT.isFloatingPoint()) + setCondCodeAction({ISD::SETO, ISD::SETOLT, ISD::SETLT, ISD::SETOLE, + ISD::SETLE, ISD::SETULT, ISD::SETULE, ISD::SETUGE, + ISD::SETUGT, ISD::SETUEQ, ISD::SETUNE}, + VT, Expand); // Mark integer truncating stores/extending loads as having custom lowering if (VT.isInteger()) { MVT InnerVT = VT.changeVectorElementType(MVT::i8); while (InnerVT != VT) { setTruncStoreAction(VT, InnerVT, Custom); - setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Custom); - setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Custom); + setLoadExtAction({ISD::ZEXTLOAD, ISD::SEXTLOAD}, VT, InnerVT, Custom); InnerVT = InnerVT.changeVectorElementType( MVT::getIntegerVT(2 * InnerVT.getScalarSizeInBits())); } @@ -1584,87 +1265,88 @@ } // Lower fixed length vector operations to scalable equivalents. 
- setOperationAction(ISD::ABS, VT, Custom); - setOperationAction(ISD::ADD, VT, Custom); - setOperationAction(ISD::AND, VT, Custom); - setOperationAction(ISD::ANY_EXTEND, VT, Custom); - setOperationAction(ISD::BITCAST, VT, Custom); - setOperationAction(ISD::BITREVERSE, VT, Custom); - setOperationAction(ISD::BSWAP, VT, Custom); - setOperationAction(ISD::CONCAT_VECTORS, VT, Custom); - setOperationAction(ISD::CTLZ, VT, Custom); - setOperationAction(ISD::CTPOP, VT, Custom); - setOperationAction(ISD::CTTZ, VT, Custom); - setOperationAction(ISD::FABS, VT, Custom); - setOperationAction(ISD::FADD, VT, Custom); - setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom); - setOperationAction(ISD::FCEIL, VT, Custom); - setOperationAction(ISD::FDIV, VT, Custom); - setOperationAction(ISD::FFLOOR, VT, Custom); - setOperationAction(ISD::FMA, VT, Custom); - setOperationAction(ISD::FMAXIMUM, VT, Custom); - setOperationAction(ISD::FMAXNUM, VT, Custom); - setOperationAction(ISD::FMINIMUM, VT, Custom); - setOperationAction(ISD::FMINNUM, VT, Custom); - setOperationAction(ISD::FMUL, VT, Custom); - setOperationAction(ISD::FNEARBYINT, VT, Custom); - setOperationAction(ISD::FNEG, VT, Custom); - setOperationAction(ISD::FP_EXTEND, VT, Custom); - setOperationAction(ISD::FP_ROUND, VT, Custom); - setOperationAction(ISD::FP_TO_SINT, VT, Custom); - setOperationAction(ISD::FP_TO_UINT, VT, Custom); - setOperationAction(ISD::FRINT, VT, Custom); - setOperationAction(ISD::FROUND, VT, Custom); - setOperationAction(ISD::FROUNDEVEN, VT, Custom); - setOperationAction(ISD::FSQRT, VT, Custom); - setOperationAction(ISD::FSUB, VT, Custom); - setOperationAction(ISD::FTRUNC, VT, Custom); - setOperationAction(ISD::LOAD, VT, Custom); - setOperationAction(ISD::MGATHER, VT, Custom); - setOperationAction(ISD::MLOAD, VT, Custom); - setOperationAction(ISD::MSCATTER, VT, Custom); - setOperationAction(ISD::MSTORE, VT, Custom); - setOperationAction(ISD::MUL, VT, Custom); - setOperationAction(ISD::MULHS, VT, Custom); - 
setOperationAction(ISD::MULHU, VT, Custom); - setOperationAction(ISD::OR, VT, Custom); - setOperationAction(ISD::SDIV, VT, Custom); - setOperationAction(ISD::SELECT, VT, Custom); - setOperationAction(ISD::SETCC, VT, Custom); - setOperationAction(ISD::SHL, VT, Custom); - setOperationAction(ISD::SIGN_EXTEND, VT, Custom); - setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Custom); - setOperationAction(ISD::SINT_TO_FP, VT, Custom); - setOperationAction(ISD::SMAX, VT, Custom); - setOperationAction(ISD::SMIN, VT, Custom); - setOperationAction(ISD::SPLAT_VECTOR, VT, Custom); - setOperationAction(ISD::VECTOR_SPLICE, VT, Custom); - setOperationAction(ISD::SRA, VT, Custom); - setOperationAction(ISD::SRL, VT, Custom); - setOperationAction(ISD::STORE, VT, Custom); - setOperationAction(ISD::SUB, VT, Custom); - setOperationAction(ISD::TRUNCATE, VT, Custom); - setOperationAction(ISD::UDIV, VT, Custom); - setOperationAction(ISD::UINT_TO_FP, VT, Custom); - setOperationAction(ISD::UMAX, VT, Custom); - setOperationAction(ISD::UMIN, VT, Custom); - setOperationAction(ISD::VECREDUCE_ADD, VT, Custom); - setOperationAction(ISD::VECREDUCE_AND, VT, Custom); - setOperationAction(ISD::VECREDUCE_FADD, VT, Custom); - setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom); - setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom); - setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom); - setOperationAction(ISD::VECREDUCE_OR, VT, Custom); - setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom); - setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom); - setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom); - setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom); - setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom); - setOperationAction(ISD::VECREDUCE_XOR, VT, Custom); - setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom); - setOperationAction(ISD::VSELECT, VT, Custom); - setOperationAction(ISD::XOR, VT, Custom); - setOperationAction(ISD::ZERO_EXTEND, VT, Custom); + setOperationAction({ISD::ABS, + ISD::ADD, 
+ ISD::AND, + ISD::ANY_EXTEND, + ISD::BITCAST, + ISD::BITREVERSE, + ISD::BSWAP, + ISD::CONCAT_VECTORS, + ISD::CTLZ, + ISD::CTPOP, + ISD::CTTZ, + ISD::FABS, + ISD::FADD, + ISD::EXTRACT_VECTOR_ELT, + ISD::FCEIL, + ISD::FDIV, + ISD::FFLOOR, + ISD::FMA, + ISD::FMAXIMUM, + ISD::FMAXNUM, + ISD::FMINIMUM, + ISD::FMINNUM, + ISD::FMUL, + ISD::FNEARBYINT, + ISD::FNEG, + ISD::FP_EXTEND, + ISD::FP_ROUND, + ISD::FP_TO_SINT, + ISD::FP_TO_UINT, + ISD::FRINT, + ISD::FROUND, + ISD::FROUNDEVEN, + ISD::FSQRT, + ISD::FSUB, + ISD::FTRUNC, + ISD::LOAD, + ISD::MGATHER, + ISD::MLOAD, + ISD::MSCATTER, + ISD::MSTORE, + ISD::MUL, + ISD::MULHS, + ISD::MULHU, + ISD::OR, + ISD::SDIV, + ISD::SELECT, + ISD::SETCC, + ISD::SHL, + ISD::SIGN_EXTEND, + ISD::SIGN_EXTEND_INREG, + ISD::SINT_TO_FP, + ISD::SMAX, + ISD::SMIN, + ISD::SPLAT_VECTOR, + ISD::VECTOR_SPLICE, + ISD::SRA, + ISD::SRL, + ISD::STORE, + ISD::SUB, + ISD::TRUNCATE, + ISD::UDIV, + ISD::UINT_TO_FP, + ISD::UMAX, + ISD::UMIN, + ISD::VECREDUCE_ADD, + ISD::VECREDUCE_AND, + ISD::VECREDUCE_FADD, + ISD::VECREDUCE_SEQ_FADD, + ISD::VECREDUCE_FMAX, + ISD::VECREDUCE_FMIN, + ISD::VECREDUCE_OR, + ISD::INSERT_VECTOR_ELT, + ISD::VECREDUCE_SMAX, + ISD::VECREDUCE_SMIN, + ISD::VECREDUCE_UMAX, + ISD::VECREDUCE_UMIN, + ISD::VECREDUCE_XOR, + ISD::VECTOR_SHUFFLE, + ISD::VSELECT, + ISD::XOR, + ISD::ZERO_EXTEND}, + VT, Custom); } void AArch64TargetLowering::addDRTypeForNEON(MVT VT) { diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp --- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp @@ -128,49 +128,27 @@ // There are no 64-bit extloads. These should be done as a 32-bit extload and // an extension to 64-bit. 
- for (MVT VT : MVT::integer_valuetypes()) { - setLoadExtAction(ISD::EXTLOAD, MVT::i64, VT, Expand); - setLoadExtAction(ISD::SEXTLOAD, MVT::i64, VT, Expand); - setLoadExtAction(ISD::ZEXTLOAD, MVT::i64, VT, Expand); - } + for (MVT VT : MVT::integer_valuetypes()) + setLoadExtAction({ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}, MVT::i64, VT, + Expand); for (MVT VT : MVT::integer_valuetypes()) { if (VT == MVT::i64) continue; - setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote); - setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Legal); - setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i16, Legal); - setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i32, Expand); - - setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote); - setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i8, Legal); - setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i16, Legal); - setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i32, Expand); - - setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote); - setLoadExtAction(ISD::EXTLOAD, VT, MVT::i8, Legal); - setLoadExtAction(ISD::EXTLOAD, VT, MVT::i16, Legal); - setLoadExtAction(ISD::EXTLOAD, VT, MVT::i32, Expand); + for (auto Op : {ISD::SEXTLOAD, ISD::ZEXTLOAD, ISD::EXTLOAD}) { + setLoadExtAction(Op, VT, MVT::i1, Promote); + setLoadExtAction(Op, VT, MVT::i8, Legal); + setLoadExtAction(Op, VT, MVT::i16, Legal); + setLoadExtAction(Op, VT, MVT::i32, Expand); + } } - for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) { - setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i8, Expand); - setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i8, Expand); - setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v2i8, Expand); - setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i8, Expand); - setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i8, Expand); - setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v4i8, Expand); - setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i16, Expand); - setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i16, Expand); - setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v2i16, Expand); - setLoadExtAction(ISD::EXTLOAD, VT, MVT::v3i16, Expand); - 
setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v3i16, Expand); - setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v3i16, Expand); - setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i16, Expand); - setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i16, Expand); - setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v4i16, Expand); - } + for (auto Op : {ISD::SEXTLOAD, ISD::ZEXTLOAD, ISD::EXTLOAD}) + for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) + for (auto MemVT : + {MVT::v2i8, MVT::v4i8, MVT::v2i16, MVT::v3i16, MVT::v4i16}) + setLoadExtAction(Op, VT, MemVT, Expand); setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand); setLoadExtAction(ISD::EXTLOAD, MVT::v2f32, MVT::v2f16, Expand); @@ -305,229 +283,125 @@ setTruncStoreAction(MVT::v16i64, MVT::v16i8, Expand); setTruncStoreAction(MVT::v16i64, MVT::v16i1, Expand); - setOperationAction(ISD::Constant, MVT::i32, Legal); - setOperationAction(ISD::Constant, MVT::i64, Legal); - setOperationAction(ISD::ConstantFP, MVT::f32, Legal); - setOperationAction(ISD::ConstantFP, MVT::f64, Legal); + setOperationAction(ISD::Constant, {MVT::i32, MVT::i64}, Legal); + setOperationAction(ISD::ConstantFP, {MVT::f32, MVT::f64}, Legal); - setOperationAction(ISD::BR_JT, MVT::Other, Expand); - setOperationAction(ISD::BRIND, MVT::Other, Expand); + setOperationAction({ISD::BR_JT, ISD::BRIND}, MVT::Other, Expand); // This is totally unsupported, just custom lower to produce an error. setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom); // Library functions. These default to Expand, but we have instructions // for them. 
- setOperationAction(ISD::FCEIL, MVT::f32, Legal); - setOperationAction(ISD::FEXP2, MVT::f32, Legal); - setOperationAction(ISD::FPOW, MVT::f32, Legal); - setOperationAction(ISD::FLOG2, MVT::f32, Legal); - setOperationAction(ISD::FABS, MVT::f32, Legal); - setOperationAction(ISD::FFLOOR, MVT::f32, Legal); - setOperationAction(ISD::FRINT, MVT::f32, Legal); - setOperationAction(ISD::FTRUNC, MVT::f32, Legal); - setOperationAction(ISD::FMINNUM, MVT::f32, Legal); - setOperationAction(ISD::FMAXNUM, MVT::f32, Legal); - - setOperationAction(ISD::FROUND, MVT::f32, Custom); - setOperationAction(ISD::FROUND, MVT::f64, Custom); + setOperationAction({ISD::FCEIL, ISD::FEXP2, ISD::FPOW, ISD::FLOG2, ISD::FABS, + ISD::FFLOOR, ISD::FRINT, ISD::FTRUNC, ISD::FMINNUM, + ISD::FMAXNUM}, + MVT::f32, Legal); - setOperationAction(ISD::FLOG, MVT::f32, Custom); - setOperationAction(ISD::FLOG10, MVT::f32, Custom); - setOperationAction(ISD::FEXP, MVT::f32, Custom); + setOperationAction(ISD::FROUND, {MVT::f32, MVT::f64}, Custom); + setOperationAction({ISD::FLOG, ISD::FLOG10, ISD::FEXP}, MVT::f32, Custom); - setOperationAction(ISD::FNEARBYINT, MVT::f32, Custom); - setOperationAction(ISD::FNEARBYINT, MVT::f64, Custom); + setOperationAction(ISD::FNEARBYINT, {MVT::f32, MVT::f64}, Custom); - setOperationAction(ISD::FREM, MVT::f16, Custom); - setOperationAction(ISD::FREM, MVT::f32, Custom); - setOperationAction(ISD::FREM, MVT::f64, Custom); + setOperationAction(ISD::FREM, {MVT::f16, MVT::f32, MVT::f64}, Custom); // Expand to fneg + fadd. 
setOperationAction(ISD::FSUB, MVT::f64, Expand); - setOperationAction(ISD::CONCAT_VECTORS, MVT::v3i32, Custom); - setOperationAction(ISD::CONCAT_VECTORS, MVT::v3f32, Custom); - setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i32, Custom); - setOperationAction(ISD::CONCAT_VECTORS, MVT::v4f32, Custom); - setOperationAction(ISD::CONCAT_VECTORS, MVT::v5i32, Custom); - setOperationAction(ISD::CONCAT_VECTORS, MVT::v5f32, Custom); - setOperationAction(ISD::CONCAT_VECTORS, MVT::v6i32, Custom); - setOperationAction(ISD::CONCAT_VECTORS, MVT::v6f32, Custom); - setOperationAction(ISD::CONCAT_VECTORS, MVT::v7i32, Custom); - setOperationAction(ISD::CONCAT_VECTORS, MVT::v7f32, Custom); - setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i32, Custom); - setOperationAction(ISD::CONCAT_VECTORS, MVT::v8f32, Custom); - setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2f16, Custom); - setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2i16, Custom); - setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4f16, Custom); - setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4i16, Custom); - setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2f32, Custom); - setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2i32, Custom); - setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v3f32, Custom); - setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v3i32, Custom); - setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4f32, Custom); - setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4i32, Custom); - setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v5f32, Custom); - setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v5i32, Custom); - setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v6f32, Custom); - setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v6i32, Custom); - setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v7f32, Custom); - setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v7i32, Custom); - setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v8f32, Custom); - setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v8i32, Custom); - 
setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v16f32, Custom); - setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v16i32, Custom); - setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v32f32, Custom); - setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v32i32, Custom); - setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2f64, Custom); - setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2i64, Custom); - setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v3f64, Custom); - setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v3i64, Custom); - setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4f64, Custom); - setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4i64, Custom); - setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v8f64, Custom); - setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v8i64, Custom); - setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v16f64, Custom); - setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v16i64, Custom); + setOperationAction(ISD::CONCAT_VECTORS, + {MVT::v3i32, MVT::v3f32, MVT::v4i32, MVT::v4f32, + MVT::v5i32, MVT::v5f32, MVT::v6i32, MVT::v6f32, + MVT::v7i32, MVT::v7f32, MVT::v8i32, MVT::v8f32}, + Custom); + setOperationAction(ISD::EXTRACT_SUBVECTOR, + {MVT::v2f16, MVT::v2i16, MVT::v4f16, MVT::v4i16, + MVT::v2f32, MVT::v2i32, MVT::v3f32, MVT::v3i32, + MVT::v4f32, MVT::v4i32, MVT::v5f32, MVT::v5i32, + MVT::v6f32, MVT::v6i32, MVT::v7f32, MVT::v7i32, + MVT::v8f32, MVT::v8i32, MVT::v16f32, MVT::v16i32, + MVT::v32f32, MVT::v32i32, MVT::v2f64, MVT::v2i64, + MVT::v3f64, MVT::v3i64, MVT::v4f64, MVT::v4i64, + MVT::v8f64, MVT::v8i64, MVT::v16f64, MVT::v16i64}, + Custom); setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand); - setOperationAction(ISD::FP_TO_FP16, MVT::f64, Custom); - setOperationAction(ISD::FP_TO_FP16, MVT::f32, Custom); + setOperationAction(ISD::FP_TO_FP16, {MVT::f64, MVT::f32}, Custom); const MVT ScalarIntVTs[] = { MVT::i32, MVT::i64 }; for (MVT VT : ScalarIntVTs) { // These should use [SU]DIVREM, so set them to expand - 
setOperationAction(ISD::SDIV, VT, Expand); - setOperationAction(ISD::UDIV, VT, Expand); - setOperationAction(ISD::SREM, VT, Expand); - setOperationAction(ISD::UREM, VT, Expand); + setOperationAction({ISD::SDIV, ISD::UDIV, ISD::SREM, ISD::UREM}, VT, + Expand); // GPU does not have divrem function for signed or unsigned. - setOperationAction(ISD::SDIVREM, VT, Custom); - setOperationAction(ISD::UDIVREM, VT, Custom); + setOperationAction({ISD::SDIVREM, ISD::UDIVREM}, VT, Custom); // GPU does not have [S|U]MUL_LOHI functions as a single instruction. - setOperationAction(ISD::SMUL_LOHI, VT, Expand); - setOperationAction(ISD::UMUL_LOHI, VT, Expand); + setOperationAction({ISD::SMUL_LOHI, ISD::UMUL_LOHI}, VT, Expand); - setOperationAction(ISD::BSWAP, VT, Expand); - setOperationAction(ISD::CTTZ, VT, Expand); - setOperationAction(ISD::CTLZ, VT, Expand); + setOperationAction({ISD::BSWAP, ISD::CTTZ, ISD::CTLZ}, VT, Expand); // AMDGPU uses ADDC/SUBC/ADDE/SUBE - setOperationAction(ISD::ADDC, VT, Legal); - setOperationAction(ISD::SUBC, VT, Legal); - setOperationAction(ISD::ADDE, VT, Legal); - setOperationAction(ISD::SUBE, VT, Legal); + setOperationAction({ISD::ADDC, ISD::SUBC, ISD::ADDE, ISD::SUBE}, VT, Legal); } // The hardware supports 32-bit FSHR, but not FSHL. setOperationAction(ISD::FSHR, MVT::i32, Legal); // The hardware supports 32-bit ROTR, but not ROTL. 
- setOperationAction(ISD::ROTL, MVT::i32, Expand); - setOperationAction(ISD::ROTL, MVT::i64, Expand); + setOperationAction(ISD::ROTL, {MVT::i32, MVT::i64}, Expand); setOperationAction(ISD::ROTR, MVT::i64, Expand); - setOperationAction(ISD::MULHU, MVT::i16, Expand); - setOperationAction(ISD::MULHS, MVT::i16, Expand); + setOperationAction({ISD::MULHU, ISD::MULHS}, MVT::i16, Expand); - setOperationAction(ISD::MUL, MVT::i64, Expand); - setOperationAction(ISD::MULHU, MVT::i64, Expand); - setOperationAction(ISD::MULHS, MVT::i64, Expand); - setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom); - setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom); - setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom); - setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom); + setOperationAction({ISD::MUL, ISD::MULHU, ISD::MULHS}, MVT::i64, Expand); + setOperationAction( + {ISD::UINT_TO_FP, ISD::SINT_TO_FP, ISD::FP_TO_SINT, ISD::FP_TO_UINT}, + MVT::i64, Custom); setOperationAction(ISD::SELECT_CC, MVT::i64, Expand); - setOperationAction(ISD::SMIN, MVT::i32, Legal); - setOperationAction(ISD::UMIN, MVT::i32, Legal); - setOperationAction(ISD::SMAX, MVT::i32, Legal); - setOperationAction(ISD::UMAX, MVT::i32, Legal); + setOperationAction({ISD::SMIN, ISD::UMIN, ISD::SMAX, ISD::UMAX}, MVT::i32, + Legal); - setOperationAction(ISD::CTTZ, MVT::i64, Custom); - setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Custom); - setOperationAction(ISD::CTLZ, MVT::i64, Custom); - setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Custom); + setOperationAction( + {ISD::CTTZ, ISD::CTTZ_ZERO_UNDEF, ISD::CTLZ, ISD::CTLZ_ZERO_UNDEF}, + MVT::i64, Custom); static const MVT::SimpleValueType VectorIntTypes[] = { MVT::v2i32, MVT::v3i32, MVT::v4i32, MVT::v5i32, MVT::v6i32, MVT::v7i32}; for (MVT VT : VectorIntTypes) { // Expand the following operations for the current type by default. 
- setOperationAction(ISD::ADD, VT, Expand); - setOperationAction(ISD::AND, VT, Expand); - setOperationAction(ISD::FP_TO_SINT, VT, Expand); - setOperationAction(ISD::FP_TO_UINT, VT, Expand); - setOperationAction(ISD::MUL, VT, Expand); - setOperationAction(ISD::MULHU, VT, Expand); - setOperationAction(ISD::MULHS, VT, Expand); - setOperationAction(ISD::OR, VT, Expand); - setOperationAction(ISD::SHL, VT, Expand); - setOperationAction(ISD::SRA, VT, Expand); - setOperationAction(ISD::SRL, VT, Expand); - setOperationAction(ISD::ROTL, VT, Expand); - setOperationAction(ISD::ROTR, VT, Expand); - setOperationAction(ISD::SUB, VT, Expand); - setOperationAction(ISD::SINT_TO_FP, VT, Expand); - setOperationAction(ISD::UINT_TO_FP, VT, Expand); - setOperationAction(ISD::SDIV, VT, Expand); - setOperationAction(ISD::UDIV, VT, Expand); - setOperationAction(ISD::SREM, VT, Expand); - setOperationAction(ISD::UREM, VT, Expand); - setOperationAction(ISD::SMUL_LOHI, VT, Expand); - setOperationAction(ISD::UMUL_LOHI, VT, Expand); - setOperationAction(ISD::SDIVREM, VT, Expand); - setOperationAction(ISD::UDIVREM, VT, Expand); - setOperationAction(ISD::SELECT, VT, Expand); - setOperationAction(ISD::VSELECT, VT, Expand); - setOperationAction(ISD::SELECT_CC, VT, Expand); - setOperationAction(ISD::XOR, VT, Expand); - setOperationAction(ISD::BSWAP, VT, Expand); - setOperationAction(ISD::CTPOP, VT, Expand); - setOperationAction(ISD::CTTZ, VT, Expand); - setOperationAction(ISD::CTLZ, VT, Expand); - setOperationAction(ISD::VECTOR_SHUFFLE, VT, Expand); - setOperationAction(ISD::SETCC, VT, Expand); + setOperationAction({ISD::ADD, ISD::AND, ISD::FP_TO_SINT, + ISD::FP_TO_UINT, ISD::MUL, ISD::MULHU, + ISD::MULHS, ISD::OR, ISD::SHL, + ISD::SRA, ISD::SRL, ISD::ROTL, + ISD::ROTR, ISD::SUB, ISD::SINT_TO_FP, + ISD::UINT_TO_FP, ISD::SDIV, ISD::UDIV, + ISD::SREM, ISD::UREM, ISD::SMUL_LOHI, + ISD::UMUL_LOHI, ISD::SDIVREM, ISD::UDIVREM, + ISD::SELECT, ISD::VSELECT, ISD::SELECT_CC, + ISD::XOR, ISD::BSWAP, ISD::CTPOP, 
+ ISD::CTTZ, ISD::CTLZ, ISD::VECTOR_SHUFFLE, + ISD::SETCC}, + VT, Expand); } static const MVT::SimpleValueType FloatVectorTypes[] = { MVT::v2f32, MVT::v3f32, MVT::v4f32, MVT::v5f32, MVT::v6f32, MVT::v7f32}; for (MVT VT : FloatVectorTypes) { - setOperationAction(ISD::FABS, VT, Expand); - setOperationAction(ISD::FMINNUM, VT, Expand); - setOperationAction(ISD::FMAXNUM, VT, Expand); - setOperationAction(ISD::FADD, VT, Expand); - setOperationAction(ISD::FCEIL, VT, Expand); - setOperationAction(ISD::FCOS, VT, Expand); - setOperationAction(ISD::FDIV, VT, Expand); - setOperationAction(ISD::FEXP2, VT, Expand); - setOperationAction(ISD::FEXP, VT, Expand); - setOperationAction(ISD::FLOG2, VT, Expand); - setOperationAction(ISD::FREM, VT, Expand); - setOperationAction(ISD::FLOG, VT, Expand); - setOperationAction(ISD::FLOG10, VT, Expand); - setOperationAction(ISD::FPOW, VT, Expand); - setOperationAction(ISD::FFLOOR, VT, Expand); - setOperationAction(ISD::FTRUNC, VT, Expand); - setOperationAction(ISD::FMUL, VT, Expand); - setOperationAction(ISD::FMA, VT, Expand); - setOperationAction(ISD::FRINT, VT, Expand); - setOperationAction(ISD::FNEARBYINT, VT, Expand); - setOperationAction(ISD::FSQRT, VT, Expand); - setOperationAction(ISD::FSIN, VT, Expand); - setOperationAction(ISD::FSUB, VT, Expand); - setOperationAction(ISD::FNEG, VT, Expand); - setOperationAction(ISD::VSELECT, VT, Expand); - setOperationAction(ISD::SELECT_CC, VT, Expand); - setOperationAction(ISD::FCOPYSIGN, VT, Expand); - setOperationAction(ISD::VECTOR_SHUFFLE, VT, Expand); - setOperationAction(ISD::SETCC, VT, Expand); - setOperationAction(ISD::FCANONICALIZE, VT, Expand); + setOperationAction( + {ISD::FABS, ISD::FMINNUM, ISD::FMAXNUM, ISD::FADD, + ISD::FCEIL, ISD::FCOS, ISD::FDIV, ISD::FEXP2, + ISD::FEXP, ISD::FLOG2, ISD::FREM, ISD::FLOG, + ISD::FLOG10, ISD::FPOW, ISD::FFLOOR, ISD::FTRUNC, + ISD::FMUL, ISD::FMA, ISD::FRINT, ISD::FNEARBYINT, + ISD::FSQRT, ISD::FSIN, ISD::FSUB, ISD::FNEG, + ISD::VSELECT, ISD::SELECT_CC, 
ISD::FCOPYSIGN, ISD::VECTOR_SHUFFLE, + ISD::SETCC, ISD::FCANONICALIZE}, + VT, Expand); } // This causes using an unrolled select operation rather than expansion with @@ -553,7 +427,7 @@ // There are no libcalls of any kind. for (int I = 0; I < RTLIB::UNKNOWN_LIBCALL; ++I) - setLibcallName(static_cast(I), nullptr); + setLibcallName(static_cast(I)); setSchedulingPreference(Sched::RegPressure); setJumpIsExpensive(true); diff --git a/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp b/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp --- a/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp +++ b/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp @@ -42,39 +42,26 @@ computeRegisterProperties(Subtarget->getRegisterInfo()); // Legalize loads and stores to the private address space. - setOperationAction(ISD::LOAD, MVT::i32, Custom); - setOperationAction(ISD::LOAD, MVT::v2i32, Custom); - setOperationAction(ISD::LOAD, MVT::v4i32, Custom); + setOperationAction(ISD::LOAD, {MVT::i32, MVT::v2i32, MVT::v4i32}, Custom); // EXTLOAD should be the same as ZEXTLOAD. It is legal for some address // spaces, so it is custom lowered to handle those where it isn't. 
- for (MVT VT : MVT::integer_valuetypes()) { - setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote); - setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Custom); - setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i16, Custom); - - setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote); - setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i8, Custom); - setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i16, Custom); - - setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote); - setLoadExtAction(ISD::EXTLOAD, VT, MVT::i8, Custom); - setLoadExtAction(ISD::EXTLOAD, VT, MVT::i16, Custom); - } + for (auto Op : {ISD::SEXTLOAD, ISD::ZEXTLOAD, ISD::EXTLOAD}) + for (MVT VT : MVT::integer_valuetypes()) { + setLoadExtAction(Op, VT, MVT::i1, Promote); + setLoadExtAction(Op, VT, MVT::i8, Custom); + setLoadExtAction(Op, VT, MVT::i16, Custom); + } // Workaround for LegalizeDAG asserting on expansion of i1 vector loads. - setLoadExtAction(ISD::EXTLOAD, MVT::v2i32, MVT::v2i1, Expand); - setLoadExtAction(ISD::SEXTLOAD, MVT::v2i32, MVT::v2i1, Expand); - setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i32, MVT::v2i1, Expand); + setLoadExtAction({ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}, MVT::v2i32, + MVT::v2i1, Expand); - setLoadExtAction(ISD::EXTLOAD, MVT::v4i32, MVT::v4i1, Expand); - setLoadExtAction(ISD::SEXTLOAD, MVT::v4i32, MVT::v4i1, Expand); - setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i32, MVT::v4i1, Expand); + setLoadExtAction({ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}, MVT::v4i32, + MVT::v4i1, Expand); - setOperationAction(ISD::STORE, MVT::i8, Custom); - setOperationAction(ISD::STORE, MVT::i32, Custom); - setOperationAction(ISD::STORE, MVT::v2i32, Custom); - setOperationAction(ISD::STORE, MVT::v4i32, Custom); + setOperationAction(ISD::STORE, {MVT::i8, MVT::i32, MVT::v2i32, MVT::v4i32}, + Custom); setTruncStoreAction(MVT::i32, MVT::i8, Custom); setTruncStoreAction(MVT::i32, MVT::i16, Custom); @@ -96,55 +83,34 @@ setTruncStoreAction(MVT::v4i32, MVT::v4i1, Expand); // Set condition code actions - 
setCondCodeAction(ISD::SETO, MVT::f32, Expand); - setCondCodeAction(ISD::SETUO, MVT::f32, Expand); - setCondCodeAction(ISD::SETLT, MVT::f32, Expand); - setCondCodeAction(ISD::SETLE, MVT::f32, Expand); - setCondCodeAction(ISD::SETOLT, MVT::f32, Expand); - setCondCodeAction(ISD::SETOLE, MVT::f32, Expand); - setCondCodeAction(ISD::SETONE, MVT::f32, Expand); - setCondCodeAction(ISD::SETUEQ, MVT::f32, Expand); - setCondCodeAction(ISD::SETUGE, MVT::f32, Expand); - setCondCodeAction(ISD::SETUGT, MVT::f32, Expand); - setCondCodeAction(ISD::SETULT, MVT::f32, Expand); - setCondCodeAction(ISD::SETULE, MVT::f32, Expand); - - setCondCodeAction(ISD::SETLE, MVT::i32, Expand); - setCondCodeAction(ISD::SETLT, MVT::i32, Expand); - setCondCodeAction(ISD::SETULE, MVT::i32, Expand); - setCondCodeAction(ISD::SETULT, MVT::i32, Expand); - - setOperationAction(ISD::FCOS, MVT::f32, Custom); - setOperationAction(ISD::FSIN, MVT::f32, Custom); - - setOperationAction(ISD::SETCC, MVT::v4i32, Expand); - setOperationAction(ISD::SETCC, MVT::v2i32, Expand); - - setOperationAction(ISD::BR_CC, MVT::i32, Expand); - setOperationAction(ISD::BR_CC, MVT::f32, Expand); + setCondCodeAction({ISD::SETO, ISD::SETUO, ISD::SETLT, ISD::SETLE, ISD::SETOLT, + ISD::SETOLE, ISD::SETONE, ISD::SETUEQ, ISD::SETUGE, + ISD::SETUGT, ISD::SETULT, ISD::SETULE}, + MVT::f32, Expand); + + setCondCodeAction({ISD::SETLE, ISD::SETLT, ISD::SETULE, ISD::SETULT}, + MVT::i32, Expand); + + setOperationAction({ISD::FCOS, ISD::FSIN}, MVT::f32, Custom); + + setOperationAction(ISD::SETCC, {MVT::v4i32, MVT::v2i32}, Expand); + + setOperationAction(ISD::BR_CC, {MVT::i32, MVT::f32}, Expand); setOperationAction(ISD::BRCOND, MVT::Other, Custom); setOperationAction(ISD::FSUB, MVT::f32, Expand); - setOperationAction(ISD::FCEIL, MVT::f64, Custom); - setOperationAction(ISD::FTRUNC, MVT::f64, Custom); - setOperationAction(ISD::FRINT, MVT::f64, Custom); - setOperationAction(ISD::FFLOOR, MVT::f64, Custom); + setOperationAction({ISD::FCEIL, ISD::FTRUNC, 
ISD::FRINT, ISD::FFLOOR}, + MVT::f64, Custom); - setOperationAction(ISD::SELECT_CC, MVT::f32, Custom); - setOperationAction(ISD::SELECT_CC, MVT::i32, Custom); + setOperationAction(ISD::SELECT_CC, {MVT::f32, MVT::i32}, Custom); - setOperationAction(ISD::SETCC, MVT::i32, Expand); - setOperationAction(ISD::SETCC, MVT::f32, Expand); - setOperationAction(ISD::FP_TO_UINT, MVT::i1, Custom); - setOperationAction(ISD::FP_TO_SINT, MVT::i1, Custom); - setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom); - setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom); + setOperationAction(ISD::SETCC, {MVT::i32, MVT::f32}, Expand); + setOperationAction({ISD::FP_TO_UINT, ISD::FP_TO_SINT}, {MVT::i1, MVT::i64}, + Custom); - setOperationAction(ISD::SELECT, MVT::i32, Expand); - setOperationAction(ISD::SELECT, MVT::f32, Expand); - setOperationAction(ISD::SELECT, MVT::v2i32, Expand); - setOperationAction(ISD::SELECT, MVT::v4i32, Expand); + setOperationAction(ISD::SELECT, {MVT::i32, MVT::f32, MVT::v2i32, MVT::v4i32}, + Expand); // ADD, SUB overflow. // TODO: turn these into Legal? 
@@ -158,56 +124,43 @@ if (!Subtarget->hasBFE()) setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand); - setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i1, Expand); - setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i1, Expand); + setOperationAction(ISD::SIGN_EXTEND_INREG, {MVT::v2i1, MVT::v4i1}, Expand); if (!Subtarget->hasBFE()) setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand); - setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Expand); - setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i8, Expand); + setOperationAction(ISD::SIGN_EXTEND_INREG, {MVT::v2i8, MVT::v4i8}, Expand); if (!Subtarget->hasBFE()) setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand); - setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Expand); - setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Expand); + setOperationAction(ISD::SIGN_EXTEND_INREG, {MVT::v2i16, MVT::v4i16}, Expand); setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal); - setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i32, Expand); - setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i32, Expand); + setOperationAction(ISD::SIGN_EXTEND_INREG, {MVT::v2i32, MVT::v4i32}, Expand); setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::Other, Expand); setOperationAction(ISD::FrameIndex, MVT::i32, Custom); - setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i32, Custom); - setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f32, Custom); - setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Custom); - setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom); + setOperationAction(ISD::EXTRACT_VECTOR_ELT, + {MVT::v2i32, MVT::v2f32, MVT::v4i32, MVT::v4f32}, Custom); - setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i32, Custom); - setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f32, Custom); - setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom); - setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom); + setOperationAction(ISD::INSERT_VECTOR_ELT, + 
{MVT::v2i32, MVT::v2f32, MVT::v4i32, MVT::v4f32}, Custom); // We don't have 64-bit shifts. Thus we need either SHX i64 or SHX_PARTS i32 // to be Legal/Custom in order to avoid library calls. - setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom); - setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom); - setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom); + setOperationAction({ISD::SHL_PARTS, ISD::SRL_PARTS, ISD::SRA_PARTS}, MVT::i32, + Custom); - if (!Subtarget->hasFMA()) { - setOperationAction(ISD::FMA, MVT::f32, Expand); - setOperationAction(ISD::FMA, MVT::f64, Expand); - } + if (!Subtarget->hasFMA()) + setOperationAction(ISD::FMA, {MVT::f32, MVT::f64}, Expand); // FIXME: May need no denormals check setOperationAction(ISD::FMAD, MVT::f32, Legal); - if (!Subtarget->hasBFI()) { + if (!Subtarget->hasBFI()) // fcopysign can be done in a single instruction with BFI. - setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand); - setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand); - } + setOperationAction(ISD::FCOPYSIGN, {MVT::f32, MVT::f64}, Expand); if (!Subtarget->hasBCNT(32)) setOperationAction(ISD::CTPOP, MVT::i32, Expand); @@ -229,21 +182,17 @@ setOperationAction(ISD::GlobalAddress, MVT::i32, Custom); const MVT ScalarIntVTs[] = { MVT::i32, MVT::i64 }; - for (MVT VT : ScalarIntVTs) { - setOperationAction(ISD::ADDC, VT, Expand); - setOperationAction(ISD::SUBC, VT, Expand); - setOperationAction(ISD::ADDE, VT, Expand); - setOperationAction(ISD::SUBE, VT, Expand); - } + for (MVT VT : ScalarIntVTs) + setOperationAction({ISD::ADDC, ISD::SUBC, ISD::ADDE, ISD::SUBE}, VT, + Expand); // LLVM will expand these to atomic_cmp_swap(0) // and atomic_swap, respectively. 
- setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Expand); - setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Expand); + setOperationAction({ISD::ATOMIC_LOAD, ISD::ATOMIC_STORE}, MVT::i32, Expand); // We need to custom lower some of the intrinsics - setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom); - setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom); + setOperationAction({ISD::INTRINSIC_VOID, ISD::INTRINSIC_WO_CHAIN}, MVT::Other, + Custom); setSchedulingPreference(Sched::Source); diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp --- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp +++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp @@ -153,27 +153,17 @@ setBooleanVectorContents(ZeroOrOneBooleanContent); // We need to custom lower vector stores from local memory - setOperationAction(ISD::LOAD, MVT::v2i32, Custom); - setOperationAction(ISD::LOAD, MVT::v3i32, Custom); - setOperationAction(ISD::LOAD, MVT::v4i32, Custom); - setOperationAction(ISD::LOAD, MVT::v5i32, Custom); - setOperationAction(ISD::LOAD, MVT::v6i32, Custom); - setOperationAction(ISD::LOAD, MVT::v7i32, Custom); - setOperationAction(ISD::LOAD, MVT::v8i32, Custom); - setOperationAction(ISD::LOAD, MVT::v16i32, Custom); - setOperationAction(ISD::LOAD, MVT::i1, Custom); - setOperationAction(ISD::LOAD, MVT::v32i32, Custom); - - setOperationAction(ISD::STORE, MVT::v2i32, Custom); - setOperationAction(ISD::STORE, MVT::v3i32, Custom); - setOperationAction(ISD::STORE, MVT::v4i32, Custom); - setOperationAction(ISD::STORE, MVT::v5i32, Custom); - setOperationAction(ISD::STORE, MVT::v6i32, Custom); - setOperationAction(ISD::STORE, MVT::v7i32, Custom); - setOperationAction(ISD::STORE, MVT::v8i32, Custom); - setOperationAction(ISD::STORE, MVT::v16i32, Custom); - setOperationAction(ISD::STORE, MVT::i1, Custom); - setOperationAction(ISD::STORE, MVT::v32i32, Custom); + setOperationAction(ISD::LOAD, + {MVT::v2i32, MVT::v3i32, MVT::v4i32, MVT::v5i32, + 
MVT::v6i32, MVT::v7i32, MVT::v8i32, MVT::v16i32, MVT::i1, + MVT::v32i32}, + Custom); + + setOperationAction(ISD::STORE, + {MVT::v2i32, MVT::v3i32, MVT::v4i32, MVT::v5i32, + MVT::v6i32, MVT::v7i32, MVT::v8i32, MVT::v16i32, MVT::i1, + MVT::v32i32}, + Custom); setTruncStoreAction(MVT::v2i32, MVT::v2i16, Expand); setTruncStoreAction(MVT::v3i32, MVT::v3i16, Expand); @@ -200,71 +190,47 @@ setTruncStoreAction(MVT::v8i64, MVT::v8i32, Expand); setTruncStoreAction(MVT::v16i64, MVT::v16i32, Expand); - setOperationAction(ISD::GlobalAddress, MVT::i32, Custom); - setOperationAction(ISD::GlobalAddress, MVT::i64, Custom); + setOperationAction(ISD::GlobalAddress, {MVT::i32, MVT::i64}, Custom); setOperationAction(ISD::SELECT, MVT::i1, Promote); setOperationAction(ISD::SELECT, MVT::i64, Custom); setOperationAction(ISD::SELECT, MVT::f64, Promote); AddPromotedToType(ISD::SELECT, MVT::f64, MVT::i64); - setOperationAction(ISD::SELECT_CC, MVT::f32, Expand); - setOperationAction(ISD::SELECT_CC, MVT::i32, Expand); - setOperationAction(ISD::SELECT_CC, MVT::i64, Expand); - setOperationAction(ISD::SELECT_CC, MVT::f64, Expand); - setOperationAction(ISD::SELECT_CC, MVT::i1, Expand); + setOperationAction(ISD::SELECT_CC, + {MVT::f32, MVT::i32, MVT::i64, MVT::f64, MVT::i1}, Expand); setOperationAction(ISD::SETCC, MVT::i1, Promote); - setOperationAction(ISD::SETCC, MVT::v2i1, Expand); - setOperationAction(ISD::SETCC, MVT::v4i1, Expand); + setOperationAction(ISD::SETCC, {MVT::v2i1, MVT::v4i1}, Expand); AddPromotedToType(ISD::SETCC, MVT::i1, MVT::i32); - setOperationAction(ISD::TRUNCATE, MVT::v2i32, Expand); - setOperationAction(ISD::FP_ROUND, MVT::v2f32, Expand); - setOperationAction(ISD::TRUNCATE, MVT::v3i32, Expand); - setOperationAction(ISD::FP_ROUND, MVT::v3f32, Expand); - setOperationAction(ISD::TRUNCATE, MVT::v4i32, Expand); - setOperationAction(ISD::FP_ROUND, MVT::v4f32, Expand); - setOperationAction(ISD::TRUNCATE, MVT::v5i32, Expand); - setOperationAction(ISD::FP_ROUND, MVT::v5f32, Expand); - 
setOperationAction(ISD::TRUNCATE, MVT::v6i32, Expand); - setOperationAction(ISD::FP_ROUND, MVT::v6f32, Expand); - setOperationAction(ISD::TRUNCATE, MVT::v7i32, Expand); - setOperationAction(ISD::FP_ROUND, MVT::v7f32, Expand); - setOperationAction(ISD::TRUNCATE, MVT::v8i32, Expand); - setOperationAction(ISD::FP_ROUND, MVT::v8f32, Expand); - setOperationAction(ISD::TRUNCATE, MVT::v16i32, Expand); - setOperationAction(ISD::FP_ROUND, MVT::v16f32, Expand); - - setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i1, Custom); - setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i1, Custom); - setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Custom); - setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i8, Custom); - setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Custom); - setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v3i16, Custom); - setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Custom); - setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::Other, Custom); + setOperationAction(ISD::TRUNCATE, + {MVT::v2i32, MVT::v3i32, MVT::v4i32, MVT::v5i32, + MVT::v6i32, MVT::v7i32, MVT::v8i32, MVT::v16i32}, + Expand); + setOperationAction(ISD::FP_ROUND, + {MVT::v2f32, MVT::v3f32, MVT::v4f32, MVT::v5f32, + MVT::v6f32, MVT::v7f32, MVT::v8f32, MVT::v16f32}, + Expand); + + setOperationAction(ISD::SIGN_EXTEND_INREG, + {MVT::v2i1, MVT::v4i1, MVT::v2i8, MVT::v4i8, MVT::v2i16, + MVT::v3i16, MVT::v4i16, MVT::Other}, + Custom); setOperationAction(ISD::BRCOND, MVT::Other, Custom); - setOperationAction(ISD::BR_CC, MVT::i1, Expand); - setOperationAction(ISD::BR_CC, MVT::i32, Expand); - setOperationAction(ISD::BR_CC, MVT::i64, Expand); - setOperationAction(ISD::BR_CC, MVT::f32, Expand); - setOperationAction(ISD::BR_CC, MVT::f64, Expand); + setOperationAction(ISD::BR_CC, + {MVT::i1, MVT::i32, MVT::i64, MVT::f32, MVT::f64}, Expand); - setOperationAction(ISD::UADDO, MVT::i32, Legal); - setOperationAction(ISD::USUBO, MVT::i32, Legal); + setOperationAction({ISD::UADDO, ISD::USUBO}, 
MVT::i32, Legal); - setOperationAction(ISD::ADDCARRY, MVT::i32, Legal); - setOperationAction(ISD::SUBCARRY, MVT::i32, Legal); + setOperationAction({ISD::ADDCARRY, ISD::SUBCARRY}, MVT::i32, Legal); - setOperationAction(ISD::SHL_PARTS, MVT::i64, Expand); - setOperationAction(ISD::SRA_PARTS, MVT::i64, Expand); - setOperationAction(ISD::SRL_PARTS, MVT::i64, Expand); + setOperationAction({ISD::SHL_PARTS, ISD::SRA_PARTS, ISD::SRL_PARTS}, MVT::i64, + Expand); #if 0 - setOperationAction(ISD::ADDCARRY, MVT::i64, Legal); - setOperationAction(ISD::SUBCARRY, MVT::i64, Legal); + setOperationAction({ISD::ADDCARRY, ISD::SUBCARRY}, MVT::i64, Legal); #endif // We only support LOAD/STORE and vector manipulation ops for vectors @@ -374,94 +340,63 @@ AddPromotedToType(ISD::SCALAR_TO_VECTOR, Vec64, MVT::v32i32); } - setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i32, Expand); - setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8f32, Expand); - setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i32, Expand); - setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16f32, Expand); + setOperationAction(ISD::VECTOR_SHUFFLE, + {MVT::v8i32, MVT::v8f32, MVT::v16i32, MVT::v16f32}, + Expand); - setOperationAction(ISD::BUILD_VECTOR, MVT::v4f16, Custom); - setOperationAction(ISD::BUILD_VECTOR, MVT::v4i16, Custom); + setOperationAction(ISD::BUILD_VECTOR, {MVT::v4f16, MVT::v4i16}, Custom); // Avoid stack access for these. // TODO: Generalize to more vector types. 
- setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i16, Custom); - setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f16, Custom); - setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i16, Custom); - setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f16, Custom); - - setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i8, Custom); - setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i8, Custom); - setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i8, Custom); - setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i8, Custom); - setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i8, Custom); - setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i8, Custom); - - setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i16, Custom); - setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f16, Custom); - setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i16, Custom); - setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f16, Custom); + setOperationAction({ISD::EXTRACT_VECTOR_ELT, ISD::INSERT_VECTOR_ELT}, + {MVT::v2i16, MVT::v2f16, MVT::v2i8, MVT::v4i8, MVT::v8i8, + MVT::v4i16, MVT::v4f16}, + Custom); // Deal with vec3 vector operations when widened to vec4. - setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v3i32, Custom); - setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v3f32, Custom); - setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v4i32, Custom); - setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v4f32, Custom); + setOperationAction(ISD::INSERT_SUBVECTOR, + {MVT::v3i32, MVT::v3f32, MVT::v4i32, MVT::v4f32}, Custom); // Deal with vec5/6/7 vector operations when widened to vec8. 
- setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v5i32, Custom); - setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v5f32, Custom); - setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v6i32, Custom); - setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v6f32, Custom); - setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v7i32, Custom); - setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v7f32, Custom); - setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v8i32, Custom); - setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v8f32, Custom); + setOperationAction(ISD::INSERT_SUBVECTOR, + {MVT::v5i32, MVT::v5f32, MVT::v6i32, MVT::v6f32, + MVT::v7i32, MVT::v7f32, MVT::v8i32, MVT::v8f32}, + Custom); // BUFFER/FLAT_ATOMIC_CMP_SWAP on GCN GPUs needs input marshalling, // and output demarshalling - setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom); - setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Custom); + setOperationAction(ISD::ATOMIC_CMP_SWAP, {MVT::i32, MVT::i64}, Custom); // We can't return success/failure, only the old value, // let LLVM add the comparison - setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i32, Expand); - setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i64, Expand); + setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, {MVT::i32, MVT::i64}, + Expand); - if (Subtarget->hasFlatAddressSpace()) { - setOperationAction(ISD::ADDRSPACECAST, MVT::i32, Custom); - setOperationAction(ISD::ADDRSPACECAST, MVT::i64, Custom); - } + if (Subtarget->hasFlatAddressSpace()) + setOperationAction(ISD::ADDRSPACECAST, {MVT::i32, MVT::i64}, Custom); - setOperationAction(ISD::BITREVERSE, MVT::i32, Legal); - setOperationAction(ISD::BITREVERSE, MVT::i64, Legal); + setOperationAction(ISD::BITREVERSE, {MVT::i32, MVT::i64}, Legal); // FIXME: This should be narrowed to i32, but that only happens if i64 is // illegal. // FIXME: Should lower sub-i32 bswaps to bit-ops without v_perm_b32. 
- setOperationAction(ISD::BSWAP, MVT::i64, Legal); - setOperationAction(ISD::BSWAP, MVT::i32, Legal); + setOperationAction(ISD::BSWAP, {MVT::i64, MVT::i32}, Legal); // On SI this is s_memtime and s_memrealtime on VI. setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Legal); - setOperationAction(ISD::TRAP, MVT::Other, Custom); - setOperationAction(ISD::DEBUGTRAP, MVT::Other, Custom); + setOperationAction({ISD::TRAP, ISD::DEBUGTRAP}, MVT::Other, Custom); if (Subtarget->has16BitInsts()) { - setOperationAction(ISD::FPOW, MVT::f16, Promote); - setOperationAction(ISD::FPOWI, MVT::f16, Promote); - setOperationAction(ISD::FLOG, MVT::f16, Custom); - setOperationAction(ISD::FEXP, MVT::f16, Custom); - setOperationAction(ISD::FLOG10, MVT::f16, Custom); + setOperationAction({ISD::FPOW, ISD::FPOWI}, MVT::f16, Promote); + setOperationAction({ISD::FLOG, ISD::FEXP, ISD::FLOG10}, MVT::f16, Custom); } if (Subtarget->hasMadMacF32Insts()) setOperationAction(ISD::FMAD, MVT::f32, Legal); - if (!Subtarget->hasBFI()) { + if (!Subtarget->hasBFI()) // fcopysign can be done in a single instruction with BFI. 
- setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand); - setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand); - } + setOperationAction(ISD::FCOPYSIGN, {MVT::f32, MVT::f64}, Expand); if (!Subtarget->hasBCNT(32)) setOperationAction(ISD::CTPOP, MVT::i32, Expand); @@ -469,15 +404,11 @@ if (!Subtarget->hasBCNT(64)) setOperationAction(ISD::CTPOP, MVT::i64, Expand); - if (Subtarget->hasFFBH()) { - setOperationAction(ISD::CTLZ, MVT::i32, Custom); - setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Custom); - } + if (Subtarget->hasFFBH()) + setOperationAction({ISD::CTLZ, ISD::CTLZ_ZERO_UNDEF}, MVT::i32, Custom); - if (Subtarget->hasFFBL()) { - setOperationAction(ISD::CTTZ, MVT::i32, Custom); - setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Custom); - } + if (Subtarget->hasFFBL()) + setOperationAction({ISD::CTTZ, ISD::CTTZ_ZERO_UNDEF}, MVT::i32, Custom); // We only really have 32-bit BFE instructions (and 16-bit on VI). // @@ -491,84 +422,48 @@ setHasExtractBitsInsn(true); // Clamp modifier on add/sub - if (Subtarget->hasIntClamp()) { - setOperationAction(ISD::UADDSAT, MVT::i32, Legal); - setOperationAction(ISD::USUBSAT, MVT::i32, Legal); - } - - if (Subtarget->hasAddNoCarry()) { - setOperationAction(ISD::SADDSAT, MVT::i16, Legal); - setOperationAction(ISD::SSUBSAT, MVT::i16, Legal); - setOperationAction(ISD::SADDSAT, MVT::i32, Legal); - setOperationAction(ISD::SSUBSAT, MVT::i32, Legal); - } + if (Subtarget->hasIntClamp()) + setOperationAction({ISD::UADDSAT, ISD::USUBSAT}, MVT::i32, Legal); - setOperationAction(ISD::FMINNUM, MVT::f32, Custom); - setOperationAction(ISD::FMAXNUM, MVT::f32, Custom); - setOperationAction(ISD::FMINNUM, MVT::f64, Custom); - setOperationAction(ISD::FMAXNUM, MVT::f64, Custom); + if (Subtarget->hasAddNoCarry()) + setOperationAction({ISD::SADDSAT, ISD::SSUBSAT}, {MVT::i16, MVT::i32}, + Legal); + setOperationAction({ISD::FMINNUM, ISD::FMAXNUM}, {MVT::f32, MVT::f64}, + Custom); // These are really only legal for ieee_mode functions. 
We should be avoiding // them for functions that don't have ieee_mode enabled, so just say they are // legal. - setOperationAction(ISD::FMINNUM_IEEE, MVT::f32, Legal); - setOperationAction(ISD::FMAXNUM_IEEE, MVT::f32, Legal); - setOperationAction(ISD::FMINNUM_IEEE, MVT::f64, Legal); - setOperationAction(ISD::FMAXNUM_IEEE, MVT::f64, Legal); - + setOperationAction({ISD::FMINNUM_IEEE, ISD::FMAXNUM_IEEE}, + {MVT::f32, MVT::f64}, Legal); - if (Subtarget->haveRoundOpsF64()) { - setOperationAction(ISD::FTRUNC, MVT::f64, Legal); - setOperationAction(ISD::FCEIL, MVT::f64, Legal); - setOperationAction(ISD::FRINT, MVT::f64, Legal); - } else { - setOperationAction(ISD::FCEIL, MVT::f64, Custom); - setOperationAction(ISD::FTRUNC, MVT::f64, Custom); - setOperationAction(ISD::FRINT, MVT::f64, Custom); - setOperationAction(ISD::FFLOOR, MVT::f64, Custom); - } + if (Subtarget->haveRoundOpsF64()) + setOperationAction({ISD::FTRUNC, ISD::FCEIL, ISD::FRINT}, MVT::f64, Legal); + else + setOperationAction({ISD::FCEIL, ISD::FTRUNC, ISD::FRINT, ISD::FFLOOR}, + MVT::f64, Custom); setOperationAction(ISD::FFLOOR, MVT::f64, Legal); - setOperationAction(ISD::FSIN, MVT::f32, Custom); - setOperationAction(ISD::FCOS, MVT::f32, Custom); - setOperationAction(ISD::FDIV, MVT::f32, Custom); + setOperationAction({ISD::FSIN, ISD::FCOS, ISD::FDIV}, MVT::f32, Custom); setOperationAction(ISD::FDIV, MVT::f64, Custom); if (Subtarget->has16BitInsts()) { - setOperationAction(ISD::Constant, MVT::i16, Legal); - - setOperationAction(ISD::SMIN, MVT::i16, Legal); - setOperationAction(ISD::SMAX, MVT::i16, Legal); + setOperationAction({ISD::Constant, ISD::SMIN, ISD::SMAX, ISD::UMIN, + ISD::UMAX, ISD::UADDSAT, ISD::USUBSAT}, + MVT::i16, Legal); - setOperationAction(ISD::UMIN, MVT::i16, Legal); - setOperationAction(ISD::UMAX, MVT::i16, Legal); - - setOperationAction(ISD::SIGN_EXTEND, MVT::i16, Promote); AddPromotedToType(ISD::SIGN_EXTEND, MVT::i16, MVT::i32); - setOperationAction(ISD::ROTR, MVT::i16, Expand); - 
setOperationAction(ISD::ROTL, MVT::i16, Expand); - - setOperationAction(ISD::SDIV, MVT::i16, Promote); - setOperationAction(ISD::UDIV, MVT::i16, Promote); - setOperationAction(ISD::SREM, MVT::i16, Promote); - setOperationAction(ISD::UREM, MVT::i16, Promote); - setOperationAction(ISD::UADDSAT, MVT::i16, Legal); - setOperationAction(ISD::USUBSAT, MVT::i16, Legal); - - setOperationAction(ISD::BITREVERSE, MVT::i16, Promote); - - setOperationAction(ISD::CTTZ, MVT::i16, Promote); - setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16, Promote); - setOperationAction(ISD::CTLZ, MVT::i16, Promote); - setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16, Promote); - setOperationAction(ISD::CTPOP, MVT::i16, Promote); + setOperationAction({ISD::ROTR, ISD::ROTL, ISD::SELECT_CC, ISD::BR_CC}, + MVT::i16, Expand); - setOperationAction(ISD::SELECT_CC, MVT::i16, Expand); - - setOperationAction(ISD::BR_CC, MVT::i16, Expand); + setOperationAction({ISD::SIGN_EXTEND, ISD::SDIV, ISD::UDIV, ISD::SREM, + ISD::UREM, ISD::BITREVERSE, ISD::CTTZ, + ISD::CTTZ_ZERO_UNDEF, ISD::CTLZ, ISD::CTLZ_ZERO_UNDEF, + ISD::CTPOP}, + MVT::i16, Promote); setOperationAction(ISD::LOAD, MVT::i16, Custom); @@ -579,8 +474,7 @@ setOperationAction(ISD::FP_TO_FP16, MVT::i16, Promote); AddPromotedToType(ISD::FP_TO_FP16, MVT::i16, MVT::i32); - setOperationAction(ISD::FP_TO_SINT, MVT::i16, Custom); - setOperationAction(ISD::FP_TO_UINT, MVT::i16, Custom); + setOperationAction({ISD::FP_TO_SINT, ISD::FP_TO_UINT}, MVT::i16, Custom); // F16 - Constant Actions. setOperationAction(ISD::ConstantFP, MVT::f16, Legal); @@ -592,23 +486,18 @@ AddPromotedToType(ISD::STORE, MVT::f16, MVT::i16); // F16 - VOP1 Actions. 
- setOperationAction(ISD::FP_ROUND, MVT::f16, Custom); - setOperationAction(ISD::FCOS, MVT::f16, Custom); - setOperationAction(ISD::FSIN, MVT::f16, Custom); + setOperationAction( + {ISD::FP_ROUND, ISD::FCOS, ISD::FSIN, ISD::FROUND, ISD::FPTRUNC_ROUND}, + MVT::f16, Custom); - setOperationAction(ISD::SINT_TO_FP, MVT::i16, Custom); - setOperationAction(ISD::UINT_TO_FP, MVT::i16, Custom); + setOperationAction({ISD::SINT_TO_FP, ISD::UINT_TO_FP}, MVT::i16, Custom); - setOperationAction(ISD::FP_TO_SINT, MVT::f16, Promote); - setOperationAction(ISD::FP_TO_UINT, MVT::f16, Promote); - setOperationAction(ISD::SINT_TO_FP, MVT::f16, Promote); - setOperationAction(ISD::UINT_TO_FP, MVT::f16, Promote); - setOperationAction(ISD::FROUND, MVT::f16, Custom); - setOperationAction(ISD::FPTRUNC_ROUND, MVT::f16, Custom); + setOperationAction( + {ISD::FP_TO_SINT, ISD::FP_TO_UINT, ISD::SINT_TO_FP, ISD::UINT_TO_FP}, + MVT::f16, Promote); // F16 - VOP2 Actions. - setOperationAction(ISD::BR_CC, MVT::f16, Expand); - setOperationAction(ISD::SELECT_CC, MVT::f16, Expand); + setOperationAction({ISD::BR_CC, ISD::SELECT_CC}, MVT::f16, Expand); setOperationAction(ISD::FDIV, MVT::f16, Custom); @@ -642,16 +531,12 @@ } // v_perm_b32 can handle either of these. - setOperationAction(ISD::BSWAP, MVT::i16, Legal); - setOperationAction(ISD::BSWAP, MVT::v2i16, Legal); + setOperationAction(ISD::BSWAP, {MVT::i16, MVT::v2i16}, Legal); setOperationAction(ISD::BSWAP, MVT::v4i16, Custom); // XXX - Do these do anything? Vector constants turn into build_vector. 
- setOperationAction(ISD::Constant, MVT::v2i16, Legal); - setOperationAction(ISD::ConstantFP, MVT::v2f16, Legal); - - setOperationAction(ISD::UNDEF, MVT::v2i16, Legal); - setOperationAction(ISD::UNDEF, MVT::v2f16, Legal); + setOperationAction({ISD::Constant, ISD::UNDEF}, {MVT::v2i16, MVT::v2f16}, + Legal); setOperationAction(ISD::STORE, MVT::v2i16, Promote); AddPromotedToType(ISD::STORE, MVT::v2i16, MVT::i32); @@ -695,140 +580,87 @@ setOperationAction(ISD::STORE, MVT::v8f16, Promote); AddPromotedToType(ISD::STORE, MVT::v8f16, MVT::v4i32); - setOperationAction(ISD::ANY_EXTEND, MVT::v2i32, Expand); - setOperationAction(ISD::ZERO_EXTEND, MVT::v2i32, Expand); - setOperationAction(ISD::SIGN_EXTEND, MVT::v2i32, Expand); + setOperationAction({ISD::ANY_EXTEND, ISD::ZERO_EXTEND, ISD::SIGN_EXTEND}, + MVT::v2i32, Expand); setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Expand); - setOperationAction(ISD::ANY_EXTEND, MVT::v4i32, Expand); - setOperationAction(ISD::ZERO_EXTEND, MVT::v4i32, Expand); - setOperationAction(ISD::SIGN_EXTEND, MVT::v4i32, Expand); + setOperationAction({ISD::ANY_EXTEND, ISD::ZERO_EXTEND, ISD::SIGN_EXTEND}, + MVT::v4i32, Expand); - setOperationAction(ISD::ANY_EXTEND, MVT::v8i32, Expand); - setOperationAction(ISD::ZERO_EXTEND, MVT::v8i32, Expand); - setOperationAction(ISD::SIGN_EXTEND, MVT::v8i32, Expand); + setOperationAction({ISD::ANY_EXTEND, ISD::ZERO_EXTEND, ISD::SIGN_EXTEND}, + MVT::v8i32, Expand); - if (!Subtarget->hasVOP3PInsts()) { - setOperationAction(ISD::BUILD_VECTOR, MVT::v2i16, Custom); - setOperationAction(ISD::BUILD_VECTOR, MVT::v2f16, Custom); - } + if (!Subtarget->hasVOP3PInsts()) + setOperationAction(ISD::BUILD_VECTOR, {MVT::v2i16, MVT::v2f16}, Custom); setOperationAction(ISD::FNEG, MVT::v2f16, Legal); // This isn't really legal, but this avoids the legalizer unrolling it (and // allows matching fneg (fabs x) patterns) setOperationAction(ISD::FABS, MVT::v2f16, Legal); - setOperationAction(ISD::FMAXNUM, MVT::f16, Custom); - 
setOperationAction(ISD::FMINNUM, MVT::f16, Custom); - setOperationAction(ISD::FMAXNUM_IEEE, MVT::f16, Legal); - setOperationAction(ISD::FMINNUM_IEEE, MVT::f16, Legal); + setOperationAction({ISD::FMAXNUM, ISD::FMINNUM}, MVT::f16, Custom); + setOperationAction({ISD::FMAXNUM_IEEE, ISD::FMINNUM_IEEE}, MVT::f16, Legal); - setOperationAction(ISD::FMINNUM_IEEE, MVT::v4f16, Custom); - setOperationAction(ISD::FMAXNUM_IEEE, MVT::v4f16, Custom); - setOperationAction(ISD::FMINNUM_IEEE, MVT::v8f16, Custom); - setOperationAction(ISD::FMAXNUM_IEEE, MVT::v8f16, Custom); + setOperationAction({ISD::FMINNUM_IEEE, ISD::FMAXNUM_IEEE}, + {MVT::v4f16, MVT::v8f16}, Custom); - setOperationAction(ISD::FMINNUM, MVT::v4f16, Expand); - setOperationAction(ISD::FMAXNUM, MVT::v4f16, Expand); - setOperationAction(ISD::FMINNUM, MVT::v8f16, Expand); - setOperationAction(ISD::FMAXNUM, MVT::v8f16, Expand); + setOperationAction({ISD::FMINNUM, ISD::FMAXNUM}, {MVT::v4f16, MVT::v8f16}, + Expand); for (MVT Vec16 : { MVT::v8i16, MVT::v8f16 }) { - setOperationAction(ISD::BUILD_VECTOR, Vec16, Custom); - setOperationAction(ISD::EXTRACT_VECTOR_ELT, Vec16, Custom); - setOperationAction(ISD::INSERT_VECTOR_ELT, Vec16, Expand); - setOperationAction(ISD::SCALAR_TO_VECTOR, Vec16, Expand); + setOperationAction({ISD::BUILD_VECTOR, ISD::EXTRACT_VECTOR_ELT}, Vec16, + Custom); + setOperationAction({ISD::INSERT_VECTOR_ELT, ISD::SCALAR_TO_VECTOR}, Vec16, + Expand); } } if (Subtarget->hasVOP3PInsts()) { - setOperationAction(ISD::ADD, MVT::v2i16, Legal); - setOperationAction(ISD::SUB, MVT::v2i16, Legal); - setOperationAction(ISD::MUL, MVT::v2i16, Legal); - setOperationAction(ISD::SHL, MVT::v2i16, Legal); - setOperationAction(ISD::SRL, MVT::v2i16, Legal); - setOperationAction(ISD::SRA, MVT::v2i16, Legal); - setOperationAction(ISD::SMIN, MVT::v2i16, Legal); - setOperationAction(ISD::UMIN, MVT::v2i16, Legal); - setOperationAction(ISD::SMAX, MVT::v2i16, Legal); - setOperationAction(ISD::UMAX, MVT::v2i16, Legal); - - 
setOperationAction(ISD::UADDSAT, MVT::v2i16, Legal); - setOperationAction(ISD::USUBSAT, MVT::v2i16, Legal); - setOperationAction(ISD::SADDSAT, MVT::v2i16, Legal); - setOperationAction(ISD::SSUBSAT, MVT::v2i16, Legal); - - setOperationAction(ISD::FADD, MVT::v2f16, Legal); - setOperationAction(ISD::FMUL, MVT::v2f16, Legal); - setOperationAction(ISD::FMA, MVT::v2f16, Legal); - - setOperationAction(ISD::FMINNUM_IEEE, MVT::v2f16, Legal); - setOperationAction(ISD::FMAXNUM_IEEE, MVT::v2f16, Legal); - - setOperationAction(ISD::FCANONICALIZE, MVT::v2f16, Legal); - - setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i16, Custom); - setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f16, Custom); - - setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f16, Custom); - setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i16, Custom); - setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8f16, Custom); - setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i16, Custom); - - for (MVT VT : { MVT::v4i16, MVT::v8i16 }) { - // Split vector operations. 
- setOperationAction(ISD::SHL, VT, Custom); - setOperationAction(ISD::SRA, VT, Custom); - setOperationAction(ISD::SRL, VT, Custom); - setOperationAction(ISD::ADD, VT, Custom); - setOperationAction(ISD::SUB, VT, Custom); - setOperationAction(ISD::MUL, VT, Custom); + setOperationAction({ISD::ADD, ISD::SUB, ISD::MUL, ISD::SHL, ISD::SRL, + ISD::SRA, ISD::SMIN, ISD::UMIN, ISD::SMAX, ISD::UMAX, + ISD::UADDSAT, ISD::USUBSAT, ISD::SADDSAT, ISD::SSUBSAT}, + MVT::v2i16, Legal); - setOperationAction(ISD::SMIN, VT, Custom); - setOperationAction(ISD::SMAX, VT, Custom); - setOperationAction(ISD::UMIN, VT, Custom); - setOperationAction(ISD::UMAX, VT, Custom); + setOperationAction({ISD::FADD, ISD::FMUL, ISD::FMA, ISD::FMINNUM_IEEE, + ISD::FMAXNUM_IEEE, ISD::FCANONICALIZE}, + MVT::v2f16, Legal); - setOperationAction(ISD::UADDSAT, VT, Custom); - setOperationAction(ISD::SADDSAT, VT, Custom); - setOperationAction(ISD::USUBSAT, VT, Custom); - setOperationAction(ISD::SSUBSAT, VT, Custom); - } + setOperationAction(ISD::EXTRACT_VECTOR_ELT, {MVT::v2i16, MVT::v2f16}, + Custom); - for (MVT VT : { MVT::v4f16, MVT::v8f16 }) { + setOperationAction(ISD::VECTOR_SHUFFLE, + {MVT::v4f16, MVT::v4i16, MVT::v8f16, MVT::v8i16}, + Custom); + + for (MVT VT : {MVT::v4i16, MVT::v8i16}) // Split vector operations. - setOperationAction(ISD::FADD, VT, Custom); - setOperationAction(ISD::FMUL, VT, Custom); - setOperationAction(ISD::FMA, VT, Custom); - setOperationAction(ISD::FCANONICALIZE, VT, Custom); - } + setOperationAction({ISD::SHL, ISD::SRA, ISD::SRL, ISD::ADD, ISD::SUB, + ISD::MUL, ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX, + ISD::UADDSAT, ISD::SADDSAT, ISD::USUBSAT, + ISD::SSUBSAT}, + VT, Custom); - setOperationAction(ISD::FMAXNUM, MVT::v2f16, Custom); - setOperationAction(ISD::FMINNUM, MVT::v2f16, Custom); + for (MVT VT : {MVT::v4f16, MVT::v8f16}) + // Split vector operations. 
+ setOperationAction({ISD::FADD, ISD::FMUL, ISD::FMA, ISD::FCANONICALIZE}, + VT, Custom); - setOperationAction(ISD::FMINNUM, MVT::v4f16, Custom); - setOperationAction(ISD::FMAXNUM, MVT::v4f16, Custom); + setOperationAction({ISD::FMAXNUM, ISD::FMINNUM}, {MVT::v2f16, MVT::v4f16}, + Custom); setOperationAction(ISD::FEXP, MVT::v2f16, Custom); - setOperationAction(ISD::SELECT, MVT::v4i16, Custom); - setOperationAction(ISD::SELECT, MVT::v4f16, Custom); + setOperationAction(ISD::SELECT, {MVT::v4i16, MVT::v4f16}, Custom); if (Subtarget->hasPackedFP32Ops()) { - setOperationAction(ISD::FADD, MVT::v2f32, Legal); - setOperationAction(ISD::FMUL, MVT::v2f32, Legal); - setOperationAction(ISD::FMA, MVT::v2f32, Legal); - setOperationAction(ISD::FNEG, MVT::v2f32, Legal); - - for (MVT VT : { MVT::v4f32, MVT::v8f32, MVT::v16f32, MVT::v32f32 }) { - setOperationAction(ISD::FADD, VT, Custom); - setOperationAction(ISD::FMUL, VT, Custom); - setOperationAction(ISD::FMA, VT, Custom); - } + setOperationAction({ISD::FADD, ISD::FMUL, ISD::FMA, ISD::FNEG}, + MVT::v2f32, Legal); + setOperationAction({ISD::FADD, ISD::FMUL, ISD::FMA}, + {MVT::v4f32, MVT::v8f32, MVT::v16f32, MVT::v32f32}, + Custom); } } - setOperationAction(ISD::FNEG, MVT::v4f16, Custom); - setOperationAction(ISD::FABS, MVT::v4f16, Custom); + setOperationAction({ISD::FNEG, ISD::FABS}, MVT::v4f16, Custom); if (Subtarget->has16BitInsts()) { setOperationAction(ISD::SELECT, MVT::v2i16, Promote); @@ -837,56 +669,37 @@ AddPromotedToType(ISD::SELECT, MVT::v2f16, MVT::i32); } else { // Legalization hack. 
- setOperationAction(ISD::SELECT, MVT::v2i16, Custom); - setOperationAction(ISD::SELECT, MVT::v2f16, Custom); - - setOperationAction(ISD::FNEG, MVT::v2f16, Custom); - setOperationAction(ISD::FABS, MVT::v2f16, Custom); - } - - for (MVT VT : { MVT::v4i16, MVT::v4f16, MVT::v2i8, MVT::v4i8, MVT::v8i8, - MVT::v8i16, MVT::v8f16 }) { - setOperationAction(ISD::SELECT, VT, Custom); - } - - setOperationAction(ISD::SMULO, MVT::i64, Custom); - setOperationAction(ISD::UMULO, MVT::i64, Custom); - - if (Subtarget->hasMad64_32()) { - setOperationAction(ISD::SMUL_LOHI, MVT::i32, Custom); - setOperationAction(ISD::UMUL_LOHI, MVT::i32, Custom); - } - - setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom); - setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::f32, Custom); - setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v4f32, Custom); - setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i16, Custom); - setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::f16, Custom); - setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v2i16, Custom); - setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v2f16, Custom); - - setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v2f16, Custom); - setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v2i16, Custom); - setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v3f16, Custom); - setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v3i16, Custom); - setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v4f16, Custom); - setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v4i16, Custom); - setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v8f16, Custom); - setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom); - setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::f16, Custom); - setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i16, Custom); - setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i8, Custom); - - setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom); - setOperationAction(ISD::INTRINSIC_VOID, MVT::v2i16, Custom); - setOperationAction(ISD::INTRINSIC_VOID, 
MVT::v2f16, Custom); - setOperationAction(ISD::INTRINSIC_VOID, MVT::v3i16, Custom); - setOperationAction(ISD::INTRINSIC_VOID, MVT::v3f16, Custom); - setOperationAction(ISD::INTRINSIC_VOID, MVT::v4f16, Custom); - setOperationAction(ISD::INTRINSIC_VOID, MVT::v4i16, Custom); - setOperationAction(ISD::INTRINSIC_VOID, MVT::f16, Custom); - setOperationAction(ISD::INTRINSIC_VOID, MVT::i16, Custom); - setOperationAction(ISD::INTRINSIC_VOID, MVT::i8, Custom); + setOperationAction(ISD::SELECT, {MVT::v2i16, MVT::v2f16}, Custom); + + setOperationAction({ISD::FNEG, ISD::FABS}, MVT::v2f16, Custom); + } + + setOperationAction(ISD::SELECT, + {MVT::v4i16, MVT::v4f16, MVT::v2i8, MVT::v4i8, MVT::v8i8, + MVT::v8i16, MVT::v8f16}, + Custom); + + setOperationAction({ISD::SMULO, ISD::UMULO}, MVT::i64, Custom); + + if (Subtarget->hasMad64_32()) + setOperationAction({ISD::SMUL_LOHI, ISD::UMUL_LOHI}, MVT::i32, Custom); + + setOperationAction(ISD::INTRINSIC_WO_CHAIN, + {MVT::Other, MVT::f32, MVT::v4f32, MVT::i16, MVT::f16, + MVT::v2i16, MVT::v2f16}, + Custom); + + setOperationAction(ISD::INTRINSIC_W_CHAIN, + {MVT::v2f16, MVT::v2i16, MVT::v3f16, MVT::v3i16, + MVT::v4f16, MVT::v4i16, MVT::v8f16, MVT::Other, MVT::f16, + MVT::i16, MVT::i8}, + Custom); + + setOperationAction(ISD::INTRINSIC_VOID, + {MVT::Other, MVT::v2i16, MVT::v2f16, MVT::v3i16, + MVT::v3f16, MVT::v4f16, MVT::v4i16, MVT::f16, MVT::i16, + MVT::i8}, + Custom); setTargetDAGCombine({ISD::ADD, ISD::ADDCARRY, @@ -916,8 +729,8 @@ ISD::EXTRACT_VECTOR_ELT, ISD::INSERT_VECTOR_ELT}); - // All memory operations. Some folding on the pointer operand is done to help - // matching the constant offsets in the addressing modes. + // All memory operations. Some folding on the pointer operand is done to + // help matching the constant offsets in the addressing modes. 
setTargetDAGCombine({ISD::LOAD, ISD::STORE, ISD::ATOMIC_LOAD, diff --git a/llvm/lib/Target/ARC/ARCISelLowering.cpp b/llvm/lib/Target/ARC/ARCISelLowering.cpp --- a/llvm/lib/Target/ARC/ARCISelLowering.cpp +++ b/llvm/lib/Target/ARC/ARCISelLowering.cpp @@ -115,38 +115,23 @@ // Operations to get us off of the ground. // Basic. - setOperationAction(ISD::ADD, MVT::i32, Legal); - setOperationAction(ISD::SUB, MVT::i32, Legal); - setOperationAction(ISD::AND, MVT::i32, Legal); - setOperationAction(ISD::SMAX, MVT::i32, Legal); - setOperationAction(ISD::SMIN, MVT::i32, Legal); - - setOperationAction(ISD::ADDC, MVT::i32, Legal); - setOperationAction(ISD::ADDE, MVT::i32, Legal); - setOperationAction(ISD::SUBC, MVT::i32, Legal); - setOperationAction(ISD::SUBE, MVT::i32, Legal); + setOperationAction({ISD::ADD, ISD::SUB, ISD::AND, ISD::SMAX, ISD::SMIN, + ISD::ADDC, ISD::ADDE, ISD::SUBC, ISD::SUBE}, + MVT::i32, Legal); // Need barrel shifter. - setOperationAction(ISD::SHL, MVT::i32, Legal); - setOperationAction(ISD::SRA, MVT::i32, Legal); - setOperationAction(ISD::SRL, MVT::i32, Legal); - setOperationAction(ISD::ROTR, MVT::i32, Legal); - - setOperationAction(ISD::Constant, MVT::i32, Legal); - setOperationAction(ISD::UNDEF, MVT::i32, Legal); + setOperationAction( + {ISD::SHL, ISD::SRA, ISD::SRL, ISD::ROTR, ISD::Constant, ISD::UNDEF}, + MVT::i32, Legal); // Need multiplier - setOperationAction(ISD::MUL, MVT::i32, Legal); - setOperationAction(ISD::MULHS, MVT::i32, Legal); - setOperationAction(ISD::MULHU, MVT::i32, Legal); - setOperationAction(ISD::LOAD, MVT::i32, Legal); - setOperationAction(ISD::STORE, MVT::i32, Legal); - - setOperationAction(ISD::SELECT_CC, MVT::i32, Custom); - setOperationAction(ISD::BR_CC, MVT::i32, Custom); - setOperationAction(ISD::BRCOND, MVT::Other, Expand); - setOperationAction(ISD::BR_JT, MVT::Other, Expand); - setOperationAction(ISD::JumpTable, MVT::i32, Custom); + setOperationAction( + {ISD::MUL, ISD::MULHS, ISD::MULHU, ISD::LOAD, ISD::STORE}, MVT::i32, + 
Legal); + + setOperationAction({ISD::SELECT_CC, ISD::BR_CC, ISD::JumpTable}, MVT::i32, + Custom); + setOperationAction({ISD::BRCOND, ISD::BR_JT}, MVT::Other, Expand); // Have pseudo instruction for frame addresses. setOperationAction(ISD::FRAMEADDR, MVT::i32, Legal); @@ -155,21 +140,17 @@ // Expand var-args ops. setOperationAction(ISD::VASTART, MVT::Other, Custom); - setOperationAction(ISD::VAEND, MVT::Other, Expand); - setOperationAction(ISD::VAARG, MVT::Other, Expand); - setOperationAction(ISD::VACOPY, MVT::Other, Expand); + setOperationAction({ISD::VAEND, ISD::VAARG, ISD::VACOPY}, MVT::Other, Expand); // Other expansions - setOperationAction(ISD::STACKSAVE, MVT::Other, Expand); - setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand); + setOperationAction({ISD::STACKSAVE, ISD::STACKRESTORE}, MVT::Other, Expand); // Sign extend inreg setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Custom); // TODO: Predicate these with `options.hasBitScan() ? Legal : Expand` // when the HasBitScan predicate is available. 
- setOperationAction(ISD::CTLZ, MVT::i32, Legal); - setOperationAction(ISD::CTTZ, MVT::i32, Legal); + setOperationAction({ISD::CTLZ, ISD::CTTZ}, MVT::i32, Legal); setOperationAction(ISD::READCYCLECOUNTER, MVT::i32, Legal); setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp --- a/llvm/lib/Target/ARM/ARMISelLowering.cpp +++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp @@ -167,50 +167,36 @@ MVT ElemTy = VT.getVectorElementType(); if (ElemTy != MVT::f64) setOperationAction(ISD::SETCC, VT, Custom); - setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom); - setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom); - if (ElemTy == MVT::i32) { - setOperationAction(ISD::SINT_TO_FP, VT, Custom); - setOperationAction(ISD::UINT_TO_FP, VT, Custom); - setOperationAction(ISD::FP_TO_SINT, VT, Custom); - setOperationAction(ISD::FP_TO_UINT, VT, Custom); - } else { - setOperationAction(ISD::SINT_TO_FP, VT, Expand); - setOperationAction(ISD::UINT_TO_FP, VT, Expand); - setOperationAction(ISD::FP_TO_SINT, VT, Expand); - setOperationAction(ISD::FP_TO_UINT, VT, Expand); - } - setOperationAction(ISD::BUILD_VECTOR, VT, Custom); - setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom); - setOperationAction(ISD::CONCAT_VECTORS, VT, Legal); - setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal); - setOperationAction(ISD::SELECT, VT, Expand); - setOperationAction(ISD::SELECT_CC, VT, Expand); - setOperationAction(ISD::VSELECT, VT, Expand); - setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand); - if (VT.isInteger()) { - setOperationAction(ISD::SHL, VT, Custom); - setOperationAction(ISD::SRA, VT, Custom); - setOperationAction(ISD::SRL, VT, Custom); - } + setOperationAction({ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT}, VT, + Custom); + if (ElemTy == MVT::i32) + setOperationAction( + {ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::FP_TO_SINT, ISD::FP_TO_UINT}, + VT, Custom); + else + setOperationAction( + 
{ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::FP_TO_SINT, ISD::FP_TO_UINT}, + VT, Expand); + setOperationAction({ISD::BUILD_VECTOR, ISD::VECTOR_SHUFFLE}, VT, Custom); + setOperationAction({ISD::CONCAT_VECTORS, ISD::EXTRACT_SUBVECTOR}, VT, Legal); + setOperationAction( + {ISD::SELECT, ISD::SELECT_CC, ISD::VSELECT, ISD::SIGN_EXTEND_INREG}, VT, + Expand); + if (VT.isInteger()) + setOperationAction({ISD::SHL, ISD::SRA, ISD::SRL}, VT, Custom); // Neon does not support vector divide/remainder operations. - setOperationAction(ISD::SDIV, VT, Expand); - setOperationAction(ISD::UDIV, VT, Expand); - setOperationAction(ISD::FDIV, VT, Expand); - setOperationAction(ISD::SREM, VT, Expand); - setOperationAction(ISD::UREM, VT, Expand); - setOperationAction(ISD::FREM, VT, Expand); - setOperationAction(ISD::SDIVREM, VT, Expand); - setOperationAction(ISD::UDIVREM, VT, Expand); + setOperationAction({ISD::SDIV, ISD::UDIV, ISD::FDIV, ISD::SREM, ISD::UREM, + ISD::FREM, ISD::SDIVREM, ISD::UDIVREM}, + VT, Expand); if (!VT.isFloatingPoint() && VT != MVT::v2i64 && VT != MVT::v1i64) - for (auto Opcode : {ISD::ABS, ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX}) - setOperationAction(Opcode, VT, Legal); + setOperationAction({ISD::ABS, ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX}, + VT, Legal); if (!VT.isFloatingPoint()) - for (auto Opcode : {ISD::SADDSAT, ISD::UADDSAT, ISD::SSUBSAT, ISD::USUBSAT}) - setOperationAction(Opcode, VT, Legal); + setOperationAction({ISD::SADDSAT, ISD::UADDSAT, ISD::SSUBSAT, ISD::USUBSAT}, + VT, Legal); } void ARMTargetLowering::addDRTypeForNEON(MVT VT) { @@ -230,17 +216,14 @@ // We support these really simple operations even on types where all // the actual arithmetic has to be broken down into simpler // operations or turned into library calls. 
- setOperationAction(ISD::BITCAST, VT, Legal); - setOperationAction(ISD::LOAD, VT, Legal); - setOperationAction(ISD::STORE, VT, Legal); - setOperationAction(ISD::UNDEF, VT, Legal); + setOperationAction({ISD::BITCAST, ISD::LOAD, ISD::STORE, ISD::UNDEF}, VT, + Legal); } void ARMTargetLowering::addAllExtLoads(const MVT From, const MVT To, LegalizeAction Action) { - setLoadExtAction(ISD::EXTLOAD, From, To, Action); - setLoadExtAction(ISD::ZEXTLOAD, From, To, Action); - setLoadExtAction(ISD::SEXTLOAD, From, To, Action); + setLoadExtAction({ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}, From, To, + Action); } void ARMTargetLowering::addMVEVectorTypes(bool HasMVEFP) { @@ -248,67 +231,39 @@ for (auto VT : IntTypes) { addRegisterClass(VT, &ARM::MQPRRegClass); - setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom); - setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom); - setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom); - setOperationAction(ISD::BUILD_VECTOR, VT, Custom); - setOperationAction(ISD::SHL, VT, Custom); - setOperationAction(ISD::SRA, VT, Custom); - setOperationAction(ISD::SRL, VT, Custom); - setOperationAction(ISD::SMIN, VT, Legal); - setOperationAction(ISD::SMAX, VT, Legal); - setOperationAction(ISD::UMIN, VT, Legal); - setOperationAction(ISD::UMAX, VT, Legal); - setOperationAction(ISD::ABS, VT, Legal); - setOperationAction(ISD::SETCC, VT, Custom); - setOperationAction(ISD::MLOAD, VT, Custom); - setOperationAction(ISD::MSTORE, VT, Legal); - setOperationAction(ISD::CTLZ, VT, Legal); - setOperationAction(ISD::CTTZ, VT, Custom); - setOperationAction(ISD::BITREVERSE, VT, Legal); - setOperationAction(ISD::BSWAP, VT, Legal); - setOperationAction(ISD::SADDSAT, VT, Legal); - setOperationAction(ISD::UADDSAT, VT, Legal); - setOperationAction(ISD::SSUBSAT, VT, Legal); - setOperationAction(ISD::USUBSAT, VT, Legal); - setOperationAction(ISD::ABDS, VT, Legal); - setOperationAction(ISD::ABDU, VT, Legal); - setOperationAction(ISD::AVGFLOORS, VT, Legal); - 
setOperationAction(ISD::AVGFLOORU, VT, Legal); - setOperationAction(ISD::AVGCEILS, VT, Legal); - setOperationAction(ISD::AVGCEILU, VT, Legal); + setOperationAction({ISD::VECTOR_SHUFFLE, ISD::INSERT_VECTOR_ELT, + ISD::EXTRACT_VECTOR_ELT, ISD::BUILD_VECTOR, ISD::SHL, + ISD::SRA, ISD::SRL, ISD::SETCC, ISD::MLOAD, ISD::CTTZ}, + VT, Custom); + setOperationAction({ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX, ISD::ABS, + ISD::MSTORE, ISD::CTLZ, ISD::BITREVERSE, ISD::BSWAP, + ISD::SADDSAT, ISD::UADDSAT, ISD::SSUBSAT, ISD::USUBSAT, + ISD::ABDS, ISD::ABDU, ISD::AVGFLOORS, ISD::AVGFLOORU, + ISD::AVGCEILS, ISD::AVGCEILU}, + VT, Legal); // No native support for these. - setOperationAction(ISD::UDIV, VT, Expand); - setOperationAction(ISD::SDIV, VT, Expand); - setOperationAction(ISD::UREM, VT, Expand); - setOperationAction(ISD::SREM, VT, Expand); - setOperationAction(ISD::UDIVREM, VT, Expand); - setOperationAction(ISD::SDIVREM, VT, Expand); - setOperationAction(ISD::CTPOP, VT, Expand); - setOperationAction(ISD::SELECT, VT, Expand); - setOperationAction(ISD::SELECT_CC, VT, Expand); + setOperationAction({ISD::UDIV, ISD::SDIV, ISD::UREM, ISD::SREM, + ISD::UDIVREM, ISD::SDIVREM, ISD::CTPOP, ISD::SELECT, + ISD::SELECT_CC}, + VT, Expand); // Vector reductions - setOperationAction(ISD::VECREDUCE_ADD, VT, Legal); - setOperationAction(ISD::VECREDUCE_SMAX, VT, Legal); - setOperationAction(ISD::VECREDUCE_UMAX, VT, Legal); - setOperationAction(ISD::VECREDUCE_SMIN, VT, Legal); - setOperationAction(ISD::VECREDUCE_UMIN, VT, Legal); - setOperationAction(ISD::VECREDUCE_MUL, VT, Custom); - setOperationAction(ISD::VECREDUCE_AND, VT, Custom); - setOperationAction(ISD::VECREDUCE_OR, VT, Custom); - setOperationAction(ISD::VECREDUCE_XOR, VT, Custom); - - if (!HasMVEFP) { - setOperationAction(ISD::SINT_TO_FP, VT, Expand); - setOperationAction(ISD::UINT_TO_FP, VT, Expand); - setOperationAction(ISD::FP_TO_SINT, VT, Expand); - setOperationAction(ISD::FP_TO_UINT, VT, Expand); - } else { - 
setOperationAction(ISD::FP_TO_SINT_SAT, VT, Custom); - setOperationAction(ISD::FP_TO_UINT_SAT, VT, Custom); - } + setOperationAction({ISD::VECREDUCE_ADD, ISD::VECREDUCE_SMAX, + ISD::VECREDUCE_UMAX, ISD::VECREDUCE_SMIN, + ISD::VECREDUCE_UMIN}, + VT, Legal); + setOperationAction({ISD::VECREDUCE_MUL, ISD::VECREDUCE_AND, + ISD::VECREDUCE_OR, ISD::VECREDUCE_XOR}, + VT, Custom); + + if (!HasMVEFP) + setOperationAction( + {ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::FP_TO_SINT, ISD::FP_TO_UINT}, + VT, Expand); + else + setOperationAction({ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT}, VT, + Custom); // Pre and Post inc are supported on loads and stores for (unsigned im = (unsigned)ISD::PRE_INC; @@ -327,18 +282,14 @@ setAllExpand(VT); // These are legal or custom whether we have MVE.fp or not - setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom); - setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom); - setOperationAction(ISD::INSERT_VECTOR_ELT, VT.getVectorElementType(), Custom); - setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom); - setOperationAction(ISD::BUILD_VECTOR, VT, Custom); - setOperationAction(ISD::BUILD_VECTOR, VT.getVectorElementType(), Custom); - setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Legal); - setOperationAction(ISD::SETCC, VT, Custom); - setOperationAction(ISD::MLOAD, VT, Custom); - setOperationAction(ISD::MSTORE, VT, Legal); - setOperationAction(ISD::SELECT, VT, Expand); - setOperationAction(ISD::SELECT_CC, VT, Expand); + setOperationAction({ISD::VECTOR_SHUFFLE, ISD::INSERT_VECTOR_ELT, + ISD::BUILD_VECTOR, ISD::EXTRACT_VECTOR_ELT, ISD::SETCC, + ISD::MLOAD}, + VT, Custom); + setOperationAction({ISD::INSERT_VECTOR_ELT, ISD::BUILD_VECTOR}, + VT.getVectorElementType(), Custom); + setOperationAction({ISD::SCALAR_TO_VECTOR, ISD::MSTORE}, VT, Legal); + setOperationAction({ISD::SELECT, ISD::SELECT_CC}, VT, Expand); // Pre and Post inc are supported on loads and stores for (unsigned im = (unsigned)ISD::PRE_INC; @@ -350,40 +301,24 @@ } if (HasMVEFP) { - 
setOperationAction(ISD::FMINNUM, VT, Legal); - setOperationAction(ISD::FMAXNUM, VT, Legal); - setOperationAction(ISD::FROUND, VT, Legal); - setOperationAction(ISD::VECREDUCE_FADD, VT, Custom); - setOperationAction(ISD::VECREDUCE_FMUL, VT, Custom); - setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom); - setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom); + setOperationAction({ISD::FMINNUM, ISD::FMAXNUM, ISD::FROUND}, VT, Legal); + setOperationAction({ISD::VECREDUCE_FADD, ISD::VECREDUCE_FMUL, + ISD::VECREDUCE_FMIN, ISD::VECREDUCE_FMAX}, + VT, Custom); // No native support for these. - setOperationAction(ISD::FDIV, VT, Expand); - setOperationAction(ISD::FREM, VT, Expand); - setOperationAction(ISD::FSQRT, VT, Expand); - setOperationAction(ISD::FSIN, VT, Expand); - setOperationAction(ISD::FCOS, VT, Expand); - setOperationAction(ISD::FPOW, VT, Expand); - setOperationAction(ISD::FLOG, VT, Expand); - setOperationAction(ISD::FLOG2, VT, Expand); - setOperationAction(ISD::FLOG10, VT, Expand); - setOperationAction(ISD::FEXP, VT, Expand); - setOperationAction(ISD::FEXP2, VT, Expand); - setOperationAction(ISD::FNEARBYINT, VT, Expand); + setOperationAction({ISD::FDIV, ISD::FREM, ISD::FSQRT, ISD::FSIN, + ISD::FCOS, ISD::FPOW, ISD::FLOG, ISD::FLOG2, + ISD::FLOG10, ISD::FEXP, ISD::FEXP2, ISD::FNEARBYINT}, + VT, Expand); } } // Custom Expand smaller than legal vector reductions to prevent false zero // items being added. 
- setOperationAction(ISD::VECREDUCE_FADD, MVT::v4f16, Custom); - setOperationAction(ISD::VECREDUCE_FMUL, MVT::v4f16, Custom); - setOperationAction(ISD::VECREDUCE_FMIN, MVT::v4f16, Custom); - setOperationAction(ISD::VECREDUCE_FMAX, MVT::v4f16, Custom); - setOperationAction(ISD::VECREDUCE_FADD, MVT::v2f16, Custom); - setOperationAction(ISD::VECREDUCE_FMUL, MVT::v2f16, Custom); - setOperationAction(ISD::VECREDUCE_FMIN, MVT::v2f16, Custom); - setOperationAction(ISD::VECREDUCE_FMAX, MVT::v2f16, Custom); + setOperationAction({ISD::VECREDUCE_FADD, ISD::VECREDUCE_FMUL, + ISD::VECREDUCE_FMIN, ISD::VECREDUCE_FMAX}, + {MVT::v4f16, MVT::v2f16}, Custom); // We 'support' these types up to bitcast/load/store level, regardless of // MVE integer-only / float support. Only doing FP data processing on the FP @@ -392,18 +327,15 @@ for (auto VT : LongTypes) { addRegisterClass(VT, &ARM::MQPRRegClass); setAllExpand(VT); - setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom); - setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom); - setOperationAction(ISD::BUILD_VECTOR, VT, Custom); + setOperationAction({ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT, + ISD::BUILD_VECTOR, ISD::VECTOR_SHUFFLE}, + VT, Custom); setOperationAction(ISD::VSELECT, VT, Legal); - setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom); } setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2f64, Legal); // We can do bitwise operations on v2i64 vectors - setOperationAction(ISD::AND, MVT::v2i64, Legal); - setOperationAction(ISD::OR, MVT::v2i64, Legal); - setOperationAction(ISD::XOR, MVT::v2i64, Legal); + setOperationAction({ISD::AND, ISD::OR, ISD::XOR}, MVT::v2i64, Legal); // It is legal to extload from v4i8 to v4i16 or v4i32. addAllExtLoads(MVT::v8i16, MVT::v8i8, Legal); @@ -411,11 +343,9 @@ addAllExtLoads(MVT::v4i32, MVT::v4i8, Legal); // It is legal to sign extend from v4i8/v4i16 to v4i32 or v8i8 to v8i16. 
- setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i8, Legal); - setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Legal); - setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i32, Legal); - setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v8i8, Legal); - setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v8i16, Legal); + setOperationAction(ISD::SIGN_EXTEND_INREG, + {MVT::v4i8, MVT::v4i16, MVT::v4i32, MVT::v8i8, MVT::v8i16}, + Legal); // Some truncating stores are legal too. setTruncStoreAction(MVT::v4i32, MVT::v4i16, Legal); @@ -437,38 +367,22 @@ const MVT pTypes[] = {MVT::v16i1, MVT::v8i1, MVT::v4i1, MVT::v2i1}; for (auto VT : pTypes) { addRegisterClass(VT, &ARM::VCCRRegClass); - setOperationAction(ISD::BUILD_VECTOR, VT, Custom); - setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom); - setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom); - setOperationAction(ISD::CONCAT_VECTORS, VT, Custom); - setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom); - setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom); - setOperationAction(ISD::SETCC, VT, Custom); - setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Expand); - setOperationAction(ISD::LOAD, VT, Custom); - setOperationAction(ISD::STORE, VT, Custom); - setOperationAction(ISD::TRUNCATE, VT, Custom); - setOperationAction(ISD::VSELECT, VT, Expand); - setOperationAction(ISD::SELECT, VT, Expand); - } - setOperationAction(ISD::SETCC, MVT::v2i1, Expand); - setOperationAction(ISD::TRUNCATE, MVT::v2i1, Expand); - setOperationAction(ISD::AND, MVT::v2i1, Expand); - setOperationAction(ISD::OR, MVT::v2i1, Expand); - setOperationAction(ISD::XOR, MVT::v2i1, Expand); - setOperationAction(ISD::SINT_TO_FP, MVT::v2i1, Expand); - setOperationAction(ISD::UINT_TO_FP, MVT::v2i1, Expand); - setOperationAction(ISD::FP_TO_SINT, MVT::v2i1, Expand); - setOperationAction(ISD::FP_TO_UINT, MVT::v2i1, Expand); - - setOperationAction(ISD::SIGN_EXTEND, MVT::v8i32, Custom); - setOperationAction(ISD::SIGN_EXTEND, MVT::v16i16, Custom); - 
setOperationAction(ISD::SIGN_EXTEND, MVT::v16i32, Custom); - setOperationAction(ISD::ZERO_EXTEND, MVT::v8i32, Custom); - setOperationAction(ISD::ZERO_EXTEND, MVT::v16i16, Custom); - setOperationAction(ISD::ZERO_EXTEND, MVT::v16i32, Custom); - setOperationAction(ISD::TRUNCATE, MVT::v8i32, Custom); - setOperationAction(ISD::TRUNCATE, MVT::v16i16, Custom); + setOperationAction({ISD::BUILD_VECTOR, ISD::VECTOR_SHUFFLE, + ISD::EXTRACT_SUBVECTOR, ISD::CONCAT_VECTORS, + ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT, + ISD::SETCC, ISD::LOAD, ISD::STORE, ISD::TRUNCATE}, + VT, Custom); + setOperationAction({ISD::SCALAR_TO_VECTOR, ISD::VSELECT, ISD::SELECT}, VT, + Expand); + } + setOperationAction({ISD::SETCC, ISD::TRUNCATE, ISD::AND, ISD::OR, ISD::XOR, + ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::FP_TO_SINT, + ISD::FP_TO_UINT}, + MVT::v2i1, Expand); + + setOperationAction({ISD::SIGN_EXTEND, ISD::ZERO_EXTEND}, + {MVT::v8i32, MVT::v16i16, MVT::v16i32}, Custom); + setOperationAction(ISD::TRUNCATE, {MVT::v8i32, MVT::v16i16}, Custom); } ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM, @@ -560,12 +474,8 @@ } // These libcalls are not available in 32-bit. 
- setLibcallName(RTLIB::SHL_I128, nullptr); - setLibcallName(RTLIB::SRL_I128, nullptr); - setLibcallName(RTLIB::SRA_I128, nullptr); - setLibcallName(RTLIB::MUL_I128, nullptr); - setLibcallName(RTLIB::MULO_I64, nullptr); - setLibcallName(RTLIB::MULO_I128, nullptr); + setLibcallName({RTLIB::SHL_I128, RTLIB::SRL_I128, RTLIB::SRA_I128, + RTLIB::MUL_I128, RTLIB::MULO_I64, RTLIB::MULO_I128}); // RTLIB if (Subtarget->isAAPCS_ABI() && @@ -764,10 +674,8 @@ addRegisterClass(MVT::f32, &ARM::SPRRegClass); addRegisterClass(MVT::f64, &ARM::DPRRegClass); - setOperationAction(ISD::FP_TO_SINT_SAT, MVT::i32, Custom); - setOperationAction(ISD::FP_TO_UINT_SAT, MVT::i32, Custom); - setOperationAction(ISD::FP_TO_SINT_SAT, MVT::i64, Custom); - setOperationAction(ISD::FP_TO_UINT_SAT, MVT::i64, Custom); + setOperationAction({ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT}, + {MVT::i32, MVT::i64}, Custom); if (!Subtarget->hasVFP2Base()) setAllExpand(MVT::f32); @@ -777,11 +685,9 @@ if (Subtarget->hasFullFP16()) { addRegisterClass(MVT::f16, &ARM::HPRRegClass); - setOperationAction(ISD::BITCAST, MVT::i16, Custom); - setOperationAction(ISD::BITCAST, MVT::f16, Custom); + setOperationAction(ISD::BITCAST, {MVT::i16, MVT::f16}, Custom); - setOperationAction(ISD::FMINNUM, MVT::f16, Legal); - setOperationAction(ISD::FMAXNUM, MVT::f16, Legal); + setOperationAction({ISD::FMINNUM, ISD::FMAXNUM}, MVT::f16, Legal); } if (Subtarget->hasBF16()) { @@ -797,17 +703,14 @@ addAllExtLoads(VT, InnerVT, Expand); } - setOperationAction(ISD::SMUL_LOHI, VT, Expand); - setOperationAction(ISD::UMUL_LOHI, VT, Expand); - - setOperationAction(ISD::BSWAP, VT, Expand); + setOperationAction({ISD::SMUL_LOHI, ISD::UMUL_LOHI, ISD::BSWAP}, VT, + Expand); } - setOperationAction(ISD::ConstantFP, MVT::f32, Custom); - setOperationAction(ISD::ConstantFP, MVT::f64, Custom); + setOperationAction(ISD::ConstantFP, {MVT::f32, MVT::f64}, Custom); - setOperationAction(ISD::READ_REGISTER, MVT::i64, Custom); - setOperationAction(ISD::WRITE_REGISTER, 
MVT::i64, Custom); + setOperationAction({ISD::READ_REGISTER, ISD::WRITE_REGISTER}, MVT::i64, + Custom); if (Subtarget->hasMVEIntegerOps()) addMVEVectorTypes(Subtarget->hasMVEFloatOps()); @@ -845,13 +748,10 @@ if (Subtarget->hasMVEIntegerOps() || Subtarget->hasNEON()) { // v2f64 is legal so that QR subregs can be extracted as f64 elements, but // none of Neon, MVE or VFP supports any arithmetic operations on it. - setOperationAction(ISD::FADD, MVT::v2f64, Expand); - setOperationAction(ISD::FSUB, MVT::v2f64, Expand); - setOperationAction(ISD::FMUL, MVT::v2f64, Expand); + setOperationAction({ISD::FADD, ISD::FSUB, ISD::FMUL}, MVT::v2f64, Expand); // FIXME: Code duplication: FDIV and FREM are expanded always, see // ARMTargetLowering::addTypeForNEON method for details. - setOperationAction(ISD::FDIV, MVT::v2f64, Expand); - setOperationAction(ISD::FREM, MVT::v2f64, Expand); + setOperationAction({ISD::FDIV, ISD::FREM}, MVT::v2f64, Expand); // FIXME: Create unittest. // In another words, find a way when "copysign" appears in DAG with vector // operands. @@ -860,83 +760,45 @@ // ARMTargetLowering::addTypeForNEON method for details. setOperationAction(ISD::SETCC, MVT::v2f64, Expand); // FIXME: Create unittest for FNEG and for FABS. 
- setOperationAction(ISD::FNEG, MVT::v2f64, Expand); - setOperationAction(ISD::FABS, MVT::v2f64, Expand); - setOperationAction(ISD::FSQRT, MVT::v2f64, Expand); - setOperationAction(ISD::FSIN, MVT::v2f64, Expand); - setOperationAction(ISD::FCOS, MVT::v2f64, Expand); - setOperationAction(ISD::FPOW, MVT::v2f64, Expand); - setOperationAction(ISD::FLOG, MVT::v2f64, Expand); - setOperationAction(ISD::FLOG2, MVT::v2f64, Expand); - setOperationAction(ISD::FLOG10, MVT::v2f64, Expand); - setOperationAction(ISD::FEXP, MVT::v2f64, Expand); - setOperationAction(ISD::FEXP2, MVT::v2f64, Expand); + setOperationAction({ISD::FNEG, ISD::FABS, ISD::FSQRT, ISD::FSIN, ISD::FCOS, + ISD::FPOW, ISD::FLOG, ISD::FLOG2, ISD::FLOG10, + ISD::FEXP, ISD::FEXP2}, + MVT::v2f64, Expand); // FIXME: Create unittest for FCEIL, FTRUNC, FRINT, FNEARBYINT, FFLOOR. - setOperationAction(ISD::FCEIL, MVT::v2f64, Expand); - setOperationAction(ISD::FTRUNC, MVT::v2f64, Expand); - setOperationAction(ISD::FRINT, MVT::v2f64, Expand); - setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Expand); - setOperationAction(ISD::FFLOOR, MVT::v2f64, Expand); - setOperationAction(ISD::FMA, MVT::v2f64, Expand); + setOperationAction({ISD::FCEIL, ISD::FTRUNC, ISD::FRINT, ISD::FNEARBYINT, + ISD::FFLOOR, ISD::FMA}, + MVT::v2f64, Expand); } if (Subtarget->hasNEON()) { // The same with v4f32. But keep in mind that vadd, vsub, vmul are natively // supported for v4f32. 
- setOperationAction(ISD::FSQRT, MVT::v4f32, Expand); - setOperationAction(ISD::FSIN, MVT::v4f32, Expand); - setOperationAction(ISD::FCOS, MVT::v4f32, Expand); - setOperationAction(ISD::FPOW, MVT::v4f32, Expand); - setOperationAction(ISD::FLOG, MVT::v4f32, Expand); - setOperationAction(ISD::FLOG2, MVT::v4f32, Expand); - setOperationAction(ISD::FLOG10, MVT::v4f32, Expand); - setOperationAction(ISD::FEXP, MVT::v4f32, Expand); - setOperationAction(ISD::FEXP2, MVT::v4f32, Expand); - setOperationAction(ISD::FCEIL, MVT::v4f32, Expand); - setOperationAction(ISD::FTRUNC, MVT::v4f32, Expand); - setOperationAction(ISD::FRINT, MVT::v4f32, Expand); - setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Expand); - setOperationAction(ISD::FFLOOR, MVT::v4f32, Expand); + setOperationAction({ISD::FSQRT, ISD::FSIN, ISD::FCOS, ISD::FPOW, ISD::FLOG, + ISD::FLOG2, ISD::FLOG10, ISD::FEXP, ISD::FEXP2, + ISD::FCEIL, ISD::FTRUNC, ISD::FRINT, ISD::FNEARBYINT, + ISD::FFLOOR}, + MVT::v4f32, Expand); // Mark v2f32 intrinsics. 
- setOperationAction(ISD::FSQRT, MVT::v2f32, Expand); - setOperationAction(ISD::FSIN, MVT::v2f32, Expand); - setOperationAction(ISD::FCOS, MVT::v2f32, Expand); - setOperationAction(ISD::FPOW, MVT::v2f32, Expand); - setOperationAction(ISD::FLOG, MVT::v2f32, Expand); - setOperationAction(ISD::FLOG2, MVT::v2f32, Expand); - setOperationAction(ISD::FLOG10, MVT::v2f32, Expand); - setOperationAction(ISD::FEXP, MVT::v2f32, Expand); - setOperationAction(ISD::FEXP2, MVT::v2f32, Expand); - setOperationAction(ISD::FCEIL, MVT::v2f32, Expand); - setOperationAction(ISD::FTRUNC, MVT::v2f32, Expand); - setOperationAction(ISD::FRINT, MVT::v2f32, Expand); - setOperationAction(ISD::FNEARBYINT, MVT::v2f32, Expand); - setOperationAction(ISD::FFLOOR, MVT::v2f32, Expand); + setOperationAction({ISD::FSQRT, ISD::FSIN, ISD::FCOS, ISD::FPOW, ISD::FLOG, + ISD::FLOG2, ISD::FLOG10, ISD::FEXP, ISD::FEXP2, + ISD::FCEIL, ISD::FTRUNC, ISD::FRINT, ISD::FNEARBYINT, + ISD::FFLOOR}, + MVT::v2f32, Expand); // Neon does not support some operations on v1i64 and v2i64 types. setOperationAction(ISD::MUL, MVT::v1i64, Expand); // Custom handling for some quad-vector types to detect VMULL. - setOperationAction(ISD::MUL, MVT::v8i16, Custom); - setOperationAction(ISD::MUL, MVT::v4i32, Custom); - setOperationAction(ISD::MUL, MVT::v2i64, Custom); + setOperationAction(ISD::MUL, {MVT::v8i16, MVT::v4i32, MVT::v2i64}, Custom); // Custom handling for some vector types to avoid expensive expansions - setOperationAction(ISD::SDIV, MVT::v4i16, Custom); - setOperationAction(ISD::SDIV, MVT::v8i8, Custom); - setOperationAction(ISD::UDIV, MVT::v4i16, Custom); - setOperationAction(ISD::UDIV, MVT::v8i8, Custom); + setOperationAction({ISD::SDIV, ISD::UDIV}, {MVT::v4i16, MVT::v8i8}, Custom); // Neon does not have single instruction SINT_TO_FP and UINT_TO_FP with // a destination type that is wider than the source, and nor does // it have a FP_TO_[SU]INT instruction with a narrower destination than // source. 
- setOperationAction(ISD::SINT_TO_FP, MVT::v4i16, Custom); - setOperationAction(ISD::SINT_TO_FP, MVT::v8i16, Custom); - setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom); - setOperationAction(ISD::UINT_TO_FP, MVT::v8i16, Custom); - setOperationAction(ISD::FP_TO_UINT, MVT::v4i16, Custom); - setOperationAction(ISD::FP_TO_UINT, MVT::v8i16, Custom); - setOperationAction(ISD::FP_TO_SINT, MVT::v4i16, Custom); - setOperationAction(ISD::FP_TO_SINT, MVT::v8i16, Custom); + setOperationAction( + {ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::FP_TO_UINT, ISD::FP_TO_SINT}, + {MVT::v4i16, MVT::v8i16}, Custom); setOperationAction(ISD::FP_ROUND, MVT::v2f32, Expand); setOperationAction(ISD::FP_EXTEND, MVT::v2f64, Expand); @@ -944,36 +806,18 @@ // NEON does not have single instruction CTPOP for vectors with element // types wider than 8-bits. However, custom lowering can leverage the // v8i8/v16i8 vcnt instruction. - setOperationAction(ISD::CTPOP, MVT::v2i32, Custom); - setOperationAction(ISD::CTPOP, MVT::v4i32, Custom); - setOperationAction(ISD::CTPOP, MVT::v4i16, Custom); - setOperationAction(ISD::CTPOP, MVT::v8i16, Custom); - setOperationAction(ISD::CTPOP, MVT::v1i64, Custom); - setOperationAction(ISD::CTPOP, MVT::v2i64, Custom); + setOperationAction(ISD::CTPOP, + {MVT::v2i32, MVT::v4i32, MVT::v4i16, MVT::v8i16, + MVT::v1i64, MVT::v2i64}, + Custom); - setOperationAction(ISD::CTLZ, MVT::v1i64, Expand); - setOperationAction(ISD::CTLZ, MVT::v2i64, Expand); + setOperationAction(ISD::CTLZ, {MVT::v1i64, MVT::v2i64}, Expand); // NEON does not have single instruction CTTZ for vectors. 
- setOperationAction(ISD::CTTZ, MVT::v8i8, Custom); - setOperationAction(ISD::CTTZ, MVT::v4i16, Custom); - setOperationAction(ISD::CTTZ, MVT::v2i32, Custom); - setOperationAction(ISD::CTTZ, MVT::v1i64, Custom); - - setOperationAction(ISD::CTTZ, MVT::v16i8, Custom); - setOperationAction(ISD::CTTZ, MVT::v8i16, Custom); - setOperationAction(ISD::CTTZ, MVT::v4i32, Custom); - setOperationAction(ISD::CTTZ, MVT::v2i64, Custom); - - setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v8i8, Custom); - setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v4i16, Custom); - setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v2i32, Custom); - setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v1i64, Custom); - - setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v16i8, Custom); - setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v8i16, Custom); - setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v4i32, Custom); - setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v2i64, Custom); + setOperationAction({ISD::CTTZ, ISD::CTTZ_ZERO_UNDEF}, + {MVT::v8i8, MVT::v4i16, MVT::v2i32, MVT::v1i64, + MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64}, + Custom); for (MVT VT : MVT::fixedlen_vector_valuetypes()) { setOperationAction(ISD::MULHS, VT, Expand); @@ -981,23 +825,18 @@ } // NEON only has FMA instructions as of VFP4. - if (!Subtarget->hasVFP4Base()) { - setOperationAction(ISD::FMA, MVT::v2f32, Expand); - setOperationAction(ISD::FMA, MVT::v4f32, Expand); - } + if (!Subtarget->hasVFP4Base()) + setOperationAction(ISD::FMA, {MVT::v2f32, MVT::v4f32}, Expand); setTargetDAGCombine({ISD::SHL, ISD::SRL, ISD::SRA, ISD::FP_TO_SINT, ISD::FP_TO_UINT, ISD::FDIV, ISD::LOAD}); // It is legal to extload from v4i8 to v4i16 or v4i32. 
- for (MVT Ty : {MVT::v8i8, MVT::v4i8, MVT::v2i8, MVT::v4i16, MVT::v2i16, - MVT::v2i32}) { - for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) { - setLoadExtAction(ISD::EXTLOAD, VT, Ty, Legal); - setLoadExtAction(ISD::ZEXTLOAD, VT, Ty, Legal); - setLoadExtAction(ISD::SEXTLOAD, VT, Ty, Legal); - } - } + for (MVT Ty : + {MVT::v8i8, MVT::v4i8, MVT::v2i8, MVT::v4i16, MVT::v2i16, MVT::v2i32}) + for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) + setLoadExtAction({ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}, VT, Ty, + Legal); } if (Subtarget->hasNEON() || Subtarget->hasMVEIntegerOps()) { @@ -1022,57 +861,33 @@ // operations, f64 is legal for the few double-precision instructions which // are present However, no double-precision operations other than moves, // loads and stores are provided by the hardware. - setOperationAction(ISD::FADD, MVT::f64, Expand); - setOperationAction(ISD::FSUB, MVT::f64, Expand); - setOperationAction(ISD::FMUL, MVT::f64, Expand); - setOperationAction(ISD::FMA, MVT::f64, Expand); - setOperationAction(ISD::FDIV, MVT::f64, Expand); - setOperationAction(ISD::FREM, MVT::f64, Expand); - setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand); - setOperationAction(ISD::FGETSIGN, MVT::f64, Expand); - setOperationAction(ISD::FNEG, MVT::f64, Expand); - setOperationAction(ISD::FABS, MVT::f64, Expand); - setOperationAction(ISD::FSQRT, MVT::f64, Expand); - setOperationAction(ISD::FSIN, MVT::f64, Expand); - setOperationAction(ISD::FCOS, MVT::f64, Expand); - setOperationAction(ISD::FPOW, MVT::f64, Expand); - setOperationAction(ISD::FLOG, MVT::f64, Expand); - setOperationAction(ISD::FLOG2, MVT::f64, Expand); - setOperationAction(ISD::FLOG10, MVT::f64, Expand); - setOperationAction(ISD::FEXP, MVT::f64, Expand); - setOperationAction(ISD::FEXP2, MVT::f64, Expand); - setOperationAction(ISD::FCEIL, MVT::f64, Expand); - setOperationAction(ISD::FTRUNC, MVT::f64, Expand); - setOperationAction(ISD::FRINT, MVT::f64, Expand); - 
setOperationAction(ISD::FNEARBYINT, MVT::f64, Expand); - setOperationAction(ISD::FFLOOR, MVT::f64, Expand); - setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom); - setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom); - setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom); - setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom); - setOperationAction(ISD::FP_TO_SINT, MVT::f64, Custom); - setOperationAction(ISD::FP_TO_UINT, MVT::f64, Custom); + setOperationAction({ISD::FADD, ISD::FSUB, ISD::FMUL, ISD::FMA, + ISD::FDIV, ISD::FREM, ISD::FCOPYSIGN, ISD::FGETSIGN, + ISD::FNEG, ISD::FABS, ISD::FSQRT, ISD::FSIN, + ISD::FCOS, ISD::FPOW, ISD::FLOG, ISD::FLOG2, + ISD::FLOG10, ISD::FEXP, ISD::FEXP2, ISD::FCEIL, + ISD::FTRUNC, ISD::FRINT, ISD::FNEARBYINT, ISD::FFLOOR}, + MVT::f64, Expand); + setOperationAction({ISD::SINT_TO_FP, ISD::UINT_TO_FP}, MVT::i32, Custom); + setOperationAction({ISD::FP_TO_SINT, ISD::FP_TO_UINT}, {MVT::i32, MVT::f64}, + Custom); setOperationAction(ISD::FP_ROUND, MVT::f32, Custom); - setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom); - setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom); - setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::f64, Custom); - setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::f64, Custom); + setOperationAction({ISD::STRICT_FP_TO_SINT, ISD::STRICT_FP_TO_UINT}, + {MVT::i32, MVT::f64}, Custom); setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Custom); } if (!Subtarget->hasFP64() || !Subtarget->hasFPARMv8Base()) { - setOperationAction(ISD::FP_EXTEND, MVT::f64, Custom); - setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f64, Custom); - if (Subtarget->hasFullFP16()) { - setOperationAction(ISD::FP_ROUND, MVT::f16, Custom); - setOperationAction(ISD::STRICT_FP_ROUND, MVT::f16, Custom); - } + setOperationAction({ISD::FP_EXTEND, ISD::STRICT_FP_EXTEND}, MVT::f64, + Custom); + if (Subtarget->hasFullFP16()) + setOperationAction({ISD::FP_ROUND, ISD::STRICT_FP_ROUND}, MVT::f16, + Custom); } - if (!Subtarget->hasFP16()) { - 
setOperationAction(ISD::FP_EXTEND, MVT::f32, Custom); - setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f32, Custom); - } + if (!Subtarget->hasFP16()) + setOperationAction({ISD::FP_EXTEND, ISD::STRICT_FP_EXTEND}, MVT::f32, + Custom); computeRegisterProperties(Subtarget->getRegisterInfo()); @@ -1110,48 +925,31 @@ setIndexedStoreAction(ISD::POST_INC, MVT::i32, Legal); } - setOperationAction(ISD::SADDO, MVT::i32, Custom); - setOperationAction(ISD::UADDO, MVT::i32, Custom); - setOperationAction(ISD::SSUBO, MVT::i32, Custom); - setOperationAction(ISD::USUBO, MVT::i32, Custom); + setOperationAction({ISD::SADDO, ISD::UADDO, ISD::SSUBO, ISD::USUBO}, MVT::i32, + Custom); - setOperationAction(ISD::ADDCARRY, MVT::i32, Custom); - setOperationAction(ISD::SUBCARRY, MVT::i32, Custom); - if (Subtarget->hasDSP()) { - setOperationAction(ISD::SADDSAT, MVT::i8, Custom); - setOperationAction(ISD::SSUBSAT, MVT::i8, Custom); - setOperationAction(ISD::SADDSAT, MVT::i16, Custom); - setOperationAction(ISD::SSUBSAT, MVT::i16, Custom); - setOperationAction(ISD::UADDSAT, MVT::i8, Custom); - setOperationAction(ISD::USUBSAT, MVT::i8, Custom); - setOperationAction(ISD::UADDSAT, MVT::i16, Custom); - setOperationAction(ISD::USUBSAT, MVT::i16, Custom); - } - if (Subtarget->hasBaseDSP()) { - setOperationAction(ISD::SADDSAT, MVT::i32, Legal); - setOperationAction(ISD::SSUBSAT, MVT::i32, Legal); - } + setOperationAction({ISD::ADDCARRY, ISD::SUBCARRY}, MVT::i32, Custom); + if (Subtarget->hasDSP()) + setOperationAction({ISD::SADDSAT, ISD::SSUBSAT, ISD::UADDSAT, ISD::USUBSAT}, + {MVT::i8, MVT::i16}, Custom); + if (Subtarget->hasBaseDSP()) + setOperationAction({ISD::SADDSAT, ISD::SSUBSAT}, MVT::i32, Legal); // i64 operation support. 
setOperationAction(ISD::MUL, MVT::i64, Expand); setOperationAction(ISD::MULHU, MVT::i32, Expand); - if (Subtarget->isThumb1Only()) { - setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand); - setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand); - } + if (Subtarget->isThumb1Only()) + setOperationAction({ISD::UMUL_LOHI, ISD::SMUL_LOHI}, MVT::i32, Expand); if (Subtarget->isThumb1Only() || !Subtarget->hasV6Ops() || (Subtarget->isThumb2() && !Subtarget->hasDSP())) setOperationAction(ISD::MULHS, MVT::i32, Expand); - setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom); - setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom); - setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom); - setOperationAction(ISD::SRL, MVT::i64, Custom); - setOperationAction(ISD::SRA, MVT::i64, Custom); + setOperationAction({ISD::SHL_PARTS, ISD::SRA_PARTS, ISD::SRL_PARTS}, MVT::i32, + Custom); + setOperationAction( + {ISD::SRL, ISD::SRA, ISD::INTRINSIC_WO_CHAIN, ISD::LOAD, ISD::STORE}, + MVT::i64, Custom); setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom); - setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i64, Custom); - setOperationAction(ISD::LOAD, MVT::i64, Custom); - setOperationAction(ISD::STORE, MVT::i64, Custom); // MVE lowers 64 bit shifts to lsll and lsrl // assuming that ISD::SRL and SRA of i64 are already marked custom @@ -1159,21 +957,17 @@ setOperationAction(ISD::SHL, MVT::i64, Custom); // Expand to __aeabi_l{lsl,lsr,asr} calls for Thumb1. - if (Subtarget->isThumb1Only()) { - setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand); - setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand); - setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand); - } + if (Subtarget->isThumb1Only()) + setOperationAction({ISD::SHL_PARTS, ISD::SRA_PARTS, ISD::SRL_PARTS}, + MVT::i32, Expand); if (!Subtarget->isThumb1Only() && Subtarget->hasV6T2Ops()) setOperationAction(ISD::BITREVERSE, MVT::i32, Legal); // ARM does not have ROTL. 
setOperationAction(ISD::ROTL, MVT::i32, Expand); - for (MVT VT : MVT::fixedlen_vector_valuetypes()) { - setOperationAction(ISD::ROTL, VT, Expand); - setOperationAction(ISD::ROTR, VT, Expand); - } + for (MVT VT : MVT::fixedlen_vector_valuetypes()) + setOperationAction({ISD::ROTL, ISD::ROTR}, VT, Expand); setOperationAction(ISD::CTTZ, MVT::i32, Custom); setOperationAction(ISD::CTPOP, MVT::i32, Expand); if (!Subtarget->hasV5TOps() || Subtarget->isThumb1Only()) { @@ -1194,29 +988,20 @@ bool hasDivide = Subtarget->isThumb() ? Subtarget->hasDivideInThumbMode() : Subtarget->hasDivideInARMMode(); - if (!hasDivide) { + if (!hasDivide) // These are expanded into libcalls if the cpu doesn't have HW divider. - setOperationAction(ISD::SDIV, MVT::i32, LibCall); - setOperationAction(ISD::UDIV, MVT::i32, LibCall); - } - - if (Subtarget->isTargetWindows() && !Subtarget->hasDivideInThumbMode()) { - setOperationAction(ISD::SDIV, MVT::i32, Custom); - setOperationAction(ISD::UDIV, MVT::i32, Custom); + setOperationAction({ISD::SDIV, ISD::UDIV}, MVT::i32, LibCall); - setOperationAction(ISD::SDIV, MVT::i64, Custom); - setOperationAction(ISD::UDIV, MVT::i64, Custom); - } + if (Subtarget->isTargetWindows() && !Subtarget->hasDivideInThumbMode()) + setOperationAction({ISD::SDIV, ISD::UDIV}, {MVT::i32, MVT::i64}, Custom); - setOperationAction(ISD::SREM, MVT::i32, Expand); - setOperationAction(ISD::UREM, MVT::i32, Expand); + setOperationAction({ISD::SREM, ISD::UREM}, MVT::i32, Expand); // Register based DivRem for AEABI (RTABI 4.2) if (Subtarget->isTargetAEABI() || Subtarget->isTargetAndroid() || Subtarget->isTargetGNUAEABI() || Subtarget->isTargetMuslAEABI() || Subtarget->isTargetWindows()) { - setOperationAction(ISD::SREM, MVT::i64, Custom); - setOperationAction(ISD::UREM, MVT::i64, Custom); + setOperationAction({ISD::SREM, ISD::UREM}, MVT::i64, Custom); HasStandaloneRem = false; if (Subtarget->isTargetWindows()) { @@ -1263,36 +1048,28 @@ } } - setOperationAction(ISD::SDIVREM, MVT::i32, 
Custom); - setOperationAction(ISD::UDIVREM, MVT::i32, Custom); - setOperationAction(ISD::SDIVREM, MVT::i64, Custom); - setOperationAction(ISD::UDIVREM, MVT::i64, Custom); - } else { - setOperationAction(ISD::SDIVREM, MVT::i32, Expand); - setOperationAction(ISD::UDIVREM, MVT::i32, Expand); - } + setOperationAction({ISD::SDIVREM, ISD::UDIVREM}, {MVT::i32, MVT::i64}, + Custom); + } else + setOperationAction({ISD::SDIVREM, ISD::UDIVREM}, MVT::i32, Expand); if (Subtarget->getTargetTriple().isOSMSVCRT()) { // MSVCRT doesn't have powi; fall back to pow - setLibcallName(RTLIB::POWI_F32, nullptr); - setLibcallName(RTLIB::POWI_F64, nullptr); + setLibcallName(RTLIB::POWI_F32); + setLibcallName(RTLIB::POWI_F64); } - setOperationAction(ISD::GlobalAddress, MVT::i32, Custom); - setOperationAction(ISD::ConstantPool, MVT::i32, Custom); - setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom); - setOperationAction(ISD::BlockAddress, MVT::i32, Custom); + setOperationAction({ISD::GlobalAddress, ISD::ConstantPool, + ISD::GlobalTLSAddress, ISD::BlockAddress}, + MVT::i32, Custom); - setOperationAction(ISD::TRAP, MVT::Other, Legal); - setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal); + setOperationAction({ISD::TRAP, ISD::DEBUGTRAP}, MVT::Other, Legal); // Use the default implementation. setOperationAction(ISD::VASTART, MVT::Other, Custom); - setOperationAction(ISD::VAARG, MVT::Other, Expand); - setOperationAction(ISD::VACOPY, MVT::Other, Expand); - setOperationAction(ISD::VAEND, MVT::Other, Expand); - setOperationAction(ISD::STACKSAVE, MVT::Other, Expand); - setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand); + setOperationAction( + {ISD::VAARG, ISD::VACOPY, ISD::VAEND, ISD::STACKSAVE, ISD::STACKRESTORE}, + MVT::Other, Expand); if (Subtarget->isTargetWindows()) setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom); @@ -1328,24 +1105,17 @@ Subtarget->hasAnyDataBarrier() ? Custom : Expand); // Set them all for expansion, which will force libcalls. 
- setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Expand); - setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Expand); - setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i32, Expand); - setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i32, Expand); - setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i32, Expand); - setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i32, Expand); - setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i32, Expand); - setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i32, Expand); - setOperationAction(ISD::ATOMIC_LOAD_MIN, MVT::i32, Expand); - setOperationAction(ISD::ATOMIC_LOAD_MAX, MVT::i32, Expand); - setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i32, Expand); - setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i32, Expand); + setOperationAction( + {ISD::ATOMIC_CMP_SWAP, ISD::ATOMIC_SWAP, ISD::ATOMIC_LOAD_ADD, + ISD::ATOMIC_LOAD_SUB, ISD::ATOMIC_LOAD_AND, ISD::ATOMIC_LOAD_OR, + ISD::ATOMIC_LOAD_XOR, ISD::ATOMIC_LOAD_NAND, ISD::ATOMIC_LOAD_MIN, + ISD::ATOMIC_LOAD_MAX, ISD::ATOMIC_LOAD_UMIN, ISD::ATOMIC_LOAD_UMAX}, + MVT::i32, Expand); // Mark ATOMIC_LOAD and ATOMIC_STORE custom so we can handle the // Unordered/Monotonic case. - if (!InsertFencesForAtomic) { - setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Custom); - setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Custom); - } + if (!InsertFencesForAtomic) + setOperationAction({ISD::ATOMIC_LOAD, ISD::ATOMIC_STORE}, MVT::i32, + Custom); } // Compute supported atomic widths. @@ -1374,10 +1144,8 @@ setOperationAction(ISD::PREFETCH, MVT::Other, Custom); // Requires SXTB/SXTH, available on v6 and up in both ARM and Thumb modes. 
- if (!Subtarget->hasV6Ops()) { - setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand); - setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand); - } + if (!Subtarget->hasV6Ops()) + setOperationAction(ISD::SIGN_EXTEND_INREG, {MVT::i16, MVT::i8}, Expand); setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand); if (!Subtarget->useSoftFloat() && Subtarget->hasFPRegs() && @@ -1390,26 +1158,19 @@ } // We want to custom lower some of our intrinsics. - setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom); setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom); - setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom); - setOperationAction(ISD::EH_SJLJ_SETUP_DISPATCH, MVT::Other, Custom); + setOperationAction({ISD::INTRINSIC_WO_CHAIN, ISD::EH_SJLJ_LONGJMP, + ISD::EH_SJLJ_SETUP_DISPATCH}, + MVT::Other, Custom); if (Subtarget->useSjLjEH()) setLibcallName(RTLIB::UNWIND_RESUME, "_Unwind_SjLj_Resume"); - setOperationAction(ISD::SETCC, MVT::i32, Expand); - setOperationAction(ISD::SETCC, MVT::f32, Expand); - setOperationAction(ISD::SETCC, MVT::f64, Expand); - setOperationAction(ISD::SELECT, MVT::i32, Custom); - setOperationAction(ISD::SELECT, MVT::f32, Custom); - setOperationAction(ISD::SELECT, MVT::f64, Custom); - setOperationAction(ISD::SELECT_CC, MVT::i32, Custom); - setOperationAction(ISD::SELECT_CC, MVT::f32, Custom); - setOperationAction(ISD::SELECT_CC, MVT::f64, Custom); + setOperationAction(ISD::SETCC, {MVT::i32, MVT::f32, MVT::f64}, Expand); + setOperationAction({ISD::SELECT, ISD::SELECT_CC}, + {MVT::i32, MVT::f32, MVT::f64}, Custom); if (Subtarget->hasFullFP16()) { - setOperationAction(ISD::SETCC, MVT::f16, Expand); - setOperationAction(ISD::SELECT, MVT::f16, Custom); - setOperationAction(ISD::SELECT_CC, MVT::f16, Custom); + setOperationAction(ISD::SETCC, MVT::f16, Expand); + setOperationAction({ISD::SELECT, ISD::SELECT_CC}, MVT::f16, Custom); } setOperationAction(ISD::SETCCCARRY, MVT::i32, Custom); @@ -1418,105 +1179,64 @@ 
setOperationAction(ISD::BR_CC, MVT::i32, Custom); if (Subtarget->hasFullFP16()) setOperationAction(ISD::BR_CC, MVT::f16, Custom); - setOperationAction(ISD::BR_CC, MVT::f32, Custom); - setOperationAction(ISD::BR_CC, MVT::f64, Custom); + setOperationAction(ISD::BR_CC, {MVT::f32, MVT::f64}, Custom); setOperationAction(ISD::BR_JT, MVT::Other, Custom); // We don't support sin/cos/fmod/copysign/pow - setOperationAction(ISD::FSIN, MVT::f64, Expand); - setOperationAction(ISD::FSIN, MVT::f32, Expand); - setOperationAction(ISD::FCOS, MVT::f32, Expand); - setOperationAction(ISD::FCOS, MVT::f64, Expand); - setOperationAction(ISD::FSINCOS, MVT::f64, Expand); - setOperationAction(ISD::FSINCOS, MVT::f32, Expand); - setOperationAction(ISD::FREM, MVT::f64, Expand); - setOperationAction(ISD::FREM, MVT::f32, Expand); + setOperationAction({ISD::FSIN, ISD::FCOS, ISD::FSINCOS, ISD::FREM}, + {MVT::f64, MVT::f32}, Expand); if (!Subtarget->useSoftFloat() && Subtarget->hasVFP2Base() && - !Subtarget->isThumb1Only()) { - setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom); - setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom); - } - setOperationAction(ISD::FPOW, MVT::f64, Expand); - setOperationAction(ISD::FPOW, MVT::f32, Expand); + !Subtarget->isThumb1Only()) + setOperationAction(ISD::FCOPYSIGN, {MVT::f64, MVT::f32}, Custom); + setOperationAction(ISD::FPOW, {MVT::f64, MVT::f32}, Expand); - if (!Subtarget->hasVFP4Base()) { - setOperationAction(ISD::FMA, MVT::f64, Expand); - setOperationAction(ISD::FMA, MVT::f32, Expand); - } + if (!Subtarget->hasVFP4Base()) + setOperationAction(ISD::FMA, {MVT::f64, MVT::f32}, Expand); // Various VFP goodness if (!Subtarget->useSoftFloat() && !Subtarget->isThumb1Only()) { // FP-ARMv8 adds f64 <-> f16 conversion. Before that it should be expanded. 
- if (!Subtarget->hasFPARMv8Base() || !Subtarget->hasFP64()) { - setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand); - setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand); - } + if (!Subtarget->hasFPARMv8Base() || !Subtarget->hasFP64()) + setOperationAction({ISD::FP16_TO_FP, ISD::FP_TO_FP16}, MVT::f64, Expand); // fp16 is a special v7 extension that adds f16 <-> f32 conversions. - if (!Subtarget->hasFP16()) { - setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand); - setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand); - } + if (!Subtarget->hasFP16()) + setOperationAction({ISD::FP16_TO_FP, ISD::FP_TO_FP16}, MVT::f32, Expand); // Strict floating-point comparisons need custom lowering. - setOperationAction(ISD::STRICT_FSETCC, MVT::f16, Custom); - setOperationAction(ISD::STRICT_FSETCCS, MVT::f16, Custom); - setOperationAction(ISD::STRICT_FSETCC, MVT::f32, Custom); - setOperationAction(ISD::STRICT_FSETCCS, MVT::f32, Custom); - setOperationAction(ISD::STRICT_FSETCC, MVT::f64, Custom); - setOperationAction(ISD::STRICT_FSETCCS, MVT::f64, Custom); + setOperationAction({ISD::STRICT_FSETCC, ISD::STRICT_FSETCCS}, + {MVT::f16, MVT::f32, MVT::f64}, Custom); } // Use __sincos_stret if available. if (getLibcallName(RTLIB::SINCOS_STRET_F32) != nullptr && - getLibcallName(RTLIB::SINCOS_STRET_F64) != nullptr) { - setOperationAction(ISD::FSINCOS, MVT::f64, Custom); - setOperationAction(ISD::FSINCOS, MVT::f32, Custom); - } + getLibcallName(RTLIB::SINCOS_STRET_F64) != nullptr) + setOperationAction(ISD::FSINCOS, {MVT::f64, MVT::f32}, Custom); // FP-ARMv8 implements a lot of rounding-like FP operations. 
if (Subtarget->hasFPARMv8Base()) { - setOperationAction(ISD::FFLOOR, MVT::f32, Legal); - setOperationAction(ISD::FCEIL, MVT::f32, Legal); - setOperationAction(ISD::FROUND, MVT::f32, Legal); - setOperationAction(ISD::FTRUNC, MVT::f32, Legal); - setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal); - setOperationAction(ISD::FRINT, MVT::f32, Legal); - setOperationAction(ISD::FMINNUM, MVT::f32, Legal); - setOperationAction(ISD::FMAXNUM, MVT::f32, Legal); - if (Subtarget->hasNEON()) { - setOperationAction(ISD::FMINNUM, MVT::v2f32, Legal); - setOperationAction(ISD::FMAXNUM, MVT::v2f32, Legal); - setOperationAction(ISD::FMINNUM, MVT::v4f32, Legal); - setOperationAction(ISD::FMAXNUM, MVT::v4f32, Legal); - } + setOperationAction({ISD::FFLOOR, ISD::FCEIL, ISD::FROUND, ISD::FTRUNC, + ISD::FNEARBYINT, ISD::FRINT, ISD::FMINNUM, + ISD::FMAXNUM}, + MVT::f32, Legal); + if (Subtarget->hasNEON()) + setOperationAction({ISD::FMINNUM, ISD::FMAXNUM}, {MVT::v2f32, MVT::v4f32}, + Legal); - if (Subtarget->hasFP64()) { - setOperationAction(ISD::FFLOOR, MVT::f64, Legal); - setOperationAction(ISD::FCEIL, MVT::f64, Legal); - setOperationAction(ISD::FROUND, MVT::f64, Legal); - setOperationAction(ISD::FTRUNC, MVT::f64, Legal); - setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal); - setOperationAction(ISD::FRINT, MVT::f64, Legal); - setOperationAction(ISD::FMINNUM, MVT::f64, Legal); - setOperationAction(ISD::FMAXNUM, MVT::f64, Legal); - } + if (Subtarget->hasFP64()) + setOperationAction({ISD::FFLOOR, ISD::FCEIL, ISD::FROUND, ISD::FTRUNC, + ISD::FNEARBYINT, ISD::FRINT, ISD::FMINNUM, + ISD::FMAXNUM}, + MVT::f64, Legal); } // FP16 often need to be promoted to call lib functions if (Subtarget->hasFullFP16()) { - setOperationAction(ISD::FREM, MVT::f16, Promote); + setOperationAction({ISD::FREM, ISD::FSIN, ISD::FCOS, ISD::FSINCOS, + ISD::FPOWI, ISD::FPOW, ISD::FEXP, ISD::FEXP2, ISD::FLOG, + ISD::FLOG10, ISD::FLOG2}, + MVT::f16, Promote); setOperationAction(ISD::FCOPYSIGN, MVT::f16, Expand); - 
setOperationAction(ISD::FSIN, MVT::f16, Promote);
-    setOperationAction(ISD::FCOS, MVT::f16, Promote);
-    setOperationAction(ISD::FSINCOS, MVT::f16, Promote);
-    setOperationAction(ISD::FPOWI, MVT::f16, Promote);
-    setOperationAction(ISD::FPOW, MVT::f16, Promote);
-    setOperationAction(ISD::FEXP, MVT::f16, Promote);
-    setOperationAction(ISD::FEXP2, MVT::f16, Promote);
-    setOperationAction(ISD::FLOG, MVT::f16, Promote);
-    setOperationAction(ISD::FLOG10, MVT::f16, Promote);
-    setOperationAction(ISD::FLOG2, MVT::f16, Promote);
     setOperationAction(ISD::FROUND, MVT::f16, Legal);
   }

@@ -1526,28 +1246,16 @@
     // a NEON instruction with an undef lane instead. This has a performance
     // penalty on some cores, so we don't do this unless we have been
     // asked to by the core tuning model.
-    if (Subtarget->useNEONForSinglePrecisionFP()) {
-      setOperationAction(ISD::FMINIMUM, MVT::f32, Legal);
-      setOperationAction(ISD::FMAXIMUM, MVT::f32, Legal);
-      setOperationAction(ISD::FMINIMUM, MVT::f16, Legal);
-      setOperationAction(ISD::FMAXIMUM, MVT::f16, Legal);
-    }
-    setOperationAction(ISD::FMINIMUM, MVT::v2f32, Legal);
-    setOperationAction(ISD::FMAXIMUM, MVT::v2f32, Legal);
-    setOperationAction(ISD::FMINIMUM, MVT::v4f32, Legal);
-    setOperationAction(ISD::FMAXIMUM, MVT::v4f32, Legal);
+    if (Subtarget->useNEONForSinglePrecisionFP())
+      setOperationAction({ISD::FMINIMUM, ISD::FMAXIMUM}, {MVT::f32, MVT::f16},
+                         Legal);
+    setOperationAction({ISD::FMINIMUM, ISD::FMAXIMUM}, {MVT::v2f32, MVT::v4f32},
+                       Legal);

-    if (Subtarget->hasFullFP16()) {
-      setOperationAction(ISD::FMINNUM, MVT::v4f16, Legal);
-      setOperationAction(ISD::FMAXNUM, MVT::v4f16, Legal);
-      setOperationAction(ISD::FMINNUM, MVT::v8f16, Legal);
-      setOperationAction(ISD::FMAXNUM, MVT::v8f16, Legal);
-
-      setOperationAction(ISD::FMINIMUM, MVT::v4f16, Legal);
-      setOperationAction(ISD::FMAXIMUM, MVT::v4f16, Legal);
-      setOperationAction(ISD::FMINIMUM, MVT::v8f16, Legal);
-      setOperationAction(ISD::FMAXIMUM, MVT::v8f16, Legal);
-    }
+    if (Subtarget->hasFullFP16())
+      setOperationAction(
+          {ISD::FMINNUM, ISD::FMAXNUM, 
ISD::FMINIMUM, ISD::FMAXIMUM}, + {MVT::v4f16, MVT::v8f16}, Legal); } // We have target-specific dag combine patterns for the following nodes: diff --git a/llvm/lib/Target/AVR/AVRISelLowering.cpp b/llvm/lib/Target/AVR/AVRISelLowering.cpp --- a/llvm/lib/Target/AVR/AVRISelLowering.cpp +++ b/llvm/lib/Target/AVR/AVRISelLowering.cpp @@ -49,68 +49,48 @@ setStackPointerRegisterToSaveRestore(AVR::SP); setSupportsUnalignedAtomics(true); - setOperationAction(ISD::GlobalAddress, MVT::i16, Custom); - setOperationAction(ISD::BlockAddress, MVT::i16, Custom); + setOperationAction({ISD::GlobalAddress, ISD::BlockAddress}, MVT::i16, Custom); - setOperationAction(ISD::STACKSAVE, MVT::Other, Expand); - setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand); - setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i8, Expand); - setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i16, Expand); + setOperationAction({ISD::STACKSAVE, ISD::STACKRESTORE}, MVT::Other, Expand); + setOperationAction(ISD::DYNAMIC_STACKALLOC, {MVT::i8, MVT::i16}, Expand); for (MVT VT : MVT::integer_valuetypes()) { - for (auto N : {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}) { - setLoadExtAction(N, VT, MVT::i1, Promote); - setLoadExtAction(N, VT, MVT::i8, Expand); - } + setLoadExtAction({ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}, VT, MVT::i1, + Promote); + setLoadExtAction({ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}, VT, MVT::i8, + Expand); } setTruncStoreAction(MVT::i16, MVT::i8, Expand); - for (MVT VT : MVT::integer_valuetypes()) { - setOperationAction(ISD::ADDC, VT, Legal); - setOperationAction(ISD::SUBC, VT, Legal); - setOperationAction(ISD::ADDE, VT, Legal); - setOperationAction(ISD::SUBE, VT, Legal); - } + for (MVT VT : MVT::integer_valuetypes()) + setOperationAction({ISD::ADDC, ISD::SUBC, ISD::ADDE, ISD::SUBE}, VT, Legal); // sub (x, imm) gets canonicalized to add (x, -imm), so for illegal types // revert into a sub since we don't have an add with immediate instruction. 
- setOperationAction(ISD::ADD, MVT::i32, Custom); - setOperationAction(ISD::ADD, MVT::i64, Custom); + setOperationAction(ISD::ADD, {MVT::i32, MVT::i64}, Custom); // our shift instructions are only able to shift 1 bit at a time, so handle // this in a custom way. - setOperationAction(ISD::SRA, MVT::i8, Custom); - setOperationAction(ISD::SHL, MVT::i8, Custom); - setOperationAction(ISD::SRL, MVT::i8, Custom); - setOperationAction(ISD::SRA, MVT::i16, Custom); - setOperationAction(ISD::SHL, MVT::i16, Custom); - setOperationAction(ISD::SRL, MVT::i16, Custom); - setOperationAction(ISD::SHL_PARTS, MVT::i16, Expand); - setOperationAction(ISD::SRA_PARTS, MVT::i16, Expand); - setOperationAction(ISD::SRL_PARTS, MVT::i16, Expand); + setOperationAction({ISD::SRA, ISD::SHL, ISD::SRL}, {MVT::i8, MVT::i16}, + Custom); + setOperationAction({ISD::SHL_PARTS, ISD::SRA_PARTS, ISD::SRL_PARTS}, MVT::i16, + Expand); setOperationAction(ISD::ROTL, MVT::i8, Custom); setOperationAction(ISD::ROTL, MVT::i16, Expand); setOperationAction(ISD::ROTR, MVT::i8, Custom); setOperationAction(ISD::ROTR, MVT::i16, Expand); - setOperationAction(ISD::BR_CC, MVT::i8, Custom); - setOperationAction(ISD::BR_CC, MVT::i16, Custom); - setOperationAction(ISD::BR_CC, MVT::i32, Custom); - setOperationAction(ISD::BR_CC, MVT::i64, Custom); + setOperationAction(ISD::BR_CC, {MVT::i8, MVT::i16, MVT::i32, MVT::i64}, + Custom); setOperationAction(ISD::BRCOND, MVT::Other, Expand); - setOperationAction(ISD::SELECT_CC, MVT::i8, Custom); - setOperationAction(ISD::SELECT_CC, MVT::i16, Custom); - setOperationAction(ISD::SELECT_CC, MVT::i32, Expand); - setOperationAction(ISD::SELECT_CC, MVT::i64, Expand); - setOperationAction(ISD::SETCC, MVT::i8, Custom); - setOperationAction(ISD::SETCC, MVT::i16, Custom); - setOperationAction(ISD::SETCC, MVT::i32, Custom); - setOperationAction(ISD::SETCC, MVT::i64, Custom); - setOperationAction(ISD::SELECT, MVT::i8, Expand); - setOperationAction(ISD::SELECT, MVT::i16, Expand); + 
setOperationAction(ISD::SELECT_CC, {MVT::i8, MVT::i16}, Custom); + setOperationAction(ISD::SELECT_CC, {MVT::i32, MVT::i64}, Expand); + setOperationAction(ISD::SETCC, {MVT::i8, MVT::i16, MVT::i32, MVT::i64}, + Custom); + setOperationAction(ISD::SELECT, {MVT::i8, MVT::i16}, Expand); setOperationAction(ISD::BSWAP, MVT::i16, Expand); @@ -127,88 +107,55 @@ setOperationAction(ISD::BR_JT, MVT::Other, Expand); setOperationAction(ISD::VASTART, MVT::Other, Custom); - setOperationAction(ISD::VAEND, MVT::Other, Expand); - setOperationAction(ISD::VAARG, MVT::Other, Expand); - setOperationAction(ISD::VACOPY, MVT::Other, Expand); + setOperationAction({ISD::VAEND, ISD::VAARG, ISD::VACOPY}, MVT::Other, Expand); // Atomic operations which must be lowered to rtlib calls - for (MVT VT : MVT::integer_valuetypes()) { - setOperationAction(ISD::ATOMIC_SWAP, VT, Expand); - setOperationAction(ISD::ATOMIC_CMP_SWAP, VT, Expand); - setOperationAction(ISD::ATOMIC_LOAD_NAND, VT, Expand); - setOperationAction(ISD::ATOMIC_LOAD_MAX, VT, Expand); - setOperationAction(ISD::ATOMIC_LOAD_MIN, VT, Expand); - setOperationAction(ISD::ATOMIC_LOAD_UMAX, VT, Expand); - setOperationAction(ISD::ATOMIC_LOAD_UMIN, VT, Expand); - } + for (MVT VT : MVT::integer_valuetypes()) + setOperationAction({ISD::ATOMIC_SWAP, ISD::ATOMIC_CMP_SWAP, + ISD::ATOMIC_LOAD_NAND, ISD::ATOMIC_LOAD_MAX, + ISD::ATOMIC_LOAD_MIN, ISD::ATOMIC_LOAD_UMAX, + ISD::ATOMIC_LOAD_UMIN}, + VT, Expand); // Division/remainder - setOperationAction(ISD::UDIV, MVT::i8, Expand); - setOperationAction(ISD::UDIV, MVT::i16, Expand); - setOperationAction(ISD::UREM, MVT::i8, Expand); - setOperationAction(ISD::UREM, MVT::i16, Expand); - setOperationAction(ISD::SDIV, MVT::i8, Expand); - setOperationAction(ISD::SDIV, MVT::i16, Expand); - setOperationAction(ISD::SREM, MVT::i8, Expand); - setOperationAction(ISD::SREM, MVT::i16, Expand); + setOperationAction({ISD::UDIV, ISD::UREM, ISD::SDIV, ISD::SREM}, + {MVT::i8, MVT::i16}, Expand); // Make division and modulus 
custom - setOperationAction(ISD::UDIVREM, MVT::i8, Custom); - setOperationAction(ISD::UDIVREM, MVT::i16, Custom); - setOperationAction(ISD::UDIVREM, MVT::i32, Custom); - setOperationAction(ISD::SDIVREM, MVT::i8, Custom); - setOperationAction(ISD::SDIVREM, MVT::i16, Custom); - setOperationAction(ISD::SDIVREM, MVT::i32, Custom); + setOperationAction({ISD::UDIVREM, ISD::SDIVREM}, + {MVT::i8, MVT::i16, MVT::i32}, Custom); // Do not use MUL. The AVR instructions are closer to SMUL_LOHI &co. - setOperationAction(ISD::MUL, MVT::i8, Expand); - setOperationAction(ISD::MUL, MVT::i16, Expand); + setOperationAction(ISD::MUL, {MVT::i8, MVT::i16}, Expand); // Expand 16 bit multiplications. - setOperationAction(ISD::SMUL_LOHI, MVT::i16, Expand); - setOperationAction(ISD::UMUL_LOHI, MVT::i16, Expand); + setOperationAction({ISD::SMUL_LOHI, ISD::UMUL_LOHI}, MVT::i16, Expand); // Expand multiplications to libcalls when there is // no hardware MUL. - if (!Subtarget.supportsMultiplication()) { - setOperationAction(ISD::SMUL_LOHI, MVT::i8, Expand); - setOperationAction(ISD::UMUL_LOHI, MVT::i8, Expand); - } + if (!Subtarget.supportsMultiplication()) + setOperationAction({ISD::SMUL_LOHI, ISD::UMUL_LOHI}, MVT::i8, Expand); - for (MVT VT : MVT::integer_valuetypes()) { - setOperationAction(ISD::MULHS, VT, Expand); - setOperationAction(ISD::MULHU, VT, Expand); - } + for (MVT VT : MVT::integer_valuetypes()) + setOperationAction({ISD::MULHS, ISD::MULHU}, VT, Expand); - for (MVT VT : MVT::integer_valuetypes()) { - setOperationAction(ISD::CTPOP, VT, Expand); - setOperationAction(ISD::CTLZ, VT, Expand); - setOperationAction(ISD::CTTZ, VT, Expand); - } + for (MVT VT : MVT::integer_valuetypes()) + setOperationAction({ISD::CTPOP, ISD::CTLZ, ISD::CTTZ}, VT, Expand); - for (MVT VT : MVT::integer_valuetypes()) { + for (MVT VT : MVT::integer_valuetypes()) setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand); - // TODO: The generated code is pretty poor. 
Investigate using the - // same "shift and subtract with carry" trick that we do for - // extending 8-bit to 16-bit. This may require infrastructure - // improvements in how we treat 16-bit "registers" to be feasible. - } + // TODO: The generated code is pretty poor. Investigate using the + // same "shift and subtract with carry" trick that we do for + // extending 8-bit to 16-bit. This may require infrastructure + // improvements in how we treat 16-bit "registers" to be feasible. // Division rtlib functions (not supported), use divmod functions instead - setLibcallName(RTLIB::SDIV_I8, nullptr); - setLibcallName(RTLIB::SDIV_I16, nullptr); - setLibcallName(RTLIB::SDIV_I32, nullptr); - setLibcallName(RTLIB::UDIV_I8, nullptr); - setLibcallName(RTLIB::UDIV_I16, nullptr); - setLibcallName(RTLIB::UDIV_I32, nullptr); + setLibcallName({RTLIB::SDIV_I8, RTLIB::SDIV_I16, RTLIB::SDIV_I32, + RTLIB::UDIV_I8, RTLIB::UDIV_I16, RTLIB::UDIV_I32}); // Modulus rtlib functions (not supported), use divmod functions instead - setLibcallName(RTLIB::SREM_I8, nullptr); - setLibcallName(RTLIB::SREM_I16, nullptr); - setLibcallName(RTLIB::SREM_I32, nullptr); - setLibcallName(RTLIB::UREM_I8, nullptr); - setLibcallName(RTLIB::UREM_I16, nullptr); - setLibcallName(RTLIB::UREM_I32, nullptr); + setLibcallName({RTLIB::SREM_I8, RTLIB::SREM_I16, RTLIB::SREM_I32, + RTLIB::UREM_I8, RTLIB::UREM_I16, RTLIB::UREM_I32}); // Division and modulus rtlib functions setLibcallName(RTLIB::SDIVREM_I8, "__divmodqi4"); diff --git a/llvm/lib/Target/BPF/BPFISelLowering.cpp b/llvm/lib/Target/BPF/BPFISelLowering.cpp --- a/llvm/lib/Target/BPF/BPFISelLowering.cpp +++ b/llvm/lib/Target/BPF/BPFISelLowering.cpp @@ -67,16 +67,11 @@ setStackPointerRegisterToSaveRestore(BPF::R11); - setOperationAction(ISD::BR_CC, MVT::i64, Custom); - setOperationAction(ISD::BR_JT, MVT::Other, Expand); - setOperationAction(ISD::BRIND, MVT::Other, Expand); - setOperationAction(ISD::BRCOND, MVT::Other, Expand); - - 
setOperationAction(ISD::GlobalAddress, MVT::i64, Custom); - - setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Custom); - setOperationAction(ISD::STACKSAVE, MVT::Other, Expand); - setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand); + setOperationAction({ISD::BR_CC, ISD::GlobalAddress, ISD::DYNAMIC_STACKALLOC}, + MVT::i64, Custom); + setOperationAction( + {ISD::BR_JT, ISD::BRIND, ISD::BRCOND, ISD::STACKSAVE, ISD::STACKRESTORE}, + MVT::Other, Expand); // Set unsupported atomic operations as Custom so // we can emit better error messages than fatal error @@ -85,38 +80,24 @@ if (VT == MVT::i32) { if (STI.getHasAlu32()) continue; - } else { + } else setOperationAction(ISD::ATOMIC_LOAD_ADD, VT, Custom); - } - setOperationAction(ISD::ATOMIC_LOAD_AND, VT, Custom); - setOperationAction(ISD::ATOMIC_LOAD_OR, VT, Custom); - setOperationAction(ISD::ATOMIC_LOAD_XOR, VT, Custom); - setOperationAction(ISD::ATOMIC_SWAP, VT, Custom); - setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, VT, Custom); + setOperationAction({ISD::ATOMIC_LOAD_AND, ISD::ATOMIC_LOAD_OR, + ISD::ATOMIC_LOAD_XOR, ISD::ATOMIC_SWAP, + ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS}, + VT, Custom); } - for (auto VT : { MVT::i32, MVT::i64 }) { + for (auto VT : {MVT::i32, MVT::i64}) { if (VT == MVT::i32 && !STI.getHasAlu32()) continue; - setOperationAction(ISD::SDIVREM, VT, Expand); - setOperationAction(ISD::UDIVREM, VT, Expand); - setOperationAction(ISD::SREM, VT, Expand); - setOperationAction(ISD::UREM, VT, Expand); - setOperationAction(ISD::MULHU, VT, Expand); - setOperationAction(ISD::MULHS, VT, Expand); - setOperationAction(ISD::UMUL_LOHI, VT, Expand); - setOperationAction(ISD::SMUL_LOHI, VT, Expand); - setOperationAction(ISD::ROTR, VT, Expand); - setOperationAction(ISD::ROTL, VT, Expand); - setOperationAction(ISD::SHL_PARTS, VT, Expand); - setOperationAction(ISD::SRL_PARTS, VT, Expand); - setOperationAction(ISD::SRA_PARTS, VT, Expand); - setOperationAction(ISD::CTPOP, VT, Expand); - - 
setOperationAction(ISD::SETCC, VT, Expand); - setOperationAction(ISD::SELECT, VT, Expand); + setOperationAction({ISD::SDIVREM, ISD::UDIVREM, ISD::SREM, ISD::UREM, + ISD::MULHU, ISD::MULHS, ISD::UMUL_LOHI, ISD::SMUL_LOHI, + ISD::ROTR, ISD::ROTL, ISD::SHL_PARTS, ISD::SRL_PARTS, + ISD::SRA_PARTS, ISD::CTPOP, ISD::SETCC, ISD::SELECT}, + VT, Expand); setOperationAction(ISD::SELECT_CC, VT, Custom); } @@ -126,21 +107,17 @@ STI.getHasJmp32() ? Custom : Promote); } - setOperationAction(ISD::CTTZ, MVT::i64, Custom); - setOperationAction(ISD::CTLZ, MVT::i64, Custom); - setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Custom); - setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Custom); + setOperationAction( + {ISD::CTTZ, ISD::CTLZ, ISD::CTTZ_ZERO_UNDEF, ISD::CTLZ_ZERO_UNDEF}, + MVT::i64, Custom); - setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand); - setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand); - setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand); - setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Expand); + setOperationAction(ISD::SIGN_EXTEND_INREG, + {MVT::i1, MVT::i8, MVT::i16, MVT::i32}, Expand); // Extended load operations for i1 types must be promoted for (MVT VT : MVT::integer_valuetypes()) { - setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote); - setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote); - setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote); + setLoadExtAction({ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}, VT, MVT::i1, + Promote); setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Expand); setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i16, Expand); diff --git a/llvm/lib/Target/CSKY/CSKYISelLowering.cpp b/llvm/lib/Target/CSKY/CSKYISelLowering.cpp --- a/llvm/lib/Target/CSKY/CSKYISelLowering.cpp +++ b/llvm/lib/Target/CSKY/CSKYISelLowering.cpp @@ -51,58 +51,36 @@ addRegisterClass(MVT::f64, &CSKY::FPR64RegClass); } - setOperationAction(ISD::ADDCARRY, MVT::i32, Legal); - setOperationAction(ISD::SUBCARRY, MVT::i32, Legal); - 
setOperationAction(ISD::BITREVERSE, MVT::i32, Legal); - - setOperationAction(ISD::SREM, MVT::i32, Expand); - setOperationAction(ISD::UREM, MVT::i32, Expand); - setOperationAction(ISD::UDIVREM, MVT::i32, Expand); - setOperationAction(ISD::SDIVREM, MVT::i32, Expand); - setOperationAction(ISD::CTTZ, MVT::i32, Expand); - setOperationAction(ISD::CTPOP, MVT::i32, Expand); - setOperationAction(ISD::ROTR, MVT::i32, Expand); - setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand); - setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand); - setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand); - setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand); - setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand); - setOperationAction(ISD::SELECT_CC, MVT::i32, Expand); - setOperationAction(ISD::BR_CC, MVT::i32, Expand); - setOperationAction(ISD::BR_JT, MVT::Other, Expand); - setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand); - setOperationAction(ISD::STACKSAVE, MVT::Other, Expand); - setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand); - setOperationAction(ISD::MULHS, MVT::i32, Expand); - setOperationAction(ISD::MULHU, MVT::i32, Expand); - setOperationAction(ISD::VAARG, MVT::Other, Expand); - setOperationAction(ISD::VACOPY, MVT::Other, Expand); - setOperationAction(ISD::VAEND, MVT::Other, Expand); - - setLoadExtAction(ISD::EXTLOAD, MVT::i32, MVT::i1, Promote); - setLoadExtAction(ISD::SEXTLOAD, MVT::i32, MVT::i1, Promote); - setLoadExtAction(ISD::ZEXTLOAD, MVT::i32, MVT::i1, Promote); - - setOperationAction(ISD::GlobalAddress, MVT::i32, Custom); - setOperationAction(ISD::ExternalSymbol, MVT::i32, Custom); - setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom); - setOperationAction(ISD::BlockAddress, MVT::i32, Custom); - setOperationAction(ISD::JumpTable, MVT::i32, Custom); + setOperationAction({ISD::ADDCARRY, ISD::SUBCARRY, ISD::BITREVERSE}, MVT::i32, + Legal); + + setOperationAction({ISD::SREM, ISD::UREM, ISD::UDIVREM, ISD::SDIVREM, + ISD::CTTZ, ISD::CTPOP, 
ISD::ROTR, ISD::SHL_PARTS, + ISD::SRL_PARTS, ISD::SRA_PARTS, ISD::UMUL_LOHI, + ISD::SMUL_LOHI, ISD::SELECT_CC, ISD::BR_CC, + ISD::DYNAMIC_STACKALLOC, ISD::MULHS, ISD::MULHU}, + MVT::i32, Expand); + setOperationAction({ISD::BR_JT, ISD::STACKSAVE, ISD::STACKRESTORE, ISD::VAARG, + ISD::VACOPY, ISD::VAEND}, + MVT::Other, Expand); + + setLoadExtAction({ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}, MVT::i32, + MVT::i1, Promote); + + setOperationAction({ISD::GlobalAddress, ISD::ExternalSymbol, + ISD::GlobalTLSAddress, ISD::BlockAddress, ISD::JumpTable}, + MVT::i32, Custom); setOperationAction(ISD::VASTART, MVT::Other, Custom); if (!Subtarget.hasE2()) { setLoadExtAction(ISD::SEXTLOAD, MVT::i32, MVT::i8, Expand); setLoadExtAction(ISD::SEXTLOAD, MVT::i32, MVT::i16, Expand); - setOperationAction(ISD::CTLZ, MVT::i32, Expand); - setOperationAction(ISD::BSWAP, MVT::i32, Expand); + setOperationAction({ISD::CTLZ, ISD::BSWAP}, MVT::i32, Expand); } - if (!Subtarget.has2E3()) { - setOperationAction(ISD::ABS, MVT::i32, Expand); - setOperationAction(ISD::BITREVERSE, MVT::i32, Expand); - setOperationAction(ISD::SDIV, MVT::i32, Expand); - setOperationAction(ISD::UDIV, MVT::i32, Expand); - } + if (!Subtarget.has2E3()) + setOperationAction({ISD::ABS, ISD::BITREVERSE, ISD::SDIV, ISD::UDIV}, + MVT::i32, Expand); setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Expand); @@ -121,9 +99,7 @@ MVT AllVTy[] = {MVT::f32, MVT::f64}; for (auto VT : AllVTy) { - setOperationAction(ISD::FREM, VT, Expand); - setOperationAction(ISD::SELECT_CC, VT, Expand); - setOperationAction(ISD::BR_CC, VT, Expand); + setOperationAction({ISD::FREM, ISD::SELECT_CC, ISD::BR_CC}, VT, Expand); for (auto CC : FPCCToExtend) setCondCodeAction(CC, VT, Expand); diff --git a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp --- a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp +++ b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp @@ -1491,45 +1491,35 @@ // which default to "expand" 
for at least one type. // Misc operations. - setOperationAction(ISD::ConstantFP, MVT::f32, Legal); - setOperationAction(ISD::ConstantFP, MVT::f64, Legal); + setOperationAction(ISD::ConstantFP, {MVT::f32, MVT::f64}, Legal); setOperationAction(ISD::TRAP, MVT::Other, Legal); - setOperationAction(ISD::ConstantPool, MVT::i32, Custom); - setOperationAction(ISD::JumpTable, MVT::i32, Custom); + setOperationAction({ISD::ConstantPool, ISD::JumpTable, + ISD::GLOBAL_OFFSET_TABLE, ISD::GlobalTLSAddress}, + MVT::i32, Custom); setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand); - setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand); - setOperationAction(ISD::INLINEASM, MVT::Other, Custom); - setOperationAction(ISD::INLINEASM_BR, MVT::Other, Custom); - setOperationAction(ISD::PREFETCH, MVT::Other, Custom); + setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand); setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Custom); - setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom); - setOperationAction(ISD::EH_RETURN, MVT::Other, Custom); - setOperationAction(ISD::GLOBAL_OFFSET_TABLE, MVT::i32, Custom); - setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom); - setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom); + setOperationAction({ISD::INLINEASM, ISD::INLINEASM_BR, ISD::PREFETCH, + ISD::INTRINSIC_VOID, ISD::EH_RETURN, ISD::ATOMIC_FENCE}, + MVT::Other, Custom); // Custom legalize GlobalAddress nodes into CONST32. - setOperationAction(ISD::GlobalAddress, MVT::i32, Custom); - setOperationAction(ISD::GlobalAddress, MVT::i8, Custom); + setOperationAction(ISD::GlobalAddress, {MVT::i32, MVT::i8}, Custom); setOperationAction(ISD::BlockAddress, MVT::i32, Custom); // Hexagon needs to optimize cases with negative constants. 
- setOperationAction(ISD::SETCC, MVT::i8, Custom); - setOperationAction(ISD::SETCC, MVT::i16, Custom); - setOperationAction(ISD::SETCC, MVT::v4i8, Custom); - setOperationAction(ISD::SETCC, MVT::v2i16, Custom); + setOperationAction(ISD::SETCC, {MVT::i8, MVT::i16, MVT::v4i8, MVT::v2i16}, + Custom); // VASTART needs to be custom lowered to use the VarArgsFrameIndex. setOperationAction(ISD::VASTART, MVT::Other, Custom); - setOperationAction(ISD::VAEND, MVT::Other, Expand); - setOperationAction(ISD::VAARG, MVT::Other, Expand); + setOperationAction({ISD::VAEND, ISD::VAARG}, MVT::Other, Expand); if (Subtarget.isEnvironmentMusl()) setOperationAction(ISD::VACOPY, MVT::Other, Custom); else setOperationAction(ISD::VACOPY, MVT::Other, Expand); - setOperationAction(ISD::STACKSAVE, MVT::Other, Expand); - setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand); + setOperationAction({ISD::STACKSAVE, ISD::STACKRESTORE}, MVT::Other, Expand); setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom); if (EmitJumpTables) @@ -1538,68 +1528,45 @@ setMinimumJumpTableEntries(std::numeric_limits<unsigned>::max()); setOperationAction(ISD::BR_JT, MVT::Other, Expand); - for (unsigned LegalIntOp : - {ISD::ABS, ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX}) { - setOperationAction(LegalIntOp, MVT::i32, Legal); - setOperationAction(LegalIntOp, MVT::i64, Legal); - } + setOperationAction({ISD::ABS, ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX}, + {MVT::i32, MVT::i64}, Legal); // Hexagon has A4_addp_c and A4_subp_c that take and generate a carry bit, // but they only operate on i64. 
for (MVT VT : MVT::integer_valuetypes()) { - setOperationAction(ISD::UADDO, VT, Custom); - setOperationAction(ISD::USUBO, VT, Custom); - setOperationAction(ISD::SADDO, VT, Expand); - setOperationAction(ISD::SSUBO, VT, Expand); - setOperationAction(ISD::ADDCARRY, VT, Expand); - setOperationAction(ISD::SUBCARRY, VT, Expand); + setOperationAction({ISD::UADDO, ISD::USUBO}, VT, Custom); + setOperationAction({ISD::SADDO, ISD::SSUBO, ISD::ADDCARRY, ISD::SUBCARRY}, + VT, Expand); } - setOperationAction(ISD::ADDCARRY, MVT::i64, Custom); - setOperationAction(ISD::SUBCARRY, MVT::i64, Custom); + setOperationAction({ISD::ADDCARRY, ISD::SUBCARRY}, MVT::i64, Custom); - setOperationAction(ISD::CTLZ, MVT::i8, Promote); - setOperationAction(ISD::CTLZ, MVT::i16, Promote); - setOperationAction(ISD::CTTZ, MVT::i8, Promote); - setOperationAction(ISD::CTTZ, MVT::i16, Promote); + setOperationAction({ISD::CTLZ, ISD::CTTZ}, {MVT::i8, MVT::i16}, Promote); // Popcount can count # of 1s in i64 but returns i32. - setOperationAction(ISD::CTPOP, MVT::i8, Promote); - setOperationAction(ISD::CTPOP, MVT::i16, Promote); - setOperationAction(ISD::CTPOP, MVT::i32, Promote); + setOperationAction(ISD::CTPOP, {MVT::i8, MVT::i16, MVT::i32}, Promote); setOperationAction(ISD::CTPOP, MVT::i64, Legal); - setOperationAction(ISD::BITREVERSE, MVT::i32, Legal); - setOperationAction(ISD::BITREVERSE, MVT::i64, Legal); - setOperationAction(ISD::BSWAP, MVT::i32, Legal); - setOperationAction(ISD::BSWAP, MVT::i64, Legal); + setOperationAction({ISD::BITREVERSE, ISD::BSWAP}, {MVT::i32, MVT::i64}, + Legal); - setOperationAction(ISD::FSHL, MVT::i32, Legal); - setOperationAction(ISD::FSHL, MVT::i64, Legal); - setOperationAction(ISD::FSHR, MVT::i32, Legal); - setOperationAction(ISD::FSHR, MVT::i64, Legal); + setOperationAction({ISD::FSHL, ISD::FSHR}, {MVT::i32, MVT::i64}, Legal); - for (unsigned IntExpOp : - {ISD::SDIV, ISD::UDIV, ISD::SREM, ISD::UREM, - ISD::SDIVREM, ISD::UDIVREM, ISD::ROTL, ISD::ROTR, - ISD::SHL_PARTS, 
ISD::SRA_PARTS, ISD::SRL_PARTS, - ISD::SMUL_LOHI, ISD::UMUL_LOHI}) { - for (MVT VT : MVT::integer_valuetypes()) - setOperationAction(IntExpOp, VT, Expand); - } + for (MVT VT : MVT::integer_valuetypes()) + setOperationAction({ISD::SDIV, ISD::UDIV, ISD::SREM, ISD::UREM, + ISD::SDIVREM, ISD::UDIVREM, ISD::ROTL, ISD::ROTR, + ISD::SHL_PARTS, ISD::SRA_PARTS, ISD::SRL_PARTS, + ISD::SMUL_LOHI, ISD::UMUL_LOHI}, + VT, Expand); - for (unsigned FPExpOp : - {ISD::FDIV, ISD::FREM, ISD::FSQRT, ISD::FSIN, ISD::FCOS, ISD::FSINCOS, - ISD::FPOW, ISD::FCOPYSIGN}) { - for (MVT VT : MVT::fp_valuetypes()) - setOperationAction(FPExpOp, VT, Expand); - } + for (MVT VT : MVT::fp_valuetypes()) + setOperationAction({ISD::FDIV, ISD::FREM, ISD::FSQRT, ISD::FSIN, ISD::FCOS, + ISD::FSINCOS, ISD::FPOW, ISD::FCOPYSIGN}, + VT, Expand); // No extending loads from i32. - for (MVT VT : MVT::integer_valuetypes()) { - setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i32, Expand); - setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i32, Expand); - setLoadExtAction(ISD::EXTLOAD, VT, MVT::i32, Expand); - } + for (MVT VT : MVT::integer_valuetypes()) + setLoadExtAction({ISD::ZEXTLOAD, ISD::SEXTLOAD, ISD::EXTLOAD}, VT, MVT::i32, + Expand); // Turn FP truncstore into trunc + store. setTruncStoreAction(MVT::f64, MVT::f32, Expand); // Turn FP extload into load/fpextend. @@ -1607,14 +1574,10 @@ setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand); // Expand BR_CC and SELECT_CC for all integer and fp types. 
- for (MVT VT : MVT::integer_valuetypes()) { - setOperationAction(ISD::BR_CC, VT, Expand); - setOperationAction(ISD::SELECT_CC, VT, Expand); - } - for (MVT VT : MVT::fp_valuetypes()) { - setOperationAction(ISD::BR_CC, VT, Expand); - setOperationAction(ISD::SELECT_CC, VT, Expand); - } + for (MVT VT : MVT::integer_valuetypes()) + setOperationAction({ISD::BR_CC, ISD::SELECT_CC}, VT, Expand); + for (MVT VT : MVT::fp_valuetypes()) + setOperationAction({ISD::BR_CC, ISD::SELECT_CC}, VT, Expand); setOperationAction(ISD::BR_CC, MVT::Other, Expand); // @@ -1623,42 +1586,84 @@ // Set the action for vector operations to "expand", then override it with // either "custom" or "legal" for specific cases. - static const unsigned VectExpOps[] = { - // Integer arithmetic: - ISD::ADD, ISD::SUB, ISD::MUL, ISD::SDIV, ISD::UDIV, - ISD::SREM, ISD::UREM, ISD::SDIVREM, ISD::UDIVREM, ISD::SADDO, - ISD::UADDO, ISD::SSUBO, ISD::USUBO, ISD::SMUL_LOHI, ISD::UMUL_LOHI, - // Logical/bit: - ISD::AND, ISD::OR, ISD::XOR, ISD::ROTL, ISD::ROTR, - ISD::CTPOP, ISD::CTLZ, ISD::CTTZ, - // Floating point arithmetic/math functions: - ISD::FADD, ISD::FSUB, ISD::FMUL, ISD::FMA, ISD::FDIV, - ISD::FREM, ISD::FNEG, ISD::FABS, ISD::FSQRT, ISD::FSIN, - ISD::FCOS, ISD::FPOW, ISD::FLOG, ISD::FLOG2, - ISD::FLOG10, ISD::FEXP, ISD::FEXP2, ISD::FCEIL, ISD::FTRUNC, - ISD::FRINT, ISD::FNEARBYINT, ISD::FROUND, ISD::FFLOOR, - ISD::FMINNUM, ISD::FMAXNUM, ISD::FSINCOS, - // Misc: - ISD::BR_CC, ISD::SELECT_CC, ISD::ConstantPool, - // Vector: - ISD::BUILD_VECTOR, ISD::SCALAR_TO_VECTOR, - ISD::EXTRACT_VECTOR_ELT, ISD::INSERT_VECTOR_ELT, - ISD::EXTRACT_SUBVECTOR, ISD::INSERT_SUBVECTOR, - ISD::CONCAT_VECTORS, ISD::VECTOR_SHUFFLE, - ISD::SPLAT_VECTOR, - }; - for (MVT VT : MVT::fixedlen_vector_valuetypes()) { - for (unsigned VectExpOp : VectExpOps) - setOperationAction(VectExpOp, VT, Expand); + setOperationAction( + { + // Integer arithmetic: + ISD::ADD, + ISD::SUB, + ISD::MUL, + ISD::SDIV, + ISD::UDIV, + ISD::SREM, + ISD::UREM, + 
ISD::SDIVREM, + ISD::UDIVREM, + ISD::SADDO, + ISD::UADDO, + ISD::SSUBO, + ISD::USUBO, + ISD::SMUL_LOHI, + ISD::UMUL_LOHI, + // Logical/bit: + ISD::AND, + ISD::OR, + ISD::XOR, + ISD::ROTL, + ISD::ROTR, + ISD::CTPOP, + ISD::CTLZ, + ISD::CTTZ, + // Floating point arithmetic/math functions: + ISD::FADD, + ISD::FSUB, + ISD::FMUL, + ISD::FMA, + ISD::FDIV, + ISD::FREM, + ISD::FNEG, + ISD::FABS, + ISD::FSQRT, + ISD::FSIN, + ISD::FCOS, + ISD::FPOW, + ISD::FLOG, + ISD::FLOG2, + ISD::FLOG10, + ISD::FEXP, + ISD::FEXP2, + ISD::FCEIL, + ISD::FTRUNC, + ISD::FRINT, + ISD::FNEARBYINT, + ISD::FROUND, + ISD::FFLOOR, + ISD::FMINNUM, + ISD::FMAXNUM, + ISD::FSINCOS, + // Misc: + ISD::BR_CC, + ISD::SELECT_CC, + ISD::ConstantPool, + // Vector: + ISD::BUILD_VECTOR, + ISD::SCALAR_TO_VECTOR, + ISD::EXTRACT_VECTOR_ELT, + ISD::INSERT_VECTOR_ELT, + ISD::EXTRACT_SUBVECTOR, + ISD::INSERT_SUBVECTOR, + ISD::CONCAT_VECTORS, + ISD::VECTOR_SHUFFLE, + ISD::SPLAT_VECTOR, + }, + VT, Expand); // Expand all extending loads and truncating stores: for (MVT TargetVT : MVT::fixedlen_vector_valuetypes()) { if (TargetVT == VT) continue; - setLoadExtAction(ISD::EXTLOAD, TargetVT, VT, Expand); - setLoadExtAction(ISD::ZEXTLOAD, TargetVT, VT, Expand); - setLoadExtAction(ISD::SEXTLOAD, TargetVT, VT, Expand); + setLoadExtAction({ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}, TargetVT, + VT, Expand); setTruncStoreAction(VT, TargetVT, Expand); } @@ -1668,116 +1673,78 @@ setOperationAction(ISD::SELECT, VT, Promote); AddPromotedToType(ISD::SELECT, VT, VT32); } - setOperationAction(ISD::SRA, VT, Custom); - setOperationAction(ISD::SHL, VT, Custom); - setOperationAction(ISD::SRL, VT, Custom); + setOperationAction({ISD::SRA, ISD::SHL, ISD::SRL}, VT, Custom); } // Extending loads from (native) vectors of i8 into (native) vectors of i16 // are legal. 
- setLoadExtAction(ISD::EXTLOAD, MVT::v2i16, MVT::v2i8, Legal); - setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i16, MVT::v2i8, Legal); - setLoadExtAction(ISD::SEXTLOAD, MVT::v2i16, MVT::v2i8, Legal); - setLoadExtAction(ISD::EXTLOAD, MVT::v4i16, MVT::v4i8, Legal); - setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i16, MVT::v4i8, Legal); - setLoadExtAction(ISD::SEXTLOAD, MVT::v4i16, MVT::v4i8, Legal); + setLoadExtAction({ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}, MVT::v2i16, + MVT::v2i8, Legal); + setLoadExtAction({ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}, MVT::v4i16, + MVT::v4i8, Legal); - setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Legal); - setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Legal); - setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i32, Legal); + setOperationAction(ISD::SIGN_EXTEND_INREG, + {MVT::v2i8, MVT::v2i16, MVT::v2i32}, Legal); // Types natively supported: for (MVT NativeVT : {MVT::v8i1, MVT::v4i1, MVT::v2i1, MVT::v4i8, MVT::v8i8, MVT::v2i16, MVT::v4i16, MVT::v2i32}) { - setOperationAction(ISD::BUILD_VECTOR, NativeVT, Custom); - setOperationAction(ISD::EXTRACT_VECTOR_ELT, NativeVT, Custom); - setOperationAction(ISD::INSERT_VECTOR_ELT, NativeVT, Custom); - setOperationAction(ISD::EXTRACT_SUBVECTOR, NativeVT, Custom); - setOperationAction(ISD::INSERT_SUBVECTOR, NativeVT, Custom); - setOperationAction(ISD::CONCAT_VECTORS, NativeVT, Custom); - - setOperationAction(ISD::ADD, NativeVT, Legal); - setOperationAction(ISD::SUB, NativeVT, Legal); - setOperationAction(ISD::MUL, NativeVT, Legal); - setOperationAction(ISD::AND, NativeVT, Legal); - setOperationAction(ISD::OR, NativeVT, Legal); - setOperationAction(ISD::XOR, NativeVT, Legal); + setOperationAction({ISD::BUILD_VECTOR, ISD::EXTRACT_VECTOR_ELT, + ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_SUBVECTOR, + ISD::INSERT_SUBVECTOR, ISD::CONCAT_VECTORS}, + NativeVT, Custom); + + setOperationAction( + {ISD::ADD, ISD::SUB, ISD::MUL, ISD::AND, ISD::OR, ISD::XOR}, NativeVT, + Legal); if 
(NativeVT.getVectorElementType() != MVT::i1) setOperationAction(ISD::SPLAT_VECTOR, NativeVT, Legal); } - for (MVT VT : {MVT::v8i8, MVT::v4i16, MVT::v2i32}) { - setOperationAction(ISD::SMIN, VT, Legal); - setOperationAction(ISD::SMAX, VT, Legal); - setOperationAction(ISD::UMIN, VT, Legal); - setOperationAction(ISD::UMAX, VT, Legal); - } + setOperationAction({ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX}, + {MVT::v8i8, MVT::v4i16, MVT::v2i32}, Legal); // Custom lower unaligned loads. // Also, for both loads and stores, verify the alignment of the address // in case it is a compile-time constant. This is a usability feature to // provide a meaningful error message to users. - for (MVT VT : {MVT::i16, MVT::i32, MVT::v4i8, MVT::i64, MVT::v8i8, - MVT::v2i16, MVT::v4i16, MVT::v2i32}) { - setOperationAction(ISD::LOAD, VT, Custom); - setOperationAction(ISD::STORE, VT, Custom); - } + setOperationAction({ISD::LOAD, ISD::STORE}, + {MVT::i16, MVT::i32, MVT::v4i8, MVT::i64, MVT::v8i8, + MVT::v2i16, MVT::v4i16, MVT::v2i32}, + Custom); // Custom-lower load/stores of boolean vectors. - for (MVT VT : {MVT::v2i1, MVT::v4i1, MVT::v8i1}) { - setOperationAction(ISD::LOAD, VT, Custom); - setOperationAction(ISD::STORE, VT, Custom); - } + setOperationAction({ISD::LOAD, ISD::STORE}, {MVT::v2i1, MVT::v4i1, MVT::v8i1}, + Custom); - for (MVT VT : {MVT::v2i16, MVT::v4i8, MVT::v8i8, MVT::v2i32, MVT::v4i16, - MVT::v2i32}) { - setCondCodeAction(ISD::SETNE, VT, Expand); - setCondCodeAction(ISD::SETLE, VT, Expand); - setCondCodeAction(ISD::SETGE, VT, Expand); - setCondCodeAction(ISD::SETLT, VT, Expand); - setCondCodeAction(ISD::SETULE, VT, Expand); - setCondCodeAction(ISD::SETUGE, VT, Expand); - setCondCodeAction(ISD::SETULT, VT, Expand); - } + setCondCodeAction( + {ISD::SETNE, ISD::SETLE, ISD::SETGE, ISD::SETLT, ISD::SETULE, ISD::SETUGE, + ISD::SETULT}, + {MVT::v2i16, MVT::v4i8, MVT::v8i8, MVT::v2i32, MVT::v4i16, MVT::v2i32}, + Expand); // Custom-lower bitcasts from i8 to v8i1. 
- setOperationAction(ISD::BITCAST, MVT::i8, Custom); - setOperationAction(ISD::SETCC, MVT::v2i16, Custom); - setOperationAction(ISD::VSELECT, MVT::v4i8, Custom); - setOperationAction(ISD::VSELECT, MVT::v2i16, Custom); - setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i8, Custom); - setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i16, Custom); - setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i8, Custom); + setOperationAction(ISD::BITCAST, MVT::i8, Custom); + setOperationAction(ISD::SETCC, MVT::v2i16, Custom); + setOperationAction(ISD::VSELECT, {MVT::v4i8, MVT::v2i16}, Custom); + setOperationAction(ISD::VECTOR_SHUFFLE, {MVT::v4i8, MVT::v4i16, MVT::v8i8}, + Custom); // V5+. - setOperationAction(ISD::FMA, MVT::f64, Expand); - setOperationAction(ISD::FADD, MVT::f64, Expand); - setOperationAction(ISD::FSUB, MVT::f64, Expand); - setOperationAction(ISD::FMUL, MVT::f64, Expand); - - setOperationAction(ISD::FMINNUM, MVT::f32, Legal); - setOperationAction(ISD::FMAXNUM, MVT::f32, Legal); - - setOperationAction(ISD::FP_TO_UINT, MVT::i1, Promote); - setOperationAction(ISD::FP_TO_UINT, MVT::i8, Promote); - setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote); - setOperationAction(ISD::FP_TO_SINT, MVT::i1, Promote); - setOperationAction(ISD::FP_TO_SINT, MVT::i8, Promote); - setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote); - setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote); - setOperationAction(ISD::UINT_TO_FP, MVT::i8, Promote); - setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote); - setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote); - setOperationAction(ISD::SINT_TO_FP, MVT::i8, Promote); - setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote); + setOperationAction({ISD::FMA, ISD::FADD, ISD::FSUB, ISD::FMUL}, MVT::f64, + Expand); + + setOperationAction({ISD::FMINNUM, ISD::FMAXNUM}, MVT::f32, Legal); + + setOperationAction( + {ISD::FP_TO_UINT, ISD::FP_TO_SINT, ISD::UINT_TO_FP, ISD::SINT_TO_FP}, + {MVT::i1, MVT::i8, MVT::i16}, Promote); // Special handling for 
half-precision floating point conversions. // Lower half float conversions into library calls. - setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand); - setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand); - setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand); - setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand); + setOperationAction({ISD::FP16_TO_FP, ISD::FP_TO_FP16}, {MVT::f32, MVT::f64}, + Expand); setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand); setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand); @@ -1792,72 +1759,64 @@ setIndexedStoreAction(ISD::POST_INC, VT, Legal); } - // Subtarget-specific operation actions. - // - if (Subtarget.hasV60Ops()) { - setOperationAction(ISD::ROTL, MVT::i32, Legal); - setOperationAction(ISD::ROTL, MVT::i64, Legal); - setOperationAction(ISD::ROTR, MVT::i32, Legal); - setOperationAction(ISD::ROTR, MVT::i64, Legal); - } - if (Subtarget.hasV66Ops()) { - setOperationAction(ISD::FADD, MVT::f64, Legal); - setOperationAction(ISD::FSUB, MVT::f64, Legal); - } - if (Subtarget.hasV67Ops()) { - setOperationAction(ISD::FMINNUM, MVT::f64, Legal); - setOperationAction(ISD::FMAXNUM, MVT::f64, Legal); - setOperationAction(ISD::FMUL, MVT::f64, Legal); - } + // Subtarget-specific operation actions. 
+ // + if (Subtarget.hasV60Ops()) + setOperationAction({ISD::ROTL, ISD::ROTR}, {MVT::i32, MVT::i64}, Legal); + if (Subtarget.hasV66Ops()) + setOperationAction({ISD::FADD, ISD::FSUB}, MVT::f64, Legal); + if (Subtarget.hasV67Ops()) + setOperationAction({ISD::FMINNUM, ISD::FMAXNUM, ISD::FMUL}, MVT::f64, + Legal); - setTargetDAGCombine(ISD::VSELECT); + setTargetDAGCombine(ISD::VSELECT); - if (Subtarget.useHVXOps()) - initializeHVXLowering(); + if (Subtarget.useHVXOps()) + initializeHVXLowering(); - computeRegisterProperties(&HRI); + computeRegisterProperties(&HRI); - // - // Library calls for unsupported operations - // - bool FastMath = EnableFastMath; - - setLibcallName(RTLIB::SDIV_I32, "__hexagon_divsi3"); - setLibcallName(RTLIB::SDIV_I64, "__hexagon_divdi3"); - setLibcallName(RTLIB::UDIV_I32, "__hexagon_udivsi3"); - setLibcallName(RTLIB::UDIV_I64, "__hexagon_udivdi3"); - setLibcallName(RTLIB::SREM_I32, "__hexagon_modsi3"); - setLibcallName(RTLIB::SREM_I64, "__hexagon_moddi3"); - setLibcallName(RTLIB::UREM_I32, "__hexagon_umodsi3"); - setLibcallName(RTLIB::UREM_I64, "__hexagon_umoddi3"); - - setLibcallName(RTLIB::SINTTOFP_I128_F64, "__hexagon_floattidf"); - setLibcallName(RTLIB::SINTTOFP_I128_F32, "__hexagon_floattisf"); - setLibcallName(RTLIB::FPTOUINT_F32_I128, "__hexagon_fixunssfti"); - setLibcallName(RTLIB::FPTOUINT_F64_I128, "__hexagon_fixunsdfti"); - setLibcallName(RTLIB::FPTOSINT_F32_I128, "__hexagon_fixsfti"); - setLibcallName(RTLIB::FPTOSINT_F64_I128, "__hexagon_fixdfti"); - - // This is the only fast library function for sqrtd. 
- if (FastMath) - setLibcallName(RTLIB::SQRT_F64, "__hexagon_fast2_sqrtdf2"); - - // Prefix is: nothing for "slow-math", - // "fast2_" for V5+ fast-math double-precision - // (actually, keep fast-math and fast-math2 separate for now) - if (FastMath) { - setLibcallName(RTLIB::ADD_F64, "__hexagon_fast_adddf3"); - setLibcallName(RTLIB::SUB_F64, "__hexagon_fast_subdf3"); - setLibcallName(RTLIB::MUL_F64, "__hexagon_fast_muldf3"); - setLibcallName(RTLIB::DIV_F64, "__hexagon_fast_divdf3"); - setLibcallName(RTLIB::DIV_F32, "__hexagon_fast_divsf3"); - } else { - setLibcallName(RTLIB::ADD_F64, "__hexagon_adddf3"); - setLibcallName(RTLIB::SUB_F64, "__hexagon_subdf3"); - setLibcallName(RTLIB::MUL_F64, "__hexagon_muldf3"); - setLibcallName(RTLIB::DIV_F64, "__hexagon_divdf3"); - setLibcallName(RTLIB::DIV_F32, "__hexagon_divsf3"); - } + // + // Library calls for unsupported operations + // + bool FastMath = EnableFastMath; + + setLibcallName(RTLIB::SDIV_I32, "__hexagon_divsi3"); + setLibcallName(RTLIB::SDIV_I64, "__hexagon_divdi3"); + setLibcallName(RTLIB::UDIV_I32, "__hexagon_udivsi3"); + setLibcallName(RTLIB::UDIV_I64, "__hexagon_udivdi3"); + setLibcallName(RTLIB::SREM_I32, "__hexagon_modsi3"); + setLibcallName(RTLIB::SREM_I64, "__hexagon_moddi3"); + setLibcallName(RTLIB::UREM_I32, "__hexagon_umodsi3"); + setLibcallName(RTLIB::UREM_I64, "__hexagon_umoddi3"); + + setLibcallName(RTLIB::SINTTOFP_I128_F64, "__hexagon_floattidf"); + setLibcallName(RTLIB::SINTTOFP_I128_F32, "__hexagon_floattisf"); + setLibcallName(RTLIB::FPTOUINT_F32_I128, "__hexagon_fixunssfti"); + setLibcallName(RTLIB::FPTOUINT_F64_I128, "__hexagon_fixunsdfti"); + setLibcallName(RTLIB::FPTOSINT_F32_I128, "__hexagon_fixsfti"); + setLibcallName(RTLIB::FPTOSINT_F64_I128, "__hexagon_fixdfti"); + + // This is the only fast library function for sqrtd. 
+ if (FastMath) + setLibcallName(RTLIB::SQRT_F64, "__hexagon_fast2_sqrtdf2"); + + // Prefix is: nothing for "slow-math", + // "fast2_" for V5+ fast-math double-precision + // (actually, keep fast-math and fast-math2 separate for now) + if (FastMath) { + setLibcallName(RTLIB::ADD_F64, "__hexagon_fast_adddf3"); + setLibcallName(RTLIB::SUB_F64, "__hexagon_fast_subdf3"); + setLibcallName(RTLIB::MUL_F64, "__hexagon_fast_muldf3"); + setLibcallName(RTLIB::DIV_F64, "__hexagon_fast_divdf3"); + setLibcallName(RTLIB::DIV_F32, "__hexagon_fast_divsf3"); + } else { + setLibcallName(RTLIB::ADD_F64, "__hexagon_adddf3"); + setLibcallName(RTLIB::SUB_F64, "__hexagon_subdf3"); + setLibcallName(RTLIB::MUL_F64, "__hexagon_muldf3"); + setLibcallName(RTLIB::DIV_F64, "__hexagon_divdf3"); + setLibcallName(RTLIB::DIV_F32, "__hexagon_divsf3"); + } if (FastMath) setLibcallName(RTLIB::SQRT_F32, "__hexagon_fast2_sqrtf"); @@ -1870,9 +1829,7 @@ setLibcallName(RTLIB::FPEXT_F16_F32, "__extendhfsf2"); // These cause problems when the shift amount is non-constant. - setLibcallName(RTLIB::SHL_I128, nullptr); - setLibcallName(RTLIB::SRL_I128, nullptr); - setLibcallName(RTLIB::SRA_I128, nullptr); + setLibcallName({RTLIB::SHL_I128, RTLIB::SRL_I128, RTLIB::SRA_I128}); } const char* HexagonTargetLowering::getTargetNodeName(unsigned Opcode) const { diff --git a/llvm/lib/Target/Hexagon/HexagonISelLoweringHVX.cpp b/llvm/lib/Target/Hexagon/HexagonISelLoweringHVX.cpp --- a/llvm/lib/Target/Hexagon/HexagonISelLoweringHVX.cpp +++ b/llvm/lib/Target/Hexagon/HexagonISelLoweringHVX.cpp @@ -78,14 +78,11 @@ // Handle bitcasts of vector predicates to scalars (e.g. v32i1 to i32). // Note: v16i1 -> i16 is handled in type legalization instead of op // legalization. 
- setOperationAction(ISD::BITCAST, MVT::i16, Custom); - setOperationAction(ISD::BITCAST, MVT::i32, Custom); - setOperationAction(ISD::BITCAST, MVT::i64, Custom); - setOperationAction(ISD::BITCAST, MVT::v16i1, Custom); - setOperationAction(ISD::BITCAST, MVT::v128i1, Custom); - setOperationAction(ISD::BITCAST, MVT::i128, Custom); - setOperationAction(ISD::VECTOR_SHUFFLE, ByteV, Legal); - setOperationAction(ISD::VECTOR_SHUFFLE, ByteW, Legal); + setOperationAction( + ISD::BITCAST, + {MVT::i16, MVT::i32, MVT::i64, MVT::v16i1, MVT::v128i1, MVT::i128}, + Custom); + setOperationAction(ISD::VECTOR_SHUFFLE, {ByteV, ByteW}, Legal); setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom); if (Subtarget.useHVX128BOps() && Subtarget.useHVXV68Ops() && @@ -95,20 +92,16 @@ static const MVT FloatW[] = { MVT::v128f16, MVT::v64f32 }; for (MVT T : FloatV) { - setOperationAction(ISD::FADD, T, Legal); - setOperationAction(ISD::FSUB, T, Legal); - setOperationAction(ISD::FMUL, T, Legal); - setOperationAction(ISD::FMINNUM, T, Legal); - setOperationAction(ISD::FMAXNUM, T, Legal); + setOperationAction( + {ISD::FADD, ISD::FSUB, ISD::FMUL, ISD::FMINNUM, ISD::FMAXNUM}, T, + Legal); - setOperationAction(ISD::INSERT_SUBVECTOR, T, Custom); - setOperationAction(ISD::EXTRACT_SUBVECTOR, T, Custom); + setOperationAction({ISD::INSERT_SUBVECTOR, ISD::EXTRACT_SUBVECTOR}, T, + Custom); - setOperationAction(ISD::SPLAT_VECTOR, T, Legal); - setOperationAction(ISD::SPLAT_VECTOR, T, Legal); + setOperationAction(ISD::SPLAT_VECTOR, T, Legal); - setOperationAction(ISD::MLOAD, T, Custom); - setOperationAction(ISD::MSTORE, T, Custom); + setOperationAction({ISD::MLOAD, ISD::MSTORE}, T, Custom); // Custom-lower BUILD_VECTOR. The standard (target-independent) // handling of it would convert it to a load, which is not always // the optimal choice. 
@@ -118,9 +111,9 @@ // BUILD_VECTOR with f16 operands cannot be promoted without // promoting the result, so lower the node to vsplat or constant pool - setOperationAction(ISD::BUILD_VECTOR, MVT::f16, Custom); - setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::f16, Custom); - setOperationAction(ISD::SPLAT_VECTOR, MVT::f16, Custom); + setOperationAction( + {ISD::BUILD_VECTOR, ISD::INSERT_VECTOR_ELT, ISD::SPLAT_VECTOR}, + MVT::f16, Custom); // Vector shuffle is always promoted to ByteV and a bitcast to f16 is // generated. @@ -130,14 +123,9 @@ setPromoteTo(ISD::VECTOR_SHUFFLE, MVT::v32f32, ByteV); for (MVT P : FloatW) { - setOperationAction(ISD::LOAD, P, Custom); - setOperationAction(ISD::STORE, P, Custom); - setOperationAction(ISD::FADD, P, Custom); - setOperationAction(ISD::FSUB, P, Custom); - setOperationAction(ISD::FMUL, P, Custom); - setOperationAction(ISD::FMINNUM, P, Custom); - setOperationAction(ISD::FMAXNUM, P, Custom); - setOperationAction(ISD::VSELECT, P, Custom); + setOperationAction({ISD::LOAD, ISD::STORE, ISD::FADD, ISD::FSUB, + ISD::FMUL, ISD::FMINNUM, ISD::FMAXNUM, ISD::VSELECT}, + P, Custom); // Custom-lower BUILD_VECTOR. The standard (target-independent) // handling of it would convert it to a load, which is not always @@ -146,8 +134,7 @@ // Make concat-vectors custom to handle concats of more than 2 vectors. 
setOperationAction(ISD::CONCAT_VECTORS, P, Custom); - setOperationAction(ISD::MLOAD, P, Custom); - setOperationAction(ISD::MSTORE, P, Custom); + setOperationAction({ISD::MLOAD, ISD::MSTORE}, P, Custom); } if (Subtarget.useHVXQFloatOps()) { @@ -163,75 +150,49 @@ setIndexedLoadAction(ISD::POST_INC, T, Legal); setIndexedStoreAction(ISD::POST_INC, T, Legal); - setOperationAction(ISD::AND, T, Legal); - setOperationAction(ISD::OR, T, Legal); - setOperationAction(ISD::XOR, T, Legal); - setOperationAction(ISD::ADD, T, Legal); - setOperationAction(ISD::SUB, T, Legal); - setOperationAction(ISD::MUL, T, Legal); - setOperationAction(ISD::CTPOP, T, Legal); - setOperationAction(ISD::CTLZ, T, Legal); - setOperationAction(ISD::SELECT, T, Legal); - setOperationAction(ISD::SPLAT_VECTOR, T, Legal); - if (T != ByteV) { - setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, T, Legal); - setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, T, Legal); - setOperationAction(ISD::BSWAP, T, Legal); - } - - setOperationAction(ISD::SMIN, T, Legal); - setOperationAction(ISD::SMAX, T, Legal); - if (T.getScalarType() != MVT::i32) { - setOperationAction(ISD::UMIN, T, Legal); - setOperationAction(ISD::UMAX, T, Legal); - } - - setOperationAction(ISD::CTTZ, T, Custom); - setOperationAction(ISD::LOAD, T, Custom); - setOperationAction(ISD::MLOAD, T, Custom); - setOperationAction(ISD::MSTORE, T, Custom); - setOperationAction(ISD::MULHS, T, Custom); - setOperationAction(ISD::MULHU, T, Custom); - setOperationAction(ISD::BUILD_VECTOR, T, Custom); + setOperationAction({ISD::AND, ISD::OR, ISD::XOR, ISD::ADD, ISD::SUB, + ISD::MUL, ISD::CTPOP, ISD::CTLZ, ISD::SELECT, + ISD::SPLAT_VECTOR}, + T, Legal); + if (T != ByteV) + setOperationAction({ISD::SIGN_EXTEND_VECTOR_INREG, + ISD::ZERO_EXTEND_VECTOR_INREG, ISD::BSWAP}, + T, Legal); + + setOperationAction({ISD::SMIN, ISD::SMAX}, T, Legal); + if (T.getScalarType() != MVT::i32) + setOperationAction({ISD::UMIN, ISD::UMAX}, T, Legal); + + setOperationAction({ISD::CTTZ, 
ISD::LOAD, ISD::MLOAD, ISD::MSTORE, + ISD::MULHS, ISD::MULHU, ISD::BUILD_VECTOR}, + T, Custom); // Make concat-vectors custom to handle concats of more than 2 vectors. - setOperationAction(ISD::CONCAT_VECTORS, T, Custom); - setOperationAction(ISD::INSERT_SUBVECTOR, T, Custom); - setOperationAction(ISD::INSERT_VECTOR_ELT, T, Custom); - setOperationAction(ISD::EXTRACT_SUBVECTOR, T, Custom); - setOperationAction(ISD::EXTRACT_VECTOR_ELT, T, Custom); - setOperationAction(ISD::ANY_EXTEND, T, Custom); - setOperationAction(ISD::SIGN_EXTEND, T, Custom); - setOperationAction(ISD::ZERO_EXTEND, T, Custom); + setOperationAction({ISD::CONCAT_VECTORS, ISD::INSERT_SUBVECTOR, + ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_SUBVECTOR, + ISD::EXTRACT_VECTOR_ELT, ISD::ANY_EXTEND, + ISD::SIGN_EXTEND, ISD::ZERO_EXTEND}, + T, Custom); if (T != ByteV) { setOperationAction(ISD::ANY_EXTEND_VECTOR_INREG, T, Custom); // HVX only has shifts of words and halfwords. - setOperationAction(ISD::SRA, T, Custom); - setOperationAction(ISD::SHL, T, Custom); - setOperationAction(ISD::SRL, T, Custom); + setOperationAction({ISD::SRA, ISD::SHL, ISD::SRL}, T, Custom); // Promote all shuffles to operate on vectors of bytes. 
setPromoteTo(ISD::VECTOR_SHUFFLE, T, ByteV); } - if (Subtarget.useHVXQFloatOps()) { - setOperationAction(ISD::SINT_TO_FP, T, Expand); - setOperationAction(ISD::UINT_TO_FP, T, Expand); - setOperationAction(ISD::FP_TO_SINT, T, Expand); - setOperationAction(ISD::FP_TO_UINT, T, Expand); - } else if (Subtarget.useHVXIEEEFPOps()) { - setOperationAction(ISD::SINT_TO_FP, T, Custom); - setOperationAction(ISD::UINT_TO_FP, T, Custom); - setOperationAction(ISD::FP_TO_SINT, T, Custom); - setOperationAction(ISD::FP_TO_UINT, T, Custom); - } - - setCondCodeAction(ISD::SETNE, T, Expand); - setCondCodeAction(ISD::SETLE, T, Expand); - setCondCodeAction(ISD::SETGE, T, Expand); - setCondCodeAction(ISD::SETLT, T, Expand); - setCondCodeAction(ISD::SETULE, T, Expand); - setCondCodeAction(ISD::SETUGE, T, Expand); - setCondCodeAction(ISD::SETULT, T, Expand); + if (Subtarget.useHVXQFloatOps()) + setOperationAction( + {ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::FP_TO_SINT, ISD::FP_TO_UINT}, + T, Expand); + else if (Subtarget.useHVXIEEEFPOps()) + setOperationAction( + {ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::FP_TO_SINT, ISD::FP_TO_UINT}, + T, Custom); + + setCondCodeAction({ISD::SETNE, ISD::SETLE, ISD::SETGE, ISD::SETLT, + ISD::SETULE, ISD::SETUGE, ISD::SETULT}, + T, Expand); } for (MVT T : LegalW) { @@ -244,80 +205,42 @@ // Custom-lower these operations for pairs. Expand them into a concat // of the corresponding operations on individual vectors. 
- setOperationAction(ISD::ANY_EXTEND, T, Custom); - setOperationAction(ISD::SIGN_EXTEND, T, Custom); - setOperationAction(ISD::ZERO_EXTEND, T, Custom); - setOperationAction(ISD::SIGN_EXTEND_INREG, T, Custom); - setOperationAction(ISD::ANY_EXTEND_VECTOR_INREG, T, Custom); - setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, T, Legal); - setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, T, Legal); - setOperationAction(ISD::SPLAT_VECTOR, T, Custom); - - setOperationAction(ISD::LOAD, T, Custom); - setOperationAction(ISD::STORE, T, Custom); - setOperationAction(ISD::MLOAD, T, Custom); - setOperationAction(ISD::MSTORE, T, Custom); - setOperationAction(ISD::CTLZ, T, Custom); - setOperationAction(ISD::CTTZ, T, Custom); - setOperationAction(ISD::CTPOP, T, Custom); - - setOperationAction(ISD::ADD, T, Legal); - setOperationAction(ISD::SUB, T, Legal); - setOperationAction(ISD::MUL, T, Custom); - setOperationAction(ISD::MULHS, T, Custom); - setOperationAction(ISD::MULHU, T, Custom); - setOperationAction(ISD::AND, T, Custom); - setOperationAction(ISD::OR, T, Custom); - setOperationAction(ISD::XOR, T, Custom); - setOperationAction(ISD::SETCC, T, Custom); - setOperationAction(ISD::VSELECT, T, Custom); + setOperationAction({ISD::ANY_EXTEND, ISD::SIGN_EXTEND, ISD::ZERO_EXTEND, + ISD::SIGN_EXTEND_INREG, ISD::ANY_EXTEND_VECTOR_INREG, + ISD::SPLAT_VECTOR}, + T, Custom); + setOperationAction( + {ISD::SIGN_EXTEND_VECTOR_INREG, ISD::ZERO_EXTEND_VECTOR_INREG}, T, + Legal); + + setOperationAction({ISD::LOAD, ISD::STORE, ISD::MLOAD, ISD::MSTORE, + ISD::CTLZ, ISD::CTTZ, ISD::CTPOP}, + T, Custom); + + setOperationAction({ISD::ADD, ISD::SUB}, T, Legal); + setOperationAction({ISD::MUL, ISD::MULHS, ISD::MULHU, ISD::AND, ISD::OR, + ISD::XOR, ISD::SETCC, ISD::VSELECT}, + T, Custom); if (T != ByteW) { - setOperationAction(ISD::SRA, T, Custom); - setOperationAction(ISD::SHL, T, Custom); - setOperationAction(ISD::SRL, T, Custom); + setOperationAction({ISD::SRA, ISD::SHL, ISD::SRL}, T, Custom); // 
Promote all shuffles to operate on vectors of bytes. setPromoteTo(ISD::VECTOR_SHUFFLE, T, ByteW); } - setOperationAction(ISD::SMIN, T, Custom); - setOperationAction(ISD::SMAX, T, Custom); - if (T.getScalarType() != MVT::i32) { - setOperationAction(ISD::UMIN, T, Custom); - setOperationAction(ISD::UMAX, T, Custom); - } + setOperationAction({ISD::SMIN, ISD::SMAX}, T, Custom); + if (T.getScalarType() != MVT::i32) + setOperationAction({ISD::UMIN, ISD::UMAX}, T, Custom); - setOperationAction(ISD::SINT_TO_FP, T, Custom); - setOperationAction(ISD::UINT_TO_FP, T, Custom); - setOperationAction(ISD::FP_TO_SINT, T, Custom); - setOperationAction(ISD::FP_TO_UINT, T, Custom); + setOperationAction( + {ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::FP_TO_SINT, ISD::FP_TO_UINT}, T, + Custom); } - setCondCodeAction(ISD::SETNE, MVT::v64f16, Expand); - setCondCodeAction(ISD::SETLE, MVT::v64f16, Expand); - setCondCodeAction(ISD::SETGE, MVT::v64f16, Expand); - setCondCodeAction(ISD::SETLT, MVT::v64f16, Expand); - setCondCodeAction(ISD::SETONE, MVT::v64f16, Expand); - setCondCodeAction(ISD::SETOLE, MVT::v64f16, Expand); - setCondCodeAction(ISD::SETOGE, MVT::v64f16, Expand); - setCondCodeAction(ISD::SETOLT, MVT::v64f16, Expand); - setCondCodeAction(ISD::SETUNE, MVT::v64f16, Expand); - setCondCodeAction(ISD::SETULE, MVT::v64f16, Expand); - setCondCodeAction(ISD::SETUGE, MVT::v64f16, Expand); - setCondCodeAction(ISD::SETULT, MVT::v64f16, Expand); - - setCondCodeAction(ISD::SETNE, MVT::v32f32, Expand); - setCondCodeAction(ISD::SETLE, MVT::v32f32, Expand); - setCondCodeAction(ISD::SETGE, MVT::v32f32, Expand); - setCondCodeAction(ISD::SETLT, MVT::v32f32, Expand); - setCondCodeAction(ISD::SETONE, MVT::v32f32, Expand); - setCondCodeAction(ISD::SETOLE, MVT::v32f32, Expand); - setCondCodeAction(ISD::SETOGE, MVT::v32f32, Expand); - setCondCodeAction(ISD::SETOLT, MVT::v32f32, Expand); - setCondCodeAction(ISD::SETUNE, MVT::v32f32, Expand); - setCondCodeAction(ISD::SETULE, MVT::v32f32, Expand); - 
setCondCodeAction(ISD::SETUGE, MVT::v32f32, Expand); - setCondCodeAction(ISD::SETULT, MVT::v32f32, Expand); + setCondCodeAction({ISD::SETNE, ISD::SETLE, ISD::SETGE, ISD::SETLT, + ISD::SETONE, ISD::SETOLE, ISD::SETOGE, ISD::SETOLT, + ISD::SETUNE, ISD::SETULE, ISD::SETUGE, ISD::SETULT}, + {MVT::v64f16, MVT::v32f32}, Expand); // Boolean vectors. @@ -329,36 +252,30 @@ // Set these actions first, and allow the single actions to overwrite // any duplicates. MVT BoolW = MVT::getVectorVT(MVT::i1, T.getVectorNumElements()); - setOperationAction(ISD::SETCC, BoolW, Custom); - setOperationAction(ISD::AND, BoolW, Custom); - setOperationAction(ISD::OR, BoolW, Custom); - setOperationAction(ISD::XOR, BoolW, Custom); + setOperationAction({ISD::SETCC, ISD::AND, ISD::OR, ISD::XOR}, BoolW, + Custom); // Masked load/store takes a mask that may need splitting. - setOperationAction(ISD::MLOAD, BoolW, Custom); - setOperationAction(ISD::MSTORE, BoolW, Custom); + setOperationAction({ISD::MLOAD, ISD::MSTORE}, BoolW, Custom); } for (MVT T : LegalV) { MVT BoolV = MVT::getVectorVT(MVT::i1, T.getVectorNumElements()); - setOperationAction(ISD::BUILD_VECTOR, BoolV, Custom); - setOperationAction(ISD::CONCAT_VECTORS, BoolV, Custom); - setOperationAction(ISD::INSERT_SUBVECTOR, BoolV, Custom); - setOperationAction(ISD::INSERT_VECTOR_ELT, BoolV, Custom); - setOperationAction(ISD::EXTRACT_SUBVECTOR, BoolV, Custom); - setOperationAction(ISD::EXTRACT_VECTOR_ELT, BoolV, Custom); - setOperationAction(ISD::SELECT, BoolV, Custom); - setOperationAction(ISD::AND, BoolV, Legal); - setOperationAction(ISD::OR, BoolV, Legal); - setOperationAction(ISD::XOR, BoolV, Legal); + setOperationAction({ISD::BUILD_VECTOR, ISD::CONCAT_VECTORS, + ISD::INSERT_SUBVECTOR, ISD::INSERT_VECTOR_ELT, + ISD::EXTRACT_SUBVECTOR, ISD::EXTRACT_VECTOR_ELT, + ISD::SELECT}, + BoolV, Custom); + setOperationAction({ISD::AND, ISD::OR, ISD::XOR}, BoolV, Legal); } - if (Use64b) { - for (MVT T: {MVT::v32i8, MVT::v32i16, MVT::v16i8, MVT::v16i16, 
MVT::v16i32}) - setOperationAction(ISD::SIGN_EXTEND_INREG, T, Legal); - } else { - for (MVT T: {MVT::v64i8, MVT::v64i16, MVT::v32i8, MVT::v32i16, MVT::v32i32}) - setOperationAction(ISD::SIGN_EXTEND_INREG, T, Legal); - } + if (Use64b) + setOperationAction( + ISD::SIGN_EXTEND_INREG, + {MVT::v32i8, MVT::v32i16, MVT::v16i8, MVT::v16i16, MVT::v16i32}, Legal); + else + setOperationAction( + ISD::SIGN_EXTEND_INREG, + {MVT::v64i8, MVT::v64i16, MVT::v32i8, MVT::v32i16, MVT::v32i32}, Legal); // Handle store widening for short vectors. unsigned HwLen = Subtarget.getVectorLength(); @@ -371,13 +288,10 @@ MVT VecTy = MVT::getVectorVT(ElemTy, N); auto Action = getPreferredVectorAction(VecTy); if (Action == TargetLoweringBase::TypeWidenVector) { - setOperationAction(ISD::LOAD, VecTy, Custom); - setOperationAction(ISD::STORE, VecTy, Custom); - setOperationAction(ISD::SETCC, VecTy, Custom); - setOperationAction(ISD::TRUNCATE, VecTy, Custom); - setOperationAction(ISD::ANY_EXTEND, VecTy, Custom); - setOperationAction(ISD::SIGN_EXTEND, VecTy, Custom); - setOperationAction(ISD::ZERO_EXTEND, VecTy, Custom); + setOperationAction({ISD::LOAD, ISD::STORE, ISD::SETCC, ISD::TRUNCATE, + ISD::ANY_EXTEND, ISD::SIGN_EXTEND, + ISD::ZERO_EXTEND}, + VecTy, Custom); MVT BoolTy = MVT::getVectorVT(MVT::i1, N); if (!isTypeLegal(BoolTy)) diff --git a/llvm/lib/Target/Lanai/LanaiISelLowering.cpp b/llvm/lib/Target/Lanai/LanaiISelLowering.cpp --- a/llvm/lib/Target/Lanai/LanaiISelLowering.cpp +++ b/llvm/lib/Target/Lanai/LanaiISelLowering.cpp @@ -82,61 +82,33 @@ setStackPointerRegisterToSaveRestore(Lanai::SP); - setOperationAction(ISD::BR_CC, MVT::i32, Custom); - setOperationAction(ISD::BR_JT, MVT::Other, Expand); - setOperationAction(ISD::BRCOND, MVT::Other, Expand); - setOperationAction(ISD::SETCC, MVT::i32, Custom); - setOperationAction(ISD::SELECT, MVT::i32, Expand); - setOperationAction(ISD::SELECT_CC, MVT::i32, Custom); - - setOperationAction(ISD::GlobalAddress, MVT::i32, Custom); - 
setOperationAction(ISD::BlockAddress, MVT::i32, Custom); - setOperationAction(ISD::JumpTable, MVT::i32, Custom); - setOperationAction(ISD::ConstantPool, MVT::i32, Custom); - - setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom); - setOperationAction(ISD::STACKSAVE, MVT::Other, Expand); - setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand); + setOperationAction({ISD::BR_CC, ISD::GlobalAddress, ISD::BlockAddress, + ISD::JumpTable, ISD::ConstantPool, ISD::SETCC, + ISD::SELECT_CC, ISD::DYNAMIC_STACKALLOC, ISD::MUL, + ISD::SHL_PARTS, ISD::SRL_PARTS}, + MVT::i32, Custom); + + setOperationAction({ISD::BR_JT, ISD::BRCOND, ISD::STACKSAVE, + ISD::STACKRESTORE, ISD::VAARG, ISD::VACOPY, ISD::VAEND}, + MVT::Other, Expand); setOperationAction(ISD::VASTART, MVT::Other, Custom); - setOperationAction(ISD::VAARG, MVT::Other, Expand); - setOperationAction(ISD::VACOPY, MVT::Other, Expand); - setOperationAction(ISD::VAEND, MVT::Other, Expand); - - setOperationAction(ISD::SDIV, MVT::i32, Expand); - setOperationAction(ISD::UDIV, MVT::i32, Expand); - setOperationAction(ISD::SDIVREM, MVT::i32, Expand); - setOperationAction(ISD::UDIVREM, MVT::i32, Expand); - setOperationAction(ISD::SREM, MVT::i32, Expand); - setOperationAction(ISD::UREM, MVT::i32, Expand); - - setOperationAction(ISD::MUL, MVT::i32, Custom); - setOperationAction(ISD::MULHU, MVT::i32, Expand); - setOperationAction(ISD::MULHS, MVT::i32, Expand); - setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand); - setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand); - - setOperationAction(ISD::ROTR, MVT::i32, Expand); - setOperationAction(ISD::ROTL, MVT::i32, Expand); - setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom); - setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom); - setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand); - - setOperationAction(ISD::BSWAP, MVT::i32, Expand); - setOperationAction(ISD::CTPOP, MVT::i32, Legal); - setOperationAction(ISD::CTLZ, MVT::i32, Legal); - setOperationAction(ISD::CTTZ, 
MVT::i32, Legal); - - setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand); - setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand); - setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand); + + setOperationAction({ISD::SELECT, ISD::SDIV, ISD::UDIV, ISD::SDIVREM, + ISD::UDIVREM, ISD::SREM, ISD::UREM, ISD::MULHU, + ISD::MULHS, ISD::UMUL_LOHI, ISD::SMUL_LOHI, ISD::ROTR, + ISD::ROTL, ISD::SRA_PARTS, ISD::BSWAP}, + MVT::i32, Expand); + + setOperationAction({ISD::CTPOP, ISD::CTLZ, ISD::CTTZ}, MVT::i32, Legal); + + setOperationAction(ISD::SIGN_EXTEND_INREG, {MVT::i1, MVT::i8, MVT::i16}, + Expand); // Extended load operations for i1 types must be promoted - for (MVT VT : MVT::integer_valuetypes()) { - setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote); - setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote); - setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote); - } + for (MVT VT : MVT::integer_valuetypes()) + setLoadExtAction({ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}, VT, MVT::i1, + Promote); setTargetDAGCombine({ISD::ADD, ISD::SUB, ISD::AND, ISD::OR, ISD::XOR}); diff --git a/llvm/lib/Target/M68k/M68kISelLowering.cpp b/llvm/lib/Target/M68k/M68kISelLowering.cpp --- a/llvm/lib/Target/M68k/M68kISelLowering.cpp +++ b/llvm/lib/Target/M68k/M68kISelLowering.cpp @@ -59,11 +59,9 @@ addRegisterClass(MVT::i16, &M68k::XR16RegClass); addRegisterClass(MVT::i32, &M68k::XR32RegClass); - for (auto VT : MVT::integer_valuetypes()) { - setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote); - setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote); - setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote); - } + for (auto VT : MVT::integer_valuetypes()) + setLoadExtAction({ISD::SEXTLOAD, ISD::ZEXTLOAD, ISD::EXTLOAD}, VT, MVT::i1, + Promote); // We don't accept any truncstore of integer registers. 
setTruncStoreAction(MVT::i64, MVT::i32, Expand); @@ -89,36 +87,23 @@ setOperationAction(OP, MVT::i32, LibCall); } - for (auto OP : {ISD::UMUL_LOHI, ISD::SMUL_LOHI}) { - setOperationAction(OP, MVT::i8, Expand); - setOperationAction(OP, MVT::i16, Expand); - } + setOperationAction({ISD::UMUL_LOHI, ISD::SMUL_LOHI}, {MVT::i8, MVT::i16}, + Expand); // FIXME It would be better to use a custom lowering - for (auto OP : {ISD::SMULO, ISD::UMULO}) { - setOperationAction(OP, MVT::i8, Expand); - setOperationAction(OP, MVT::i16, Expand); - setOperationAction(OP, MVT::i32, Expand); - } + setOperationAction({ISD::SMULO, ISD::UMULO}, {MVT::i8, MVT::i16, MVT::i32}, + Expand); - for (auto OP : {ISD::SHL_PARTS, ISD::SRA_PARTS, ISD::SRL_PARTS}) - setOperationAction(OP, MVT::i32, Custom); + setOperationAction({ISD::SHL_PARTS, ISD::SRA_PARTS, ISD::SRL_PARTS}, MVT::i32, + Custom); // Add/Sub overflow ops with MVT::Glues are lowered to CCR dependences. - for (auto VT : {MVT::i8, MVT::i16, MVT::i32}) { - setOperationAction(ISD::ADDC, VT, Custom); - setOperationAction(ISD::ADDE, VT, Custom); - setOperationAction(ISD::SUBC, VT, Custom); - setOperationAction(ISD::SUBE, VT, Custom); - } + setOperationAction({ISD::ADDC, ISD::ADDE, ISD::SUBC, ISD::SUBE}, + {MVT::i8, MVT::i16, MVT::i32}, Custom); // SADDO and friends are legal with this setup, i hope - for (auto VT : {MVT::i8, MVT::i16, MVT::i32}) { - setOperationAction(ISD::SADDO, VT, Custom); - setOperationAction(ISD::UADDO, VT, Custom); - setOperationAction(ISD::SSUBO, VT, Custom); - setOperationAction(ISD::USUBO, VT, Custom); - } + setOperationAction({ISD::SADDO, ISD::UADDO, ISD::SSUBO, ISD::USUBO}, + {MVT::i8, MVT::i16, MVT::i32}, Custom); setOperationAction(ISD::BR_JT, MVT::Other, Expand); setOperationAction(ISD::BRCOND, MVT::Other, Custom); @@ -131,27 +116,18 @@ setOperationAction(ISD::SETCCCARRY, VT, Custom); } - for (auto VT : {MVT::i8, MVT::i16, MVT::i32}) { - setOperationAction(ISD::BSWAP, VT, Expand); - setOperationAction(ISD::CTTZ, 
VT, Expand); - setOperationAction(ISD::CTLZ, VT, Expand); - setOperationAction(ISD::CTPOP, VT, Expand); - } + setOperationAction({ISD::BSWAP, ISD::CTTZ, ISD::CTLZ, ISD::CTPOP}, + {MVT::i8, MVT::i16, MVT::i32}, Expand); - setOperationAction(ISD::ConstantPool, MVT::i32, Custom); - setOperationAction(ISD::JumpTable, MVT::i32, Custom); - setOperationAction(ISD::GlobalAddress, MVT::i32, Custom); - setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom); - setOperationAction(ISD::ExternalSymbol, MVT::i32, Custom); - setOperationAction(ISD::BlockAddress, MVT::i32, Custom); + setOperationAction({ISD::ConstantPool, ISD::JumpTable, ISD::GlobalAddress, + ISD::GlobalTLSAddress, ISD::ExternalSymbol, + ISD::BlockAddress}, + MVT::i32, Custom); setOperationAction(ISD::VASTART, MVT::Other, Custom); - setOperationAction(ISD::VAEND, MVT::Other, Expand); - setOperationAction(ISD::VAARG, MVT::Other, Expand); - setOperationAction(ISD::VACOPY, MVT::Other, Expand); + setOperationAction({ISD::VAEND, ISD::VAARG, ISD::VACOPY}, MVT::Other, Expand); - setOperationAction(ISD::STACKSAVE, MVT::Other, Expand); - setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand); + setOperationAction({ISD::STACKSAVE, ISD::STACKRESTORE}, MVT::Other, Expand); setOperationAction(ISD::DYNAMIC_STACKALLOC, PtrVT, Custom); diff --git a/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp b/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp --- a/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp +++ b/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp @@ -62,9 +62,8 @@ setIndexedLoadAction(ISD::POST_INC, MVT::i16, Legal); for (MVT VT : MVT::integer_valuetypes()) { - setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote); - setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote); - setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote); + setLoadExtAction({ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}, VT, MVT::i1, + Promote); setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Expand); setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i16, Expand); } @@ 
-72,69 +71,40 @@ // We don't have any truncstores setTruncStoreAction(MVT::i16, MVT::i8, Expand); - setOperationAction(ISD::SRA, MVT::i8, Custom); - setOperationAction(ISD::SHL, MVT::i8, Custom); - setOperationAction(ISD::SRL, MVT::i8, Custom); - setOperationAction(ISD::SRA, MVT::i16, Custom); - setOperationAction(ISD::SHL, MVT::i16, Custom); - setOperationAction(ISD::SRL, MVT::i16, Custom); - setOperationAction(ISD::ROTL, MVT::i8, Expand); - setOperationAction(ISD::ROTR, MVT::i8, Expand); - setOperationAction(ISD::ROTL, MVT::i16, Expand); - setOperationAction(ISD::ROTR, MVT::i16, Expand); - setOperationAction(ISD::GlobalAddress, MVT::i16, Custom); - setOperationAction(ISD::ExternalSymbol, MVT::i16, Custom); - setOperationAction(ISD::BlockAddress, MVT::i16, Custom); - setOperationAction(ISD::BR_JT, MVT::Other, Expand); - setOperationAction(ISD::BR_CC, MVT::i8, Custom); - setOperationAction(ISD::BR_CC, MVT::i16, Custom); + setOperationAction({ISD::SRA, ISD::SHL, ISD::SRL}, {MVT::i8, MVT::i16}, + Custom); + setOperationAction({ISD::ROTL, ISD::ROTR}, {MVT::i8, MVT::i16}, Expand); + setOperationAction( + {ISD::GlobalAddress, ISD::ExternalSymbol, ISD::BlockAddress}, MVT::i16, + Custom); + setOperationAction(ISD::BR_JT, MVT::Other, Expand); + setOperationAction(ISD::BR_CC, {MVT::i8, MVT::i16}, Custom); setOperationAction(ISD::BRCOND, MVT::Other, Expand); - setOperationAction(ISD::SETCC, MVT::i8, Custom); - setOperationAction(ISD::SETCC, MVT::i16, Custom); - setOperationAction(ISD::SELECT, MVT::i8, Expand); - setOperationAction(ISD::SELECT, MVT::i16, Expand); - setOperationAction(ISD::SELECT_CC, MVT::i8, Custom); - setOperationAction(ISD::SELECT_CC, MVT::i16, Custom); + setOperationAction({ISD::SETCC, ISD::SELECT, ISD::SELECT_CC}, + {MVT::i8, MVT::i16}, Custom); setOperationAction(ISD::SIGN_EXTEND, MVT::i16, Custom); - setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i8, Expand); - setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i16, Expand); - 
setOperationAction(ISD::STACKSAVE, MVT::Other, Expand); - setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand); - - setOperationAction(ISD::CTTZ, MVT::i8, Expand); - setOperationAction(ISD::CTTZ, MVT::i16, Expand); - setOperationAction(ISD::CTLZ, MVT::i8, Expand); - setOperationAction(ISD::CTLZ, MVT::i16, Expand); - setOperationAction(ISD::CTPOP, MVT::i8, Expand); - setOperationAction(ISD::CTPOP, MVT::i16, Expand); - - setOperationAction(ISD::SHL_PARTS, MVT::i8, Expand); - setOperationAction(ISD::SHL_PARTS, MVT::i16, Expand); - setOperationAction(ISD::SRL_PARTS, MVT::i8, Expand); - setOperationAction(ISD::SRL_PARTS, MVT::i16, Expand); - setOperationAction(ISD::SRA_PARTS, MVT::i8, Expand); - setOperationAction(ISD::SRA_PARTS, MVT::i16, Expand); + setOperationAction(ISD::DYNAMIC_STACKALLOC, {MVT::i8, MVT::i16}, Expand); + setOperationAction({ISD::STACKSAVE, ISD::STACKRESTORE}, MVT::Other, Expand); + + setOperationAction({ISD::CTTZ, ISD::CTLZ, ISD::CTPOP}, {MVT::i8, MVT::i16}, + Expand); + + setOperationAction({ISD::SHL_PARTS, ISD::SRL_PARTS, ISD::SRA_PARTS}, + {MVT::i8, MVT::i16}, Expand); setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand); // FIXME: Implement efficiently multiplication by a constant - setOperationAction(ISD::MUL, MVT::i8, Promote); - setOperationAction(ISD::MULHS, MVT::i8, Promote); - setOperationAction(ISD::MULHU, MVT::i8, Promote); - setOperationAction(ISD::SMUL_LOHI, MVT::i8, Promote); - setOperationAction(ISD::UMUL_LOHI, MVT::i8, Promote); + setOperationAction( + {ISD::MUL, ISD::MULHS, ISD::MULHU, ISD::SMUL_LOHI, ISD::UMUL_LOHI}, + MVT::i8, Promote); setOperationAction(ISD::MUL, MVT::i16, LibCall); - setOperationAction(ISD::MULHS, MVT::i16, Expand); - setOperationAction(ISD::MULHU, MVT::i16, Expand); - setOperationAction(ISD::SMUL_LOHI, MVT::i16, Expand); - setOperationAction(ISD::UMUL_LOHI, MVT::i16, Expand); - - setOperationAction(ISD::UDIV, MVT::i8, Promote); - setOperationAction(ISD::UDIVREM, MVT::i8, Promote); - 
setOperationAction(ISD::UREM, MVT::i8, Promote); - setOperationAction(ISD::SDIV, MVT::i8, Promote); - setOperationAction(ISD::SDIVREM, MVT::i8, Promote); - setOperationAction(ISD::SREM, MVT::i8, Promote); + setOperationAction({ISD::MULHS, ISD::MULHU, ISD::SMUL_LOHI, ISD::UMUL_LOHI}, + MVT::i16, Expand); + + setOperationAction( + {ISD::UDIV, ISD::UDIVREM, ISD::UREM, ISD::SDIV, ISD::SDIVREM, ISD::SREM}, + MVT::i8, Promote); setOperationAction(ISD::UDIV, MVT::i16, LibCall); setOperationAction(ISD::UDIVREM, MVT::i16, Expand); setOperationAction(ISD::UREM, MVT::i16, LibCall); @@ -144,9 +114,7 @@ // varargs support setOperationAction(ISD::VASTART, MVT::Other, Custom); - setOperationAction(ISD::VAARG, MVT::Other, Expand); - setOperationAction(ISD::VAEND, MVT::Other, Expand); - setOperationAction(ISD::VACOPY, MVT::Other, Expand); + setOperationAction({ISD::VAARG, ISD::VAEND, ISD::VACOPY}, MVT::Other, Expand); setOperationAction(ISD::JumpTable, MVT::i16, Custom); // EABI Libcalls - EABI Section 6.2 diff --git a/llvm/lib/Target/Mips/Mips16ISelLowering.cpp b/llvm/lib/Target/Mips/Mips16ISelLowering.cpp --- a/llvm/lib/Target/Mips/Mips16ISelLowering.cpp +++ b/llvm/lib/Target/Mips/Mips16ISelLowering.cpp @@ -128,23 +128,14 @@ setMips16HardFloatLibCalls(); setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Expand); - setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Expand); - setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Expand); - setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i32, Expand); - setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i32, Expand); - setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i32, Expand); - setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i32, Expand); - setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i32, Expand); - setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i32, Expand); - setOperationAction(ISD::ATOMIC_LOAD_MIN, MVT::i32, Expand); - setOperationAction(ISD::ATOMIC_LOAD_MAX, MVT::i32, Expand); - setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i32, Expand); - 
setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i32, Expand); - - setOperationAction(ISD::ROTR, MVT::i32, Expand); - setOperationAction(ISD::ROTR, MVT::i64, Expand); - setOperationAction(ISD::BSWAP, MVT::i32, Expand); - setOperationAction(ISD::BSWAP, MVT::i64, Expand); + setOperationAction( + {ISD::ATOMIC_CMP_SWAP, ISD::ATOMIC_SWAP, ISD::ATOMIC_LOAD_ADD, + ISD::ATOMIC_LOAD_SUB, ISD::ATOMIC_LOAD_AND, ISD::ATOMIC_LOAD_OR, + ISD::ATOMIC_LOAD_XOR, ISD::ATOMIC_LOAD_NAND, ISD::ATOMIC_LOAD_MIN, + ISD::ATOMIC_LOAD_MAX, ISD::ATOMIC_LOAD_UMIN, ISD::ATOMIC_LOAD_UMAX}, + MVT::i32, Expand); + + setOperationAction({ISD::ROTR, ISD::BSWAP}, {MVT::i32, MVT::i64}, Expand); computeRegisterProperties(STI.getRegisterInfo()); } diff --git a/llvm/lib/Target/Mips/MipsISelLowering.cpp b/llvm/lib/Target/Mips/MipsISelLowering.cpp --- a/llvm/lib/Target/Mips/MipsISelLowering.cpp +++ b/llvm/lib/Target/Mips/MipsISelLowering.cpp @@ -297,11 +297,9 @@ ZeroOrNegativeOneBooleanContent); // Load extented operations for i1 types must be promoted - for (MVT VT : MVT::integer_valuetypes()) { - setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote); - setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote); - setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote); - } + for (MVT VT : MVT::integer_valuetypes()) + setLoadExtAction({ISD::EXTLOAD, ISD::ZEXTLOAD, ISD::SEXTLOAD}, VT, MVT::i1, + Promote); // MIPS doesn't have extending float->double load/store. 
Set LoadExtAction // for f32, f16 @@ -329,138 +327,81 @@ AddPromotedToType(ISD::SETCC, MVT::i1, MVT::i32); // Mips Custom Operations - setOperationAction(ISD::BR_JT, MVT::Other, Expand); - setOperationAction(ISD::GlobalAddress, MVT::i32, Custom); - setOperationAction(ISD::BlockAddress, MVT::i32, Custom); - setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom); - setOperationAction(ISD::JumpTable, MVT::i32, Custom); - setOperationAction(ISD::ConstantPool, MVT::i32, Custom); - setOperationAction(ISD::SELECT, MVT::f32, Custom); - setOperationAction(ISD::SELECT, MVT::f64, Custom); - setOperationAction(ISD::SELECT, MVT::i32, Custom); - setOperationAction(ISD::SETCC, MVT::f32, Custom); - setOperationAction(ISD::SETCC, MVT::f64, Custom); - setOperationAction(ISD::BRCOND, MVT::Other, Custom); - setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom); - setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom); - setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom); - - if (!(TM.Options.NoNaNsFPMath || Subtarget.inAbs2008Mode())) { - setOperationAction(ISD::FABS, MVT::f32, Custom); - setOperationAction(ISD::FABS, MVT::f64, Custom); - } + setOperationAction(ISD::BR_JT, MVT::Other, Expand); + setOperationAction({ISD::GlobalAddress, ISD::BlockAddress, + ISD::GlobalTLSAddress, ISD::JumpTable, ISD::SELECT, + ISD::ConstantPool, ISD::FP_TO_SINT}, + MVT::i32, Custom); + setOperationAction({ISD::SELECT, ISD::SETCC, ISD::FCOPYSIGN}, MVT::f32, + Custom); + setOperationAction({ISD::SELECT, ISD::SETCC, ISD::FCOPYSIGN}, MVT::f64, + Custom); + setOperationAction(ISD::BRCOND, MVT::Other, Custom); + + if (!(TM.Options.NoNaNsFPMath || Subtarget.inAbs2008Mode())) + setOperationAction(ISD::FABS, {MVT::f32, MVT::f64}, Custom); - if (Subtarget.isGP64bit()) { - setOperationAction(ISD::GlobalAddress, MVT::i64, Custom); - setOperationAction(ISD::BlockAddress, MVT::i64, Custom); - setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom); - setOperationAction(ISD::JumpTable, MVT::i64, Custom); - 
setOperationAction(ISD::ConstantPool, MVT::i64, Custom); - setOperationAction(ISD::SELECT, MVT::i64, Custom); - setOperationAction(ISD::LOAD, MVT::i64, Custom); - setOperationAction(ISD::STORE, MVT::i64, Custom); - setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom); - setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom); - setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom); - setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom); - } + if (Subtarget.isGP64bit()) + setOperationAction( + {ISD::GlobalAddress, ISD::BlockAddress, ISD::GlobalTLSAddress, + ISD::JumpTable, ISD::ConstantPool, ISD::SELECT, ISD::LOAD, ISD::STORE, + ISD::FP_TO_SINT, ISD::SHL_PARTS, ISD::SRA_PARTS, ISD::SRL_PARTS}, + MVT::i64, Custom); - if (!Subtarget.isGP64bit()) { - setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom); - setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom); - setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom); - } + if (!Subtarget.isGP64bit()) + setOperationAction({ISD::SHL_PARTS, ISD::SRA_PARTS, ISD::SRL_PARTS}, + MVT::i32, Custom); - setOperationAction(ISD::EH_DWARF_CFA, MVT::i32, Custom); + setOperationAction(ISD::EH_DWARF_CFA, MVT::i32, Custom); if (Subtarget.isGP64bit()) - setOperationAction(ISD::EH_DWARF_CFA, MVT::i64, Custom); + setOperationAction(ISD::EH_DWARF_CFA, MVT::i64, Custom); - setOperationAction(ISD::SDIV, MVT::i32, Expand); - setOperationAction(ISD::SREM, MVT::i32, Expand); - setOperationAction(ISD::UDIV, MVT::i32, Expand); - setOperationAction(ISD::UREM, MVT::i32, Expand); - setOperationAction(ISD::SDIV, MVT::i64, Expand); - setOperationAction(ISD::SREM, MVT::i64, Expand); - setOperationAction(ISD::UDIV, MVT::i64, Expand); - setOperationAction(ISD::UREM, MVT::i64, Expand); + setOperationAction({ISD::SDIV, ISD::SREM, ISD::UDIV, ISD::UREM}, + {MVT::i32, MVT::i64}, Expand); // Operations not directly supported by Mips. 
- setOperationAction(ISD::BR_CC, MVT::f32, Expand); - setOperationAction(ISD::BR_CC, MVT::f64, Expand); - setOperationAction(ISD::BR_CC, MVT::i32, Expand); - setOperationAction(ISD::BR_CC, MVT::i64, Expand); - setOperationAction(ISD::SELECT_CC, MVT::i32, Expand); - setOperationAction(ISD::SELECT_CC, MVT::i64, Expand); - setOperationAction(ISD::SELECT_CC, MVT::f32, Expand); - setOperationAction(ISD::SELECT_CC, MVT::f64, Expand); - setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand); - setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand); - setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand); - setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand); - setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand); - if (Subtarget.hasCnMips()) { - setOperationAction(ISD::CTPOP, MVT::i32, Legal); - setOperationAction(ISD::CTPOP, MVT::i64, Legal); - } else { - setOperationAction(ISD::CTPOP, MVT::i32, Expand); - setOperationAction(ISD::CTPOP, MVT::i64, Expand); - } - setOperationAction(ISD::CTTZ, MVT::i32, Expand); - setOperationAction(ISD::CTTZ, MVT::i64, Expand); - setOperationAction(ISD::ROTL, MVT::i32, Expand); - setOperationAction(ISD::ROTL, MVT::i64, Expand); - setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand); - setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Expand); + setOperationAction({ISD::BR_CC, ISD::SELECT_CC}, + {MVT::f32, MVT::f64, MVT::i32, MVT::i64}, Expand); + setOperationAction({ISD::UINT_TO_FP, ISD::FP_TO_UINT}, {MVT::i32, MVT::i64}, + Expand); + setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand); + if (Subtarget.hasCnMips()) + setOperationAction(ISD::CTPOP, {MVT::i32, MVT::i64}, Legal); + else + setOperationAction(ISD::CTPOP, {MVT::i32, MVT::i64}, Expand); + setOperationAction({ISD::CTTZ, ISD::ROTL, ISD::DYNAMIC_STACKALLOC}, + {MVT::i32, MVT::i64}, Expand); if (!Subtarget.hasMips32r2()) - setOperationAction(ISD::ROTR, MVT::i32, Expand); + setOperationAction(ISD::ROTR, MVT::i32, Expand); if (!Subtarget.hasMips64r2()) - 
setOperationAction(ISD::ROTR, MVT::i64, Expand); - - setOperationAction(ISD::FSIN, MVT::f32, Expand); - setOperationAction(ISD::FSIN, MVT::f64, Expand); - setOperationAction(ISD::FCOS, MVT::f32, Expand); - setOperationAction(ISD::FCOS, MVT::f64, Expand); - setOperationAction(ISD::FSINCOS, MVT::f32, Expand); - setOperationAction(ISD::FSINCOS, MVT::f64, Expand); - setOperationAction(ISD::FPOW, MVT::f32, Expand); - setOperationAction(ISD::FPOW, MVT::f64, Expand); - setOperationAction(ISD::FLOG, MVT::f32, Expand); - setOperationAction(ISD::FLOG2, MVT::f32, Expand); - setOperationAction(ISD::FLOG10, MVT::f32, Expand); - setOperationAction(ISD::FEXP, MVT::f32, Expand); - setOperationAction(ISD::FMA, MVT::f32, Expand); - setOperationAction(ISD::FMA, MVT::f64, Expand); - setOperationAction(ISD::FREM, MVT::f32, Expand); - setOperationAction(ISD::FREM, MVT::f64, Expand); + setOperationAction(ISD::ROTR, MVT::i64, Expand); + + setOperationAction( + {ISD::FSIN, ISD::FCOS, ISD::FSINCOS, ISD::FPOW, ISD::FMA, ISD::FREM}, + {MVT::f32, MVT::f64}, Expand); + + setOperationAction({ISD::FLOG, ISD::FLOG2, ISD::FLOG10, ISD::FEXP}, MVT::f32, + Expand); // Lower f16 conversion operations into library calls - setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand); - setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand); - setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand); - setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand); + setOperationAction({ISD::FP16_TO_FP, ISD::FP_TO_FP16}, {MVT::f32, MVT::f64}, + Expand); setOperationAction(ISD::EH_RETURN, MVT::Other, Custom); - setOperationAction(ISD::VASTART, MVT::Other, Custom); - setOperationAction(ISD::VAARG, MVT::Other, Custom); - setOperationAction(ISD::VACOPY, MVT::Other, Expand); - setOperationAction(ISD::VAEND, MVT::Other, Expand); + setOperationAction({ISD::VASTART, ISD::VAARG}, MVT::Other, Custom); + setOperationAction({ISD::VACOPY, ISD::VAEND}, MVT::Other, Expand); // Use the default for now - setOperationAction(ISD::STACKSAVE, 
MVT::Other, Expand); - setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand); + setOperationAction({ISD::STACKSAVE, ISD::STACKRESTORE}, MVT::Other, Expand); - if (!Subtarget.isGP64bit()) { - setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Expand); - setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Expand); - } + if (!Subtarget.isGP64bit()) + setOperationAction({ISD::ATOMIC_LOAD, ISD::ATOMIC_STORE}, MVT::i64, Expand); - if (!Subtarget.hasMips32r2()) { - setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand); - setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand); - } + if (!Subtarget.hasMips32r2()) + setOperationAction(ISD::SIGN_EXTEND_INREG, {MVT::i8, MVT::i16}, Expand); // MIPS16 lacks MIPS32's clz and clo instructions. if (!Subtarget.hasMips32() || Subtarget.inMips16Mode()) @@ -474,9 +415,8 @@ setOperationAction(ISD::BSWAP, MVT::i64, Expand); if (Subtarget.isGP64bit()) { - setLoadExtAction(ISD::SEXTLOAD, MVT::i64, MVT::i32, Custom); - setLoadExtAction(ISD::ZEXTLOAD, MVT::i64, MVT::i32, Custom); - setLoadExtAction(ISD::EXTLOAD, MVT::i64, MVT::i32, Custom); + setLoadExtAction({ISD::SEXTLOAD, ISD::ZEXTLOAD, ISD::EXTLOAD}, MVT::i64, + MVT::i32, Custom); setTruncStoreAction(MVT::i64, MVT::i32, Custom); } @@ -487,12 +427,8 @@ if (ABI.IsO32()) { // These libcalls are not available in 32-bit. - setLibcallName(RTLIB::SHL_I128, nullptr); - setLibcallName(RTLIB::SRL_I128, nullptr); - setLibcallName(RTLIB::SRA_I128, nullptr); - setLibcallName(RTLIB::MUL_I128, nullptr); - setLibcallName(RTLIB::MULO_I64, nullptr); - setLibcallName(RTLIB::MULO_I128, nullptr); + setLibcallName({RTLIB::SHL_I128, RTLIB::SRL_I128, RTLIB::SRA_I128, + RTLIB::MUL_I128, RTLIB::MULO_I64, RTLIB::MULO_I128}); } setMinFunctionAlignment(Subtarget.isGP64bit() ? 
Align(8) : Align(4)); diff --git a/llvm/lib/Target/Mips/MipsSEISelLowering.cpp b/llvm/lib/Target/Mips/MipsSEISelLowering.cpp --- a/llvm/lib/Target/Mips/MipsSEISelLowering.cpp +++ b/llvm/lib/Target/Mips/MipsSEISelLowering.cpp @@ -75,9 +75,8 @@ for (MVT VT0 : MVT::fixedlen_vector_valuetypes()) { for (MVT VT1 : MVT::fixedlen_vector_valuetypes()) { setTruncStoreAction(VT0, VT1, Expand); - setLoadExtAction(ISD::SEXTLOAD, VT0, VT1, Expand); - setLoadExtAction(ISD::ZEXTLOAD, VT0, VT1, Expand); - setLoadExtAction(ISD::EXTLOAD, VT0, VT1, Expand); + setLoadExtAction({ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}, VT0, VT1, + Expand); } } } @@ -92,20 +91,16 @@ for (unsigned Opc = 0; Opc < ISD::BUILTIN_OP_END; ++Opc) setOperationAction(Opc, VecTy, Expand); - setOperationAction(ISD::ADD, VecTy, Legal); - setOperationAction(ISD::SUB, VecTy, Legal); - setOperationAction(ISD::LOAD, VecTy, Legal); - setOperationAction(ISD::STORE, VecTy, Legal); - setOperationAction(ISD::BITCAST, VecTy, Legal); + setOperationAction( + {ISD::ADD, ISD::SUB, ISD::LOAD, ISD::STORE, ISD::BITCAST}, VecTy, + Legal); } setTargetDAGCombine( {ISD::SHL, ISD::SRA, ISD::SRL, ISD::SETCC, ISD::VSELECT}); - if (Subtarget.hasMips32r2()) { - setOperationAction(ISD::ADDC, MVT::i32, Legal); - setOperationAction(ISD::ADDE, MVT::i32, Legal); - } + if (Subtarget.hasMips32r2()) + setOperationAction({ISD::ADDC, ISD::ADDE}, MVT::i32, Legal); } if (Subtarget.hasDSPR2()) @@ -122,41 +117,17 @@ // f16 is a storage-only type, always promote it to f32. 
addRegisterClass(MVT::f16, &Mips::MSA128HRegClass); - setOperationAction(ISD::SETCC, MVT::f16, Promote); - setOperationAction(ISD::BR_CC, MVT::f16, Promote); - setOperationAction(ISD::SELECT_CC, MVT::f16, Promote); - setOperationAction(ISD::SELECT, MVT::f16, Promote); - setOperationAction(ISD::FADD, MVT::f16, Promote); - setOperationAction(ISD::FSUB, MVT::f16, Promote); - setOperationAction(ISD::FMUL, MVT::f16, Promote); - setOperationAction(ISD::FDIV, MVT::f16, Promote); - setOperationAction(ISD::FREM, MVT::f16, Promote); - setOperationAction(ISD::FMA, MVT::f16, Promote); - setOperationAction(ISD::FNEG, MVT::f16, Promote); - setOperationAction(ISD::FABS, MVT::f16, Promote); - setOperationAction(ISD::FCEIL, MVT::f16, Promote); - setOperationAction(ISD::FCOPYSIGN, MVT::f16, Promote); - setOperationAction(ISD::FCOS, MVT::f16, Promote); - setOperationAction(ISD::FP_EXTEND, MVT::f16, Promote); - setOperationAction(ISD::FFLOOR, MVT::f16, Promote); - setOperationAction(ISD::FNEARBYINT, MVT::f16, Promote); - setOperationAction(ISD::FPOW, MVT::f16, Promote); - setOperationAction(ISD::FPOWI, MVT::f16, Promote); - setOperationAction(ISD::FRINT, MVT::f16, Promote); - setOperationAction(ISD::FSIN, MVT::f16, Promote); - setOperationAction(ISD::FSINCOS, MVT::f16, Promote); - setOperationAction(ISD::FSQRT, MVT::f16, Promote); - setOperationAction(ISD::FEXP, MVT::f16, Promote); - setOperationAction(ISD::FEXP2, MVT::f16, Promote); - setOperationAction(ISD::FLOG, MVT::f16, Promote); - setOperationAction(ISD::FLOG2, MVT::f16, Promote); - setOperationAction(ISD::FLOG10, MVT::f16, Promote); - setOperationAction(ISD::FROUND, MVT::f16, Promote); - setOperationAction(ISD::FTRUNC, MVT::f16, Promote); - setOperationAction(ISD::FMINNUM, MVT::f16, Promote); - setOperationAction(ISD::FMAXNUM, MVT::f16, Promote); - setOperationAction(ISD::FMINIMUM, MVT::f16, Promote); - setOperationAction(ISD::FMAXIMUM, MVT::f16, Promote); + setOperationAction( + {ISD::SETCC, ISD::BR_CC, ISD::SELECT_CC, 
ISD::SELECT, + ISD::FADD, ISD::FSUB, ISD::FMUL, ISD::FDIV, + ISD::FREM, ISD::FMA, ISD::FNEG, ISD::FABS, + ISD::FCEIL, ISD::FCOPYSIGN, ISD::FCOS, ISD::FP_EXTEND, + ISD::FFLOOR, ISD::FNEARBYINT, ISD::FPOW, ISD::FPOWI, + ISD::FRINT, ISD::FSIN, ISD::FSINCOS, ISD::FSQRT, + ISD::FEXP, ISD::FEXP2, ISD::FLOG, ISD::FLOG2, + ISD::FLOG10, ISD::FROUND, ISD::FTRUNC, ISD::FMINNUM, + ISD::FMAXNUM, ISD::FMINIMUM, ISD::FMAXIMUM}, + MVT::f16, Promote); setTargetDAGCombine({ISD::AND, ISD::OR, ISD::SRA, ISD::VSELECT, ISD::XOR}); } @@ -173,76 +144,57 @@ } } - setOperationAction(ISD::SMUL_LOHI, MVT::i32, Custom); - setOperationAction(ISD::UMUL_LOHI, MVT::i32, Custom); - setOperationAction(ISD::MULHS, MVT::i32, Custom); - setOperationAction(ISD::MULHU, MVT::i32, Custom); + setOperationAction({ISD::SMUL_LOHI, ISD::UMUL_LOHI, ISD::MULHS, ISD::MULHU}, + MVT::i32, Custom); if (Subtarget.hasCnMips()) setOperationAction(ISD::MUL, MVT::i64, Legal); else if (Subtarget.isGP64bit()) setOperationAction(ISD::MUL, MVT::i64, Custom); - if (Subtarget.isGP64bit()) { - setOperationAction(ISD::SMUL_LOHI, MVT::i64, Custom); - setOperationAction(ISD::UMUL_LOHI, MVT::i64, Custom); - setOperationAction(ISD::MULHS, MVT::i64, Custom); - setOperationAction(ISD::MULHU, MVT::i64, Custom); - setOperationAction(ISD::SDIVREM, MVT::i64, Custom); - setOperationAction(ISD::UDIVREM, MVT::i64, Custom); - } + if (Subtarget.isGP64bit()) + setOperationAction({ISD::SMUL_LOHI, ISD::UMUL_LOHI, ISD::MULHS, ISD::MULHU, + ISD::SDIVREM, ISD::UDIVREM}, + MVT::i64, Custom); - setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i64, Custom); - setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom); + setOperationAction({ISD::INTRINSIC_WO_CHAIN, ISD::INTRINSIC_W_CHAIN}, + MVT::i64, Custom); - setOperationAction(ISD::SDIVREM, MVT::i32, Custom); - setOperationAction(ISD::UDIVREM, MVT::i32, Custom); - setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom); - setOperationAction(ISD::LOAD, MVT::i32, Custom); - 
setOperationAction(ISD::STORE, MVT::i32, Custom); + setOperationAction({ISD::SDIVREM, ISD::UDIVREM, ISD::LOAD, ISD::STORE}, + MVT::i32, Custom); + setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom); setTargetDAGCombine(ISD::MUL); - setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom); - setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom); - setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom); + setOperationAction( + {ISD::INTRINSIC_WO_CHAIN, ISD::INTRINSIC_W_CHAIN, ISD::INTRINSIC_VOID}, + MVT::Other, Custom); if (Subtarget.hasMips32r2() && !Subtarget.useSoftFloat() && - !Subtarget.hasMips64()) { + !Subtarget.hasMips64()) setOperationAction(ISD::BITCAST, MVT::i64, Custom); - } - if (NoDPLoadStore) { - setOperationAction(ISD::LOAD, MVT::f64, Custom); - setOperationAction(ISD::STORE, MVT::f64, Custom); - } + if (NoDPLoadStore) + setOperationAction({ISD::LOAD, ISD::STORE}, MVT::f64, Custom); if (Subtarget.hasMips32r6()) { // MIPS32r6 replaces the accumulator-based multiplies with a three register // instruction - setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand); - setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand); - setOperationAction(ISD::MUL, MVT::i32, Legal); - setOperationAction(ISD::MULHS, MVT::i32, Legal); - setOperationAction(ISD::MULHU, MVT::i32, Legal); + setOperationAction({ISD::SMUL_LOHI, ISD::UMUL_LOHI}, MVT::i32, Expand); + setOperationAction({ISD::MUL, ISD::MULHS, ISD::MULHU}, MVT::i32, Legal); // MIPS32r6 replaces the accumulator-based division/remainder with separate // three register division and remainder instructions. 
- setOperationAction(ISD::SDIVREM, MVT::i32, Expand); - setOperationAction(ISD::UDIVREM, MVT::i32, Expand); - setOperationAction(ISD::SDIV, MVT::i32, Legal); - setOperationAction(ISD::UDIV, MVT::i32, Legal); - setOperationAction(ISD::SREM, MVT::i32, Legal); - setOperationAction(ISD::UREM, MVT::i32, Legal); + setOperationAction({ISD::SDIVREM, ISD::UDIVREM}, MVT::i32, Expand); + setOperationAction({ISD::SDIV, ISD::UDIV, ISD::SREM, ISD::UREM}, MVT::i32, + Legal); // MIPS32r6 replaces conditional moves with an equivalent that removes the // need for three GPR read ports. - setOperationAction(ISD::SETCC, MVT::i32, Legal); - setOperationAction(ISD::SELECT, MVT::i32, Legal); + setOperationAction({ISD::SETCC, ISD::SELECT}, MVT::i32, Legal); setOperationAction(ISD::SELECT_CC, MVT::i32, Expand); - setOperationAction(ISD::SETCC, MVT::f32, Legal); - setOperationAction(ISD::SELECT, MVT::f32, Legal); + setOperationAction({ISD::SETCC, ISD::SELECT}, MVT::f32, Legal); setOperationAction(ISD::SELECT_CC, MVT::f32, Expand); assert(Subtarget.isFP64bit() && "FR=1 is required for MIPS32r6"); @@ -253,39 +205,25 @@ setOperationAction(ISD::BRCOND, MVT::Other, Legal); // Floating point > and >= are supported via < and <= - setCondCodeAction(ISD::SETOGE, MVT::f32, Expand); - setCondCodeAction(ISD::SETOGT, MVT::f32, Expand); - setCondCodeAction(ISD::SETUGE, MVT::f32, Expand); - setCondCodeAction(ISD::SETUGT, MVT::f32, Expand); - - setCondCodeAction(ISD::SETOGE, MVT::f64, Expand); - setCondCodeAction(ISD::SETOGT, MVT::f64, Expand); - setCondCodeAction(ISD::SETUGE, MVT::f64, Expand); - setCondCodeAction(ISD::SETUGT, MVT::f64, Expand); + setCondCodeAction({ISD::SETOGE, ISD::SETOGT, ISD::SETUGE, ISD::SETUGT}, + {MVT::f32, MVT::f64}, Expand); } if (Subtarget.hasMips64r6()) { // MIPS64r6 replaces the accumulator-based multiplies with a three register // instruction - setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand); - setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand); - 
setOperationAction(ISD::MUL, MVT::i64, Legal); - setOperationAction(ISD::MULHS, MVT::i64, Legal); - setOperationAction(ISD::MULHU, MVT::i64, Legal); + setOperationAction({ISD::SMUL_LOHI, ISD::UMUL_LOHI}, MVT::i64, Expand); + setOperationAction({ISD::MUL, ISD::MULHS, ISD::MULHU}, MVT::i64, Legal); // MIPS32r6 replaces the accumulator-based division/remainder with separate // three register division and remainder instructions. - setOperationAction(ISD::SDIVREM, MVT::i64, Expand); - setOperationAction(ISD::UDIVREM, MVT::i64, Expand); - setOperationAction(ISD::SDIV, MVT::i64, Legal); - setOperationAction(ISD::UDIV, MVT::i64, Legal); - setOperationAction(ISD::SREM, MVT::i64, Legal); - setOperationAction(ISD::UREM, MVT::i64, Legal); + setOperationAction({ISD::SDIVREM, ISD::UDIVREM}, MVT::i64, Expand); + setOperationAction({ISD::SDIV, ISD::UDIV, ISD::SREM, ISD::UREM}, MVT::i64, + Legal); // MIPS64r6 replaces conditional moves with an equivalent that removes the // need for three GPR read ports. 
- setOperationAction(ISD::SETCC, MVT::i64, Legal); - setOperationAction(ISD::SELECT, MVT::i64, Legal); + setOperationAction({ISD::SETCC, ISD::SELECT}, MVT::i64, Legal); setOperationAction(ISD::SELECT_CC, MVT::i64, Expand); } @@ -315,49 +253,28 @@ for (unsigned Opc = 0; Opc < ISD::BUILTIN_OP_END; ++Opc) setOperationAction(Opc, Ty, Expand); - setOperationAction(ISD::BITCAST, Ty, Legal); - setOperationAction(ISD::LOAD, Ty, Legal); - setOperationAction(ISD::STORE, Ty, Legal); - setOperationAction(ISD::EXTRACT_VECTOR_ELT, Ty, Custom); - setOperationAction(ISD::INSERT_VECTOR_ELT, Ty, Legal); - setOperationAction(ISD::BUILD_VECTOR, Ty, Custom); - setOperationAction(ISD::UNDEF, Ty, Legal); - - setOperationAction(ISD::ADD, Ty, Legal); - setOperationAction(ISD::AND, Ty, Legal); - setOperationAction(ISD::CTLZ, Ty, Legal); - setOperationAction(ISD::CTPOP, Ty, Legal); - setOperationAction(ISD::MUL, Ty, Legal); - setOperationAction(ISD::OR, Ty, Legal); - setOperationAction(ISD::SDIV, Ty, Legal); - setOperationAction(ISD::SREM, Ty, Legal); - setOperationAction(ISD::SHL, Ty, Legal); - setOperationAction(ISD::SRA, Ty, Legal); - setOperationAction(ISD::SRL, Ty, Legal); - setOperationAction(ISD::SUB, Ty, Legal); - setOperationAction(ISD::SMAX, Ty, Legal); - setOperationAction(ISD::SMIN, Ty, Legal); - setOperationAction(ISD::UDIV, Ty, Legal); - setOperationAction(ISD::UREM, Ty, Legal); - setOperationAction(ISD::UMAX, Ty, Legal); - setOperationAction(ISD::UMIN, Ty, Legal); - setOperationAction(ISD::VECTOR_SHUFFLE, Ty, Custom); - setOperationAction(ISD::VSELECT, Ty, Legal); - setOperationAction(ISD::XOR, Ty, Legal); - - if (Ty == MVT::v4i32 || Ty == MVT::v2i64) { - setOperationAction(ISD::FP_TO_SINT, Ty, Legal); - setOperationAction(ISD::FP_TO_UINT, Ty, Legal); - setOperationAction(ISD::SINT_TO_FP, Ty, Legal); - setOperationAction(ISD::UINT_TO_FP, Ty, Legal); - } + setOperationAction( + {ISD::BITCAST, ISD::LOAD, ISD::STORE, ISD::INSERT_VECTOR_ELT, + ISD::UNDEF, ISD::ADD, ISD::AND, 
ISD::CTLZ, + ISD::CTPOP, ISD::MUL, ISD::OR, ISD::SDIV, + ISD::SREM, ISD::SHL, ISD::SRA, ISD::SRL, + ISD::SUB, ISD::SMAX, ISD::SMIN, ISD::UDIV, + ISD::UREM, ISD::UMAX, ISD::UMIN, ISD::VSELECT, + ISD::XOR}, + Ty, Legal); + setOperationAction( + {ISD::EXTRACT_VECTOR_ELT, ISD::BUILD_VECTOR, ISD::VECTOR_SHUFFLE}, Ty, + Custom); + + if (Ty == MVT::v4i32 || Ty == MVT::v2i64) + setOperationAction( + {ISD::FP_TO_SINT, ISD::FP_TO_UINT, ISD::SINT_TO_FP, ISD::UINT_TO_FP}, + Ty, Legal); setOperationAction(ISD::SETCC, Ty, Legal); - setCondCodeAction(ISD::SETNE, Ty, Expand); - setCondCodeAction(ISD::SETGE, Ty, Expand); - setCondCodeAction(ISD::SETGT, Ty, Expand); - setCondCodeAction(ISD::SETUGE, Ty, Expand); - setCondCodeAction(ISD::SETUGT, Ty, Expand); + setCondCodeAction( + {ISD::SETNE, ISD::SETGE, ISD::SETGT, ISD::SETUGE, ISD::SETUGT}, Ty, + Expand); } // Enable MSA support for the given floating-point type and Register class. @@ -369,33 +286,21 @@ for (unsigned Opc = 0; Opc < ISD::BUILTIN_OP_END; ++Opc) setOperationAction(Opc, Ty, Expand); - setOperationAction(ISD::LOAD, Ty, Legal); - setOperationAction(ISD::STORE, Ty, Legal); - setOperationAction(ISD::BITCAST, Ty, Legal); - setOperationAction(ISD::EXTRACT_VECTOR_ELT, Ty, Legal); - setOperationAction(ISD::INSERT_VECTOR_ELT, Ty, Legal); + setOperationAction({ISD::LOAD, ISD::STORE, ISD::BITCAST, + ISD::EXTRACT_VECTOR_ELT, ISD::INSERT_VECTOR_ELT}, + Ty, Legal); setOperationAction(ISD::BUILD_VECTOR, Ty, Custom); if (Ty != MVT::v8f16) { - setOperationAction(ISD::FABS, Ty, Legal); - setOperationAction(ISD::FADD, Ty, Legal); - setOperationAction(ISD::FDIV, Ty, Legal); - setOperationAction(ISD::FEXP2, Ty, Legal); - setOperationAction(ISD::FLOG2, Ty, Legal); - setOperationAction(ISD::FMA, Ty, Legal); - setOperationAction(ISD::FMUL, Ty, Legal); - setOperationAction(ISD::FRINT, Ty, Legal); - setOperationAction(ISD::FSQRT, Ty, Legal); - setOperationAction(ISD::FSUB, Ty, Legal); - setOperationAction(ISD::VSELECT, Ty, Legal); + 
setOperationAction({ISD::FABS, ISD::FADD, ISD::FDIV, ISD::FEXP2, ISD::FLOG2, + ISD::FMA, ISD::FMUL, ISD::FRINT, ISD::FSQRT, ISD::FSUB, + ISD::VSELECT}, + Ty, Legal); setOperationAction(ISD::SETCC, Ty, Legal); - setCondCodeAction(ISD::SETOGE, Ty, Expand); - setCondCodeAction(ISD::SETOGT, Ty, Expand); - setCondCodeAction(ISD::SETUGE, Ty, Expand); - setCondCodeAction(ISD::SETUGT, Ty, Expand); - setCondCodeAction(ISD::SETGE, Ty, Expand); - setCondCodeAction(ISD::SETGT, Ty, Expand); + setCondCodeAction({ISD::SETOGE, ISD::SETOGT, ISD::SETUGE, ISD::SETUGT, + ISD::SETGE, ISD::SETGT}, + Ty, Expand); } } diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp --- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp +++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp @@ -367,64 +367,44 @@ addRegisterClass(MVT::v2f16, &NVPTX::Float16x2RegsRegClass); // Conversion to/from FP16/FP16x2 is always legal. - setOperationAction(ISD::SINT_TO_FP, MVT::f16, Legal); - setOperationAction(ISD::FP_TO_SINT, MVT::f16, Legal); - setOperationAction(ISD::BUILD_VECTOR, MVT::v2f16, Custom); - setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f16, Custom); - setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f16, Expand); - setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f16, Expand); + setOperationAction({ISD::SINT_TO_FP, ISD::FP_TO_SINT}, MVT::f16, Legal); + setOperationAction({ISD::BUILD_VECTOR, ISD::EXTRACT_VECTOR_ELT}, MVT::v2f16, + Custom); + setOperationAction({ISD::INSERT_VECTOR_ELT, ISD::VECTOR_SHUFFLE}, MVT::v2f16, + Expand); setFP16OperationAction(ISD::SETCC, MVT::f16, Legal, Promote); setFP16OperationAction(ISD::SETCC, MVT::v2f16, Legal, Expand); // Operations not directly supported by NVPTX. 
for (MVT VT : {MVT::f16, MVT::v2f16, MVT::f32, MVT::f64, MVT::i1, MVT::i8, - MVT::i16, MVT::i32, MVT::i64}) { - setOperationAction(ISD::SELECT_CC, VT, Expand); - setOperationAction(ISD::BR_CC, VT, Expand); - } + MVT::i16, MVT::i32, MVT::i64}) + setOperationAction({ISD::SELECT_CC, ISD::BR_CC}, VT, Expand); // Some SIGN_EXTEND_INREG can be done using cvt instruction. // For others we will expand to a SHL/SRA pair. - setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i64, Legal); - setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal); - setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Legal); - setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8 , Legal); + setOperationAction(ISD::SIGN_EXTEND_INREG, + {MVT::i64, MVT::i32, MVT::i16, MVT::i8}, Legal); setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand); - setOperationAction(ISD::SHL_PARTS, MVT::i32 , Custom); - setOperationAction(ISD::SRA_PARTS, MVT::i32 , Custom); - setOperationAction(ISD::SRL_PARTS, MVT::i32 , Custom); - setOperationAction(ISD::SHL_PARTS, MVT::i64 , Custom); - setOperationAction(ISD::SRA_PARTS, MVT::i64 , Custom); - setOperationAction(ISD::SRL_PARTS, MVT::i64 , Custom); + setOperationAction({ISD::SHL_PARTS, ISD::SRA_PARTS, ISD::SRL_PARTS}, + {MVT::i32, MVT::i64}, Custom); - setOperationAction(ISD::BITREVERSE, MVT::i32, Legal); - setOperationAction(ISD::BITREVERSE, MVT::i64, Legal); + setOperationAction(ISD::BITREVERSE, {MVT::i32, MVT::i64}, Legal); // TODO: we may consider expanding ROTL/ROTR on older GPUs. Currently on GPUs // that don't have h/w rotation we lower them to multi-instruction assembly. 
// See ROT*_sw in NVPTXIntrInfo.td - setOperationAction(ISD::ROTL, MVT::i64, Legal); - setOperationAction(ISD::ROTR, MVT::i64, Legal); - setOperationAction(ISD::ROTL, MVT::i32, Legal); - setOperationAction(ISD::ROTR, MVT::i32, Legal); - - setOperationAction(ISD::ROTL, MVT::i16, Expand); - setOperationAction(ISD::ROTR, MVT::i16, Expand); - setOperationAction(ISD::ROTL, MVT::i8, Expand); - setOperationAction(ISD::ROTR, MVT::i8, Expand); - setOperationAction(ISD::BSWAP, MVT::i16, Expand); - setOperationAction(ISD::BSWAP, MVT::i32, Expand); - setOperationAction(ISD::BSWAP, MVT::i64, Expand); + setOperationAction({ISD::ROTL, ISD::ROTR}, {MVT::i64, MVT::i32}, Legal); + + setOperationAction({ISD::ROTL, ISD::ROTR}, {MVT::i16, MVT::i8}, Expand); + setOperationAction(ISD::BSWAP, {MVT::i16, MVT::i32, MVT::i64}, Expand); // Indirect branch is not supported. // This also disables Jump Table creation. - setOperationAction(ISD::BR_JT, MVT::Other, Expand); - setOperationAction(ISD::BRIND, MVT::Other, Expand); + setOperationAction({ISD::BR_JT, ISD::BRIND}, MVT::Other, Expand); - setOperationAction(ISD::GlobalAddress, MVT::i32, Custom); - setOperationAction(ISD::GlobalAddress, MVT::i64, Custom); + setOperationAction(ISD::GlobalAddress, {MVT::i32, MVT::i64}, Custom); // We want to legalize constant related memmove and memcopy // intrinsics. 
@@ -447,8 +427,7 @@ setTruncStoreAction(MVT::f64, MVT::f32, Expand); // PTX does not support load / store predicate registers - setOperationAction(ISD::LOAD, MVT::i1, Custom); - setOperationAction(ISD::STORE, MVT::i1, Custom); + setOperationAction({ISD::LOAD, ISD::STORE}, MVT::i1, Custom); for (MVT VT : MVT::integer_valuetypes()) { setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote); @@ -457,46 +436,31 @@ } // This is legal in NVPTX - setOperationAction(ISD::ConstantFP, MVT::f64, Legal); - setOperationAction(ISD::ConstantFP, MVT::f32, Legal); - setOperationAction(ISD::ConstantFP, MVT::f16, Legal); + setOperationAction(ISD::ConstantFP, {MVT::f64, MVT::f32, MVT::f16}, Legal); // TRAP can be lowered to PTX trap setOperationAction(ISD::TRAP, MVT::Other, Legal); // Register custom handling for vector loads/stores - for (MVT VT : MVT::fixedlen_vector_valuetypes()) { - if (IsPTXVectorType(VT)) { - setOperationAction(ISD::LOAD, VT, Custom); - setOperationAction(ISD::STORE, VT, Custom); - setOperationAction(ISD::INTRINSIC_W_CHAIN, VT, Custom); - } - } + for (MVT VT : MVT::fixedlen_vector_valuetypes()) + if (IsPTXVectorType(VT)) + setOperationAction({ISD::LOAD, ISD::STORE, ISD::INTRINSIC_W_CHAIN}, VT, + Custom); // Custom handling for i8 intrinsics setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i8, Custom); - for (const auto& Ty : {MVT::i16, MVT::i32, MVT::i64}) { - setOperationAction(ISD::ABS, Ty, Legal); - setOperationAction(ISD::SMIN, Ty, Legal); - setOperationAction(ISD::SMAX, Ty, Legal); - setOperationAction(ISD::UMIN, Ty, Legal); - setOperationAction(ISD::UMAX, Ty, Legal); + setOperationAction({ISD::ABS, ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX, + ISD::CTPOP, ISD::CTLZ}, + {MVT::i16, MVT::i32, MVT::i64}, Legal); - setOperationAction(ISD::CTPOP, Ty, Legal); - setOperationAction(ISD::CTLZ, Ty, Legal); - } - - setOperationAction(ISD::CTTZ, MVT::i16, Expand); - setOperationAction(ISD::CTTZ, MVT::i32, Expand); - setOperationAction(ISD::CTTZ, MVT::i64, Expand); + 
setOperationAction(ISD::CTTZ, {MVT::i16, MVT::i32, MVT::i64}, Expand); // PTX does not directly support SELP of i1, so promote to i32 first setOperationAction(ISD::SELECT, MVT::i1, Custom); // PTX cannot multiply two i64s in a single instruction. - setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand); - setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand); + setOperationAction({ISD::SMUL_LOHI, ISD::UMUL_LOHI}, MVT::i64, Expand); // We have some custom DAG combine patterns for these nodes setTargetDAGCombine({ISD::ADD, ISD::AND, ISD::FADD, ISD::MUL, ISD::SHL, @@ -519,31 +483,24 @@ } // There's no neg.f16 instruction. Expand to (0-x). - setOperationAction(ISD::FNEG, MVT::f16, Expand); - setOperationAction(ISD::FNEG, MVT::v2f16, Expand); + setOperationAction(ISD::FNEG, {MVT::f16, MVT::v2f16}, Expand); // (would be) Library functions. // These map to conversion instructions for scalar FP types. - for (const auto &Op : {ISD::FCEIL, ISD::FFLOOR, ISD::FNEARBYINT, ISD::FRINT, - ISD::FTRUNC}) { - setOperationAction(Op, MVT::f16, Legal); - setOperationAction(Op, MVT::f32, Legal); - setOperationAction(Op, MVT::f64, Legal); + for (const auto &Op : + {ISD::FCEIL, ISD::FFLOOR, ISD::FNEARBYINT, ISD::FRINT, ISD::FTRUNC}) { + setOperationAction(Op, {MVT::f16, MVT::f32, MVT::f64}, Legal); setOperationAction(Op, MVT::v2f16, Expand); } setOperationAction(ISD::FROUND, MVT::f16, Promote); setOperationAction(ISD::FROUND, MVT::v2f16, Expand); - setOperationAction(ISD::FROUND, MVT::f32, Custom); - setOperationAction(ISD::FROUND, MVT::f64, Custom); - + setOperationAction(ISD::FROUND, {MVT::f32, MVT::f64}, Custom); // 'Expand' implements FCOPYSIGN without calling an external library. 
- setOperationAction(ISD::FCOPYSIGN, MVT::f16, Expand); - setOperationAction(ISD::FCOPYSIGN, MVT::v2f16, Expand); - setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand); - setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand); + setOperationAction(ISD::FCOPYSIGN, {MVT::f16, MVT::v2f16, MVT::f32, MVT::f64}, + Expand); // These map to corresponding instructions for f32/f64. f16 must be // promoted to f32. v2f16 is expanded to f16, which is then promoted @@ -551,8 +508,7 @@ for (const auto &Op : {ISD::FDIV, ISD::FREM, ISD::FSQRT, ISD::FSIN, ISD::FCOS, ISD::FABS}) { setOperationAction(Op, MVT::f16, Promote); - setOperationAction(Op, MVT::f32, Legal); - setOperationAction(Op, MVT::f64, Legal); + setOperationAction(Op, {MVT::f32, MVT::f64}, Legal); setOperationAction(Op, MVT::v2f16, Expand); } // max.f16, max.f16x2 and max.NaN are supported on sm_80+. @@ -562,8 +518,7 @@ }; for (const auto &Op : {ISD::FMINNUM, ISD::FMAXNUM}) { setFP16OperationAction(Op, MVT::f16, GetMinMaxAction(Promote), Promote); - setOperationAction(Op, MVT::f32, Legal); - setOperationAction(Op, MVT::f64, Legal); + setOperationAction(Op, {MVT::f32, MVT::f64}, Legal); setFP16OperationAction(Op, MVT::v2f16, GetMinMaxAction(Expand), Expand); } for (const auto &Op : {ISD::FMINIMUM, ISD::FMAXIMUM}) { diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp --- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp +++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp @@ -172,15 +172,13 @@ } // Match BITREVERSE to customized fast code sequence in the td file. - setOperationAction(ISD::BITREVERSE, MVT::i32, Legal); - setOperationAction(ISD::BITREVERSE, MVT::i64, Legal); + setOperationAction(ISD::BITREVERSE, {MVT::i32, MVT::i64}, Legal); // Sub-word ATOMIC_CMP_SWAP need to ensure that the input is zero-extended. setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom); // Custom lower inline assembly to check for special registers. 
- setOperationAction(ISD::INLINEASM, MVT::Other, Custom); - setOperationAction(ISD::INLINEASM_BR, MVT::Other, Custom); + setOperationAction({ISD::INLINEASM, ISD::INLINEASM_BR}, MVT::Other, Custom); // PowerPC has an i16 but no i8 (or i1) SEXTLOAD. for (MVT VT : MVT::integer_valuetypes()) { @@ -196,11 +194,9 @@ } else { // No extending loads from f16 or HW conversions back and forth. setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand); - setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand); - setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand); + setOperationAction({ISD::FP16_TO_FP, ISD::FP_TO_FP16}, {MVT::f64, MVT::f32}, + Expand); setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand); - setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand); - setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand); setTruncStoreAction(MVT::f64, MVT::f16, Expand); setTruncStoreAction(MVT::f32, MVT::f16, Expand); } @@ -226,13 +222,8 @@ } // PowerPC uses ADDC/ADDE/SUBC/SUBE to propagate carry. - const MVT ScalarIntVTs[] = { MVT::i32, MVT::i64 }; - for (MVT VT : ScalarIntVTs) { - setOperationAction(ISD::ADDC, VT, Legal); - setOperationAction(ISD::ADDE, VT, Legal); - setOperationAction(ISD::SUBC, VT, Legal); - setOperationAction(ISD::SUBE, VT, Legal); - } + setOperationAction({ISD::ADDC, ISD::ADDE, ISD::SUBC, ISD::SUBE}, + {MVT::i32, MVT::i64}, Legal); if (Subtarget.useCRBits()) { setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand); @@ -265,16 +256,13 @@ setOperationAction(ISD::FP_TO_UINT, MVT::i1, Promote); AddPromotedToType(ISD::FP_TO_UINT, MVT::i1, isPPC64 ? 
MVT::i64 : MVT::i32); - } else { - setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i1, Custom); - setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i1, Custom); - setOperationAction(ISD::SINT_TO_FP, MVT::i1, Custom); - setOperationAction(ISD::UINT_TO_FP, MVT::i1, Custom); - } + } else + setOperationAction({ISD::STRICT_SINT_TO_FP, ISD::STRICT_UINT_TO_FP, + ISD::SINT_TO_FP, ISD::UINT_TO_FP}, + MVT::i1, Custom); // PowerPC does not support direct load/store of condition registers. - setOperationAction(ISD::LOAD, MVT::i1, Custom); - setOperationAction(ISD::STORE, MVT::i1, Custom); + setOperationAction({ISD::LOAD, ISD::STORE}, MVT::i1, Custom); // FIXME: Remove this once the ANDI glue bug is fixed: if (ANDIGlueBug) @@ -291,18 +279,14 @@ // Expand ppcf128 to i32 by hand for the benefit of llvm-gcc bootstrap on // PPC (the libcall is not available). - setOperationAction(ISD::FP_TO_SINT, MVT::ppcf128, Custom); - setOperationAction(ISD::FP_TO_UINT, MVT::ppcf128, Custom); - setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::ppcf128, Custom); - setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::ppcf128, Custom); + setOperationAction({ISD::FP_TO_SINT, ISD::FP_TO_UINT, ISD::STRICT_FP_TO_SINT, + ISD::STRICT_FP_TO_UINT}, + MVT::ppcf128, Custom); // We do not currently implement these libm ops for PowerPC. - setOperationAction(ISD::FFLOOR, MVT::ppcf128, Expand); - setOperationAction(ISD::FCEIL, MVT::ppcf128, Expand); - setOperationAction(ISD::FTRUNC, MVT::ppcf128, Expand); - setOperationAction(ISD::FRINT, MVT::ppcf128, Expand); - setOperationAction(ISD::FNEARBYINT, MVT::ppcf128, Expand); - setOperationAction(ISD::FREM, MVT::ppcf128, Expand); + setOperationAction({ISD::FFLOOR, ISD::FCEIL, ISD::FTRUNC, ISD::FRINT, + ISD::FNEARBYINT, ISD::FREM}, + MVT::ppcf128, Expand); // PowerPC has no SREM/UREM instructions unless we are on P9 // On P9 we may use a hardware instruction to compute the remainder. @@ -311,105 +295,53 @@ // rather than use the remainder instruction. 
The instructions are legalized // directly because the DivRemPairsPass performs the transformation at the IR // level. - if (Subtarget.isISA3_0()) { - setOperationAction(ISD::SREM, MVT::i32, Legal); - setOperationAction(ISD::UREM, MVT::i32, Legal); - setOperationAction(ISD::SREM, MVT::i64, Legal); - setOperationAction(ISD::UREM, MVT::i64, Legal); - } else { - setOperationAction(ISD::SREM, MVT::i32, Expand); - setOperationAction(ISD::UREM, MVT::i32, Expand); - setOperationAction(ISD::SREM, MVT::i64, Expand); - setOperationAction(ISD::UREM, MVT::i64, Expand); - } + if (Subtarget.isISA3_0()) + setOperationAction({ISD::SREM, ISD::UREM}, {MVT::i32, MVT::i64}, Legal); + else + setOperationAction({ISD::SREM, ISD::UREM}, {MVT::i32, MVT::i64}, Expand); // Don't use SMUL_LOHI/UMUL_LOHI or SDIVREM/UDIVREM to lower SREM/UREM. - setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand); - setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand); - setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand); - setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand); - setOperationAction(ISD::UDIVREM, MVT::i32, Expand); - setOperationAction(ISD::SDIVREM, MVT::i32, Expand); - setOperationAction(ISD::UDIVREM, MVT::i64, Expand); - setOperationAction(ISD::SDIVREM, MVT::i64, Expand); + setOperationAction( + {ISD::UMUL_LOHI, ISD::SMUL_LOHI, ISD::UDIVREM, ISD::SDIVREM}, + {MVT::i32, MVT::i64}, Expand); // Handle constrained floating-point operations of scalar. // TODO: Handle SPE specific operation. 
- setOperationAction(ISD::STRICT_FADD, MVT::f32, Legal); - setOperationAction(ISD::STRICT_FSUB, MVT::f32, Legal); - setOperationAction(ISD::STRICT_FMUL, MVT::f32, Legal); - setOperationAction(ISD::STRICT_FDIV, MVT::f32, Legal); + setOperationAction( + {ISD::STRICT_FADD, ISD::STRICT_FSUB, ISD::STRICT_FMUL, ISD::STRICT_FDIV}, + {MVT::f32, MVT::f64}, Legal); setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Legal); - setOperationAction(ISD::STRICT_FADD, MVT::f64, Legal); - setOperationAction(ISD::STRICT_FSUB, MVT::f64, Legal); - setOperationAction(ISD::STRICT_FMUL, MVT::f64, Legal); - setOperationAction(ISD::STRICT_FDIV, MVT::f64, Legal); - - if (!Subtarget.hasSPE()) { - setOperationAction(ISD::STRICT_FMA, MVT::f32, Legal); - setOperationAction(ISD::STRICT_FMA, MVT::f64, Legal); - } - - if (Subtarget.hasVSX()) { - setOperationAction(ISD::STRICT_FRINT, MVT::f32, Legal); - setOperationAction(ISD::STRICT_FRINT, MVT::f64, Legal); - } + if (!Subtarget.hasSPE()) + setOperationAction(ISD::STRICT_FMA, {MVT::f32, MVT::f64}, Legal); - if (Subtarget.hasFSQRT()) { - setOperationAction(ISD::STRICT_FSQRT, MVT::f32, Legal); - setOperationAction(ISD::STRICT_FSQRT, MVT::f64, Legal); - } + if (Subtarget.hasVSX()) + setOperationAction(ISD::STRICT_FRINT, {MVT::f32, MVT::f64}, Legal); - if (Subtarget.hasFPRND()) { - setOperationAction(ISD::STRICT_FFLOOR, MVT::f32, Legal); - setOperationAction(ISD::STRICT_FCEIL, MVT::f32, Legal); - setOperationAction(ISD::STRICT_FTRUNC, MVT::f32, Legal); - setOperationAction(ISD::STRICT_FROUND, MVT::f32, Legal); + if (Subtarget.hasFSQRT()) + setOperationAction(ISD::STRICT_FSQRT, {MVT::f32, MVT::f64}, Legal); - setOperationAction(ISD::STRICT_FFLOOR, MVT::f64, Legal); - setOperationAction(ISD::STRICT_FCEIL, MVT::f64, Legal); - setOperationAction(ISD::STRICT_FTRUNC, MVT::f64, Legal); - setOperationAction(ISD::STRICT_FROUND, MVT::f64, Legal); - } + if (Subtarget.hasFPRND()) + setOperationAction({ISD::STRICT_FFLOOR, ISD::STRICT_FCEIL, + ISD::STRICT_FTRUNC, 
ISD::STRICT_FROUND}, + {MVT::f32, MVT::f64}, Legal); // We don't support sin/cos/sqrt/fmod/pow - setOperationAction(ISD::FSIN , MVT::f64, Expand); - setOperationAction(ISD::FCOS , MVT::f64, Expand); - setOperationAction(ISD::FSINCOS, MVT::f64, Expand); - setOperationAction(ISD::FREM , MVT::f64, Expand); - setOperationAction(ISD::FPOW , MVT::f64, Expand); - setOperationAction(ISD::FSIN , MVT::f32, Expand); - setOperationAction(ISD::FCOS , MVT::f32, Expand); - setOperationAction(ISD::FSINCOS, MVT::f32, Expand); - setOperationAction(ISD::FREM , MVT::f32, Expand); - setOperationAction(ISD::FPOW , MVT::f32, Expand); + setOperationAction({ISD::FSIN, ISD::FCOS, ISD::FSINCOS, ISD::FREM, ISD::FPOW}, + {MVT::f64, MVT::f32}, Expand); // MASS transformation for LLVM intrinsics with replicating fast-math flag // to be consistent to PPCGenScalarMASSEntries pass if (TM.getOptLevel() == CodeGenOpt::Aggressive && - TM.Options.PPCGenScalarMASSEntries) { - setOperationAction(ISD::FSIN , MVT::f64, Custom); - setOperationAction(ISD::FCOS , MVT::f64, Custom); - setOperationAction(ISD::FPOW , MVT::f64, Custom); - setOperationAction(ISD::FLOG, MVT::f64, Custom); - setOperationAction(ISD::FLOG10, MVT::f64, Custom); - setOperationAction(ISD::FEXP, MVT::f64, Custom); - setOperationAction(ISD::FSIN , MVT::f32, Custom); - setOperationAction(ISD::FCOS , MVT::f32, Custom); - setOperationAction(ISD::FPOW , MVT::f32, Custom); - setOperationAction(ISD::FLOG, MVT::f32, Custom); - setOperationAction(ISD::FLOG10, MVT::f32, Custom); - setOperationAction(ISD::FEXP, MVT::f32, Custom); - } + TM.Options.PPCGenScalarMASSEntries) + setOperationAction( + {ISD::FSIN, ISD::FCOS, ISD::FPOW, ISD::FLOG, ISD::FLOG10, ISD::FEXP}, + {MVT::f64, MVT::f32}, Custom); - if (Subtarget.hasSPE()) { - setOperationAction(ISD::FMA , MVT::f64, Expand); - setOperationAction(ISD::FMA , MVT::f32, Expand); - } else { - setOperationAction(ISD::FMA , MVT::f64, Legal); - setOperationAction(ISD::FMA , MVT::f32, Legal); - } + if 
(Subtarget.hasSPE()) + setOperationAction(ISD::FMA, {MVT::f64, MVT::f32}, Expand); + else + setOperationAction(ISD::FMA, {MVT::f64, MVT::f32}, Legal); if (Subtarget.hasSPE()) setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand); @@ -427,78 +359,55 @@ Subtarget.hasFRES())) setOperationAction(ISD::FSQRT, MVT::f32, Expand); - if (Subtarget.hasFCPSGN()) { - setOperationAction(ISD::FCOPYSIGN, MVT::f64, Legal); - setOperationAction(ISD::FCOPYSIGN, MVT::f32, Legal); - } else { - setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand); - setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand); - } - - if (Subtarget.hasFPRND()) { - setOperationAction(ISD::FFLOOR, MVT::f64, Legal); - setOperationAction(ISD::FCEIL, MVT::f64, Legal); - setOperationAction(ISD::FTRUNC, MVT::f64, Legal); - setOperationAction(ISD::FROUND, MVT::f64, Legal); + if (Subtarget.hasFCPSGN()) + setOperationAction(ISD::FCOPYSIGN, {MVT::f64, MVT::f32}, Legal); + else + setOperationAction(ISD::FCOPYSIGN, {MVT::f64, MVT::f32}, Expand); - setOperationAction(ISD::FFLOOR, MVT::f32, Legal); - setOperationAction(ISD::FCEIL, MVT::f32, Legal); - setOperationAction(ISD::FTRUNC, MVT::f32, Legal); - setOperationAction(ISD::FROUND, MVT::f32, Legal); - } + if (Subtarget.hasFPRND()) + setOperationAction({ISD::FFLOOR, ISD::FCEIL, ISD::FTRUNC, ISD::FROUND}, + {MVT::f64, MVT::f32}, Legal); - // PowerPC does not have BSWAP, but we can use vector BSWAP instruction xxbrd - // to speed up scalar BSWAP64. - // CTPOP or CTTZ were introduced in P8/P9 respectively - setOperationAction(ISD::BSWAP, MVT::i32 , Expand); + // PowerPC does not have BSWAP, but we can use vector BSWAP instruction + // xxbrd to speed up scalar BSWAP64. 
CTPOP or CTTZ were introduced in P8/P9 + // respectively + setOperationAction(ISD::BSWAP, MVT::i32, Expand); if (Subtarget.hasP9Vector() && Subtarget.isPPC64()) - setOperationAction(ISD::BSWAP, MVT::i64 , Custom); + setOperationAction(ISD::BSWAP, MVT::i64, Custom); else - setOperationAction(ISD::BSWAP, MVT::i64 , Expand); - if (Subtarget.isISA3_0()) { - setOperationAction(ISD::CTTZ , MVT::i32 , Legal); - setOperationAction(ISD::CTTZ , MVT::i64 , Legal); - } else { - setOperationAction(ISD::CTTZ , MVT::i32 , Expand); - setOperationAction(ISD::CTTZ , MVT::i64 , Expand); - } + setOperationAction(ISD::BSWAP, MVT::i64, Expand); - if (Subtarget.hasPOPCNTD() == PPCSubtarget::POPCNTD_Fast) { - setOperationAction(ISD::CTPOP, MVT::i32 , Legal); - setOperationAction(ISD::CTPOP, MVT::i64 , Legal); - } else { - setOperationAction(ISD::CTPOP, MVT::i32 , Expand); - setOperationAction(ISD::CTPOP, MVT::i64 , Expand); - } + if (Subtarget.isISA3_0()) + setOperationAction(ISD::CTTZ, {MVT::i32, MVT::i64}, Legal); + else + setOperationAction(ISD::CTTZ, {MVT::i32, MVT::i64}, Expand); + + if (Subtarget.hasPOPCNTD() == PPCSubtarget::POPCNTD_Fast) + setOperationAction(ISD::CTPOP, {MVT::i32, MVT::i64}, Legal); + else + setOperationAction(ISD::CTPOP, {MVT::i32, MVT::i64}, Expand); // PowerPC does not have ROTR - setOperationAction(ISD::ROTR, MVT::i32 , Expand); - setOperationAction(ISD::ROTR, MVT::i64 , Expand); + setOperationAction(ISD::ROTR, {MVT::i32, MVT::i64}, Expand); - if (!Subtarget.useCRBits()) { + if (!Subtarget.useCRBits()) // PowerPC does not have Select - setOperationAction(ISD::SELECT, MVT::i32, Expand); - setOperationAction(ISD::SELECT, MVT::i64, Expand); - setOperationAction(ISD::SELECT, MVT::f32, Expand); - setOperationAction(ISD::SELECT, MVT::f64, Expand); - } + setOperationAction(ISD::SELECT, {MVT::i32, MVT::i64, MVT::f32, MVT::f64}, + Expand); // PowerPC wants to turn select_cc of FP into fsel when possible. 
- setOperationAction(ISD::SELECT_CC, MVT::f32, Custom); - setOperationAction(ISD::SELECT_CC, MVT::f64, Custom); + setOperationAction(ISD::SELECT_CC, {MVT::f32, MVT::f64}, Custom); // PowerPC wants to optimize integer setcc a bit if (!Subtarget.useCRBits()) setOperationAction(ISD::SETCC, MVT::i32, Custom); if (Subtarget.hasFPU()) { - setOperationAction(ISD::STRICT_FSETCC, MVT::f32, Legal); - setOperationAction(ISD::STRICT_FSETCC, MVT::f64, Legal); - setOperationAction(ISD::STRICT_FSETCC, MVT::f128, Legal); + setOperationAction(ISD::STRICT_FSETCC, {MVT::f32, MVT::f64, MVT::f128}, + Legal); - setOperationAction(ISD::STRICT_FSETCCS, MVT::f32, Legal); - setOperationAction(ISD::STRICT_FSETCCS, MVT::f64, Legal); - setOperationAction(ISD::STRICT_FSETCCS, MVT::f128, Legal); + setOperationAction(ISD::STRICT_FSETCCS, {MVT::f32, MVT::f64, MVT::f128}, + Legal); } // PowerPC does not have BRCOND which requires SetCC @@ -509,49 +418,33 @@ if (Subtarget.hasSPE()) { // SPE has built-in conversions - setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Legal); - setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Legal); - setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i32, Legal); - setOperationAction(ISD::FP_TO_SINT, MVT::i32, Legal); - setOperationAction(ISD::SINT_TO_FP, MVT::i32, Legal); - setOperationAction(ISD::UINT_TO_FP, MVT::i32, Legal); + setOperationAction({ISD::STRICT_FP_TO_SINT, ISD::STRICT_SINT_TO_FP, + ISD::STRICT_UINT_TO_FP, ISD::FP_TO_SINT, + ISD::SINT_TO_FP, ISD::UINT_TO_FP}, + MVT::i32, Legal); // SPE supports signaling compare of f32/f64. - setOperationAction(ISD::STRICT_FSETCCS, MVT::f32, Legal); - setOperationAction(ISD::STRICT_FSETCCS, MVT::f64, Legal); + setOperationAction(ISD::STRICT_FSETCCS, {MVT::f32, MVT::f64}, Legal); } else { // PowerPC turns FP_TO_SINT into FCTIWZ and some load/stores. 
- setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom); - setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom); + setOperationAction({ISD::STRICT_FP_TO_SINT, ISD::FP_TO_SINT}, MVT::i32, + Custom); // PowerPC does not have [U|S]INT_TO_FP - setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Expand); - setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i32, Expand); - setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand); - setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand); + setOperationAction({ISD::STRICT_SINT_TO_FP, ISD::STRICT_UINT_TO_FP, + ISD::SINT_TO_FP, ISD::UINT_TO_FP}, + MVT::i32, Expand); } if (Subtarget.hasDirectMove() && isPPC64) { - setOperationAction(ISD::BITCAST, MVT::f32, Legal); - setOperationAction(ISD::BITCAST, MVT::i32, Legal); - setOperationAction(ISD::BITCAST, MVT::i64, Legal); - setOperationAction(ISD::BITCAST, MVT::f64, Legal); - if (TM.Options.UnsafeFPMath) { - setOperationAction(ISD::LRINT, MVT::f64, Legal); - setOperationAction(ISD::LRINT, MVT::f32, Legal); - setOperationAction(ISD::LLRINT, MVT::f64, Legal); - setOperationAction(ISD::LLRINT, MVT::f32, Legal); - setOperationAction(ISD::LROUND, MVT::f64, Legal); - setOperationAction(ISD::LROUND, MVT::f32, Legal); - setOperationAction(ISD::LLROUND, MVT::f64, Legal); - setOperationAction(ISD::LLROUND, MVT::f32, Legal); - } - } else { - setOperationAction(ISD::BITCAST, MVT::f32, Expand); - setOperationAction(ISD::BITCAST, MVT::i32, Expand); - setOperationAction(ISD::BITCAST, MVT::i64, Expand); - setOperationAction(ISD::BITCAST, MVT::f64, Expand); - } + setOperationAction(ISD::BITCAST, {MVT::f32, MVT::i32, MVT::i64, MVT::f64}, + Legal); + if (TM.Options.UnsafeFPMath) + setOperationAction({ISD::LRINT, ISD::LLRINT, ISD::LROUND, ISD::LLROUND}, + {MVT::f64, MVT::f32}, Legal); + } else + setOperationAction(ISD::BITCAST, {MVT::f32, MVT::i32, MVT::i64, MVT::f64}, + Expand); // We cannot sextinreg(i1). Expand to shifts. 
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand); @@ -567,23 +460,16 @@ // We want to legalize GlobalAddress and ConstantPool nodes into the // appropriate instructions to materialize the address. - setOperationAction(ISD::GlobalAddress, MVT::i32, Custom); - setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom); - setOperationAction(ISD::BlockAddress, MVT::i32, Custom); - setOperationAction(ISD::ConstantPool, MVT::i32, Custom); - setOperationAction(ISD::JumpTable, MVT::i32, Custom); - setOperationAction(ISD::GlobalAddress, MVT::i64, Custom); - setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom); - setOperationAction(ISD::BlockAddress, MVT::i64, Custom); - setOperationAction(ISD::ConstantPool, MVT::i64, Custom); - setOperationAction(ISD::JumpTable, MVT::i64, Custom); + setOperationAction({ISD::GlobalAddress, ISD::GlobalTLSAddress, + ISD::BlockAddress, ISD::ConstantPool, ISD::JumpTable}, + {MVT::i32, MVT::i64}, Custom); // TRAP is legal. setOperationAction(ISD::TRAP, MVT::Other, Legal); // TRAMPOLINE is custom lowered. - setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom); - setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom); + setOperationAction({ISD::INIT_TRAMPOLINE, ISD::ADJUST_TRAMPOLINE}, MVT::Other, + Custom); // VASTART needs to be custom lowered to use the VarArgsFrameIndex setOperationAction(ISD::VASTART , MVT::Other, Custom); @@ -599,11 +485,10 @@ setOperationAction(ISD::VAARG, MVT::i32, Promote); AddPromotedToType(ISD::VAARG, MVT::i32, MVT::i64); setOperationAction(ISD::VAARG, MVT::Other, Expand); - } else if (Subtarget.is32BitELFABI()) { + } else if (Subtarget.is32BitELFABI()) // VAARG is custom lowered with the 32-bit SVR4 ABI. 
- setOperationAction(ISD::VAARG, MVT::Other, Custom); - setOperationAction(ISD::VAARG, MVT::i64, Custom); - } else + setOperationAction(ISD::VAARG, {MVT::Other, MVT::i64}, Custom); + else setOperationAction(ISD::VAARG, MVT::Other, Expand); // VACOPY is custom lowered with the 32-bit SVR4 ABI. @@ -613,105 +498,72 @@ setOperationAction(ISD::VACOPY , MVT::Other, Expand); // Use the default implementation. - setOperationAction(ISD::VAEND , MVT::Other, Expand); - setOperationAction(ISD::STACKSAVE , MVT::Other, Expand); + setOperationAction({ISD::VAEND, ISD::STACKSAVE}, MVT::Other, Expand); setOperationAction(ISD::STACKRESTORE , MVT::Other, Custom); - setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32 , Custom); - setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64 , Custom); - setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i32, Custom); - setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i64, Custom); - setOperationAction(ISD::EH_DWARF_CFA, MVT::i32, Custom); - setOperationAction(ISD::EH_DWARF_CFA, MVT::i64, Custom); + setOperationAction({ISD::DYNAMIC_STACKALLOC, ISD::GET_DYNAMIC_AREA_OFFSET, + ISD::EH_DWARF_CFA}, + {MVT::i32, MVT::i64}, Custom); // We want to custom lower some of our intrinsics. - setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom); - setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::f64, Custom); - setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::ppcf128, Custom); - setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v4f32, Custom); - setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v2f64, Custom); + setOperationAction( + ISD::INTRINSIC_WO_CHAIN, + {MVT::Other, MVT::f64, MVT::ppcf128, MVT::v4f32, MVT::v2f64}, Custom); // To handle counter-based loop conditions. 
setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i1, Custom); - setOperationAction(ISD::INTRINSIC_VOID, MVT::i8, Custom); - setOperationAction(ISD::INTRINSIC_VOID, MVT::i16, Custom); - setOperationAction(ISD::INTRINSIC_VOID, MVT::i32, Custom); - setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom); + setOperationAction(ISD::INTRINSIC_VOID, + {MVT::i8, MVT::i16, MVT::i32, MVT::Other}, Custom); // Comparisons that require checking two conditions. - if (Subtarget.hasSPE()) { - setCondCodeAction(ISD::SETO, MVT::f32, Expand); - setCondCodeAction(ISD::SETO, MVT::f64, Expand); - setCondCodeAction(ISD::SETUO, MVT::f32, Expand); - setCondCodeAction(ISD::SETUO, MVT::f64, Expand); - } - setCondCodeAction(ISD::SETULT, MVT::f32, Expand); - setCondCodeAction(ISD::SETULT, MVT::f64, Expand); - setCondCodeAction(ISD::SETUGT, MVT::f32, Expand); - setCondCodeAction(ISD::SETUGT, MVT::f64, Expand); - setCondCodeAction(ISD::SETUEQ, MVT::f32, Expand); - setCondCodeAction(ISD::SETUEQ, MVT::f64, Expand); - setCondCodeAction(ISD::SETOGE, MVT::f32, Expand); - setCondCodeAction(ISD::SETOGE, MVT::f64, Expand); - setCondCodeAction(ISD::SETOLE, MVT::f32, Expand); - setCondCodeAction(ISD::SETOLE, MVT::f64, Expand); - setCondCodeAction(ISD::SETONE, MVT::f32, Expand); - setCondCodeAction(ISD::SETONE, MVT::f64, Expand); - - setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f32, Legal); - setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f64, Legal); + if (Subtarget.hasSPE()) + setCondCodeAction({ISD::SETO, ISD::SETUO}, {MVT::f32, MVT::f64}, Expand); + setCondCodeAction({ISD::SETULT, ISD::SETUGT, ISD::SETUEQ, ISD::SETOGE, + ISD::SETOLE, ISD::SETONE}, + {MVT::f32, MVT::f64}, Expand); + + setOperationAction(ISD::STRICT_FP_EXTEND, {MVT::f32, MVT::f64}, Legal); if (Subtarget.has64BitSupport()) { // They also have instructions for converting between i64 and fp. 
- setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i64, Custom); - setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i64, Expand); - setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i64, Custom); - setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i64, Expand); - setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom); - setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand); - setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom); - setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand); + setOperationAction({ISD::STRICT_FP_TO_SINT, ISD::STRICT_SINT_TO_FP, + ISD::FP_TO_SINT, ISD::SINT_TO_FP}, + MVT::i64, Custom); + setOperationAction({ISD::STRICT_FP_TO_UINT, ISD::STRICT_UINT_TO_FP, + ISD::FP_TO_UINT, ISD::UINT_TO_FP}, + MVT::i64, Expand); // This is just the low 32 bits of a (signed) fp->i64 conversion. // We cannot do this with Promote because i64 is not a legal type. - setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom); - setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom); + setOperationAction({ISD::STRICT_FP_TO_UINT, ISD::FP_TO_UINT}, MVT::i32, + Custom); - if (Subtarget.hasLFIWAX() || Subtarget.isPPC64()) { - setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom); - setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Custom); - } + if (Subtarget.hasLFIWAX() || Subtarget.isPPC64()) + setOperationAction({ISD::SINT_TO_FP, ISD::STRICT_SINT_TO_FP}, MVT::i32, + Custom); } else { // PowerPC does not have FP_TO_UINT on 32-bit implementations. 
- if (Subtarget.hasSPE()) { - setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Legal); - setOperationAction(ISD::FP_TO_UINT, MVT::i32, Legal); - } else { - setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Expand); - setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand); - } + if (Subtarget.hasSPE()) + setOperationAction({ISD::STRICT_FP_TO_UINT, ISD::FP_TO_UINT}, MVT::i32, + Legal); + else + setOperationAction({ISD::STRICT_FP_TO_UINT, ISD::FP_TO_UINT}, MVT::i32, + Expand); } // With the instructions enabled under FPCVT, we can do everything. if (Subtarget.hasFPCVT()) { - if (Subtarget.has64BitSupport()) { - setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i64, Custom); - setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i64, Custom); - setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i64, Custom); - setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i64, Custom); - setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom); - setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom); - setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom); - setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom); - } + if (Subtarget.has64BitSupport()) + setOperationAction({ISD::STRICT_FP_TO_SINT, ISD::STRICT_FP_TO_UINT, + ISD::STRICT_SINT_TO_FP, ISD::STRICT_UINT_TO_FP, + ISD::FP_TO_SINT, ISD::FP_TO_UINT, ISD::SINT_TO_FP, + ISD::UINT_TO_FP}, + MVT::i64, Custom); - setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom); - setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom); - setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Custom); - setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i32, Custom); - setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom); - setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom); - setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom); - setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom); + setOperationAction({ISD::STRICT_FP_TO_SINT, ISD::STRICT_FP_TO_UINT, + ISD::STRICT_SINT_TO_FP, ISD::STRICT_UINT_TO_FP, + ISD::FP_TO_SINT, ISD::FP_TO_UINT, 
ISD::SINT_TO_FP, + ISD::UINT_TO_FP}, + MVT::i32, Custom); } if (Subtarget.use64BitRegs()) { @@ -720,75 +572,49 @@ // BUILD_PAIR can't be handled natively, and should be expanded to shl/or setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand); // 64-bit PowerPC wants to expand i128 shifts itself. - setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom); - setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom); - setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom); - } else { + setOperationAction({ISD::SHL_PARTS, ISD::SRA_PARTS, ISD::SRL_PARTS}, + MVT::i64, Custom); + } else // 32-bit PowerPC wants to expand i64 shifts itself. - setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom); - setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom); - setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom); - } + setOperationAction({ISD::SHL_PARTS, ISD::SRA_PARTS, ISD::SRL_PARTS}, + MVT::i32, Custom); // PowerPC has better expansions for funnel shifts than the generic // TargetLowering::expandFunnelShift. - if (Subtarget.has64BitSupport()) { - setOperationAction(ISD::FSHL, MVT::i64, Custom); - setOperationAction(ISD::FSHR, MVT::i64, Custom); - } - setOperationAction(ISD::FSHL, MVT::i32, Custom); - setOperationAction(ISD::FSHR, MVT::i32, Custom); + if (Subtarget.has64BitSupport()) + setOperationAction({ISD::FSHL, ISD::FSHR}, MVT::i64, Custom); + setOperationAction({ISD::FSHL, ISD::FSHR}, MVT::i32, Custom); - if (Subtarget.hasVSX()) { - setOperationAction(ISD::FMAXNUM_IEEE, MVT::f64, Legal); - setOperationAction(ISD::FMAXNUM_IEEE, MVT::f32, Legal); - setOperationAction(ISD::FMINNUM_IEEE, MVT::f64, Legal); - setOperationAction(ISD::FMINNUM_IEEE, MVT::f32, Legal); - } + if (Subtarget.hasVSX()) + setOperationAction({ISD::FMAXNUM_IEEE, ISD::FMINNUM_IEEE}, + {MVT::f64, MVT::f32}, Legal); if (Subtarget.hasAltivec()) { - for (MVT VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32 }) { - setOperationAction(ISD::SADDSAT, VT, Legal); - setOperationAction(ISD::SSUBSAT, VT, Legal); - 
setOperationAction(ISD::UADDSAT, VT, Legal); - setOperationAction(ISD::USUBSAT, VT, Legal); - } + setOperationAction({ISD::SADDSAT, ISD::SSUBSAT, ISD::UADDSAT, ISD::USUBSAT}, + {MVT::v16i8, MVT::v8i16, MVT::v4i32}, Legal); // First set operation action for all vector types to expand. Then we // will selectively turn on ones that can be effectively codegen'd. for (MVT VT : MVT::fixedlen_vector_valuetypes()) { // add/sub are legal for all supported vector VT's. - setOperationAction(ISD::ADD, VT, Legal); - setOperationAction(ISD::SUB, VT, Legal); + setOperationAction({ISD::ADD, ISD::SUB}, VT, Legal); // For v2i64, these are only valid with P8Vector. This is corrected after // the loop. - if (VT.getSizeInBits() <= 128 && VT.getScalarSizeInBits() <= 64) { - setOperationAction(ISD::SMAX, VT, Legal); - setOperationAction(ISD::SMIN, VT, Legal); - setOperationAction(ISD::UMAX, VT, Legal); - setOperationAction(ISD::UMIN, VT, Legal); - } - else { - setOperationAction(ISD::SMAX, VT, Expand); - setOperationAction(ISD::SMIN, VT, Expand); - setOperationAction(ISD::UMAX, VT, Expand); - setOperationAction(ISD::UMIN, VT, Expand); - } + if (VT.getSizeInBits() <= 128 && VT.getScalarSizeInBits() <= 64) + setOperationAction({ISD::SMAX, ISD::SMIN, ISD::UMAX, ISD::UMIN}, VT, + Legal); + else + setOperationAction({ISD::SMAX, ISD::SMIN, ISD::UMAX, ISD::UMIN}, VT, + Expand); - if (Subtarget.hasVSX()) { - setOperationAction(ISD::FMAXNUM, VT, Legal); - setOperationAction(ISD::FMINNUM, VT, Legal); - } + if (Subtarget.hasVSX()) + setOperationAction({ISD::FMAXNUM, ISD::FMINNUM}, VT, Legal); // Vector instructions introduced in P8 - if (Subtarget.hasP8Altivec() && (VT.SimpleTy != MVT::v1i128)) { - setOperationAction(ISD::CTPOP, VT, Legal); - setOperationAction(ISD::CTLZ, VT, Legal); - } - else { - setOperationAction(ISD::CTPOP, VT, Expand); - setOperationAction(ISD::CTLZ, VT, Expand); - } + if (Subtarget.hasP8Altivec() && (VT.SimpleTy != MVT::v1i128)) + setOperationAction({ISD::CTPOP, ISD::CTLZ}, 
VT, Legal); + else + setOperationAction({ISD::CTPOP, ISD::CTLZ}, VT, Expand); // Vector instructions introduced in P9 if (Subtarget.hasP9Altivec() && (VT.SimpleTy != MVT::v1i128)) @@ -818,58 +644,55 @@ AddPromotedToType (ISD::STORE, VT, MVT::v4i32); // No other operations are legal. - setOperationAction(ISD::MUL , VT, Expand); - setOperationAction(ISD::SDIV, VT, Expand); - setOperationAction(ISD::SREM, VT, Expand); - setOperationAction(ISD::UDIV, VT, Expand); - setOperationAction(ISD::UREM, VT, Expand); - setOperationAction(ISD::FDIV, VT, Expand); - setOperationAction(ISD::FREM, VT, Expand); - setOperationAction(ISD::FNEG, VT, Expand); - setOperationAction(ISD::FSQRT, VT, Expand); - setOperationAction(ISD::FLOG, VT, Expand); - setOperationAction(ISD::FLOG10, VT, Expand); - setOperationAction(ISD::FLOG2, VT, Expand); - setOperationAction(ISD::FEXP, VT, Expand); - setOperationAction(ISD::FEXP2, VT, Expand); - setOperationAction(ISD::FSIN, VT, Expand); - setOperationAction(ISD::FCOS, VT, Expand); - setOperationAction(ISD::FABS, VT, Expand); - setOperationAction(ISD::FFLOOR, VT, Expand); - setOperationAction(ISD::FCEIL, VT, Expand); - setOperationAction(ISD::FTRUNC, VT, Expand); - setOperationAction(ISD::FRINT, VT, Expand); - setOperationAction(ISD::FNEARBYINT, VT, Expand); - setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand); - setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand); - setOperationAction(ISD::BUILD_VECTOR, VT, Expand); - setOperationAction(ISD::MULHU, VT, Expand); - setOperationAction(ISD::MULHS, VT, Expand); - setOperationAction(ISD::UMUL_LOHI, VT, Expand); - setOperationAction(ISD::SMUL_LOHI, VT, Expand); - setOperationAction(ISD::UDIVREM, VT, Expand); - setOperationAction(ISD::SDIVREM, VT, Expand); - setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Expand); - setOperationAction(ISD::FPOW, VT, Expand); - setOperationAction(ISD::BSWAP, VT, Expand); - setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand); - setOperationAction(ISD::ROTL, VT, Expand); 
- setOperationAction(ISD::ROTR, VT, Expand); + setOperationAction({ISD::MUL, + ISD::SDIV, + ISD::SREM, + ISD::UDIV, + ISD::UREM, + ISD::FDIV, + ISD::FREM, + ISD::FNEG, + ISD::FSQRT, + ISD::FLOG, + ISD::FLOG10, + ISD::FLOG2, + ISD::FEXP, + ISD::FEXP2, + ISD::FSIN, + ISD::FCOS, + ISD::FABS, + ISD::FFLOOR, + ISD::FCEIL, + ISD::FTRUNC, + ISD::FRINT, + ISD::FNEARBYINT, + ISD::EXTRACT_VECTOR_ELT, + ISD::INSERT_VECTOR_ELT, + ISD::BUILD_VECTOR, + ISD::MULHU, + ISD::MULHS, + ISD::UMUL_LOHI, + ISD::SMUL_LOHI, + ISD::UDIVREM, + ISD::SDIVREM, + ISD::SCALAR_TO_VECTOR, + ISD::FPOW, + ISD::BSWAP, + ISD::SIGN_EXTEND_INREG, + ISD::ROTL, + ISD::ROTR}, + VT, Expand); for (MVT InnerVT : MVT::fixedlen_vector_valuetypes()) { setTruncStoreAction(VT, InnerVT, Expand); - setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand); - setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand); - setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand); + setLoadExtAction({ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}, VT, + InnerVT, Expand); } } setOperationAction(ISD::SELECT_CC, MVT::v4i32, Expand); - if (!Subtarget.hasP8Vector()) { - setOperationAction(ISD::SMAX, MVT::v2i64, Expand); - setOperationAction(ISD::SMIN, MVT::v2i64, Expand); - setOperationAction(ISD::UMAX, MVT::v2i64, Expand); - setOperationAction(ISD::UMIN, MVT::v2i64, Expand); - } + if (!Subtarget.hasP8Vector()) + setOperationAction({ISD::SMAX, ISD::SMIN, ISD::UMAX, ISD::UMIN}, + MVT::v2i64, Expand); // We can custom expand all VECTOR_SHUFFLEs to VPERM, others we can handle // with merges, splats, etc. @@ -877,38 +700,28 @@ // Vector truncates to sub-word integer that fit in an Altivec/VSX register // are cheap, so handle them before they get expanded to scalar. 
- setOperationAction(ISD::TRUNCATE, MVT::v8i8, Custom); - setOperationAction(ISD::TRUNCATE, MVT::v4i8, Custom); - setOperationAction(ISD::TRUNCATE, MVT::v2i8, Custom); - setOperationAction(ISD::TRUNCATE, MVT::v4i16, Custom); - setOperationAction(ISD::TRUNCATE, MVT::v2i16, Custom); - - setOperationAction(ISD::AND , MVT::v4i32, Legal); - setOperationAction(ISD::OR , MVT::v4i32, Legal); - setOperationAction(ISD::XOR , MVT::v4i32, Legal); - setOperationAction(ISD::LOAD , MVT::v4i32, Legal); + setOperationAction( + ISD::TRUNCATE, + {MVT::v8i8, MVT::v4i8, MVT::v2i8, MVT::v4i16, MVT::v2i16}, Custom); + + setOperationAction({ISD::AND, ISD::OR, ISD::XOR, ISD::LOAD}, MVT::v4i32, + Legal); setOperationAction(ISD::SELECT, MVT::v4i32, Subtarget.useCRBits() ? Legal : Expand); - setOperationAction(ISD::STORE , MVT::v4i32, Legal); - setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v4i32, Legal); - setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v4i32, Legal); - setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v4i32, Legal); - setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4i32, Legal); - setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal); - setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal); - setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal); - setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal); - setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal); - setOperationAction(ISD::FCEIL, MVT::v4f32, Legal); - setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal); - setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal); + setOperationAction({ISD::STORE, ISD::STRICT_FP_TO_SINT, + ISD::STRICT_FP_TO_UINT, ISD::STRICT_SINT_TO_FP, + ISD::STRICT_UINT_TO_FP, ISD::FP_TO_SINT, + ISD::FP_TO_UINT, ISD::SINT_TO_FP, ISD::UINT_TO_FP}, + MVT::v4i32, Legal); + setOperationAction({ISD::FFLOOR, ISD::FCEIL, ISD::FTRUNC, ISD::FNEARBYINT}, + MVT::v4f32, Legal); // Custom lowering ROTL v1i128 to VECTOR_SHUFFLE v16i8. 
setOperationAction(ISD::ROTL, MVT::v1i128, Custom); // With hasAltivec set, we can lower ISD::ROTL to vrl(b|h|w). if (Subtarget.hasAltivec()) - for (auto VT : {MVT::v4i32, MVT::v8i16, MVT::v16i8}) - setOperationAction(ISD::ROTL, VT, Legal); + setOperationAction(ISD::ROTL, {MVT::v4i32, MVT::v8i16, MVT::v16i8}, + Legal); // With hasP8Altivec set, we can lower ISD::ROTL to vrld. if (Subtarget.hasP8Altivec()) setOperationAction(ISD::ROTL, MVT::v2i64, Legal); @@ -918,12 +731,10 @@ addRegisterClass(MVT::v8i16, &PPC::VRRCRegClass); addRegisterClass(MVT::v16i8, &PPC::VRRCRegClass); - setOperationAction(ISD::MUL, MVT::v4f32, Legal); - setOperationAction(ISD::FMA, MVT::v4f32, Legal); + setOperationAction({ISD::MUL, ISD::FMA}, MVT::v4f32, Legal); if (Subtarget.hasVSX()) { - setOperationAction(ISD::FDIV, MVT::v4f32, Legal); - setOperationAction(ISD::FSQRT, MVT::v4f32, Legal); + setOperationAction({ISD::FDIV, ISD::FSQRT}, MVT::v4f32, Legal); setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f64, Custom); } @@ -934,97 +745,61 @@ if (Subtarget.isISA3_1()) { setOperationAction(ISD::MUL, MVT::v2i64, Legal); - setOperationAction(ISD::MULHS, MVT::v2i64, Legal); - setOperationAction(ISD::MULHU, MVT::v2i64, Legal); - setOperationAction(ISD::MULHS, MVT::v4i32, Legal); - setOperationAction(ISD::MULHU, MVT::v4i32, Legal); - setOperationAction(ISD::UDIV, MVT::v2i64, Legal); - setOperationAction(ISD::SDIV, MVT::v2i64, Legal); - setOperationAction(ISD::UDIV, MVT::v4i32, Legal); - setOperationAction(ISD::SDIV, MVT::v4i32, Legal); - setOperationAction(ISD::UREM, MVT::v2i64, Legal); - setOperationAction(ISD::SREM, MVT::v2i64, Legal); - setOperationAction(ISD::UREM, MVT::v4i32, Legal); - setOperationAction(ISD::SREM, MVT::v4i32, Legal); - setOperationAction(ISD::UREM, MVT::v1i128, Legal); - setOperationAction(ISD::SREM, MVT::v1i128, Legal); - setOperationAction(ISD::UDIV, MVT::v1i128, Legal); - setOperationAction(ISD::SDIV, MVT::v1i128, Legal); + setOperationAction({ISD::MULHS, ISD::MULHU}, 
{MVT::v2i64, MVT::v4i32}, + Legal); + setOperationAction({ISD::UREM, ISD::SREM, ISD::UDIV, ISD::SDIV}, + {MVT::v2i64, MVT::v4i32, MVT::v1i128}, Legal); setOperationAction(ISD::ROTL, MVT::v1i128, Legal); } setOperationAction(ISD::MUL, MVT::v8i16, Legal); setOperationAction(ISD::MUL, MVT::v16i8, Custom); - setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom); - setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Custom); + setOperationAction(ISD::SCALAR_TO_VECTOR, {MVT::v4f32, MVT::v4i32}, Custom); - setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom); - setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom); - setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom); - setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom); + setOperationAction(ISD::BUILD_VECTOR, + {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32}, + Custom); // Altivec does not contain unordered floating-point compare instructions - setCondCodeAction(ISD::SETUO, MVT::v4f32, Expand); - setCondCodeAction(ISD::SETUEQ, MVT::v4f32, Expand); - setCondCodeAction(ISD::SETO, MVT::v4f32, Expand); - setCondCodeAction(ISD::SETONE, MVT::v4f32, Expand); + setCondCodeAction({ISD::SETUO, ISD::SETUEQ, ISD::SETO, ISD::SETONE}, + MVT::v4f32, Expand); if (Subtarget.hasVSX()) { - setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2f64, Legal); - setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal); - if (Subtarget.hasP8Vector()) { - setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal); - setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Legal); - } - if (Subtarget.hasDirectMove() && isPPC64) { - setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Legal); - setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Legal); - setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Legal); - setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2i64, Legal); - setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Legal); - setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Legal); - 
setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Legal); - setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Legal); - } + setOperationAction({ISD::SCALAR_TO_VECTOR, ISD::EXTRACT_VECTOR_ELT}, + MVT::v2f64, Legal); + if (Subtarget.hasP8Vector()) + setOperationAction({ISD::SCALAR_TO_VECTOR, ISD::EXTRACT_VECTOR_ELT}, + MVT::v4f32, Legal); + if (Subtarget.hasDirectMove() && isPPC64) + setOperationAction({ISD::SCALAR_TO_VECTOR, ISD::EXTRACT_VECTOR_ELT}, + {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64}, + Legal); setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal); // The nearbyint variants are not allowed to raise the inexact exception // so we can only code-gen them with unsafe math. - if (TM.Options.UnsafeFPMath) { - setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal); - setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal); - } + if (TM.Options.UnsafeFPMath) + setOperationAction(ISD::FNEARBYINT, {MVT::f64, MVT::f32}, Legal); - setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal); - setOperationAction(ISD::FCEIL, MVT::v2f64, Legal); - setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal); - setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal); - setOperationAction(ISD::FRINT, MVT::v2f64, Legal); - setOperationAction(ISD::FROUND, MVT::v2f64, Legal); - setOperationAction(ISD::FROUND, MVT::f64, Legal); - setOperationAction(ISD::FRINT, MVT::f64, Legal); + setOperationAction({ISD::FFLOOR, ISD::FCEIL, ISD::FTRUNC, ISD::FNEARBYINT, + ISD::FRINT, ISD::FROUND}, + MVT::v2f64, Legal); + setOperationAction({ISD::FROUND, ISD::FRINT}, MVT::f64, Legal); - setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal); - setOperationAction(ISD::FRINT, MVT::v4f32, Legal); - setOperationAction(ISD::FROUND, MVT::v4f32, Legal); - setOperationAction(ISD::FROUND, MVT::f32, Legal); - setOperationAction(ISD::FRINT, MVT::f32, Legal); + setOperationAction({ISD::FNEARBYINT, ISD::FRINT, ISD::FROUND}, MVT::v4f32, + Legal); + setOperationAction({ISD::FROUND, ISD::FRINT}, MVT::f32, 
Legal); - setOperationAction(ISD::MUL, MVT::v2f64, Legal); - setOperationAction(ISD::FMA, MVT::v2f64, Legal); + setOperationAction({ISD::MUL, ISD::FMA}, MVT::v2f64, Legal); - setOperationAction(ISD::FDIV, MVT::v2f64, Legal); - setOperationAction(ISD::FSQRT, MVT::v2f64, Legal); + setOperationAction({ISD::FDIV, ISD::FSQRT}, MVT::v2f64, Legal); // Share the Altivec comparison restrictions. - setCondCodeAction(ISD::SETUO, MVT::v2f64, Expand); - setCondCodeAction(ISD::SETUEQ, MVT::v2f64, Expand); - setCondCodeAction(ISD::SETO, MVT::v2f64, Expand); - setCondCodeAction(ISD::SETONE, MVT::v2f64, Expand); + setCondCodeAction({ISD::SETUO, ISD::SETUEQ, ISD::SETO, ISD::SETONE}, + MVT::v2f64, Expand); - setOperationAction(ISD::LOAD, MVT::v2f64, Legal); - setOperationAction(ISD::STORE, MVT::v2f64, Legal); + setOperationAction({ISD::LOAD, ISD::STORE}, MVT::v2f64, Legal); setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Legal); @@ -1038,30 +813,23 @@ addRegisterClass(MVT::v2f64, &PPC::VSRCRegClass); if (Subtarget.hasP8Altivec()) { - setOperationAction(ISD::SHL, MVT::v2i64, Legal); - setOperationAction(ISD::SRA, MVT::v2i64, Legal); - setOperationAction(ISD::SRL, MVT::v2i64, Legal); + setOperationAction({ISD::SHL, ISD::SRA, ISD::SRL}, MVT::v2i64, Legal); // 128 bit shifts can be accomplished via 3 instructions for SHL and // SRL, but not for SRA because of the instructions available: // VS{RL} and VS{RL}O. 
However due to direct move costs, it's not worth // doing - setOperationAction(ISD::SHL, MVT::v1i128, Expand); - setOperationAction(ISD::SRL, MVT::v1i128, Expand); - setOperationAction(ISD::SRA, MVT::v1i128, Expand); + setOperationAction({ISD::SHL, ISD::SRL, ISD::SRA}, MVT::v1i128, Expand); setOperationAction(ISD::SETCC, MVT::v2i64, Legal); } else { - setOperationAction(ISD::SHL, MVT::v2i64, Expand); - setOperationAction(ISD::SRA, MVT::v2i64, Expand); - setOperationAction(ISD::SRL, MVT::v2i64, Expand); + setOperationAction({ISD::SHL, ISD::SRA, ISD::SRL}, MVT::v2i64, Expand); setOperationAction(ISD::SETCC, MVT::v2i64, Custom); // VSX v2i64 only supports non-arithmetic operations. - setOperationAction(ISD::ADD, MVT::v2i64, Expand); - setOperationAction(ISD::SUB, MVT::v2i64, Expand); + setOperationAction({ISD::ADD, ISD::SUB}, MVT::v2i64, Expand); } if (Subtarget.isISA3_1()) @@ -1076,75 +844,35 @@ setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Legal); - setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v2i64, Legal); - setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v2i64, Legal); - setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v2i64, Legal); - setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v2i64, Legal); - setOperationAction(ISD::SINT_TO_FP, MVT::v2i64, Legal); - setOperationAction(ISD::UINT_TO_FP, MVT::v2i64, Legal); - setOperationAction(ISD::FP_TO_SINT, MVT::v2i64, Legal); - setOperationAction(ISD::FP_TO_UINT, MVT::v2i64, Legal); + setOperationAction({ISD::STRICT_SINT_TO_FP, ISD::STRICT_UINT_TO_FP, + ISD::STRICT_FP_TO_SINT, ISD::STRICT_FP_TO_UINT, + ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::FP_TO_SINT, + ISD::FP_TO_UINT}, + MVT::v2i64, Legal); // Custom handling for partial vectors of integers converted to // floating point. We already have optimal handling for v2i32 through // the DAG combine, so those aren't necessary. 
- setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v2i8, Custom); - setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4i8, Custom); - setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v2i16, Custom); - setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4i16, Custom); - setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v2i8, Custom); - setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v4i8, Custom); - setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v2i16, Custom); - setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v4i16, Custom); - setOperationAction(ISD::UINT_TO_FP, MVT::v2i8, Custom); - setOperationAction(ISD::UINT_TO_FP, MVT::v4i8, Custom); - setOperationAction(ISD::UINT_TO_FP, MVT::v2i16, Custom); - setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom); - setOperationAction(ISD::SINT_TO_FP, MVT::v2i8, Custom); - setOperationAction(ISD::SINT_TO_FP, MVT::v4i8, Custom); - setOperationAction(ISD::SINT_TO_FP, MVT::v2i16, Custom); - setOperationAction(ISD::SINT_TO_FP, MVT::v4i16, Custom); - - setOperationAction(ISD::FNEG, MVT::v4f32, Legal); - setOperationAction(ISD::FNEG, MVT::v2f64, Legal); - setOperationAction(ISD::FABS, MVT::v4f32, Legal); - setOperationAction(ISD::FABS, MVT::v2f64, Legal); - setOperationAction(ISD::FCOPYSIGN, MVT::v4f32, Legal); - setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Legal); - - setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom); - setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom); + setOperationAction({ISD::STRICT_UINT_TO_FP, ISD::STRICT_SINT_TO_FP, + ISD::UINT_TO_FP, ISD::SINT_TO_FP}, + {MVT::v2i8, MVT::v4i8, MVT::v2i16, MVT::v4i16}, + Custom); + + setOperationAction({ISD::FNEG, ISD::FABS, ISD::FCOPYSIGN}, + {MVT::v4f32, MVT::v2f64}, Legal); + + setOperationAction(ISD::BUILD_VECTOR, {MVT::v2i64, MVT::v2f64}, Custom); // Handle constrained floating-point operations of vector. // The predictor is `hasVSX` because altivec instruction has // no exception but VSX vector instruction has. 
- setOperationAction(ISD::STRICT_FADD, MVT::v4f32, Legal); - setOperationAction(ISD::STRICT_FSUB, MVT::v4f32, Legal); - setOperationAction(ISD::STRICT_FMUL, MVT::v4f32, Legal); - setOperationAction(ISD::STRICT_FDIV, MVT::v4f32, Legal); - setOperationAction(ISD::STRICT_FMA, MVT::v4f32, Legal); - setOperationAction(ISD::STRICT_FSQRT, MVT::v4f32, Legal); - setOperationAction(ISD::STRICT_FMAXNUM, MVT::v4f32, Legal); - setOperationAction(ISD::STRICT_FMINNUM, MVT::v4f32, Legal); - setOperationAction(ISD::STRICT_FRINT, MVT::v4f32, Legal); - setOperationAction(ISD::STRICT_FFLOOR, MVT::v4f32, Legal); - setOperationAction(ISD::STRICT_FCEIL, MVT::v4f32, Legal); - setOperationAction(ISD::STRICT_FTRUNC, MVT::v4f32, Legal); - setOperationAction(ISD::STRICT_FROUND, MVT::v4f32, Legal); - - setOperationAction(ISD::STRICT_FADD, MVT::v2f64, Legal); - setOperationAction(ISD::STRICT_FSUB, MVT::v2f64, Legal); - setOperationAction(ISD::STRICT_FMUL, MVT::v2f64, Legal); - setOperationAction(ISD::STRICT_FDIV, MVT::v2f64, Legal); - setOperationAction(ISD::STRICT_FMA, MVT::v2f64, Legal); - setOperationAction(ISD::STRICT_FSQRT, MVT::v2f64, Legal); - setOperationAction(ISD::STRICT_FMAXNUM, MVT::v2f64, Legal); - setOperationAction(ISD::STRICT_FMINNUM, MVT::v2f64, Legal); - setOperationAction(ISD::STRICT_FRINT, MVT::v2f64, Legal); - setOperationAction(ISD::STRICT_FFLOOR, MVT::v2f64, Legal); - setOperationAction(ISD::STRICT_FCEIL, MVT::v2f64, Legal); - setOperationAction(ISD::STRICT_FTRUNC, MVT::v2f64, Legal); - setOperationAction(ISD::STRICT_FROUND, MVT::v2f64, Legal); + setOperationAction({ISD::STRICT_FADD, ISD::STRICT_FSUB, ISD::STRICT_FMUL, + ISD::STRICT_FDIV, ISD::STRICT_FMA, ISD::STRICT_FSQRT, + ISD::STRICT_FMAXNUM, ISD::STRICT_FMINNUM, + ISD::STRICT_FRINT, ISD::STRICT_FFLOOR, + ISD::STRICT_FCEIL, ISD::STRICT_FTRUNC, + ISD::STRICT_FROUND}, + {MVT::v4f32, MVT::v2f64}, Legal); addRegisterClass(MVT::v2i64, &PPC::VSRCRegClass); addRegisterClass(MVT::f128, &PPC::VRRCRegClass); @@ -1159,11 +887,9 
@@ setTruncStoreAction(MVT::f128, MVT::f32, Expand); // No implementation for these ops for PowerPC. - setOperationAction(ISD::FSIN, MVT::f128, Expand); - setOperationAction(ISD::FCOS, MVT::f128, Expand); - setOperationAction(ISD::FPOW, MVT::f128, Expand); - setOperationAction(ISD::FPOWI, MVT::f128, Expand); - setOperationAction(ISD::FREM, MVT::f128, Expand); + setOperationAction( + {ISD::FSIN, ISD::FCOS, ISD::FPOW, ISD::FPOWI, ISD::FREM}, MVT::f128, + Expand); } if (Subtarget.hasP8Altivec()) { @@ -1172,123 +898,92 @@ } if (Subtarget.hasP9Vector()) { - setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom); - setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom); + setOperationAction(ISD::INSERT_VECTOR_ELT, {MVT::v4i32, MVT::v4f32}, + Custom); // 128 bit shifts can be accomplished via 3 instructions for SHL and // SRL, but not for SRA because of the instructions available: // VS{RL} and VS{RL}O. - setOperationAction(ISD::SHL, MVT::v1i128, Legal); - setOperationAction(ISD::SRL, MVT::v1i128, Legal); + setOperationAction({ISD::SHL, ISD::SRL}, MVT::v1i128, Legal); setOperationAction(ISD::SRA, MVT::v1i128, Expand); - setOperationAction(ISD::FADD, MVT::f128, Legal); - setOperationAction(ISD::FSUB, MVT::f128, Legal); - setOperationAction(ISD::FDIV, MVT::f128, Legal); - setOperationAction(ISD::FMUL, MVT::f128, Legal); - setOperationAction(ISD::FP_EXTEND, MVT::f128, Legal); - - setOperationAction(ISD::FMA, MVT::f128, Legal); - setCondCodeAction(ISD::SETULT, MVT::f128, Expand); - setCondCodeAction(ISD::SETUGT, MVT::f128, Expand); - setCondCodeAction(ISD::SETUEQ, MVT::f128, Expand); - setCondCodeAction(ISD::SETOGE, MVT::f128, Expand); - setCondCodeAction(ISD::SETOLE, MVT::f128, Expand); - setCondCodeAction(ISD::SETONE, MVT::f128, Expand); - - setOperationAction(ISD::FTRUNC, MVT::f128, Legal); - setOperationAction(ISD::FRINT, MVT::f128, Legal); - setOperationAction(ISD::FFLOOR, MVT::f128, Legal); - setOperationAction(ISD::FCEIL, MVT::f128, Legal); - 
setOperationAction(ISD::FNEARBYINT, MVT::f128, Legal); - setOperationAction(ISD::FROUND, MVT::f128, Legal); - - setOperationAction(ISD::FP_ROUND, MVT::f64, Legal); - setOperationAction(ISD::FP_ROUND, MVT::f32, Legal); + setOperationAction({ISD::FADD, ISD::FSUB, ISD::FDIV, ISD::FMUL, + ISD::FP_EXTEND, ISD::FMA}, + MVT::f128, Legal); + + setCondCodeAction({ISD::SETULT, ISD::SETUGT, ISD::SETUEQ, ISD::SETOGE, + ISD::SETOLE, ISD::SETONE}, + MVT::f128, Expand); + + setOperationAction({ISD::FTRUNC, ISD::FRINT, ISD::FFLOOR, ISD::FCEIL, + ISD::FNEARBYINT, ISD::FROUND}, + MVT::f128, Legal); + + setOperationAction(ISD::FP_ROUND, {MVT::f64, MVT::f32}, Legal); setOperationAction(ISD::BITCAST, MVT::i128, Custom); // Handle constrained floating-point operations of fp128 - setOperationAction(ISD::STRICT_FADD, MVT::f128, Legal); - setOperationAction(ISD::STRICT_FSUB, MVT::f128, Legal); - setOperationAction(ISD::STRICT_FMUL, MVT::f128, Legal); - setOperationAction(ISD::STRICT_FDIV, MVT::f128, Legal); - setOperationAction(ISD::STRICT_FMA, MVT::f128, Legal); - setOperationAction(ISD::STRICT_FSQRT, MVT::f128, Legal); - setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f128, Legal); - setOperationAction(ISD::STRICT_FP_ROUND, MVT::f64, Legal); - setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Legal); - setOperationAction(ISD::STRICT_FRINT, MVT::f128, Legal); - setOperationAction(ISD::STRICT_FNEARBYINT, MVT::f128, Legal); - setOperationAction(ISD::STRICT_FFLOOR, MVT::f128, Legal); - setOperationAction(ISD::STRICT_FCEIL, MVT::f128, Legal); - setOperationAction(ISD::STRICT_FTRUNC, MVT::f128, Legal); - setOperationAction(ISD::STRICT_FROUND, MVT::f128, Legal); + setOperationAction({ISD::STRICT_FADD, ISD::STRICT_FSUB, ISD::STRICT_FMUL, + ISD::STRICT_FDIV, ISD::STRICT_FMA, ISD::STRICT_FSQRT, + ISD::STRICT_FP_EXTEND, ISD::STRICT_FRINT, + ISD::STRICT_FNEARBYINT, ISD::STRICT_FFLOOR, + ISD::STRICT_FCEIL, ISD::STRICT_FTRUNC, + ISD::STRICT_FROUND}, + MVT::f128, Legal); + + 
setOperationAction(ISD::STRICT_FP_ROUND, {MVT::f64, MVT::f32}, Legal); setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Custom); - setOperationAction(ISD::BSWAP, MVT::v8i16, Legal); - setOperationAction(ISD::BSWAP, MVT::v4i32, Legal); - setOperationAction(ISD::BSWAP, MVT::v2i64, Legal); - setOperationAction(ISD::BSWAP, MVT::v1i128, Legal); + setOperationAction( + ISD::BSWAP, {MVT::v8i16, MVT::v4i32, MVT::v2i64, MVT::v1i128}, Legal); } else if (Subtarget.hasVSX()) { - setOperationAction(ISD::LOAD, MVT::f128, Promote); - setOperationAction(ISD::STORE, MVT::f128, Promote); + setOperationAction({ISD::LOAD, ISD::STORE}, MVT::f128, Promote); AddPromotedToType(ISD::LOAD, MVT::f128, MVT::v4i32); AddPromotedToType(ISD::STORE, MVT::f128, MVT::v4i32); // Set FADD/FSUB as libcall to avoid the legalizer to expand the // fp_to_uint and int_to_fp. - setOperationAction(ISD::FADD, MVT::f128, LibCall); - setOperationAction(ISD::FSUB, MVT::f128, LibCall); + setOperationAction({ISD::FADD, ISD::FSUB}, MVT::f128, LibCall); - setOperationAction(ISD::FMUL, MVT::f128, Expand); - setOperationAction(ISD::FDIV, MVT::f128, Expand); - setOperationAction(ISD::FNEG, MVT::f128, Expand); - setOperationAction(ISD::FABS, MVT::f128, Expand); - setOperationAction(ISD::FSQRT, MVT::f128, Expand); - setOperationAction(ISD::FMA, MVT::f128, Expand); - setOperationAction(ISD::FCOPYSIGN, MVT::f128, Expand); + setOperationAction({ISD::FMUL, ISD::FDIV, ISD::FNEG, ISD::FABS, + ISD::FSQRT, ISD::FMA, ISD::FCOPYSIGN}, + MVT::f128, Expand); // Expand the fp_extend if the target type is fp128. - setOperationAction(ISD::FP_EXTEND, MVT::f128, Expand); - setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f128, Expand); + setOperationAction({ISD::FP_EXTEND, ISD::STRICT_FP_EXTEND}, MVT::f128, + Expand); // Expand the fp_round if the source type is fp128. 
- for (MVT VT : {MVT::f32, MVT::f64}) { - setOperationAction(ISD::FP_ROUND, VT, Custom); - setOperationAction(ISD::STRICT_FP_ROUND, VT, Custom); - } - - setOperationAction(ISD::SETCC, MVT::f128, Custom); - setOperationAction(ISD::STRICT_FSETCC, MVT::f128, Custom); - setOperationAction(ISD::STRICT_FSETCCS, MVT::f128, Custom); - setOperationAction(ISD::BR_CC, MVT::f128, Expand); - - // Lower following f128 select_cc pattern: - // select_cc x, y, tv, fv, cc -> select_cc (setcc x, y, cc), 0, tv, fv, NE - setOperationAction(ISD::SELECT_CC, MVT::f128, Custom); - - // We need to handle f128 SELECT_CC with integer result type. - setOperationAction(ISD::SELECT_CC, MVT::i32, Custom); - setOperationAction(ISD::SELECT_CC, MVT::i64, isPPC64 ? Custom : Expand); + setOperationAction({ISD::FP_ROUND, ISD::STRICT_FP_ROUND}, + {MVT::f32, MVT::f64}, Custom); + + setOperationAction( + {ISD::SETCC, ISD::STRICT_FSETCC, ISD::STRICT_FSETCCS}, MVT::f128, + Custom); + setOperationAction(ISD::BR_CC, MVT::f128, Expand); + + // Lower following f128 select_cc pattern: + // select_cc x, y, tv, fv, cc -> select_cc (setcc x, y, cc), 0, tv, fv, + // NE + setOperationAction(ISD::SELECT_CC, MVT::f128, Custom); + + // We need to handle f128 SELECT_CC with integer result type. + setOperationAction(ISD::SELECT_CC, MVT::i32, Custom); + setOperationAction(ISD::SELECT_CC, MVT::i64, isPPC64 ? 
Custom : Expand); } if (Subtarget.hasP9Altivec()) { - if (Subtarget.isISA3_1()) { - setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Legal); - setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Legal); - setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Legal); - setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Legal); - } else { - setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom); - setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom); - } - setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i8, Legal); - setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Legal); - setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i32, Legal); - setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Legal); - setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Legal); - setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i32, Legal); - setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i64, Legal); + if (Subtarget.isISA3_1()) + setOperationAction(ISD::INSERT_VECTOR_ELT, + {MVT::v2i64, MVT::v8i16, MVT::v16i8, MVT::v4i32}, + Legal); + else + setOperationAction(ISD::INSERT_VECTOR_ELT, {MVT::v8i16, MVT::v16i8}, + Custom); + setOperationAction(ISD::SIGN_EXTEND_INREG, + {MVT::v4i8, MVT::v4i16, MVT::v4i32, MVT::v2i8, + MVT::v2i16, MVT::v2i32, MVT::v2i64}, + Legal); } if (Subtarget.hasP10Vector()) { @@ -1298,14 +993,12 @@ if (Subtarget.pairedVectorMemops()) { addRegisterClass(MVT::v256i1, &PPC::VSRpRCRegClass); - setOperationAction(ISD::LOAD, MVT::v256i1, Custom); - setOperationAction(ISD::STORE, MVT::v256i1, Custom); + setOperationAction({ISD::LOAD, ISD::STORE}, MVT::v256i1, Custom); } if (Subtarget.hasMMA()) { addRegisterClass(MVT::v512i1, &PPC::UACCRCRegClass); - setOperationAction(ISD::LOAD, MVT::v512i1, Custom); - setOperationAction(ISD::STORE, MVT::v512i1, Custom); - setOperationAction(ISD::BUILD_VECTOR, MVT::v512i1, Custom); + setOperationAction({ISD::LOAD, ISD::STORE, ISD::BUILD_VECTOR}, MVT::v512i1, + Custom); } if 
(Subtarget.has64BitSupport()) @@ -1316,16 +1009,13 @@ setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, isPPC64 ? Legal : Custom); - if (!isPPC64) { - setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Expand); - setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Expand); - } + if (!isPPC64) + setOperationAction({ISD::ATOMIC_LOAD, ISD::ATOMIC_STORE}, MVT::i64, Expand); - if (shouldInlineQuadwordAtomics()) { - setOperationAction(ISD::ATOMIC_LOAD, MVT::i128, Custom); - setOperationAction(ISD::ATOMIC_STORE, MVT::i128, Custom); - setOperationAction(ISD::INTRINSIC_VOID, MVT::i128, Custom); - } + if (shouldInlineQuadwordAtomics()) + setOperationAction( + {ISD::ATOMIC_LOAD, ISD::ATOMIC_STORE, ISD::INTRINSIC_VOID}, MVT::i128, + Custom); setBooleanContents(ZeroOrOneBooleanContent); @@ -1334,14 +1024,11 @@ setBooleanVectorContents(ZeroOrNegativeOneBooleanContent); } - setLibcallName(RTLIB::MULO_I128, nullptr); + setLibcallName(RTLIB::MULO_I128); if (!isPPC64) { // These libcalls are not available in 32-bit. - setLibcallName(RTLIB::SHL_I128, nullptr); - setLibcallName(RTLIB::SRL_I128, nullptr); - setLibcallName(RTLIB::SRA_I128, nullptr); - setLibcallName(RTLIB::MUL_I128, nullptr); - setLibcallName(RTLIB::MULO_I64, nullptr); + setLibcallName({RTLIB::SHL_I128, RTLIB::SRL_I128, RTLIB::SRA_I128, + RTLIB::MUL_I128, RTLIB::MULO_I64}); } if (!isPPC64) diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp --- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp @@ -170,8 +170,8 @@ setStackPointerRegisterToSaveRestore(RISCV::X2); - for (auto N : {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}) - setLoadExtAction(N, XLenVT, MVT::i1, Promote); + setLoadExtAction({ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}, XLenVT, + MVT::i1, Promote); // TODO: add all necessary setOperationAction calls. 
setOperationAction(ISD::DYNAMIC_STACKALLOC, XLenVT, Expand); @@ -181,100 +181,61 @@ setOperationAction(ISD::BRCOND, MVT::Other, Custom); setOperationAction(ISD::SELECT_CC, XLenVT, Expand); - setOperationAction(ISD::STACKSAVE, MVT::Other, Expand); - setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand); + setOperationAction({ISD::STACKSAVE, ISD::STACKRESTORE}, MVT::Other, Expand); setOperationAction(ISD::VASTART, MVT::Other, Custom); - setOperationAction(ISD::VAARG, MVT::Other, Expand); - setOperationAction(ISD::VACOPY, MVT::Other, Expand); - setOperationAction(ISD::VAEND, MVT::Other, Expand); + setOperationAction({ISD::VAARG, ISD::VACOPY, ISD::VAEND}, MVT::Other, Expand); setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand); - if (!Subtarget.hasStdExtZbb()) { - setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand); - setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand); - } - - if (Subtarget.is64Bit()) { - setOperationAction(ISD::ADD, MVT::i32, Custom); - setOperationAction(ISD::SUB, MVT::i32, Custom); - setOperationAction(ISD::SHL, MVT::i32, Custom); - setOperationAction(ISD::SRA, MVT::i32, Custom); - setOperationAction(ISD::SRL, MVT::i32, Custom); - - setOperationAction(ISD::UADDO, MVT::i32, Custom); - setOperationAction(ISD::USUBO, MVT::i32, Custom); - setOperationAction(ISD::UADDSAT, MVT::i32, Custom); - setOperationAction(ISD::USUBSAT, MVT::i32, Custom); - } else { - setLibcallName(RTLIB::SHL_I128, nullptr); - setLibcallName(RTLIB::SRL_I128, nullptr); - setLibcallName(RTLIB::SRA_I128, nullptr); - setLibcallName(RTLIB::MUL_I128, nullptr); - setLibcallName(RTLIB::MULO_I64, nullptr); - } - - if (!Subtarget.hasStdExtM()) { - setOperationAction(ISD::MUL, XLenVT, Expand); - setOperationAction(ISD::MULHS, XLenVT, Expand); - setOperationAction(ISD::MULHU, XLenVT, Expand); - setOperationAction(ISD::SDIV, XLenVT, Expand); - setOperationAction(ISD::UDIV, XLenVT, Expand); - setOperationAction(ISD::SREM, XLenVT, Expand); - 
setOperationAction(ISD::UREM, XLenVT, Expand); - } else { + if (!Subtarget.hasStdExtZbb()) + setOperationAction(ISD::SIGN_EXTEND_INREG, {MVT::i8, MVT::i16}, Expand); + + if (Subtarget.is64Bit()) + setOperationAction({ISD::ADD, ISD::SUB, ISD::SHL, ISD::SRA, ISD::SRL, + ISD::UADDO, ISD::USUBO, ISD::UADDSAT, ISD::USUBSAT}, + MVT::i32, Custom); + else + setLibcallName({RTLIB::SHL_I128, RTLIB::SRL_I128, RTLIB::SRA_I128, + RTLIB::MUL_I128, RTLIB::MULO_I64}); + + if (!Subtarget.hasStdExtM()) + setOperationAction({ISD::MUL, ISD::MULHS, ISD::MULHU, ISD::SDIV, ISD::UDIV, + ISD::SREM, ISD::UREM}, + XLenVT, Expand); + else { if (Subtarget.is64Bit()) { - setOperationAction(ISD::MUL, MVT::i32, Custom); - setOperationAction(ISD::MUL, MVT::i128, Custom); - - setOperationAction(ISD::SDIV, MVT::i8, Custom); - setOperationAction(ISD::UDIV, MVT::i8, Custom); - setOperationAction(ISD::UREM, MVT::i8, Custom); - setOperationAction(ISD::SDIV, MVT::i16, Custom); - setOperationAction(ISD::UDIV, MVT::i16, Custom); - setOperationAction(ISD::UREM, MVT::i16, Custom); - setOperationAction(ISD::SDIV, MVT::i32, Custom); - setOperationAction(ISD::UDIV, MVT::i32, Custom); - setOperationAction(ISD::UREM, MVT::i32, Custom); - } else { + setOperationAction(ISD::MUL, {MVT::i32, MVT::i128}, Custom); + + setOperationAction({ISD::SDIV, ISD::UDIV, ISD::UREM}, + {MVT::i8, MVT::i16, MVT::i32}, Custom); + } else setOperationAction(ISD::MUL, MVT::i64, Custom); - } } - setOperationAction(ISD::SDIVREM, XLenVT, Expand); - setOperationAction(ISD::UDIVREM, XLenVT, Expand); - setOperationAction(ISD::SMUL_LOHI, XLenVT, Expand); - setOperationAction(ISD::UMUL_LOHI, XLenVT, Expand); + setOperationAction( + {ISD::SDIVREM, ISD::UDIVREM, ISD::SMUL_LOHI, ISD::UMUL_LOHI}, XLenVT, + Expand); - setOperationAction(ISD::SHL_PARTS, XLenVT, Custom); - setOperationAction(ISD::SRL_PARTS, XLenVT, Custom); - setOperationAction(ISD::SRA_PARTS, XLenVT, Custom); + setOperationAction({ISD::SHL_PARTS, ISD::SRL_PARTS, ISD::SRA_PARTS}, 
XLenVT, + Custom); if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp() || Subtarget.hasStdExtZbkb()) { - if (Subtarget.is64Bit()) { - setOperationAction(ISD::ROTL, MVT::i32, Custom); - setOperationAction(ISD::ROTR, MVT::i32, Custom); - } - } else { - setOperationAction(ISD::ROTL, XLenVT, Expand); - setOperationAction(ISD::ROTR, XLenVT, Expand); - } + if (Subtarget.is64Bit()) + setOperationAction({ISD::ROTL, ISD::ROTR}, MVT::i32, Custom); + } else + setOperationAction({ISD::ROTL, ISD::ROTR}, XLenVT, Expand); if (Subtarget.hasStdExtZbp()) { // Custom lower bswap/bitreverse so we can convert them to GREVI to enable // more combining. - setOperationAction(ISD::BITREVERSE, XLenVT, Custom); - setOperationAction(ISD::BSWAP, XLenVT, Custom); - setOperationAction(ISD::BITREVERSE, MVT::i8, Custom); + setOperationAction({ISD::BITREVERSE, ISD::BSWAP}, XLenVT, Custom); + setOperationAction(ISD::BITREVERSE, MVT::i8, Custom); // BSWAP i8 doesn't exist. - setOperationAction(ISD::BITREVERSE, MVT::i16, Custom); - setOperationAction(ISD::BSWAP, MVT::i16, Custom); + setOperationAction({ISD::BITREVERSE, ISD::BSWAP}, MVT::i16, Custom); - if (Subtarget.is64Bit()) { - setOperationAction(ISD::BITREVERSE, MVT::i32, Custom); - setOperationAction(ISD::BSWAP, MVT::i32, Custom); - } + if (Subtarget.is64Bit()) + setOperationAction({ISD::BITREVERSE, ISD::BSWAP}, MVT::i32, Custom); } else { // With Zbb we have an XLen rev8 instruction, but not GREVI. So we'll // pattern match it directly in isel. 
@@ -288,38 +249,28 @@ } if (Subtarget.hasStdExtZbb()) { - setOperationAction(ISD::SMIN, XLenVT, Legal); - setOperationAction(ISD::SMAX, XLenVT, Legal); - setOperationAction(ISD::UMIN, XLenVT, Legal); - setOperationAction(ISD::UMAX, XLenVT, Legal); + setOperationAction({ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX}, XLenVT, + Legal); - if (Subtarget.is64Bit()) { - setOperationAction(ISD::CTTZ, MVT::i32, Custom); - setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Custom); - setOperationAction(ISD::CTLZ, MVT::i32, Custom); - setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Custom); - } + if (Subtarget.is64Bit()) + setOperationAction( + {ISD::CTTZ, ISD::CTTZ_ZERO_UNDEF, ISD::CTLZ, ISD::CTLZ_ZERO_UNDEF}, + MVT::i32, Custom); } else { - setOperationAction(ISD::CTTZ, XLenVT, Expand); - setOperationAction(ISD::CTLZ, XLenVT, Expand); - setOperationAction(ISD::CTPOP, XLenVT, Expand); + setOperationAction({ISD::CTTZ, ISD::CTLZ, ISD::CTPOP}, XLenVT, Expand); if (Subtarget.is64Bit()) setOperationAction(ISD::ABS, MVT::i32, Custom); } if (Subtarget.hasStdExtZbt()) { - setOperationAction(ISD::FSHL, XLenVT, Custom); - setOperationAction(ISD::FSHR, XLenVT, Custom); + setOperationAction({ISD::FSHL, ISD::FSHR}, XLenVT, Custom); setOperationAction(ISD::SELECT, XLenVT, Legal); - if (Subtarget.is64Bit()) { - setOperationAction(ISD::FSHL, MVT::i32, Custom); - setOperationAction(ISD::FSHR, MVT::i32, Custom); - } - } else { + if (Subtarget.is64Bit()) + setOperationAction({ISD::FSHL, ISD::FSHR}, MVT::i32, Custom); + } else setOperationAction(ISD::SELECT, XLenVT, Custom); - } static constexpr ISD::NodeType FPLegalNodeTypes[] = { ISD::FMINNUM, ISD::FMAXNUM, ISD::LRINT, @@ -352,24 +303,12 @@ setOperationAction(ISD::SELECT, MVT::f16, Custom); setOperationAction(ISD::BR_CC, MVT::f16, Expand); - setOperationAction(ISD::FREM, MVT::f16, Promote); - setOperationAction(ISD::FCEIL, MVT::f16, Promote); - setOperationAction(ISD::FFLOOR, MVT::f16, Promote); - setOperationAction(ISD::FNEARBYINT, 
MVT::f16, Promote); - setOperationAction(ISD::FRINT, MVT::f16, Promote); - setOperationAction(ISD::FROUND, MVT::f16, Promote); - setOperationAction(ISD::FROUNDEVEN, MVT::f16, Promote); - setOperationAction(ISD::FTRUNC, MVT::f16, Promote); - setOperationAction(ISD::FPOW, MVT::f16, Promote); - setOperationAction(ISD::FPOWI, MVT::f16, Promote); - setOperationAction(ISD::FCOS, MVT::f16, Promote); - setOperationAction(ISD::FSIN, MVT::f16, Promote); - setOperationAction(ISD::FSINCOS, MVT::f16, Promote); - setOperationAction(ISD::FEXP, MVT::f16, Promote); - setOperationAction(ISD::FEXP2, MVT::f16, Promote); - setOperationAction(ISD::FLOG, MVT::f16, Promote); - setOperationAction(ISD::FLOG2, MVT::f16, Promote); - setOperationAction(ISD::FLOG10, MVT::f16, Promote); + setOperationAction({ISD::FREM, ISD::FCEIL, ISD::FFLOOR, ISD::FNEARBYINT, + ISD::FRINT, ISD::FROUND, ISD::FROUNDEVEN, ISD::FTRUNC, + ISD::FPOW, ISD::FPOWI, ISD::FCOS, ISD::FSIN, + ISD::FSINCOS, ISD::FEXP, ISD::FEXP2, ISD::FLOG, + ISD::FLOG2, ISD::FLOG10}, + MVT::f16, Promote); // FIXME: Need to promote f16 STRICT_* to f32 libcalls, but we don't have // complete support for all operations in LegalizeDAG. 
@@ -414,30 +353,26 @@ setTruncStoreAction(MVT::f64, MVT::f16, Expand); } - if (Subtarget.is64Bit()) { - setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom); - setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom); - setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom); - setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom); - } + if (Subtarget.is64Bit()) + setOperationAction({ISD::FP_TO_UINT, ISD::FP_TO_SINT, + ISD::STRICT_FP_TO_UINT, ISD::STRICT_FP_TO_SINT}, + MVT::i32, Custom); if (Subtarget.hasStdExtF()) { - setOperationAction(ISD::FP_TO_UINT_SAT, XLenVT, Custom); - setOperationAction(ISD::FP_TO_SINT_SAT, XLenVT, Custom); + setOperationAction({ISD::FP_TO_UINT_SAT, ISD::FP_TO_SINT_SAT}, XLenVT, + Custom); - setOperationAction(ISD::STRICT_FP_TO_UINT, XLenVT, Legal); - setOperationAction(ISD::STRICT_FP_TO_SINT, XLenVT, Legal); - setOperationAction(ISD::STRICT_UINT_TO_FP, XLenVT, Legal); - setOperationAction(ISD::STRICT_SINT_TO_FP, XLenVT, Legal); + setOperationAction({ISD::STRICT_FP_TO_UINT, ISD::STRICT_FP_TO_SINT, + ISD::STRICT_UINT_TO_FP, ISD::STRICT_SINT_TO_FP}, + XLenVT, Legal); setOperationAction(ISD::FLT_ROUNDS_, XLenVT, Custom); setOperationAction(ISD::SET_ROUNDING, MVT::Other, Custom); } - setOperationAction(ISD::GlobalAddress, XLenVT, Custom); - setOperationAction(ISD::BlockAddress, XLenVT, Custom); - setOperationAction(ISD::ConstantPool, XLenVT, Custom); - setOperationAction(ISD::JumpTable, XLenVT, Custom); + setOperationAction({ISD::GlobalAddress, ISD::BlockAddress, ISD::ConstantPool, + ISD::JumpTable}, + XLenVT, Custom); setOperationAction(ISD::GlobalTLSAddress, XLenVT, Custom); @@ -446,8 +381,7 @@ setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Subtarget.is64Bit() ? 
Legal : Custom); - setOperationAction(ISD::TRAP, MVT::Other, Legal); - setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal); + setOperationAction({ISD::TRAP, ISD::DEBUGTRAP}, MVT::Other, Legal); setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom); if (Subtarget.is64Bit()) setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i32, Custom); @@ -468,19 +402,16 @@ // RVV intrinsics may have illegal operands. // We also need to custom legalize vmv.x.s. - setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i8, Custom); - setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i16, Custom); - setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i8, Custom); - setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i16, Custom); - if (Subtarget.is64Bit()) { + setOperationAction({ISD::INTRINSIC_WO_CHAIN, ISD::INTRINSIC_W_CHAIN}, + {MVT::i8, MVT::i16}, Custom); + if (Subtarget.is64Bit()) setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i32, Custom); - } else { - setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i64, Custom); - setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom); - } + else + setOperationAction({ISD::INTRINSIC_WO_CHAIN, ISD::INTRINSIC_W_CHAIN}, + MVT::i64, Custom); - setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom); - setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom); + setOperationAction({ISD::INTRINSIC_W_CHAIN, ISD::INTRINSIC_VOID}, + MVT::Other, Custom); static const unsigned IntegerVPOps[] = { ISD::VP_ADD, ISD::VP_SUB, ISD::VP_MUL, @@ -504,81 +435,55 @@ ISD::VP_SITOFP, ISD::VP_UITOFP, ISD::VP_SETCC}; - if (!Subtarget.is64Bit()) { + if (!Subtarget.is64Bit()) // We must custom-lower certain vXi64 operations on RV32 due to the vector // element type being illegal. 
- setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::i64, Custom); - setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::i64, Custom); - - setOperationAction(ISD::VECREDUCE_ADD, MVT::i64, Custom); - setOperationAction(ISD::VECREDUCE_AND, MVT::i64, Custom); - setOperationAction(ISD::VECREDUCE_OR, MVT::i64, Custom); - setOperationAction(ISD::VECREDUCE_XOR, MVT::i64, Custom); - setOperationAction(ISD::VECREDUCE_SMAX, MVT::i64, Custom); - setOperationAction(ISD::VECREDUCE_SMIN, MVT::i64, Custom); - setOperationAction(ISD::VECREDUCE_UMAX, MVT::i64, Custom); - setOperationAction(ISD::VECREDUCE_UMIN, MVT::i64, Custom); - - setOperationAction(ISD::VP_REDUCE_ADD, MVT::i64, Custom); - setOperationAction(ISD::VP_REDUCE_AND, MVT::i64, Custom); - setOperationAction(ISD::VP_REDUCE_OR, MVT::i64, Custom); - setOperationAction(ISD::VP_REDUCE_XOR, MVT::i64, Custom); - setOperationAction(ISD::VP_REDUCE_SMAX, MVT::i64, Custom); - setOperationAction(ISD::VP_REDUCE_SMIN, MVT::i64, Custom); - setOperationAction(ISD::VP_REDUCE_UMAX, MVT::i64, Custom); - setOperationAction(ISD::VP_REDUCE_UMIN, MVT::i64, Custom); - } + setOperationAction( + {ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT, ISD::VECREDUCE_ADD, + ISD::VECREDUCE_AND, ISD::VECREDUCE_OR, ISD::VECREDUCE_XOR, + ISD::VECREDUCE_SMAX, ISD::VECREDUCE_SMIN, ISD::VECREDUCE_UMAX, + ISD::VECREDUCE_UMIN, ISD::VP_REDUCE_ADD, ISD::VP_REDUCE_AND, + ISD::VP_REDUCE_OR, ISD::VP_REDUCE_XOR, ISD::VP_REDUCE_SMAX, + ISD::VP_REDUCE_SMIN, ISD::VP_REDUCE_UMAX, ISD::VP_REDUCE_UMIN}, + MVT::i64, Custom); for (MVT VT : BoolVecVTs) { setOperationAction(ISD::SPLAT_VECTOR, VT, Custom); // Mask VTs are custom-expanded into a series of standard nodes - setOperationAction(ISD::TRUNCATE, VT, Custom); - setOperationAction(ISD::CONCAT_VECTORS, VT, Custom); - setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom); - setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom); - - setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom); - 
setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom); + setOperationAction({ISD::TRUNCATE, ISD::CONCAT_VECTORS, + ISD::INSERT_SUBVECTOR, ISD::EXTRACT_SUBVECTOR, + ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT}, + VT, Custom); setOperationAction(ISD::SELECT, VT, Custom); - setOperationAction(ISD::SELECT_CC, VT, Expand); - setOperationAction(ISD::VSELECT, VT, Expand); - setOperationAction(ISD::VP_MERGE, VT, Expand); - setOperationAction(ISD::VP_SELECT, VT, Expand); - - setOperationAction(ISD::VP_AND, VT, Custom); - setOperationAction(ISD::VP_OR, VT, Custom); - setOperationAction(ISD::VP_XOR, VT, Custom); + setOperationAction( + {ISD::SELECT_CC, ISD::VSELECT, ISD::VP_MERGE, ISD::VP_SELECT}, VT, + Expand); - setOperationAction(ISD::VECREDUCE_AND, VT, Custom); - setOperationAction(ISD::VECREDUCE_OR, VT, Custom); - setOperationAction(ISD::VECREDUCE_XOR, VT, Custom); - - setOperationAction(ISD::VP_REDUCE_AND, VT, Custom); - setOperationAction(ISD::VP_REDUCE_OR, VT, Custom); - setOperationAction(ISD::VP_REDUCE_XOR, VT, Custom); + setOperationAction({ISD::VP_AND, ISD::VP_OR, ISD::VP_XOR, + ISD::VECREDUCE_AND, ISD::VECREDUCE_OR, + ISD::VECREDUCE_XOR, ISD::VP_REDUCE_AND, + ISD::VP_REDUCE_OR, ISD::VP_REDUCE_XOR}, + VT, Custom); // RVV has native int->float & float->int conversions where the // element type sizes are within one power-of-two of each other. Any // wider distances between type sizes have to be lowered as sequences // which progressively narrow the gap in stages. - setOperationAction(ISD::SINT_TO_FP, VT, Custom); - setOperationAction(ISD::UINT_TO_FP, VT, Custom); - setOperationAction(ISD::FP_TO_SINT, VT, Custom); - setOperationAction(ISD::FP_TO_UINT, VT, Custom); + setOperationAction( + {ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::FP_TO_SINT, ISD::FP_TO_UINT}, + VT, Custom); // Expand all extending loads to types larger than this, and truncating // stores from types larger than this. 
for (MVT OtherVT : MVT::integer_scalable_vector_valuetypes()) { setTruncStoreAction(OtherVT, VT, Expand); - setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand); - setLoadExtAction(ISD::SEXTLOAD, OtherVT, VT, Expand); - setLoadExtAction(ISD::ZEXTLOAD, OtherVT, VT, Expand); + setLoadExtAction({ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}, OtherVT, + VT, Expand); } - setOperationAction(ISD::VP_FPTOSI, VT, Custom); - setOperationAction(ISD::VP_FPTOUI, VT, Custom); + setOperationAction({ISD::VP_FPTOSI, ISD::VP_FPTOUI}, VT, Custom); } for (MVT VT : IntVecVTs) { @@ -590,98 +495,68 @@ setOperationAction(ISD::SPLAT_VECTOR_PARTS, VT, Custom); // Vectors implement MULHS/MULHU. - setOperationAction(ISD::SMUL_LOHI, VT, Expand); - setOperationAction(ISD::UMUL_LOHI, VT, Expand); + setOperationAction({ISD::SMUL_LOHI, ISD::UMUL_LOHI}, VT, Expand); // nxvXi64 MULHS/MULHU requires the V extension instead of Zve64*. - if (VT.getVectorElementType() == MVT::i64 && !Subtarget.hasStdExtV()) { - setOperationAction(ISD::MULHU, VT, Expand); - setOperationAction(ISD::MULHS, VT, Expand); - } + if (VT.getVectorElementType() == MVT::i64 && !Subtarget.hasStdExtV()) + setOperationAction({ISD::MULHU, ISD::MULHS}, VT, Expand); - setOperationAction(ISD::SMIN, VT, Legal); - setOperationAction(ISD::SMAX, VT, Legal); - setOperationAction(ISD::UMIN, VT, Legal); - setOperationAction(ISD::UMAX, VT, Legal); + setOperationAction({ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX}, VT, + Legal); - setOperationAction(ISD::ROTL, VT, Expand); - setOperationAction(ISD::ROTR, VT, Expand); - - setOperationAction(ISD::CTTZ, VT, Expand); - setOperationAction(ISD::CTLZ, VT, Expand); - setOperationAction(ISD::CTPOP, VT, Expand); - - setOperationAction(ISD::BSWAP, VT, Expand); + setOperationAction( + {ISD::ROTL, ISD::ROTR, ISD::CTTZ, ISD::CTLZ, ISD::CTPOP, ISD::BSWAP}, + VT, Expand); // Custom-lower extensions and truncations from/to mask types. 
- setOperationAction(ISD::ANY_EXTEND, VT, Custom); - setOperationAction(ISD::SIGN_EXTEND, VT, Custom); - setOperationAction(ISD::ZERO_EXTEND, VT, Custom); + setOperationAction({ISD::ANY_EXTEND, ISD::SIGN_EXTEND, ISD::ZERO_EXTEND}, + VT, Custom); // RVV has native int->float & float->int conversions where the // element type sizes are within one power-of-two of each other. Any // wider distances between type sizes have to be lowered as sequences // which progressively narrow the gap in stages. - setOperationAction(ISD::SINT_TO_FP, VT, Custom); - setOperationAction(ISD::UINT_TO_FP, VT, Custom); - setOperationAction(ISD::FP_TO_SINT, VT, Custom); - setOperationAction(ISD::FP_TO_UINT, VT, Custom); + setOperationAction( + {ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::FP_TO_SINT, ISD::FP_TO_UINT}, + VT, Custom); - setOperationAction(ISD::SADDSAT, VT, Legal); - setOperationAction(ISD::UADDSAT, VT, Legal); - setOperationAction(ISD::SSUBSAT, VT, Legal); - setOperationAction(ISD::USUBSAT, VT, Legal); + setOperationAction( + {ISD::SADDSAT, ISD::UADDSAT, ISD::SSUBSAT, ISD::USUBSAT}, VT, Legal); // Integer VTs are lowered as a series of "RISCVISD::TRUNCATE_VECTOR_VL" // nodes which truncate by one power of two at a time. setOperationAction(ISD::TRUNCATE, VT, Custom); // Custom-lower insert/extract operations to simplify patterns. - setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom); - setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom); + setOperationAction({ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT}, VT, + Custom); // Custom-lower reduction operations to set up the corresponding custom // nodes' operands. 
- setOperationAction(ISD::VECREDUCE_ADD, VT, Custom); - setOperationAction(ISD::VECREDUCE_AND, VT, Custom); - setOperationAction(ISD::VECREDUCE_OR, VT, Custom); - setOperationAction(ISD::VECREDUCE_XOR, VT, Custom); - setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom); - setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom); - setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom); - setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom); + setOperationAction({ISD::VECREDUCE_ADD, ISD::VECREDUCE_AND, + ISD::VECREDUCE_OR, ISD::VECREDUCE_XOR, + ISD::VECREDUCE_SMAX, ISD::VECREDUCE_SMIN, + ISD::VECREDUCE_UMAX, ISD::VECREDUCE_UMIN}, + VT, Custom); for (unsigned VPOpc : IntegerVPOps) setOperationAction(VPOpc, VT, Custom); - setOperationAction(ISD::LOAD, VT, Custom); - setOperationAction(ISD::STORE, VT, Custom); - - setOperationAction(ISD::MLOAD, VT, Custom); - setOperationAction(ISD::MSTORE, VT, Custom); - setOperationAction(ISD::MGATHER, VT, Custom); - setOperationAction(ISD::MSCATTER, VT, Custom); - - setOperationAction(ISD::VP_LOAD, VT, Custom); - setOperationAction(ISD::VP_STORE, VT, Custom); - setOperationAction(ISD::VP_GATHER, VT, Custom); - setOperationAction(ISD::VP_SCATTER, VT, Custom); + setOperationAction({ISD::LOAD, ISD::STORE, ISD::MLOAD, ISD::MSTORE, + ISD::MGATHER, ISD::MSCATTER, ISD::VP_LOAD, + ISD::VP_STORE, ISD::VP_GATHER, ISD::VP_SCATTER, + ISD::CONCAT_VECTORS, ISD::INSERT_SUBVECTOR, + ISD::EXTRACT_SUBVECTOR, ISD::SELECT}, + VT, Custom); - setOperationAction(ISD::CONCAT_VECTORS, VT, Custom); - setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom); - setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom); - - setOperationAction(ISD::SELECT, VT, Custom); setOperationAction(ISD::SELECT_CC, VT, Expand); - setOperationAction(ISD::STEP_VECTOR, VT, Custom); - setOperationAction(ISD::VECTOR_REVERSE, VT, Custom); + setOperationAction({ISD::STEP_VECTOR, ISD::VECTOR_REVERSE}, VT, Custom); for (MVT OtherVT : MVT::integer_scalable_vector_valuetypes()) { 
setTruncStoreAction(VT, OtherVT, Expand); - setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand); - setLoadExtAction(ISD::SEXTLOAD, OtherVT, VT, Expand); - setLoadExtAction(ISD::ZEXTLOAD, OtherVT, VT, Expand); + setLoadExtAction({ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}, OtherVT, + VT, Expand); } // Splice @@ -694,8 +569,8 @@ VT.getVectorElementType() == MVT::i32 ? MVT::f64 : MVT::f32; EVT FloatVT = MVT::getVectorVT(FloatEltVT, VT.getVectorElementCount()); if (isTypeLegal(FloatVT)) { - setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Custom); - setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Custom); + setOperationAction({ISD::CTLZ_ZERO_UNDEF, ISD::CTTZ_ZERO_UNDEF}, VT, + Custom); } } } @@ -720,52 +595,35 @@ // sizes are within one power-of-two of each other. Therefore conversions // between vXf16 and vXf64 must be lowered as sequences which convert via // vXf32. - setOperationAction(ISD::FP_ROUND, VT, Custom); - setOperationAction(ISD::FP_EXTEND, VT, Custom); + setOperationAction({ISD::FP_ROUND, ISD::FP_EXTEND}, VT, Custom); // Custom-lower insert/extract operations to simplify patterns. - setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom); - setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom); + setOperationAction({ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT}, VT, + Custom); // Expand various condition codes (explained above). 
for (auto CC : VFPCCToExpand) setCondCodeAction(CC, VT, Expand); - setOperationAction(ISD::FMINNUM, VT, Legal); - setOperationAction(ISD::FMAXNUM, VT, Legal); - - setOperationAction(ISD::FTRUNC, VT, Custom); - setOperationAction(ISD::FCEIL, VT, Custom); - setOperationAction(ISD::FFLOOR, VT, Custom); - setOperationAction(ISD::FROUND, VT, Custom); + setOperationAction({ISD::FMINNUM, ISD::FMAXNUM}, VT, Legal); - setOperationAction(ISD::VECREDUCE_FADD, VT, Custom); - setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom); - setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom); - setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom); + setOperationAction({ISD::FTRUNC, ISD::FCEIL, ISD::FFLOOR, ISD::FROUND, + ISD::VECREDUCE_FADD, ISD::VECREDUCE_SEQ_FADD, + ISD::VECREDUCE_FMIN, ISD::VECREDUCE_FMAX}, + VT, Custom); setOperationAction(ISD::FCOPYSIGN, VT, Legal); - setOperationAction(ISD::LOAD, VT, Custom); - setOperationAction(ISD::STORE, VT, Custom); - - setOperationAction(ISD::MLOAD, VT, Custom); - setOperationAction(ISD::MSTORE, VT, Custom); - setOperationAction(ISD::MGATHER, VT, Custom); - setOperationAction(ISD::MSCATTER, VT, Custom); - - setOperationAction(ISD::VP_LOAD, VT, Custom); - setOperationAction(ISD::VP_STORE, VT, Custom); - setOperationAction(ISD::VP_GATHER, VT, Custom); - setOperationAction(ISD::VP_SCATTER, VT, Custom); + setOperationAction({ISD::LOAD, ISD::STORE, ISD::MLOAD, ISD::MSTORE, + ISD::MGATHER, ISD::MSCATTER, ISD::VP_LOAD, + ISD::VP_STORE, ISD::VP_GATHER, ISD::VP_SCATTER, + ISD::SELECT}, + VT, Custom); - setOperationAction(ISD::SELECT, VT, Custom); setOperationAction(ISD::SELECT_CC, VT, Expand); - setOperationAction(ISD::CONCAT_VECTORS, VT, Custom); - setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom); - setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom); - - setOperationAction(ISD::VECTOR_REVERSE, VT, Custom); - setOperationAction(ISD::VECTOR_SPLICE, VT, Custom); + setOperationAction({ISD::CONCAT_VECTORS, ISD::INSERT_SUBVECTOR, + 
ISD::EXTRACT_SUBVECTOR, ISD::VECTOR_REVERSE, + ISD::VECTOR_SPLICE}, + VT, Custom); for (unsigned VPOpc : FloatingPointVPOps) setOperationAction(VPOpc, VT, Custom); @@ -808,57 +666,41 @@ setOperationAction(Op, VT, Expand); for (MVT OtherVT : MVT::integer_fixedlen_vector_valuetypes()) { setTruncStoreAction(VT, OtherVT, Expand); - setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand); - setLoadExtAction(ISD::SEXTLOAD, OtherVT, VT, Expand); - setLoadExtAction(ISD::ZEXTLOAD, OtherVT, VT, Expand); + setLoadExtAction({ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}, + OtherVT, VT, Expand); } // We use EXTRACT_SUBVECTOR as a "cast" from scalable to fixed. - setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom); - setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom); - - setOperationAction(ISD::BUILD_VECTOR, VT, Custom); - setOperationAction(ISD::CONCAT_VECTORS, VT, Custom); - - setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom); - setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom); - - setOperationAction(ISD::LOAD, VT, Custom); - setOperationAction(ISD::STORE, VT, Custom); - - setOperationAction(ISD::SETCC, VT, Custom); - - setOperationAction(ISD::SELECT, VT, Custom); - - setOperationAction(ISD::TRUNCATE, VT, Custom); - - setOperationAction(ISD::BITCAST, VT, Custom); - - setOperationAction(ISD::VECREDUCE_AND, VT, Custom); - setOperationAction(ISD::VECREDUCE_OR, VT, Custom); - setOperationAction(ISD::VECREDUCE_XOR, VT, Custom); - - setOperationAction(ISD::VP_REDUCE_AND, VT, Custom); - setOperationAction(ISD::VP_REDUCE_OR, VT, Custom); - setOperationAction(ISD::VP_REDUCE_XOR, VT, Custom); - - setOperationAction(ISD::SINT_TO_FP, VT, Custom); - setOperationAction(ISD::UINT_TO_FP, VT, Custom); - setOperationAction(ISD::FP_TO_SINT, VT, Custom); - setOperationAction(ISD::FP_TO_UINT, VT, Custom); + setOperationAction({ISD::INSERT_SUBVECTOR, + ISD::EXTRACT_SUBVECTOR, + ISD::BUILD_VECTOR, + ISD::CONCAT_VECTORS, + ISD::INSERT_VECTOR_ELT, + ISD::EXTRACT_VECTOR_ELT, + ISD::LOAD, + 
ISD::STORE, + ISD::SETCC, + ISD::SELECT, + ISD::TRUNCATE, + ISD::BITCAST, + ISD::VECREDUCE_AND, + ISD::VECREDUCE_OR, + ISD::VECREDUCE_XOR, + ISD::VP_REDUCE_AND, + ISD::VP_REDUCE_OR, + ISD::VP_REDUCE_XOR, + ISD::SINT_TO_FP, + ISD::UINT_TO_FP, + ISD::FP_TO_SINT, + ISD::FP_TO_UINT}, + VT, Custom); // Operations below are different for between masks and other vectors. if (VT.getVectorElementType() == MVT::i1) { - setOperationAction(ISD::VP_AND, VT, Custom); - setOperationAction(ISD::VP_OR, VT, Custom); - setOperationAction(ISD::VP_XOR, VT, Custom); - setOperationAction(ISD::AND, VT, Custom); - setOperationAction(ISD::OR, VT, Custom); - setOperationAction(ISD::XOR, VT, Custom); - - setOperationAction(ISD::VP_FPTOSI, VT, Custom); - setOperationAction(ISD::VP_FPTOUI, VT, Custom); - setOperationAction(ISD::VP_SETCC, VT, Custom); + setOperationAction({ISD::VP_AND, ISD::VP_OR, ISD::VP_XOR, ISD::AND, + ISD::OR, ISD::XOR, ISD::VP_FPTOSI, ISD::VP_FPTOUI, + ISD::VP_SETCC}, + VT, Custom); continue; } @@ -875,61 +717,35 @@ setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom); setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom); - setOperationAction(ISD::MLOAD, VT, Custom); - setOperationAction(ISD::MSTORE, VT, Custom); - setOperationAction(ISD::MGATHER, VT, Custom); - setOperationAction(ISD::MSCATTER, VT, Custom); - - setOperationAction(ISD::VP_LOAD, VT, Custom); - setOperationAction(ISD::VP_STORE, VT, Custom); - setOperationAction(ISD::VP_GATHER, VT, Custom); - setOperationAction(ISD::VP_SCATTER, VT, Custom); - - setOperationAction(ISD::ADD, VT, Custom); - setOperationAction(ISD::MUL, VT, Custom); - setOperationAction(ISD::SUB, VT, Custom); - setOperationAction(ISD::AND, VT, Custom); - setOperationAction(ISD::OR, VT, Custom); - setOperationAction(ISD::XOR, VT, Custom); - setOperationAction(ISD::SDIV, VT, Custom); - setOperationAction(ISD::SREM, VT, Custom); - setOperationAction(ISD::UDIV, VT, Custom); - setOperationAction(ISD::UREM, VT, Custom); - setOperationAction(ISD::SHL, 
VT, Custom); - setOperationAction(ISD::SRA, VT, Custom); - setOperationAction(ISD::SRL, VT, Custom); - - setOperationAction(ISD::SMIN, VT, Custom); - setOperationAction(ISD::SMAX, VT, Custom); - setOperationAction(ISD::UMIN, VT, Custom); - setOperationAction(ISD::UMAX, VT, Custom); - setOperationAction(ISD::ABS, VT, Custom); + setOperationAction( + {ISD::MLOAD, ISD::MSTORE, ISD::MGATHER, ISD::MSCATTER, + ISD::VP_LOAD, ISD::VP_STORE, ISD::VP_GATHER, ISD::VP_SCATTER, + ISD::ADD, ISD::MUL, ISD::SUB, ISD::AND, + ISD::OR, ISD::XOR, ISD::SDIV, ISD::SREM, + ISD::UDIV, ISD::UREM, ISD::SHL, ISD::SRA, + ISD::SRL, ISD::SMIN, ISD::SMAX, ISD::UMIN, + ISD::UMAX, ISD::ABS}, + VT, Custom); // vXi64 MULHS/MULHU requires the V extension instead of Zve64*. - if (VT.getVectorElementType() != MVT::i64 || Subtarget.hasStdExtV()) { - setOperationAction(ISD::MULHS, VT, Custom); - setOperationAction(ISD::MULHU, VT, Custom); - } + if (VT.getVectorElementType() != MVT::i64 || Subtarget.hasStdExtV()) + setOperationAction({ISD::MULHS, ISD::MULHU}, VT, Custom); - setOperationAction(ISD::SADDSAT, VT, Custom); - setOperationAction(ISD::UADDSAT, VT, Custom); - setOperationAction(ISD::SSUBSAT, VT, Custom); - setOperationAction(ISD::USUBSAT, VT, Custom); + setOperationAction({ISD::SADDSAT, ISD::UADDSAT, ISD::SSUBSAT, + ISD::USUBSAT, ISD::VSELECT}, + VT, Custom); - setOperationAction(ISD::VSELECT, VT, Custom); setOperationAction(ISD::SELECT_CC, VT, Expand); - setOperationAction(ISD::ANY_EXTEND, VT, Custom); - setOperationAction(ISD::SIGN_EXTEND, VT, Custom); - setOperationAction(ISD::ZERO_EXTEND, VT, Custom); + setOperationAction( + {ISD::ANY_EXTEND, ISD::SIGN_EXTEND, ISD::ZERO_EXTEND}, VT, Custom); // Custom-lower reduction operations to set up the corresponding custom // nodes' operands. 
- setOperationAction(ISD::VECREDUCE_ADD, VT, Custom); - setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom); - setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom); - setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom); - setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom); + setOperationAction({ISD::VECREDUCE_ADD, ISD::VECREDUCE_SMAX, + ISD::VECREDUCE_SMIN, ISD::VECREDUCE_UMAX, + ISD::VECREDUCE_UMIN}, + VT, Custom); for (unsigned VPOpc : IntegerVPOps) setOperationAction(VPOpc, VT, Custom); @@ -941,10 +757,9 @@ VT.getVectorElementType() == MVT::i32 ? MVT::f64 : MVT::f32; EVT FloatVT = MVT::getVectorVT(FloatEltVT, VT.getVectorElementCount()); - if (isTypeLegal(FloatVT)) { - setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Custom); - setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Custom); - } + if (isTypeLegal(FloatVT)) + setOperationAction({ISD::CTLZ_ZERO_UNDEF, ISD::CTTZ_ZERO_UNDEF}, VT, + Custom); } } @@ -961,70 +776,60 @@ } // We use EXTRACT_SUBVECTOR as a "cast" from scalable to fixed. - setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom); - setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom); - - setOperationAction(ISD::BUILD_VECTOR, VT, Custom); - setOperationAction(ISD::CONCAT_VECTORS, VT, Custom); - setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom); - setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom); - setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom); - - setOperationAction(ISD::LOAD, VT, Custom); - setOperationAction(ISD::STORE, VT, Custom); - setOperationAction(ISD::MLOAD, VT, Custom); - setOperationAction(ISD::MSTORE, VT, Custom); - setOperationAction(ISD::MGATHER, VT, Custom); - setOperationAction(ISD::MSCATTER, VT, Custom); - - setOperationAction(ISD::VP_LOAD, VT, Custom); - setOperationAction(ISD::VP_STORE, VT, Custom); - setOperationAction(ISD::VP_GATHER, VT, Custom); - setOperationAction(ISD::VP_SCATTER, VT, Custom); - - setOperationAction(ISD::FADD, VT, Custom); - setOperationAction(ISD::FSUB, VT, Custom); - 
setOperationAction(ISD::FMUL, VT, Custom); - setOperationAction(ISD::FDIV, VT, Custom); - setOperationAction(ISD::FNEG, VT, Custom); - setOperationAction(ISD::FABS, VT, Custom); - setOperationAction(ISD::FCOPYSIGN, VT, Custom); - setOperationAction(ISD::FSQRT, VT, Custom); - setOperationAction(ISD::FMA, VT, Custom); - setOperationAction(ISD::FMINNUM, VT, Custom); - setOperationAction(ISD::FMAXNUM, VT, Custom); - - setOperationAction(ISD::FP_ROUND, VT, Custom); - setOperationAction(ISD::FP_EXTEND, VT, Custom); - - setOperationAction(ISD::FTRUNC, VT, Custom); - setOperationAction(ISD::FCEIL, VT, Custom); - setOperationAction(ISD::FFLOOR, VT, Custom); - setOperationAction(ISD::FROUND, VT, Custom); + setOperationAction({ISD::INSERT_SUBVECTOR, + ISD::EXTRACT_SUBVECTOR, + ISD::BUILD_VECTOR, + ISD::CONCAT_VECTORS, + ISD::VECTOR_SHUFFLE, + ISD::INSERT_VECTOR_ELT, + ISD::EXTRACT_VECTOR_ELT, + ISD::LOAD, + ISD::STORE, + ISD::MLOAD, + ISD::MSTORE, + ISD::MGATHER, + ISD::MSCATTER, + ISD::VP_LOAD, + ISD::VP_STORE, + ISD::VP_GATHER, + ISD::VP_SCATTER, + ISD::FADD, + ISD::FSUB, + ISD::FMUL, + ISD::FDIV, + ISD::FNEG, + ISD::FABS, + ISD::FCOPYSIGN, + ISD::FSQRT, + ISD::FMA, + ISD::FMINNUM, + ISD::FMAXNUM, + ISD::FP_ROUND, + ISD::FP_EXTEND, + ISD::FTRUNC, + ISD::FCEIL, + ISD::FFLOOR, + ISD::FROUND}, + VT, Custom); for (auto CC : VFPCCToExpand) setCondCodeAction(CC, VT, Expand); - setOperationAction(ISD::VSELECT, VT, Custom); - setOperationAction(ISD::SELECT, VT, Custom); + setOperationAction({ISD::VSELECT, ISD::SELECT}, VT, Custom); setOperationAction(ISD::SELECT_CC, VT, Expand); - setOperationAction(ISD::BITCAST, VT, Custom); - - setOperationAction(ISD::VECREDUCE_FADD, VT, Custom); - setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom); - setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom); - setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom); + setOperationAction({ISD::BITCAST, ISD::VECREDUCE_FADD, + ISD::VECREDUCE_SEQ_FADD, ISD::VECREDUCE_FMIN, + ISD::VECREDUCE_FMAX}, + VT, 
Custom); for (unsigned VPOpc : FloatingPointVPOps) setOperationAction(VPOpc, VT, Custom); } // Custom-legalize bitcasts from fixed-length vectors to scalar types. - setOperationAction(ISD::BITCAST, MVT::i8, Custom); - setOperationAction(ISD::BITCAST, MVT::i16, Custom); - setOperationAction(ISD::BITCAST, MVT::i32, Custom); - setOperationAction(ISD::BITCAST, MVT::i64, Custom); + setOperationAction(ISD::BITCAST, {MVT::i8, MVT::i16, MVT::i32, MVT::i64}, + Custom); if (Subtarget.hasStdExtZfh()) setOperationAction(ISD::BITCAST, MVT::f16, Custom); if (Subtarget.hasStdExtF()) diff --git a/llvm/lib/Target/Sparc/SparcISelLowering.cpp b/llvm/lib/Target/Sparc/SparcISelLowering.cpp --- a/llvm/lib/Target/Sparc/SparcISelLowering.cpp +++ b/llvm/lib/Target/Sparc/SparcISelLowering.cpp @@ -1485,26 +1485,21 @@ } // Truncating/extending stores/loads are also not supported. for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) { - setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i32, Expand); - setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v2i32, Expand); - setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i32, Expand); - - setLoadExtAction(ISD::SEXTLOAD, MVT::v2i32, VT, Expand); - setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i32, VT, Expand); - setLoadExtAction(ISD::EXTLOAD, MVT::v2i32, VT, Expand); + setLoadExtAction({ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}, VT, + MVT::v2i32, Expand); + setLoadExtAction({ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}, MVT::v2i32, + VT, Expand); setTruncStoreAction(VT, MVT::v2i32, Expand); setTruncStoreAction(MVT::v2i32, VT, Expand); } // However, load and store *are* legal. 
- setOperationAction(ISD::LOAD, MVT::v2i32, Legal); - setOperationAction(ISD::STORE, MVT::v2i32, Legal); - setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i32, Legal); - setOperationAction(ISD::BUILD_VECTOR, MVT::v2i32, Legal); + setOperationAction( + {ISD::LOAD, ISD::STORE, ISD::EXTRACT_VECTOR_ELT, ISD::BUILD_VECTOR}, + MVT::v2i32, Legal); // And we need to promote i64 loads/stores into vector load/store - setOperationAction(ISD::LOAD, MVT::i64, Custom); - setOperationAction(ISD::STORE, MVT::i64, Custom); + setOperationAction({ISD::LOAD, ISD::STORE}, MVT::i64, Custom); // Sadly, this doesn't work: // AddPromotedToType(ISD::LOAD, MVT::i64, MVT::v2i32); @@ -1512,11 +1507,9 @@ } // Turn FP extload into load/fpextend - for (MVT VT : MVT::fp_valuetypes()) { - setLoadExtAction(ISD::EXTLOAD, VT, MVT::f16, Expand); - setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand); - setLoadExtAction(ISD::EXTLOAD, VT, MVT::f64, Expand); - } + for (MVT VT : MVT::fp_valuetypes()) + for (auto MemVT : {MVT::f16, MVT::f32, MVT::f64}) + setLoadExtAction(ISD::EXTLOAD, VT, MemVT, Expand); // Sparc doesn't have i1 sign extending load for (MVT VT : MVT::integer_valuetypes()) @@ -1531,102 +1524,61 @@ setTruncStoreAction(MVT::f128, MVT::f64, Expand); // Custom legalize GlobalAddress nodes into LO/HI parts. 
- setOperationAction(ISD::GlobalAddress, PtrVT, Custom); - setOperationAction(ISD::GlobalTLSAddress, PtrVT, Custom); - setOperationAction(ISD::ConstantPool, PtrVT, Custom); - setOperationAction(ISD::BlockAddress, PtrVT, Custom); + setOperationAction({ISD::GlobalAddress, ISD::GlobalTLSAddress, + ISD::ConstantPool, ISD::BlockAddress}, + PtrVT, Custom); // Sparc doesn't have sext_inreg, replace them with shl/sra - setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand); - setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8 , Expand); - setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1 , Expand); + setOperationAction(ISD::SIGN_EXTEND_INREG, {MVT::i16, MVT::i8, MVT::i1}, + Expand); // Sparc has no REM or DIVREM operations. - setOperationAction(ISD::UREM, MVT::i32, Expand); - setOperationAction(ISD::SREM, MVT::i32, Expand); - setOperationAction(ISD::SDIVREM, MVT::i32, Expand); - setOperationAction(ISD::UDIVREM, MVT::i32, Expand); + setOperationAction({ISD::UREM, ISD::SREM, ISD::SDIVREM, ISD::UDIVREM}, + MVT::i32, Expand); // ... nor does SparcV9. 
- if (Subtarget->is64Bit()) { - setOperationAction(ISD::UREM, MVT::i64, Expand); - setOperationAction(ISD::SREM, MVT::i64, Expand); - setOperationAction(ISD::SDIVREM, MVT::i64, Expand); - setOperationAction(ISD::UDIVREM, MVT::i64, Expand); - } + if (Subtarget->is64Bit()) + setOperationAction({ISD::UREM, ISD::SREM, ISD::SDIVREM, ISD::UDIVREM}, + MVT::i64, Expand); // Custom expand fp<->sint - setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom); - setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom); - setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom); - setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom); + setOperationAction({ISD::FP_TO_SINT, ISD::SINT_TO_FP}, {MVT::i32, MVT::i64}, + Custom); // Custom Expand fp<->uint - setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom); - setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom); - setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom); - setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom); + setOperationAction({ISD::FP_TO_UINT, ISD::UINT_TO_FP}, {MVT::i32, MVT::i64}, + Custom); // Lower f16 conversion operations into library calls - setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand); - setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand); - setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand); - setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand); - setOperationAction(ISD::FP16_TO_FP, MVT::f128, Expand); - setOperationAction(ISD::FP_TO_FP16, MVT::f128, Expand); + setOperationAction({ISD::FP16_TO_FP, ISD::FP_TO_FP16}, + {MVT::f32, MVT::f64, MVT::f128}, Expand); - setOperationAction(ISD::BITCAST, MVT::f32, Expand); - setOperationAction(ISD::BITCAST, MVT::i32, Expand); + setOperationAction(ISD::BITCAST, {MVT::f32, MVT::i32}, Expand); // Sparc has no select or setcc: expand to SELECT_CC. 
- setOperationAction(ISD::SELECT, MVT::i32, Expand); - setOperationAction(ISD::SELECT, MVT::f32, Expand); - setOperationAction(ISD::SELECT, MVT::f64, Expand); - setOperationAction(ISD::SELECT, MVT::f128, Expand); - - setOperationAction(ISD::SETCC, MVT::i32, Expand); - setOperationAction(ISD::SETCC, MVT::f32, Expand); - setOperationAction(ISD::SETCC, MVT::f64, Expand); - setOperationAction(ISD::SETCC, MVT::f128, Expand); + setOperationAction({ISD::SELECT, ISD::SETCC}, + {MVT::i32, MVT::f32, MVT::f64, MVT::f128}, Expand); // Sparc doesn't have BRCOND either, it has BR_CC. - setOperationAction(ISD::BRCOND, MVT::Other, Expand); - setOperationAction(ISD::BRIND, MVT::Other, Expand); - setOperationAction(ISD::BR_JT, MVT::Other, Expand); - setOperationAction(ISD::BR_CC, MVT::i32, Custom); - setOperationAction(ISD::BR_CC, MVT::f32, Custom); - setOperationAction(ISD::BR_CC, MVT::f64, Custom); - setOperationAction(ISD::BR_CC, MVT::f128, Custom); - - setOperationAction(ISD::SELECT_CC, MVT::i32, Custom); - setOperationAction(ISD::SELECT_CC, MVT::f32, Custom); - setOperationAction(ISD::SELECT_CC, MVT::f64, Custom); - setOperationAction(ISD::SELECT_CC, MVT::f128, Custom); - - setOperationAction(ISD::ADDC, MVT::i32, Custom); - setOperationAction(ISD::ADDE, MVT::i32, Custom); - setOperationAction(ISD::SUBC, MVT::i32, Custom); - setOperationAction(ISD::SUBE, MVT::i32, Custom); + setOperationAction({ISD::BRCOND, ISD::BRIND, ISD::BR_JT}, MVT::Other, Expand); + setOperationAction({ISD::BR_CC, ISD::SELECT_CC}, + {MVT::i32, MVT::f32, MVT::f64, MVT::f128}, Custom); + + setOperationAction({ISD::ADDC, ISD::ADDE, ISD::SUBC, ISD::SUBE}, MVT::i32, + Custom); if (Subtarget->is64Bit()) { - setOperationAction(ISD::ADDC, MVT::i64, Custom); - setOperationAction(ISD::ADDE, MVT::i64, Custom); - setOperationAction(ISD::SUBC, MVT::i64, Custom); - setOperationAction(ISD::SUBE, MVT::i64, Custom); + setOperationAction({ISD::ADDC, ISD::ADDE, ISD::SUBC, ISD::SUBE, ISD::BR_CC, + ISD::SELECT_CC}, + MVT::i64, 
Custom); setOperationAction(ISD::BITCAST, MVT::f64, Expand); - setOperationAction(ISD::BITCAST, MVT::i64, Expand); - setOperationAction(ISD::SELECT, MVT::i64, Expand); - setOperationAction(ISD::SETCC, MVT::i64, Expand); - setOperationAction(ISD::BR_CC, MVT::i64, Custom); - setOperationAction(ISD::SELECT_CC, MVT::i64, Custom); + setOperationAction({ISD::BITCAST, ISD::SELECT, ISD::SETCC}, MVT::i64, + Expand); setOperationAction(ISD::CTPOP, MVT::i64, Subtarget->usePopc() ? Legal : Expand); - setOperationAction(ISD::CTTZ , MVT::i64, Expand); - setOperationAction(ISD::CTLZ , MVT::i64, Expand); - setOperationAction(ISD::BSWAP, MVT::i64, Expand); - setOperationAction(ISD::ROTL , MVT::i64, Expand); - setOperationAction(ISD::ROTR , MVT::i64, Expand); + setOperationAction({ISD::CTTZ, ISD::CTLZ, ISD::BSWAP, ISD::ROTL, ISD::ROTR}, + MVT::i64, Expand); setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Custom); } @@ -1648,72 +1600,45 @@ setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Legal); // Custom Lower Atomic LOAD/STORE - setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Custom); - setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Custom); + setOperationAction({ISD::ATOMIC_LOAD, ISD::ATOMIC_STORE}, MVT::i32, Custom); if (Subtarget->is64Bit()) { - setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Legal); - setOperationAction(ISD::ATOMIC_SWAP, MVT::i64, Legal); - setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Custom); - setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Custom); + setOperationAction({ISD::ATOMIC_CMP_SWAP, ISD::ATOMIC_SWAP}, MVT::i64, + Legal); + setOperationAction({ISD::ATOMIC_LOAD, ISD::ATOMIC_STORE}, MVT::i64, Custom); } if (!Subtarget->is64Bit()) { // These libcalls are not available in 32-bit. 
- setLibcallName(RTLIB::MULO_I64, nullptr); - setLibcallName(RTLIB::SHL_I128, nullptr); - setLibcallName(RTLIB::SRL_I128, nullptr); - setLibcallName(RTLIB::SRA_I128, nullptr); + setLibcallName( + {RTLIB::MULO_I64, RTLIB::SHL_I128, RTLIB::SRL_I128, RTLIB::SRA_I128}); } - setLibcallName(RTLIB::MULO_I128, nullptr); + setLibcallName(RTLIB::MULO_I128); if (!Subtarget->isV9()) { // SparcV8 does not have FNEGD and FABSD. - setOperationAction(ISD::FNEG, MVT::f64, Custom); - setOperationAction(ISD::FABS, MVT::f64, Custom); - } - - setOperationAction(ISD::FSIN , MVT::f128, Expand); - setOperationAction(ISD::FCOS , MVT::f128, Expand); - setOperationAction(ISD::FSINCOS, MVT::f128, Expand); - setOperationAction(ISD::FREM , MVT::f128, Expand); - setOperationAction(ISD::FMA , MVT::f128, Expand); - setOperationAction(ISD::FSIN , MVT::f64, Expand); - setOperationAction(ISD::FCOS , MVT::f64, Expand); - setOperationAction(ISD::FSINCOS, MVT::f64, Expand); - setOperationAction(ISD::FREM , MVT::f64, Expand); - setOperationAction(ISD::FMA , MVT::f64, Expand); - setOperationAction(ISD::FSIN , MVT::f32, Expand); - setOperationAction(ISD::FCOS , MVT::f32, Expand); - setOperationAction(ISD::FSINCOS, MVT::f32, Expand); - setOperationAction(ISD::FREM , MVT::f32, Expand); - setOperationAction(ISD::FMA , MVT::f32, Expand); - setOperationAction(ISD::CTTZ , MVT::i32, Expand); - setOperationAction(ISD::CTLZ , MVT::i32, Expand); - setOperationAction(ISD::ROTL , MVT::i32, Expand); - setOperationAction(ISD::ROTR , MVT::i32, Expand); - setOperationAction(ISD::BSWAP, MVT::i32, Expand); - setOperationAction(ISD::FCOPYSIGN, MVT::f128, Expand); - setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand); - setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand); - setOperationAction(ISD::FPOW , MVT::f128, Expand); - setOperationAction(ISD::FPOW , MVT::f64, Expand); - setOperationAction(ISD::FPOW , MVT::f32, Expand); - - setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand); - setOperationAction(ISD::SRA_PARTS, 
MVT::i32, Expand); - setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand); + setOperationAction({ISD::FNEG, ISD::FABS}, MVT::f64, Custom); + } + + setOperationAction({ISD::FSIN, ISD::FCOS, ISD::FSINCOS, ISD::FREM, ISD::FMA}, + MVT::f128, Expand); + setOperationAction({ISD::FSIN, ISD::FCOS, ISD::FSINCOS, ISD::FREM, ISD::FMA}, + {MVT::f64, MVT::f32}, Expand); + setOperationAction({ISD::CTTZ, ISD::CTLZ, ISD::ROTL, ISD::ROTR, ISD::BSWAP}, + MVT::i32, Expand); + setOperationAction({ISD::FCOPYSIGN, ISD::FPOW}, + {MVT::f128, MVT::f64, MVT::f32}, Expand); + + setOperationAction({ISD::SHL_PARTS, ISD::SRA_PARTS, ISD::SRL_PARTS}, MVT::i32, + Expand); // Expands to [SU]MUL_LOHI. - setOperationAction(ISD::MULHU, MVT::i32, Expand); - setOperationAction(ISD::MULHS, MVT::i32, Expand); - setOperationAction(ISD::MUL, MVT::i32, Expand); + setOperationAction({ISD::MULHU, ISD::MULHS, ISD::MUL}, MVT::i32, Expand); if (Subtarget->useSoftMulDiv()) { // .umul works for both signed and unsigned - setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand); - setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand); + setOperationAction({ISD::SMUL_LOHI, ISD::UMUL_LOHI}, MVT::i32, Expand); setLibcallName(RTLIB::MUL_I32, ".umul"); setOperationAction(ISD::SDIV, MVT::i32, Expand); @@ -1727,17 +1652,13 @@ } if (Subtarget->is64Bit()) { - setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand); - setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand); - setOperationAction(ISD::MULHU, MVT::i64, Expand); - setOperationAction(ISD::MULHS, MVT::i64, Expand); + setOperationAction({ISD::UMUL_LOHI, ISD::SMUL_LOHI, ISD::MULHU, ISD::MULHS}, + MVT::i64, Expand); - setOperationAction(ISD::UMULO, MVT::i64, Custom); - setOperationAction(ISD::SMULO, MVT::i64, Custom); + setOperationAction({ISD::UMULO, ISD::SMULO}, MVT::i64, Custom); - setOperationAction(ISD::SHL_PARTS, MVT::i64, Expand); - setOperationAction(ISD::SRA_PARTS, MVT::i64, Expand); - setOperationAction(ISD::SRL_PARTS, MVT::i64, Expand); + 
setOperationAction({ISD::SHL_PARTS, ISD::SRA_PARTS, ISD::SRL_PARTS}, + MVT::i64, Expand); } // VASTART needs to be custom lowered to use the VarArgsFrameIndex. @@ -1745,14 +1666,12 @@ // VAARG needs to be lowered to not do unaligned accesses for doubles. setOperationAction(ISD::VAARG , MVT::Other, Custom); - setOperationAction(ISD::TRAP , MVT::Other, Legal); - setOperationAction(ISD::DEBUGTRAP , MVT::Other, Legal); + setOperationAction({ISD::TRAP, ISD::DEBUGTRAP}, MVT::Other, Legal); // Use the default implementation. - setOperationAction(ISD::VACOPY , MVT::Other, Expand); - setOperationAction(ISD::VAEND , MVT::Other, Expand); - setOperationAction(ISD::STACKSAVE , MVT::Other, Expand); - setOperationAction(ISD::STACKRESTORE , MVT::Other, Expand); + setOperationAction( + {ISD::VACOPY, ISD::VAEND, ISD::STACKSAVE, ISD::STACKRESTORE}, MVT::Other, + Expand); setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32 , Custom); setStackPointerRegisterToSaveRestore(SP::O6); @@ -1760,29 +1679,20 @@ setOperationAction(ISD::CTPOP, MVT::i32, Subtarget->usePopc() ? 
Legal : Expand); - if (Subtarget->isV9() && Subtarget->hasHardQuad()) { - setOperationAction(ISD::LOAD, MVT::f128, Legal); - setOperationAction(ISD::STORE, MVT::f128, Legal); - } else { - setOperationAction(ISD::LOAD, MVT::f128, Custom); - setOperationAction(ISD::STORE, MVT::f128, Custom); - } + if (Subtarget->isV9() && Subtarget->hasHardQuad()) + setOperationAction({ISD::LOAD, ISD::STORE}, MVT::f128, Legal); + else + setOperationAction({ISD::LOAD, ISD::STORE}, MVT::f128, Custom); if (Subtarget->hasHardQuad()) { - setOperationAction(ISD::FADD, MVT::f128, Legal); - setOperationAction(ISD::FSUB, MVT::f128, Legal); - setOperationAction(ISD::FMUL, MVT::f128, Legal); - setOperationAction(ISD::FDIV, MVT::f128, Legal); - setOperationAction(ISD::FSQRT, MVT::f128, Legal); - setOperationAction(ISD::FP_EXTEND, MVT::f128, Legal); + setOperationAction({ISD::FADD, ISD::FSUB, ISD::FMUL, ISD::FDIV, ISD::FSQRT, + ISD::FP_EXTEND}, + MVT::f128, Legal); setOperationAction(ISD::FP_ROUND, MVT::f64, Legal); - if (Subtarget->isV9()) { - setOperationAction(ISD::FNEG, MVT::f128, Legal); - setOperationAction(ISD::FABS, MVT::f128, Legal); - } else { - setOperationAction(ISD::FNEG, MVT::f128, Custom); - setOperationAction(ISD::FABS, MVT::f128, Custom); - } + if (Subtarget->isV9()) + setOperationAction({ISD::FNEG, ISD::FABS}, MVT::f128, Legal); + else + setOperationAction({ISD::FNEG, ISD::FABS}, MVT::f128, Custom); if (!Subtarget->is64Bit()) { setLibcallName(RTLIB::FPTOSINT_F128_I64, "_Q_qtoll"); @@ -1794,17 +1704,11 @@ } else { // Custom legalize f128 operations. 
- setOperationAction(ISD::FADD, MVT::f128, Custom); - setOperationAction(ISD::FSUB, MVT::f128, Custom); - setOperationAction(ISD::FMUL, MVT::f128, Custom); - setOperationAction(ISD::FDIV, MVT::f128, Custom); - setOperationAction(ISD::FSQRT, MVT::f128, Custom); - setOperationAction(ISD::FNEG, MVT::f128, Custom); - setOperationAction(ISD::FABS, MVT::f128, Custom); + setOperationAction({ISD::FADD, ISD::FSUB, ISD::FMUL, ISD::FDIV, ISD::FSQRT, + ISD::FNEG, ISD::FABS, ISD::FP_EXTEND}, + MVT::f128, Custom); - setOperationAction(ISD::FP_EXTEND, MVT::f128, Custom); - setOperationAction(ISD::FP_ROUND, MVT::f64, Custom); - setOperationAction(ISD::FP_ROUND, MVT::f32, Custom); + setOperationAction(ISD::FP_ROUND, {MVT::f64, MVT::f32}, Custom); // Setup Runtime library names. if (Subtarget->is64Bit() && !Subtarget->useSoftFloat()) { @@ -1846,12 +1750,10 @@ } } - if (Subtarget->fixAllFDIVSQRT()) { + if (Subtarget->fixAllFDIVSQRT()) // Promote FDIVS and FSQRTS to FDIVD and FSQRTD instructions instead as // the former instructions generate errata on LEON processors. - setOperationAction(ISD::FDIV, MVT::f32, Promote); - setOperationAction(ISD::FSQRT, MVT::f32, Promote); - } + setOperationAction({ISD::FDIV, ISD::FSQRT}, MVT::f32, Promote); if (Subtarget->hasNoFMULS()) { setOperationAction(ISD::FMUL, MVT::f32, Promote); diff --git a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp --- a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp +++ b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp @@ -140,16 +140,14 @@ MVT VT = MVT::SimpleValueType(I); if (isTypeLegal(VT)) { // Lower SET_CC into an IPM-based sequence. - setOperationAction(ISD::SETCC, VT, Custom); - setOperationAction(ISD::STRICT_FSETCC, VT, Custom); - setOperationAction(ISD::STRICT_FSETCCS, VT, Custom); + setOperationAction({ISD::SETCC, ISD::STRICT_FSETCC, ISD::STRICT_FSETCCS}, + VT, Custom); // Expand SELECT(C, A, B) into SELECT_CC(X, 0, A, B, NE). 
setOperationAction(ISD::SELECT, VT, Expand); // Lower SELECT_CC and BR_CC into separate comparisons and branches. - setOperationAction(ISD::SELECT_CC, VT, Custom); - setOperationAction(ISD::BR_CC, VT, Custom); + setOperationAction({ISD::SELECT_CC, ISD::BR_CC}, VT, Custom); } } @@ -162,36 +160,28 @@ // Handle integer types. for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE; - I <= MVT::LAST_INTEGER_VALUETYPE; - ++I) { + I <= MVT::LAST_INTEGER_VALUETYPE; ++I) { MVT VT = MVT::SimpleValueType(I); if (isTypeLegal(VT)) { setOperationAction(ISD::ABS, VT, Legal); // Expand individual DIV and REMs into DIVREMs. - setOperationAction(ISD::SDIV, VT, Expand); - setOperationAction(ISD::UDIV, VT, Expand); - setOperationAction(ISD::SREM, VT, Expand); - setOperationAction(ISD::UREM, VT, Expand); - setOperationAction(ISD::SDIVREM, VT, Custom); - setOperationAction(ISD::UDIVREM, VT, Custom); + setOperationAction({ISD::SDIV, ISD::UDIV, ISD::SREM, ISD::UREM}, VT, + Expand); + setOperationAction({ISD::SDIVREM, ISD::UDIVREM}, VT, Custom); // Support addition/subtraction with overflow. - setOperationAction(ISD::SADDO, VT, Custom); - setOperationAction(ISD::SSUBO, VT, Custom); + setOperationAction({ISD::SADDO, ISD::SSUBO}, VT, Custom); // Support addition/subtraction with carry. - setOperationAction(ISD::UADDO, VT, Custom); - setOperationAction(ISD::USUBO, VT, Custom); + setOperationAction({ISD::UADDO, ISD::USUBO}, VT, Custom); // Support carry in as value rather than glue. - setOperationAction(ISD::ADDCARRY, VT, Custom); - setOperationAction(ISD::SUBCARRY, VT, Custom); + setOperationAction({ISD::ADDCARRY, ISD::SUBCARRY}, VT, Custom); // Lower ATOMIC_LOAD and ATOMIC_STORE into normal volatile loads and // stores, putting a serialization instruction after the stores. 
- setOperationAction(ISD::ATOMIC_LOAD, VT, Custom); - setOperationAction(ISD::ATOMIC_STORE, VT, Custom); + setOperationAction({ISD::ATOMIC_LOAD, ISD::ATOMIC_STORE}, VT, Custom); // Lower ATOMIC_LOAD_SUB into ATOMIC_LOAD_ADD if LAA and LAAG are // available, or if the operand is constant. @@ -204,14 +194,11 @@ setOperationAction(ISD::CTPOP, VT, Expand); // No special instructions for these. - setOperationAction(ISD::CTTZ, VT, Expand); - setOperationAction(ISD::ROTR, VT, Expand); + setOperationAction({ISD::CTTZ, ISD::ROTR}, VT, Expand); // Use *MUL_LOHI where possible instead of MULH*. - setOperationAction(ISD::MULHS, VT, Expand); - setOperationAction(ISD::MULHU, VT, Expand); - setOperationAction(ISD::SMUL_LOHI, VT, Custom); - setOperationAction(ISD::UMUL_LOHI, VT, Custom); + setOperationAction({ISD::MULHS, ISD::MULHU}, VT, Expand); + setOperationAction({ISD::SMUL_LOHI, ISD::UMUL_LOHI}, VT, Custom); // Only z196 and above have native support for conversions to unsigned. // On z10, promoting to i64 doesn't generate an inexact condition for @@ -236,28 +223,21 @@ // Type legalization will convert 8- and 16-bit atomic operations into // forms that operate on i32s (but still keeping the original memory VT). // Lower them into full i32 operations. 
- setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Custom); - setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i32, Custom); - setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i32, Custom); - setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i32, Custom); - setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i32, Custom); - setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i32, Custom); - setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i32, Custom); - setOperationAction(ISD::ATOMIC_LOAD_MIN, MVT::i32, Custom); - setOperationAction(ISD::ATOMIC_LOAD_MAX, MVT::i32, Custom); - setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i32, Custom); - setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i32, Custom); + setOperationAction( + {ISD::ATOMIC_SWAP, ISD::ATOMIC_LOAD_ADD, ISD::ATOMIC_LOAD_SUB, + ISD::ATOMIC_LOAD_AND, ISD::ATOMIC_LOAD_OR, ISD::ATOMIC_LOAD_XOR, + ISD::ATOMIC_LOAD_NAND, ISD::ATOMIC_LOAD_MIN, ISD::ATOMIC_LOAD_MAX, + ISD::ATOMIC_LOAD_UMIN, ISD::ATOMIC_LOAD_UMAX}, + MVT::i32, Custom); // Even though i128 is not a legal type, we still need to custom lower // the atomic operations in order to exploit SystemZ instructions. - setOperationAction(ISD::ATOMIC_LOAD, MVT::i128, Custom); - setOperationAction(ISD::ATOMIC_STORE, MVT::i128, Custom); + setOperationAction({ISD::ATOMIC_LOAD, ISD::ATOMIC_STORE}, MVT::i128, Custom); // We can use the CC result of compare-and-swap to implement // the "success" result of ATOMIC_CMP_SWAP_WITH_SUCCESS. - setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i32, Custom); - setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i64, Custom); - setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i128, Custom); + setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, + {MVT::i32, MVT::i64, MVT::i128}, Custom); setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom); @@ -267,15 +247,14 @@ // z10 has instructions for signed but not unsigned FP conversion. // Handle unsigned 32-bit types as signed 64-bit types. 
if (!Subtarget.hasFPExtension()) { - setOperationAction(ISD::UINT_TO_FP, MVT::i32, Promote); - setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand); - setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i32, Promote); - setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i64, Expand); + setOperationAction({ISD::UINT_TO_FP, ISD::STRICT_UINT_TO_FP}, MVT::i32, + Promote); + setOperationAction({ISD::UINT_TO_FP, ISD::STRICT_UINT_TO_FP}, MVT::i64, + Expand); } // We have native support for a 64-bit CTLZ, via FLOGR. - setOperationAction(ISD::CTLZ, MVT::i32, Promote); - setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Promote); + setOperationAction({ISD::CTLZ, ISD::CTLZ_ZERO_UNDEF}, MVT::i32, Promote); setOperationAction(ISD::CTLZ, MVT::i64, Legal); // On z15 we have native support for a 64-bit CTPOP. @@ -288,38 +267,30 @@ setOperationAction(ISD::OR, MVT::i64, Custom); // Expand 128 bit shifts without using a libcall. - setOperationAction(ISD::SRL_PARTS, MVT::i64, Expand); - setOperationAction(ISD::SHL_PARTS, MVT::i64, Expand); - setOperationAction(ISD::SRA_PARTS, MVT::i64, Expand); - setLibcallName(RTLIB::SRL_I128, nullptr); - setLibcallName(RTLIB::SHL_I128, nullptr); - setLibcallName(RTLIB::SRA_I128, nullptr); + setOperationAction({ISD::SRL_PARTS, ISD::SHL_PARTS, ISD::SRA_PARTS}, MVT::i64, + Expand); + setLibcallName({RTLIB::SRL_I128, RTLIB::SHL_I128, RTLIB::SRA_I128}); // Handle bitcast from fp128 to i128. setOperationAction(ISD::BITCAST, MVT::i128, Custom); // We have native instructions for i8, i16 and i32 extensions, but not i1. 
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand); - for (MVT VT : MVT::integer_valuetypes()) { - setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote); - setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote); - setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote); - } + for (MVT VT : MVT::integer_valuetypes()) + setLoadExtAction({ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}, VT, MVT::i1, + Promote); // Handle the various types of symbolic address. - setOperationAction(ISD::ConstantPool, PtrVT, Custom); - setOperationAction(ISD::GlobalAddress, PtrVT, Custom); - setOperationAction(ISD::GlobalTLSAddress, PtrVT, Custom); - setOperationAction(ISD::BlockAddress, PtrVT, Custom); - setOperationAction(ISD::JumpTable, PtrVT, Custom); + setOperationAction({ISD::ConstantPool, ISD::GlobalAddress, + ISD::GlobalTLSAddress, ISD::BlockAddress, ISD::JumpTable}, + PtrVT, Custom); // We need to handle dynamic allocations specially because of the // 160-byte area at the bottom of the stack. - setOperationAction(ISD::DYNAMIC_STACKALLOC, PtrVT, Custom); - setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, PtrVT, Custom); + setOperationAction({ISD::DYNAMIC_STACKALLOC, ISD::GET_DYNAMIC_AREA_OFFSET}, + PtrVT, Custom); - setOperationAction(ISD::STACKSAVE, MVT::Other, Custom); - setOperationAction(ISD::STACKRESTORE, MVT::Other, Custom); + setOperationAction({ISD::STACKSAVE, ISD::STACKRESTORE}, MVT::Other, Custom); // Handle prefetches with PFD or PFDRL. setOperationAction(ISD::PREFETCH, MVT::Other, Custom); @@ -333,9 +304,8 @@ // Likewise all truncating stores and extending loads. 
for (MVT InnerVT : MVT::fixedlen_vector_valuetypes()) { setTruncStoreAction(VT, InnerVT, Expand); - setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand); - setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand); - setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand); + setLoadExtAction({ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}, VT, + InnerVT, Expand); } if (isTypeLegal(VT)) { @@ -343,16 +313,13 @@ // vector register, even if there is no native support for the format // as such. In particular, we can do these for v4f32 even though there // are no specific instructions for that format. - setOperationAction(ISD::LOAD, VT, Legal); - setOperationAction(ISD::STORE, VT, Legal); - setOperationAction(ISD::VSELECT, VT, Legal); - setOperationAction(ISD::BITCAST, VT, Legal); - setOperationAction(ISD::UNDEF, VT, Legal); + setOperationAction( + {ISD::LOAD, ISD::STORE, ISD::VSELECT, ISD::BITCAST, ISD::UNDEF}, VT, + Legal); // Likewise, except that we need to replace the nodes with something // more specific. - setOperationAction(ISD::BUILD_VECTOR, VT, Custom); - setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom); + setOperationAction({ISD::BUILD_VECTOR, ISD::VECTOR_SHUFFLE}, VT, Custom); } } @@ -360,91 +327,57 @@ for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) { if (isTypeLegal(VT)) { // These operations have direct equivalents. 
- setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Legal); - setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Legal); - setOperationAction(ISD::ADD, VT, Legal); - setOperationAction(ISD::SUB, VT, Legal); + setOperationAction( + {ISD::EXTRACT_VECTOR_ELT, ISD::INSERT_VECTOR_ELT, ISD::ADD, ISD::SUB}, + VT, Legal); if (VT != MVT::v2i64) setOperationAction(ISD::MUL, VT, Legal); - setOperationAction(ISD::ABS, VT, Legal); - setOperationAction(ISD::AND, VT, Legal); - setOperationAction(ISD::OR, VT, Legal); - setOperationAction(ISD::XOR, VT, Legal); + setOperationAction({ISD::ABS, ISD::AND, ISD::OR, ISD::XOR}, VT, Legal); if (Subtarget.hasVectorEnhancements1()) setOperationAction(ISD::CTPOP, VT, Legal); else setOperationAction(ISD::CTPOP, VT, Custom); - setOperationAction(ISD::CTTZ, VT, Legal); - setOperationAction(ISD::CTLZ, VT, Legal); + setOperationAction({ISD::CTTZ, ISD::CTLZ}, VT, Legal); // Convert a GPR scalar to a vector by inserting it into element 0. setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom); // Use a series of unpacks for extensions. - setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Custom); - setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Custom); + setOperationAction( + {ISD::SIGN_EXTEND_VECTOR_INREG, ISD::ZERO_EXTEND_VECTOR_INREG}, VT, + Custom); // Detect shifts by a scalar amount and convert them into // V*_BY_SCALAR. - setOperationAction(ISD::SHL, VT, Custom); - setOperationAction(ISD::SRA, VT, Custom); - setOperationAction(ISD::SRL, VT, Custom); + setOperationAction({ISD::SHL, ISD::SRA, ISD::SRL}, VT, Custom); // At present ROTL isn't matched by DAGCombiner. ROTR should be // converted into ROTL. - setOperationAction(ISD::ROTL, VT, Expand); - setOperationAction(ISD::ROTR, VT, Expand); + setOperationAction({ISD::ROTL, ISD::ROTR}, VT, Expand); // Map SETCCs onto one of VCE, VCH or VCHL, swapping the operands // and inverting the result as necessary. 
- setOperationAction(ISD::SETCC, VT, Custom); - setOperationAction(ISD::STRICT_FSETCC, VT, Custom); + setOperationAction({ISD::SETCC, ISD::STRICT_FSETCC}, VT, Custom); if (Subtarget.hasVectorEnhancements1()) setOperationAction(ISD::STRICT_FSETCCS, VT, Custom); } } - if (Subtarget.hasVector()) { + if (Subtarget.hasVector()) // There should be no need to check for float types other than v2f64 // since <2 x f32> isn't a legal type. - setOperationAction(ISD::FP_TO_SINT, MVT::v2i64, Legal); - setOperationAction(ISD::FP_TO_SINT, MVT::v2f64, Legal); - setOperationAction(ISD::FP_TO_UINT, MVT::v2i64, Legal); - setOperationAction(ISD::FP_TO_UINT, MVT::v2f64, Legal); - setOperationAction(ISD::SINT_TO_FP, MVT::v2i64, Legal); - setOperationAction(ISD::SINT_TO_FP, MVT::v2f64, Legal); - setOperationAction(ISD::UINT_TO_FP, MVT::v2i64, Legal); - setOperationAction(ISD::UINT_TO_FP, MVT::v2f64, Legal); - - setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v2i64, Legal); - setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v2f64, Legal); - setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v2i64, Legal); - setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v2f64, Legal); - setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v2i64, Legal); - setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v2f64, Legal); - setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v2i64, Legal); - setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v2f64, Legal); - } - - if (Subtarget.hasVectorEnhancements2()) { - setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal); - setOperationAction(ISD::FP_TO_SINT, MVT::v4f32, Legal); - setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal); - setOperationAction(ISD::FP_TO_UINT, MVT::v4f32, Legal); - setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal); - setOperationAction(ISD::SINT_TO_FP, MVT::v4f32, Legal); - setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal); - setOperationAction(ISD::UINT_TO_FP, MVT::v4f32, Legal); - - setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v4i32, Legal); 
- setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v4f32, Legal); - setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v4i32, Legal); - setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v4f32, Legal); - setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v4i32, Legal); - setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v4f32, Legal); - setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4i32, Legal); - setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4f32, Legal); - } + setOperationAction({ISD::FP_TO_SINT, ISD::FP_TO_UINT, ISD::SINT_TO_FP, + ISD::UINT_TO_FP, ISD::STRICT_FP_TO_SINT, + ISD::STRICT_FP_TO_UINT, ISD::STRICT_SINT_TO_FP, + ISD::STRICT_UINT_TO_FP}, + {MVT::v2i64, MVT::v2f64}, Legal); + + if (Subtarget.hasVectorEnhancements2()) + setOperationAction({ISD::FP_TO_SINT, ISD::FP_TO_UINT, ISD::SINT_TO_FP, + ISD::UINT_TO_FP, ISD::STRICT_FP_TO_SINT, + ISD::STRICT_FP_TO_UINT, ISD::STRICT_SINT_TO_FP, + ISD::STRICT_UINT_TO_FP}, + {MVT::v4i32, MVT::v4f32}, Legal); // Handle floating-point types. for (unsigned I = MVT::FIRST_FP_VALUETYPE; @@ -456,154 +389,85 @@ setOperationAction(ISD::FRINT, VT, Legal); // We can use the extended form of FI for other rounding operations. - if (Subtarget.hasFPExtension()) { - setOperationAction(ISD::FNEARBYINT, VT, Legal); - setOperationAction(ISD::FFLOOR, VT, Legal); - setOperationAction(ISD::FCEIL, VT, Legal); - setOperationAction(ISD::FTRUNC, VT, Legal); - setOperationAction(ISD::FROUND, VT, Legal); - } + if (Subtarget.hasFPExtension()) + setOperationAction({ISD::FNEARBYINT, ISD::FFLOOR, ISD::FCEIL, + ISD::FTRUNC, ISD::FROUND}, + VT, Legal); // No special instructions for these. - setOperationAction(ISD::FSIN, VT, Expand); - setOperationAction(ISD::FCOS, VT, Expand); - setOperationAction(ISD::FSINCOS, VT, Expand); - setOperationAction(ISD::FREM, VT, Expand); - setOperationAction(ISD::FPOW, VT, Expand); + setOperationAction( + {ISD::FSIN, ISD::FCOS, ISD::FSINCOS, ISD::FREM, ISD::FPOW}, VT, + Expand); // Handle constrained floating-point operations. 
- setOperationAction(ISD::STRICT_FADD, VT, Legal); - setOperationAction(ISD::STRICT_FSUB, VT, Legal); - setOperationAction(ISD::STRICT_FMUL, VT, Legal); - setOperationAction(ISD::STRICT_FDIV, VT, Legal); - setOperationAction(ISD::STRICT_FMA, VT, Legal); - setOperationAction(ISD::STRICT_FSQRT, VT, Legal); - setOperationAction(ISD::STRICT_FRINT, VT, Legal); - setOperationAction(ISD::STRICT_FP_ROUND, VT, Legal); - setOperationAction(ISD::STRICT_FP_EXTEND, VT, Legal); - if (Subtarget.hasFPExtension()) { - setOperationAction(ISD::STRICT_FNEARBYINT, VT, Legal); - setOperationAction(ISD::STRICT_FFLOOR, VT, Legal); - setOperationAction(ISD::STRICT_FCEIL, VT, Legal); - setOperationAction(ISD::STRICT_FROUND, VT, Legal); - setOperationAction(ISD::STRICT_FTRUNC, VT, Legal); - } + setOperationAction({ISD::STRICT_FADD, ISD::STRICT_FSUB, ISD::STRICT_FMUL, + ISD::STRICT_FDIV, ISD::STRICT_FMA, ISD::STRICT_FSQRT, + ISD::STRICT_FRINT, ISD::STRICT_FP_ROUND, + ISD::STRICT_FP_EXTEND}, + VT, Legal); + if (Subtarget.hasFPExtension()) + setOperationAction({ISD::STRICT_FNEARBYINT, ISD::STRICT_FFLOOR, + ISD::STRICT_FCEIL, ISD::STRICT_FROUND, + ISD::STRICT_FTRUNC}, + VT, Legal); } } // Handle floating-point vector types. if (Subtarget.hasVector()) { // Scalar-to-vector conversion is just a subreg. - setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal); - setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2f64, Legal); + setOperationAction(ISD::SCALAR_TO_VECTOR, {MVT::v4f32, MVT::v2f64}, Legal); // Some insertions and extractions can be done directly but others // need to go via integers. 
- setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom); - setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f64, Custom); - setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom); - setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Custom); + setOperationAction({ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT}, + {MVT::v4f32, MVT::v2f64}, Custom); // These operations have direct equivalents. - setOperationAction(ISD::FADD, MVT::v2f64, Legal); - setOperationAction(ISD::FNEG, MVT::v2f64, Legal); - setOperationAction(ISD::FSUB, MVT::v2f64, Legal); - setOperationAction(ISD::FMUL, MVT::v2f64, Legal); - setOperationAction(ISD::FMA, MVT::v2f64, Legal); - setOperationAction(ISD::FDIV, MVT::v2f64, Legal); - setOperationAction(ISD::FABS, MVT::v2f64, Legal); - setOperationAction(ISD::FSQRT, MVT::v2f64, Legal); - setOperationAction(ISD::FRINT, MVT::v2f64, Legal); - setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal); - setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal); - setOperationAction(ISD::FCEIL, MVT::v2f64, Legal); - setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal); - setOperationAction(ISD::FROUND, MVT::v2f64, Legal); + setOperationAction({ISD::FADD, ISD::FNEG, ISD::FSUB, ISD::FMUL, ISD::FMA, + ISD::FDIV, ISD::FABS, ISD::FSQRT, ISD::FRINT, + ISD::FNEARBYINT, ISD::FFLOOR, ISD::FCEIL, ISD::FTRUNC, + ISD::FROUND}, + MVT::v2f64, Legal); // Handle constrained floating-point operations. 
- setOperationAction(ISD::STRICT_FADD, MVT::v2f64, Legal); - setOperationAction(ISD::STRICT_FSUB, MVT::v2f64, Legal); - setOperationAction(ISD::STRICT_FMUL, MVT::v2f64, Legal); - setOperationAction(ISD::STRICT_FMA, MVT::v2f64, Legal); - setOperationAction(ISD::STRICT_FDIV, MVT::v2f64, Legal); - setOperationAction(ISD::STRICT_FSQRT, MVT::v2f64, Legal); - setOperationAction(ISD::STRICT_FRINT, MVT::v2f64, Legal); - setOperationAction(ISD::STRICT_FNEARBYINT, MVT::v2f64, Legal); - setOperationAction(ISD::STRICT_FFLOOR, MVT::v2f64, Legal); - setOperationAction(ISD::STRICT_FCEIL, MVT::v2f64, Legal); - setOperationAction(ISD::STRICT_FTRUNC, MVT::v2f64, Legal); - setOperationAction(ISD::STRICT_FROUND, MVT::v2f64, Legal); + setOperationAction({ISD::STRICT_FADD, ISD::STRICT_FSUB, ISD::STRICT_FMUL, + ISD::STRICT_FMA, ISD::STRICT_FDIV, ISD::STRICT_FSQRT, + ISD::STRICT_FRINT, ISD::STRICT_FNEARBYINT, + ISD::STRICT_FFLOOR, ISD::STRICT_FCEIL, + ISD::STRICT_FTRUNC, ISD::STRICT_FROUND}, + MVT::v2f64, Legal); } // The vector enhancements facility 1 has instructions for these. 
if (Subtarget.hasVectorEnhancements1()) { - setOperationAction(ISD::FADD, MVT::v4f32, Legal); - setOperationAction(ISD::FNEG, MVT::v4f32, Legal); - setOperationAction(ISD::FSUB, MVT::v4f32, Legal); - setOperationAction(ISD::FMUL, MVT::v4f32, Legal); - setOperationAction(ISD::FMA, MVT::v4f32, Legal); - setOperationAction(ISD::FDIV, MVT::v4f32, Legal); - setOperationAction(ISD::FABS, MVT::v4f32, Legal); - setOperationAction(ISD::FSQRT, MVT::v4f32, Legal); - setOperationAction(ISD::FRINT, MVT::v4f32, Legal); - setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal); - setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal); - setOperationAction(ISD::FCEIL, MVT::v4f32, Legal); - setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal); - setOperationAction(ISD::FROUND, MVT::v4f32, Legal); - - setOperationAction(ISD::FMAXNUM, MVT::f64, Legal); - setOperationAction(ISD::FMAXIMUM, MVT::f64, Legal); - setOperationAction(ISD::FMINNUM, MVT::f64, Legal); - setOperationAction(ISD::FMINIMUM, MVT::f64, Legal); - - setOperationAction(ISD::FMAXNUM, MVT::v2f64, Legal); - setOperationAction(ISD::FMAXIMUM, MVT::v2f64, Legal); - setOperationAction(ISD::FMINNUM, MVT::v2f64, Legal); - setOperationAction(ISD::FMINIMUM, MVT::v2f64, Legal); - - setOperationAction(ISD::FMAXNUM, MVT::f32, Legal); - setOperationAction(ISD::FMAXIMUM, MVT::f32, Legal); - setOperationAction(ISD::FMINNUM, MVT::f32, Legal); - setOperationAction(ISD::FMINIMUM, MVT::f32, Legal); - - setOperationAction(ISD::FMAXNUM, MVT::v4f32, Legal); - setOperationAction(ISD::FMAXIMUM, MVT::v4f32, Legal); - setOperationAction(ISD::FMINNUM, MVT::v4f32, Legal); - setOperationAction(ISD::FMINIMUM, MVT::v4f32, Legal); - - setOperationAction(ISD::FMAXNUM, MVT::f128, Legal); - setOperationAction(ISD::FMAXIMUM, MVT::f128, Legal); - setOperationAction(ISD::FMINNUM, MVT::f128, Legal); - setOperationAction(ISD::FMINIMUM, MVT::f128, Legal); + setOperationAction({ISD::FADD, ISD::FNEG, ISD::FSUB, ISD::FMUL, ISD::FMA, + ISD::FDIV, ISD::FABS, ISD::FSQRT, 
ISD::FRINT, + ISD::FNEARBYINT, ISD::FFLOOR, ISD::FCEIL, ISD::FTRUNC, + ISD::FROUND}, + MVT::v4f32, Legal); + + setOperationAction( + {ISD::FMAXNUM, ISD::FMAXIMUM, ISD::FMINNUM, ISD::FMINIMUM}, + {MVT::f64, MVT::v2f64, MVT::f32, MVT::v4f32, MVT::f128}, Legal); // Handle constrained floating-point operations. - setOperationAction(ISD::STRICT_FADD, MVT::v4f32, Legal); - setOperationAction(ISD::STRICT_FSUB, MVT::v4f32, Legal); - setOperationAction(ISD::STRICT_FMUL, MVT::v4f32, Legal); - setOperationAction(ISD::STRICT_FMA, MVT::v4f32, Legal); - setOperationAction(ISD::STRICT_FDIV, MVT::v4f32, Legal); - setOperationAction(ISD::STRICT_FSQRT, MVT::v4f32, Legal); - setOperationAction(ISD::STRICT_FRINT, MVT::v4f32, Legal); - setOperationAction(ISD::STRICT_FNEARBYINT, MVT::v4f32, Legal); - setOperationAction(ISD::STRICT_FFLOOR, MVT::v4f32, Legal); - setOperationAction(ISD::STRICT_FCEIL, MVT::v4f32, Legal); - setOperationAction(ISD::STRICT_FROUND, MVT::v4f32, Legal); - setOperationAction(ISD::STRICT_FTRUNC, MVT::v4f32, Legal); - for (auto VT : { MVT::f32, MVT::f64, MVT::f128, - MVT::v4f32, MVT::v2f64 }) { - setOperationAction(ISD::STRICT_FMAXNUM, VT, Legal); - setOperationAction(ISD::STRICT_FMINNUM, VT, Legal); - setOperationAction(ISD::STRICT_FMAXIMUM, VT, Legal); - setOperationAction(ISD::STRICT_FMINIMUM, VT, Legal); - } + setOperationAction({ISD::STRICT_FADD, ISD::STRICT_FSUB, ISD::STRICT_FMUL, + ISD::STRICT_FMA, ISD::STRICT_FDIV, ISD::STRICT_FSQRT, + ISD::STRICT_FRINT, ISD::STRICT_FNEARBYINT, + ISD::STRICT_FFLOOR, ISD::STRICT_FCEIL, + ISD::STRICT_FROUND, ISD::STRICT_FTRUNC}, + MVT::v4f32, Legal); + + setOperationAction({ISD::STRICT_FMAXNUM, ISD::STRICT_FMINNUM, + ISD::STRICT_FMAXIMUM, ISD::STRICT_FMINIMUM}, + {MVT::f32, MVT::f64, MVT::f128, MVT::v4f32, MVT::v2f64}, + Legal); } // We only have fused f128 multiply-addition on vector registers. 
- if (!Subtarget.hasVectorEnhancements1()) { - setOperationAction(ISD::FMA, MVT::f128, Expand); - setOperationAction(ISD::STRICT_FMA, MVT::f128, Expand); - } + if (!Subtarget.hasVectorEnhancements1()) + setOperationAction({ISD::FMA, ISD::STRICT_FMA}, MVT::f128, Expand); // We don't have a copysign instruction on vector registers. if (Subtarget.hasVectorEnhancements1()) @@ -622,21 +486,18 @@ } // Floating-point truncation and stores need to be done separately. - setTruncStoreAction(MVT::f64, MVT::f32, Expand); + setTruncStoreAction(MVT::f64, MVT::f32, Expand); setTruncStoreAction(MVT::f128, MVT::f32, Expand); setTruncStoreAction(MVT::f128, MVT::f64, Expand); // We have 64-bit FPR<->GPR moves, but need special handling for // 32-bit forms. - if (!Subtarget.hasVector()) { - setOperationAction(ISD::BITCAST, MVT::i32, Custom); - setOperationAction(ISD::BITCAST, MVT::f32, Custom); - } + if (!Subtarget.hasVector()) + setOperationAction(ISD::BITCAST, {MVT::i32, MVT::f32}, Custom); // VASTART and VACOPY need to deal with the SystemZ-specific varargs // structure, but VAEND is a no-op. - setOperationAction(ISD::VASTART, MVT::Other, Custom); - setOperationAction(ISD::VACOPY, MVT::Other, Custom); + setOperationAction({ISD::VASTART, ISD::VACOPY}, MVT::Other, Custom); setOperationAction(ISD::VAEND, MVT::Other, Expand); // Codes for which we want to perform some z-specific combinations. @@ -662,8 +523,8 @@ ISD::INTRINSIC_W_CHAIN}); // Handle intrinsics. - setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom); - setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom); + setOperationAction({ISD::INTRINSIC_W_CHAIN, ISD::INTRINSIC_WO_CHAIN}, + MVT::Other, Custom); // We want to use MVC in preference to even a single load/store pair. 
MaxStoresPerMemcpy = 0; diff --git a/llvm/lib/Target/VE/VEISelLowering.cpp b/llvm/lib/Target/VE/VEISelLowering.cpp --- a/llvm/lib/Target/VE/VEISelLowering.cpp +++ b/llvm/lib/Target/VE/VEISelLowering.cpp @@ -102,9 +102,8 @@ // VE doesn't have i1 sign extending load. for (MVT VT : MVT::integer_valuetypes()) { - setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote); - setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote); - setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote); + setLoadExtAction({ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}, VT, MVT::i1, + Promote); setTruncStoreAction(VT, MVT::i1, Expand); } @@ -117,35 +116,29 @@ } // VE doesn't have fp128 load/store, so expand them in custom lower. - setOperationAction(ISD::LOAD, MVT::f128, Custom); - setOperationAction(ISD::STORE, MVT::f128, Custom); + setOperationAction({ISD::LOAD, ISD::STORE}, MVT::f128, Custom); /// } Load & Store // Custom legalize address nodes into LO/HI parts. MVT PtrVT = MVT::getIntegerVT(TM.getPointerSizeInBits(0)); - setOperationAction(ISD::BlockAddress, PtrVT, Custom); - setOperationAction(ISD::GlobalAddress, PtrVT, Custom); - setOperationAction(ISD::GlobalTLSAddress, PtrVT, Custom); - setOperationAction(ISD::ConstantPool, PtrVT, Custom); - setOperationAction(ISD::JumpTable, PtrVT, Custom); + setOperationAction({ISD::BlockAddress, ISD::GlobalAddress, + ISD::GlobalTLSAddress, ISD::ConstantPool, ISD::JumpTable}, + PtrVT, Custom); /// VAARG handling { setOperationAction(ISD::VASTART, MVT::Other, Custom); // VAARG needs to be lowered to access with 8 bytes alignment. setOperationAction(ISD::VAARG, MVT::Other, Custom); // Use the default implementation. 
- setOperationAction(ISD::VACOPY, MVT::Other, Expand); - setOperationAction(ISD::VAEND, MVT::Other, Expand); + setOperationAction({ISD::VACOPY, ISD::VAEND}, MVT::Other, Expand); /// } VAARG handling /// Stack { - setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom); - setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Custom); + setOperationAction(ISD::DYNAMIC_STACKALLOC, {MVT::i32, MVT::i64}, Custom); // Use the default implementation. - setOperationAction(ISD::STACKSAVE, MVT::Other, Expand); - setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand); + setOperationAction({ISD::STACKSAVE, ISD::STACKRESTORE}, MVT::Other, Expand); /// } Stack /// Branch { @@ -161,27 +154,20 @@ /// Int Ops { for (MVT IntVT : {MVT::i32, MVT::i64}) { // VE has no REM or DIVREM operations. - setOperationAction(ISD::UREM, IntVT, Expand); - setOperationAction(ISD::SREM, IntVT, Expand); - setOperationAction(ISD::SDIVREM, IntVT, Expand); - setOperationAction(ISD::UDIVREM, IntVT, Expand); + setOperationAction({ISD::UREM, ISD::SREM, ISD::SDIVREM, ISD::UDIVREM}, + IntVT, Expand); // VE has no SHL_PARTS/SRA_PARTS/SRL_PARTS operations. - setOperationAction(ISD::SHL_PARTS, IntVT, Expand); - setOperationAction(ISD::SRA_PARTS, IntVT, Expand); - setOperationAction(ISD::SRL_PARTS, IntVT, Expand); + setOperationAction({ISD::SHL_PARTS, ISD::SRA_PARTS, ISD::SRL_PARTS}, IntVT, + Expand); // VE has no MULHU/S or U/SMUL_LOHI operations. // TODO: Use MPD instruction to implement SMUL_LOHI for i32 type. - setOperationAction(ISD::MULHU, IntVT, Expand); - setOperationAction(ISD::MULHS, IntVT, Expand); - setOperationAction(ISD::UMUL_LOHI, IntVT, Expand); - setOperationAction(ISD::SMUL_LOHI, IntVT, Expand); + setOperationAction({ISD::MULHU, ISD::MULHS, ISD::UMUL_LOHI, ISD::SMUL_LOHI}, + IntVT, Expand); // VE has no CTTZ, ROTL, ROTR operations. 
- setOperationAction(ISD::CTTZ, IntVT, Expand); - setOperationAction(ISD::ROTL, IntVT, Expand); - setOperationAction(ISD::ROTR, IntVT, Expand); + setOperationAction({ISD::CTTZ, ISD::ROTL, ISD::ROTR}, IntVT, Expand); // VE has 64 bits instruction which works as i64 BSWAP operation. This // instruction works fine as i32 BSWAP operation with an additional @@ -191,31 +177,25 @@ // VE has only 64 bits instructions which work as i64 BITREVERSE/CTLZ/CTPOP // operations. Use isel patterns for i64, promote for i32. LegalizeAction Act = (IntVT == MVT::i32) ? Promote : Legal; - setOperationAction(ISD::BITREVERSE, IntVT, Act); - setOperationAction(ISD::CTLZ, IntVT, Act); - setOperationAction(ISD::CTLZ_ZERO_UNDEF, IntVT, Act); - setOperationAction(ISD::CTPOP, IntVT, Act); + setOperationAction( + {ISD::BITREVERSE, ISD::CTLZ, ISD::CTLZ_ZERO_UNDEF, ISD::CTPOP}, IntVT, + Act); // VE has only 64 bits instructions which work as i64 AND/OR/XOR operations. // Use isel patterns for i64, promote for i32. - setOperationAction(ISD::AND, IntVT, Act); - setOperationAction(ISD::OR, IntVT, Act); - setOperationAction(ISD::XOR, IntVT, Act); + setOperationAction({ISD::AND, ISD::OR, ISD::XOR}, IntVT, Act); } /// } Int Ops /// Conversion { // VE doesn't have instructions for fp<->uint, so expand them by llvm - setOperationAction(ISD::FP_TO_UINT, MVT::i32, Promote); // use i64 - setOperationAction(ISD::UINT_TO_FP, MVT::i32, Promote); // use i64 - setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand); - setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand); + setOperationAction({ISD::FP_TO_UINT, ISD::UINT_TO_FP}, MVT::i32, + Promote); // use i64 + setOperationAction({ISD::FP_TO_UINT, ISD::UINT_TO_FP}, MVT::i64, Expand); // fp16 not supported - for (MVT FPVT : MVT::fp_valuetypes()) { - setOperationAction(ISD::FP16_TO_FP, FPVT, Expand); - setOperationAction(ISD::FP_TO_FP16, FPVT, Expand); - } + for (MVT FPVT : MVT::fp_valuetypes()) + setOperationAction({ISD::FP16_TO_FP, ISD::FP_TO_FP16}, FPVT, Expand); 
/// } Conversion /// Floating-point Ops { @@ -223,30 +203,23 @@ /// and fcmp. // VE doesn't have following floating point operations. - for (MVT VT : MVT::fp_valuetypes()) { - setOperationAction(ISD::FNEG, VT, Expand); - setOperationAction(ISD::FREM, VT, Expand); - } + for (MVT VT : MVT::fp_valuetypes()) + setOperationAction({ISD::FNEG, ISD::FREM}, VT, Expand); // VE doesn't have fdiv of f128. setOperationAction(ISD::FDIV, MVT::f128, Expand); - for (MVT FPVT : {MVT::f32, MVT::f64}) { - // f32 and f64 uses ConstantFP. f128 uses ConstantPool. - setOperationAction(ISD::ConstantFP, FPVT, Legal); - } + // f32 and f64 uses ConstantFP. f128 uses ConstantPool. + setOperationAction(ISD::ConstantFP, {MVT::f32, MVT::f64}, Legal); /// } Floating-point Ops /// Floating-point math functions { // VE doesn't have following floating point math functions. - for (MVT VT : MVT::fp_valuetypes()) { - setOperationAction(ISD::FABS, VT, Expand); - setOperationAction(ISD::FCOPYSIGN, VT, Expand); - setOperationAction(ISD::FCOS, VT, Expand); - setOperationAction(ISD::FSIN, VT, Expand); - setOperationAction(ISD::FSQRT, VT, Expand); - } + for (MVT VT : MVT::fp_valuetypes()) + setOperationAction( + {ISD::FABS, ISD::FCOPYSIGN, ISD::FCOS, ISD::FSIN, ISD::FSQRT}, VT, + Expand); /// } Floating-point math functions @@ -265,20 +238,16 @@ setOperationAction(ISD::ATOMIC_SWAP, VT, Custom); // FIXME: Support "atmam" instructions. - setOperationAction(ISD::ATOMIC_LOAD_ADD, VT, Expand); - setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Expand); - setOperationAction(ISD::ATOMIC_LOAD_AND, VT, Expand); - setOperationAction(ISD::ATOMIC_LOAD_OR, VT, Expand); + setOperationAction({ISD::ATOMIC_LOAD_ADD, ISD::ATOMIC_LOAD_SUB, + ISD::ATOMIC_LOAD_AND, ISD::ATOMIC_LOAD_OR}, + VT, Expand); // VE doesn't have follwing instructions. 
- setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, VT, Expand); - setOperationAction(ISD::ATOMIC_LOAD_CLR, VT, Expand); - setOperationAction(ISD::ATOMIC_LOAD_XOR, VT, Expand); - setOperationAction(ISD::ATOMIC_LOAD_NAND, VT, Expand); - setOperationAction(ISD::ATOMIC_LOAD_MIN, VT, Expand); - setOperationAction(ISD::ATOMIC_LOAD_MAX, VT, Expand); - setOperationAction(ISD::ATOMIC_LOAD_UMIN, VT, Expand); - setOperationAction(ISD::ATOMIC_LOAD_UMAX, VT, Expand); + setOperationAction({ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, ISD::ATOMIC_LOAD_CLR, + ISD::ATOMIC_LOAD_XOR, ISD::ATOMIC_LOAD_NAND, + ISD::ATOMIC_LOAD_MIN, ISD::ATOMIC_LOAD_MAX, + ISD::ATOMIC_LOAD_UMIN, ISD::ATOMIC_LOAD_UMAX}, + VT, Expand); } /// } Atomic instructions @@ -304,8 +273,8 @@ for (MVT LegalVecVT : AllVectorVTs) { setOperationAction(ISD::BUILD_VECTOR, LegalVecVT, Custom); - setOperationAction(ISD::INSERT_VECTOR_ELT, LegalVecVT, Legal); - setOperationAction(ISD::EXTRACT_VECTOR_ELT, LegalVecVT, Legal); + setOperationAction({ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT}, + LegalVecVT, Legal); // Translate all vector instructions with legal element types to VVP_* // nodes. // TODO We will custom-widen into VVP_* nodes in the future. 
While we are @@ -315,15 +284,15 @@ setOperationAction(ISD::VP_OPC, LegalVecVT, Custom); #define ADD_VVP_OP(VVP_NAME, ISD_NAME) \ setOperationAction(ISD::ISD_NAME, LegalVecVT, Custom); - setOperationAction(ISD::EXPERIMENTAL_VP_STRIDED_LOAD, LegalVecVT, Custom); - setOperationAction(ISD::EXPERIMENTAL_VP_STRIDED_STORE, LegalVecVT, Custom); + setOperationAction( + {ISD::EXPERIMENTAL_VP_STRIDED_LOAD, ISD::EXPERIMENTAL_VP_STRIDED_STORE}, + LegalVecVT, Custom); #include "VVPNodes.def" } - for (MVT LegalPackedVT : AllPackedVTs) { - setOperationAction(ISD::INSERT_VECTOR_ELT, LegalPackedVT, Custom); - setOperationAction(ISD::EXTRACT_VECTOR_ELT, LegalPackedVT, Custom); - } + for (MVT LegalPackedVT : AllPackedVTs) + setOperationAction({ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT}, + LegalPackedVT, Custom); // vNt32, vNt64 ops (legal element types) for (MVT VT : MVT::vector_valuetypes()) { @@ -332,16 +301,12 @@ if (ElemBits != 32 && ElemBits != 64) continue; - for (unsigned MemOpc : {ISD::MLOAD, ISD::MSTORE, ISD::LOAD, ISD::STORE}) - setOperationAction(MemOpc, VT, Custom); - - const ISD::NodeType IntReductionOCs[] = { - ISD::VECREDUCE_ADD, ISD::VECREDUCE_MUL, ISD::VECREDUCE_AND, - ISD::VECREDUCE_OR, ISD::VECREDUCE_XOR, ISD::VECREDUCE_SMIN, - ISD::VECREDUCE_SMAX, ISD::VECREDUCE_UMIN, ISD::VECREDUCE_UMAX}; - - for (unsigned IntRedOpc : IntReductionOCs) - setOperationAction(IntRedOpc, VT, Custom); + setOperationAction( + {ISD::MLOAD, ISD::MSTORE, ISD::LOAD, ISD::STORE, ISD::VECREDUCE_ADD, + ISD::VECREDUCE_MUL, ISD::VECREDUCE_AND, ISD::VECREDUCE_OR, + ISD::VECREDUCE_XOR, ISD::VECREDUCE_SMIN, ISD::VECREDUCE_SMAX, + ISD::VECREDUCE_UMIN, ISD::VECREDUCE_UMAX}, + VT, Custom); } } diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp --- a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp +++ b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp @@ -78,39 +78,28 @@ // Transform loads and stores to 
pointers in address space 1 to loads and // stores to WebAssembly global variables, outside linear memory. - for (auto T : {MVT::i32, MVT::i64, MVT::f32, MVT::f64}) { - setOperationAction(ISD::LOAD, T, Custom); - setOperationAction(ISD::STORE, T, Custom); - } - if (Subtarget->hasSIMD128()) { - for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64, - MVT::v2f64}) { - setOperationAction(ISD::LOAD, T, Custom); - setOperationAction(ISD::STORE, T, Custom); - } - } - if (Subtarget->hasReferenceTypes()) { + for (auto T : {MVT::i32, MVT::i64, MVT::f32, MVT::f64}) + setOperationAction({ISD::LOAD, ISD::STORE}, T, Custom); + if (Subtarget->hasSIMD128()) + setOperationAction({ISD::LOAD, ISD::STORE}, + {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, + MVT::v2i64, MVT::v2f64}, + Custom); + if (Subtarget->hasReferenceTypes()) // We need custom load and store lowering for both externref, funcref and // Other. The MVT::Other here represents tables of reference types. - for (auto T : {MVT::externref, MVT::funcref, MVT::Other}) { - setOperationAction(ISD::LOAD, T, Custom); - setOperationAction(ISD::STORE, T, Custom); - } - } + setOperationAction({ISD::LOAD, ISD::STORE}, + {MVT::externref, MVT::funcref, MVT::Other}, Custom); - setOperationAction(ISD::GlobalAddress, MVTPtr, Custom); - setOperationAction(ISD::GlobalTLSAddress, MVTPtr, Custom); - setOperationAction(ISD::ExternalSymbol, MVTPtr, Custom); - setOperationAction(ISD::JumpTable, MVTPtr, Custom); - setOperationAction(ISD::BlockAddress, MVTPtr, Custom); + setOperationAction({ISD::GlobalAddress, ISD::GlobalTLSAddress, + ISD::ExternalSymbol, ISD::JumpTable, ISD::BlockAddress}, + MVTPtr, Custom); setOperationAction(ISD::BRIND, MVT::Other, Custom); // Take the default expansion for va_arg, va_copy, and va_end. There is no // default action for va_start, so we do that custom. 
setOperationAction(ISD::VASTART, MVT::Other, Custom); - setOperationAction(ISD::VAARG, MVT::Other, Expand); - setOperationAction(ISD::VACOPY, MVT::Other, Expand); - setOperationAction(ISD::VAEND, MVT::Other, Expand); + setOperationAction({ISD::VAARG, ISD::VACOPY, ISD::VAEND}, MVT::Other, Expand); for (auto T : {MVT::f32, MVT::f64, MVT::v4f32, MVT::v2f64}) { // Don't expand the floating-point types to constant pools. @@ -120,20 +109,18 @@ ISD::SETULT, ISD::SETULE, ISD::SETUGT, ISD::SETUGE}) setCondCodeAction(CC, T, Expand); // Expand floating-point library function operators. - for (auto Op : - {ISD::FSIN, ISD::FCOS, ISD::FSINCOS, ISD::FPOW, ISD::FREM, ISD::FMA}) - setOperationAction(Op, T, Expand); + setOperationAction( + {ISD::FSIN, ISD::FCOS, ISD::FSINCOS, ISD::FPOW, ISD::FREM, ISD::FMA}, T, + Expand); // Note supported floating-point library function operators that otherwise // default to expand. - for (auto Op : - {ISD::FCEIL, ISD::FFLOOR, ISD::FTRUNC, ISD::FNEARBYINT, ISD::FRINT}) - setOperationAction(Op, T, Legal); + setOperationAction( + {ISD::FCEIL, ISD::FFLOOR, ISD::FTRUNC, ISD::FNEARBYINT, ISD::FRINT}, T, + Legal); // Support minimum and maximum, which otherwise default to expand. - setOperationAction(ISD::FMINIMUM, T, Legal); - setOperationAction(ISD::FMAXIMUM, T, Legal); + setOperationAction({ISD::FMINIMUM, ISD::FMAXIMUM}, T, Legal); // WebAssembly currently has no builtin f16 support. 
- setOperationAction(ISD::FP16_TO_FP, T, Expand); - setOperationAction(ISD::FP_TO_FP16, T, Expand); + setOperationAction({ISD::FP16_TO_FP, ISD::FP_TO_FP16}, T, Expand); setLoadExtAction(ISD::EXTLOAD, T, MVT::f16, Expand); setTruncStoreAction(T, MVT::f16, Expand); } @@ -143,17 +130,15 @@ {ISD::BSWAP, ISD::SMUL_LOHI, ISD::UMUL_LOHI, ISD::MULHS, ISD::MULHU, ISD::SDIVREM, ISD::UDIVREM, ISD::SHL_PARTS, ISD::SRA_PARTS, ISD::SRL_PARTS, ISD::ADDC, ISD::ADDE, ISD::SUBC, ISD::SUBE}) { - for (auto T : {MVT::i32, MVT::i64}) - setOperationAction(Op, T, Expand); + setOperationAction(Op, {MVT::i32, MVT::i64}, Expand); if (Subtarget->hasSIMD128()) - for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64}) - setOperationAction(Op, T, Expand); + setOperationAction(Op, {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64}, + Expand); } if (Subtarget->hasNontrappingFPToInt()) - for (auto Op : {ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT}) - for (auto T : {MVT::i32, MVT::i64}) - setOperationAction(Op, T, Custom); + setOperationAction({ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT}, + {MVT::i32, MVT::i64}, Custom); // SIMD-specific configuration if (Subtarget->hasSIMD128()) { @@ -176,83 +161,79 @@ setTargetDAGCombine(ISD::TRUNCATE); // Support saturating add for i8x16 and i16x8 - for (auto Op : {ISD::SADDSAT, ISD::UADDSAT}) - for (auto T : {MVT::v16i8, MVT::v8i16}) - setOperationAction(Op, T, Legal); + setOperationAction({ISD::SADDSAT, ISD::UADDSAT}, {MVT::v16i8, MVT::v8i16}, + Legal); // Support integer abs - for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64}) - setOperationAction(ISD::ABS, T, Legal); + setOperationAction(ISD::ABS, + {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64}, Legal); // Custom lower BUILD_VECTORs to minimize number of replace_lanes - for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64, - MVT::v2f64}) - setOperationAction(ISD::BUILD_VECTOR, T, Custom); + setOperationAction(ISD::BUILD_VECTOR, + {MVT::v16i8, MVT::v8i16, MVT::v4i32, 
MVT::v4f32, + MVT::v2i64, MVT::v2f64}, + Custom); // We have custom shuffle lowering to expose the shuffle mask - for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64, - MVT::v2f64}) - setOperationAction(ISD::VECTOR_SHUFFLE, T, Custom); + setOperationAction(ISD::VECTOR_SHUFFLE, + {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, + MVT::v2i64, MVT::v2f64}, + Custom); // Custom lowering since wasm shifts must have a scalar shift amount - for (auto Op : {ISD::SHL, ISD::SRA, ISD::SRL}) - for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64}) - setOperationAction(Op, T, Custom); + setOperationAction({ISD::SHL, ISD::SRA, ISD::SRL}, + {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64}, + Custom); // Custom lower lane accesses to expand out variable indices - for (auto Op : {ISD::EXTRACT_VECTOR_ELT, ISD::INSERT_VECTOR_ELT}) - for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64, - MVT::v2f64}) - setOperationAction(Op, T, Custom); + setOperationAction({ISD::EXTRACT_VECTOR_ELT, ISD::INSERT_VECTOR_ELT}, + {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, + MVT::v2i64, MVT::v2f64}, + Custom); // There is no i8x16.mul instruction setOperationAction(ISD::MUL, MVT::v16i8, Expand); // There is no vector conditional select instruction - for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64, - MVT::v2f64}) - setOperationAction(ISD::SELECT_CC, T, Expand); + setOperationAction(ISD::SELECT_CC, + {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, + MVT::v2i64, MVT::v2f64}, + Expand); // Expand integer operations supported for scalars but not SIMD - for (auto Op : - {ISD::SDIV, ISD::UDIV, ISD::SREM, ISD::UREM, ISD::ROTL, ISD::ROTR}) - for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64}) - setOperationAction(Op, T, Expand); + setOperationAction( + {ISD::SDIV, ISD::UDIV, ISD::SREM, ISD::UREM, ISD::ROTL, ISD::ROTR}, + {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64}, Expand); // But we do have integer min and max 
operations - for (auto Op : {ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX}) - for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32}) - setOperationAction(Op, T, Legal); + setOperationAction({ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX}, + {MVT::v16i8, MVT::v8i16, MVT::v4i32}, Legal); // And we have popcnt for i8x16. It can be used to expand ctlz/cttz. setOperationAction(ISD::CTPOP, MVT::v16i8, Legal); - setOperationAction(ISD::CTLZ, MVT::v16i8, Expand); - setOperationAction(ISD::CTTZ, MVT::v16i8, Expand); + setOperationAction({ISD::CTLZ, ISD::CTTZ}, MVT::v16i8, Expand); // Custom lower bit counting operations for other types to scalarize them. - for (auto Op : {ISD::CTLZ, ISD::CTTZ, ISD::CTPOP}) - for (auto T : {MVT::v8i16, MVT::v4i32, MVT::v2i64}) - setOperationAction(Op, T, Custom); + setOperationAction({ISD::CTLZ, ISD::CTTZ, ISD::CTPOP}, + {MVT::v8i16, MVT::v4i32, MVT::v2i64}, Custom); // Expand float operations supported for scalars but not SIMD - for (auto Op : {ISD::FCOPYSIGN, ISD::FLOG, ISD::FLOG2, ISD::FLOG10, - ISD::FEXP, ISD::FEXP2, ISD::FRINT}) - for (auto T : {MVT::v4f32, MVT::v2f64}) - setOperationAction(Op, T, Expand); + setOperationAction({ISD::FCOPYSIGN, ISD::FLOG, ISD::FLOG2, ISD::FLOG10, + ISD::FEXP, ISD::FEXP2, ISD::FRINT}, + {MVT::v4f32, MVT::v2f64}, Expand); // Unsigned comparison operations are unavailable for i64x2 vectors. 
for (auto CC : {ISD::SETUGT, ISD::SETUGE, ISD::SETULT, ISD::SETULE}) setCondCodeAction(CC, MVT::v2i64, Custom); // 64x2 conversions are not in the spec - for (auto Op : - {ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::FP_TO_SINT, ISD::FP_TO_UINT}) - for (auto T : {MVT::v2i64, MVT::v2f64}) - setOperationAction(Op, T, Expand); + setOperationAction( + {ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::FP_TO_SINT, ISD::FP_TO_UINT}, + {MVT::v2i64, MVT::v2f64}, Expand); // But saturating fp_to_int converstions are - for (auto Op : {ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT}) - setOperationAction(Op, MVT::v4i32, Custom); + setOperationAction({ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT}, MVT::v4i32, + Custom); } // As a special case, these operators use the type to mean the type to @@ -268,18 +249,15 @@ setOperationAction(ISD::SIGN_EXTEND_INREG, T, Expand); // Dynamic stack allocation: use the default expansion. - setOperationAction(ISD::STACKSAVE, MVT::Other, Expand); - setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand); + setOperationAction({ISD::STACKSAVE, ISD::STACKRESTORE}, MVT::Other, Expand); setOperationAction(ISD::DYNAMIC_STACKALLOC, MVTPtr, Expand); - setOperationAction(ISD::FrameIndex, MVT::i32, Custom); - setOperationAction(ISD::FrameIndex, MVT::i64, Custom); + setOperationAction(ISD::FrameIndex, {MVT::i32, MVT::i64}, Custom); setOperationAction(ISD::CopyToReg, MVT::Other, Custom); // Expand these forms; we pattern-match the forms that we can handle in isel. - for (auto T : {MVT::i32, MVT::i64, MVT::f32, MVT::f64}) - for (auto Op : {ISD::BR_CC, ISD::SELECT_CC}) - setOperationAction(Op, T, Expand); + setOperationAction({ISD::BR_CC, ISD::SELECT_CC}, + {MVT::i32, MVT::i64, MVT::f32, MVT::f64}, Expand); // We have custom switch handling. 
setOperationAction(ISD::BR_JT, MVT::Other, Custom); @@ -292,16 +270,16 @@ setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand); setTruncStoreAction(MVT::f64, MVT::f32, Expand); for (auto T : MVT::integer_valuetypes()) - for (auto Ext : {ISD::EXTLOAD, ISD::ZEXTLOAD, ISD::SEXTLOAD}) - setLoadExtAction(Ext, T, MVT::i1, Promote); + setLoadExtAction({ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}, T, MVT::i1, + Promote); if (Subtarget->hasSIMD128()) { for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64, MVT::v4f32, MVT::v2f64}) { for (auto MemT : MVT::fixedlen_vector_valuetypes()) { if (MVT(T) != MemT) { setTruncStoreAction(T, MemT, Expand); - for (auto Ext : {ISD::EXTLOAD, ISD::ZEXTLOAD, ISD::SEXTLOAD}) - setLoadExtAction(Ext, T, MemT, Expand); + setLoadExtAction({ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}, T, + MemT, Expand); } } } @@ -318,13 +296,12 @@ setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand); // Trap lowers to wasm unreachable - setOperationAction(ISD::TRAP, MVT::Other, Legal); - setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal); + setOperationAction({ISD::TRAP, ISD::DEBUGTRAP}, MVT::Other, Legal); // Exception handling intrinsics - setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom); - setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom); - setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom); + setOperationAction( + {ISD::INTRINSIC_WO_CHAIN, ISD::INTRINSIC_W_CHAIN, ISD::INTRINSIC_VOID}, + MVT::Other, Custom); setMaxAtomicSizeInBitsSupported(64); diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp --- a/llvm/lib/Target/X86/X86ISelLowering.cpp +++ b/llvm/lib/Target/X86/X86ISelLowering.cpp @@ -159,8 +159,7 @@ if (Subtarget.getTargetTriple().isOSMSVCRT()) { // MSVCRT doesn't have powi; fall back to pow - setLibcallName(RTLIB::POWI_F32, nullptr); - setLibcallName(RTLIB::POWI_F64, nullptr); + setLibcallName({RTLIB::POWI_F32, RTLIB::POWI_F64}); } // If we don't have 
cmpxchg8b(meaing this is a 386/486), limit atomic size to @@ -191,23 +190,18 @@ setTruncStoreAction(MVT::f64, MVT::f32, Expand); // SETOEQ and SETUNE require checking two conditions. - for (auto VT : {MVT::f32, MVT::f64, MVT::f80}) { - setCondCodeAction(ISD::SETOEQ, VT, Expand); - setCondCodeAction(ISD::SETUNE, VT, Expand); - } + setCondCodeAction({ISD::SETOEQ, ISD::SETUNE}, {MVT::f32, MVT::f64, MVT::f80}, + Expand); // Integer absolute. if (Subtarget.canUseCMOV()) { - setOperationAction(ISD::ABS , MVT::i16 , Custom); - setOperationAction(ISD::ABS , MVT::i32 , Custom); + setOperationAction(ISD::ABS, {MVT::i16, MVT::i32}, Custom); if (Subtarget.is64Bit()) setOperationAction(ISD::ABS , MVT::i64 , Custom); } // Signed saturation subtraction. - setOperationAction(ISD::SSUBSAT , MVT::i8 , Custom); - setOperationAction(ISD::SSUBSAT , MVT::i16 , Custom); - setOperationAction(ISD::SSUBSAT , MVT::i32 , Custom); + setOperationAction(ISD::SSUBSAT, {MVT::i8, MVT::i16, MVT::i32}, Custom); if (Subtarget.is64Bit()) setOperationAction(ISD::SSUBSAT , MVT::i64 , Custom); @@ -216,8 +210,7 @@ // For slow shld targets we only lower for code size. LegalizeAction ShiftDoubleAction = Subtarget.isSHLDSlow() ? Custom : Legal; - setOperationAction(ShiftOp , MVT::i8 , Custom); - setOperationAction(ShiftOp , MVT::i16 , Custom); + setOperationAction(ShiftOp, {MVT::i8, MVT::i16}, Custom); setOperationAction(ShiftOp , MVT::i32 , ShiftDoubleAction); if (Subtarget.is64Bit()) setOperationAction(ShiftOp , MVT::i64 , ShiftDoubleAction); @@ -226,48 +219,44 @@ if (!Subtarget.useSoftFloat()) { // Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this // operation. 
- setOperationAction(ISD::UINT_TO_FP, MVT::i8, Promote); - setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i8, Promote); - setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote); - setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i16, Promote); + setOperationAction({ISD::UINT_TO_FP, ISD::STRICT_UINT_TO_FP}, + {MVT::i8, MVT::i16}, Promote); // We have an algorithm for SSE2, and we turn this into a 64-bit // FILD or VCVTUSI2SS/SD for other targets. - setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom); - setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i32, Custom); + setOperationAction({ISD::UINT_TO_FP, ISD::STRICT_UINT_TO_FP}, MVT::i32, + Custom); // We have an algorithm for SSE2->double, and we turn this into a // 64-bit FILD followed by conditional FADD for other targets. - setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom); - setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i64, Custom); + setOperationAction({ISD::UINT_TO_FP, ISD::STRICT_UINT_TO_FP}, MVT::i64, + Custom); // Promote i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have // this operation. - setOperationAction(ISD::SINT_TO_FP, MVT::i8, Promote); - setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i8, Promote); + setOperationAction({ISD::SINT_TO_FP, ISD::STRICT_SINT_TO_FP}, MVT::i8, + Promote); // SSE has no i16 to fp conversion, only i32. We promote in the handler // to allow f80 to use i16 and f64 to use i16 with sse1 only - setOperationAction(ISD::SINT_TO_FP, MVT::i16, Custom); - setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i16, Custom); + setOperationAction({ISD::SINT_TO_FP, ISD::STRICT_SINT_TO_FP}, MVT::i16, + Custom); // f32 and f64 cases are Legal with SSE1/SSE2, f80 case is not - setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom); - setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Custom); + setOperationAction({ISD::SINT_TO_FP, ISD::STRICT_SINT_TO_FP}, MVT::i32, + Custom); // In 32-bit mode these are custom lowered. 
In 64-bit mode F32 and F64 // are Legal, f80 is custom lowered. - setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom); - setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i64, Custom); + setOperationAction({ISD::SINT_TO_FP, ISD::STRICT_SINT_TO_FP}, MVT::i64, + Custom); // Promote i8 FP_TO_SINT to larger FP_TO_SINTS's, as X86 doesn't have // this operation. setOperationAction(ISD::FP_TO_SINT, MVT::i8, Promote); // FIXME: This doesn't generate invalid exception when it should. PR44019. setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i8, Promote); - setOperationAction(ISD::FP_TO_SINT, MVT::i16, Custom); - setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i16, Custom); - setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom); - setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom); + setOperationAction({ISD::FP_TO_SINT, ISD::STRICT_FP_TO_SINT}, + {MVT::i16, MVT::i32}, Custom); // In 32-bit mode these are custom lowered. In 64-bit mode F32 and F64 // are Legal, f80 is custom lowered. - setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom); - setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i64, Custom); + setOperationAction({ISD::FP_TO_SINT, ISD::STRICT_FP_TO_SINT}, MVT::i64, + Custom); // Handle FP_TO_UINT by promoting the destination to a larger signed // conversion. @@ -277,43 +266,31 @@ setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote); // FIXME: This doesn't generate invalid exception when it should. PR44019. 
setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i16, Promote); - setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom); - setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom); - setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom); - setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i64, Custom); + setOperationAction({ISD::FP_TO_UINT, ISD::STRICT_FP_TO_UINT}, + {MVT::i32, MVT::i64}, Custom); - setOperationAction(ISD::LRINT, MVT::f32, Custom); - setOperationAction(ISD::LRINT, MVT::f64, Custom); - setOperationAction(ISD::LLRINT, MVT::f32, Custom); - setOperationAction(ISD::LLRINT, MVT::f64, Custom); + setOperationAction({ISD::LRINT, ISD::LLRINT}, {MVT::f32, MVT::f64}, Custom); - if (!Subtarget.is64Bit()) { - setOperationAction(ISD::LRINT, MVT::i64, Custom); - setOperationAction(ISD::LLRINT, MVT::i64, Custom); - } + if (!Subtarget.is64Bit()) + setOperationAction({ISD::LRINT, ISD::LLRINT}, MVT::i64, Custom); } if (Subtarget.hasSSE2()) { // Custom lowering for saturating float to int conversions. // We handle promotion to larger result types manually. - for (MVT VT : { MVT::i8, MVT::i16, MVT::i32 }) { - setOperationAction(ISD::FP_TO_UINT_SAT, VT, Custom); - setOperationAction(ISD::FP_TO_SINT_SAT, VT, Custom); - } - if (Subtarget.is64Bit()) { - setOperationAction(ISD::FP_TO_UINT_SAT, MVT::i64, Custom); - setOperationAction(ISD::FP_TO_SINT_SAT, MVT::i64, Custom); - } + setOperationAction({ISD::FP_TO_UINT_SAT, ISD::FP_TO_SINT_SAT}, + {MVT::i8, MVT::i16, MVT::i32}, Custom); + if (Subtarget.is64Bit()) + setOperationAction({ISD::FP_TO_UINT_SAT, ISD::FP_TO_SINT_SAT}, MVT::i64, + Custom); } // Handle address space casts between mixed sized pointers. - setOperationAction(ISD::ADDRSPACECAST, MVT::i32, Custom); - setOperationAction(ISD::ADDRSPACECAST, MVT::i64, Custom); + setOperationAction(ISD::ADDRSPACECAST, {MVT::i32, MVT::i64}, Custom); // TODO: when we have SSE, these could be more efficient, by using movd/movq. 
if (!Subtarget.hasSSE2()) { - setOperationAction(ISD::BITCAST , MVT::f32 , Expand); - setOperationAction(ISD::BITCAST , MVT::i32 , Expand); + setOperationAction(ISD::BITCAST, {MVT::f32, MVT::i32}, Expand); if (Subtarget.is64Bit()) { setOperationAction(ISD::BITCAST , MVT::f64 , Expand); // Without SSE, i64->f64 goes through memory. @@ -332,32 +309,23 @@ // (low) operations are left as Legal, as there are single-result // instructions for this in x86. Using the two-result multiply instructions // when both high and low results are needed must be arranged by dagcombine. - for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) { - setOperationAction(ISD::MULHS, VT, Expand); - setOperationAction(ISD::MULHU, VT, Expand); - setOperationAction(ISD::SDIV, VT, Expand); - setOperationAction(ISD::UDIV, VT, Expand); - setOperationAction(ISD::SREM, VT, Expand); - setOperationAction(ISD::UREM, VT, Expand); - } + setOperationAction( + {ISD::MULHS, ISD::MULHU, ISD::SDIV, ISD::UDIV, ISD::SREM, ISD::UREM}, + {MVT::i8, MVT::i16, MVT::i32, MVT::i64}, Expand); setOperationAction(ISD::BR_JT , MVT::Other, Expand); setOperationAction(ISD::BRCOND , MVT::Other, Custom); - for (auto VT : { MVT::f32, MVT::f64, MVT::f80, MVT::f128, - MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) { - setOperationAction(ISD::BR_CC, VT, Expand); - setOperationAction(ISD::SELECT_CC, VT, Expand); - } + setOperationAction({ISD::BR_CC, ISD::SELECT_CC}, + {MVT::f32, MVT::f64, MVT::f80, MVT::f128, MVT::i8, + MVT::i16, MVT::i32, MVT::i64}, + Expand); if (Subtarget.is64Bit()) setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal); - setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16 , Legal); - setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8 , Legal); + setOperationAction(ISD::SIGN_EXTEND_INREG, {MVT::i16, MVT::i8}, Legal); setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1 , Expand); - setOperationAction(ISD::FREM , MVT::f32 , Expand); - setOperationAction(ISD::FREM , MVT::f64 , Expand); - 
setOperationAction(ISD::FREM , MVT::f80 , Expand); - setOperationAction(ISD::FREM , MVT::f128 , Expand); + setOperationAction(ISD::FREM, {MVT::f32, MVT::f64, MVT::f80, MVT::f128}, + Expand); if (!Subtarget.useSoftFloat() && Subtarget.hasX87()) { setOperationAction(ISD::FLT_ROUNDS_ , MVT::i32 , Custom); @@ -374,10 +342,8 @@ // is enabled. setOperationPromotedToType(ISD::CTTZ_ZERO_UNDEF, MVT::i16, MVT::i32); } else { - setOperationAction(ISD::CTTZ, MVT::i16, Custom); - setOperationAction(ISD::CTTZ , MVT::i32 , Custom); - setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16 , Legal); - setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32 , Legal); + setOperationAction(ISD::CTTZ, {MVT::i16, MVT::i32}, Custom); + setOperationAction(ISD::CTTZ_ZERO_UNDEF, {MVT::i16, MVT::i32}, Legal); if (Subtarget.is64Bit()) { setOperationAction(ISD::CTTZ , MVT::i64 , Custom); setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Legal); @@ -393,8 +359,7 @@ for (auto VT : {MVT::i8, MVT::i16, MVT::i32, MVT::i64}) { if (VT == MVT::i64 && !Subtarget.is64Bit()) continue; - setOperationAction(ISD::CTLZ , VT, Custom); - setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Custom); + setOperationAction({ISD::CTLZ, ISD::CTLZ_ZERO_UNDEF}, VT, Custom); } } @@ -407,9 +372,7 @@ Op, MVT::f32, (!Subtarget.useSoftFloat() && Subtarget.hasF16C()) ? Custom : Expand); // There's never any support for operations beyond MVT::f32. 
- setOperationAction(Op, MVT::f64, Expand); - setOperationAction(Op, MVT::f80, Expand); - setOperationAction(Op, MVT::f128, Expand); + setOperationAction(Op, {MVT::f64, MVT::f80, MVT::f128}, Expand); } setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand); @@ -421,9 +384,7 @@ setTruncStoreAction(MVT::f80, MVT::f16, Expand); setTruncStoreAction(MVT::f128, MVT::f16, Expand); - setOperationAction(ISD::PARITY, MVT::i8, Custom); - setOperationAction(ISD::PARITY, MVT::i16, Custom); - setOperationAction(ISD::PARITY, MVT::i32, Custom); + setOperationAction(ISD::PARITY, {MVT::i8, MVT::i16, MVT::i32}, Custom); if (Subtarget.is64Bit()) setOperationAction(ISD::PARITY, MVT::i64, Custom); if (Subtarget.hasPOPCNT()) { @@ -432,9 +393,7 @@ // on the dest that popcntl hasn't had since Cannon Lake. setOperationPromotedToType(ISD::CTPOP, MVT::i16, MVT::i32); } else { - setOperationAction(ISD::CTPOP , MVT::i8 , Expand); - setOperationAction(ISD::CTPOP , MVT::i16 , Expand); - setOperationAction(ISD::CTPOP , MVT::i32 , Expand); + setOperationAction(ISD::CTPOP, {MVT::i8, MVT::i16, MVT::i32}, Expand); if (Subtarget.is64Bit()) setOperationAction(ISD::CTPOP , MVT::i64 , Expand); else @@ -447,17 +406,13 @@ setOperationAction(ISD::BSWAP , MVT::i16 , Expand); // X86 wants to expand cmov itself. 
- for (auto VT : { MVT::f32, MVT::f64, MVT::f80, MVT::f128 }) { - setOperationAction(ISD::SELECT, VT, Custom); - setOperationAction(ISD::SETCC, VT, Custom); - setOperationAction(ISD::STRICT_FSETCC, VT, Custom); - setOperationAction(ISD::STRICT_FSETCCS, VT, Custom); - } + setOperationAction( + {ISD::SELECT, ISD::SETCC, ISD::STRICT_FSETCC, ISD::STRICT_FSETCCS}, + {MVT::f32, MVT::f64, MVT::f80, MVT::f128}, Custom); for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) { if (VT == MVT::i64 && !Subtarget.is64Bit()) continue; - setOperationAction(ISD::SELECT, VT, Custom); - setOperationAction(ISD::SETCC, VT, Custom); + setOperationAction({ISD::SELECT, ISD::SETCC}, VT, Custom); } // Custom action for SELECT MMX and expand action for SELECT_CC MMX @@ -468,8 +423,8 @@ // NOTE: EH_SJLJ_SETJMP/_LONGJMP are not recommended, since // LLVM/Clang supports zero-cost DWARF and SEH exception handling. setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom); - setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom); - setOperationAction(ISD::EH_SJLJ_SETUP_DISPATCH, MVT::Other, Custom); + setOperationAction({ISD::EH_SJLJ_LONGJMP, ISD::EH_SJLJ_SETUP_DISPATCH}, + MVT::Other, Custom); if (TM.Options.ExceptionModel == ExceptionHandling::SjLj) setLibcallName(RTLIB::UNWIND_RESUME, "_Unwind_SjLj_Resume"); @@ -477,21 +432,18 @@ for (auto VT : { MVT::i32, MVT::i64 }) { if (VT == MVT::i64 && !Subtarget.is64Bit()) continue; - setOperationAction(ISD::ConstantPool , VT, Custom); - setOperationAction(ISD::JumpTable , VT, Custom); - setOperationAction(ISD::GlobalAddress , VT, Custom); - setOperationAction(ISD::GlobalTLSAddress, VT, Custom); - setOperationAction(ISD::ExternalSymbol , VT, Custom); - setOperationAction(ISD::BlockAddress , VT, Custom); + setOperationAction({ISD::ConstantPool, ISD::JumpTable, ISD::GlobalAddress, + ISD::GlobalTLSAddress, ISD::ExternalSymbol, + ISD::BlockAddress}, + VT, Custom); } // 64-bit shl, sra, srl (iff 32-bit x86) for (auto VT : { MVT::i32, MVT::i64 }) { 
if (VT == MVT::i64 && !Subtarget.is64Bit()) continue; - setOperationAction(ISD::SHL_PARTS, VT, Custom); - setOperationAction(ISD::SRA_PARTS, VT, Custom); - setOperationAction(ISD::SRL_PARTS, VT, Custom); + setOperationAction({ISD::SHL_PARTS, ISD::SRA_PARTS, ISD::SRL_PARTS}, VT, + Custom); } if (Subtarget.hasSSEPrefetch() || Subtarget.hasThreeDNow()) @@ -500,15 +452,11 @@ setOperationAction(ISD::ATOMIC_FENCE , MVT::Other, Custom); // Expand certain atomics - for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) { - setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, VT, Custom); - setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Custom); - setOperationAction(ISD::ATOMIC_LOAD_ADD, VT, Custom); - setOperationAction(ISD::ATOMIC_LOAD_OR, VT, Custom); - setOperationAction(ISD::ATOMIC_LOAD_XOR, VT, Custom); - setOperationAction(ISD::ATOMIC_LOAD_AND, VT, Custom); - setOperationAction(ISD::ATOMIC_STORE, VT, Custom); - } + setOperationAction({ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, ISD::ATOMIC_LOAD_SUB, + ISD::ATOMIC_LOAD_ADD, ISD::ATOMIC_LOAD_OR, + ISD::ATOMIC_LOAD_XOR, ISD::ATOMIC_LOAD_AND, + ISD::ATOMIC_STORE}, + {MVT::i8, MVT::i16, MVT::i32, MVT::i64}, Custom); if (!Subtarget.is64Bit()) setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Custom); @@ -523,14 +471,12 @@ setOperationAction(ISD::EH_LABEL, MVT::Other, Expand); } - setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom); - setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i64, Custom); + setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, {MVT::i32, MVT::i64}, Custom); - setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom); - setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom); + setOperationAction({ISD::INIT_TRAMPOLINE, ISD::ADJUST_TRAMPOLINE}, MVT::Other, + Custom); - setOperationAction(ISD::TRAP, MVT::Other, Legal); - setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal); + setOperationAction({ISD::TRAP, ISD::DEBUGTRAP}, MVT::Other, Legal); if (Subtarget.getTargetTriple().isPS4()) 
setOperationAction(ISD::UBSANTRAP, MVT::Other, Expand); else @@ -540,17 +486,16 @@ setOperationAction(ISD::VASTART , MVT::Other, Custom); setOperationAction(ISD::VAEND , MVT::Other, Expand); bool Is64Bit = Subtarget.is64Bit(); - setOperationAction(ISD::VAARG, MVT::Other, Is64Bit ? Custom : Expand); - setOperationAction(ISD::VACOPY, MVT::Other, Is64Bit ? Custom : Expand); + setOperationAction({ISD::VAARG, ISD::VACOPY}, MVT::Other, + Is64Bit ? Custom : Expand); - setOperationAction(ISD::STACKSAVE, MVT::Other, Expand); - setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand); + setOperationAction({ISD::STACKSAVE, ISD::STACKRESTORE}, MVT::Other, Expand); setOperationAction(ISD::DYNAMIC_STACKALLOC, PtrVT, Custom); // GC_TRANSITION_START and GC_TRANSITION_END need custom lowering. - setOperationAction(ISD::GC_TRANSITION_START, MVT::Other, Custom); - setOperationAction(ISD::GC_TRANSITION_END, MVT::Other, Custom); + setOperationAction({ISD::GC_TRANSITION_START, ISD::GC_TRANSITION_END}, + MVT::Other, Custom); if (!Subtarget.useSoftFloat() && Subtarget.hasSSE2()) { // f32 and f64 use SSE. @@ -577,18 +522,14 @@ setOperationAction(ISD::FCOPYSIGN, VT, Custom); // These might be better off as horizontal vector ops. - setOperationAction(ISD::FADD, VT, Custom); - setOperationAction(ISD::FSUB, VT, Custom); + setOperationAction({ISD::FADD, ISD::FSUB}, VT, Custom); // We don't support sin/cos/fmod - setOperationAction(ISD::FSIN , VT, Expand); - setOperationAction(ISD::FCOS , VT, Expand); - setOperationAction(ISD::FSINCOS, VT, Expand); + setOperationAction({ISD::FSIN, ISD::FCOS, ISD::FSINCOS}, VT, Expand); } // Lower this to MOVMSK plus an AND. 
- setOperationAction(ISD::FGETSIGN, MVT::i64, Custom); - setOperationAction(ISD::FGETSIGN, MVT::i32, Custom); + setOperationAction(ISD::FGETSIGN, {MVT::i64, MVT::i32}, Custom); } else if (!Subtarget.useSoftFloat() && Subtarget.hasSSE1() && (UseX87 || Is64Bit)) { @@ -613,16 +554,12 @@ setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom); // We don't support sin/cos/fmod - setOperationAction(ISD::FSIN , MVT::f32, Expand); - setOperationAction(ISD::FCOS , MVT::f32, Expand); - setOperationAction(ISD::FSINCOS, MVT::f32, Expand); + setOperationAction({ISD::FSIN, ISD::FCOS, ISD::FSINCOS}, MVT::f32, Expand); - if (UseX87) { + if (UseX87) // Always expand sin/cos functions even though x87 has an instruction. - setOperationAction(ISD::FSIN, MVT::f64, Expand); - setOperationAction(ISD::FCOS, MVT::f64, Expand); - setOperationAction(ISD::FSINCOS, MVT::f64, Expand); - } + setOperationAction({ISD::FSIN, ISD::FCOS, ISD::FSINCOS}, MVT::f64, + Expand); } else if (UseX87) { // f32 and f64 in x87. // Set up the FP register classes. @@ -630,13 +567,10 @@ addRegisterClass(MVT::f32, &X86::RFP32RegClass); for (auto VT : { MVT::f32, MVT::f64 }) { - setOperationAction(ISD::UNDEF, VT, Expand); - setOperationAction(ISD::FCOPYSIGN, VT, Expand); + setOperationAction({ISD::UNDEF, ISD::FCOPYSIGN}, VT, Expand); // Always expand sin/cos functions even though x87 has an instruction. - setOperationAction(ISD::FSIN , VT, Expand); - setOperationAction(ISD::FCOS , VT, Expand); - setOperationAction(ISD::FSINCOS, VT, Expand); + setOperationAction({ISD::FSIN, ISD::FCOS, ISD::FSINCOS}, VT, Expand); } } @@ -661,29 +595,19 @@ addLegalFPImmediate(APFloat(+0.0)); // xorpd } // Handle constrained floating-point operations of scalar. 
- setOperationAction(ISD::STRICT_FADD, MVT::f32, Legal); - setOperationAction(ISD::STRICT_FADD, MVT::f64, Legal); - setOperationAction(ISD::STRICT_FSUB, MVT::f32, Legal); - setOperationAction(ISD::STRICT_FSUB, MVT::f64, Legal); - setOperationAction(ISD::STRICT_FMUL, MVT::f32, Legal); - setOperationAction(ISD::STRICT_FMUL, MVT::f64, Legal); - setOperationAction(ISD::STRICT_FDIV, MVT::f32, Legal); - setOperationAction(ISD::STRICT_FDIV, MVT::f64, Legal); + setOperationAction({ISD::STRICT_FADD, ISD::STRICT_FSUB, ISD::STRICT_FMUL, + ISD::STRICT_FDIV, ISD::STRICT_FP_ROUND, + ISD::STRICT_FSQRT}, + {MVT::f32, MVT::f64}, Legal); setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f64, Legal); - setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Legal); - setOperationAction(ISD::STRICT_FP_ROUND, MVT::f64, Legal); - setOperationAction(ISD::STRICT_FSQRT, MVT::f32, Legal); - setOperationAction(ISD::STRICT_FSQRT, MVT::f64, Legal); // We don't support FMA. - setOperationAction(ISD::FMA, MVT::f64, Expand); - setOperationAction(ISD::FMA, MVT::f32, Expand); + setOperationAction(ISD::FMA, {MVT::f64, MVT::f32}, Expand); // f80 always uses X87. if (UseX87) { addRegisterClass(MVT::f80, &X86::RFP80RegClass); - setOperationAction(ISD::UNDEF, MVT::f80, Expand); - setOperationAction(ISD::FCOPYSIGN, MVT::f80, Expand); + setOperationAction({ISD::UNDEF, ISD::FCOPYSIGN}, MVT::f80, Expand); { APFloat TmpFlt = APFloat::getZero(APFloat::x87DoubleExtended()); addLegalFPImmediate(TmpFlt); // FLD0 @@ -700,28 +624,18 @@ } // Always expand sin/cos functions even though x87 has an instruction. 
- setOperationAction(ISD::FSIN , MVT::f80, Expand); - setOperationAction(ISD::FCOS , MVT::f80, Expand); - setOperationAction(ISD::FSINCOS, MVT::f80, Expand); - - setOperationAction(ISD::FFLOOR, MVT::f80, Expand); - setOperationAction(ISD::FCEIL, MVT::f80, Expand); - setOperationAction(ISD::FTRUNC, MVT::f80, Expand); - setOperationAction(ISD::FRINT, MVT::f80, Expand); - setOperationAction(ISD::FNEARBYINT, MVT::f80, Expand); - setOperationAction(ISD::FMA, MVT::f80, Expand); - setOperationAction(ISD::LROUND, MVT::f80, Expand); - setOperationAction(ISD::LLROUND, MVT::f80, Expand); - setOperationAction(ISD::LRINT, MVT::f80, Custom); - setOperationAction(ISD::LLRINT, MVT::f80, Custom); + setOperationAction({ISD::FSIN, ISD::FCOS, ISD::FSINCOS, ISD::FFLOOR, + ISD::FCEIL, ISD::FTRUNC, ISD::FRINT, ISD::FNEARBYINT, + ISD::FMA, ISD::LROUND, ISD::LLROUND}, + MVT::f80, Expand); + + setOperationAction({ISD::LRINT, ISD::LLRINT}, MVT::f80, Custom); // Handle constrained floating-point operations of scalar. - setOperationAction(ISD::STRICT_FADD , MVT::f80, Legal); - setOperationAction(ISD::STRICT_FSUB , MVT::f80, Legal); - setOperationAction(ISD::STRICT_FMUL , MVT::f80, Legal); - setOperationAction(ISD::STRICT_FDIV , MVT::f80, Legal); - setOperationAction(ISD::STRICT_FSQRT , MVT::f80, Legal); - setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f80, Legal); + setOperationAction({ISD::STRICT_FADD, ISD::STRICT_FSUB, ISD::STRICT_FMUL, + ISD::STRICT_FDIV, ISD::STRICT_FSQRT, + ISD::STRICT_FP_EXTEND}, + MVT::f80, Legal); // FIXME: When the target is 64-bit, STRICT_FP_ROUND will be overwritten // as Custom. 
setOperationAction(ISD::STRICT_FP_ROUND, MVT::f80, Legal); @@ -734,47 +648,34 @@ addLegalFPImmediate(APFloat::getZero(APFloat::IEEEquad())); // xorps - setOperationAction(ISD::FADD, MVT::f128, LibCall); - setOperationAction(ISD::STRICT_FADD, MVT::f128, LibCall); - setOperationAction(ISD::FSUB, MVT::f128, LibCall); - setOperationAction(ISD::STRICT_FSUB, MVT::f128, LibCall); - setOperationAction(ISD::FDIV, MVT::f128, LibCall); - setOperationAction(ISD::STRICT_FDIV, MVT::f128, LibCall); - setOperationAction(ISD::FMUL, MVT::f128, LibCall); - setOperationAction(ISD::STRICT_FMUL, MVT::f128, LibCall); - setOperationAction(ISD::FMA, MVT::f128, LibCall); - setOperationAction(ISD::STRICT_FMA, MVT::f128, LibCall); - - setOperationAction(ISD::FABS, MVT::f128, Custom); - setOperationAction(ISD::FNEG, MVT::f128, Custom); - setOperationAction(ISD::FCOPYSIGN, MVT::f128, Custom); - - setOperationAction(ISD::FSIN, MVT::f128, LibCall); - setOperationAction(ISD::STRICT_FSIN, MVT::f128, LibCall); - setOperationAction(ISD::FCOS, MVT::f128, LibCall); - setOperationAction(ISD::STRICT_FCOS, MVT::f128, LibCall); - setOperationAction(ISD::FSINCOS, MVT::f128, LibCall); + setOperationAction({ISD::FADD, ISD::STRICT_FADD, ISD::FSUB, + ISD::STRICT_FSUB, ISD::FDIV, ISD::STRICT_FDIV, + ISD::FMUL, ISD::STRICT_FMUL, ISD::FMA, ISD::STRICT_FMA}, + MVT::f128, LibCall); + + setOperationAction({ISD::FABS, ISD::FNEG, ISD::FCOPYSIGN}, MVT::f128, + Custom); + + setOperationAction({ISD::FSIN, ISD::STRICT_FSIN, ISD::FCOS, + ISD::STRICT_FCOS, ISD::FSINCOS}, + MVT::f128, LibCall); // No STRICT_FSINCOS - setOperationAction(ISD::FSQRT, MVT::f128, LibCall); - setOperationAction(ISD::STRICT_FSQRT, MVT::f128, LibCall); + setOperationAction({ISD::FSQRT, ISD::STRICT_FSQRT}, MVT::f128, LibCall); - setOperationAction(ISD::FP_EXTEND, MVT::f128, Custom); - setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f128, Custom); + setOperationAction({ISD::FP_EXTEND, ISD::STRICT_FP_EXTEND}, MVT::f128, + Custom); // We need to custom 
handle any FP_ROUND with an f128 input, but // LegalizeDAG uses the result type to know when to run a custom handler. // So we have to list all legal floating point result types here. - if (isTypeLegal(MVT::f32)) { - setOperationAction(ISD::FP_ROUND, MVT::f32, Custom); - setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Custom); - } - if (isTypeLegal(MVT::f64)) { - setOperationAction(ISD::FP_ROUND, MVT::f64, Custom); - setOperationAction(ISD::STRICT_FP_ROUND, MVT::f64, Custom); - } - if (isTypeLegal(MVT::f80)) { - setOperationAction(ISD::FP_ROUND, MVT::f80, Custom); - setOperationAction(ISD::STRICT_FP_ROUND, MVT::f80, Custom); - } + if (isTypeLegal(MVT::f32)) + setOperationAction({ISD::FP_ROUND, ISD::STRICT_FP_ROUND}, MVT::f32, + Custom); + if (isTypeLegal(MVT::f64)) + setOperationAction({ISD::FP_ROUND, ISD::STRICT_FP_ROUND}, MVT::f64, + Custom); + if (isTypeLegal(MVT::f80)) + setOperationAction({ISD::FP_ROUND, ISD::STRICT_FP_ROUND}, MVT::f80, + Custom); setOperationAction(ISD::SETCC, MVT::f128, Custom); @@ -787,77 +688,63 @@ } // Always use a library call for pow. - setOperationAction(ISD::FPOW , MVT::f32 , Expand); - setOperationAction(ISD::FPOW , MVT::f64 , Expand); - setOperationAction(ISD::FPOW , MVT::f80 , Expand); - setOperationAction(ISD::FPOW , MVT::f128 , Expand); - - setOperationAction(ISD::FLOG, MVT::f80, Expand); - setOperationAction(ISD::FLOG2, MVT::f80, Expand); - setOperationAction(ISD::FLOG10, MVT::f80, Expand); - setOperationAction(ISD::FEXP, MVT::f80, Expand); - setOperationAction(ISD::FEXP2, MVT::f80, Expand); - setOperationAction(ISD::FMINNUM, MVT::f80, Expand); - setOperationAction(ISD::FMAXNUM, MVT::f80, Expand); + setOperationAction(ISD::FPOW, {MVT::f32, MVT::f64, MVT::f80, MVT::f128}, + Expand); + + setOperationAction({ISD::FLOG, ISD::FLOG2, ISD::FLOG10, ISD::FEXP, ISD::FEXP2, + ISD::FMINNUM, ISD::FMAXNUM}, + MVT::f80, Expand); // Some FP actions are always expanded for vector types. 
- for (auto VT : { MVT::v8f16, MVT::v16f16, MVT::v32f16, - MVT::v4f32, MVT::v8f32, MVT::v16f32, - MVT::v2f64, MVT::v4f64, MVT::v8f64 }) { - setOperationAction(ISD::FSIN, VT, Expand); - setOperationAction(ISD::FSINCOS, VT, Expand); - setOperationAction(ISD::FCOS, VT, Expand); - setOperationAction(ISD::FREM, VT, Expand); - setOperationAction(ISD::FCOPYSIGN, VT, Expand); - setOperationAction(ISD::FPOW, VT, Expand); - setOperationAction(ISD::FLOG, VT, Expand); - setOperationAction(ISD::FLOG2, VT, Expand); - setOperationAction(ISD::FLOG10, VT, Expand); - setOperationAction(ISD::FEXP, VT, Expand); - setOperationAction(ISD::FEXP2, VT, Expand); - } + setOperationAction( + {ISD::FSIN, ISD::FSINCOS, ISD::FCOS, ISD::FREM, ISD::FCOPYSIGN, ISD::FPOW, + ISD::FLOG, ISD::FLOG2, ISD::FLOG10, ISD::FEXP, ISD::FEXP2}, + {MVT::v8f16, MVT::v16f16, MVT::v32f16, MVT::v4f32, MVT::v8f32, + MVT::v16f32, MVT::v2f64, MVT::v4f64, MVT::v8f64}, + Expand); // First set operation action for all vector types to either promote // (for widening) or expand (for scalarization). Then we will selectively // turn on ones that can be effectively codegen'd. 
for (MVT VT : MVT::fixedlen_vector_valuetypes()) { - setOperationAction(ISD::SDIV, VT, Expand); - setOperationAction(ISD::UDIV, VT, Expand); - setOperationAction(ISD::SREM, VT, Expand); - setOperationAction(ISD::UREM, VT, Expand); - setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT,Expand); - setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand); - setOperationAction(ISD::EXTRACT_SUBVECTOR, VT,Expand); - setOperationAction(ISD::INSERT_SUBVECTOR, VT,Expand); - setOperationAction(ISD::FMA, VT, Expand); - setOperationAction(ISD::FFLOOR, VT, Expand); - setOperationAction(ISD::FCEIL, VT, Expand); - setOperationAction(ISD::FTRUNC, VT, Expand); - setOperationAction(ISD::FRINT, VT, Expand); - setOperationAction(ISD::FNEARBYINT, VT, Expand); - setOperationAction(ISD::SMUL_LOHI, VT, Expand); - setOperationAction(ISD::MULHS, VT, Expand); - setOperationAction(ISD::UMUL_LOHI, VT, Expand); - setOperationAction(ISD::MULHU, VT, Expand); - setOperationAction(ISD::SDIVREM, VT, Expand); - setOperationAction(ISD::UDIVREM, VT, Expand); - setOperationAction(ISD::CTPOP, VT, Expand); - setOperationAction(ISD::CTTZ, VT, Expand); - setOperationAction(ISD::CTLZ, VT, Expand); - setOperationAction(ISD::ROTL, VT, Expand); - setOperationAction(ISD::ROTR, VT, Expand); - setOperationAction(ISD::BSWAP, VT, Expand); - setOperationAction(ISD::SETCC, VT, Expand); - setOperationAction(ISD::FP_TO_UINT, VT, Expand); - setOperationAction(ISD::FP_TO_SINT, VT, Expand); - setOperationAction(ISD::UINT_TO_FP, VT, Expand); - setOperationAction(ISD::SINT_TO_FP, VT, Expand); - setOperationAction(ISD::SIGN_EXTEND_INREG, VT,Expand); - setOperationAction(ISD::TRUNCATE, VT, Expand); - setOperationAction(ISD::SIGN_EXTEND, VT, Expand); - setOperationAction(ISD::ZERO_EXTEND, VT, Expand); - setOperationAction(ISD::ANY_EXTEND, VT, Expand); - setOperationAction(ISD::SELECT_CC, VT, Expand); + setOperationAction({ISD::SDIV, + ISD::UDIV, + ISD::SREM, + ISD::UREM, + ISD::EXTRACT_VECTOR_ELT, + ISD::INSERT_VECTOR_ELT, + 
ISD::EXTRACT_SUBVECTOR, + ISD::INSERT_SUBVECTOR, + ISD::FMA, + ISD::FFLOOR, + ISD::FCEIL, + ISD::FTRUNC, + ISD::FRINT, + ISD::FNEARBYINT, + ISD::SMUL_LOHI, + ISD::MULHS, + ISD::UMUL_LOHI, + ISD::MULHU, + ISD::SDIVREM, + ISD::UDIVREM, + ISD::CTPOP, + ISD::CTTZ, + ISD::CTLZ, + ISD::ROTL, + ISD::ROTR, + ISD::BSWAP, + ISD::SETCC, + ISD::FP_TO_UINT, + ISD::FP_TO_SINT, + ISD::UINT_TO_FP, + ISD::SINT_TO_FP, + ISD::SIGN_EXTEND_INREG, + ISD::TRUNCATE, + ISD::SIGN_EXTEND, + ISD::ZERO_EXTEND, + ISD::ANY_EXTEND, + ISD::SELECT_CC}, + VT, Expand); for (MVT InnerVT : MVT::fixedlen_vector_valuetypes()) { setTruncStoreAction(InnerVT, VT, Expand); @@ -889,23 +776,16 @@ addRegisterClass(MVT::v4f32, Subtarget.hasVLX() ? &X86::VR128XRegClass : &X86::VR128RegClass); - setOperationAction(ISD::FNEG, MVT::v4f32, Custom); - setOperationAction(ISD::FABS, MVT::v4f32, Custom); - setOperationAction(ISD::FCOPYSIGN, MVT::v4f32, Custom); - setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom); - setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom); - setOperationAction(ISD::VSELECT, MVT::v4f32, Custom); - setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom); - setOperationAction(ISD::SELECT, MVT::v4f32, Custom); + setOperationAction({ISD::FNEG, ISD::FABS, ISD::FCOPYSIGN, ISD::BUILD_VECTOR, + ISD::VECTOR_SHUFFLE, ISD::VSELECT, + ISD::EXTRACT_VECTOR_ELT, ISD::SELECT}, + MVT::v4f32, Custom); - setOperationAction(ISD::LOAD, MVT::v2f32, Custom); - setOperationAction(ISD::STORE, MVT::v2f32, Custom); + setOperationAction({ISD::LOAD, ISD::STORE}, MVT::v2f32, Custom); - setOperationAction(ISD::STRICT_FADD, MVT::v4f32, Legal); - setOperationAction(ISD::STRICT_FSUB, MVT::v4f32, Legal); - setOperationAction(ISD::STRICT_FMUL, MVT::v4f32, Legal); - setOperationAction(ISD::STRICT_FDIV, MVT::v4f32, Legal); - setOperationAction(ISD::STRICT_FSQRT, MVT::v4f32, Legal); + setOperationAction({ISD::STRICT_FADD, ISD::STRICT_FSUB, ISD::STRICT_FMUL, + ISD::STRICT_FDIV, ISD::STRICT_FSQRT}, + 
MVT::v4f32, Legal); } if (!Subtarget.useSoftFloat() && Subtarget.hasSSE2()) { @@ -923,234 +803,165 @@ addRegisterClass(MVT::v2i64, Subtarget.hasVLX() ? &X86::VR128XRegClass : &X86::VR128RegClass); - for (auto VT : { MVT::v2i8, MVT::v4i8, MVT::v8i8, - MVT::v2i16, MVT::v4i16, MVT::v2i32 }) { - setOperationAction(ISD::SDIV, VT, Custom); - setOperationAction(ISD::SREM, VT, Custom); - setOperationAction(ISD::UDIV, VT, Custom); - setOperationAction(ISD::UREM, VT, Custom); - } - - setOperationAction(ISD::MUL, MVT::v2i8, Custom); - setOperationAction(ISD::MUL, MVT::v4i8, Custom); - setOperationAction(ISD::MUL, MVT::v8i8, Custom); - - setOperationAction(ISD::MUL, MVT::v16i8, Custom); - setOperationAction(ISD::MUL, MVT::v4i32, Custom); - setOperationAction(ISD::MUL, MVT::v2i64, Custom); - setOperationAction(ISD::MULHU, MVT::v4i32, Custom); - setOperationAction(ISD::MULHS, MVT::v4i32, Custom); - setOperationAction(ISD::MULHU, MVT::v16i8, Custom); - setOperationAction(ISD::MULHS, MVT::v16i8, Custom); - setOperationAction(ISD::MULHU, MVT::v8i16, Legal); - setOperationAction(ISD::MULHS, MVT::v8i16, Legal); - setOperationAction(ISD::MUL, MVT::v8i16, Legal); - setOperationAction(ISD::AVGCEILU, MVT::v16i8, Legal); - setOperationAction(ISD::AVGCEILU, MVT::v8i16, Legal); - - setOperationAction(ISD::SMULO, MVT::v16i8, Custom); - setOperationAction(ISD::UMULO, MVT::v16i8, Custom); - - setOperationAction(ISD::FNEG, MVT::v2f64, Custom); - setOperationAction(ISD::FABS, MVT::v2f64, Custom); - setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Custom); - - for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) { - setOperationAction(ISD::SMAX, VT, VT == MVT::v8i16 ? Legal : Custom); - setOperationAction(ISD::SMIN, VT, VT == MVT::v8i16 ? Legal : Custom); - setOperationAction(ISD::UMAX, VT, VT == MVT::v16i8 ? Legal : Custom); - setOperationAction(ISD::UMIN, VT, VT == MVT::v16i8 ? 
Legal : Custom); - } - - setOperationAction(ISD::UADDSAT, MVT::v16i8, Legal); - setOperationAction(ISD::SADDSAT, MVT::v16i8, Legal); - setOperationAction(ISD::USUBSAT, MVT::v16i8, Legal); - setOperationAction(ISD::SSUBSAT, MVT::v16i8, Legal); - setOperationAction(ISD::UADDSAT, MVT::v8i16, Legal); - setOperationAction(ISD::SADDSAT, MVT::v8i16, Legal); - setOperationAction(ISD::USUBSAT, MVT::v8i16, Legal); - setOperationAction(ISD::SSUBSAT, MVT::v8i16, Legal); - setOperationAction(ISD::USUBSAT, MVT::v4i32, Custom); - setOperationAction(ISD::USUBSAT, MVT::v2i64, Custom); - - setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom); - setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom); - setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom); - setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom); - - for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) { - setOperationAction(ISD::SETCC, VT, Custom); - setOperationAction(ISD::STRICT_FSETCC, VT, Custom); - setOperationAction(ISD::STRICT_FSETCCS, VT, Custom); - setOperationAction(ISD::CTPOP, VT, Custom); - setOperationAction(ISD::ABS, VT, Custom); + setOperationAction( + {ISD::SDIV, ISD::SREM, ISD::UDIV, ISD::UREM}, + {MVT::v2i8, MVT::v4i8, MVT::v8i8, MVT::v2i16, MVT::v4i16, MVT::v2i32}, + Custom); - // The condition codes aren't legal in SSE/AVX and under AVX512 we use - // setcc all the way to isel and prefer SETGT in some isel patterns. 
- setCondCodeAction(ISD::SETLT, VT, Custom); - setCondCodeAction(ISD::SETLE, VT, Custom); - } + setOperationAction(ISD::MUL, + {MVT::v2i8, MVT::v4i8, MVT::v8i8, MVT::v2i64, MVT::v4i32, + MVT::v8i16, MVT::v16i8}, + Custom); + + setOperationAction({ISD::MULHU, ISD::MULHS}, {MVT::v4i32, MVT::v16i8}, + Custom); + setOperationAction({ISD::MULHU, ISD::MULHS, ISD::MUL}, MVT::v8i16, Legal); + setOperationAction(ISD::AVGCEILU, {MVT::v16i8, MVT::v8i16}, Legal); + + setOperationAction({ISD::SMULO, ISD::UMULO}, MVT::v16i8, Custom); - for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32 }) { - setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom); - setOperationAction(ISD::BUILD_VECTOR, VT, Custom); - setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom); - setOperationAction(ISD::VSELECT, VT, Custom); - setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom); + setOperationAction({ISD::FNEG, ISD::FABS, ISD::FCOPYSIGN}, MVT::v2f64, + Custom); + + for (auto VT : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64}) { + setOperationAction({ISD::SMAX, ISD::SMIN}, VT, + VT == MVT::v8i16 ? Legal : Custom); + setOperationAction({ISD::UMAX, ISD::UMIN}, VT, + VT == MVT::v16i8 ? Legal : Custom); } - for (auto VT : { MVT::v2f64, MVT::v2i64 }) { - setOperationAction(ISD::BUILD_VECTOR, VT, Custom); - setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom); - setOperationAction(ISD::VSELECT, VT, Custom); + setOperationAction({ISD::UADDSAT, ISD::SADDSAT, ISD::USUBSAT, ISD::SSUBSAT}, + {MVT::v16i8, MVT::v8i16}, Legal); + setOperationAction(ISD::USUBSAT, {MVT::v4i32, MVT::v2i64}, Custom); - if (VT == MVT::v2i64 && !Subtarget.is64Bit()) - continue; + setOperationAction(ISD::INSERT_VECTOR_ELT, + {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32}, + Custom); - setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom); - setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom); - } - - // Custom lower v2i64 and v2f64 selects. 
- setOperationAction(ISD::SELECT, MVT::v2f64, Custom); - setOperationAction(ISD::SELECT, MVT::v2i64, Custom); - setOperationAction(ISD::SELECT, MVT::v4i32, Custom); - setOperationAction(ISD::SELECT, MVT::v8i16, Custom); - setOperationAction(ISD::SELECT, MVT::v16i8, Custom); - - setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal); - setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Custom); - setOperationAction(ISD::FP_TO_SINT, MVT::v2i32, Custom); - setOperationAction(ISD::FP_TO_UINT, MVT::v2i32, Custom); - setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v4i32, Legal); - setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v2i32, Custom); - - // Custom legalize these to avoid over promotion or custom promotion. - for (auto VT : {MVT::v2i8, MVT::v4i8, MVT::v8i8, MVT::v2i16, MVT::v4i16}) { - setOperationAction(ISD::FP_TO_SINT, VT, Custom); - setOperationAction(ISD::FP_TO_UINT, VT, Custom); - setOperationAction(ISD::STRICT_FP_TO_SINT, VT, Custom); - setOperationAction(ISD::STRICT_FP_TO_UINT, VT, Custom); - } - - setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal); - setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v4i32, Legal); - setOperationAction(ISD::SINT_TO_FP, MVT::v2i32, Custom); - setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v2i32, Custom); - - setOperationAction(ISD::UINT_TO_FP, MVT::v2i32, Custom); - setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v2i32, Custom); - - setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Custom); - setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4i32, Custom); - - // Fast v2f32 UINT_TO_FP( v2i32 ) custom conversion. 
- setOperationAction(ISD::SINT_TO_FP, MVT::v2f32, Custom); - setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v2f32, Custom); - setOperationAction(ISD::UINT_TO_FP, MVT::v2f32, Custom); - setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v2f32, Custom); - - setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Custom); - setOperationAction(ISD::STRICT_FP_EXTEND, MVT::v2f32, Custom); - setOperationAction(ISD::FP_ROUND, MVT::v2f32, Custom); - setOperationAction(ISD::STRICT_FP_ROUND, MVT::v2f32, Custom); - - // We want to legalize this to an f64 load rather than an i64 load on - // 64-bit targets and two 32-bit loads on a 32-bit target. Similar for - // store. - setOperationAction(ISD::LOAD, MVT::v2i32, Custom); - setOperationAction(ISD::LOAD, MVT::v4i16, Custom); - setOperationAction(ISD::LOAD, MVT::v8i8, Custom); - setOperationAction(ISD::STORE, MVT::v2i32, Custom); - setOperationAction(ISD::STORE, MVT::v4i16, Custom); - setOperationAction(ISD::STORE, MVT::v8i8, Custom); - - setOperationAction(ISD::BITCAST, MVT::v2i32, Custom); - setOperationAction(ISD::BITCAST, MVT::v4i16, Custom); - setOperationAction(ISD::BITCAST, MVT::v8i8, Custom); - if (!Subtarget.hasAVX512()) - setOperationAction(ISD::BITCAST, MVT::v16i1, Custom); + for (auto VT : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64}) { + setOperationAction({ISD::SETCC, ISD::STRICT_FSETCC, ISD::STRICT_FSETCCS, + ISD::CTPOP, ISD::ABS}, + VT, Custom); - setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v2i64, Custom); - setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v4i32, Custom); - setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v8i16, Custom); + // The condition codes aren't legal in SSE/AVX and under AVX512 we use + // setcc all the way to isel and prefer SETGT in some isel patterns. 
+ setCondCodeAction({ISD::SETLT, ISD::SETLE}, VT, Custom); + } - setOperationAction(ISD::SIGN_EXTEND, MVT::v4i64, Custom); + setOperationAction({ISD::SCALAR_TO_VECTOR, ISD::BUILD_VECTOR, + ISD::VECTOR_SHUFFLE, ISD::VSELECT, + ISD::EXTRACT_VECTOR_ELT}, + {MVT::v16i8, MVT::v8i16, MVT::v4i32}, Custom); - setOperationAction(ISD::TRUNCATE, MVT::v2i8, Custom); - setOperationAction(ISD::TRUNCATE, MVT::v2i16, Custom); - setOperationAction(ISD::TRUNCATE, MVT::v2i32, Custom); - setOperationAction(ISD::TRUNCATE, MVT::v4i8, Custom); - setOperationAction(ISD::TRUNCATE, MVT::v4i16, Custom); - setOperationAction(ISD::TRUNCATE, MVT::v8i8, Custom); + for (auto VT : {MVT::v2f64, MVT::v2i64}) { + setOperationAction( + {ISD::BUILD_VECTOR, ISD::VECTOR_SHUFFLE, ISD::VSELECT}, VT, Custom); - // In the customized shift lowering, the legal v4i32/v2i64 cases - // in AVX2 will be recognized. - for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) { - setOperationAction(ISD::SRL, VT, Custom); - setOperationAction(ISD::SHL, VT, Custom); - setOperationAction(ISD::SRA, VT, Custom); - if (VT == MVT::v2i64) continue; - setOperationAction(ISD::ROTL, VT, Custom); - setOperationAction(ISD::ROTR, VT, Custom); - setOperationAction(ISD::FSHL, VT, Custom); - setOperationAction(ISD::FSHR, VT, Custom); - } + if (VT == MVT::v2i64 && !Subtarget.is64Bit()) + continue; + + setOperationAction({ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT}, + VT, Custom); + } + + // Custom lower v2i64 and v2f64 selects. 
+ setOperationAction( + ISD::SELECT, + {MVT::v2f64, MVT::v2i64, MVT::v4i32, MVT::v8i16, MVT::v16i8}, Custom); + + setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal); + setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Custom); + setOperationAction({ISD::FP_TO_SINT, ISD::FP_TO_UINT}, MVT::v2i32, + Custom); + setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v4i32, Legal); + setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v2i32, Custom); + + // Custom legalize these to avoid over promotion or custom promotion. + setOperationAction( + {ISD::FP_TO_SINT, ISD::FP_TO_UINT, ISD::STRICT_FP_TO_SINT, + ISD::STRICT_FP_TO_UINT}, + {MVT::v2i8, MVT::v4i8, MVT::v8i8, MVT::v2i16, MVT::v4i16}, Custom); + + setOperationAction({ISD::SINT_TO_FP, ISD::STRICT_SINT_TO_FP}, MVT::v4i32, + Legal); + setOperationAction({ISD::SINT_TO_FP, ISD::STRICT_SINT_TO_FP}, MVT::v2i32, + Custom); + + setOperationAction({ISD::UINT_TO_FP, ISD::STRICT_UINT_TO_FP}, + {MVT::v2i32, MVT::v4i32}, Custom); + + // Fast v2f32 UINT_TO_FP( v2i32 ) custom conversion. + setOperationAction({ISD::SINT_TO_FP, ISD::STRICT_SINT_TO_FP, + ISD::UINT_TO_FP, ISD::STRICT_UINT_TO_FP}, + MVT::v2f32, Custom); + + setOperationAction({ISD::FP_EXTEND, ISD::STRICT_FP_EXTEND, ISD::FP_ROUND, + ISD::STRICT_FP_ROUND}, + MVT::v2f32, Custom); + + // We want to legalize this to an f64 load rather than an i64 load on + // 64-bit targets and two 32-bit loads on a 32-bit target. Similar for + // store. 
+ setOperationAction({ISD::LOAD, ISD::STORE}, + {MVT::v2i32, MVT::v4i16, MVT::v8i8}, Custom); + + setOperationAction(ISD::BITCAST, {MVT::v2i32, MVT::v4i16, MVT::v8i8}, + Custom); + if (!Subtarget.hasAVX512()) + setOperationAction(ISD::BITCAST, MVT::v16i1, Custom); + + setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, + {MVT::v2i64, MVT::v4i32, MVT::v8i16}, Custom); + + setOperationAction(ISD::SIGN_EXTEND, MVT::v4i64, Custom); + + setOperationAction( + ISD::TRUNCATE, + {MVT::v2i8, MVT::v2i16, MVT::v2i32, MVT::v4i8, MVT::v4i16, MVT::v8i8}, + Custom); + + // In the customized shift lowering, the legal v4i32/v2i64 cases + // in AVX2 will be recognized. + for (auto VT : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64}) { + setOperationAction({ISD::SRL, ISD::SHL, ISD::SRA}, VT, Custom); + if (VT == MVT::v2i64) + continue; + setOperationAction({ISD::ROTL, ISD::ROTR, ISD::FSHL, ISD::FSHR}, VT, + Custom); + } - setOperationAction(ISD::STRICT_FSQRT, MVT::v2f64, Legal); - setOperationAction(ISD::STRICT_FADD, MVT::v2f64, Legal); - setOperationAction(ISD::STRICT_FSUB, MVT::v2f64, Legal); - setOperationAction(ISD::STRICT_FMUL, MVT::v2f64, Legal); - setOperationAction(ISD::STRICT_FDIV, MVT::v2f64, Legal); + setOperationAction({ISD::STRICT_FSQRT, ISD::STRICT_FADD, ISD::STRICT_FSUB, + ISD::STRICT_FMUL, ISD::STRICT_FDIV}, + MVT::v2f64, Legal); } if (!Subtarget.useSoftFloat() && Subtarget.hasSSSE3()) { - setOperationAction(ISD::ABS, MVT::v16i8, Legal); - setOperationAction(ISD::ABS, MVT::v8i16, Legal); - setOperationAction(ISD::ABS, MVT::v4i32, Legal); + setOperationAction(ISD::ABS, {MVT::v16i8, MVT::v8i16, MVT::v4i32}, Legal); setOperationAction(ISD::BITREVERSE, MVT::v16i8, Custom); - setOperationAction(ISD::CTLZ, MVT::v16i8, Custom); - setOperationAction(ISD::CTLZ, MVT::v8i16, Custom); - setOperationAction(ISD::CTLZ, MVT::v4i32, Custom); - setOperationAction(ISD::CTLZ, MVT::v2i64, Custom); + setOperationAction( + ISD::CTLZ, {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64}, Custom); 
// These might be better off as horizontal vector ops. - setOperationAction(ISD::ADD, MVT::i16, Custom); - setOperationAction(ISD::ADD, MVT::i32, Custom); - setOperationAction(ISD::SUB, MVT::i16, Custom); - setOperationAction(ISD::SUB, MVT::i32, Custom); + setOperationAction({ISD::ADD, ISD::SUB}, {MVT::i16, MVT::i32}, Custom); } if (!Subtarget.useSoftFloat() && Subtarget.hasSSE41()) { for (MVT RoundedTy : {MVT::f32, MVT::f64, MVT::v4f32, MVT::v2f64}) { - setOperationAction(ISD::FFLOOR, RoundedTy, Legal); - setOperationAction(ISD::STRICT_FFLOOR, RoundedTy, Legal); - setOperationAction(ISD::FCEIL, RoundedTy, Legal); - setOperationAction(ISD::STRICT_FCEIL, RoundedTy, Legal); - setOperationAction(ISD::FTRUNC, RoundedTy, Legal); - setOperationAction(ISD::STRICT_FTRUNC, RoundedTy, Legal); - setOperationAction(ISD::FRINT, RoundedTy, Legal); - setOperationAction(ISD::STRICT_FRINT, RoundedTy, Legal); - setOperationAction(ISD::FNEARBYINT, RoundedTy, Legal); - setOperationAction(ISD::STRICT_FNEARBYINT, RoundedTy, Legal); - setOperationAction(ISD::FROUNDEVEN, RoundedTy, Legal); - setOperationAction(ISD::STRICT_FROUNDEVEN, RoundedTy, Legal); + setOperationAction({ISD::FFLOOR, ISD::STRICT_FFLOOR, ISD::FCEIL, + ISD::STRICT_FCEIL, ISD::FTRUNC, ISD::STRICT_FTRUNC, + ISD::FRINT, ISD::STRICT_FRINT, ISD::FNEARBYINT, + ISD::STRICT_FNEARBYINT, ISD::FROUNDEVEN, + ISD::STRICT_FROUNDEVEN}, + RoundedTy, Legal); setOperationAction(ISD::FROUND, RoundedTy, Custom); } - setOperationAction(ISD::SMAX, MVT::v16i8, Legal); - setOperationAction(ISD::SMAX, MVT::v4i32, Legal); - setOperationAction(ISD::UMAX, MVT::v8i16, Legal); - setOperationAction(ISD::UMAX, MVT::v4i32, Legal); - setOperationAction(ISD::SMIN, MVT::v16i8, Legal); - setOperationAction(ISD::SMIN, MVT::v4i32, Legal); - setOperationAction(ISD::UMIN, MVT::v8i16, Legal); - setOperationAction(ISD::UMIN, MVT::v4i32, Legal); + setOperationAction({ISD::SMAX, ISD::SMIN}, {MVT::v16i8, MVT::v4i32}, Legal); + setOperationAction({ISD::UMAX, 
ISD::UMIN}, {MVT::v8i16, MVT::v4i32}, Legal); setOperationAction(ISD::UADDSAT, MVT::v4i32, Custom); - setOperationAction(ISD::SADDSAT, MVT::v2i64, Custom); - setOperationAction(ISD::SSUBSAT, MVT::v2i64, Custom); + setOperationAction({ISD::SADDSAT, ISD::SSUBSAT}, MVT::v2i64, Custom); // FIXME: Do we need to handle scalar-to-vector here? setOperationAction(ISD::MUL, MVT::v4i32, Legal); @@ -1161,10 +972,9 @@ // SSE41 brings specific instructions for doing vector sign extend even in // cases where we don't have SRA. - for (auto VT : { MVT::v8i16, MVT::v4i32, MVT::v2i64 }) { - setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Legal); - setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Legal); - } + setOperationAction( + {ISD::SIGN_EXTEND_VECTOR_INREG, ISD::ZERO_EXTEND_VECTOR_INREG}, + {MVT::v8i16, MVT::v4i32, MVT::v2i64}, Legal); // SSE41 also has vector sign/zero extending loads, PMOV[SZ]X for (auto LoadExtOp : { ISD::SEXTLOAD, ISD::ZEXTLOAD }) { @@ -1179,33 +989,30 @@ if (Subtarget.is64Bit() && !Subtarget.hasAVX512()) { // We need to scalarize v4i64->v432 uint_to_fp using cvtsi2ss, but we can // do the pre and post work in the vector domain. - setOperationAction(ISD::UINT_TO_FP, MVT::v4i64, Custom); - setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4i64, Custom); + setOperationAction({ISD::UINT_TO_FP, ISD::STRICT_UINT_TO_FP}, MVT::v4i64, + Custom); // We need to mark SINT_TO_FP as Custom even though we want to expand it // so that DAG combine doesn't try to turn it into uint_to_fp. 
- setOperationAction(ISD::SINT_TO_FP, MVT::v4i64, Custom); - setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v4i64, Custom); + setOperationAction({ISD::SINT_TO_FP, ISD::STRICT_SINT_TO_FP}, MVT::v4i64, + Custom); } } - if (!Subtarget.useSoftFloat() && Subtarget.hasSSE42()) { - setOperationAction(ISD::UADDSAT, MVT::v2i64, Custom); - } + if (!Subtarget.useSoftFloat() && Subtarget.hasSSE42()) + setOperationAction(ISD::UADDSAT, MVT::v2i64, Custom); if (!Subtarget.useSoftFloat() && Subtarget.hasXOP()) { - for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64, - MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 }) { - setOperationAction(ISD::ROTL, VT, Custom); - setOperationAction(ISD::ROTR, VT, Custom); - } + setOperationAction({ISD::ROTL, ISD::ROTR}, + {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64, + MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64}, + Custom); // XOP can efficiently perform BITREVERSE with VPPERM. - for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) - setOperationAction(ISD::BITREVERSE, VT, Custom); - - for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64, - MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 }) - setOperationAction(ISD::BITREVERSE, VT, Custom); + setOperationAction(ISD::BITREVERSE, + {MVT::i8, MVT::i16, MVT::i32, MVT::i64, MVT::v16i8, + MVT::v8i16, MVT::v4i32, MVT::v2i64, MVT::v32i8, + MVT::v16i16, MVT::v8i32, MVT::v4i64}, + Custom); } if (!Subtarget.useSoftFloat() && Subtarget.hasAVX()) { @@ -1225,24 +1032,15 @@ : &X86::VR256RegClass); for (auto VT : { MVT::v8f32, MVT::v4f64 }) { - setOperationAction(ISD::FFLOOR, VT, Legal); - setOperationAction(ISD::STRICT_FFLOOR, VT, Legal); - setOperationAction(ISD::FCEIL, VT, Legal); - setOperationAction(ISD::STRICT_FCEIL, VT, Legal); - setOperationAction(ISD::FTRUNC, VT, Legal); - setOperationAction(ISD::STRICT_FTRUNC, VT, Legal); - setOperationAction(ISD::FRINT, VT, Legal); - setOperationAction(ISD::STRICT_FRINT, VT, Legal); - setOperationAction(ISD::FNEARBYINT, VT, 
Legal); - setOperationAction(ISD::STRICT_FNEARBYINT, VT, Legal); - setOperationAction(ISD::FROUNDEVEN, VT, Legal); - setOperationAction(ISD::STRICT_FROUNDEVEN, VT, Legal); + setOperationAction({ISD::FFLOOR, ISD::STRICT_FFLOOR, ISD::FCEIL, + ISD::STRICT_FCEIL, ISD::FTRUNC, ISD::STRICT_FTRUNC, + ISD::FRINT, ISD::STRICT_FRINT, ISD::FNEARBYINT, + ISD::STRICT_FNEARBYINT, ISD::FROUNDEVEN, + ISD::STRICT_FROUNDEVEN}, + VT, Legal); - setOperationAction(ISD::FROUND, VT, Custom); - - setOperationAction(ISD::FNEG, VT, Custom); - setOperationAction(ISD::FABS, VT, Custom); - setOperationAction(ISD::FCOPYSIGN, VT, Custom); + setOperationAction({ISD::FROUND, ISD::FNEG, ISD::FABS, ISD::FCOPYSIGN}, + VT, Custom); } // (fp_to_int:v8i16 (v8f32 ..)) requires the result type to be promoted @@ -1259,17 +1057,10 @@ setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v8i32, Legal); setOperationAction(ISD::STRICT_FP_ROUND, MVT::v4f32, Legal); - setOperationAction(ISD::STRICT_FADD, MVT::v8f32, Legal); - setOperationAction(ISD::STRICT_FADD, MVT::v4f64, Legal); - setOperationAction(ISD::STRICT_FSUB, MVT::v8f32, Legal); - setOperationAction(ISD::STRICT_FSUB, MVT::v4f64, Legal); - setOperationAction(ISD::STRICT_FMUL, MVT::v8f32, Legal); - setOperationAction(ISD::STRICT_FMUL, MVT::v4f64, Legal); - setOperationAction(ISD::STRICT_FDIV, MVT::v8f32, Legal); - setOperationAction(ISD::STRICT_FDIV, MVT::v4f64, Legal); - setOperationAction(ISD::STRICT_FP_EXTEND, MVT::v4f64, Legal); - setOperationAction(ISD::STRICT_FSQRT, MVT::v8f32, Legal); - setOperationAction(ISD::STRICT_FSQRT, MVT::v4f64, Legal); + setOperationAction({ISD::STRICT_FADD, ISD::STRICT_FSUB, ISD::STRICT_FMUL, + ISD::STRICT_FDIV, ISD::STRICT_FSQRT}, + {MVT::v8f32, MVT::v4f64}, Legal); + setOperationAction(ISD::STRICT_FP_EXTEND, MVT::v4f64, Legal); if (!Subtarget.hasAVX512()) setOperationAction(ISD::BITCAST, MVT::v32i1, Custom); @@ -1277,120 +1068,84 @@ // In the customized shift lowering, the legal v8i32/v4i64 cases // in AVX2 will be 
recognized. for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 }) { - setOperationAction(ISD::SRL, VT, Custom); - setOperationAction(ISD::SHL, VT, Custom); - setOperationAction(ISD::SRA, VT, Custom); + setOperationAction({ISD::SRL, ISD::SHL, ISD::SRA}, VT, Custom); if (VT == MVT::v4i64) continue; - setOperationAction(ISD::ROTL, VT, Custom); - setOperationAction(ISD::ROTR, VT, Custom); - setOperationAction(ISD::FSHL, VT, Custom); - setOperationAction(ISD::FSHR, VT, Custom); + setOperationAction({ISD::ROTL, ISD::ROTR, ISD::FSHL, ISD::FSHR}, VT, + Custom); } // These types need custom splitting if their input is a 128-bit vector. - setOperationAction(ISD::SIGN_EXTEND, MVT::v8i64, Custom); - setOperationAction(ISD::SIGN_EXTEND, MVT::v16i32, Custom); - setOperationAction(ISD::ZERO_EXTEND, MVT::v8i64, Custom); - setOperationAction(ISD::ZERO_EXTEND, MVT::v16i32, Custom); + setOperationAction({ISD::SIGN_EXTEND, ISD::ZERO_EXTEND}, + {MVT::v8i64, MVT::v16i32}, Custom); - setOperationAction(ISD::SELECT, MVT::v4f64, Custom); - setOperationAction(ISD::SELECT, MVT::v4i64, Custom); - setOperationAction(ISD::SELECT, MVT::v8i32, Custom); - setOperationAction(ISD::SELECT, MVT::v16i16, Custom); - setOperationAction(ISD::SELECT, MVT::v32i8, Custom); - setOperationAction(ISD::SELECT, MVT::v8f32, Custom); + setOperationAction(ISD::SELECT, + {MVT::v4f64, MVT::v4i64, MVT::v8i32, MVT::v16i16, + MVT::v32i8, MVT::v8f32}, + Custom); - for (auto VT : { MVT::v16i16, MVT::v8i32, MVT::v4i64 }) { - setOperationAction(ISD::SIGN_EXTEND, VT, Custom); - setOperationAction(ISD::ZERO_EXTEND, VT, Custom); - setOperationAction(ISD::ANY_EXTEND, VT, Custom); - } + setOperationAction({ISD::SIGN_EXTEND, ISD::ZERO_EXTEND, ISD::ANY_EXTEND}, + {MVT::v16i16, MVT::v8i32, MVT::v4i64}, Custom); - setOperationAction(ISD::TRUNCATE, MVT::v16i8, Custom); - setOperationAction(ISD::TRUNCATE, MVT::v8i16, Custom); - setOperationAction(ISD::TRUNCATE, MVT::v4i32, Custom); - setOperationAction(ISD::BITREVERSE, 
MVT::v32i8, Custom); + setOperationAction(ISD::TRUNCATE, {MVT::v16i8, MVT::v8i16, MVT::v4i32}, + Custom); + setOperationAction(ISD::BITREVERSE, MVT::v32i8, Custom); - for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 }) { - setOperationAction(ISD::SETCC, VT, Custom); - setOperationAction(ISD::STRICT_FSETCC, VT, Custom); - setOperationAction(ISD::STRICT_FSETCCS, VT, Custom); - setOperationAction(ISD::CTPOP, VT, Custom); - setOperationAction(ISD::CTLZ, VT, Custom); + for (auto VT : {MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64}) { + setOperationAction({ISD::SETCC, ISD::STRICT_FSETCC, ISD::STRICT_FSETCCS, + ISD::CTPOP, ISD::CTLZ}, + VT, Custom); // The condition codes aren't legal in SSE/AVX and under AVX512 we use // setcc all the way to isel and prefer SETGT in some isel patterns. - setCondCodeAction(ISD::SETLT, VT, Custom); - setCondCodeAction(ISD::SETLE, VT, Custom); - } + setCondCodeAction({ISD::SETLT, ISD::SETLE}, VT, Custom); + } if (Subtarget.hasAnyFMA()) { - for (auto VT : { MVT::f32, MVT::f64, MVT::v4f32, MVT::v8f32, - MVT::v2f64, MVT::v4f64 }) { - setOperationAction(ISD::FMA, VT, Legal); - setOperationAction(ISD::STRICT_FMA, VT, Legal); - } + setOperationAction( + {ISD::FMA, ISD::STRICT_FMA}, + {MVT::f32, MVT::f64, MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64}, + Legal); } - for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 }) { - setOperationAction(ISD::ADD, VT, HasInt256 ? Legal : Custom); - setOperationAction(ISD::SUB, VT, HasInt256 ? Legal : Custom); - } + setOperationAction({ISD::ADD, ISD::SUB}, + {MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64}, + HasInt256 ? Legal : Custom); setOperationAction(ISD::MUL, MVT::v4i64, Custom); - setOperationAction(ISD::MUL, MVT::v8i32, HasInt256 ? Legal : Custom); - setOperationAction(ISD::MUL, MVT::v16i16, HasInt256 ? Legal : Custom); + setOperationAction(ISD::MUL, {MVT::v8i32, MVT::v16i16}, + HasInt256 ? 
Legal : Custom); setOperationAction(ISD::MUL, MVT::v32i8, Custom); - setOperationAction(ISD::MULHU, MVT::v8i32, Custom); - setOperationAction(ISD::MULHS, MVT::v8i32, Custom); - setOperationAction(ISD::MULHU, MVT::v16i16, HasInt256 ? Legal : Custom); - setOperationAction(ISD::MULHS, MVT::v16i16, HasInt256 ? Legal : Custom); - setOperationAction(ISD::MULHU, MVT::v32i8, Custom); - setOperationAction(ISD::MULHS, MVT::v32i8, Custom); - setOperationAction(ISD::AVGCEILU, MVT::v16i16, HasInt256 ? Legal : Custom); - setOperationAction(ISD::AVGCEILU, MVT::v32i8, HasInt256 ? Legal : Custom); - - setOperationAction(ISD::SMULO, MVT::v32i8, Custom); - setOperationAction(ISD::UMULO, MVT::v32i8, Custom); - - setOperationAction(ISD::ABS, MVT::v4i64, Custom); - setOperationAction(ISD::SMAX, MVT::v4i64, Custom); - setOperationAction(ISD::UMAX, MVT::v4i64, Custom); - setOperationAction(ISD::SMIN, MVT::v4i64, Custom); - setOperationAction(ISD::UMIN, MVT::v4i64, Custom); - - setOperationAction(ISD::UADDSAT, MVT::v32i8, HasInt256 ? Legal : Custom); - setOperationAction(ISD::SADDSAT, MVT::v32i8, HasInt256 ? Legal : Custom); - setOperationAction(ISD::USUBSAT, MVT::v32i8, HasInt256 ? Legal : Custom); - setOperationAction(ISD::SSUBSAT, MVT::v32i8, HasInt256 ? Legal : Custom); - setOperationAction(ISD::UADDSAT, MVT::v16i16, HasInt256 ? Legal : Custom); - setOperationAction(ISD::SADDSAT, MVT::v16i16, HasInt256 ? Legal : Custom); - setOperationAction(ISD::USUBSAT, MVT::v16i16, HasInt256 ? Legal : Custom); - setOperationAction(ISD::SSUBSAT, MVT::v16i16, HasInt256 ? Legal : Custom); - setOperationAction(ISD::UADDSAT, MVT::v8i32, Custom); - setOperationAction(ISD::USUBSAT, MVT::v8i32, Custom); - setOperationAction(ISD::UADDSAT, MVT::v4i64, Custom); - setOperationAction(ISD::USUBSAT, MVT::v4i64, Custom); - - for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32 }) { - setOperationAction(ISD::ABS, VT, HasInt256 ? Legal : Custom); - setOperationAction(ISD::SMAX, VT, HasInt256 ? 
Legal : Custom); - setOperationAction(ISD::UMAX, VT, HasInt256 ? Legal : Custom); - setOperationAction(ISD::SMIN, VT, HasInt256 ? Legal : Custom); - setOperationAction(ISD::UMIN, VT, HasInt256 ? Legal : Custom); - } - - for (auto VT : {MVT::v16i16, MVT::v8i32, MVT::v4i64}) { - setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Custom); - setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Custom); - } + setOperationAction({ISD::MULHU, ISD::MULHS}, MVT::v8i32, Custom); + setOperationAction({ISD::MULHU, ISD::MULHS}, MVT::v16i16, + HasInt256 ? Legal : Custom); + setOperationAction({ISD::MULHU, ISD::MULHS}, MVT::v32i8, Custom); + setOperationAction(ISD::AVGCEILU, {MVT::v16i16, MVT::v32i8}, + HasInt256 ? Legal : Custom); + + setOperationAction({ISD::SMULO, ISD::UMULO}, MVT::v32i8, Custom); + + setOperationAction({ISD::ABS, ISD::SMAX, ISD::UMAX, ISD::SMIN, ISD::UMIN}, + MVT::v4i64, Custom); + + setOperationAction({ISD::UADDSAT, ISD::SADDSAT, ISD::USUBSAT, ISD::SSUBSAT}, + {MVT::v32i8, MVT::v16i16}, HasInt256 ? Legal : Custom); + setOperationAction({ISD::UADDSAT, ISD::USUBSAT}, {MVT::v8i32, MVT::v4i64}, + Custom); + + setOperationAction({ISD::ABS, ISD::SMAX, ISD::UMAX, ISD::SMIN, ISD::UMIN}, + {MVT::v32i8, MVT::v16i16, MVT::v8i32}, + HasInt256 ? Legal : Custom); + + setOperationAction( + {ISD::SIGN_EXTEND_VECTOR_INREG, ISD::ZERO_EXTEND_VECTOR_INREG}, + {MVT::v16i16, MVT::v8i32, MVT::v4i64}, Custom); if (HasInt256) { // The custom lowering for UINT_TO_FP for v8i32 becomes interesting // when we have a 256bit-wide blend with immediate. 
- setOperationAction(ISD::UINT_TO_FP, MVT::v8i32, Custom); - setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v8i32, Custom); + setOperationAction({ISD::UINT_TO_FP, ISD::STRICT_UINT_TO_FP}, MVT::v8i32, + Custom); // AVX2 also has wider vector sign/zero extending loads, VPMOV[SZ]X for (auto LoadExtOp : { ISD::SEXTLOAD, ISD::ZEXTLOAD }) { @@ -1411,31 +1166,27 @@ // Extract subvector is special because the value type // (result) is 128-bit but the source is 256-bit wide. - for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64, - MVT::v4f32, MVT::v2f64 }) { - setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal); - } + setOperationAction(ISD::EXTRACT_SUBVECTOR, + {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64, + MVT::v4f32, MVT::v2f64}, + Legal); // Custom lower several nodes for 256-bit types. for (MVT VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64, MVT::v8f32, MVT::v4f64 }) { - setOperationAction(ISD::BUILD_VECTOR, VT, Custom); - setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom); - setOperationAction(ISD::VSELECT, VT, Custom); - setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom); - setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom); - setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom); - setOperationAction(ISD::INSERT_SUBVECTOR, VT, Legal); - setOperationAction(ISD::CONCAT_VECTORS, VT, Custom); - setOperationAction(ISD::STORE, VT, Custom); + setOperationAction({ISD::BUILD_VECTOR, ISD::VECTOR_SHUFFLE, ISD::VSELECT, + ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT, + ISD::SCALAR_TO_VECTOR, ISD::CONCAT_VECTORS, + ISD::STORE}, + VT, Custom); + setOperationAction(ISD::INSERT_SUBVECTOR, VT, Legal); } if (HasInt256) { setOperationAction(ISD::VSELECT, MVT::v32i8, Legal); // Custom legalize 2x32 to get a little better code. 
- setOperationAction(ISD::MGATHER, MVT::v2f32, Custom); - setOperationAction(ISD::MGATHER, MVT::v2i32, Custom); + setOperationAction(ISD::MGATHER, {MVT::v2f32, MVT::v2i32}, Custom); for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64, MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64 }) @@ -1453,9 +1204,9 @@ addRegisterClass(MVT::v8i1, &X86::VK8RegClass); addRegisterClass(MVT::v16i1, &X86::VK16RegClass); - setOperationAction(ISD::SELECT, MVT::v1i1, Custom); - setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v1i1, Custom); - setOperationAction(ISD::BUILD_VECTOR, MVT::v1i1, Custom); + setOperationAction( + {ISD::SELECT, ISD::EXTRACT_VECTOR_ELT, ISD::BUILD_VECTOR}, MVT::v1i1, + Custom); setOperationPromotedToType(ISD::FP_TO_SINT, MVT::v8i1, MVT::v8i32); setOperationPromotedToType(ISD::FP_TO_UINT, MVT::v8i1, MVT::v8i32); @@ -1465,51 +1216,33 @@ setOperationPromotedToType(ISD::STRICT_FP_TO_UINT, MVT::v8i1, MVT::v8i32); setOperationPromotedToType(ISD::STRICT_FP_TO_SINT, MVT::v4i1, MVT::v4i32); setOperationPromotedToType(ISD::STRICT_FP_TO_UINT, MVT::v4i1, MVT::v4i32); - setOperationAction(ISD::FP_TO_SINT, MVT::v2i1, Custom); - setOperationAction(ISD::FP_TO_UINT, MVT::v2i1, Custom); - setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v2i1, Custom); - setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v2i1, Custom); + setOperationAction({ISD::FP_TO_SINT, ISD::FP_TO_UINT, + ISD::STRICT_FP_TO_SINT, ISD::STRICT_FP_TO_UINT}, + MVT::v2i1, Custom); // There is no byte sized k-register load or store without AVX512DQ. 
- if (!Subtarget.hasDQI()) { - setOperationAction(ISD::LOAD, MVT::v1i1, Custom); - setOperationAction(ISD::LOAD, MVT::v2i1, Custom); - setOperationAction(ISD::LOAD, MVT::v4i1, Custom); - setOperationAction(ISD::LOAD, MVT::v8i1, Custom); - - setOperationAction(ISD::STORE, MVT::v1i1, Custom); - setOperationAction(ISD::STORE, MVT::v2i1, Custom); - setOperationAction(ISD::STORE, MVT::v4i1, Custom); - setOperationAction(ISD::STORE, MVT::v8i1, Custom); - } + if (!Subtarget.hasDQI()) + setOperationAction({ISD::LOAD, ISD::STORE}, + {MVT::v1i1, MVT::v2i1, MVT::v4i1, MVT::v8i1}, Custom); // Extends of v16i1/v8i1/v4i1/v2i1 to 128-bit vectors. - for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) { - setOperationAction(ISD::SIGN_EXTEND, VT, Custom); - setOperationAction(ISD::ZERO_EXTEND, VT, Custom); - setOperationAction(ISD::ANY_EXTEND, VT, Custom); - } + setOperationAction({ISD::SIGN_EXTEND, ISD::ZERO_EXTEND, ISD::ANY_EXTEND}, + {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64}, + Custom); - for (auto VT : { MVT::v1i1, MVT::v2i1, MVT::v4i1, MVT::v8i1, MVT::v16i1 }) - setOperationAction(ISD::VSELECT, VT, Expand); + setOperationAction(ISD::VSELECT, + {MVT::v1i1, MVT::v2i1, MVT::v4i1, MVT::v8i1, MVT::v16i1}, + Expand); - for (auto VT : { MVT::v2i1, MVT::v4i1, MVT::v8i1, MVT::v16i1 }) { - setOperationAction(ISD::SETCC, VT, Custom); - setOperationAction(ISD::STRICT_FSETCC, VT, Custom); - setOperationAction(ISD::STRICT_FSETCCS, VT, Custom); - setOperationAction(ISD::SELECT, VT, Custom); - setOperationAction(ISD::TRUNCATE, VT, Custom); + setOperationAction({ISD::SETCC, ISD::STRICT_FSETCC, ISD::STRICT_FSETCCS, + ISD::SELECT, ISD::TRUNCATE, ISD::BUILD_VECTOR, + ISD::CONCAT_VECTORS, ISD::EXTRACT_VECTOR_ELT, + ISD::INSERT_SUBVECTOR, ISD::INSERT_VECTOR_ELT, + ISD::VECTOR_SHUFFLE}, + {MVT::v2i1, MVT::v4i1, MVT::v8i1, MVT::v16i1}, Custom); - setOperationAction(ISD::BUILD_VECTOR, VT, Custom); - setOperationAction(ISD::CONCAT_VECTORS, VT, Custom); - 
setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom); - setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom); - setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom); - setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom); - } - - for (auto VT : { MVT::v1i1, MVT::v2i1, MVT::v4i1, MVT::v8i1 }) - setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom); + setOperationAction(ISD::EXTRACT_SUBVECTOR, + {MVT::v1i1, MVT::v2i1, MVT::v4i1, MVT::v8i1}, Custom); } // This block controls legalization for 512-bit operations with 32/64 bit @@ -1536,10 +1269,8 @@ } for (MVT VT : { MVT::v16f32, MVT::v8f64 }) { - setOperationAction(ISD::FNEG, VT, Custom); - setOperationAction(ISD::FABS, VT, Custom); - setOperationAction(ISD::FMA, VT, Legal); - setOperationAction(ISD::STRICT_FMA, VT, Legal); + setOperationAction({ISD::FNEG, ISD::FABS}, VT, Custom); + setOperationAction({ISD::FMA, ISD::STRICT_FMA}, VT, Legal); setOperationAction(ISD::FCOPYSIGN, VT, Custom); } @@ -1549,25 +1280,15 @@ setOperationPromotedToType(ISD::STRICT_FP_TO_SINT, VT, MVT::v16i32); setOperationPromotedToType(ISD::STRICT_FP_TO_UINT, VT, MVT::v16i32); } - setOperationAction(ISD::FP_TO_SINT, MVT::v16i32, Legal); - setOperationAction(ISD::FP_TO_UINT, MVT::v16i32, Legal); - setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v16i32, Legal); - setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v16i32, Legal); - setOperationAction(ISD::SINT_TO_FP, MVT::v16i32, Legal); - setOperationAction(ISD::UINT_TO_FP, MVT::v16i32, Legal); - setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v16i32, Legal); - setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v16i32, Legal); - - setOperationAction(ISD::STRICT_FADD, MVT::v16f32, Legal); - setOperationAction(ISD::STRICT_FADD, MVT::v8f64, Legal); - setOperationAction(ISD::STRICT_FSUB, MVT::v16f32, Legal); - setOperationAction(ISD::STRICT_FSUB, MVT::v8f64, Legal); - setOperationAction(ISD::STRICT_FMUL, MVT::v16f32, Legal); - setOperationAction(ISD::STRICT_FMUL, MVT::v8f64, Legal); - 
setOperationAction(ISD::STRICT_FDIV, MVT::v16f32, Legal); - setOperationAction(ISD::STRICT_FDIV, MVT::v8f64, Legal); - setOperationAction(ISD::STRICT_FSQRT, MVT::v16f32, Legal); - setOperationAction(ISD::STRICT_FSQRT, MVT::v8f64, Legal); + setOperationAction({ISD::FP_TO_SINT, ISD::FP_TO_UINT, + ISD::STRICT_FP_TO_SINT, ISD::STRICT_FP_TO_UINT, + ISD::SINT_TO_FP, ISD::UINT_TO_FP, + ISD::STRICT_SINT_TO_FP, ISD::STRICT_UINT_TO_FP}, + MVT::v16i32, Legal); + + setOperationAction({ISD::STRICT_FADD, ISD::STRICT_FSUB, ISD::STRICT_FMUL, + ISD::STRICT_FDIV, ISD::STRICT_FSQRT}, + {MVT::v16f32, MVT::v8f64}, Legal); setOperationAction(ISD::STRICT_FP_EXTEND, MVT::v8f64, Legal); setOperationAction(ISD::STRICT_FP_ROUND, MVT::v8f32, Legal); @@ -1582,135 +1303,92 @@ // With 512-bit vectors and no VLX, we prefer to widen MLOAD/MSTORE // to 512-bit rather than use the AVX2 instructions so that we can use // k-masks. - if (!Subtarget.hasVLX()) { - for (auto VT : {MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64, - MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64}) { - setOperationAction(ISD::MLOAD, VT, Custom); - setOperationAction(ISD::MSTORE, VT, Custom); - } - } + if (!Subtarget.hasVLX()) + setOperationAction({ISD::MLOAD, ISD::MSTORE}, + {MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64, + MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64}, + Custom); - setOperationAction(ISD::TRUNCATE, MVT::v8i32, Legal); - setOperationAction(ISD::TRUNCATE, MVT::v16i16, Legal); + setOperationAction(ISD::TRUNCATE, {MVT::v8i32, MVT::v16i16}, Legal); setOperationAction(ISD::TRUNCATE, MVT::v32i8, HasBWI ? 
Legal : Custom); setOperationAction(ISD::TRUNCATE, MVT::v16i64, Custom); - setOperationAction(ISD::ZERO_EXTEND, MVT::v32i16, Custom); - setOperationAction(ISD::ZERO_EXTEND, MVT::v16i32, Custom); - setOperationAction(ISD::ZERO_EXTEND, MVT::v8i64, Custom); - setOperationAction(ISD::ANY_EXTEND, MVT::v32i16, Custom); - setOperationAction(ISD::ANY_EXTEND, MVT::v16i32, Custom); - setOperationAction(ISD::ANY_EXTEND, MVT::v8i64, Custom); - setOperationAction(ISD::SIGN_EXTEND, MVT::v32i16, Custom); - setOperationAction(ISD::SIGN_EXTEND, MVT::v16i32, Custom); - setOperationAction(ISD::SIGN_EXTEND, MVT::v8i64, Custom); - - if (HasBWI) { + setOperationAction({ISD::ZERO_EXTEND, ISD::ANY_EXTEND, ISD::SIGN_EXTEND}, + {MVT::v32i16, MVT::v16i32, MVT::v8i64}, Custom); + + if (HasBWI) // Extends from v64i1 masks to 512-bit vectors. - setOperationAction(ISD::SIGN_EXTEND, MVT::v64i8, Custom); - setOperationAction(ISD::ZERO_EXTEND, MVT::v64i8, Custom); - setOperationAction(ISD::ANY_EXTEND, MVT::v64i8, Custom); - } + setOperationAction({ISD::SIGN_EXTEND, ISD::ZERO_EXTEND, ISD::ANY_EXTEND}, + MVT::v64i8, Custom); for (auto VT : { MVT::v16f32, MVT::v8f64 }) { - setOperationAction(ISD::FFLOOR, VT, Legal); - setOperationAction(ISD::STRICT_FFLOOR, VT, Legal); - setOperationAction(ISD::FCEIL, VT, Legal); - setOperationAction(ISD::STRICT_FCEIL, VT, Legal); - setOperationAction(ISD::FTRUNC, VT, Legal); - setOperationAction(ISD::STRICT_FTRUNC, VT, Legal); - setOperationAction(ISD::FRINT, VT, Legal); - setOperationAction(ISD::STRICT_FRINT, VT, Legal); - setOperationAction(ISD::FNEARBYINT, VT, Legal); - setOperationAction(ISD::STRICT_FNEARBYINT, VT, Legal); - setOperationAction(ISD::FROUNDEVEN, VT, Legal); - setOperationAction(ISD::STRICT_FROUNDEVEN, VT, Legal); + setOperationAction({ISD::FFLOOR, ISD::STRICT_FFLOOR, ISD::FCEIL, + ISD::STRICT_FCEIL, ISD::FTRUNC, ISD::STRICT_FTRUNC, + ISD::FRINT, ISD::STRICT_FRINT, ISD::FNEARBYINT, + ISD::STRICT_FNEARBYINT, ISD::FROUNDEVEN, + ISD::STRICT_FROUNDEVEN}, 
+ VT, Legal); setOperationAction(ISD::FROUND, VT, Custom); } - for (auto VT : {MVT::v32i16, MVT::v16i32, MVT::v8i64}) { - setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Custom); - setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Custom); - } + setOperationAction( + {ISD::SIGN_EXTEND_VECTOR_INREG, ISD::ZERO_EXTEND_VECTOR_INREG}, + {MVT::v32i16, MVT::v16i32, MVT::v8i64}, Custom); - setOperationAction(ISD::ADD, MVT::v32i16, HasBWI ? Legal : Custom); - setOperationAction(ISD::SUB, MVT::v32i16, HasBWI ? Legal : Custom); - setOperationAction(ISD::ADD, MVT::v64i8, HasBWI ? Legal : Custom); - setOperationAction(ISD::SUB, MVT::v64i8, HasBWI ? Legal : Custom); + setOperationAction({ISD::ADD, ISD::SUB}, {MVT::v32i16, MVT::v64i8}, + HasBWI ? Legal : Custom); - setOperationAction(ISD::MUL, MVT::v8i64, Custom); + setOperationAction(ISD::MUL, MVT::v8i64, Custom); setOperationAction(ISD::MUL, MVT::v16i32, Legal); setOperationAction(ISD::MUL, MVT::v32i16, HasBWI ? Legal : Custom); - setOperationAction(ISD::MUL, MVT::v64i8, Custom); - - setOperationAction(ISD::MULHU, MVT::v16i32, Custom); - setOperationAction(ISD::MULHS, MVT::v16i32, Custom); - setOperationAction(ISD::MULHS, MVT::v32i16, HasBWI ? Legal : Custom); - setOperationAction(ISD::MULHU, MVT::v32i16, HasBWI ? Legal : Custom); - setOperationAction(ISD::MULHS, MVT::v64i8, Custom); - setOperationAction(ISD::MULHU, MVT::v64i8, Custom); - setOperationAction(ISD::AVGCEILU, MVT::v32i16, HasBWI ? Legal : Custom); - setOperationAction(ISD::AVGCEILU, MVT::v64i8, HasBWI ? 
Legal : Custom); - - setOperationAction(ISD::SMULO, MVT::v64i8, Custom); - setOperationAction(ISD::UMULO, MVT::v64i8, Custom); - - setOperationAction(ISD::BITREVERSE, MVT::v64i8, Custom); - - for (auto VT : { MVT::v64i8, MVT::v32i16, MVT::v16i32, MVT::v8i64 }) { - setOperationAction(ISD::SRL, VT, Custom); - setOperationAction(ISD::SHL, VT, Custom); - setOperationAction(ISD::SRA, VT, Custom); - setOperationAction(ISD::ROTL, VT, Custom); - setOperationAction(ISD::ROTR, VT, Custom); - setOperationAction(ISD::SETCC, VT, Custom); + setOperationAction(ISD::MUL, MVT::v64i8, Custom); + + setOperationAction({ISD::MULHU, ISD::MULHS}, MVT::v16i32, Custom); + setOperationAction({ISD::MULHS, ISD::MULHU}, MVT::v32i16, + HasBWI ? Legal : Custom); + setOperationAction({ISD::MULHS, ISD::MULHU}, MVT::v64i8, Custom); + setOperationAction(ISD::AVGCEILU, {MVT::v32i16, MVT::v64i8}, + HasBWI ? Legal : Custom); + + setOperationAction({ISD::SMULO, ISD::UMULO}, MVT::v64i8, Custom); + + setOperationAction(ISD::BITREVERSE, MVT::v64i8, Custom); + + for (auto VT : {MVT::v64i8, MVT::v32i16, MVT::v16i32, MVT::v8i64}) { + setOperationAction( + {ISD::SRL, ISD::SHL, ISD::SRA, ISD::ROTL, ISD::ROTR, ISD::SETCC}, VT, + Custom); // The condition codes aren't legal in SSE/AVX and under AVX512 we use // setcc all the way to isel and prefer SETGT in some isel patterns. 
- setCondCodeAction(ISD::SETLT, VT, Custom); - setCondCodeAction(ISD::SETLE, VT, Custom); + setCondCodeAction({ISD::SETLT, ISD::SETLE}, VT, Custom); } for (auto VT : { MVT::v16i32, MVT::v8i64 }) { - setOperationAction(ISD::SMAX, VT, Legal); - setOperationAction(ISD::UMAX, VT, Legal); - setOperationAction(ISD::SMIN, VT, Legal); - setOperationAction(ISD::UMIN, VT, Legal); - setOperationAction(ISD::ABS, VT, Legal); - setOperationAction(ISD::CTPOP, VT, Custom); - setOperationAction(ISD::STRICT_FSETCC, VT, Custom); - setOperationAction(ISD::STRICT_FSETCCS, VT, Custom); + setOperationAction({ISD::SMAX, ISD::UMAX, ISD::SMIN, ISD::UMIN, ISD::ABS}, + VT, Legal); + setOperationAction({ISD::CTPOP, ISD::STRICT_FSETCC, ISD::STRICT_FSETCCS}, + VT, Custom); } for (auto VT : { MVT::v64i8, MVT::v32i16 }) { setOperationAction(ISD::ABS, VT, HasBWI ? Legal : Custom); setOperationAction(ISD::CTPOP, VT, Subtarget.hasBITALG() ? Legal : Custom); setOperationAction(ISD::CTLZ, VT, Custom); - setOperationAction(ISD::SMAX, VT, HasBWI ? Legal : Custom); - setOperationAction(ISD::UMAX, VT, HasBWI ? Legal : Custom); - setOperationAction(ISD::SMIN, VT, HasBWI ? Legal : Custom); - setOperationAction(ISD::UMIN, VT, HasBWI ? Legal : Custom); - setOperationAction(ISD::UADDSAT, VT, HasBWI ? Legal : Custom); - setOperationAction(ISD::SADDSAT, VT, HasBWI ? Legal : Custom); - setOperationAction(ISD::USUBSAT, VT, HasBWI ? Legal : Custom); - setOperationAction(ISD::SSUBSAT, VT, HasBWI ? Legal : Custom); - } - - setOperationAction(ISD::FSHL, MVT::v64i8, Custom); - setOperationAction(ISD::FSHR, MVT::v64i8, Custom); - setOperationAction(ISD::FSHL, MVT::v32i16, Custom); - setOperationAction(ISD::FSHR, MVT::v32i16, Custom); - setOperationAction(ISD::FSHL, MVT::v16i32, Custom); - setOperationAction(ISD::FSHR, MVT::v16i32, Custom); + setOperationAction({ISD::SMAX, ISD::UMAX, ISD::SMIN, ISD::UMIN, + ISD::UADDSAT, ISD::SADDSAT, ISD::USUBSAT, + ISD::SSUBSAT}, + VT, HasBWI ? 
Legal : Custom); + } + + setOperationAction({ISD::FSHL, ISD::FSHR}, + {MVT::v64i8, MVT::v32i16, MVT::v16i32}, Custom); if (Subtarget.hasDQI()) { - setOperationAction(ISD::SINT_TO_FP, MVT::v8i64, Legal); - setOperationAction(ISD::UINT_TO_FP, MVT::v8i64, Legal); - setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v8i64, Legal); - setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v8i64, Legal); - setOperationAction(ISD::FP_TO_SINT, MVT::v8i64, Legal); - setOperationAction(ISD::FP_TO_UINT, MVT::v8i64, Legal); - setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v8i64, Legal); - setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v8i64, Legal); + setOperationAction({ISD::SINT_TO_FP, ISD::UINT_TO_FP, + ISD::STRICT_SINT_TO_FP, ISD::STRICT_UINT_TO_FP, + ISD::FP_TO_SINT, ISD::FP_TO_UINT, + ISD::STRICT_FP_TO_SINT, ISD::STRICT_FP_TO_UINT}, + MVT::v8i64, Legal); setOperationAction(ISD::MUL, MVT::v8i64, Legal); } @@ -1738,43 +1416,32 @@ MVT::v16f32, MVT::v8f64 }) { setOperationAction(ISD::CONCAT_VECTORS, VT, Custom); setOperationAction(ISD::INSERT_SUBVECTOR, VT, Legal); - setOperationAction(ISD::SELECT, VT, Custom); - setOperationAction(ISD::VSELECT, VT, Custom); - setOperationAction(ISD::BUILD_VECTOR, VT, Custom); - setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom); - setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom); - setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom); - setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom); + setOperationAction({ISD::SELECT, ISD::VSELECT, ISD::BUILD_VECTOR, + ISD::EXTRACT_VECTOR_ELT, ISD::VECTOR_SHUFFLE, + ISD::SCALAR_TO_VECTOR, ISD::INSERT_VECTOR_ELT}, + VT, Custom); } for (auto VT : { MVT::v16i32, MVT::v8i64, MVT::v16f32, MVT::v8f64 }) { - setOperationAction(ISD::MLOAD, VT, Legal); - setOperationAction(ISD::MSTORE, VT, Legal); - setOperationAction(ISD::MGATHER, VT, Custom); - setOperationAction(ISD::MSCATTER, VT, Custom); - } - if (HasBWI) { - for (auto VT : { MVT::v64i8, MVT::v32i16 }) { - setOperationAction(ISD::MLOAD, VT, Legal); - 
setOperationAction(ISD::MSTORE, VT, Legal); - } - } else { - setOperationAction(ISD::STORE, MVT::v32i16, Custom); - setOperationAction(ISD::STORE, MVT::v64i8, Custom); + setOperationAction({ISD::MLOAD, ISD::MSTORE}, VT, Legal); + setOperationAction({ISD::MGATHER, ISD::MSCATTER}, VT, Custom); } + if (HasBWI) + setOperationAction({ISD::MLOAD, ISD::MSTORE}, {MVT::v64i8, MVT::v32i16}, + Legal); + else + setOperationAction(ISD::STORE, {MVT::v32i16, MVT::v64i8}, Custom); if (Subtarget.hasVBMI2()) { - for (auto VT : { MVT::v8i16, MVT::v4i32, MVT::v2i64, - MVT::v16i16, MVT::v8i32, MVT::v4i64, - MVT::v32i16, MVT::v16i32, MVT::v8i64 }) { - setOperationAction(ISD::FSHL, VT, Custom); - setOperationAction(ISD::FSHR, VT, Custom); - } + setOperationAction({ISD::FSHL, ISD::FSHR}, + {MVT::v8i16, MVT::v4i32, MVT::v2i64, MVT::v16i16, + MVT::v8i32, MVT::v4i64, MVT::v32i16, MVT::v16i32, + MVT::v8i64}, + Custom); setOperationAction(ISD::ROTL, MVT::v32i16, Custom); - setOperationAction(ISD::ROTR, MVT::v8i16, Custom); - setOperationAction(ISD::ROTR, MVT::v16i16, Custom); - setOperationAction(ISD::ROTR, MVT::v32i16, Custom); + setOperationAction(ISD::ROTR, {MVT::v8i16, MVT::v16i16, MVT::v32i16}, + Custom); } }// useAVX512Regs @@ -1785,23 +1452,11 @@ // These operations are handled on non-VLX by artificially widening in // isel patterns. - setOperationAction(ISD::FP_TO_UINT, MVT::v8i32, - Subtarget.hasVLX() ? Legal : Custom); - setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, - Subtarget.hasVLX() ? Legal : Custom); - setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v8i32, - Subtarget.hasVLX() ? Legal : Custom); - setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v4i32, - Subtarget.hasVLX() ? Legal : Custom); - setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v2i32, Custom); - setOperationAction(ISD::UINT_TO_FP, MVT::v8i32, - Subtarget.hasVLX() ? Legal : Custom); - setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, - Subtarget.hasVLX() ? 
Legal : Custom); - setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v8i32, - Subtarget.hasVLX() ? Legal : Custom); - setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4i32, + setOperationAction({ISD::FP_TO_UINT, ISD::STRICT_FP_TO_UINT, + ISD::UINT_TO_FP, ISD::STRICT_UINT_TO_FP}, + {MVT::v8i32, MVT::v4i32}, Subtarget.hasVLX() ? Legal : Custom); + setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v2i32, Custom); if (Subtarget.hasDQI()) { // Fast v2f32 SINT_TO_FP( v2i64 ) custom conversion. @@ -1810,65 +1465,44 @@ isOperationCustom(ISD::STRICT_UINT_TO_FP, MVT::v2f32) && "Unexpected operation action!"); // v2i64 FP_TO_S/UINT(v2f32) custom conversion. - setOperationAction(ISD::FP_TO_SINT, MVT::v2f32, Custom); - setOperationAction(ISD::FP_TO_UINT, MVT::v2f32, Custom); - setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v2f32, Custom); - setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v2f32, Custom); + setOperationAction({ISD::FP_TO_SINT, ISD::FP_TO_UINT, + ISD::STRICT_FP_TO_SINT, ISD::STRICT_FP_TO_UINT}, + MVT::v2f32, Custom); } - for (auto VT : { MVT::v2i64, MVT::v4i64 }) { - setOperationAction(ISD::SMAX, VT, Legal); - setOperationAction(ISD::UMAX, VT, Legal); - setOperationAction(ISD::SMIN, VT, Legal); - setOperationAction(ISD::UMIN, VT, Legal); - setOperationAction(ISD::ABS, VT, Legal); - } + setOperationAction({ISD::SMAX, ISD::UMAX, ISD::SMIN, ISD::UMIN, ISD::ABS}, + {MVT::v2i64, MVT::v4i64}, Legal); - for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64 }) { - setOperationAction(ISD::ROTL, VT, Custom); - setOperationAction(ISD::ROTR, VT, Custom); - } + setOperationAction({ISD::ROTL, ISD::ROTR}, + {MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64}, + Custom); // Custom legalize 2x32 to get a little better code. 
- setOperationAction(ISD::MSCATTER, MVT::v2f32, Custom); - setOperationAction(ISD::MSCATTER, MVT::v2i32, Custom); + setOperationAction(ISD::MSCATTER, {MVT::v2f32, MVT::v2i32}, Custom); - for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64, - MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64 }) - setOperationAction(ISD::MSCATTER, VT, Custom); + setOperationAction(ISD::MSCATTER, + {MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64, + MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64}, + Custom); if (Subtarget.hasDQI()) { for (auto VT : { MVT::v2i64, MVT::v4i64 }) { - setOperationAction(ISD::SINT_TO_FP, VT, - Subtarget.hasVLX() ? Legal : Custom); - setOperationAction(ISD::UINT_TO_FP, VT, - Subtarget.hasVLX() ? Legal : Custom); - setOperationAction(ISD::STRICT_SINT_TO_FP, VT, - Subtarget.hasVLX() ? Legal : Custom); - setOperationAction(ISD::STRICT_UINT_TO_FP, VT, - Subtarget.hasVLX() ? Legal : Custom); - setOperationAction(ISD::FP_TO_SINT, VT, - Subtarget.hasVLX() ? Legal : Custom); - setOperationAction(ISD::FP_TO_UINT, VT, - Subtarget.hasVLX() ? Legal : Custom); - setOperationAction(ISD::STRICT_FP_TO_SINT, VT, - Subtarget.hasVLX() ? Legal : Custom); - setOperationAction(ISD::STRICT_FP_TO_UINT, VT, - Subtarget.hasVLX() ? Legal : Custom); + setOperationAction({ISD::SINT_TO_FP, ISD::UINT_TO_FP, + ISD::STRICT_SINT_TO_FP, ISD::STRICT_UINT_TO_FP, + ISD::FP_TO_SINT, ISD::FP_TO_UINT, + ISD::STRICT_FP_TO_SINT, ISD::STRICT_FP_TO_UINT}, + VT, Subtarget.hasVLX() ? 
Legal : Custom); setOperationAction(ISD::MUL, VT, Legal); } } - if (Subtarget.hasCDI()) { - for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64 }) { - setOperationAction(ISD::CTLZ, VT, Legal); - } - } // Subtarget.hasCDI() + if (Subtarget.hasCDI()) + setOperationAction( + ISD::CTLZ, {MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64}, Legal); - if (Subtarget.hasVPOPCNTDQ()) { - for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64 }) - setOperationAction(ISD::CTPOP, VT, Legal); - } + if (Subtarget.hasVPOPCNTDQ()) + setOperationAction( + ISD::CTPOP, {MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64}, Legal); } // This block control legalization of v32i1/v64i1 which are available with @@ -1880,121 +1514,89 @@ for (auto VT : { MVT::v32i1, MVT::v64i1 }) { setOperationAction(ISD::VSELECT, VT, Expand); - setOperationAction(ISD::TRUNCATE, VT, Custom); - setOperationAction(ISD::SETCC, VT, Custom); - setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom); - setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom); - setOperationAction(ISD::SELECT, VT, Custom); - setOperationAction(ISD::BUILD_VECTOR, VT, Custom); - setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom); - setOperationAction(ISD::CONCAT_VECTORS, VT, Custom); - setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom); + setOperationAction({ISD::TRUNCATE, ISD::SETCC, ISD::EXTRACT_VECTOR_ELT, + ISD::INSERT_VECTOR_ELT, ISD::SELECT, + ISD::BUILD_VECTOR, ISD::VECTOR_SHUFFLE, + ISD::CONCAT_VECTORS, ISD::INSERT_SUBVECTOR}, + VT, Custom); } - for (auto VT : { MVT::v16i1, MVT::v32i1 }) - setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom); + setOperationAction(ISD::EXTRACT_SUBVECTOR, {MVT::v16i1, MVT::v32i1}, + Custom); // Extends from v32i1 masks to 256-bit vectors. 
- setOperationAction(ISD::SIGN_EXTEND, MVT::v32i8, Custom); - setOperationAction(ISD::ZERO_EXTEND, MVT::v32i8, Custom); - setOperationAction(ISD::ANY_EXTEND, MVT::v32i8, Custom); + setOperationAction({ISD::SIGN_EXTEND, ISD::ZERO_EXTEND, ISD::ANY_EXTEND}, + MVT::v32i8, Custom); - for (auto VT : { MVT::v32i8, MVT::v16i8, MVT::v16i16, MVT::v8i16 }) { - setOperationAction(ISD::MLOAD, VT, Subtarget.hasVLX() ? Legal : Custom); - setOperationAction(ISD::MSTORE, VT, Subtarget.hasVLX() ? Legal : Custom); - } + setOperationAction({ISD::MLOAD, ISD::MSTORE}, + {MVT::v32i8, MVT::v16i8, MVT::v16i16, MVT::v8i16}, + Subtarget.hasVLX() ? Legal : Custom); - // These operations are handled on non-VLX by artificially widening in - // isel patterns. - // TODO: Custom widen in lowering on non-VLX and drop the isel patterns? + // These operations are handled on non-VLX by artificially widening in + // isel patterns. + // TODO: Custom widen in lowering on non-VLX and drop the isel patterns? - if (Subtarget.hasBITALG()) { - for (auto VT : { MVT::v16i8, MVT::v32i8, MVT::v8i16, MVT::v16i16 }) - setOperationAction(ISD::CTPOP, VT, Legal); - } + if (Subtarget.hasBITALG()) + setOperationAction( + ISD::CTPOP, {MVT::v16i8, MVT::v32i8, MVT::v8i16, MVT::v16i16}, Legal); } if (!Subtarget.useSoftFloat() && Subtarget.hasFP16()) { - auto setGroup = [&] (MVT VT) { - setOperationAction(ISD::FADD, VT, Legal); - setOperationAction(ISD::STRICT_FADD, VT, Legal); - setOperationAction(ISD::FSUB, VT, Legal); - setOperationAction(ISD::STRICT_FSUB, VT, Legal); - setOperationAction(ISD::FMUL, VT, Legal); - setOperationAction(ISD::STRICT_FMUL, VT, Legal); - setOperationAction(ISD::FDIV, VT, Legal); - setOperationAction(ISD::STRICT_FDIV, VT, Legal); - setOperationAction(ISD::FSQRT, VT, Legal); - setOperationAction(ISD::STRICT_FSQRT, VT, Legal); - - setOperationAction(ISD::FFLOOR, VT, Legal); - setOperationAction(ISD::STRICT_FFLOOR, VT, Legal); - setOperationAction(ISD::FCEIL, VT, Legal); - 
setOperationAction(ISD::STRICT_FCEIL, VT, Legal); - setOperationAction(ISD::FTRUNC, VT, Legal); - setOperationAction(ISD::STRICT_FTRUNC, VT, Legal); - setOperationAction(ISD::FRINT, VT, Legal); - setOperationAction(ISD::STRICT_FRINT, VT, Legal); - setOperationAction(ISD::FNEARBYINT, VT, Legal); - setOperationAction(ISD::STRICT_FNEARBYINT, VT, Legal); - - setOperationAction(ISD::LOAD, VT, Legal); - setOperationAction(ISD::STORE, VT, Legal); - - setOperationAction(ISD::FMA, VT, Legal); - setOperationAction(ISD::STRICT_FMA, VT, Legal); - setOperationAction(ISD::VSELECT, VT, Legal); - setOperationAction(ISD::BUILD_VECTOR, VT, Custom); - setOperationAction(ISD::SELECT, VT, Custom); - - setOperationAction(ISD::FNEG, VT, Custom); - setOperationAction(ISD::FABS, VT, Custom); - setOperationAction(ISD::FCOPYSIGN, VT, Custom); - setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom); - setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom); + auto setGroup = [&](MVT VT) { + setOperationAction({ISD::FADD, ISD::STRICT_FADD, + ISD::FSUB, ISD::STRICT_FSUB, + ISD::FMUL, ISD::STRICT_FMUL, + ISD::FDIV, ISD::STRICT_FDIV, + ISD::FSQRT, ISD::STRICT_FSQRT, + ISD::FFLOOR, ISD::STRICT_FFLOOR, + ISD::FCEIL, ISD::STRICT_FCEIL, + ISD::FTRUNC, ISD::STRICT_FTRUNC, + ISD::FRINT, ISD::STRICT_FRINT, + ISD::FNEARBYINT, ISD::STRICT_FNEARBYINT, + ISD::LOAD, ISD::STORE, + ISD::FMA, ISD::STRICT_FMA, + ISD::VSELECT}, + VT, Legal); + + setOperationAction({ISD::BUILD_VECTOR, ISD::SELECT, ISD::FNEG, ISD::FABS, + ISD::FCOPYSIGN, ISD::EXTRACT_VECTOR_ELT, + ISD::VECTOR_SHUFFLE}, + VT, Custom); }; // AVX512_FP16 scalar operations setGroup(MVT::f16); addRegisterClass(MVT::f16, &X86::FR16XRegClass); - setOperationAction(ISD::FREM, MVT::f16, Promote); - setOperationAction(ISD::STRICT_FREM, MVT::f16, Promote); - setOperationAction(ISD::SELECT_CC, MVT::f16, Expand); - setOperationAction(ISD::BR_CC, MVT::f16, Expand); - setOperationAction(ISD::SETCC, MVT::f16, Custom); - setOperationAction(ISD::STRICT_FSETCC, 
MVT::f16, Custom); - setOperationAction(ISD::STRICT_FSETCCS, MVT::f16, Custom); - setOperationAction(ISD::FROUND, MVT::f16, Custom); + setOperationAction({ISD::FREM, ISD::STRICT_FREM}, MVT::f16, Promote); + setOperationAction({ISD::SELECT_CC, ISD::BR_CC}, MVT::f16, Expand); + setOperationAction( + {ISD::SETCC, ISD::STRICT_FSETCC, ISD::STRICT_FSETCCS, ISD::FROUND}, + MVT::f16, Custom); setOperationAction(ISD::STRICT_FROUND, MVT::f16, Promote); - setOperationAction(ISD::FROUNDEVEN, MVT::f16, Legal); - setOperationAction(ISD::STRICT_FROUNDEVEN, MVT::f16, Legal); - setOperationAction(ISD::FP_ROUND, MVT::f16, Custom); - setOperationAction(ISD::STRICT_FP_ROUND, MVT::f16, Custom); + setOperationAction({ISD::FROUNDEVEN, ISD::STRICT_FROUNDEVEN}, MVT::f16, + Legal); + setOperationAction({ISD::FP_ROUND, ISD::STRICT_FP_ROUND}, MVT::f16, Custom); setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f32, Legal); - if (isTypeLegal(MVT::f80)) { - setOperationAction(ISD::FP_EXTEND, MVT::f80, Custom); - setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f80, Custom); - } + if (isTypeLegal(MVT::f80)) + setOperationAction({ISD::FP_EXTEND, ISD::STRICT_FP_EXTEND}, MVT::f80, + Custom); - setCondCodeAction(ISD::SETOEQ, MVT::f16, Expand); - setCondCodeAction(ISD::SETUNE, MVT::f16, Expand); + setCondCodeAction({ISD::SETOEQ, ISD::SETUNE}, MVT::f16, Expand); if (Subtarget.useAVX512Regs()) { setGroup(MVT::v32f16); addRegisterClass(MVT::v32f16, &X86::VR512RegClass); setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v32f16, Custom); - setOperationAction(ISD::SINT_TO_FP, MVT::v32i16, Legal); - setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v32i16, Legal); - setOperationAction(ISD::UINT_TO_FP, MVT::v32i16, Legal); - setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v32i16, Legal); + setOperationAction({ISD::SINT_TO_FP, ISD::STRICT_SINT_TO_FP, + ISD::UINT_TO_FP, ISD::STRICT_UINT_TO_FP}, + MVT::v32i16, Legal); setOperationAction(ISD::STRICT_FP_ROUND, MVT::v16f16, Legal); setOperationAction(ISD::STRICT_FP_EXTEND, 
MVT::v16f32, Legal); setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v32f16, Custom); - setOperationAction(ISD::FP_TO_SINT, MVT::v32i16, Custom); - setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v32i16, Custom); - setOperationAction(ISD::FP_TO_UINT, MVT::v32i16, Custom); - setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v32i16, Custom); + setOperationAction({ISD::FP_TO_SINT, ISD::STRICT_FP_TO_SINT, + ISD::FP_TO_UINT, ISD::STRICT_FP_TO_UINT}, + MVT::v32i16, Custom); setOperationPromotedToType(ISD::FP_TO_SINT, MVT::v32i8, MVT::v32i16); setOperationPromotedToType(ISD::STRICT_FP_TO_SINT, MVT::v32i8, MVT::v32i16); @@ -2015,8 +1617,8 @@ setLoadExtAction(ISD::EXTLOAD, MVT::v8f64, MVT::v8f16, Legal); setLoadExtAction(ISD::EXTLOAD, MVT::v16f32, MVT::v16f16, Legal); - setOperationAction(ISD::STRICT_FSETCC, MVT::v32i1, Custom); - setOperationAction(ISD::STRICT_FSETCCS, MVT::v32i1, Custom); + setOperationAction({ISD::STRICT_FSETCC, ISD::STRICT_FSETCCS}, MVT::v32i1, + Custom); } if (Subtarget.hasVLX()) { @@ -2027,25 +1629,19 @@ setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8f16, Legal); setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16f16, Custom); - setOperationAction(ISD::SINT_TO_FP, MVT::v16i16, Legal); - setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v16i16, Legal); - setOperationAction(ISD::SINT_TO_FP, MVT::v8i16, Legal); - setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v8i16, Legal); - setOperationAction(ISD::UINT_TO_FP, MVT::v16i16, Legal); - setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v16i16, Legal); - setOperationAction(ISD::UINT_TO_FP, MVT::v8i16, Legal); - setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v8i16, Legal); - - setOperationAction(ISD::FP_TO_SINT, MVT::v8i16, Custom); - setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v8i16, Custom); - setOperationAction(ISD::FP_TO_UINT, MVT::v8i16, Custom); - setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v8i16, Custom); + setOperationAction({ISD::SINT_TO_FP, ISD::STRICT_SINT_TO_FP, + ISD::UINT_TO_FP, 
ISD::STRICT_UINT_TO_FP}, + {MVT::v16i16, MVT::v8i16}, Legal); + + setOperationAction({ISD::FP_TO_SINT, ISD::STRICT_FP_TO_SINT, + ISD::FP_TO_UINT, ISD::STRICT_FP_TO_UINT}, + MVT::v8i16, Custom); setOperationAction(ISD::STRICT_FP_ROUND, MVT::v8f16, Legal); setOperationAction(ISD::STRICT_FP_EXTEND, MVT::v8f32, Legal); // INSERT_VECTOR_ELT v8f16 extended to VECTOR_SHUFFLE - setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8f16, Custom); - setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16f16, Custom); + setOperationAction(ISD::INSERT_VECTOR_ELT, {MVT::v8f16, MVT::v16f16}, + Custom); setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v8f16, Legal); setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v16f16, Legal); @@ -2057,8 +1653,7 @@ setLoadExtAction(ISD::EXTLOAD, MVT::v4f32, MVT::v4f16, Legal); // Need to custom widen these to prevent scalarization. - setOperationAction(ISD::LOAD, MVT::v4f16, Custom); - setOperationAction(ISD::STORE, MVT::v4f16, Custom); + setOperationAction({ISD::LOAD, ISD::STORE}, MVT::v4f16, Custom); } // Support fp16 0 immediate @@ -2085,38 +1680,23 @@ if (Subtarget.hasFP16()) { // vcvttph2[u]dq v4f16 -> v4i32/64, v2f16 -> v2i32/64 - setOperationAction(ISD::FP_TO_SINT, MVT::v2f16, Custom); - setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v2f16, Custom); - setOperationAction(ISD::FP_TO_UINT, MVT::v2f16, Custom); - setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v2f16, Custom); - setOperationAction(ISD::FP_TO_SINT, MVT::v4f16, Custom); - setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v4f16, Custom); - setOperationAction(ISD::FP_TO_UINT, MVT::v4f16, Custom); - setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v4f16, Custom); + setOperationAction({ISD::FP_TO_SINT, ISD::STRICT_FP_TO_SINT, + ISD::FP_TO_UINT, ISD::STRICT_FP_TO_UINT}, + {MVT::v2f16, MVT::v4f16}, Custom); // vcvt[u]dq2ph v4i32/64 -> v4f16, v2i32/64 -> v2f16 - setOperationAction(ISD::SINT_TO_FP, MVT::v2f16, Custom); - setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v2f16, Custom); - 
setOperationAction(ISD::UINT_TO_FP, MVT::v2f16, Custom); - setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v2f16, Custom); - setOperationAction(ISD::SINT_TO_FP, MVT::v4f16, Custom); - setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v4f16, Custom); - setOperationAction(ISD::UINT_TO_FP, MVT::v4f16, Custom); - setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4f16, Custom); + setOperationAction({ISD::SINT_TO_FP, ISD::STRICT_SINT_TO_FP, + ISD::UINT_TO_FP, ISD::STRICT_UINT_TO_FP}, + {MVT::v2f16, MVT::v4f16}, Custom); // vcvtps2phx v4f32 -> v4f16, v2f32 -> v2f16 - setOperationAction(ISD::FP_ROUND, MVT::v2f16, Custom); - setOperationAction(ISD::STRICT_FP_ROUND, MVT::v2f16, Custom); - setOperationAction(ISD::FP_ROUND, MVT::v4f16, Custom); - setOperationAction(ISD::STRICT_FP_ROUND, MVT::v4f16, Custom); + setOperationAction({ISD::FP_ROUND, ISD::STRICT_FP_ROUND}, + {MVT::v2f16, MVT::v4f16}, Custom); // vcvtph2psx v4f16 -> v4f32, v2f16 -> v2f32 - setOperationAction(ISD::FP_EXTEND, MVT::v2f16, Custom); - setOperationAction(ISD::STRICT_FP_EXTEND, MVT::v2f16, Custom); - setOperationAction(ISD::FP_EXTEND, MVT::v4f16, Custom); - setOperationAction(ISD::STRICT_FP_EXTEND, MVT::v4f16, Custom); + setOperationAction({ISD::FP_EXTEND, ISD::STRICT_FP_EXTEND}, + {MVT::v2f16, MVT::v4f16}, Custom); } - setOperationAction(ISD::TRUNCATE, MVT::v16i32, Custom); - setOperationAction(ISD::TRUNCATE, MVT::v8i64, Custom); - setOperationAction(ISD::TRUNCATE, MVT::v16i64, Custom); + setOperationAction(ISD::TRUNCATE, {MVT::v16i32, MVT::v8i64, MVT::v16i64}, + Custom); } if (Subtarget.hasAMXTILE()) { @@ -2124,12 +1704,11 @@ } // We want to custom lower some of our intrinsics. 
- setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom); - setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom); - setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom); - if (!Subtarget.is64Bit()) { + setOperationAction( + {ISD::INTRINSIC_WO_CHAIN, ISD::INTRINSIC_W_CHAIN, ISD::INTRINSIC_VOID}, + MVT::Other, Custom); + if (!Subtarget.is64Bit()) setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom); - } // Only custom-lower 64-bit SADDO and friends on 64-bit because we don't // handle type legalization for these operations here. @@ -2141,55 +1720,42 @@ if (VT == MVT::i64 && !Subtarget.is64Bit()) continue; // Add/Sub/Mul with overflow operations are custom lowered. - setOperationAction(ISD::SADDO, VT, Custom); - setOperationAction(ISD::UADDO, VT, Custom); - setOperationAction(ISD::SSUBO, VT, Custom); - setOperationAction(ISD::USUBO, VT, Custom); - setOperationAction(ISD::SMULO, VT, Custom); - setOperationAction(ISD::UMULO, VT, Custom); + setOperationAction({ISD::SADDO, ISD::UADDO, ISD::SSUBO, ISD::USUBO, + ISD::SMULO, ISD::UMULO}, + VT, Custom); // Support carry in as value rather than glue. - setOperationAction(ISD::ADDCARRY, VT, Custom); - setOperationAction(ISD::SUBCARRY, VT, Custom); - setOperationAction(ISD::SETCCCARRY, VT, Custom); - setOperationAction(ISD::SADDO_CARRY, VT, Custom); - setOperationAction(ISD::SSUBO_CARRY, VT, Custom); + setOperationAction({ISD::ADDCARRY, ISD::SUBCARRY, ISD::SETCCCARRY, + ISD::SADDO_CARRY, ISD::SSUBO_CARRY}, + VT, Custom); } if (!Subtarget.is64Bit()) { // These libcalls are not available in 32-bit. - setLibcallName(RTLIB::SHL_I128, nullptr); - setLibcallName(RTLIB::SRL_I128, nullptr); - setLibcallName(RTLIB::SRA_I128, nullptr); - setLibcallName(RTLIB::MUL_I128, nullptr); + setLibcallName(RTLIB::SHL_I128); + setLibcallName(RTLIB::SRL_I128); + setLibcallName(RTLIB::SRA_I128); + setLibcallName(RTLIB::MUL_I128); // The MULO libcall is not part of libgcc, only compiler-rt. 
- setLibcallName(RTLIB::MULO_I64, nullptr); + setLibcallName(RTLIB::MULO_I64); } // The MULO libcall is not part of libgcc, only compiler-rt. - setLibcallName(RTLIB::MULO_I128, nullptr); + setLibcallName(RTLIB::MULO_I128); // Combine sin / cos into _sincos_stret if it is available. if (getLibcallName(RTLIB::SINCOS_STRET_F32) != nullptr && getLibcallName(RTLIB::SINCOS_STRET_F64) != nullptr) { - setOperationAction(ISD::FSINCOS, MVT::f64, Custom); - setOperationAction(ISD::FSINCOS, MVT::f32, Custom); - } - - if (Subtarget.isTargetWin64()) { - setOperationAction(ISD::SDIV, MVT::i128, Custom); - setOperationAction(ISD::UDIV, MVT::i128, Custom); - setOperationAction(ISD::SREM, MVT::i128, Custom); - setOperationAction(ISD::UREM, MVT::i128, Custom); - setOperationAction(ISD::FP_TO_SINT, MVT::i128, Custom); - setOperationAction(ISD::FP_TO_UINT, MVT::i128, Custom); - setOperationAction(ISD::SINT_TO_FP, MVT::i128, Custom); - setOperationAction(ISD::UINT_TO_FP, MVT::i128, Custom); - setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i128, Custom); - setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i128, Custom); - setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i128, Custom); - setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i128, Custom); + setOperationAction(ISD::FSINCOS, {MVT::f64, MVT::f32}, Custom); } + if (Subtarget.isTargetWin64()) + setOperationAction({ISD::SDIV, ISD::UDIV, ISD::SREM, ISD::UREM, + ISD::FP_TO_SINT, ISD::FP_TO_UINT, ISD::SINT_TO_FP, + ISD::UINT_TO_FP, ISD::STRICT_FP_TO_SINT, + ISD::STRICT_FP_TO_UINT, ISD::STRICT_SINT_TO_FP, + ISD::STRICT_UINT_TO_FP}, + MVT::i128, Custom); + // On 32 bit MSVC, `fmodf(f32)` is not defined - only `fmod(f64)` // is. We should promote the value to 64-bits to solve this. 
// This is what the CRT headers do - `fmodf` is an inline header diff --git a/llvm/lib/Target/XCore/XCoreISelLowering.cpp b/llvm/lib/Target/XCore/XCoreISelLowering.cpp --- a/llvm/lib/Target/XCore/XCoreISelLowering.cpp +++ b/llvm/lib/Target/XCore/XCoreISelLowering.cpp @@ -88,24 +88,17 @@ setBooleanVectorContents(ZeroOrOneBooleanContent); // FIXME: Is this correct? // XCore does not have the NodeTypes below. - setOperationAction(ISD::BR_CC, MVT::i32, Expand); - setOperationAction(ISD::SELECT_CC, MVT::i32, Expand); + setOperationAction({ISD::BR_CC, ISD::SELECT_CC}, MVT::i32, Expand); // 64bit - setOperationAction(ISD::ADD, MVT::i64, Custom); - setOperationAction(ISD::SUB, MVT::i64, Custom); - setOperationAction(ISD::SMUL_LOHI, MVT::i32, Custom); - setOperationAction(ISD::UMUL_LOHI, MVT::i32, Custom); - setOperationAction(ISD::MULHS, MVT::i32, Expand); - setOperationAction(ISD::MULHU, MVT::i32, Expand); - setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand); - setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand); - setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand); + setOperationAction({ISD::ADD, ISD::SUB}, MVT::i64, Custom); + setOperationAction({ISD::SMUL_LOHI, ISD::UMUL_LOHI}, MVT::i32, Custom); + setOperationAction( + {ISD::MULHS, ISD::MULHU, ISD::SHL_PARTS, ISD::SRA_PARTS, ISD::SRL_PARTS}, + MVT::i32, Expand); // Bit Manipulation - setOperationAction(ISD::CTPOP, MVT::i32, Expand); - setOperationAction(ISD::ROTL , MVT::i32, Expand); - setOperationAction(ISD::ROTR , MVT::i32, Expand); + setOperationAction({ISD::CTPOP, ISD::ROTL, ISD::ROTR}, MVT::i32, Expand); setOperationAction(ISD::BITREVERSE , MVT::i32, Legal); setOperationAction(ISD::TRAP, MVT::Other, Legal); @@ -113,35 +106,29 @@ // Jump tables. 
setOperationAction(ISD::BR_JT, MVT::Other, Custom); - setOperationAction(ISD::GlobalAddress, MVT::i32, Custom); - setOperationAction(ISD::BlockAddress, MVT::i32 , Custom); + setOperationAction({ISD::GlobalAddress, ISD::BlockAddress}, MVT::i32, Custom); // Conversion of i64 -> double produces constantpool nodes - setOperationAction(ISD::ConstantPool, MVT::i32, Custom); + setOperationAction(ISD::ConstantPool, MVT::i32, Custom); // Loads for (MVT VT : MVT::integer_valuetypes()) { - setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote); - setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote); - setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote); + setLoadExtAction({ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}, VT, MVT::i1, + Promote); setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Expand); setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i16, Expand); } // Custom expand misaligned loads / stores. - setOperationAction(ISD::LOAD, MVT::i32, Custom); - setOperationAction(ISD::STORE, MVT::i32, Custom); + setOperationAction({ISD::LOAD, ISD::STORE}, MVT::i32, Custom); // Varargs - setOperationAction(ISD::VAEND, MVT::Other, Expand); - setOperationAction(ISD::VACOPY, MVT::Other, Expand); - setOperationAction(ISD::VAARG, MVT::Other, Custom); - setOperationAction(ISD::VASTART, MVT::Other, Custom); + setOperationAction({ISD::VAEND, ISD::VACOPY}, MVT::Other, Expand); + setOperationAction({ISD::VAARG, ISD::VASTART}, MVT::Other, Custom); // Dynamic stack - setOperationAction(ISD::STACKSAVE, MVT::Other, Expand); - setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand); + setOperationAction({ISD::STACKSAVE, ISD::STACKRESTORE}, MVT::Other, Expand); setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand); // Exception handling @@ -152,12 +139,11 @@ // We request a fence for ATOMIC_* instructions, to reduce them to Monotonic. // As we are always Sequential Consistent, an ATOMIC_FENCE becomes a no OP. 
setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom); - setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Custom); - setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Custom); + setOperationAction({ISD::ATOMIC_LOAD, ISD::ATOMIC_STORE}, MVT::i32, Custom); // TRAMPOLINE is custom lowered. - setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom); - setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom); + setOperationAction({ISD::INIT_TRAMPOLINE, ISD::ADJUST_TRAMPOLINE}, MVT::Other, + Custom); // We want to custom lower some of our intrinsics. setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);