diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp --- a/llvm/lib/Target/X86/X86ISelLowering.cpp +++ b/llvm/lib/Target/X86/X86ISelLowering.cpp @@ -159,8 +159,7 @@ if (Subtarget.getTargetTriple().isOSMSVCRT()) { // MSVCRT doesn't have powi; fall back to pow - setLibcallName(RTLIB::POWI_F32, nullptr); - setLibcallName(RTLIB::POWI_F64, nullptr); + setLibcallName({RTLIB::POWI_F32, RTLIB::POWI_F64}, nullptr); } // If we don't have cmpxchg8b(meaing this is a 386/486), limit atomic size to @@ -191,23 +190,18 @@ setTruncStoreAction(MVT::f64, MVT::f32, Expand); // SETOEQ and SETUNE require checking two conditions. - for (auto VT : {MVT::f32, MVT::f64, MVT::f80}) { - setCondCodeAction(ISD::SETOEQ, VT, Expand); - setCondCodeAction(ISD::SETUNE, VT, Expand); - } + setCondCodeAction({ISD::SETOEQ, ISD::SETUNE}, {MVT::f32, MVT::f64, MVT::f80}, + Expand); // Integer absolute. if (Subtarget.canUseCMOV()) { - setOperationAction(ISD::ABS , MVT::i16 , Custom); - setOperationAction(ISD::ABS , MVT::i32 , Custom); + setOperationAction(ISD::ABS, {MVT::i16, MVT::i32}, Custom); if (Subtarget.is64Bit()) setOperationAction(ISD::ABS , MVT::i64 , Custom); } // Signed saturation subtraction. - setOperationAction(ISD::SSUBSAT , MVT::i8 , Custom); - setOperationAction(ISD::SSUBSAT , MVT::i16 , Custom); - setOperationAction(ISD::SSUBSAT , MVT::i32 , Custom); + setOperationAction(ISD::SSUBSAT, {MVT::i8, MVT::i16, MVT::i32}, Custom); if (Subtarget.is64Bit()) setOperationAction(ISD::SSUBSAT , MVT::i64 , Custom); @@ -216,8 +210,7 @@ // For slow shld targets we only lower for code size. LegalizeAction ShiftDoubleAction = Subtarget.isSHLDSlow() ? 
Custom : Legal; - setOperationAction(ShiftOp , MVT::i8 , Custom); - setOperationAction(ShiftOp , MVT::i16 , Custom); + setOperationAction(ShiftOp, {MVT::i8, MVT::i16}, Custom); setOperationAction(ShiftOp , MVT::i32 , ShiftDoubleAction); if (Subtarget.is64Bit()) setOperationAction(ShiftOp , MVT::i64 , ShiftDoubleAction); @@ -226,48 +219,44 @@ if (!Subtarget.useSoftFloat()) { // Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this // operation. - setOperationAction(ISD::UINT_TO_FP, MVT::i8, Promote); - setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i8, Promote); - setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote); - setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i16, Promote); + setOperationAction({ISD::UINT_TO_FP, ISD::STRICT_UINT_TO_FP}, + {MVT::i8, MVT::i16}, Promote); // We have an algorithm for SSE2, and we turn this into a 64-bit // FILD or VCVTUSI2SS/SD for other targets. - setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom); - setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i32, Custom); + setOperationAction({ISD::UINT_TO_FP, ISD::STRICT_UINT_TO_FP}, MVT::i32, + Custom); // We have an algorithm for SSE2->double, and we turn this into a // 64-bit FILD followed by conditional FADD for other targets. - setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom); - setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i64, Custom); + setOperationAction({ISD::UINT_TO_FP, ISD::STRICT_UINT_TO_FP}, MVT::i64, + Custom); // Promote i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have // this operation. - setOperationAction(ISD::SINT_TO_FP, MVT::i8, Promote); - setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i8, Promote); + setOperationAction({ISD::SINT_TO_FP, ISD::STRICT_SINT_TO_FP}, MVT::i8, + Promote); // SSE has no i16 to fp conversion, only i32. 
We promote in the handler // to allow f80 to use i16 and f64 to use i16 with sse1 only - setOperationAction(ISD::SINT_TO_FP, MVT::i16, Custom); - setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i16, Custom); + setOperationAction({ISD::SINT_TO_FP, ISD::STRICT_SINT_TO_FP}, MVT::i16, + Custom); // f32 and f64 cases are Legal with SSE1/SSE2, f80 case is not - setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom); - setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Custom); + setOperationAction({ISD::SINT_TO_FP, ISD::STRICT_SINT_TO_FP}, MVT::i32, + Custom); // In 32-bit mode these are custom lowered. In 64-bit mode F32 and F64 // are Legal, f80 is custom lowered. - setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom); - setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i64, Custom); + setOperationAction({ISD::SINT_TO_FP, ISD::STRICT_SINT_TO_FP}, MVT::i64, + Custom); // Promote i8 FP_TO_SINT to larger FP_TO_SINTS's, as X86 doesn't have // this operation. setOperationAction(ISD::FP_TO_SINT, MVT::i8, Promote); // FIXME: This doesn't generate invalid exception when it should. PR44019. setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i8, Promote); - setOperationAction(ISD::FP_TO_SINT, MVT::i16, Custom); - setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i16, Custom); - setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom); - setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom); + setOperationAction({ISD::FP_TO_SINT, ISD::STRICT_FP_TO_SINT}, + {MVT::i16, MVT::i32}, Custom); // In 32-bit mode these are custom lowered. In 64-bit mode F32 and F64 // are Legal, f80 is custom lowered. - setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom); - setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i64, Custom); + setOperationAction({ISD::FP_TO_SINT, ISD::STRICT_FP_TO_SINT}, MVT::i64, + Custom); // Handle FP_TO_UINT by promoting the destination to a larger signed // conversion. 
@@ -277,43 +266,31 @@ setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote); // FIXME: This doesn't generate invalid exception when it should. PR44019. setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i16, Promote); - setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom); - setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom); - setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom); - setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i64, Custom); + setOperationAction({ISD::FP_TO_UINT, ISD::STRICT_FP_TO_UINT}, + {MVT::i32, MVT::i64}, Custom); - setOperationAction(ISD::LRINT, MVT::f32, Custom); - setOperationAction(ISD::LRINT, MVT::f64, Custom); - setOperationAction(ISD::LLRINT, MVT::f32, Custom); - setOperationAction(ISD::LLRINT, MVT::f64, Custom); + setOperationAction({ISD::LRINT, ISD::LLRINT}, {MVT::f32, MVT::f64}, Custom); - if (!Subtarget.is64Bit()) { - setOperationAction(ISD::LRINT, MVT::i64, Custom); - setOperationAction(ISD::LLRINT, MVT::i64, Custom); - } + if (!Subtarget.is64Bit()) + setOperationAction({ISD::LRINT, ISD::LLRINT}, MVT::i64, Custom); } if (Subtarget.hasSSE2()) { // Custom lowering for saturating float to int conversions. // We handle promotion to larger result types manually. - for (MVT VT : { MVT::i8, MVT::i16, MVT::i32 }) { - setOperationAction(ISD::FP_TO_UINT_SAT, VT, Custom); - setOperationAction(ISD::FP_TO_SINT_SAT, VT, Custom); - } - if (Subtarget.is64Bit()) { - setOperationAction(ISD::FP_TO_UINT_SAT, MVT::i64, Custom); - setOperationAction(ISD::FP_TO_SINT_SAT, MVT::i64, Custom); - } + setOperationAction({ISD::FP_TO_UINT_SAT, ISD::FP_TO_SINT_SAT}, + {MVT::i8, MVT::i16, MVT::i32}, Custom); + if (Subtarget.is64Bit()) + setOperationAction({ISD::FP_TO_UINT_SAT, ISD::FP_TO_SINT_SAT}, MVT::i64, + Custom); } // Handle address space casts between mixed sized pointers. 
- setOperationAction(ISD::ADDRSPACECAST, MVT::i32, Custom); - setOperationAction(ISD::ADDRSPACECAST, MVT::i64, Custom); + setOperationAction(ISD::ADDRSPACECAST, {MVT::i32, MVT::i64}, Custom); // TODO: when we have SSE, these could be more efficient, by using movd/movq. if (!Subtarget.hasSSE2()) { - setOperationAction(ISD::BITCAST , MVT::f32 , Expand); - setOperationAction(ISD::BITCAST , MVT::i32 , Expand); + setOperationAction(ISD::BITCAST, {MVT::f32, MVT::i32}, Expand); if (Subtarget.is64Bit()) { setOperationAction(ISD::BITCAST , MVT::f64 , Expand); // Without SSE, i64->f64 goes through memory. @@ -332,32 +309,23 @@ // (low) operations are left as Legal, as there are single-result // instructions for this in x86. Using the two-result multiply instructions // when both high and low results are needed must be arranged by dagcombine. - for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) { - setOperationAction(ISD::MULHS, VT, Expand); - setOperationAction(ISD::MULHU, VT, Expand); - setOperationAction(ISD::SDIV, VT, Expand); - setOperationAction(ISD::UDIV, VT, Expand); - setOperationAction(ISD::SREM, VT, Expand); - setOperationAction(ISD::UREM, VT, Expand); - } + setOperationAction( + {ISD::MULHS, ISD::MULHU, ISD::SDIV, ISD::UDIV, ISD::SREM, ISD::UREM}, + {MVT::i8, MVT::i16, MVT::i32, MVT::i64}, Expand); setOperationAction(ISD::BR_JT , MVT::Other, Expand); setOperationAction(ISD::BRCOND , MVT::Other, Custom); - for (auto VT : { MVT::f32, MVT::f64, MVT::f80, MVT::f128, - MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) { - setOperationAction(ISD::BR_CC, VT, Expand); - setOperationAction(ISD::SELECT_CC, VT, Expand); - } + setOperationAction({ISD::BR_CC, ISD::SELECT_CC}, + {MVT::f32, MVT::f64, MVT::f80, MVT::f128, MVT::i8, + MVT::i16, MVT::i32, MVT::i64}, + Expand); if (Subtarget.is64Bit()) setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal); - setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16 , Legal); - setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8 , 
Legal); + setOperationAction(ISD::SIGN_EXTEND_INREG, {MVT::i16, MVT::i8}, Legal); setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1 , Expand); - setOperationAction(ISD::FREM , MVT::f32 , Expand); - setOperationAction(ISD::FREM , MVT::f64 , Expand); - setOperationAction(ISD::FREM , MVT::f80 , Expand); - setOperationAction(ISD::FREM , MVT::f128 , Expand); + setOperationAction(ISD::FREM, {MVT::f32, MVT::f64, MVT::f80, MVT::f128}, + Expand); if (!Subtarget.useSoftFloat() && Subtarget.hasX87()) { setOperationAction(ISD::FLT_ROUNDS_ , MVT::i32 , Custom); @@ -374,10 +342,8 @@ // is enabled. setOperationPromotedToType(ISD::CTTZ_ZERO_UNDEF, MVT::i16, MVT::i32); } else { - setOperationAction(ISD::CTTZ, MVT::i16, Custom); - setOperationAction(ISD::CTTZ , MVT::i32 , Custom); - setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16 , Legal); - setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32 , Legal); + setOperationAction(ISD::CTTZ, {MVT::i16, MVT::i32}, Custom); + setOperationAction(ISD::CTTZ_ZERO_UNDEF, {MVT::i16, MVT::i32}, Legal); if (Subtarget.is64Bit()) { setOperationAction(ISD::CTTZ , MVT::i64 , Custom); setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Legal); @@ -393,8 +359,7 @@ for (auto VT : {MVT::i8, MVT::i16, MVT::i32, MVT::i64}) { if (VT == MVT::i64 && !Subtarget.is64Bit()) continue; - setOperationAction(ISD::CTLZ , VT, Custom); - setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Custom); + setOperationAction({ISD::CTLZ, ISD::CTLZ_ZERO_UNDEF}, VT, Custom); } } @@ -407,9 +372,7 @@ Op, MVT::f32, (!Subtarget.useSoftFloat() && Subtarget.hasF16C()) ? Custom : Expand); // There's never any support for operations beyond MVT::f32. 
- setOperationAction(Op, MVT::f64, Expand); - setOperationAction(Op, MVT::f80, Expand); - setOperationAction(Op, MVT::f128, Expand); + setOperationAction(Op, {MVT::f64, MVT::f80, MVT::f128}, Expand); } setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand); @@ -421,9 +384,7 @@ setTruncStoreAction(MVT::f80, MVT::f16, Expand); setTruncStoreAction(MVT::f128, MVT::f16, Expand); - setOperationAction(ISD::PARITY, MVT::i8, Custom); - setOperationAction(ISD::PARITY, MVT::i16, Custom); - setOperationAction(ISD::PARITY, MVT::i32, Custom); + setOperationAction(ISD::PARITY, {MVT::i8, MVT::i16, MVT::i32}, Custom); if (Subtarget.is64Bit()) setOperationAction(ISD::PARITY, MVT::i64, Custom); if (Subtarget.hasPOPCNT()) { @@ -432,9 +393,7 @@ // on the dest that popcntl hasn't had since Cannon Lake. setOperationPromotedToType(ISD::CTPOP, MVT::i16, MVT::i32); } else { - setOperationAction(ISD::CTPOP , MVT::i8 , Expand); - setOperationAction(ISD::CTPOP , MVT::i16 , Expand); - setOperationAction(ISD::CTPOP , MVT::i32 , Expand); + setOperationAction(ISD::CTPOP, {MVT::i8, MVT::i16, MVT::i32}, Expand); if (Subtarget.is64Bit()) setOperationAction(ISD::CTPOP , MVT::i64 , Expand); else @@ -447,17 +406,13 @@ setOperationAction(ISD::BSWAP , MVT::i16 , Expand); // X86 wants to expand cmov itself. 
- for (auto VT : { MVT::f32, MVT::f64, MVT::f80, MVT::f128 }) { - setOperationAction(ISD::SELECT, VT, Custom); - setOperationAction(ISD::SETCC, VT, Custom); - setOperationAction(ISD::STRICT_FSETCC, VT, Custom); - setOperationAction(ISD::STRICT_FSETCCS, VT, Custom); - } + setOperationAction( + {ISD::SELECT, ISD::SETCC, ISD::STRICT_FSETCC, ISD::STRICT_FSETCCS}, + {MVT::f32, MVT::f64, MVT::f80, MVT::f128}, Custom); for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) { if (VT == MVT::i64 && !Subtarget.is64Bit()) continue; - setOperationAction(ISD::SELECT, VT, Custom); - setOperationAction(ISD::SETCC, VT, Custom); + setOperationAction({ISD::SELECT, ISD::SETCC}, VT, Custom); } // Custom action for SELECT MMX and expand action for SELECT_CC MMX @@ -468,8 +423,8 @@ // NOTE: EH_SJLJ_SETJMP/_LONGJMP are not recommended, since // LLVM/Clang supports zero-cost DWARF and SEH exception handling. setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom); - setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom); - setOperationAction(ISD::EH_SJLJ_SETUP_DISPATCH, MVT::Other, Custom); + setOperationAction({ISD::EH_SJLJ_LONGJMP, ISD::EH_SJLJ_SETUP_DISPATCH}, + MVT::Other, Custom); if (TM.Options.ExceptionModel == ExceptionHandling::SjLj) setLibcallName(RTLIB::UNWIND_RESUME, "_Unwind_SjLj_Resume"); @@ -477,21 +432,18 @@ for (auto VT : { MVT::i32, MVT::i64 }) { if (VT == MVT::i64 && !Subtarget.is64Bit()) continue; - setOperationAction(ISD::ConstantPool , VT, Custom); - setOperationAction(ISD::JumpTable , VT, Custom); - setOperationAction(ISD::GlobalAddress , VT, Custom); - setOperationAction(ISD::GlobalTLSAddress, VT, Custom); - setOperationAction(ISD::ExternalSymbol , VT, Custom); - setOperationAction(ISD::BlockAddress , VT, Custom); + setOperationAction({ISD::ConstantPool, ISD::JumpTable, ISD::GlobalAddress, + ISD::GlobalTLSAddress, ISD::ExternalSymbol, + ISD::BlockAddress}, + VT, Custom); } // 64-bit shl, sra, srl (iff 32-bit x86) for (auto VT : { MVT::i32, MVT::i64 }) { 
if (VT == MVT::i64 && !Subtarget.is64Bit()) continue; - setOperationAction(ISD::SHL_PARTS, VT, Custom); - setOperationAction(ISD::SRA_PARTS, VT, Custom); - setOperationAction(ISD::SRL_PARTS, VT, Custom); + setOperationAction({ISD::SHL_PARTS, ISD::SRA_PARTS, ISD::SRL_PARTS}, VT, + Custom); } if (Subtarget.hasSSEPrefetch() || Subtarget.hasThreeDNow()) @@ -500,15 +452,11 @@ setOperationAction(ISD::ATOMIC_FENCE , MVT::Other, Custom); // Expand certain atomics - for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) { - setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, VT, Custom); - setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Custom); - setOperationAction(ISD::ATOMIC_LOAD_ADD, VT, Custom); - setOperationAction(ISD::ATOMIC_LOAD_OR, VT, Custom); - setOperationAction(ISD::ATOMIC_LOAD_XOR, VT, Custom); - setOperationAction(ISD::ATOMIC_LOAD_AND, VT, Custom); - setOperationAction(ISD::ATOMIC_STORE, VT, Custom); - } + setOperationAction({ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, ISD::ATOMIC_LOAD_SUB, + ISD::ATOMIC_LOAD_ADD, ISD::ATOMIC_LOAD_OR, + ISD::ATOMIC_LOAD_XOR, ISD::ATOMIC_LOAD_AND, + ISD::ATOMIC_STORE}, + {MVT::i8, MVT::i16, MVT::i32, MVT::i64}, Custom); if (!Subtarget.is64Bit()) setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Custom); @@ -523,14 +471,12 @@ setOperationAction(ISD::EH_LABEL, MVT::Other, Expand); } - setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom); - setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i64, Custom); + setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, {MVT::i32, MVT::i64}, Custom); - setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom); - setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom); + setOperationAction({ISD::INIT_TRAMPOLINE, ISD::ADJUST_TRAMPOLINE}, MVT::Other, + Custom); - setOperationAction(ISD::TRAP, MVT::Other, Legal); - setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal); + setOperationAction({ISD::TRAP, ISD::DEBUGTRAP}, MVT::Other, Legal); if (Subtarget.getTargetTriple().isPS4()) 
setOperationAction(ISD::UBSANTRAP, MVT::Other, Expand); else @@ -540,17 +486,16 @@ setOperationAction(ISD::VASTART , MVT::Other, Custom); setOperationAction(ISD::VAEND , MVT::Other, Expand); bool Is64Bit = Subtarget.is64Bit(); - setOperationAction(ISD::VAARG, MVT::Other, Is64Bit ? Custom : Expand); - setOperationAction(ISD::VACOPY, MVT::Other, Is64Bit ? Custom : Expand); + setOperationAction({ISD::VAARG, ISD::VACOPY}, MVT::Other, + Is64Bit ? Custom : Expand); - setOperationAction(ISD::STACKSAVE, MVT::Other, Expand); - setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand); + setOperationAction({ISD::STACKSAVE, ISD::STACKRESTORE}, MVT::Other, Expand); setOperationAction(ISD::DYNAMIC_STACKALLOC, PtrVT, Custom); // GC_TRANSITION_START and GC_TRANSITION_END need custom lowering. - setOperationAction(ISD::GC_TRANSITION_START, MVT::Other, Custom); - setOperationAction(ISD::GC_TRANSITION_END, MVT::Other, Custom); + setOperationAction({ISD::GC_TRANSITION_START, ISD::GC_TRANSITION_END}, + MVT::Other, Custom); if (!Subtarget.useSoftFloat() && Subtarget.hasSSE2()) { // f32 and f64 use SSE. @@ -577,18 +522,14 @@ setOperationAction(ISD::FCOPYSIGN, VT, Custom); // These might be better off as horizontal vector ops. - setOperationAction(ISD::FADD, VT, Custom); - setOperationAction(ISD::FSUB, VT, Custom); + setOperationAction({ISD::FADD, ISD::FSUB}, VT, Custom); // We don't support sin/cos/fmod - setOperationAction(ISD::FSIN , VT, Expand); - setOperationAction(ISD::FCOS , VT, Expand); - setOperationAction(ISD::FSINCOS, VT, Expand); + setOperationAction({ISD::FSIN, ISD::FCOS, ISD::FSINCOS}, VT, Expand); } // Lower this to MOVMSK plus an AND. 
- setOperationAction(ISD::FGETSIGN, MVT::i64, Custom); - setOperationAction(ISD::FGETSIGN, MVT::i32, Custom); + setOperationAction(ISD::FGETSIGN, {MVT::i64, MVT::i32}, Custom); } else if (!Subtarget.useSoftFloat() && Subtarget.hasSSE1() && (UseX87 || Is64Bit)) { @@ -613,16 +554,12 @@ setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom); // We don't support sin/cos/fmod - setOperationAction(ISD::FSIN , MVT::f32, Expand); - setOperationAction(ISD::FCOS , MVT::f32, Expand); - setOperationAction(ISD::FSINCOS, MVT::f32, Expand); + setOperationAction({ISD::FSIN, ISD::FCOS, ISD::FSINCOS}, MVT::f32, Expand); - if (UseX87) { + if (UseX87) // Always expand sin/cos functions even though x87 has an instruction. - setOperationAction(ISD::FSIN, MVT::f64, Expand); - setOperationAction(ISD::FCOS, MVT::f64, Expand); - setOperationAction(ISD::FSINCOS, MVT::f64, Expand); - } + setOperationAction({ISD::FSIN, ISD::FCOS, ISD::FSINCOS}, MVT::f64, + Expand); } else if (UseX87) { // f32 and f64 in x87. // Set up the FP register classes. @@ -630,13 +567,10 @@ addRegisterClass(MVT::f32, &X86::RFP32RegClass); for (auto VT : { MVT::f32, MVT::f64 }) { - setOperationAction(ISD::UNDEF, VT, Expand); - setOperationAction(ISD::FCOPYSIGN, VT, Expand); + setOperationAction({ISD::UNDEF, ISD::FCOPYSIGN}, VT, Expand); // Always expand sin/cos functions even though x87 has an instruction. - setOperationAction(ISD::FSIN , VT, Expand); - setOperationAction(ISD::FCOS , VT, Expand); - setOperationAction(ISD::FSINCOS, VT, Expand); + setOperationAction({ISD::FSIN, ISD::FCOS, ISD::FSINCOS}, VT, Expand); } } @@ -661,29 +595,19 @@ addLegalFPImmediate(APFloat(+0.0)); // xorpd } // Handle constrained floating-point operations of scalar. 
- setOperationAction(ISD::STRICT_FADD, MVT::f32, Legal); - setOperationAction(ISD::STRICT_FADD, MVT::f64, Legal); - setOperationAction(ISD::STRICT_FSUB, MVT::f32, Legal); - setOperationAction(ISD::STRICT_FSUB, MVT::f64, Legal); - setOperationAction(ISD::STRICT_FMUL, MVT::f32, Legal); - setOperationAction(ISD::STRICT_FMUL, MVT::f64, Legal); - setOperationAction(ISD::STRICT_FDIV, MVT::f32, Legal); - setOperationAction(ISD::STRICT_FDIV, MVT::f64, Legal); + setOperationAction({ISD::STRICT_FADD, ISD::STRICT_FSUB, ISD::STRICT_FMUL, + ISD::STRICT_FDIV, ISD::STRICT_FP_ROUND, + ISD::STRICT_FSQRT}, + {MVT::f32, MVT::f64}, Legal); setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f64, Legal); - setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Legal); - setOperationAction(ISD::STRICT_FP_ROUND, MVT::f64, Legal); - setOperationAction(ISD::STRICT_FSQRT, MVT::f32, Legal); - setOperationAction(ISD::STRICT_FSQRT, MVT::f64, Legal); // We don't support FMA. - setOperationAction(ISD::FMA, MVT::f64, Expand); - setOperationAction(ISD::FMA, MVT::f32, Expand); + setOperationAction(ISD::FMA, {MVT::f64, MVT::f32}, Expand); // f80 always uses X87. if (UseX87) { addRegisterClass(MVT::f80, &X86::RFP80RegClass); - setOperationAction(ISD::UNDEF, MVT::f80, Expand); - setOperationAction(ISD::FCOPYSIGN, MVT::f80, Expand); + setOperationAction({ISD::UNDEF, ISD::FCOPYSIGN}, MVT::f80, Expand); { APFloat TmpFlt = APFloat::getZero(APFloat::x87DoubleExtended()); addLegalFPImmediate(TmpFlt); // FLD0 @@ -700,28 +624,18 @@ } // Always expand sin/cos functions even though x87 has an instruction. 
- setOperationAction(ISD::FSIN , MVT::f80, Expand); - setOperationAction(ISD::FCOS , MVT::f80, Expand); - setOperationAction(ISD::FSINCOS, MVT::f80, Expand); - - setOperationAction(ISD::FFLOOR, MVT::f80, Expand); - setOperationAction(ISD::FCEIL, MVT::f80, Expand); - setOperationAction(ISD::FTRUNC, MVT::f80, Expand); - setOperationAction(ISD::FRINT, MVT::f80, Expand); - setOperationAction(ISD::FNEARBYINT, MVT::f80, Expand); - setOperationAction(ISD::FMA, MVT::f80, Expand); - setOperationAction(ISD::LROUND, MVT::f80, Expand); - setOperationAction(ISD::LLROUND, MVT::f80, Expand); - setOperationAction(ISD::LRINT, MVT::f80, Custom); - setOperationAction(ISD::LLRINT, MVT::f80, Custom); + setOperationAction({ISD::FSIN, ISD::FCOS, ISD::FSINCOS}, MVT::f80, Expand); + + setOperationAction({ISD::FFLOOR, ISD::FCEIL, ISD::FTRUNC, ISD::FRINT, + ISD::FNEARBYINT, ISD::FMA, ISD::LROUND, ISD::LLROUND}, + MVT::f80, Expand); + setOperationAction({ISD::LRINT, ISD::LLRINT}, MVT::f80, Custom); // Handle constrained floating-point operations of scalar. - setOperationAction(ISD::STRICT_FADD , MVT::f80, Legal); - setOperationAction(ISD::STRICT_FSUB , MVT::f80, Legal); - setOperationAction(ISD::STRICT_FMUL , MVT::f80, Legal); - setOperationAction(ISD::STRICT_FDIV , MVT::f80, Legal); - setOperationAction(ISD::STRICT_FSQRT , MVT::f80, Legal); - setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f80, Legal); + setOperationAction({ISD::STRICT_FADD, ISD::STRICT_FSUB, ISD::STRICT_FMUL, + ISD::STRICT_FDIV, ISD::STRICT_FSQRT, + ISD::STRICT_FP_EXTEND}, + MVT::f80, Legal); // FIXME: When the target is 64-bit, STRICT_FP_ROUND will be overwritten // as Custom. 
setOperationAction(ISD::STRICT_FP_ROUND, MVT::f80, Legal); @@ -734,47 +648,34 @@ addLegalFPImmediate(APFloat::getZero(APFloat::IEEEquad())); // xorps - setOperationAction(ISD::FADD, MVT::f128, LibCall); - setOperationAction(ISD::STRICT_FADD, MVT::f128, LibCall); - setOperationAction(ISD::FSUB, MVT::f128, LibCall); - setOperationAction(ISD::STRICT_FSUB, MVT::f128, LibCall); - setOperationAction(ISD::FDIV, MVT::f128, LibCall); - setOperationAction(ISD::STRICT_FDIV, MVT::f128, LibCall); - setOperationAction(ISD::FMUL, MVT::f128, LibCall); - setOperationAction(ISD::STRICT_FMUL, MVT::f128, LibCall); - setOperationAction(ISD::FMA, MVT::f128, LibCall); - setOperationAction(ISD::STRICT_FMA, MVT::f128, LibCall); - - setOperationAction(ISD::FABS, MVT::f128, Custom); - setOperationAction(ISD::FNEG, MVT::f128, Custom); - setOperationAction(ISD::FCOPYSIGN, MVT::f128, Custom); - - setOperationAction(ISD::FSIN, MVT::f128, LibCall); - setOperationAction(ISD::STRICT_FSIN, MVT::f128, LibCall); - setOperationAction(ISD::FCOS, MVT::f128, LibCall); - setOperationAction(ISD::STRICT_FCOS, MVT::f128, LibCall); - setOperationAction(ISD::FSINCOS, MVT::f128, LibCall); + setOperationAction({ISD::FADD, ISD::STRICT_FADD, ISD::FSUB, + ISD::STRICT_FSUB, ISD::FDIV, ISD::STRICT_FDIV, + ISD::FMUL, ISD::STRICT_FMUL, ISD::FMA, ISD::STRICT_FMA}, + MVT::f128, LibCall); + + setOperationAction({ISD::FABS, ISD::FNEG, ISD::FCOPYSIGN}, MVT::f128, + Custom); + + setOperationAction({ISD::FSIN, ISD::STRICT_FSIN, ISD::FCOS, + ISD::STRICT_FCOS, ISD::FSINCOS}, + MVT::f128, LibCall); // No STRICT_FSINCOS - setOperationAction(ISD::FSQRT, MVT::f128, LibCall); - setOperationAction(ISD::STRICT_FSQRT, MVT::f128, LibCall); + setOperationAction({ISD::FSQRT, ISD::STRICT_FSQRT}, MVT::f128, LibCall); - setOperationAction(ISD::FP_EXTEND, MVT::f128, Custom); - setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f128, Custom); + setOperationAction({ISD::FP_EXTEND, ISD::STRICT_FP_EXTEND}, MVT::f128, + Custom); // We need to custom 
handle any FP_ROUND with an f128 input, but // LegalizeDAG uses the result type to know when to run a custom handler. // So we have to list all legal floating point result types here. - if (isTypeLegal(MVT::f32)) { - setOperationAction(ISD::FP_ROUND, MVT::f32, Custom); - setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Custom); - } - if (isTypeLegal(MVT::f64)) { - setOperationAction(ISD::FP_ROUND, MVT::f64, Custom); - setOperationAction(ISD::STRICT_FP_ROUND, MVT::f64, Custom); - } - if (isTypeLegal(MVT::f80)) { - setOperationAction(ISD::FP_ROUND, MVT::f80, Custom); - setOperationAction(ISD::STRICT_FP_ROUND, MVT::f80, Custom); - } + if (isTypeLegal(MVT::f32)) + setOperationAction({ISD::FP_ROUND, ISD::STRICT_FP_ROUND}, MVT::f32, + Custom); + if (isTypeLegal(MVT::f64)) + setOperationAction({ISD::FP_ROUND, ISD::STRICT_FP_ROUND}, MVT::f64, + Custom); + if (isTypeLegal(MVT::f80)) + setOperationAction({ISD::FP_ROUND, ISD::STRICT_FP_ROUND}, MVT::f80, + Custom); setOperationAction(ISD::SETCC, MVT::f128, Custom); @@ -787,77 +688,47 @@ } // Always use a library call for pow. - setOperationAction(ISD::FPOW , MVT::f32 , Expand); - setOperationAction(ISD::FPOW , MVT::f64 , Expand); - setOperationAction(ISD::FPOW , MVT::f80 , Expand); - setOperationAction(ISD::FPOW , MVT::f128 , Expand); - - setOperationAction(ISD::FLOG, MVT::f80, Expand); - setOperationAction(ISD::FLOG2, MVT::f80, Expand); - setOperationAction(ISD::FLOG10, MVT::f80, Expand); - setOperationAction(ISD::FEXP, MVT::f80, Expand); - setOperationAction(ISD::FEXP2, MVT::f80, Expand); - setOperationAction(ISD::FMINNUM, MVT::f80, Expand); - setOperationAction(ISD::FMAXNUM, MVT::f80, Expand); + setOperationAction(ISD::FPOW, {MVT::f32, MVT::f64, MVT::f80, MVT::f128}, + Expand); + + setOperationAction({ISD::FLOG, ISD::FLOG2, ISD::FLOG10, ISD::FEXP, ISD::FEXP2, + ISD::FMINNUM, ISD::FMAXNUM}, + MVT::f80, Expand); // Some FP actions are always expanded for vector types. 
- for (auto VT : { MVT::v8f16, MVT::v16f16, MVT::v32f16, - MVT::v4f32, MVT::v8f32, MVT::v16f32, - MVT::v2f64, MVT::v4f64, MVT::v8f64 }) { - setOperationAction(ISD::FSIN, VT, Expand); - setOperationAction(ISD::FSINCOS, VT, Expand); - setOperationAction(ISD::FCOS, VT, Expand); - setOperationAction(ISD::FREM, VT, Expand); - setOperationAction(ISD::FCOPYSIGN, VT, Expand); - setOperationAction(ISD::FPOW, VT, Expand); - setOperationAction(ISD::FLOG, VT, Expand); - setOperationAction(ISD::FLOG2, VT, Expand); - setOperationAction(ISD::FLOG10, VT, Expand); - setOperationAction(ISD::FEXP, VT, Expand); - setOperationAction(ISD::FEXP2, VT, Expand); - } + setOperationAction( + {ISD::FSIN, ISD::FSINCOS, ISD::FCOS, ISD::FREM, ISD::FCOPYSIGN, ISD::FPOW, + ISD::FLOG, ISD::FLOG2, ISD::FLOG10, ISD::FEXP, ISD::FEXP2}, + {MVT::v8f16, MVT::v16f16, MVT::v32f16, MVT::v4f32, MVT::v8f32, + MVT::v16f32, MVT::v2f64, MVT::v4f64, MVT::v8f64}, + Expand); // First set operation action for all vector types to either promote // (for widening) or expand (for scalarization). Then we will selectively // turn on ones that can be effectively codegen'd. 
for (MVT VT : MVT::fixedlen_vector_valuetypes()) { - setOperationAction(ISD::SDIV, VT, Expand); - setOperationAction(ISD::UDIV, VT, Expand); - setOperationAction(ISD::SREM, VT, Expand); - setOperationAction(ISD::UREM, VT, Expand); - setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT,Expand); - setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand); - setOperationAction(ISD::EXTRACT_SUBVECTOR, VT,Expand); - setOperationAction(ISD::INSERT_SUBVECTOR, VT,Expand); - setOperationAction(ISD::FMA, VT, Expand); - setOperationAction(ISD::FFLOOR, VT, Expand); - setOperationAction(ISD::FCEIL, VT, Expand); - setOperationAction(ISD::FTRUNC, VT, Expand); - setOperationAction(ISD::FRINT, VT, Expand); - setOperationAction(ISD::FNEARBYINT, VT, Expand); - setOperationAction(ISD::SMUL_LOHI, VT, Expand); - setOperationAction(ISD::MULHS, VT, Expand); - setOperationAction(ISD::UMUL_LOHI, VT, Expand); - setOperationAction(ISD::MULHU, VT, Expand); - setOperationAction(ISD::SDIVREM, VT, Expand); - setOperationAction(ISD::UDIVREM, VT, Expand); - setOperationAction(ISD::CTPOP, VT, Expand); - setOperationAction(ISD::CTTZ, VT, Expand); - setOperationAction(ISD::CTLZ, VT, Expand); - setOperationAction(ISD::ROTL, VT, Expand); - setOperationAction(ISD::ROTR, VT, Expand); - setOperationAction(ISD::BSWAP, VT, Expand); - setOperationAction(ISD::SETCC, VT, Expand); - setOperationAction(ISD::FP_TO_UINT, VT, Expand); - setOperationAction(ISD::FP_TO_SINT, VT, Expand); - setOperationAction(ISD::UINT_TO_FP, VT, Expand); - setOperationAction(ISD::SINT_TO_FP, VT, Expand); - setOperationAction(ISD::SIGN_EXTEND_INREG, VT,Expand); - setOperationAction(ISD::TRUNCATE, VT, Expand); - setOperationAction(ISD::SIGN_EXTEND, VT, Expand); - setOperationAction(ISD::ZERO_EXTEND, VT, Expand); - setOperationAction(ISD::ANY_EXTEND, VT, Expand); - setOperationAction(ISD::SELECT_CC, VT, Expand); + setOperationAction({ + // clang-format off + ISD::SDIV, ISD::UDIV, ISD::SREM, ISD::UREM, + + ISD::EXTRACT_VECTOR_ELT, 
ISD::INSERT_VECTOR_ELT, + ISD::EXTRACT_SUBVECTOR, ISD::INSERT_SUBVECTOR, + + ISD::FMA, ISD::FFLOOR, ISD::FCEIL, ISD::FTRUNC, + ISD::FRINT, ISD::FNEARBYINT, ISD::SMUL_LOHI, ISD::MULHS, + ISD::UMUL_LOHI, ISD::MULHU, ISD::SDIVREM, ISD::UDIVREM, + + ISD::CTPOP, ISD::CTTZ, ISD::CTLZ, ISD::ROTL, + ISD::ROTR, ISD::BSWAP, ISD::SETCC, + + ISD::FP_TO_UINT, ISD::FP_TO_SINT, + ISD::UINT_TO_FP, ISD::SINT_TO_FP, + ISD::SIGN_EXTEND_INREG, ISD::TRUNCATE, + ISD::SIGN_EXTEND, ISD::ZERO_EXTEND, + ISD::ANY_EXTEND, ISD::SELECT_CC + // clang-format on + }, + VT, Expand); for (MVT InnerVT : MVT::fixedlen_vector_valuetypes()) { setTruncStoreAction(InnerVT, VT, Expand); @@ -889,23 +760,16 @@ addRegisterClass(MVT::v4f32, Subtarget.hasVLX() ? &X86::VR128XRegClass : &X86::VR128RegClass); - setOperationAction(ISD::FNEG, MVT::v4f32, Custom); - setOperationAction(ISD::FABS, MVT::v4f32, Custom); - setOperationAction(ISD::FCOPYSIGN, MVT::v4f32, Custom); - setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom); - setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom); - setOperationAction(ISD::VSELECT, MVT::v4f32, Custom); - setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom); - setOperationAction(ISD::SELECT, MVT::v4f32, Custom); + setOperationAction({ISD::FNEG, ISD::FABS, ISD::FCOPYSIGN, ISD::BUILD_VECTOR, + ISD::VECTOR_SHUFFLE, ISD::VSELECT, + ISD::EXTRACT_VECTOR_ELT, ISD::SELECT}, + MVT::v4f32, Custom); - setOperationAction(ISD::LOAD, MVT::v2f32, Custom); - setOperationAction(ISD::STORE, MVT::v2f32, Custom); + setOperationAction({ISD::LOAD, ISD::STORE}, MVT::v2f32, Custom); - setOperationAction(ISD::STRICT_FADD, MVT::v4f32, Legal); - setOperationAction(ISD::STRICT_FSUB, MVT::v4f32, Legal); - setOperationAction(ISD::STRICT_FMUL, MVT::v4f32, Legal); - setOperationAction(ISD::STRICT_FDIV, MVT::v4f32, Legal); - setOperationAction(ISD::STRICT_FSQRT, MVT::v4f32, Legal); + setOperationAction({ISD::STRICT_FADD, ISD::STRICT_FSUB, ISD::STRICT_FMUL, + ISD::STRICT_FDIV, 
ISD::STRICT_FSQRT}, + MVT::v4f32, Legal); } if (!Subtarget.useSoftFloat() && Subtarget.hasSSE2()) { @@ -923,234 +787,165 @@ addRegisterClass(MVT::v2i64, Subtarget.hasVLX() ? &X86::VR128XRegClass : &X86::VR128RegClass); - for (auto VT : { MVT::v2i8, MVT::v4i8, MVT::v8i8, - MVT::v2i16, MVT::v4i16, MVT::v2i32 }) { - setOperationAction(ISD::SDIV, VT, Custom); - setOperationAction(ISD::SREM, VT, Custom); - setOperationAction(ISD::UDIV, VT, Custom); - setOperationAction(ISD::UREM, VT, Custom); - } - - setOperationAction(ISD::MUL, MVT::v2i8, Custom); - setOperationAction(ISD::MUL, MVT::v4i8, Custom); - setOperationAction(ISD::MUL, MVT::v8i8, Custom); - - setOperationAction(ISD::MUL, MVT::v16i8, Custom); - setOperationAction(ISD::MUL, MVT::v4i32, Custom); - setOperationAction(ISD::MUL, MVT::v2i64, Custom); - setOperationAction(ISD::MULHU, MVT::v4i32, Custom); - setOperationAction(ISD::MULHS, MVT::v4i32, Custom); - setOperationAction(ISD::MULHU, MVT::v16i8, Custom); - setOperationAction(ISD::MULHS, MVT::v16i8, Custom); - setOperationAction(ISD::MULHU, MVT::v8i16, Legal); - setOperationAction(ISD::MULHS, MVT::v8i16, Legal); - setOperationAction(ISD::MUL, MVT::v8i16, Legal); - setOperationAction(ISD::AVGCEILU, MVT::v16i8, Legal); - setOperationAction(ISD::AVGCEILU, MVT::v8i16, Legal); - - setOperationAction(ISD::SMULO, MVT::v16i8, Custom); - setOperationAction(ISD::UMULO, MVT::v16i8, Custom); - - setOperationAction(ISD::FNEG, MVT::v2f64, Custom); - setOperationAction(ISD::FABS, MVT::v2f64, Custom); - setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Custom); - - for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) { - setOperationAction(ISD::SMAX, VT, VT == MVT::v8i16 ? Legal : Custom); - setOperationAction(ISD::SMIN, VT, VT == MVT::v8i16 ? Legal : Custom); - setOperationAction(ISD::UMAX, VT, VT == MVT::v16i8 ? Legal : Custom); - setOperationAction(ISD::UMIN, VT, VT == MVT::v16i8 ? 
Legal : Custom); - } - - setOperationAction(ISD::UADDSAT, MVT::v16i8, Legal); - setOperationAction(ISD::SADDSAT, MVT::v16i8, Legal); - setOperationAction(ISD::USUBSAT, MVT::v16i8, Legal); - setOperationAction(ISD::SSUBSAT, MVT::v16i8, Legal); - setOperationAction(ISD::UADDSAT, MVT::v8i16, Legal); - setOperationAction(ISD::SADDSAT, MVT::v8i16, Legal); - setOperationAction(ISD::USUBSAT, MVT::v8i16, Legal); - setOperationAction(ISD::SSUBSAT, MVT::v8i16, Legal); - setOperationAction(ISD::USUBSAT, MVT::v4i32, Custom); - setOperationAction(ISD::USUBSAT, MVT::v2i64, Custom); - - setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom); - setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom); - setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom); - setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom); - - for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) { - setOperationAction(ISD::SETCC, VT, Custom); - setOperationAction(ISD::STRICT_FSETCC, VT, Custom); - setOperationAction(ISD::STRICT_FSETCCS, VT, Custom); - setOperationAction(ISD::CTPOP, VT, Custom); - setOperationAction(ISD::ABS, VT, Custom); + setOperationAction( + {ISD::SDIV, ISD::SREM, ISD::UDIV, ISD::UREM}, + {MVT::v2i8, MVT::v4i8, MVT::v8i8, MVT::v2i16, MVT::v4i16, MVT::v2i32}, + Custom); + + setOperationAction(ISD::MUL, + {MVT::v2i8, MVT::v4i8, MVT::v8i8, MVT::v2i64, MVT::v4i32, + MVT::v8i16, MVT::v16i8}, + Custom); + + setOperationAction({ISD::MULHU, ISD::MULHS}, {MVT::v4i32, MVT::v16i8}, + Custom); + setOperationAction({ISD::MULHU, ISD::MULHS, ISD::MUL}, MVT::v8i16, Legal); + setOperationAction(ISD::AVGCEILU, {MVT::v16i8, MVT::v8i16}, Legal); + + setOperationAction({ISD::SMULO, ISD::UMULO}, MVT::v16i8, Custom); + + setOperationAction({ISD::FNEG, ISD::FABS, ISD::FCOPYSIGN}, MVT::v2f64, + Custom); + + for (auto VT : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64}) { + setOperationAction({ISD::SMAX, ISD::SMIN}, VT, + VT == MVT::v8i16 ? 
Legal : Custom); + setOperationAction({ISD::UMAX, ISD::UMIN}, VT, + VT == MVT::v16i8 ? Legal : Custom); + } + + setOperationAction({ISD::UADDSAT, ISD::SADDSAT, ISD::USUBSAT, ISD::SSUBSAT}, + {MVT::v16i8, MVT::v8i16}, Legal); + setOperationAction(ISD::USUBSAT, {MVT::v4i32, MVT::v2i64}, Custom); + + setOperationAction(ISD::INSERT_VECTOR_ELT, + {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32}, + Custom); + + for (auto VT : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64}) { + setOperationAction({ISD::SETCC, ISD::STRICT_FSETCC, ISD::STRICT_FSETCCS, + ISD::CTPOP, ISD::ABS}, + VT, Custom); // The condition codes aren't legal in SSE/AVX and under AVX512 we use // setcc all the way to isel and prefer SETGT in some isel patterns. - setCondCodeAction(ISD::SETLT, VT, Custom); - setCondCodeAction(ISD::SETLE, VT, Custom); + setCondCodeAction({ISD::SETLT, ISD::SETLE}, VT, Custom); } - for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32 }) { - setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom); - setOperationAction(ISD::BUILD_VECTOR, VT, Custom); - setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom); - setOperationAction(ISD::VSELECT, VT, Custom); - setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom); - } + setOperationAction({ISD::SCALAR_TO_VECTOR, ISD::BUILD_VECTOR, + ISD::VECTOR_SHUFFLE, ISD::VSELECT, + ISD::EXTRACT_VECTOR_ELT}, + {MVT::v16i8, MVT::v8i16, MVT::v4i32}, Custom); - for (auto VT : { MVT::v2f64, MVT::v2i64 }) { - setOperationAction(ISD::BUILD_VECTOR, VT, Custom); - setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom); - setOperationAction(ISD::VSELECT, VT, Custom); + for (auto VT : {MVT::v2f64, MVT::v2i64}) { + setOperationAction( + {ISD::BUILD_VECTOR, ISD::VECTOR_SHUFFLE, ISD::VSELECT}, VT, Custom); if (VT == MVT::v2i64 && !Subtarget.is64Bit()) continue; - setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom); - setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom); + setOperationAction({ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT}, + VT, Custom); } 
// Custom lower v2i64 and v2f64 selects. - setOperationAction(ISD::SELECT, MVT::v2f64, Custom); - setOperationAction(ISD::SELECT, MVT::v2i64, Custom); - setOperationAction(ISD::SELECT, MVT::v4i32, Custom); - setOperationAction(ISD::SELECT, MVT::v8i16, Custom); - setOperationAction(ISD::SELECT, MVT::v16i8, Custom); - - setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal); - setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Custom); - setOperationAction(ISD::FP_TO_SINT, MVT::v2i32, Custom); - setOperationAction(ISD::FP_TO_UINT, MVT::v2i32, Custom); - setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v4i32, Legal); - setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v2i32, Custom); + setOperationAction( + ISD::SELECT, + {MVT::v2f64, MVT::v2i64, MVT::v4i32, MVT::v8i16, MVT::v16i8}, Custom); - // Custom legalize these to avoid over promotion or custom promotion. - for (auto VT : {MVT::v2i8, MVT::v4i8, MVT::v8i8, MVT::v2i16, MVT::v4i16}) { - setOperationAction(ISD::FP_TO_SINT, VT, Custom); - setOperationAction(ISD::FP_TO_UINT, VT, Custom); - setOperationAction(ISD::STRICT_FP_TO_SINT, VT, Custom); - setOperationAction(ISD::STRICT_FP_TO_UINT, VT, Custom); - } + setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal); + setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Custom); + setOperationAction({ISD::FP_TO_SINT, ISD::FP_TO_UINT}, MVT::v2i32, + Custom); + setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v4i32, Legal); + setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v2i32, Custom); - setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal); - setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v4i32, Legal); - setOperationAction(ISD::SINT_TO_FP, MVT::v2i32, Custom); - setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v2i32, Custom); + // Custom legalize these to avoid over promotion or custom promotion. 
+ setOperationAction( + {ISD::FP_TO_SINT, ISD::FP_TO_UINT, ISD::STRICT_FP_TO_SINT, + ISD::STRICT_FP_TO_UINT}, + {MVT::v2i8, MVT::v4i8, MVT::v8i8, MVT::v2i16, MVT::v4i16}, Custom); - setOperationAction(ISD::UINT_TO_FP, MVT::v2i32, Custom); - setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v2i32, Custom); + setOperationAction({ISD::SINT_TO_FP, ISD::STRICT_SINT_TO_FP}, MVT::v4i32, + Legal); + setOperationAction({ISD::SINT_TO_FP, ISD::STRICT_SINT_TO_FP}, MVT::v2i32, + Custom); - setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Custom); - setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4i32, Custom); + setOperationAction({ISD::UINT_TO_FP, ISD::STRICT_UINT_TO_FP}, + {MVT::v2i32, MVT::v4i32}, Custom); // Fast v2f32 UINT_TO_FP( v2i32 ) custom conversion. - setOperationAction(ISD::SINT_TO_FP, MVT::v2f32, Custom); - setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v2f32, Custom); - setOperationAction(ISD::UINT_TO_FP, MVT::v2f32, Custom); - setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v2f32, Custom); + setOperationAction({ISD::SINT_TO_FP, ISD::STRICT_SINT_TO_FP, + ISD::UINT_TO_FP, ISD::STRICT_UINT_TO_FP}, + MVT::v2f32, Custom); - setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Custom); - setOperationAction(ISD::STRICT_FP_EXTEND, MVT::v2f32, Custom); - setOperationAction(ISD::FP_ROUND, MVT::v2f32, Custom); - setOperationAction(ISD::STRICT_FP_ROUND, MVT::v2f32, Custom); + setOperationAction({ISD::FP_EXTEND, ISD::STRICT_FP_EXTEND, ISD::FP_ROUND, + ISD::STRICT_FP_ROUND}, + MVT::v2f32, Custom); // We want to legalize this to an f64 load rather than an i64 load on // 64-bit targets and two 32-bit loads on a 32-bit target. Similar for // store. 
- setOperationAction(ISD::LOAD, MVT::v2i32, Custom); - setOperationAction(ISD::LOAD, MVT::v4i16, Custom); - setOperationAction(ISD::LOAD, MVT::v8i8, Custom); - setOperationAction(ISD::STORE, MVT::v2i32, Custom); - setOperationAction(ISD::STORE, MVT::v4i16, Custom); - setOperationAction(ISD::STORE, MVT::v8i8, Custom); - - setOperationAction(ISD::BITCAST, MVT::v2i32, Custom); - setOperationAction(ISD::BITCAST, MVT::v4i16, Custom); - setOperationAction(ISD::BITCAST, MVT::v8i8, Custom); + setOperationAction({ISD::LOAD, ISD::STORE}, + {MVT::v2i32, MVT::v4i16, MVT::v8i8}, Custom); + + setOperationAction(ISD::BITCAST, {MVT::v2i32, MVT::v4i16, MVT::v8i8}, + Custom); if (!Subtarget.hasAVX512()) setOperationAction(ISD::BITCAST, MVT::v16i1, Custom); - setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v2i64, Custom); - setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v4i32, Custom); - setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v8i16, Custom); + setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, + {MVT::v2i64, MVT::v4i32, MVT::v8i16}, Custom); setOperationAction(ISD::SIGN_EXTEND, MVT::v4i64, Custom); - setOperationAction(ISD::TRUNCATE, MVT::v2i8, Custom); - setOperationAction(ISD::TRUNCATE, MVT::v2i16, Custom); - setOperationAction(ISD::TRUNCATE, MVT::v2i32, Custom); - setOperationAction(ISD::TRUNCATE, MVT::v4i8, Custom); - setOperationAction(ISD::TRUNCATE, MVT::v4i16, Custom); - setOperationAction(ISD::TRUNCATE, MVT::v8i8, Custom); + setOperationAction( + ISD::TRUNCATE, + {MVT::v2i8, MVT::v2i16, MVT::v2i32, MVT::v4i8, MVT::v4i16, MVT::v8i8}, + Custom); // In the customized shift lowering, the legal v4i32/v2i64 cases // in AVX2 will be recognized. 
- for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) { - setOperationAction(ISD::SRL, VT, Custom); - setOperationAction(ISD::SHL, VT, Custom); - setOperationAction(ISD::SRA, VT, Custom); - if (VT == MVT::v2i64) continue; - setOperationAction(ISD::ROTL, VT, Custom); - setOperationAction(ISD::ROTR, VT, Custom); - setOperationAction(ISD::FSHL, VT, Custom); - setOperationAction(ISD::FSHR, VT, Custom); + for (auto VT : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64}) { + setOperationAction({ISD::SRL, ISD::SHL, ISD::SRA}, VT, Custom); + if (VT == MVT::v2i64) + continue; + setOperationAction({ISD::ROTL, ISD::ROTR, ISD::FSHL, ISD::FSHR}, VT, + Custom); } - setOperationAction(ISD::STRICT_FSQRT, MVT::v2f64, Legal); - setOperationAction(ISD::STRICT_FADD, MVT::v2f64, Legal); - setOperationAction(ISD::STRICT_FSUB, MVT::v2f64, Legal); - setOperationAction(ISD::STRICT_FMUL, MVT::v2f64, Legal); - setOperationAction(ISD::STRICT_FDIV, MVT::v2f64, Legal); + setOperationAction({ISD::STRICT_FSQRT, ISD::STRICT_FADD, ISD::STRICT_FSUB, + ISD::STRICT_FMUL, ISD::STRICT_FDIV}, + MVT::v2f64, Legal); } if (!Subtarget.useSoftFloat() && Subtarget.hasSSSE3()) { - setOperationAction(ISD::ABS, MVT::v16i8, Legal); - setOperationAction(ISD::ABS, MVT::v8i16, Legal); - setOperationAction(ISD::ABS, MVT::v4i32, Legal); + setOperationAction(ISD::ABS, {MVT::v16i8, MVT::v8i16, MVT::v4i32}, Legal); setOperationAction(ISD::BITREVERSE, MVT::v16i8, Custom); - setOperationAction(ISD::CTLZ, MVT::v16i8, Custom); - setOperationAction(ISD::CTLZ, MVT::v8i16, Custom); - setOperationAction(ISD::CTLZ, MVT::v4i32, Custom); - setOperationAction(ISD::CTLZ, MVT::v2i64, Custom); + setOperationAction( + ISD::CTLZ, {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64}, Custom); // These might be better off as horizontal vector ops. 
- setOperationAction(ISD::ADD, MVT::i16, Custom); - setOperationAction(ISD::ADD, MVT::i32, Custom); - setOperationAction(ISD::SUB, MVT::i16, Custom); - setOperationAction(ISD::SUB, MVT::i32, Custom); + setOperationAction({ISD::ADD, ISD::SUB}, {MVT::i16, MVT::i32}, Custom); } if (!Subtarget.useSoftFloat() && Subtarget.hasSSE41()) { for (MVT RoundedTy : {MVT::f32, MVT::f64, MVT::v4f32, MVT::v2f64}) { - setOperationAction(ISD::FFLOOR, RoundedTy, Legal); - setOperationAction(ISD::STRICT_FFLOOR, RoundedTy, Legal); - setOperationAction(ISD::FCEIL, RoundedTy, Legal); - setOperationAction(ISD::STRICT_FCEIL, RoundedTy, Legal); - setOperationAction(ISD::FTRUNC, RoundedTy, Legal); - setOperationAction(ISD::STRICT_FTRUNC, RoundedTy, Legal); - setOperationAction(ISD::FRINT, RoundedTy, Legal); - setOperationAction(ISD::STRICT_FRINT, RoundedTy, Legal); - setOperationAction(ISD::FNEARBYINT, RoundedTy, Legal); - setOperationAction(ISD::STRICT_FNEARBYINT, RoundedTy, Legal); - setOperationAction(ISD::FROUNDEVEN, RoundedTy, Legal); - setOperationAction(ISD::STRICT_FROUNDEVEN, RoundedTy, Legal); + setOperationAction({ISD::FFLOOR, ISD::STRICT_FFLOOR, ISD::FCEIL, + ISD::STRICT_FCEIL, ISD::FTRUNC, ISD::STRICT_FTRUNC, + ISD::FRINT, ISD::STRICT_FRINT, ISD::FNEARBYINT, + ISD::STRICT_FNEARBYINT, ISD::FROUNDEVEN, + ISD::STRICT_FROUNDEVEN}, + RoundedTy, Legal); setOperationAction(ISD::FROUND, RoundedTy, Custom); } - setOperationAction(ISD::SMAX, MVT::v16i8, Legal); - setOperationAction(ISD::SMAX, MVT::v4i32, Legal); - setOperationAction(ISD::UMAX, MVT::v8i16, Legal); - setOperationAction(ISD::UMAX, MVT::v4i32, Legal); - setOperationAction(ISD::SMIN, MVT::v16i8, Legal); - setOperationAction(ISD::SMIN, MVT::v4i32, Legal); - setOperationAction(ISD::UMIN, MVT::v8i16, Legal); - setOperationAction(ISD::UMIN, MVT::v4i32, Legal); + setOperationAction({ISD::SMAX, ISD::SMIN}, {MVT::v16i8, MVT::v4i32}, Legal); + setOperationAction({ISD::UMAX, ISD::UMIN}, {MVT::v8i16, MVT::v4i32}, Legal); 
setOperationAction(ISD::UADDSAT, MVT::v4i32, Custom); - setOperationAction(ISD::SADDSAT, MVT::v2i64, Custom); - setOperationAction(ISD::SSUBSAT, MVT::v2i64, Custom); + setOperationAction({ISD::SADDSAT, ISD::SSUBSAT}, MVT::v2i64, Custom); // FIXME: Do we need to handle scalar-to-vector here? setOperationAction(ISD::MUL, MVT::v4i32, Legal); @@ -1161,10 +956,9 @@ // SSE41 brings specific instructions for doing vector sign extend even in // cases where we don't have SRA. - for (auto VT : { MVT::v8i16, MVT::v4i32, MVT::v2i64 }) { - setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Legal); - setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Legal); - } + setOperationAction( + {ISD::SIGN_EXTEND_VECTOR_INREG, ISD::ZERO_EXTEND_VECTOR_INREG}, + {MVT::v8i16, MVT::v4i32, MVT::v2i64}, Legal); // SSE41 also has vector sign/zero extending loads, PMOV[SZ]X for (auto LoadExtOp : { ISD::SEXTLOAD, ISD::ZEXTLOAD }) { @@ -1179,33 +973,32 @@ if (Subtarget.is64Bit() && !Subtarget.hasAVX512()) { // We need to scalarize v4i64->v4i32 uint_to_fp using cvtsi2ss, but we can // do the pre and post work in the vector domain. - setOperationAction(ISD::UINT_TO_FP, MVT::v4i64, Custom); - setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4i64, Custom); + setOperationAction({ISD::UINT_TO_FP, ISD::STRICT_UINT_TO_FP}, MVT::v4i64, + Custom); // We need to mark SINT_TO_FP as Custom even though we want to expand it // so that DAG combine doesn't try to turn it into uint_to_fp. 
- setOperationAction(ISD::SINT_TO_FP, MVT::v4i64, Custom); - setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v4i64, Custom); + setOperationAction({ISD::SINT_TO_FP, ISD::STRICT_SINT_TO_FP}, MVT::v4i64, + Custom); } } - if (!Subtarget.useSoftFloat() && Subtarget.hasSSE42()) { - setOperationAction(ISD::UADDSAT, MVT::v2i64, Custom); - } + if (!Subtarget.useSoftFloat() && Subtarget.hasSSE42()) + setOperationAction(ISD::UADDSAT, MVT::v2i64, Custom); if (!Subtarget.useSoftFloat() && Subtarget.hasXOP()) { - for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64, - MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 }) { - setOperationAction(ISD::ROTL, VT, Custom); - setOperationAction(ISD::ROTR, VT, Custom); - } + setOperationAction({ISD::ROTL, ISD::ROTR}, + {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64, + MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64}, + Custom); // XOP can efficiently perform BITREVERSE with VPPERM. - for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) - setOperationAction(ISD::BITREVERSE, VT, Custom); + setOperationAction(ISD::BITREVERSE, + {MVT::i8, MVT::i16, MVT::i32, MVT::i64}, Custom); - for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64, - MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 }) - setOperationAction(ISD::BITREVERSE, VT, Custom); + setOperationAction(ISD::BITREVERSE, + {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64, + MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64}, + Custom); } if (!Subtarget.useSoftFloat() && Subtarget.hasAVX()) { @@ -1225,24 +1018,15 @@ : &X86::VR256RegClass); for (auto VT : { MVT::v8f32, MVT::v4f64 }) { - setOperationAction(ISD::FFLOOR, VT, Legal); - setOperationAction(ISD::STRICT_FFLOOR, VT, Legal); - setOperationAction(ISD::FCEIL, VT, Legal); - setOperationAction(ISD::STRICT_FCEIL, VT, Legal); - setOperationAction(ISD::FTRUNC, VT, Legal); - setOperationAction(ISD::STRICT_FTRUNC, VT, Legal); - setOperationAction(ISD::FRINT, VT, Legal); - setOperationAction(ISD::STRICT_FRINT, VT, Legal); 
- setOperationAction(ISD::FNEARBYINT, VT, Legal); - setOperationAction(ISD::STRICT_FNEARBYINT, VT, Legal); - setOperationAction(ISD::FROUNDEVEN, VT, Legal); - setOperationAction(ISD::STRICT_FROUNDEVEN, VT, Legal); + setOperationAction({ISD::FFLOOR, ISD::STRICT_FFLOOR, ISD::FCEIL, + ISD::STRICT_FCEIL, ISD::FTRUNC, ISD::STRICT_FTRUNC, + ISD::FRINT, ISD::STRICT_FRINT, ISD::FNEARBYINT, + ISD::STRICT_FNEARBYINT, ISD::FROUNDEVEN, + ISD::STRICT_FROUNDEVEN}, + VT, Legal); - setOperationAction(ISD::FROUND, VT, Custom); - - setOperationAction(ISD::FNEG, VT, Custom); - setOperationAction(ISD::FABS, VT, Custom); - setOperationAction(ISD::FCOPYSIGN, VT, Custom); + setOperationAction({ISD::FROUND, ISD::FNEG, ISD::FABS, ISD::FCOPYSIGN}, + VT, Custom); } // (fp_to_int:v8i16 (v8f32 ..)) requires the result type to be promoted @@ -1259,17 +1043,10 @@ setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v8i32, Legal); setOperationAction(ISD::STRICT_FP_ROUND, MVT::v4f32, Legal); - setOperationAction(ISD::STRICT_FADD, MVT::v8f32, Legal); - setOperationAction(ISD::STRICT_FADD, MVT::v4f64, Legal); - setOperationAction(ISD::STRICT_FSUB, MVT::v8f32, Legal); - setOperationAction(ISD::STRICT_FSUB, MVT::v4f64, Legal); - setOperationAction(ISD::STRICT_FMUL, MVT::v8f32, Legal); - setOperationAction(ISD::STRICT_FMUL, MVT::v4f64, Legal); - setOperationAction(ISD::STRICT_FDIV, MVT::v8f32, Legal); - setOperationAction(ISD::STRICT_FDIV, MVT::v4f64, Legal); - setOperationAction(ISD::STRICT_FP_EXTEND, MVT::v4f64, Legal); - setOperationAction(ISD::STRICT_FSQRT, MVT::v8f32, Legal); - setOperationAction(ISD::STRICT_FSQRT, MVT::v4f64, Legal); + setOperationAction({ISD::STRICT_FADD, ISD::STRICT_FSUB, ISD::STRICT_FMUL, + ISD::STRICT_FDIV, ISD::STRICT_FSQRT}, + {MVT::v8f32, MVT::v4f64}, Legal); + setOperationAction(ISD::STRICT_FP_EXTEND, MVT::v4f64, Legal); if (!Subtarget.hasAVX512()) setOperationAction(ISD::BITCAST, MVT::v32i1, Custom); @@ -1277,120 +1054,84 @@ // In the customized shift lowering, the legal 
v8i32/v4i64 cases // in AVX2 will be recognized. for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 }) { - setOperationAction(ISD::SRL, VT, Custom); - setOperationAction(ISD::SHL, VT, Custom); - setOperationAction(ISD::SRA, VT, Custom); + setOperationAction({ISD::SRL, ISD::SHL, ISD::SRA}, VT, Custom); if (VT == MVT::v4i64) continue; - setOperationAction(ISD::ROTL, VT, Custom); - setOperationAction(ISD::ROTR, VT, Custom); - setOperationAction(ISD::FSHL, VT, Custom); - setOperationAction(ISD::FSHR, VT, Custom); + setOperationAction({ISD::ROTL, ISD::ROTR, ISD::FSHL, ISD::FSHR}, VT, + Custom); } // These types need custom splitting if their input is a 128-bit vector. - setOperationAction(ISD::SIGN_EXTEND, MVT::v8i64, Custom); - setOperationAction(ISD::SIGN_EXTEND, MVT::v16i32, Custom); - setOperationAction(ISD::ZERO_EXTEND, MVT::v8i64, Custom); - setOperationAction(ISD::ZERO_EXTEND, MVT::v16i32, Custom); + setOperationAction({ISD::SIGN_EXTEND, ISD::ZERO_EXTEND}, + {MVT::v8i64, MVT::v16i32}, Custom); - setOperationAction(ISD::SELECT, MVT::v4f64, Custom); - setOperationAction(ISD::SELECT, MVT::v4i64, Custom); - setOperationAction(ISD::SELECT, MVT::v8i32, Custom); - setOperationAction(ISD::SELECT, MVT::v16i16, Custom); - setOperationAction(ISD::SELECT, MVT::v32i8, Custom); - setOperationAction(ISD::SELECT, MVT::v8f32, Custom); + setOperationAction(ISD::SELECT, + {MVT::v4f64, MVT::v4i64, MVT::v8i32, MVT::v16i16, + MVT::v32i8, MVT::v8f32}, + Custom); - for (auto VT : { MVT::v16i16, MVT::v8i32, MVT::v4i64 }) { - setOperationAction(ISD::SIGN_EXTEND, VT, Custom); - setOperationAction(ISD::ZERO_EXTEND, VT, Custom); - setOperationAction(ISD::ANY_EXTEND, VT, Custom); - } + setOperationAction({ISD::SIGN_EXTEND, ISD::ZERO_EXTEND, ISD::ANY_EXTEND}, + {MVT::v16i16, MVT::v8i32, MVT::v4i64}, Custom); - setOperationAction(ISD::TRUNCATE, MVT::v16i8, Custom); - setOperationAction(ISD::TRUNCATE, MVT::v8i16, Custom); - setOperationAction(ISD::TRUNCATE, MVT::v4i32, Custom); - 
setOperationAction(ISD::BITREVERSE, MVT::v32i8, Custom); + setOperationAction(ISD::TRUNCATE, {MVT::v16i8, MVT::v8i16, MVT::v4i32}, + Custom); + setOperationAction(ISD::BITREVERSE, MVT::v32i8, Custom); - for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 }) { - setOperationAction(ISD::SETCC, VT, Custom); - setOperationAction(ISD::STRICT_FSETCC, VT, Custom); - setOperationAction(ISD::STRICT_FSETCCS, VT, Custom); - setOperationAction(ISD::CTPOP, VT, Custom); - setOperationAction(ISD::CTLZ, VT, Custom); + for (auto VT : {MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64}) { + setOperationAction({ISD::SETCC, ISD::STRICT_FSETCC, ISD::STRICT_FSETCCS, + ISD::CTPOP, ISD::CTLZ}, + VT, Custom); // The condition codes aren't legal in SSE/AVX and under AVX512 we use // setcc all the way to isel and prefer SETGT in some isel patterns. - setCondCodeAction(ISD::SETLT, VT, Custom); - setCondCodeAction(ISD::SETLE, VT, Custom); - } + setCondCodeAction({ISD::SETLT, ISD::SETLE}, VT, Custom); + } if (Subtarget.hasAnyFMA()) { - for (auto VT : { MVT::f32, MVT::f64, MVT::v4f32, MVT::v8f32, - MVT::v2f64, MVT::v4f64 }) { - setOperationAction(ISD::FMA, VT, Legal); - setOperationAction(ISD::STRICT_FMA, VT, Legal); - } + setOperationAction( + {ISD::FMA, ISD::STRICT_FMA}, + {MVT::f32, MVT::f64, MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64}, + Legal); } - for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 }) { - setOperationAction(ISD::ADD, VT, HasInt256 ? Legal : Custom); - setOperationAction(ISD::SUB, VT, HasInt256 ? Legal : Custom); - } + setOperationAction({ISD::ADD, ISD::SUB}, + {MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64}, + HasInt256 ? Legal : Custom); setOperationAction(ISD::MUL, MVT::v4i64, Custom); - setOperationAction(ISD::MUL, MVT::v8i32, HasInt256 ? Legal : Custom); - setOperationAction(ISD::MUL, MVT::v16i16, HasInt256 ? Legal : Custom); + setOperationAction(ISD::MUL, {MVT::v8i32, MVT::v16i16}, + HasInt256 ? 
Legal : Custom); setOperationAction(ISD::MUL, MVT::v32i8, Custom); - setOperationAction(ISD::MULHU, MVT::v8i32, Custom); - setOperationAction(ISD::MULHS, MVT::v8i32, Custom); - setOperationAction(ISD::MULHU, MVT::v16i16, HasInt256 ? Legal : Custom); - setOperationAction(ISD::MULHS, MVT::v16i16, HasInt256 ? Legal : Custom); - setOperationAction(ISD::MULHU, MVT::v32i8, Custom); - setOperationAction(ISD::MULHS, MVT::v32i8, Custom); - setOperationAction(ISD::AVGCEILU, MVT::v16i16, HasInt256 ? Legal : Custom); - setOperationAction(ISD::AVGCEILU, MVT::v32i8, HasInt256 ? Legal : Custom); - - setOperationAction(ISD::SMULO, MVT::v32i8, Custom); - setOperationAction(ISD::UMULO, MVT::v32i8, Custom); - - setOperationAction(ISD::ABS, MVT::v4i64, Custom); - setOperationAction(ISD::SMAX, MVT::v4i64, Custom); - setOperationAction(ISD::UMAX, MVT::v4i64, Custom); - setOperationAction(ISD::SMIN, MVT::v4i64, Custom); - setOperationAction(ISD::UMIN, MVT::v4i64, Custom); - - setOperationAction(ISD::UADDSAT, MVT::v32i8, HasInt256 ? Legal : Custom); - setOperationAction(ISD::SADDSAT, MVT::v32i8, HasInt256 ? Legal : Custom); - setOperationAction(ISD::USUBSAT, MVT::v32i8, HasInt256 ? Legal : Custom); - setOperationAction(ISD::SSUBSAT, MVT::v32i8, HasInt256 ? Legal : Custom); - setOperationAction(ISD::UADDSAT, MVT::v16i16, HasInt256 ? Legal : Custom); - setOperationAction(ISD::SADDSAT, MVT::v16i16, HasInt256 ? Legal : Custom); - setOperationAction(ISD::USUBSAT, MVT::v16i16, HasInt256 ? Legal : Custom); - setOperationAction(ISD::SSUBSAT, MVT::v16i16, HasInt256 ? Legal : Custom); - setOperationAction(ISD::UADDSAT, MVT::v8i32, Custom); - setOperationAction(ISD::USUBSAT, MVT::v8i32, Custom); - setOperationAction(ISD::UADDSAT, MVT::v4i64, Custom); - setOperationAction(ISD::USUBSAT, MVT::v4i64, Custom); - - for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32 }) { - setOperationAction(ISD::ABS, VT, HasInt256 ? Legal : Custom); - setOperationAction(ISD::SMAX, VT, HasInt256 ? 
Legal : Custom); - setOperationAction(ISD::UMAX, VT, HasInt256 ? Legal : Custom); - setOperationAction(ISD::SMIN, VT, HasInt256 ? Legal : Custom); - setOperationAction(ISD::UMIN, VT, HasInt256 ? Legal : Custom); - } - - for (auto VT : {MVT::v16i16, MVT::v8i32, MVT::v4i64}) { - setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Custom); - setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Custom); - } + setOperationAction({ISD::MULHU, ISD::MULHS}, MVT::v8i32, Custom); + setOperationAction({ISD::MULHU, ISD::MULHS}, MVT::v16i16, + HasInt256 ? Legal : Custom); + setOperationAction({ISD::MULHU, ISD::MULHS}, MVT::v32i8, Custom); + setOperationAction(ISD::AVGCEILU, {MVT::v16i16, MVT::v32i8}, + HasInt256 ? Legal : Custom); + + setOperationAction({ISD::SMULO, ISD::UMULO}, MVT::v32i8, Custom); + + setOperationAction({ISD::ABS, ISD::SMAX, ISD::UMAX, ISD::SMIN, ISD::UMIN}, + MVT::v4i64, Custom); + + setOperationAction({ISD::UADDSAT, ISD::SADDSAT, ISD::USUBSAT, ISD::SSUBSAT}, + {MVT::v32i8, MVT::v16i16}, HasInt256 ? Legal : Custom); + setOperationAction({ISD::UADDSAT, ISD::USUBSAT}, {MVT::v8i32, MVT::v4i64}, + Custom); + + setOperationAction({ISD::ABS, ISD::SMAX, ISD::UMAX, ISD::SMIN, ISD::UMIN}, + {MVT::v32i8, MVT::v16i16, MVT::v8i32}, + HasInt256 ? Legal : Custom); + + setOperationAction( + {ISD::SIGN_EXTEND_VECTOR_INREG, ISD::ZERO_EXTEND_VECTOR_INREG}, + {MVT::v16i16, MVT::v8i32, MVT::v4i64}, Custom); if (HasInt256) { // The custom lowering for UINT_TO_FP for v8i32 becomes interesting // when we have a 256bit-wide blend with immediate. 
- setOperationAction(ISD::UINT_TO_FP, MVT::v8i32, Custom); - setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v8i32, Custom); + setOperationAction({ISD::UINT_TO_FP, ISD::STRICT_UINT_TO_FP}, MVT::v8i32, + Custom); // AVX2 also has wider vector sign/zero extending loads, VPMOV[SZ]X for (auto LoadExtOp : { ISD::SEXTLOAD, ISD::ZEXTLOAD }) { @@ -1411,31 +1152,27 @@ // Extract subvector is special because the value type // (result) is 128-bit but the source is 256-bit wide. - for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64, - MVT::v4f32, MVT::v2f64 }) { - setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal); - } + setOperationAction(ISD::EXTRACT_SUBVECTOR, + {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64, + MVT::v4f32, MVT::v2f64}, + Legal); // Custom lower several nodes for 256-bit types. for (MVT VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64, MVT::v8f32, MVT::v4f64 }) { - setOperationAction(ISD::BUILD_VECTOR, VT, Custom); - setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom); - setOperationAction(ISD::VSELECT, VT, Custom); - setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom); - setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom); - setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom); - setOperationAction(ISD::INSERT_SUBVECTOR, VT, Legal); - setOperationAction(ISD::CONCAT_VECTORS, VT, Custom); - setOperationAction(ISD::STORE, VT, Custom); + setOperationAction({ISD::BUILD_VECTOR, ISD::VECTOR_SHUFFLE, ISD::VSELECT, + ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT, + ISD::SCALAR_TO_VECTOR, ISD::CONCAT_VECTORS, + ISD::STORE}, + VT, Custom); + setOperationAction(ISD::INSERT_SUBVECTOR, VT, Legal); } if (HasInt256) { setOperationAction(ISD::VSELECT, MVT::v32i8, Legal); // Custom legalize 2x32 to get a little better code. 
- setOperationAction(ISD::MGATHER, MVT::v2f32, Custom); - setOperationAction(ISD::MGATHER, MVT::v2i32, Custom); + setOperationAction(ISD::MGATHER, {MVT::v2f32, MVT::v2i32}, Custom); for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64, MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64 }) @@ -1453,9 +1190,9 @@ addRegisterClass(MVT::v8i1, &X86::VK8RegClass); addRegisterClass(MVT::v16i1, &X86::VK16RegClass); - setOperationAction(ISD::SELECT, MVT::v1i1, Custom); - setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v1i1, Custom); - setOperationAction(ISD::BUILD_VECTOR, MVT::v1i1, Custom); + setOperationAction( + {ISD::SELECT, ISD::EXTRACT_VECTOR_ELT, ISD::BUILD_VECTOR}, MVT::v1i1, + Custom); setOperationPromotedToType(ISD::FP_TO_SINT, MVT::v8i1, MVT::v8i32); setOperationPromotedToType(ISD::FP_TO_UINT, MVT::v8i1, MVT::v8i32); @@ -1465,51 +1202,33 @@ setOperationPromotedToType(ISD::STRICT_FP_TO_UINT, MVT::v8i1, MVT::v8i32); setOperationPromotedToType(ISD::STRICT_FP_TO_SINT, MVT::v4i1, MVT::v4i32); setOperationPromotedToType(ISD::STRICT_FP_TO_UINT, MVT::v4i1, MVT::v4i32); - setOperationAction(ISD::FP_TO_SINT, MVT::v2i1, Custom); - setOperationAction(ISD::FP_TO_UINT, MVT::v2i1, Custom); - setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v2i1, Custom); - setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v2i1, Custom); + setOperationAction({ISD::FP_TO_SINT, ISD::FP_TO_UINT, + ISD::STRICT_FP_TO_SINT, ISD::STRICT_FP_TO_UINT}, + MVT::v2i1, Custom); // There is no byte sized k-register load or store without AVX512DQ. 
- if (!Subtarget.hasDQI()) { - setOperationAction(ISD::LOAD, MVT::v1i1, Custom); - setOperationAction(ISD::LOAD, MVT::v2i1, Custom); - setOperationAction(ISD::LOAD, MVT::v4i1, Custom); - setOperationAction(ISD::LOAD, MVT::v8i1, Custom); - - setOperationAction(ISD::STORE, MVT::v1i1, Custom); - setOperationAction(ISD::STORE, MVT::v2i1, Custom); - setOperationAction(ISD::STORE, MVT::v4i1, Custom); - setOperationAction(ISD::STORE, MVT::v8i1, Custom); - } + if (!Subtarget.hasDQI()) + setOperationAction({ISD::LOAD, ISD::STORE}, + {MVT::v1i1, MVT::v2i1, MVT::v4i1, MVT::v8i1}, Custom); // Extends of v16i1/v8i1/v4i1/v2i1 to 128-bit vectors. - for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) { - setOperationAction(ISD::SIGN_EXTEND, VT, Custom); - setOperationAction(ISD::ZERO_EXTEND, VT, Custom); - setOperationAction(ISD::ANY_EXTEND, VT, Custom); - } - - for (auto VT : { MVT::v1i1, MVT::v2i1, MVT::v4i1, MVT::v8i1, MVT::v16i1 }) - setOperationAction(ISD::VSELECT, VT, Expand); + setOperationAction({ISD::SIGN_EXTEND, ISD::ZERO_EXTEND, ISD::ANY_EXTEND}, + {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64}, + Custom); - for (auto VT : { MVT::v2i1, MVT::v4i1, MVT::v8i1, MVT::v16i1 }) { - setOperationAction(ISD::SETCC, VT, Custom); - setOperationAction(ISD::STRICT_FSETCC, VT, Custom); - setOperationAction(ISD::STRICT_FSETCCS, VT, Custom); - setOperationAction(ISD::SELECT, VT, Custom); - setOperationAction(ISD::TRUNCATE, VT, Custom); + setOperationAction(ISD::VSELECT, + {MVT::v1i1, MVT::v2i1, MVT::v4i1, MVT::v8i1, MVT::v16i1}, + Expand); - setOperationAction(ISD::BUILD_VECTOR, VT, Custom); - setOperationAction(ISD::CONCAT_VECTORS, VT, Custom); - setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom); - setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom); - setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom); - setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom); - } + setOperationAction({ISD::SETCC, ISD::STRICT_FSETCC, ISD::STRICT_FSETCCS, + ISD::SELECT, 
ISD::TRUNCATE, ISD::BUILD_VECTOR, + ISD::CONCAT_VECTORS, ISD::EXTRACT_VECTOR_ELT, + ISD::INSERT_SUBVECTOR, ISD::INSERT_VECTOR_ELT, + ISD::VECTOR_SHUFFLE}, + {MVT::v2i1, MVT::v4i1, MVT::v8i1, MVT::v16i1}, Custom); - for (auto VT : { MVT::v1i1, MVT::v2i1, MVT::v4i1, MVT::v8i1 }) - setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom); + setOperationAction(ISD::EXTRACT_SUBVECTOR, + {MVT::v1i1, MVT::v2i1, MVT::v4i1, MVT::v8i1}, Custom); } // This block controls legalization for 512-bit operations with 32/64 bit @@ -1536,10 +1255,8 @@ } for (MVT VT : { MVT::v16f32, MVT::v8f64 }) { - setOperationAction(ISD::FNEG, VT, Custom); - setOperationAction(ISD::FABS, VT, Custom); - setOperationAction(ISD::FMA, VT, Legal); - setOperationAction(ISD::STRICT_FMA, VT, Legal); + setOperationAction({ISD::FNEG, ISD::FABS}, VT, Custom); + setOperationAction({ISD::FMA, ISD::STRICT_FMA}, VT, Legal); setOperationAction(ISD::FCOPYSIGN, VT, Custom); } @@ -1549,25 +1266,15 @@ setOperationPromotedToType(ISD::STRICT_FP_TO_SINT, VT, MVT::v16i32); setOperationPromotedToType(ISD::STRICT_FP_TO_UINT, VT, MVT::v16i32); } - setOperationAction(ISD::FP_TO_SINT, MVT::v16i32, Legal); - setOperationAction(ISD::FP_TO_UINT, MVT::v16i32, Legal); - setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v16i32, Legal); - setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v16i32, Legal); - setOperationAction(ISD::SINT_TO_FP, MVT::v16i32, Legal); - setOperationAction(ISD::UINT_TO_FP, MVT::v16i32, Legal); - setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v16i32, Legal); - setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v16i32, Legal); - - setOperationAction(ISD::STRICT_FADD, MVT::v16f32, Legal); - setOperationAction(ISD::STRICT_FADD, MVT::v8f64, Legal); - setOperationAction(ISD::STRICT_FSUB, MVT::v16f32, Legal); - setOperationAction(ISD::STRICT_FSUB, MVT::v8f64, Legal); - setOperationAction(ISD::STRICT_FMUL, MVT::v16f32, Legal); - setOperationAction(ISD::STRICT_FMUL, MVT::v8f64, Legal); - setOperationAction(ISD::STRICT_FDIV, 
MVT::v16f32, Legal); - setOperationAction(ISD::STRICT_FDIV, MVT::v8f64, Legal); - setOperationAction(ISD::STRICT_FSQRT, MVT::v16f32, Legal); - setOperationAction(ISD::STRICT_FSQRT, MVT::v8f64, Legal); + setOperationAction({ISD::FP_TO_SINT, ISD::FP_TO_UINT, + ISD::STRICT_FP_TO_SINT, ISD::STRICT_FP_TO_UINT, + ISD::SINT_TO_FP, ISD::UINT_TO_FP, + ISD::STRICT_SINT_TO_FP, ISD::STRICT_UINT_TO_FP}, + MVT::v16i32, Legal); + + setOperationAction({ISD::STRICT_FADD, ISD::STRICT_FSUB, ISD::STRICT_FMUL, + ISD::STRICT_FDIV, ISD::STRICT_FSQRT}, + {MVT::v16f32, MVT::v8f64}, Legal); setOperationAction(ISD::STRICT_FP_EXTEND, MVT::v8f64, Legal); setOperationAction(ISD::STRICT_FP_ROUND, MVT::v8f32, Legal); @@ -1582,135 +1289,92 @@ // With 512-bit vectors and no VLX, we prefer to widen MLOAD/MSTORE // to 512-bit rather than use the AVX2 instructions so that we can use // k-masks. - if (!Subtarget.hasVLX()) { - for (auto VT : {MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64, - MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64}) { - setOperationAction(ISD::MLOAD, VT, Custom); - setOperationAction(ISD::MSTORE, VT, Custom); - } - } + if (!Subtarget.hasVLX()) + setOperationAction({ISD::MLOAD, ISD::MSTORE}, + {MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64, + MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64}, + Custom); - setOperationAction(ISD::TRUNCATE, MVT::v8i32, Legal); - setOperationAction(ISD::TRUNCATE, MVT::v16i16, Legal); + setOperationAction(ISD::TRUNCATE, {MVT::v8i32, MVT::v16i16}, Legal); setOperationAction(ISD::TRUNCATE, MVT::v32i8, HasBWI ? 
Legal : Custom); setOperationAction(ISD::TRUNCATE, MVT::v16i64, Custom); - setOperationAction(ISD::ZERO_EXTEND, MVT::v32i16, Custom); - setOperationAction(ISD::ZERO_EXTEND, MVT::v16i32, Custom); - setOperationAction(ISD::ZERO_EXTEND, MVT::v8i64, Custom); - setOperationAction(ISD::ANY_EXTEND, MVT::v32i16, Custom); - setOperationAction(ISD::ANY_EXTEND, MVT::v16i32, Custom); - setOperationAction(ISD::ANY_EXTEND, MVT::v8i64, Custom); - setOperationAction(ISD::SIGN_EXTEND, MVT::v32i16, Custom); - setOperationAction(ISD::SIGN_EXTEND, MVT::v16i32, Custom); - setOperationAction(ISD::SIGN_EXTEND, MVT::v8i64, Custom); - - if (HasBWI) { + setOperationAction({ISD::ZERO_EXTEND, ISD::ANY_EXTEND, ISD::SIGN_EXTEND}, + {MVT::v32i16, MVT::v16i32, MVT::v8i64}, Custom); + + if (HasBWI) // Extends from v64i1 masks to 512-bit vectors. - setOperationAction(ISD::SIGN_EXTEND, MVT::v64i8, Custom); - setOperationAction(ISD::ZERO_EXTEND, MVT::v64i8, Custom); - setOperationAction(ISD::ANY_EXTEND, MVT::v64i8, Custom); - } + setOperationAction({ISD::SIGN_EXTEND, ISD::ZERO_EXTEND, ISD::ANY_EXTEND}, + MVT::v64i8, Custom); for (auto VT : { MVT::v16f32, MVT::v8f64 }) { - setOperationAction(ISD::FFLOOR, VT, Legal); - setOperationAction(ISD::STRICT_FFLOOR, VT, Legal); - setOperationAction(ISD::FCEIL, VT, Legal); - setOperationAction(ISD::STRICT_FCEIL, VT, Legal); - setOperationAction(ISD::FTRUNC, VT, Legal); - setOperationAction(ISD::STRICT_FTRUNC, VT, Legal); - setOperationAction(ISD::FRINT, VT, Legal); - setOperationAction(ISD::STRICT_FRINT, VT, Legal); - setOperationAction(ISD::FNEARBYINT, VT, Legal); - setOperationAction(ISD::STRICT_FNEARBYINT, VT, Legal); - setOperationAction(ISD::FROUNDEVEN, VT, Legal); - setOperationAction(ISD::STRICT_FROUNDEVEN, VT, Legal); + setOperationAction({ISD::FFLOOR, ISD::STRICT_FFLOOR, ISD::FCEIL, + ISD::STRICT_FCEIL, ISD::FTRUNC, ISD::STRICT_FTRUNC, + ISD::FRINT, ISD::STRICT_FRINT, ISD::FNEARBYINT, + ISD::STRICT_FNEARBYINT, ISD::FROUNDEVEN, + ISD::STRICT_FROUNDEVEN}, 
+ VT, Legal); setOperationAction(ISD::FROUND, VT, Custom); } - for (auto VT : {MVT::v32i16, MVT::v16i32, MVT::v8i64}) { - setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Custom); - setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Custom); - } + setOperationAction( + {ISD::SIGN_EXTEND_VECTOR_INREG, ISD::ZERO_EXTEND_VECTOR_INREG}, + {MVT::v32i16, MVT::v16i32, MVT::v8i64}, Custom); - setOperationAction(ISD::ADD, MVT::v32i16, HasBWI ? Legal : Custom); - setOperationAction(ISD::SUB, MVT::v32i16, HasBWI ? Legal : Custom); - setOperationAction(ISD::ADD, MVT::v64i8, HasBWI ? Legal : Custom); - setOperationAction(ISD::SUB, MVT::v64i8, HasBWI ? Legal : Custom); + setOperationAction({ISD::ADD, ISD::SUB}, {MVT::v32i16, MVT::v64i8}, + HasBWI ? Legal : Custom); - setOperationAction(ISD::MUL, MVT::v8i64, Custom); + setOperationAction(ISD::MUL, MVT::v8i64, Custom); setOperationAction(ISD::MUL, MVT::v16i32, Legal); setOperationAction(ISD::MUL, MVT::v32i16, HasBWI ? Legal : Custom); - setOperationAction(ISD::MUL, MVT::v64i8, Custom); - - setOperationAction(ISD::MULHU, MVT::v16i32, Custom); - setOperationAction(ISD::MULHS, MVT::v16i32, Custom); - setOperationAction(ISD::MULHS, MVT::v32i16, HasBWI ? Legal : Custom); - setOperationAction(ISD::MULHU, MVT::v32i16, HasBWI ? Legal : Custom); - setOperationAction(ISD::MULHS, MVT::v64i8, Custom); - setOperationAction(ISD::MULHU, MVT::v64i8, Custom); - setOperationAction(ISD::AVGCEILU, MVT::v32i16, HasBWI ? Legal : Custom); - setOperationAction(ISD::AVGCEILU, MVT::v64i8, HasBWI ? 
Legal : Custom); - - setOperationAction(ISD::SMULO, MVT::v64i8, Custom); - setOperationAction(ISD::UMULO, MVT::v64i8, Custom); - - setOperationAction(ISD::BITREVERSE, MVT::v64i8, Custom); - - for (auto VT : { MVT::v64i8, MVT::v32i16, MVT::v16i32, MVT::v8i64 }) { - setOperationAction(ISD::SRL, VT, Custom); - setOperationAction(ISD::SHL, VT, Custom); - setOperationAction(ISD::SRA, VT, Custom); - setOperationAction(ISD::ROTL, VT, Custom); - setOperationAction(ISD::ROTR, VT, Custom); - setOperationAction(ISD::SETCC, VT, Custom); + setOperationAction(ISD::MUL, MVT::v64i8, Custom); + + setOperationAction({ISD::MULHU, ISD::MULHS}, MVT::v16i32, Custom); + setOperationAction({ISD::MULHS, ISD::MULHU}, MVT::v32i16, + HasBWI ? Legal : Custom); + setOperationAction({ISD::MULHS, ISD::MULHU}, MVT::v64i8, Custom); + setOperationAction(ISD::AVGCEILU, {MVT::v32i16, MVT::v64i8}, + HasBWI ? Legal : Custom); + + setOperationAction({ISD::SMULO, ISD::UMULO}, MVT::v64i8, Custom); + + setOperationAction(ISD::BITREVERSE, MVT::v64i8, Custom); + + for (auto VT : {MVT::v64i8, MVT::v32i16, MVT::v16i32, MVT::v8i64}) { + setOperationAction( + {ISD::SRL, ISD::SHL, ISD::SRA, ISD::ROTL, ISD::ROTR, ISD::SETCC}, VT, + Custom); // The condition codes aren't legal in SSE/AVX and under AVX512 we use // setcc all the way to isel and prefer SETGT in some isel patterns. 
- setCondCodeAction(ISD::SETLT, VT, Custom); - setCondCodeAction(ISD::SETLE, VT, Custom); + setCondCodeAction({ISD::SETLT, ISD::SETLE}, VT, Custom); } for (auto VT : { MVT::v16i32, MVT::v8i64 }) { - setOperationAction(ISD::SMAX, VT, Legal); - setOperationAction(ISD::UMAX, VT, Legal); - setOperationAction(ISD::SMIN, VT, Legal); - setOperationAction(ISD::UMIN, VT, Legal); - setOperationAction(ISD::ABS, VT, Legal); - setOperationAction(ISD::CTPOP, VT, Custom); - setOperationAction(ISD::STRICT_FSETCC, VT, Custom); - setOperationAction(ISD::STRICT_FSETCCS, VT, Custom); + setOperationAction({ISD::SMAX, ISD::UMAX, ISD::SMIN, ISD::UMIN, ISD::ABS}, + VT, Legal); + setOperationAction({ISD::CTPOP, ISD::STRICT_FSETCC, ISD::STRICT_FSETCCS}, + VT, Custom); } for (auto VT : { MVT::v64i8, MVT::v32i16 }) { setOperationAction(ISD::ABS, VT, HasBWI ? Legal : Custom); setOperationAction(ISD::CTPOP, VT, Subtarget.hasBITALG() ? Legal : Custom); setOperationAction(ISD::CTLZ, VT, Custom); - setOperationAction(ISD::SMAX, VT, HasBWI ? Legal : Custom); - setOperationAction(ISD::UMAX, VT, HasBWI ? Legal : Custom); - setOperationAction(ISD::SMIN, VT, HasBWI ? Legal : Custom); - setOperationAction(ISD::UMIN, VT, HasBWI ? Legal : Custom); - setOperationAction(ISD::UADDSAT, VT, HasBWI ? Legal : Custom); - setOperationAction(ISD::SADDSAT, VT, HasBWI ? Legal : Custom); - setOperationAction(ISD::USUBSAT, VT, HasBWI ? Legal : Custom); - setOperationAction(ISD::SSUBSAT, VT, HasBWI ? Legal : Custom); - } - - setOperationAction(ISD::FSHL, MVT::v64i8, Custom); - setOperationAction(ISD::FSHR, MVT::v64i8, Custom); - setOperationAction(ISD::FSHL, MVT::v32i16, Custom); - setOperationAction(ISD::FSHR, MVT::v32i16, Custom); - setOperationAction(ISD::FSHL, MVT::v16i32, Custom); - setOperationAction(ISD::FSHR, MVT::v16i32, Custom); + setOperationAction({ISD::SMAX, ISD::UMAX, ISD::SMIN, ISD::UMIN, + ISD::UADDSAT, ISD::SADDSAT, ISD::USUBSAT, + ISD::SSUBSAT}, + VT, HasBWI ? 
Legal : Custom); + } + + setOperationAction({ISD::FSHL, ISD::FSHR}, + {MVT::v64i8, MVT::v32i16, MVT::v16i32}, Custom); if (Subtarget.hasDQI()) { - setOperationAction(ISD::SINT_TO_FP, MVT::v8i64, Legal); - setOperationAction(ISD::UINT_TO_FP, MVT::v8i64, Legal); - setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v8i64, Legal); - setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v8i64, Legal); - setOperationAction(ISD::FP_TO_SINT, MVT::v8i64, Legal); - setOperationAction(ISD::FP_TO_UINT, MVT::v8i64, Legal); - setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v8i64, Legal); - setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v8i64, Legal); + setOperationAction({ISD::SINT_TO_FP, ISD::UINT_TO_FP, + ISD::STRICT_SINT_TO_FP, ISD::STRICT_UINT_TO_FP, + ISD::FP_TO_SINT, ISD::FP_TO_UINT, + ISD::STRICT_FP_TO_SINT, ISD::STRICT_FP_TO_UINT}, + MVT::v8i64, Legal); setOperationAction(ISD::MUL, MVT::v8i64, Legal); } @@ -1738,43 +1402,32 @@ MVT::v16f32, MVT::v8f64 }) { setOperationAction(ISD::CONCAT_VECTORS, VT, Custom); setOperationAction(ISD::INSERT_SUBVECTOR, VT, Legal); - setOperationAction(ISD::SELECT, VT, Custom); - setOperationAction(ISD::VSELECT, VT, Custom); - setOperationAction(ISD::BUILD_VECTOR, VT, Custom); - setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom); - setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom); - setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom); - setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom); + setOperationAction({ISD::SELECT, ISD::VSELECT, ISD::BUILD_VECTOR, + ISD::EXTRACT_VECTOR_ELT, ISD::VECTOR_SHUFFLE, + ISD::SCALAR_TO_VECTOR, ISD::INSERT_VECTOR_ELT}, + VT, Custom); } for (auto VT : { MVT::v16i32, MVT::v8i64, MVT::v16f32, MVT::v8f64 }) { - setOperationAction(ISD::MLOAD, VT, Legal); - setOperationAction(ISD::MSTORE, VT, Legal); - setOperationAction(ISD::MGATHER, VT, Custom); - setOperationAction(ISD::MSCATTER, VT, Custom); - } - if (HasBWI) { - for (auto VT : { MVT::v64i8, MVT::v32i16 }) { - setOperationAction(ISD::MLOAD, VT, Legal); - 
setOperationAction(ISD::MSTORE, VT, Legal); - } - } else { - setOperationAction(ISD::STORE, MVT::v32i16, Custom); - setOperationAction(ISD::STORE, MVT::v64i8, Custom); + setOperationAction({ISD::MLOAD, ISD::MSTORE}, VT, Legal); + setOperationAction({ISD::MGATHER, ISD::MSCATTER}, VT, Custom); } + if (HasBWI) + setOperationAction({ISD::MLOAD, ISD::MSTORE}, {MVT::v64i8, MVT::v32i16}, + Legal); + else + setOperationAction(ISD::STORE, {MVT::v32i16, MVT::v64i8}, Custom); if (Subtarget.hasVBMI2()) { - for (auto VT : { MVT::v8i16, MVT::v4i32, MVT::v2i64, - MVT::v16i16, MVT::v8i32, MVT::v4i64, - MVT::v32i16, MVT::v16i32, MVT::v8i64 }) { - setOperationAction(ISD::FSHL, VT, Custom); - setOperationAction(ISD::FSHR, VT, Custom); - } + setOperationAction({ISD::FSHL, ISD::FSHR}, + {MVT::v8i16, MVT::v4i32, MVT::v2i64, MVT::v16i16, + MVT::v8i32, MVT::v4i64, MVT::v32i16, MVT::v16i32, + MVT::v8i64}, + Custom); setOperationAction(ISD::ROTL, MVT::v32i16, Custom); - setOperationAction(ISD::ROTR, MVT::v8i16, Custom); - setOperationAction(ISD::ROTR, MVT::v16i16, Custom); - setOperationAction(ISD::ROTR, MVT::v32i16, Custom); + setOperationAction(ISD::ROTR, {MVT::v8i16, MVT::v16i16, MVT::v32i16}, + Custom); } }// useAVX512Regs @@ -1785,23 +1438,11 @@ // These operations are handled on non-VLX by artificially widening in // isel patterns. - setOperationAction(ISD::FP_TO_UINT, MVT::v8i32, - Subtarget.hasVLX() ? Legal : Custom); - setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, - Subtarget.hasVLX() ? Legal : Custom); - setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v8i32, - Subtarget.hasVLX() ? Legal : Custom); - setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v4i32, - Subtarget.hasVLX() ? Legal : Custom); - setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v2i32, Custom); - setOperationAction(ISD::UINT_TO_FP, MVT::v8i32, - Subtarget.hasVLX() ? Legal : Custom); - setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, - Subtarget.hasVLX() ? 
Legal : Custom); - setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v8i32, - Subtarget.hasVLX() ? Legal : Custom); - setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4i32, + setOperationAction({ISD::FP_TO_UINT, ISD::STRICT_FP_TO_UINT, + ISD::UINT_TO_FP, ISD::STRICT_UINT_TO_FP}, + {MVT::v8i32, MVT::v4i32}, Subtarget.hasVLX() ? Legal : Custom); + setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v2i32, Custom); if (Subtarget.hasDQI()) { // Fast v2f32 SINT_TO_FP( v2i64 ) custom conversion. @@ -1810,65 +1451,44 @@ isOperationCustom(ISD::STRICT_UINT_TO_FP, MVT::v2f32) && "Unexpected operation action!"); // v2i64 FP_TO_S/UINT(v2f32) custom conversion. - setOperationAction(ISD::FP_TO_SINT, MVT::v2f32, Custom); - setOperationAction(ISD::FP_TO_UINT, MVT::v2f32, Custom); - setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v2f32, Custom); - setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v2f32, Custom); + setOperationAction({ISD::FP_TO_SINT, ISD::FP_TO_UINT, + ISD::STRICT_FP_TO_SINT, ISD::STRICT_FP_TO_UINT}, + MVT::v2f32, Custom); } - for (auto VT : { MVT::v2i64, MVT::v4i64 }) { - setOperationAction(ISD::SMAX, VT, Legal); - setOperationAction(ISD::UMAX, VT, Legal); - setOperationAction(ISD::SMIN, VT, Legal); - setOperationAction(ISD::UMIN, VT, Legal); - setOperationAction(ISD::ABS, VT, Legal); - } + setOperationAction({ISD::SMAX, ISD::UMAX, ISD::SMIN, ISD::UMIN, ISD::ABS}, + {MVT::v2i64, MVT::v4i64}, Legal); - for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64 }) { - setOperationAction(ISD::ROTL, VT, Custom); - setOperationAction(ISD::ROTR, VT, Custom); - } + setOperationAction({ISD::ROTL, ISD::ROTR}, + {MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64}, + Custom); // Custom legalize 2x32 to get a little better code. 
- setOperationAction(ISD::MSCATTER, MVT::v2f32, Custom); - setOperationAction(ISD::MSCATTER, MVT::v2i32, Custom); + setOperationAction(ISD::MSCATTER, {MVT::v2f32, MVT::v2i32}, Custom); - for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64, - MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64 }) - setOperationAction(ISD::MSCATTER, VT, Custom); + setOperationAction(ISD::MSCATTER, + {MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64, + MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64}, + Custom); if (Subtarget.hasDQI()) { for (auto VT : { MVT::v2i64, MVT::v4i64 }) { - setOperationAction(ISD::SINT_TO_FP, VT, - Subtarget.hasVLX() ? Legal : Custom); - setOperationAction(ISD::UINT_TO_FP, VT, - Subtarget.hasVLX() ? Legal : Custom); - setOperationAction(ISD::STRICT_SINT_TO_FP, VT, - Subtarget.hasVLX() ? Legal : Custom); - setOperationAction(ISD::STRICT_UINT_TO_FP, VT, - Subtarget.hasVLX() ? Legal : Custom); - setOperationAction(ISD::FP_TO_SINT, VT, - Subtarget.hasVLX() ? Legal : Custom); - setOperationAction(ISD::FP_TO_UINT, VT, - Subtarget.hasVLX() ? Legal : Custom); - setOperationAction(ISD::STRICT_FP_TO_SINT, VT, - Subtarget.hasVLX() ? Legal : Custom); - setOperationAction(ISD::STRICT_FP_TO_UINT, VT, - Subtarget.hasVLX() ? Legal : Custom); + setOperationAction({ISD::SINT_TO_FP, ISD::UINT_TO_FP, + ISD::STRICT_SINT_TO_FP, ISD::STRICT_UINT_TO_FP, + ISD::FP_TO_SINT, ISD::FP_TO_UINT, + ISD::STRICT_FP_TO_SINT, ISD::STRICT_FP_TO_UINT}, + VT, Subtarget.hasVLX() ? 
Legal : Custom); setOperationAction(ISD::MUL, VT, Legal); } } - if (Subtarget.hasCDI()) { - for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64 }) { - setOperationAction(ISD::CTLZ, VT, Legal); - } - } // Subtarget.hasCDI() + if (Subtarget.hasCDI()) + setOperationAction( + ISD::CTLZ, {MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64}, Legal); - if (Subtarget.hasVPOPCNTDQ()) { - for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64 }) - setOperationAction(ISD::CTPOP, VT, Legal); - } + if (Subtarget.hasVPOPCNTDQ()) + setOperationAction( + ISD::CTPOP, {MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64}, Legal); } // This block control legalization of v32i1/v64i1 which are available with @@ -1880,121 +1500,89 @@ for (auto VT : { MVT::v32i1, MVT::v64i1 }) { setOperationAction(ISD::VSELECT, VT, Expand); - setOperationAction(ISD::TRUNCATE, VT, Custom); - setOperationAction(ISD::SETCC, VT, Custom); - setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom); - setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom); - setOperationAction(ISD::SELECT, VT, Custom); - setOperationAction(ISD::BUILD_VECTOR, VT, Custom); - setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom); - setOperationAction(ISD::CONCAT_VECTORS, VT, Custom); - setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom); + setOperationAction({ISD::TRUNCATE, ISD::SETCC, ISD::EXTRACT_VECTOR_ELT, + ISD::INSERT_VECTOR_ELT, ISD::SELECT, + ISD::BUILD_VECTOR, ISD::VECTOR_SHUFFLE, + ISD::CONCAT_VECTORS, ISD::INSERT_SUBVECTOR}, + VT, Custom); } - for (auto VT : { MVT::v16i1, MVT::v32i1 }) - setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom); + setOperationAction(ISD::EXTRACT_SUBVECTOR, {MVT::v16i1, MVT::v32i1}, + Custom); // Extends from v32i1 masks to 256-bit vectors. 
- setOperationAction(ISD::SIGN_EXTEND, MVT::v32i8, Custom); - setOperationAction(ISD::ZERO_EXTEND, MVT::v32i8, Custom); - setOperationAction(ISD::ANY_EXTEND, MVT::v32i8, Custom); + setOperationAction({ISD::SIGN_EXTEND, ISD::ZERO_EXTEND, ISD::ANY_EXTEND}, + MVT::v32i8, Custom); - for (auto VT : { MVT::v32i8, MVT::v16i8, MVT::v16i16, MVT::v8i16 }) { - setOperationAction(ISD::MLOAD, VT, Subtarget.hasVLX() ? Legal : Custom); - setOperationAction(ISD::MSTORE, VT, Subtarget.hasVLX() ? Legal : Custom); - } + setOperationAction({ISD::MLOAD, ISD::MSTORE}, + {MVT::v32i8, MVT::v16i8, MVT::v16i16, MVT::v8i16}, + Subtarget.hasVLX() ? Legal : Custom); // These operations are handled on non-VLX by artificially widening in // isel patterns. // TODO: Custom widen in lowering on non-VLX and drop the isel patterns? - if (Subtarget.hasBITALG()) { - for (auto VT : { MVT::v16i8, MVT::v32i8, MVT::v8i16, MVT::v16i16 }) - setOperationAction(ISD::CTPOP, VT, Legal); - } + if (Subtarget.hasBITALG()) + setOperationAction( + ISD::CTPOP, {MVT::v16i8, MVT::v32i8, MVT::v8i16, MVT::v16i16}, Legal); } if (!Subtarget.useSoftFloat() && Subtarget.hasFP16()) { - auto setGroup = [&] (MVT VT) { - setOperationAction(ISD::FADD, VT, Legal); - setOperationAction(ISD::STRICT_FADD, VT, Legal); - setOperationAction(ISD::FSUB, VT, Legal); - setOperationAction(ISD::STRICT_FSUB, VT, Legal); - setOperationAction(ISD::FMUL, VT, Legal); - setOperationAction(ISD::STRICT_FMUL, VT, Legal); - setOperationAction(ISD::FDIV, VT, Legal); - setOperationAction(ISD::STRICT_FDIV, VT, Legal); - setOperationAction(ISD::FSQRT, VT, Legal); - setOperationAction(ISD::STRICT_FSQRT, VT, Legal); - - setOperationAction(ISD::FFLOOR, VT, Legal); - setOperationAction(ISD::STRICT_FFLOOR, VT, Legal); - setOperationAction(ISD::FCEIL, VT, Legal); - setOperationAction(ISD::STRICT_FCEIL, VT, Legal); - setOperationAction(ISD::FTRUNC, VT, Legal); - setOperationAction(ISD::STRICT_FTRUNC, VT, Legal); - setOperationAction(ISD::FRINT, VT, Legal); - 
setOperationAction(ISD::STRICT_FRINT, VT, Legal); - setOperationAction(ISD::FNEARBYINT, VT, Legal); - setOperationAction(ISD::STRICT_FNEARBYINT, VT, Legal); - - setOperationAction(ISD::LOAD, VT, Legal); - setOperationAction(ISD::STORE, VT, Legal); - - setOperationAction(ISD::FMA, VT, Legal); - setOperationAction(ISD::STRICT_FMA, VT, Legal); - setOperationAction(ISD::VSELECT, VT, Legal); - setOperationAction(ISD::BUILD_VECTOR, VT, Custom); - setOperationAction(ISD::SELECT, VT, Custom); - - setOperationAction(ISD::FNEG, VT, Custom); - setOperationAction(ISD::FABS, VT, Custom); - setOperationAction(ISD::FCOPYSIGN, VT, Custom); - setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom); - setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom); + auto setGroup = [&](MVT VT) { + setOperationAction({ISD::FADD, ISD::STRICT_FADD, + ISD::FSUB, ISD::STRICT_FSUB, + ISD::FMUL, ISD::STRICT_FMUL, + ISD::FDIV, ISD::STRICT_FDIV, + ISD::FSQRT, ISD::STRICT_FSQRT, + ISD::FFLOOR, ISD::STRICT_FFLOOR, + ISD::FCEIL, ISD::STRICT_FCEIL, + ISD::FTRUNC, ISD::STRICT_FTRUNC, + ISD::FRINT, ISD::STRICT_FRINT, + ISD::FNEARBYINT, ISD::STRICT_FNEARBYINT, + ISD::LOAD, ISD::STORE, + ISD::FMA, ISD::STRICT_FMA, + ISD::VSELECT}, + VT, Legal); + + setOperationAction({ISD::BUILD_VECTOR, ISD::SELECT, ISD::FNEG, ISD::FABS, + ISD::FCOPYSIGN, ISD::EXTRACT_VECTOR_ELT, + ISD::VECTOR_SHUFFLE}, + VT, Custom); }; // AVX512_FP16 scalar operations setGroup(MVT::f16); addRegisterClass(MVT::f16, &X86::FR16XRegClass); - setOperationAction(ISD::FREM, MVT::f16, Promote); - setOperationAction(ISD::STRICT_FREM, MVT::f16, Promote); - setOperationAction(ISD::SELECT_CC, MVT::f16, Expand); - setOperationAction(ISD::BR_CC, MVT::f16, Expand); - setOperationAction(ISD::SETCC, MVT::f16, Custom); - setOperationAction(ISD::STRICT_FSETCC, MVT::f16, Custom); - setOperationAction(ISD::STRICT_FSETCCS, MVT::f16, Custom); - setOperationAction(ISD::FROUND, MVT::f16, Custom); + setOperationAction({ISD::FREM, ISD::STRICT_FREM}, MVT::f16, 
Promote); + setOperationAction({ISD::SELECT_CC, ISD::BR_CC}, MVT::f16, Expand); + setOperationAction( + {ISD::SETCC, ISD::STRICT_FSETCC, ISD::STRICT_FSETCCS, ISD::FROUND}, + MVT::f16, Custom); setOperationAction(ISD::STRICT_FROUND, MVT::f16, Promote); - setOperationAction(ISD::FROUNDEVEN, MVT::f16, Legal); - setOperationAction(ISD::STRICT_FROUNDEVEN, MVT::f16, Legal); - setOperationAction(ISD::FP_ROUND, MVT::f16, Custom); - setOperationAction(ISD::STRICT_FP_ROUND, MVT::f16, Custom); + setOperationAction({ISD::FROUNDEVEN, ISD::STRICT_FROUNDEVEN}, MVT::f16, + Legal); + setOperationAction({ISD::FP_ROUND, ISD::STRICT_FP_ROUND}, MVT::f16, Custom); setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f32, Legal); - if (isTypeLegal(MVT::f80)) { - setOperationAction(ISD::FP_EXTEND, MVT::f80, Custom); - setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f80, Custom); - } + if (isTypeLegal(MVT::f80)) + setOperationAction({ISD::FP_EXTEND, ISD::STRICT_FP_EXTEND}, MVT::f80, + Custom); - setCondCodeAction(ISD::SETOEQ, MVT::f16, Expand); - setCondCodeAction(ISD::SETUNE, MVT::f16, Expand); + setCondCodeAction({ISD::SETOEQ, ISD::SETUNE}, MVT::f16, Expand); if (Subtarget.useAVX512Regs()) { setGroup(MVT::v32f16); addRegisterClass(MVT::v32f16, &X86::VR512RegClass); setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v32f16, Custom); - setOperationAction(ISD::SINT_TO_FP, MVT::v32i16, Legal); - setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v32i16, Legal); - setOperationAction(ISD::UINT_TO_FP, MVT::v32i16, Legal); - setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v32i16, Legal); + setOperationAction({ISD::SINT_TO_FP, ISD::STRICT_SINT_TO_FP, + ISD::UINT_TO_FP, ISD::STRICT_UINT_TO_FP}, + MVT::v32i16, Legal); setOperationAction(ISD::STRICT_FP_ROUND, MVT::v16f16, Legal); setOperationAction(ISD::STRICT_FP_EXTEND, MVT::v16f32, Legal); setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v32f16, Custom); - setOperationAction(ISD::FP_TO_SINT, MVT::v32i16, Custom); - setOperationAction(ISD::STRICT_FP_TO_SINT, 
MVT::v32i16, Custom); - setOperationAction(ISD::FP_TO_UINT, MVT::v32i16, Custom); - setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v32i16, Custom); + setOperationAction({ISD::FP_TO_SINT, ISD::STRICT_FP_TO_SINT, + ISD::FP_TO_UINT, ISD::STRICT_FP_TO_UINT}, + MVT::v32i16, Custom); setOperationPromotedToType(ISD::FP_TO_SINT, MVT::v32i8, MVT::v32i16); setOperationPromotedToType(ISD::STRICT_FP_TO_SINT, MVT::v32i8, MVT::v32i16); @@ -2015,8 +1603,8 @@ setLoadExtAction(ISD::EXTLOAD, MVT::v8f64, MVT::v8f16, Legal); setLoadExtAction(ISD::EXTLOAD, MVT::v16f32, MVT::v16f16, Legal); - setOperationAction(ISD::STRICT_FSETCC, MVT::v32i1, Custom); - setOperationAction(ISD::STRICT_FSETCCS, MVT::v32i1, Custom); + setOperationAction({ISD::STRICT_FSETCC, ISD::STRICT_FSETCCS}, MVT::v32i1, + Custom); } if (Subtarget.hasVLX()) { @@ -2027,25 +1615,19 @@ setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8f16, Legal); setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16f16, Custom); - setOperationAction(ISD::SINT_TO_FP, MVT::v16i16, Legal); - setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v16i16, Legal); - setOperationAction(ISD::SINT_TO_FP, MVT::v8i16, Legal); - setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v8i16, Legal); - setOperationAction(ISD::UINT_TO_FP, MVT::v16i16, Legal); - setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v16i16, Legal); - setOperationAction(ISD::UINT_TO_FP, MVT::v8i16, Legal); - setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v8i16, Legal); - - setOperationAction(ISD::FP_TO_SINT, MVT::v8i16, Custom); - setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v8i16, Custom); - setOperationAction(ISD::FP_TO_UINT, MVT::v8i16, Custom); - setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v8i16, Custom); + setOperationAction({ISD::SINT_TO_FP, ISD::STRICT_SINT_TO_FP, + ISD::UINT_TO_FP, ISD::STRICT_UINT_TO_FP}, + {MVT::v16i16, MVT::v8i16}, Legal); + + setOperationAction({ISD::FP_TO_SINT, ISD::STRICT_FP_TO_SINT, + ISD::FP_TO_UINT, ISD::STRICT_FP_TO_UINT}, + MVT::v8i16, Custom); 
setOperationAction(ISD::STRICT_FP_ROUND, MVT::v8f16, Legal); setOperationAction(ISD::STRICT_FP_EXTEND, MVT::v8f32, Legal); // INSERT_VECTOR_ELT v8f16 extended to VECTOR_SHUFFLE - setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8f16, Custom); - setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16f16, Custom); + setOperationAction(ISD::INSERT_VECTOR_ELT, {MVT::v8f16, MVT::v16f16}, + Custom); setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v8f16, Legal); setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v16f16, Legal); @@ -2057,8 +1639,7 @@ setLoadExtAction(ISD::EXTLOAD, MVT::v4f32, MVT::v4f16, Legal); // Need to custom widen these to prevent scalarization. - setOperationAction(ISD::LOAD, MVT::v4f16, Custom); - setOperationAction(ISD::STORE, MVT::v4f16, Custom); + setOperationAction({ISD::LOAD, ISD::STORE}, MVT::v4f16, Custom); } // Support fp16 0 immediate @@ -2085,38 +1666,23 @@ if (Subtarget.hasFP16()) { // vcvttph2[u]dq v4f16 -> v4i32/64, v2f16 -> v2i32/64 - setOperationAction(ISD::FP_TO_SINT, MVT::v2f16, Custom); - setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v2f16, Custom); - setOperationAction(ISD::FP_TO_UINT, MVT::v2f16, Custom); - setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v2f16, Custom); - setOperationAction(ISD::FP_TO_SINT, MVT::v4f16, Custom); - setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v4f16, Custom); - setOperationAction(ISD::FP_TO_UINT, MVT::v4f16, Custom); - setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v4f16, Custom); + setOperationAction({ISD::FP_TO_SINT, ISD::STRICT_FP_TO_SINT, + ISD::FP_TO_UINT, ISD::STRICT_FP_TO_UINT}, + {MVT::v2f16, MVT::v4f16}, Custom); // vcvt[u]dq2ph v4i32/64 -> v4f16, v2i32/64 -> v2f16 - setOperationAction(ISD::SINT_TO_FP, MVT::v2f16, Custom); - setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v2f16, Custom); - setOperationAction(ISD::UINT_TO_FP, MVT::v2f16, Custom); - setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v2f16, Custom); - setOperationAction(ISD::SINT_TO_FP, MVT::v4f16, Custom); - 
setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v4f16, Custom); - setOperationAction(ISD::UINT_TO_FP, MVT::v4f16, Custom); - setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4f16, Custom); + setOperationAction({ISD::SINT_TO_FP, ISD::STRICT_SINT_TO_FP, + ISD::UINT_TO_FP, ISD::STRICT_UINT_TO_FP}, + {MVT::v2f16, MVT::v4f16}, Custom); // vcvtps2phx v4f32 -> v4f16, v2f32 -> v2f16 - setOperationAction(ISD::FP_ROUND, MVT::v2f16, Custom); - setOperationAction(ISD::STRICT_FP_ROUND, MVT::v2f16, Custom); - setOperationAction(ISD::FP_ROUND, MVT::v4f16, Custom); - setOperationAction(ISD::STRICT_FP_ROUND, MVT::v4f16, Custom); + setOperationAction({ISD::FP_ROUND, ISD::STRICT_FP_ROUND}, + {MVT::v2f16, MVT::v4f16}, Custom); // vcvtph2psx v4f16 -> v4f32, v2f16 -> v2f32 - setOperationAction(ISD::FP_EXTEND, MVT::v2f16, Custom); - setOperationAction(ISD::STRICT_FP_EXTEND, MVT::v2f16, Custom); - setOperationAction(ISD::FP_EXTEND, MVT::v4f16, Custom); - setOperationAction(ISD::STRICT_FP_EXTEND, MVT::v4f16, Custom); + setOperationAction({ISD::FP_EXTEND, ISD::STRICT_FP_EXTEND}, + {MVT::v2f16, MVT::v4f16}, Custom); } - setOperationAction(ISD::TRUNCATE, MVT::v16i32, Custom); - setOperationAction(ISD::TRUNCATE, MVT::v8i64, Custom); - setOperationAction(ISD::TRUNCATE, MVT::v16i64, Custom); + setOperationAction(ISD::TRUNCATE, {MVT::v16i32, MVT::v8i64, MVT::v16i64}, + Custom); } if (Subtarget.hasAMXTILE()) { @@ -2124,12 +1690,11 @@ } // We want to custom lower some of our intrinsics. 
- setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom); - setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom); - setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom); - if (!Subtarget.is64Bit()) { + setOperationAction( + {ISD::INTRINSIC_WO_CHAIN, ISD::INTRINSIC_W_CHAIN, ISD::INTRINSIC_VOID}, + MVT::Other, Custom); + if (!Subtarget.is64Bit()) setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom); - } // Only custom-lower 64-bit SADDO and friends on 64-bit because we don't // handle type legalization for these operations here. @@ -2141,27 +1706,21 @@ if (VT == MVT::i64 && !Subtarget.is64Bit()) continue; // Add/Sub/Mul with overflow operations are custom lowered. - setOperationAction(ISD::SADDO, VT, Custom); - setOperationAction(ISD::UADDO, VT, Custom); - setOperationAction(ISD::SSUBO, VT, Custom); - setOperationAction(ISD::USUBO, VT, Custom); - setOperationAction(ISD::SMULO, VT, Custom); - setOperationAction(ISD::UMULO, VT, Custom); + setOperationAction({ISD::SADDO, ISD::UADDO, ISD::SSUBO, ISD::USUBO, + ISD::SMULO, ISD::UMULO}, + VT, Custom); // Support carry in as value rather than glue. - setOperationAction(ISD::ADDCARRY, VT, Custom); - setOperationAction(ISD::SUBCARRY, VT, Custom); - setOperationAction(ISD::SETCCCARRY, VT, Custom); - setOperationAction(ISD::SADDO_CARRY, VT, Custom); - setOperationAction(ISD::SSUBO_CARRY, VT, Custom); + setOperationAction({ISD::ADDCARRY, ISD::SUBCARRY, ISD::SETCCCARRY, + ISD::SADDO_CARRY, ISD::SSUBO_CARRY}, + VT, Custom); } if (!Subtarget.is64Bit()) { // These libcalls are not available in 32-bit. - setLibcallName(RTLIB::SHL_I128, nullptr); - setLibcallName(RTLIB::SRL_I128, nullptr); - setLibcallName(RTLIB::SRA_I128, nullptr); - setLibcallName(RTLIB::MUL_I128, nullptr); + setLibcallName( + {RTLIB::SHL_I128, RTLIB::SRL_I128, RTLIB::SRA_I128, RTLIB::MUL_I128}, + nullptr); // The MULO libcall is not part of libgcc, only compiler-rt. 
setLibcallName(RTLIB::MULO_I64, nullptr); } @@ -2171,25 +1730,17 @@ // Combine sin / cos into _sincos_stret if it is available. if (getLibcallName(RTLIB::SINCOS_STRET_F32) != nullptr && getLibcallName(RTLIB::SINCOS_STRET_F64) != nullptr) { - setOperationAction(ISD::FSINCOS, MVT::f64, Custom); - setOperationAction(ISD::FSINCOS, MVT::f32, Custom); - } - - if (Subtarget.isTargetWin64()) { - setOperationAction(ISD::SDIV, MVT::i128, Custom); - setOperationAction(ISD::UDIV, MVT::i128, Custom); - setOperationAction(ISD::SREM, MVT::i128, Custom); - setOperationAction(ISD::UREM, MVT::i128, Custom); - setOperationAction(ISD::FP_TO_SINT, MVT::i128, Custom); - setOperationAction(ISD::FP_TO_UINT, MVT::i128, Custom); - setOperationAction(ISD::SINT_TO_FP, MVT::i128, Custom); - setOperationAction(ISD::UINT_TO_FP, MVT::i128, Custom); - setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i128, Custom); - setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i128, Custom); - setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i128, Custom); - setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i128, Custom); + setOperationAction(ISD::FSINCOS, {MVT::f64, MVT::f32}, Custom); } + if (Subtarget.isTargetWin64()) + setOperationAction({ISD::SDIV, ISD::UDIV, ISD::SREM, ISD::UREM, + ISD::FP_TO_SINT, ISD::FP_TO_UINT, ISD::SINT_TO_FP, + ISD::UINT_TO_FP, ISD::STRICT_FP_TO_SINT, + ISD::STRICT_FP_TO_UINT, ISD::STRICT_SINT_TO_FP, + ISD::STRICT_UINT_TO_FP}, + MVT::i128, Custom); + // On 32 bit MSVC, `fmodf(f32)` is not defined - only `fmod(f64)` // is. We should promote the value to 64-bits to solve this. // This is what the CRT headers do - `fmodf` is an inline header