diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -198,16 +198,13 @@
   // Integer absolute.
   if (Subtarget.canUseCMOV()) {
-    setOperationAction(ISD::ABS , MVT::i16 , Custom);
-    setOperationAction(ISD::ABS , MVT::i32 , Custom);
+    setOperationAction(ISD::ABS, {MVT::i16, MVT::i32}, Custom);
     if (Subtarget.is64Bit())
       setOperationAction(ISD::ABS , MVT::i64 , Custom);
   }

   // Signed saturation subtraction.
-  setOperationAction(ISD::SSUBSAT , MVT::i8 , Custom);
-  setOperationAction(ISD::SSUBSAT , MVT::i16 , Custom);
-  setOperationAction(ISD::SSUBSAT , MVT::i32 , Custom);
+  setOperationAction(ISD::SSUBSAT, {MVT::i8, MVT::i16, MVT::i32}, Custom);
   if (Subtarget.is64Bit())
     setOperationAction(ISD::SSUBSAT , MVT::i64 , Custom);
@@ -216,8 +213,7 @@
     // For slow shld targets we only lower for code size.
     LegalizeAction ShiftDoubleAction = Subtarget.isSHLDSlow() ? Custom : Legal;
-    setOperationAction(ShiftOp , MVT::i8 , Custom);
-    setOperationAction(ShiftOp , MVT::i16 , Custom);
+    setOperationAction(ShiftOp, {MVT::i8, MVT::i16}, Custom);
     setOperationAction(ShiftOp , MVT::i32 , ShiftDoubleAction);
     if (Subtarget.is64Bit())
       setOperationAction(ShiftOp , MVT::i64 , ShiftDoubleAction);
@@ -226,10 +222,8 @@
   if (!Subtarget.useSoftFloat()) {
     // Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this
    // operation.
-    setOperationAction(ISD::UINT_TO_FP, MVT::i8, Promote);
-    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i8, Promote);
-    setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote);
-    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i16, Promote);
+    setOperationAction(ISD::UINT_TO_FP, {MVT::i8, MVT::i16}, Promote);
+    setOperationAction(ISD::STRICT_UINT_TO_FP, {MVT::i8, MVT::i16}, Promote);
     // We have an algorithm for SSE2, and we turn this into a 64-bit
     // FILD or VCVTUSI2SS/SD for other targets.
     setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
@@ -260,10 +254,8 @@
     setOperationAction(ISD::FP_TO_SINT, MVT::i8, Promote);
     // FIXME: This doesn't generate invalid exception when it should. PR44019.
     setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i8, Promote);
-    setOperationAction(ISD::FP_TO_SINT, MVT::i16, Custom);
-    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i16, Custom);
-    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
-    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
+    setOperationAction(ISD::FP_TO_SINT, {MVT::i16, MVT::i32}, Custom);
+    setOperationAction(ISD::STRICT_FP_TO_SINT, {MVT::i16, MVT::i32}, Custom);
     // In 32-bit mode these are custom lowered. In 64-bit mode F32 and F64
     // are Legal, f80 is custom lowered.
     setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
@@ -277,15 +269,11 @@
     setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);
     // FIXME: This doesn't generate invalid exception when it should. PR44019.
     setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i16, Promote);
-    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
-    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
-    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
-    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i64, Custom);
+    setOperationAction(ISD::FP_TO_UINT, {MVT::i32, MVT::i64}, Custom);
+    setOperationAction(ISD::STRICT_FP_TO_UINT, {MVT::i32, MVT::i64}, Custom);

-    setOperationAction(ISD::LRINT, MVT::f32, Custom);
-    setOperationAction(ISD::LRINT, MVT::f64, Custom);
-    setOperationAction(ISD::LLRINT, MVT::f32, Custom);
-    setOperationAction(ISD::LLRINT, MVT::f64, Custom);
+    setOperationAction(ISD::LRINT, {MVT::f32, MVT::f64}, Custom);
+    setOperationAction(ISD::LLRINT, {MVT::f32, MVT::f64}, Custom);

     if (!Subtarget.is64Bit()) {
       setOperationAction(ISD::LRINT, MVT::i64, Custom);
@@ -307,13 +295,11 @@
   }

   // Handle address space casts between mixed sized pointers.
-  setOperationAction(ISD::ADDRSPACECAST, MVT::i32, Custom);
-  setOperationAction(ISD::ADDRSPACECAST, MVT::i64, Custom);
+  setOperationAction(ISD::ADDRSPACECAST, {MVT::i32, MVT::i64}, Custom);

   // TODO: when we have SSE, these could be more efficient, by using movd/movq.
   if (!Subtarget.hasSSE2()) {
-    setOperationAction(ISD::BITCAST , MVT::f32 , Expand);
-    setOperationAction(ISD::BITCAST , MVT::i32 , Expand);
+    setOperationAction(ISD::BITCAST, {MVT::f32, MVT::i32}, Expand);
     if (Subtarget.is64Bit()) {
       setOperationAction(ISD::BITCAST , MVT::f64 , Expand);
       // Without SSE, i64->f64 goes through memory.
@@ -350,14 +336,11 @@
   }
   if (Subtarget.is64Bit())
     setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
-  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16 , Legal);
-  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8 , Legal);
+  setOperationAction(ISD::SIGN_EXTEND_INREG, {MVT::i16, MVT::i8}, Legal);
   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1 , Expand);

-  setOperationAction(ISD::FREM , MVT::f32 , Expand);
-  setOperationAction(ISD::FREM , MVT::f64 , Expand);
-  setOperationAction(ISD::FREM , MVT::f80 , Expand);
-  setOperationAction(ISD::FREM , MVT::f128 , Expand);
+  setOperationAction(ISD::FREM, {MVT::f32, MVT::f64, MVT::f80, MVT::f128},
+                     Expand);

   if (!Subtarget.useSoftFloat() && Subtarget.hasX87()) {
     setOperationAction(ISD::FLT_ROUNDS_ , MVT::i32 , Custom);
@@ -374,10 +357,8 @@
     // is enabled.
     setOperationPromotedToType(ISD::CTTZ_ZERO_UNDEF, MVT::i16, MVT::i32);
   } else {
-    setOperationAction(ISD::CTTZ, MVT::i16, Custom);
-    setOperationAction(ISD::CTTZ , MVT::i32 , Custom);
-    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16 , Legal);
-    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32 , Legal);
+    setOperationAction(ISD::CTTZ, {MVT::i16, MVT::i32}, Custom);
+    setOperationAction(ISD::CTTZ_ZERO_UNDEF, {MVT::i16, MVT::i32}, Legal);
     if (Subtarget.is64Bit()) {
       setOperationAction(ISD::CTTZ , MVT::i64 , Custom);
       setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Legal);
@@ -407,9 +388,7 @@
         Op, MVT::f32,
         (!Subtarget.useSoftFloat() && Subtarget.hasF16C()) ? Custom : Expand);
     // There's never any support for operations beyond MVT::f32.
-    setOperationAction(Op, MVT::f64, Expand);
-    setOperationAction(Op, MVT::f80, Expand);
-    setOperationAction(Op, MVT::f128, Expand);
+    setOperationAction(Op, {MVT::f64, MVT::f80, MVT::f128}, Expand);
   }

   setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
@@ -421,9 +400,7 @@
   setTruncStoreAction(MVT::f80, MVT::f16, Expand);
   setTruncStoreAction(MVT::f128, MVT::f16, Expand);

-  setOperationAction(ISD::PARITY, MVT::i8, Custom);
-  setOperationAction(ISD::PARITY, MVT::i16, Custom);
-  setOperationAction(ISD::PARITY, MVT::i32, Custom);
+  setOperationAction(ISD::PARITY, {MVT::i8, MVT::i16, MVT::i32}, Custom);
   if (Subtarget.is64Bit())
     setOperationAction(ISD::PARITY, MVT::i64, Custom);
   if (Subtarget.hasPOPCNT()) {
@@ -432,9 +409,7 @@
     // on the dest that popcntl hasn't had since Cannon Lake.
     setOperationPromotedToType(ISD::CTPOP, MVT::i16, MVT::i32);
   } else {
-    setOperationAction(ISD::CTPOP , MVT::i8 , Expand);
-    setOperationAction(ISD::CTPOP , MVT::i16 , Expand);
-    setOperationAction(ISD::CTPOP , MVT::i32 , Expand);
+    setOperationAction(ISD::CTPOP, {MVT::i8, MVT::i16, MVT::i32}, Expand);
     if (Subtarget.is64Bit())
       setOperationAction(ISD::CTPOP , MVT::i64 , Expand);
     else
@@ -523,8 +498,7 @@
     setOperationAction(ISD::EH_LABEL, MVT::Other, Expand);
   }

-  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom);
-  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i64, Custom);
+  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, {MVT::i32, MVT::i64}, Custom);

   setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
   setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);
@@ -587,8 +561,7 @@
     }

     // Lower this to MOVMSK plus an AND.
-    setOperationAction(ISD::FGETSIGN, MVT::i64, Custom);
-    setOperationAction(ISD::FGETSIGN, MVT::i32, Custom);
+    setOperationAction(ISD::FGETSIGN, {MVT::i64, MVT::i32}, Custom);
   } else if (!Subtarget.useSoftFloat() && Subtarget.hasSSE1() &&
              (UseX87 || Is64Bit)) {
@@ -661,23 +634,16 @@
     addLegalFPImmediate(APFloat(+0.0)); // xorpd
   }
   // Handle constrained floating-point operations of scalar.
-  setOperationAction(ISD::STRICT_FADD, MVT::f32, Legal);
-  setOperationAction(ISD::STRICT_FADD, MVT::f64, Legal);
-  setOperationAction(ISD::STRICT_FSUB, MVT::f32, Legal);
-  setOperationAction(ISD::STRICT_FSUB, MVT::f64, Legal);
-  setOperationAction(ISD::STRICT_FMUL, MVT::f32, Legal);
-  setOperationAction(ISD::STRICT_FMUL, MVT::f64, Legal);
-  setOperationAction(ISD::STRICT_FDIV, MVT::f32, Legal);
-  setOperationAction(ISD::STRICT_FDIV, MVT::f64, Legal);
+  setOperationAction(ISD::STRICT_FADD, {MVT::f32, MVT::f64}, Legal);
+  setOperationAction(ISD::STRICT_FSUB, {MVT::f32, MVT::f64}, Legal);
+  setOperationAction(ISD::STRICT_FMUL, {MVT::f32, MVT::f64}, Legal);
+  setOperationAction(ISD::STRICT_FDIV, {MVT::f32, MVT::f64}, Legal);
   setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f64, Legal);
-  setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Legal);
-  setOperationAction(ISD::STRICT_FP_ROUND, MVT::f64, Legal);
-  setOperationAction(ISD::STRICT_FSQRT, MVT::f32, Legal);
-  setOperationAction(ISD::STRICT_FSQRT, MVT::f64, Legal);
+  setOperationAction(ISD::STRICT_FP_ROUND, {MVT::f32, MVT::f64}, Legal);
+  setOperationAction(ISD::STRICT_FSQRT, {MVT::f32, MVT::f64}, Legal);

   // We don't support FMA.
-  setOperationAction(ISD::FMA, MVT::f64, Expand);
-  setOperationAction(ISD::FMA, MVT::f32, Expand);
+  setOperationAction(ISD::FMA, {MVT::f64, MVT::f32}, Expand);

   // f80 always uses X87.
   if (UseX87) {
@@ -787,10 +753,8 @@
   }

   // Always use a library call for pow.
-  setOperationAction(ISD::FPOW , MVT::f32 , Expand);
-  setOperationAction(ISD::FPOW , MVT::f64 , Expand);
-  setOperationAction(ISD::FPOW , MVT::f80 , Expand);
-  setOperationAction(ISD::FPOW , MVT::f128 , Expand);
+  setOperationAction(ISD::FPOW, {MVT::f32, MVT::f64, MVT::f80, MVT::f128},
+                     Expand);

   setOperationAction(ISD::FLOG, MVT::f80, Expand);
   setOperationAction(ISD::FLOG2, MVT::f80, Expand);
@@ -931,22 +895,15 @@
       setOperationAction(ISD::UREM, VT, Custom);
     }

-    setOperationAction(ISD::MUL, MVT::v2i8, Custom);
-    setOperationAction(ISD::MUL, MVT::v4i8, Custom);
-    setOperationAction(ISD::MUL, MVT::v8i8, Custom);
+    setOperationAction(ISD::MUL, {MVT::v2i8, MVT::v4i8, MVT::v8i8}, Custom);

-    setOperationAction(ISD::MUL, MVT::v16i8, Custom);
-    setOperationAction(ISD::MUL, MVT::v4i32, Custom);
-    setOperationAction(ISD::MUL, MVT::v2i64, Custom);
-    setOperationAction(ISD::MULHU, MVT::v4i32, Custom);
-    setOperationAction(ISD::MULHS, MVT::v4i32, Custom);
-    setOperationAction(ISD::MULHU, MVT::v16i8, Custom);
-    setOperationAction(ISD::MULHS, MVT::v16i8, Custom);
+    setOperationAction(ISD::MUL, {MVT::v16i8, MVT::v4i32, MVT::v2i64}, Custom);
+    setOperationAction(ISD::MULHU, {MVT::v4i32, MVT::v16i8}, Custom);
+    setOperationAction(ISD::MULHS, {MVT::v4i32, MVT::v16i8}, Custom);
     setOperationAction(ISD::MULHU, MVT::v8i16, Legal);
     setOperationAction(ISD::MULHS, MVT::v8i16, Legal);
     setOperationAction(ISD::MUL, MVT::v8i16, Legal);
-    setOperationAction(ISD::AVGCEILU, MVT::v16i8, Legal);
-    setOperationAction(ISD::AVGCEILU, MVT::v8i16, Legal);
+    setOperationAction(ISD::AVGCEILU, {MVT::v16i8, MVT::v8i16}, Legal);

     setOperationAction(ISD::SMULO, MVT::v16i8, Custom);
     setOperationAction(ISD::UMULO, MVT::v16i8, Custom);
@@ -962,21 +919,15 @@
       setOperationAction(ISD::UMIN, VT, VT == MVT::v16i8 ? Legal : Custom);
     }

-    setOperationAction(ISD::UADDSAT, MVT::v16i8, Legal);
-    setOperationAction(ISD::SADDSAT, MVT::v16i8, Legal);
-    setOperationAction(ISD::USUBSAT, MVT::v16i8, Legal);
-    setOperationAction(ISD::SSUBSAT, MVT::v16i8, Legal);
-    setOperationAction(ISD::UADDSAT, MVT::v8i16, Legal);
-    setOperationAction(ISD::SADDSAT, MVT::v8i16, Legal);
-    setOperationAction(ISD::USUBSAT, MVT::v8i16, Legal);
-    setOperationAction(ISD::SSUBSAT, MVT::v8i16, Legal);
-    setOperationAction(ISD::USUBSAT, MVT::v4i32, Custom);
-    setOperationAction(ISD::USUBSAT, MVT::v2i64, Custom);
+    setOperationAction(ISD::UADDSAT, {MVT::v16i8, MVT::v8i16}, Legal);
+    setOperationAction(ISD::SADDSAT, {MVT::v16i8, MVT::v8i16}, Legal);
+    setOperationAction(ISD::USUBSAT, {MVT::v16i8, MVT::v8i16}, Legal);
+    setOperationAction(ISD::SSUBSAT, {MVT::v16i8, MVT::v8i16}, Legal);
+    setOperationAction(ISD::USUBSAT, {MVT::v4i32, MVT::v2i64}, Custom);

-    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom);
-    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
-    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
-    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);
+    setOperationAction(ISD::INSERT_VECTOR_ELT,
+                       {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32},
+                       Custom);

     for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
       setOperationAction(ISD::SETCC, VT, Custom);
@@ -1012,11 +963,9 @@
     }

     // Custom lower v2i64 and v2f64 selects.
-    setOperationAction(ISD::SELECT, MVT::v2f64, Custom);
-    setOperationAction(ISD::SELECT, MVT::v2i64, Custom);
-    setOperationAction(ISD::SELECT, MVT::v4i32, Custom);
-    setOperationAction(ISD::SELECT, MVT::v8i16, Custom);
-    setOperationAction(ISD::SELECT, MVT::v16i8, Custom);
+    setOperationAction(
+        ISD::SELECT,
+        {MVT::v2f64, MVT::v2i64, MVT::v4i32, MVT::v8i16, MVT::v16i8}, Custom);

     setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
     setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Custom);
@@ -1038,11 +987,9 @@
     setOperationAction(ISD::SINT_TO_FP, MVT::v2i32, Custom);
     setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v2i32, Custom);

-    setOperationAction(ISD::UINT_TO_FP, MVT::v2i32, Custom);
-    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v2i32, Custom);
-
-    setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Custom);
-    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4i32, Custom);
+    setOperationAction(ISD::UINT_TO_FP, {MVT::v2i32, MVT::v4i32}, Custom);
+    setOperationAction(ISD::STRICT_UINT_TO_FP, {MVT::v2i32, MVT::v4i32},
+                       Custom);

     // Fast v2f32 UINT_TO_FP( v2i32 ) custom conversion.
     setOperationAction(ISD::SINT_TO_FP, MVT::v2f32, Custom);
@@ -1058,31 +1005,23 @@
     // We want to legalize this to an f64 load rather than an i64 load on
     // 64-bit targets and two 32-bit loads on a 32-bit target. Similar for
     // store.
-    setOperationAction(ISD::LOAD, MVT::v2i32, Custom);
-    setOperationAction(ISD::LOAD, MVT::v4i16, Custom);
-    setOperationAction(ISD::LOAD, MVT::v8i8, Custom);
-    setOperationAction(ISD::STORE, MVT::v2i32, Custom);
-    setOperationAction(ISD::STORE, MVT::v4i16, Custom);
-    setOperationAction(ISD::STORE, MVT::v8i8, Custom);
-
-    setOperationAction(ISD::BITCAST, MVT::v2i32, Custom);
-    setOperationAction(ISD::BITCAST, MVT::v4i16, Custom);
-    setOperationAction(ISD::BITCAST, MVT::v8i8, Custom);
+    setOperationAction(ISD::LOAD, {MVT::v2i32, MVT::v4i16, MVT::v8i8}, Custom);
+    setOperationAction(ISD::STORE, {MVT::v2i32, MVT::v4i16, MVT::v8i8}, Custom);
+
+    setOperationAction(ISD::BITCAST, {MVT::v2i32, MVT::v4i16, MVT::v8i8},
+                       Custom);

     if (!Subtarget.hasAVX512())
       setOperationAction(ISD::BITCAST, MVT::v16i1, Custom);

-    setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v2i64, Custom);
-    setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v4i32, Custom);
-    setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v8i16, Custom);
+    setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG,
+                       {MVT::v2i64, MVT::v4i32, MVT::v8i16}, Custom);
     setOperationAction(ISD::SIGN_EXTEND, MVT::v4i64, Custom);

-    setOperationAction(ISD::TRUNCATE, MVT::v2i8, Custom);
-    setOperationAction(ISD::TRUNCATE, MVT::v2i16, Custom);
-    setOperationAction(ISD::TRUNCATE, MVT::v2i32, Custom);
-    setOperationAction(ISD::TRUNCATE, MVT::v4i8, Custom);
-    setOperationAction(ISD::TRUNCATE, MVT::v4i16, Custom);
-    setOperationAction(ISD::TRUNCATE, MVT::v8i8, Custom);
+    setOperationAction(
+        ISD::TRUNCATE,
+        {MVT::v2i8, MVT::v2i16, MVT::v2i32, MVT::v4i8, MVT::v4i16, MVT::v8i8},
+        Custom);

     // In the customized shift lowering, the legal v4i32/v2i64 cases
     // in AVX2 will be recognized.
@@ -1105,20 +1044,14 @@
   }

   if (!Subtarget.useSoftFloat() && Subtarget.hasSSSE3()) {
-    setOperationAction(ISD::ABS, MVT::v16i8, Legal);
-    setOperationAction(ISD::ABS, MVT::v8i16, Legal);
-    setOperationAction(ISD::ABS, MVT::v4i32, Legal);
+    setOperationAction(ISD::ABS, {MVT::v16i8, MVT::v8i16, MVT::v4i32}, Legal);
     setOperationAction(ISD::BITREVERSE, MVT::v16i8, Custom);
-    setOperationAction(ISD::CTLZ, MVT::v16i8, Custom);
-    setOperationAction(ISD::CTLZ, MVT::v8i16, Custom);
-    setOperationAction(ISD::CTLZ, MVT::v4i32, Custom);
-    setOperationAction(ISD::CTLZ, MVT::v2i64, Custom);
+    setOperationAction(
+        ISD::CTLZ, {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64}, Custom);

     // These might be better off as horizontal vector ops.
-    setOperationAction(ISD::ADD, MVT::i16, Custom);
-    setOperationAction(ISD::ADD, MVT::i32, Custom);
-    setOperationAction(ISD::SUB, MVT::i16, Custom);
-    setOperationAction(ISD::SUB, MVT::i32, Custom);
+    setOperationAction(ISD::ADD, {MVT::i16, MVT::i32}, Custom);
+    setOperationAction(ISD::SUB, {MVT::i16, MVT::i32}, Custom);
   }

   if (!Subtarget.useSoftFloat() && Subtarget.hasSSE41()) {
@@ -1139,14 +1072,10 @@
       setOperationAction(ISD::FROUND, RoundedTy, Custom);
     }

-    setOperationAction(ISD::SMAX, MVT::v16i8, Legal);
-    setOperationAction(ISD::SMAX, MVT::v4i32, Legal);
-    setOperationAction(ISD::UMAX, MVT::v8i16, Legal);
-    setOperationAction(ISD::UMAX, MVT::v4i32, Legal);
-    setOperationAction(ISD::SMIN, MVT::v16i8, Legal);
-    setOperationAction(ISD::SMIN, MVT::v4i32, Legal);
-    setOperationAction(ISD::UMIN, MVT::v8i16, Legal);
-    setOperationAction(ISD::UMIN, MVT::v4i32, Legal);
+    setOperationAction(ISD::SMAX, {MVT::v16i8, MVT::v4i32}, Legal);
+    setOperationAction(ISD::SMIN, {MVT::v16i8, MVT::v4i32}, Legal);
+    setOperationAction(ISD::UMAX, {MVT::v8i16, MVT::v4i32}, Legal);
+    setOperationAction(ISD::UMIN, {MVT::v8i16, MVT::v4i32}, Legal);

     setOperationAction(ISD::UADDSAT, MVT::v4i32, Custom);
     setOperationAction(ISD::SADDSAT, MVT::v2i64, Custom);
@@ -1200,12 +1129,13 @@
     }

     // XOP can efficiently perform BITREVERSE with VPPERM.
-    for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 })
-      setOperationAction(ISD::BITREVERSE, VT, Custom);
+    setOperationAction(ISD::BITREVERSE, {MVT::i8, MVT::i16, MVT::i32, MVT::i64},
+                       Custom);

-    for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64,
-                     MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 })
-      setOperationAction(ISD::BITREVERSE, VT, Custom);
+    setOperationAction(ISD::BITREVERSE,
+                       {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64,
+                        MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64},
+                       Custom);
   }

   if (!Subtarget.useSoftFloat() && Subtarget.hasAVX()) {
@@ -1259,17 +1189,12 @@
     setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v8i32, Legal);
     setOperationAction(ISD::STRICT_FP_ROUND, MVT::v4f32, Legal);

-    setOperationAction(ISD::STRICT_FADD, MVT::v8f32, Legal);
-    setOperationAction(ISD::STRICT_FADD, MVT::v4f64, Legal);
-    setOperationAction(ISD::STRICT_FSUB, MVT::v8f32, Legal);
-    setOperationAction(ISD::STRICT_FSUB, MVT::v4f64, Legal);
-    setOperationAction(ISD::STRICT_FMUL, MVT::v8f32, Legal);
-    setOperationAction(ISD::STRICT_FMUL, MVT::v4f64, Legal);
-    setOperationAction(ISD::STRICT_FDIV, MVT::v8f32, Legal);
-    setOperationAction(ISD::STRICT_FDIV, MVT::v4f64, Legal);
-    setOperationAction(ISD::STRICT_FP_EXTEND, MVT::v4f64, Legal);
-    setOperationAction(ISD::STRICT_FSQRT, MVT::v8f32, Legal);
-    setOperationAction(ISD::STRICT_FSQRT, MVT::v4f64, Legal);
+    setOperationAction(ISD::STRICT_FADD, {MVT::v8f32, MVT::v4f64}, Legal);
+    setOperationAction(ISD::STRICT_FSUB, {MVT::v8f32, MVT::v4f64}, Legal);
+    setOperationAction(ISD::STRICT_FMUL, {MVT::v8f32, MVT::v4f64}, Legal);
+    setOperationAction(ISD::STRICT_FDIV, {MVT::v8f32, MVT::v4f64}, Legal);
+    setOperationAction(ISD::STRICT_FP_EXTEND, MVT::v4f64, Legal);
+    setOperationAction(ISD::STRICT_FSQRT, {MVT::v8f32, MVT::v4f64}, Legal);

     if (!Subtarget.hasAVX512())
       setOperationAction(ISD::BITCAST, MVT::v32i1, Custom);
@@ -1288,17 +1213,13 @@
     }

     // These types need custom splitting if their input is a 128-bit vector.
-    setOperationAction(ISD::SIGN_EXTEND, MVT::v8i64, Custom);
-    setOperationAction(ISD::SIGN_EXTEND, MVT::v16i32, Custom);
-    setOperationAction(ISD::ZERO_EXTEND, MVT::v8i64, Custom);
-    setOperationAction(ISD::ZERO_EXTEND, MVT::v16i32, Custom);
-
-    setOperationAction(ISD::SELECT, MVT::v4f64, Custom);
-    setOperationAction(ISD::SELECT, MVT::v4i64, Custom);
-    setOperationAction(ISD::SELECT, MVT::v8i32, Custom);
-    setOperationAction(ISD::SELECT, MVT::v16i16, Custom);
-    setOperationAction(ISD::SELECT, MVT::v32i8, Custom);
-    setOperationAction(ISD::SELECT, MVT::v8f32, Custom);
+    setOperationAction(ISD::SIGN_EXTEND, {MVT::v8i64, MVT::v16i32}, Custom);
+    setOperationAction(ISD::ZERO_EXTEND, {MVT::v8i64, MVT::v16i32}, Custom);
+
+    setOperationAction(ISD::SELECT,
+                       {MVT::v4f64, MVT::v4i64, MVT::v8i32, MVT::v16i16,
+                        MVT::v32i8, MVT::v8f32},
+                       Custom);

     for (auto VT : { MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
       setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
@@ -1306,10 +1227,9 @@
       setOperationAction(ISD::ANY_EXTEND, VT, Custom);
     }

-    setOperationAction(ISD::TRUNCATE, MVT::v16i8, Custom);
-    setOperationAction(ISD::TRUNCATE, MVT::v8i16, Custom);
-    setOperationAction(ISD::TRUNCATE, MVT::v4i32, Custom);
-    setOperationAction(ISD::BITREVERSE, MVT::v32i8, Custom);
+    setOperationAction(ISD::TRUNCATE, {MVT::v16i8, MVT::v8i16, MVT::v4i32},
+                       Custom);
+    setOperationAction(ISD::BITREVERSE, MVT::v32i8, Custom);

     for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
       setOperationAction(ISD::SETCC, VT, Custom);
@@ -1337,10 +1257,9 @@
       setOperationAction(ISD::SUB, VT, HasInt256 ? Legal : Custom);
     }

-    setOperationAction(ISD::MUL, MVT::v4i64, Custom);
-    setOperationAction(ISD::MUL, MVT::v8i32, HasInt256 ? Legal : Custom);
-    setOperationAction(ISD::MUL, MVT::v16i16, HasInt256 ? Legal : Custom);
-    setOperationAction(ISD::MUL, MVT::v32i8, Custom);
+    setOperationAction(ISD::MUL, {MVT::v4i64, MVT::v32i8}, Custom);
+    setOperationAction(ISD::MUL, {MVT::v8i32, MVT::v16i16},
+                       HasInt256 ? Legal : Custom);

     setOperationAction(ISD::MULHU, MVT::v8i32, Custom);
     setOperationAction(ISD::MULHS, MVT::v8i32, Custom);
@@ -1360,18 +1279,16 @@
     setOperationAction(ISD::SMIN, MVT::v4i64, Custom);
     setOperationAction(ISD::UMIN, MVT::v4i64, Custom);

-    setOperationAction(ISD::UADDSAT, MVT::v32i8, HasInt256 ? Legal : Custom);
-    setOperationAction(ISD::SADDSAT, MVT::v32i8, HasInt256 ? Legal : Custom);
-    setOperationAction(ISD::USUBSAT, MVT::v32i8, HasInt256 ? Legal : Custom);
-    setOperationAction(ISD::SSUBSAT, MVT::v32i8, HasInt256 ? Legal : Custom);
-    setOperationAction(ISD::UADDSAT, MVT::v16i16, HasInt256 ? Legal : Custom);
-    setOperationAction(ISD::SADDSAT, MVT::v16i16, HasInt256 ? Legal : Custom);
-    setOperationAction(ISD::USUBSAT, MVT::v16i16, HasInt256 ? Legal : Custom);
-    setOperationAction(ISD::SSUBSAT, MVT::v16i16, HasInt256 ? Legal : Custom);
-    setOperationAction(ISD::UADDSAT, MVT::v8i32, Custom);
-    setOperationAction(ISD::USUBSAT, MVT::v8i32, Custom);
-    setOperationAction(ISD::UADDSAT, MVT::v4i64, Custom);
-    setOperationAction(ISD::USUBSAT, MVT::v4i64, Custom);
+    setOperationAction(ISD::UADDSAT, {MVT::v32i8, MVT::v16i16},
+                       HasInt256 ? Legal : Custom);
+    setOperationAction(ISD::SADDSAT, {MVT::v32i8, MVT::v16i16},
+                       HasInt256 ? Legal : Custom);
+    setOperationAction(ISD::USUBSAT, {MVT::v32i8, MVT::v16i16},
+                       HasInt256 ? Legal : Custom);
+    setOperationAction(ISD::SSUBSAT, {MVT::v32i8, MVT::v16i16},
+                       HasInt256 ? Legal : Custom);
+    setOperationAction(ISD::UADDSAT, {MVT::v8i32, MVT::v4i64}, Custom);
+    setOperationAction(ISD::USUBSAT, {MVT::v8i32, MVT::v4i64}, Custom);

     for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32 }) {
       setOperationAction(ISD::ABS, VT, HasInt256 ? Legal : Custom);
@@ -1411,10 +1328,10 @@
     // Extract subvector is special because the value type
     // (result) is 128-bit but the source is 256-bit wide.
-    for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64,
-                     MVT::v4f32, MVT::v2f64 }) {
-      setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
-    }
+    setOperationAction(ISD::EXTRACT_SUBVECTOR,
+                       {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64,
+                        MVT::v4f32, MVT::v2f64},
+                       Legal);

     // Custom lower several nodes for 256-bit types.
     for (MVT VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64,
                     MVT::v4f32, MVT::v2f64 }) {
@@ -1434,8 +1351,7 @@
       setOperationAction(ISD::VSELECT, MVT::v32i8, Legal);

     // Custom legalize 2x32 to get a little better code.
-    setOperationAction(ISD::MGATHER, MVT::v2f32, Custom);
-    setOperationAction(ISD::MGATHER, MVT::v2i32, Custom);
+    setOperationAction(ISD::MGATHER, {MVT::v2f32, MVT::v2i32}, Custom);

     for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64,
                      MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64 })
@@ -1472,15 +1388,11 @@
     // There is no byte sized k-register load or store without AVX512DQ.
     if (!Subtarget.hasDQI()) {
-      setOperationAction(ISD::LOAD, MVT::v1i1, Custom);
-      setOperationAction(ISD::LOAD, MVT::v2i1, Custom);
-      setOperationAction(ISD::LOAD, MVT::v4i1, Custom);
-      setOperationAction(ISD::LOAD, MVT::v8i1, Custom);
+      setOperationAction(ISD::LOAD,
+                         {MVT::v1i1, MVT::v2i1, MVT::v4i1, MVT::v8i1}, Custom);

-      setOperationAction(ISD::STORE, MVT::v1i1, Custom);
-      setOperationAction(ISD::STORE, MVT::v2i1, Custom);
-      setOperationAction(ISD::STORE, MVT::v4i1, Custom);
-      setOperationAction(ISD::STORE, MVT::v8i1, Custom);
+      setOperationAction(ISD::STORE,
+                         {MVT::v1i1, MVT::v2i1, MVT::v4i1, MVT::v8i1}, Custom);
     }

     // Extends of v16i1/v8i1/v4i1/v2i1 to 128-bit vectors.
@@ -1490,8 +1402,9 @@
       setOperationAction(ISD::ANY_EXTEND, VT, Custom);
     }

-    for (auto VT : { MVT::v1i1, MVT::v2i1, MVT::v4i1, MVT::v8i1, MVT::v16i1 })
-      setOperationAction(ISD::VSELECT, VT, Expand);
+    setOperationAction(ISD::VSELECT,
+                       {MVT::v1i1, MVT::v2i1, MVT::v4i1, MVT::v8i1, MVT::v16i1},
+                       Expand);

     for (auto VT : { MVT::v2i1, MVT::v4i1, MVT::v8i1, MVT::v16i1 }) {
       setOperationAction(ISD::SETCC, VT, Custom);
@@ -1508,8 +1421,8 @@
       setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
     }

-    for (auto VT : { MVT::v1i1, MVT::v2i1, MVT::v4i1, MVT::v8i1 })
-      setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
+    setOperationAction(ISD::EXTRACT_SUBVECTOR,
+                       {MVT::v1i1, MVT::v2i1, MVT::v4i1, MVT::v8i1}, Custom);
   }

   // This block controls legalization for 512-bit operations with 32/64 bit
@@ -1558,16 +1471,11 @@
     setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v16i32, Legal);
     setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v16i32, Legal);

-    setOperationAction(ISD::STRICT_FADD, MVT::v16f32, Legal);
-    setOperationAction(ISD::STRICT_FADD, MVT::v8f64, Legal);
-    setOperationAction(ISD::STRICT_FSUB, MVT::v16f32, Legal);
-    setOperationAction(ISD::STRICT_FSUB, MVT::v8f64, Legal);
-    setOperationAction(ISD::STRICT_FMUL, MVT::v16f32, Legal);
-    setOperationAction(ISD::STRICT_FMUL, MVT::v8f64, Legal);
-    setOperationAction(ISD::STRICT_FDIV, MVT::v16f32, Legal);
-    setOperationAction(ISD::STRICT_FDIV, MVT::v8f64, Legal);
-    setOperationAction(ISD::STRICT_FSQRT, MVT::v16f32, Legal);
-    setOperationAction(ISD::STRICT_FSQRT, MVT::v8f64, Legal);
+    setOperationAction(ISD::STRICT_FADD, {MVT::v16f32, MVT::v8f64}, Legal);
+    setOperationAction(ISD::STRICT_FSUB, {MVT::v16f32, MVT::v8f64}, Legal);
+    setOperationAction(ISD::STRICT_FMUL, {MVT::v16f32, MVT::v8f64}, Legal);
+    setOperationAction(ISD::STRICT_FDIV, {MVT::v16f32, MVT::v8f64}, Legal);
+    setOperationAction(ISD::STRICT_FSQRT, {MVT::v16f32, MVT::v8f64}, Legal);
     setOperationAction(ISD::STRICT_FP_EXTEND, MVT::v8f64, Legal);
     setOperationAction(ISD::STRICT_FP_ROUND, MVT::v8f32, Legal);
@@ -1590,19 +1498,15 @@
       }
     }

-    setOperationAction(ISD::TRUNCATE, MVT::v8i32, Legal);
-    setOperationAction(ISD::TRUNCATE, MVT::v16i16, Legal);
+    setOperationAction(ISD::TRUNCATE, {MVT::v8i32, MVT::v16i16}, Legal);
     setOperationAction(ISD::TRUNCATE, MVT::v32i8, HasBWI ? Legal : Custom);
     setOperationAction(ISD::TRUNCATE, MVT::v16i64, Custom);
-    setOperationAction(ISD::ZERO_EXTEND, MVT::v32i16, Custom);
-    setOperationAction(ISD::ZERO_EXTEND, MVT::v16i32, Custom);
-    setOperationAction(ISD::ZERO_EXTEND, MVT::v8i64, Custom);
-    setOperationAction(ISD::ANY_EXTEND, MVT::v32i16, Custom);
-    setOperationAction(ISD::ANY_EXTEND, MVT::v16i32, Custom);
-    setOperationAction(ISD::ANY_EXTEND, MVT::v8i64, Custom);
-    setOperationAction(ISD::SIGN_EXTEND, MVT::v32i16, Custom);
-    setOperationAction(ISD::SIGN_EXTEND, MVT::v16i32, Custom);
-    setOperationAction(ISD::SIGN_EXTEND, MVT::v8i64, Custom);
+    setOperationAction(ISD::ZERO_EXTEND, {MVT::v32i16, MVT::v16i32, MVT::v8i64},
+                       Custom);
+    setOperationAction(ISD::ANY_EXTEND, {MVT::v32i16, MVT::v16i32, MVT::v8i64},
+                       Custom);
+    setOperationAction(ISD::SIGN_EXTEND, {MVT::v32i16, MVT::v16i32, MVT::v8i64},
+                       Custom);

     if (HasBWI) {
       // Extends from v64i1 masks to 512-bit vectors.
@@ -1633,10 +1537,10 @@
       setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Custom);
     }

-    setOperationAction(ISD::ADD, MVT::v32i16, HasBWI ? Legal : Custom);
-    setOperationAction(ISD::SUB, MVT::v32i16, HasBWI ? Legal : Custom);
-    setOperationAction(ISD::ADD, MVT::v64i8, HasBWI ? Legal : Custom);
-    setOperationAction(ISD::SUB, MVT::v64i8, HasBWI ? Legal : Custom);
+    setOperationAction(ISD::ADD, {MVT::v32i16, MVT::v64i8},
+                       HasBWI ? Legal : Custom);
+    setOperationAction(ISD::SUB, {MVT::v32i16, MVT::v64i8},
+                       HasBWI ? Legal : Custom);

     setOperationAction(ISD::MUL, MVT::v8i64, Custom);
     setOperationAction(ISD::MUL, MVT::v16i32, Legal);
@@ -1649,8 +1553,8 @@
     setOperationAction(ISD::MULHU, MVT::v32i16, HasBWI ? Legal : Custom);
     setOperationAction(ISD::MULHS, MVT::v64i8, Custom);
     setOperationAction(ISD::MULHU, MVT::v64i8, Custom);
-    setOperationAction(ISD::AVGCEILU, MVT::v32i16, HasBWI ? Legal : Custom);
-    setOperationAction(ISD::AVGCEILU, MVT::v64i8, HasBWI ? Legal : Custom);
+    setOperationAction(ISD::AVGCEILU, {MVT::v32i16, MVT::v64i8},
+                       HasBWI ? Legal : Custom);

     setOperationAction(ISD::SMULO, MVT::v64i8, Custom);
     setOperationAction(ISD::UMULO, MVT::v64i8, Custom);
@@ -1695,12 +1599,10 @@
       setOperationAction(ISD::SSUBSAT, VT, HasBWI ? Legal : Custom);
     }

-    setOperationAction(ISD::FSHL, MVT::v64i8, Custom);
-    setOperationAction(ISD::FSHR, MVT::v64i8, Custom);
-    setOperationAction(ISD::FSHL, MVT::v32i16, Custom);
-    setOperationAction(ISD::FSHR, MVT::v32i16, Custom);
-    setOperationAction(ISD::FSHL, MVT::v16i32, Custom);
-    setOperationAction(ISD::FSHR, MVT::v16i32, Custom);
+    setOperationAction(ISD::FSHL, {MVT::v64i8, MVT::v32i16, MVT::v16i32},
+                       Custom);
+    setOperationAction(ISD::FSHR, {MVT::v64i8, MVT::v32i16, MVT::v16i32},
+                       Custom);

     if (Subtarget.hasDQI()) {
       setOperationAction(ISD::SINT_TO_FP, MVT::v8i64, Legal);
@@ -1753,15 +1655,13 @@
       setOperationAction(ISD::MGATHER, VT, Custom);
       setOperationAction(ISD::MSCATTER, VT, Custom);
     }
-    if (HasBWI) {
+    if (HasBWI)
       for (auto VT : { MVT::v64i8, MVT::v32i16 }) {
         setOperationAction(ISD::MLOAD, VT, Legal);
         setOperationAction(ISD::MSTORE, VT, Legal);
       }
-    } else {
-      setOperationAction(ISD::STORE, MVT::v32i16, Custom);
-      setOperationAction(ISD::STORE, MVT::v64i8, Custom);
-    }
+    else
+      setOperationAction(ISD::STORE, {MVT::v32i16, MVT::v64i8}, Custom);

     if (Subtarget.hasVBMI2()) {
       for (auto VT : { MVT::v8i16, MVT::v4i32, MVT::v2i64,
@@ -1772,9 +1672,8 @@
       }

       setOperationAction(ISD::ROTL, MVT::v32i16, Custom);
-      setOperationAction(ISD::ROTR, MVT::v8i16, Custom);
-      setOperationAction(ISD::ROTR, MVT::v16i16, Custom);
-      setOperationAction(ISD::ROTR, MVT::v32i16, Custom);
+      setOperationAction(ISD::ROTR, {MVT::v8i16, MVT::v16i16, MVT::v32i16},
+                         Custom);
     }
   }// useAVX512Regs
@@ -1830,12 +1729,12 @@
     }

     // Custom legalize 2x32 to get a little better code.
-    setOperationAction(ISD::MSCATTER, MVT::v2f32, Custom);
-    setOperationAction(ISD::MSCATTER, MVT::v2i32, Custom);
+    setOperationAction(ISD::MSCATTER, {MVT::v2f32, MVT::v2i32}, Custom);

-    for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64,
-                     MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64 })
-      setOperationAction(ISD::MSCATTER, VT, Custom);
+    setOperationAction(ISD::MSCATTER,
+                       {MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64,
+                        MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64},
+                       Custom);

     if (Subtarget.hasDQI()) {
       for (auto VT : { MVT::v2i64, MVT::v4i64 }) {
@@ -1859,16 +1758,13 @@
       }
     }

-    if (Subtarget.hasCDI()) {
-      for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64 }) {
-        setOperationAction(ISD::CTLZ, VT, Legal);
-      }
-    } // Subtarget.hasCDI()
+    if (Subtarget.hasCDI())
+      setOperationAction(
+          ISD::CTLZ, {MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64}, Legal);

-    if (Subtarget.hasVPOPCNTDQ()) {
-      for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64 })
-        setOperationAction(ISD::CTPOP, VT, Legal);
-    }
+    if (Subtarget.hasVPOPCNTDQ())
+      setOperationAction(
+          ISD::CTPOP, {MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64}, Legal);
   }

   // This block control legalization of v32i1/v64i1 which are available with
@@ -2027,14 +1923,12 @@
     setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8f16, Legal);
     setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16f16, Custom);

-    setOperationAction(ISD::SINT_TO_FP, MVT::v16i16, Legal);
-    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v16i16, Legal);
-    setOperationAction(ISD::SINT_TO_FP, MVT::v8i16, Legal);
-    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v8i16, Legal);
-    setOperationAction(ISD::UINT_TO_FP, MVT::v16i16, Legal);
-    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v16i16, Legal);
-    setOperationAction(ISD::UINT_TO_FP, MVT::v8i16, Legal);
-    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v8i16, Legal);
+    setOperationAction(ISD::SINT_TO_FP, {MVT::v16i16, MVT::v8i16}, Legal);
+    setOperationAction(ISD::STRICT_SINT_TO_FP, {MVT::v16i16, MVT::v8i16},
+                       Legal);
+    setOperationAction(ISD::UINT_TO_FP, {MVT::v16i16, MVT::v8i16}, Legal);
+    setOperationAction(ISD::STRICT_UINT_TO_FP, {MVT::v16i16, MVT::v8i16},
+                       Legal);

     setOperationAction(ISD::FP_TO_SINT, MVT::v8i16, Custom);
     setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v8i16, Custom);
@@ -2044,8 +1938,8 @@
     setOperationAction(ISD::STRICT_FP_EXTEND, MVT::v8f32, Legal);

     // INSERT_VECTOR_ELT v8f16 extended to VECTOR_SHUFFLE
-    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8f16, Custom);
-    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16f16, Custom);
+    setOperationAction(ISD::INSERT_VECTOR_ELT, {MVT::v8f16, MVT::v16f16},
+                       Custom);

     setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v8f16, Legal);
     setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v16f16, Legal);
@@ -2085,38 +1979,31 @@
     if (Subtarget.hasFP16()) {
       // vcvttph2[u]dq v4f16 -> v4i32/64, v2f16 -> v2i32/64
-      setOperationAction(ISD::FP_TO_SINT, MVT::v2f16, Custom);
-      setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v2f16, Custom);
-      setOperationAction(ISD::FP_TO_UINT, MVT::v2f16, Custom);
-      setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v2f16, Custom);
-      setOperationAction(ISD::FP_TO_SINT, MVT::v4f16, Custom);
-      setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v4f16, Custom);
-      setOperationAction(ISD::FP_TO_UINT, MVT::v4f16, Custom);
-      setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v4f16, Custom);
+      setOperationAction(ISD::FP_TO_SINT, {MVT::v2f16, MVT::v4f16}, Custom);
+      setOperationAction(ISD::STRICT_FP_TO_SINT, {MVT::v2f16, MVT::v4f16},
+                         Custom);
+      setOperationAction(ISD::FP_TO_UINT, {MVT::v2f16, MVT::v4f16}, Custom);
+      setOperationAction(ISD::STRICT_FP_TO_UINT, {MVT::v2f16, MVT::v4f16},
+                         Custom);
       // vcvt[u]dq2ph v4i32/64 -> v4f16, v2i32/64 -> v2f16
-      setOperationAction(ISD::SINT_TO_FP, MVT::v2f16, Custom);
-      setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v2f16, Custom);
-      setOperationAction(ISD::UINT_TO_FP, MVT::v2f16, Custom);
-      setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v2f16, Custom);
-      setOperationAction(ISD::SINT_TO_FP, MVT::v4f16, Custom);
-      setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v4f16, Custom);
-      setOperationAction(ISD::UINT_TO_FP, MVT::v4f16, Custom);
-      setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4f16, Custom);
+      setOperationAction(ISD::SINT_TO_FP, {MVT::v2f16, MVT::v4f16}, Custom);
+      setOperationAction(ISD::STRICT_SINT_TO_FP, {MVT::v2f16, MVT::v4f16},
+                         Custom);
+      setOperationAction(ISD::UINT_TO_FP, {MVT::v2f16, MVT::v4f16}, Custom);
+      setOperationAction(ISD::STRICT_UINT_TO_FP, {MVT::v2f16, MVT::v4f16},
+                         Custom);
       // vcvtps2phx v4f32 -> v4f16, v2f32 -> v2f16
-      setOperationAction(ISD::FP_ROUND, MVT::v2f16, Custom);
-      setOperationAction(ISD::STRICT_FP_ROUND, MVT::v2f16, Custom);
-      setOperationAction(ISD::FP_ROUND, MVT::v4f16, Custom);
-      setOperationAction(ISD::STRICT_FP_ROUND, MVT::v4f16, Custom);
+      setOperationAction(ISD::FP_ROUND, {MVT::v2f16, MVT::v4f16}, Custom);
+      setOperationAction(ISD::STRICT_FP_ROUND, {MVT::v2f16, MVT::v4f16},
+                         Custom);
       // vcvtph2psx v4f16 -> v4f32, v2f16 -> v2f32
-      setOperationAction(ISD::FP_EXTEND, MVT::v2f16, Custom);
-      setOperationAction(ISD::STRICT_FP_EXTEND, MVT::v2f16, Custom);
-      setOperationAction(ISD::FP_EXTEND, MVT::v4f16, Custom);
-      setOperationAction(ISD::STRICT_FP_EXTEND, MVT::v4f16, Custom);
+      setOperationAction(ISD::FP_EXTEND, {MVT::v2f16, MVT::v4f16}, Custom);
+      setOperationAction(ISD::STRICT_FP_EXTEND, {MVT::v2f16, MVT::v4f16},
+                         Custom);
     }

-    setOperationAction(ISD::TRUNCATE, MVT::v16i32, Custom);
-    setOperationAction(ISD::TRUNCATE, MVT::v8i64, Custom);
-    setOperationAction(ISD::TRUNCATE, MVT::v16i64, Custom);
+    setOperationAction(ISD::TRUNCATE, {MVT::v16i32, MVT::v8i64, MVT::v16i64},
+                       Custom);
   }

   if (Subtarget.hasAMXTILE()) {
@@ -2171,8 +2058,7 @@
   // Combine sin / cos into _sincos_stret if it is available.
   if (getLibcallName(RTLIB::SINCOS_STRET_F32) != nullptr &&
       getLibcallName(RTLIB::SINCOS_STRET_F64) != nullptr) {
-    setOperationAction(ISD::FSINCOS, MVT::f64, Custom);
-    setOperationAction(ISD::FSINCOS, MVT::f32, Custom);
+    setOperationAction(ISD::FSINCOS, {MVT::f64, MVT::f32}, Custom);
   }

   if (Subtarget.isTargetWin64()) {