Index: llvm/lib/Target/X86/X86ISelLowering.h
===================================================================
--- llvm/lib/Target/X86/X86ISelLowering.h
+++ llvm/lib/Target/X86/X86ISelLowering.h
@@ -1315,7 +1315,8 @@
 
     unsigned getAddressSpace(void) const;
 
-    SDValue FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG, bool isSigned) const;
+    SDValue FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG, bool isSigned,
+                            SDValue &Chain) const;
 
     SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
     SDValue LowerVSELECT(SDValue Op, SelectionDAG &DAG) const;
Index: llvm/lib/Target/X86/X86ISelLowering.cpp
===================================================================
--- llvm/lib/Target/X86/X86ISelLowering.cpp
+++ llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -249,19 +249,27 @@
     // Promote i8 FP_TO_SINT to larger FP_TO_SINT's, as X86 doesn't have
     // this operation.
-    setOperationAction(ISD::FP_TO_SINT, MVT::i8, Promote);
-    setOperationAction(ISD::FP_TO_SINT, MVT::i16, Custom);
-    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
+    setOperationAction(ISD::FP_TO_SINT, MVT::i8, Promote);
+    // FIXME: setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i8, Promote);
+    setOperationAction(ISD::FP_TO_SINT, MVT::i16, Custom);
+    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i16, Custom);
+    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
+    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
 
     // In 32-bit mode these are custom lowered. In 64-bit mode F32 and F64
     // are Legal, f80 is custom lowered.
-    setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
+    setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
+    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i64, Custom);
 
     // Handle FP_TO_UINT by promoting the destination to a larger signed
     // conversion.
-    setOperationAction(ISD::FP_TO_UINT, MVT::i8, Promote);
-    setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);
-    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
-    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
+    setOperationAction(ISD::FP_TO_UINT, MVT::i8, Promote);
+    // FIXME: setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i8, Promote);
+    setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);
+    // FIXME: setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i16, Promote);
+    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
+    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
+    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
+    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i64, Custom);
   }
 
   // TODO: when we have SSE, these could be more efficient, by using movd/movq.
@@ -18899,11 +18907,13 @@
 // result.
 SDValue
 X86TargetLowering::FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG,
-                                   bool IsSigned) const {
+                                   bool IsSigned, SDValue &Chain) const {
+  bool IsStrict = Op->isStrictFPOpcode();
   SDLoc DL(Op);
 
   EVT DstTy = Op.getValueType();
-  EVT TheVT = Op.getOperand(0).getValueType();
+  SDValue Value = Op.getOperand(IsStrict ? 1 : 0);
+  EVT TheVT = Value.getValueType();
   auto PtrVT = getPointerTy(DAG.getDataLayout());
 
   if (TheVT != MVT::f32 && TheVT != MVT::f64 && TheVT != MVT::f80) {
@@ -18935,8 +18945,11 @@
   int SSFI = MF.getFrameInfo().CreateStackObject(MemSize, MemSize, false);
   SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
 
-  SDValue Chain = DAG.getEntryNode();
-  SDValue Value = Op.getOperand(0);
+  if (IsStrict)
+    Chain = Op.getOperand(0);
+  else
+    Chain = DAG.getEntryNode();
+
   SDValue Adjust; // 0x0 or 0x80000000, for result sign bit adjustment.
 
   if (UnsignedFixup) {
@@ -18980,7 +18993,14 @@
                        DAG.getConstant(0, DL, MVT::i64),
                        DAG.getConstant(APInt::getSignMask(64), DL, MVT::i64));
 
-    SDValue Sub = DAG.getNode(ISD::FSUB, DL, TheVT, Value, ThreshVal);
+    SDValue Sub;
+    if (IsStrict) {
+      Sub = DAG.getNode(ISD::STRICT_FSUB, DL, {TheVT, MVT::Other},
+                        {Chain, Value, ThreshVal});
+      Chain = Sub.getValue(1);
+    } else
+      Sub = DAG.getNode(ISD::FSUB, DL, TheVT, Value, ThreshVal);
+
     Cmp = DAG.getSetCC(DL, getSetCCResultType(DAG.getDataLayout(),
                                               *DAG.getContext(), TheVT),
                        Value, ThreshVal, ISD::SETLT);
@@ -19014,6 +19034,7 @@
                                          Ops, DstTy, MMO);
   SDValue Res = DAG.getLoad(Op.getValueType(), SDLoc(Op), FIST, StackSlot,
                             MPI);
+  Chain = Res.getValue(1);
 
   // If we need an unsigned fixup, XOR the result with adjust.
   if (UnsignedFixup)
@@ -19509,9 +19530,11 @@
 }
 
 SDValue X86TargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const {
-  bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT;
+  bool IsStrict = Op->isStrictFPOpcode();
+  bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT ||
+                  Op.getOpcode() == ISD::STRICT_FP_TO_SINT;
   MVT VT = Op.getSimpleValueType();
-  SDValue Src = Op.getOperand(0);
+  SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
   MVT SrcVT = Src.getSimpleValueType();
   SDLoc dl(Op);
 
@@ -19522,6 +19545,8 @@
     else
       LC = RTLIB::getFPTOUINT(SrcVT, VT);
 
+    // FIXME: Strict fp!
+    assert(!IsStrict && "Unhandled strict operation!");
     MakeLibCallOptions CallOptions;
     return makeLibCall(DAG, LC, VT, Src, CallOptions, SDLoc(Op)).first;
   }
@@ -19540,6 +19565,8 @@
                         DAG.getUNDEF(MVT::v8f64), Src,
                         DAG.getIntPtrConstant(0, dl));
     }
+    // FIXME: Strict fp!
+    assert(!IsStrict && "Unhandled strict operation!");
     SDValue Res = DAG.getNode(Opc, dl, ResVT, Src);
     Res = DAG.getNode(ISD::TRUNCATE, dl, TruncVT, Res);
     return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2i1, Res,
@@ -19548,6 +19575,8 @@
 
   assert(Subtarget.hasDQI() && Subtarget.hasVLX() && "Requires AVX512DQVL!");
   if (VT == MVT::v2i64 && SrcVT == MVT::v2f32) {
+    // FIXME: Strict fp!
+    assert(!IsStrict && "Unhandled strict operation!");
     return DAG.getNode(IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI, dl, VT,
                        DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32, Src,
                                    DAG.getUNDEF(MVT::v2f32)));
@@ -19573,8 +19602,18 @@
 
   // Promote i32 to i64 and use a signed operation on 64-bit targets.
   if (Subtarget.is64Bit()) {
-    SDValue Res = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i64, Src);
-    return DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
+    SDValue Res, Chain;
+    if (IsStrict) {
+      Res = DAG.getNode(ISD::STRICT_FP_TO_SINT, dl, {MVT::i64, MVT::Other},
+                        {Op.getOperand(0), Src});
+      Chain = Res.getValue(1);
+    } else
+      Res = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i64, Src);
+
+    Res = DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
+    if (IsStrict)
+      return DAG.getMergeValues({Res, Chain}, dl);
+    return Res;
   }
 
   // Use default expansion for SSE1/2 targets without SSE3. With SSE3 we can
@@ -19586,8 +19625,18 @@
   // Promote i16 to i32 if we can use a SSE operation.
   if (VT == MVT::i16 && UseSSEReg) {
     assert(IsSigned && "Expected i16 FP_TO_UINT to have been promoted!");
-    SDValue Res = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Src);
-    return DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
+    SDValue Res, Chain;
+    if (IsStrict) {
+      Res = DAG.getNode(ISD::STRICT_FP_TO_SINT, dl, {MVT::i32, MVT::Other},
+                        {Op.getOperand(0), Src});
+      Chain = Res.getValue(1);
+    } else
+      Res = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Src);
+
+    Res = DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
+    if (IsStrict)
+      return DAG.getMergeValues({Res, Chain}, dl);
+    return Res;
   }
 
   // If this is a FP_TO_SINT using SSEReg we're done.
@@ -19595,8 +19644,12 @@
     return Op;
 
   // Fall back to X87.
-  if (SDValue V = FP_TO_INTHelper(Op, DAG, IsSigned))
+  SDValue Chain;
+  if (SDValue V = FP_TO_INTHelper(Op, DAG, IsSigned, Chain)) {
+    if (IsStrict)
+      return DAG.getMergeValues({V, Chain}, dl);
     return V;
+  }
 
   llvm_unreachable("Expected FP_TO_INTHelper to handle all remaining cases.");
 }
@@ -27716,7 +27769,9 @@
   case ISD::SIGN_EXTEND_VECTOR_INREG:
     return LowerEXTEND_VECTOR_INREG(Op, Subtarget, DAG);
   case ISD::FP_TO_SINT:
-  case ISD::FP_TO_UINT:         return LowerFP_TO_INT(Op, DAG);
+  case ISD::STRICT_FP_TO_SINT:
+  case ISD::FP_TO_UINT:
+  case ISD::STRICT_FP_TO_UINT:  return LowerFP_TO_INT(Op, DAG);
   case ISD::FP_EXTEND:          return LowerFP_EXTEND(Op, DAG);
   case ISD::FP_ROUND:           return LowerFP_ROUND(Op, DAG);
   case ISD::STRICT_FP_ROUND:    return LowerSTRICT_FP_ROUND(Op, DAG);
@@ -28130,10 +28185,14 @@
     return;
   }
   case ISD::FP_TO_SINT:
-  case ISD::FP_TO_UINT: {
-    bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT;
+  case ISD::STRICT_FP_TO_SINT:
+  case ISD::FP_TO_UINT:
+  case ISD::STRICT_FP_TO_UINT: {
+    bool IsStrict = N->isStrictFPOpcode();
+    bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT ||
+                    N->getOpcode() == ISD::STRICT_FP_TO_SINT;
     EVT VT = N->getValueType(0);
-    SDValue Src = N->getOperand(0);
+    SDValue Src = N->getOperand(IsStrict ? 1 : 0);
     EVT SrcVT = Src.getValueType();
 
     if (VT.isVector() && VT.getScalarSizeInBits() < 32) {
@@ -28144,13 +28203,20 @@
       unsigned NewEltWidth = std::min(128 / VT.getVectorNumElements(), 32U);
       MVT PromoteVT = MVT::getVectorVT(MVT::getIntegerVT(NewEltWidth),
                                        VT.getVectorNumElements());
-      SDValue Res = DAG.getNode(ISD::FP_TO_SINT, dl, PromoteVT, Src);
+      SDValue Res;
+      SDValue Chain;
+      if (IsStrict) {
+        SDVTList Tys = DAG.getVTList(PromoteVT, MVT::Other);
+        Res = DAG.getNode(ISD::STRICT_FP_TO_SINT, dl, Tys,
+                          {N->getOperand(0), Src});
+        Chain = Res.getValue(1);
+      } else
+        Res = DAG.getNode(ISD::FP_TO_SINT, dl, PromoteVT, Src);
 
       // Preserve what we know about the size of the original result. Except
       // when the result is v2i32 since we can't widen the assert.
       if (PromoteVT != MVT::v2i32)
-        Res = DAG.getNode(N->getOpcode() == ISD::FP_TO_UINT ? ISD::AssertZext
-                                                            : ISD::AssertSext,
+        Res = DAG.getNode(!IsSigned ? ISD::AssertZext : ISD::AssertSext,
                           dl, PromoteVT, Res,
                           DAG.getValueType(VT.getVectorElementType()));
@@ -28165,6 +28231,8 @@
       ConcatOps[0] = Res;
       Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, ConcatVT, ConcatOps);
       Results.push_back(Res);
+      if (IsStrict)
+        Results.push_back(Chain);
       return;
     }
 
@@ -28183,6 +28251,8 @@
         // legalization to v8i32<-v8f64.
         return;
       }
+      // FIXME: Strict fp.
+      assert(!IsStrict && "Missing STRICT_FP_TO_SINT support!");
       unsigned Opc = IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI;
       SDValue Res = DAG.getNode(Opc, dl, MVT::v4i32, Src);
       Results.push_back(Res);
@@ -28210,14 +28280,26 @@
       SDValue Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VecInVT,
                                 DAG.getConstantFP(0.0, dl, VecInVT), Src,
                                 ZeroIdx);
-      Res = DAG.getNode(N->getOpcode(), SDLoc(N), VecVT, Res);
+      SDValue Chain;
+      if (IsStrict) {
+        SDVTList Tys = DAG.getVTList(VecVT, MVT::Other);
+        Res = DAG.getNode(N->getOpcode(), SDLoc(N), Tys, N->getOperand(0), Res);
+        Chain = Res.getValue(1);
+      } else
+        Res = DAG.getNode(N->getOpcode(), SDLoc(N), VecVT, Res);
       Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Res, ZeroIdx);
       Results.push_back(Res);
+      if (IsStrict)
+        Results.push_back(Chain);
      return;
     }
 
-    if (SDValue V = FP_TO_INTHelper(SDValue(N, 0), DAG, IsSigned))
+    SDValue Chain;
+    if (SDValue V = FP_TO_INTHelper(SDValue(N, 0), DAG, IsSigned, Chain)) {
       Results.push_back(V);
+      if (IsStrict)
+        Results.push_back(Chain);
+    }
     return;
   }
   case ISD::SINT_TO_FP: {
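For orientation, and not part of the patch itself: a strict node differs from its non-strict twin only in that operand 0 is the incoming chain, the FP value moves to operand 1, and the outgoing chain is returned as a second result. That is why the lowering above reads Op.getOperand(IsStrict ? 1 : 0), threads Chain through FP_TO_INTHelper, and returns DAG.getMergeValues({Res, Chain}, dl). A minimal IR sketch of what reaches these paths (the function name and the choice of types are illustrative only; the intrinsic signature matches the declares in the tests below):

define i32 @strict_fptosi_sketch(double %x) #0 {
entry:
  ; Built as STRICT_FP_TO_SINT (chain, value) during SelectionDAG
  ; construction instead of the chainless FP_TO_SINT.
  %r = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double %x,
                                  metadata !"fpexcept.strict") #0
  ret i32 %r
}

declare i32 @llvm.experimental.constrained.fptosi.i32.f64(double, metadata)
attributes #0 = { strictfp }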
Index: llvm/test/CodeGen/X86/fp-intrinsics-x86_64.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/X86/fp-intrinsics-x86_64.ll
@@ -0,0 +1,57 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -O3 -mtriple=x86_64-pc-linux < %s | FileCheck %s --check-prefix=COMMON --check-prefix=SSE
+; RUN: llc -O3 -mtriple=x86_64-pc-linux -mattr=+avx < %s | FileCheck %s --check-prefix=COMMON --check-prefix=AVX --check-prefix=AVX1
+; RUN: llc -O3 -mtriple=x86_64-pc-linux -mattr=+avx512f < %s | FileCheck %s --check-prefix=COMMON --check-prefix=AVX --check-prefix=AVX512 --check-prefix=AVX512F
+; RUN: llc -O3 -mtriple=x86_64-pc-linux -mattr=+avx512dq < %s | FileCheck %s --check-prefix=COMMON --check-prefix=AVX --check-prefix=AVX512 --check-prefix=AVX512DQ
+
+; TODO: Legalize these properly in 32-bit mode
+
+define i64 @f25(double %x) #0 {
+; COMMON-LABEL: f25:
+; COMMON:       # %bb.0: # %entry
+; COMMON-NEXT:    jmp llrint # TAILCALL
+entry:
+  %result = call i64 @llvm.experimental.constrained.llrint.i64.f64(double %x,
+                                  metadata !"round.dynamic",
+                                  metadata !"fpexcept.strict") #0
+  ret i64 %result
+}
+
+define i64 @f26(float %x) {
+; COMMON-LABEL: f26:
+; COMMON:       # %bb.0: # %entry
+; COMMON-NEXT:    jmp llrintf # TAILCALL
+entry:
+  %result = call i64 @llvm.experimental.constrained.llrint.i64.f32(float %x,
+                                  metadata !"round.dynamic",
+                                  metadata !"fpexcept.strict") #0
+  ret i64 %result
+}
+
+define i64 @f29(double %x) #0 {
+; COMMON-LABEL: f29:
+; COMMON:       # %bb.0: # %entry
+; COMMON-NEXT:    jmp llround # TAILCALL
+entry:
+  %result = call i64 @llvm.experimental.constrained.llround.i64.f64(double %x,
+                                  metadata !"fpexcept.strict") #0
+  ret i64 %result
+}
+
+define i64 @f30(float %x) #0 {
+; COMMON-LABEL: f30:
+; COMMON:       # %bb.0: # %entry
+; COMMON-NEXT:    jmp llroundf # TAILCALL
+entry:
+  %result = call i64 @llvm.experimental.constrained.llround.i64.f32(float %x,
+                                  metadata !"fpexcept.strict") #0
+  ret i64 %result
+}
+
+attributes #0 = { strictfp }
+
+@llvm.fp.env = thread_local global i8 zeroinitializer, section "llvm.metadata"
+declare i64 @llvm.experimental.constrained.llrint.i64.f64(double, metadata, metadata)
+declare i64 @llvm.experimental.constrained.llrint.i64.f32(float, metadata, metadata)
+declare i64 @llvm.experimental.constrained.llround.i64.f64(double, metadata)
+declare i64 @llvm.experimental.constrained.llround.i64.f32(float, metadata)
Index: llvm/test/CodeGen/X86/fp-intrinsics.ll
===================================================================
--- llvm/test/CodeGen/X86/fp-intrinsics.ll
+++ llvm/test/CodeGen/X86/fp-intrinsics.ll
@@ -1,4 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -O3 -mtriple=i686-pc-linux < %s | FileCheck %s --check-prefix=COMMON --check-prefix=X87
 ; RUN: llc -O3 -mtriple=x86_64-pc-linux < %s | FileCheck %s --check-prefix=COMMON --check-prefix=SSE
 ; RUN: llc -O3 -mtriple=x86_64-pc-linux -mattr=+avx < %s | FileCheck %s --check-prefix=COMMON --check-prefix=AVX --check-prefix=AVX1
 ; RUN: llc -O3 -mtriple=x86_64-pc-linux -mattr=+avx512f < %s | FileCheck %s --check-prefix=COMMON --check-prefix=AVX --check-prefix=AVX512 --check-prefix=AVX512F
@@ -13,6 +14,12 @@
 ; }
 ;
 define double @f1() #0 {
+; X87-LABEL: f1:
+; X87:       # %bb.0: # %entry
+; X87-NEXT:    fld1
+; X87-NEXT:    fdivs {{\.LCPI.*}}
+; X87-NEXT:    retl
+;
 ; SSE-LABEL: f1:
 ; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
@@ -42,6 +49,12 @@
 ; }
 ;
 define double @f2(double %a) #0 {
+; X87-LABEL: f2:
+; X87:       # %bb.0: # %entry
+; X87-NEXT:    fldz
+; X87-NEXT:    fsubrl {{[0-9]+}}(%esp)
+; X87-NEXT:    retl
+;
 ; SSE-LABEL: f2:
 ; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    xorpd %xmm1, %xmm1
@@ -72,6 +85,16 @@
 ; }
 ;
 define double @f3(double %a, double %b) #0 {
+; X87-LABEL: f3:
+; X87:       # %bb.0: # %entry
+; X87-NEXT:    fldz
+; X87-NEXT:    fchs
+; X87-NEXT:    fld %st(0)
+; X87-NEXT:    fsubl {{[0-9]+}}(%esp)
+; X87-NEXT:    fmull {{[0-9]+}}(%esp)
+; X87-NEXT:    fsubrp %st, %st(1)
+; X87-NEXT:    retl
+;
 ; SSE-LABEL: f3:
 ; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    movsd {{.*#+}} xmm2 = mem[0],zero
@@ -118,6 +141,17 @@
 ;
 ;
 define double @f4(i32 %n, double %a) #0 {
+; X87-LABEL: f4:
+; X87:       # %bb.0: # %entry
+; X87-NEXT:    fldl {{[0-9]+}}(%esp)
+; X87-NEXT:    cmpl $0, {{[0-9]+}}(%esp)
+; X87-NEXT:    jle .LBB3_2
+; X87-NEXT:  # %bb.1: # %if.then
+; X87-NEXT:    fld1
+; X87-NEXT:    faddp %st, %st(1)
+; X87-NEXT:  .LBB3_2: # %if.end
+; X87-NEXT:    retl
+;
 ; SSE-LABEL: f4:
 ; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    testl %edi, %edi
@@ -153,6 +187,12 @@
 
 ; Verify that sqrt(42.0) isn't simplified when the rounding mode is unknown.
 define double @f5() #0 {
+; X87-LABEL: f5:
+; X87:       # %bb.0: # %entry
+; X87-NEXT:    flds {{\.LCPI.*}}
+; X87-NEXT:    fsqrt
+; X87-NEXT:    retl
+;
 ; SSE-LABEL: f5:
 ; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
@@ -173,6 +213,19 @@
 
 ; Verify that pow(42.1, 3.0) isn't simplified when the rounding mode is unknown.
 define double @f6() #0 {
+; X87-LABEL: f6:
+; X87:       # %bb.0: # %entry
+; X87-NEXT:    subl $28, %esp
+; X87-NEXT:    .cfi_def_cfa_offset 32
+; X87-NEXT:    flds {{\.LCPI.*}}
+; X87-NEXT:    fstpl {{[0-9]+}}(%esp)
+; X87-NEXT:    fldl {{\.LCPI.*}}
+; X87-NEXT:    fstpl (%esp)
+; X87-NEXT:    calll pow
+; X87-NEXT:    addl $28, %esp
+; X87-NEXT:    .cfi_def_cfa_offset 4
+; X87-NEXT:    retl
+;
 ; SSE-LABEL: f6:
 ; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
@@ -194,6 +247,18 @@
 
 ; Verify that powi(42.1, 3) isn't simplified when the rounding mode is unknown.
 define double @f7() #0 {
+; X87-LABEL: f7:
+; X87:       # %bb.0: # %entry
+; X87-NEXT:    subl $12, %esp
+; X87-NEXT:    .cfi_def_cfa_offset 16
+; X87-NEXT:    fldl {{\.LCPI.*}}
+; X87-NEXT:    fstpl (%esp)
+; X87-NEXT:    movl $3, {{[0-9]+}}(%esp)
+; X87-NEXT:    calll __powidf2
+; X87-NEXT:    addl $12, %esp
+; X87-NEXT:    .cfi_def_cfa_offset 4
+; X87-NEXT:    retl
+;
 ; SSE-LABEL: f7:
 ; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
@@ -215,6 +280,17 @@
 
 ; Verify that sin(42.0) isn't simplified when the rounding mode is unknown.
 define double @f8() #0 {
+; X87-LABEL: f8:
+; X87:       # %bb.0: # %entry
+; X87-NEXT:    subl $12, %esp
+; X87-NEXT:    .cfi_def_cfa_offset 16
+; X87-NEXT:    flds {{\.LCPI.*}}
+; X87-NEXT:    fstpl (%esp)
+; X87-NEXT:    calll sin
+; X87-NEXT:    addl $12, %esp
+; X87-NEXT:    .cfi_def_cfa_offset 4
+; X87-NEXT:    retl
+;
 ; SSE-LABEL: f8:
 ; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
@@ -233,6 +309,17 @@
 
 ; Verify that cos(42.0) isn't simplified when the rounding mode is unknown.
 define double @f9() #0 {
+; X87-LABEL: f9:
+; X87:       # %bb.0: # %entry
+; X87-NEXT:    subl $12, %esp
+; X87-NEXT:    .cfi_def_cfa_offset 16
+; X87-NEXT:    flds {{\.LCPI.*}}
+; X87-NEXT:    fstpl (%esp)
+; X87-NEXT:    calll cos
+; X87-NEXT:    addl $12, %esp
+; X87-NEXT:    .cfi_def_cfa_offset 4
+; X87-NEXT:    retl
+;
 ; SSE-LABEL: f9:
 ; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
@@ -251,6 +338,17 @@
 
 ; Verify that exp(42.0) isn't simplified when the rounding mode is unknown.
 define double @f10() #0 {
+; X87-LABEL: f10:
+; X87:       # %bb.0: # %entry
+; X87-NEXT:    subl $12, %esp
+; X87-NEXT:    .cfi_def_cfa_offset 16
+; X87-NEXT:    flds {{\.LCPI.*}}
+; X87-NEXT:    fstpl (%esp)
+; X87-NEXT:    calll exp
+; X87-NEXT:    addl $12, %esp
+; X87-NEXT:    .cfi_def_cfa_offset 4
+; X87-NEXT:    retl
+;
 ; SSE-LABEL: f10:
 ; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
@@ -269,6 +367,17 @@
 
 ; Verify that exp2(42.1) isn't simplified when the rounding mode is unknown.
 define double @f11() #0 {
+; X87-LABEL: f11:
+; X87:       # %bb.0: # %entry
+; X87-NEXT:    subl $12, %esp
+; X87-NEXT:    .cfi_def_cfa_offset 16
+; X87-NEXT:    fldl {{\.LCPI.*}}
+; X87-NEXT:    fstpl (%esp)
+; X87-NEXT:    calll exp2
+; X87-NEXT:    addl $12, %esp
+; X87-NEXT:    .cfi_def_cfa_offset 4
+; X87-NEXT:    retl
+;
 ; SSE-LABEL: f11:
 ; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
@@ -287,6 +396,17 @@
 
 ; Verify that log(42.0) isn't simplified when the rounding mode is unknown.
 define double @f12() #0 {
+; X87-LABEL: f12:
+; X87:       # %bb.0: # %entry
+; X87-NEXT:    subl $12, %esp
+; X87-NEXT:    .cfi_def_cfa_offset 16
+; X87-NEXT:    flds {{\.LCPI.*}}
+; X87-NEXT:    fstpl (%esp)
+; X87-NEXT:    calll log
+; X87-NEXT:    addl $12, %esp
+; X87-NEXT:    .cfi_def_cfa_offset 4
+; X87-NEXT:    retl
+;
 ; SSE-LABEL: f12:
 ; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
@@ -305,6 +425,17 @@
 
 ; Verify that log10(42.0) isn't simplified when the rounding mode is unknown.
 define double @f13() #0 {
+; X87-LABEL: f13:
+; X87:       # %bb.0: # %entry
+; X87-NEXT:    subl $12, %esp
+; X87-NEXT:    .cfi_def_cfa_offset 16
+; X87-NEXT:    flds {{\.LCPI.*}}
+; X87-NEXT:    fstpl (%esp)
+; X87-NEXT:    calll log10
+; X87-NEXT:    addl $12, %esp
+; X87-NEXT:    .cfi_def_cfa_offset 4
+; X87-NEXT:    retl
+;
 ; SSE-LABEL: f13:
 ; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
@@ -323,6 +454,17 @@
 
 ; Verify that log2(42.0) isn't simplified when the rounding mode is unknown.
 define double @f14() #0 {
+; X87-LABEL: f14:
+; X87:       # %bb.0: # %entry
+; X87-NEXT:    subl $12, %esp
+; X87-NEXT:    .cfi_def_cfa_offset 16
+; X87-NEXT:    flds {{\.LCPI.*}}
+; X87-NEXT:    fstpl (%esp)
+; X87-NEXT:    calll log2
+; X87-NEXT:    addl $12, %esp
+; X87-NEXT:    .cfi_def_cfa_offset 4
+; X87-NEXT:    retl
+;
 ; SSE-LABEL: f14:
 ; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
@@ -341,6 +483,17 @@
 
 ; Verify that rint(42.1) isn't simplified when the rounding mode is unknown.
 define double @f15() #0 {
+; X87-LABEL: f15:
+; X87:       # %bb.0: # %entry
+; X87-NEXT:    subl $12, %esp
+; X87-NEXT:    .cfi_def_cfa_offset 16
+; X87-NEXT:    fldl {{\.LCPI.*}}
+; X87-NEXT:    fstpl (%esp)
+; X87-NEXT:    calll rint
+; X87-NEXT:    addl $12, %esp
+; X87-NEXT:    .cfi_def_cfa_offset 4
+; X87-NEXT:    retl
+;
 ; SSE-LABEL: f15:
 ; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
@@ -361,6 +514,17 @@
 
 ; Verify that nearbyint(42.1) isn't simplified when the rounding mode is
 ; unknown.
 define double @f16() #0 {
+; X87-LABEL: f16:
+; X87:       # %bb.0: # %entry
+; X87-NEXT:    subl $12, %esp
+; X87-NEXT:    .cfi_def_cfa_offset 16
+; X87-NEXT:    fldl {{\.LCPI.*}}
+; X87-NEXT:    fstpl (%esp)
+; X87-NEXT:    calll nearbyint
+; X87-NEXT:    addl $12, %esp
+; X87-NEXT:    .cfi_def_cfa_offset 4
+; X87-NEXT:    retl
+;
 ; SSE-LABEL: f16:
 ; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
@@ -380,6 +544,19 @@
 }
 
 define double @f19() #0 {
+; X87-LABEL: f19:
+; X87:       # %bb.0: # %entry
+; X87-NEXT:    subl $28, %esp
+; X87-NEXT:    .cfi_def_cfa_offset 32
+; X87-NEXT:    flds {{\.LCPI.*}}
+; X87-NEXT:    fstpl {{[0-9]+}}(%esp)
+; X87-NEXT:    movl $1072693248, {{[0-9]+}}(%esp) # imm = 0x3FF00000
+; X87-NEXT:    movl $0, (%esp)
+; X87-NEXT:    calll fmod
+; X87-NEXT:    addl $28, %esp
+; X87-NEXT:    .cfi_def_cfa_offset 4
+; X87-NEXT:    retl
+;
 ; SSE-LABEL: f19:
 ; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
@@ -403,7 +580,62 @@
 ; Verify that fptosi(%x) isn't simplified when the rounding mode is
 ; unknown. The expansion should have only one conversion instruction.
 ; Verify that no gross errors happen.
+define i16 @f20s16(double %x) #0 {
+; X87-LABEL: f20s16:
+; X87:       # %bb.0: # %entry
+; X87-NEXT:    subl $8, %esp
+; X87-NEXT:    .cfi_def_cfa_offset 12
+; X87-NEXT:    fldl {{[0-9]+}}(%esp)
+; X87-NEXT:    fnstcw {{[0-9]+}}(%esp)
+; X87-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X87-NEXT:    orl $3072, %eax # imm = 0xC00
+; X87-NEXT:    movw %ax, {{[0-9]+}}(%esp)
+; X87-NEXT:    fldcw {{[0-9]+}}(%esp)
+; X87-NEXT:    fistps {{[0-9]+}}(%esp)
+; X87-NEXT:    fldcw {{[0-9]+}}(%esp)
+; X87-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X87-NEXT:    addl $8, %esp
+; X87-NEXT:    .cfi_def_cfa_offset 4
+; X87-NEXT:    retl
+;
+; SSE-LABEL: f20s16:
+; SSE:       # %bb.0: # %entry
+; SSE-NEXT:    cvttsd2si %xmm0, %eax
+; SSE-NEXT:    # kill: def $ax killed $ax killed $eax
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: f20s16:
+; AVX:       # %bb.0: # %entry
+; AVX-NEXT:    vcvttsd2si %xmm0, %eax
+; AVX-NEXT:    # kill: def $ax killed $ax killed $eax
+; AVX-NEXT:    retq
+entry:
+  %result = call i16 @llvm.experimental.constrained.fptosi.i16.f64(double %x,
+                                  metadata !"fpexcept.strict") #0
+  ret i16 %result
+}
+
+; Verify that fptosi(%x) isn't simplified when the rounding mode is
+; unknown. The expansion should have only one conversion instruction.
+; Verify that no gross errors happen.
 define i32 @f20s(double %x) #0 {
+; X87-LABEL: f20s:
+; X87:       # %bb.0: # %entry
+; X87-NEXT:    subl $8, %esp
+; X87-NEXT:    .cfi_def_cfa_offset 12
+; X87-NEXT:    fldl {{[0-9]+}}(%esp)
+; X87-NEXT:    fnstcw (%esp)
+; X87-NEXT:    movzwl (%esp), %eax
+; X87-NEXT:    orl $3072, %eax # imm = 0xC00
+; X87-NEXT:    movw %ax, {{[0-9]+}}(%esp)
+; X87-NEXT:    fldcw {{[0-9]+}}(%esp)
+; X87-NEXT:    fistpl {{[0-9]+}}(%esp)
+; X87-NEXT:    fldcw (%esp)
+; X87-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X87-NEXT:    addl $8, %esp
+; X87-NEXT:    .cfi_def_cfa_offset 4
+; X87-NEXT:    retl
+;
 ; SSE-LABEL: f20s:
 ; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    cvttsd2si %xmm0, %eax
@@ -419,12 +651,127 @@
   ret i32 %result
 }
 
+; Verify that fptosi(%x) isn't simplified when the rounding mode is
+; unknown. The expansion should have only one conversion instruction.
+; Verify that no gross errors happen.
+define i64 @f20s64(double %x) #0 {
+; X87-LABEL: f20s64:
+; X87:       # %bb.0: # %entry
+; X87-NEXT:    subl $20, %esp
+; X87-NEXT:    .cfi_def_cfa_offset 24
+; X87-NEXT:    fldl {{[0-9]+}}(%esp)
+; X87-NEXT:    fnstcw {{[0-9]+}}(%esp)
+; X87-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X87-NEXT:    orl $3072, %eax # imm = 0xC00
+; X87-NEXT:    movw %ax, {{[0-9]+}}(%esp)
+; X87-NEXT:    fldcw {{[0-9]+}}(%esp)
+; X87-NEXT:    fistpll {{[0-9]+}}(%esp)
+; X87-NEXT:    fldcw {{[0-9]+}}(%esp)
+; X87-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X87-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X87-NEXT:    addl $20, %esp
+; X87-NEXT:    .cfi_def_cfa_offset 4
+; X87-NEXT:    retl
+;
+; SSE-LABEL: f20s64:
+; SSE:       # %bb.0: # %entry
+; SSE-NEXT:    cvttsd2si %xmm0, %rax
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: f20s64:
+; AVX:       # %bb.0: # %entry
+; AVX-NEXT:    vcvttsd2si %xmm0, %rax
+; AVX-NEXT:    retq
+entry:
+  %result = call i64 @llvm.experimental.constrained.fptosi.i64.f64(double %x,
+                                  metadata !"fpexcept.strict") #0
+  ret i64 %result
+}
+
 ; Verify that fptoui(%x) isn't simplified when the rounding mode is
 ; unknown. The expansion should have only one conversion instruction.
 ; Verify that no gross errors happen.
 define i32 @f20u(double %x) #0 {
+; X87-LABEL: f20u:
+; X87:       # %bb.0: # %entry
+; X87-NEXT:    subl $20, %esp
+; X87-NEXT:    .cfi_def_cfa_offset 24
+; X87-NEXT:    fldl {{[0-9]+}}(%esp)
+; X87-NEXT:    fnstcw {{[0-9]+}}(%esp)
+; X87-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X87-NEXT:    orl $3072, %eax # imm = 0xC00
+; X87-NEXT:    movw %ax, {{[0-9]+}}(%esp)
+; X87-NEXT:    fldcw {{[0-9]+}}(%esp)
+; X87-NEXT:    fistpll {{[0-9]+}}(%esp)
+; X87-NEXT:    fldcw {{[0-9]+}}(%esp)
+; X87-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X87-NEXT:    addl $20, %esp
+; X87-NEXT:    .cfi_def_cfa_offset 4
+; X87-NEXT:    retl
+;
 ; SSE-LABEL: f20u:
 ; SSE:       # %bb.0: # %entry
+; SSE-NEXT:    cvttsd2si %xmm0, %rax
+; SSE-NEXT:    # kill: def $eax killed $eax killed $rax
+; SSE-NEXT:    retq
+;
+; AVX1-LABEL: f20u:
+; AVX1:       # %bb.0: # %entry
+; AVX1-NEXT:    vcvttsd2si %xmm0, %rax
+; AVX1-NEXT:    # kill: def $eax killed $eax killed $rax
+; AVX1-NEXT:    retq
+;
+; AVX512-LABEL: f20u:
+; AVX512:       # %bb.0: # %entry
+; AVX512-NEXT:    vcvttsd2usi %xmm0, %eax
+; AVX512-NEXT:    retq
+entry:
+  %result = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %x,
+                                  metadata !"fpexcept.strict") #0
+  ret i32 %result
+}
+
+; Verify that fptoui(%x) isn't simplified when the rounding mode is
+; unknown. The expansion should have only one conversion instruction.
+; Verify that no gross errors happen.
+define i64 @f20u64(double %x) #0 {
+; X87-LABEL: f20u64:
+; X87:       # %bb.0: # %entry
+; X87-NEXT:    subl $20, %esp
+; X87-NEXT:    .cfi_def_cfa_offset 24
+; X87-NEXT:    fldl {{[0-9]+}}(%esp)
+; X87-NEXT:    flds {{\.LCPI.*}}
+; X87-NEXT:    fld %st(1)
+; X87-NEXT:    fsub %st(1), %st
+; X87-NEXT:    fxch %st(1)
+; X87-NEXT:    fucomp %st(2)
+; X87-NEXT:    fnstsw %ax
+; X87-NEXT:    # kill: def $ah killed $ah killed $ax
+; X87-NEXT:    sahf
+; X87-NEXT:    ja .LBB21_2
+; X87-NEXT:  # %bb.1: # %entry
+; X87-NEXT:    fstp %st(1)
+; X87-NEXT:    fldz
+; X87-NEXT:  .LBB21_2: # %entry
+; X87-NEXT:    fstp %st(0)
+; X87-NEXT:    setbe %al
+; X87-NEXT:    fnstcw {{[0-9]+}}(%esp)
+; X87-NEXT:    movzwl {{[0-9]+}}(%esp), %ecx
+; X87-NEXT:    orl $3072, %ecx # imm = 0xC00
+; X87-NEXT:    movw %cx, {{[0-9]+}}(%esp)
+; X87-NEXT:    fldcw {{[0-9]+}}(%esp)
+; X87-NEXT:    fistpll {{[0-9]+}}(%esp)
+; X87-NEXT:    fldcw {{[0-9]+}}(%esp)
+; X87-NEXT:    movzbl %al, %edx
+; X87-NEXT:    shll $31, %edx
+; X87-NEXT:    xorl {{[0-9]+}}(%esp), %edx
+; X87-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X87-NEXT:    addl $20, %esp
+; X87-NEXT:    .cfi_def_cfa_offset 4
+; X87-NEXT:    retl
+;
+; SSE-LABEL: f20u64:
+; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
 ; SSE-NEXT:    movapd %xmm0, %xmm2
 ; SSE-NEXT:    cmpltsd %xmm1, %xmm2
@@ -435,49 +782,51 @@
 ; SSE-NEXT:    subsd %xmm1, %xmm0
 ; SSE-NEXT:    andnpd %xmm0, %xmm3
 ; SSE-NEXT:    orpd %xmm3, %xmm2
-; SSE-NEXT:    cvttsd2si %xmm2, %ecx
+; SSE-NEXT:    cvttsd2si %xmm2, %rcx
 ; SSE-NEXT:    setae %al
-; SSE-NEXT:    shll $31, %eax
-; SSE-NEXT:    xorl %ecx, %eax
+; SSE-NEXT:    shlq $63, %rax
+; SSE-NEXT:    xorq %rcx, %rax
 ; SSE-NEXT:    retq
 ;
-; AVX1-LABEL: f20u:
+; AVX1-LABEL: f20u64:
 ; AVX1:       # %bb.0: # %entry
 ; AVX1-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
 ; AVX1-NEXT:    vcmpltsd %xmm1, %xmm0, %xmm2
 ; AVX1-NEXT:    vsubsd %xmm1, %xmm0, %xmm3
 ; AVX1-NEXT:    vblendvpd %xmm2, %xmm0, %xmm3, %xmm2
-; AVX1-NEXT:    vcvttsd2si %xmm2, %ecx
+; AVX1-NEXT:    vcvttsd2si %xmm2, %rcx
 ; AVX1-NEXT:    xorl %eax, %eax
 ; AVX1-NEXT:    vucomisd %xmm1, %xmm0
 ; AVX1-NEXT:    setae %al
-; AVX1-NEXT:    shll $31, %eax
-; AVX1-NEXT:    xorl %ecx, %eax
+; AVX1-NEXT:    shlq $63, %rax
+; AVX1-NEXT:    xorq %rcx, %rax
 ; AVX1-NEXT:    retq
 ;
-; AVX512-LABEL: f20u:
+; AVX512-LABEL: f20u64:
 ; AVX512:       # %bb.0: # %entry
-; AVX512-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
-; AVX512-NEXT:    vcmpltsd %xmm1, %xmm0, %k1
-; AVX512-NEXT:    vsubsd %xmm1, %xmm0, %xmm2
-; AVX512-NEXT:    vmovsd %xmm0, %xmm2, %xmm2 {%k1}
-; AVX512-NEXT:    vcvttsd2si %xmm2, %ecx
-; AVX512-NEXT:    xorl %eax, %eax
-; AVX512-NEXT:    vucomisd %xmm1, %xmm0
-; AVX512-NEXT:    setae %al
-; AVX512-NEXT:    shll $31, %eax
-; AVX512-NEXT:    xorl %ecx, %eax
+; AVX512-NEXT:    vcvttsd2usi %xmm0, %rax
 ; AVX512-NEXT:    retq
 entry:
-  %result = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %x,
+  %result = call i64 @llvm.experimental.constrained.fptoui.i64.f64(double %x,
                                   metadata !"fpexcept.strict") #0
-  ret i32 %result
+  ret i64 %result
 }
 
 ; Verify that round(42.1) isn't simplified when the rounding mode is
 ; unknown.
 ; Verify that no gross errors happen.
 define float @f21() #0 {
+; X87-LABEL: f21:
+; X87:       # %bb.0: # %entry
+; X87-NEXT:    pushl %eax
+; X87-NEXT:    .cfi_def_cfa_offset 8
+; X87-NEXT:    fldl {{\.LCPI.*}}
+; X87-NEXT:    fstps (%esp)
+; X87-NEXT:    flds (%esp)
+; X87-NEXT:    popl %eax
+; X87-NEXT:    .cfi_def_cfa_offset 4
+; X87-NEXT:    retl
+;
 ; SSE-LABEL: f21:
 ; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
@@ -498,6 +847,11 @@
 }
 
 define double @f22(float %x) #0 {
+; X87-LABEL: f22:
+; X87:       # %bb.0: # %entry
+; X87-NEXT:    flds {{[0-9]+}}(%esp)
+; X87-NEXT:    retl
+;
 ; SSE-LABEL: f22:
 ; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    cvtss2sd %xmm0, %xmm0
@@ -535,28 +889,6 @@
   ret i32 %result
 }
 
-define i64 @f25(double %x) #0 {
-; COMMON-LABEL: f25:
-; COMMON:       # %bb.0: # %entry
-; COMMON-NEXT:    jmp llrint # TAILCALL
-entry:
-  %result = call i64 @llvm.experimental.constrained.llrint.i64.f64(double %x,
-                                  metadata !"round.dynamic",
-                                  metadata !"fpexcept.strict") #0
-  ret i64 %result
-}
-
-define i64 @f26(float %x) {
-; COMMON-LABEL: f26:
-; COMMON:       # %bb.0: # %entry
-; COMMON-NEXT:    jmp llrintf # TAILCALL
-entry:
-  %result = call i64 @llvm.experimental.constrained.llrint.i64.f32(float %x,
-                                  metadata !"round.dynamic",
-                                  metadata !"fpexcept.strict") #0
-  ret i64 %result
-}
-
 define i32 @f27(double %x) #0 {
 ; COMMON-LABEL: f27:
 ; COMMON:       # %bb.0: # %entry
@@ -577,26 +909,6 @@
   ret i32 %result
 }
 
-define i64 @f29(double %x) #0 {
-; COMMON-LABEL: f29:
-; COMMON:       # %bb.0: # %entry
-; COMMON-NEXT:    jmp llround # TAILCALL
-entry:
-  %result = call i64 @llvm.experimental.constrained.llround.i64.f64(double %x,
-                                  metadata !"fpexcept.strict") #0
-  ret i64 %result
-}
-
-define i64 @f30(float %x) #0 {
-; COMMON-LABEL: f30:
-; COMMON:       # %bb.0: # %entry
-; COMMON-NEXT:    jmp llroundf # TAILCALL
-entry:
-  %result = call i64 @llvm.experimental.constrained.llround.i64.f32(float %x,
-                                  metadata !"fpexcept.strict") #0
-  ret i64 %result
-}
-
 attributes #0 = { strictfp }
 
 @llvm.fp.env = thread_local global i8 zeroinitializer, section "llvm.metadata"
@@ -617,15 +929,14 @@
 declare double @llvm.experimental.constrained.log2.f64(double, metadata, metadata)
 declare double @llvm.experimental.constrained.rint.f64(double, metadata, metadata)
 declare double @llvm.experimental.constrained.nearbyint.f64(double, metadata, metadata)
+declare i16 @llvm.experimental.constrained.fptosi.i16.f64(double, metadata)
 declare i32 @llvm.experimental.constrained.fptosi.i32.f64(double, metadata)
+declare i64 @llvm.experimental.constrained.fptosi.i64.f64(double, metadata)
 declare i32 @llvm.experimental.constrained.fptoui.i32.f64(double, metadata)
+declare i64 @llvm.experimental.constrained.fptoui.i64.f64(double, metadata)
 declare float @llvm.experimental.constrained.fptrunc.f32.f64(double, metadata, metadata)
 declare double @llvm.experimental.constrained.fpext.f64.f32(float, metadata)
 declare i32 @llvm.experimental.constrained.lrint.i32.f64(double, metadata, metadata)
 declare i32 @llvm.experimental.constrained.lrint.i32.f32(float, metadata, metadata)
-declare i64 @llvm.experimental.constrained.llrint.i64.f64(double, metadata, metadata)
-declare i64 @llvm.experimental.constrained.llrint.i64.f32(float, metadata, metadata)
 declare i32 @llvm.experimental.constrained.lround.i32.f64(double, metadata)
 declare i32 @llvm.experimental.constrained.lround.i32.f32(float, metadata)
-declare i64 @llvm.experimental.constrained.llround.i64.f64(double, metadata)
-declare i64 @llvm.experimental.constrained.llround.i64.f32(float, metadata)
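The vector-test churn below is the same change seen through the instruction selector: without AVX512, an unsigned 32-bit result is now produced by converting through a 64-bit signed cvttss2si/cvttsd2si followed by a "# kill" of the upper half, while AVX512 selects vcvttss2usi/vcvttsd2usi directly, which is why the shared AVX prefix splits into AVX1 and AVX512. A short sketch of the kind of IR behind these checks (the function name and constants are illustrative only; the intrinsic signature matches the ones used in the file):

define <2 x i32> @strict_vector_fptoui_sketch() #0 {
entry:
  %r = call <2 x i32> @llvm.experimental.constrained.fptoui.v2i32.v2f32(
                          <2 x float> <float 42.0, float 43.0>,
                          metadata !"fpexcept.strict") #0
  ret <2 x i32> %r
}

declare <2 x i32> @llvm.experimental.constrained.fptoui.v2i32.v2f32(<2 x float>, metadata)
attributes #0 = { strictfp }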
Index: llvm/test/CodeGen/X86/vector-constrained-fp-intrinsics.ll
===================================================================
--- llvm/test/CodeGen/X86/vector-constrained-fp-intrinsics.ll
+++ llvm/test/CodeGen/X86/vector-constrained-fp-intrinsics.ll
@@ -4365,13 +4365,20 @@
 define <1 x i32> @constrained_vector_fptoui_v1i32_v1f32() #0 {
 ; CHECK-LABEL: constrained_vector_fptoui_v1i32_v1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    cvttss2si {{.*}}(%rip), %eax
+; CHECK-NEXT:    cvttss2si {{.*}}(%rip), %rax
+; CHECK-NEXT:    # kill: def $eax killed $eax killed $rax
 ; CHECK-NEXT:    retq
 ;
-; AVX-LABEL: constrained_vector_fptoui_v1i32_v1f32:
-; AVX:       # %bb.0: # %entry
-; AVX-NEXT:    vcvttss2si {{.*}}(%rip), %eax
-; AVX-NEXT:    retq
+; AVX1-LABEL: constrained_vector_fptoui_v1i32_v1f32:
+; AVX1:       # %bb.0: # %entry
+; AVX1-NEXT:    vcvttss2si {{.*}}(%rip), %rax
+; AVX1-NEXT:    # kill: def $eax killed $eax killed $rax
+; AVX1-NEXT:    retq
+;
+; AVX512-LABEL: constrained_vector_fptoui_v1i32_v1f32:
+; AVX512:       # %bb.0: # %entry
+; AVX512-NEXT:    vcvttss2usi {{.*}}(%rip), %eax
+; AVX512-NEXT:    retq
 entry:
   %result = call <1 x i32> @llvm.experimental.constrained.fptoui.v1i32.v1f32(
                               <1 x float>,
@@ -4382,20 +4389,28 @@
 define <2 x i32> @constrained_vector_fptoui_v2i32_v2f32() #0 {
 ; CHECK-LABEL: constrained_vector_fptoui_v2i32_v2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    cvttss2si {{.*}}(%rip), %eax
+; CHECK-NEXT:    cvttss2si {{.*}}(%rip), %rax
 ; CHECK-NEXT:    movd %eax, %xmm1
-; CHECK-NEXT:    cvttss2si {{.*}}(%rip), %eax
+; CHECK-NEXT:    cvttss2si {{.*}}(%rip), %rax
 ; CHECK-NEXT:    movd %eax, %xmm0
 ; CHECK-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
 ; CHECK-NEXT:    retq
 ;
-; AVX-LABEL: constrained_vector_fptoui_v2i32_v2f32:
-; AVX:       # %bb.0: # %entry
-; AVX-NEXT:    vcvttss2si {{.*}}(%rip), %eax
-; AVX-NEXT:    vcvttss2si {{.*}}(%rip), %ecx
-; AVX-NEXT:    vmovd %ecx, %xmm0
-; AVX-NEXT:    vpinsrd $1, %eax, %xmm0, %xmm0
-; AVX-NEXT:    retq
+; AVX1-LABEL: constrained_vector_fptoui_v2i32_v2f32:
+; AVX1:       # %bb.0: # %entry
+; AVX1-NEXT:    vcvttss2si {{.*}}(%rip), %rax
+; AVX1-NEXT:    vcvttss2si {{.*}}(%rip), %rcx
+; AVX1-NEXT:    vmovd %ecx, %xmm0
+; AVX1-NEXT:    vpinsrd $1, %eax, %xmm0, %xmm0
+; AVX1-NEXT:    retq
+;
+; AVX512-LABEL: constrained_vector_fptoui_v2i32_v2f32:
+; AVX512:       # %bb.0: # %entry
+; AVX512-NEXT:    vcvttss2usi {{.*}}(%rip), %eax
+; AVX512-NEXT:    vmovd %eax, %xmm0
+; AVX512-NEXT:    vcvttss2usi {{.*}}(%rip), %eax
+; AVX512-NEXT:    vpinsrd $1, %eax, %xmm0, %xmm0
+; AVX512-NEXT:    retq
 entry:
   %result = call <2 x i32> @llvm.experimental.constrained.fptoui.v2i32.v2f32(
                               <2 x float>,
@@ -4406,25 +4421,35 @@
 define <3 x i32> @constrained_vector_fptoui_v3i32_v3f32() #0 {
 ; CHECK-LABEL: constrained_vector_fptoui_v3i32_v3f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    cvttss2si {{.*}}(%rip), %eax
+; CHECK-NEXT:    cvttss2si {{.*}}(%rip), %rax
 ; CHECK-NEXT:    movd %eax, %xmm1
-; CHECK-NEXT:    cvttss2si {{.*}}(%rip), %eax
+; CHECK-NEXT:    cvttss2si {{.*}}(%rip), %rax
 ; CHECK-NEXT:    movd %eax, %xmm0
 ; CHECK-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; CHECK-NEXT:    cvttss2si {{.*}}(%rip), %eax
+; CHECK-NEXT:    cvttss2si {{.*}}(%rip), %rax
 ; CHECK-NEXT:    movd %eax, %xmm1
 ; CHECK-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; CHECK-NEXT:    retq
 ;
-; AVX-LABEL: constrained_vector_fptoui_v3i32_v3f32:
-; AVX:       # %bb.0: # %entry
-; AVX-NEXT:    vcvttss2si {{.*}}(%rip), %eax
-; AVX-NEXT:    vcvttss2si {{.*}}(%rip), %ecx
-; AVX-NEXT:    vmovd %ecx, %xmm0
-; AVX-NEXT:    vpinsrd $1, %eax, %xmm0, %xmm0
-; AVX-NEXT:    vcvttss2si {{.*}}(%rip), %eax
-; AVX-NEXT:    vpinsrd $2, %eax, %xmm0, %xmm0
-; AVX-NEXT:    retq
+; AVX1-LABEL: constrained_vector_fptoui_v3i32_v3f32:
+; AVX1:       # %bb.0: # %entry
+; AVX1-NEXT:    vcvttss2si {{.*}}(%rip), %rax
+; AVX1-NEXT:    vcvttss2si {{.*}}(%rip), %rcx
+; AVX1-NEXT:    vmovd %ecx, %xmm0
+; AVX1-NEXT:    vpinsrd $1, %eax, %xmm0, %xmm0
+; AVX1-NEXT:    vcvttss2si {{.*}}(%rip), %rax
+; AVX1-NEXT:    vpinsrd $2, %eax, %xmm0, %xmm0
+; AVX1-NEXT:    retq
+;
+; AVX512-LABEL: constrained_vector_fptoui_v3i32_v3f32:
+; AVX512:       # %bb.0: # %entry
+; AVX512-NEXT:    vcvttss2usi {{.*}}(%rip), %eax
+; AVX512-NEXT:    vmovd %eax, %xmm0
+; AVX512-NEXT:    vcvttss2usi {{.*}}(%rip), %eax
+; AVX512-NEXT:    vpinsrd $1, %eax, %xmm0, %xmm0
+; AVX512-NEXT:    vcvttss2usi {{.*}}(%rip), %eax
+; AVX512-NEXT:    vpinsrd $2, %eax, %xmm0, %xmm0
+; AVX512-NEXT:    retq
 entry:
   %result = call <3 x i32> @llvm.experimental.constrained.fptoui.v3i32.v3f32(
                               <3 x float>
 define <4 x i32> @constrained_vector_fptoui_v4i32_v4f32() #0 {
 ; CHECK-LABEL: constrained_vector_fptoui_v4i32_v4f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    cvttss2si {{.*}}(%rip), %eax
+; CHECK-NEXT:    cvttss2si {{.*}}(%rip), %rax
 ; CHECK-NEXT:    movd %eax, %xmm0
-; CHECK-NEXT:    cvttss2si {{.*}}(%rip), %eax
+; CHECK-NEXT:    cvttss2si {{.*}}(%rip), %rax
 ; CHECK-NEXT:    movd %eax, %xmm1
 ; CHECK-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; CHECK-NEXT:    cvttss2si {{.*}}(%rip), %eax
+; CHECK-NEXT:    cvttss2si {{.*}}(%rip), %rax
 ; CHECK-NEXT:    movd %eax, %xmm2
-; CHECK-NEXT:    cvttss2si {{.*}}(%rip), %eax
+; CHECK-NEXT:    cvttss2si {{.*}}(%rip), %rax
 ; CHECK-NEXT:    movd %eax, %xmm0
 ; CHECK-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
 ; CHECK-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; CHECK-NEXT:    retq
 ;
-; AVX-LABEL: constrained_vector_fptoui_v4i32_v4f32:
-; AVX:       # %bb.0: # %entry
-; AVX-NEXT:    vcvttss2si {{.*}}(%rip), %eax
-; AVX-NEXT:    vcvttss2si {{.*}}(%rip), %ecx
-; AVX-NEXT:    vmovd %ecx, %xmm0
-; AVX-NEXT:    vpinsrd $1, %eax, %xmm0, %xmm0
-; AVX-NEXT:    vcvttss2si {{.*}}(%rip), %eax
-; AVX-NEXT:    vpinsrd $2, %eax, %xmm0, %xmm0
-; AVX-NEXT:    vcvttss2si {{.*}}(%rip), %eax
-; AVX-NEXT:    vpinsrd $3, %eax, %xmm0, %xmm0
-; AVX-NEXT:    retq
+; AVX1-LABEL: constrained_vector_fptoui_v4i32_v4f32:
+; AVX1:       # %bb.0: # %entry
+; AVX1-NEXT:    vcvttss2si {{.*}}(%rip), %rax
+; AVX1-NEXT:    vcvttss2si {{.*}}(%rip), %rcx
+; AVX1-NEXT:    vmovd %ecx, %xmm0
+; AVX1-NEXT:    vpinsrd $1, %eax, %xmm0, %xmm0
+; AVX1-NEXT:    vcvttss2si {{.*}}(%rip), %rax
+; AVX1-NEXT:    vpinsrd $2, %eax, %xmm0, %xmm0
+; AVX1-NEXT:    vcvttss2si {{.*}}(%rip), %rax
+; AVX1-NEXT:    vpinsrd $3, %eax, %xmm0, %xmm0
+; AVX1-NEXT:    retq
+;
+; AVX512-LABEL: constrained_vector_fptoui_v4i32_v4f32:
+; AVX512:       # %bb.0: # %entry
+; AVX512-NEXT:    vcvttss2usi {{.*}}(%rip), %eax
+; AVX512-NEXT:    vmovd %eax, %xmm0
+; AVX512-NEXT:    vcvttss2usi {{.*}}(%rip), %eax
+; AVX512-NEXT:    vpinsrd $1, %eax, %xmm0, %xmm0
+; AVX512-NEXT:    vcvttss2usi {{.*}}(%rip), %eax
+; AVX512-NEXT:    vpinsrd $2, %eax, %xmm0, %xmm0
+; AVX512-NEXT:    vcvttss2usi {{.*}}(%rip), %eax
+; AVX512-NEXT:    vpinsrd $3, %eax, %xmm0, %xmm0
+; AVX512-NEXT:    retq
 entry:
   %result = call <4 x i32> @llvm.experimental.constrained.fptoui.v4i32.v4f32(
                               <4 x float>
   %result = call <1 x i64> @llvm.experimental.constrained.fptoui.v1i64.v1f32(
                               <1 x float>,
@@ -4495,14 +4537,23 @@
 ; CHECK-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; CHECK-NEXT:    retq
 ;
-; AVX-LABEL: constrained_vector_fptoui_v2i64_v2f32:
-; AVX:       # %bb.0: # %entry
-; AVX-NEXT:    vcvttss2si {{.*}}(%rip), %rax
-; AVX-NEXT:    vmovq %rax, %xmm0
-; AVX-NEXT:    vcvttss2si {{.*}}(%rip), %rax
-; AVX-NEXT:    vmovq %rax, %xmm1
-; AVX-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX-NEXT:    retq
+; AVX1-LABEL: constrained_vector_fptoui_v2i64_v2f32:
+; AVX1:       # %bb.0: # %entry
+; AVX1-NEXT:    vcvttss2si {{.*}}(%rip), %rax
+; AVX1-NEXT:    vmovq %rax, %xmm0
+; AVX1-NEXT:    vcvttss2si {{.*}}(%rip), %rax
+; AVX1-NEXT:    vmovq %rax, %xmm1
+; AVX1-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX1-NEXT:    retq
+;
+; AVX512-LABEL: constrained_vector_fptoui_v2i64_v2f32:
+; AVX512:       # %bb.0: # %entry
+; AVX512-NEXT:    vcvttss2usi {{.*}}(%rip), %rax
+; AVX512-NEXT:    vmovq %rax, %xmm0
+; AVX512-NEXT:    vcvttss2usi {{.*}}(%rip), %rax
+; AVX512-NEXT:    vmovq %rax, %xmm1
+; AVX512-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX512-NEXT:    retq
 entry:
   %result = call <2 x i64> @llvm.experimental.constrained.fptoui.v2i64.v2f32(
                               <2 x float>,
@@ -4532,12 +4583,12 @@
 ;
 ; AVX512-LABEL: constrained_vector_fptoui_v3i64_v3f32:
 ; AVX512:       # %bb.0: # %entry
-; AVX512-NEXT:    vcvttss2si {{.*}}(%rip), %rax
+; AVX512-NEXT:    vcvttss2usi {{.*}}(%rip), %rax
 ; AVX512-NEXT:    vmovq %rax, %xmm0
-; AVX512-NEXT:    vcvttss2si {{.*}}(%rip), %rax
+; AVX512-NEXT:    vcvttss2usi {{.*}}(%rip), %rax
 ; AVX512-NEXT:    vmovq %rax, %xmm1
 ; AVX512-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX512-NEXT:    vcvttss2si {{.*}}(%rip), %rax
+; AVX512-NEXT:    vcvttss2usi {{.*}}(%rip), %rax
 ; AVX512-NEXT:    vmovq %rax, %xmm1
 ; AVX512-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
 ; AVX512-NEXT:    retq
@@ -4581,14 +4632,14 @@
 ;
 ; AVX512-LABEL: constrained_vector_fptoui_v4i64_v4f32:
 ; AVX512:       # %bb.0: # %entry
-; AVX512-NEXT:    vcvttss2si {{.*}}(%rip), %rax
+; AVX512-NEXT:    vcvttss2usi {{.*}}(%rip), %rax
 ; AVX512-NEXT:    vmovq %rax, %xmm0
-; AVX512-NEXT:    vcvttss2si {{.*}}(%rip), %rax
+; AVX512-NEXT:    vcvttss2usi {{.*}}(%rip), %rax
 ; AVX512-NEXT:    vmovq %rax, %xmm1
 ; AVX512-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX512-NEXT:    vcvttss2si {{.*}}(%rip), %rax
+; AVX512-NEXT:    vcvttss2usi {{.*}}(%rip), %rax
 ; AVX512-NEXT:    vmovq %rax, %xmm1
-; AVX512-NEXT:    vcvttss2si {{.*}}(%rip), %rax
+; AVX512-NEXT:    vcvttss2usi {{.*}}(%rip), %rax
 ; AVX512-NEXT:    vmovq %rax, %xmm2
 ; AVX512-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
 ; AVX512-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
@@ -4604,13 +4655,20 @@
 define <1 x i32> @constrained_vector_fptoui_v1i32_v1f64() #0 {
 ; CHECK-LABEL: constrained_vector_fptoui_v1i32_v1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    cvttsd2si {{.*}}(%rip), %eax
+; CHECK-NEXT:    cvttsd2si {{.*}}(%rip), %rax
+; CHECK-NEXT:    # kill: def $eax killed $eax killed $rax
 ; CHECK-NEXT:    retq
 ;
-; AVX-LABEL: constrained_vector_fptoui_v1i32_v1f64:
-; AVX:       # %bb.0: # %entry
-; AVX-NEXT:    vcvttsd2si {{.*}}(%rip), %eax
-; AVX-NEXT:    retq
+; AVX1-LABEL: constrained_vector_fptoui_v1i32_v1f64:
+; AVX1:       # %bb.0: # %entry
+; AVX1-NEXT:    vcvttsd2si {{.*}}(%rip), %rax
+; AVX1-NEXT:    # kill: def $eax killed $eax killed $rax
+; AVX1-NEXT:    retq
+;
+; AVX512-LABEL: constrained_vector_fptoui_v1i32_v1f64:
+; AVX512:       # %bb.0: # %entry
+; AVX512-NEXT:    vcvttsd2usi {{.*}}(%rip), %eax
+; AVX512-NEXT:    retq
 entry:
   %result = call <1 x i32> @llvm.experimental.constrained.fptoui.v1i32.v1f64(
                               <1 x double>,
@@ -4621,20 +4679,28 @@
 define <2 x i32> @constrained_vector_fptoui_v2i32_v2f64() #0 {
 ; CHECK-LABEL: constrained_vector_fptoui_v2i32_v2f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    cvttsd2si {{.*}}(%rip), %eax
+; CHECK-NEXT:    cvttsd2si {{.*}}(%rip), %rax
 ; CHECK-NEXT:    movd %eax, %xmm1
-; CHECK-NEXT:    cvttsd2si {{.*}}(%rip), %eax
+; CHECK-NEXT:    cvttsd2si {{.*}}(%rip), %rax
 ; CHECK-NEXT:    movd %eax, %xmm0
 ; CHECK-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
 ; CHECK-NEXT:    retq
 ;
-; AVX-LABEL: constrained_vector_fptoui_v2i32_v2f64:
-; AVX:       # %bb.0: # %entry
-; AVX-NEXT:    vcvttsd2si {{.*}}(%rip), %eax
-; AVX-NEXT:    vcvttsd2si {{.*}}(%rip), %ecx
-; AVX-NEXT:    vmovd %ecx, %xmm0
-; AVX-NEXT:    vpinsrd $1, %eax, %xmm0, %xmm0
-; AVX-NEXT:    retq
+; AVX1-LABEL: constrained_vector_fptoui_v2i32_v2f64:
+; AVX1:       # %bb.0: # %entry
+; AVX1-NEXT:    vcvttsd2si {{.*}}(%rip), %rax
+; AVX1-NEXT:    vcvttsd2si {{.*}}(%rip), %rcx
+; AVX1-NEXT:    vmovd %ecx, %xmm0
+; AVX1-NEXT:    vpinsrd $1, %eax, %xmm0, %xmm0
+; AVX1-NEXT:    retq
+;
+; AVX512-LABEL: constrained_vector_fptoui_v2i32_v2f64:
+; AVX512:       # %bb.0: # %entry
+; AVX512-NEXT:    vcvttsd2usi {{.*}}(%rip), %eax
+; AVX512-NEXT:    vmovd %eax, %xmm0
+; AVX512-NEXT:    vcvttsd2usi {{.*}}(%rip), %eax
+; AVX512-NEXT:    vpinsrd $1, %eax, %xmm0, %xmm0
+; AVX512-NEXT:    retq
 entry:
   %result = call <2 x i32> @llvm.experimental.constrained.fptoui.v2i32.v2f64(
                               <2 x double>,
@@ -4645,25 +4711,35 @@
 define <3 x i32> @constrained_vector_fptoui_v3i32_v3f64() #0 {
 ; CHECK-LABEL: constrained_vector_fptoui_v3i32_v3f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    cvttsd2si {{.*}}(%rip), %eax
+; CHECK-NEXT:    cvttsd2si {{.*}}(%rip), %rax
 ; CHECK-NEXT:    movd %eax, %xmm1
-; CHECK-NEXT:    cvttsd2si {{.*}}(%rip), %eax
+; CHECK-NEXT:    cvttsd2si {{.*}}(%rip), %rax
 ; CHECK-NEXT:    movd %eax, %xmm0
 ; CHECK-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; CHECK-NEXT:    cvttsd2si {{.*}}(%rip), %eax
+; CHECK-NEXT:    cvttsd2si {{.*}}(%rip), %rax
 ; CHECK-NEXT:    movd %eax, %xmm1
 ; CHECK-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; CHECK-NEXT:    retq
 ;
-; AVX-LABEL: constrained_vector_fptoui_v3i32_v3f64:
-; AVX:       # %bb.0: # %entry
-; AVX-NEXT:    vcvttsd2si {{.*}}(%rip), %eax
-; AVX-NEXT:    vcvttsd2si {{.*}}(%rip), %ecx
-; AVX-NEXT:    vmovd %ecx, %xmm0
-; AVX-NEXT:    vpinsrd $1, %eax, %xmm0, %xmm0
-; AVX-NEXT:    vcvttsd2si {{.*}}(%rip), %eax
-; AVX-NEXT:    vpinsrd $2, %eax, %xmm0, %xmm0
-; AVX-NEXT:    retq
+; AVX1-LABEL: constrained_vector_fptoui_v3i32_v3f64:
+; AVX1:       # %bb.0: # %entry
+; AVX1-NEXT:    vcvttsd2si {{.*}}(%rip), %rax
+; AVX1-NEXT:    vcvttsd2si {{.*}}(%rip), %rcx
+; AVX1-NEXT:    vmovd %ecx, %xmm0
+; AVX1-NEXT:    vpinsrd $1, %eax, %xmm0, %xmm0
+; AVX1-NEXT:    vcvttsd2si {{.*}}(%rip), %rax
+; AVX1-NEXT:    vpinsrd $2, %eax, %xmm0, %xmm0
+; AVX1-NEXT:    retq
+;
+; AVX512-LABEL: constrained_vector_fptoui_v3i32_v3f64:
+; AVX512:       # %bb.0: # %entry
+; AVX512-NEXT:    vcvttsd2usi {{.*}}(%rip), %eax
+; AVX512-NEXT:    vmovd %eax, %xmm0
+; AVX512-NEXT:    vcvttsd2usi {{.*}}(%rip), %eax
+; AVX512-NEXT:    vpinsrd $1, %eax, %xmm0, %xmm0
+; AVX512-NEXT:    vcvttsd2usi {{.*}}(%rip), %eax
+; AVX512-NEXT:    vpinsrd $2, %eax, %xmm0, %xmm0
+; AVX512-NEXT:    retq
 entry:
   %result = call <3 x i32> @llvm.experimental.constrained.fptoui.v3i32.v3f64(
                               <3 x double>
 define <4 x i32> @constrained_vector_fptoui_v4i32_v4f64() #0 {
 ; CHECK-LABEL: constrained_vector_fptoui_v4i32_v4f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    cvttsd2si {{.*}}(%rip), %eax
+; CHECK-NEXT:    cvttsd2si {{.*}}(%rip), %rax
 ; CHECK-NEXT:    movd %eax, %xmm0
-; CHECK-NEXT:    cvttsd2si {{.*}}(%rip), %eax
+; CHECK-NEXT:    cvttsd2si {{.*}}(%rip), %rax
 ; CHECK-NEXT:    movd %eax, %xmm1
 ; CHECK-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; CHECK-NEXT:    cvttsd2si {{.*}}(%rip), %eax
+; CHECK-NEXT:    cvttsd2si {{.*}}(%rip), %rax
 ; CHECK-NEXT:    movd %eax, %xmm2
-; CHECK-NEXT:    cvttsd2si {{.*}}(%rip), %eax
+; CHECK-NEXT:    cvttsd2si {{.*}}(%rip), %rax
 ; CHECK-NEXT:    movd %eax, %xmm0
 ; CHECK-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
 ; CHECK-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; CHECK-NEXT:    retq
 ;
-; AVX-LABEL: constrained_vector_fptoui_v4i32_v4f64:
-; AVX:       # %bb.0: # %entry
-; AVX-NEXT:    vcvttsd2si {{.*}}(%rip), %eax
-; AVX-NEXT:    vcvttsd2si {{.*}}(%rip), %ecx
-; AVX-NEXT:    vmovd %ecx, %xmm0
-; AVX-NEXT:    vpinsrd $1, %eax, %xmm0, %xmm0
-; AVX-NEXT:    vcvttsd2si {{.*}}(%rip), %eax
-; AVX-NEXT:    vpinsrd $2, %eax, %xmm0, %xmm0
-; AVX-NEXT:    vcvttsd2si {{.*}}(%rip), %eax
-; AVX-NEXT:    vpinsrd $3, %eax, %xmm0, %xmm0
-; AVX-NEXT:    retq
+; AVX1-LABEL: constrained_vector_fptoui_v4i32_v4f64:
+; AVX1:       # %bb.0: # %entry
+; AVX1-NEXT:    vcvttsd2si {{.*}}(%rip), %rax
+; AVX1-NEXT:    vcvttsd2si {{.*}}(%rip), %rcx
+; AVX1-NEXT:    vmovd %ecx, %xmm0
+; AVX1-NEXT:    vpinsrd $1, %eax, %xmm0, %xmm0
+; AVX1-NEXT:    vcvttsd2si {{.*}}(%rip), %rax
+; AVX1-NEXT:    vpinsrd $2, %eax, %xmm0, %xmm0
+; AVX1-NEXT:    vcvttsd2si {{.*}}(%rip), %rax
+; AVX1-NEXT:    vpinsrd $3, %eax, %xmm0, %xmm0
+; AVX1-NEXT:    retq
+;
+; AVX512-LABEL: constrained_vector_fptoui_v4i32_v4f64:
+; AVX512:       # %bb.0: # %entry
+; AVX512-NEXT:    vcvttsd2usi {{.*}}(%rip), %eax
+; AVX512-NEXT:    vmovd %eax, %xmm0
+; AVX512-NEXT:    vcvttsd2usi {{.*}}(%rip), %eax
+; AVX512-NEXT:    vpinsrd $1, %eax, %xmm0, %xmm0
+; AVX512-NEXT:    vcvttsd2usi {{.*}}(%rip), %eax
+; AVX512-NEXT:    vpinsrd $2, %eax, %xmm0, %xmm0
+; AVX512-NEXT:    vcvttsd2usi {{.*}}(%rip), %eax
+; AVX512-NEXT:    vpinsrd $3, %eax, %xmm0, %xmm0
+; AVX512-NEXT:    retq
 entry:
   %result = call <4 x i32> @llvm.experimental.constrained.fptoui.v4i32.v4f64(
                               <4 x double>
   %result = call <1 x i64> @llvm.experimental.constrained.fptoui.v1i64.v1f64(
                               <1 x double>,
@@ -4734,14 +4827,23 @@
 ; CHECK-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; CHECK-NEXT:    retq
 ;
-; AVX-LABEL: constrained_vector_fptoui_v2i64_v2f64:
-; AVX:       # %bb.0: # %entry
-; AVX-NEXT:    vcvttsd2si {{.*}}(%rip), %rax
-; AVX-NEXT:    vmovq %rax, %xmm0
-; AVX-NEXT:    vcvttsd2si {{.*}}(%rip), %rax
-; AVX-NEXT:    vmovq %rax, %xmm1
-; AVX-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX-NEXT:    retq
+; AVX1-LABEL: constrained_vector_fptoui_v2i64_v2f64:
+; AVX1:       # %bb.0: # %entry
+; AVX1-NEXT:    vcvttsd2si {{.*}}(%rip), %rax
+; AVX1-NEXT:    vmovq %rax, %xmm0
+; AVX1-NEXT:    vcvttsd2si {{.*}}(%rip), %rax
+; AVX1-NEXT:    vmovq %rax, %xmm1
+; AVX1-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX1-NEXT:    retq
+;
+; AVX512-LABEL: constrained_vector_fptoui_v2i64_v2f64:
+; AVX512:       # %bb.0: # %entry
+; AVX512-NEXT:    vcvttsd2usi {{.*}}(%rip), %rax
+; AVX512-NEXT:    vmovq %rax, %xmm0
+; AVX512-NEXT:    vcvttsd2usi {{.*}}(%rip), %rax
+; AVX512-NEXT:    vmovq %rax, %xmm1
+; AVX512-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX512-NEXT:    retq
 entry:
   %result = call <2 x i64> @llvm.experimental.constrained.fptoui.v2i64.v2f64(
                               <2 x double>,
@@ -4771,12 +4873,12 @@
 ;
 ; AVX512-LABEL: constrained_vector_fptoui_v3i64_v3f64:
 ; AVX512:       # %bb.0: # %entry
-; AVX512-NEXT:    vcvttsd2si {{.*}}(%rip), %rax
+; AVX512-NEXT:    vcvttsd2usi {{.*}}(%rip), %rax
 ; AVX512-NEXT:    vmovq %rax, %xmm0
-; AVX512-NEXT:    vcvttsd2si {{.*}}(%rip), %rax
+; AVX512-NEXT:    vcvttsd2usi {{.*}}(%rip), %rax
 ; AVX512-NEXT:    vmovq %rax, %xmm1
 ; AVX512-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX512-NEXT:    vcvttsd2si {{.*}}(%rip), %rax
+; AVX512-NEXT:    vcvttsd2usi {{.*}}(%rip), %rax
 ; AVX512-NEXT:    vmovq %rax, %xmm1
 ; AVX512-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
 ; AVX512-NEXT:    retq
@@ -4820,14 +4922,14 @@
 ;
 ; AVX512-LABEL: constrained_vector_fptoui_v4i64_v4f64:
 ; AVX512:       # %bb.0: # %entry
-; AVX512-NEXT:    vcvttsd2si {{.*}}(%rip), %rax
+; AVX512-NEXT:    vcvttsd2usi {{.*}}(%rip), %rax
 ; AVX512-NEXT:    vmovq %rax, %xmm0
-; AVX512-NEXT:
-; AVX512-NEXT:    vcvttsd2si {{.*}}(%rip), %rax
+; AVX512-NEXT:    vcvttsd2usi {{.*}}(%rip), %rax
 ; AVX512-NEXT:    vmovq %rax, %xmm1
 ; AVX512-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX512-NEXT:    vcvttsd2si {{.*}}(%rip), %rax
+; AVX512-NEXT:    vcvttsd2usi {{.*}}(%rip), %rax
 ; AVX512-NEXT:    vmovq %rax, %xmm1
-; AVX512-NEXT:    vcvttsd2si {{.*}}(%rip), %rax
+; AVX512-NEXT:    vcvttsd2usi {{.*}}(%rip), %rax
 ; AVX512-NEXT:    vmovq %rax, %xmm2
 ; AVX512-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
 ; AVX512-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0