diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst
--- a/llvm/docs/LangRef.rst
+++ b/llvm/docs/LangRef.rst
@@ -23007,6 +23007,46 @@
       %t = call <4 x i32> @llvm.fshr.v4i32(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c)
       %also.r = select <4 x i1> %mask, <4 x i32> %t, <4 x i32> poison
 
+'``llvm.vp.is.fpclass.*``' Intrinsics
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Syntax:
+"""""""
+This is an overloaded intrinsic.
+
+::
+
+      declare <vscale x 2 x i1> @llvm.vp.is.fpclass.nxv2f32(<vscale x 2 x float> <op>, i32 <test>, <vscale x 2 x i1> <mask>, i32 <vector_length>)
+      declare <2 x i1> @llvm.vp.is.fpclass.v2f16(<2 x half> <op>, i32 <test>, <2 x i1> <mask>, i32 <vector_length>)
+
+Overview:
+"""""""""
+
+Predicated version of the :ref:`llvm.is.fpclass <llvm.is.fpclass>` intrinsic.
+
+Arguments:
+""""""""""
+
+The first operand is a floating-point vector; the result type is a vector of
+boolean values with the same number of elements as the first argument. The
+second operand specifies which tests to perform, using the same test mask
+encoding as :ref:`llvm.is.fpclass <llvm.is.fpclass>`. The third operand is the
+vector mask and has the same number of elements as the result vector type. The
+fourth operand is the explicit vector length of the operation.
+
+Semantics:
+""""""""""
+
+The '``llvm.vp.is.fpclass``' intrinsic performs the
+:ref:`llvm.is.fpclass <llvm.is.fpclass>` test on each enabled lane.
+
+
+Examples:
+"""""""""
+
+.. code-block:: llvm
+
+      %r = call <2 x i1> @llvm.vp.is.fpclass.v2f16(<2 x half> %x, i32 3, <2 x i1> %m, i32 %evl)
+      %t = call <vscale x 2 x i1> @llvm.vp.is.fpclass.nxv2f16(<vscale x 2 x half> %x, i32 3, <vscale x 2 x i1> %m, i32 %evl)
+
 .. _int_mload_mstore:
 
diff --git a/llvm/include/llvm/IR/Intrinsics.td b/llvm/include/llvm/IR/Intrinsics.td
--- a/llvm/include/llvm/IR/Intrinsics.td
+++ b/llvm/include/llvm/IR/Intrinsics.td
@@ -2176,6 +2176,14 @@
                                llvm_i32_ty, llvm_i32_ty],
                               [IntrNoMem, ImmArg<ArgIndex<2>>]>;
 
+def int_vp_is_fpclass:
+      DefaultAttrsIntrinsic<[ LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
+                            [ llvm_anyvector_ty,
+                              llvm_i32_ty,
+                              LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+                              llvm_i32_ty],
+                            [IntrNoMem, IntrSpeculatable, ImmArg<ArgIndex<1>>]>;
+
 //===-------------------------- Masked Intrinsics -------------------------===//
 //
 def int_masked_load:
diff --git a/llvm/include/llvm/IR/VPIntrinsics.def b/llvm/include/llvm/IR/VPIntrinsics.def
--- a/llvm/include/llvm/IR/VPIntrinsics.def
+++ b/llvm/include/llvm/IR/VPIntrinsics.def
@@ -490,6 +490,10 @@
 
 ///// } Comparisons
 
+// llvm.vp.is.fpclass(val,test,mask,vlen)
+BEGIN_REGISTER_VP(vp_is_fpclass, 2, 3, VP_IS_FPCLASS, 0)
+END_REGISTER_VP(vp_is_fpclass, VP_IS_FPCLASS)
+
 ///// Memory Operations {
 // llvm.vp.store(val,ptr,mask,vlen)
 BEGIN_REGISTER_VP_INTRINSIC(vp_store, 2, 3)
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -7924,6 +7924,21 @@
     }
     break;
   }
+  case ISD::VP_IS_FPCLASS: {
+    MachineFunction &MF = DAG.getMachineFunction();
+    SDNodeFlags SDFlags;
+    SDFlags.setNoFPExcept(
+        !MF.getFunction().getAttributes().hasFnAttr(llvm::Attribute::StrictFP));
+    const DataLayout &DLayout = DAG.getDataLayout();
+    EVT DestVT = TLI.getValueType(DLayout, VPIntrin.getType());
+    auto Constant = cast<ConstantSDNode>(OpValues[1])->getZExtValue();
+    SDValue Check = DAG.getTargetConstant(Constant, DL, MVT::i32);
+    SDValue V =
+        DAG.getNode(ISD::VP_IS_FPCLASS, DL, DestVT,
+                    {OpValues[0], Check, OpValues[2], OpValues[3]}, SDFlags);
+    setValue(&VPIntrin, V);
+    return;
+  }
   case ISD::VP_INTTOPTR: {
     SDValue N = OpValues[0];
     EVT DestVT = TLI.getValueType(DAG.getDataLayout(), VPIntrin.getType());
diff --git a/llvm/lib/IR/IntrinsicInst.cpp b/llvm/lib/IR/IntrinsicInst.cpp
--- a/llvm/lib/IR/IntrinsicInst.cpp
+++ b/llvm/lib/IR/IntrinsicInst.cpp
@@ -621,6 +621,9 @@
     VPFunc = Intrinsic::getDeclaration(M, VPID,
                                        {ReturnType, Params[0]->getType()});
     break;
+  case Intrinsic::vp_is_fpclass:
+    VPFunc = Intrinsic::getDeclaration(M, VPID, {Params[0]->getType()});
+    break;
   case Intrinsic::vp_merge:
   case Intrinsic::vp_select:
     VPFunc = Intrinsic::getDeclaration(M, VPID, {Params[1]->getType()});
diff --git a/llvm/lib/IR/Verifier.cpp b/llvm/lib/IR/Verifier.cpp
--- a/llvm/lib/IR/Verifier.cpp
+++ b/llvm/lib/IR/Verifier.cpp
@@ -6045,6 +6045,11 @@
     Check(CmpInst::isIntPredicate(Pred),
           "invalid predicate for VP integer comparison intrinsic", &VPI);
   }
+  if (VPI.getIntrinsicID() == Intrinsic::vp_is_fpclass) {
+    const ConstantInt *TestMask = cast<ConstantInt>(VPI.getOperand(1));
+    Check((TestMask->getZExtValue() & ~static_cast<unsigned>(fcAllFlags)) == 0,
+          "unsupported bits for llvm.vp.is.fpclass test mask");
+  }
 }
 
 void Verifier::visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI) {
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -562,7 +562,7 @@
         ISD::VP_SQRT, ISD::VP_FMINNUM, ISD::VP_FMAXNUM, ISD::VP_FCEIL,
         ISD::VP_FFLOOR, ISD::VP_FROUND, ISD::VP_FROUNDEVEN, ISD::VP_FCOPYSIGN,
         ISD::VP_FROUNDTOZERO,
-        ISD::VP_FRINT, ISD::VP_FNEARBYINT};
+        ISD::VP_FRINT, ISD::VP_FNEARBYINT, ISD::VP_IS_FPCLASS};
 
 static const unsigned IntegerVecReduceOps[] = {
     ISD::VECREDUCE_ADD, ISD::VECREDUCE_AND, ISD::VECREDUCE_OR,
@@ -4496,6 +4496,10 @@
   if (VT.isScalableVector()) {
     MVT DstVT = VT0.changeVectorElementTypeToInteger();
     auto [Mask, VL] = getDefaultScalableVLOps(VT0, DL, DAG, Subtarget);
+    if (Op.getOpcode() == ISD::VP_IS_FPCLASS) {
+      Mask = Op.getOperand(2);
+      VL = Op.getOperand(3);
+    }
     SDValue FPCLASS = DAG.getNode(RISCVISD::FCLASS_VL, DL, DstVT, Op0, Mask,
                                   VL, Op->getFlags());
     if (IsOneBitMask)
@@ -4512,7 +4516,13 @@
   MVT ContainerVT = getContainerForFixedLengthVector(VT);
   MVT ContainerDstVT = ContainerVT0.changeVectorElementTypeToInteger();
   auto [Mask, VL] = getDefaultVLOps(VT0, ContainerVT0, DL, DAG, Subtarget);
-
+  if (Op.getOpcode() == ISD::VP_IS_FPCLASS) {
+    Mask = Op.getOperand(2);
+    MVT MaskContainerVT =
+        getContainerForFixedLengthVector(Mask.getSimpleValueType());
+    Mask = convertToScalableVector(MaskContainerVT, Mask, DAG, Subtarget);
+    VL = Op.getOperand(3);
+  }
   Op0 = convertToScalableVector(ContainerVT0, Op0, DAG, Subtarget);
 
   SDValue FPCLASS = DAG.getNode(RISCVISD::FCLASS_VL, DL, ContainerDstVT, Op0,
@@ -5230,6 +5240,8 @@
     return lowerVPOp(Op, DAG, RISCVISD::SRL_VL, /*HasMergeOp*/ true);
   case ISD::VP_SHL:
     return lowerVPOp(Op, DAG, RISCVISD::SHL_VL, /*HasMergeOp*/ true);
+  case ISD::VP_IS_FPCLASS:
+    return LowerIS_FPCLASS(Op, DAG);
   case ISD::VP_FADD:
     return lowerVPOp(Op, DAG, RISCVISD::FADD_VL, /*HasMergeOp*/ true);
   case ISD::VP_FSUB:
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
@@ -1944,10 +1944,11 @@
                  (vti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>;
 
   // 14.14. Vector Floating-Point Classify Instruction
-  def : Pat<(riscv_fclass_vl (vti.Vector vti.RegClass:$rs2),
-            (vti.Mask true_mask), VLOpFrag),
-            (!cast<Instruction>("PseudoVFCLASS_V_"# vti.LMul.MX)
-             vti.RegClass:$rs2, GPR:$vl, vti.Log2SEW)>;
+  def : Pat<(riscv_fclass_vl (vti.Vector vti.RegClass:$rs2),
+            (vti.Mask V0), VLOpFrag),
+            (!cast<Instruction>("PseudoVFCLASS_V_"# vti.LMul.MX #"_MASK")
+             (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs2,
+             (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
   }
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfclass-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfclass-vp.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfclass-vp.ll
@@ -0,0 +1,319 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=ilp32d \
+; RUN:     -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=lp64d \
+; RUN:     -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s
+
+define <2 x i1> @isnan_v2f16(<2 x half> %x, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: isnan_v2f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    vfclass.v v8, v8, v0.t
+; CHECK-NEXT:    li a0, 768
+; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
+; CHECK-NEXT:    vmsne.vi v0, v8, 0, v0.t
+; CHECK-NEXT:    ret
+  %1 = call <2 x i1> @llvm.vp.is.fpclass.v2f16(<2 x half> %x, i32 3, <2 x i1> %m, i32 %evl) ; nan
+  ret <2 x i1> %1
+}
+
+define <2 x i1> @isnan_v2f16_unmasked(<2 x half> %x, i32 zeroext %evl) {
+; CHECK-LABEL: isnan_v2f16_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    vfclass.v v8, v8
+; CHECK-NEXT:    li a0, 768
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %1 = call <2 x i1> @llvm.vp.is.fpclass.v2f16(<2 x half> %x, i32 3, <2 x i1> %m, i32 %evl) ; nan
+  ret <2 x i1> %1
+}
+
+define <2 x i1> @isnan_v2f32(<2 x float> %x, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: isnan_v2f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    vfclass.v v8, v8, v0.t
+; CHECK-NEXT:    li a0, 927
+; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
+; CHECK-NEXT:    vmsne.vi v0, v8, 0, v0.t
+; CHECK-NEXT:    ret
+  %1 = call <2 x i1> @llvm.vp.is.fpclass.v2f32(<2 x float> %x, i32 639, <2 x i1> %m, i32 %evl)
+  ret <2 x i1> %1
+}
+
+define <2 x i1> @isnan_v2f32_unmasked(<2 x float> %x, i32 zeroext %evl) {
+; CHECK-LABEL: isnan_v2f32_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    vfclass.v v8, v8
+; CHECK-NEXT:    li a0, 927
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %1 = call <2 x i1> @llvm.vp.is.fpclass.v2f32(<2 x float> %x, i32 639, <2 x i1> %m, i32 %evl)
+  ret <2 x i1> %1
+}
+
+define <4 x i1> @isnan_v4f32(<4 x float> %x, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: isnan_v4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    vfclass.v v8, v8, v0.t
+; CHECK-NEXT:    li a0, 768
+; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
+; CHECK-NEXT:    vmsne.vi v0, v8, 0, v0.t
+; CHECK-NEXT:    ret
+  %1 = call <4 x i1> @llvm.vp.is.fpclass.v4f32(<4 x float> %x, i32 3, <4 x i1> %m, i32 %evl) ; nan
+  ret <4 x i1> %1
+}
+
+define <4 x i1> @isnan_v4f32_unmasked(<4 x float> %x, i32 zeroext %evl) {
+; CHECK-LABEL: isnan_v4f32_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    vfclass.v v8, v8
+; CHECK-NEXT:    li a0, 768
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %1 = call <4 x i1> @llvm.vp.is.fpclass.v4f32(<4 x float> %x, i32 3, <4 x i1> %m, i32 %evl) ; nan
+  ret <4 x i1> %1
+}
+
+define <8 x i1> @isnan_v8f32(<8 x float> %x, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: isnan_v8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    vfclass.v v10, v8, v0.t
+; CHECK-NEXT:    li a0, 512
+; CHECK-NEXT:    vmseq.vx v8, v10, a0, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    ret
+  %1 = call <8 x i1> @llvm.vp.is.fpclass.v8f32(<8 x float> %x, i32 2, <8 x i1> %m, i32 %evl)
+  ret <8 x i1> %1
+}
+
+define <8 x i1> @isnan_v8f32_unmasked(<8 x float> %x, i32 zeroext %evl) {
+; CHECK-LABEL: isnan_v8f32_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    vfclass.v v8, v8
+; CHECK-NEXT:    li a0, 512
+; CHECK-NEXT:    vmseq.vx v0, v8, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %1 = call <8 x i1> @llvm.vp.is.fpclass.v8f32(<8 x float> %x, i32 2, <8 x i1> %m, i32 %evl)
+  ret <8 x i1> %1
+}
+
+define <16 x i1> @isnan_v16f32(<16 x float> %x, <16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: isnan_v16f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    vfclass.v v12, v8, v0.t
+; CHECK-NEXT:    li a0, 256
+; CHECK-NEXT:    vmseq.vx v8, v12, a0, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    ret
+  %1 = call <16 x i1> @llvm.vp.is.fpclass.v16f32(<16 x float> %x, i32 1, <16 x i1> %m, i32 %evl)
+  ret <16 x i1> %1
+}
+
+define <16 x i1> @isnan_v16f32_unmasked(<16 x float> %x, i32 zeroext %evl) {
+; CHECK-LABEL: isnan_v16f32_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    vfclass.v v8, v8
+; CHECK-NEXT:    li a0, 256
+; CHECK-NEXT:    vmseq.vx v0, v8, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %1 = call <16 x i1> @llvm.vp.is.fpclass.v16f32(<16 x float> %x, i32 1, <16 x i1> %m, i32 %evl)
+  ret <16 x i1> %1
+}
+
+define <2 x i1> @isnormal_v2f64(<2 x double> %x, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: isnormal_v2f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    vfclass.v v8, v8, v0.t
+; CHECK-NEXT:    li a0, 129
+; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
+; CHECK-NEXT:    vmsne.vi v0, v8, 0, v0.t
+; CHECK-NEXT:    ret
+  %1 = call <2 x i1> @llvm.vp.is.fpclass.v2f64(<2 x double> %x, i32 516, <2 x i1> %m, i32 %evl) ; 0x204 = "inf"
+  ret <2 x i1> %1
+}
+
+define <2 x i1> @isnormal_v2f64_unmasked(<2 x double> %x, i32 zeroext %evl) {
+; CHECK-LABEL: isnormal_v2f64_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    vfclass.v v8, v8
+; CHECK-NEXT:    li a0, 129
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %head = insertelement <2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+  %1 = call <2 x i1> @llvm.vp.is.fpclass.v2f64(<2 x double> %x, i32 516, <2 x i1> %m, i32 %evl) ; 0x204 = "inf"
+  ret <2 x i1> %1
+}
+
+define <4 x i1> @isposinf_v4f64(<4 x double> %x, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: isposinf_v4f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    vfclass.v v10, v8, v0.t
+; CHECK-NEXT:    li a0, 128
+; CHECK-NEXT:    vmseq.vx v8, v10, a0, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    ret
+  %1 = call <4 x i1> @llvm.vp.is.fpclass.v4f64(<4 x double> %x, i32 512, <4 x i1> %m, i32 %evl) ; 0x200 = "+inf"
+  ret <4 x i1> %1
+}
+
+define <4 x i1> @isposinf_v4f64_unmasked(<4 x double> %x, i32 zeroext %evl) {
+; CHECK-LABEL: isposinf_v4f64_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    vfclass.v v8, v8
+; CHECK-NEXT:    li a0, 128
+; CHECK-NEXT:    vmseq.vx v0, v8, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %1 = call <4 x i1> @llvm.vp.is.fpclass.v4f64(<4 x double> %x, i32 512, <4 x i1> %m, i32 %evl) ; 0x200 = "+inf"
+  ret <4 x i1> %1
+}
+
+define <8 x i1> @isneginf_v8f64(<8 x double> %x, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: isneginf_v8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    vfclass.v v12, v8, v0.t
+; CHECK-NEXT:    vmseq.vi v8, v12, 1, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    ret
+  %1 = call <8 x i1> @llvm.vp.is.fpclass.v8f64(<8 x double> %x, i32 4, <8 x i1> %m, i32 %evl) ; "-inf"
+  ret <8 x i1> %1
+}
+
+define <8 x i1> @isneginf_v8f64_unmasked(<8 x double> %x, i32 zeroext %evl) {
+; CHECK-LABEL: isneginf_v8f64_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    vfclass.v v8, v8
+; CHECK-NEXT:    vmseq.vi v0, v8, 1
+; CHECK-NEXT:    ret
+  %head = insertelement <8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+  %1 = call <8 x i1> @llvm.vp.is.fpclass.v8f64(<8 x double> %x, i32 4, <8 x i1> %m, i32 %evl) ; "-inf"
+  ret <8 x i1> %1
+}
+
+define <16 x i1> @isfinite_v16f64(<16 x double> %x, <16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: isfinite_v16f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT:    vfclass.v v8, v8, v0.t
+; CHECK-NEXT:    li a0, 126
+; CHECK-NEXT:    vand.vx v16, v8, a0, v0.t
+; CHECK-NEXT:    vmsne.vi v8, v16, 0, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    ret
+  %1 = call <16 x i1> @llvm.vp.is.fpclass.v16f64(<16 x double> %x, i32 504, <16 x i1> %m, i32 %evl) ; 0x1f8 = "finite"
+  ret <16 x i1> %1
+}
+
+define <16 x i1> @isfinite_v16f64_unmasked(<16 x double> %x, i32 zeroext %evl) {
+; CHECK-LABEL: isfinite_v16f64_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT:    vfclass.v v8, v8
+; CHECK-NEXT:    li a0, 126
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %1 = call <16 x i1> @llvm.vp.is.fpclass.v16f64(<16 x double> %x, i32 504, <16 x i1> %m, i32 %evl) ; 0x1f8 = "finite"
+  ret <16 x i1> %1
+}
+
+define <16 x i1> @isposfinite_v16f64(<16 x double> %x, <16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: isposfinite_v16f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT:    vfclass.v v8, v8, v0.t
+; CHECK-NEXT:    li a0, 112
+; CHECK-NEXT:    vand.vx v16, v8, a0, v0.t
+; CHECK-NEXT:    vmsne.vi v8, v16, 0, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    ret
+  %1 = call <16 x i1> @llvm.vp.is.fpclass.v16f64(<16 x double> %x, i32 448, <16 x i1> %m, i32 %evl) ; 0x1c0 = "+finite"
+  ret <16 x i1> %1
+}
+
+define <16 x i1> @isnegfinite_v16f64_unmasked(<16 x double> %x, i32 zeroext %evl) {
+; CHECK-LABEL: isnegfinite_v16f64_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT:    vfclass.v v8, v8
+; CHECK-NEXT:    vand.vi v8, v8, 14
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %1 = call <16 x i1> @llvm.vp.is.fpclass.v16f64(<16 x double> %x, i32 56, <16 x i1> %m, i32 %evl) ; 0x38 = "-finite"
+  ret <16 x i1> %1
+}
+
+define <16 x i1> @isnotfinite_v16f64(<16 x double> %x, <16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: isnotfinite_v16f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT:    vfclass.v v8, v8, v0.t
+; CHECK-NEXT:    li a0, 897
+; CHECK-NEXT:    vand.vx v16, v8, a0, v0.t
+; CHECK-NEXT:    vmsne.vi v8, v16, 0, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    ret
+  %1 = call <16 x i1> @llvm.vp.is.fpclass.v16f64(<16 x double> %x, i32 519, <16 x i1> %m, i32 %evl) ; 0x207 = "inf|nan"
+  ret <16 x i1> %1
+}
+
+define <16 x i1> @isnotfinite_v16f64_unmasked(<16 x double> %x, i32 zeroext %evl) {
+; CHECK-LABEL: isnotfinite_v16f64_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT:    vfclass.v v8, v8
+; CHECK-NEXT:    li a0, 897
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %head = insertelement <16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+  %1 = call <16 x i1> @llvm.vp.is.fpclass.v16f64(<16 x double> %x, i32 519, <16 x i1> %m, i32 %evl) ; 0x207 = "inf|nan"
+  ret <16 x i1> %1
+}
+
+declare <2 x i1> @llvm.vp.is.fpclass.v2f16(<2 x half>, i32, <2 x i1>, i32)
+declare <2 x i1> @llvm.vp.is.fpclass.v2f32(<2 x float>, i32, <2 x i1>, i32)
+declare <4 x i1> @llvm.vp.is.fpclass.v4f32(<4 x float>, i32, <4 x i1>, i32)
+declare <8 x i1> @llvm.vp.is.fpclass.v8f32(<8 x float>, i32, <8 x i1>, i32)
+declare <16 x i1> @llvm.vp.is.fpclass.v16f32(<16 x float>, i32, <16 x i1>, i32)
+declare <2 x i1> @llvm.vp.is.fpclass.v2f64(<2 x double>, i32, <2 x i1>, i32)
+declare <4 x i1> @llvm.vp.is.fpclass.v4f64(<4 x double>, i32, <4 x i1>, i32)
+declare <8 x i1> @llvm.vp.is.fpclass.v8f64(<8 x double>, i32, <8 x i1>, i32)
+declare <16 x i1> @llvm.vp.is.fpclass.v16f64(<16 x double>, i32, <16 x i1>, i32)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfclass-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfclass-vp.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfclass-vp.ll
@@ -0,0 +1,245 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=ilp32d \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=lp64d \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s
+
+define <vscale x 2 x i1> @isnan_nxv2f16(<vscale x 2 x half> %x, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: isnan_nxv2f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    vfclass.v v8, v8, v0.t
+; CHECK-NEXT:    li a0, 768
+; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %1 = call <vscale x 2 x i1> @llvm.vp.is.fpclass.nxv2f16(<vscale x 2 x half> %x, i32 3, <vscale x 2 x i1> %m, i32 %evl) ; nan
+  ret <vscale x 2 x i1> %1
+}
+
+define <vscale x 2 x i1> @isnan_nxv2f16_unmasked(<vscale x 2 x half> %x, i32 zeroext %evl) {
+; CHECK-LABEL: isnan_nxv2f16_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    vfclass.v v8, v8
+; CHECK-NEXT:    li a0, 768
+; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %1 = call <vscale x 2 x i1> @llvm.vp.is.fpclass.nxv2f16(<vscale x 2 x half> %x, i32 3, <vscale x 2 x i1> %m, i32 %evl) ; nan
+  ret <vscale x 2 x i1> %1
+}
+
+define <vscale x 2 x i1> @isnan_nxv2f32(<vscale x 2 x float> %x, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: isnan_nxv2f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    vfclass.v v8, v8, v0.t
+; CHECK-NEXT:    li a0, 927
+; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %1 = call <vscale x 2 x i1> @llvm.vp.is.fpclass.nxv2f32(<vscale x 2 x float> %x, i32 639, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x i1> %1
+}
+
+define <vscale x 2 x i1> @isnan_nxv2f32_unmasked(<vscale x 2 x float> %x, i32 zeroext %evl) {
+; CHECK-LABEL: isnan_nxv2f32_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    vfclass.v v8, v8
+; CHECK-NEXT:    li a0, 927
+; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %1 = call <vscale x 2 x i1> @llvm.vp.is.fpclass.nxv2f32(<vscale x 2 x float> %x, i32 639, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x i1> %1
+}
+
+define <vscale x 4 x i1> @isnan_nxv4f32(<vscale x 4 x float> %x, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: isnan_nxv4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    vfclass.v v8, v8, v0.t
+; CHECK-NEXT:    li a0, 768
+; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %1 = call <vscale x 4 x i1> @llvm.vp.is.fpclass.nxv4f32(<vscale x 4 x float> %x, i32 3, <vscale x 4 x i1> %m, i32 %evl) ; nan
+  ret <vscale x 4 x i1> %1
+}
+
+define <vscale x 4 x i1> @isnan_nxv4f32_unmasked(<vscale x 4 x float> %x, i32 zeroext %evl) {
+; CHECK-LABEL: isnan_nxv4f32_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    vfclass.v v8, v8
+; CHECK-NEXT:    li a0, 768
+; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+  %1 = call <vscale x 4 x i1> @llvm.vp.is.fpclass.nxv4f32(<vscale x 4 x float> %x, i32 3, <vscale x 4 x i1> %m, i32 %evl) ; nan
+  ret <vscale x 4 x i1> %1
+}
+
+define <vscale x 8 x i1> @isnan_nxv8f32(<vscale x 8 x float> %x, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: isnan_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    vfclass.v v8, v8, v0.t
+; CHECK-NEXT:    li a0, 512
+; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vmseq.vx v0, v8, a0
+; CHECK-NEXT:    ret
+  %1 = call <vscale x 8 x i1> @llvm.vp.is.fpclass.nxv8f32(<vscale x 8 x float> %x, i32 2, <vscale x 8 x i1> %m, i32 %evl)
+  ret <vscale x 8 x i1> %1
+}
+
+define <vscale x 8 x i1> @isnan_nxv8f32_unmasked(<vscale x 8 x float> %x, i32 zeroext %evl) {
+; CHECK-LABEL: isnan_nxv8f32_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    vfclass.v v8, v8
+; CHECK-NEXT:    li a0, 512
+; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vmseq.vx v0, v8, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+  %1 = call <vscale x 8 x i1> @llvm.vp.is.fpclass.nxv8f32(<vscale x 8 x float> %x, i32 2, <vscale x 8 x i1> %m, i32 %evl)
+  ret <vscale x 8 x i1> %1
+}
+
+define <vscale x 16 x i1> @isnan_nxv16f32(<vscale x 16 x float> %x, <vscale x 16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: isnan_nxv16f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT:    vfclass.v v8, v8, v0.t
+; CHECK-NEXT:    li a0, 256
+; CHECK-NEXT:    vsetvli a1, zero, e32, m8, ta, ma
+; CHECK-NEXT:    vmseq.vx v0, v8, a0
+; CHECK-NEXT:    ret
+  %1 = call <vscale x 16 x i1> @llvm.vp.is.fpclass.nxv16f32(<vscale x 16 x float> %x, i32 1, <vscale x 16 x i1> %m, i32 %evl)
+  ret <vscale x 16 x i1> %1
+}
+
+define <vscale x 16 x i1> @isnan_nxv16f32_unmasked(<vscale x 16 x float> %x, i32 zeroext %evl) {
+; CHECK-LABEL: isnan_nxv16f32_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT:    vfclass.v v8, v8
+; CHECK-NEXT:    li a0, 256
+; CHECK-NEXT:    vsetvli a1, zero, e32, m8, ta, ma
+; CHECK-NEXT:    vmseq.vx v0, v8, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+  %1 = call <vscale x 16 x i1> @llvm.vp.is.fpclass.nxv16f32(<vscale x 16 x float> %x, i32 1, <vscale x 16 x i1> %m, i32 %evl)
+  ret <vscale x 16 x i1> %1
+}
+
+define <vscale x 2 x i1> @isnormal_nxv2f64(<vscale x 2 x double> %x, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: isnormal_nxv2f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    vfclass.v v8, v8, v0.t
+; CHECK-NEXT:    li a0, 129
+; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %1 = call <vscale x 2 x i1> @llvm.vp.is.fpclass.nxv2f64(<vscale x 2 x double> %x, i32 516, <vscale x 2 x i1> %m, i32 %evl) ; 0x204 = "inf"
+  ret <vscale x 2 x i1> %1
+}
+
+define <vscale x 2 x i1> @isnormal_nxv2f64_unmasked(<vscale x 2 x double> %x, i32 zeroext %evl) {
+; CHECK-LABEL: isnormal_nxv2f64_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    vfclass.v v8, v8
+; CHECK-NEXT:    li a0, 129
+; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %1 = call <vscale x 2 x i1> @llvm.vp.is.fpclass.nxv2f64(<vscale x 2 x double> %x, i32 516, <vscale x 2 x i1> %m, i32 %evl) ; 0x204 = "inf"
+  ret <vscale x 2 x i1> %1
+}
+
+define <vscale x 4 x i1> @isposinf_nxv4f64(<vscale x 4 x double> %x, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: isposinf_nxv4f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    vfclass.v v8, v8, v0.t
+; CHECK-NEXT:    li a0, 128
+; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
+; CHECK-NEXT:    vmseq.vx v0, v8, a0
+; CHECK-NEXT:    ret
+  %1 = call <vscale x 4 x i1> @llvm.vp.is.fpclass.nxv4f64(<vscale x 4 x double> %x, i32 512, <vscale x 4 x i1> %m, i32 %evl) ; 0x200 = "+inf"
+  ret <vscale x 4 x i1> %1
+}
+
+define <vscale x 4 x i1> @isposinf_nxv4f64_unmasked(<vscale x 4 x double> %x, i32 zeroext %evl) {
+; CHECK-LABEL: isposinf_nxv4f64_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    vfclass.v v8, v8
+; CHECK-NEXT:    li a0, 128
+; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
+; CHECK-NEXT:    vmseq.vx v0, v8, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+  %1 = call <vscale x 4 x i1> @llvm.vp.is.fpclass.nxv4f64(<vscale x 4 x double> %x, i32 512, <vscale x 4 x i1> %m, i32 %evl) ; 0x200 = "+inf"
+  ret <vscale x 4 x i1> %1
+}
+
+define <vscale x 8 x i1> @isneginf_nxv8f64(<vscale x 8 x double> %x, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: isneginf_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT:    vfclass.v v8, v8, v0.t
+; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; CHECK-NEXT:    vmseq.vi v0, v8, 1
+; CHECK-NEXT:    ret
+  %1 = call <vscale x 8 x i1> @llvm.vp.is.fpclass.nxv8f64(<vscale x 8 x double> %x, i32 4, <vscale x 8 x i1> %m, i32 %evl) ; "-inf"
+  ret <vscale x 8 x i1> %1
+}
+
+define <vscale x 8 x i1> @isneginf_nxv8f64_unmasked(<vscale x 8 x double> %x, i32 zeroext %evl) {
+; CHECK-LABEL: isneginf_nxv8f64_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT:    vfclass.v v8, v8
+; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; CHECK-NEXT:    vmseq.vi v0, v8, 1
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+  %1 = call <vscale x 8 x i1> @llvm.vp.is.fpclass.nxv8f64(<vscale x 8 x double> %x, i32 4, <vscale x 8 x i1> %m, i32 %evl) ; "-inf"
+  ret <vscale x 8 x i1> %1
+}
+
+
+declare <vscale x 2 x i1> @llvm.vp.is.fpclass.nxv2f16(<vscale x 2 x half>, i32, <vscale x 2 x i1>, i32)
+declare <vscale x 2 x i1> @llvm.vp.is.fpclass.nxv2f32(<vscale x 2 x float>, i32, <vscale x 2 x i1>, i32)
+declare <vscale x 4 x i1> @llvm.vp.is.fpclass.nxv4f32(<vscale x 4 x float>, i32, <vscale x 4 x i1>, i32)
+declare <vscale x 8 x i1> @llvm.vp.is.fpclass.nxv8f32(<vscale x 8 x float>, i32, <vscale x 8 x i1>, i32)
+declare <vscale x 16 x i1> @llvm.vp.is.fpclass.nxv16f32(<vscale x 16 x float>, i32, <vscale x 16 x i1>, i32)
declare <vscale x 2 x i1> @llvm.vp.is.fpclass.nxv2f64(<vscale x 2 x double>, i32, <vscale x 2 x i1>, i32)
+declare <vscale x 4 x i1> @llvm.vp.is.fpclass.nxv4f64(<vscale x 4 x double>, i32, <vscale x 4 x i1>, i32)
+declare <vscale x 8 x i1> @llvm.vp.is.fpclass.nxv8f64(<vscale x 8 x double>, i32, <vscale x 8 x i1>, i32)
diff --git a/llvm/test/Verifier/llvm.vp.is.fpclass.ll b/llvm/test/Verifier/llvm.vp.is.fpclass.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/Verifier/llvm.vp.is.fpclass.ll
@@ -0,0 +1,23 @@
+; RUN: not llvm-as < %s 2>&1 | FileCheck %s
+
+; CHECK: immarg operand has non-immediate parameter
+; CHECK-NEXT: i32 %variable
+; CHECK-NEXT: %ret = call <8 x i1> @llvm.vp.is.fpclass.v8f64(<8 x double> %x, i32 %variable, <8 x i1> %m, i32 %evl)
+define <8 x i1> @test_mask_variable(<8 x double> %x, i32 %variable, <8 x i1> %m, i32 zeroext %evl) {
+  %ret = call <8 x i1> @llvm.vp.is.fpclass.v8f64(<8 x double> %x, i32 %variable, <8 x i1> %m, i32 %evl)
+  ret <8 x i1> %ret
+}
+
+; CHECK: unsupported bits for llvm.vp.is.fpclass test mask
+define <8 x i1> @test_mask_neg1(<8 x double> %x, <8 x i1> %m, i32 zeroext %evl) {
+  %ret = call <8 x i1> @llvm.vp.is.fpclass.v8f64(<8 x double> %x, i32 -1, <8 x i1> %m, i32 %evl)
+  ret <8 x i1> %ret
+}
+
+; CHECK: unsupported bits for llvm.vp.is.fpclass test mask
+define <8 x i1> @test_mask_bit11(<8 x double> %x, <8 x i1> %m, i32 zeroext %evl) {
+  %ret = call <8 x i1> @llvm.vp.is.fpclass.v8f64(<8 x double> %x, i32 2048, <8 x i1> %m, i32 %evl)
+  ret <8 x i1> %ret
+}
+
+declare <8 x i1> @llvm.vp.is.fpclass.v8f64(<8 x double>, i32, <8 x i1>, i32)
diff --git a/llvm/test/Verifier/vp-intrinsics.ll b/llvm/test/Verifier/vp-intrinsics.ll
--- a/llvm/test/Verifier/vp-intrinsics.ll
+++ b/llvm/test/Verifier/vp-intrinsics.ll
@@ -24,6 +24,7 @@
   %r2 = call <8 x double> @llvm.vp.fmul.v8f64(<8 x double> %f0, <8 x double> %f1, <8 x i1> %m, i32 %n)
   %r3 = call <8 x double> @llvm.vp.fdiv.v8f64(<8 x double> %f0, <8 x double> %f1, <8 x i1> %m, i32 %n)
   %r4 = call <8 x double> @llvm.vp.frem.v8f64(<8 x double> %f0, <8 x double> %f1, <8 x i1> %m, i32 %n)
+  %r5 = call <8 x i1> @llvm.vp.is.fpclass.v8f64(<8 x double> %f0, i32 639, <8 x i1> %m, i32 %n)
   ret void
 }
 
@@ -99,6 +100,7 @@
 declare <8 x double> @llvm.vp.fmul.v8f64(<8 x double>, <8 x double>, <8 x i1>, i32)
 declare <8 x double> @llvm.vp.fdiv.v8f64(<8 x double>, <8 x double>, <8 x i1>, i32)
 declare <8 x double> @llvm.vp.frem.v8f64(<8 x double>, <8 x double>, <8 x i1>, i32)
+declare <8 x i1> @llvm.vp.is.fpclass.v8f64(<8 x double>, i32, <8 x i1>, i32)
 ; reductions
 declare i32 @llvm.vp.reduce.add.v8i32(i32, <8 x i32>, <8 x i1>, i32)
 declare i32 @llvm.vp.reduce.mul.v8i32(i32, <8 x i32>, <8 x i1>, i32)
diff --git a/llvm/unittests/IR/VPIntrinsicTest.cpp b/llvm/unittests/IR/VPIntrinsicTest.cpp
--- a/llvm/unittests/IR/VPIntrinsicTest.cpp
+++ b/llvm/unittests/IR/VPIntrinsicTest.cpp
@@ -115,6 +115,8 @@
          "i32>, i32)";
   Str << " declare <8 x i32> @llvm.vp.select.v8i32(<8 x i1>, <8 x i32>, <8 x "
          "i32>, i32)";
+  Str << " declare <8 x i1> @llvm.vp.is.fpclass.v8f32(<8 x float>, i32, <8 x "
+         "i1>, i32)";
   Str << " declare <8 x i32> @llvm.experimental.vp.splice.v8i32(<8 x "
          "i32>, <8 x i32>, i32, <8 x i1>, i32, i32) ";
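
Usage sketch (illustrative, not part of the patch): the test-mask operand takes the same ``fcXxx`` bit values as ``llvm.is.fpclass``, and the result is an ordinary ``i1`` vector, so it composes with other VP intrinsics such as ``llvm.vp.select``. The function name ``@flush_nans`` and the all-true mask idiom (the same one the ``*_unmasked`` tests use) are assumptions of this example.

.. code-block:: llvm

      define <4 x float> @flush_nans(<4 x float> %v, i32 zeroext %evl) {
        ; Build an all-true mask, matching the *_unmasked test idiom.
        %head = insertelement <4 x i1> poison, i1 true, i32 0
        %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
        ; i32 3 = fcSNan|fcQNan, i.e. "is NaN".
        %is.nan = call <4 x i1> @llvm.vp.is.fpclass.v4f32(<4 x float> %v, i32 3, <4 x i1> %m, i32 %evl)
        ; Replace NaN lanes with +0.0 and keep all other lanes of %v.
        %r = call <4 x float> @llvm.vp.select.v4f32(<4 x i1> %is.nan, <4 x float> zeroinitializer, <4 x float> %v, i32 %evl)
        ret <4 x float> %r
      }

      declare <4 x i1> @llvm.vp.is.fpclass.v4f32(<4 x float>, i32, <4 x i1>, i32)
      declare <4 x float> @llvm.vp.select.v4f32(<4 x i1>, <4 x float>, <4 x float>, i32)

Using ``llvm.vp.select`` rather than a plain ``select`` keeps the whole sequence under one explicit vector length, so on an RVV target it should stay within a single ``vsetvli`` region.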