diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -299,7 +299,21 @@
                     [IntrNoMem]>, RISCVVIntrinsic {
     let ExtendOperand = 3;
   }
-
+  // For FP classify operations.
+  // Output: (bit mask type output)
+  // Input: (vector_in, vl)
+  class RISCVClassifyNoMask
+        : Intrinsic<[LLVMVectorOfBitcastsToInt<0>],
+                    [llvm_anyvector_ty, llvm_anyint_ty],
+                    [IntrNoMem]>, RISCVVIntrinsic;
+  // For FP classify operations with mask.
+  // Output: (bit mask type output)
+  // Input: (maskedoff, vector_in, mask, vl)
+  class RISCVClassifyMask
+        : Intrinsic<[LLVMVectorOfBitcastsToInt<0>],
+                    [LLVMVectorOfBitcastsToInt<0>, llvm_anyvector_ty,
+                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
+                    [IntrNoMem]>, RISCVVIntrinsic;
   // For Saturating binary operations.
   // The destination vector type is the same as first source vector.
   // Input: (vector_in, vector_in/scalar_in, vl)
@@ -485,6 +499,10 @@
     def "int_riscv_" # NAME : RISCVUnaryAANoMask;
     def "int_riscv_" # NAME # "_mask" : RISCVUnaryAAMask;
   }
+  multiclass RISCVUnaryAB {
+    def "int_riscv_" # NAME : RISCVUnaryABNoMask;
+    def "int_riscv_" # NAME # "_mask" : RISCVUnaryABMask;
+  }
   // AAX means the destination type(A) is the same as the first source
   // type(A). X means any type for the second source operand.
   multiclass RISCVBinaryAAX {
@@ -526,6 +544,10 @@
     def "int_riscv_" # NAME : RISCVCompareNoMask;
     def "int_riscv_" # NAME # "_mask" : RISCVCompareMask;
   }
+  multiclass RISCVClassify {
+    def "int_riscv_" # NAME : RISCVClassifyNoMask;
+    def "int_riscv_" # NAME # "_mask" : RISCVClassifyMask;
+  }
   multiclass RISCVTernaryWide {
     def "int_riscv_" # NAME : RISCVTernaryWideNoMask;
     def "int_riscv_" # NAME # "_mask" : RISCVTernaryWideMask;
   }
@@ -538,10 +560,6 @@
     def "int_riscv_" # NAME : RISCVMaskUnarySOutNoMask;
     def "int_riscv_" # NAME # "_mask" : RISCVMaskUnarySOutMask;
   }
-  multiclass RISCVUnaryAB {
-    def "int_riscv_" # NAME : RISCVUnaryABNoMask;
-    def "int_riscv_" # NAME # "_mask" : RISCVUnaryABMask;
-  }
   multiclass RISCVMaskUnaryMOut {
     def "int_riscv_" # NAME : RISCVUnaryNoMask;
     def "int_riscv_" # NAME # "_mask" : RISCVMaskUnaryMOutMask;
   }
@@ -708,6 +726,8 @@
   defm vfsgnjn : RISCVBinaryAAX;
   defm vfsgnjx : RISCVBinaryAAX;
 
+  defm vfclass : RISCVClassify;
+
   defm vfmerge : RISCVBinaryWithV0;
 
   defm vslideup : RISCVTernaryAAAX;
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -2706,6 +2706,11 @@
 defm PseudoVMFGT : VPseudoBinaryM_VX;
 defm PseudoVMFGE : VPseudoBinaryM_VX;
 
+//===----------------------------------------------------------------------===//
+// 14.14. Vector Floating-Point Classify Instruction
+//===----------------------------------------------------------------------===//
+defm PseudoVFCLASS : VPseudoUnaryV_V;
+
 //===----------------------------------------------------------------------===//
 // 14.15. Vector Floating-Point Merge Instruction
 //===----------------------------------------------------------------------===//
@@ -2785,6 +2790,7 @@
 //===----------------------------------------------------------------------===//
 defm PseudoVFWREDSUM : VPseudoReductionV_VS;
 defm PseudoVFWREDOSUM : VPseudoReductionV_VS;
+
 } // Predicates = [HasStdExtV, HasStdExtF]
 
 //===----------------------------------------------------------------------===//
@@ -3347,6 +3353,11 @@
 defm "" : VPatBinaryM_VX<"int_riscv_vmfgt", "PseudoVMFGT", AllFloatVectors>;
 defm "" : VPatBinaryM_VX<"int_riscv_vmfge", "PseudoVMFGE", AllFloatVectors>;
 
+//===----------------------------------------------------------------------===//
+// 14.14. Vector Floating-Point Classify Instruction
+//===----------------------------------------------------------------------===//
+defm "" : VPatConversionVI_VF<"int_riscv_vfclass", "PseudoVFCLASS">;
+
 //===----------------------------------------------------------------------===//
 // 14.15. Vector Floating-Point Merge Instruction
 //===----------------------------------------------------------------------===//
@@ -3441,6 +3452,7 @@
 //===----------------------------------------------------------------------===//
 defm "" : VPatReductionW_VS<"int_riscv_vfwredsum", "PseudoVFWREDSUM", /*IsFloat=*/1>;
 defm "" : VPatReductionW_VS<"int_riscv_vfwredosum", "PseudoVFWREDOSUM", /*IsFloat=*/1>;
+
 } // Predicates = [HasStdExtV, HasStdExtF]
 
 //===----------------------------------------------------------------------===//
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfclass-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfclass-rv32.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfclass-rv32.ll
@@ -0,0 +1,512 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i16> @llvm.riscv.vfclass.nxv1i16(
+  <vscale x 1 x half>,
+  i32);
+
+define <vscale x 1 x i16> @intrinsic_vfclass_v_nxv1i16_nxv1f16(
+; CHECK-LABEL: intrinsic_vfclass_v_nxv1i16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vfclass.v v16, v16
+; CHECK-NEXT:    jalr zero, 0(ra)
+  <vscale x 1 x half> %0,
+  i32 %1) nounwind {
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vfclass.nxv1i16(
+    <vscale x 1 x half> %0,
+    i32 %1)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vfclass.mask.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x half>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i16> @intrinsic_vfclass_mask_v_nxv1i16_nxv1f16(
+; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv1i16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
+; CHECK-NEXT:    vfclass.v v16, v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+  <vscale x 1 x i16> %0,
+  <vscale x 1 x half> %1,
+  <vscale x 1 x i1> %2,
+  i32 %3) nounwind {
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vfclass.mask.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x half> %1,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vfclass.nxv2i16(
+  <vscale x 2 x half>,
+  i32);
+
+define <vscale x 2 x i16> @intrinsic_vfclass_v_nxv2i16_nxv2f16(
+; CHECK-LABEL: intrinsic_vfclass_v_nxv2i16_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vfclass.v v16, v16
+; CHECK-NEXT:    jalr zero, 0(ra)
+  <vscale x 2 x half> %0,
+  i32 %1) nounwind {
+entry:
+  %a = call <vscale x 2 x i16> @llvm.riscv.vfclass.nxv2i16(
+    <vscale x 2 x half> %0,
+    i32 %1)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vfclass.mask.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x half>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i16> @intrinsic_vfclass_mask_v_nxv2i16_nxv2f16(
+; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv2i16_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
+; CHECK-NEXT:    vfclass.v v16, v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+  <vscale x 2 x i16> %0,
+  <vscale x 2 x half> %1,
+  <vscale x 2 x i1> %2,
+  i32 %3)
nounwind { +entry: + %a = call @llvm.riscv.vfclass.mask.nxv2i16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfclass.nxv4i16( + , + i32); + +define @intrinsic_vfclass_v_nxv4i16_nxv4f16( +; CHECK-LABEL: intrinsic_vfclass_v_nxv4i16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vfclass.v v16, v16 +; CHECK-NEXT: jalr zero, 0(ra) + %0, + i32 %1) nounwind { +entry: + %a = call @llvm.riscv.vfclass.nxv4i16( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfclass.mask.nxv4i16( + , + , + , + i32); + +define @intrinsic_vfclass_mask_v_nxv4i16_nxv4f16( +; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv4i16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu +; CHECK-NEXT: vfclass.v v16, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) + %0, + %1, + %2, + i32 %3) nounwind { +entry: + %a = call @llvm.riscv.vfclass.mask.nxv4i16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfclass.nxv8i16( + , + i32); + +define @intrinsic_vfclass_v_nxv8i16_nxv8f16( +; CHECK-LABEL: intrinsic_vfclass_v_nxv8i16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vfclass.v v16, v16 +; CHECK-NEXT: jalr zero, 0(ra) + %0, + i32 %1) nounwind { +entry: + %a = call @llvm.riscv.vfclass.nxv8i16( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfclass.mask.nxv8i16( + , + , + , + i32); + +define @intrinsic_vfclass_mask_v_nxv8i16_nxv8f16( +; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv8i16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu +; CHECK-NEXT: vfclass.v v16, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) + %0, + %1, + %2, + i32 %3) nounwind { +entry: + %a = call @llvm.riscv.vfclass.mask.nxv8i16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfclass.nxv16i16( + , + i32); + +define @intrinsic_vfclass_v_nxv16i16_nxv16f16( +; CHECK-LABEL: intrinsic_vfclass_v_nxv16i16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m4,ta,mu +; CHECK-NEXT: vfclass.v v16, v16 +; CHECK-NEXT: jalr zero, 0(ra) + %0, + i32 %1) nounwind { +entry: + %a = call @llvm.riscv.vfclass.nxv16i16( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfclass.mask.nxv16i16( + , + , + , + i32); + +define @intrinsic_vfclass_mask_v_nxv16i16_nxv16f16( +; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv16i16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m4,tu,mu +; CHECK-NEXT: vfclass.v v16, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) + %0, + %1, + %2, + i32 %3) nounwind { +entry: + %a = call @llvm.riscv.vfclass.mask.nxv16i16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfclass.nxv32i16( + , + i32); + +define @intrinsic_vfclass_v_nxv32i16_nxv32f16( +; CHECK-LABEL: intrinsic_vfclass_v_nxv32i16_nxv32f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m8,ta,mu +; CHECK-NEXT: vfclass.v v16, v16 +; CHECK-NEXT: jalr zero, 0(ra) + %0, + i32 %1) nounwind { +entry: + %a = call @llvm.riscv.vfclass.nxv32i16( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfclass.mask.nxv32i16( + , + , + , + i32); + +define @intrinsic_vfclass_mask_v_nxv32i16_nxv32f16( +; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv32i16_nxv32f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a0) +; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu +; CHECK-NEXT: vfclass.v v16, v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) + %0, + %1, + %2, + i32 %3) nounwind { +entry: + %a = call 
@llvm.riscv.vfclass.mask.nxv32i16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfclass.nxv1i32( + , + i32); + +define @intrinsic_vfclass_v_nxv1i32_nxv1f32( +; CHECK-LABEL: intrinsic_vfclass_v_nxv1i32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vfclass.v v16, v16 +; CHECK-NEXT: jalr zero, 0(ra) + %0, + i32 %1) nounwind { +entry: + %a = call @llvm.riscv.vfclass.nxv1i32( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfclass.mask.nxv1i32( + , + , + , + i32); + +define @intrinsic_vfclass_mask_v_nxv1i32_nxv1f32( +; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv1i32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu +; CHECK-NEXT: vfclass.v v16, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) + %0, + %1, + %2, + i32 %3) nounwind { +entry: + %a = call @llvm.riscv.vfclass.mask.nxv1i32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfclass.nxv2i32( + , + i32); + +define @intrinsic_vfclass_v_nxv2i32_nxv2f32( +; CHECK-LABEL: intrinsic_vfclass_v_nxv2i32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vfclass.v v16, v16 +; CHECK-NEXT: jalr zero, 0(ra) + %0, + i32 %1) nounwind { +entry: + %a = call @llvm.riscv.vfclass.nxv2i32( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfclass.mask.nxv2i32( + , + , + , + i32); + +define @intrinsic_vfclass_mask_v_nxv2i32_nxv2f32( +; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv2i32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu +; CHECK-NEXT: vfclass.v v16, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) + %0, + %1, + %2, + i32 %3) nounwind { +entry: + %a = call @llvm.riscv.vfclass.mask.nxv2i32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfclass.nxv4i32( + , + i32); + +define @intrinsic_vfclass_v_nxv4i32_nxv4f32( +; CHECK-LABEL: intrinsic_vfclass_v_nxv4i32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vfclass.v v16, v16 +; CHECK-NEXT: jalr zero, 0(ra) + %0, + i32 %1) nounwind { +entry: + %a = call @llvm.riscv.vfclass.nxv4i32( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfclass.mask.nxv4i32( + , + , + , + i32); + +define @intrinsic_vfclass_mask_v_nxv4i32_nxv4f32( +; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv4i32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu +; CHECK-NEXT: vfclass.v v16, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) + %0, + %1, + %2, + i32 %3) nounwind { +entry: + %a = call @llvm.riscv.vfclass.mask.nxv4i32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfclass.nxv8i32( + , + i32); + +define @intrinsic_vfclass_v_nxv8i32_nxv8f32( +; CHECK-LABEL: intrinsic_vfclass_v_nxv8i32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu +; CHECK-NEXT: vfclass.v v16, v16 +; CHECK-NEXT: jalr zero, 0(ra) + %0, + i32 %1) nounwind { +entry: + %a = call @llvm.riscv.vfclass.nxv8i32( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfclass.mask.nxv8i32( + , + , + , + i32); + +define @intrinsic_vfclass_mask_v_nxv8i32_nxv8f32( +; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv8i32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m4,tu,mu +; CHECK-NEXT: vfclass.v v16, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) + %0, + %1, + %2, + i32 %3) nounwind { +entry: + %a = call @llvm.riscv.vfclass.mask.nxv8i32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfclass.nxv16i32( + , + i32); + +define 
@intrinsic_vfclass_v_nxv16i32_nxv16f32( +; CHECK-LABEL: intrinsic_vfclass_v_nxv16i32_nxv16f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m8,ta,mu +; CHECK-NEXT: vfclass.v v16, v16 +; CHECK-NEXT: jalr zero, 0(ra) + %0, + i32 %1) nounwind { +entry: + %a = call @llvm.riscv.vfclass.nxv16i32( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfclass.mask.nxv16i32( + , + , + , + i32); + +define @intrinsic_vfclass_mask_v_nxv16i32_nxv16f32( +; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv16i32_nxv16f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a0) +; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu +; CHECK-NEXT: vfclass.v v16, v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) + %0, + %1, + %2, + i32 %3) nounwind { +entry: + %a = call @llvm.riscv.vfclass.mask.nxv16i32( + %0, + %1, + %2, + i32 %3) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfclass-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfclass-rv64.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfclass-rv64.ll @@ -0,0 +1,698 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vfclass.nxv1i16( + , + i64); + +define @intrinsic_vfclass_v_nxv1i16_nxv1f16( +; CHECK-LABEL: intrinsic_vfclass_v_nxv1i16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vfclass.v v16, v16 +; CHECK-NEXT: jalr zero, 0(ra) + %0, + i64 %1) nounwind { +entry: + %a = call @llvm.riscv.vfclass.nxv1i16( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfclass.mask.nxv1i16( + , + , + , + i64); + +define @intrinsic_vfclass_mask_v_nxv1i16_nxv1f16( +; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv1i16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu +; CHECK-NEXT: vfclass.v v16, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) + %0, + %1, + %2, + i64 %3) nounwind { +entry: + %a = call @llvm.riscv.vfclass.mask.nxv1i16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfclass.nxv2i16( + , + i64); + +define @intrinsic_vfclass_v_nxv2i16_nxv2f16( +; CHECK-LABEL: intrinsic_vfclass_v_nxv2i16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vfclass.v v16, v16 +; CHECK-NEXT: jalr zero, 0(ra) + %0, + i64 %1) nounwind { +entry: + %a = call @llvm.riscv.vfclass.nxv2i16( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfclass.mask.nxv2i16( + , + , + , + i64); + +define @intrinsic_vfclass_mask_v_nxv2i16_nxv2f16( +; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv2i16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu +; CHECK-NEXT: vfclass.v v16, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) + %0, + %1, + %2, + i64 %3) nounwind { +entry: + %a = call @llvm.riscv.vfclass.mask.nxv2i16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfclass.nxv4i16( + , + i64); + +define @intrinsic_vfclass_v_nxv4i16_nxv4f16( +; CHECK-LABEL: intrinsic_vfclass_v_nxv4i16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vfclass.v v16, v16 +; CHECK-NEXT: jalr zero, 0(ra) + %0, + i64 %1) nounwind { +entry: + %a = call @llvm.riscv.vfclass.nxv4i16( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfclass.mask.nxv4i16( + , + , + , + i64); + +define @intrinsic_vfclass_mask_v_nxv4i16_nxv4f16( +; CHECK-LABEL: 
intrinsic_vfclass_mask_v_nxv4i16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu +; CHECK-NEXT: vfclass.v v16, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) + %0, + %1, + %2, + i64 %3) nounwind { +entry: + %a = call @llvm.riscv.vfclass.mask.nxv4i16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfclass.nxv8i16( + , + i64); + +define @intrinsic_vfclass_v_nxv8i16_nxv8f16( +; CHECK-LABEL: intrinsic_vfclass_v_nxv8i16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vfclass.v v16, v16 +; CHECK-NEXT: jalr zero, 0(ra) + %0, + i64 %1) nounwind { +entry: + %a = call @llvm.riscv.vfclass.nxv8i16( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfclass.mask.nxv8i16( + , + , + , + i64); + +define @intrinsic_vfclass_mask_v_nxv8i16_nxv8f16( +; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv8i16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu +; CHECK-NEXT: vfclass.v v16, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) + %0, + %1, + %2, + i64 %3) nounwind { +entry: + %a = call @llvm.riscv.vfclass.mask.nxv8i16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfclass.nxv16i16( + , + i64); + +define @intrinsic_vfclass_v_nxv16i16_nxv16f16( +; CHECK-LABEL: intrinsic_vfclass_v_nxv16i16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m4,ta,mu +; CHECK-NEXT: vfclass.v v16, v16 +; CHECK-NEXT: jalr zero, 0(ra) + %0, + i64 %1) nounwind { +entry: + %a = call @llvm.riscv.vfclass.nxv16i16( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfclass.mask.nxv16i16( + , + , + , + i64); + +define @intrinsic_vfclass_mask_v_nxv16i16_nxv16f16( +; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv16i16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m4,tu,mu +; CHECK-NEXT: vfclass.v v16, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) + %0, + %1, + %2, + i64 %3) nounwind { +entry: + %a = call @llvm.riscv.vfclass.mask.nxv16i16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfclass.nxv32i16( + , + i64); + +define @intrinsic_vfclass_v_nxv32i16_nxv32f16( +; CHECK-LABEL: intrinsic_vfclass_v_nxv32i16_nxv32f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m8,ta,mu +; CHECK-NEXT: vfclass.v v16, v16 +; CHECK-NEXT: jalr zero, 0(ra) + %0, + i64 %1) nounwind { +entry: + %a = call @llvm.riscv.vfclass.nxv32i16( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfclass.mask.nxv32i16( + , + , + , + i64); + +define @intrinsic_vfclass_mask_v_nxv32i16_nxv32f16( +; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv32i16_nxv32f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a0) +; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu +; CHECK-NEXT: vfclass.v v16, v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) + %0, + %1, + %2, + i64 %3) nounwind { +entry: + %a = call @llvm.riscv.vfclass.mask.nxv32i16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfclass.nxv1i32( + , + i64); + +define @intrinsic_vfclass_v_nxv1i32_nxv1f32( +; CHECK-LABEL: intrinsic_vfclass_v_nxv1i32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vfclass.v v16, v16 +; CHECK-NEXT: jalr zero, 0(ra) + %0, + i64 %1) nounwind { +entry: + %a = call @llvm.riscv.vfclass.nxv1i32( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfclass.mask.nxv1i32( + , + , + , + i64); + +define @intrinsic_vfclass_mask_v_nxv1i32_nxv1f32( +; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv1i32_nxv1f32: +; 
CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu +; CHECK-NEXT: vfclass.v v16, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) + %0, + %1, + %2, + i64 %3) nounwind { +entry: + %a = call @llvm.riscv.vfclass.mask.nxv1i32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfclass.nxv2i32( + , + i64); + +define @intrinsic_vfclass_v_nxv2i32_nxv2f32( +; CHECK-LABEL: intrinsic_vfclass_v_nxv2i32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vfclass.v v16, v16 +; CHECK-NEXT: jalr zero, 0(ra) + %0, + i64 %1) nounwind { +entry: + %a = call @llvm.riscv.vfclass.nxv2i32( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfclass.mask.nxv2i32( + , + , + , + i64); + +define @intrinsic_vfclass_mask_v_nxv2i32_nxv2f32( +; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv2i32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu +; CHECK-NEXT: vfclass.v v16, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) + %0, + %1, + %2, + i64 %3) nounwind { +entry: + %a = call @llvm.riscv.vfclass.mask.nxv2i32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfclass.nxv4i32( + , + i64); + +define @intrinsic_vfclass_v_nxv4i32_nxv4f32( +; CHECK-LABEL: intrinsic_vfclass_v_nxv4i32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vfclass.v v16, v16 +; CHECK-NEXT: jalr zero, 0(ra) + %0, + i64 %1) nounwind { +entry: + %a = call @llvm.riscv.vfclass.nxv4i32( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfclass.mask.nxv4i32( + , + , + , + i64); + +define @intrinsic_vfclass_mask_v_nxv4i32_nxv4f32( +; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv4i32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu +; CHECK-NEXT: vfclass.v v16, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) + %0, + %1, + %2, + i64 %3) nounwind { +entry: + %a = call @llvm.riscv.vfclass.mask.nxv4i32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfclass.nxv8i32( + , + i64); + +define @intrinsic_vfclass_v_nxv8i32_nxv8f32( +; CHECK-LABEL: intrinsic_vfclass_v_nxv8i32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu +; CHECK-NEXT: vfclass.v v16, v16 +; CHECK-NEXT: jalr zero, 0(ra) + %0, + i64 %1) nounwind { +entry: + %a = call @llvm.riscv.vfclass.nxv8i32( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfclass.mask.nxv8i32( + , + , + , + i64); + +define @intrinsic_vfclass_mask_v_nxv8i32_nxv8f32( +; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv8i32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m4,tu,mu +; CHECK-NEXT: vfclass.v v16, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) + %0, + %1, + %2, + i64 %3) nounwind { +entry: + %a = call @llvm.riscv.vfclass.mask.nxv8i32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfclass.nxv16i32( + , + i64); + +define @intrinsic_vfclass_v_nxv16i32_nxv16f32( +; CHECK-LABEL: intrinsic_vfclass_v_nxv16i32_nxv16f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m8,ta,mu +; CHECK-NEXT: vfclass.v v16, v16 +; CHECK-NEXT: jalr zero, 0(ra) + %0, + i64 %1) nounwind { +entry: + %a = call @llvm.riscv.vfclass.nxv16i32( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfclass.mask.nxv16i32( + , + , + , + i64); + +define @intrinsic_vfclass_mask_v_nxv16i32_nxv16f32( +; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv16i32_nxv16f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a0) +; CHECK-NEXT: vsetvli a0, a1, 
e32,m8,tu,mu +; CHECK-NEXT: vfclass.v v16, v8, v0.t +; CHECK-NEXT: jalr zero, 0(ra) + %0, + %1, + %2, + i64 %3) nounwind { +entry: + %a = call @llvm.riscv.vfclass.mask.nxv16i32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfclass.nxv1i64( + , + i64); + +define @intrinsic_vfclass_v_nxv1i64_nxv1f64( +; CHECK-LABEL: intrinsic_vfclass_v_nxv1i64_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu +; CHECK-NEXT: vfclass.v v16, v16 +; CHECK-NEXT: jalr zero, 0(ra) + %0, + i64 %1) nounwind { +entry: + %a = call @llvm.riscv.vfclass.nxv1i64( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfclass.mask.nxv1i64( + , + , + , + i64); + +define @intrinsic_vfclass_mask_v_nxv1i64_nxv1f64( +; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv1i64_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu +; CHECK-NEXT: vfclass.v v16, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) + %0, + %1, + %2, + i64 %3) nounwind { +entry: + %a = call @llvm.riscv.vfclass.mask.nxv1i64( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfclass.nxv2i64( + , + i64); + +define @intrinsic_vfclass_v_nxv2i64_nxv2f64( +; CHECK-LABEL: intrinsic_vfclass_v_nxv2i64_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu +; CHECK-NEXT: vfclass.v v16, v16 +; CHECK-NEXT: jalr zero, 0(ra) + %0, + i64 %1) nounwind { +entry: + %a = call @llvm.riscv.vfclass.nxv2i64( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfclass.mask.nxv2i64( + , + , + , + i64); + +define @intrinsic_vfclass_mask_v_nxv2i64_nxv2f64( +; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv2i64_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu +; CHECK-NEXT: vfclass.v v16, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) + %0, + %1, + %2, + i64 %3) nounwind { +entry: + %a = call @llvm.riscv.vfclass.mask.nxv2i64( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfclass.nxv4i64( + , + i64); + +define @intrinsic_vfclass_v_nxv4i64_nxv4f64( +; CHECK-LABEL: intrinsic_vfclass_v_nxv4i64_nxv4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu +; CHECK-NEXT: vfclass.v v16, v16 +; CHECK-NEXT: jalr zero, 0(ra) + %0, + i64 %1) nounwind { +entry: + %a = call @llvm.riscv.vfclass.nxv4i64( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfclass.mask.nxv4i64( + , + , + , + i64); + +define @intrinsic_vfclass_mask_v_nxv4i64_nxv4f64( +; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv4i64_nxv4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu +; CHECK-NEXT: vfclass.v v16, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) + %0, + %1, + %2, + i64 %3) nounwind { +entry: + %a = call @llvm.riscv.vfclass.mask.nxv4i64( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfclass.nxv8i64( + , + i64); + +define @intrinsic_vfclass_v_nxv8i64_nxv8f64( +; CHECK-LABEL: intrinsic_vfclass_v_nxv8i64_nxv8f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu +; CHECK-NEXT: vfclass.v v16, v16 +; CHECK-NEXT: jalr zero, 0(ra) + %0, + i64 %1) nounwind { +entry: + %a = call @llvm.riscv.vfclass.nxv8i64( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfclass.mask.nxv8i64( + , + , + , + i64); + +define @intrinsic_vfclass_mask_v_nxv8i64_nxv8f64( +; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv8i64_nxv8f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu +; CHECK-NEXT: vfclass.v v16, v8, v0.t +; CHECK-NEXT: 
jalr zero, 0(ra) + %0, + %1, + %2, + i64 %3) nounwind { +entry: + %a = call @llvm.riscv.vfclass.mask.nxv8i64( + %0, + %1, + %2, + i64 %3) + + ret %a +}
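
; ---------------------------------------------------------------------------
; Usage sketch (not part of the patch): a minimal illustration of the intrinsic
; shapes defined above by RISCVClassifyNoMask and RISCVClassifyMask. The result
; is an integer vector with the same element width as the floating-point
; source; the unmasked form takes (vector, vl) and the masked form takes
; (maskedoff, vector, mask, vl). The vl type is i64 as in the RV64 tests above,
; and the function name @vfclass_sketch is hypothetical.
declare <vscale x 1 x i16> @llvm.riscv.vfclass.nxv1i16(
  <vscale x 1 x half>,
  i64);

declare <vscale x 1 x i16> @llvm.riscv.vfclass.mask.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  i64);

define <vscale x 1 x i16> @vfclass_sketch(<vscale x 1 x half> %v, i64 %vl) nounwind {
entry:
  ; Unmasked classify of every element of %v under the given vl.
  %bits = call <vscale x 1 x i16> @llvm.riscv.vfclass.nxv1i16(
    <vscale x 1 x half> %v,
    i64 %vl)
  ret <vscale x 1 x i16> %bits
}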