diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -389,6 +389,21 @@
       : Intrinsic<[llvm_anyint_ty],
                   [llvm_anyvector_ty, LLVMMatchType<1>, LLVMMatchType<0>],
                   [IntrNoMem]>, RISCVVIntrinsic;
+  // For conversion unary operations.
+  // The destination vector type can differ from the source vector type;
+  // both are overloaded.
+  // Input: (vector_in, vl)
+  class RISCVConversionNoMask
+        : Intrinsic<[llvm_anyvector_ty],
+                    [llvm_anyvector_ty, llvm_anyint_ty],
+                    [IntrNoMem]>, RISCVVIntrinsic;
+  // For conversion unary operations with mask.
+  // Input: (maskedoff, vector_in, mask, vl)
+  class RISCVConversionMask
+        : Intrinsic<[llvm_anyvector_ty],
+                    [LLVMMatchType<0>, llvm_anyvector_ty,
+                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
+                    [IntrNoMem]>, RISCVVIntrinsic;

   multiclass RISCVUSLoad {
     def "int_riscv_" # NAME : RISCVUSLoad;
@@ -469,6 +484,10 @@
     def "int_riscv_" # NAME : RISCVMaskUnarySOutNoMask;
     def "int_riscv_" # NAME # "_mask" : RISCVMaskUnarySOutMask;
   }
+  multiclass RISCVConversion {
+    def "int_riscv_" # NAME : RISCVConversionNoMask;
+    def "int_riscv_" # NAME # "_mask" : RISCVConversionMask;
+  }

   defm vle : RISCVUSLoad;
   defm vleff : RISCVUSLoad;
@@ -686,4 +705,11 @@
   defm vpopc : RISCVMaskUnarySOut;
   defm vfirst : RISCVMaskUnarySOut;

+  defm vfwcvt_f_xu_v : RISCVConversion;
+  defm vfwcvt_f_x_v : RISCVConversion;
+  defm vfwcvt_xu_f_v : RISCVConversion;
+  defm vfwcvt_x_f_v : RISCVConversion;
+  defm vfwcvt_rtz_xu_f_v : RISCVConversion;
+  defm vfwcvt_rtz_x_f_v : RISCVConversion;
+  defm vfwcvt_f_f_v : RISCVConversion;
 } // TargetPrefix = "riscv"
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -178,6 +178,16 @@
   }
 }

+// This functor is used to obtain the float vector type that has the same SEW
+// and multiplier as the input parameter type.
+class GetFloatVTypeInfo<VTypeInfo vti>
+{
+  // Equivalent float vector type. E.g.
+  //   VI16M8 -> VF16M8
+  //   VF32M1 -> VF32M1 (identity)
+  VTypeInfo FVti = !cast<VTypeInfo>(!subst("VI", "VF", !cast<string>(vti)));
+}
+
 // This functor is used to obtain the int vector type that has the same SEW and
 // multiplier as the input parameter type
 class GetIntVTypeInfo<VTypeInfo vti>
@@ -247,6 +257,19 @@
   def : VTypeInfoToWide<VF32M4, VF64M8>;
 }

+defset list<VTypeInfoToWide> AllWidenableIntToFloatVectors = {
+  def : VTypeInfoToWide<VI16MF4, VI32MF2>;
+  def : VTypeInfoToWide<VI16MF2, VI32M1>;
+  def : VTypeInfoToWide<VI16M1, VI32M2>;
+  def : VTypeInfoToWide<VI16M2, VI32M4>;
+  def : VTypeInfoToWide<VI16M4, VI32M8>;
+
+  def : VTypeInfoToWide<VI32MF2, VI64M1>;
+  def : VTypeInfoToWide<VI32M1, VI64M2>;
+  def : VTypeInfoToWide<VI32M2, VI64M4>;
+  def : VTypeInfoToWide<VI32M4, VI64M8>;
+}
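The RISCVConversionNoMask/RISCVConversionMask classes in IntrinsicsRISCV.td above fix the IR-level shape shared by all seven vfwcvt intrinsics: the unmasked form takes (vector_in, vl) and the masked form takes (maskedoff, vector_in, mask, vl), with both the result and source vector types overloaded. As a sketch of what this produces, these two declarations match the nxv1f32/nxv1i16 instances exercised by the tests below; the trailing VL operand is i32 on RV32 and i64 on RV64 because the classes use llvm_anyint_ty:

  declare <vscale x 1 x float> @llvm.riscv.vfwcvt.f.x.v.nxv1f32.nxv1i16(
    <vscale x 1 x i16>, i32)
  declare <vscale x 1 x float> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f32.nxv1i16(
    <vscale x 1 x float>, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)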
 // This class holds the record of the RISCVVPseudoTable below.
 // This represents the information we need in codegen for each pseudo.
 // The definition should be consistent with `struct PseudoInfo` in
@@ -653,6 +676,43 @@
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
 }

+class VPseudoConversionNoMask<VReg RetClass,
+                              VReg Op1Class,
+                              string Constraint = ""> :
+        Pseudo<(outs RetClass:$rd),
+               (ins Op1Class:$rs2, GPR:$vl, ixlenimm:$sew), []>,
+        RISCVVPseudo {
+  let mayLoad = 0;
+  let mayStore = 0;
+  let hasSideEffects = 0;
+  let usesCustomInserter = 1;
+  let Constraints = Constraint;
+  let Uses = [VL, VTYPE];
+  let VLIndex = 2;
+  let SEWIndex = 3;
+  let HasDummyMask = 1;
+  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
+class VPseudoConversionMask<VReg RetClass,
+                            VReg Op1Class,
+                            string Constraint = ""> :
+        Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
+               (ins GetVRegNoV0<RetClass>.R:$merge, Op1Class:$rs2,
+                    VMaskOp:$vm, GPR:$vl, ixlenimm:$sew), []>,
+        RISCVVPseudo {
+  let mayLoad = 0;
+  let mayStore = 0;
+  let hasSideEffects = 0;
+  let usesCustomInserter = 1;
+  let Constraints = Join<[Constraint, "$rd = $merge"], ",">.ret;
+  let Uses = [VL, VTYPE];
+  let VLIndex = 4;
+  let SEWIndex = 5;
+  let MergeOpIndex = 1;
+  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
 multiclass VPseudoUSLoad {
   foreach lmul = MxList.m in {
     defvar LInfo = lmul.MX;
@@ -1041,9 +1101,60 @@
     defm _VS : VPseudoTernary;
   }
 }

+multiclass VPseudoConversion<VReg RetClass,
+                             VReg Op1Class,
+                             LMULInfo MInfo,
+                             string Constraint = ""> {
+  let VLMul = MInfo.value in {
+    def "_" # MInfo.MX : VPseudoConversionNoMask<RetClass, Op1Class,
+                                                 Constraint>;
+    def "_" # MInfo.MX # "_MASK" : VPseudoConversionMask<RetClass, Op1Class,
+                                                         Constraint>;
+  }
+}
+
+multiclass VPseudoConversionW_V {
+  foreach m = MxList.m[0-5] in
+    defm _V : VPseudoConversion<m.wvrclass, m.vrclass, m>;
+}
+
 //===----------------------------------------------------------------------===//
 // Helpers to define the intrinsic patterns.
 //===----------------------------------------------------------------------===//
+class VPatConversionNoMask<string intrinsic_name,
+                           string inst,
+                           string kind,
+                           ValueType result_type,
+                           ValueType op1_type,
+                           int sew,
+                           LMULInfo vlmul,
+                           VReg op1_reg_class> :
+  Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
+                   (op1_type op1_reg_class:$rs1),
+                   (XLenVT GPR:$vl))),
+      (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
+                   (op1_type op1_reg_class:$rs1),
+                   (NoX0 GPR:$vl), sew)>;
+
+class VPatConversionMask<string intrinsic_name,
+                         string inst,
+                         string kind,
+                         ValueType result_type,
+                         ValueType op1_type,
+                         ValueType mask_type,
+                         int sew,
+                         LMULInfo vlmul,
+                         VReg result_reg_class,
+                         VReg op1_reg_class> :
+  Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
+                   (result_type result_reg_class:$merge),
+                   (op1_type op1_reg_class:$rs1),
+                   (mask_type V0),
+                   (XLenVT GPR:$vl))),
+      (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX#"_MASK")
+                   (result_type result_reg_class:$merge),
+                   (op1_type op1_reg_class:$rs1),
+                   (mask_type V0), (NoX0 GPR:$vl), sew)>;
+
 class VPatBinaryNoMask<string intrinsic_name,
   }

+multiclass VPatConversion<string intrinsic,
+                          string inst,
+                          string kind,
+                          ValueType result_type,
+                          ValueType op1_type,
+                          ValueType mask_type,
+                          int sew,
+                          LMULInfo vlmul,
+                          VReg result_reg_class,
+                          VReg op1_reg_class>
+{
+  def : VPatConversionNoMask<intrinsic, inst, kind, result_type, op1_type,
+                             sew, vlmul, op1_reg_class>;
+  def : VPatConversionMask<intrinsic, inst, kind, result_type, op1_type,
+                           mask_type, sew, vlmul, result_reg_class,
+                           op1_reg_class>;
+}
+
 multiclass VPatBinaryV_VV<string intrinsic, string instruction,
                           list<VTypeInfo> vtilist> {
   foreach vti = vtilist in
@@ -1815,6 +1942,42 @@
   }
 }

+multiclass VPatConversionWI_VF<string intrinsic, string instruction>
+{
+  foreach fvtiToFWti = AllWidenableFloatVectors in
+  {
+    defvar fvti = fvtiToFWti.Vti;
+    defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti;
+
+    defm : VPatConversion<intrinsic, instruction, "V",
+                          iwti.Vector, fvti.Vector, fvti.Mask, fvti.SEW,
+                          fvti.LMul, iwti.RegClass, fvti.RegClass>;
+  }
+}
+
+multiclass VPatConversionWF_VI<string intrinsic, string instruction>
+{
+  foreach vtiToWti = AllWidenableIntToFloatVectors in
+  {
+    defvar vti = vtiToWti.Vti;
+    defvar fwti = GetFloatVTypeInfo<vtiToWti.Wti>.FVti;
+
+    defm : VPatConversion<intrinsic, instruction, "V",
+                          fwti.Vector, vti.Vector, vti.Mask, vti.SEW,
+                          vti.LMul, fwti.RegClass, vti.RegClass>;
+  }
+}
+
+multiclass VPatConversionWF_VF<string intrinsic, string instruction>
+{
+  foreach fvtiToFWti = AllWidenableFloatVectors in
+  {
+    defvar fvti = fvtiToFWti.Vti;
+    defvar fwti = fvtiToFWti.Wti;
+
+    defm : VPatConversion<intrinsic, instruction, "V",
+                          fwti.Vector, fvti.Vector, fvti.Mask, fvti.SEW,
+                          fvti.LMul, fwti.RegClass, fvti.RegClass>;
+  }
+}
+
 //===----------------------------------------------------------------------===//
 // Pseudo instructions and patterns.
 //===----------------------------------------------------------------------===//
@@ -2140,6 +2303,17 @@
 //===----------------------------------------------------------------------===//
 defm PseudoVFMERGE : VPseudoBinaryV_XM;
+
+//===----------------------------------------------------------------------===//
+// 14.18.
Widening Floating-Point/Integer Type-Convert Instructions +//===----------------------------------------------------------------------===// +defm PseudoVFWCVT_XU_F : VPseudoConversionW_V; +defm PseudoVFWCVT_X_F : VPseudoConversionW_V; +defm PseudoVFWCVT_RTZ_XU_F : VPseudoConversionW_V; +defm PseudoVFWCVT_RTZ_X_F : VPseudoConversionW_V; +defm PseudoVFWCVT_F_XU : VPseudoConversionW_V; +defm PseudoVFWCVT_F_X : VPseudoConversionW_V; +defm PseudoVFWCVT_F_F : VPseudoConversionW_V; } // Predicates = [HasStdExtV, HasStdExtF] let Predicates = [HasStdExtV] in { @@ -2644,6 +2818,16 @@ defm "" : VPatBinaryV_XM<"int_riscv_vfmerge", "PseudoVFMERGE", /*CarryOut = */0, /*vtilist=*/AllFloatVectors>; +//===----------------------------------------------------------------------===// +// 14.18. Widening Floating-Point/Integer Type-Convert Instructions +//===----------------------------------------------------------------------===// +defm "" : VPatConversionWI_VF<"int_riscv_vfwcvt_xu_f_v", "PseudoVFWCVT_XU_F">; +defm "" : VPatConversionWI_VF<"int_riscv_vfwcvt_x_f_v", "PseudoVFWCVT_X_F">; +defm "" : VPatConversionWI_VF<"int_riscv_vfwcvt_rtz_xu_f_v", "PseudoVFWCVT_RTZ_XU_F">; +defm "" : VPatConversionWI_VF<"int_riscv_vfwcvt_rtz_x_f_v", "PseudoVFWCVT_RTZ_X_F">; +defm "" : VPatConversionWF_VI<"int_riscv_vfwcvt_f_xu_v", "PseudoVFWCVT_F_XU">; +defm "" : VPatConversionWF_VI<"int_riscv_vfwcvt_f_x_v", "PseudoVFWCVT_F_X">; +defm "" : VPatConversionWF_VF<"int_riscv_vfwcvt_f_f_v", "PseudoVFWCVT_F_F">; } // Predicates = [HasStdExtV, HasStdExtF] let Predicates = [HasStdExtV] in { diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f-rv32.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f-rv32.ll @@ -0,0 +1,181 @@ +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vfwcvt.f.f.v.nxv1f32.nxv1f16( + , + i32); + +define @intrinsic_vfwcvt_f.f.v_nxv1f32_nxv1f16( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv1f32_nxv1f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfwcvt.f.f.v {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwcvt.f.f.v.nxv1f32.nxv1f16( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1f16( + , + , + , + i32); + +define @intrinsic_vfwcvt_mask_f.f.v_nxv1f32_nxv1f16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv1f32_nxv1f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfwcvt.f.f.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1f16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.f.v.nxv2f32.nxv2f16( + , + i32); + +define @intrinsic_vfwcvt_f.f.v_nxv2f32_nxv2f16( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv2f32_nxv2f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfwcvt.f.f.v {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwcvt.f.f.v.nxv2f32.nxv2f16( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f32.nxv2f16( + , + , + , + i32); + +define @intrinsic_vfwcvt_mask_f.f.v_nxv2f32_nxv2f16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv2f32_nxv2f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfwcvt.f.f.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f32.nxv2f16( + %0, + %1, 
+ %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.f.v.nxv4f32.nxv4f16( + , + i32); + +define @intrinsic_vfwcvt_f.f.v_nxv4f32_nxv4f16( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv4f32_nxv4f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfwcvt.f.f.v {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwcvt.f.f.v.nxv4f32.nxv4f16( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f32.nxv4f16( + , + , + , + i32); + +define @intrinsic_vfwcvt_mask_f.f.v_nxv4f32_nxv4f16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv4f32_nxv4f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfwcvt.f.f.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f32.nxv4f16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.f.v.nxv8f32.nxv8f16( + , + i32); + +define @intrinsic_vfwcvt_f.f.v_nxv8f32_nxv8f16( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv8f32_nxv8f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfwcvt.f.f.v {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwcvt.f.f.v.nxv8f32.nxv8f16( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f32.nxv8f16( + , + , + , + i32); + +define @intrinsic_vfwcvt_mask_f.f.v_nxv8f32_nxv8f16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv8f32_nxv8f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfwcvt.f.f.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f32.nxv8f16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.f.v.nxv16f32.nxv16f16( + , + i32); + +define @intrinsic_vfwcvt_f.f.v_nxv16f32_nxv16f16( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv16f32_nxv16f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfwcvt.f.f.v {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwcvt.f.f.v.nxv16f32.nxv16f16( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.f.v.mask.nxv16f32.nxv16f16( + , + , + , + i32); + +define @intrinsic_vfwcvt_mask_f.f.v_nxv16f32_nxv16f16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv16f32_nxv16f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfwcvt.f.f.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv16f32.nxv16f16( + %0, + %1, + %2, + i32 %3) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f-rv64.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f-rv64.ll @@ -0,0 +1,325 @@ +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vfwcvt.f.f.v.nxv1f32.nxv1f16( + , + i64); + +define @intrinsic_vfwcvt_f.f.v_nxv1f32_nxv1f16( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv1f32_nxv1f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfwcvt.f.f.v {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwcvt.f.f.v.nxv1f32.nxv1f16( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1f16( + , + , + , + i64); + +define @intrinsic_vfwcvt_mask_f.f.v_nxv1f32_nxv1f16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv1f32_nxv1f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu 
+; CHECK: vfwcvt.f.f.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1f16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.f.v.nxv2f32.nxv2f16( + , + i64); + +define @intrinsic_vfwcvt_f.f.v_nxv2f32_nxv2f16( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv2f32_nxv2f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfwcvt.f.f.v {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwcvt.f.f.v.nxv2f32.nxv2f16( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f32.nxv2f16( + , + , + , + i64); + +define @intrinsic_vfwcvt_mask_f.f.v_nxv2f32_nxv2f16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv2f32_nxv2f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfwcvt.f.f.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f32.nxv2f16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.f.v.nxv4f32.nxv4f16( + , + i64); + +define @intrinsic_vfwcvt_f.f.v_nxv4f32_nxv4f16( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv4f32_nxv4f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfwcvt.f.f.v {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwcvt.f.f.v.nxv4f32.nxv4f16( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f32.nxv4f16( + , + , + , + i64); + +define @intrinsic_vfwcvt_mask_f.f.v_nxv4f32_nxv4f16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv4f32_nxv4f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfwcvt.f.f.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f32.nxv4f16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.f.v.nxv8f32.nxv8f16( + , + i64); + +define @intrinsic_vfwcvt_f.f.v_nxv8f32_nxv8f16( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv8f32_nxv8f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfwcvt.f.f.v {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwcvt.f.f.v.nxv8f32.nxv8f16( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f32.nxv8f16( + , + , + , + i64); + +define @intrinsic_vfwcvt_mask_f.f.v_nxv8f32_nxv8f16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv8f32_nxv8f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfwcvt.f.f.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f32.nxv8f16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.f.v.nxv16f32.nxv16f16( + , + i64); + +define @intrinsic_vfwcvt_f.f.v_nxv16f32_nxv16f16( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv16f32_nxv16f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfwcvt.f.f.v {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwcvt.f.f.v.nxv16f32.nxv16f16( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.f.v.mask.nxv16f32.nxv16f16( + , + , + , + i64); + +define @intrinsic_vfwcvt_mask_f.f.v_nxv16f32_nxv16f16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv16f32_nxv16f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfwcvt.f.f.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv16f32.nxv16f16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.f.v.nxv1f64.nxv1f32( + , + i64); + +define 
@intrinsic_vfwcvt_f.f.v_nxv1f64_nxv1f32( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv1f64_nxv1f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfwcvt.f.f.v {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwcvt.f.f.v.nxv1f64.nxv1f32( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f64.nxv1f32( + , + , + , + i64); + +define @intrinsic_vfwcvt_mask_f.f.v_nxv1f64_nxv1f32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv1f64_nxv1f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfwcvt.f.f.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f64.nxv1f32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.f.v.nxv2f64.nxv2f32( + , + i64); + +define @intrinsic_vfwcvt_f.f.v_nxv2f64_nxv2f32( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv2f64_nxv2f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfwcvt.f.f.v {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwcvt.f.f.v.nxv2f64.nxv2f32( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f64.nxv2f32( + , + , + , + i64); + +define @intrinsic_vfwcvt_mask_f.f.v_nxv2f64_nxv2f32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv2f64_nxv2f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfwcvt.f.f.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f64.nxv2f32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.f.v.nxv4f64.nxv4f32( + , + i64); + +define @intrinsic_vfwcvt_f.f.v_nxv4f64_nxv4f32( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv4f64_nxv4f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfwcvt.f.f.v {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwcvt.f.f.v.nxv4f64.nxv4f32( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f64.nxv4f32( + , + , + , + i64); + +define @intrinsic_vfwcvt_mask_f.f.v_nxv4f64_nxv4f32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv4f64_nxv4f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfwcvt.f.f.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f64.nxv4f32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.f.v.nxv8f64.nxv8f32( + , + i64); + +define @intrinsic_vfwcvt_f.f.v_nxv8f64_nxv8f32( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv8f64_nxv8f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfwcvt.f.f.v {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwcvt.f.f.v.nxv8f64.nxv8f32( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f64.nxv8f32( + , + , + , + i64); + +define @intrinsic_vfwcvt_mask_f.f.v_nxv8f64_nxv8f32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv8f64_nxv8f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfwcvt.f.f.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f64.nxv8f32( + %0, + %1, + %2, + i64 %3) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-x-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-x-rv32.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-x-rv32.ll @@ -0,0 +1,181 @@ +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \ +; RUN: 
--riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vfwcvt.f.x.v.nxv1f32.nxv1i16( + , + i32); + +define @intrinsic_vfwcvt_f.x.v_nxv1f32_nxv1i16( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv1f32_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfwcvt.f.x.v {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwcvt.f.x.v.nxv1f32.nxv1i16( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f32.nxv1i16( + , + , + , + i32); + +define @intrinsic_vfwcvt_mask_f.x.v_nxv1f32_nxv1i16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv1f32_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfwcvt.f.x.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f32.nxv1i16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.x.v.nxv2f32.nxv2i16( + , + i32); + +define @intrinsic_vfwcvt_f.x.v_nxv2f32_nxv2i16( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv2f32_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfwcvt.f.x.v {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwcvt.f.x.v.nxv2f32.nxv2i16( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f32.nxv2i16( + , + , + , + i32); + +define @intrinsic_vfwcvt_mask_f.x.v_nxv2f32_nxv2i16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv2f32_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfwcvt.f.x.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f32.nxv2i16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.x.v.nxv4f32.nxv4i16( + , + i32); + +define @intrinsic_vfwcvt_f.x.v_nxv4f32_nxv4i16( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv4f32_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfwcvt.f.x.v {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwcvt.f.x.v.nxv4f32.nxv4i16( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f32.nxv4i16( + , + , + , + i32); + +define @intrinsic_vfwcvt_mask_f.x.v_nxv4f32_nxv4i16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv4f32_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfwcvt.f.x.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f32.nxv4i16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.x.v.nxv8f32.nxv8i16( + , + i32); + +define @intrinsic_vfwcvt_f.x.v_nxv8f32_nxv8i16( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv8f32_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfwcvt.f.x.v {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwcvt.f.x.v.nxv8f32.nxv8i16( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f32.nxv8i16( + , + , + , + i32); + +define @intrinsic_vfwcvt_mask_f.x.v_nxv8f32_nxv8i16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv8f32_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfwcvt.f.x.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f32.nxv8i16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.x.v.nxv16f32.nxv16i16( + , + i32); + +define @intrinsic_vfwcvt_f.x.v_nxv16f32_nxv16i16( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv16f32_nxv16i16 
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfwcvt.f.x.v {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwcvt.f.x.v.nxv16f32.nxv16i16( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv16f32.nxv16i16( + , + , + , + i32); + +define @intrinsic_vfwcvt_mask_f.x.v_nxv16f32_nxv16i16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv16f32_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfwcvt.f.x.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv16f32.nxv16i16( + %0, + %1, + %2, + i32 %3) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-x-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-x-rv64.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-x-rv64.ll @@ -0,0 +1,325 @@ +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vfwcvt.f.x.v.nxv1f32.nxv1i16( + , + i64); + +define @intrinsic_vfwcvt_f.x.v_nxv1f32_nxv1i16( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv1f32_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfwcvt.f.x.v {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwcvt.f.x.v.nxv1f32.nxv1i16( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f32.nxv1i16( + , + , + , + i64); + +define @intrinsic_vfwcvt_mask_f.x.v_nxv1f32_nxv1i16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv1f32_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfwcvt.f.x.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f32.nxv1i16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.x.v.nxv2f32.nxv2i16( + , + i64); + +define @intrinsic_vfwcvt_f.x.v_nxv2f32_nxv2i16( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv2f32_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfwcvt.f.x.v {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwcvt.f.x.v.nxv2f32.nxv2i16( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f32.nxv2i16( + , + , + , + i64); + +define @intrinsic_vfwcvt_mask_f.x.v_nxv2f32_nxv2i16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv2f32_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfwcvt.f.x.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f32.nxv2i16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.x.v.nxv4f32.nxv4i16( + , + i64); + +define @intrinsic_vfwcvt_f.x.v_nxv4f32_nxv4i16( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv4f32_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfwcvt.f.x.v {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwcvt.f.x.v.nxv4f32.nxv4i16( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f32.nxv4i16( + , + , + , + i64); + +define @intrinsic_vfwcvt_mask_f.x.v_nxv4f32_nxv4i16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv4f32_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfwcvt.f.x.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f32.nxv4i16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.x.v.nxv8f32.nxv8i16( + , + i64); + +define 
@intrinsic_vfwcvt_f.x.v_nxv8f32_nxv8i16( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv8f32_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfwcvt.f.x.v {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwcvt.f.x.v.nxv8f32.nxv8i16( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f32.nxv8i16( + , + , + , + i64); + +define @intrinsic_vfwcvt_mask_f.x.v_nxv8f32_nxv8i16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv8f32_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfwcvt.f.x.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f32.nxv8i16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.x.v.nxv16f32.nxv16i16( + , + i64); + +define @intrinsic_vfwcvt_f.x.v_nxv16f32_nxv16i16( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv16f32_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfwcvt.f.x.v {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwcvt.f.x.v.nxv16f32.nxv16i16( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv16f32.nxv16i16( + , + , + , + i64); + +define @intrinsic_vfwcvt_mask_f.x.v_nxv16f32_nxv16i16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv16f32_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfwcvt.f.x.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv16f32.nxv16i16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.x.v.nxv1f64.nxv1i32( + , + i64); + +define @intrinsic_vfwcvt_f.x.v_nxv1f64_nxv1i32( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv1f64_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfwcvt.f.x.v {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwcvt.f.x.v.nxv1f64.nxv1i32( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f64.nxv1i32( + , + , + , + i64); + +define @intrinsic_vfwcvt_mask_f.x.v_nxv1f64_nxv1i32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv1f64_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfwcvt.f.x.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f64.nxv1i32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.x.v.nxv2f64.nxv2i32( + , + i64); + +define @intrinsic_vfwcvt_f.x.v_nxv2f64_nxv2i32( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv2f64_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfwcvt.f.x.v {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwcvt.f.x.v.nxv2f64.nxv2i32( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f64.nxv2i32( + , + , + , + i64); + +define @intrinsic_vfwcvt_mask_f.x.v_nxv2f64_nxv2i32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv2f64_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfwcvt.f.x.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f64.nxv2i32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.x.v.nxv4f64.nxv4i32( + , + i64); + +define @intrinsic_vfwcvt_f.x.v_nxv4f64_nxv4i32( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv4f64_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfwcvt.f.x.v {{v[0-9]+}}, {{v[0-9]+}} + %a 
= call @llvm.riscv.vfwcvt.f.x.v.nxv4f64.nxv4i32( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f64.nxv4i32( + , + , + , + i64); + +define @intrinsic_vfwcvt_mask_f.x.v_nxv4f64_nxv4i32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv4f64_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfwcvt.f.x.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f64.nxv4i32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.x.v.nxv8f64.nxv8i32( + , + i64); + +define @intrinsic_vfwcvt_f.x.v_nxv8f64_nxv8i32( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv8f64_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfwcvt.f.x.v {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwcvt.f.x.v.nxv8f64.nxv8i32( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f64.nxv8i32( + , + , + , + i64); + +define @intrinsic_vfwcvt_mask_f.x.v_nxv8f64_nxv8i32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv8f64_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfwcvt.f.x.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f64.nxv8i32( + %0, + %1, + %2, + i64 %3) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-xu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-xu-rv32.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-xu-rv32.ll @@ -0,0 +1,181 @@ +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vfwcvt.f.xu.v.nxv1f32.nxv1i16( + , + i32); + +define @intrinsic_vfwcvt_f.xu.v_nxv1f32_nxv1i16( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv1f32_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfwcvt.f.xu.v {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwcvt.f.xu.v.nxv1f32.nxv1i16( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f32.nxv1i16( + , + , + , + i32); + +define @intrinsic_vfwcvt_mask_f.xu.v_nxv1f32_nxv1i16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv1f32_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfwcvt.f.xu.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f32.nxv1i16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.xu.v.nxv2f32.nxv2i16( + , + i32); + +define @intrinsic_vfwcvt_f.xu.v_nxv2f32_nxv2i16( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv2f32_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfwcvt.f.xu.v {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwcvt.f.xu.v.nxv2f32.nxv2i16( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f32.nxv2i16( + , + , + , + i32); + +define @intrinsic_vfwcvt_mask_f.xu.v_nxv2f32_nxv2i16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv2f32_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfwcvt.f.xu.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f32.nxv2i16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.xu.v.nxv4f32.nxv4i16( + , + i32); + +define @intrinsic_vfwcvt_f.xu.v_nxv4f32_nxv4i16( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vfwcvt_f.xu.v_nxv4f32_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfwcvt.f.xu.v {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwcvt.f.xu.v.nxv4f32.nxv4i16( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f32.nxv4i16( + , + , + , + i32); + +define @intrinsic_vfwcvt_mask_f.xu.v_nxv4f32_nxv4i16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv4f32_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfwcvt.f.xu.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f32.nxv4i16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.xu.v.nxv8f32.nxv8i16( + , + i32); + +define @intrinsic_vfwcvt_f.xu.v_nxv8f32_nxv8i16( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv8f32_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfwcvt.f.xu.v {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwcvt.f.xu.v.nxv8f32.nxv8i16( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f32.nxv8i16( + , + , + , + i32); + +define @intrinsic_vfwcvt_mask_f.xu.v_nxv8f32_nxv8i16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv8f32_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfwcvt.f.xu.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f32.nxv8i16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.xu.v.nxv16f32.nxv16i16( + , + i32); + +define @intrinsic_vfwcvt_f.xu.v_nxv16f32_nxv16i16( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv16f32_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfwcvt.f.xu.v {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwcvt.f.xu.v.nxv16f32.nxv16i16( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16f32.nxv16i16( + , + , + , + i32); + +define @intrinsic_vfwcvt_mask_f.xu.v_nxv16f32_nxv16i16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv16f32_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfwcvt.f.xu.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16f32.nxv16i16( + %0, + %1, + %2, + i32 %3) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-xu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-xu-rv64.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-xu-rv64.ll @@ -0,0 +1,325 @@ +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vfwcvt.f.xu.v.nxv1f32.nxv1i16( + , + i64); + +define @intrinsic_vfwcvt_f.xu.v_nxv1f32_nxv1i16( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv1f32_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfwcvt.f.xu.v {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwcvt.f.xu.v.nxv1f32.nxv1i16( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f32.nxv1i16( + , + , + , + i64); + +define @intrinsic_vfwcvt_mask_f.xu.v_nxv1f32_nxv1i16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv1f32_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfwcvt.f.xu.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f32.nxv1i16( + %0, + %1, + %2, + i64 %3) + + ret %a 
+} + +declare @llvm.riscv.vfwcvt.f.xu.v.nxv2f32.nxv2i16( + , + i64); + +define @intrinsic_vfwcvt_f.xu.v_nxv2f32_nxv2i16( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv2f32_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfwcvt.f.xu.v {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwcvt.f.xu.v.nxv2f32.nxv2i16( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f32.nxv2i16( + , + , + , + i64); + +define @intrinsic_vfwcvt_mask_f.xu.v_nxv2f32_nxv2i16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv2f32_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfwcvt.f.xu.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f32.nxv2i16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.xu.v.nxv4f32.nxv4i16( + , + i64); + +define @intrinsic_vfwcvt_f.xu.v_nxv4f32_nxv4i16( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv4f32_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfwcvt.f.xu.v {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwcvt.f.xu.v.nxv4f32.nxv4i16( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f32.nxv4i16( + , + , + , + i64); + +define @intrinsic_vfwcvt_mask_f.xu.v_nxv4f32_nxv4i16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv4f32_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfwcvt.f.xu.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f32.nxv4i16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.xu.v.nxv8f32.nxv8i16( + , + i64); + +define @intrinsic_vfwcvt_f.xu.v_nxv8f32_nxv8i16( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv8f32_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfwcvt.f.xu.v {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwcvt.f.xu.v.nxv8f32.nxv8i16( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f32.nxv8i16( + , + , + , + i64); + +define @intrinsic_vfwcvt_mask_f.xu.v_nxv8f32_nxv8i16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv8f32_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfwcvt.f.xu.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f32.nxv8i16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.xu.v.nxv16f32.nxv16i16( + , + i64); + +define @intrinsic_vfwcvt_f.xu.v_nxv16f32_nxv16i16( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv16f32_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfwcvt.f.xu.v {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwcvt.f.xu.v.nxv16f32.nxv16i16( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16f32.nxv16i16( + , + , + , + i64); + +define @intrinsic_vfwcvt_mask_f.xu.v_nxv16f32_nxv16i16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv16f32_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfwcvt.f.xu.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16f32.nxv16i16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.xu.v.nxv1f64.nxv1i32( + , + i64); + +define @intrinsic_vfwcvt_f.xu.v_nxv1f64_nxv1i32( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vfwcvt_f.xu.v_nxv1f64_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfwcvt.f.xu.v {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwcvt.f.xu.v.nxv1f64.nxv1i32( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f64.nxv1i32( + , + , + , + i64); + +define @intrinsic_vfwcvt_mask_f.xu.v_nxv1f64_nxv1i32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv1f64_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfwcvt.f.xu.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f64.nxv1i32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.xu.v.nxv2f64.nxv2i32( + , + i64); + +define @intrinsic_vfwcvt_f.xu.v_nxv2f64_nxv2i32( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv2f64_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfwcvt.f.xu.v {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwcvt.f.xu.v.nxv2f64.nxv2i32( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f64.nxv2i32( + , + , + , + i64); + +define @intrinsic_vfwcvt_mask_f.xu.v_nxv2f64_nxv2i32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv2f64_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfwcvt.f.xu.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f64.nxv2i32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.xu.v.nxv4f64.nxv4i32( + , + i64); + +define @intrinsic_vfwcvt_f.xu.v_nxv4f64_nxv4i32( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv4f64_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfwcvt.f.xu.v {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwcvt.f.xu.v.nxv4f64.nxv4i32( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f64.nxv4i32( + , + , + , + i64); + +define @intrinsic_vfwcvt_mask_f.xu.v_nxv4f64_nxv4i32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv4f64_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfwcvt.f.xu.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f64.nxv4i32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.xu.v.nxv8f64.nxv8i32( + , + i64); + +define @intrinsic_vfwcvt_f.xu.v_nxv8f64_nxv8i32( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv8f64_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfwcvt.f.xu.v {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwcvt.f.xu.v.nxv8f64.nxv8i32( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f64.nxv8i32( + , + , + , + i64); + +define @intrinsic_vfwcvt_mask_f.xu.v_nxv8f64_nxv8i32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv8f64_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfwcvt.f.xu.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f64.nxv8i32( + %0, + %1, + %2, + i64 %3) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-x-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-x-f-rv32.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-x-f-rv32.ll @@ -0,0 +1,181 @@ +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s 
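The rtz variants tested in this file and the three files after it differ from vfwcvt.x.f.v/vfwcvt.xu.f.v only in rounding behavior: the V extension specifies that the rtz forms truncate toward zero regardless of the dynamic rounding mode in frm, while the plain forms round according to frm. At the IR level the intrinsics are otherwise identical in shape, so a pair of calls differing only in rounding looks like this sketch (%v and %vl are placeholder values):

  ; rounds according to the dynamic rounding mode in frm
  %dyn = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv1i32.nxv1f16(
    <vscale x 1 x half> %v, i32 %vl)
  ; always truncates toward zero
  %rtz = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i32.nxv1f16(
    <vscale x 1 x half> %v, i32 %vl)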
+declare @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i32.nxv1f16( + , + i32); + +define @intrinsic_vfwcvt_rtz.x.f.v_nxv1i32_nxv1f16( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv1i32_nxv1f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfwcvt.rtz.x.f.v {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i32.nxv1f16( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i32.nxv1f16( + , + , + , + i32); + +define @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv1i32_nxv1f16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv1i32_nxv1f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfwcvt.rtz.x.f.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i32.nxv1f16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i32.nxv2f16( + , + i32); + +define @intrinsic_vfwcvt_rtz.x.f.v_nxv2i32_nxv2f16( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv2i32_nxv2f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfwcvt.rtz.x.f.v {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i32.nxv2f16( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i32.nxv2f16( + , + , + , + i32); + +define @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv2i32_nxv2f16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv2i32_nxv2f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfwcvt.rtz.x.f.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i32.nxv2f16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i32.nxv4f16( + , + i32); + +define @intrinsic_vfwcvt_rtz.x.f.v_nxv4i32_nxv4f16( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv4i32_nxv4f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfwcvt.rtz.x.f.v {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i32.nxv4f16( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i32.nxv4f16( + , + , + , + i32); + +define @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv4i32_nxv4f16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv4i32_nxv4f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfwcvt.rtz.x.f.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i32.nxv4f16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i32.nxv8f16( + , + i32); + +define @intrinsic_vfwcvt_rtz.x.f.v_nxv8i32_nxv8f16( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv8i32_nxv8f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfwcvt.rtz.x.f.v {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i32.nxv8f16( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i32.nxv8f16( + , + , + , + i32); + +define @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv8i32_nxv8f16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv8i32_nxv8f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfwcvt.rtz.x.f.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i32.nxv8f16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfwcvt.rtz.x.f.v.nxv16i32.nxv16f16( + , + i32); + +define 
@intrinsic_vfwcvt_rtz.x.f.v_nxv16i32_nxv16f16( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv16i32_nxv16f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfwcvt.rtz.x.f.v {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv16i32.nxv16f16( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv16i32.nxv16f16( + , + , + , + i32); + +define @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv16i32_nxv16f16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv16i32_nxv16f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfwcvt.rtz.x.f.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv16i32.nxv16f16( + %0, + %1, + %2, + i32 %3) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-x-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-x-f-rv64.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-x-f-rv64.ll @@ -0,0 +1,325 @@ +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i32.nxv1f16( + , + i64); + +define @intrinsic_vfwcvt_rtz.x.f.v_nxv1i32_nxv1f16( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv1i32_nxv1f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfwcvt.rtz.x.f.v {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i32.nxv1f16( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i32.nxv1f16( + , + , + , + i64); + +define @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv1i32_nxv1f16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv1i32_nxv1f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfwcvt.rtz.x.f.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i32.nxv1f16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i32.nxv2f16( + , + i64); + +define @intrinsic_vfwcvt_rtz.x.f.v_nxv2i32_nxv2f16( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv2i32_nxv2f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfwcvt.rtz.x.f.v {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i32.nxv2f16( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i32.nxv2f16( + , + , + , + i64); + +define @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv2i32_nxv2f16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv2i32_nxv2f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfwcvt.rtz.x.f.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i32.nxv2f16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i32.nxv4f16( + , + i64); + +define @intrinsic_vfwcvt_rtz.x.f.v_nxv4i32_nxv4f16( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv4i32_nxv4f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfwcvt.rtz.x.f.v {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i32.nxv4f16( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i32.nxv4f16( + , + , + , + i64); + +define @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv4i32_nxv4f16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vfwcvt_mask_rtz.x.f.v_nxv4i32_nxv4f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfwcvt.rtz.x.f.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i32.nxv4f16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i32.nxv8f16( + , + i64); + +define @intrinsic_vfwcvt_rtz.x.f.v_nxv8i32_nxv8f16( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv8i32_nxv8f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfwcvt.rtz.x.f.v {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i32.nxv8f16( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i32.nxv8f16( + , + , + , + i64); + +define @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv8i32_nxv8f16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv8i32_nxv8f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfwcvt.rtz.x.f.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i32.nxv8f16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwcvt.rtz.x.f.v.nxv16i32.nxv16f16( + , + i64); + +define @intrinsic_vfwcvt_rtz.x.f.v_nxv16i32_nxv16f16( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv16i32_nxv16f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfwcvt.rtz.x.f.v {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv16i32.nxv16f16( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv16i32.nxv16f16( + , + , + , + i64); + +define @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv16i32_nxv16f16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv16i32_nxv16f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfwcvt.rtz.x.f.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv16i32.nxv16f16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i64.nxv1f32( + , + i64); + +define @intrinsic_vfwcvt_rtz.x.f.v_nxv1i64_nxv1f32( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv1i64_nxv1f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfwcvt.rtz.x.f.v {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i64.nxv1f32( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i64.nxv1f32( + , + , + , + i64); + +define @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv1i64_nxv1f32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv1i64_nxv1f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfwcvt.rtz.x.f.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i64.nxv1f32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i64.nxv2f32( + , + i64); + +define @intrinsic_vfwcvt_rtz.x.f.v_nxv2i64_nxv2f32( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv2i64_nxv2f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfwcvt.rtz.x.f.v {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i64.nxv2f32( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i64.nxv2f32( + , + , + , + i64); + +define @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv2i64_nxv2f32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv2i64_nxv2f32 +; CHECK: vsetvli 
{{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vfwcvt.rtz.x.f.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i64.nxv2f32(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x float> %1,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i64.nxv4f32(
+  <vscale x 4 x float>,
+  i64);
+
+define <vscale x 4 x i64> @intrinsic_vfwcvt_rtz.x.f.v_nxv4i64_nxv4f32(<vscale x 4 x float> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv4i64_nxv4f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vfwcvt.rtz.x.f.v {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i64.nxv4f32(
+    <vscale x 4 x float> %0,
+    i64 %1)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i64.nxv4f32(
+  <vscale x 4 x i64>,
+  <vscale x 4 x float>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i64> @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv4i64_nxv4f32(<vscale x 4 x i64> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv4i64_nxv4f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vfwcvt.rtz.x.f.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i64.nxv4f32(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x float> %1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i64.nxv8f32(
+  <vscale x 8 x float>,
+  i64);
+
+define <vscale x 8 x i64> @intrinsic_vfwcvt_rtz.x.f.v_nxv8i64_nxv8f32(<vscale x 8 x float> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv8i64_nxv8f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vfwcvt.rtz.x.f.v {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i64.nxv8f32(
+    <vscale x 8 x float> %0,
+    i64 %1)
+
+  ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i64.nxv8f32(
+  <vscale x 8 x i64>,
+  <vscale x 8 x float>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i64> @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv8i64_nxv8f32(<vscale x 8 x i64> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv8i64_nxv8f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vfwcvt.rtz.x.f.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i64.nxv8f32(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x float> %1,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i64> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-xu-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-xu-f-rv32.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-xu-f-rv32.ll
@@ -0,0 +1,181 @@
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i32.nxv1f16(
+  <vscale x 1 x half>,
+  i32);
+
+define <vscale x 1 x i32> @intrinsic_vfwcvt_rtz.xu.f.v_nxv1i32_nxv1f16(<vscale x 1 x half> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv1i32_nxv1f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vfwcvt.rtz.xu.f.v {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i32.nxv1f16(
+    <vscale x 1 x half> %0,
+    i32 %1)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f16(
+  <vscale x 1 x i32>,
+  <vscale x 1 x half>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i32> @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv1i32_nxv1f16(<vscale x 1 x i32> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv1i32_nxv1f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vfwcvt.rtz.xu.f.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f16(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x half> %1,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i32.nxv2f16(
+  <vscale x 2 x half>,
+  i32);
+
+define <vscale x 2 x i32> @intrinsic_vfwcvt_rtz.xu.f.v_nxv2i32_nxv2f16(<vscale x 2 x half> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv2i32_nxv2f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vfwcvt.rtz.xu.f.v {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i32.nxv2f16(
+    <vscale x 2 x half> %0,
+    i32 %1)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f16(
+  <vscale x 2 x i32>,
+  <vscale x 2 x half>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i32> @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv2i32_nxv2f16(<vscale x 2 x i32> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv2i32_nxv2f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vfwcvt.rtz.xu.f.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f16(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x half> %1,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i32.nxv4f16(
+  <vscale x 4 x half>,
+  i32);
+
+define <vscale x 4 x i32> @intrinsic_vfwcvt_rtz.xu.f.v_nxv4i32_nxv4f16(<vscale x 4 x half> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv4i32_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vfwcvt.rtz.xu.f.v {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i32.nxv4f16(
+    <vscale x 4 x half> %0,
+    i32 %1)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f16(
+  <vscale x 4 x i32>,
+  <vscale x 4 x half>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i32> @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv4i32_nxv4f16(<vscale x 4 x i32> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv4i32_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vfwcvt.rtz.xu.f.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f16(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x half> %1,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i32.nxv8f16(
+  <vscale x 8 x half>,
+  i32);
+
+define <vscale x 8 x i32> @intrinsic_vfwcvt_rtz.xu.f.v_nxv8i32_nxv8f16(<vscale x 8 x half> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv8i32_nxv8f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vfwcvt.rtz.xu.f.v {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i32.nxv8f16(
+    <vscale x 8 x half> %0,
+    i32 %1)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f16(
+  <vscale x 8 x i32>,
+  <vscale x 8 x half>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i32> @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv8i32_nxv8f16(<vscale x 8 x i32> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv8i32_nxv8f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vfwcvt.rtz.xu.f.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f16(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x half> %1,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv16i32.nxv16f16(
+  <vscale x 16 x half>,
+  i32);
+
+define <vscale x 16 x i32> @intrinsic_vfwcvt_rtz.xu.f.v_nxv16i32_nxv16f16(<vscale x 16 x half> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv16i32_nxv16f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vfwcvt.rtz.xu.f.v {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv16i32.nxv16f16(
+    <vscale x 16 x half> %0,
+    i32 %1)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f16(
+  <vscale x 16 x i32>,
+  <vscale x 16 x half>,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i32> @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv16i32_nxv16f16(<vscale x 16 x i32> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv16i32_nxv16f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vfwcvt.rtz.xu.f.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f16(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x half> %1,
+    <vscale x 16 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 16 x i32> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-xu-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-xu-f-rv64.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-xu-f-rv64.ll
@@ -0,0 +1,325 @@
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i32.nxv1f16(
+  <vscale x 1 x half>,
+  i64);
+
+define <vscale x 1 x i32> @intrinsic_vfwcvt_rtz.xu.f.v_nxv1i32_nxv1f16(<vscale x 1 x half> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv1i32_nxv1f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vfwcvt.rtz.xu.f.v {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i32.nxv1f16(
+    <vscale x 1 x half> %0,
+    i64 %1)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f16(
+  <vscale x 1 x i32>,
+  <vscale x 1 x half>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i32> @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv1i32_nxv1f16(<vscale x 1 x i32> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv1i32_nxv1f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vfwcvt.rtz.xu.f.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f16(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x half> %1,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i32.nxv2f16(
+  <vscale x 2 x half>,
+  i64);
+
+define <vscale x 2 x i32> @intrinsic_vfwcvt_rtz.xu.f.v_nxv2i32_nxv2f16(<vscale x 2 x half> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv2i32_nxv2f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vfwcvt.rtz.xu.f.v {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i32.nxv2f16(
+    <vscale x 2 x half> %0,
+    i64 %1)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f16(
+  <vscale x 2 x i32>,
+  <vscale x 2 x half>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i32> @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv2i32_nxv2f16(<vscale x 2 x i32> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv2i32_nxv2f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vfwcvt.rtz.xu.f.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f16(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x half> %1,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i32.nxv4f16(
+  <vscale x 4 x half>,
+  i64);
+
+define <vscale x 4 x i32> @intrinsic_vfwcvt_rtz.xu.f.v_nxv4i32_nxv4f16(<vscale x 4 x half> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv4i32_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vfwcvt.rtz.xu.f.v {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i32.nxv4f16(
+    <vscale x 4 x half> %0,
+    i64 %1)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f16(
+  <vscale x 4 x i32>,
+  <vscale x 4 x half>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i32> @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv4i32_nxv4f16(<vscale x 4 x i32> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv4i32_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vfwcvt.rtz.xu.f.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f16(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x half> %1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i32.nxv8f16(
+  <vscale x 8 x half>,
+  i64);
+
+define <vscale x 8 x i32> @intrinsic_vfwcvt_rtz.xu.f.v_nxv8i32_nxv8f16(<vscale x 8 x half> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv8i32_nxv8f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vfwcvt.rtz.xu.f.v {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i32.nxv8f16(
+    <vscale x 8 x half> %0,
+    i64 %1)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f16(
+  <vscale x 8 x i32>,
+  <vscale x 8 x half>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i32> @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv8i32_nxv8f16(<vscale x 8 x i32> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv8i32_nxv8f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vfwcvt.rtz.xu.f.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f16(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x half> %1,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv16i32.nxv16f16(
+  <vscale x 16 x half>,
+  i64);
+
+define <vscale x 16 x i32> @intrinsic_vfwcvt_rtz.xu.f.v_nxv16i32_nxv16f16(<vscale x 16 x half> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv16i32_nxv16f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vfwcvt.rtz.xu.f.v {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv16i32.nxv16f16(
+    <vscale x 16 x half> %0,
+    i64 %1)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f16(
+  <vscale x 16 x i32>,
+  <vscale x 16 x half>,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i32> @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv16i32_nxv16f16(<vscale x 16 x i32> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv16i32_nxv16f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vfwcvt.rtz.xu.f.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f16(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x half> %1,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i64.nxv1f32(
+  <vscale x 1 x float>,
+  i64);
+
+define <vscale x 1 x i64> @intrinsic_vfwcvt_rtz.xu.f.v_nxv1i64_nxv1f32(<vscale x 1 x float> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv1i64_nxv1f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vfwcvt.rtz.xu.f.v {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i64.nxv1f32(
+    <vscale x 1 x float> %0,
+    i64 %1)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f32(
+  <vscale x 1 x i64>,
+  <vscale x 1 x float>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i64> @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv1i64_nxv1f32(<vscale x 1 x i64> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv1i64_nxv1f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vfwcvt.rtz.xu.f.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f32(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x float> %1,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i64.nxv2f32(
+  <vscale x 2 x float>,
+  i64);
+
+define <vscale x 2 x i64> @intrinsic_vfwcvt_rtz.xu.f.v_nxv2i64_nxv2f32(<vscale x 2 x float> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv2i64_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vfwcvt.rtz.xu.f.v {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i64.nxv2f32(
+    <vscale x 2 x float> %0,
+    i64 %1)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f32(
+  <vscale x 2 x i64>,
+  <vscale x 2 x float>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i64> @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv2i64_nxv2f32(<vscale x 2 x i64> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv2i64_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vfwcvt.rtz.xu.f.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f32(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x float> %1,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i64.nxv4f32(
+  <vscale x 4 x float>,
+  i64);
+
+define <vscale x 4 x i64> @intrinsic_vfwcvt_rtz.xu.f.v_nxv4i64_nxv4f32(<vscale x 4 x float> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv4i64_nxv4f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vfwcvt.rtz.xu.f.v {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i64.nxv4f32(
+    <vscale x 4 x float> %0,
+    i64 %1)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f32(
+  <vscale x 4 x i64>,
+  <vscale x 4 x float>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i64> @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv4i64_nxv4f32(<vscale x 4 x i64> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv4i64_nxv4f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vfwcvt.rtz.xu.f.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f32(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x float> %1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i64.nxv8f32(
+  <vscale x 8 x float>,
+  i64);
+
+define <vscale x 8 x i64> @intrinsic_vfwcvt_rtz.xu.f.v_nxv8i64_nxv8f32(<vscale x 8 x float> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv8i64_nxv8f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vfwcvt.rtz.xu.f.v {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i64.nxv8f32(
+    <vscale x 8 x float> %0,
+    i64 %1)
+
+  ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f32(
+  <vscale x 8 x i64>,
+  <vscale x 8 x float>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i64> @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv8i64_nxv8f32(<vscale x 8 x i64> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv8i64_nxv8f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vfwcvt.rtz.xu.f.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f32(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x float> %1,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i64> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-x-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-x-f-rv32.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-x-f-rv32.ll
@@ -0,0 +1,181 @@
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv1i32.nxv1f16(
+  <vscale x 1 x half>,
+  i32);
+
+define <vscale x 1 x i32> @intrinsic_vfwcvt_x.f.v_nxv1i32_nxv1f16(<vscale x 1 x half> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv1i32_nxv1f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vfwcvt.x.f.v {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv1i32.nxv1f16(
+    <vscale x 1 x half> %0,
+    i32 %1)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i32.nxv1f16(
+  <vscale x 1 x i32>,
+  <vscale x 1 x half>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i32> @intrinsic_vfwcvt_mask_x.f.v_nxv1i32_nxv1f16(<vscale x 1 x i32> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv1i32_nxv1f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vfwcvt.x.f.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i32.nxv1f16(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x half> %1,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv2i32.nxv2f16(
+  <vscale x 2 x half>,
+  i32);
+
+define <vscale x 2 x i32> @intrinsic_vfwcvt_x.f.v_nxv2i32_nxv2f16(<vscale x 2 x half> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv2i32_nxv2f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vfwcvt.x.f.v {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv2i32.nxv2f16(
+    <vscale x 2 x half> %0,
+    i32 %1)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i32.nxv2f16(
+  <vscale x 2 x i32>,
+  <vscale x 2 x half>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i32> @intrinsic_vfwcvt_mask_x.f.v_nxv2i32_nxv2f16(<vscale x 2 x i32> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv2i32_nxv2f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vfwcvt.x.f.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i32.nxv2f16(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x half> %1,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv4i32.nxv4f16(
+  <vscale x 4 x half>,
+  i32);
+
+define <vscale x 4 x i32> @intrinsic_vfwcvt_x.f.v_nxv4i32_nxv4f16(<vscale x 4 x half> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv4i32_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vfwcvt.x.f.v {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv4i32.nxv4f16(
+    <vscale x 4 x half> %0,
+    i32 %1)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i32.nxv4f16(
+  <vscale x 4 x i32>,
+  <vscale x 4 x half>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i32> @intrinsic_vfwcvt_mask_x.f.v_nxv4i32_nxv4f16(<vscale x 4 x i32> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv4i32_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vfwcvt.x.f.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i32.nxv4f16(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x half> %1,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv8i32.nxv8f16(
+  <vscale x 8 x half>,
+  i32);
+
+define <vscale x 8 x i32> @intrinsic_vfwcvt_x.f.v_nxv8i32_nxv8f16(<vscale x 8 x half> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv8i32_nxv8f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vfwcvt.x.f.v {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv8i32.nxv8f16(
+    <vscale x 8 x half> %0,
+    i32 %1)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i32.nxv8f16(
+  <vscale x 8 x i32>,
+  <vscale x 8 x half>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i32> @intrinsic_vfwcvt_mask_x.f.v_nxv8i32_nxv8f16(<vscale x 8 x i32> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv8i32_nxv8f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vfwcvt.x.f.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i32.nxv8f16(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x half> %1,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv16i32.nxv16f16(
+  <vscale x 16 x half>,
+  i32);
+
+define <vscale x 16 x i32> @intrinsic_vfwcvt_x.f.v_nxv16i32_nxv16f16(<vscale x 16 x half> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv16i32_nxv16f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vfwcvt.x.f.v {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv16i32.nxv16f16(
+    <vscale x 16 x half> %0,
+    i32 %1)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vfwcvt.x.f.v.mask.nxv16i32.nxv16f16(
+  <vscale x 16 x i32>,
+  <vscale x 16 x half>,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i32> @intrinsic_vfwcvt_mask_x.f.v_nxv16i32_nxv16f16(<vscale x 16 x i32> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv16i32_nxv16f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vfwcvt.x.f.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.x.f.v.mask.nxv16i32.nxv16f16(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x half> %1,
+    <vscale x 16 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 16 x i32> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-x-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-x-f-rv64.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-x-f-rv64.ll
@@ -0,0 +1,325 @@
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv1i32.nxv1f16(
+  <vscale x 1 x half>,
+  i64);
+
+define <vscale x 1 x i32> @intrinsic_vfwcvt_x.f.v_nxv1i32_nxv1f16(<vscale x 1 x half> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv1i32_nxv1f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vfwcvt.x.f.v {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv1i32.nxv1f16(
+    <vscale x 1 x half> %0,
+    i64 %1)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i32.nxv1f16(
+  <vscale x 1 x i32>,
+  <vscale x 1 x half>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i32> @intrinsic_vfwcvt_mask_x.f.v_nxv1i32_nxv1f16(<vscale x 1 x i32> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv1i32_nxv1f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vfwcvt.x.f.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i32.nxv1f16(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x half> %1,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv2i32.nxv2f16(
+  <vscale x 2 x half>,
+  i64);
+
+define <vscale x 2 x i32> @intrinsic_vfwcvt_x.f.v_nxv2i32_nxv2f16(<vscale x 2 x half> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv2i32_nxv2f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vfwcvt.x.f.v {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv2i32.nxv2f16(
+    <vscale x 2 x half> %0,
+    i64 %1)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i32.nxv2f16(
+  <vscale x 2 x i32>,
+  <vscale x 2 x half>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i32> @intrinsic_vfwcvt_mask_x.f.v_nxv2i32_nxv2f16(<vscale x 2 x i32> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv2i32_nxv2f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vfwcvt.x.f.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i32.nxv2f16(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x half> %1,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv4i32.nxv4f16(
+  <vscale x 4 x half>,
+  i64);
+
+define <vscale x 4 x i32> @intrinsic_vfwcvt_x.f.v_nxv4i32_nxv4f16(<vscale x 4 x half> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv4i32_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vfwcvt.x.f.v {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv4i32.nxv4f16(
+    <vscale x 4 x half> %0,
+    i64 %1)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i32.nxv4f16(
+  <vscale x 4 x i32>,
+  <vscale x 4 x half>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i32> @intrinsic_vfwcvt_mask_x.f.v_nxv4i32_nxv4f16(<vscale x 4 x i32> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv4i32_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vfwcvt.x.f.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i32.nxv4f16(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x half> %1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv8i32.nxv8f16(
+  <vscale x 8 x half>,
+  i64);
+
+define <vscale x 8 x i32> @intrinsic_vfwcvt_x.f.v_nxv8i32_nxv8f16(<vscale x 8 x half> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv8i32_nxv8f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vfwcvt.x.f.v {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv8i32.nxv8f16(
+    <vscale x 8 x half> %0,
+    i64 %1)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i32.nxv8f16(
+  <vscale x 8 x i32>,
+  <vscale x 8 x half>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i32> @intrinsic_vfwcvt_mask_x.f.v_nxv8i32_nxv8f16(<vscale x 8 x i32> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv8i32_nxv8f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vfwcvt.x.f.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i32.nxv8f16(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x half> %1,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv16i32.nxv16f16(
+  <vscale x 16 x half>,
+  i64);
+
+define <vscale x 16 x i32> @intrinsic_vfwcvt_x.f.v_nxv16i32_nxv16f16(<vscale x 16 x half> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv16i32_nxv16f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vfwcvt.x.f.v {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv16i32.nxv16f16(
+    <vscale x 16 x half> %0,
+    i64 %1)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vfwcvt.x.f.v.mask.nxv16i32.nxv16f16(
+  <vscale x 16 x i32>,
+  <vscale x 16 x half>,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i32> @intrinsic_vfwcvt_mask_x.f.v_nxv16i32_nxv16f16(<vscale x 16 x i32> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv16i32_nxv16f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vfwcvt.x.f.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.x.f.v.mask.nxv16i32.nxv16f16(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x half> %1,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv1i64.nxv1f32(
+  <vscale x 1 x float>,
+  i64);
+
+define <vscale x 1 x i64> @intrinsic_vfwcvt_x.f.v_nxv1i64_nxv1f32(<vscale x 1 x float> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv1i64_nxv1f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vfwcvt.x.f.v {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv1i64.nxv1f32(
+    <vscale x 1 x float> %0,
+    i64 %1)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i64.nxv1f32(
+  <vscale x 1 x i64>,
+  <vscale x 1 x float>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i64> @intrinsic_vfwcvt_mask_x.f.v_nxv1i64_nxv1f32(<vscale x 1 x i64> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv1i64_nxv1f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vfwcvt.x.f.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i64.nxv1f32(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x float> %1,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv2i64.nxv2f32(
+  <vscale x 2 x float>,
+  i64);
+
+define <vscale x 2 x i64> @intrinsic_vfwcvt_x.f.v_nxv2i64_nxv2f32(<vscale x 2 x float> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv2i64_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vfwcvt.x.f.v {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv2i64.nxv2f32(
+    <vscale x 2 x float> %0,
+    i64 %1)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i64.nxv2f32(
+  <vscale x 2 x i64>,
+  <vscale x 2 x float>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i64> @intrinsic_vfwcvt_mask_x.f.v_nxv2i64_nxv2f32(<vscale x 2 x i64> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv2i64_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vfwcvt.x.f.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i64.nxv2f32(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x float> %1,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv4i64.nxv4f32(
+  <vscale x 4 x float>,
+  i64);
+
+define <vscale x 4 x i64> @intrinsic_vfwcvt_x.f.v_nxv4i64_nxv4f32(<vscale x 4 x float> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv4i64_nxv4f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vfwcvt.x.f.v {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv4i64.nxv4f32(
+    <vscale x 4 x float> %0,
+    i64 %1)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i64.nxv4f32(
+  <vscale x 4 x i64>,
+  <vscale x 4 x float>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i64> @intrinsic_vfwcvt_mask_x.f.v_nxv4i64_nxv4f32(<vscale x 4 x i64> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv4i64_nxv4f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vfwcvt.x.f.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i64.nxv4f32(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x float> %1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv8i64.nxv8f32(
+  <vscale x 8 x float>,
+  i64);
+
+define <vscale x 8 x i64> @intrinsic_vfwcvt_x.f.v_nxv8i64_nxv8f32(<vscale x 8 x float> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv8i64_nxv8f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vfwcvt.x.f.v {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv8i64.nxv8f32(
+    <vscale x 8 x float> %0,
+    i64 %1)
+
+  ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i64.nxv8f32(
+  <vscale x 8 x i64>,
+  <vscale x 8 x float>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i64> @intrinsic_vfwcvt_mask_x.f.v_nxv8i64_nxv8f32(<vscale x 8 x i64> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv8i64_nxv8f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vfwcvt.x.f.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i64.nxv8f32(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x float> %1,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i64> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-xu-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-xu-f-rv32.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-xu-f-rv32.ll
@@ -0,0 +1,181 @@
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv1i32.nxv1f16(
+  <vscale x 1 x half>,
+  i32);
+
+define <vscale x 1 x i32> @intrinsic_vfwcvt_xu.f.v_nxv1i32_nxv1f16(<vscale x 1 x half> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv1i32_nxv1f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vfwcvt.xu.f.v {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv1i32.nxv1f16(
+    <vscale x 1 x half> %0,
+    i32 %1)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i32.nxv1f16(
+  <vscale x 1 x i32>,
+  <vscale x 1 x half>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i32> @intrinsic_vfwcvt_mask_xu.f.v_nxv1i32_nxv1f16(<vscale x 1 x i32> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv1i32_nxv1f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vfwcvt.xu.f.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i32.nxv1f16(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x half> %1,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv2i32.nxv2f16(
+  <vscale x 2 x half>,
+  i32);
+
+define <vscale x 2 x i32> @intrinsic_vfwcvt_xu.f.v_nxv2i32_nxv2f16(<vscale x 2 x half> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv2i32_nxv2f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vfwcvt.xu.f.v {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv2i32.nxv2f16(
+    <vscale x 2 x half> %0,
+    i32 %1)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i32.nxv2f16(
+  <vscale x 2 x i32>,
+  <vscale x 2 x half>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i32> @intrinsic_vfwcvt_mask_xu.f.v_nxv2i32_nxv2f16(<vscale x 2 x i32> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv2i32_nxv2f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vfwcvt.xu.f.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i32.nxv2f16(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x half> %1,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv4i32.nxv4f16(
+  <vscale x 4 x half>,
+  i32);
+
+define <vscale x 4 x i32> @intrinsic_vfwcvt_xu.f.v_nxv4i32_nxv4f16(<vscale x 4 x half> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv4i32_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vfwcvt.xu.f.v {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv4i32.nxv4f16(
+    <vscale x 4 x half> %0,
+    i32 %1)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i32.nxv4f16(
+  <vscale x 4 x i32>,
+  <vscale x 4 x half>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i32> @intrinsic_vfwcvt_mask_xu.f.v_nxv4i32_nxv4f16(<vscale x 4 x i32> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv4i32_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vfwcvt.xu.f.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i32.nxv4f16(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x half> %1,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv8i32.nxv8f16(
+  <vscale x 8 x half>,
+  i32);
+
+define <vscale x 8 x i32> @intrinsic_vfwcvt_xu.f.v_nxv8i32_nxv8f16(<vscale x 8 x half> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv8i32_nxv8f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vfwcvt.xu.f.v {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv8i32.nxv8f16(
+    <vscale x 8 x half> %0,
+    i32 %1)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i32.nxv8f16(
+  <vscale x 8 x i32>,
+  <vscale x 8 x half>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i32> @intrinsic_vfwcvt_mask_xu.f.v_nxv8i32_nxv8f16(<vscale x 8 x i32> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv8i32_nxv8f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vfwcvt.xu.f.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i32.nxv8f16(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x half> %1,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv16i32.nxv16f16(
+  <vscale x 16 x half>,
+  i32);
+
+define <vscale x 16 x i32> @intrinsic_vfwcvt_xu.f.v_nxv16i32_nxv16f16(<vscale x 16 x half> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv16i32_nxv16f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vfwcvt.xu.f.v {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv16i32.nxv16f16(
+    <vscale x 16 x half> %0,
+    i32 %1)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv16i32.nxv16f16(
+  <vscale x 16 x i32>,
+  <vscale x 16 x half>,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i32> @intrinsic_vfwcvt_mask_xu.f.v_nxv16i32_nxv16f16(<vscale x 16 x i32> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv16i32_nxv16f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vfwcvt.xu.f.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv16i32.nxv16f16(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x half> %1,
+    <vscale x 16 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 16 x i32> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-xu-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-xu-f-rv64.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-xu-f-rv64.ll
@@ -0,0 +1,325 @@
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv1i32.nxv1f16(
+  <vscale x 1 x half>,
+  i64);
+
+define <vscale x 1 x i32> @intrinsic_vfwcvt_xu.f.v_nxv1i32_nxv1f16(<vscale x 1 x half> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv1i32_nxv1f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vfwcvt.xu.f.v {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv1i32.nxv1f16(
+    <vscale x 1 x half> %0,
+    i64 %1)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i32.nxv1f16(
+  <vscale x 1 x i32>,
+  <vscale x 1 x half>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i32> @intrinsic_vfwcvt_mask_xu.f.v_nxv1i32_nxv1f16(<vscale x 1 x i32> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv1i32_nxv1f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vfwcvt.xu.f.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i32.nxv1f16(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x half> %1,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv2i32.nxv2f16(
+  <vscale x 2 x half>,
+  i64);
+
+define <vscale x 2 x i32> @intrinsic_vfwcvt_xu.f.v_nxv2i32_nxv2f16(<vscale x 2 x half> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv2i32_nxv2f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vfwcvt.xu.f.v {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv2i32.nxv2f16(
+    <vscale x 2 x half> %0,
+    i64 %1)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i32.nxv2f16(
+  <vscale x 2 x i32>,
+  <vscale x 2 x half>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i32> @intrinsic_vfwcvt_mask_xu.f.v_nxv2i32_nxv2f16(<vscale x 2 x i32> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv2i32_nxv2f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vfwcvt.xu.f.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i32.nxv2f16(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x half> %1,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv4i32.nxv4f16(
+  <vscale x 4 x half>,
+  i64);
+
+define <vscale x 4 x i32> @intrinsic_vfwcvt_xu.f.v_nxv4i32_nxv4f16(<vscale x 4 x half> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv4i32_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vfwcvt.xu.f.v {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv4i32.nxv4f16(
+    <vscale x 4 x half> %0,
+    i64 %1)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i32.nxv4f16(
+  <vscale x 4 x i32>,
+  <vscale x 4 x half>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i32> @intrinsic_vfwcvt_mask_xu.f.v_nxv4i32_nxv4f16(<vscale x 4 x i32> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv4i32_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vfwcvt.xu.f.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i32.nxv4f16(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x half> %1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv8i32.nxv8f16(
+  <vscale x 8 x half>,
+  i64);
+
+define <vscale x 8 x i32> @intrinsic_vfwcvt_xu.f.v_nxv8i32_nxv8f16(<vscale x 8 x half> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv8i32_nxv8f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vfwcvt.xu.f.v {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv8i32.nxv8f16(
+    <vscale x 8 x half> %0,
+    i64 %1)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i32.nxv8f16(
+  <vscale x 8 x i32>,
+  <vscale x 8 x half>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i32> @intrinsic_vfwcvt_mask_xu.f.v_nxv8i32_nxv8f16(<vscale x 8 x i32> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv8i32_nxv8f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vfwcvt.xu.f.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i32.nxv8f16(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x half> %1,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv16i32.nxv16f16(
+  <vscale x 16 x half>,
+  i64);
+
+define <vscale x 16 x i32> @intrinsic_vfwcvt_xu.f.v_nxv16i32_nxv16f16(<vscale x 16 x half> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv16i32_nxv16f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vfwcvt.xu.f.v {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv16i32.nxv16f16(
+    <vscale x 16 x half> %0,
+    i64 %1)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv16i32.nxv16f16(
+  <vscale x 16 x i32>,
+  <vscale x 16 x half>,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i32> @intrinsic_vfwcvt_mask_xu.f.v_nxv16i32_nxv16f16(<vscale x 16 x i32> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv16i32_nxv16f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vfwcvt.xu.f.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv16i32.nxv16f16(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x half> %1,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv1i64.nxv1f32(
+  <vscale x 1 x float>,
+  i64);
+
+define <vscale x 1 x i64> @intrinsic_vfwcvt_xu.f.v_nxv1i64_nxv1f32(<vscale x 1 x float> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv1i64_nxv1f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vfwcvt.xu.f.v {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv1i64.nxv1f32(
+    <vscale x 1 x float> %0,
+    i64 %1)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i64.nxv1f32(
+  <vscale x 1 x i64>,
+  <vscale x 1 x float>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i64> @intrinsic_vfwcvt_mask_xu.f.v_nxv1i64_nxv1f32(<vscale x 1 x i64> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv1i64_nxv1f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vfwcvt.xu.f.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i64.nxv1f32(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x float> %1,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv2i64.nxv2f32(
+  <vscale x 2 x float>,
+  i64);
+
+define <vscale x 2 x i64> @intrinsic_vfwcvt_xu.f.v_nxv2i64_nxv2f32(<vscale x 2 x float> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv2i64_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vfwcvt.xu.f.v {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv2i64.nxv2f32(
+    <vscale x 2 x float> %0,
+    i64 %1)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i64.nxv2f32(
+  <vscale x 2 x i64>,
+  <vscale x 2 x float>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i64> @intrinsic_vfwcvt_mask_xu.f.v_nxv2i64_nxv2f32(<vscale x 2 x i64> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv2i64_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vfwcvt.xu.f.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i64.nxv2f32(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x float> %1,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv4i64.nxv4f32(
+  <vscale x 4 x float>,
+  i64);
+
+define <vscale x 4 x i64> @intrinsic_vfwcvt_xu.f.v_nxv4i64_nxv4f32(<vscale x 4 x float> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv4i64_nxv4f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vfwcvt.xu.f.v {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv4i64.nxv4f32(
+    <vscale x 4 x float> %0,
+    i64 %1)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i64.nxv4f32(
+  <vscale x 4 x i64>,
+  <vscale x 4 x float>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i64> @intrinsic_vfwcvt_mask_xu.f.v_nxv4i64_nxv4f32(<vscale x 4 x i64> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv4i64_nxv4f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vfwcvt.xu.f.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i64.nxv4f32(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x float> %1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv8i64.nxv8f32(
+  <vscale x 8 x float>,
+  i64);
+
+define <vscale x 8 x i64> @intrinsic_vfwcvt_xu.f.v_nxv8i64_nxv8f32(<vscale x 8 x float> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv8i64_nxv8f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vfwcvt.xu.f.v {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv8i64.nxv8f32(
+    <vscale x 8 x float> %0,
+    i64 %1)
+
+  ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i64.nxv8f32(
+  <vscale x 8 x i64>,
+  <vscale x 8 x float>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i64> @intrinsic_vfwcvt_mask_xu.f.v_nxv8i64_nxv8f32(<vscale x 8 x i64> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv8i64_nxv8f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vfwcvt.xu.f.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i64.nxv8f32(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x float> %1,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i64> %a
+}
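
A note on the shape of these tests: the mangled suffix on each intrinsic name fully determines the scalable-vector operand types, so every masked declaration follows one pattern. As a minimal sketch, restating the nxv8i64/nxv8f32 variant from the last file above (the annotations are editorial, not part of the generated tests):

declare <vscale x 8 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i64.nxv8f32(
  <vscale x 8 x i64>,   ; maskedoff: merged into inactive result lanes
  <vscale x 8 x float>, ; source vector, widened from SEW 32 to SEW 64
  <vscale x 8 x i1>,    ; mask, expected in v0 by the pseudo
  i64);                 ; vl operand (XLEN-sized)

The unmasked form simply drops the maskedoff and mask operands, leaving (vector_in, vl).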