Index: llvm/include/llvm/IR/IntrinsicsRISCV.td
===================================================================
--- llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -293,7 +293,6 @@
                     [IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
     let ExtendOperand = 3;
   }
-
   // For vmv.v.v, vmv.v.x, vmv.v.i
   // Input: (vector_in/scalar_in, vl)
   class RISCVUnary : Intrinsic<[llvm_anyvector_ty],
@@ -301,7 +300,23 @@
                                [IntrNoMem] >, RISCVVIntrinsic {
     let ExtendOperand = 1;
   }
-
+  // For unary operations where the destination vector type is the same as the source vector type.
+  // Input: (vector_in, vl)
+  class RISCVUnaryAANoMask
+        : Intrinsic<[llvm_anyvector_ty],
+                    [LLVMMatchType<0>, llvm_anyint_ty],
+                    [IntrNoMem]>, RISCVVIntrinsic {
+    let ExtendOperand = 1;
+  }
+  // For unary operations where the destination vector type is the same as the source vector type (with mask).
+  // Input: (maskedoff, vector_in, mask, vl)
+  class RISCVUnaryAAMask
+        : Intrinsic<[llvm_anyvector_ty],
+                    [LLVMMatchType<0>, llvm_any_ty,
+                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
+                    [IntrNoMem]>, RISCVVIntrinsic {
+    let ExtendOperand = 2;
+  }
   class RISCVTernaryAAAXNoMask
         : Intrinsic<[llvm_anyvector_ty],
                     [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty,
@@ -337,6 +352,10 @@
     def "int_riscv_" # NAME : RISCVIStore;
     def "int_riscv_" # NAME # "_mask" : RISCVIStoreMask;
   }
+  multiclass RISCVUnaryAA {
+    def "int_riscv_" # NAME : RISCVUnaryAANoMask;
+    def "int_riscv_" # NAME # "_mask" : RISCVUnaryAAMask;
+  }
   multiclass RISCVBinaryAAX {
     def "int_riscv_" # NAME : RISCVBinaryAAXNoMask;
     def "int_riscv_" # NAME # "_mask" : RISCVBinaryAAXMask;
@@ -467,6 +486,8 @@
   defm vfwmul : RISCVBinaryABX;
 
+  defm vfsqrt : RISCVUnaryAA;
+
   defm vfmin : RISCVBinaryAAX;
   defm vfmax : RISCVBinaryAAX;
Index: llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
===================================================================
--- llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -467,6 +467,22 @@
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
 }
 
+class VPseudoUnaryNoMask<VReg RetClass, VReg Op2Class> :
+        Pseudo<(outs RetClass:$rd),
+               (ins Op2Class:$rs2, GPR:$vl, ixlenimm:$sew), []>,
+        RISCVVPseudo {
+  let mayLoad = 0;
+  let mayStore = 0;
+  let hasSideEffects = 0;
+  let usesCustomInserter = 1;
+  let Uses = [VL, VTYPE];
+  let VLIndex = 2;
+  let SEWIndex = 3;
+  let HasDummyMask = 1;
+  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
 class VPseudoBinaryNoMask<VReg RetClass,
 
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
 }
 
+class VPseudoUnaryMask<VReg RetClass, VReg Op2Class> :
+        Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
+                (ins GetVRegNoV0<RetClass>.R:$merge,
+                     Op2Class:$rs2,
+                     VMaskOp:$vm, GPR:$vl, ixlenimm:$sew), []>,
+        RISCVVPseudo {
+  let mayLoad = 0;
+  let mayStore = 0;
+  let hasSideEffects = 0;
+  let usesCustomInserter = 1;
+  let Uses = [VL, VTYPE];
+  let VLIndex = 4;
+  let SEWIndex = 5;
+  let MergeOpIndex = 1;
+  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
 class VPseudoBinaryMask<VReg RetClass,
 
+multiclass VPseudoUnary<VReg RetClass, VReg Op2Class, LMULInfo MInfo> {
+  let VLMul = MInfo.value in {
+    def "_" # MInfo.MX : VPseudoUnaryNoMask<RetClass, Op2Class>;
+    def "_" # MInfo.MX # "_MASK" : VPseudoUnaryMask<RetClass, Op2Class>;
+  }
+}
+
 multiclass VPseudoBinary<VReg RetClass,
 
+multiclass VPseudoUnaryV_V {
+  foreach m = MxList.m in
+    defm _V : VPseudoUnary<m.vrclass, m.vrclass, m>;
+}
+
 multiclass VPseudoBinaryV_VV_VX_VI {
   defm "" : VPseudoBinaryV_VV;
   defm "" : VPseudoBinaryV_VX;
@@ -924,6 +972,42 @@
 //===----------------------------------------------------------------------===//
 // Helpers to define the intrinsic patterns.
 //===----------------------------------------------------------------------===//
+class VPatUnaryNoMask<string intrinsic_name,
+                      string inst,
+                      string kind,
+                      ValueType result_type,
+                      ValueType op2_type,
+                      int sew,
+                      LMULInfo vlmul,
+                      VReg op2_reg_class> :
+  Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
+                   (op2_type op2_reg_class:$rs2),
+                   (XLenVT GPR:$vl))),
+                   (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
+                   (op2_type op2_reg_class:$rs2),
+                   (NoX0 GPR:$vl), sew)>;
+
+class VPatUnaryMask<string intrinsic_name,
+                    string inst,
+                    string kind,
+                    ValueType result_type,
+                    ValueType op2_type,
+                    ValueType mask_type,
+                    int sew,
+                    LMULInfo vlmul,
+                    VReg result_reg_class,
+                    VReg op2_reg_class> :
+  Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
+                   (result_type result_reg_class:$merge),
+                   (op2_type op2_reg_class:$rs2),
+                   (mask_type V0),
+                   (XLenVT GPR:$vl))),
+                   (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX#"_MASK")
+                   (result_type result_reg_class:$merge),
+                   (op2_type op2_reg_class:$rs2),
+                   (mask_type V0),
+                   (NoX0 GPR:$vl), sew)>;
+
 class VPatBinaryNoMask<string intrinsic_name,
 
 }
+multiclass VPatUnary<string intrinsic,
+                     string inst,
+                     string kind,
+                     ValueType result_type,
+                     ValueType op2_type,
+                     ValueType mask_type,
+                     int sew,
+                     LMULInfo vlmul,
+                     VReg result_reg_class,
+                     VReg op2_reg_class>
+{
+  def : VPatUnaryNoMask<intrinsic, inst, kind, result_type,
+                        op2_type, sew, vlmul, op2_reg_class>;
+  def : VPatUnaryMask<intrinsic, inst, kind, result_type, op2_type,
+                      mask_type, sew, vlmul, result_reg_class, op2_reg_class>;
+}
+
 multiclass VPatBinary<string intrinsic,
 
 }
+multiclass VPatUnaryV_V<string intrinsic, string instruction,
+                        list<VTypeInfo> vtilist> {
+  foreach vti = vtilist in
+    defm : VPatUnary<intrinsic, instruction, "V",
+                     vti.Vector, vti.Vector, vti.Mask, vti.SEW,
+                     vti.LMul, vti.RegClass, vti.RegClass>;
+}
+
 multiclass VPatBinaryV_VV<string intrinsic, string instruction,
                           list<VTypeInfo> vtilist> {
   foreach vti = vtilist in
@@ -1729,6 +1838,11 @@
 defm PseudoVFWMUL : VPseudoBinaryW_VV_VX;
 
 //===----------------------------------------------------------------------===//
+// 14.8. Vector Floating-Point Square-Root Instruction
+//===----------------------------------------------------------------------===//
+defm PseudoVFSQRT : VPseudoUnaryV_V;
+
+//===----------------------------------------------------------------------===//
 // 14.9. Vector Floating-Point Min/Max Instructions
 //===----------------------------------------------------------------------===//
 defm PseudoVFMIN : VPseudoBinaryV_VV_VX;
@@ -2066,6 +2180,11 @@
 defm "" : VPatBinaryW_VV_VX<"int_riscv_vfwmul", "PseudoVFWMUL", AllWidenableFloatVectors>;
 
 //===----------------------------------------------------------------------===//
+// 14.8. Vector Floating-Point Square-Root Instruction
+//===----------------------------------------------------------------------===//
+defm "" : VPatUnaryV_V<"int_riscv_vfsqrt", "PseudoVFSQRT", AllFloatVectors>;
+
+//===----------------------------------------------------------------------===//
 // 14.9. Vector Floating-Point Min/Max Instructions
 //===----------------------------------------------------------------------===//
 defm "" : VPatBinaryV_VV_VX<"int_riscv_vfmin", "PseudoVFMIN", AllFloatVectors>;
Index: llvm/test/CodeGen/RISCV/rvv/vfsqrt-rv32.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/RISCV/rvv/vfsqrt-rv32.ll
@@ -0,0 +1,397 @@
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x half> @llvm.riscv.vfsqrt.nxv1f16(
+  <vscale x 1 x half>,
+  i32);
+
+define <vscale x 1 x half> @intrinsic_vfsqrt_v_nxv1f16_nxv1f16() nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfsqrt_v_nxv1f16_nxv1f16
+; CHECK: vsetvli {{.*}}, a0, e16,mf4
+; CHECK: vfsqrt.v {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x half> @llvm.riscv.vfsqrt.nxv1f16(
+    <vscale x 1 x half> undef,
+    i32 undef)
+
+  ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfsqrt.mask.nxv1f16(
+  <vscale x 1 x half>,
+  <vscale x 1 x half>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x half> @intrinsic_vfsqrt_mask_v_nxv1f16_nxv1f16() nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv1f16_nxv1f16
+; CHECK: vsetvli {{.*}}, a0, e16,mf4
+; CHECK: vfsqrt.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x half> @llvm.riscv.vfsqrt.mask.nxv1f16(
+    <vscale x 1 x half> undef,
+    <vscale x 1 x half> undef,
+    <vscale x 1 x i1> undef,
+    i32 undef)
+
+  ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfsqrt.nxv2f16(
+  <vscale x 2 x half>,
+  i32);
+
+define <vscale x 2 x half> @intrinsic_vfsqrt_v_nxv2f16_nxv2f16() nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfsqrt_v_nxv2f16_nxv2f16
+; CHECK: vsetvli {{.*}}, a0, e16,mf2
+; CHECK: vfsqrt.v {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x half> @llvm.riscv.vfsqrt.nxv2f16(
+    <vscale x 2 x half> undef,
+    i32 undef)
+
+  ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfsqrt.mask.nxv2f16(
+  <vscale x 2 x half>,
+  <vscale x 2 x half>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x half> @intrinsic_vfsqrt_mask_v_nxv2f16_nxv2f16() nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv2f16_nxv2f16
+; CHECK: vsetvli {{.*}}, a0, e16,mf2
+; CHECK: vfsqrt.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x half> @llvm.riscv.vfsqrt.mask.nxv2f16(
+    <vscale x 2 x half> undef,
+    <vscale x 2 x half> undef,
+    <vscale x 2 x i1> undef,
+    i32 undef)
+
+  ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfsqrt.nxv4f16(
+  <vscale x 4 x half>,
+  i32);
+
+define <vscale x 4 x half> @intrinsic_vfsqrt_v_nxv4f16_nxv4f16() nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfsqrt_v_nxv4f16_nxv4f16
+; CHECK: vsetvli {{.*}}, a0, e16,m1
+; CHECK: vfsqrt.v {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x half> @llvm.riscv.vfsqrt.nxv4f16(
+    <vscale x 4 x half> undef,
+    i32 undef)
+
+  ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfsqrt.mask.nxv4f16(
+  <vscale x 4 x half>,
+  <vscale x 4 x half>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x half> @intrinsic_vfsqrt_mask_v_nxv4f16_nxv4f16() nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv4f16_nxv4f16
+; CHECK: vsetvli {{.*}}, a0, e16,m1
+; CHECK: vfsqrt.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x half> @llvm.riscv.vfsqrt.mask.nxv4f16(
+    <vscale x 4 x half> undef,
+    <vscale x 4 x half> undef,
+    <vscale x 4 x i1> undef,
+    i32 undef)
+
+  ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfsqrt.nxv8f16(
+  <vscale x 8 x half>,
+  i32);
+
+define <vscale x 8 x half> @intrinsic_vfsqrt_v_nxv8f16_nxv8f16() nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfsqrt_v_nxv8f16_nxv8f16
+; CHECK: vsetvli {{.*}}, a0, e16,m2
+; CHECK: vfsqrt.v {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x half> @llvm.riscv.vfsqrt.nxv8f16(
+    <vscale x 8 x half> undef,
+    i32 undef)
+
+  ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfsqrt.mask.nxv8f16(
+  <vscale x 8 x half>,
+  <vscale x 8 x half>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x half> @intrinsic_vfsqrt_mask_v_nxv8f16_nxv8f16() nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv8f16_nxv8f16
+; CHECK: vsetvli {{.*}}, a0, e16,m2
+; CHECK: vfsqrt.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x half> @llvm.riscv.vfsqrt.mask.nxv8f16(
+    <vscale x 8 x half> undef,
+    <vscale x 8 x half> undef,
+    <vscale x 8 x i1> undef,
+    i32 undef)
+
+  ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfsqrt.nxv16f16(
+  <vscale x 16 x half>,
+  i32);
+
+define <vscale x 16 x half> @intrinsic_vfsqrt_v_nxv16f16_nxv16f16() nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfsqrt_v_nxv16f16_nxv16f16
+; CHECK: vsetvli
{{.*}}, a0, e16,m4 +; CHECK: vfsqrt.v {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfsqrt.nxv16f16( + undef, + i32 undef) + + ret %a +} + +declare @llvm.riscv.vfsqrt.mask.nxv16f16( + , + , + , + i32); + +define @intrinsic_vfsqrt_mask_v_nxv16f16_nxv16f16() nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv16f16_nxv16f16 +; CHECK: vsetvli {{.*}}, a0, e16,m4 +; CHECK: vfsqrt.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfsqrt.mask.nxv16f16( + undef, + undef, + undef, + i32 undef) + + ret %a +} + +declare @llvm.riscv.vfsqrt.nxv32f16( + , + i32); + +define @intrinsic_vfsqrt_v_nxv32f16_nxv32f16() nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsqrt_v_nxv32f16_nxv32f16 +; CHECK: vsetvli {{.*}}, a0, e16,m8 +; CHECK: vfsqrt.v {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfsqrt.nxv32f16( + undef, + i32 undef) + + ret %a +} + +declare @llvm.riscv.vfsqrt.mask.nxv32f16( + , + , + , + i32); + +define @intrinsic_vfsqrt_mask_v_nxv32f16_nxv32f16() nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv32f16_nxv32f16 +; CHECK: vsetvli {{.*}}, a0, e16,m8 +; CHECK: vfsqrt.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfsqrt.mask.nxv32f16( + undef, + undef, + undef, + i32 undef) + + ret %a +} + +declare @llvm.riscv.vfsqrt.nxv1f32( + , + i32); + +define @intrinsic_vfsqrt_v_nxv1f32_nxv1f32() nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsqrt_v_nxv1f32_nxv1f32 +; CHECK: vsetvli {{.*}}, a0, e32,mf2 +; CHECK: vfsqrt.v {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfsqrt.nxv1f32( + undef, + i32 undef) + + ret %a +} + +declare @llvm.riscv.vfsqrt.mask.nxv1f32( + , + , + , + i32); + +define @intrinsic_vfsqrt_mask_v_nxv1f32_nxv1f32() nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv1f32_nxv1f32 +; CHECK: vsetvli {{.*}}, a0, e32,mf2 +; CHECK: vfsqrt.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfsqrt.mask.nxv1f32( + undef, + undef, + undef, + i32 undef) + + ret %a +} + +declare @llvm.riscv.vfsqrt.nxv2f32( + , + i32); + +define @intrinsic_vfsqrt_v_nxv2f32_nxv2f32() nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsqrt_v_nxv2f32_nxv2f32 +; CHECK: vsetvli {{.*}}, a0, e32,m1 +; CHECK: vfsqrt.v {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfsqrt.nxv2f32( + undef, + i32 undef) + + ret %a +} + +declare @llvm.riscv.vfsqrt.mask.nxv2f32( + , + , + , + i32); + +define @intrinsic_vfsqrt_mask_v_nxv2f32_nxv2f32() nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv2f32_nxv2f32 +; CHECK: vsetvli {{.*}}, a0, e32,m1 +; CHECK: vfsqrt.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfsqrt.mask.nxv2f32( + undef, + undef, + undef, + i32 undef) + + ret %a +} + +declare @llvm.riscv.vfsqrt.nxv4f32( + , + i32); + +define @intrinsic_vfsqrt_v_nxv4f32_nxv4f32() nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsqrt_v_nxv4f32_nxv4f32 +; CHECK: vsetvli {{.*}}, a0, e32,m2 +; CHECK: vfsqrt.v {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfsqrt.nxv4f32( + undef, + i32 undef) + + ret %a +} + +declare @llvm.riscv.vfsqrt.mask.nxv4f32( + , + , + , + i32); + +define @intrinsic_vfsqrt_mask_v_nxv4f32_nxv4f32() nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv4f32_nxv4f32 +; CHECK: vsetvli {{.*}}, a0, e32,m2 +; CHECK: vfsqrt.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfsqrt.mask.nxv4f32( + undef, + undef, + undef, + i32 undef) + + ret %a +} + +declare @llvm.riscv.vfsqrt.nxv8f32( + , + i32); + +define @intrinsic_vfsqrt_v_nxv8f32_nxv8f32() nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsqrt_v_nxv8f32_nxv8f32 +; CHECK: vsetvli {{.*}}, a0, 
e32,m4 +; CHECK: vfsqrt.v {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfsqrt.nxv8f32( + undef, + i32 undef) + + ret %a +} + +declare @llvm.riscv.vfsqrt.mask.nxv8f32( + , + , + , + i32); + +define @intrinsic_vfsqrt_mask_v_nxv8f32_nxv8f32() nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv8f32_nxv8f32 +; CHECK: vsetvli {{.*}}, a0, e32,m4 +; CHECK: vfsqrt.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfsqrt.mask.nxv8f32( + undef, + undef, + undef, + i32 undef) + + ret %a +} + +declare @llvm.riscv.vfsqrt.nxv16f32( + , + i32); + +define @intrinsic_vfsqrt_v_nxv16f32_nxv16f32() nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsqrt_v_nxv16f32_nxv16f32 +; CHECK: vsetvli {{.*}}, a0, e32,m8 +; CHECK: vfsqrt.v {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfsqrt.nxv16f32( + undef, + i32 undef) + + ret %a +} + +declare @llvm.riscv.vfsqrt.mask.nxv16f32( + , + , + , + i32); + +define @intrinsic_vfsqrt_mask_v_nxv16f32_nxv16f32() nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv16f32_nxv16f32 +; CHECK: vsetvli {{.*}}, a0, e32,m8 +; CHECK: vfsqrt.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfsqrt.mask.nxv16f32( + undef, + undef, + undef, + i32 undef) + + ret %a +} Index: llvm/test/CodeGen/RISCV/rvv/vfsqrt-rv64.ll =================================================================== --- /dev/null +++ llvm/test/CodeGen/RISCV/rvv/vfsqrt-rv64.ll @@ -0,0 +1,541 @@ +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vfsqrt.nxv1f16( + , + i64); + +define @intrinsic_vfsqrt_v_nxv1f16_nxv1f16() nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsqrt_v_nxv1f16_nxv1f16 +; CHECK: vsetvli {{.*}}, a0, e16,mf4 +; CHECK: vfsqrt.v {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfsqrt.nxv1f16( + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vfsqrt.mask.nxv1f16( + , + , + , + i64); + +define @intrinsic_vfsqrt_mask_v_nxv1f16_nxv1f16() nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv1f16_nxv1f16 +; CHECK: vsetvli {{.*}}, a0, e16,mf4 +; CHECK: vfsqrt.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfsqrt.mask.nxv1f16( + undef, + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vfsqrt.nxv2f16( + , + i64); + +define @intrinsic_vfsqrt_v_nxv2f16_nxv2f16() nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsqrt_v_nxv2f16_nxv2f16 +; CHECK: vsetvli {{.*}}, a0, e16,mf2 +; CHECK: vfsqrt.v {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfsqrt.nxv2f16( + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vfsqrt.mask.nxv2f16( + , + , + , + i64); + +define @intrinsic_vfsqrt_mask_v_nxv2f16_nxv2f16() nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv2f16_nxv2f16 +; CHECK: vsetvli {{.*}}, a0, e16,mf2 +; CHECK: vfsqrt.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfsqrt.mask.nxv2f16( + undef, + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vfsqrt.nxv4f16( + , + i64); + +define @intrinsic_vfsqrt_v_nxv4f16_nxv4f16() nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsqrt_v_nxv4f16_nxv4f16 +; CHECK: vsetvli {{.*}}, a0, e16,m1 +; CHECK: vfsqrt.v {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfsqrt.nxv4f16( + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vfsqrt.mask.nxv4f16( + , + , + , + i64); + +define @intrinsic_vfsqrt_mask_v_nxv4f16_nxv4f16() nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv4f16_nxv4f16 +; CHECK: vsetvli {{.*}}, a0, e16,m1 +; CHECK: vfsqrt.v 
{{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfsqrt.mask.nxv4f16( + undef, + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vfsqrt.nxv8f16( + , + i64); + +define @intrinsic_vfsqrt_v_nxv8f16_nxv8f16() nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsqrt_v_nxv8f16_nxv8f16 +; CHECK: vsetvli {{.*}}, a0, e16,m2 +; CHECK: vfsqrt.v {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfsqrt.nxv8f16( + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vfsqrt.mask.nxv8f16( + , + , + , + i64); + +define @intrinsic_vfsqrt_mask_v_nxv8f16_nxv8f16() nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv8f16_nxv8f16 +; CHECK: vsetvli {{.*}}, a0, e16,m2 +; CHECK: vfsqrt.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfsqrt.mask.nxv8f16( + undef, + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vfsqrt.nxv16f16( + , + i64); + +define @intrinsic_vfsqrt_v_nxv16f16_nxv16f16() nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsqrt_v_nxv16f16_nxv16f16 +; CHECK: vsetvli {{.*}}, a0, e16,m4 +; CHECK: vfsqrt.v {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfsqrt.nxv16f16( + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vfsqrt.mask.nxv16f16( + , + , + , + i64); + +define @intrinsic_vfsqrt_mask_v_nxv16f16_nxv16f16() nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv16f16_nxv16f16 +; CHECK: vsetvli {{.*}}, a0, e16,m4 +; CHECK: vfsqrt.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfsqrt.mask.nxv16f16( + undef, + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vfsqrt.nxv32f16( + , + i64); + +define @intrinsic_vfsqrt_v_nxv32f16_nxv32f16() nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsqrt_v_nxv32f16_nxv32f16 +; CHECK: vsetvli {{.*}}, a0, e16,m8 +; CHECK: vfsqrt.v {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfsqrt.nxv32f16( + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vfsqrt.mask.nxv32f16( + , + , + , + i64); + +define @intrinsic_vfsqrt_mask_v_nxv32f16_nxv32f16() nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv32f16_nxv32f16 +; CHECK: vsetvli {{.*}}, a0, e16,m8 +; CHECK: vfsqrt.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfsqrt.mask.nxv32f16( + undef, + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vfsqrt.nxv1f32( + , + i64); + +define @intrinsic_vfsqrt_v_nxv1f32_nxv1f32() nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsqrt_v_nxv1f32_nxv1f32 +; CHECK: vsetvli {{.*}}, a0, e32,mf2 +; CHECK: vfsqrt.v {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfsqrt.nxv1f32( + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vfsqrt.mask.nxv1f32( + , + , + , + i64); + +define @intrinsic_vfsqrt_mask_v_nxv1f32_nxv1f32() nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv1f32_nxv1f32 +; CHECK: vsetvli {{.*}}, a0, e32,mf2 +; CHECK: vfsqrt.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfsqrt.mask.nxv1f32( + undef, + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vfsqrt.nxv2f32( + , + i64); + +define @intrinsic_vfsqrt_v_nxv2f32_nxv2f32() nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsqrt_v_nxv2f32_nxv2f32 +; CHECK: vsetvli {{.*}}, a0, e32,m1 +; CHECK: vfsqrt.v {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfsqrt.nxv2f32( + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vfsqrt.mask.nxv2f32( + , + , + , + i64); + +define @intrinsic_vfsqrt_mask_v_nxv2f32_nxv2f32() nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv2f32_nxv2f32 +; CHECK: vsetvli {{.*}}, a0, e32,m1 +; CHECK: vfsqrt.v 
{{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfsqrt.mask.nxv2f32( + undef, + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vfsqrt.nxv4f32( + , + i64); + +define @intrinsic_vfsqrt_v_nxv4f32_nxv4f32() nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsqrt_v_nxv4f32_nxv4f32 +; CHECK: vsetvli {{.*}}, a0, e32,m2 +; CHECK: vfsqrt.v {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfsqrt.nxv4f32( + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vfsqrt.mask.nxv4f32( + , + , + , + i64); + +define @intrinsic_vfsqrt_mask_v_nxv4f32_nxv4f32() nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv4f32_nxv4f32 +; CHECK: vsetvli {{.*}}, a0, e32,m2 +; CHECK: vfsqrt.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfsqrt.mask.nxv4f32( + undef, + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vfsqrt.nxv8f32( + , + i64); + +define @intrinsic_vfsqrt_v_nxv8f32_nxv8f32() nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsqrt_v_nxv8f32_nxv8f32 +; CHECK: vsetvli {{.*}}, a0, e32,m4 +; CHECK: vfsqrt.v {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfsqrt.nxv8f32( + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vfsqrt.mask.nxv8f32( + , + , + , + i64); + +define @intrinsic_vfsqrt_mask_v_nxv8f32_nxv8f32() nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv8f32_nxv8f32 +; CHECK: vsetvli {{.*}}, a0, e32,m4 +; CHECK: vfsqrt.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfsqrt.mask.nxv8f32( + undef, + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vfsqrt.nxv16f32( + , + i64); + +define @intrinsic_vfsqrt_v_nxv16f32_nxv16f32() nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsqrt_v_nxv16f32_nxv16f32 +; CHECK: vsetvli {{.*}}, a0, e32,m8 +; CHECK: vfsqrt.v {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfsqrt.nxv16f32( + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vfsqrt.mask.nxv16f32( + , + , + , + i64); + +define @intrinsic_vfsqrt_mask_v_nxv16f32_nxv16f32() nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv16f32_nxv16f32 +; CHECK: vsetvli {{.*}}, a0, e32,m8 +; CHECK: vfsqrt.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfsqrt.mask.nxv16f32( + undef, + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vfsqrt.nxv1f64( + , + i64); + +define @intrinsic_vfsqrt_v_nxv1f64_nxv1f64() nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsqrt_v_nxv1f64_nxv1f64 +; CHECK: vsetvli {{.*}}, a0, e64,m1 +; CHECK: vfsqrt.v {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfsqrt.nxv1f64( + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vfsqrt.mask.nxv1f64( + , + , + , + i64); + +define @intrinsic_vfsqrt_mask_v_nxv1f64_nxv1f64() nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv1f64_nxv1f64 +; CHECK: vsetvli {{.*}}, a0, e64,m1 +; CHECK: vfsqrt.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfsqrt.mask.nxv1f64( + undef, + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vfsqrt.nxv2f64( + , + i64); + +define @intrinsic_vfsqrt_v_nxv2f64_nxv2f64() nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsqrt_v_nxv2f64_nxv2f64 +; CHECK: vsetvli {{.*}}, a0, e64,m2 +; CHECK: vfsqrt.v {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfsqrt.nxv2f64( + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vfsqrt.mask.nxv2f64( + , + , + , + i64); + +define @intrinsic_vfsqrt_mask_v_nxv2f64_nxv2f64() nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv2f64_nxv2f64 +; CHECK: vsetvli {{.*}}, a0, e64,m2 +; CHECK: vfsqrt.v {{v[0-9]+}}, 
{{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfsqrt.mask.nxv2f64( + undef, + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vfsqrt.nxv4f64( + , + i64); + +define @intrinsic_vfsqrt_v_nxv4f64_nxv4f64() nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsqrt_v_nxv4f64_nxv4f64 +; CHECK: vsetvli {{.*}}, a0, e64,m4 +; CHECK: vfsqrt.v {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfsqrt.nxv4f64( + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vfsqrt.mask.nxv4f64( + , + , + , + i64); + +define @intrinsic_vfsqrt_mask_v_nxv4f64_nxv4f64() nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv4f64_nxv4f64 +; CHECK: vsetvli {{.*}}, a0, e64,m4 +; CHECK: vfsqrt.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfsqrt.mask.nxv4f64( + undef, + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vfsqrt.nxv8f64( + , + i64); + +define @intrinsic_vfsqrt_v_nxv8f64_nxv8f64() nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsqrt_v_nxv8f64_nxv8f64 +; CHECK: vsetvli {{.*}}, a0, e64,m8 +; CHECK: vfsqrt.v {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfsqrt.nxv8f64( + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vfsqrt.mask.nxv8f64( + , + , + , + i64); + +define @intrinsic_vfsqrt_mask_v_nxv8f64_nxv8f64() nounwind { +entry: +; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv8f64_nxv8f64 +; CHECK: vsetvli {{.*}}, a0, e64,m8 +; CHECK: vfsqrt.v {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfsqrt.mask.nxv8f64( + undef, + undef, + undef, + i64 undef) + + ret %a +}
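For reference, here is a minimal IR-level sketch (not part of the patch) of how the new unmasked and masked intrinsics would be called with live operands instead of the undef placeholders used in the tests above. The function names @example_vfsqrt and @example_vfsqrt_mask and the choice of the nxv2f32 variant are illustrative only; the intrinsic names and signatures match the declarations in vfsqrt-rv32.ll.

; Unmasked form: (vector_in, vl); the result type equals the source type.
declare <vscale x 2 x float> @llvm.riscv.vfsqrt.nxv2f32(<vscale x 2 x float>, i32)
; Masked form: (maskedoff, vector_in, mask, vl), matching RISCVUnaryAAMask above.
declare <vscale x 2 x float> @llvm.riscv.vfsqrt.mask.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x i1>, i32)

define <vscale x 2 x float> @example_vfsqrt(<vscale x 2 x float> %v, i32 %vl) {
  %r = call <vscale x 2 x float> @llvm.riscv.vfsqrt.nxv2f32(
    <vscale x 2 x float> %v,
    i32 %vl)
  ret <vscale x 2 x float> %r
}

define <vscale x 2 x float> @example_vfsqrt_mask(<vscale x 2 x float> %maskedoff, <vscale x 2 x float> %v, <vscale x 2 x i1> %mask, i32 %vl) {
  ; Inactive elements are taken from %maskedoff; active elements get the square root of %v.
  %r = call <vscale x 2 x float> @llvm.riscv.vfsqrt.mask.nxv2f32(
    <vscale x 2 x float> %maskedoff,
    <vscale x 2 x float> %v,
    <vscale x 2 x i1> %mask,
    i32 %vl)
  ret <vscale x 2 x float> %r
}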