Index: llvm/include/llvm/IR/IntrinsicsRISCV.td
===================================================================
--- llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -189,6 +189,19 @@
                    LLVMPointerType<LLVMMatchType<0>>, llvm_anyvector_ty,
                    LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                   [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic;
+  // For destination vector type is the same as source vector.
+  // Input: (vector_in, vl)
+  class RISCVUnaryAANoMask
+        : Intrinsic<[llvm_anyvector_ty],
+                    [LLVMMatchType<0>, llvm_anyint_ty],
+                    [IntrNoMem]>, RISCVVIntrinsic;
+  // For destination vector type is the same as first source vector (with mask).
+  // Input: (maskedoff, vector_in, mask, vl)
+  class RISCVUnaryAAMask
+        : Intrinsic<[llvm_anyvector_ty],
+                    [LLVMMatchType<0>, LLVMMatchType<0>,
+                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
+                    [IntrNoMem]>, RISCVVIntrinsic;
   // For destination vector type is the same as first and second source vector.
   // Input: (vector_in, vector_in, vl)
   class RISCVBinaryAAANoMask
@@ -210,7 +223,6 @@
                   [IntrNoMem]>, RISCVVIntrinsic {
     let ExtendOperand = 2;
   }
-
   // For destination vector type is the same as first source vector (with mask).
   // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl)
   class RISCVBinaryAAXMask
@@ -326,7 +338,6 @@
                   [IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
     let ExtendOperand = 3;
   }
-
   class RISCVTernaryAAAXNoMask
       : Intrinsic<[llvm_anyvector_ty],
                   [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty,
@@ -470,7 +481,10 @@
     def "int_riscv_" # NAME : RISCVIStore;
     def "int_riscv_" # NAME # "_mask" : RISCVIStoreMask;
   }
-
+  multiclass RISCVUnaryAA {
+    def "int_riscv_" # NAME : RISCVUnaryAANoMask;
+    def "int_riscv_" # NAME # "_mask" : RISCVUnaryAAMask;
+  }
   // AAX means the destination type(A) is the same as the first source
   // type(A). X means any type for the second source operand.
   multiclass RISCVBinaryAAX {
@@ -685,6 +699,8 @@
   defm vfwmsac : RISCVTernaryWide;
   defm vfwnmsac : RISCVTernaryWide;
 
+  defm vfsqrt : RISCVUnaryAA;
+
   defm vfmin : RISCVBinaryAAX;
   defm vfmax : RISCVBinaryAAX;
 
Index: llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
===================================================================
--- llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -1092,6 +1092,15 @@
   }
 }
 
+multiclass VPseudoUnaryV_V {
+  foreach m = MxList.m in {
+    let VLMul = m.value in {
+      def "_V_" # m.MX : VPseudoUnaryNoMask<m.vrclass, m.vrclass>;
+      def "_V_" # m.MX # "_MASK" : VPseudoUnaryMask<m.vrclass, m.vrclass>;
+    }
+  }
+}
+
 multiclass PseudoUnaryV_VF2 {
   defvar constraints = "@earlyclobber $rd";
   foreach m = MxList.m[1-6] in
@@ -1683,6 +1692,18 @@
   }
 }
 
+multiclass VPatUnaryV_V<string intrinsic, string instruction,
+                        list<VTypeInfo> vtilist> {
+  foreach vti = vtilist in {
+    def : VPatUnaryNoMask<intrinsic, instruction, "V",
+                          vti.Vector, vti.Vector,
+                          vti.SEW, vti.LMul, vti.RegClass>;
+    def : VPatUnaryMask<intrinsic, instruction, "V",
+                        vti.Vector, vti.Vector, vti.Mask,
+                        vti.SEW, vti.LMul, vti.RegClass, vti.RegClass>;
+  }
+}
+
 multiclass VPatNullaryV<string intrinsic, string instruction>
 {
   foreach vti = AllIntegerVectors in {
@@ -1707,7 +1728,6 @@
                         (NoX0 GPR:$vl), mti.SEW)>;
   }
 }
-
 multiclass VPatBinary<string intrinsic,
                       string inst,
                       string kind,
@@ -2470,6 +2490,11 @@
 defm PseudoVFWNMSAC : VPseudoTernaryW_VV_VX;
 
 //===----------------------------------------------------------------------===//
+// 14.8. Vector Floating-Point Square-Root Instruction
+//===----------------------------------------------------------------------===//
+defm PseudoVFSQRT : VPseudoUnaryV_V;
+
+//===----------------------------------------------------------------------===//
 // 14.9. Vector Floating-Point Min/Max Instructions
 //===----------------------------------------------------------------------===//
 defm PseudoVFMIN : VPseudoBinaryV_VV_VX;
@@ -3237,6 +3261,11 @@
 defm "" : VPatTernaryW_VV_VX<"int_riscv_vfwnmsac", "PseudoVFWNMSAC", AllWidenableFloatVectors>;
 
 //===----------------------------------------------------------------------===//
+// 14.8. Vector Floating-Point Square-Root Instruction
+//===----------------------------------------------------------------------===//
+defm "" : VPatUnaryV_V<"int_riscv_vfsqrt", "PseudoVFSQRT", AllFloatVectors>;
+
+//===----------------------------------------------------------------------===//
 // 14.9. Vector Floating-Point Min/Max Instructions
 //===----------------------------------------------------------------------===//
 defm "" : VPatBinaryV_VV_VX<"int_riscv_vfmin", "PseudoVFMIN", AllFloatVectors>;
Index: llvm/test/CodeGen/RISCV/rvv/vfsqrt-rv32.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/RISCV/rvv/vfsqrt-rv32.ll
@@ -0,0 +1,512 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x half> @llvm.riscv.vfsqrt.nxv1f16(
+  <vscale x 1 x half>,
+  i32);
+
+define <vscale x 1 x half> @intrinsic_vfsqrt_v_nxv1f16_nxv1f16(
+; CHECK-LABEL: intrinsic_vfsqrt_v_nxv1f16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vfsqrt.v v16, v16
+; CHECK-NEXT:    jalr zero, 0(ra)
+  <vscale x 1 x half> %0,
+  i32 %1) nounwind {
+entry:
+  %a = call <vscale x 1 x half> @llvm.riscv.vfsqrt.nxv1f16(
+    <vscale x 1 x half> %0,
+    i32 %1)
+
+  ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfsqrt.mask.nxv1f16(
+  <vscale x 1 x half>,
+  <vscale x 1 x half>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x half> @intrinsic_vfsqrt_mask_v_nxv1f16_nxv1f16(
+; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv1f16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
+; CHECK-NEXT:    vfsqrt.v v16, v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+  <vscale x 1 x half> %0,
+  <vscale x 1 x half> %1,
+  <vscale x 1 x i1> %2,
+  i32 %3) nounwind {
+entry:
+  %a = call <vscale x 1 x half> @llvm.riscv.vfsqrt.mask.nxv1f16(
+    <vscale x 1 x half> %0,
+    <vscale x 1 x half> %1,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfsqrt.nxv2f16(
+  <vscale x 2 x half>,
+  i32);
+
+define <vscale x 2 x half> @intrinsic_vfsqrt_v_nxv2f16_nxv2f16(
+; CHECK-LABEL: intrinsic_vfsqrt_v_nxv2f16_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vfsqrt.v v16, v16
+; CHECK-NEXT:    jalr zero, 0(ra)
+  <vscale x 2 x half> %0,
+  i32 %1) nounwind {
+entry:
+  %a = call <vscale x 2 x half> @llvm.riscv.vfsqrt.nxv2f16(
+    <vscale x 2 x half> %0,
+    i32 %1)
+
+  ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfsqrt.mask.nxv2f16(
+  <vscale x 2 x half>,
+  <vscale x 2 x half>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x half> @intrinsic_vfsqrt_mask_v_nxv2f16_nxv2f16(
+; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv2f16_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
+; CHECK-NEXT:    vfsqrt.v v16, v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+  <vscale x 2 x half> %0,
+  <vscale x 2 x half> %1,
+  <vscale x 2 x i1> %2,
+  i32 %3) nounwind {
+entry:
+  %a = call <vscale x 2 x half> @llvm.riscv.vfsqrt.mask.nxv2f16(
+    <vscale x 2 x half> %0,
+    <vscale x 2 x half> %1,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfsqrt.nxv4f16(
+  <vscale x 4 x half>,
+  i32);
+
+define <vscale x 4 x half> @intrinsic_vfsqrt_v_nxv4f16_nxv4f16(
+; CHECK-LABEL: intrinsic_vfsqrt_v_nxv4f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vfsqrt.v v16, v16
+; CHECK-NEXT:    jalr zero, 0(ra)
+  <vscale x 4 x half> %0,
+  i32 %1) nounwind {
+entry:
+  %a = call <vscale x 4 x half> @llvm.riscv.vfsqrt.nxv4f16(
+    <vscale x 4 x half> %0,
+    i32 %1)
+
+  ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfsqrt.mask.nxv4f16(
+  <vscale x 4 x half>,
+  <vscale x 4 x half>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x half> @intrinsic_vfsqrt_mask_v_nxv4f16_nxv4f16(
+; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv4f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
+; CHECK-NEXT:    vfsqrt.v v16, v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+  <vscale x 4 x half> %0,
+  <vscale x 4 x half> %1,
+  <vscale x 4 x i1> %2,
+  i32 %3) nounwind {
+entry:
+  %a = call <vscale x 4 x half> @llvm.riscv.vfsqrt.mask.nxv4f16(
+    <vscale x 4 x half> %0,
+    <vscale x 4 x half> %1,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfsqrt.nxv8f16(
+  <vscale x 8 x half>,
+  i32);
+
+define <vscale x 8 x half> @intrinsic_vfsqrt_v_nxv8f16_nxv8f16(
+; CHECK-LABEL: intrinsic_vfsqrt_v_nxv8f16_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vfsqrt.v v16, v16
+; CHECK-NEXT:    jalr zero, 0(ra)
+  <vscale x 8 x half> %0,
+  i32 %1) nounwind {
+entry:
+  %a = call <vscale x 8 x half> @llvm.riscv.vfsqrt.nxv8f16(
+    <vscale x 8 x half> %0,
+    i32 %1)
+
+  ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfsqrt.mask.nxv8f16(
+  <vscale x 8 x half>,
+  <vscale x 8 x half>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x half> @intrinsic_vfsqrt_mask_v_nxv8f16_nxv8f16(
+; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv8f16_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
+; CHECK-NEXT:    vfsqrt.v v16, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+  <vscale x 8 x half> %0,
+  <vscale x 8 x half> %1,
+  <vscale x 8 x i1> %2,
+  i32 %3) nounwind {
+entry:
+  %a = call <vscale x 8 x half> @llvm.riscv.vfsqrt.mask.nxv8f16(
+    <vscale x 8 x half> %0,
+    <vscale x 8 x half> %1,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfsqrt.nxv16f16(
+  <vscale x 16 x half>,
+  i32);
+
+define <vscale x 16 x half> @intrinsic_vfsqrt_v_nxv16f16_nxv16f16(
+; CHECK-LABEL: intrinsic_vfsqrt_v_nxv16f16_nxv16f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vfsqrt.v v16, v16
+; CHECK-NEXT:    jalr zero, 0(ra)
+  <vscale x 16 x half> %0,
+  i32 %1) nounwind {
+entry:
+  %a = call <vscale x 16 x half> @llvm.riscv.vfsqrt.nxv16f16(
+    <vscale x 16 x half> %0,
+    i32 %1)
+
+  ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfsqrt.mask.nxv16f16(
+  <vscale x 16 x half>,
+  <vscale x 16 x half>,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x half> @intrinsic_vfsqrt_mask_v_nxv16f16_nxv16f16(
+; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv16f16_nxv16f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
+; CHECK-NEXT:    vfsqrt.v v16, v20, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+  <vscale x 16 x half> %0,
+  <vscale x 16 x half> %1,
+  <vscale x 16 x i1> %2,
+  i32 %3) nounwind {
+entry:
+  %a = call <vscale x 16 x half> @llvm.riscv.vfsqrt.mask.nxv16f16(
+    <vscale x 16 x half> %0,
+    <vscale x 16 x half> %1,
+    <vscale x 16 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 32 x half> @llvm.riscv.vfsqrt.nxv32f16(
+  <vscale x 32 x half>,
+  i32);
+
+define <vscale x 32 x half> @intrinsic_vfsqrt_v_nxv32f16_nxv32f16(
+; CHECK-LABEL: intrinsic_vfsqrt_v_nxv32f16_nxv32f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
+; CHECK-NEXT:    vfsqrt.v v16, v16
+; CHECK-NEXT:    jalr zero, 0(ra)
+  <vscale x 32 x half> %0,
+  i32 %1) nounwind {
+entry:
+  %a = call <vscale x 32 x half> @llvm.riscv.vfsqrt.nxv32f16(
+    <vscale x 32 x half> %0,
+    i32 %1)
+
+  ret <vscale x 32 x half> %a
+}
+
+declare <vscale x 32 x half> @llvm.riscv.vfsqrt.mask.nxv32f16(
+  <vscale x 32 x half>,
+  <vscale x 32 x half>,
+  <vscale x 32 x i1>,
+  i32);
+
+define <vscale x 32 x half> @intrinsic_vfsqrt_mask_v_nxv32f16_nxv32f16(
+; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv32f16_nxv32f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
+; CHECK-NEXT:    vfsqrt.v v16, v8, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+  <vscale x 32 x half> %0,
+  <vscale x 32 x half> %1,
+  <vscale x 32 x i1> %2,
+  i32 %3) nounwind {
+entry:
+  %a = call <vscale x 32 x half> @llvm.riscv.vfsqrt.mask.nxv32f16(
+    <vscale x 32 x half> %0,
+    <vscale x 32 x half> %1,
+    <vscale x 32 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 32 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfsqrt.nxv1f32(
+  <vscale x 1 x float>,
+  i32);
+
+define <vscale x 1 x float> @intrinsic_vfsqrt_v_nxv1f32_nxv1f32(
+; CHECK-LABEL: intrinsic_vfsqrt_v_nxv1f32_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vfsqrt.v v16, v16
+; CHECK-NEXT:    jalr zero, 0(ra)
+  <vscale x 1 x float> %0,
+  i32 %1) nounwind {
+entry:
+  %a = call <vscale x 1 x float> @llvm.riscv.vfsqrt.nxv1f32(
+    <vscale x 1 x float> %0,
+    i32 %1)
+
+  ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfsqrt.mask.nxv1f32(
+  <vscale x 1 x float>,
+  <vscale x 1 x float>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x float> @intrinsic_vfsqrt_mask_v_nxv1f32_nxv1f32(
+; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv1f32_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
+; CHECK-NEXT:    vfsqrt.v v16, v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+  <vscale x 1 x float> %0,
+  <vscale x 1 x float> %1,
+  <vscale x 1 x i1> %2,
+  i32 %3) nounwind {
+entry:
+  %a = call <vscale x 1 x float> @llvm.riscv.vfsqrt.mask.nxv1f32(
+    <vscale x 1 x float> %0,
+    <vscale x 1 x float> %1,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfsqrt.nxv2f32(
+  <vscale x 2 x float>,
+  i32);
+
+define <vscale x 2 x float> @intrinsic_vfsqrt_v_nxv2f32_nxv2f32(
+; CHECK-LABEL: intrinsic_vfsqrt_v_nxv2f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vfsqrt.v v16, v16
+; CHECK-NEXT:    jalr zero, 0(ra)
+  <vscale x 2 x float> %0,
+  i32 %1) nounwind {
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.vfsqrt.nxv2f32(
+    <vscale x 2 x float> %0,
+    i32 %1)
+
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfsqrt.mask.nxv2f32(
+  <vscale x 2 x float>,
+  <vscale x 2 x float>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x float> @intrinsic_vfsqrt_mask_v_nxv2f32_nxv2f32(
+; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv2f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
+; CHECK-NEXT:    vfsqrt.v v16, v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+  <vscale x 2 x float> %0,
+  <vscale x 2 x float> %1,
+  <vscale x 2 x i1> %2,
+  i32 %3) nounwind {
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.vfsqrt.mask.nxv2f32(
+    <vscale x 2 x float> %0,
+    <vscale x 2 x float> %1,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfsqrt.nxv4f32(
+  <vscale x 4 x float>,
+  i32);
+
+define <vscale x 4 x float> @intrinsic_vfsqrt_v_nxv4f32_nxv4f32(
+; CHECK-LABEL: intrinsic_vfsqrt_v_nxv4f32_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vfsqrt.v v16, v16
+; CHECK-NEXT:    jalr zero, 0(ra)
+  <vscale x 4 x float> %0,
+  i32 %1) nounwind {
+entry:
+  %a = call <vscale x 4 x float> @llvm.riscv.vfsqrt.nxv4f32(
+    <vscale x 4 x float> %0,
+    i32 %1)
+
+  ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfsqrt.mask.nxv4f32(
+  <vscale x 4 x float>,
+  <vscale x 4 x float>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x float> @intrinsic_vfsqrt_mask_v_nxv4f32_nxv4f32(
+; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv4f32_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
+; CHECK-NEXT:    vfsqrt.v v16, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+  <vscale x 4 x float> %0,
+  <vscale x 4 x float> %1,
+  <vscale x 4 x i1> %2,
+  i32 %3) nounwind {
+entry:
+  %a = call <vscale x 4 x float> @llvm.riscv.vfsqrt.mask.nxv4f32(
+    <vscale x 4 x float> %0,
+    <vscale x 4 x float> %1,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfsqrt.nxv8f32(
+  <vscale x 8 x float>,
+  i32);
+
+define <vscale x 8 x float> @intrinsic_vfsqrt_v_nxv8f32_nxv8f32(
+; CHECK-LABEL: intrinsic_vfsqrt_v_nxv8f32_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vfsqrt.v v16, v16
+; CHECK-NEXT:    jalr zero, 0(ra)
+  <vscale x 8 x float> %0,
+  i32 %1) nounwind {
+entry:
+  %a = call <vscale x 8 x float> @llvm.riscv.vfsqrt.nxv8f32(
+    <vscale x 8 x float> %0,
+    i32 %1)
+
+  ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfsqrt.mask.nxv8f32(
+  <vscale x 8 x float>,
+  <vscale x 8 x float>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x float> @intrinsic_vfsqrt_mask_v_nxv8f32_nxv8f32(
+; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv8f32_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
+; CHECK-NEXT:    vfsqrt.v v16, v20, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+  <vscale x 8 x float> %0,
+  <vscale x 8 x float> %1,
+  <vscale x 8 x i1> %2,
+  i32 %3) nounwind {
+entry:
+  %a = call <vscale x 8 x float> @llvm.riscv.vfsqrt.mask.nxv8f32(
+    <vscale x 8 x float> %0,
+    <vscale x 8 x float> %1,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vfsqrt.nxv16f32(
+  <vscale x 16 x float>,
+  i32);
+
+define <vscale x 16 x float> @intrinsic_vfsqrt_v_nxv16f32_nxv16f32(
+; CHECK-LABEL: intrinsic_vfsqrt_v_nxv16f32_nxv16f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
+; CHECK-NEXT:    vfsqrt.v v16, v16
+; CHECK-NEXT:    jalr zero, 0(ra)
+  <vscale x 16 x float> %0,
+  i32 %1) nounwind {
+entry:
+  %a = call <vscale x 16 x float> @llvm.riscv.vfsqrt.nxv16f32(
+    <vscale x 16 x float> %0,
+    i32 %1)
+
+  ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vfsqrt.mask.nxv16f32(
+  <vscale x 16 x float>,
+  <vscale x 16 x float>,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x float> @intrinsic_vfsqrt_mask_v_nxv16f32_nxv16f32(
+; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv16f32_nxv16f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
+; CHECK-NEXT:    vfsqrt.v v16, v8, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+  <vscale x 16 x float> %0,
+  <vscale x 16 x float> %1,
+  <vscale x 16 x i1> %2,
+  i32 %3) nounwind {
+entry:
+  %a = call <vscale x 16 x float> @llvm.riscv.vfsqrt.mask.nxv16f32(
+    <vscale x 16 x float> %0,
+    <vscale x 16 x float> %1,
+    <vscale x 16 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 16 x float> %a
+}
Index: llvm/test/CodeGen/RISCV/rvv/vfsqrt-rv64.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/RISCV/rvv/vfsqrt-rv64.ll
@@ -0,0 +1,698 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x half> @llvm.riscv.vfsqrt.nxv1f16(
+  <vscale x 1 x half>,
+  i64);
+
+define <vscale x 1 x half> @intrinsic_vfsqrt_v_nxv1f16_nxv1f16(
+; CHECK-LABEL: intrinsic_vfsqrt_v_nxv1f16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vfsqrt.v v16, v16
+; CHECK-NEXT:    jalr zero, 0(ra)
+  <vscale x 1 x half> %0,
+  i64 %1) nounwind {
+entry:
+  %a = call <vscale x 1 x half> @llvm.riscv.vfsqrt.nxv1f16(
+    <vscale x 1 x half> %0,
+    i64 %1)
+
+  ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfsqrt.mask.nxv1f16(
+  <vscale x 1 x half>,
+  <vscale x 1 x half>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x half> @intrinsic_vfsqrt_mask_v_nxv1f16_nxv1f16(
+; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv1f16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
+; CHECK-NEXT:    vfsqrt.v v16, v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+  <vscale x 1 x half> %0,
+  <vscale x 1 x half> %1,
+  <vscale x 1 x i1> %2,
+  i64 %3) nounwind {
+entry:
+  %a = call <vscale x 1 x half> @llvm.riscv.vfsqrt.mask.nxv1f16(
+    <vscale x 1 x half> %0,
+    <vscale x 1 x half> %1,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfsqrt.nxv2f16(
+  <vscale x 2 x half>,
+  i64);
+
+define <vscale x 2 x half> @intrinsic_vfsqrt_v_nxv2f16_nxv2f16(
+; CHECK-LABEL: intrinsic_vfsqrt_v_nxv2f16_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vfsqrt.v v16, v16
+; CHECK-NEXT:    jalr zero, 0(ra)
+  <vscale x 2 x half> %0,
+  i64 %1) nounwind {
+entry:
+  %a = call <vscale x 2 x half> @llvm.riscv.vfsqrt.nxv2f16(
+    <vscale x 2 x half> %0,
+    i64 %1)
+
+  ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfsqrt.mask.nxv2f16(
+  <vscale x 2 x half>,
+  <vscale x 2 x half>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x half> @intrinsic_vfsqrt_mask_v_nxv2f16_nxv2f16(
+; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv2f16_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
+; CHECK-NEXT:    vfsqrt.v v16, v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+  <vscale x 2 x half> %0,
+  <vscale x 2 x half> %1,
+  <vscale x 2 x i1> %2,
+  i64 %3) nounwind {
+entry:
+  %a = call <vscale x 2 x half> @llvm.riscv.vfsqrt.mask.nxv2f16(
+    <vscale x 2 x half> %0,
+    <vscale x 2 x half> %1,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfsqrt.nxv4f16(
+  <vscale x 4 x half>,
+  i64);
+
+define <vscale x 4 x half> @intrinsic_vfsqrt_v_nxv4f16_nxv4f16(
+; CHECK-LABEL: intrinsic_vfsqrt_v_nxv4f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vfsqrt.v v16, v16
+; CHECK-NEXT:    jalr zero, 0(ra)
+  <vscale x 4 x half> %0,
+  i64 %1) nounwind {
+entry:
+  %a = call <vscale x 4 x half> @llvm.riscv.vfsqrt.nxv4f16(
+    <vscale x 4 x half> %0,
+    i64 %1)
+
+  ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfsqrt.mask.nxv4f16(
+  <vscale x 4 x half>,
+  <vscale x 4 x half>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x half> @intrinsic_vfsqrt_mask_v_nxv4f16_nxv4f16(
+; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv4f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
+; CHECK-NEXT:    vfsqrt.v v16, v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+  <vscale x 4 x half> %0,
+  <vscale x 4 x half> %1,
+  <vscale x 4 x i1> %2,
+  i64 %3) nounwind {
+entry:
+  %a = call <vscale x 4 x half> @llvm.riscv.vfsqrt.mask.nxv4f16(
+    <vscale x 4 x half> %0,
+    <vscale x 4 x half> %1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfsqrt.nxv8f16(
+  <vscale x 8 x half>,
+  i64);
+
+define <vscale x 8 x half> @intrinsic_vfsqrt_v_nxv8f16_nxv8f16(
+; CHECK-LABEL: intrinsic_vfsqrt_v_nxv8f16_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vfsqrt.v v16, v16
+; CHECK-NEXT:    jalr zero, 0(ra)
+  <vscale x 8 x half> %0,
+  i64 %1) nounwind {
+entry:
+  %a = call <vscale x 8 x half> @llvm.riscv.vfsqrt.nxv8f16(
+    <vscale x 8 x half> %0,
+    i64 %1)
+
+  ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfsqrt.mask.nxv8f16(
+  <vscale x 8 x half>,
+  <vscale x 8 x half>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x half> @intrinsic_vfsqrt_mask_v_nxv8f16_nxv8f16(
+; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv8f16_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
+; CHECK-NEXT:    vfsqrt.v v16, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+  <vscale x 8 x half> %0,
+  <vscale x 8 x half> %1,
+  <vscale x 8 x i1> %2,
+  i64 %3) nounwind {
+entry:
+  %a = call <vscale x 8 x half> @llvm.riscv.vfsqrt.mask.nxv8f16(
+    <vscale x 8 x half> %0,
+    <vscale x 8 x half> %1,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfsqrt.nxv16f16(
+  <vscale x 16 x half>,
+  i64);
+
+define <vscale x 16 x half> @intrinsic_vfsqrt_v_nxv16f16_nxv16f16(
+; CHECK-LABEL: intrinsic_vfsqrt_v_nxv16f16_nxv16f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vfsqrt.v v16, v16
+; CHECK-NEXT:    jalr zero, 0(ra)
+  <vscale x 16 x half> %0,
+  i64 %1) nounwind {
+entry:
+  %a = call <vscale x 16 x half> @llvm.riscv.vfsqrt.nxv16f16(
+    <vscale x 16 x half> %0,
+    i64 %1)
+
+  ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfsqrt.mask.nxv16f16(
+  <vscale x 16 x half>,
+  <vscale x 16 x half>,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x half> @intrinsic_vfsqrt_mask_v_nxv16f16_nxv16f16(
+; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv16f16_nxv16f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
+; CHECK-NEXT:    vfsqrt.v v16, v20, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+  <vscale x 16 x half> %0,
+  <vscale x 16 x half> %1,
+  <vscale x 16 x i1> %2,
+  i64 %3) nounwind {
+entry:
+  %a = call <vscale x 16 x half> @llvm.riscv.vfsqrt.mask.nxv16f16(
+    <vscale x 16 x half> %0,
+    <vscale x 16 x half> %1,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 32 x half> @llvm.riscv.vfsqrt.nxv32f16(
+  <vscale x 32 x half>,
+  i64);
+
+define <vscale x 32 x half> @intrinsic_vfsqrt_v_nxv32f16_nxv32f16(
+; CHECK-LABEL: intrinsic_vfsqrt_v_nxv32f16_nxv32f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
+; CHECK-NEXT:    vfsqrt.v v16, v16
+; CHECK-NEXT:    jalr zero, 0(ra)
+  <vscale x 32 x half> %0,
+  i64 %1) nounwind {
+entry:
+  %a = call <vscale x 32 x half> @llvm.riscv.vfsqrt.nxv32f16(
+    <vscale x 32 x half> %0,
+    i64 %1)
+
+  ret <vscale x 32 x half> %a
+}
+
+declare <vscale x 32 x half> @llvm.riscv.vfsqrt.mask.nxv32f16(
+  <vscale x 32 x half>,
+  <vscale x 32 x half>,
+  <vscale x 32 x i1>,
+  i64);
+
+define <vscale x 32 x half> @intrinsic_vfsqrt_mask_v_nxv32f16_nxv32f16(
+; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv32f16_nxv32f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e16,m8,tu,mu
+; CHECK-NEXT:    vfsqrt.v v16, v8, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+  <vscale x 32 x half> %0,
+  <vscale x 32 x half> %1,
+  <vscale x 32 x i1> %2,
+  i64 %3) nounwind {
+entry:
+  %a = call <vscale x 32 x half> @llvm.riscv.vfsqrt.mask.nxv32f16(
+    <vscale x 32 x half> %0,
+    <vscale x 32 x half> %1,
+    <vscale x 32 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 32 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfsqrt.nxv1f32(
+  <vscale x 1 x float>,
+  i64);
+
+define <vscale x 1 x float> @intrinsic_vfsqrt_v_nxv1f32_nxv1f32(
+; CHECK-LABEL: intrinsic_vfsqrt_v_nxv1f32_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vfsqrt.v v16, v16
+; CHECK-NEXT:    jalr zero, 0(ra)
+  <vscale x 1 x float> %0,
+  i64 %1) nounwind {
+entry:
+  %a = call <vscale x 1 x float> @llvm.riscv.vfsqrt.nxv1f32(
+    <vscale x 1 x float> %0,
+    i64 %1)
+
+  ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfsqrt.mask.nxv1f32(
+  <vscale x 1 x float>,
+  <vscale x 1 x float>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x float> @intrinsic_vfsqrt_mask_v_nxv1f32_nxv1f32(
+; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv1f32_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
+; CHECK-NEXT:    vfsqrt.v v16, v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+  <vscale x 1 x float> %0,
+  <vscale x 1 x float> %1,
+  <vscale x 1 x i1> %2,
+  i64 %3) nounwind {
+entry:
+  %a = call <vscale x 1 x float> @llvm.riscv.vfsqrt.mask.nxv1f32(
+    <vscale x 1 x float> %0,
+    <vscale x 1 x float> %1,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfsqrt.nxv2f32(
+  <vscale x 2 x float>,
+  i64);
+
+define <vscale x 2 x float> @intrinsic_vfsqrt_v_nxv2f32_nxv2f32(
+; CHECK-LABEL: intrinsic_vfsqrt_v_nxv2f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vfsqrt.v v16, v16
+; CHECK-NEXT:    jalr zero, 0(ra)
+  <vscale x 2 x float> %0,
+  i64 %1) nounwind {
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.vfsqrt.nxv2f32(
+    <vscale x 2 x float> %0,
+    i64 %1)
+
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfsqrt.mask.nxv2f32(
+  <vscale x 2 x float>,
+  <vscale x 2 x float>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x float> @intrinsic_vfsqrt_mask_v_nxv2f32_nxv2f32(
+; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv2f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
+; CHECK-NEXT:    vfsqrt.v v16, v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+  <vscale x 2 x float> %0,
+  <vscale x 2 x float> %1,
+  <vscale x 2 x i1> %2,
+  i64 %3) nounwind {
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.vfsqrt.mask.nxv2f32(
+    <vscale x 2 x float> %0,
+    <vscale x 2 x float> %1,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfsqrt.nxv4f32(
+  <vscale x 4 x float>,
+  i64);
+
+define <vscale x 4 x float> @intrinsic_vfsqrt_v_nxv4f32_nxv4f32(
+; CHECK-LABEL: intrinsic_vfsqrt_v_nxv4f32_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vfsqrt.v v16, v16
+; CHECK-NEXT:    jalr zero, 0(ra)
+  <vscale x 4 x float> %0,
+  i64 %1) nounwind {
+entry:
+  %a = call <vscale x 4 x float> @llvm.riscv.vfsqrt.nxv4f32(
+    <vscale x 4 x float> %0,
+    i64 %1)
+
+  ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfsqrt.mask.nxv4f32(
+  <vscale x 4 x float>,
+  <vscale x 4 x float>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x float> @intrinsic_vfsqrt_mask_v_nxv4f32_nxv4f32(
+; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv4f32_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
+; CHECK-NEXT:    vfsqrt.v v16, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+  <vscale x 4 x float> %0,
+  <vscale x 4 x float> %1,
+  <vscale x 4 x i1> %2,
+  i64 %3) nounwind {
+entry:
+  %a = call <vscale x 4 x float> @llvm.riscv.vfsqrt.mask.nxv4f32(
+    <vscale x 4 x float> %0,
+    <vscale x 4 x float> %1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfsqrt.nxv8f32(
+  <vscale x 8 x float>,
+  i64);
+
+define <vscale x 8 x float> @intrinsic_vfsqrt_v_nxv8f32_nxv8f32(
+; CHECK-LABEL: intrinsic_vfsqrt_v_nxv8f32_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vfsqrt.v v16, v16
+; CHECK-NEXT:    jalr zero, 0(ra)
+  <vscale x 8 x float> %0,
+  i64 %1) nounwind {
+entry:
+  %a = call <vscale x 8 x float> @llvm.riscv.vfsqrt.nxv8f32(
+    <vscale x 8 x float> %0,
+    i64 %1)
+
+  ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfsqrt.mask.nxv8f32(
+  <vscale x 8 x float>,
+  <vscale x 8 x float>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x float> @intrinsic_vfsqrt_mask_v_nxv8f32_nxv8f32(
+; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv8f32_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
+; CHECK-NEXT:    vfsqrt.v v16, v20, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+  <vscale x 8 x float> %0,
+  <vscale x 8 x float> %1,
+  <vscale x 8 x i1> %2,
+  i64 %3) nounwind {
+entry:
+  %a = call <vscale x 8 x float> @llvm.riscv.vfsqrt.mask.nxv8f32(
+    <vscale x 8 x float> %0,
+    <vscale x 8 x float> %1,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vfsqrt.nxv16f32(
+  <vscale x 16 x float>,
+  i64);
+
+define <vscale x 16 x float> @intrinsic_vfsqrt_v_nxv16f32_nxv16f32(
+; CHECK-LABEL: intrinsic_vfsqrt_v_nxv16f32_nxv16f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
+; CHECK-NEXT:    vfsqrt.v v16, v16
+; CHECK-NEXT:    jalr zero, 0(ra)
+  <vscale x 16 x float> %0,
+  i64 %1) nounwind {
+entry:
+  %a = call <vscale x 16 x float> @llvm.riscv.vfsqrt.nxv16f32(
+    <vscale x 16 x float> %0,
+    i64 %1)
+
+  ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vfsqrt.mask.nxv16f32(
+  <vscale x 16 x float>,
+  <vscale x 16 x float>,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x float> @intrinsic_vfsqrt_mask_v_nxv16f32_nxv16f32(
+; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv16f32_nxv16f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e32,m8,tu,mu
+; CHECK-NEXT:    vfsqrt.v v16, v8, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+  <vscale x 16 x float> %0,
+  <vscale x 16 x float> %1,
+  <vscale x 16 x i1> %2,
+  i64 %3) nounwind {
+entry:
+  %a = call <vscale x 16 x float> @llvm.riscv.vfsqrt.mask.nxv16f32(
+    <vscale x 16 x float> %0,
+    <vscale x 16 x float> %1,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfsqrt.nxv1f64(
+  <vscale x 1 x double>,
+  i64);
+
+define <vscale x 1 x double> @intrinsic_vfsqrt_v_nxv1f64_nxv1f64(
+; CHECK-LABEL: intrinsic_vfsqrt_v_nxv1f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vfsqrt.v v16, v16
+; CHECK-NEXT:    jalr zero, 0(ra)
+  <vscale x 1 x double> %0,
+  i64 %1) nounwind {
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfsqrt.nxv1f64(
+    <vscale x 1 x double> %0,
+    i64 %1)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfsqrt.mask.nxv1f64(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x double> @intrinsic_vfsqrt_mask_v_nxv1f64_nxv1f64(
+; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv1f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
+; CHECK-NEXT:    vfsqrt.v v16, v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+  <vscale x 1 x double> %0,
+  <vscale x 1 x double> %1,
+  <vscale x 1 x i1> %2,
+  i64 %3) nounwind {
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfsqrt.mask.nxv1f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfsqrt.nxv2f64(
+  <vscale x 2 x double>,
+  i64);
+
+define <vscale x 2 x double> @intrinsic_vfsqrt_v_nxv2f64_nxv2f64(
+; CHECK-LABEL: intrinsic_vfsqrt_v_nxv2f64_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vfsqrt.v v16, v16
+; CHECK-NEXT:    jalr zero, 0(ra)
+  <vscale x 2 x double> %0,
+  i64 %1) nounwind {
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfsqrt.nxv2f64(
+    <vscale x 2 x double> %0,
+    i64 %1)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfsqrt.mask.nxv2f64(
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x double> @intrinsic_vfsqrt_mask_v_nxv2f64_nxv2f64(
+; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv2f64_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
+; CHECK-NEXT:    vfsqrt.v v16, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+  <vscale x 2 x double> %0,
+  <vscale x 2 x double> %1,
+  <vscale x 2 x i1> %2,
+  i64 %3) nounwind {
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfsqrt.mask.nxv2f64(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double> %1,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfsqrt.nxv4f64(
+  <vscale x 4 x double>,
+  i64);
+
+define <vscale x 4 x double> @intrinsic_vfsqrt_v_nxv4f64_nxv4f64(
+; CHECK-LABEL: intrinsic_vfsqrt_v_nxv4f64_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vfsqrt.v v16, v16
+; CHECK-NEXT:    jalr zero, 0(ra)
+  <vscale x 4 x double> %0,
+  i64 %1) nounwind {
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfsqrt.nxv4f64(
+    <vscale x 4 x double> %0,
+    i64 %1)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfsqrt.mask.nxv4f64(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x double> @intrinsic_vfsqrt_mask_v_nxv4f64_nxv4f64(
+; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv4f64_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
+; CHECK-NEXT:    vfsqrt.v v16, v20, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+  <vscale x 4 x double> %0,
+  <vscale x 4 x double> %1,
+  <vscale x 4 x i1> %2,
+  i64 %3) nounwind {
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfsqrt.mask.nxv4f64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfsqrt.nxv8f64(
+  <vscale x 8 x double>,
+  i64);
+
+define <vscale x 8 x double> @intrinsic_vfsqrt_v_nxv8f64_nxv8f64(
+; CHECK-LABEL: intrinsic_vfsqrt_v_nxv8f64_nxv8f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
+; CHECK-NEXT:    vfsqrt.v v16, v16
+; CHECK-NEXT:    jalr zero, 0(ra)
+  <vscale x 8 x double> %0,
+  i64 %1) nounwind {
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vfsqrt.nxv8f64(
+    <vscale x 8 x double> %0,
+    i64 %1)
+
+  ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfsqrt.mask.nxv8f64(
+  <vscale x 8 x double>,
+  <vscale x 8 x double>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x double> @intrinsic_vfsqrt_mask_v_nxv8f64_nxv8f64(
+; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv8f64_nxv8f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e64,m8,tu,mu
+; CHECK-NEXT:    vfsqrt.v v16, v8, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+  <vscale x 8 x double> %0,
+  <vscale x 8 x double> %1,
+  <vscale x 8 x i1> %2,
+  i64 %3) nounwind {
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vfsqrt.mask.nxv8f64(
+    <vscale x 8 x double> %0,
+    <vscale x 8 x double> %1,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x double> %a
+}
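
Note on the operand convention exercised by the tests above: the masked intrinsic takes (maskedoff, vector_in, mask, vl), and lanes where the mask is clear keep the corresponding value from the maskedoff operand, which is why the masked pseudos are selected under a "tu,mu" vsetvli while the unmasked form uses "ta,mu". A minimal caller sketch follows, assuming only the declarations from the tests; the function below is hypothetical and not part of the patch.

; Hypothetical caller -- not part of this patch.
define <vscale x 2 x float> @sqrt_masked(<vscale x 2 x float> %maskedoff,
                                         <vscale x 2 x float> %v,
                                         <vscale x 2 x i1> %mask,
                                         i32 %vl) nounwind {
entry:
  %r = call <vscale x 2 x float> @llvm.riscv.vfsqrt.mask.nxv2f32(
    <vscale x 2 x float> %maskedoff,  ; value kept in inactive and tail lanes
    <vscale x 2 x float> %v,          ; source operand
    <vscale x 2 x i1> %mask,          ; becomes v0 at instruction selection
    i32 %vl)                          ; active vector length
  ret <vscale x 2 x float> %r
}

declare <vscale x 2 x float> @llvm.riscv.vfsqrt.mask.nxv2f32(
  <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x i1>, i32)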