Index: llvm/include/llvm/IR/IntrinsicsRISCV.td
===================================================================
--- llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -268,4 +268,12 @@
                     [IntrNoMem]>, RISCVVIntrinsic {
     let ExtendOperand = 2;
   }
+
+  def int_riscv_vfmv_f_s : Intrinsic<[LLVMVectorElementType<0>],
+                                     [llvm_anyfloat_ty],
+                                     [IntrNoMem]>, RISCVVIntrinsic;
+  def int_riscv_vfmv_s_f : Intrinsic<[llvm_anyfloat_ty],
+                                     [LLVMMatchType<0>, LLVMVectorElementType<0>,
+                                      llvm_anyint_ty],
+                                     [IntrNoMem]>, RISCVVIntrinsic;
 } // TargetPrefix = "riscv"
Index: llvm/lib/Target/RISCV/RISCVInstrInfoV.td
===================================================================
--- llvm/lib/Target/RISCV/RISCVInstrInfoV.td
+++ llvm/lib/Target/RISCV/RISCVInstrInfoV.td
@@ -980,8 +980,9 @@
 // Floating-Point Scalar Move Instructions
 def VFMV_F_S : RVInstV<0b010000, 0b00000, OPFVV, (outs FPR32:$vd),
                        (ins VR:$vs2), "vfmv.f.s", "$vd, $vs2">;
-def VFMV_S_F : RVInstV2<0b010000, 0b00000, OPFVF, (outs VR:$vd),
-                        (ins FPR32:$rs1), "vfmv.s.f", "$vd, $rs1">;
+let Constraints = "$vd = $vd_wb" in
+def VFMV_S_F : RVInstV2<0b010000, 0b00000, OPFVF, (outs VR:$vd_wb),
+                        (ins VR:$vd, FPR32:$rs1), "vfmv.s.f", "$vd, $rs1">;
 } // hasSideEffects = 0, mayLoad = 0, mayStore = 0, vm = 1
 } // Predicates = [HasStdExtV, HasStdExtF]
Index: llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
===================================================================
--- llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -1196,12 +1196,37 @@
       Constraints = "$rd = $rs1" in
     def PseudoVMV_S_X # "_" # m.MX: Pseudo<(outs m.vrclass:$rd),
                                            (ins m.vrclass:$rs1, GPR:$rs2,
-                                                GPR:$vl, ixlenimm:$sew),
+                                                GPR:$vl, ixlenimm:$sew),
                                            []>, RISCVVPseudo;
     }
   }
 }
+} // Predicates = [HasStdExtV]
+
+//===----------------------------------------------------------------------===//
+// 17.2. Floating-Point Scalar Move Instructions
+//===----------------------------------------------------------------------===//
+
+let Predicates = [HasStdExtV, HasStdExtF] in {
+let mayLoad = 0, mayStore = 0, hasSideEffects = 0, usesCustomInserter = 1,
+    Uses = [VL, VTYPE] in {
+  foreach m = MxList.m in {
+    let VLMul = m.value in {
+      let SEWIndex = 2, BaseInstr = VFMV_F_S in
+      def PseudoVFMV_F_S # "_" # m.MX : Pseudo<(outs FPR32:$rd),
+                                               (ins m.vrclass:$rs2,
+                                                    ixlenimm:$sew),
+                                               []>, RISCVVPseudo;
+      let VLIndex = 3, SEWIndex = 4, BaseInstr = VFMV_S_F,
+          Constraints = "$rd = $rs1" in
+      def PseudoVFMV_S_F # "_" # m.MX : Pseudo<(outs m.vrclass:$rd),
+                                               (ins m.vrclass:$rs1, FPR32:$rs2,
+                                                    GPR:$vl, ixlenimm:$sew),
+                                               []>, RISCVVPseudo;
+    }
+  }
 }
+} // Predicates = [HasStdExtV, HasStdExtF]
 
 //===----------------------------------------------------------------------===//
 // Patterns.
@@ -1342,3 +1367,34 @@
               (vti.Vector $rs1), $rs2, (NoX0 GPR:$vl), vti.SEW)>;
 }
 } // Predicates = [HasStdExtV]
+
+//===----------------------------------------------------------------------===//
+// 17.2. Floating-Point Scalar Move Instructions
+//===----------------------------------------------------------------------===//
+
+let Predicates = [HasStdExtV, HasStdExtF] in {
+foreach fvti = AllFloatVectors in {
+  defvar instr = !cast<Instruction>("PseudoVFMV_F_S_" # fvti.LMul.MX);
+  def : Pat<(fvti.Scalar (int_riscv_vfmv_f_s (fvti.Vector fvti.RegClass:$rs2))),
+                         // Floating point instructions with a scalar result will always
+                         // generate the result in a register of class FPR32.
+                         // When dealing with the f64 variant of a pattern we need to
+                         // promote the FPR32 subregister generated by the instruction
+                         // to the FPR64 base register expected by the type in the pattern
+            !cond(!eq(!cast<string>(fvti.ScalarRegClass),
+                      !cast<string>(FPR64)):
+                    (SUBREG_TO_REG (i32 -1),
+                                   (instr $rs2, fvti.SEW), sub_32),
+                  !eq(!cast<string>(fvti.ScalarRegClass),
+                      !cast<string>(FPR16)):
+                    (EXTRACT_SUBREG (instr $rs2, fvti.SEW), sub_16),
+                  !eq(1, 1):
+                    (instr $rs2, fvti.SEW))>;
+
+  def : Pat<(fvti.Vector (int_riscv_vfmv_s_f (fvti.Vector fvti.RegClass:$rs1),
+                         (fvti.Scalar fvti.ScalarRegClass:$rs2), GPR:$vl)),
+            (!cast<Instruction>("PseudoVFMV_S_F_" # fvti.LMul.MX)
+             (fvti.Vector $rs1), ToFPR32<fvti.Scalar, fvti.ScalarRegClass, "rs2">.ret,
+             (NoX0 GPR:$vl), fvti.SEW)>;
+}
+} // Predicates = [HasStdExtV, HasStdExtF]
Index: llvm/test/CodeGen/RISCV/rvv/vfmv.f.s.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/RISCV/rvv/vfmv.f.s.ll
@@ -0,0 +1,204 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+d,+experimental-v,+experimental-zfh -target-abi lp64d -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv32 -mattr=+d,+experimental-v,+experimental-zfh -target-abi ilp32d -verify-machineinstrs < %s | FileCheck %s
+
+declare half @llvm.riscv.vfmv.f.s.nxv1f16(<vscale x 1 x half>)
+
+define half @intrinsic_vfmv.f.s_s_nxv1f16(<vscale x 1 x half> %0) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vfmv.f.s fa0, v16
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h killed $f10_f
+; CHECK-NEXT:    ret
+entry:
+  %a = call half @llvm.riscv.vfmv.f.s.nxv1f16(<vscale x 1 x half> %0)
+  ret half %a
+}
+
+declare half @llvm.riscv.vfmv.f.s.nxv2f16(<vscale x 2 x half>)
+
+define half @intrinsic_vfmv.f.s_s_nxv2f16(<vscale x 2 x half> %0) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, zero, e16,mf2,ta,mu
+; CHECK-NEXT:    vfmv.f.s fa0, v16
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h killed $f10_f
+; CHECK-NEXT:    ret
+entry:
+  %a = call half @llvm.riscv.vfmv.f.s.nxv2f16(<vscale x 2 x half> %0)
+  ret half %a
+}
+
+declare half @llvm.riscv.vfmv.f.s.nxv4f16(<vscale x 4 x half>)
+
+define half @intrinsic_vfmv.f.s_s_nxv4f16(<vscale x 4 x half> %0) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, zero, e16,m1,ta,mu
+; CHECK-NEXT:    vfmv.f.s fa0, v16
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h killed $f10_f
+; CHECK-NEXT:    ret
+entry:
+  %a = call half @llvm.riscv.vfmv.f.s.nxv4f16(<vscale x 4 x half> %0)
+  ret half %a
+}
+
+declare half @llvm.riscv.vfmv.f.s.nxv8f16(<vscale x 8 x half>)
+
+define half @intrinsic_vfmv.f.s_s_nxv8f16(<vscale x 8 x half> %0) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vfmv.f.s fa0, v16
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h killed $f10_f
+; CHECK-NEXT:    ret
+entry:
+  %a = call half @llvm.riscv.vfmv.f.s.nxv8f16(<vscale x 8 x half> %0)
+  ret half %a
+}
+
+declare half @llvm.riscv.vfmv.f.s.nxv16f16(<vscale x 16 x half>)
+
+define half @intrinsic_vfmv.f.s_s_nxv16f16(<vscale x 16 x half> %0) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv16f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vfmv.f.s fa0, v16
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h killed $f10_f
+; CHECK-NEXT:    ret
+entry:
+  %a = call half @llvm.riscv.vfmv.f.s.nxv16f16(<vscale x 16 x half> %0)
+  ret half %a
+}
+
+declare half @llvm.riscv.vfmv.f.s.nxv32f16(<vscale x 32 x half>)
+
+define half @intrinsic_vfmv.f.s_s_nxv32f16(<vscale x 32 x half> %0) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv32f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vfmv.f.s fa0, v16
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h killed $f10_f
+; CHECK-NEXT:    ret
+entry:
+  %a = call half @llvm.riscv.vfmv.f.s.nxv32f16(<vscale x 32 x half> %0)
+  ret half %a
+}
+
+declare float @llvm.riscv.vfmv.f.s.nxv1f32(<vscale x 1 x float>)
+
+define float @intrinsic_vfmv.f.s_s_nxv1f32(<vscale x 1 x float> %0) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vfmv.f.s fa0, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call float @llvm.riscv.vfmv.f.s.nxv1f32(<vscale x 1 x float> %0)
+  ret float %a
+}
+
+declare float @llvm.riscv.vfmv.f.s.nxv2f32(<vscale x 2 x float>)
+
+define float @intrinsic_vfmv.f.s_s_nxv2f32(<vscale x 2 x float> %0) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, zero, e32,m1,ta,mu
+; CHECK-NEXT:    vfmv.f.s fa0, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call float @llvm.riscv.vfmv.f.s.nxv2f32(<vscale x 2 x float> %0)
+  ret float %a
+}
+
+declare float @llvm.riscv.vfmv.f.s.nxv4f32(<vscale x 4 x float>)
+
+define float @intrinsic_vfmv.f.s_s_nxv4f32(<vscale x 4 x float> %0) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, zero, e32,m2,ta,mu
+; CHECK-NEXT:    vfmv.f.s fa0, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call float @llvm.riscv.vfmv.f.s.nxv4f32(<vscale x 4 x float> %0)
+  ret float %a
+}
+
+declare float @llvm.riscv.vfmv.f.s.nxv8f32(<vscale x 8 x float>)
+
+define float @intrinsic_vfmv.f.s_s_nxv8f32(<vscale x 8 x float> %0) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfmv.f.s fa0, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call float @llvm.riscv.vfmv.f.s.nxv8f32(<vscale x 8 x float> %0)
+  ret float %a
+}
+
+declare float @llvm.riscv.vfmv.f.s.nxv16f32(<vscale x 16 x float>)
+
+define float @intrinsic_vfmv.f.s_s_nxv16f32(<vscale x 16 x float> %0) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv16f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vfmv.f.s fa0, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call float @llvm.riscv.vfmv.f.s.nxv16f32(<vscale x 16 x float> %0)
+  ret float %a
+}
+
+declare double @llvm.riscv.vfmv.f.s.nxv1f64(<vscale x 1 x double>)
+
+define double @intrinsic_vfmv.f.s_s_nxv1f64(<vscale x 1 x double> %0) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vfmv.f.s fa0, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call double @llvm.riscv.vfmv.f.s.nxv1f64(<vscale x 1 x double> %0)
+  ret double %a
+}
+
+declare double @llvm.riscv.vfmv.f.s.nxv2f64(<vscale x 2 x double>)
+
+define double @intrinsic_vfmv.f.s_s_nxv2f64(<vscale x 2 x double> %0) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, zero, e64,m2,ta,mu
+; CHECK-NEXT:    vfmv.f.s fa0, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call double @llvm.riscv.vfmv.f.s.nxv2f64(<vscale x 2 x double> %0)
+  ret double %a
+}
+
+declare double @llvm.riscv.vfmv.f.s.nxv4f64(<vscale x 4 x double>)
+
+define double @intrinsic_vfmv.f.s_s_nxv4f64(<vscale x 4 x double> %0) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vfmv.f.s fa0, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call double @llvm.riscv.vfmv.f.s.nxv4f64(<vscale x 4 x double> %0)
+  ret double %a
+}
+
+declare double @llvm.riscv.vfmv.f.s.nxv8f64(<vscale x 8 x double>)
+
+define double @intrinsic_vfmv.f.s_s_nxv8f64(<vscale x 8 x double> %0) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv8f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vfmv.f.s fa0, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call double @llvm.riscv.vfmv.f.s.nxv8f64(<vscale x 8 x double> %0)
+  ret double %a
+}
Index: llvm/test/CodeGen/RISCV/rvv/vfmv.s.f-rv32.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/RISCV/rvv/vfmv.s.f-rv32.ll
@@ -0,0 +1,203 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+d,+experimental-v,+experimental-zfh -target-abi ilp32d -verify-machineinstrs < %s | FileCheck %s
+
+declare <vscale x 1 x half> @llvm.riscv.vfmv.s.f.nxv1f16(<vscale x 1 x half>, half, i32)
+
+define <vscale x 1 x half> @intrinsic_vfmv.s.f_f_nxv1f16(<vscale x 1 x half> %0, half %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vfmv.s.f v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x half> @llvm.riscv.vfmv.s.f.nxv1f16(<vscale x 1 x half> %0, half %1, i32 %2)
+  ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfmv.s.f.nxv2f16(<vscale x 2 x half>, half, i32)
+
+define <vscale x 2 x half> @intrinsic_vfmv.s.f_f_nxv2f16(<vscale x 2 x half> %0, half %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vfmv.s.f v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x half> @llvm.riscv.vfmv.s.f.nxv2f16(<vscale x 2 x half> %0, half %1, i32 %2)
+  ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfmv.s.f.nxv4f16(<vscale x 4 x half>, half, i32)
+
+define <vscale x 4 x half> @intrinsic_vfmv.s.f_f_nxv4f16(<vscale x 4 x half> %0, half %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vfmv.s.f v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x half> @llvm.riscv.vfmv.s.f.nxv4f16(<vscale x 4 x half> %0, half %1, i32 %2)
+  ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfmv.s.f.nxv8f16(<vscale x 8 x half>, half, i32)
+
+define <vscale x 8 x half> @intrinsic_vfmv.s.f_f_nxv8f16(<vscale x 8 x half> %0, half %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vfmv.s.f v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x half> @llvm.riscv.vfmv.s.f.nxv8f16(<vscale x 8 x half> %0, half %1, i32 %2)
+  ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfmv.s.f.nxv16f16(<vscale x 16 x half>, half, i32)
+
+define <vscale x 16 x half> @intrinsic_vfmv.s.f_f_nxv16f16(<vscale x 16 x half> %0, half %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv16f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vfmv.s.f v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x half> @llvm.riscv.vfmv.s.f.nxv16f16(<vscale x 16 x half> %0, half %1, i32 %2)
+  ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 32 x half> @llvm.riscv.vfmv.s.f.nxv32f16(<vscale x 32 x half>, half, i32)
+
+define <vscale x 32 x half> @intrinsic_vfmv.s.f_f_nxv32f16(<vscale x 32 x half> %0, half %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv32f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
+; CHECK-NEXT:    vfmv.s.f v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x half> @llvm.riscv.vfmv.s.f.nxv32f16(<vscale x 32 x half> %0, half %1, i32 %2)
+  ret <vscale x 32 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfmv.s.f.nxv1f32(<vscale x 1 x float>, float, i32)
+
+define <vscale x 1 x float> @intrinsic_vfmv.s.f_f_nxv1f32(<vscale x 1 x float> %0, float %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vfmv.s.f v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x float> @llvm.riscv.vfmv.s.f.nxv1f32(<vscale x 1 x float> %0, float %1, i32 %2)
+  ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfmv.s.f.nxv2f32(<vscale x 2 x float>, float, i32)
+
+define <vscale x 2 x float> @intrinsic_vfmv.s.f_f_nxv2f32(<vscale x 2 x float> %0, float %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vfmv.s.f v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.vfmv.s.f.nxv2f32(<vscale x 2 x float> %0, float %1, i32 %2)
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfmv.s.f.nxv4f32(<vscale x 4 x float>, float, i32)
+
+define <vscale x 4 x float> @intrinsic_vfmv.s.f_f_nxv4f32(<vscale x 4 x float> %0, float %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vfmv.s.f v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x float> @llvm.riscv.vfmv.s.f.nxv4f32(<vscale x 4 x float> %0, float %1, i32 %2)
+  ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfmv.s.f.nxv8f32(<vscale x 8 x float>, float, i32)
+
+define <vscale x 8 x float> @intrinsic_vfmv.s.f_f_nxv8f32(<vscale x 8 x float> %0, float %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vfmv.s.f v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x float> @llvm.riscv.vfmv.s.f.nxv8f32(<vscale x 8 x float> %0, float %1, i32 %2)
+  ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vfmv.s.f.nxv16f32(<vscale x 16 x float>, float, i32)
+
+define <vscale x 16 x float> @intrinsic_vfmv.s.f_f_nxv16f32(<vscale x 16 x float> %0, float %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv16f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
+; CHECK-NEXT:    vfmv.s.f v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x float> @llvm.riscv.vfmv.s.f.nxv16f32(<vscale x 16 x float> %0, float %1, i32 %2)
+  ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfmv.s.f.nxv1f64(<vscale x 1 x double>, double, i32)
+
+define <vscale x 1 x double> @intrinsic_vfmv.s.f_f_nxv1f64(<vscale x 1 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vfmv.s.f v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfmv.s.f.nxv1f64(<vscale x 1 x double> %0, double %1, i32 %2)
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfmv.s.f.nxv2f64(<vscale x 2 x double>, double, i32)
+
+define <vscale x 2 x double> @intrinsic_vfmv.s.f_f_nxv2f64(<vscale x 2 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vfmv.s.f v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfmv.s.f.nxv2f64(<vscale x 2 x double> %0, double %1, i32 %2)
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfmv.s.f.nxv4f64(<vscale x 4 x double>, double, i32)
+
+define <vscale x 4 x double> @intrinsic_vfmv.s.f_f_nxv4f64(<vscale x 4 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vfmv.s.f v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfmv.s.f.nxv4f64(<vscale x 4 x double> %0, double %1, i32 %2)
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfmv.s.f.nxv8f64(<vscale x 8 x double>, double, i32)
+
+define <vscale x 8 x double> @intrinsic_vfmv.s.f_f_nxv8f64(<vscale x 8 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv8f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
+; CHECK-NEXT:    vfmv.s.f v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vfmv.s.f.nxv8f64(<vscale x 8 x double> %0, double %1, i32 %2)
+  ret <vscale x 8 x double> %a
+}
Index: llvm/test/CodeGen/RISCV/rvv/vfmv.s.f-rv64.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/RISCV/rvv/vfmv.s.f-rv64.ll
@@ -0,0 +1,203 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+d,+experimental-v,+experimental-zfh -target-abi lp64d -verify-machineinstrs < %s | FileCheck %s
+
+declare <vscale x 1 x half> @llvm.riscv.vfmv.s.f.nxv1f16(<vscale x 1 x half>, half, i64)
+
+define <vscale x 1 x half> @intrinsic_vfmv.s.f_f_nxv1f16(<vscale x 1 x half> %0, half %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vfmv.s.f v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x half> @llvm.riscv.vfmv.s.f.nxv1f16(<vscale x 1 x half> %0, half %1, i64 %2)
+  ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfmv.s.f.nxv2f16(<vscale x 2 x half>, half, i64)
+
+define <vscale x 2 x half> @intrinsic_vfmv.s.f_f_nxv2f16(<vscale x 2 x half> %0, half %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vfmv.s.f v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x half> @llvm.riscv.vfmv.s.f.nxv2f16(<vscale x 2 x half> %0, half %1, i64 %2)
+  ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfmv.s.f.nxv4f16(<vscale x 4 x half>, half, i64)
+
+define <vscale x 4 x half> @intrinsic_vfmv.s.f_f_nxv4f16(<vscale x 4 x half> %0, half %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vfmv.s.f v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x half> @llvm.riscv.vfmv.s.f.nxv4f16(<vscale x 4 x half> %0, half %1, i64 %2)
+  ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfmv.s.f.nxv8f16(<vscale x 8 x half>, half, i64)
+
+define <vscale x 8 x half> @intrinsic_vfmv.s.f_f_nxv8f16(<vscale x 8 x half> %0, half %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vfmv.s.f v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x half> @llvm.riscv.vfmv.s.f.nxv8f16(<vscale x 8 x half> %0, half %1, i64 %2)
+  ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfmv.s.f.nxv16f16(<vscale x 16 x half>, half, i64)
+
+define <vscale x 16 x half> @intrinsic_vfmv.s.f_f_nxv16f16(<vscale x 16 x half> %0, half %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv16f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vfmv.s.f v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x half> @llvm.riscv.vfmv.s.f.nxv16f16(<vscale x 16 x half> %0, half %1, i64 %2)
+  ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 32 x half> @llvm.riscv.vfmv.s.f.nxv32f16(<vscale x 32 x half>, half, i64)
+
+define <vscale x 32 x half> @intrinsic_vfmv.s.f_f_nxv32f16(<vscale x 32 x half> %0, half %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv32f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
+; CHECK-NEXT:    vfmv.s.f v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x half> @llvm.riscv.vfmv.s.f.nxv32f16(<vscale x 32 x half> %0, half %1, i64 %2)
+  ret <vscale x 32 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfmv.s.f.nxv1f32(<vscale x 1 x float>, float, i64)
+
+define <vscale x 1 x float> @intrinsic_vfmv.s.f_f_nxv1f32(<vscale x 1 x float> %0, float %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vfmv.s.f v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x float> @llvm.riscv.vfmv.s.f.nxv1f32(<vscale x 1 x float> %0, float %1, i64 %2)
+  ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfmv.s.f.nxv2f32(<vscale x 2 x float>, float, i64)
+
+define <vscale x 2 x float> @intrinsic_vfmv.s.f_f_nxv2f32(<vscale x 2 x float> %0, float %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vfmv.s.f v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.vfmv.s.f.nxv2f32(<vscale x 2 x float> %0, float %1, i64 %2)
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfmv.s.f.nxv4f32(<vscale x 4 x float>, float, i64)
+
+define <vscale x 4 x float> @intrinsic_vfmv.s.f_f_nxv4f32(<vscale x 4 x float> %0, float %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vfmv.s.f v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x float> @llvm.riscv.vfmv.s.f.nxv4f32(<vscale x 4 x float> %0, float %1, i64 %2)
+  ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfmv.s.f.nxv8f32(<vscale x 8 x float>, float, i64)
+
+define <vscale x 8 x float> @intrinsic_vfmv.s.f_f_nxv8f32(<vscale x 8 x float> %0, float %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vfmv.s.f v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x float> @llvm.riscv.vfmv.s.f.nxv8f32(<vscale x 8 x float> %0, float %1, i64 %2)
+  ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vfmv.s.f.nxv16f32(<vscale x 16 x float>, float, i64)
+
+define <vscale x 16 x float> @intrinsic_vfmv.s.f_f_nxv16f32(<vscale x 16 x float> %0, float %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv16f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
+; CHECK-NEXT:    vfmv.s.f v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x float> @llvm.riscv.vfmv.s.f.nxv16f32(<vscale x 16 x float> %0, float %1, i64 %2)
+  ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfmv.s.f.nxv1f64(<vscale x 1 x double>, double, i64)
+
+define <vscale x 1 x double> @intrinsic_vfmv.s.f_f_nxv1f64(<vscale x 1 x double> %0, double %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vfmv.s.f v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfmv.s.f.nxv1f64(<vscale x 1 x double> %0, double %1, i64 %2)
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfmv.s.f.nxv2f64(<vscale x 2 x double>, double, i64)
+
+define <vscale x 2 x double> @intrinsic_vfmv.s.f_f_nxv2f64(<vscale x 2 x double> %0, double %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vfmv.s.f v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfmv.s.f.nxv2f64(<vscale x 2 x double> %0, double %1, i64 %2)
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfmv.s.f.nxv4f64(<vscale x 4 x double>, double, i64)
+
+define <vscale x 4 x double> @intrinsic_vfmv.s.f_f_nxv4f64(<vscale x 4 x double> %0, double %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vfmv.s.f v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfmv.s.f.nxv4f64(<vscale x 4 x double> %0, double %1, i64 %2)
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfmv.s.f.nxv8f64(<vscale x 8 x double>, double, i64)
+
+define <vscale x 8 x double> @intrinsic_vfmv.s.f_f_nxv8f64(<vscale x 8 x double> %0, double %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv8f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
+; CHECK-NEXT:    vfmv.s.f v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vfmv.s.f.nxv8f64(<vscale x 8 x double> %0, double %1, i64 %2)
+  ret <vscale x 8 x double> %a
+}