diff --git a/llvm/test/CodeGen/RISCV/rvv/vfadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfadd-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfadd-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfadd-rv32.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: -target-abi=ilp32d < %s | FileCheck %s declare @llvm.riscv.vfadd.nxv1f16.nxv1f16( , , @@ -687,9 +687,8 @@ define @intrinsic_vfadd_vf_nxv1f16_nxv1f16_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vfadd.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.nxv1f16.f16( @@ -711,9 +710,8 @@ define @intrinsic_vfadd_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vfadd.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfadd.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.mask.nxv1f16.f16( @@ -734,9 +732,8 @@ define @intrinsic_vfadd_vf_nxv2f16_nxv2f16_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vfadd.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.nxv2f16.f16( @@ -758,9 +755,8 @@ define @intrinsic_vfadd_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vfadd.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfadd.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.mask.nxv2f16.f16( @@ -781,9 +777,8 @@ define @intrinsic_vfadd_vf_nxv4f16_nxv4f16_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vfadd.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.nxv4f16.f16( @@ -805,9 +800,8 @@ define @intrinsic_vfadd_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vfadd.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfadd.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.mask.nxv4f16.f16( @@ -828,9 +822,8 @@ define @intrinsic_vfadd_vf_nxv8f16_nxv8f16_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, 
m2, ta, mu -; CHECK-NEXT: vfadd.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.nxv8f16.f16( @@ -852,9 +845,8 @@ define @intrinsic_vfadd_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vfadd.vf v8, v10, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfadd.vf v8, v10, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.mask.nxv8f16.f16( @@ -875,9 +867,8 @@ define @intrinsic_vfadd_vf_nxv16f16_nxv16f16_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vfadd.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.nxv16f16.f16( @@ -899,9 +890,8 @@ define @intrinsic_vfadd_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vfadd.vf v8, v12, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfadd.vf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.mask.nxv16f16.f16( @@ -922,9 +912,8 @@ define @intrinsic_vfadd_vf_nxv32f16_nxv32f16_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vfadd.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.nxv32f16.f16( @@ -946,9 +935,8 @@ define @intrinsic_vfadd_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vfadd.vf v8, v16, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfadd.vf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.mask.nxv32f16.f16( @@ -969,9 +957,8 @@ define @intrinsic_vfadd_vf_nxv1f32_nxv1f32_f32( %0, float %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vfadd.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.nxv1f32.f32( @@ -993,9 +980,8 @@ define @intrinsic_vfadd_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vfadd.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfadd.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.mask.nxv1f32.f32( @@ -1016,9 +1002,8 @@ define 
@intrinsic_vfadd_vf_nxv2f32_nxv2f32_f32( %0, float %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vfadd.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.nxv2f32.f32( @@ -1040,9 +1025,8 @@ define @intrinsic_vfadd_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vfadd.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfadd.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.mask.nxv2f32.f32( @@ -1063,9 +1047,8 @@ define @intrinsic_vfadd_vf_nxv4f32_nxv4f32_f32( %0, float %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vfadd.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.nxv4f32.f32( @@ -1087,9 +1070,8 @@ define @intrinsic_vfadd_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vfadd.vf v8, v10, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfadd.vf v8, v10, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.mask.nxv4f32.f32( @@ -1110,9 +1092,8 @@ define @intrinsic_vfadd_vf_nxv8f32_nxv8f32_f32( %0, float %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vfadd.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.nxv8f32.f32( @@ -1134,9 +1115,8 @@ define @intrinsic_vfadd_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vfadd.vf v8, v12, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfadd.vf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.mask.nxv8f32.f32( @@ -1157,9 +1137,8 @@ define @intrinsic_vfadd_vf_nxv16f32_nxv16f32_f32( %0, float %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vfadd.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.nxv16f32.f32( @@ -1181,9 +1160,8 @@ define @intrinsic_vfadd_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: 
vfadd.vf v8, v16, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfadd.vf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.mask.nxv16f32.f32( @@ -1204,13 +1182,8 @@ define @intrinsic_vfadd_vf_nxv1f64_nxv1f64_f64( %0, double %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu -; CHECK-NEXT: vfadd.vf v8, v8, ft0 -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.nxv1f64.f64( @@ -1232,13 +1205,8 @@ define @intrinsic_vfadd_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu -; CHECK-NEXT: vfadd.vf v8, v9, ft0, v0.t -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfadd.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.mask.nxv1f64.f64( @@ -1259,13 +1227,8 @@ define @intrinsic_vfadd_vf_nxv2f64_nxv2f64_f64( %0, double %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu -; CHECK-NEXT: vfadd.vf v8, v8, ft0 -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.nxv2f64.f64( @@ -1287,13 +1250,8 @@ define @intrinsic_vfadd_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu -; CHECK-NEXT: vfadd.vf v8, v10, ft0, v0.t -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfadd.vf v8, v10, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.mask.nxv2f64.f64( @@ -1314,13 +1272,8 @@ define @intrinsic_vfadd_vf_nxv4f64_nxv4f64_f64( %0, double %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu -; CHECK-NEXT: vfadd.vf v8, v8, ft0 -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.nxv4f64.f64( @@ -1342,13 +1295,8 @@ define @intrinsic_vfadd_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu -; CHECK-NEXT: vfadd.vf v8, v12, ft0, v0.t -; 
CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfadd.vf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.mask.nxv4f64.f64( @@ -1369,13 +1317,8 @@ define @intrinsic_vfadd_vf_nxv8f64_nxv8f64_f64( %0, double %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu -; CHECK-NEXT: vfadd.vf v8, v8, ft0 -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.nxv8f64.f64( @@ -1397,13 +1340,8 @@ define @intrinsic_vfadd_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu -; CHECK-NEXT: vfadd.vf v8, v16, ft0, v0.t -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfadd.vf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.mask.nxv8f64.f64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfadd-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfadd-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfadd-rv64.ll @@ -1,7 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -mattr=+zfh \ ; RUN: -mattr=+d -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: -target-abi=lp64d < %s | FileCheck %s declare @llvm.riscv.vfadd.nxv1f16.nxv1f16( , , @@ -688,9 +688,8 @@ define @intrinsic_vfadd_vf_nxv1f16_nxv1f16_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vfadd.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.nxv1f16.f16( @@ -712,9 +711,8 @@ define @intrinsic_vfadd_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vfadd.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfadd.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.mask.nxv1f16.f16( @@ -735,9 +733,8 @@ define @intrinsic_vfadd_vf_nxv2f16_nxv2f16_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vfadd.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.nxv2f16.f16( @@ -759,9 +756,8 @@ define @intrinsic_vfadd_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, 
ta, mu -; CHECK-NEXT: vfadd.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfadd.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.mask.nxv2f16.f16( @@ -782,9 +778,8 @@ define @intrinsic_vfadd_vf_nxv4f16_nxv4f16_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vfadd.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.nxv4f16.f16( @@ -806,9 +801,8 @@ define @intrinsic_vfadd_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vfadd.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfadd.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.mask.nxv4f16.f16( @@ -829,9 +823,8 @@ define @intrinsic_vfadd_vf_nxv8f16_nxv8f16_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vfadd.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.nxv8f16.f16( @@ -853,9 +846,8 @@ define @intrinsic_vfadd_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vfadd.vf v8, v10, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfadd.vf v8, v10, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.mask.nxv8f16.f16( @@ -876,9 +868,8 @@ define @intrinsic_vfadd_vf_nxv16f16_nxv16f16_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vfadd.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.nxv16f16.f16( @@ -900,9 +891,8 @@ define @intrinsic_vfadd_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vfadd.vf v8, v12, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfadd.vf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.mask.nxv16f16.f16( @@ -923,9 +913,8 @@ define @intrinsic_vfadd_vf_nxv32f16_nxv32f16_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vfadd.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.nxv32f16.f16( @@ -947,9 +936,8 @@ define @intrinsic_vfadd_mask_vf_nxv32f16_nxv32f16_f16( 
%0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vfadd.vf v8, v16, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfadd.vf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.mask.nxv32f16.f16( @@ -970,9 +958,8 @@ define @intrinsic_vfadd_vf_nxv1f32_nxv1f32_f32( %0, float %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vfadd.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.nxv1f32.f32( @@ -994,9 +981,8 @@ define @intrinsic_vfadd_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vfadd.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfadd.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.mask.nxv1f32.f32( @@ -1017,9 +1003,8 @@ define @intrinsic_vfadd_vf_nxv2f32_nxv2f32_f32( %0, float %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vfadd.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.nxv2f32.f32( @@ -1041,9 +1026,8 @@ define @intrinsic_vfadd_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vfadd.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfadd.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.mask.nxv2f32.f32( @@ -1064,9 +1048,8 @@ define @intrinsic_vfadd_vf_nxv4f32_nxv4f32_f32( %0, float %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vfadd.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.nxv4f32.f32( @@ -1088,9 +1071,8 @@ define @intrinsic_vfadd_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vfadd.vf v8, v10, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfadd.vf v8, v10, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.mask.nxv4f32.f32( @@ -1111,9 +1093,8 @@ define @intrinsic_vfadd_vf_nxv8f32_nxv8f32_f32( %0, float %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vfadd.vf v8, v8, ft0 +; CHECK-NEXT: 
vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.nxv8f32.f32( @@ -1135,9 +1116,8 @@ define @intrinsic_vfadd_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vfadd.vf v8, v12, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfadd.vf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.mask.nxv8f32.f32( @@ -1158,9 +1138,8 @@ define @intrinsic_vfadd_vf_nxv16f32_nxv16f32_f32( %0, float %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vfadd.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.nxv16f32.f32( @@ -1182,9 +1161,8 @@ define @intrinsic_vfadd_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vfadd.vf v8, v16, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfadd.vf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.mask.nxv16f32.f32( @@ -1205,9 +1183,8 @@ define @intrinsic_vfadd_vf_nxv1f64_nxv1f64_f64( %0, double %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vfadd.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.nxv1f64.f64( @@ -1229,9 +1206,8 @@ define @intrinsic_vfadd_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vfadd.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfadd.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.mask.nxv1f64.f64( @@ -1252,9 +1228,8 @@ define @intrinsic_vfadd_vf_nxv2f64_nxv2f64_f64( %0, double %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vfadd.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.nxv2f64.f64( @@ -1276,9 +1251,8 @@ define @intrinsic_vfadd_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vfadd.vf v8, v10, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfadd.vf v8, v10, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.mask.nxv2f64.f64( @@ -1299,9 +1273,8 @@ define @intrinsic_vfadd_vf_nxv4f64_nxv4f64_f64( %0, double %1, i64 %2) 
nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vfadd.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.nxv4f64.f64( @@ -1323,9 +1296,8 @@ define @intrinsic_vfadd_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vfadd.vf v8, v12, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfadd.vf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.mask.nxv4f64.f64( @@ -1346,9 +1318,8 @@ define @intrinsic_vfadd_vf_nxv8f64_nxv8f64_f64( %0, double %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vfadd.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.nxv8f64.f64( @@ -1370,9 +1341,8 @@ define @intrinsic_vfadd_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vfadd.vf v8, v16, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfadd.vf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfadd.mask.nxv8f64.f64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfdiv-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfdiv-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfdiv-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfdiv-rv32.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: -target-abi=ilp32d < %s | FileCheck %s declare @llvm.riscv.vfdiv.nxv1f16.nxv1f16( , , @@ -687,9 +687,8 @@ define @intrinsic_vfdiv_vf_nxv1f16_nxv1f16_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfdiv.nxv1f16.f16( @@ -711,9 +710,8 @@ define @intrinsic_vfdiv_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfdiv.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfdiv.mask.nxv1f16.f16( @@ -734,9 +732,8 @@ define @intrinsic_vfdiv_vf_nxv2f16_nxv2f16_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, 
a0, e16, mf2, ta, mu +; CHECK-NEXT: vfdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfdiv.nxv2f16.f16( @@ -758,9 +755,8 @@ define @intrinsic_vfdiv_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfdiv.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfdiv.mask.nxv2f16.f16( @@ -781,9 +777,8 @@ define @intrinsic_vfdiv_vf_nxv4f16_nxv4f16_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfdiv.nxv4f16.f16( @@ -805,9 +800,8 @@ define @intrinsic_vfdiv_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfdiv.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfdiv.mask.nxv4f16.f16( @@ -828,9 +822,8 @@ define @intrinsic_vfdiv_vf_nxv8f16_nxv8f16_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfdiv.nxv8f16.f16( @@ -852,9 +845,8 @@ define @intrinsic_vfdiv_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v10, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfdiv.vf v8, v10, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfdiv.mask.nxv8f16.f16( @@ -875,9 +867,8 @@ define @intrinsic_vfdiv_vf_nxv16f16_nxv16f16_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfdiv.nxv16f16.f16( @@ -899,9 +890,8 @@ define @intrinsic_vfdiv_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v12, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfdiv.vf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfdiv.mask.nxv16f16.f16( @@ -922,9 +912,8 @@ define @intrinsic_vfdiv_vf_nxv32f16_nxv32f16_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: 
intrinsic_vfdiv_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfdiv.nxv32f16.f16( @@ -946,9 +935,8 @@ define @intrinsic_vfdiv_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v16, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfdiv.vf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfdiv.mask.nxv32f16.f16( @@ -969,9 +957,8 @@ define @intrinsic_vfdiv_vf_nxv1f32_nxv1f32_f32( %0, float %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfdiv.nxv1f32.f32( @@ -993,9 +980,8 @@ define @intrinsic_vfdiv_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfdiv.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfdiv.mask.nxv1f32.f32( @@ -1016,9 +1002,8 @@ define @intrinsic_vfdiv_vf_nxv2f32_nxv2f32_f32( %0, float %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfdiv.nxv2f32.f32( @@ -1040,9 +1025,8 @@ define @intrinsic_vfdiv_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfdiv.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfdiv.mask.nxv2f32.f32( @@ -1063,9 +1047,8 @@ define @intrinsic_vfdiv_vf_nxv4f32_nxv4f32_f32( %0, float %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfdiv.nxv4f32.f32( @@ -1087,9 +1070,8 @@ define @intrinsic_vfdiv_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v10, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: 
vfdiv.vf v8, v10, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfdiv.mask.nxv4f32.f32( @@ -1110,9 +1092,8 @@ define @intrinsic_vfdiv_vf_nxv8f32_nxv8f32_f32( %0, float %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfdiv.nxv8f32.f32( @@ -1134,9 +1115,8 @@ define @intrinsic_vfdiv_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v12, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfdiv.vf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfdiv.mask.nxv8f32.f32( @@ -1157,9 +1137,8 @@ define @intrinsic_vfdiv_vf_nxv16f32_nxv16f32_f32( %0, float %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfdiv.nxv16f32.f32( @@ -1181,9 +1160,8 @@ define @intrinsic_vfdiv_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v16, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfdiv.vf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfdiv.mask.nxv16f32.f32( @@ -1204,13 +1182,8 @@ define @intrinsic_vfdiv_vf_nxv1f64_nxv1f64_f64( %0, double %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v8, ft0 -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfdiv.nxv1f64.f64( @@ -1232,13 +1205,8 @@ define @intrinsic_vfdiv_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v9, ft0, v0.t -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfdiv.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfdiv.mask.nxv1f64.f64( @@ -1259,13 +1227,8 @@ define @intrinsic_vfdiv_vf_nxv2f64_nxv2f64_f64( %0, double %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu -; CHECK-NEXT: vfdiv.vf v8, 
v8, ft0 -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfdiv.nxv2f64.f64( @@ -1287,13 +1250,8 @@ define @intrinsic_vfdiv_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v10, ft0, v0.t -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfdiv.vf v8, v10, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfdiv.mask.nxv2f64.f64( @@ -1314,13 +1272,8 @@ define @intrinsic_vfdiv_vf_nxv4f64_nxv4f64_f64( %0, double %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v8, ft0 -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfdiv.nxv4f64.f64( @@ -1342,13 +1295,8 @@ define @intrinsic_vfdiv_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v12, ft0, v0.t -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfdiv.vf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfdiv.mask.nxv4f64.f64( @@ -1369,13 +1317,8 @@ define @intrinsic_vfdiv_vf_nxv8f64_nxv8f64_f64( %0, double %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v8, ft0 -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfdiv.nxv8f64.f64( @@ -1397,13 +1340,8 @@ define @intrinsic_vfdiv_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v16, ft0, v0.t -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfdiv.vf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfdiv.mask.nxv8f64.f64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfdiv-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfdiv-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfdiv-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfdiv-rv64.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | 
FileCheck %s +; RUN: -target-abi=lp64d < %s | FileCheck %s declare @llvm.riscv.vfdiv.nxv1f16( , , @@ -687,9 +687,8 @@ define @intrinsic_vfdiv_vf_nxv1f16_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv1f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfdiv.nxv1f16.f16( @@ -711,9 +710,8 @@ define @intrinsic_vfdiv_mask_vf_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv1f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfdiv.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfdiv.mask.nxv1f16.f16( @@ -734,9 +732,8 @@ define @intrinsic_vfdiv_vf_nxv2f16_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv2f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfdiv.nxv2f16.f16( @@ -758,9 +755,8 @@ define @intrinsic_vfdiv_mask_vf_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv2f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfdiv.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfdiv.mask.nxv2f16.f16( @@ -781,9 +777,8 @@ define @intrinsic_vfdiv_vf_nxv4f16_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv4f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfdiv.nxv4f16.f16( @@ -805,9 +800,8 @@ define @intrinsic_vfdiv_mask_vf_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv4f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfdiv.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfdiv.mask.nxv4f16.f16( @@ -828,9 +822,8 @@ define @intrinsic_vfdiv_vf_nxv8f16_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv8f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfdiv.nxv8f16.f16( @@ -852,9 +845,8 @@ define @intrinsic_vfdiv_mask_vf_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv8f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v10, ft0, v0.t +; 
CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfdiv.vf v8, v10, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfdiv.mask.nxv8f16.f16( @@ -875,9 +867,8 @@ define @intrinsic_vfdiv_vf_nxv16f16_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv16f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfdiv.nxv16f16.f16( @@ -899,9 +890,8 @@ define @intrinsic_vfdiv_mask_vf_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv16f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v12, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfdiv.vf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfdiv.mask.nxv16f16.f16( @@ -922,9 +912,8 @@ define @intrinsic_vfdiv_vf_nxv32f16_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv32f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfdiv.nxv32f16.f16( @@ -946,9 +935,8 @@ define @intrinsic_vfdiv_mask_vf_nxv32f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv32f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v16, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfdiv.vf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfdiv.mask.nxv32f16.f16( @@ -969,9 +957,8 @@ define @intrinsic_vfdiv_vf_nxv1f32_f32( %0, float %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv1f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfdiv.nxv1f32.f32( @@ -993,9 +980,8 @@ define @intrinsic_vfdiv_mask_vf_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv1f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfdiv.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfdiv.mask.nxv1f32.f32( @@ -1016,9 +1002,8 @@ define @intrinsic_vfdiv_vf_nxv2f32_f32( %0, float %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv2f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfdiv.nxv2f32.f32( @@ -1040,9 +1025,8 @@ define @intrinsic_vfdiv_mask_vf_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv2f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: 
vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfdiv.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfdiv.mask.nxv2f32.f32( @@ -1063,9 +1047,8 @@ define @intrinsic_vfdiv_vf_nxv4f32_f32( %0, float %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv4f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfdiv.nxv4f32.f32( @@ -1087,9 +1070,8 @@ define @intrinsic_vfdiv_mask_vf_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv4f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v10, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfdiv.vf v8, v10, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfdiv.mask.nxv4f32.f32( @@ -1110,9 +1092,8 @@ define @intrinsic_vfdiv_vf_nxv8f32_f32( %0, float %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv8f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfdiv.nxv8f32.f32( @@ -1134,9 +1115,8 @@ define @intrinsic_vfdiv_mask_vf_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv8f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v12, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfdiv.vf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfdiv.mask.nxv8f32.f32( @@ -1157,9 +1137,8 @@ define @intrinsic_vfdiv_vf_nxv16f32_f32( %0, float %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv16f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfdiv.nxv16f32.f32( @@ -1181,9 +1160,8 @@ define @intrinsic_vfdiv_mask_vf_nxv16f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv16f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v16, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfdiv.vf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfdiv.mask.nxv16f32.f32( @@ -1204,9 +1182,8 @@ define @intrinsic_vfdiv_vf_nxv1f64_f64( %0, double %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv1f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfdiv.nxv1f64.f64( @@ -1228,9 +1205,8 @@ define @intrinsic_vfdiv_mask_vf_nxv1f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfdiv_mask_vf_nxv1f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfdiv.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfdiv.mask.nxv1f64.f64( @@ -1251,9 +1227,8 @@ define @intrinsic_vfdiv_vf_nxv2f64_f64( %0, double %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv2f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfdiv.nxv2f64.f64( @@ -1275,9 +1250,8 @@ define @intrinsic_vfdiv_mask_vf_nxv2f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv2f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v10, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfdiv.vf v8, v10, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfdiv.mask.nxv2f64.f64( @@ -1298,9 +1272,8 @@ define @intrinsic_vfdiv_vf_nxv4f64_f64( %0, double %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv4f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfdiv.nxv4f64.f64( @@ -1322,9 +1295,8 @@ define @intrinsic_vfdiv_mask_vf_nxv4f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv4f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v12, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfdiv.vf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfdiv.mask.nxv4f64.f64( @@ -1345,9 +1317,8 @@ define @intrinsic_vfdiv_vf_nxv8f64_f64( %0, double %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv8f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfdiv.nxv8f64.f64( @@ -1369,9 +1340,8 @@ define @intrinsic_vfdiv_mask_vf_nxv8f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv8f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vfdiv.vf v8, v16, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfdiv.vf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfdiv.mask.nxv8f64.f64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmacc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmacc-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfmacc-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmacc-rv32.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: -target-abi=ilp32d < %s | FileCheck %s declare 
@llvm.riscv.vfmacc.nxv1f16.nxv1f16( , , @@ -562,9 +562,8 @@ define @intrinsic_vfmacc_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu -; CHECK-NEXT: vfmacc.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vfmacc.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmacc.nxv1f16.f16( @@ -586,9 +585,8 @@ define @intrinsic_vfmacc_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu -; CHECK-NEXT: vfmacc.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vfmacc.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmacc.mask.nxv1f16.f16( @@ -610,9 +608,8 @@ define @intrinsic_vfmacc_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu -; CHECK-NEXT: vfmacc.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vfmacc.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmacc.nxv2f16.f16( @@ -634,9 +631,8 @@ define @intrinsic_vfmacc_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu -; CHECK-NEXT: vfmacc.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vfmacc.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmacc.mask.nxv2f16.f16( @@ -658,9 +654,8 @@ define @intrinsic_vfmacc_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu -; CHECK-NEXT: vfmacc.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vfmacc.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmacc.nxv4f16.f16( @@ -682,9 +677,8 @@ define @intrinsic_vfmacc_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu -; CHECK-NEXT: vfmacc.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vfmacc.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmacc.mask.nxv4f16.f16( @@ -706,9 +700,8 @@ define @intrinsic_vfmacc_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu -; CHECK-NEXT: vfmacc.vf v8, ft0, v10 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vfmacc.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmacc.nxv8f16.f16( @@ -730,9 +723,8 @@ define @intrinsic_vfmacc_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu -; CHECK-NEXT: vfmacc.vf v8, ft0, v10, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vfmacc.vf v8, fa0, v10, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmacc.mask.nxv8f16.f16( @@ -754,9 +746,8 @@ define @intrinsic_vfmacc_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu -; CHECK-NEXT: vfmacc.vf v8, ft0, v12 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vfmacc.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmacc.nxv16f16.f16( @@ -778,9 +769,8 @@ define @intrinsic_vfmacc_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu -; CHECK-NEXT: vfmacc.vf v8, ft0, v12, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vfmacc.vf v8, fa0, v12, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmacc.mask.nxv16f16.f16( @@ -802,9 +792,8 @@ define @intrinsic_vfmacc_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu -; CHECK-NEXT: vfmacc.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vfmacc.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmacc.nxv1f32.f32( @@ -826,9 +815,8 @@ define @intrinsic_vfmacc_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu -; CHECK-NEXT: vfmacc.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vfmacc.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmacc.mask.nxv1f32.f32( @@ -850,9 +838,8 @@ define @intrinsic_vfmacc_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu -; CHECK-NEXT: vfmacc.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vfmacc.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmacc.nxv2f32.f32( @@ -874,9 +861,8 @@ define @intrinsic_vfmacc_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu -; CHECK-NEXT: vfmacc.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vfmacc.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmacc.mask.nxv2f32.f32( @@ -898,9 +884,8 @@ define @intrinsic_vfmacc_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu -; CHECK-NEXT: vfmacc.vf v8, ft0, v10 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vfmacc.vf v8, fa0, v10 ; 
CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmacc.nxv4f32.f32( @@ -922,9 +907,8 @@ define @intrinsic_vfmacc_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu -; CHECK-NEXT: vfmacc.vf v8, ft0, v10, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vfmacc.vf v8, fa0, v10, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmacc.mask.nxv4f32.f32( @@ -946,9 +930,8 @@ define @intrinsic_vfmacc_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu -; CHECK-NEXT: vfmacc.vf v8, ft0, v12 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vfmacc.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmacc.nxv8f32.f32( @@ -970,9 +953,8 @@ define @intrinsic_vfmacc_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu -; CHECK-NEXT: vfmacc.vf v8, ft0, v12, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vfmacc.vf v8, fa0, v12, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmacc.mask.nxv8f32.f32( @@ -994,13 +976,8 @@ define @intrinsic_vfmacc_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, tu, mu -; CHECK-NEXT: vfmacc.vf v8, ft0, v9 -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vfmacc.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmacc.nxv1f64.f64( @@ -1022,13 +999,8 @@ define @intrinsic_vfmacc_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, tu, mu -; CHECK-NEXT: vfmacc.vf v8, ft0, v9, v0.t -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vfmacc.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmacc.mask.nxv1f64.f64( @@ -1050,13 +1022,8 @@ define @intrinsic_vfmacc_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, tu, mu -; CHECK-NEXT: vfmacc.vf v8, ft0, v10 -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vfmacc.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmacc.nxv2f64.f64( @@ -1078,13 +1045,8 @@ define @intrinsic_vfmacc_mask_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) 
-; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, tu, mu -; CHECK-NEXT: vfmacc.vf v8, ft0, v10, v0.t -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vfmacc.vf v8, fa0, v10, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmacc.mask.nxv2f64.f64( @@ -1106,13 +1068,8 @@ define @intrinsic_vfmacc_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, tu, mu -; CHECK-NEXT: vfmacc.vf v8, ft0, v12 -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vfmacc.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmacc.nxv4f64.f64( @@ -1134,13 +1091,8 @@ define @intrinsic_vfmacc_mask_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, tu, mu -; CHECK-NEXT: vfmacc.vf v8, ft0, v12, v0.t -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vfmacc.vf v8, fa0, v12, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmacc.mask.nxv4f64.f64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmacc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmacc-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfmacc-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmacc-rv64.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: -target-abi=lp64d < %s | FileCheck %s declare @llvm.riscv.vfmacc.nxv1f16.nxv1f16( , , @@ -562,9 +562,8 @@ define @intrinsic_vfmacc_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu -; CHECK-NEXT: vfmacc.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vfmacc.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmacc.nxv1f16.f16( @@ -586,9 +585,8 @@ define @intrinsic_vfmacc_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu -; CHECK-NEXT: vfmacc.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vfmacc.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmacc.mask.nxv1f16.f16( @@ -610,9 +608,8 @@ define @intrinsic_vfmacc_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu -; CHECK-NEXT: vfmacc.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vfmacc.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmacc.nxv2f16.f16( @@ -634,9 +631,8 @@ define @intrinsic_vfmacc_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, i64 
%4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu -; CHECK-NEXT: vfmacc.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vfmacc.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmacc.mask.nxv2f16.f16( @@ -658,9 +654,8 @@ define @intrinsic_vfmacc_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu -; CHECK-NEXT: vfmacc.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vfmacc.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmacc.nxv4f16.f16( @@ -682,9 +677,8 @@ define @intrinsic_vfmacc_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu -; CHECK-NEXT: vfmacc.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vfmacc.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmacc.mask.nxv4f16.f16( @@ -706,9 +700,8 @@ define @intrinsic_vfmacc_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu -; CHECK-NEXT: vfmacc.vf v8, ft0, v10 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vfmacc.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmacc.nxv8f16.f16( @@ -730,9 +723,8 @@ define @intrinsic_vfmacc_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu -; CHECK-NEXT: vfmacc.vf v8, ft0, v10, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vfmacc.vf v8, fa0, v10, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmacc.mask.nxv8f16.f16( @@ -754,9 +746,8 @@ define @intrinsic_vfmacc_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu -; CHECK-NEXT: vfmacc.vf v8, ft0, v12 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vfmacc.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmacc.nxv16f16.f16( @@ -778,9 +769,8 @@ define @intrinsic_vfmacc_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu -; CHECK-NEXT: vfmacc.vf v8, ft0, v12, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vfmacc.vf v8, fa0, v12, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmacc.mask.nxv16f16.f16( @@ -802,9 +792,8 @@ define @intrinsic_vfmacc_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu -; CHECK-NEXT: vfmacc.vf v8, ft0, 
v9 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vfmacc.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmacc.nxv1f32.f32( @@ -826,9 +815,8 @@ define @intrinsic_vfmacc_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu -; CHECK-NEXT: vfmacc.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vfmacc.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmacc.mask.nxv1f32.f32( @@ -850,9 +838,8 @@ define @intrinsic_vfmacc_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu -; CHECK-NEXT: vfmacc.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vfmacc.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmacc.nxv2f32.f32( @@ -874,9 +861,8 @@ define @intrinsic_vfmacc_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu -; CHECK-NEXT: vfmacc.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vfmacc.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmacc.mask.nxv2f32.f32( @@ -898,9 +884,8 @@ define @intrinsic_vfmacc_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu -; CHECK-NEXT: vfmacc.vf v8, ft0, v10 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vfmacc.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmacc.nxv4f32.f32( @@ -922,9 +907,8 @@ define @intrinsic_vfmacc_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu -; CHECK-NEXT: vfmacc.vf v8, ft0, v10, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vfmacc.vf v8, fa0, v10, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmacc.mask.nxv4f32.f32( @@ -946,9 +930,8 @@ define @intrinsic_vfmacc_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu -; CHECK-NEXT: vfmacc.vf v8, ft0, v12 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vfmacc.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmacc.nxv8f32.f32( @@ -970,9 +953,8 @@ define @intrinsic_vfmacc_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu -; CHECK-NEXT: vfmacc.vf v8, ft0, v12, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vfmacc.vf v8, fa0, v12, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmacc.mask.nxv8f32.f32( @@ -994,9 +976,8 @@ define 
@intrinsic_vfmacc_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu -; CHECK-NEXT: vfmacc.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vfmacc.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmacc.nxv1f64.f64( @@ -1018,9 +999,8 @@ define @intrinsic_vfmacc_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu -; CHECK-NEXT: vfmacc.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vfmacc.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmacc.mask.nxv1f64.f64( @@ -1042,9 +1022,8 @@ define @intrinsic_vfmacc_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu -; CHECK-NEXT: vfmacc.vf v8, ft0, v10 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vfmacc.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmacc.nxv2f64.f64( @@ -1066,9 +1045,8 @@ define @intrinsic_vfmacc_mask_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu -; CHECK-NEXT: vfmacc.vf v8, ft0, v10, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vfmacc.vf v8, fa0, v10, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmacc.mask.nxv2f64.f64( @@ -1090,9 +1068,8 @@ define @intrinsic_vfmacc_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu -; CHECK-NEXT: vfmacc.vf v8, ft0, v12 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vfmacc.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmacc.nxv4f64.f64( @@ -1114,9 +1091,8 @@ define @intrinsic_vfmacc_mask_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu -; CHECK-NEXT: vfmacc.vf v8, ft0, v12, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vfmacc.vf v8, fa0, v12, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmacc.mask.nxv4f64.f64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmadd-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfmadd-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmadd-rv32.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: -target-abi=ilp32d < %s | FileCheck %s declare @llvm.riscv.vfmadd.nxv1f16.nxv1f16( , , @@ -562,9 +562,8 @@ define @intrinsic_vfmadd_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x 
ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu -; CHECK-NEXT: vfmadd.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmadd.nxv1f16.f16( @@ -586,9 +585,8 @@ define @intrinsic_vfmadd_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu -; CHECK-NEXT: vfmadd.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmadd.mask.nxv1f16.f16( @@ -610,9 +608,8 @@ define @intrinsic_vfmadd_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu -; CHECK-NEXT: vfmadd.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmadd.nxv2f16.f16( @@ -634,9 +631,8 @@ define @intrinsic_vfmadd_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu -; CHECK-NEXT: vfmadd.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmadd.mask.nxv2f16.f16( @@ -658,9 +654,8 @@ define @intrinsic_vfmadd_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu -; CHECK-NEXT: vfmadd.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmadd.nxv4f16.f16( @@ -682,9 +677,8 @@ define @intrinsic_vfmadd_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu -; CHECK-NEXT: vfmadd.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmadd.mask.nxv4f16.f16( @@ -706,9 +700,8 @@ define @intrinsic_vfmadd_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu -; CHECK-NEXT: vfmadd.vf v8, ft0, v10 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmadd.nxv8f16.f16( @@ -730,9 +723,8 @@ define @intrinsic_vfmadd_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu -; CHECK-NEXT: vfmadd.vf v8, ft0, v10, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v10, v0.t ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vfmadd.mask.nxv8f16.f16( @@ -754,9 +746,8 @@ define @intrinsic_vfmadd_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu -; CHECK-NEXT: vfmadd.vf v8, ft0, v12 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmadd.nxv16f16.f16( @@ -778,9 +769,8 @@ define @intrinsic_vfmadd_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu -; CHECK-NEXT: vfmadd.vf v8, ft0, v12, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v12, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmadd.mask.nxv16f16.f16( @@ -802,9 +792,8 @@ define @intrinsic_vfmadd_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu -; CHECK-NEXT: vfmadd.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmadd.nxv1f32.f32( @@ -826,9 +815,8 @@ define @intrinsic_vfmadd_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu -; CHECK-NEXT: vfmadd.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmadd.mask.nxv1f32.f32( @@ -850,9 +838,8 @@ define @intrinsic_vfmadd_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu -; CHECK-NEXT: vfmadd.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmadd.nxv2f32.f32( @@ -874,9 +861,8 @@ define @intrinsic_vfmadd_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu -; CHECK-NEXT: vfmadd.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmadd.mask.nxv2f32.f32( @@ -898,9 +884,8 @@ define @intrinsic_vfmadd_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu -; CHECK-NEXT: vfmadd.vf v8, ft0, v10 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmadd.nxv4f32.f32( @@ -922,9 +907,8 @@ define @intrinsic_vfmadd_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: 
# %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu -; CHECK-NEXT: vfmadd.vf v8, ft0, v10, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v10, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmadd.mask.nxv4f32.f32( @@ -946,9 +930,8 @@ define @intrinsic_vfmadd_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu -; CHECK-NEXT: vfmadd.vf v8, ft0, v12 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmadd.nxv8f32.f32( @@ -970,9 +953,8 @@ define @intrinsic_vfmadd_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu -; CHECK-NEXT: vfmadd.vf v8, ft0, v12, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v12, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmadd.mask.nxv8f32.f32( @@ -994,13 +976,8 @@ define @intrinsic_vfmadd_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, tu, mu -; CHECK-NEXT: vfmadd.vf v8, ft0, v9 -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmadd.nxv1f64.f64( @@ -1022,13 +999,8 @@ define @intrinsic_vfmadd_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, tu, mu -; CHECK-NEXT: vfmadd.vf v8, ft0, v9, v0.t -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmadd.mask.nxv1f64.f64( @@ -1050,13 +1022,8 @@ define @intrinsic_vfmadd_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, tu, mu -; CHECK-NEXT: vfmadd.vf v8, ft0, v10 -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmadd.nxv2f64.f64( @@ -1078,13 +1045,8 @@ define @intrinsic_vfmadd_mask_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, tu, mu -; CHECK-NEXT: vfmadd.vf v8, ft0, v10, v0.t -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vfmadd.vf v8, 
fa0, v10, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmadd.mask.nxv2f64.f64( @@ -1106,13 +1068,8 @@ define @intrinsic_vfmadd_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, tu, mu -; CHECK-NEXT: vfmadd.vf v8, ft0, v12 -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmadd.nxv4f64.f64( @@ -1134,13 +1091,8 @@ define @intrinsic_vfmadd_mask_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, tu, mu -; CHECK-NEXT: vfmadd.vf v8, ft0, v12, v0.t -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v12, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmadd.mask.nxv4f64.f64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmadd-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfmadd-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmadd-rv64.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: -target-abi=lp64d < %s | FileCheck %s declare @llvm.riscv.vfmadd.nxv1f16.nxv1f16( , , @@ -562,9 +562,8 @@ define @intrinsic_vfmadd_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu -; CHECK-NEXT: vfmadd.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmadd.nxv1f16.f16( @@ -586,9 +585,8 @@ define @intrinsic_vfmadd_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu -; CHECK-NEXT: vfmadd.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmadd.mask.nxv1f16.f16( @@ -610,9 +608,8 @@ define @intrinsic_vfmadd_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu -; CHECK-NEXT: vfmadd.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmadd.nxv2f16.f16( @@ -634,9 +631,8 @@ define @intrinsic_vfmadd_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu -; CHECK-NEXT: vfmadd.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, 
e16, mf2, tu, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmadd.mask.nxv2f16.f16( @@ -658,9 +654,8 @@ define @intrinsic_vfmadd_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu -; CHECK-NEXT: vfmadd.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmadd.nxv4f16.f16( @@ -682,9 +677,8 @@ define @intrinsic_vfmadd_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu -; CHECK-NEXT: vfmadd.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmadd.mask.nxv4f16.f16( @@ -706,9 +700,8 @@ define @intrinsic_vfmadd_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu -; CHECK-NEXT: vfmadd.vf v8, ft0, v10 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmadd.nxv8f16.f16( @@ -730,9 +723,8 @@ define @intrinsic_vfmadd_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu -; CHECK-NEXT: vfmadd.vf v8, ft0, v10, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v10, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmadd.mask.nxv8f16.f16( @@ -754,9 +746,8 @@ define @intrinsic_vfmadd_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu -; CHECK-NEXT: vfmadd.vf v8, ft0, v12 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmadd.nxv16f16.f16( @@ -778,9 +769,8 @@ define @intrinsic_vfmadd_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu -; CHECK-NEXT: vfmadd.vf v8, ft0, v12, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v12, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmadd.mask.nxv16f16.f16( @@ -802,9 +792,8 @@ define @intrinsic_vfmadd_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu -; CHECK-NEXT: vfmadd.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmadd.nxv1f32.f32( @@ -826,9 +815,8 @@ define @intrinsic_vfmadd_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, i64 
%4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu -; CHECK-NEXT: vfmadd.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmadd.mask.nxv1f32.f32( @@ -850,9 +838,8 @@ define @intrinsic_vfmadd_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu -; CHECK-NEXT: vfmadd.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmadd.nxv2f32.f32( @@ -874,9 +861,8 @@ define @intrinsic_vfmadd_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu -; CHECK-NEXT: vfmadd.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmadd.mask.nxv2f32.f32( @@ -898,9 +884,8 @@ define @intrinsic_vfmadd_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu -; CHECK-NEXT: vfmadd.vf v8, ft0, v10 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmadd.nxv4f32.f32( @@ -922,9 +907,8 @@ define @intrinsic_vfmadd_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu -; CHECK-NEXT: vfmadd.vf v8, ft0, v10, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v10, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmadd.mask.nxv4f32.f32( @@ -946,9 +930,8 @@ define @intrinsic_vfmadd_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu -; CHECK-NEXT: vfmadd.vf v8, ft0, v12 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmadd.nxv8f32.f32( @@ -970,9 +953,8 @@ define @intrinsic_vfmadd_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu -; CHECK-NEXT: vfmadd.vf v8, ft0, v12, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v12, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmadd.mask.nxv8f32.f32( @@ -994,9 +976,8 @@ define @intrinsic_vfmadd_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu -; CHECK-NEXT: vfmadd.vf v8, ft0, v9 
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmadd.nxv1f64.f64( @@ -1018,9 +999,8 @@ define @intrinsic_vfmadd_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu -; CHECK-NEXT: vfmadd.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmadd.mask.nxv1f64.f64( @@ -1042,9 +1022,8 @@ define @intrinsic_vfmadd_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu -; CHECK-NEXT: vfmadd.vf v8, ft0, v10 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmadd.nxv2f64.f64( @@ -1066,9 +1045,8 @@ define @intrinsic_vfmadd_mask_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu -; CHECK-NEXT: vfmadd.vf v8, ft0, v10, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v10, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmadd.mask.nxv2f64.f64( @@ -1090,9 +1068,8 @@ define @intrinsic_vfmadd_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu -; CHECK-NEXT: vfmadd.vf v8, ft0, v12 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmadd.nxv4f64.f64( @@ -1114,9 +1091,8 @@ define @intrinsic_vfmadd_mask_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu -; CHECK-NEXT: vfmadd.vf v8, ft0, v12, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v12, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmadd.mask.nxv4f64.f64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmax-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmax-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfmax-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmax-rv32.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: -target-abi=ilp32d < %s | FileCheck %s declare @llvm.riscv.vfmax.nxv1f16.nxv1f16( , , @@ -687,9 +687,8 @@ define @intrinsic_vfmax_vf_nxv1f16_nxv1f16_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vfmax.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfmax.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmax.nxv1f16.f16( @@ -711,9 +710,8 @@ define 
@intrinsic_vfmax_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vfmax.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfmax.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmax.mask.nxv1f16.f16( @@ -734,9 +732,8 @@ define @intrinsic_vfmax_vf_nxv2f16_nxv2f16_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vfmax.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfmax.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmax.nxv2f16.f16( @@ -758,9 +755,8 @@ define @intrinsic_vfmax_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vfmax.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfmax.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmax.mask.nxv2f16.f16( @@ -781,9 +777,8 @@ define @intrinsic_vfmax_vf_nxv4f16_nxv4f16_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vfmax.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfmax.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmax.nxv4f16.f16( @@ -805,9 +800,8 @@ define @intrinsic_vfmax_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vfmax.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfmax.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmax.mask.nxv4f16.f16( @@ -828,9 +822,8 @@ define @intrinsic_vfmax_vf_nxv8f16_nxv8f16_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vfmax.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfmax.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmax.nxv8f16.f16( @@ -852,9 +845,8 @@ define @intrinsic_vfmax_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vfmax.vf v8, v10, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfmax.vf v8, v10, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmax.mask.nxv8f16.f16( @@ -875,9 +867,8 @@ define @intrinsic_vfmax_vf_nxv16f16_nxv16f16_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vfmax.vf 
v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfmax.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmax.nxv16f16.f16( @@ -899,9 +890,8 @@ define @intrinsic_vfmax_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vfmax.vf v8, v12, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfmax.vf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmax.mask.nxv16f16.f16( @@ -922,9 +912,8 @@ define @intrinsic_vfmax_vf_nxv32f16_nxv32f16_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vfmax.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfmax.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmax.nxv32f16.f16( @@ -946,9 +935,8 @@ define @intrinsic_vfmax_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vfmax.vf v8, v16, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfmax.vf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmax.mask.nxv32f16.f16( @@ -969,9 +957,8 @@ define @intrinsic_vfmax_vf_nxv1f32_nxv1f32_f32( %0, float %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vfmax.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfmax.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmax.nxv1f32.f32( @@ -993,9 +980,8 @@ define @intrinsic_vfmax_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vfmax.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfmax.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmax.mask.nxv1f32.f32( @@ -1016,9 +1002,8 @@ define @intrinsic_vfmax_vf_nxv2f32_nxv2f32_f32( %0, float %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vfmax.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfmax.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmax.nxv2f32.f32( @@ -1040,9 +1025,8 @@ define @intrinsic_vfmax_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vfmax.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfmax.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmax.mask.nxv2f32.f32( @@ -1063,9 +1047,8 @@ define @intrinsic_vfmax_vf_nxv4f32_nxv4f32_f32( %0, 
float %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vfmax.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfmax.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmax.nxv4f32.f32( @@ -1087,9 +1070,8 @@ define @intrinsic_vfmax_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vfmax.vf v8, v10, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfmax.vf v8, v10, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmax.mask.nxv4f32.f32( @@ -1110,9 +1092,8 @@ define @intrinsic_vfmax_vf_nxv8f32_nxv8f32_f32( %0, float %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vfmax.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfmax.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmax.nxv8f32.f32( @@ -1134,9 +1115,8 @@ define @intrinsic_vfmax_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vfmax.vf v8, v12, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfmax.vf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmax.mask.nxv8f32.f32( @@ -1157,9 +1137,8 @@ define @intrinsic_vfmax_vf_nxv16f32_nxv16f32_f32( %0, float %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vfmax.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfmax.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmax.nxv16f32.f32( @@ -1181,9 +1160,8 @@ define @intrinsic_vfmax_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vfmax.vf v8, v16, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfmax.vf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmax.mask.nxv16f32.f32( @@ -1204,13 +1182,8 @@ define @intrinsic_vfmax_vf_nxv1f64_nxv1f64_f64( %0, double %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu -; CHECK-NEXT: vfmax.vf v8, v8, ft0 -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfmax.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmax.nxv1f64.f64( @@ -1232,13 +1205,8 @@ define @intrinsic_vfmax_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu -; CHECK-NEXT: vfmax.vf v8, v9, ft0, v0.t -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfmax.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmax.mask.nxv1f64.f64( @@ -1259,13 +1227,8 @@ define @intrinsic_vfmax_vf_nxv2f64_nxv2f64_f64( %0, double %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu -; CHECK-NEXT: vfmax.vf v8, v8, ft0 -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfmax.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmax.nxv2f64.f64( @@ -1287,13 +1250,8 @@ define @intrinsic_vfmax_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu -; CHECK-NEXT: vfmax.vf v8, v10, ft0, v0.t -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfmax.vf v8, v10, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmax.mask.nxv2f64.f64( @@ -1314,13 +1272,8 @@ define @intrinsic_vfmax_vf_nxv4f64_nxv4f64_f64( %0, double %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu -; CHECK-NEXT: vfmax.vf v8, v8, ft0 -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfmax.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmax.nxv4f64.f64( @@ -1342,13 +1295,8 @@ define @intrinsic_vfmax_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu -; CHECK-NEXT: vfmax.vf v8, v12, ft0, v0.t -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfmax.vf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmax.mask.nxv4f64.f64( @@ -1369,13 +1317,8 @@ define @intrinsic_vfmax_vf_nxv8f64_nxv8f64_f64( %0, double %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu -; CHECK-NEXT: vfmax.vf v8, v8, ft0 -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfmax.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmax.nxv8f64.f64( @@ -1397,13 +1340,8 @@ define @intrinsic_vfmax_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi 
sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu -; CHECK-NEXT: vfmax.vf v8, v16, ft0, v0.t -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfmax.vf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmax.mask.nxv8f64.f64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmax-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmax-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfmax-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmax-rv64.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: -target-abi=lp64d < %s | FileCheck %s declare @llvm.riscv.vfmax.nxv1f16.nxv1f16( , , @@ -687,9 +687,8 @@ define @intrinsic_vfmax_vf_nxv1f16_nxv1f16_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vfmax.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfmax.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmax.nxv1f16.f16( @@ -711,9 +710,8 @@ define @intrinsic_vfmax_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vfmax.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfmax.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmax.mask.nxv1f16.f16( @@ -734,9 +732,8 @@ define @intrinsic_vfmax_vf_nxv2f16_nxv2f16_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vfmax.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfmax.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmax.nxv2f16.f16( @@ -758,9 +755,8 @@ define @intrinsic_vfmax_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vfmax.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfmax.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmax.mask.nxv2f16.f16( @@ -781,9 +777,8 @@ define @intrinsic_vfmax_vf_nxv4f16_nxv4f16_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vfmax.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfmax.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmax.nxv4f16.f16( @@ -805,9 +800,8 @@ define @intrinsic_vfmax_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vfmax.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, 
a0, e16, m1, ta, mu +; CHECK-NEXT: vfmax.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmax.mask.nxv4f16.f16( @@ -828,9 +822,8 @@ define @intrinsic_vfmax_vf_nxv8f16_nxv8f16_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vfmax.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfmax.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmax.nxv8f16.f16( @@ -852,9 +845,8 @@ define @intrinsic_vfmax_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vfmax.vf v8, v10, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfmax.vf v8, v10, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmax.mask.nxv8f16.f16( @@ -875,9 +867,8 @@ define @intrinsic_vfmax_vf_nxv16f16_nxv16f16_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vfmax.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfmax.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmax.nxv16f16.f16( @@ -899,9 +890,8 @@ define @intrinsic_vfmax_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vfmax.vf v8, v12, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfmax.vf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmax.mask.nxv16f16.f16( @@ -922,9 +912,8 @@ define @intrinsic_vfmax_vf_nxv32f16_nxv32f16_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vfmax.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfmax.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmax.nxv32f16.f16( @@ -946,9 +935,8 @@ define @intrinsic_vfmax_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vfmax.vf v8, v16, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfmax.vf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmax.mask.nxv32f16.f16( @@ -969,9 +957,8 @@ define @intrinsic_vfmax_vf_nxv1f32_nxv1f32_f32( %0, float %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vfmax.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfmax.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmax.nxv1f32.f32( @@ -993,9 +980,8 @@ define @intrinsic_vfmax_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfmax_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vfmax.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfmax.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmax.mask.nxv1f32.f32( @@ -1016,9 +1002,8 @@ define @intrinsic_vfmax_vf_nxv2f32_nxv2f32_f32( %0, float %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vfmax.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfmax.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmax.nxv2f32.f32( @@ -1040,9 +1025,8 @@ define @intrinsic_vfmax_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vfmax.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfmax.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmax.mask.nxv2f32.f32( @@ -1063,9 +1047,8 @@ define @intrinsic_vfmax_vf_nxv4f32_nxv4f32_f32( %0, float %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vfmax.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfmax.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmax.nxv4f32.f32( @@ -1087,9 +1070,8 @@ define @intrinsic_vfmax_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vfmax.vf v8, v10, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfmax.vf v8, v10, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmax.mask.nxv4f32.f32( @@ -1110,9 +1092,8 @@ define @intrinsic_vfmax_vf_nxv8f32_nxv8f32_f32( %0, float %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vfmax.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfmax.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmax.nxv8f32.f32( @@ -1134,9 +1115,8 @@ define @intrinsic_vfmax_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vfmax.vf v8, v12, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfmax.vf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmax.mask.nxv8f32.f32( @@ -1157,9 +1137,8 @@ define @intrinsic_vfmax_vf_nxv16f32_nxv16f32_f32( %0, float %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vfmax.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: 
vfmax.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmax.nxv16f32.f32( @@ -1181,9 +1160,8 @@ define @intrinsic_vfmax_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vfmax.vf v8, v16, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfmax.vf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmax.mask.nxv16f32.f32( @@ -1204,9 +1182,8 @@ define @intrinsic_vfmax_vf_nxv1f64_nxv1f64_f64( %0, double %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vfmax.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfmax.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmax.nxv1f64.f64( @@ -1228,9 +1205,8 @@ define @intrinsic_vfmax_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vfmax.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfmax.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmax.mask.nxv1f64.f64( @@ -1251,9 +1227,8 @@ define @intrinsic_vfmax_vf_nxv2f64_nxv2f64_f64( %0, double %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vfmax.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfmax.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmax.nxv2f64.f64( @@ -1275,9 +1250,8 @@ define @intrinsic_vfmax_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vfmax.vf v8, v10, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfmax.vf v8, v10, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmax.mask.nxv2f64.f64( @@ -1298,9 +1272,8 @@ define @intrinsic_vfmax_vf_nxv4f64_nxv4f64_f64( %0, double %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vfmax.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfmax.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmax.nxv4f64.f64( @@ -1322,9 +1295,8 @@ define @intrinsic_vfmax_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vfmax.vf v8, v12, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfmax.vf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmax.mask.nxv4f64.f64( @@ -1345,9 +1317,8 @@ define @intrinsic_vfmax_vf_nxv8f64_nxv8f64_f64( %0, double %1, i64 %2) nounwind { ; CHECK-LABEL: 
intrinsic_vfmax_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vfmax.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfmax.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmax.nxv8f64.f64( @@ -1369,9 +1340,8 @@ define @intrinsic_vfmax_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vfmax.vf v8, v16, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfmax.vf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmax.mask.nxv8f64.f64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmerge-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmerge-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfmerge-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmerge-rv32.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: -target-abi=ilp32d < %s | FileCheck %s declare @llvm.riscv.vfmerge.nxv1f16.nxv1f16( , , @@ -32,9 +32,8 @@ define @intrinsic_vfmerge_vfm_nxv1f16_nxv1f16_f16( %0, half %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vfmerge.vfm v8, v8, ft0, v0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmerge.nxv1f16.f16( @@ -77,9 +76,8 @@ define @intrinsic_vfmerge_vfm_nxv2f16_nxv2f16_f16( %0, half %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vfmerge.vfm v8, v8, ft0, v0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmerge.nxv2f16.f16( @@ -122,9 +120,8 @@ define @intrinsic_vfmerge_vfm_nxv4f16_nxv4f16_f16( %0, half %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vfmerge.vfm v8, v8, ft0, v0 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmerge.nxv4f16.f16( @@ -167,9 +164,8 @@ define @intrinsic_vfmerge_vfm_nxv8f16_nxv8f16_f16( %0, half %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vfmerge.vfm v8, v8, ft0, v0 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmerge.nxv8f16.f16( @@ -212,9 +208,8 @@ define @intrinsic_vfmerge_vfm_nxv16f16_nxv16f16_f16( %0, half %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vfmerge.vfm v8, v8, ft0, v0 +; 
CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmerge.nxv16f16.f16( @@ -257,9 +252,8 @@ define @intrinsic_vfmerge_vfm_nxv32f16_nxv32f16_f16( %0, half %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vfmerge.vfm v8, v8, ft0, v0 +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmerge.nxv32f16.f16( @@ -302,9 +296,8 @@ define @intrinsic_vfmerge_vfm_nxv1f32_nxv1f32_f32( %0, float %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vfmerge.vfm v8, v8, ft0, v0 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmerge.nxv1f32.f32( @@ -347,9 +340,8 @@ define @intrinsic_vfmerge_vfm_nxv2f32_nxv2f32_f32( %0, float %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vfmerge.vfm v8, v8, ft0, v0 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmerge.nxv2f32.f32( @@ -392,9 +384,8 @@ define @intrinsic_vfmerge_vfm_nxv4f32_nxv4f32_f32( %0, float %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vfmerge.vfm v8, v8, ft0, v0 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmerge.nxv4f32.f32( @@ -437,9 +428,8 @@ define @intrinsic_vfmerge_vfm_nxv8f32_nxv8f32_f32( %0, float %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vfmerge.vfm v8, v8, ft0, v0 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmerge.nxv8f32.f32( @@ -482,9 +472,8 @@ define @intrinsic_vfmerge_vfm_nxv16f32_nxv16f32_f32( %0, float %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vfmerge.vfm v8, v8, ft0, v0 +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmerge.nxv16f32.f32( @@ -527,13 +516,8 @@ define @intrinsic_vfmerge_vfm_nxv1f64_nxv1f64_f64( %0, double %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu -; CHECK-NEXT: vfmerge.vfm v8, v8, ft0, v0 -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 
; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmerge.nxv1f64.f64( @@ -576,13 +560,8 @@ define @intrinsic_vfmerge_vfm_nxv2f64_nxv2f64_f64( %0, double %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu -; CHECK-NEXT: vfmerge.vfm v8, v8, ft0, v0 -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmerge.nxv2f64.f64( @@ -625,13 +604,8 @@ define @intrinsic_vfmerge_vfm_nxv4f64_nxv4f64_f64( %0, double %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu -; CHECK-NEXT: vfmerge.vfm v8, v8, ft0, v0 -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmerge.nxv4f64.f64( @@ -674,13 +648,8 @@ define @intrinsic_vfmerge_vfm_nxv8f64_nxv8f64_f64( %0, double %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu -; CHECK-NEXT: vfmerge.vfm v8, v8, ft0, v0 -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmerge.nxv8f64.f64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmerge-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmerge-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfmerge-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmerge-rv64.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: -target-abi=lp64d < %s | FileCheck %s declare @llvm.riscv.vfmerge.nxv1f16.nxv1f16( , , @@ -32,9 +32,8 @@ define @intrinsic_vfmerge_vfm_nxv1f16_nxv1f16_f16( %0, half %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vfmerge.vfm v8, v8, ft0, v0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmerge.nxv1f16.f16( @@ -77,9 +76,8 @@ define @intrinsic_vfmerge_vfm_nxv2f16_nxv2f16_f16( %0, half %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vfmerge.vfm v8, v8, ft0, v0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmerge.nxv2f16.f16( @@ -122,9 +120,8 @@ define @intrinsic_vfmerge_vfm_nxv4f16_nxv4f16_f16( %0, half %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: 
vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vfmerge.vfm v8, v8, ft0, v0 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmerge.nxv4f16.f16( @@ -167,9 +164,8 @@ define @intrinsic_vfmerge_vfm_nxv8f16_nxv8f16_f16( %0, half %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vfmerge.vfm v8, v8, ft0, v0 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmerge.nxv8f16.f16( @@ -212,9 +208,8 @@ define @intrinsic_vfmerge_vfm_nxv16f16_nxv16f16_f16( %0, half %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vfmerge.vfm v8, v8, ft0, v0 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmerge.nxv16f16.f16( @@ -257,9 +252,8 @@ define @intrinsic_vfmerge_vfm_nxv32f16_nxv32f16_f16( %0, half %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vfmerge.vfm v8, v8, ft0, v0 +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmerge.nxv32f16.f16( @@ -302,9 +296,8 @@ define @intrinsic_vfmerge_vfm_nxv1f32_nxv1f32_f32( %0, float %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vfmerge.vfm v8, v8, ft0, v0 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmerge.nxv1f32.f32( @@ -347,9 +340,8 @@ define @intrinsic_vfmerge_vfm_nxv2f32_nxv2f32_f32( %0, float %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vfmerge.vfm v8, v8, ft0, v0 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmerge.nxv2f32.f32( @@ -392,9 +384,8 @@ define @intrinsic_vfmerge_vfm_nxv4f32_nxv4f32_f32( %0, float %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vfmerge.vfm v8, v8, ft0, v0 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmerge.nxv4f32.f32( @@ -437,9 +428,8 @@ define @intrinsic_vfmerge_vfm_nxv8f32_nxv8f32_f32( %0, float %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vfmerge.vfm v8, v8, ft0, v0 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vfmerge.nxv8f32.f32( @@ -482,9 +472,8 @@ define @intrinsic_vfmerge_vfm_nxv16f32_nxv16f32_f32( %0, float %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vfmerge.vfm v8, v8, ft0, v0 +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmerge.nxv16f32.f32( @@ -527,9 +516,8 @@ define @intrinsic_vfmerge_vfm_nxv1f64_nxv1f64_f64( %0, double %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vfmerge.vfm v8, v8, ft0, v0 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmerge.nxv1f64.f64( @@ -572,9 +560,8 @@ define @intrinsic_vfmerge_vfm_nxv2f64_nxv2f64_f64( %0, double %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vfmerge.vfm v8, v8, ft0, v0 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmerge.nxv2f64.f64( @@ -617,9 +604,8 @@ define @intrinsic_vfmerge_vfm_nxv4f64_nxv4f64_f64( %0, double %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vfmerge.vfm v8, v8, ft0, v0 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmerge.nxv4f64.f64( @@ -662,9 +648,8 @@ define @intrinsic_vfmerge_vfm_nxv8f64_nxv8f64_f64( %0, double %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vfmerge.vfm v8, v8, ft0, v0 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmerge.nxv8f64.f64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmin-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmin-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfmin-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmin-rv32.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: -target-abi=ilp32d < %s | FileCheck %s declare @llvm.riscv.vfmin.nxv1f16.nxv1f16( , , @@ -687,9 +687,8 @@ define @intrinsic_vfmin_vf_nxv1f16_nxv1f16_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vfmin.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfmin.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmin.nxv1f16.f16( @@ -711,9 +710,8 @@ define @intrinsic_vfmin_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vfmin.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfmin.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmin.mask.nxv1f16.f16( @@ -734,9 +732,8 @@ define @intrinsic_vfmin_vf_nxv2f16_nxv2f16_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vfmin.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfmin.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmin.nxv2f16.f16( @@ -758,9 +755,8 @@ define @intrinsic_vfmin_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vfmin.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfmin.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmin.mask.nxv2f16.f16( @@ -781,9 +777,8 @@ define @intrinsic_vfmin_vf_nxv4f16_nxv4f16_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vfmin.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfmin.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmin.nxv4f16.f16( @@ -805,9 +800,8 @@ define @intrinsic_vfmin_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vfmin.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfmin.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmin.mask.nxv4f16.f16( @@ -828,9 +822,8 @@ define @intrinsic_vfmin_vf_nxv8f16_nxv8f16_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vfmin.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfmin.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmin.nxv8f16.f16( @@ -852,9 +845,8 @@ define @intrinsic_vfmin_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vfmin.vf v8, v10, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfmin.vf v8, v10, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmin.mask.nxv8f16.f16( @@ -875,9 +867,8 @@ define @intrinsic_vfmin_vf_nxv16f16_nxv16f16_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vfmin.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfmin.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vfmin.nxv16f16.f16( @@ -899,9 +890,8 @@ define @intrinsic_vfmin_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vfmin.vf v8, v12, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfmin.vf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmin.mask.nxv16f16.f16( @@ -922,9 +912,8 @@ define @intrinsic_vfmin_vf_nxv32f16_nxv32f16_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vfmin.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfmin.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmin.nxv32f16.f16( @@ -946,9 +935,8 @@ define @intrinsic_vfmin_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vfmin.vf v8, v16, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfmin.vf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmin.mask.nxv32f16.f16( @@ -969,9 +957,8 @@ define @intrinsic_vfmin_vf_nxv1f32_nxv1f32_f32( %0, float %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vfmin.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfmin.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmin.nxv1f32.f32( @@ -993,9 +980,8 @@ define @intrinsic_vfmin_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vfmin.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfmin.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmin.mask.nxv1f32.f32( @@ -1016,9 +1002,8 @@ define @intrinsic_vfmin_vf_nxv2f32_nxv2f32_f32( %0, float %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vfmin.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfmin.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmin.nxv2f32.f32( @@ -1040,9 +1025,8 @@ define @intrinsic_vfmin_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vfmin.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfmin.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmin.mask.nxv2f32.f32( @@ -1063,9 +1047,8 @@ define @intrinsic_vfmin_vf_nxv4f32_nxv4f32_f32( %0, float %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x 
ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vfmin.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfmin.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmin.nxv4f32.f32( @@ -1087,9 +1070,8 @@ define @intrinsic_vfmin_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vfmin.vf v8, v10, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfmin.vf v8, v10, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmin.mask.nxv4f32.f32( @@ -1110,9 +1092,8 @@ define @intrinsic_vfmin_vf_nxv8f32_nxv8f32_f32( %0, float %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vfmin.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfmin.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmin.nxv8f32.f32( @@ -1134,9 +1115,8 @@ define @intrinsic_vfmin_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vfmin.vf v8, v12, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfmin.vf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmin.mask.nxv8f32.f32( @@ -1157,9 +1137,8 @@ define @intrinsic_vfmin_vf_nxv16f32_nxv16f32_f32( %0, float %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vfmin.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfmin.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmin.nxv16f32.f32( @@ -1181,9 +1160,8 @@ define @intrinsic_vfmin_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vfmin.vf v8, v16, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfmin.vf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmin.mask.nxv16f32.f32( @@ -1204,13 +1182,8 @@ define @intrinsic_vfmin_vf_nxv1f64_nxv1f64_f64( %0, double %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu -; CHECK-NEXT: vfmin.vf v8, v8, ft0 -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfmin.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmin.nxv1f64.f64( @@ -1232,13 +1205,8 @@ define @intrinsic_vfmin_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, 
e64, m1, ta, mu -; CHECK-NEXT: vfmin.vf v8, v9, ft0, v0.t -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfmin.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmin.mask.nxv1f64.f64( @@ -1259,13 +1227,8 @@ define @intrinsic_vfmin_vf_nxv2f64_nxv2f64_f64( %0, double %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu -; CHECK-NEXT: vfmin.vf v8, v8, ft0 -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfmin.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmin.nxv2f64.f64( @@ -1287,13 +1250,8 @@ define @intrinsic_vfmin_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu -; CHECK-NEXT: vfmin.vf v8, v10, ft0, v0.t -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfmin.vf v8, v10, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmin.mask.nxv2f64.f64( @@ -1314,13 +1272,8 @@ define @intrinsic_vfmin_vf_nxv4f64_nxv4f64_f64( %0, double %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu -; CHECK-NEXT: vfmin.vf v8, v8, ft0 -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfmin.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmin.nxv4f64.f64( @@ -1342,13 +1295,8 @@ define @intrinsic_vfmin_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu -; CHECK-NEXT: vfmin.vf v8, v12, ft0, v0.t -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfmin.vf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmin.mask.nxv4f64.f64( @@ -1369,13 +1317,8 @@ define @intrinsic_vfmin_vf_nxv8f64_nxv8f64_f64( %0, double %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu -; CHECK-NEXT: vfmin.vf v8, v8, ft0 -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfmin.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmin.nxv8f64.f64( @@ -1397,13 +1340,8 @@ define @intrinsic_vfmin_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, 
m8, ta, mu -; CHECK-NEXT: vfmin.vf v8, v16, ft0, v0.t -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfmin.vf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmin.mask.nxv8f64.f64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmin-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmin-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfmin-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmin-rv64.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: -target-abi=lp64d < %s | FileCheck %s declare @llvm.riscv.vfmin.nxv1f16.nxv1f16( , , @@ -687,9 +687,8 @@ define @intrinsic_vfmin_vf_nxv1f16_nxv1f16_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vfmin.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfmin.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmin.nxv1f16.f16( @@ -711,9 +710,8 @@ define @intrinsic_vfmin_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vfmin.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfmin.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmin.mask.nxv1f16.f16( @@ -734,9 +732,8 @@ define @intrinsic_vfmin_vf_nxv2f16_nxv2f16_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vfmin.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfmin.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmin.nxv2f16.f16( @@ -758,9 +755,8 @@ define @intrinsic_vfmin_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vfmin.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfmin.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmin.mask.nxv2f16.f16( @@ -781,9 +777,8 @@ define @intrinsic_vfmin_vf_nxv4f16_nxv4f16_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vfmin.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfmin.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmin.nxv4f16.f16( @@ -805,9 +800,8 @@ define @intrinsic_vfmin_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vfmin.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfmin.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmin.mask.nxv4f16.f16( @@ 
-828,9 +822,8 @@ define @intrinsic_vfmin_vf_nxv8f16_nxv8f16_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vfmin.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfmin.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmin.nxv8f16.f16( @@ -852,9 +845,8 @@ define @intrinsic_vfmin_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vfmin.vf v8, v10, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfmin.vf v8, v10, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmin.mask.nxv8f16.f16( @@ -875,9 +867,8 @@ define @intrinsic_vfmin_vf_nxv16f16_nxv16f16_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vfmin.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfmin.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmin.nxv16f16.f16( @@ -899,9 +890,8 @@ define @intrinsic_vfmin_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vfmin.vf v8, v12, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfmin.vf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmin.mask.nxv16f16.f16( @@ -922,9 +912,8 @@ define @intrinsic_vfmin_vf_nxv32f16_nxv32f16_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vfmin.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfmin.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmin.nxv32f16.f16( @@ -946,9 +935,8 @@ define @intrinsic_vfmin_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vfmin.vf v8, v16, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfmin.vf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmin.mask.nxv32f16.f16( @@ -969,9 +957,8 @@ define @intrinsic_vfmin_vf_nxv1f32_nxv1f32_f32( %0, float %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vfmin.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfmin.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmin.nxv1f32.f32( @@ -993,9 +980,8 @@ define @intrinsic_vfmin_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, 
mu -; CHECK-NEXT: vfmin.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfmin.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmin.mask.nxv1f32.f32( @@ -1016,9 +1002,8 @@ define @intrinsic_vfmin_vf_nxv2f32_nxv2f32_f32( %0, float %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vfmin.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfmin.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmin.nxv2f32.f32( @@ -1040,9 +1025,8 @@ define @intrinsic_vfmin_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vfmin.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfmin.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmin.mask.nxv2f32.f32( @@ -1063,9 +1047,8 @@ define @intrinsic_vfmin_vf_nxv4f32_nxv4f32_f32( %0, float %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vfmin.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfmin.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmin.nxv4f32.f32( @@ -1087,9 +1070,8 @@ define @intrinsic_vfmin_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vfmin.vf v8, v10, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfmin.vf v8, v10, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmin.mask.nxv4f32.f32( @@ -1110,9 +1092,8 @@ define @intrinsic_vfmin_vf_nxv8f32_nxv8f32_f32( %0, float %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vfmin.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfmin.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmin.nxv8f32.f32( @@ -1134,9 +1115,8 @@ define @intrinsic_vfmin_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vfmin.vf v8, v12, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfmin.vf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmin.mask.nxv8f32.f32( @@ -1157,9 +1137,8 @@ define @intrinsic_vfmin_vf_nxv16f32_nxv16f32_f32( %0, float %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vfmin.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfmin.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmin.nxv16f32.f32( @@ -1181,9 +1160,8 @@ define 
@intrinsic_vfmin_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vfmin.vf v8, v16, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfmin.vf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmin.mask.nxv16f32.f32( @@ -1204,9 +1182,8 @@ define @intrinsic_vfmin_vf_nxv1f64_nxv1f64_f64( %0, double %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vfmin.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfmin.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmin.nxv1f64.f64( @@ -1228,9 +1205,8 @@ define @intrinsic_vfmin_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vfmin.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfmin.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmin.mask.nxv1f64.f64( @@ -1251,9 +1227,8 @@ define @intrinsic_vfmin_vf_nxv2f64_nxv2f64_f64( %0, double %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vfmin.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfmin.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmin.nxv2f64.f64( @@ -1275,9 +1250,8 @@ define @intrinsic_vfmin_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vfmin.vf v8, v10, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfmin.vf v8, v10, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmin.mask.nxv2f64.f64( @@ -1298,9 +1272,8 @@ define @intrinsic_vfmin_vf_nxv4f64_nxv4f64_f64( %0, double %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vfmin.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfmin.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmin.nxv4f64.f64( @@ -1322,9 +1295,8 @@ define @intrinsic_vfmin_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vfmin.vf v8, v12, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfmin.vf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmin.mask.nxv4f64.f64( @@ -1345,9 +1317,8 @@ define @intrinsic_vfmin_vf_nxv8f64_nxv8f64_f64( %0, double %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, 
mu -; CHECK-NEXT: vfmin.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfmin.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmin.nxv8f64.f64( @@ -1369,9 +1340,8 @@ define @intrinsic_vfmin_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vfmin.vf v8, v16, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfmin.vf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmin.mask.nxv8f64.f64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmsac-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmsac-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfmsac-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmsac-rv32.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: -target-abi=ilp32d < %s | FileCheck %s declare @llvm.riscv.vfmsac.nxv1f16.nxv1f16( , , @@ -562,9 +562,8 @@ define @intrinsic_vfmsac_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu -; CHECK-NEXT: vfmsac.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vfmsac.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsac.nxv1f16.f16( @@ -586,9 +585,8 @@ define @intrinsic_vfmsac_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu -; CHECK-NEXT: vfmsac.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vfmsac.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsac.mask.nxv1f16.f16( @@ -610,9 +608,8 @@ define @intrinsic_vfmsac_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu -; CHECK-NEXT: vfmsac.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vfmsac.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsac.nxv2f16.f16( @@ -634,9 +631,8 @@ define @intrinsic_vfmsac_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu -; CHECK-NEXT: vfmsac.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vfmsac.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsac.mask.nxv2f16.f16( @@ -658,9 +654,8 @@ define @intrinsic_vfmsac_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu -; CHECK-NEXT: vfmsac.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vfmsac.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsac.nxv4f16.f16( @@ -682,9 
+677,8 @@ define @intrinsic_vfmsac_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu -; CHECK-NEXT: vfmsac.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vfmsac.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsac.mask.nxv4f16.f16( @@ -706,9 +700,8 @@ define @intrinsic_vfmsac_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu -; CHECK-NEXT: vfmsac.vf v8, ft0, v10 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vfmsac.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsac.nxv8f16.f16( @@ -730,9 +723,8 @@ define @intrinsic_vfmsac_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu -; CHECK-NEXT: vfmsac.vf v8, ft0, v10, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vfmsac.vf v8, fa0, v10, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsac.mask.nxv8f16.f16( @@ -754,9 +746,8 @@ define @intrinsic_vfmsac_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu -; CHECK-NEXT: vfmsac.vf v8, ft0, v12 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vfmsac.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsac.nxv16f16.f16( @@ -778,9 +769,8 @@ define @intrinsic_vfmsac_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu -; CHECK-NEXT: vfmsac.vf v8, ft0, v12, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vfmsac.vf v8, fa0, v12, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsac.mask.nxv16f16.f16( @@ -802,9 +792,8 @@ define @intrinsic_vfmsac_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu -; CHECK-NEXT: vfmsac.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vfmsac.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsac.nxv1f32.f32( @@ -826,9 +815,8 @@ define @intrinsic_vfmsac_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu -; CHECK-NEXT: vfmsac.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vfmsac.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsac.mask.nxv1f32.f32( @@ -850,9 +838,8 @@ define @intrinsic_vfmsac_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu -; CHECK-NEXT: vfmsac.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vfmsac.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsac.nxv2f32.f32( @@ -874,9 +861,8 @@ define @intrinsic_vfmsac_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu -; CHECK-NEXT: vfmsac.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vfmsac.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsac.mask.nxv2f32.f32( @@ -898,9 +884,8 @@ define @intrinsic_vfmsac_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu -; CHECK-NEXT: vfmsac.vf v8, ft0, v10 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vfmsac.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsac.nxv4f32.f32( @@ -922,9 +907,8 @@ define @intrinsic_vfmsac_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu -; CHECK-NEXT: vfmsac.vf v8, ft0, v10, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vfmsac.vf v8, fa0, v10, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsac.mask.nxv4f32.f32( @@ -946,9 +930,8 @@ define @intrinsic_vfmsac_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu -; CHECK-NEXT: vfmsac.vf v8, ft0, v12 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vfmsac.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsac.nxv8f32.f32( @@ -970,9 +953,8 @@ define @intrinsic_vfmsac_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu -; CHECK-NEXT: vfmsac.vf v8, ft0, v12, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vfmsac.vf v8, fa0, v12, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsac.mask.nxv8f32.f32( @@ -994,13 +976,8 @@ define @intrinsic_vfmsac_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, tu, mu -; CHECK-NEXT: vfmsac.vf v8, ft0, v9 -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vfmsac.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsac.nxv1f64.f64( @@ -1022,13 +999,8 @@ define @intrinsic_vfmsac_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 
8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, tu, mu -; CHECK-NEXT: vfmsac.vf v8, ft0, v9, v0.t -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vfmsac.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsac.mask.nxv1f64.f64( @@ -1050,13 +1022,8 @@ define @intrinsic_vfmsac_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, tu, mu -; CHECK-NEXT: vfmsac.vf v8, ft0, v10 -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vfmsac.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsac.nxv2f64.f64( @@ -1078,13 +1045,8 @@ define @intrinsic_vfmsac_mask_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, tu, mu -; CHECK-NEXT: vfmsac.vf v8, ft0, v10, v0.t -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vfmsac.vf v8, fa0, v10, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsac.mask.nxv2f64.f64( @@ -1106,13 +1068,8 @@ define @intrinsic_vfmsac_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, tu, mu -; CHECK-NEXT: vfmsac.vf v8, ft0, v12 -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vfmsac.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsac.nxv4f64.f64( @@ -1134,13 +1091,8 @@ define @intrinsic_vfmsac_mask_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, tu, mu -; CHECK-NEXT: vfmsac.vf v8, ft0, v12, v0.t -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vfmsac.vf v8, fa0, v12, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsac.mask.nxv4f64.f64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmsac-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmsac-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfmsac-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmsac-rv64.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: -target-abi=lp64d < %s | FileCheck %s declare @llvm.riscv.vfmsac.nxv1f16.nxv1f16( , , @@ -562,9 +562,8 @@ define @intrinsic_vfmsac_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu -; CHECK-NEXT: vfmsac.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; 
CHECK-NEXT: vfmsac.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsac.nxv1f16.f16( @@ -586,9 +585,8 @@ define @intrinsic_vfmsac_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu -; CHECK-NEXT: vfmsac.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vfmsac.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsac.mask.nxv1f16.f16( @@ -610,9 +608,8 @@ define @intrinsic_vfmsac_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu -; CHECK-NEXT: vfmsac.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vfmsac.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsac.nxv2f16.f16( @@ -634,9 +631,8 @@ define @intrinsic_vfmsac_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu -; CHECK-NEXT: vfmsac.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vfmsac.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsac.mask.nxv2f16.f16( @@ -658,9 +654,8 @@ define @intrinsic_vfmsac_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu -; CHECK-NEXT: vfmsac.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vfmsac.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsac.nxv4f16.f16( @@ -682,9 +677,8 @@ define @intrinsic_vfmsac_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu -; CHECK-NEXT: vfmsac.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vfmsac.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsac.mask.nxv4f16.f16( @@ -706,9 +700,8 @@ define @intrinsic_vfmsac_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu -; CHECK-NEXT: vfmsac.vf v8, ft0, v10 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vfmsac.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsac.nxv8f16.f16( @@ -730,9 +723,8 @@ define @intrinsic_vfmsac_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu -; CHECK-NEXT: vfmsac.vf v8, ft0, v10, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vfmsac.vf v8, fa0, v10, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsac.mask.nxv8f16.f16( @@ -754,9 +746,8 @@ define @intrinsic_vfmsac_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, i64 %3) nounwind { ; 
CHECK-LABEL: intrinsic_vfmsac_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu -; CHECK-NEXT: vfmsac.vf v8, ft0, v12 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vfmsac.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsac.nxv16f16.f16( @@ -778,9 +769,8 @@ define @intrinsic_vfmsac_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu -; CHECK-NEXT: vfmsac.vf v8, ft0, v12, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vfmsac.vf v8, fa0, v12, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsac.mask.nxv16f16.f16( @@ -802,9 +792,8 @@ define @intrinsic_vfmsac_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu -; CHECK-NEXT: vfmsac.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vfmsac.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsac.nxv1f32.f32( @@ -826,9 +815,8 @@ define @intrinsic_vfmsac_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu -; CHECK-NEXT: vfmsac.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vfmsac.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsac.mask.nxv1f32.f32( @@ -850,9 +838,8 @@ define @intrinsic_vfmsac_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu -; CHECK-NEXT: vfmsac.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vfmsac.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsac.nxv2f32.f32( @@ -874,9 +861,8 @@ define @intrinsic_vfmsac_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu -; CHECK-NEXT: vfmsac.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vfmsac.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsac.mask.nxv2f32.f32( @@ -898,9 +884,8 @@ define @intrinsic_vfmsac_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu -; CHECK-NEXT: vfmsac.vf v8, ft0, v10 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vfmsac.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsac.nxv4f32.f32( @@ -922,9 +907,8 @@ define @intrinsic_vfmsac_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu -; CHECK-NEXT: vfmsac.vf v8, ft0, v10, v0.t +; 
CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vfmsac.vf v8, fa0, v10, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsac.mask.nxv4f32.f32( @@ -946,9 +930,8 @@ define @intrinsic_vfmsac_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu -; CHECK-NEXT: vfmsac.vf v8, ft0, v12 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vfmsac.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsac.nxv8f32.f32( @@ -970,9 +953,8 @@ define @intrinsic_vfmsac_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu -; CHECK-NEXT: vfmsac.vf v8, ft0, v12, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vfmsac.vf v8, fa0, v12, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsac.mask.nxv8f32.f32( @@ -994,9 +976,8 @@ define @intrinsic_vfmsac_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu -; CHECK-NEXT: vfmsac.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vfmsac.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsac.nxv1f64.f64( @@ -1018,9 +999,8 @@ define @intrinsic_vfmsac_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu -; CHECK-NEXT: vfmsac.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vfmsac.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsac.mask.nxv1f64.f64( @@ -1042,9 +1022,8 @@ define @intrinsic_vfmsac_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu -; CHECK-NEXT: vfmsac.vf v8, ft0, v10 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vfmsac.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsac.nxv2f64.f64( @@ -1066,9 +1045,8 @@ define @intrinsic_vfmsac_mask_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu -; CHECK-NEXT: vfmsac.vf v8, ft0, v10, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vfmsac.vf v8, fa0, v10, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsac.mask.nxv2f64.f64( @@ -1090,9 +1068,8 @@ define @intrinsic_vfmsac_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu -; CHECK-NEXT: vfmsac.vf v8, ft0, v12 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vfmsac.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsac.nxv4f64.f64( @@ -1114,9 +1091,8 @@ define 
@intrinsic_vfmsac_mask_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu -; CHECK-NEXT: vfmsac.vf v8, ft0, v12, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vfmsac.vf v8, fa0, v12, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsac.mask.nxv4f64.f64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmsub-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfmsub-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmsub-rv32.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: -target-abi=ilp32d < %s | FileCheck %s declare @llvm.riscv.vfmsub.nxv1f16.nxv1f16( , , @@ -562,9 +562,8 @@ define @intrinsic_vfmsub_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu -; CHECK-NEXT: vfmsub.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsub.nxv1f16.f16( @@ -586,9 +585,8 @@ define @intrinsic_vfmsub_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu -; CHECK-NEXT: vfmsub.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsub.mask.nxv1f16.f16( @@ -610,9 +608,8 @@ define @intrinsic_vfmsub_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu -; CHECK-NEXT: vfmsub.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsub.nxv2f16.f16( @@ -634,9 +631,8 @@ define @intrinsic_vfmsub_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu -; CHECK-NEXT: vfmsub.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsub.mask.nxv2f16.f16( @@ -658,9 +654,8 @@ define @intrinsic_vfmsub_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu -; CHECK-NEXT: vfmsub.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsub.nxv4f16.f16( @@ -682,9 +677,8 @@ define @intrinsic_vfmsub_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, 
a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu -; CHECK-NEXT: vfmsub.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsub.mask.nxv4f16.f16( @@ -706,9 +700,8 @@ define @intrinsic_vfmsub_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu -; CHECK-NEXT: vfmsub.vf v8, ft0, v10 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsub.nxv8f16.f16( @@ -730,9 +723,8 @@ define @intrinsic_vfmsub_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu -; CHECK-NEXT: vfmsub.vf v8, ft0, v10, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v10, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsub.mask.nxv8f16.f16( @@ -754,9 +746,8 @@ define @intrinsic_vfmsub_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu -; CHECK-NEXT: vfmsub.vf v8, ft0, v12 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsub.nxv16f16.f16( @@ -778,9 +769,8 @@ define @intrinsic_vfmsub_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu -; CHECK-NEXT: vfmsub.vf v8, ft0, v12, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v12, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsub.mask.nxv16f16.f16( @@ -802,9 +792,8 @@ define @intrinsic_vfmsub_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu -; CHECK-NEXT: vfmsub.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsub.nxv1f32.f32( @@ -826,9 +815,8 @@ define @intrinsic_vfmsub_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu -; CHECK-NEXT: vfmsub.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsub.mask.nxv1f32.f32( @@ -850,9 +838,8 @@ define @intrinsic_vfmsub_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu -; CHECK-NEXT: vfmsub.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vfmsub.nxv2f32.f32( @@ -874,9 +861,8 @@ define @intrinsic_vfmsub_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu -; CHECK-NEXT: vfmsub.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsub.mask.nxv2f32.f32( @@ -898,9 +884,8 @@ define @intrinsic_vfmsub_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu -; CHECK-NEXT: vfmsub.vf v8, ft0, v10 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsub.nxv4f32.f32( @@ -922,9 +907,8 @@ define @intrinsic_vfmsub_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu -; CHECK-NEXT: vfmsub.vf v8, ft0, v10, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v10, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsub.mask.nxv4f32.f32( @@ -946,9 +930,8 @@ define @intrinsic_vfmsub_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu -; CHECK-NEXT: vfmsub.vf v8, ft0, v12 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsub.nxv8f32.f32( @@ -970,9 +953,8 @@ define @intrinsic_vfmsub_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu -; CHECK-NEXT: vfmsub.vf v8, ft0, v12, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v12, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsub.mask.nxv8f32.f32( @@ -994,13 +976,8 @@ define @intrinsic_vfmsub_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, tu, mu -; CHECK-NEXT: vfmsub.vf v8, ft0, v9 -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsub.nxv1f64.f64( @@ -1022,13 +999,8 @@ define @intrinsic_vfmsub_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, tu, mu -; CHECK-NEXT: vfmsub.vf v8, ft0, v9, v0.t -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v9, v0.t ; 
CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsub.mask.nxv1f64.f64( @@ -1050,13 +1022,8 @@ define @intrinsic_vfmsub_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, tu, mu -; CHECK-NEXT: vfmsub.vf v8, ft0, v10 -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsub.nxv2f64.f64( @@ -1078,13 +1045,8 @@ define @intrinsic_vfmsub_mask_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, tu, mu -; CHECK-NEXT: vfmsub.vf v8, ft0, v10, v0.t -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v10, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsub.mask.nxv2f64.f64( @@ -1106,13 +1068,8 @@ define @intrinsic_vfmsub_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, tu, mu -; CHECK-NEXT: vfmsub.vf v8, ft0, v12 -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsub.nxv4f64.f64( @@ -1134,13 +1091,8 @@ define @intrinsic_vfmsub_mask_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, tu, mu -; CHECK-NEXT: vfmsub.vf v8, ft0, v12, v0.t -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v12, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsub.mask.nxv4f64.f64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmsub-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfmsub-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmsub-rv64.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: -target-abi=lp64d < %s | FileCheck %s declare @llvm.riscv.vfmsub.nxv1f16.nxv1f16( , , @@ -562,9 +562,8 @@ define @intrinsic_vfmsub_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu -; CHECK-NEXT: vfmsub.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsub.nxv1f16.f16( @@ -586,9 +585,8 @@ define @intrinsic_vfmsub_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfmsub_mask_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu -; CHECK-NEXT: vfmsub.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsub.mask.nxv1f16.f16( @@ -610,9 +608,8 @@ define @intrinsic_vfmsub_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu -; CHECK-NEXT: vfmsub.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsub.nxv2f16.f16( @@ -634,9 +631,8 @@ define @intrinsic_vfmsub_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu -; CHECK-NEXT: vfmsub.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsub.mask.nxv2f16.f16( @@ -658,9 +654,8 @@ define @intrinsic_vfmsub_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu -; CHECK-NEXT: vfmsub.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsub.nxv4f16.f16( @@ -682,9 +677,8 @@ define @intrinsic_vfmsub_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu -; CHECK-NEXT: vfmsub.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsub.mask.nxv4f16.f16( @@ -706,9 +700,8 @@ define @intrinsic_vfmsub_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu -; CHECK-NEXT: vfmsub.vf v8, ft0, v10 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsub.nxv8f16.f16( @@ -730,9 +723,8 @@ define @intrinsic_vfmsub_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu -; CHECK-NEXT: vfmsub.vf v8, ft0, v10, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v10, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsub.mask.nxv8f16.f16( @@ -754,9 +746,8 @@ define @intrinsic_vfmsub_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu -; CHECK-NEXT: vfmsub.vf v8, ft0, v12 +; CHECK-NEXT: vsetvli zero, a0, 
e16, m4, tu, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsub.nxv16f16.f16( @@ -778,9 +769,8 @@ define @intrinsic_vfmsub_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu -; CHECK-NEXT: vfmsub.vf v8, ft0, v12, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v12, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsub.mask.nxv16f16.f16( @@ -802,9 +792,8 @@ define @intrinsic_vfmsub_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu -; CHECK-NEXT: vfmsub.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsub.nxv1f32.f32( @@ -826,9 +815,8 @@ define @intrinsic_vfmsub_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu -; CHECK-NEXT: vfmsub.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsub.mask.nxv1f32.f32( @@ -850,9 +838,8 @@ define @intrinsic_vfmsub_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu -; CHECK-NEXT: vfmsub.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsub.nxv2f32.f32( @@ -874,9 +861,8 @@ define @intrinsic_vfmsub_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu -; CHECK-NEXT: vfmsub.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsub.mask.nxv2f32.f32( @@ -898,9 +884,8 @@ define @intrinsic_vfmsub_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu -; CHECK-NEXT: vfmsub.vf v8, ft0, v10 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsub.nxv4f32.f32( @@ -922,9 +907,8 @@ define @intrinsic_vfmsub_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu -; CHECK-NEXT: vfmsub.vf v8, ft0, v10, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v10, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsub.mask.nxv4f32.f32( @@ -946,9 +930,8 @@ define @intrinsic_vfmsub_vf_nxv8f32_f32_nxv8f32( %0, float 
%1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu -; CHECK-NEXT: vfmsub.vf v8, ft0, v12 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsub.nxv8f32.f32( @@ -970,9 +953,8 @@ define @intrinsic_vfmsub_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu -; CHECK-NEXT: vfmsub.vf v8, ft0, v12, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v12, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsub.mask.nxv8f32.f32( @@ -994,9 +976,8 @@ define @intrinsic_vfmsub_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu -; CHECK-NEXT: vfmsub.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsub.nxv1f64.f64( @@ -1018,9 +999,8 @@ define @intrinsic_vfmsub_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu -; CHECK-NEXT: vfmsub.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsub.mask.nxv1f64.f64( @@ -1042,9 +1022,8 @@ define @intrinsic_vfmsub_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu -; CHECK-NEXT: vfmsub.vf v8, ft0, v10 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsub.nxv2f64.f64( @@ -1066,9 +1045,8 @@ define @intrinsic_vfmsub_mask_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu -; CHECK-NEXT: vfmsub.vf v8, ft0, v10, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v10, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsub.mask.nxv2f64.f64( @@ -1090,9 +1068,8 @@ define @intrinsic_vfmsub_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu -; CHECK-NEXT: vfmsub.vf v8, ft0, v12 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsub.nxv4f64.f64( @@ -1114,9 +1091,8 @@ define @intrinsic_vfmsub_mask_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu -; CHECK-NEXT: 
vfmsub.vf v8, ft0, v12, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v12, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmsub.mask.nxv4f64.f64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmul-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmul-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfmul-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmul-rv32.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: -target-abi=ilp32d < %s | FileCheck %s declare @llvm.riscv.vfmul.nxv1f16.nxv1f16( , , @@ -687,9 +687,8 @@ define @intrinsic_vfmul_vf_nxv1f16_nxv1f16_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vfmul.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmul.nxv1f16.f16( @@ -711,9 +710,8 @@ define @intrinsic_vfmul_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vfmul.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfmul.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmul.mask.nxv1f16.f16( @@ -734,9 +732,8 @@ define @intrinsic_vfmul_vf_nxv2f16_nxv2f16_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vfmul.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmul.nxv2f16.f16( @@ -758,9 +755,8 @@ define @intrinsic_vfmul_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vfmul.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfmul.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmul.mask.nxv2f16.f16( @@ -781,9 +777,8 @@ define @intrinsic_vfmul_vf_nxv4f16_nxv4f16_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vfmul.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmul.nxv4f16.f16( @@ -805,9 +800,8 @@ define @intrinsic_vfmul_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vfmul.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfmul.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmul.mask.nxv4f16.f16( @@ -828,9 +822,8 @@ define 
@intrinsic_vfmul_vf_nxv8f16_nxv8f16_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vfmul.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmul.nxv8f16.f16( @@ -852,9 +845,8 @@ define @intrinsic_vfmul_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vfmul.vf v8, v10, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfmul.vf v8, v10, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmul.mask.nxv8f16.f16( @@ -875,9 +867,8 @@ define @intrinsic_vfmul_vf_nxv16f16_nxv16f16_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vfmul.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmul.nxv16f16.f16( @@ -899,9 +890,8 @@ define @intrinsic_vfmul_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vfmul.vf v8, v12, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfmul.vf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmul.mask.nxv16f16.f16( @@ -922,9 +912,8 @@ define @intrinsic_vfmul_vf_nxv32f16_nxv32f16_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vfmul.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmul.nxv32f16.f16( @@ -946,9 +935,8 @@ define @intrinsic_vfmul_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vfmul.vf v8, v16, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfmul.vf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmul.mask.nxv32f16.f16( @@ -969,9 +957,8 @@ define @intrinsic_vfmul_vf_nxv1f32_nxv1f32_f32( %0, float %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vfmul.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmul.nxv1f32.f32( @@ -993,9 +980,8 @@ define @intrinsic_vfmul_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: 
vfmul.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfmul.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmul.mask.nxv1f32.f32( @@ -1016,9 +1002,8 @@ define @intrinsic_vfmul_vf_nxv2f32_nxv2f32_f32( %0, float %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vfmul.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmul.nxv2f32.f32( @@ -1040,9 +1025,8 @@ define @intrinsic_vfmul_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vfmul.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfmul.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmul.mask.nxv2f32.f32( @@ -1063,9 +1047,8 @@ define @intrinsic_vfmul_vf_nxv4f32_nxv4f32_f32( %0, float %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vfmul.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmul.nxv4f32.f32( @@ -1087,9 +1070,8 @@ define @intrinsic_vfmul_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vfmul.vf v8, v10, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfmul.vf v8, v10, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmul.mask.nxv4f32.f32( @@ -1110,9 +1092,8 @@ define @intrinsic_vfmul_vf_nxv8f32_nxv8f32_f32( %0, float %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vfmul.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmul.nxv8f32.f32( @@ -1134,9 +1115,8 @@ define @intrinsic_vfmul_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vfmul.vf v8, v12, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfmul.vf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmul.mask.nxv8f32.f32( @@ -1157,9 +1137,8 @@ define @intrinsic_vfmul_vf_nxv16f32_nxv16f32_f32( %0, float %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vfmul.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmul.nxv16f32.f32( @@ -1181,9 +1160,8 @@ define @intrinsic_vfmul_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, 
float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vfmul.vf v8, v16, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfmul.vf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmul.mask.nxv16f32.f32( @@ -1204,13 +1182,8 @@ define @intrinsic_vfmul_vf_nxv1f64_nxv1f64_f64( %0, double %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu -; CHECK-NEXT: vfmul.vf v8, v8, ft0 -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmul.nxv1f64.f64( @@ -1232,13 +1205,8 @@ define @intrinsic_vfmul_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu -; CHECK-NEXT: vfmul.vf v8, v9, ft0, v0.t -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfmul.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmul.mask.nxv1f64.f64( @@ -1259,13 +1227,8 @@ define @intrinsic_vfmul_vf_nxv2f64_nxv2f64_f64( %0, double %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu -; CHECK-NEXT: vfmul.vf v8, v8, ft0 -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmul.nxv2f64.f64( @@ -1287,13 +1250,8 @@ define @intrinsic_vfmul_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu -; CHECK-NEXT: vfmul.vf v8, v10, ft0, v0.t -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfmul.vf v8, v10, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmul.mask.nxv2f64.f64( @@ -1314,13 +1272,8 @@ define @intrinsic_vfmul_vf_nxv4f64_nxv4f64_f64( %0, double %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu -; CHECK-NEXT: vfmul.vf v8, v8, ft0 -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmul.nxv4f64.f64( @@ -1342,13 +1295,8 @@ define @intrinsic_vfmul_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu -; CHECK-NEXT: vfmul.vf v8, v12, ft0, v0.t -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfmul.vf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmul.mask.nxv4f64.f64( @@ -1369,13 +1317,8 @@ define @intrinsic_vfmul_vf_nxv8f64_nxv8f64_f64( %0, double %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu -; CHECK-NEXT: vfmul.vf v8, v8, ft0 -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmul.nxv8f64.f64( @@ -1397,13 +1340,8 @@ define @intrinsic_vfmul_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu -; CHECK-NEXT: vfmul.vf v8, v16, ft0, v0.t -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfmul.vf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmul.mask.nxv8f64.f64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmul-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmul-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfmul-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmul-rv64.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: -target-abi=lp64d < %s | FileCheck %s declare @llvm.riscv.vfmul.nxv1f16( , , @@ -687,9 +687,8 @@ define @intrinsic_vfmul_vf_nxv1f16_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv1f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vfmul.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmul.nxv1f16.f16( @@ -711,9 +710,8 @@ define @intrinsic_vfmul_mask_vf_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv1f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vfmul.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfmul.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmul.mask.nxv1f16.f16( @@ -734,9 +732,8 @@ define @intrinsic_vfmul_vf_nxv2f16_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv2f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vfmul.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmul.nxv2f16.f16( @@ -758,9 +755,8 @@ define @intrinsic_vfmul_mask_vf_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfmul_mask_vf_nxv2f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vfmul.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfmul.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmul.mask.nxv2f16.f16( @@ -781,9 +777,8 @@ define @intrinsic_vfmul_vf_nxv4f16_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv4f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vfmul.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmul.nxv4f16.f16( @@ -805,9 +800,8 @@ define @intrinsic_vfmul_mask_vf_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv4f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vfmul.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfmul.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmul.mask.nxv4f16.f16( @@ -828,9 +822,8 @@ define @intrinsic_vfmul_vf_nxv8f16_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv8f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vfmul.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmul.nxv8f16.f16( @@ -852,9 +845,8 @@ define @intrinsic_vfmul_mask_vf_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv8f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vfmul.vf v8, v10, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfmul.vf v8, v10, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmul.mask.nxv8f16.f16( @@ -875,9 +867,8 @@ define @intrinsic_vfmul_vf_nxv16f16_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv16f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vfmul.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmul.nxv16f16.f16( @@ -899,9 +890,8 @@ define @intrinsic_vfmul_mask_vf_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv16f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vfmul.vf v8, v12, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfmul.vf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmul.mask.nxv16f16.f16( @@ -922,9 +912,8 @@ define @intrinsic_vfmul_vf_nxv32f16_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv32f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vfmul.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmul.nxv32f16.f16( @@ -946,9 +935,8 @@ define 
@intrinsic_vfmul_mask_vf_nxv32f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv32f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vfmul.vf v8, v16, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfmul.vf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmul.mask.nxv32f16.f16( @@ -969,9 +957,8 @@ define @intrinsic_vfmul_vf_nxv1f32_f32( %0, float %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv1f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vfmul.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmul.nxv1f32.f32( @@ -993,9 +980,8 @@ define @intrinsic_vfmul_mask_vf_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv1f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vfmul.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfmul.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmul.mask.nxv1f32.f32( @@ -1016,9 +1002,8 @@ define @intrinsic_vfmul_vf_nxv2f32_f32( %0, float %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv2f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vfmul.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmul.nxv2f32.f32( @@ -1040,9 +1025,8 @@ define @intrinsic_vfmul_mask_vf_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv2f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vfmul.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfmul.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmul.mask.nxv2f32.f32( @@ -1063,9 +1047,8 @@ define @intrinsic_vfmul_vf_nxv4f32_f32( %0, float %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv4f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vfmul.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmul.nxv4f32.f32( @@ -1087,9 +1070,8 @@ define @intrinsic_vfmul_mask_vf_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv4f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vfmul.vf v8, v10, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfmul.vf v8, v10, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmul.mask.nxv4f32.f32( @@ -1110,9 +1092,8 @@ define @intrinsic_vfmul_vf_nxv8f32_f32( %0, float %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv8f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vfmul.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: 
ret entry: %a = call @llvm.riscv.vfmul.nxv8f32.f32( @@ -1134,9 +1115,8 @@ define @intrinsic_vfmul_mask_vf_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv8f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vfmul.vf v8, v12, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfmul.vf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmul.mask.nxv8f32.f32( @@ -1157,9 +1137,8 @@ define @intrinsic_vfmul_vf_nxv16f32_f32( %0, float %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv16f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vfmul.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmul.nxv16f32.f32( @@ -1181,9 +1160,8 @@ define @intrinsic_vfmul_mask_vf_nxv16f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv16f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vfmul.vf v8, v16, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfmul.vf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmul.mask.nxv16f32.f32( @@ -1204,9 +1182,8 @@ define @intrinsic_vfmul_vf_nxv1f64_f64( %0, double %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv1f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vfmul.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmul.nxv1f64.f64( @@ -1228,9 +1205,8 @@ define @intrinsic_vfmul_mask_vf_nxv1f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv1f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vfmul.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfmul.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmul.mask.nxv1f64.f64( @@ -1251,9 +1227,8 @@ define @intrinsic_vfmul_vf_nxv2f64_f64( %0, double %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv2f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vfmul.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmul.nxv2f64.f64( @@ -1275,9 +1250,8 @@ define @intrinsic_vfmul_mask_vf_nxv2f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv2f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vfmul.vf v8, v10, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfmul.vf v8, v10, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmul.mask.nxv2f64.f64( @@ -1298,9 +1272,8 @@ define @intrinsic_vfmul_vf_nxv4f64_f64( %0, double %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv4f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vfmul.vf v8, v8, ft0 +; 
CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmul.nxv4f64.f64( @@ -1322,9 +1295,8 @@ define @intrinsic_vfmul_mask_vf_nxv4f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv4f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vfmul.vf v8, v12, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfmul.vf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmul.mask.nxv4f64.f64( @@ -1345,9 +1317,8 @@ define @intrinsic_vfmul_vf_nxv8f64_f64( %0, double %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv8f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vfmul.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmul.nxv8f64.f64( @@ -1369,9 +1340,8 @@ define @intrinsic_vfmul_mask_vf_nxv8f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv8f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vfmul.vf v8, v16, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfmul.vf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfmul.mask.nxv8f64.f64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv32.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: -target-abi=ilp32d < %s | FileCheck %s declare @llvm.riscv.vfnmacc.nxv1f16.nxv1f16( , , @@ -562,9 +562,8 @@ define @intrinsic_vfnmacc_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu -; CHECK-NEXT: vfnmacc.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vfnmacc.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmacc.nxv1f16.f16( @@ -586,9 +585,8 @@ define @intrinsic_vfnmacc_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu -; CHECK-NEXT: vfnmacc.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vfnmacc.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmacc.mask.nxv1f16.f16( @@ -610,9 +608,8 @@ define @intrinsic_vfnmacc_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu -; CHECK-NEXT: vfnmacc.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vfnmacc.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmacc.nxv2f16.f16( @@ -634,9 +631,8 @@ define @intrinsic_vfnmacc_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, 
%3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu -; CHECK-NEXT: vfnmacc.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vfnmacc.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmacc.mask.nxv2f16.f16( @@ -658,9 +654,8 @@ define @intrinsic_vfnmacc_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu -; CHECK-NEXT: vfnmacc.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vfnmacc.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmacc.nxv4f16.f16( @@ -682,9 +677,8 @@ define @intrinsic_vfnmacc_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu -; CHECK-NEXT: vfnmacc.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vfnmacc.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmacc.mask.nxv4f16.f16( @@ -706,9 +700,8 @@ define @intrinsic_vfnmacc_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu -; CHECK-NEXT: vfnmacc.vf v8, ft0, v10 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vfnmacc.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmacc.nxv8f16.f16( @@ -730,9 +723,8 @@ define @intrinsic_vfnmacc_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu -; CHECK-NEXT: vfnmacc.vf v8, ft0, v10, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vfnmacc.vf v8, fa0, v10, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmacc.mask.nxv8f16.f16( @@ -754,9 +746,8 @@ define @intrinsic_vfnmacc_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu -; CHECK-NEXT: vfnmacc.vf v8, ft0, v12 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vfnmacc.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmacc.nxv16f16.f16( @@ -778,9 +769,8 @@ define @intrinsic_vfnmacc_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu -; CHECK-NEXT: vfnmacc.vf v8, ft0, v12, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vfnmacc.vf v8, fa0, v12, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmacc.mask.nxv16f16.f16( @@ -802,9 +792,8 @@ define @intrinsic_vfnmacc_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, 
mf2, tu, mu -; CHECK-NEXT: vfnmacc.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vfnmacc.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmacc.nxv1f32.f32( @@ -826,9 +815,8 @@ define @intrinsic_vfnmacc_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu -; CHECK-NEXT: vfnmacc.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vfnmacc.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmacc.mask.nxv1f32.f32( @@ -850,9 +838,8 @@ define @intrinsic_vfnmacc_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu -; CHECK-NEXT: vfnmacc.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vfnmacc.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmacc.nxv2f32.f32( @@ -874,9 +861,8 @@ define @intrinsic_vfnmacc_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu -; CHECK-NEXT: vfnmacc.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vfnmacc.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmacc.mask.nxv2f32.f32( @@ -898,9 +884,8 @@ define @intrinsic_vfnmacc_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu -; CHECK-NEXT: vfnmacc.vf v8, ft0, v10 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vfnmacc.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmacc.nxv4f32.f32( @@ -922,9 +907,8 @@ define @intrinsic_vfnmacc_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu -; CHECK-NEXT: vfnmacc.vf v8, ft0, v10, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vfnmacc.vf v8, fa0, v10, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmacc.mask.nxv4f32.f32( @@ -946,9 +930,8 @@ define @intrinsic_vfnmacc_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu -; CHECK-NEXT: vfnmacc.vf v8, ft0, v12 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vfnmacc.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmacc.nxv8f32.f32( @@ -970,9 +953,8 @@ define @intrinsic_vfnmacc_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu -; CHECK-NEXT: vfnmacc.vf v8, ft0, v12, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vfnmacc.vf v8, fa0, v12, v0.t ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vfnmacc.mask.nxv8f32.f32( @@ -994,13 +976,8 @@ define @intrinsic_vfnmacc_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, tu, mu -; CHECK-NEXT: vfnmacc.vf v8, ft0, v9 -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vfnmacc.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmacc.nxv1f64.f64( @@ -1022,13 +999,8 @@ define @intrinsic_vfnmacc_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, tu, mu -; CHECK-NEXT: vfnmacc.vf v8, ft0, v9, v0.t -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vfnmacc.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmacc.mask.nxv1f64.f64( @@ -1050,13 +1022,8 @@ define @intrinsic_vfnmacc_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, tu, mu -; CHECK-NEXT: vfnmacc.vf v8, ft0, v10 -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vfnmacc.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmacc.nxv2f64.f64( @@ -1078,13 +1045,8 @@ define @intrinsic_vfnmacc_mask_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, tu, mu -; CHECK-NEXT: vfnmacc.vf v8, ft0, v10, v0.t -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vfnmacc.vf v8, fa0, v10, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmacc.mask.nxv2f64.f64( @@ -1106,13 +1068,8 @@ define @intrinsic_vfnmacc_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, tu, mu -; CHECK-NEXT: vfnmacc.vf v8, ft0, v12 -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vfnmacc.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmacc.nxv4f64.f64( @@ -1134,13 +1091,8 @@ define @intrinsic_vfnmacc_mask_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, tu, mu -; CHECK-NEXT: vfnmacc.vf v8, ft0, v12, v0.t -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: 
vfnmacc.vf v8, fa0, v12, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmacc.mask.nxv4f64.f64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv64.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: -target-abi=lp64d < %s | FileCheck %s declare @llvm.riscv.vfnmacc.nxv1f16.nxv1f16( , , @@ -562,9 +562,8 @@ define @intrinsic_vfnmacc_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu -; CHECK-NEXT: vfnmacc.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vfnmacc.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmacc.nxv1f16.f16( @@ -586,9 +585,8 @@ define @intrinsic_vfnmacc_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu -; CHECK-NEXT: vfnmacc.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vfnmacc.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmacc.mask.nxv1f16.f16( @@ -610,9 +608,8 @@ define @intrinsic_vfnmacc_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu -; CHECK-NEXT: vfnmacc.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vfnmacc.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmacc.nxv2f16.f16( @@ -634,9 +631,8 @@ define @intrinsic_vfnmacc_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu -; CHECK-NEXT: vfnmacc.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vfnmacc.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmacc.mask.nxv2f16.f16( @@ -658,9 +654,8 @@ define @intrinsic_vfnmacc_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu -; CHECK-NEXT: vfnmacc.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vfnmacc.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmacc.nxv4f16.f16( @@ -682,9 +677,8 @@ define @intrinsic_vfnmacc_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu -; CHECK-NEXT: vfnmacc.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vfnmacc.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmacc.mask.nxv4f16.f16( @@ -706,9 +700,8 @@ define @intrinsic_vfnmacc_vf_nxv8f16_f16_nxv8f16( 
%0, half %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu -; CHECK-NEXT: vfnmacc.vf v8, ft0, v10 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vfnmacc.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmacc.nxv8f16.f16( @@ -730,9 +723,8 @@ define @intrinsic_vfnmacc_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu -; CHECK-NEXT: vfnmacc.vf v8, ft0, v10, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vfnmacc.vf v8, fa0, v10, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmacc.mask.nxv8f16.f16( @@ -754,9 +746,8 @@ define @intrinsic_vfnmacc_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu -; CHECK-NEXT: vfnmacc.vf v8, ft0, v12 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vfnmacc.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmacc.nxv16f16.f16( @@ -778,9 +769,8 @@ define @intrinsic_vfnmacc_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu -; CHECK-NEXT: vfnmacc.vf v8, ft0, v12, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vfnmacc.vf v8, fa0, v12, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmacc.mask.nxv16f16.f16( @@ -802,9 +792,8 @@ define @intrinsic_vfnmacc_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu -; CHECK-NEXT: vfnmacc.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vfnmacc.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmacc.nxv1f32.f32( @@ -826,9 +815,8 @@ define @intrinsic_vfnmacc_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu -; CHECK-NEXT: vfnmacc.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vfnmacc.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmacc.mask.nxv1f32.f32( @@ -850,9 +838,8 @@ define @intrinsic_vfnmacc_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu -; CHECK-NEXT: vfnmacc.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vfnmacc.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmacc.nxv2f32.f32( @@ -874,9 +861,8 @@ define @intrinsic_vfnmacc_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, 
a1, e32, m1, tu, mu -; CHECK-NEXT: vfnmacc.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vfnmacc.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmacc.mask.nxv2f32.f32( @@ -898,9 +884,8 @@ define @intrinsic_vfnmacc_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu -; CHECK-NEXT: vfnmacc.vf v8, ft0, v10 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vfnmacc.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmacc.nxv4f32.f32( @@ -922,9 +907,8 @@ define @intrinsic_vfnmacc_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu -; CHECK-NEXT: vfnmacc.vf v8, ft0, v10, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vfnmacc.vf v8, fa0, v10, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmacc.mask.nxv4f32.f32( @@ -946,9 +930,8 @@ define @intrinsic_vfnmacc_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu -; CHECK-NEXT: vfnmacc.vf v8, ft0, v12 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vfnmacc.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmacc.nxv8f32.f32( @@ -970,9 +953,8 @@ define @intrinsic_vfnmacc_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu -; CHECK-NEXT: vfnmacc.vf v8, ft0, v12, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vfnmacc.vf v8, fa0, v12, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmacc.mask.nxv8f32.f32( @@ -994,9 +976,8 @@ define @intrinsic_vfnmacc_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu -; CHECK-NEXT: vfnmacc.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vfnmacc.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmacc.nxv1f64.f64( @@ -1018,9 +999,8 @@ define @intrinsic_vfnmacc_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu -; CHECK-NEXT: vfnmacc.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vfnmacc.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmacc.mask.nxv1f64.f64( @@ -1042,9 +1022,8 @@ define @intrinsic_vfnmacc_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu -; CHECK-NEXT: vfnmacc.vf v8, ft0, v10 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vfnmacc.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vfnmacc.nxv2f64.f64( @@ -1066,9 +1045,8 @@ define @intrinsic_vfnmacc_mask_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu -; CHECK-NEXT: vfnmacc.vf v8, ft0, v10, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vfnmacc.vf v8, fa0, v10, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmacc.mask.nxv2f64.f64( @@ -1090,9 +1068,8 @@ define @intrinsic_vfnmacc_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu -; CHECK-NEXT: vfnmacc.vf v8, ft0, v12 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vfnmacc.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmacc.nxv4f64.f64( @@ -1114,9 +1091,8 @@ define @intrinsic_vfnmacc_mask_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu -; CHECK-NEXT: vfnmacc.vf v8, ft0, v12, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vfnmacc.vf v8, fa0, v12, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmacc.mask.nxv4f64.f64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv32.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: -target-abi=ilp32d < %s | FileCheck %s declare @llvm.riscv.vfnmadd.nxv1f16.nxv1f16( , , @@ -562,9 +562,8 @@ define @intrinsic_vfnmadd_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu -; CHECK-NEXT: vfnmadd.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmadd.nxv1f16.f16( @@ -586,9 +585,8 @@ define @intrinsic_vfnmadd_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu -; CHECK-NEXT: vfnmadd.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmadd.mask.nxv1f16.f16( @@ -610,9 +608,8 @@ define @intrinsic_vfnmadd_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu -; CHECK-NEXT: vfnmadd.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmadd.nxv2f16.f16( @@ -634,9 +631,8 @@ define @intrinsic_vfnmadd_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, i32 %4) nounwind { ; 
CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu -; CHECK-NEXT: vfnmadd.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmadd.mask.nxv2f16.f16( @@ -658,9 +654,8 @@ define @intrinsic_vfnmadd_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu -; CHECK-NEXT: vfnmadd.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmadd.nxv4f16.f16( @@ -682,9 +677,8 @@ define @intrinsic_vfnmadd_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu -; CHECK-NEXT: vfnmadd.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmadd.mask.nxv4f16.f16( @@ -706,9 +700,8 @@ define @intrinsic_vfnmadd_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu -; CHECK-NEXT: vfnmadd.vf v8, ft0, v10 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmadd.nxv8f16.f16( @@ -730,9 +723,8 @@ define @intrinsic_vfnmadd_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu -; CHECK-NEXT: vfnmadd.vf v8, ft0, v10, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v10, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmadd.mask.nxv8f16.f16( @@ -754,9 +746,8 @@ define @intrinsic_vfnmadd_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu -; CHECK-NEXT: vfnmadd.vf v8, ft0, v12 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmadd.nxv16f16.f16( @@ -778,9 +769,8 @@ define @intrinsic_vfnmadd_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu -; CHECK-NEXT: vfnmadd.vf v8, ft0, v12, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v12, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmadd.mask.nxv16f16.f16( @@ -802,9 +792,8 @@ define @intrinsic_vfnmadd_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu -; CHECK-NEXT: 
vfnmadd.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmadd.nxv1f32.f32( @@ -826,9 +815,8 @@ define @intrinsic_vfnmadd_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu -; CHECK-NEXT: vfnmadd.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmadd.mask.nxv1f32.f32( @@ -850,9 +838,8 @@ define @intrinsic_vfnmadd_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu -; CHECK-NEXT: vfnmadd.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmadd.nxv2f32.f32( @@ -874,9 +861,8 @@ define @intrinsic_vfnmadd_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu -; CHECK-NEXT: vfnmadd.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmadd.mask.nxv2f32.f32( @@ -898,9 +884,8 @@ define @intrinsic_vfnmadd_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu -; CHECK-NEXT: vfnmadd.vf v8, ft0, v10 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmadd.nxv4f32.f32( @@ -922,9 +907,8 @@ define @intrinsic_vfnmadd_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu -; CHECK-NEXT: vfnmadd.vf v8, ft0, v10, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v10, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmadd.mask.nxv4f32.f32( @@ -946,9 +930,8 @@ define @intrinsic_vfnmadd_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu -; CHECK-NEXT: vfnmadd.vf v8, ft0, v12 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmadd.nxv8f32.f32( @@ -970,9 +953,8 @@ define @intrinsic_vfnmadd_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu -; CHECK-NEXT: vfnmadd.vf v8, ft0, v12, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v12, v0.t ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vfnmadd.mask.nxv8f32.f32( @@ -994,13 +976,8 @@ define @intrinsic_vfnmadd_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, tu, mu -; CHECK-NEXT: vfnmadd.vf v8, ft0, v9 -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmadd.nxv1f64.f64( @@ -1022,13 +999,8 @@ define @intrinsic_vfnmadd_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, tu, mu -; CHECK-NEXT: vfnmadd.vf v8, ft0, v9, v0.t -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmadd.mask.nxv1f64.f64( @@ -1050,13 +1022,8 @@ define @intrinsic_vfnmadd_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, tu, mu -; CHECK-NEXT: vfnmadd.vf v8, ft0, v10 -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmadd.nxv2f64.f64( @@ -1078,13 +1045,8 @@ define @intrinsic_vfnmadd_mask_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, tu, mu -; CHECK-NEXT: vfnmadd.vf v8, ft0, v10, v0.t -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v10, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmadd.mask.nxv2f64.f64( @@ -1106,13 +1068,8 @@ define @intrinsic_vfnmadd_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, tu, mu -; CHECK-NEXT: vfnmadd.vf v8, ft0, v12 -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmadd.nxv4f64.f64( @@ -1134,13 +1091,8 @@ define @intrinsic_vfnmadd_mask_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, tu, mu -; CHECK-NEXT: vfnmadd.vf v8, ft0, v12, v0.t -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: 
vfnmadd.vf v8, fa0, v12, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmadd.mask.nxv4f64.f64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv64.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: -target-abi=lp64d < %s | FileCheck %s declare @llvm.riscv.vfnmadd.nxv1f16.nxv1f16( , , @@ -562,9 +562,8 @@ define @intrinsic_vfnmadd_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu -; CHECK-NEXT: vfnmadd.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmadd.nxv1f16.f16( @@ -586,9 +585,8 @@ define @intrinsic_vfnmadd_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu -; CHECK-NEXT: vfnmadd.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmadd.mask.nxv1f16.f16( @@ -610,9 +608,8 @@ define @intrinsic_vfnmadd_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu -; CHECK-NEXT: vfnmadd.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmadd.nxv2f16.f16( @@ -634,9 +631,8 @@ define @intrinsic_vfnmadd_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu -; CHECK-NEXT: vfnmadd.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmadd.mask.nxv2f16.f16( @@ -658,9 +654,8 @@ define @intrinsic_vfnmadd_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu -; CHECK-NEXT: vfnmadd.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmadd.nxv4f16.f16( @@ -682,9 +677,8 @@ define @intrinsic_vfnmadd_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu -; CHECK-NEXT: vfnmadd.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmadd.mask.nxv4f16.f16( @@ -706,9 +700,8 @@ define @intrinsic_vfnmadd_vf_nxv8f16_f16_nxv8f16( 
%0, half %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu -; CHECK-NEXT: vfnmadd.vf v8, ft0, v10 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmadd.nxv8f16.f16( @@ -730,9 +723,8 @@ define @intrinsic_vfnmadd_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu -; CHECK-NEXT: vfnmadd.vf v8, ft0, v10, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v10, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmadd.mask.nxv8f16.f16( @@ -754,9 +746,8 @@ define @intrinsic_vfnmadd_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu -; CHECK-NEXT: vfnmadd.vf v8, ft0, v12 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmadd.nxv16f16.f16( @@ -778,9 +769,8 @@ define @intrinsic_vfnmadd_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu -; CHECK-NEXT: vfnmadd.vf v8, ft0, v12, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v12, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmadd.mask.nxv16f16.f16( @@ -802,9 +792,8 @@ define @intrinsic_vfnmadd_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu -; CHECK-NEXT: vfnmadd.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmadd.nxv1f32.f32( @@ -826,9 +815,8 @@ define @intrinsic_vfnmadd_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu -; CHECK-NEXT: vfnmadd.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmadd.mask.nxv1f32.f32( @@ -850,9 +838,8 @@ define @intrinsic_vfnmadd_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu -; CHECK-NEXT: vfnmadd.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmadd.nxv2f32.f32( @@ -874,9 +861,8 @@ define @intrinsic_vfnmadd_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, 
a1, e32, m1, tu, mu -; CHECK-NEXT: vfnmadd.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmadd.mask.nxv2f32.f32( @@ -898,9 +884,8 @@ define @intrinsic_vfnmadd_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu -; CHECK-NEXT: vfnmadd.vf v8, ft0, v10 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmadd.nxv4f32.f32( @@ -922,9 +907,8 @@ define @intrinsic_vfnmadd_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu -; CHECK-NEXT: vfnmadd.vf v8, ft0, v10, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v10, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmadd.mask.nxv4f32.f32( @@ -946,9 +930,8 @@ define @intrinsic_vfnmadd_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu -; CHECK-NEXT: vfnmadd.vf v8, ft0, v12 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmadd.nxv8f32.f32( @@ -970,9 +953,8 @@ define @intrinsic_vfnmadd_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu -; CHECK-NEXT: vfnmadd.vf v8, ft0, v12, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v12, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmadd.mask.nxv8f32.f32( @@ -994,9 +976,8 @@ define @intrinsic_vfnmadd_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu -; CHECK-NEXT: vfnmadd.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmadd.nxv1f64.f64( @@ -1018,9 +999,8 @@ define @intrinsic_vfnmadd_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu -; CHECK-NEXT: vfnmadd.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmadd.mask.nxv1f64.f64( @@ -1042,9 +1022,8 @@ define @intrinsic_vfnmadd_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu -; CHECK-NEXT: vfnmadd.vf v8, ft0, v10 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vfnmadd.nxv2f64.f64( @@ -1066,9 +1045,8 @@ define @intrinsic_vfnmadd_mask_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu -; CHECK-NEXT: vfnmadd.vf v8, ft0, v10, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v10, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmadd.mask.nxv2f64.f64( @@ -1090,9 +1068,8 @@ define @intrinsic_vfnmadd_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu -; CHECK-NEXT: vfnmadd.vf v8, ft0, v12 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmadd.nxv4f64.f64( @@ -1114,9 +1091,8 @@ define @intrinsic_vfnmadd_mask_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu -; CHECK-NEXT: vfnmadd.vf v8, ft0, v12, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v12, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmadd.mask.nxv4f64.f64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv32.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: -target-abi=ilp32d < %s | FileCheck %s declare @llvm.riscv.vfnmsac.nxv1f16.nxv1f16( , , @@ -562,9 +562,8 @@ define @intrinsic_vfnmsac_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu -; CHECK-NEXT: vfnmsac.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vfnmsac.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmsac.nxv1f16.f16( @@ -586,9 +585,8 @@ define @intrinsic_vfnmsac_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu -; CHECK-NEXT: vfnmsac.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vfnmsac.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmsac.mask.nxv1f16.f16( @@ -610,9 +608,8 @@ define @intrinsic_vfnmsac_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu -; CHECK-NEXT: vfnmsac.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vfnmsac.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmsac.nxv2f16.f16( @@ -634,9 +631,8 @@ define @intrinsic_vfnmsac_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, i32 %4) nounwind { ; 
CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu -; CHECK-NEXT: vfnmsac.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vfnmsac.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmsac.mask.nxv2f16.f16( @@ -658,9 +654,8 @@ define @intrinsic_vfnmsac_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu -; CHECK-NEXT: vfnmsac.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vfnmsac.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmsac.nxv4f16.f16( @@ -682,9 +677,8 @@ define @intrinsic_vfnmsac_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu -; CHECK-NEXT: vfnmsac.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vfnmsac.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmsac.mask.nxv4f16.f16( @@ -706,9 +700,8 @@ define @intrinsic_vfnmsac_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu -; CHECK-NEXT: vfnmsac.vf v8, ft0, v10 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vfnmsac.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmsac.nxv8f16.f16( @@ -730,9 +723,8 @@ define @intrinsic_vfnmsac_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu -; CHECK-NEXT: vfnmsac.vf v8, ft0, v10, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vfnmsac.vf v8, fa0, v10, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmsac.mask.nxv8f16.f16( @@ -754,9 +746,8 @@ define @intrinsic_vfnmsac_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu -; CHECK-NEXT: vfnmsac.vf v8, ft0, v12 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vfnmsac.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmsac.nxv16f16.f16( @@ -778,9 +769,8 @@ define @intrinsic_vfnmsac_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu -; CHECK-NEXT: vfnmsac.vf v8, ft0, v12, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vfnmsac.vf v8, fa0, v12, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmsac.mask.nxv16f16.f16( @@ -802,9 +792,8 @@ define @intrinsic_vfnmsac_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu -; CHECK-NEXT: 
vfnmsac.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vfnmsac.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmsac.nxv1f32.f32( @@ -826,9 +815,8 @@ define @intrinsic_vfnmsac_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu -; CHECK-NEXT: vfnmsac.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vfnmsac.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmsac.mask.nxv1f32.f32( @@ -850,9 +838,8 @@ define @intrinsic_vfnmsac_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu -; CHECK-NEXT: vfnmsac.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vfnmsac.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmsac.nxv2f32.f32( @@ -874,9 +861,8 @@ define @intrinsic_vfnmsac_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu -; CHECK-NEXT: vfnmsac.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vfnmsac.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmsac.mask.nxv2f32.f32( @@ -898,9 +884,8 @@ define @intrinsic_vfnmsac_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu -; CHECK-NEXT: vfnmsac.vf v8, ft0, v10 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vfnmsac.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmsac.nxv4f32.f32( @@ -922,9 +907,8 @@ define @intrinsic_vfnmsac_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu -; CHECK-NEXT: vfnmsac.vf v8, ft0, v10, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vfnmsac.vf v8, fa0, v10, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmsac.mask.nxv4f32.f32( @@ -946,9 +930,8 @@ define @intrinsic_vfnmsac_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu -; CHECK-NEXT: vfnmsac.vf v8, ft0, v12 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vfnmsac.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmsac.nxv8f32.f32( @@ -970,9 +953,8 @@ define @intrinsic_vfnmsac_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu -; CHECK-NEXT: vfnmsac.vf v8, ft0, v12, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vfnmsac.vf v8, fa0, v12, v0.t ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vfnmsac.mask.nxv8f32.f32( @@ -994,13 +976,8 @@ define @intrinsic_vfnmsac_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, tu, mu -; CHECK-NEXT: vfnmsac.vf v8, ft0, v9 -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vfnmsac.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmsac.nxv1f64.f64( @@ -1022,13 +999,8 @@ define @intrinsic_vfnmsac_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, tu, mu -; CHECK-NEXT: vfnmsac.vf v8, ft0, v9, v0.t -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vfnmsac.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmsac.mask.nxv1f64.f64( @@ -1050,13 +1022,8 @@ define @intrinsic_vfnmsac_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, tu, mu -; CHECK-NEXT: vfnmsac.vf v8, ft0, v10 -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vfnmsac.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmsac.nxv2f64.f64( @@ -1078,13 +1045,8 @@ define @intrinsic_vfnmsac_mask_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, tu, mu -; CHECK-NEXT: vfnmsac.vf v8, ft0, v10, v0.t -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vfnmsac.vf v8, fa0, v10, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmsac.mask.nxv2f64.f64( @@ -1106,13 +1068,8 @@ define @intrinsic_vfnmsac_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, tu, mu -; CHECK-NEXT: vfnmsac.vf v8, ft0, v12 -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vfnmsac.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmsac.nxv4f64.f64( @@ -1134,13 +1091,8 @@ define @intrinsic_vfnmsac_mask_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, tu, mu -; CHECK-NEXT: vfnmsac.vf v8, ft0, v12, v0.t -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: 
vfnmsac.vf v8, fa0, v12, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmsac.mask.nxv4f64.f64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv64.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: -target-abi=lp64d < %s | FileCheck %s declare @llvm.riscv.vfnmsac.nxv1f16.nxv1f16( , , @@ -562,9 +562,8 @@ define @intrinsic_vfnmsac_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu -; CHECK-NEXT: vfnmsac.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vfnmsac.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmsac.nxv1f16.f16( @@ -586,9 +585,8 @@ define @intrinsic_vfnmsac_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu -; CHECK-NEXT: vfnmsac.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vfnmsac.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmsac.mask.nxv1f16.f16( @@ -610,9 +608,8 @@ define @intrinsic_vfnmsac_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu -; CHECK-NEXT: vfnmsac.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vfnmsac.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmsac.nxv2f16.f16( @@ -634,9 +631,8 @@ define @intrinsic_vfnmsac_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu -; CHECK-NEXT: vfnmsac.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vfnmsac.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmsac.mask.nxv2f16.f16( @@ -658,9 +654,8 @@ define @intrinsic_vfnmsac_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu -; CHECK-NEXT: vfnmsac.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vfnmsac.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmsac.nxv4f16.f16( @@ -682,9 +677,8 @@ define @intrinsic_vfnmsac_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu -; CHECK-NEXT: vfnmsac.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vfnmsac.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmsac.mask.nxv4f16.f16( @@ -706,9 +700,8 @@ define @intrinsic_vfnmsac_vf_nxv8f16_f16_nxv8f16( 
%0, half %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu -; CHECK-NEXT: vfnmsac.vf v8, ft0, v10 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vfnmsac.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmsac.nxv8f16.f16( @@ -730,9 +723,8 @@ define @intrinsic_vfnmsac_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu -; CHECK-NEXT: vfnmsac.vf v8, ft0, v10, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vfnmsac.vf v8, fa0, v10, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmsac.mask.nxv8f16.f16( @@ -754,9 +746,8 @@ define @intrinsic_vfnmsac_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu -; CHECK-NEXT: vfnmsac.vf v8, ft0, v12 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vfnmsac.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmsac.nxv16f16.f16( @@ -778,9 +769,8 @@ define @intrinsic_vfnmsac_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu -; CHECK-NEXT: vfnmsac.vf v8, ft0, v12, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vfnmsac.vf v8, fa0, v12, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmsac.mask.nxv16f16.f16( @@ -802,9 +792,8 @@ define @intrinsic_vfnmsac_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu -; CHECK-NEXT: vfnmsac.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vfnmsac.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmsac.nxv1f32.f32( @@ -826,9 +815,8 @@ define @intrinsic_vfnmsac_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu -; CHECK-NEXT: vfnmsac.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vfnmsac.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmsac.mask.nxv1f32.f32( @@ -850,9 +838,8 @@ define @intrinsic_vfnmsac_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu -; CHECK-NEXT: vfnmsac.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vfnmsac.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmsac.nxv2f32.f32( @@ -874,9 +861,8 @@ define @intrinsic_vfnmsac_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, 
a1, e32, m1, tu, mu -; CHECK-NEXT: vfnmsac.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vfnmsac.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmsac.mask.nxv2f32.f32( @@ -898,9 +884,8 @@ define @intrinsic_vfnmsac_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu -; CHECK-NEXT: vfnmsac.vf v8, ft0, v10 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vfnmsac.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmsac.nxv4f32.f32( @@ -922,9 +907,8 @@ define @intrinsic_vfnmsac_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu -; CHECK-NEXT: vfnmsac.vf v8, ft0, v10, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vfnmsac.vf v8, fa0, v10, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmsac.mask.nxv4f32.f32( @@ -946,9 +930,8 @@ define @intrinsic_vfnmsac_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu -; CHECK-NEXT: vfnmsac.vf v8, ft0, v12 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vfnmsac.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmsac.nxv8f32.f32( @@ -970,9 +953,8 @@ define @intrinsic_vfnmsac_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu -; CHECK-NEXT: vfnmsac.vf v8, ft0, v12, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vfnmsac.vf v8, fa0, v12, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmsac.mask.nxv8f32.f32( @@ -994,9 +976,8 @@ define @intrinsic_vfnmsac_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu -; CHECK-NEXT: vfnmsac.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vfnmsac.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmsac.nxv1f64.f64( @@ -1018,9 +999,8 @@ define @intrinsic_vfnmsac_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu -; CHECK-NEXT: vfnmsac.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vfnmsac.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmsac.mask.nxv1f64.f64( @@ -1042,9 +1022,8 @@ define @intrinsic_vfnmsac_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu -; CHECK-NEXT: vfnmsac.vf v8, ft0, v10 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vfnmsac.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vfnmsac.nxv2f64.f64( @@ -1066,9 +1045,8 @@ define @intrinsic_vfnmsac_mask_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu -; CHECK-NEXT: vfnmsac.vf v8, ft0, v10, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vfnmsac.vf v8, fa0, v10, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmsac.mask.nxv2f64.f64( @@ -1090,9 +1068,8 @@ define @intrinsic_vfnmsac_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu -; CHECK-NEXT: vfnmsac.vf v8, ft0, v12 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vfnmsac.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmsac.nxv4f64.f64( @@ -1114,9 +1091,8 @@ define @intrinsic_vfnmsac_mask_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu -; CHECK-NEXT: vfnmsac.vf v8, ft0, v12, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vfnmsac.vf v8, fa0, v12, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmsac.mask.nxv4f64.f64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv32.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: -target-abi=ilp32d < %s | FileCheck %s declare @llvm.riscv.vfnmsub.nxv1f16.nxv1f16( , , @@ -562,9 +562,8 @@ define @intrinsic_vfnmsub_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu -; CHECK-NEXT: vfnmsub.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmsub.nxv1f16.f16( @@ -586,9 +585,8 @@ define @intrinsic_vfnmsub_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu -; CHECK-NEXT: vfnmsub.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmsub.mask.nxv1f16.f16( @@ -610,9 +608,8 @@ define @intrinsic_vfnmsub_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu -; CHECK-NEXT: vfnmsub.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmsub.nxv2f16.f16( @@ -634,9 +631,8 @@ define @intrinsic_vfnmsub_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, i32 %4) nounwind { ; 
CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu -; CHECK-NEXT: vfnmsub.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmsub.mask.nxv2f16.f16( @@ -658,9 +654,8 @@ define @intrinsic_vfnmsub_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu -; CHECK-NEXT: vfnmsub.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmsub.nxv4f16.f16( @@ -682,9 +677,8 @@ define @intrinsic_vfnmsub_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu -; CHECK-NEXT: vfnmsub.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmsub.mask.nxv4f16.f16( @@ -706,9 +700,8 @@ define @intrinsic_vfnmsub_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu -; CHECK-NEXT: vfnmsub.vf v8, ft0, v10 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmsub.nxv8f16.f16( @@ -730,9 +723,8 @@ define @intrinsic_vfnmsub_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu -; CHECK-NEXT: vfnmsub.vf v8, ft0, v10, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v10, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmsub.mask.nxv8f16.f16( @@ -754,9 +746,8 @@ define @intrinsic_vfnmsub_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu -; CHECK-NEXT: vfnmsub.vf v8, ft0, v12 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmsub.nxv16f16.f16( @@ -778,9 +769,8 @@ define @intrinsic_vfnmsub_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu -; CHECK-NEXT: vfnmsub.vf v8, ft0, v12, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v12, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmsub.mask.nxv16f16.f16( @@ -802,9 +792,8 @@ define @intrinsic_vfnmsub_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu -; CHECK-NEXT: 
vfnmsub.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmsub.nxv1f32.f32( @@ -826,9 +815,8 @@ define @intrinsic_vfnmsub_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu -; CHECK-NEXT: vfnmsub.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmsub.mask.nxv1f32.f32( @@ -850,9 +838,8 @@ define @intrinsic_vfnmsub_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu -; CHECK-NEXT: vfnmsub.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmsub.nxv2f32.f32( @@ -874,9 +861,8 @@ define @intrinsic_vfnmsub_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu -; CHECK-NEXT: vfnmsub.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmsub.mask.nxv2f32.f32( @@ -898,9 +884,8 @@ define @intrinsic_vfnmsub_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu -; CHECK-NEXT: vfnmsub.vf v8, ft0, v10 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmsub.nxv4f32.f32( @@ -922,9 +907,8 @@ define @intrinsic_vfnmsub_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu -; CHECK-NEXT: vfnmsub.vf v8, ft0, v10, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v10, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmsub.mask.nxv4f32.f32( @@ -946,9 +930,8 @@ define @intrinsic_vfnmsub_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu -; CHECK-NEXT: vfnmsub.vf v8, ft0, v12 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmsub.nxv8f32.f32( @@ -970,9 +953,8 @@ define @intrinsic_vfnmsub_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu -; CHECK-NEXT: vfnmsub.vf v8, ft0, v12, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v12, v0.t ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vfnmsub.mask.nxv8f32.f32( @@ -994,13 +976,8 @@ define @intrinsic_vfnmsub_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, tu, mu -; CHECK-NEXT: vfnmsub.vf v8, ft0, v9 -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmsub.nxv1f64.f64( @@ -1022,13 +999,8 @@ define @intrinsic_vfnmsub_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, tu, mu -; CHECK-NEXT: vfnmsub.vf v8, ft0, v9, v0.t -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmsub.mask.nxv1f64.f64( @@ -1050,13 +1022,8 @@ define @intrinsic_vfnmsub_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, tu, mu -; CHECK-NEXT: vfnmsub.vf v8, ft0, v10 -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmsub.nxv2f64.f64( @@ -1078,13 +1045,8 @@ define @intrinsic_vfnmsub_mask_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, tu, mu -; CHECK-NEXT: vfnmsub.vf v8, ft0, v10, v0.t -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v10, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmsub.mask.nxv2f64.f64( @@ -1106,13 +1068,8 @@ define @intrinsic_vfnmsub_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, tu, mu -; CHECK-NEXT: vfnmsub.vf v8, ft0, v12 -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmsub.nxv4f64.f64( @@ -1134,13 +1091,8 @@ define @intrinsic_vfnmsub_mask_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, tu, mu -; CHECK-NEXT: vfnmsub.vf v8, ft0, v12, v0.t -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: 
vfnmsub.vf v8, fa0, v12, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmsub.mask.nxv4f64.f64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv64.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: -target-abi=lp64d < %s | FileCheck %s declare @llvm.riscv.vfnmsub.nxv1f16.nxv1f16( , , @@ -562,9 +562,8 @@ define @intrinsic_vfnmsub_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu -; CHECK-NEXT: vfnmsub.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmsub.nxv1f16.f16( @@ -586,9 +585,8 @@ define @intrinsic_vfnmsub_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu -; CHECK-NEXT: vfnmsub.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmsub.mask.nxv1f16.f16( @@ -610,9 +608,8 @@ define @intrinsic_vfnmsub_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu -; CHECK-NEXT: vfnmsub.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmsub.nxv2f16.f16( @@ -634,9 +631,8 @@ define @intrinsic_vfnmsub_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu -; CHECK-NEXT: vfnmsub.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmsub.mask.nxv2f16.f16( @@ -658,9 +654,8 @@ define @intrinsic_vfnmsub_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu -; CHECK-NEXT: vfnmsub.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmsub.nxv4f16.f16( @@ -682,9 +677,8 @@ define @intrinsic_vfnmsub_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu -; CHECK-NEXT: vfnmsub.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmsub.mask.nxv4f16.f16( @@ -706,9 +700,8 @@ define @intrinsic_vfnmsub_vf_nxv8f16_f16_nxv8f16( 
%0, half %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu -; CHECK-NEXT: vfnmsub.vf v8, ft0, v10 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmsub.nxv8f16.f16( @@ -730,9 +723,8 @@ define @intrinsic_vfnmsub_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu -; CHECK-NEXT: vfnmsub.vf v8, ft0, v10, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v10, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmsub.mask.nxv8f16.f16( @@ -754,9 +746,8 @@ define @intrinsic_vfnmsub_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu -; CHECK-NEXT: vfnmsub.vf v8, ft0, v12 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmsub.nxv16f16.f16( @@ -778,9 +769,8 @@ define @intrinsic_vfnmsub_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu -; CHECK-NEXT: vfnmsub.vf v8, ft0, v12, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v12, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmsub.mask.nxv16f16.f16( @@ -802,9 +792,8 @@ define @intrinsic_vfnmsub_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu -; CHECK-NEXT: vfnmsub.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmsub.nxv1f32.f32( @@ -826,9 +815,8 @@ define @intrinsic_vfnmsub_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu -; CHECK-NEXT: vfnmsub.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmsub.mask.nxv1f32.f32( @@ -850,9 +838,8 @@ define @intrinsic_vfnmsub_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu -; CHECK-NEXT: vfnmsub.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmsub.nxv2f32.f32( @@ -874,9 +861,8 @@ define @intrinsic_vfnmsub_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, 
a1, e32, m1, tu, mu -; CHECK-NEXT: vfnmsub.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmsub.mask.nxv2f32.f32( @@ -898,9 +884,8 @@ define @intrinsic_vfnmsub_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu -; CHECK-NEXT: vfnmsub.vf v8, ft0, v10 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmsub.nxv4f32.f32( @@ -922,9 +907,8 @@ define @intrinsic_vfnmsub_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu -; CHECK-NEXT: vfnmsub.vf v8, ft0, v10, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v10, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmsub.mask.nxv4f32.f32( @@ -946,9 +930,8 @@ define @intrinsic_vfnmsub_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu -; CHECK-NEXT: vfnmsub.vf v8, ft0, v12 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmsub.nxv8f32.f32( @@ -970,9 +953,8 @@ define @intrinsic_vfnmsub_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu -; CHECK-NEXT: vfnmsub.vf v8, ft0, v12, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v12, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmsub.mask.nxv8f32.f32( @@ -994,9 +976,8 @@ define @intrinsic_vfnmsub_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu -; CHECK-NEXT: vfnmsub.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmsub.nxv1f64.f64( @@ -1018,9 +999,8 @@ define @intrinsic_vfnmsub_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu -; CHECK-NEXT: vfnmsub.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmsub.mask.nxv1f64.f64( @@ -1042,9 +1022,8 @@ define @intrinsic_vfnmsub_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu -; CHECK-NEXT: vfnmsub.vf v8, ft0, v10 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vfnmsub.nxv2f64.f64( @@ -1066,9 +1045,8 @@ define @intrinsic_vfnmsub_mask_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu -; CHECK-NEXT: vfnmsub.vf v8, ft0, v10, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v10, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmsub.mask.nxv2f64.f64( @@ -1090,9 +1068,8 @@ define @intrinsic_vfnmsub_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu -; CHECK-NEXT: vfnmsub.vf v8, ft0, v12 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmsub.nxv4f64.f64( @@ -1114,9 +1091,8 @@ define @intrinsic_vfnmsub_mask_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu -; CHECK-NEXT: vfnmsub.vf v8, ft0, v12, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v12, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfnmsub.mask.nxv4f64.f64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrdiv-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfrdiv-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfrdiv-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfrdiv-rv32.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: -target-abi=ilp32d < %s | FileCheck %s declare @llvm.riscv.vfrdiv.nxv1f16.f16( , half, @@ -9,9 +9,8 @@ define @intrinsic_vfrdiv_vf_nxv1f16_nxv1f16_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vfrdiv.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrdiv.nxv1f16.f16( @@ -33,9 +32,8 @@ define @intrinsic_vfrdiv_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vfrdiv.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfrdiv.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrdiv.mask.nxv1f16.f16( @@ -56,9 +54,8 @@ define @intrinsic_vfrdiv_vf_nxv2f16_nxv2f16_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vfrdiv.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrdiv.nxv2f16.f16( @@ -80,9 +77,8 @@ define @intrinsic_vfrdiv_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfrdiv_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vfrdiv.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfrdiv.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrdiv.mask.nxv2f16.f16( @@ -103,9 +99,8 @@ define @intrinsic_vfrdiv_vf_nxv4f16_nxv4f16_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vfrdiv.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrdiv.nxv4f16.f16( @@ -127,9 +122,8 @@ define @intrinsic_vfrdiv_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vfrdiv.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfrdiv.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrdiv.mask.nxv4f16.f16( @@ -150,9 +144,8 @@ define @intrinsic_vfrdiv_vf_nxv8f16_nxv8f16_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vfrdiv.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrdiv.nxv8f16.f16( @@ -174,9 +167,8 @@ define @intrinsic_vfrdiv_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vfrdiv.vf v8, v10, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfrdiv.vf v8, v10, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrdiv.mask.nxv8f16.f16( @@ -197,9 +189,8 @@ define @intrinsic_vfrdiv_vf_nxv16f16_nxv16f16_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vfrdiv.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrdiv.nxv16f16.f16( @@ -221,9 +212,8 @@ define @intrinsic_vfrdiv_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vfrdiv.vf v8, v12, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfrdiv.vf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrdiv.mask.nxv16f16.f16( @@ -244,9 +234,8 @@ define @intrinsic_vfrdiv_vf_nxv32f16_nxv32f16_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vfrdiv.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, 
mu +; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrdiv.nxv32f16.f16( @@ -268,9 +257,8 @@ define @intrinsic_vfrdiv_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vfrdiv.vf v8, v16, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfrdiv.vf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrdiv.mask.nxv32f16.f16( @@ -291,9 +279,8 @@ define @intrinsic_vfrdiv_vf_nxv1f32_nxv1f32_f32( %0, float %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vfrdiv.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrdiv.nxv1f32.f32( @@ -315,9 +302,8 @@ define @intrinsic_vfrdiv_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vfrdiv.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfrdiv.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrdiv.mask.nxv1f32.f32( @@ -338,9 +324,8 @@ define @intrinsic_vfrdiv_vf_nxv2f32_nxv2f32_f32( %0, float %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vfrdiv.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrdiv.nxv2f32.f32( @@ -362,9 +347,8 @@ define @intrinsic_vfrdiv_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vfrdiv.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfrdiv.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrdiv.mask.nxv2f32.f32( @@ -385,9 +369,8 @@ define @intrinsic_vfrdiv_vf_nxv4f32_nxv4f32_f32( %0, float %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vfrdiv.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrdiv.nxv4f32.f32( @@ -409,9 +392,8 @@ define @intrinsic_vfrdiv_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vfrdiv.vf v8, v10, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfrdiv.vf v8, v10, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrdiv.mask.nxv4f32.f32( @@ -432,9 +414,8 @@ define @intrinsic_vfrdiv_vf_nxv8f32_nxv8f32_f32( %0, float %1, i32 %2) nounwind { ; 
CHECK-LABEL: intrinsic_vfrdiv_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vfrdiv.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrdiv.nxv8f32.f32( @@ -456,9 +437,8 @@ define @intrinsic_vfrdiv_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vfrdiv.vf v8, v12, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfrdiv.vf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrdiv.mask.nxv8f32.f32( @@ -479,9 +459,8 @@ define @intrinsic_vfrdiv_vf_nxv16f32_nxv16f32_f32( %0, float %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vfrdiv.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrdiv.nxv16f32.f32( @@ -503,9 +482,8 @@ define @intrinsic_vfrdiv_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vfrdiv.vf v8, v16, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfrdiv.vf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrdiv.mask.nxv16f32.f32( @@ -526,13 +504,8 @@ define @intrinsic_vfrdiv_vf_nxv1f64_nxv1f64_f64( %0, double %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu -; CHECK-NEXT: vfrdiv.vf v8, v8, ft0 -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrdiv.nxv1f64.f64( @@ -554,13 +527,8 @@ define @intrinsic_vfrdiv_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu -; CHECK-NEXT: vfrdiv.vf v8, v9, ft0, v0.t -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfrdiv.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrdiv.mask.nxv1f64.f64( @@ -581,13 +549,8 @@ define @intrinsic_vfrdiv_vf_nxv2f64_nxv2f64_f64( %0, double %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu -; CHECK-NEXT: vfrdiv.vf v8, v8, ft0 -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vfrdiv.nxv2f64.f64( @@ -609,13 +572,8 @@ define @intrinsic_vfrdiv_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu -; CHECK-NEXT: vfrdiv.vf v8, v10, ft0, v0.t -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfrdiv.vf v8, v10, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrdiv.mask.nxv2f64.f64( @@ -636,13 +594,8 @@ define @intrinsic_vfrdiv_vf_nxv4f64_nxv4f64_f64( %0, double %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu -; CHECK-NEXT: vfrdiv.vf v8, v8, ft0 -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrdiv.nxv4f64.f64( @@ -664,13 +617,8 @@ define @intrinsic_vfrdiv_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu -; CHECK-NEXT: vfrdiv.vf v8, v12, ft0, v0.t -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfrdiv.vf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrdiv.mask.nxv4f64.f64( @@ -691,13 +639,8 @@ define @intrinsic_vfrdiv_vf_nxv8f64_nxv8f64_f64( %0, double %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu -; CHECK-NEXT: vfrdiv.vf v8, v8, ft0 -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrdiv.nxv8f64.f64( @@ -719,13 +662,8 @@ define @intrinsic_vfrdiv_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu -; CHECK-NEXT: vfrdiv.vf v8, v16, ft0, v0.t -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfrdiv.vf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrdiv.mask.nxv8f64.f64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrdiv-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfrdiv-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfrdiv-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfrdiv-rv64.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: -target-abi=lp64d < %s | FileCheck %s declare @llvm.riscv.vfrdiv.nxv1f16.f16( , half, @@ -9,9 +9,8 @@ define 
@intrinsic_vfrdiv_vf_nxv1f16_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv1f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vfrdiv.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrdiv.nxv1f16.f16( @@ -33,9 +32,8 @@ define @intrinsic_vfrdiv_mask_vf_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv1f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vfrdiv.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfrdiv.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrdiv.mask.nxv1f16.f16( @@ -56,9 +54,8 @@ define @intrinsic_vfrdiv_vf_nxv2f16_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv2f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vfrdiv.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrdiv.nxv2f16.f16( @@ -80,9 +77,8 @@ define @intrinsic_vfrdiv_mask_vf_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv2f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vfrdiv.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfrdiv.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrdiv.mask.nxv2f16.f16( @@ -103,9 +99,8 @@ define @intrinsic_vfrdiv_vf_nxv4f16_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv4f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vfrdiv.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrdiv.nxv4f16.f16( @@ -127,9 +122,8 @@ define @intrinsic_vfrdiv_mask_vf_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv4f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vfrdiv.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfrdiv.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrdiv.mask.nxv4f16.f16( @@ -150,9 +144,8 @@ define @intrinsic_vfrdiv_vf_nxv8f16_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv8f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vfrdiv.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrdiv.nxv8f16.f16( @@ -174,9 +167,8 @@ define @intrinsic_vfrdiv_mask_vf_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv8f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vfrdiv.vf v8, v10, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfrdiv.vf v8, v10, fa0, v0.t ; 
CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrdiv.mask.nxv8f16.f16( @@ -197,9 +189,8 @@ define @intrinsic_vfrdiv_vf_nxv16f16_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv16f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vfrdiv.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrdiv.nxv16f16.f16( @@ -221,9 +212,8 @@ define @intrinsic_vfrdiv_mask_vf_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv16f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vfrdiv.vf v8, v12, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfrdiv.vf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrdiv.mask.nxv16f16.f16( @@ -244,9 +234,8 @@ define @intrinsic_vfrdiv_vf_nxv32f16_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv32f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vfrdiv.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrdiv.nxv32f16.f16( @@ -268,9 +257,8 @@ define @intrinsic_vfrdiv_mask_vf_nxv32f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv32f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vfrdiv.vf v8, v16, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfrdiv.vf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrdiv.mask.nxv32f16.f16( @@ -291,9 +279,8 @@ define @intrinsic_vfrdiv_vf_nxv1f32_f32( %0, float %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv1f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vfrdiv.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrdiv.nxv1f32.f32( @@ -315,9 +302,8 @@ define @intrinsic_vfrdiv_mask_vf_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv1f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vfrdiv.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfrdiv.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrdiv.mask.nxv1f32.f32( @@ -338,9 +324,8 @@ define @intrinsic_vfrdiv_vf_nxv2f32_f32( %0, float %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv2f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vfrdiv.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrdiv.nxv2f32.f32( @@ -362,9 +347,8 @@ define @intrinsic_vfrdiv_mask_vf_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv2f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: 
vfrdiv.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfrdiv.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrdiv.mask.nxv2f32.f32( @@ -385,9 +369,8 @@ define @intrinsic_vfrdiv_vf_nxv4f32_f32( %0, float %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv4f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vfrdiv.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrdiv.nxv4f32.f32( @@ -409,9 +392,8 @@ define @intrinsic_vfrdiv_mask_vf_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv4f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vfrdiv.vf v8, v10, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfrdiv.vf v8, v10, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrdiv.mask.nxv4f32.f32( @@ -432,9 +414,8 @@ define @intrinsic_vfrdiv_vf_nxv8f32_f32( %0, float %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv8f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vfrdiv.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrdiv.nxv8f32.f32( @@ -456,9 +437,8 @@ define @intrinsic_vfrdiv_mask_vf_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv8f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vfrdiv.vf v8, v12, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfrdiv.vf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrdiv.mask.nxv8f32.f32( @@ -479,9 +459,8 @@ define @intrinsic_vfrdiv_vf_nxv16f32_f32( %0, float %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv16f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vfrdiv.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrdiv.nxv16f32.f32( @@ -503,9 +482,8 @@ define @intrinsic_vfrdiv_mask_vf_nxv16f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv16f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vfrdiv.vf v8, v16, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfrdiv.vf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrdiv.mask.nxv16f32.f32( @@ -526,9 +504,8 @@ define @intrinsic_vfrdiv_vf_nxv1f64_f64( %0, double %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv1f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vfrdiv.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrdiv.nxv1f64.f64( @@ -550,9 +527,8 @@ define @intrinsic_vfrdiv_mask_vf_nxv1f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv1f64_f64: ; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vfrdiv.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfrdiv.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrdiv.mask.nxv1f64.f64( @@ -573,9 +549,8 @@ define @intrinsic_vfrdiv_vf_nxv2f64_f64( %0, double %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv2f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vfrdiv.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrdiv.nxv2f64.f64( @@ -597,9 +572,8 @@ define @intrinsic_vfrdiv_mask_vf_nxv2f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv2f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vfrdiv.vf v8, v10, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfrdiv.vf v8, v10, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrdiv.mask.nxv2f64.f64( @@ -620,9 +594,8 @@ define @intrinsic_vfrdiv_vf_nxv4f64_f64( %0, double %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv4f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vfrdiv.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrdiv.nxv4f64.f64( @@ -644,9 +617,8 @@ define @intrinsic_vfrdiv_mask_vf_nxv4f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv4f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vfrdiv.vf v8, v12, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfrdiv.vf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrdiv.mask.nxv4f64.f64( @@ -667,9 +639,8 @@ define @intrinsic_vfrdiv_vf_nxv8f64_f64( %0, double %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv8f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vfrdiv.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrdiv.nxv8f64.f64( @@ -691,9 +662,8 @@ define @intrinsic_vfrdiv_mask_vf_nxv8f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv8f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vfrdiv.vf v8, v16, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfrdiv.vf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrdiv.mask.nxv8f64.f64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfrsub-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfrsub-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfrsub-rv32.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: -target-abi=ilp32d < %s | FileCheck %s declare @llvm.riscv.vfrsub.nxv1f16.f16( , half, 
@@ -9,9 +9,8 @@ define @intrinsic_vfrsub_vf_nxv1f16_nxv1f16_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfrsub.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrsub.nxv1f16.f16( @@ -33,9 +32,8 @@ define @intrinsic_vfrsub_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfrsub.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrsub.mask.nxv1f16.f16( @@ -56,9 +54,8 @@ define @intrinsic_vfrsub_vf_nxv2f16_nxv2f16_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfrsub.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrsub.nxv2f16.f16( @@ -80,9 +77,8 @@ define @intrinsic_vfrsub_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfrsub.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrsub.mask.nxv2f16.f16( @@ -103,9 +99,8 @@ define @intrinsic_vfrsub_vf_nxv4f16_nxv4f16_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfrsub.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrsub.nxv4f16.f16( @@ -127,9 +122,8 @@ define @intrinsic_vfrsub_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfrsub.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrsub.mask.nxv4f16.f16( @@ -150,9 +144,8 @@ define @intrinsic_vfrsub_vf_nxv8f16_nxv8f16_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfrsub.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrsub.nxv8f16.f16( @@ -174,9 +167,8 @@ define @intrinsic_vfrsub_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, 
ta, mu -; CHECK-NEXT: vfrsub.vf v8, v10, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfrsub.vf v8, v10, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrsub.mask.nxv8f16.f16( @@ -197,9 +189,8 @@ define @intrinsic_vfrsub_vf_nxv16f16_nxv16f16_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfrsub.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrsub.nxv16f16.f16( @@ -221,9 +212,8 @@ define @intrinsic_vfrsub_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v12, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfrsub.vf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrsub.mask.nxv16f16.f16( @@ -244,9 +234,8 @@ define @intrinsic_vfrsub_vf_nxv32f16_nxv32f16_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfrsub.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrsub.nxv32f16.f16( @@ -268,9 +257,8 @@ define @intrinsic_vfrsub_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v16, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfrsub.vf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrsub.mask.nxv32f16.f16( @@ -291,9 +279,8 @@ define @intrinsic_vfrsub_vf_nxv1f32_nxv1f32_f32( %0, float %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfrsub.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrsub.nxv1f32.f32( @@ -315,9 +302,8 @@ define @intrinsic_vfrsub_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfrsub.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrsub.mask.nxv1f32.f32( @@ -338,9 +324,8 @@ define @intrinsic_vfrsub_vf_nxv2f32_nxv2f32_f32( %0, float %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfrsub.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrsub.nxv2f32.f32( @@ -362,9 +347,8 @@ 
define @intrinsic_vfrsub_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfrsub.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrsub.mask.nxv2f32.f32( @@ -385,9 +369,8 @@ define @intrinsic_vfrsub_vf_nxv4f32_nxv4f32_f32( %0, float %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfrsub.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrsub.nxv4f32.f32( @@ -409,9 +392,8 @@ define @intrinsic_vfrsub_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v10, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfrsub.vf v8, v10, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrsub.mask.nxv4f32.f32( @@ -432,9 +414,8 @@ define @intrinsic_vfrsub_vf_nxv8f32_nxv8f32_f32( %0, float %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfrsub.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrsub.nxv8f32.f32( @@ -456,9 +437,8 @@ define @intrinsic_vfrsub_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v12, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfrsub.vf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrsub.mask.nxv8f32.f32( @@ -479,9 +459,8 @@ define @intrinsic_vfrsub_vf_nxv16f32_nxv16f32_f32( %0, float %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfrsub.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrsub.nxv16f32.f32( @@ -503,9 +482,8 @@ define @intrinsic_vfrsub_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v16, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfrsub.vf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrsub.mask.nxv16f32.f32( @@ -526,13 +504,8 @@ define @intrinsic_vfrsub_vf_nxv1f64_nxv1f64_f64( %0, double %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: 
sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v8, ft0 -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfrsub.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrsub.nxv1f64.f64( @@ -554,13 +527,8 @@ define @intrinsic_vfrsub_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v9, ft0, v0.t -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfrsub.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrsub.mask.nxv1f64.f64( @@ -581,13 +549,8 @@ define @intrinsic_vfrsub_vf_nxv2f64_nxv2f64_f64( %0, double %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v8, ft0 -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfrsub.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrsub.nxv2f64.f64( @@ -609,13 +572,8 @@ define @intrinsic_vfrsub_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v10, ft0, v0.t -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfrsub.vf v8, v10, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrsub.mask.nxv2f64.f64( @@ -636,13 +594,8 @@ define @intrinsic_vfrsub_vf_nxv4f64_nxv4f64_f64( %0, double %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v8, ft0 -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfrsub.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrsub.nxv4f64.f64( @@ -664,13 +617,8 @@ define @intrinsic_vfrsub_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v12, ft0, v0.t -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfrsub.vf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrsub.mask.nxv4f64.f64( @@ -691,13 +639,8 @@ define @intrinsic_vfrsub_vf_nxv8f64_nxv8f64_f64( %0, double %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; 
CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v8, ft0 -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfrsub.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrsub.nxv8f64.f64( @@ -719,13 +662,8 @@ define @intrinsic_vfrsub_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v16, ft0, v0.t -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfrsub.vf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrsub.mask.nxv8f64.f64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfrsub-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfrsub-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfrsub-rv64.ll @@ -1,7 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -mattr=+zfh \ ; RUN: -mattr=+d -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: -target-abi=lp64d < %s | FileCheck %s declare @llvm.riscv.vfrsub.nxv1f16.f16( , half, @@ -10,9 +10,8 @@ define @intrinsic_vfrsub_vf_nxv1f16_nxv1f16_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfrsub.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrsub.nxv1f16.f16( @@ -34,9 +33,8 @@ define @intrinsic_vfrsub_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfrsub.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrsub.mask.nxv1f16.f16( @@ -57,9 +55,8 @@ define @intrinsic_vfrsub_vf_nxv2f16_nxv2f16_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfrsub.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrsub.nxv2f16.f16( @@ -81,9 +78,8 @@ define @intrinsic_vfrsub_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfrsub.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrsub.mask.nxv2f16.f16( @@ -104,9 +100,8 @@ define @intrinsic_vfrsub_vf_nxv4f16_nxv4f16_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfrsub.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrsub.nxv4f16.f16( @@ -128,9 +123,8 @@ define @intrinsic_vfrsub_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfrsub.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrsub.mask.nxv4f16.f16( @@ -151,9 +145,8 @@ define @intrinsic_vfrsub_vf_nxv8f16_nxv8f16_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfrsub.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrsub.nxv8f16.f16( @@ -175,9 +168,8 @@ define @intrinsic_vfrsub_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v10, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfrsub.vf v8, v10, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrsub.mask.nxv8f16.f16( @@ -198,9 +190,8 @@ define @intrinsic_vfrsub_vf_nxv16f16_nxv16f16_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfrsub.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrsub.nxv16f16.f16( @@ -222,9 +213,8 @@ define @intrinsic_vfrsub_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v12, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfrsub.vf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrsub.mask.nxv16f16.f16( @@ -245,9 +235,8 @@ define @intrinsic_vfrsub_vf_nxv32f16_nxv32f16_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfrsub.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrsub.nxv32f16.f16( @@ -269,9 +258,8 @@ define @intrinsic_vfrsub_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v16, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfrsub.vf v8, v16, fa0, v0.t ; 
CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrsub.mask.nxv32f16.f16( @@ -292,9 +280,8 @@ define @intrinsic_vfrsub_vf_nxv1f32_nxv1f32_f32( %0, float %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfrsub.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrsub.nxv1f32.f32( @@ -316,9 +303,8 @@ define @intrinsic_vfrsub_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfrsub.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrsub.mask.nxv1f32.f32( @@ -339,9 +325,8 @@ define @intrinsic_vfrsub_vf_nxv2f32_nxv2f32_f32( %0, float %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfrsub.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrsub.nxv2f32.f32( @@ -363,9 +348,8 @@ define @intrinsic_vfrsub_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfrsub.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrsub.mask.nxv2f32.f32( @@ -386,9 +370,8 @@ define @intrinsic_vfrsub_vf_nxv4f32_nxv4f32_f32( %0, float %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfrsub.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrsub.nxv4f32.f32( @@ -410,9 +393,8 @@ define @intrinsic_vfrsub_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v10, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfrsub.vf v8, v10, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrsub.mask.nxv4f32.f32( @@ -433,9 +415,8 @@ define @intrinsic_vfrsub_vf_nxv8f32_nxv8f32_f32( %0, float %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfrsub.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrsub.nxv8f32.f32( @@ -457,9 +438,8 @@ define @intrinsic_vfrsub_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v12, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfrsub.vf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrsub.mask.nxv8f32.f32( @@ -480,9 +460,8 @@ define @intrinsic_vfrsub_vf_nxv16f32_nxv16f32_f32( %0, float %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfrsub.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrsub.nxv16f32.f32( @@ -504,9 +483,8 @@ define @intrinsic_vfrsub_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v16, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfrsub.vf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrsub.mask.nxv16f32.f32( @@ -527,9 +505,8 @@ define @intrinsic_vfrsub_vf_nxv1f64_nxv1f64_f64( %0, double %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfrsub.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrsub.nxv1f64.f64( @@ -551,9 +528,8 @@ define @intrinsic_vfrsub_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfrsub.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrsub.mask.nxv1f64.f64( @@ -574,9 +550,8 @@ define @intrinsic_vfrsub_vf_nxv2f64_nxv2f64_f64( %0, double %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfrsub.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrsub.nxv2f64.f64( @@ -598,9 +573,8 @@ define @intrinsic_vfrsub_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v10, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfrsub.vf v8, v10, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrsub.mask.nxv2f64.f64( @@ -621,9 +595,8 @@ define @intrinsic_vfrsub_vf_nxv4f64_nxv4f64_f64( %0, double %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfrsub.vf v8, v8, fa0 ; 
CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrsub.nxv4f64.f64( @@ -645,9 +618,8 @@ define @intrinsic_vfrsub_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v12, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfrsub.vf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrsub.mask.nxv4f64.f64( @@ -668,9 +640,8 @@ define @intrinsic_vfrsub_vf_nxv8f64_nxv8f64_f64( %0, double %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfrsub.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrsub.nxv8f64.f64( @@ -692,9 +663,8 @@ define @intrinsic_vfrsub_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vfrsub.vf v8, v16, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfrsub.vf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfrsub.mask.nxv8f64.f64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv32.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: -target-abi=ilp32d < %s | FileCheck %s declare @llvm.riscv.vfsgnj.nxv1f16.nxv1f16( , , @@ -687,9 +687,8 @@ define @intrinsic_vfsgnj_vf_nxv1f16_nxv1f16_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnj.nxv1f16.f16( @@ -711,9 +710,8 @@ define @intrinsic_vfsgnj_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfsgnj.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnj.mask.nxv1f16.f16( @@ -734,9 +732,8 @@ define @intrinsic_vfsgnj_vf_nxv2f16_nxv2f16_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnj.nxv2f16.f16( @@ -758,9 +755,8 @@ define @intrinsic_vfsgnj_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfsgnj_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfsgnj.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnj.mask.nxv2f16.f16( @@ -781,9 +777,8 @@ define @intrinsic_vfsgnj_vf_nxv4f16_nxv4f16_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnj.nxv4f16.f16( @@ -805,9 +800,8 @@ define @intrinsic_vfsgnj_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfsgnj.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnj.mask.nxv4f16.f16( @@ -828,9 +822,8 @@ define @intrinsic_vfsgnj_vf_nxv8f16_nxv8f16_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnj.nxv8f16.f16( @@ -852,9 +845,8 @@ define @intrinsic_vfsgnj_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v10, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfsgnj.vf v8, v10, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnj.mask.nxv8f16.f16( @@ -875,9 +867,8 @@ define @intrinsic_vfsgnj_vf_nxv16f16_nxv16f16_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnj.nxv16f16.f16( @@ -899,9 +890,8 @@ define @intrinsic_vfsgnj_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v12, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfsgnj.vf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnj.mask.nxv16f16.f16( @@ -922,9 +912,8 @@ define @intrinsic_vfsgnj_vf_nxv32f16_nxv32f16_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m8, 
ta, mu +; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnj.nxv32f16.f16( @@ -946,9 +935,8 @@ define @intrinsic_vfsgnj_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v16, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfsgnj.vf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnj.mask.nxv32f16.f16( @@ -969,9 +957,8 @@ define @intrinsic_vfsgnj_vf_nxv1f32_nxv1f32_f32( %0, float %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnj.nxv1f32.f32( @@ -993,9 +980,8 @@ define @intrinsic_vfsgnj_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfsgnj.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnj.mask.nxv1f32.f32( @@ -1016,9 +1002,8 @@ define @intrinsic_vfsgnj_vf_nxv2f32_nxv2f32_f32( %0, float %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnj.nxv2f32.f32( @@ -1040,9 +1025,8 @@ define @intrinsic_vfsgnj_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfsgnj.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnj.mask.nxv2f32.f32( @@ -1063,9 +1047,8 @@ define @intrinsic_vfsgnj_vf_nxv4f32_nxv4f32_f32( %0, float %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnj.nxv4f32.f32( @@ -1087,9 +1070,8 @@ define @intrinsic_vfsgnj_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v10, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfsgnj.vf v8, v10, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnj.mask.nxv4f32.f32( @@ -1110,9 +1092,8 @@ define @intrinsic_vfsgnj_vf_nxv8f32_nxv8f32_f32( %0, float %1, i32 %2) 
nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnj.nxv8f32.f32( @@ -1134,9 +1115,8 @@ define @intrinsic_vfsgnj_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v12, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfsgnj.vf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnj.mask.nxv8f32.f32( @@ -1157,9 +1137,8 @@ define @intrinsic_vfsgnj_vf_nxv16f32_nxv16f32_f32( %0, float %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnj.nxv16f32.f32( @@ -1181,9 +1160,8 @@ define @intrinsic_vfsgnj_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v16, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfsgnj.vf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnj.mask.nxv16f32.f32( @@ -1204,13 +1182,8 @@ define @intrinsic_vfsgnj_vf_nxv1f64_nxv1f64_f64( %0, double %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v8, ft0 -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnj.nxv1f64.f64( @@ -1232,13 +1205,8 @@ define @intrinsic_vfsgnj_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v9, ft0, v0.t -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfsgnj.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnj.mask.nxv1f64.f64( @@ -1259,13 +1227,8 @@ define @intrinsic_vfsgnj_vf_nxv2f64_nxv2f64_f64( %0, double %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v8, ft0 -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = 
call @llvm.riscv.vfsgnj.nxv2f64.f64( @@ -1287,13 +1250,8 @@ define @intrinsic_vfsgnj_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v10, ft0, v0.t -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfsgnj.vf v8, v10, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnj.mask.nxv2f64.f64( @@ -1314,13 +1272,8 @@ define @intrinsic_vfsgnj_vf_nxv4f64_nxv4f64_f64( %0, double %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v8, ft0 -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnj.nxv4f64.f64( @@ -1342,13 +1295,8 @@ define @intrinsic_vfsgnj_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v12, ft0, v0.t -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfsgnj.vf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnj.mask.nxv4f64.f64( @@ -1369,13 +1317,8 @@ define @intrinsic_vfsgnj_vf_nxv8f64_nxv8f64_f64( %0, double %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v8, ft0 -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnj.nxv8f64.f64( @@ -1397,13 +1340,8 @@ define @intrinsic_vfsgnj_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v16, ft0, v0.t -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfsgnj.vf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnj.mask.nxv8f64.f64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv64.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: -target-abi=lp64d < %s | FileCheck %s declare @llvm.riscv.vfsgnj.nxv1f16( , , @@ -687,9 +687,8 @@ define 
@intrinsic_vfsgnj_vf_nxv1f16_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv1f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnj.nxv1f16.f16( @@ -711,9 +710,8 @@ define @intrinsic_vfsgnj_mask_vf_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv1f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfsgnj.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnj.mask.nxv1f16.f16( @@ -734,9 +732,8 @@ define @intrinsic_vfsgnj_vf_nxv2f16_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv2f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnj.nxv2f16.f16( @@ -758,9 +755,8 @@ define @intrinsic_vfsgnj_mask_vf_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv2f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfsgnj.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnj.mask.nxv2f16.f16( @@ -781,9 +777,8 @@ define @intrinsic_vfsgnj_vf_nxv4f16_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv4f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnj.nxv4f16.f16( @@ -805,9 +800,8 @@ define @intrinsic_vfsgnj_mask_vf_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv4f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfsgnj.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnj.mask.nxv4f16.f16( @@ -828,9 +822,8 @@ define @intrinsic_vfsgnj_vf_nxv8f16_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv8f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnj.nxv8f16.f16( @@ -852,9 +845,8 @@ define @intrinsic_vfsgnj_mask_vf_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv8f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v10, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfsgnj.vf v8, v10, fa0, v0.t 
; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnj.mask.nxv8f16.f16( @@ -875,9 +867,8 @@ define @intrinsic_vfsgnj_vf_nxv16f16_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv16f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnj.nxv16f16.f16( @@ -899,9 +890,8 @@ define @intrinsic_vfsgnj_mask_vf_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv16f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v12, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfsgnj.vf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnj.mask.nxv16f16.f16( @@ -922,9 +912,8 @@ define @intrinsic_vfsgnj_vf_nxv32f16_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv32f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnj.nxv32f16.f16( @@ -946,9 +935,8 @@ define @intrinsic_vfsgnj_mask_vf_nxv32f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv32f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v16, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfsgnj.vf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnj.mask.nxv32f16.f16( @@ -969,9 +957,8 @@ define @intrinsic_vfsgnj_vf_nxv1f32_f32( %0, float %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv1f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnj.nxv1f32.f32( @@ -993,9 +980,8 @@ define @intrinsic_vfsgnj_mask_vf_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv1f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfsgnj.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnj.mask.nxv1f32.f32( @@ -1016,9 +1002,8 @@ define @intrinsic_vfsgnj_vf_nxv2f32_f32( %0, float %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv2f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnj.nxv2f32.f32( @@ -1040,9 +1025,8 @@ define @intrinsic_vfsgnj_mask_vf_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv2f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: 
vfsgnj.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfsgnj.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnj.mask.nxv2f32.f32( @@ -1063,9 +1047,8 @@ define @intrinsic_vfsgnj_vf_nxv4f32_f32( %0, float %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv4f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnj.nxv4f32.f32( @@ -1087,9 +1070,8 @@ define @intrinsic_vfsgnj_mask_vf_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv4f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v10, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfsgnj.vf v8, v10, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnj.mask.nxv4f32.f32( @@ -1110,9 +1092,8 @@ define @intrinsic_vfsgnj_vf_nxv8f32_f32( %0, float %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv8f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnj.nxv8f32.f32( @@ -1134,9 +1115,8 @@ define @intrinsic_vfsgnj_mask_vf_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv8f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v12, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfsgnj.vf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnj.mask.nxv8f32.f32( @@ -1157,9 +1137,8 @@ define @intrinsic_vfsgnj_vf_nxv16f32_f32( %0, float %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv16f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnj.nxv16f32.f32( @@ -1181,9 +1160,8 @@ define @intrinsic_vfsgnj_mask_vf_nxv16f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv16f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v16, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfsgnj.vf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnj.mask.nxv16f32.f32( @@ -1204,9 +1182,8 @@ define @intrinsic_vfsgnj_vf_nxv1f64_f64( %0, double %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv1f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnj.nxv1f64.f64( @@ -1228,9 +1205,8 @@ define @intrinsic_vfsgnj_mask_vf_nxv1f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfsgnj_mask_vf_nxv1f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfsgnj.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnj.mask.nxv1f64.f64( @@ -1251,9 +1227,8 @@ define @intrinsic_vfsgnj_vf_nxv2f64_f64( %0, double %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv2f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnj.nxv2f64.f64( @@ -1275,9 +1250,8 @@ define @intrinsic_vfsgnj_mask_vf_nxv2f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv2f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v10, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfsgnj.vf v8, v10, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnj.mask.nxv2f64.f64( @@ -1298,9 +1272,8 @@ define @intrinsic_vfsgnj_vf_nxv4f64_f64( %0, double %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv4f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnj.nxv4f64.f64( @@ -1322,9 +1295,8 @@ define @intrinsic_vfsgnj_mask_vf_nxv4f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv4f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v12, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfsgnj.vf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnj.mask.nxv4f64.f64( @@ -1345,9 +1317,8 @@ define @intrinsic_vfsgnj_vf_nxv8f64_f64( %0, double %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv8f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnj.nxv8f64.f64( @@ -1369,9 +1340,8 @@ define @intrinsic_vfsgnj_mask_vf_nxv8f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv8f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vf v8, v16, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfsgnj.vf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnj.mask.nxv8f64.f64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv32.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: -target-abi=ilp32d < %s 
| FileCheck %s declare @llvm.riscv.vfsgnjn.nxv1f16.nxv1f16( , , @@ -687,9 +687,8 @@ define @intrinsic_vfsgnjn_vf_nxv1f16_nxv1f16_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjn.nxv1f16.f16( @@ -711,9 +710,8 @@ define @intrinsic_vfsgnjn_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfsgnjn.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjn.mask.nxv1f16.f16( @@ -734,9 +732,8 @@ define @intrinsic_vfsgnjn_vf_nxv2f16_nxv2f16_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjn.nxv2f16.f16( @@ -758,9 +755,8 @@ define @intrinsic_vfsgnjn_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfsgnjn.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjn.mask.nxv2f16.f16( @@ -781,9 +777,8 @@ define @intrinsic_vfsgnjn_vf_nxv4f16_nxv4f16_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjn.nxv4f16.f16( @@ -805,9 +800,8 @@ define @intrinsic_vfsgnjn_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfsgnjn.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjn.mask.nxv4f16.f16( @@ -828,9 +822,8 @@ define @intrinsic_vfsgnjn_vf_nxv8f16_nxv8f16_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjn.nxv8f16.f16( @@ -852,9 +845,8 @@ define @intrinsic_vfsgnjn_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfsgnjn_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v10, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfsgnjn.vf v8, v10, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjn.mask.nxv8f16.f16( @@ -875,9 +867,8 @@ define @intrinsic_vfsgnjn_vf_nxv16f16_nxv16f16_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjn.nxv16f16.f16( @@ -899,9 +890,8 @@ define @intrinsic_vfsgnjn_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v12, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfsgnjn.vf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjn.mask.nxv16f16.f16( @@ -922,9 +912,8 @@ define @intrinsic_vfsgnjn_vf_nxv32f16_nxv32f16_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjn.nxv32f16.f16( @@ -946,9 +935,8 @@ define @intrinsic_vfsgnjn_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v16, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfsgnjn.vf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjn.mask.nxv32f16.f16( @@ -969,9 +957,8 @@ define @intrinsic_vfsgnjn_vf_nxv1f32_nxv1f32_f32( %0, float %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjn.nxv1f32.f32( @@ -993,9 +980,8 @@ define @intrinsic_vfsgnjn_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfsgnjn.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjn.mask.nxv1f32.f32( @@ -1016,9 +1002,8 @@ define @intrinsic_vfsgnjn_vf_nxv2f32_nxv2f32_f32( %0, float %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, 
v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjn.nxv2f32.f32( @@ -1040,9 +1025,8 @@ define @intrinsic_vfsgnjn_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfsgnjn.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjn.mask.nxv2f32.f32( @@ -1063,9 +1047,8 @@ define @intrinsic_vfsgnjn_vf_nxv4f32_nxv4f32_f32( %0, float %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjn.nxv4f32.f32( @@ -1087,9 +1070,8 @@ define @intrinsic_vfsgnjn_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v10, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfsgnjn.vf v8, v10, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjn.mask.nxv4f32.f32( @@ -1110,9 +1092,8 @@ define @intrinsic_vfsgnjn_vf_nxv8f32_nxv8f32_f32( %0, float %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjn.nxv8f32.f32( @@ -1134,9 +1115,8 @@ define @intrinsic_vfsgnjn_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v12, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfsgnjn.vf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjn.mask.nxv8f32.f32( @@ -1157,9 +1137,8 @@ define @intrinsic_vfsgnjn_vf_nxv16f32_nxv16f32_f32( %0, float %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjn.nxv16f32.f32( @@ -1181,9 +1160,8 @@ define @intrinsic_vfsgnjn_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v16, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfsgnjn.vf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjn.mask.nxv16f32.f32( 
@@ -1204,13 +1182,8 @@ define @intrinsic_vfsgnjn_vf_nxv1f64_nxv1f64_f64( %0, double %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v8, ft0 -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjn.nxv1f64.f64( @@ -1232,13 +1205,8 @@ define @intrinsic_vfsgnjn_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v9, ft0, v0.t -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfsgnjn.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjn.mask.nxv1f64.f64( @@ -1259,13 +1227,8 @@ define @intrinsic_vfsgnjn_vf_nxv2f64_nxv2f64_f64( %0, double %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v8, ft0 -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjn.nxv2f64.f64( @@ -1287,13 +1250,8 @@ define @intrinsic_vfsgnjn_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v10, ft0, v0.t -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfsgnjn.vf v8, v10, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjn.mask.nxv2f64.f64( @@ -1314,13 +1272,8 @@ define @intrinsic_vfsgnjn_vf_nxv4f64_nxv4f64_f64( %0, double %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v8, ft0 -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjn.nxv4f64.f64( @@ -1342,13 +1295,8 @@ define @intrinsic_vfsgnjn_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v12, ft0, v0.t -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfsgnjn.vf v8, v12, fa0, v0.t ; CHECK-NEXT: ret 
entry: %a = call @llvm.riscv.vfsgnjn.mask.nxv4f64.f64( @@ -1369,13 +1317,8 @@ define @intrinsic_vfsgnjn_vf_nxv8f64_nxv8f64_f64( %0, double %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v8, ft0 -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjn.nxv8f64.f64( @@ -1397,13 +1340,8 @@ define @intrinsic_vfsgnjn_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v16, ft0, v0.t -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfsgnjn.vf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjn.mask.nxv8f64.f64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv64.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: -target-abi=lp64d < %s | FileCheck %s declare @llvm.riscv.vfsgnjn.nxv1f16( , , @@ -687,9 +687,8 @@ define @intrinsic_vfsgnjn_vf_nxv1f16_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv1f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjn.nxv1f16.f16( @@ -711,9 +710,8 @@ define @intrinsic_vfsgnjn_mask_vf_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv1f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfsgnjn.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjn.mask.nxv1f16.f16( @@ -734,9 +732,8 @@ define @intrinsic_vfsgnjn_vf_nxv2f16_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv2f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjn.nxv2f16.f16( @@ -758,9 +755,8 @@ define @intrinsic_vfsgnjn_mask_vf_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv2f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfsgnjn.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: 
%a = call @llvm.riscv.vfsgnjn.mask.nxv2f16.f16( @@ -781,9 +777,8 @@ define @intrinsic_vfsgnjn_vf_nxv4f16_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv4f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjn.nxv4f16.f16( @@ -805,9 +800,8 @@ define @intrinsic_vfsgnjn_mask_vf_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv4f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfsgnjn.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjn.mask.nxv4f16.f16( @@ -828,9 +822,8 @@ define @intrinsic_vfsgnjn_vf_nxv8f16_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv8f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjn.nxv8f16.f16( @@ -852,9 +845,8 @@ define @intrinsic_vfsgnjn_mask_vf_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv8f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v10, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfsgnjn.vf v8, v10, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjn.mask.nxv8f16.f16( @@ -875,9 +867,8 @@ define @intrinsic_vfsgnjn_vf_nxv16f16_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv16f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjn.nxv16f16.f16( @@ -899,9 +890,8 @@ define @intrinsic_vfsgnjn_mask_vf_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv16f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v12, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfsgnjn.vf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjn.mask.nxv16f16.f16( @@ -922,9 +912,8 @@ define @intrinsic_vfsgnjn_vf_nxv32f16_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv32f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjn.nxv32f16.f16( @@ -946,9 +935,8 @@ define @intrinsic_vfsgnjn_mask_vf_nxv32f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv32f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: 
vfsgnjn.vf v8, v16, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfsgnjn.vf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjn.mask.nxv32f16.f16( @@ -969,9 +957,8 @@ define @intrinsic_vfsgnjn_vf_nxv1f32_f32( %0, float %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv1f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjn.nxv1f32.f32( @@ -993,9 +980,8 @@ define @intrinsic_vfsgnjn_mask_vf_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv1f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfsgnjn.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjn.mask.nxv1f32.f32( @@ -1016,9 +1002,8 @@ define @intrinsic_vfsgnjn_vf_nxv2f32_f32( %0, float %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv2f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjn.nxv2f32.f32( @@ -1040,9 +1025,8 @@ define @intrinsic_vfsgnjn_mask_vf_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv2f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfsgnjn.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjn.mask.nxv2f32.f32( @@ -1063,9 +1047,8 @@ define @intrinsic_vfsgnjn_vf_nxv4f32_f32( %0, float %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv4f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjn.nxv4f32.f32( @@ -1087,9 +1070,8 @@ define @intrinsic_vfsgnjn_mask_vf_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv4f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v10, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfsgnjn.vf v8, v10, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjn.mask.nxv4f32.f32( @@ -1110,9 +1092,8 @@ define @intrinsic_vfsgnjn_vf_nxv8f32_f32( %0, float %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv8f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjn.nxv8f32.f32( @@ -1134,9 +1115,8 @@ define @intrinsic_vfsgnjn_mask_vf_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfsgnjn_mask_vf_nxv8f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v12, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfsgnjn.vf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjn.mask.nxv8f32.f32( @@ -1157,9 +1137,8 @@ define @intrinsic_vfsgnjn_vf_nxv16f32_f32( %0, float %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv16f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjn.nxv16f32.f32( @@ -1181,9 +1160,8 @@ define @intrinsic_vfsgnjn_mask_vf_nxv16f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv16f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v16, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfsgnjn.vf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjn.mask.nxv16f32.f32( @@ -1204,9 +1182,8 @@ define @intrinsic_vfsgnjn_vf_nxv1f64_f64( %0, double %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv1f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjn.nxv1f64.f64( @@ -1228,9 +1205,8 @@ define @intrinsic_vfsgnjn_mask_vf_nxv1f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv1f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfsgnjn.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjn.mask.nxv1f64.f64( @@ -1251,9 +1227,8 @@ define @intrinsic_vfsgnjn_vf_nxv2f64_f64( %0, double %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv2f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjn.nxv2f64.f64( @@ -1275,9 +1250,8 @@ define @intrinsic_vfsgnjn_mask_vf_nxv2f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv2f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v10, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfsgnjn.vf v8, v10, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjn.mask.nxv2f64.f64( @@ -1298,9 +1272,8 @@ define @intrinsic_vfsgnjn_vf_nxv4f64_f64( %0, double %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv4f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 ; CHECK-NEXT: ret 
entry: %a = call @llvm.riscv.vfsgnjn.nxv4f64.f64( @@ -1322,9 +1295,8 @@ define @intrinsic_vfsgnjn_mask_vf_nxv4f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv4f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v12, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfsgnjn.vf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjn.mask.nxv4f64.f64( @@ -1345,9 +1317,8 @@ define @intrinsic_vfsgnjn_vf_nxv8f64_f64( %0, double %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv8f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjn.nxv8f64.f64( @@ -1369,9 +1340,8 @@ define @intrinsic_vfsgnjn_mask_vf_nxv8f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv8f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vfsgnjn.vf v8, v16, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfsgnjn.vf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjn.mask.nxv8f64.f64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv32.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: -target-abi=ilp32d < %s | FileCheck %s declare @llvm.riscv.vfsgnjx.nxv1f16.nxv1f16( , , @@ -687,9 +687,8 @@ define @intrinsic_vfsgnjx_vf_nxv1f16_nxv1f16_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjx.nxv1f16.f16( @@ -711,9 +710,8 @@ define @intrinsic_vfsgnjx_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfsgnjx.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjx.mask.nxv1f16.f16( @@ -734,9 +732,8 @@ define @intrinsic_vfsgnjx_vf_nxv2f16_nxv2f16_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjx.nxv2f16.f16( @@ -758,9 +755,8 @@ define @intrinsic_vfsgnjx_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfsgnjx_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfsgnjx.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjx.mask.nxv2f16.f16( @@ -781,9 +777,8 @@ define @intrinsic_vfsgnjx_vf_nxv4f16_nxv4f16_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjx.nxv4f16.f16( @@ -805,9 +800,8 @@ define @intrinsic_vfsgnjx_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfsgnjx.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjx.mask.nxv4f16.f16( @@ -828,9 +822,8 @@ define @intrinsic_vfsgnjx_vf_nxv8f16_nxv8f16_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjx.nxv8f16.f16( @@ -852,9 +845,8 @@ define @intrinsic_vfsgnjx_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v10, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfsgnjx.vf v8, v10, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjx.mask.nxv8f16.f16( @@ -875,9 +867,8 @@ define @intrinsic_vfsgnjx_vf_nxv16f16_nxv16f16_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjx.nxv16f16.f16( @@ -899,9 +890,8 @@ define @intrinsic_vfsgnjx_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v12, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfsgnjx.vf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjx.mask.nxv16f16.f16( @@ -922,9 +912,8 @@ define @intrinsic_vfsgnjx_vf_nxv32f16_nxv32f16_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v8, ft0 +; 
CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjx.nxv32f16.f16( @@ -946,9 +935,8 @@ define @intrinsic_vfsgnjx_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v16, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfsgnjx.vf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjx.mask.nxv32f16.f16( @@ -969,9 +957,8 @@ define @intrinsic_vfsgnjx_vf_nxv1f32_nxv1f32_f32( %0, float %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjx.nxv1f32.f32( @@ -993,9 +980,8 @@ define @intrinsic_vfsgnjx_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfsgnjx.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjx.mask.nxv1f32.f32( @@ -1016,9 +1002,8 @@ define @intrinsic_vfsgnjx_vf_nxv2f32_nxv2f32_f32( %0, float %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjx.nxv2f32.f32( @@ -1040,9 +1025,8 @@ define @intrinsic_vfsgnjx_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfsgnjx.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjx.mask.nxv2f32.f32( @@ -1063,9 +1047,8 @@ define @intrinsic_vfsgnjx_vf_nxv4f32_nxv4f32_f32( %0, float %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjx.nxv4f32.f32( @@ -1087,9 +1070,8 @@ define @intrinsic_vfsgnjx_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v10, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfsgnjx.vf v8, v10, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjx.mask.nxv4f32.f32( @@ -1110,9 +1092,8 @@ 
define @intrinsic_vfsgnjx_vf_nxv8f32_nxv8f32_f32( %0, float %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjx.nxv8f32.f32( @@ -1134,9 +1115,8 @@ define @intrinsic_vfsgnjx_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v12, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfsgnjx.vf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjx.mask.nxv8f32.f32( @@ -1157,9 +1137,8 @@ define @intrinsic_vfsgnjx_vf_nxv16f32_nxv16f32_f32( %0, float %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjx.nxv16f32.f32( @@ -1181,9 +1160,8 @@ define @intrinsic_vfsgnjx_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v16, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfsgnjx.vf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjx.mask.nxv16f32.f32( @@ -1204,13 +1182,8 @@ define @intrinsic_vfsgnjx_vf_nxv1f64_nxv1f64_f64( %0, double %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v8, ft0 -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjx.nxv1f64.f64( @@ -1232,13 +1205,8 @@ define @intrinsic_vfsgnjx_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v9, ft0, v0.t -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfsgnjx.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjx.mask.nxv1f64.f64( @@ -1259,13 +1227,8 @@ define @intrinsic_vfsgnjx_vf_nxv2f64_nxv2f64_f64( %0, double %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v8, ft0 -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: 
vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjx.nxv2f64.f64( @@ -1287,13 +1250,8 @@ define @intrinsic_vfsgnjx_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v10, ft0, v0.t -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfsgnjx.vf v8, v10, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjx.mask.nxv2f64.f64( @@ -1314,13 +1272,8 @@ define @intrinsic_vfsgnjx_vf_nxv4f64_nxv4f64_f64( %0, double %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v8, ft0 -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjx.nxv4f64.f64( @@ -1342,13 +1295,8 @@ define @intrinsic_vfsgnjx_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v12, ft0, v0.t -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfsgnjx.vf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjx.mask.nxv4f64.f64( @@ -1369,13 +1317,8 @@ define @intrinsic_vfsgnjx_vf_nxv8f64_nxv8f64_f64( %0, double %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v8, ft0 -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjx.nxv8f64.f64( @@ -1397,13 +1340,8 @@ define @intrinsic_vfsgnjx_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v16, ft0, v0.t -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfsgnjx.vf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjx.mask.nxv8f64.f64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv64.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+zfh -verify-machineinstrs \ -; RUN: 
< %s | FileCheck %s +; RUN: -target-abi=lp64d < %s | FileCheck %s declare @llvm.riscv.vfsgnjx.nxv1f16( , , @@ -687,9 +687,8 @@ define @intrinsic_vfsgnjx_vf_nxv1f16_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv1f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjx.nxv1f16.f16( @@ -711,9 +710,8 @@ define @intrinsic_vfsgnjx_mask_vf_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv1f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfsgnjx.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjx.mask.nxv1f16.f16( @@ -734,9 +732,8 @@ define @intrinsic_vfsgnjx_vf_nxv2f16_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv2f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjx.nxv2f16.f16( @@ -758,9 +755,8 @@ define @intrinsic_vfsgnjx_mask_vf_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv2f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfsgnjx.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjx.mask.nxv2f16.f16( @@ -781,9 +777,8 @@ define @intrinsic_vfsgnjx_vf_nxv4f16_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv4f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjx.nxv4f16.f16( @@ -805,9 +800,8 @@ define @intrinsic_vfsgnjx_mask_vf_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv4f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfsgnjx.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjx.mask.nxv4f16.f16( @@ -828,9 +822,8 @@ define @intrinsic_vfsgnjx_vf_nxv8f16_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv8f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjx.nxv8f16.f16( @@ -852,9 +845,8 @@ define @intrinsic_vfsgnjx_mask_vf_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv8f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: 
vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v10, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfsgnjx.vf v8, v10, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjx.mask.nxv8f16.f16( @@ -875,9 +867,8 @@ define @intrinsic_vfsgnjx_vf_nxv16f16_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv16f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjx.nxv16f16.f16( @@ -899,9 +890,8 @@ define @intrinsic_vfsgnjx_mask_vf_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv16f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v12, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfsgnjx.vf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjx.mask.nxv16f16.f16( @@ -922,9 +912,8 @@ define @intrinsic_vfsgnjx_vf_nxv32f16_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv32f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjx.nxv32f16.f16( @@ -946,9 +935,8 @@ define @intrinsic_vfsgnjx_mask_vf_nxv32f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv32f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v16, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfsgnjx.vf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjx.mask.nxv32f16.f16( @@ -969,9 +957,8 @@ define @intrinsic_vfsgnjx_vf_nxv1f32_f32( %0, float %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv1f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjx.nxv1f32.f32( @@ -993,9 +980,8 @@ define @intrinsic_vfsgnjx_mask_vf_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv1f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfsgnjx.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjx.mask.nxv1f32.f32( @@ -1016,9 +1002,8 @@ define @intrinsic_vfsgnjx_vf_nxv2f32_f32( %0, float %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv2f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjx.nxv2f32.f32( @@ -1040,9 +1025,8 @@ define @intrinsic_vfsgnjx_mask_vf_nxv2f32_f32( %0, %1, 
float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv2f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfsgnjx.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjx.mask.nxv2f32.f32( @@ -1063,9 +1047,8 @@ define @intrinsic_vfsgnjx_vf_nxv4f32_f32( %0, float %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv4f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjx.nxv4f32.f32( @@ -1087,9 +1070,8 @@ define @intrinsic_vfsgnjx_mask_vf_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv4f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v10, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfsgnjx.vf v8, v10, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjx.mask.nxv4f32.f32( @@ -1110,9 +1092,8 @@ define @intrinsic_vfsgnjx_vf_nxv8f32_f32( %0, float %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv8f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjx.nxv8f32.f32( @@ -1134,9 +1115,8 @@ define @intrinsic_vfsgnjx_mask_vf_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv8f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v12, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfsgnjx.vf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjx.mask.nxv8f32.f32( @@ -1157,9 +1137,8 @@ define @intrinsic_vfsgnjx_vf_nxv16f32_f32( %0, float %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv16f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjx.nxv16f32.f32( @@ -1181,9 +1160,8 @@ define @intrinsic_vfsgnjx_mask_vf_nxv16f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv16f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v16, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfsgnjx.vf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjx.mask.nxv16f32.f32( @@ -1204,9 +1182,8 @@ define @intrinsic_vfsgnjx_vf_nxv1f64_f64( %0, double %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv1f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: 
vfsgnjx.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjx.nxv1f64.f64( @@ -1228,9 +1205,8 @@ define @intrinsic_vfsgnjx_mask_vf_nxv1f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv1f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfsgnjx.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjx.mask.nxv1f64.f64( @@ -1251,9 +1227,8 @@ define @intrinsic_vfsgnjx_vf_nxv2f64_f64( %0, double %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv2f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjx.nxv2f64.f64( @@ -1275,9 +1250,8 @@ define @intrinsic_vfsgnjx_mask_vf_nxv2f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv2f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v10, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfsgnjx.vf v8, v10, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjx.mask.nxv2f64.f64( @@ -1298,9 +1272,8 @@ define @intrinsic_vfsgnjx_vf_nxv4f64_f64( %0, double %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv4f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjx.nxv4f64.f64( @@ -1322,9 +1295,8 @@ define @intrinsic_vfsgnjx_mask_vf_nxv4f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv4f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v12, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfsgnjx.vf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjx.mask.nxv4f64.f64( @@ -1345,9 +1317,8 @@ define @intrinsic_vfsgnjx_vf_nxv8f64_f64( %0, double %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv8f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjx.nxv8f64.f64( @@ -1369,9 +1340,8 @@ define @intrinsic_vfsgnjx_mask_vf_nxv8f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv8f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vfsgnjx.vf v8, v16, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfsgnjx.vf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsgnjx.mask.nxv8f64.f64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfslide1down-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfslide1down-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfslide1down-rv32.ll +++ 
b/llvm/test/CodeGen/RISCV/rvv/vfslide1down-rv32.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: -target-abi=ilp32d < %s | FileCheck %s declare @llvm.riscv.vfslide1down.nxv1f16.f16( , half, @@ -9,9 +9,8 @@ define @intrinsic_vfslide1down_vf_nxv1f16_nxv1f16_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfslide1down.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1down.nxv1f16.f16( @@ -33,9 +32,8 @@ define @intrinsic_vfslide1down_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfslide1down.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1down.mask.nxv1f16.f16( @@ -56,9 +54,8 @@ define @intrinsic_vfslide1down_vf_nxv2f16_nxv2f16_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfslide1down.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1down.nxv2f16.f16( @@ -80,9 +77,8 @@ define @intrinsic_vfslide1down_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfslide1down.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1down.mask.nxv2f16.f16( @@ -103,9 +99,8 @@ define @intrinsic_vfslide1down_vf_nxv4f16_nxv4f16_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfslide1down.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1down.nxv4f16.f16( @@ -127,9 +122,8 @@ define @intrinsic_vfslide1down_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfslide1down.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1down.mask.nxv4f16.f16( @@ -150,9 +144,8 @@ define @intrinsic_vfslide1down_vf_nxv8f16_nxv8f16_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfslide1down.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1down.nxv8f16.f16( @@ -174,9 +167,8 @@ define @intrinsic_vfslide1down_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v10, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfslide1down.vf v8, v10, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1down.mask.nxv8f16.f16( @@ -197,9 +189,8 @@ define @intrinsic_vfslide1down_vf_nxv16f16_nxv16f16_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfslide1down.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1down.nxv16f16.f16( @@ -221,9 +212,8 @@ define @intrinsic_vfslide1down_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v12, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfslide1down.vf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1down.mask.nxv16f16.f16( @@ -244,9 +234,8 @@ define @intrinsic_vfslide1down_vf_nxv32f16_nxv32f16_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfslide1down.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1down.nxv32f16.f16( @@ -268,9 +257,8 @@ define @intrinsic_vfslide1down_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v16, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfslide1down.vf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1down.mask.nxv32f16.f16( @@ -291,9 +279,8 @@ define @intrinsic_vfslide1down_vf_nxv1f32_nxv1f32_f32( %0, float %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfslide1down.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1down.nxv1f32.f32( @@ -315,9 +302,8 @@ define @intrinsic_vfslide1down_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfslide1down.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1down.mask.nxv1f32.f32( @@ -338,9 +324,8 @@ define @intrinsic_vfslide1down_vf_nxv2f32_nxv2f32_f32( %0, float %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfslide1down.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1down.nxv2f32.f32( @@ -362,9 +347,8 @@ define @intrinsic_vfslide1down_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfslide1down.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1down.mask.nxv2f32.f32( @@ -385,9 +369,8 @@ define @intrinsic_vfslide1down_vf_nxv4f32_nxv4f32_f32( %0, float %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfslide1down.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1down.nxv4f32.f32( @@ -409,9 +392,8 @@ define @intrinsic_vfslide1down_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v10, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfslide1down.vf v8, v10, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1down.mask.nxv4f32.f32( @@ -432,9 +414,8 @@ define @intrinsic_vfslide1down_vf_nxv8f32_nxv8f32_f32( %0, float %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfslide1down.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1down.nxv8f32.f32( @@ -456,9 +437,8 @@ define @intrinsic_vfslide1down_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v12, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfslide1down.vf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1down.mask.nxv8f32.f32( @@ -479,9 +459,8 @@ define @intrinsic_vfslide1down_vf_nxv16f32_nxv16f32_f32( %0, float %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfslide1down.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1down.nxv16f32.f32( @@ -503,9 +482,8 @@ define @intrinsic_vfslide1down_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v16, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfslide1down.vf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1down.mask.nxv16f32.f32( @@ -526,13 +504,8 @@ define @intrinsic_vfslide1down_vf_nxv1f64_nxv1f64_f64( %0, double %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v8, ft0 -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfslide1down.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1down.nxv1f64.f64( @@ -554,13 +527,8 @@ define @intrinsic_vfslide1down_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v9, ft0, v0.t -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfslide1down.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1down.mask.nxv1f64.f64( @@ -581,13 +549,8 @@ define @intrinsic_vfslide1down_vf_nxv2f64_nxv2f64_f64( %0, double %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v8, ft0 -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfslide1down.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1down.nxv2f64.f64( @@ -609,13 +572,8 @@ define @intrinsic_vfslide1down_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v10, ft0, v0.t -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfslide1down.vf v8, v10, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1down.mask.nxv2f64.f64( @@ -636,13 +594,8 @@ define @intrinsic_vfslide1down_vf_nxv4f64_nxv4f64_f64( %0, double %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: 
sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v8, ft0 -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfslide1down.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1down.nxv4f64.f64( @@ -664,13 +617,8 @@ define @intrinsic_vfslide1down_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v12, ft0, v0.t -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfslide1down.vf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1down.mask.nxv4f64.f64( @@ -691,13 +639,8 @@ define @intrinsic_vfslide1down_vf_nxv8f64_nxv8f64_f64( %0, double %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v8, ft0 -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfslide1down.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1down.nxv8f64.f64( @@ -719,13 +662,8 @@ define @intrinsic_vfslide1down_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v16, ft0, v0.t -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfslide1down.vf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1down.mask.nxv8f64.f64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfslide1down-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfslide1down-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfslide1down-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfslide1down-rv64.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: -target-abi=lp64d < %s | FileCheck %s declare @llvm.riscv.vfslide1down.nxv1f16.f16( , half, @@ -9,9 +9,8 @@ define @intrinsic_vfslide1down_vf_nxv1f16_nxv1f16_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfslide1down.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1down.nxv1f16.f16( @@ -33,9 +32,8 @@ define @intrinsic_vfslide1down_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: 
vfslide1down.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfslide1down.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1down.mask.nxv1f16.f16( @@ -56,9 +54,8 @@ define @intrinsic_vfslide1down_vf_nxv2f16_nxv2f16_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfslide1down.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1down.nxv2f16.f16( @@ -80,9 +77,8 @@ define @intrinsic_vfslide1down_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfslide1down.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1down.mask.nxv2f16.f16( @@ -103,9 +99,8 @@ define @intrinsic_vfslide1down_vf_nxv4f16_nxv4f16_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfslide1down.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1down.nxv4f16.f16( @@ -127,9 +122,8 @@ define @intrinsic_vfslide1down_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfslide1down.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1down.mask.nxv4f16.f16( @@ -150,9 +144,8 @@ define @intrinsic_vfslide1down_vf_nxv8f16_nxv8f16_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfslide1down.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1down.nxv8f16.f16( @@ -174,9 +167,8 @@ define @intrinsic_vfslide1down_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v10, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfslide1down.vf v8, v10, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1down.mask.nxv8f16.f16( @@ -197,9 +189,8 @@ define @intrinsic_vfslide1down_vf_nxv16f16_nxv16f16_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v8, ft0 +; 
CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfslide1down.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1down.nxv16f16.f16( @@ -221,9 +212,8 @@ define @intrinsic_vfslide1down_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v12, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfslide1down.vf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1down.mask.nxv16f16.f16( @@ -244,9 +234,8 @@ define @intrinsic_vfslide1down_vf_nxv32f16_nxv32f16_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfslide1down.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1down.nxv32f16.f16( @@ -268,9 +257,8 @@ define @intrinsic_vfslide1down_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v16, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfslide1down.vf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1down.mask.nxv32f16.f16( @@ -291,9 +279,8 @@ define @intrinsic_vfslide1down_vf_nxv1f32_nxv1f32_f32( %0, float %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfslide1down.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1down.nxv1f32.f32( @@ -315,9 +302,8 @@ define @intrinsic_vfslide1down_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfslide1down.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1down.mask.nxv1f32.f32( @@ -338,9 +324,8 @@ define @intrinsic_vfslide1down_vf_nxv2f32_nxv2f32_f32( %0, float %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfslide1down.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1down.nxv2f32.f32( @@ -362,9 +347,8 @@ define @intrinsic_vfslide1down_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v9, ft0, v0.t +; 
CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfslide1down.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1down.mask.nxv2f32.f32( @@ -385,9 +369,8 @@ define @intrinsic_vfslide1down_vf_nxv4f32_nxv4f32_f32( %0, float %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfslide1down.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1down.nxv4f32.f32( @@ -409,9 +392,8 @@ define @intrinsic_vfslide1down_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v10, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfslide1down.vf v8, v10, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1down.mask.nxv4f32.f32( @@ -432,9 +414,8 @@ define @intrinsic_vfslide1down_vf_nxv8f32_nxv8f32_f32( %0, float %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfslide1down.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1down.nxv8f32.f32( @@ -456,9 +437,8 @@ define @intrinsic_vfslide1down_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v12, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfslide1down.vf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1down.mask.nxv8f32.f32( @@ -479,9 +459,8 @@ define @intrinsic_vfslide1down_vf_nxv16f32_nxv16f32_f32( %0, float %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfslide1down.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1down.nxv16f32.f32( @@ -503,9 +482,8 @@ define @intrinsic_vfslide1down_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v16, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfslide1down.vf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1down.mask.nxv16f32.f32( @@ -526,9 +504,8 @@ define @intrinsic_vfslide1down_vf_nxv1f64_nxv1f64_f64( %0, double %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, 
a0, e64, m1, ta, mu +; CHECK-NEXT: vfslide1down.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1down.nxv1f64.f64( @@ -550,9 +527,8 @@ define @intrinsic_vfslide1down_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfslide1down.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1down.mask.nxv1f64.f64( @@ -573,9 +549,8 @@ define @intrinsic_vfslide1down_vf_nxv2f64_nxv2f64_f64( %0, double %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfslide1down.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1down.nxv2f64.f64( @@ -597,9 +572,8 @@ define @intrinsic_vfslide1down_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v10, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfslide1down.vf v8, v10, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1down.mask.nxv2f64.f64( @@ -620,9 +594,8 @@ define @intrinsic_vfslide1down_vf_nxv4f64_nxv4f64_f64( %0, double %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfslide1down.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1down.nxv4f64.f64( @@ -644,9 +617,8 @@ define @intrinsic_vfslide1down_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v12, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfslide1down.vf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1down.mask.nxv4f64.f64( @@ -667,9 +639,8 @@ define @intrinsic_vfslide1down_vf_nxv8f64_nxv8f64_f64( %0, double %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfslide1down.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1down.nxv8f64.f64( @@ -691,9 +662,8 @@ define @intrinsic_vfslide1down_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vfslide1down.vf v8, v16, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, 
mu +; CHECK-NEXT: vfslide1down.vf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1down.mask.nxv8f64.f64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv32.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: -target-abi=ilp32d < %s | FileCheck %s declare @llvm.riscv.vfslide1up.nxv1f16.f16( , half, @@ -9,9 +9,8 @@ define @intrinsic_vfslide1up_vf_nxv1f16_nxv1f16_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vfslide1up.vf v9, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfslide1up.vf v9, v8, fa0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret entry: @@ -34,9 +33,8 @@ define @intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vfslide1up.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfslide1up.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1up.mask.nxv1f16.f16( @@ -57,9 +55,8 @@ define @intrinsic_vfslide1up_vf_nxv2f16_nxv2f16_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vfslide1up.vf v9, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfslide1up.vf v9, v8, fa0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret entry: @@ -82,9 +79,8 @@ define @intrinsic_vfslide1up_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vfslide1up.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfslide1up.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1up.mask.nxv2f16.f16( @@ -105,9 +101,8 @@ define @intrinsic_vfslide1up_vf_nxv4f16_nxv4f16_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vfslide1up.vf v9, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfslide1up.vf v9, v8, fa0 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: @@ -130,9 +125,8 @@ define @intrinsic_vfslide1up_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vfslide1up.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfslide1up.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1up.mask.nxv4f16.f16( @@ 
-153,9 +147,8 @@ define @intrinsic_vfslide1up_vf_nxv8f16_nxv8f16_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vfslide1up.vf v10, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfslide1up.vf v10, v8, fa0 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: @@ -178,9 +171,8 @@ define @intrinsic_vfslide1up_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vfslide1up.vf v8, v10, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfslide1up.vf v8, v10, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1up.mask.nxv8f16.f16( @@ -201,9 +193,8 @@ define @intrinsic_vfslide1up_vf_nxv16f16_nxv16f16_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vfslide1up.vf v12, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfslide1up.vf v12, v8, fa0 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: @@ -226,9 +217,8 @@ define @intrinsic_vfslide1up_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vfslide1up.vf v8, v12, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfslide1up.vf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1up.mask.nxv16f16.f16( @@ -249,9 +239,8 @@ define @intrinsic_vfslide1up_vf_nxv32f16_nxv32f16_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vfslide1up.vf v16, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfslide1up.vf v16, v8, fa0 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: @@ -274,9 +263,8 @@ define @intrinsic_vfslide1up_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vfslide1up.vf v8, v16, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfslide1up.vf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1up.mask.nxv32f16.f16( @@ -297,9 +285,8 @@ define @intrinsic_vfslide1up_vf_nxv1f32_nxv1f32_f32( %0, float %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vfslide1up.vf v9, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfslide1up.vf v9, v8, fa0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret entry: @@ -322,9 +309,8 @@ define @intrinsic_vfslide1up_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfslide1up_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vfslide1up.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfslide1up.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1up.mask.nxv1f32.f32( @@ -345,9 +331,8 @@ define @intrinsic_vfslide1up_vf_nxv2f32_nxv2f32_f32( %0, float %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vfslide1up.vf v9, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfslide1up.vf v9, v8, fa0 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: @@ -370,9 +355,8 @@ define @intrinsic_vfslide1up_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vfslide1up.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfslide1up.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1up.mask.nxv2f32.f32( @@ -393,9 +377,8 @@ define @intrinsic_vfslide1up_vf_nxv4f32_nxv4f32_f32( %0, float %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vfslide1up.vf v10, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfslide1up.vf v10, v8, fa0 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: @@ -418,9 +401,8 @@ define @intrinsic_vfslide1up_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vfslide1up.vf v8, v10, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfslide1up.vf v8, v10, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1up.mask.nxv4f32.f32( @@ -441,9 +423,8 @@ define @intrinsic_vfslide1up_vf_nxv8f32_nxv8f32_f32( %0, float %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vfslide1up.vf v12, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfslide1up.vf v12, v8, fa0 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: @@ -466,9 +447,8 @@ define @intrinsic_vfslide1up_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vfslide1up.vf v8, v12, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfslide1up.vf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1up.mask.nxv8f32.f32( @@ -489,9 +469,8 @@ define @intrinsic_vfslide1up_vf_nxv16f32_nxv16f32_f32( %0, float %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, 
e32, m8, ta, mu -; CHECK-NEXT: vfslide1up.vf v16, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfslide1up.vf v16, v8, fa0 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: @@ -514,9 +493,8 @@ define @intrinsic_vfslide1up_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vfslide1up.vf v8, v16, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfslide1up.vf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1up.mask.nxv16f32.f32( @@ -537,14 +515,9 @@ define @intrinsic_vfslide1up_vf_nxv1f64_nxv1f64_f64( %0, double %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu -; CHECK-NEXT: vfslide1up.vf v9, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfslide1up.vf v9, v8, fa0 ; CHECK-NEXT: vmv.v.v v8, v9 -; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1up.nxv1f64.f64( @@ -566,13 +539,8 @@ define @intrinsic_vfslide1up_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu -; CHECK-NEXT: vfslide1up.vf v8, v9, ft0, v0.t -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfslide1up.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1up.mask.nxv1f64.f64( @@ -593,14 +561,9 @@ define @intrinsic_vfslide1up_vf_nxv2f64_nxv2f64_f64( %0, double %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu -; CHECK-NEXT: vfslide1up.vf v10, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfslide1up.vf v10, v8, fa0 ; CHECK-NEXT: vmv.v.v v8, v10 -; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1up.nxv2f64.f64( @@ -622,13 +585,8 @@ define @intrinsic_vfslide1up_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu -; CHECK-NEXT: vfslide1up.vf v8, v10, ft0, v0.t -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfslide1up.vf v8, v10, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1up.mask.nxv2f64.f64( @@ -649,14 +607,9 @@ define @intrinsic_vfslide1up_vf_nxv4f64_nxv4f64_f64( %0, double %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; 
CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu -; CHECK-NEXT: vfslide1up.vf v12, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfslide1up.vf v12, v8, fa0 ; CHECK-NEXT: vmv.v.v v8, v12 -; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1up.nxv4f64.f64( @@ -678,13 +631,8 @@ define @intrinsic_vfslide1up_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu -; CHECK-NEXT: vfslide1up.vf v8, v12, ft0, v0.t -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfslide1up.vf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1up.mask.nxv4f64.f64( @@ -705,14 +653,9 @@ define @intrinsic_vfslide1up_vf_nxv8f64_nxv8f64_f64( %0, double %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu -; CHECK-NEXT: vfslide1up.vf v16, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfslide1up.vf v16, v8, fa0 ; CHECK-NEXT: vmv.v.v v8, v16 -; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1up.nxv8f64.f64( @@ -734,13 +677,8 @@ define @intrinsic_vfslide1up_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu -; CHECK-NEXT: vfslide1up.vf v8, v16, ft0, v0.t -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfslide1up.vf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1up.mask.nxv8f64.f64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv64.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: -target-abi=lp64d < %s | FileCheck %s declare @llvm.riscv.vfslide1up.nxv1f16.f16( , half, @@ -9,9 +9,8 @@ define @intrinsic_vfslide1up_vf_nxv1f16_nxv1f16_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vfslide1up.vf v9, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfslide1up.vf v9, v8, fa0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret entry: @@ -34,9 +33,8 @@ define @intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vfslide1up.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, 
mf4, ta, mu +; CHECK-NEXT: vfslide1up.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1up.mask.nxv1f16.f16( @@ -57,9 +55,8 @@ define @intrinsic_vfslide1up_vf_nxv2f16_nxv2f16_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vfslide1up.vf v9, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfslide1up.vf v9, v8, fa0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret entry: @@ -82,9 +79,8 @@ define @intrinsic_vfslide1up_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vfslide1up.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfslide1up.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1up.mask.nxv2f16.f16( @@ -105,9 +101,8 @@ define @intrinsic_vfslide1up_vf_nxv4f16_nxv4f16_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vfslide1up.vf v9, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfslide1up.vf v9, v8, fa0 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: @@ -130,9 +125,8 @@ define @intrinsic_vfslide1up_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vfslide1up.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfslide1up.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1up.mask.nxv4f16.f16( @@ -153,9 +147,8 @@ define @intrinsic_vfslide1up_vf_nxv8f16_nxv8f16_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vfslide1up.vf v10, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfslide1up.vf v10, v8, fa0 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: @@ -178,9 +171,8 @@ define @intrinsic_vfslide1up_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vfslide1up.vf v8, v10, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfslide1up.vf v8, v10, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1up.mask.nxv8f16.f16( @@ -201,9 +193,8 @@ define @intrinsic_vfslide1up_vf_nxv16f16_nxv16f16_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vfslide1up.vf v12, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfslide1up.vf v12, v8, fa0 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: @@ -226,9 +217,8 @@ define 
@intrinsic_vfslide1up_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vfslide1up.vf v8, v12, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfslide1up.vf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1up.mask.nxv16f16.f16( @@ -249,9 +239,8 @@ define @intrinsic_vfslide1up_vf_nxv32f16_nxv32f16_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vfslide1up.vf v16, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfslide1up.vf v16, v8, fa0 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: @@ -274,9 +263,8 @@ define @intrinsic_vfslide1up_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vfslide1up.vf v8, v16, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfslide1up.vf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1up.mask.nxv32f16.f16( @@ -297,9 +285,8 @@ define @intrinsic_vfslide1up_vf_nxv1f32_nxv1f32_f32( %0, float %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vfslide1up.vf v9, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfslide1up.vf v9, v8, fa0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret entry: @@ -322,9 +309,8 @@ define @intrinsic_vfslide1up_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vfslide1up.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfslide1up.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1up.mask.nxv1f32.f32( @@ -345,9 +331,8 @@ define @intrinsic_vfslide1up_vf_nxv2f32_nxv2f32_f32( %0, float %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vfslide1up.vf v9, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfslide1up.vf v9, v8, fa0 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: @@ -370,9 +355,8 @@ define @intrinsic_vfslide1up_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vfslide1up.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfslide1up.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1up.mask.nxv2f32.f32( @@ -393,9 +377,8 @@ define @intrinsic_vfslide1up_vf_nxv4f32_nxv4f32_f32( %0, float %1, i64 %2) nounwind { ; CHECK-LABEL: 
intrinsic_vfslide1up_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vfslide1up.vf v10, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfslide1up.vf v10, v8, fa0 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: @@ -418,9 +401,8 @@ define @intrinsic_vfslide1up_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vfslide1up.vf v8, v10, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfslide1up.vf v8, v10, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1up.mask.nxv4f32.f32( @@ -441,9 +423,8 @@ define @intrinsic_vfslide1up_vf_nxv8f32_nxv8f32_f32( %0, float %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vfslide1up.vf v12, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfslide1up.vf v12, v8, fa0 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: @@ -466,9 +447,8 @@ define @intrinsic_vfslide1up_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vfslide1up.vf v8, v12, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfslide1up.vf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1up.mask.nxv8f32.f32( @@ -489,9 +469,8 @@ define @intrinsic_vfslide1up_vf_nxv16f32_nxv16f32_f32( %0, float %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vfslide1up.vf v16, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfslide1up.vf v16, v8, fa0 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: @@ -514,9 +493,8 @@ define @intrinsic_vfslide1up_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vfslide1up.vf v8, v16, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfslide1up.vf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1up.mask.nxv16f32.f32( @@ -537,9 +515,8 @@ define @intrinsic_vfslide1up_vf_nxv1f64_nxv1f64_f64( %0, double %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vfslide1up.vf v9, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfslide1up.vf v9, v8, fa0 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: @@ -562,9 +539,8 @@ define @intrinsic_vfslide1up_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, 
mu -; CHECK-NEXT: vfslide1up.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfslide1up.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1up.mask.nxv1f64.f64( @@ -585,9 +561,8 @@ define @intrinsic_vfslide1up_vf_nxv2f64_nxv2f64_f64( %0, double %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vfslide1up.vf v10, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfslide1up.vf v10, v8, fa0 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: @@ -610,9 +585,8 @@ define @intrinsic_vfslide1up_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vfslide1up.vf v8, v10, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfslide1up.vf v8, v10, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1up.mask.nxv2f64.f64( @@ -633,9 +607,8 @@ define @intrinsic_vfslide1up_vf_nxv4f64_nxv4f64_f64( %0, double %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vfslide1up.vf v12, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfslide1up.vf v12, v8, fa0 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: @@ -658,9 +631,8 @@ define @intrinsic_vfslide1up_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vfslide1up.vf v8, v12, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfslide1up.vf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1up.mask.nxv4f64.f64( @@ -681,9 +653,8 @@ define @intrinsic_vfslide1up_vf_nxv8f64_nxv8f64_f64( %0, double %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vfslide1up.vf v16, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfslide1up.vf v16, v8, fa0 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: @@ -706,9 +677,8 @@ define @intrinsic_vfslide1up_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vfslide1up.vf v8, v16, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfslide1up.vf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1up.mask.nxv8f64.f64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfsub-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfsub-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfsub-rv32.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: 
-target-abi=ilp32d < %s | FileCheck %s declare @llvm.riscv.vfsub.nxv1f16.nxv1f16( , , @@ -687,9 +687,8 @@ define @intrinsic_vfsub_vf_nxv1f16_nxv1f16_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vfsub.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfsub.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsub.nxv1f16.f16( @@ -711,9 +710,8 @@ define @intrinsic_vfsub_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vfsub.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfsub.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsub.mask.nxv1f16.f16( @@ -734,9 +732,8 @@ define @intrinsic_vfsub_vf_nxv2f16_nxv2f16_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vfsub.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfsub.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsub.nxv2f16.f16( @@ -758,9 +755,8 @@ define @intrinsic_vfsub_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vfsub.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfsub.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsub.mask.nxv2f16.f16( @@ -781,9 +777,8 @@ define @intrinsic_vfsub_vf_nxv4f16_nxv4f16_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vfsub.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfsub.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsub.nxv4f16.f16( @@ -805,9 +800,8 @@ define @intrinsic_vfsub_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vfsub.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfsub.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsub.mask.nxv4f16.f16( @@ -828,9 +822,8 @@ define @intrinsic_vfsub_vf_nxv8f16_nxv8f16_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vfsub.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfsub.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsub.nxv8f16.f16( @@ -852,9 +845,8 @@ define @intrinsic_vfsub_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vfsub.vf v8, v10, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfsub.vf v8, v10, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsub.mask.nxv8f16.f16( @@ -875,9 +867,8 @@ define @intrinsic_vfsub_vf_nxv16f16_nxv16f16_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vfsub.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfsub.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsub.nxv16f16.f16( @@ -899,9 +890,8 @@ define @intrinsic_vfsub_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vfsub.vf v8, v12, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfsub.vf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsub.mask.nxv16f16.f16( @@ -922,9 +912,8 @@ define @intrinsic_vfsub_vf_nxv32f16_nxv32f16_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vfsub.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfsub.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsub.nxv32f16.f16( @@ -946,9 +935,8 @@ define @intrinsic_vfsub_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vfsub.vf v8, v16, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfsub.vf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsub.mask.nxv32f16.f16( @@ -969,9 +957,8 @@ define @intrinsic_vfsub_vf_nxv1f32_nxv1f32_f32( %0, float %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vfsub.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfsub.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsub.nxv1f32.f32( @@ -993,9 +980,8 @@ define @intrinsic_vfsub_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vfsub.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfsub.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsub.mask.nxv1f32.f32( @@ -1016,9 +1002,8 @@ define @intrinsic_vfsub_vf_nxv2f32_nxv2f32_f32( %0, float %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vfsub.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfsub.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vfsub.nxv2f32.f32( @@ -1040,9 +1025,8 @@ define @intrinsic_vfsub_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vfsub.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfsub.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsub.mask.nxv2f32.f32( @@ -1063,9 +1047,8 @@ define @intrinsic_vfsub_vf_nxv4f32_nxv4f32_f32( %0, float %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vfsub.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfsub.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsub.nxv4f32.f32( @@ -1087,9 +1070,8 @@ define @intrinsic_vfsub_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vfsub.vf v8, v10, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfsub.vf v8, v10, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsub.mask.nxv4f32.f32( @@ -1110,9 +1092,8 @@ define @intrinsic_vfsub_vf_nxv8f32_nxv8f32_f32( %0, float %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vfsub.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfsub.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsub.nxv8f32.f32( @@ -1134,9 +1115,8 @@ define @intrinsic_vfsub_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vfsub.vf v8, v12, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfsub.vf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsub.mask.nxv8f32.f32( @@ -1157,9 +1137,8 @@ define @intrinsic_vfsub_vf_nxv16f32_nxv16f32_f32( %0, float %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vfsub.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfsub.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsub.nxv16f32.f32( @@ -1181,9 +1160,8 @@ define @intrinsic_vfsub_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vfsub.vf v8, v16, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfsub.vf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsub.mask.nxv16f32.f32( @@ -1204,13 +1182,8 @@ define @intrinsic_vfsub_vf_nxv1f64_nxv1f64_f64( %0, double %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu -; CHECK-NEXT: vfsub.vf v8, v8, ft0 -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfsub.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsub.nxv1f64.f64( @@ -1232,13 +1205,8 @@ define @intrinsic_vfsub_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu -; CHECK-NEXT: vfsub.vf v8, v9, ft0, v0.t -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfsub.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsub.mask.nxv1f64.f64( @@ -1259,13 +1227,8 @@ define @intrinsic_vfsub_vf_nxv2f64_nxv2f64_f64( %0, double %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu -; CHECK-NEXT: vfsub.vf v8, v8, ft0 -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfsub.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsub.nxv2f64.f64( @@ -1287,13 +1250,8 @@ define @intrinsic_vfsub_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu -; CHECK-NEXT: vfsub.vf v8, v10, ft0, v0.t -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfsub.vf v8, v10, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsub.mask.nxv2f64.f64( @@ -1314,13 +1272,8 @@ define @intrinsic_vfsub_vf_nxv4f64_nxv4f64_f64( %0, double %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu -; CHECK-NEXT: vfsub.vf v8, v8, ft0 -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfsub.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsub.nxv4f64.f64( @@ -1342,13 +1295,8 @@ define @intrinsic_vfsub_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu -; CHECK-NEXT: vfsub.vf v8, v12, ft0, v0.t -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfsub.vf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsub.mask.nxv4f64.f64( @@ -1369,13 +1317,8 @@ define @intrinsic_vfsub_vf_nxv8f64_nxv8f64_f64( %0, double %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi 
sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu -; CHECK-NEXT: vfsub.vf v8, v8, ft0 -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfsub.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsub.nxv8f64.f64( @@ -1397,13 +1340,8 @@ define @intrinsic_vfsub_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: sw a0, 8(sp) -; CHECK-NEXT: sw a1, 12(sp) -; CHECK-NEXT: fld ft0, 8(sp) -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu -; CHECK-NEXT: vfsub.vf v8, v16, ft0, v0.t -; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfsub.vf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsub.mask.nxv8f64.f64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfsub-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfsub-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfsub-rv64.ll @@ -1,7 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -mattr=+zfh \ ; RUN: -mattr=+d -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: -target-abi=lp64d < %s | FileCheck %s declare @llvm.riscv.vfsub.nxv1f16.nxv1f16( , , @@ -688,9 +688,8 @@ define @intrinsic_vfsub_vf_nxv1f16_nxv1f16_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vfsub.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfsub.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsub.nxv1f16.f16( @@ -712,9 +711,8 @@ define @intrinsic_vfsub_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vfsub.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfsub.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsub.mask.nxv1f16.f16( @@ -735,9 +733,8 @@ define @intrinsic_vfsub_vf_nxv2f16_nxv2f16_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vfsub.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfsub.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsub.nxv2f16.f16( @@ -759,9 +756,8 @@ define @intrinsic_vfsub_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vfsub.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfsub.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsub.mask.nxv2f16.f16( @@ -782,9 +778,8 @@ define @intrinsic_vfsub_vf_nxv4f16_nxv4f16_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vfsub.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfsub.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsub.nxv4f16.f16( @@ -806,9 +801,8 @@ define @intrinsic_vfsub_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vfsub.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfsub.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsub.mask.nxv4f16.f16( @@ -829,9 +823,8 @@ define @intrinsic_vfsub_vf_nxv8f16_nxv8f16_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vfsub.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfsub.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsub.nxv8f16.f16( @@ -853,9 +846,8 @@ define @intrinsic_vfsub_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vfsub.vf v8, v10, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfsub.vf v8, v10, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsub.mask.nxv8f16.f16( @@ -876,9 +868,8 @@ define @intrinsic_vfsub_vf_nxv16f16_nxv16f16_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vfsub.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfsub.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsub.nxv16f16.f16( @@ -900,9 +891,8 @@ define @intrinsic_vfsub_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vfsub.vf v8, v12, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfsub.vf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsub.mask.nxv16f16.f16( @@ -923,9 +913,8 @@ define @intrinsic_vfsub_vf_nxv32f16_nxv32f16_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vfsub.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfsub.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsub.nxv32f16.f16( @@ -947,9 +936,8 @@ define @intrinsic_vfsub_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vfsub.vf v8, v16, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfsub.vf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vfsub.mask.nxv32f16.f16( @@ -970,9 +958,8 @@ define @intrinsic_vfsub_vf_nxv1f32_nxv1f32_f32( %0, float %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vfsub.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfsub.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsub.nxv1f32.f32( @@ -994,9 +981,8 @@ define @intrinsic_vfsub_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vfsub.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfsub.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsub.mask.nxv1f32.f32( @@ -1017,9 +1003,8 @@ define @intrinsic_vfsub_vf_nxv2f32_nxv2f32_f32( %0, float %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vfsub.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfsub.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsub.nxv2f32.f32( @@ -1041,9 +1026,8 @@ define @intrinsic_vfsub_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vfsub.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfsub.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsub.mask.nxv2f32.f32( @@ -1064,9 +1048,8 @@ define @intrinsic_vfsub_vf_nxv4f32_nxv4f32_f32( %0, float %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vfsub.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfsub.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsub.nxv4f32.f32( @@ -1088,9 +1071,8 @@ define @intrinsic_vfsub_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vfsub.vf v8, v10, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfsub.vf v8, v10, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsub.mask.nxv4f32.f32( @@ -1111,9 +1093,8 @@ define @intrinsic_vfsub_vf_nxv8f32_nxv8f32_f32( %0, float %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vfsub.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfsub.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsub.nxv8f32.f32( @@ -1135,9 +1116,8 @@ define @intrinsic_vfsub_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; 
CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vfsub.vf v8, v12, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfsub.vf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsub.mask.nxv8f32.f32( @@ -1158,9 +1138,8 @@ define @intrinsic_vfsub_vf_nxv16f32_nxv16f32_f32( %0, float %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vfsub.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfsub.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsub.nxv16f32.f32( @@ -1182,9 +1161,8 @@ define @intrinsic_vfsub_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vfsub.vf v8, v16, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfsub.vf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsub.mask.nxv16f32.f32( @@ -1205,9 +1183,8 @@ define @intrinsic_vfsub_vf_nxv1f64_nxv1f64_f64( %0, double %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vfsub.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfsub.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsub.nxv1f64.f64( @@ -1229,9 +1206,8 @@ define @intrinsic_vfsub_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vfsub.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfsub.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsub.mask.nxv1f64.f64( @@ -1252,9 +1228,8 @@ define @intrinsic_vfsub_vf_nxv2f64_nxv2f64_f64( %0, double %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vfsub.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfsub.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsub.nxv2f64.f64( @@ -1276,9 +1251,8 @@ define @intrinsic_vfsub_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vfsub.vf v8, v10, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfsub.vf v8, v10, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsub.mask.nxv2f64.f64( @@ -1299,9 +1273,8 @@ define @intrinsic_vfsub_vf_nxv4f64_nxv4f64_f64( %0, double %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vfsub.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfsub.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsub.nxv4f64.f64( @@ -1323,9 
+1296,8 @@ define @intrinsic_vfsub_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vfsub.vf v8, v12, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfsub.vf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsub.mask.nxv4f64.f64( @@ -1346,9 +1318,8 @@ define @intrinsic_vfsub_vf_nxv8f64_nxv8f64_f64( %0, double %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vfsub.vf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfsub.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsub.nxv8f64.f64( @@ -1370,9 +1341,8 @@ define @intrinsic_vfsub_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.d.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vfsub.vf v8, v16, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfsub.vf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfsub.mask.nxv8f64.f64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwadd-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwadd-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwadd-rv32.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: -target-abi=ilp32d < %s | FileCheck %s declare @llvm.riscv.vfwadd.nxv1f32.nxv1f16.nxv1f16( , , @@ -423,9 +423,8 @@ define @intrinsic_vfwadd_vf_nxv1f32_nxv1f16_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vf_nxv1f32_nxv1f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vfwadd.vf v9, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfwadd.vf v9, v8, fa0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret entry: @@ -448,9 +447,8 @@ define @intrinsic_vfwadd_mask_vf_nxv1f32_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv1f32_nxv1f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vfwadd.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfwadd.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.f16( @@ -471,9 +469,8 @@ define @intrinsic_vfwadd_vf_nxv2f32_nxv2f16_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vf_nxv2f32_nxv2f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vfwadd.vf v9, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfwadd.vf v9, v8, fa0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret entry: @@ -496,9 +493,8 @@ define @intrinsic_vfwadd_mask_vf_nxv2f32_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv2f32_nxv2f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; 
CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vfwadd.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfwadd.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2f16.f16( @@ -519,9 +515,8 @@ define @intrinsic_vfwadd_vf_nxv4f32_nxv4f16_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vf_nxv4f32_nxv4f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vfwadd.vf v10, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfwadd.vf v10, v8, fa0 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret entry: @@ -544,9 +539,8 @@ define @intrinsic_vfwadd_mask_vf_nxv4f32_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv4f32_nxv4f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vfwadd.vf v8, v10, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfwadd.vf v8, v10, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4f16.f16( @@ -567,9 +561,8 @@ define @intrinsic_vfwadd_vf_nxv8f32_nxv8f16_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vf_nxv8f32_nxv8f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vfwadd.vf v12, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfwadd.vf v12, v8, fa0 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret entry: @@ -592,9 +585,8 @@ define @intrinsic_vfwadd_mask_vf_nxv8f32_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv8f32_nxv8f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vfwadd.vf v8, v12, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfwadd.vf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8f16.f16( @@ -615,9 +607,8 @@ define @intrinsic_vfwadd_vf_nxv16f32_nxv16f16_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vf_nxv16f32_nxv16f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vfwadd.vf v16, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfwadd.vf v16, v8, fa0 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret entry: @@ -640,9 +631,8 @@ define @intrinsic_vfwadd_mask_vf_nxv16f32_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv16f32_nxv16f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vfwadd.vf v8, v16, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfwadd.vf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16f16.f16( @@ -663,9 +653,8 @@ define @intrinsic_vfwadd_vf_nxv1f64_nxv1f32_f32( %0, float %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vf_nxv1f64_nxv1f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vfwadd.vf v9, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfwadd.vf v9, v8, fa0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret entry: @@ 
-688,9 +677,8 @@ define @intrinsic_vfwadd_mask_vf_nxv1f64_nxv1f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv1f64_nxv1f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vfwadd.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfwadd.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.f32( @@ -711,9 +699,8 @@ define @intrinsic_vfwadd_vf_nxv2f64_nxv2f32_f32( %0, float %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vf_nxv2f64_nxv2f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vfwadd.vf v10, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfwadd.vf v10, v8, fa0 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret entry: @@ -736,9 +723,8 @@ define @intrinsic_vfwadd_mask_vf_nxv2f64_nxv2f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv2f64_nxv2f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vfwadd.vf v8, v10, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfwadd.vf v8, v10, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.f32( @@ -759,9 +745,8 @@ define @intrinsic_vfwadd_vf_nxv4f64_nxv4f32_f32( %0, float %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vf_nxv4f64_nxv4f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vfwadd.vf v12, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfwadd.vf v12, v8, fa0 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret entry: @@ -784,9 +769,8 @@ define @intrinsic_vfwadd_mask_vf_nxv4f64_nxv4f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv4f64_nxv4f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vfwadd.vf v8, v12, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfwadd.vf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.f32( @@ -807,9 +791,8 @@ define @intrinsic_vfwadd_vf_nxv8f64_nxv8f32_f32( %0, float %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vf_nxv8f64_nxv8f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vfwadd.vf v16, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfwadd.vf v16, v8, fa0 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret entry: @@ -832,9 +815,8 @@ define @intrinsic_vfwadd_mask_vf_nxv8f64_nxv8f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv8f64_nxv8f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vfwadd.vf v8, v16, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfwadd.vf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.f32( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwadd-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwadd-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwadd-rv64.ll @@ -1,6 +1,6 @@ ; NOTE: 
Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: -target-abi=lp64d < %s | FileCheck %s declare @llvm.riscv.vfwadd.nxv1f32.nxv1f16.nxv1f16( , , @@ -423,9 +423,8 @@ define @intrinsic_vfwadd_vf_nxv1f32_nxv1f16_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vf_nxv1f32_nxv1f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vfwadd.vf v9, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfwadd.vf v9, v8, fa0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret entry: @@ -448,9 +447,8 @@ define @intrinsic_vfwadd_mask_vf_nxv1f32_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv1f32_nxv1f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vfwadd.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfwadd.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.f16( @@ -471,9 +469,8 @@ define @intrinsic_vfwadd_vf_nxv2f32_nxv2f16_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vf_nxv2f32_nxv2f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vfwadd.vf v9, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfwadd.vf v9, v8, fa0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret entry: @@ -496,9 +493,8 @@ define @intrinsic_vfwadd_mask_vf_nxv2f32_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv2f32_nxv2f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vfwadd.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfwadd.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2f16.f16( @@ -519,9 +515,8 @@ define @intrinsic_vfwadd_vf_nxv4f32_nxv4f16_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vf_nxv4f32_nxv4f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vfwadd.vf v10, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfwadd.vf v10, v8, fa0 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret entry: @@ -544,9 +539,8 @@ define @intrinsic_vfwadd_mask_vf_nxv4f32_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv4f32_nxv4f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vfwadd.vf v8, v10, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfwadd.vf v8, v10, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4f16.f16( @@ -567,9 +561,8 @@ define @intrinsic_vfwadd_vf_nxv8f32_nxv8f16_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vf_nxv8f32_nxv8f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vfwadd.vf v12, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfwadd.vf v12, v8, fa0 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret entry: 
@@ -592,9 +585,8 @@ define @intrinsic_vfwadd_mask_vf_nxv8f32_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv8f32_nxv8f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vfwadd.vf v8, v12, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfwadd.vf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8f16.f16( @@ -615,9 +607,8 @@ define @intrinsic_vfwadd_vf_nxv16f32_nxv16f16_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vf_nxv16f32_nxv16f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vfwadd.vf v16, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfwadd.vf v16, v8, fa0 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret entry: @@ -640,9 +631,8 @@ define @intrinsic_vfwadd_mask_vf_nxv16f32_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv16f32_nxv16f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vfwadd.vf v8, v16, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfwadd.vf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16f16.f16( @@ -663,9 +653,8 @@ define @intrinsic_vfwadd_vf_nxv1f64_nxv1f32_f32( %0, float %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vf_nxv1f64_nxv1f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vfwadd.vf v9, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfwadd.vf v9, v8, fa0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret entry: @@ -688,9 +677,8 @@ define @intrinsic_vfwadd_mask_vf_nxv1f64_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv1f64_nxv1f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vfwadd.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfwadd.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.f32( @@ -711,9 +699,8 @@ define @intrinsic_vfwadd_vf_nxv2f64_nxv2f32_f32( %0, float %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vf_nxv2f64_nxv2f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vfwadd.vf v10, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfwadd.vf v10, v8, fa0 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret entry: @@ -736,9 +723,8 @@ define @intrinsic_vfwadd_mask_vf_nxv2f64_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv2f64_nxv2f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vfwadd.vf v8, v10, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfwadd.vf v8, v10, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.f32( @@ -759,9 +745,8 @@ define @intrinsic_vfwadd_vf_nxv4f64_nxv4f32_f32( %0, float %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vf_nxv4f64_nxv4f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, 
a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vfwadd.vf v12, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfwadd.vf v12, v8, fa0 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret entry: @@ -784,9 +769,8 @@ define @intrinsic_vfwadd_mask_vf_nxv4f64_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv4f64_nxv4f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vfwadd.vf v8, v12, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfwadd.vf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.f32( @@ -807,9 +791,8 @@ define @intrinsic_vfwadd_vf_nxv8f64_nxv8f32_f32( %0, float %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vf_nxv8f64_nxv8f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vfwadd.vf v16, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfwadd.vf v16, v8, fa0 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret entry: @@ -832,9 +815,8 @@ define @intrinsic_vfwadd_mask_vf_nxv8f64_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv8f64_nxv8f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vfwadd.vf v8, v16, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfwadd.vf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.f32( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv32.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: -target-abi=ilp32d < %s | FileCheck %s declare @llvm.riscv.vfwadd.w.nxv1f32.nxv1f16( , , @@ -416,9 +416,8 @@ define @intrinsic_vfwadd.w_wf_nxv1f32_nxv1f32_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv1f32_nxv1f32_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfwadd.wf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwadd.w.nxv1f32.f16( @@ -440,9 +439,8 @@ define @intrinsic_vfwadd.w_mask_wf_nxv1f32_nxv1f32_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv1f32_nxv1f32_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfwadd.wf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwadd.w.mask.nxv1f32.f16( @@ -463,9 +461,8 @@ define @intrinsic_vfwadd.w_wf_nxv2f32_nxv2f32_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv2f32_nxv2f32_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfwadd.wf v8, v8, fa0 ; CHECK-NEXT: 
ret entry: %a = call @llvm.riscv.vfwadd.w.nxv2f32.f16( @@ -487,9 +484,8 @@ define @intrinsic_vfwadd.w_mask_wf_nxv2f32_nxv2f32_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv2f32_nxv2f32_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfwadd.wf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwadd.w.mask.nxv2f32.f16( @@ -510,9 +506,8 @@ define @intrinsic_vfwadd.w_wf_nxv4f32_nxv4f32_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv4f32_nxv4f32_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfwadd.wf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwadd.w.nxv4f32.f16( @@ -534,9 +529,8 @@ define @intrinsic_vfwadd.w_mask_wf_nxv4f32_nxv4f32_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv4f32_nxv4f32_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v10, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfwadd.wf v8, v10, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwadd.w.mask.nxv4f32.f16( @@ -557,9 +551,8 @@ define @intrinsic_vfwadd.w_wf_nxv8f32_nxv8f32_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv8f32_nxv8f32_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfwadd.wf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwadd.w.nxv8f32.f16( @@ -581,9 +574,8 @@ define @intrinsic_vfwadd.w_mask_wf_nxv8f32_nxv8f32_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv8f32_nxv8f32_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v12, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfwadd.wf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwadd.w.mask.nxv8f32.f16( @@ -604,9 +596,8 @@ define @intrinsic_vfwadd.w_wf_nxv16f32_nxv16f32_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv16f32_nxv16f32_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfwadd.wf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwadd.w.nxv16f32.f16( @@ -628,9 +619,8 @@ define @intrinsic_vfwadd.w_mask_wf_nxv16f32_nxv16f32_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv16f32_nxv16f32_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v16, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfwadd.wf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwadd.w.mask.nxv16f32.f16( @@ -651,9 +641,8 @@ define @intrinsic_vfwadd.w_wf_nxv1f64_nxv1f64_f32( %0, float %1, i32 %2) nounwind { ; CHECK-LABEL: 
intrinsic_vfwadd.w_wf_nxv1f64_nxv1f64_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfwadd.wf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwadd.w.nxv1f64.f32( @@ -675,9 +664,8 @@ define @intrinsic_vfwadd.w_mask_wf_nxv1f64_nxv1f64_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv1f64_nxv1f64_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfwadd.wf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwadd.w.mask.nxv1f64.f32( @@ -698,9 +686,8 @@ define @intrinsic_vfwadd.w_wf_nxv2f64_nxv2f64_f32( %0, float %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv2f64_nxv2f64_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfwadd.wf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwadd.w.nxv2f64.f32( @@ -722,9 +709,8 @@ define @intrinsic_vfwadd.w_mask_wf_nxv2f64_nxv2f64_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv2f64_nxv2f64_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v10, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfwadd.wf v8, v10, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwadd.w.mask.nxv2f64.f32( @@ -745,9 +731,8 @@ define @intrinsic_vfwadd.w_wf_nxv4f64_nxv4f64_f32( %0, float %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv4f64_nxv4f64_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfwadd.wf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwadd.w.nxv4f64.f32( @@ -769,9 +754,8 @@ define @intrinsic_vfwadd.w_mask_wf_nxv4f64_nxv4f64_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv4f64_nxv4f64_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v12, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfwadd.wf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwadd.w.mask.nxv4f64.f32( @@ -792,9 +776,8 @@ define @intrinsic_vfwadd.w_wf_nxv8f64_nxv8f64_f32( %0, float %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv8f64_nxv8f64_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfwadd.wf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwadd.w.nxv8f64.f32( @@ -816,9 +799,8 @@ define @intrinsic_vfwadd.w_mask_wf_nxv8f64_nxv8f64_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv8f64_nxv8f64_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v16, ft0, v0.t +; 
CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfwadd.wf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwadd.w.mask.nxv8f64.f32( @@ -987,9 +969,8 @@ define @intrinsic_vfwadd.w_mask_wf_tie_nxv1f32_nxv1f32_f16( %0, half %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv1f32_nxv1f32_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v8, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfwadd.wf v8, v8, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwadd.w.mask.nxv1f32.f16( @@ -1005,9 +986,8 @@ define @intrinsic_vfwadd.w_mask_wf_tie_nxv2f32_nxv2f32_f16( %0, half %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv2f32_nxv2f32_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v8, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfwadd.wf v8, v8, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwadd.w.mask.nxv2f32.f16( @@ -1023,9 +1003,8 @@ define @intrinsic_vfwadd.w_mask_wf_tie_nxv4f32_nxv4f32_f16( %0, half %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv4f32_nxv4f32_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v8, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfwadd.wf v8, v8, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwadd.w.mask.nxv4f32.f16( @@ -1041,9 +1020,8 @@ define @intrinsic_vfwadd.w_mask_wf_tie_nxv8f32_nxv8f32_f16( %0, half %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv8f32_nxv8f32_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v8, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfwadd.wf v8, v8, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwadd.w.mask.nxv8f32.f16( @@ -1059,9 +1037,8 @@ define @intrinsic_vfwadd.w_mask_wf_tie_nxv16f32_nxv16f32_f16( %0, half %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv16f32_nxv16f32_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v8, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfwadd.wf v8, v8, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwadd.w.mask.nxv16f32.f16( @@ -1077,9 +1054,8 @@ define @intrinsic_vfwadd.w_mask_wf_tie_nxv1f64_nxv1f64_f32( %0, float %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv1f64_nxv1f64_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v8, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfwadd.wf v8, v8, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwadd.w.mask.nxv1f64.f32( @@ -1095,9 +1071,8 @@ define @intrinsic_vfwadd.w_mask_wf_tie_nxv2f64_nxv2f64_f32( %0, float %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv2f64_nxv2f64_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v8, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, 
m1, ta, mu +; CHECK-NEXT: vfwadd.wf v8, v8, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwadd.w.mask.nxv2f64.f32( @@ -1113,9 +1088,8 @@ define @intrinsic_vfwadd.w_mask_wf_tie_nxv4f64_nxv4f64_f32( %0, float %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv4f64_nxv4f64_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v8, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfwadd.wf v8, v8, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwadd.w.mask.nxv4f64.f32( @@ -1131,9 +1105,8 @@ define @intrinsic_vfwadd.w_mask_wf_tie_nxv8f64_nxv8f64_f32( %0, float %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv8f64_nxv8f64_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v8, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfwadd.wf v8, v8, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwadd.w.mask.nxv8f64.f32( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv64.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: -target-abi=lp64d < %s | FileCheck %s declare @llvm.riscv.vfwadd.w.nxv1f32.nxv1f16( , , @@ -416,9 +416,8 @@ define @intrinsic_vfwadd.w_wf_nxv1f32_nxv1f32_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv1f32_nxv1f32_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfwadd.wf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwadd.w.nxv1f32.f16( @@ -440,9 +439,8 @@ define @intrinsic_vfwadd.w_mask_wf_nxv1f32_nxv1f32_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv1f32_nxv1f32_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfwadd.wf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwadd.w.mask.nxv1f32.f16( @@ -463,9 +461,8 @@ define @intrinsic_vfwadd.w_wf_nxv2f32_nxv2f32_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv2f32_nxv2f32_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfwadd.wf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwadd.w.nxv2f32.f16( @@ -487,9 +484,8 @@ define @intrinsic_vfwadd.w_mask_wf_nxv2f32_nxv2f32_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv2f32_nxv2f32_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfwadd.wf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vfwadd.w.mask.nxv2f32.f16( @@ -510,9 +506,8 @@ define @intrinsic_vfwadd.w_wf_nxv4f32_nxv4f32_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv4f32_nxv4f32_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfwadd.wf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwadd.w.nxv4f32.f16( @@ -534,9 +529,8 @@ define @intrinsic_vfwadd.w_mask_wf_nxv4f32_nxv4f32_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv4f32_nxv4f32_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v10, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfwadd.wf v8, v10, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwadd.w.mask.nxv4f32.f16( @@ -557,9 +551,8 @@ define @intrinsic_vfwadd.w_wf_nxv8f32_nxv8f32_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv8f32_nxv8f32_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfwadd.wf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwadd.w.nxv8f32.f16( @@ -581,9 +574,8 @@ define @intrinsic_vfwadd.w_mask_wf_nxv8f32_nxv8f32_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv8f32_nxv8f32_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v12, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfwadd.wf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwadd.w.mask.nxv8f32.f16( @@ -604,9 +596,8 @@ define @intrinsic_vfwadd.w_wf_nxv16f32_nxv16f32_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv16f32_nxv16f32_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfwadd.wf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwadd.w.nxv16f32.f16( @@ -628,9 +619,8 @@ define @intrinsic_vfwadd.w_mask_wf_nxv16f32_nxv16f32_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv16f32_nxv16f32_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v16, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfwadd.wf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwadd.w.mask.nxv16f32.f16( @@ -651,9 +641,8 @@ define @intrinsic_vfwadd.w_wf_nxv1f64_nxv1f64_f32( %0, float %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv1f64_nxv1f64_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfwadd.wf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwadd.w.nxv1f64.f32( @@ -675,9 +664,8 @@ define @intrinsic_vfwadd.w_mask_wf_nxv1f64_nxv1f64_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfwadd.w_mask_wf_nxv1f64_nxv1f64_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfwadd.wf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwadd.w.mask.nxv1f64.f32( @@ -698,9 +686,8 @@ define @intrinsic_vfwadd.w_wf_nxv2f64_nxv2f64_f32( %0, float %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv2f64_nxv2f64_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfwadd.wf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwadd.w.nxv2f64.f32( @@ -722,9 +709,8 @@ define @intrinsic_vfwadd.w_mask_wf_nxv2f64_nxv2f64_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv2f64_nxv2f64_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v10, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfwadd.wf v8, v10, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwadd.w.mask.nxv2f64.f32( @@ -745,9 +731,8 @@ define @intrinsic_vfwadd.w_wf_nxv4f64_nxv4f64_f32( %0, float %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv4f64_nxv4f64_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfwadd.wf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwadd.w.nxv4f64.f32( @@ -769,9 +754,8 @@ define @intrinsic_vfwadd.w_mask_wf_nxv4f64_nxv4f64_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv4f64_nxv4f64_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v12, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfwadd.wf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwadd.w.mask.nxv4f64.f32( @@ -792,9 +776,8 @@ define @intrinsic_vfwadd.w_wf_nxv8f64_nxv8f64_f32( %0, float %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv8f64_nxv8f64_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfwadd.wf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwadd.w.nxv8f64.f32( @@ -816,9 +799,8 @@ define @intrinsic_vfwadd.w_mask_wf_nxv8f64_nxv8f64_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv8f64_nxv8f64_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v16, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfwadd.wf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwadd.w.mask.nxv8f64.f32( @@ -987,9 +969,8 @@ define @intrinsic_vfwadd.w_mask_wf_tie_nxv1f32_nxv1f32_f16( %0, half %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv1f32_nxv1f32_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: 
vfwadd.wf v8, v8, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfwadd.wf v8, v8, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwadd.w.mask.nxv1f32.f16( @@ -1005,9 +986,8 @@ define @intrinsic_vfwadd.w_mask_wf_tie_nxv2f32_nxv2f32_f16( %0, half %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv2f32_nxv2f32_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v8, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfwadd.wf v8, v8, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwadd.w.mask.nxv2f32.f16( @@ -1023,9 +1003,8 @@ define @intrinsic_vfwadd.w_mask_wf_tie_nxv4f32_nxv4f32_f16( %0, half %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv4f32_nxv4f32_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v8, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfwadd.wf v8, v8, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwadd.w.mask.nxv4f32.f16( @@ -1041,9 +1020,8 @@ define @intrinsic_vfwadd.w_mask_wf_tie_nxv8f32_nxv8f32_f16( %0, half %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv8f32_nxv8f32_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v8, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfwadd.wf v8, v8, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwadd.w.mask.nxv8f32.f16( @@ -1059,9 +1037,8 @@ define @intrinsic_vfwadd.w_mask_wf_tie_nxv16f32_nxv16f32_f16( %0, half %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv16f32_nxv16f32_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v8, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfwadd.wf v8, v8, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwadd.w.mask.nxv16f32.f16( @@ -1077,9 +1054,8 @@ define @intrinsic_vfwadd.w_mask_wf_tie_nxv1f64_nxv1f64_f32( %0, float %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv1f64_nxv1f64_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v8, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfwadd.wf v8, v8, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwadd.w.mask.nxv1f64.f32( @@ -1095,9 +1071,8 @@ define @intrinsic_vfwadd.w_mask_wf_tie_nxv2f64_nxv2f64_f32( %0, float %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv2f64_nxv2f64_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v8, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfwadd.wf v8, v8, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwadd.w.mask.nxv2f64.f32( @@ -1113,9 +1088,8 @@ define @intrinsic_vfwadd.w_mask_wf_tie_nxv4f64_nxv4f64_f32( %0, float %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv4f64_nxv4f64_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v8, ft0, v0.t +; 
CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfwadd.wf v8, v8, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwadd.w.mask.nxv4f64.f32( @@ -1131,9 +1105,8 @@ define @intrinsic_vfwadd.w_mask_wf_tie_nxv8f64_nxv8f64_f32( %0, float %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv8f64_nxv8f64_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vfwadd.wf v8, v8, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfwadd.wf v8, v8, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwadd.w.mask.nxv8f64.f32( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmacc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmacc-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwmacc-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwmacc-rv32.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: -target-abi=ilp32d < %s | FileCheck %s declare @llvm.riscv.vfwmacc.nxv1f32.nxv1f16( , , @@ -424,9 +424,8 @@ define @intrinsic_vfwmacc_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv1f32_f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu -; CHECK-NEXT: vfwmacc.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vfwmacc.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwmacc.nxv1f32.f16( @@ -448,9 +447,8 @@ define @intrinsic_vfwmacc_mask_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv1f32_f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu -; CHECK-NEXT: vfwmacc.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vfwmacc.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwmacc.mask.nxv1f32.f16( @@ -472,9 +470,8 @@ define @intrinsic_vfwmacc_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv2f32_f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu -; CHECK-NEXT: vfwmacc.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vfwmacc.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwmacc.nxv2f32.f16( @@ -496,9 +493,8 @@ define @intrinsic_vfwmacc_mask_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv2f32_f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu -; CHECK-NEXT: vfwmacc.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vfwmacc.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwmacc.mask.nxv2f32.f16( @@ -520,9 +516,8 @@ define @intrinsic_vfwmacc_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv4f32_f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu -; CHECK-NEXT: vfwmacc.vf v8, ft0, v10 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vfwmacc.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vfwmacc.nxv4f32.f16( @@ -544,9 +539,8 @@ define @intrinsic_vfwmacc_mask_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv4f32_f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu -; CHECK-NEXT: vfwmacc.vf v8, ft0, v10, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vfwmacc.vf v8, fa0, v10, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwmacc.mask.nxv4f32.f16( @@ -568,9 +562,8 @@ define @intrinsic_vfwmacc_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv8f32_f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu -; CHECK-NEXT: vfwmacc.vf v8, ft0, v12 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vfwmacc.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwmacc.nxv8f32.f16( @@ -592,9 +585,8 @@ define @intrinsic_vfwmacc_mask_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv8f32_f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu -; CHECK-NEXT: vfwmacc.vf v8, ft0, v12, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vfwmacc.vf v8, fa0, v12, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwmacc.mask.nxv8f32.f16( @@ -616,9 +608,8 @@ define @intrinsic_vfwmacc_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv16f32_f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu -; CHECK-NEXT: vfwmacc.vf v8, ft0, v16 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vfwmacc.vf v8, fa0, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwmacc.nxv16f32.f16( @@ -640,9 +631,8 @@ define @intrinsic_vfwmacc_mask_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv16f32_f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu -; CHECK-NEXT: vfwmacc.vf v8, ft0, v16, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vfwmacc.vf v8, fa0, v16, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwmacc.mask.nxv16f32.f16( @@ -664,9 +654,8 @@ define @intrinsic_vfwmacc_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv1f64_f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu -; CHECK-NEXT: vfwmacc.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vfwmacc.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwmacc.nxv1f64.f32( @@ -688,9 +677,8 @@ define @intrinsic_vfwmacc_mask_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv1f64_f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu -; CHECK-NEXT: vfwmacc.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vfwmacc.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwmacc.mask.nxv1f64.f32( @@ -712,9 +700,8 @@ define @intrinsic_vfwmacc_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, i32 %3) nounwind { ; CHECK-LABEL: 
intrinsic_vfwmacc_vf_nxv2f64_f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu -; CHECK-NEXT: vfwmacc.vf v8, ft0, v10 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vfwmacc.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwmacc.nxv2f64.f32( @@ -736,9 +723,8 @@ define @intrinsic_vfwmacc_mask_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv2f64_f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu -; CHECK-NEXT: vfwmacc.vf v8, ft0, v10, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vfwmacc.vf v8, fa0, v10, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwmacc.mask.nxv2f64.f32( @@ -760,9 +746,8 @@ define @intrinsic_vfwmacc_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv4f64_f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu -; CHECK-NEXT: vfwmacc.vf v8, ft0, v12 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vfwmacc.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwmacc.nxv4f64.f32( @@ -784,9 +769,8 @@ define @intrinsic_vfwmacc_mask_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv4f64_f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu -; CHECK-NEXT: vfwmacc.vf v8, ft0, v12, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vfwmacc.vf v8, fa0, v12, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwmacc.mask.nxv4f64.f32( @@ -808,9 +792,8 @@ define @intrinsic_vfwmacc_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv8f64_f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu -; CHECK-NEXT: vfwmacc.vf v8, ft0, v16 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vfwmacc.vf v8, fa0, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwmacc.nxv8f64.f32( @@ -832,9 +815,8 @@ define @intrinsic_vfwmacc_mask_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv8f64_f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu -; CHECK-NEXT: vfwmacc.vf v8, ft0, v16, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vfwmacc.vf v8, fa0, v16, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwmacc.mask.nxv8f64.f32( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmacc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmacc-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwmacc-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwmacc-rv64.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: -target-abi=lp64d < %s | FileCheck %s declare @llvm.riscv.vfwmacc.nxv1f32.nxv1f16( , , @@ -424,9 +424,8 @@ define @intrinsic_vfwmacc_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv1f32_f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu -; 
CHECK-NEXT: vfwmacc.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vfwmacc.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwmacc.nxv1f32.f16( @@ -448,9 +447,8 @@ define @intrinsic_vfwmacc_mask_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv1f32_f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu -; CHECK-NEXT: vfwmacc.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vfwmacc.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwmacc.mask.nxv1f32.f16( @@ -472,9 +470,8 @@ define @intrinsic_vfwmacc_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv2f32_f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu -; CHECK-NEXT: vfwmacc.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vfwmacc.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwmacc.nxv2f32.f16( @@ -496,9 +493,8 @@ define @intrinsic_vfwmacc_mask_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv2f32_f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu -; CHECK-NEXT: vfwmacc.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vfwmacc.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwmacc.mask.nxv2f32.f16( @@ -520,9 +516,8 @@ define @intrinsic_vfwmacc_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv4f32_f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu -; CHECK-NEXT: vfwmacc.vf v8, ft0, v10 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vfwmacc.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwmacc.nxv4f32.f16( @@ -544,9 +539,8 @@ define @intrinsic_vfwmacc_mask_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv4f32_f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu -; CHECK-NEXT: vfwmacc.vf v8, ft0, v10, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vfwmacc.vf v8, fa0, v10, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwmacc.mask.nxv4f32.f16( @@ -568,9 +562,8 @@ define @intrinsic_vfwmacc_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv8f32_f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu -; CHECK-NEXT: vfwmacc.vf v8, ft0, v12 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vfwmacc.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwmacc.nxv8f32.f16( @@ -592,9 +585,8 @@ define @intrinsic_vfwmacc_mask_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv8f32_f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu -; CHECK-NEXT: vfwmacc.vf v8, ft0, v12, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vfwmacc.vf v8, fa0, v12, v0.t ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vfwmacc.mask.nxv8f32.f16( @@ -616,9 +608,8 @@ define @intrinsic_vfwmacc_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv16f32_f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu -; CHECK-NEXT: vfwmacc.vf v8, ft0, v16 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vfwmacc.vf v8, fa0, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwmacc.nxv16f32.f16( @@ -640,9 +631,8 @@ define @intrinsic_vfwmacc_mask_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv16f32_f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu -; CHECK-NEXT: vfwmacc.vf v8, ft0, v16, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vfwmacc.vf v8, fa0, v16, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwmacc.mask.nxv16f32.f16( @@ -664,9 +654,8 @@ define @intrinsic_vfwmacc_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv1f64_f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu -; CHECK-NEXT: vfwmacc.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vfwmacc.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwmacc.nxv1f64.f32( @@ -688,9 +677,8 @@ define @intrinsic_vfwmacc_mask_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv1f64_f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu -; CHECK-NEXT: vfwmacc.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vfwmacc.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwmacc.mask.nxv1f64.f32( @@ -712,9 +700,8 @@ define @intrinsic_vfwmacc_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv2f64_f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu -; CHECK-NEXT: vfwmacc.vf v8, ft0, v10 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vfwmacc.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwmacc.nxv2f64.f32( @@ -736,9 +723,8 @@ define @intrinsic_vfwmacc_mask_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv2f64_f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu -; CHECK-NEXT: vfwmacc.vf v8, ft0, v10, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vfwmacc.vf v8, fa0, v10, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwmacc.mask.nxv2f64.f32( @@ -760,9 +746,8 @@ define @intrinsic_vfwmacc_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv4f64_f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu -; CHECK-NEXT: vfwmacc.vf v8, ft0, v12 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vfwmacc.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwmacc.nxv4f64.f32( @@ -784,9 +769,8 @@ define @intrinsic_vfwmacc_mask_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfwmacc_mask_vf_nxv4f64_f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu -; CHECK-NEXT: vfwmacc.vf v8, ft0, v12, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vfwmacc.vf v8, fa0, v12, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwmacc.mask.nxv4f64.f32( @@ -808,9 +792,8 @@ define @intrinsic_vfwmacc_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv8f64_f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu -; CHECK-NEXT: vfwmacc.vf v8, ft0, v16 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vfwmacc.vf v8, fa0, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwmacc.nxv8f64.f32( @@ -832,9 +815,8 @@ define @intrinsic_vfwmacc_mask_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv8f64_f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu -; CHECK-NEXT: vfwmacc.vf v8, ft0, v16, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vfwmacc.vf v8, fa0, v16, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwmacc.mask.nxv8f64.f32( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmsac-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmsac-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwmsac-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwmsac-rv32.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: -target-abi=ilp32d < %s | FileCheck %s declare @llvm.riscv.vfwmsac.nxv1f32.nxv1f16( , , @@ -424,9 +424,8 @@ define @intrinsic_vfwmsac_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv1f32_f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu -; CHECK-NEXT: vfwmsac.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vfwmsac.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwmsac.nxv1f32.f16( @@ -448,9 +447,8 @@ define @intrinsic_vfwmsac_mask_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv1f32_f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu -; CHECK-NEXT: vfwmsac.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vfwmsac.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwmsac.mask.nxv1f32.f16( @@ -472,9 +470,8 @@ define @intrinsic_vfwmsac_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv2f32_f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu -; CHECK-NEXT: vfwmsac.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vfwmsac.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwmsac.nxv2f32.f16( @@ -496,9 +493,8 @@ define @intrinsic_vfwmsac_mask_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv2f32_f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu -; 
CHECK-NEXT: vfwmsac.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vfwmsac.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwmsac.mask.nxv2f32.f16( @@ -520,9 +516,8 @@ define @intrinsic_vfwmsac_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv4f32_f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu -; CHECK-NEXT: vfwmsac.vf v8, ft0, v10 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vfwmsac.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwmsac.nxv4f32.f16( @@ -544,9 +539,8 @@ define @intrinsic_vfwmsac_mask_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv4f32_f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu -; CHECK-NEXT: vfwmsac.vf v8, ft0, v10, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vfwmsac.vf v8, fa0, v10, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwmsac.mask.nxv4f32.f16( @@ -568,9 +562,8 @@ define @intrinsic_vfwmsac_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv8f32_f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu -; CHECK-NEXT: vfwmsac.vf v8, ft0, v12 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vfwmsac.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwmsac.nxv8f32.f16( @@ -592,9 +585,8 @@ define @intrinsic_vfwmsac_mask_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv8f32_f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu -; CHECK-NEXT: vfwmsac.vf v8, ft0, v12, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vfwmsac.vf v8, fa0, v12, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwmsac.mask.nxv8f32.f16( @@ -616,9 +608,8 @@ define @intrinsic_vfwmsac_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv16f32_f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu -; CHECK-NEXT: vfwmsac.vf v8, ft0, v16 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vfwmsac.vf v8, fa0, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwmsac.nxv16f32.f16( @@ -640,9 +631,8 @@ define @intrinsic_vfwmsac_mask_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv16f32_f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu -; CHECK-NEXT: vfwmsac.vf v8, ft0, v16, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vfwmsac.vf v8, fa0, v16, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwmsac.mask.nxv16f32.f16( @@ -664,9 +654,8 @@ define @intrinsic_vfwmsac_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv1f64_f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu -; CHECK-NEXT: vfwmsac.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vfwmsac.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vfwmsac.nxv1f64.f32( @@ -688,9 +677,8 @@ define @intrinsic_vfwmsac_mask_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv1f64_f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu -; CHECK-NEXT: vfwmsac.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vfwmsac.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwmsac.mask.nxv1f64.f32( @@ -712,9 +700,8 @@ define @intrinsic_vfwmsac_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv2f64_f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu -; CHECK-NEXT: vfwmsac.vf v8, ft0, v10 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vfwmsac.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwmsac.nxv2f64.f32( @@ -736,9 +723,8 @@ define @intrinsic_vfwmsac_mask_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv2f64_f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu -; CHECK-NEXT: vfwmsac.vf v8, ft0, v10, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vfwmsac.vf v8, fa0, v10, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwmsac.mask.nxv2f64.f32( @@ -760,9 +746,8 @@ define @intrinsic_vfwmsac_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv4f64_f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu -; CHECK-NEXT: vfwmsac.vf v8, ft0, v12 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vfwmsac.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwmsac.nxv4f64.f32( @@ -784,9 +769,8 @@ define @intrinsic_vfwmsac_mask_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv4f64_f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu -; CHECK-NEXT: vfwmsac.vf v8, ft0, v12, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vfwmsac.vf v8, fa0, v12, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwmsac.mask.nxv4f64.f32( @@ -808,9 +792,8 @@ define @intrinsic_vfwmsac_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv8f64_f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu -; CHECK-NEXT: vfwmsac.vf v8, ft0, v16 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vfwmsac.vf v8, fa0, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwmsac.nxv8f64.f32( @@ -832,9 +815,8 @@ define @intrinsic_vfwmsac_mask_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv8f64_f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu -; CHECK-NEXT: vfwmsac.vf v8, ft0, v16, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vfwmsac.vf v8, fa0, v16, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwmsac.mask.nxv8f64.f32( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmsac-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmsac-rv64.ll --- 
a/llvm/test/CodeGen/RISCV/rvv/vfwmsac-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwmsac-rv64.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: -target-abi=lp64d < %s | FileCheck %s declare @llvm.riscv.vfwmsac.nxv1f32.nxv1f16( , , @@ -424,9 +424,8 @@ define @intrinsic_vfwmsac_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv1f32_f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu -; CHECK-NEXT: vfwmsac.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vfwmsac.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwmsac.nxv1f32.f16( @@ -448,9 +447,8 @@ define @intrinsic_vfwmsac_mask_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv1f32_f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu -; CHECK-NEXT: vfwmsac.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vfwmsac.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwmsac.mask.nxv1f32.f16( @@ -472,9 +470,8 @@ define @intrinsic_vfwmsac_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv2f32_f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu -; CHECK-NEXT: vfwmsac.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vfwmsac.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwmsac.nxv2f32.f16( @@ -496,9 +493,8 @@ define @intrinsic_vfwmsac_mask_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv2f32_f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu -; CHECK-NEXT: vfwmsac.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vfwmsac.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwmsac.mask.nxv2f32.f16( @@ -520,9 +516,8 @@ define @intrinsic_vfwmsac_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv4f32_f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu -; CHECK-NEXT: vfwmsac.vf v8, ft0, v10 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vfwmsac.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwmsac.nxv4f32.f16( @@ -544,9 +539,8 @@ define @intrinsic_vfwmsac_mask_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv4f32_f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu -; CHECK-NEXT: vfwmsac.vf v8, ft0, v10, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vfwmsac.vf v8, fa0, v10, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwmsac.mask.nxv4f32.f16( @@ -568,9 +562,8 @@ define @intrinsic_vfwmsac_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv8f32_f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu -; 
CHECK-NEXT: vfwmsac.vf v8, ft0, v12 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vfwmsac.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwmsac.nxv8f32.f16( @@ -592,9 +585,8 @@ define @intrinsic_vfwmsac_mask_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv8f32_f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu -; CHECK-NEXT: vfwmsac.vf v8, ft0, v12, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vfwmsac.vf v8, fa0, v12, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwmsac.mask.nxv8f32.f16( @@ -616,9 +608,8 @@ define @intrinsic_vfwmsac_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv16f32_f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu -; CHECK-NEXT: vfwmsac.vf v8, ft0, v16 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vfwmsac.vf v8, fa0, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwmsac.nxv16f32.f16( @@ -640,9 +631,8 @@ define @intrinsic_vfwmsac_mask_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv16f32_f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu -; CHECK-NEXT: vfwmsac.vf v8, ft0, v16, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vfwmsac.vf v8, fa0, v16, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwmsac.mask.nxv16f32.f16( @@ -664,9 +654,8 @@ define @intrinsic_vfwmsac_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv1f64_f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu -; CHECK-NEXT: vfwmsac.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vfwmsac.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwmsac.nxv1f64.f32( @@ -688,9 +677,8 @@ define @intrinsic_vfwmsac_mask_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv1f64_f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu -; CHECK-NEXT: vfwmsac.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vfwmsac.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwmsac.mask.nxv1f64.f32( @@ -712,9 +700,8 @@ define @intrinsic_vfwmsac_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv2f64_f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu -; CHECK-NEXT: vfwmsac.vf v8, ft0, v10 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vfwmsac.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwmsac.nxv2f64.f32( @@ -736,9 +723,8 @@ define @intrinsic_vfwmsac_mask_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv2f64_f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu -; CHECK-NEXT: vfwmsac.vf v8, ft0, v10, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vfwmsac.vf v8, fa0, v10, v0.t ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vfwmsac.mask.nxv2f64.f32( @@ -760,9 +746,8 @@ define @intrinsic_vfwmsac_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv4f64_f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu -; CHECK-NEXT: vfwmsac.vf v8, ft0, v12 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vfwmsac.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwmsac.nxv4f64.f32( @@ -784,9 +769,8 @@ define @intrinsic_vfwmsac_mask_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv4f64_f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu -; CHECK-NEXT: vfwmsac.vf v8, ft0, v12, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vfwmsac.vf v8, fa0, v12, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwmsac.mask.nxv4f64.f32( @@ -808,9 +792,8 @@ define @intrinsic_vfwmsac_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv8f64_f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu -; CHECK-NEXT: vfwmsac.vf v8, ft0, v16 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vfwmsac.vf v8, fa0, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwmsac.nxv8f64.f32( @@ -832,9 +815,8 @@ define @intrinsic_vfwmsac_mask_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv8f64_f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu -; CHECK-NEXT: vfwmsac.vf v8, ft0, v16, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vfwmsac.vf v8, fa0, v16, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwmsac.mask.nxv8f64.f32( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmul-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmul-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwmul-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwmul-rv32.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: -target-abi=ilp32d < %s | FileCheck %s declare @llvm.riscv.vfwmul.nxv1f32.nxv1f16.nxv1f16( , , @@ -423,9 +423,8 @@ define @intrinsic_vfwmul_vf_nxv1f32_nxv1f16_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vf_nxv1f32_nxv1f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vfwmul.vf v9, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfwmul.vf v9, v8, fa0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret entry: @@ -448,9 +447,8 @@ define @intrinsic_vfwmul_mask_vf_nxv1f32_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv1f32_nxv1f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vfwmul.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfwmul.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.f16( @@ -471,9 +469,8 @@ define @intrinsic_vfwmul_vf_nxv2f32_nxv2f16_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: 
intrinsic_vfwmul_vf_nxv2f32_nxv2f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vfwmul.vf v9, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfwmul.vf v9, v8, fa0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret entry: @@ -496,9 +493,8 @@ define @intrinsic_vfwmul_mask_vf_nxv2f32_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv2f32_nxv2f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vfwmul.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfwmul.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwmul.mask.nxv2f32.nxv2f16.f16( @@ -519,9 +515,8 @@ define @intrinsic_vfwmul_vf_nxv4f32_nxv4f16_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vf_nxv4f32_nxv4f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vfwmul.vf v10, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfwmul.vf v10, v8, fa0 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret entry: @@ -544,9 +539,8 @@ define @intrinsic_vfwmul_mask_vf_nxv4f32_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv4f32_nxv4f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vfwmul.vf v8, v10, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfwmul.vf v8, v10, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4f16.f16( @@ -567,9 +561,8 @@ define @intrinsic_vfwmul_vf_nxv8f32_nxv8f16_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vf_nxv8f32_nxv8f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vfwmul.vf v12, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfwmul.vf v12, v8, fa0 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret entry: @@ -592,9 +585,8 @@ define @intrinsic_vfwmul_mask_vf_nxv8f32_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv8f32_nxv8f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vfwmul.vf v8, v12, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfwmul.vf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwmul.mask.nxv8f32.nxv8f16.f16( @@ -615,9 +607,8 @@ define @intrinsic_vfwmul_vf_nxv16f32_nxv16f16_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vf_nxv16f32_nxv16f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vfwmul.vf v16, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfwmul.vf v16, v8, fa0 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret entry: @@ -640,9 +631,8 @@ define @intrinsic_vfwmul_mask_vf_nxv16f32_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv16f32_nxv16f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vfwmul.vf v8, v16, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: 
vfwmul.vf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwmul.mask.nxv16f32.nxv16f16.f16( @@ -663,9 +653,8 @@ define @intrinsic_vfwmul_vf_nxv1f64_nxv1f32_f32( %0, float %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vf_nxv1f64_nxv1f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vfwmul.vf v9, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfwmul.vf v9, v8, fa0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret entry: @@ -688,9 +677,8 @@ define @intrinsic_vfwmul_mask_vf_nxv1f64_nxv1f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv1f64_nxv1f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vfwmul.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfwmul.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.f32( @@ -711,9 +699,8 @@ define @intrinsic_vfwmul_vf_nxv2f64_nxv2f32_f32( %0, float %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vf_nxv2f64_nxv2f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vfwmul.vf v10, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfwmul.vf v10, v8, fa0 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret entry: @@ -736,9 +723,8 @@ define @intrinsic_vfwmul_mask_vf_nxv2f64_nxv2f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv2f64_nxv2f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vfwmul.vf v8, v10, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfwmul.vf v8, v10, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.f32( @@ -759,9 +745,8 @@ define @intrinsic_vfwmul_vf_nxv4f64_nxv4f32_f32( %0, float %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vf_nxv4f64_nxv4f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vfwmul.vf v12, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfwmul.vf v12, v8, fa0 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret entry: @@ -784,9 +769,8 @@ define @intrinsic_vfwmul_mask_vf_nxv4f64_nxv4f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv4f64_nxv4f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vfwmul.vf v8, v12, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfwmul.vf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.f32( @@ -807,9 +791,8 @@ define @intrinsic_vfwmul_vf_nxv8f64_nxv8f32_f32( %0, float %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vf_nxv8f64_nxv8f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vfwmul.vf v16, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfwmul.vf v16, v8, fa0 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret entry: @@ -832,9 +815,8 @@ define @intrinsic_vfwmul_mask_vf_nxv8f64_nxv8f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfwmul_mask_vf_nxv8f64_nxv8f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vfwmul.vf v8, v16, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfwmul.vf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.f32( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmul-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmul-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwmul-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwmul-rv64.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: -target-abi=lp64d < %s | FileCheck %s declare @llvm.riscv.vfwmul.nxv1f32.nxv1f16.nxv1f16( , , @@ -423,9 +423,8 @@ define @intrinsic_vfwmul_vf_nxv1f32_nxv1f16_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vf_nxv1f32_nxv1f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vfwmul.vf v9, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfwmul.vf v9, v8, fa0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret entry: @@ -448,9 +447,8 @@ define @intrinsic_vfwmul_mask_vf_nxv1f32_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv1f32_nxv1f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vfwmul.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfwmul.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.f16( @@ -471,9 +469,8 @@ define @intrinsic_vfwmul_vf_nxv2f32_nxv2f16_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vf_nxv2f32_nxv2f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vfwmul.vf v9, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfwmul.vf v9, v8, fa0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret entry: @@ -496,9 +493,8 @@ define @intrinsic_vfwmul_mask_vf_nxv2f32_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv2f32_nxv2f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vfwmul.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfwmul.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwmul.mask.nxv2f32.nxv2f16.f16( @@ -519,9 +515,8 @@ define @intrinsic_vfwmul_vf_nxv4f32_nxv4f16_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vf_nxv4f32_nxv4f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vfwmul.vf v10, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfwmul.vf v10, v8, fa0 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret entry: @@ -544,9 +539,8 @@ define @intrinsic_vfwmul_mask_vf_nxv4f32_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv4f32_nxv4f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vfwmul.vf v8, v10, ft0, v0.t +; CHECK-NEXT: vsetvli 
zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfwmul.vf v8, v10, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4f16.f16( @@ -567,9 +561,8 @@ define @intrinsic_vfwmul_vf_nxv8f32_nxv8f16_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vf_nxv8f32_nxv8f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vfwmul.vf v12, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfwmul.vf v12, v8, fa0 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret entry: @@ -592,9 +585,8 @@ define @intrinsic_vfwmul_mask_vf_nxv8f32_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv8f32_nxv8f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vfwmul.vf v8, v12, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfwmul.vf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwmul.mask.nxv8f32.nxv8f16.f16( @@ -615,9 +607,8 @@ define @intrinsic_vfwmul_vf_nxv16f32_nxv16f16_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vf_nxv16f32_nxv16f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vfwmul.vf v16, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfwmul.vf v16, v8, fa0 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret entry: @@ -640,9 +631,8 @@ define @intrinsic_vfwmul_mask_vf_nxv16f32_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv16f32_nxv16f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vfwmul.vf v8, v16, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfwmul.vf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwmul.mask.nxv16f32.nxv16f16.f16( @@ -663,9 +653,8 @@ define @intrinsic_vfwmul_vf_nxv1f64_nxv1f32_f32( %0, float %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vf_nxv1f64_nxv1f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vfwmul.vf v9, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfwmul.vf v9, v8, fa0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret entry: @@ -688,9 +677,8 @@ define @intrinsic_vfwmul_mask_vf_nxv1f64_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv1f64_nxv1f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vfwmul.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfwmul.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.f32( @@ -711,9 +699,8 @@ define @intrinsic_vfwmul_vf_nxv2f64_nxv2f32_f32( %0, float %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vf_nxv2f64_nxv2f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vfwmul.vf v10, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfwmul.vf v10, v8, fa0 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret entry: @@ -736,9 +723,8 @@ define @intrinsic_vfwmul_mask_vf_nxv2f64_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; 
CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv2f64_nxv2f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vfwmul.vf v8, v10, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfwmul.vf v8, v10, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.f32( @@ -759,9 +745,8 @@ define @intrinsic_vfwmul_vf_nxv4f64_nxv4f32_f32( %0, float %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vf_nxv4f64_nxv4f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vfwmul.vf v12, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfwmul.vf v12, v8, fa0 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret entry: @@ -784,9 +769,8 @@ define @intrinsic_vfwmul_mask_vf_nxv4f64_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv4f64_nxv4f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vfwmul.vf v8, v12, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfwmul.vf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.f32( @@ -807,9 +791,8 @@ define @intrinsic_vfwmul_vf_nxv8f64_nxv8f32_f32( %0, float %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vf_nxv8f64_nxv8f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vfwmul.vf v16, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfwmul.vf v16, v8, fa0 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret entry: @@ -832,9 +815,8 @@ define @intrinsic_vfwmul_mask_vf_nxv8f64_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv8f64_nxv8f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vfwmul.vf v8, v16, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfwmul.vf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.f32( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-rv32.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: -target-abi=ilp32d < %s | FileCheck %s declare @llvm.riscv.vfwnmacc.nxv1f32.nxv1f16( , , @@ -424,9 +424,8 @@ define @intrinsic_vfwnmacc_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv1f32_f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu -; CHECK-NEXT: vfwnmacc.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vfwnmacc.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwnmacc.nxv1f32.f16( @@ -448,9 +447,8 @@ define @intrinsic_vfwnmacc_mask_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv1f32_f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu -; CHECK-NEXT: 
vfwnmacc.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vfwnmacc.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwnmacc.mask.nxv1f32.f16( @@ -472,9 +470,8 @@ define @intrinsic_vfwnmacc_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv2f32_f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu -; CHECK-NEXT: vfwnmacc.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vfwnmacc.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwnmacc.nxv2f32.f16( @@ -496,9 +493,8 @@ define @intrinsic_vfwnmacc_mask_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv2f32_f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu -; CHECK-NEXT: vfwnmacc.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vfwnmacc.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwnmacc.mask.nxv2f32.f16( @@ -520,9 +516,8 @@ define @intrinsic_vfwnmacc_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv4f32_f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu -; CHECK-NEXT: vfwnmacc.vf v8, ft0, v10 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vfwnmacc.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwnmacc.nxv4f32.f16( @@ -544,9 +539,8 @@ define @intrinsic_vfwnmacc_mask_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv4f32_f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu -; CHECK-NEXT: vfwnmacc.vf v8, ft0, v10, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vfwnmacc.vf v8, fa0, v10, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwnmacc.mask.nxv4f32.f16( @@ -568,9 +562,8 @@ define @intrinsic_vfwnmacc_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv8f32_f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu -; CHECK-NEXT: vfwnmacc.vf v8, ft0, v12 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vfwnmacc.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwnmacc.nxv8f32.f16( @@ -592,9 +585,8 @@ define @intrinsic_vfwnmacc_mask_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv8f32_f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu -; CHECK-NEXT: vfwnmacc.vf v8, ft0, v12, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vfwnmacc.vf v8, fa0, v12, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwnmacc.mask.nxv8f32.f16( @@ -616,9 +608,8 @@ define @intrinsic_vfwnmacc_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv16f32_f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu -; CHECK-NEXT: vfwnmacc.vf v8, ft0, v16 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vfwnmacc.vf v8, fa0, v16 ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vfwnmacc.nxv16f32.f16( @@ -640,9 +631,8 @@ define @intrinsic_vfwnmacc_mask_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv16f32_f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu -; CHECK-NEXT: vfwnmacc.vf v8, ft0, v16, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vfwnmacc.vf v8, fa0, v16, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwnmacc.mask.nxv16f32.f16( @@ -664,9 +654,8 @@ define @intrinsic_vfwnmacc_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv1f64_f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu -; CHECK-NEXT: vfwnmacc.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vfwnmacc.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwnmacc.nxv1f64.f32( @@ -688,9 +677,8 @@ define @intrinsic_vfwnmacc_mask_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv1f64_f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu -; CHECK-NEXT: vfwnmacc.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vfwnmacc.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwnmacc.mask.nxv1f64.f32( @@ -712,9 +700,8 @@ define @intrinsic_vfwnmacc_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv2f64_f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu -; CHECK-NEXT: vfwnmacc.vf v8, ft0, v10 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vfwnmacc.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwnmacc.nxv2f64.f32( @@ -736,9 +723,8 @@ define @intrinsic_vfwnmacc_mask_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv2f64_f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu -; CHECK-NEXT: vfwnmacc.vf v8, ft0, v10, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vfwnmacc.vf v8, fa0, v10, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwnmacc.mask.nxv2f64.f32( @@ -760,9 +746,8 @@ define @intrinsic_vfwnmacc_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv4f64_f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu -; CHECK-NEXT: vfwnmacc.vf v8, ft0, v12 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vfwnmacc.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwnmacc.nxv4f64.f32( @@ -784,9 +769,8 @@ define @intrinsic_vfwnmacc_mask_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv4f64_f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu -; CHECK-NEXT: vfwnmacc.vf v8, ft0, v12, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vfwnmacc.vf v8, fa0, v12, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwnmacc.mask.nxv4f64.f32( @@ -808,9 +792,8 @@ define @intrinsic_vfwnmacc_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, i32 
%3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv8f64_f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu -; CHECK-NEXT: vfwnmacc.vf v8, ft0, v16 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vfwnmacc.vf v8, fa0, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwnmacc.nxv8f64.f32( @@ -832,9 +815,8 @@ define @intrinsic_vfwnmacc_mask_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv8f64_f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu -; CHECK-NEXT: vfwnmacc.vf v8, ft0, v16, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vfwnmacc.vf v8, fa0, v16, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwnmacc.mask.nxv8f64.f32( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-rv64.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: -target-abi=lp64d < %s | FileCheck %s declare @llvm.riscv.vfwnmacc.nxv1f32.nxv1f16( , , @@ -424,9 +424,8 @@ define @intrinsic_vfwnmacc_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv1f32_f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu -; CHECK-NEXT: vfwnmacc.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vfwnmacc.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwnmacc.nxv1f32.f16( @@ -448,9 +447,8 @@ define @intrinsic_vfwnmacc_mask_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv1f32_f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu -; CHECK-NEXT: vfwnmacc.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vfwnmacc.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwnmacc.mask.nxv1f32.f16( @@ -472,9 +470,8 @@ define @intrinsic_vfwnmacc_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv2f32_f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu -; CHECK-NEXT: vfwnmacc.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vfwnmacc.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwnmacc.nxv2f32.f16( @@ -496,9 +493,8 @@ define @intrinsic_vfwnmacc_mask_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv2f32_f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu -; CHECK-NEXT: vfwnmacc.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vfwnmacc.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwnmacc.mask.nxv2f32.f16( @@ -520,9 +516,8 @@ define @intrinsic_vfwnmacc_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv4f32_f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu -; CHECK-NEXT: vfwnmacc.vf v8, ft0, v10 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vfwnmacc.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwnmacc.nxv4f32.f16( @@ -544,9 +539,8 @@ define @intrinsic_vfwnmacc_mask_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv4f32_f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu -; CHECK-NEXT: vfwnmacc.vf v8, ft0, v10, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vfwnmacc.vf v8, fa0, v10, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwnmacc.mask.nxv4f32.f16( @@ -568,9 +562,8 @@ define @intrinsic_vfwnmacc_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv8f32_f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu -; CHECK-NEXT: vfwnmacc.vf v8, ft0, v12 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vfwnmacc.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwnmacc.nxv8f32.f16( @@ -592,9 +585,8 @@ define @intrinsic_vfwnmacc_mask_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv8f32_f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu -; CHECK-NEXT: vfwnmacc.vf v8, ft0, v12, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vfwnmacc.vf v8, fa0, v12, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwnmacc.mask.nxv8f32.f16( @@ -616,9 +608,8 @@ define @intrinsic_vfwnmacc_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv16f32_f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu -; CHECK-NEXT: vfwnmacc.vf v8, ft0, v16 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vfwnmacc.vf v8, fa0, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwnmacc.nxv16f32.f16( @@ -640,9 +631,8 @@ define @intrinsic_vfwnmacc_mask_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv16f32_f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu -; CHECK-NEXT: vfwnmacc.vf v8, ft0, v16, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vfwnmacc.vf v8, fa0, v16, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwnmacc.mask.nxv16f32.f16( @@ -664,9 +654,8 @@ define @intrinsic_vfwnmacc_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv1f64_f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu -; CHECK-NEXT: vfwnmacc.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vfwnmacc.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwnmacc.nxv1f64.f32( @@ -688,9 +677,8 @@ define @intrinsic_vfwnmacc_mask_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv1f64_f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu -; CHECK-NEXT: vfwnmacc.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu 
+; CHECK-NEXT: vfwnmacc.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwnmacc.mask.nxv1f64.f32( @@ -712,9 +700,8 @@ define @intrinsic_vfwnmacc_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv2f64_f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu -; CHECK-NEXT: vfwnmacc.vf v8, ft0, v10 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vfwnmacc.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwnmacc.nxv2f64.f32( @@ -736,9 +723,8 @@ define @intrinsic_vfwnmacc_mask_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv2f64_f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu -; CHECK-NEXT: vfwnmacc.vf v8, ft0, v10, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vfwnmacc.vf v8, fa0, v10, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwnmacc.mask.nxv2f64.f32( @@ -760,9 +746,8 @@ define @intrinsic_vfwnmacc_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv4f64_f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu -; CHECK-NEXT: vfwnmacc.vf v8, ft0, v12 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vfwnmacc.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwnmacc.nxv4f64.f32( @@ -784,9 +769,8 @@ define @intrinsic_vfwnmacc_mask_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv4f64_f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu -; CHECK-NEXT: vfwnmacc.vf v8, ft0, v12, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vfwnmacc.vf v8, fa0, v12, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwnmacc.mask.nxv4f64.f32( @@ -808,9 +792,8 @@ define @intrinsic_vfwnmacc_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv8f64_f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu -; CHECK-NEXT: vfwnmacc.vf v8, ft0, v16 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vfwnmacc.vf v8, fa0, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwnmacc.nxv8f64.f32( @@ -832,9 +815,8 @@ define @intrinsic_vfwnmacc_mask_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv8f64_f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu -; CHECK-NEXT: vfwnmacc.vf v8, ft0, v16, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vfwnmacc.vf v8, fa0, v16, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwnmacc.mask.nxv8f64.f32( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-rv32.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: -target-abi=ilp32d < %s | FileCheck %s declare @llvm.riscv.vfwnmsac.nxv1f32.nxv1f16( , , @@ 
-424,9 +424,8 @@ define @intrinsic_vfwnmsac_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv1f32_f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu -; CHECK-NEXT: vfwnmsac.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vfwnmsac.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwnmsac.nxv1f32.f16( @@ -448,9 +447,8 @@ define @intrinsic_vfwnmsac_mask_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv1f32_f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu -; CHECK-NEXT: vfwnmsac.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vfwnmsac.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwnmsac.mask.nxv1f32.f16( @@ -472,9 +470,8 @@ define @intrinsic_vfwnmsac_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv2f32_f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu -; CHECK-NEXT: vfwnmsac.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vfwnmsac.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwnmsac.nxv2f32.f16( @@ -496,9 +493,8 @@ define @intrinsic_vfwnmsac_mask_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv2f32_f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu -; CHECK-NEXT: vfwnmsac.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vfwnmsac.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwnmsac.mask.nxv2f32.f16( @@ -520,9 +516,8 @@ define @intrinsic_vfwnmsac_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv4f32_f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu -; CHECK-NEXT: vfwnmsac.vf v8, ft0, v10 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vfwnmsac.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwnmsac.nxv4f32.f16( @@ -544,9 +539,8 @@ define @intrinsic_vfwnmsac_mask_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv4f32_f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu -; CHECK-NEXT: vfwnmsac.vf v8, ft0, v10, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vfwnmsac.vf v8, fa0, v10, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwnmsac.mask.nxv4f32.f16( @@ -568,9 +562,8 @@ define @intrinsic_vfwnmsac_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv8f32_f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu -; CHECK-NEXT: vfwnmsac.vf v8, ft0, v12 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vfwnmsac.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwnmsac.nxv8f32.f16( @@ -592,9 +585,8 @@ define @intrinsic_vfwnmsac_mask_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfwnmsac_mask_vf_nxv8f32_f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu -; CHECK-NEXT: vfwnmsac.vf v8, ft0, v12, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vfwnmsac.vf v8, fa0, v12, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwnmsac.mask.nxv8f32.f16( @@ -616,9 +608,8 @@ define @intrinsic_vfwnmsac_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv16f32_f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu -; CHECK-NEXT: vfwnmsac.vf v8, ft0, v16 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vfwnmsac.vf v8, fa0, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwnmsac.nxv16f32.f16( @@ -640,9 +631,8 @@ define @intrinsic_vfwnmsac_mask_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv16f32_f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu -; CHECK-NEXT: vfwnmsac.vf v8, ft0, v16, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vfwnmsac.vf v8, fa0, v16, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwnmsac.mask.nxv16f32.f16( @@ -664,9 +654,8 @@ define @intrinsic_vfwnmsac_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv1f64_f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu -; CHECK-NEXT: vfwnmsac.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vfwnmsac.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwnmsac.nxv1f64.f32( @@ -688,9 +677,8 @@ define @intrinsic_vfwnmsac_mask_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv1f64_f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu -; CHECK-NEXT: vfwnmsac.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vfwnmsac.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwnmsac.mask.nxv1f64.f32( @@ -712,9 +700,8 @@ define @intrinsic_vfwnmsac_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv2f64_f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu -; CHECK-NEXT: vfwnmsac.vf v8, ft0, v10 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vfwnmsac.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwnmsac.nxv2f64.f32( @@ -736,9 +723,8 @@ define @intrinsic_vfwnmsac_mask_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv2f64_f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu -; CHECK-NEXT: vfwnmsac.vf v8, ft0, v10, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vfwnmsac.vf v8, fa0, v10, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwnmsac.mask.nxv2f64.f32( @@ -760,9 +746,8 @@ define @intrinsic_vfwnmsac_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv4f64_f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, 
e32, m2, tu, mu -; CHECK-NEXT: vfwnmsac.vf v8, ft0, v12 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vfwnmsac.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwnmsac.nxv4f64.f32( @@ -784,9 +769,8 @@ define @intrinsic_vfwnmsac_mask_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv4f64_f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu -; CHECK-NEXT: vfwnmsac.vf v8, ft0, v12, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vfwnmsac.vf v8, fa0, v12, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwnmsac.mask.nxv4f64.f32( @@ -808,9 +792,8 @@ define @intrinsic_vfwnmsac_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv8f64_f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu -; CHECK-NEXT: vfwnmsac.vf v8, ft0, v16 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vfwnmsac.vf v8, fa0, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwnmsac.nxv8f64.f32( @@ -832,9 +815,8 @@ define @intrinsic_vfwnmsac_mask_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv8f64_f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu -; CHECK-NEXT: vfwnmsac.vf v8, ft0, v16, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vfwnmsac.vf v8, fa0, v16, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwnmsac.mask.nxv8f64.f32( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-rv64.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: -target-abi=lp64d < %s | FileCheck %s declare @llvm.riscv.vfwnmsac.nxv1f32.nxv1f16( , , @@ -424,9 +424,8 @@ define @intrinsic_vfwnmsac_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv1f32_f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu -; CHECK-NEXT: vfwnmsac.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vfwnmsac.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwnmsac.nxv1f32.f16( @@ -448,9 +447,8 @@ define @intrinsic_vfwnmsac_mask_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv1f32_f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu -; CHECK-NEXT: vfwnmsac.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vfwnmsac.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwnmsac.mask.nxv1f32.f16( @@ -472,9 +470,8 @@ define @intrinsic_vfwnmsac_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv2f32_f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu -; CHECK-NEXT: vfwnmsac.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: 
vfwnmsac.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwnmsac.nxv2f32.f16( @@ -496,9 +493,8 @@ define @intrinsic_vfwnmsac_mask_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv2f32_f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu -; CHECK-NEXT: vfwnmsac.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vfwnmsac.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwnmsac.mask.nxv2f32.f16( @@ -520,9 +516,8 @@ define @intrinsic_vfwnmsac_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv4f32_f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu -; CHECK-NEXT: vfwnmsac.vf v8, ft0, v10 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vfwnmsac.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwnmsac.nxv4f32.f16( @@ -544,9 +539,8 @@ define @intrinsic_vfwnmsac_mask_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv4f32_f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu -; CHECK-NEXT: vfwnmsac.vf v8, ft0, v10, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vfwnmsac.vf v8, fa0, v10, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwnmsac.mask.nxv4f32.f16( @@ -568,9 +562,8 @@ define @intrinsic_vfwnmsac_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv8f32_f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu -; CHECK-NEXT: vfwnmsac.vf v8, ft0, v12 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vfwnmsac.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwnmsac.nxv8f32.f16( @@ -592,9 +585,8 @@ define @intrinsic_vfwnmsac_mask_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv8f32_f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu -; CHECK-NEXT: vfwnmsac.vf v8, ft0, v12, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vfwnmsac.vf v8, fa0, v12, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwnmsac.mask.nxv8f32.f16( @@ -616,9 +608,8 @@ define @intrinsic_vfwnmsac_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv16f32_f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu -; CHECK-NEXT: vfwnmsac.vf v8, ft0, v16 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vfwnmsac.vf v8, fa0, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwnmsac.nxv16f32.f16( @@ -640,9 +631,8 @@ define @intrinsic_vfwnmsac_mask_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv16f32_f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu -; CHECK-NEXT: vfwnmsac.vf v8, ft0, v16, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vfwnmsac.vf v8, fa0, v16, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwnmsac.mask.nxv16f32.f16( @@ -664,9 +654,8 @@ define 
@intrinsic_vfwnmsac_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv1f64_f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu -; CHECK-NEXT: vfwnmsac.vf v8, ft0, v9 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vfwnmsac.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwnmsac.nxv1f64.f32( @@ -688,9 +677,8 @@ define @intrinsic_vfwnmsac_mask_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv1f64_f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu -; CHECK-NEXT: vfwnmsac.vf v8, ft0, v9, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vfwnmsac.vf v8, fa0, v9, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwnmsac.mask.nxv1f64.f32( @@ -712,9 +700,8 @@ define @intrinsic_vfwnmsac_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv2f64_f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu -; CHECK-NEXT: vfwnmsac.vf v8, ft0, v10 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vfwnmsac.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwnmsac.nxv2f64.f32( @@ -736,9 +723,8 @@ define @intrinsic_vfwnmsac_mask_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv2f64_f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu -; CHECK-NEXT: vfwnmsac.vf v8, ft0, v10, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vfwnmsac.vf v8, fa0, v10, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwnmsac.mask.nxv2f64.f32( @@ -760,9 +746,8 @@ define @intrinsic_vfwnmsac_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv4f64_f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu -; CHECK-NEXT: vfwnmsac.vf v8, ft0, v12 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vfwnmsac.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwnmsac.nxv4f64.f32( @@ -784,9 +769,8 @@ define @intrinsic_vfwnmsac_mask_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv4f64_f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu -; CHECK-NEXT: vfwnmsac.vf v8, ft0, v12, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vfwnmsac.vf v8, fa0, v12, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwnmsac.mask.nxv4f64.f32( @@ -808,9 +792,8 @@ define @intrinsic_vfwnmsac_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv8f64_f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu -; CHECK-NEXT: vfwnmsac.vf v8, ft0, v16 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vfwnmsac.vf v8, fa0, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwnmsac.nxv8f64.f32( @@ -832,9 +815,8 @@ define @intrinsic_vfwnmsac_mask_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv8f64_f32_nxv8f32: ; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu -; CHECK-NEXT: vfwnmsac.vf v8, ft0, v16, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vfwnmsac.vf v8, fa0, v16, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwnmsac.mask.nxv8f64.f32( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwsub-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwsub-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwsub-rv32.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: -target-abi=ilp32d < %s | FileCheck %s declare @llvm.riscv.vfwsub.nxv1f32.nxv1f16.nxv1f16( , , @@ -423,9 +423,8 @@ define @intrinsic_vfwsub_vf_nxv1f32_nxv1f16_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vf_nxv1f32_nxv1f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vfwsub.vf v9, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfwsub.vf v9, v8, fa0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret entry: @@ -448,9 +447,8 @@ define @intrinsic_vfwsub_mask_vf_nxv1f32_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv1f32_nxv1f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vfwsub.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfwsub.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1f16.f16( @@ -471,9 +469,8 @@ define @intrinsic_vfwsub_vf_nxv2f32_nxv2f16_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vf_nxv2f32_nxv2f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vfwsub.vf v9, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfwsub.vf v9, v8, fa0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret entry: @@ -496,9 +493,8 @@ define @intrinsic_vfwsub_mask_vf_nxv2f32_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv2f32_nxv2f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vfwsub.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfwsub.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2f16.f16( @@ -519,9 +515,8 @@ define @intrinsic_vfwsub_vf_nxv4f32_nxv4f16_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vf_nxv4f32_nxv4f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vfwsub.vf v10, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfwsub.vf v10, v8, fa0 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret entry: @@ -544,9 +539,8 @@ define @intrinsic_vfwsub_mask_vf_nxv4f32_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv4f32_nxv4f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vfwsub.vf v8, v10, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfwsub.vf v8, v10, 
fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4f16.f16( @@ -567,9 +561,8 @@ define @intrinsic_vfwsub_vf_nxv8f32_nxv8f16_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vf_nxv8f32_nxv8f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vfwsub.vf v12, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfwsub.vf v12, v8, fa0 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret entry: @@ -592,9 +585,8 @@ define @intrinsic_vfwsub_mask_vf_nxv8f32_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv8f32_nxv8f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vfwsub.vf v8, v12, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfwsub.vf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8f16.f16( @@ -615,9 +607,8 @@ define @intrinsic_vfwsub_vf_nxv16f32_nxv16f16_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vf_nxv16f32_nxv16f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vfwsub.vf v16, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfwsub.vf v16, v8, fa0 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret entry: @@ -640,9 +631,8 @@ define @intrinsic_vfwsub_mask_vf_nxv16f32_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv16f32_nxv16f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vfwsub.vf v8, v16, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfwsub.vf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16f16.f16( @@ -663,9 +653,8 @@ define @intrinsic_vfwsub_vf_nxv1f64_nxv1f32_f32( %0, float %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vf_nxv1f64_nxv1f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vfwsub.vf v9, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfwsub.vf v9, v8, fa0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret entry: @@ -688,9 +677,8 @@ define @intrinsic_vfwsub_mask_vf_nxv1f64_nxv1f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv1f64_nxv1f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vfwsub.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfwsub.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.f32( @@ -711,9 +699,8 @@ define @intrinsic_vfwsub_vf_nxv2f64_nxv2f32_f32( %0, float %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vf_nxv2f64_nxv2f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vfwsub.vf v10, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfwsub.vf v10, v8, fa0 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret entry: @@ -736,9 +723,8 @@ define @intrinsic_vfwsub_mask_vf_nxv2f64_nxv2f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv2f64_nxv2f32_f32: ; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vfwsub.vf v8, v10, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfwsub.vf v8, v10, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.f32( @@ -759,9 +745,8 @@ define @intrinsic_vfwsub_vf_nxv4f64_nxv4f32_f32( %0, float %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vf_nxv4f64_nxv4f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vfwsub.vf v12, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfwsub.vf v12, v8, fa0 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret entry: @@ -784,9 +769,8 @@ define @intrinsic_vfwsub_mask_vf_nxv4f64_nxv4f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv4f64_nxv4f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vfwsub.vf v8, v12, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfwsub.vf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.f32( @@ -807,9 +791,8 @@ define @intrinsic_vfwsub_vf_nxv8f64_nxv8f32_f32( %0, float %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vf_nxv8f64_nxv8f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vfwsub.vf v16, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfwsub.vf v16, v8, fa0 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret entry: @@ -832,9 +815,8 @@ define @intrinsic_vfwsub_mask_vf_nxv8f64_nxv8f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv8f64_nxv8f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vfwsub.vf v8, v16, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfwsub.vf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.f32( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwsub-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwsub-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwsub-rv64.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: -target-abi=lp64d < %s | FileCheck %s declare @llvm.riscv.vfwsub.nxv1f32.nxv1f16.nxv1f16( , , @@ -423,9 +423,8 @@ define @intrinsic_vfwsub_vf_nxv1f32_nxv1f16_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vf_nxv1f32_nxv1f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vfwsub.vf v9, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfwsub.vf v9, v8, fa0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret entry: @@ -448,9 +447,8 @@ define @intrinsic_vfwsub_mask_vf_nxv1f32_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv1f32_nxv1f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vfwsub.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: 
vfwsub.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1f16.f16( @@ -471,9 +469,8 @@ define @intrinsic_vfwsub_vf_nxv2f32_nxv2f16_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vf_nxv2f32_nxv2f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vfwsub.vf v9, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfwsub.vf v9, v8, fa0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret entry: @@ -496,9 +493,8 @@ define @intrinsic_vfwsub_mask_vf_nxv2f32_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv2f32_nxv2f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vfwsub.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfwsub.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2f16.f16( @@ -519,9 +515,8 @@ define @intrinsic_vfwsub_vf_nxv4f32_nxv4f16_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vf_nxv4f32_nxv4f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vfwsub.vf v10, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfwsub.vf v10, v8, fa0 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret entry: @@ -544,9 +539,8 @@ define @intrinsic_vfwsub_mask_vf_nxv4f32_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv4f32_nxv4f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vfwsub.vf v8, v10, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfwsub.vf v8, v10, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4f16.f16( @@ -567,9 +561,8 @@ define @intrinsic_vfwsub_vf_nxv8f32_nxv8f16_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vf_nxv8f32_nxv8f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vfwsub.vf v12, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfwsub.vf v12, v8, fa0 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret entry: @@ -592,9 +585,8 @@ define @intrinsic_vfwsub_mask_vf_nxv8f32_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv8f32_nxv8f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vfwsub.vf v8, v12, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfwsub.vf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8f16.f16( @@ -615,9 +607,8 @@ define @intrinsic_vfwsub_vf_nxv16f32_nxv16f16_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vf_nxv16f32_nxv16f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vfwsub.vf v16, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfwsub.vf v16, v8, fa0 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret entry: @@ -640,9 +631,8 @@ define @intrinsic_vfwsub_mask_vf_nxv16f32_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfwsub_mask_vf_nxv16f32_nxv16f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vfwsub.vf v8, v16, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfwsub.vf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16f16.f16( @@ -663,9 +653,8 @@ define @intrinsic_vfwsub_vf_nxv1f64_nxv1f32_f32( %0, float %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vf_nxv1f64_nxv1f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vfwsub.vf v9, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfwsub.vf v9, v8, fa0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret entry: @@ -688,9 +677,8 @@ define @intrinsic_vfwsub_mask_vf_nxv1f64_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv1f64_nxv1f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vfwsub.vf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfwsub.vf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.f32( @@ -711,9 +699,8 @@ define @intrinsic_vfwsub_vf_nxv2f64_nxv2f32_f32( %0, float %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vf_nxv2f64_nxv2f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vfwsub.vf v10, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfwsub.vf v10, v8, fa0 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret entry: @@ -736,9 +723,8 @@ define @intrinsic_vfwsub_mask_vf_nxv2f64_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv2f64_nxv2f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vfwsub.vf v8, v10, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfwsub.vf v8, v10, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.f32( @@ -759,9 +745,8 @@ define @intrinsic_vfwsub_vf_nxv4f64_nxv4f32_f32( %0, float %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vf_nxv4f64_nxv4f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vfwsub.vf v12, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfwsub.vf v12, v8, fa0 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret entry: @@ -784,9 +769,8 @@ define @intrinsic_vfwsub_mask_vf_nxv4f64_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv4f64_nxv4f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vfwsub.vf v8, v12, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfwsub.vf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.f32( @@ -807,9 +791,8 @@ define @intrinsic_vfwsub_vf_nxv8f64_nxv8f32_f32( %0, float %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vf_nxv8f64_nxv8f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vfwsub.vf v16, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, 
ta, mu +; CHECK-NEXT: vfwsub.vf v16, v8, fa0 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret entry: @@ -832,9 +815,8 @@ define @intrinsic_vfwsub_mask_vf_nxv8f64_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv8f64_nxv8f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vfwsub.vf v8, v16, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfwsub.vf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.f32( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv32.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: -target-abi=ilp32d < %s | FileCheck %s declare @llvm.riscv.vfwsub.w.nxv1f32.nxv1f16( , , @@ -416,9 +416,8 @@ define @intrinsic_vfwsub.w_wf_nxv1f32_nxv1f32_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv1f32_nxv1f32_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vfwsub.wf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfwsub.wf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwsub.w.nxv1f32.f16( @@ -440,9 +439,8 @@ define @intrinsic_vfwsub.w_mask_wf_nxv1f32_nxv1f32_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv1f32_nxv1f32_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vfwsub.wf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfwsub.wf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwsub.w.mask.nxv1f32.f16( @@ -463,9 +461,8 @@ define @intrinsic_vfwsub.w_wf_nxv2f32_nxv2f32_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv2f32_nxv2f32_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vfwsub.wf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfwsub.wf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwsub.w.nxv2f32.f16( @@ -487,9 +484,8 @@ define @intrinsic_vfwsub.w_mask_wf_nxv2f32_nxv2f32_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv2f32_nxv2f32_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vfwsub.wf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfwsub.wf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwsub.w.mask.nxv2f32.f16( @@ -510,9 +506,8 @@ define @intrinsic_vfwsub.w_wf_nxv4f32_nxv4f32_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv4f32_nxv4f32_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vfwsub.wf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfwsub.wf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwsub.w.nxv4f32.f16( @@ -534,9 +529,8 @@ define 
@intrinsic_vfwsub.w_mask_wf_nxv4f32_nxv4f32_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv4f32_nxv4f32_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vfwsub.wf v8, v10, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfwsub.wf v8, v10, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwsub.w.mask.nxv4f32.f16( @@ -557,9 +551,8 @@ define @intrinsic_vfwsub.w_wf_nxv8f32_nxv8f32_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv8f32_nxv8f32_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vfwsub.wf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfwsub.wf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwsub.w.nxv8f32.f16( @@ -581,9 +574,8 @@ define @intrinsic_vfwsub.w_mask_wf_nxv8f32_nxv8f32_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv8f32_nxv8f32_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vfwsub.wf v8, v12, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfwsub.wf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwsub.w.mask.nxv8f32.f16( @@ -604,9 +596,8 @@ define @intrinsic_vfwsub.w_wf_nxv16f32_nxv16f32_f16( %0, half %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv16f32_nxv16f32_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vfwsub.wf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfwsub.wf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwsub.w.nxv16f32.f16( @@ -628,9 +619,8 @@ define @intrinsic_vfwsub.w_mask_wf_nxv16f32_nxv16f32_f16( %0, %1, half %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv16f32_nxv16f32_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vfwsub.wf v8, v16, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfwsub.wf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwsub.w.mask.nxv16f32.f16( @@ -651,9 +641,8 @@ define @intrinsic_vfwsub.w_wf_nxv1f64_nxv1f64_f32( %0, float %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv1f64_nxv1f64_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vfwsub.wf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfwsub.wf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwsub.w.nxv1f64.f32( @@ -675,9 +664,8 @@ define @intrinsic_vfwsub.w_mask_wf_nxv1f64_nxv1f64_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv1f64_nxv1f64_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vfwsub.wf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfwsub.wf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwsub.w.mask.nxv1f64.f32( @@ -698,9 +686,8 @@ define @intrinsic_vfwsub.w_wf_nxv2f64_nxv2f64_f32( %0, float %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv2f64_nxv2f64_f32: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vfwsub.wf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfwsub.wf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwsub.w.nxv2f64.f32( @@ -722,9 +709,8 @@ define @intrinsic_vfwsub.w_mask_wf_nxv2f64_nxv2f64_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv2f64_nxv2f64_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vfwsub.wf v8, v10, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfwsub.wf v8, v10, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwsub.w.mask.nxv2f64.f32( @@ -745,9 +731,8 @@ define @intrinsic_vfwsub.w_wf_nxv4f64_nxv4f64_f32( %0, float %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv4f64_nxv4f64_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vfwsub.wf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfwsub.wf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwsub.w.nxv4f64.f32( @@ -769,9 +754,8 @@ define @intrinsic_vfwsub.w_mask_wf_nxv4f64_nxv4f64_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv4f64_nxv4f64_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vfwsub.wf v8, v12, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfwsub.wf v8, v12, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwsub.w.mask.nxv4f64.f32( @@ -792,9 +776,8 @@ define @intrinsic_vfwsub.w_wf_nxv8f64_nxv8f64_f32( %0, float %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv8f64_nxv8f64_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vfwsub.wf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfwsub.wf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwsub.w.nxv8f64.f32( @@ -816,9 +799,8 @@ define @intrinsic_vfwsub.w_mask_wf_nxv8f64_nxv8f64_f32( %0, %1, float %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv8f64_nxv8f64_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vfwsub.wf v8, v16, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfwsub.wf v8, v16, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwsub.w.mask.nxv8f64.f32( @@ -987,9 +969,8 @@ define @intrinsic_vfwsub.w_mask_wf_tie_nxv1f32_nxv1f32_f16( %0, half %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv1f32_nxv1f32_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vfwsub.wf v8, v8, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfwsub.wf v8, v8, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwsub.w.mask.nxv1f32.f16( @@ -1005,9 +986,8 @@ define @intrinsic_vfwsub.w_mask_wf_tie_nxv2f32_nxv2f32_f16( %0, half %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv2f32_nxv2f32_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vfwsub.wf v8, v8, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, 
e16, mf2, ta, mu +; CHECK-NEXT: vfwsub.wf v8, v8, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwsub.w.mask.nxv2f32.f16( @@ -1023,9 +1003,8 @@ define @intrinsic_vfwsub.w_mask_wf_tie_nxv4f32_nxv4f32_f16( %0, half %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv4f32_nxv4f32_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vfwsub.wf v8, v8, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfwsub.wf v8, v8, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwsub.w.mask.nxv4f32.f16( @@ -1041,9 +1020,8 @@ define @intrinsic_vfwsub.w_mask_wf_tie_nxv8f32_nxv8f32_f16( %0, half %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv8f32_nxv8f32_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vfwsub.wf v8, v8, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfwsub.wf v8, v8, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwsub.w.mask.nxv8f32.f16( @@ -1059,9 +1037,8 @@ define @intrinsic_vfwsub.w_mask_wf_tie_nxv16f32_nxv16f32_f16( %0, half %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv16f32_nxv16f32_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vfwsub.wf v8, v8, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfwsub.wf v8, v8, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwsub.w.mask.nxv16f32.f16( @@ -1077,9 +1054,8 @@ define @intrinsic_vfwsub.w_mask_wf_tie_nxv1f64_nxv1f64_f32( %0, float %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv1f64_nxv1f64_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vfwsub.wf v8, v8, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfwsub.wf v8, v8, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwsub.w.mask.nxv1f64.f32( @@ -1095,9 +1071,8 @@ define @intrinsic_vfwsub.w_mask_wf_tie_nxv2f64_nxv2f64_f32( %0, float %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv2f64_nxv2f64_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vfwsub.wf v8, v8, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfwsub.wf v8, v8, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwsub.w.mask.nxv2f64.f32( @@ -1113,9 +1088,8 @@ define @intrinsic_vfwsub.w_mask_wf_tie_nxv4f64_nxv4f64_f32( %0, float %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv4f64_nxv4f64_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vfwsub.wf v8, v8, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfwsub.wf v8, v8, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwsub.w.mask.nxv4f64.f32( @@ -1131,9 +1105,8 @@ define @intrinsic_vfwsub.w_mask_wf_tie_nxv8f64_nxv8f64_f32( %0, float %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv8f64_nxv8f64_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.w.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vfwsub.wf v8, v8, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: 
vfwsub.wf v8, v8, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwsub.w.mask.nxv8f64.f32( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv64.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+zfh -verify-machineinstrs \ -; RUN: < %s | FileCheck %s +; RUN: -target-abi=lp64d < %s | FileCheck %s declare @llvm.riscv.vfwsub.w.nxv1f32.nxv1f16( , , @@ -416,9 +416,8 @@ define @intrinsic_vfwsub.w_wf_nxv1f32_nxv1f32_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv1f32_nxv1f32_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vfwsub.wf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfwsub.wf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwsub.w.nxv1f32.f16( @@ -440,9 +439,8 @@ define @intrinsic_vfwsub.w_mask_wf_nxv1f32_nxv1f32_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv1f32_nxv1f32_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vfwsub.wf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfwsub.wf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwsub.w.mask.nxv1f32.f16( @@ -463,9 +461,8 @@ define @intrinsic_vfwsub.w_wf_nxv2f32_nxv2f32_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv2f32_nxv2f32_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vfwsub.wf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfwsub.wf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwsub.w.nxv2f32.f16( @@ -487,9 +484,8 @@ define @intrinsic_vfwsub.w_mask_wf_nxv2f32_nxv2f32_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv2f32_nxv2f32_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vfwsub.wf v8, v9, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfwsub.wf v8, v9, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwsub.w.mask.nxv2f32.f16( @@ -510,9 +506,8 @@ define @intrinsic_vfwsub.w_wf_nxv4f32_nxv4f32_f16( %0, half %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv4f32_nxv4f32_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vfwsub.wf v8, v8, ft0 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfwsub.wf v8, v8, fa0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwsub.w.nxv4f32.f16( @@ -534,9 +529,8 @@ define @intrinsic_vfwsub.w_mask_wf_nxv4f32_nxv4f32_f16( %0, %1, half %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv4f32_nxv4f32_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fmv.h.x ft0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vfwsub.wf v8, v10, ft0, v0.t +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfwsub.wf v8, v10, fa0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfwsub.w.mask.nxv4f32.f16( @@ -557,9 +551,8 @@ define 
@intrinsic_vfwsub.w_wf_nxv8f32_nxv8f32_f16( %0, half %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv8f32_nxv8f32_f16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: fmv.h.x ft0, a0
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vfwsub.wf v8, v8, ft0
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vfwsub.wf v8, v8, fa0
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vfwsub.w.nxv8f32.f16(
@@ -581,9 +574,8 @@
 define @intrinsic_vfwsub.w_mask_wf_nxv8f32_nxv8f32_f16( %0, %1, half %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv8f32_nxv8f32_f16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: fmv.h.x ft0, a0
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vfwsub.wf v8, v12, ft0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vfwsub.wf v8, v12, fa0, v0.t
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vfwsub.w.mask.nxv8f32.f16(
@@ -604,9 +596,8 @@
 define @intrinsic_vfwsub.w_wf_nxv16f32_nxv16f32_f16( %0, half %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv16f32_nxv16f32_f16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: fmv.h.x ft0, a0
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT: vfwsub.wf v8, v8, ft0
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vfwsub.wf v8, v8, fa0
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vfwsub.w.nxv16f32.f16(
@@ -628,9 +619,8 @@
 define @intrinsic_vfwsub.w_mask_wf_nxv16f32_nxv16f32_f16( %0, %1, half %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv16f32_nxv16f32_f16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: fmv.h.x ft0, a0
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT: vfwsub.wf v8, v16, ft0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vfwsub.wf v8, v16, fa0, v0.t
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vfwsub.w.mask.nxv16f32.f16(
@@ -651,9 +641,8 @@
 define @intrinsic_vfwsub.w_wf_nxv1f64_nxv1f64_f32( %0, float %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv1f64_nxv1f64_f32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: fmv.w.x ft0, a0
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vfwsub.wf v8, v8, ft0
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vfwsub.wf v8, v8, fa0
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vfwsub.w.nxv1f64.f32(
@@ -675,9 +664,8 @@
 define @intrinsic_vfwsub.w_mask_wf_nxv1f64_nxv1f64_f32( %0, %1, float %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv1f64_nxv1f64_f32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: fmv.w.x ft0, a0
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vfwsub.wf v8, v9, ft0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vfwsub.wf v8, v9, fa0, v0.t
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vfwsub.w.mask.nxv1f64.f32(
@@ -698,9 +686,8 @@
 define @intrinsic_vfwsub.w_wf_nxv2f64_nxv2f64_f32( %0, float %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv2f64_nxv2f64_f32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: fmv.w.x ft0, a0
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vfwsub.wf v8, v8, ft0
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vfwsub.wf v8, v8, fa0
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vfwsub.w.nxv2f64.f32(
@@ -722,9 +709,8 @@
 define @intrinsic_vfwsub.w_mask_wf_nxv2f64_nxv2f64_f32( %0, %1, float %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv2f64_nxv2f64_f32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: fmv.w.x ft0, a0
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vfwsub.wf v8, v10, ft0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vfwsub.wf v8, v10, fa0, v0.t
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vfwsub.w.mask.nxv2f64.f32(
@@ -745,9 +731,8 @@
 define @intrinsic_vfwsub.w_wf_nxv4f64_nxv4f64_f32( %0, float %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv4f64_nxv4f64_f32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: fmv.w.x ft0, a0
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vfwsub.wf v8, v8, ft0
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vfwsub.wf v8, v8, fa0
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vfwsub.w.nxv4f64.f32(
@@ -769,9 +754,8 @@
 define @intrinsic_vfwsub.w_mask_wf_nxv4f64_nxv4f64_f32( %0, %1, float %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv4f64_nxv4f64_f32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: fmv.w.x ft0, a0
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vfwsub.wf v8, v12, ft0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vfwsub.wf v8, v12, fa0, v0.t
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vfwsub.w.mask.nxv4f64.f32(
@@ -792,9 +776,8 @@
 define @intrinsic_vfwsub.w_wf_nxv8f64_nxv8f64_f32( %0, float %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv8f64_nxv8f64_f32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: fmv.w.x ft0, a0
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT: vfwsub.wf v8, v8, ft0
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vfwsub.wf v8, v8, fa0
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vfwsub.w.nxv8f64.f32(
@@ -816,9 +799,8 @@
 define @intrinsic_vfwsub.w_mask_wf_nxv8f64_nxv8f64_f32( %0, %1, float %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv8f64_nxv8f64_f32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: fmv.w.x ft0, a0
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT: vfwsub.wf v8, v16, ft0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vfwsub.wf v8, v16, fa0, v0.t
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vfwsub.w.mask.nxv8f64.f32(
@@ -987,9 +969,8 @@
 define @intrinsic_vfwsub.w_mask_wf_tie_nxv1f32_nxv1f32_f16( %0, half %1, %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv1f32_nxv1f32_f16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: fmv.h.x ft0, a0
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vfwsub.wf v8, v8, ft0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vfwsub.wf v8, v8, fa0, v0.t
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vfwsub.w.mask.nxv1f32.f16(
@@ -1005,9 +986,8 @@
 define @intrinsic_vfwsub.w_mask_wf_tie_nxv2f32_nxv2f32_f16( %0, half %1, %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv2f32_nxv2f32_f16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: fmv.h.x ft0, a0
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vfwsub.wf v8, v8, ft0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vfwsub.wf v8, v8, fa0, v0.t
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vfwsub.w.mask.nxv2f32.f16(
@@ -1023,9 +1003,8 @@
 define @intrinsic_vfwsub.w_mask_wf_tie_nxv4f32_nxv4f32_f16( %0, half %1, %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv4f32_nxv4f32_f16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: fmv.h.x ft0, a0
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vfwsub.wf v8, v8, ft0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vfwsub.wf v8, v8, fa0, v0.t
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vfwsub.w.mask.nxv4f32.f16(
@@ -1041,9 +1020,8 @@
 define @intrinsic_vfwsub.w_mask_wf_tie_nxv8f32_nxv8f32_f16( %0, half %1, %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv8f32_nxv8f32_f16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: fmv.h.x ft0, a0
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vfwsub.wf v8, v8, ft0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vfwsub.wf v8, v8, fa0, v0.t
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vfwsub.w.mask.nxv8f32.f16(
@@ -1059,9 +1037,8 @@
 define @intrinsic_vfwsub.w_mask_wf_tie_nxv16f32_nxv16f32_f16( %0, half %1, %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv16f32_nxv16f32_f16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: fmv.h.x ft0, a0
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT: vfwsub.wf v8, v8, ft0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vfwsub.wf v8, v8, fa0, v0.t
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vfwsub.w.mask.nxv16f32.f16(
@@ -1077,9 +1054,8 @@
 define @intrinsic_vfwsub.w_mask_wf_tie_nxv1f64_nxv1f64_f32( %0, float %1, %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv1f64_nxv1f64_f32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: fmv.w.x ft0, a0
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vfwsub.wf v8, v8, ft0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vfwsub.wf v8, v8, fa0, v0.t
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vfwsub.w.mask.nxv1f64.f32(
@@ -1095,9 +1071,8 @@
 define @intrinsic_vfwsub.w_mask_wf_tie_nxv2f64_nxv2f64_f32( %0, float %1, %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv2f64_nxv2f64_f32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: fmv.w.x ft0, a0
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vfwsub.wf v8, v8, ft0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vfwsub.wf v8, v8, fa0, v0.t
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vfwsub.w.mask.nxv2f64.f32(
@@ -1113,9 +1088,8 @@
 define @intrinsic_vfwsub.w_mask_wf_tie_nxv4f64_nxv4f64_f32( %0, float %1, %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv4f64_nxv4f64_f32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: fmv.w.x ft0, a0
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vfwsub.wf v8, v8, ft0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vfwsub.wf v8, v8, fa0, v0.t
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vfwsub.w.mask.nxv4f64.f32(
@@ -1131,9 +1105,8 @@
 define @intrinsic_vfwsub.w_mask_wf_tie_nxv8f64_nxv8f64_f32( %0, float %1, %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv8f64_nxv8f64_f32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: fmv.w.x ft0, a0
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT: vfwsub.wf v8, v8, ft0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vfwsub.wf v8, v8, fa0, v0.t
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vfwsub.w.mask.nxv8f64.f32(