diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -1601,12 +1601,16 @@
 // 15.3. Vector Single-Width Floating-Point Reduction Instructions
 defm vfredmax : RVVFloatingReductionBuiltin;
 defm vfredmin : RVVFloatingReductionBuiltin;
-defm vfredsum : RVVFloatingReductionBuiltin;
+defm vfredusum : RVVFloatingReductionBuiltin;
 defm vfredosum : RVVFloatingReductionBuiltin;
+let IRName = "vfredusum", IRNameMask = "vfredusum_mask" in
+defm vfredsum : RVVFloatingReductionBuiltin;
 
 // 15.4. Vector Widening Floating-Point Reduction Instructions
-defm vfwredsum : RVVFloatingWidenReductionBuiltin;
+defm vfwredusum : RVVFloatingWidenReductionBuiltin;
 defm vfwredosum : RVVFloatingWidenReductionBuiltin;
+let IRName = "vfwredusum", IRNameMask = "vfwredusum_mask" in
+defm vfwredsum : RVVFloatingWidenReductionBuiltin;
 
 // 16. Vector Mask Instructions
 // 16.1. Vector Mask-Register Logical Instructions
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredsum.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredsum.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredsum.c
@@ -6,211 +6,211 @@
 #include <riscv_vector.h>
 
 //
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f32mf2_f32m1(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f32mf2_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfredsum.nxv2f32.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv2f32.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vfloat32m1_t test_vfredsum_vs_f32mf2_f32m1(vfloat32m1_t dst,
+vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1(vfloat32m1_t dst,
                                            vfloat32mf2_t vector,
                                            vfloat32m1_t scalar, size_t vl) {
-  return vfredsum(dst, vector, scalar, vl);
+  return vfredusum(dst, vector, scalar, vl);
 }
 
 //
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m1_f32m1(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m1_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfredsum.nxv2f32.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv2f32.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vfloat32m1_t test_vfredsum_vs_f32m1_f32m1(vfloat32m1_t dst, vfloat32m1_t vector,
+vfloat32m1_t test_vfredusum_vs_f32m1_f32m1(vfloat32m1_t dst, vfloat32m1_t vector,
                                           vfloat32m1_t scalar, size_t vl) {
-  return vfredsum(dst, vector, scalar, vl);
+  return vfredusum(dst, vector, scalar, vl);
 }
 
 //
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m2_f32m1(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m2_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfredsum.nxv2f32.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv2f32.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vfloat32m1_t test_vfredsum_vs_f32m2_f32m1(vfloat32m1_t dst, vfloat32m2_t vector,
+vfloat32m1_t test_vfredusum_vs_f32m2_f32m1(vfloat32m1_t dst, vfloat32m2_t vector,
                                           vfloat32m1_t scalar, size_t vl) {
-  return vfredsum(dst, vector, scalar, vl);
+  return vfredusum(dst, vector, scalar, vl);
 }
 
 //
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m4_f32m1(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m4_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfredsum.nxv2f32.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv2f32.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vfloat32m1_t test_vfredsum_vs_f32m4_f32m1(vfloat32m1_t dst, vfloat32m4_t vector,
+vfloat32m1_t test_vfredusum_vs_f32m4_f32m1(vfloat32m1_t dst, vfloat32m4_t vector,
                                           vfloat32m1_t scalar, size_t vl) {
-  return vfredsum(dst, vector, scalar, vl);
+  return vfredusum(dst, vector, scalar, vl);
 }
 
 //
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m8_f32m1(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m8_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfredsum.nxv2f32.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv2f32.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vfloat32m1_t test_vfredsum_vs_f32m8_f32m1(vfloat32m1_t dst, vfloat32m8_t vector,
+vfloat32m1_t test_vfredusum_vs_f32m8_f32m1(vfloat32m1_t dst, vfloat32m8_t vector,
                                           vfloat32m1_t scalar, size_t vl) {
-  return vfredsum(dst, vector, scalar, vl);
+  return vfredusum(dst, vector, scalar, vl);
 }
 
 //
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m1_f64m1(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m1_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfredsum.nxv1f64.nxv1f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv1f64.nxv1f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vfloat64m1_t test_vfredsum_vs_f64m1_f64m1(vfloat64m1_t dst, vfloat64m1_t vector,
+vfloat64m1_t test_vfredusum_vs_f64m1_f64m1(vfloat64m1_t dst, vfloat64m1_t vector,
                                           vfloat64m1_t scalar, size_t vl) {
-  return vfredsum(dst, vector, scalar, vl);
+  return vfredusum(dst, vector, scalar, vl);
 }
 
 //
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m2_f64m1(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m2_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfredsum.nxv1f64.nxv2f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv1f64.nxv2f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vfloat64m1_t test_vfredsum_vs_f64m2_f64m1(vfloat64m1_t dst, vfloat64m2_t vector,
+vfloat64m1_t test_vfredusum_vs_f64m2_f64m1(vfloat64m1_t dst, vfloat64m2_t vector,
                                           vfloat64m1_t scalar, size_t vl) {
-  return vfredsum(dst, vector, scalar, vl);
+  return vfredusum(dst, vector, scalar, vl);
 }
 
 //
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m4_f64m1(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m4_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfredsum.nxv1f64.nxv4f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv1f64.nxv4f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vfloat64m1_t test_vfredsum_vs_f64m4_f64m1(vfloat64m1_t dst, vfloat64m4_t vector,
+vfloat64m1_t test_vfredusum_vs_f64m4_f64m1(vfloat64m1_t dst, vfloat64m4_t vector,
                                           vfloat64m1_t scalar, size_t vl) {
-  return vfredsum(dst, vector, scalar, vl);
+  return vfredusum(dst, vector, scalar, vl);
 }
 
 //
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m8_f64m1(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m8_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfredsum.nxv1f64.nxv8f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv1f64.nxv8f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vfloat64m1_t test_vfredsum_vs_f64m8_f64m1(vfloat64m1_t dst, vfloat64m8_t vector,
+vfloat64m1_t test_vfredusum_vs_f64m8_f64m1(vfloat64m1_t dst, vfloat64m8_t vector,
                                           vfloat64m1_t scalar, size_t vl) {
-  return vfredsum(dst, vector, scalar, vl);
+  return vfredusum(dst, vector, scalar, vl);
 }
 
 //
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f32mf2_f32m1_m(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f32mf2_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv2f32.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vfloat32m1_t test_vfredsum_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t dst,
+vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t dst,
                                              vfloat32mf2_t vector,
                                              vfloat32m1_t scalar, size_t vl) {
-  return vfredsum(mask, dst, vector, scalar, vl);
+  return vfredusum(mask, dst, vector, scalar, vl);
 }
 
 //
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m1_f32m1_m(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m1_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv2f32.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vfloat32m1_t test_vfredsum_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t dst,
+vfloat32m1_t test_vfredusum_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t dst,
                                             vfloat32m1_t vector,
                                             vfloat32m1_t scalar, size_t vl) {
-  return vfredsum(mask, dst, vector, scalar, vl);
+  return vfredusum(mask, dst, vector, scalar, vl);
 }
 
 //
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m2_f32m1_m(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m2_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv2f32.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vfloat32m1_t test_vfredsum_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t dst,
+vfloat32m1_t test_vfredusum_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t dst,
                                             vfloat32m2_t vector,
                                             vfloat32m1_t scalar, size_t vl) {
-  return vfredsum(mask, dst, vector, scalar, vl);
+  return vfredusum(mask, dst, vector, scalar, vl);
 }
 
 //
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m4_f32m1_m(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m4_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv2f32.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vfloat32m1_t test_vfredsum_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t dst,
+vfloat32m1_t test_vfredusum_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t dst,
                                             vfloat32m4_t vector,
                                             vfloat32m1_t scalar, size_t vl) {
-  return vfredsum(mask, dst, vector, scalar, vl);
+  return vfredusum(mask, dst, vector, scalar, vl);
 }
 
 //
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m8_f32m1_m(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m8_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv2f32.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vfloat32m1_t test_vfredsum_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t dst,
+vfloat32m1_t test_vfredusum_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t dst,
                                             vfloat32m8_t vector,
                                             vfloat32m1_t scalar, size_t vl) {
-  return vfredsum(mask, dst, vector, scalar, vl);
+  return vfredusum(mask, dst, vector, scalar, vl);
 }
 
 //
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m1_f64m1_m(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m1_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv1f64.nxv1f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv1f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vfloat64m1_t test_vfredsum_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
+vfloat64m1_t test_vfredusum_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
                                             vfloat64m1_t vector,
                                             vfloat64m1_t scalar, size_t vl) {
-  return vfredsum(mask, dst, vector, scalar, vl);
+  return vfredusum(mask, dst, vector, scalar, vl);
 }
 
 //
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m2_f64m1_m(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m2_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv1f64.nxv2f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv2f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vfloat64m1_t test_vfredsum_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t dst,
+vfloat64m1_t test_vfredusum_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t dst,
                                             vfloat64m2_t vector,
                                             vfloat64m1_t scalar, size_t vl) {
-  return vfredsum(mask, dst, vector, scalar, vl);
+  return vfredusum(mask, dst, vector, scalar, vl);
 }
 
 //
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m4_f64m1_m(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m4_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv1f64.nxv4f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv4f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vfloat64m1_t test_vfredsum_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t dst,
+vfloat64m1_t test_vfredusum_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t dst,
                                             vfloat64m4_t vector,
                                             vfloat64m1_t scalar, size_t vl) {
-  return vfredsum(mask, dst, vector, scalar, vl);
+  return vfredusum(mask, dst, vector, scalar, vl);
 }
 
 //
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m8_f64m1_m(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m8_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv1f64.nxv8f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv8f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vfloat64m1_t test_vfredsum_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t dst,
+vfloat64m1_t test_vfredusum_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t dst,
                                             vfloat64m8_t vector,
                                             vfloat64m1_t scalar, size_t vl) {
-  return vfredsum(mask, dst, vector, scalar, vl);
+  return vfredusum(mask, dst, vector, scalar, vl);
 }
 
 //
@@ -428,3 +428,15 @@
                                              vfloat64m1_t scalar, size_t vl) {
   return vfredosum(mask, dst, vector, scalar, vl);
 }
+
+//
+// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m8_f64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv8f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vfloat64m1_t test_vfredsum_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t dst,
+                                            vfloat64m8_t vector,
+                                            vfloat64m1_t scalar, size_t vl) {
+  return vfredsum(mask, dst, vector, scalar, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwredsum.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwredsum.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwredsum.c
@@ -6,117 +6,129 @@
 #include <riscv_vector.h>
 
 //
-// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32mf2_f64m1(
+// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32mf2_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.nxv1f64.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv1f64.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vfloat64m1_t test_vfwredsum_vs_f32mf2_f64m1(vfloat64m1_t dst,
+vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1(vfloat64m1_t dst,
                                             vfloat32mf2_t vector,
                                             vfloat64m1_t scalar, size_t vl) {
-  return vfwredsum(dst, vector, scalar, vl);
+  return vfwredusum(dst, vector, scalar, vl);
 }
 
 //
-// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m1_f64m1(
+// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m1_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.nxv1f64.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv1f64.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vfloat64m1_t test_vfwredsum_vs_f32m1_f64m1(vfloat64m1_t dst,
+vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1(vfloat64m1_t dst,
                                            vfloat32m1_t vector,
                                            vfloat64m1_t scalar, size_t vl) {
-  return vfwredsum(dst, vector, scalar, vl);
+  return vfwredusum(dst, vector, scalar, vl);
 }
 
 //
-// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m2_f64m1(
+// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m2_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.nxv1f64.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv1f64.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vfloat64m1_t test_vfwredsum_vs_f32m2_f64m1(vfloat64m1_t dst,
+vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1(vfloat64m1_t dst,
                                            vfloat32m2_t vector,
                                            vfloat64m1_t scalar, size_t vl) {
-  return vfwredsum(dst, vector, scalar, vl);
+  return vfwredusum(dst, vector, scalar, vl);
 }
 
 //
-// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m4_f64m1(
+// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m4_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.nxv1f64.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv1f64.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vfloat64m1_t test_vfwredsum_vs_f32m4_f64m1(vfloat64m1_t dst,
+vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1(vfloat64m1_t dst,
                                            vfloat32m4_t vector,
                                            vfloat64m1_t scalar, size_t vl) {
-  return vfwredsum(dst, vector, scalar, vl);
+  return vfwredusum(dst, vector, scalar, vl);
 }
 
 //
-// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m8_f64m1(
+// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m8_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.nxv1f64.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv1f64.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vfloat64m1_t test_vfwredsum_vs_f32m8_f64m1(vfloat64m1_t dst,
+vfloat64m1_t test_vfwredusum_vs_f32m8_f64m1(vfloat64m1_t dst,
                                            vfloat32m8_t vector,
                                            vfloat64m1_t scalar, size_t vl) {
-  return vfwredsum(dst, vector, scalar, vl);
+  return vfwredusum(dst, vector, scalar, vl);
 }
 
 //
-// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32mf2_f64m1_m(
+// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32mf2_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.mask.nxv1f64.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vfloat64m1_t test_vfwredsum_vs_f32mf2_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
+vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
                                               vfloat32mf2_t vector,
                                               vfloat64m1_t scalar, size_t vl) {
-  return vfwredsum(mask, dst, vector, scalar, vl);
+  return vfwredusum(mask, dst, vector, scalar, vl);
 }
 
 //
-// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m1_f64m1_m(
+// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m1_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.mask.nxv1f64.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vfloat64m1_t test_vfwredsum_vs_f32m1_f64m1_m(vbool32_t mask, vfloat64m1_t dst,
+vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1_m(vbool32_t mask, vfloat64m1_t dst,
                                              vfloat32m1_t vector,
                                              vfloat64m1_t scalar, size_t vl) {
-  return vfwredsum(mask, dst, vector, scalar, vl);
+  return vfwredusum(mask, dst, vector, scalar, vl);
 }
 
 //
-// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m2_f64m1_m(
+// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m2_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.mask.nxv1f64.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vfloat64m1_t test_vfwredsum_vs_f32m2_f64m1_m(vbool16_t mask, vfloat64m1_t dst,
+vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1_m(vbool16_t mask, vfloat64m1_t dst,
                                              vfloat32m2_t vector,
                                              vfloat64m1_t scalar, size_t vl) {
-  return vfwredsum(mask, dst, vector, scalar, vl);
+  return vfwredusum(mask, dst, vector, scalar, vl);
 }
 
 //
-// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m4_f64m1_m(
+// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m4_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.mask.nxv1f64.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vfloat64m1_t test_vfwredsum_vs_f32m4_f64m1_m(vbool8_t mask, vfloat64m1_t dst,
+vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1_m(vbool8_t mask, vfloat64m1_t dst,
                                              vfloat32m4_t vector,
                                              vfloat64m1_t scalar, size_t vl) {
-  return vfwredsum(mask, dst, vector, scalar, vl);
+  return vfwredusum(mask, dst, vector, scalar, vl);
+}
+
+//
+// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m8_f64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vfloat64m1_t test_vfwredusum_vs_f32m8_f64m1_m(vbool4_t mask, vfloat64m1_t dst,
+                                              vfloat32m8_t vector,
+                                              vfloat64m1_t scalar, size_t vl) {
+  return vfwredusum(mask, dst, vector, scalar, vl);
 }
 
 //
 // CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m8_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.mask.nxv1f64.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat64m1_t test_vfwredsum_vs_f32m8_f64m1_m(vbool4_t mask, vfloat64m1_t dst,
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredsum.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredsum.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredsum.c
@@ -6,211 +6,211 @@
 #include <riscv_vector.h>
 
 //
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f32mf2_f32m1(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f32mf2_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfredsum.nxv2f32.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv2f32.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vfloat32m1_t test_vfredsum_vs_f32mf2_f32m1(vfloat32m1_t dst,
+vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1(vfloat32m1_t dst,
                                            vfloat32mf2_t vector,
                                            vfloat32m1_t scalar, size_t vl) {
-  return vfredsum_vs_f32mf2_f32m1(dst, vector, scalar, vl);
+  return vfredusum_vs_f32mf2_f32m1(dst, vector, scalar, vl);
 }
 
 //
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m1_f32m1(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m1_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfredsum.nxv2f32.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv2f32.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vfloat32m1_t test_vfredsum_vs_f32m1_f32m1(vfloat32m1_t dst, vfloat32m1_t vector,
+vfloat32m1_t test_vfredusum_vs_f32m1_f32m1(vfloat32m1_t dst, vfloat32m1_t vector,
                                           vfloat32m1_t scalar, size_t vl) {
-  return vfredsum_vs_f32m1_f32m1(dst, vector, scalar, vl);
+  return vfredusum_vs_f32m1_f32m1(dst, vector, scalar, vl);
 }
 
 //
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m2_f32m1(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m2_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfredsum.nxv2f32.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv2f32.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vfloat32m1_t test_vfredsum_vs_f32m2_f32m1(vfloat32m1_t dst, vfloat32m2_t vector,
+vfloat32m1_t test_vfredusum_vs_f32m2_f32m1(vfloat32m1_t dst, vfloat32m2_t vector,
                                           vfloat32m1_t scalar, size_t vl) {
-  return vfredsum_vs_f32m2_f32m1(dst, vector, scalar, vl);
+  return vfredusum_vs_f32m2_f32m1(dst, vector, scalar, vl);
 }
 
 //
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m4_f32m1(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m4_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfredsum.nxv2f32.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv2f32.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vfloat32m1_t test_vfredsum_vs_f32m4_f32m1(vfloat32m1_t dst, vfloat32m4_t vector,
+vfloat32m1_t test_vfredusum_vs_f32m4_f32m1(vfloat32m1_t dst, vfloat32m4_t vector,
                                           vfloat32m1_t scalar, size_t vl) {
-  return vfredsum_vs_f32m4_f32m1(dst, vector, scalar, vl);
+  return vfredusum_vs_f32m4_f32m1(dst, vector, scalar, vl);
 }
 
 //
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m8_f32m1(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m8_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfredsum.nxv2f32.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv2f32.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vfloat32m1_t test_vfredsum_vs_f32m8_f32m1(vfloat32m1_t dst, vfloat32m8_t vector,
+vfloat32m1_t test_vfredusum_vs_f32m8_f32m1(vfloat32m1_t dst, vfloat32m8_t vector,
                                           vfloat32m1_t scalar, size_t vl) {
-  return vfredsum_vs_f32m8_f32m1(dst, vector, scalar, vl);
+  return vfredusum_vs_f32m8_f32m1(dst, vector, scalar, vl);
 }
 
 //
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m1_f64m1(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m1_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfredsum.nxv1f64.nxv1f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv1f64.nxv1f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vfloat64m1_t test_vfredsum_vs_f64m1_f64m1(vfloat64m1_t dst, vfloat64m1_t vector,
+vfloat64m1_t test_vfredusum_vs_f64m1_f64m1(vfloat64m1_t dst, vfloat64m1_t vector,
                                           vfloat64m1_t scalar, size_t vl) {
-  return vfredsum_vs_f64m1_f64m1(dst, vector, scalar, vl);
+  return vfredusum_vs_f64m1_f64m1(dst, vector, scalar, vl);
 }
 
 //
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m2_f64m1(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m2_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfredsum.nxv1f64.nxv2f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv1f64.nxv2f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vfloat64m1_t test_vfredsum_vs_f64m2_f64m1(vfloat64m1_t dst, vfloat64m2_t vector,
+vfloat64m1_t test_vfredusum_vs_f64m2_f64m1(vfloat64m1_t dst, vfloat64m2_t vector,
                                           vfloat64m1_t scalar, size_t vl) {
-  return vfredsum_vs_f64m2_f64m1(dst, vector, scalar, vl);
+  return vfredusum_vs_f64m2_f64m1(dst, vector, scalar, vl);
 }
 
 //
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m4_f64m1(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m4_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfredsum.nxv1f64.nxv4f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv1f64.nxv4f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vfloat64m1_t test_vfredsum_vs_f64m4_f64m1(vfloat64m1_t dst, vfloat64m4_t vector,
+vfloat64m1_t test_vfredusum_vs_f64m4_f64m1(vfloat64m1_t dst, vfloat64m4_t vector,
                                           vfloat64m1_t scalar, size_t vl) {
-  return vfredsum_vs_f64m4_f64m1(dst, vector, scalar, vl);
+  return vfredusum_vs_f64m4_f64m1(dst, vector, scalar, vl);
 }
 
 //
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m8_f64m1(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m8_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfredsum.nxv1f64.nxv8f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfredusum.nxv1f64.nxv8f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vfloat64m1_t test_vfredsum_vs_f64m8_f64m1(vfloat64m1_t dst, vfloat64m8_t vector,
+vfloat64m1_t test_vfredusum_vs_f64m8_f64m1(vfloat64m1_t dst, vfloat64m8_t vector,
                                           vfloat64m1_t scalar, size_t vl) {
-  return vfredsum_vs_f64m8_f64m1(dst, vector, scalar, vl);
+  return vfredusum_vs_f64m8_f64m1(dst, vector, scalar, vl);
 }
 
 //
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f32mf2_f32m1_m(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f32mf2_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv2f32.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vfloat32m1_t test_vfredsum_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t dst,
+vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t dst,
                                              vfloat32mf2_t vector,
                                              vfloat32m1_t scalar, size_t vl) {
-  return vfredsum_vs_f32mf2_f32m1_m(mask, dst, vector, scalar, vl);
+  return vfredusum_vs_f32mf2_f32m1_m(mask, dst, vector, scalar, vl);
 }
 
 //
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m1_f32m1_m(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m1_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv2f32.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vfloat32m1_t test_vfredsum_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t dst,
+vfloat32m1_t test_vfredusum_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t dst,
                                             vfloat32m1_t vector,
                                             vfloat32m1_t scalar, size_t vl) {
-  return vfredsum_vs_f32m1_f32m1_m(mask, dst, vector, scalar, vl);
+  return vfredusum_vs_f32m1_f32m1_m(mask, dst, vector, scalar, vl);
 }
 
 //
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m2_f32m1_m(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m2_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv2f32.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vfloat32m1_t test_vfredsum_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t dst,
+vfloat32m1_t test_vfredusum_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t dst,
                                             vfloat32m2_t vector,
                                             vfloat32m1_t scalar, size_t vl) {
-  return vfredsum_vs_f32m2_f32m1_m(mask, dst, vector, scalar, vl);
+  return vfredusum_vs_f32m2_f32m1_m(mask, dst, vector, scalar, vl);
 }
 
 //
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m4_f32m1_m(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m4_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv2f32.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vfloat32m1_t test_vfredsum_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t dst,
+vfloat32m1_t test_vfredusum_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t dst,
                                             vfloat32m4_t vector,
                                             vfloat32m1_t scalar, size_t vl) {
-  return vfredsum_vs_f32m4_f32m1_m(mask, dst, vector, scalar, vl);
+  return vfredusum_vs_f32m4_f32m1_m(mask, dst, vector, scalar, vl);
 }
 
 //
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m8_f32m1_m(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m8_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv2f32.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vfloat32m1_t test_vfredsum_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t dst,
+vfloat32m1_t test_vfredusum_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t dst,
                                             vfloat32m8_t vector,
                                             vfloat32m1_t scalar, size_t vl) {
-  return vfredsum_vs_f32m8_f32m1_m(mask, dst, vector, scalar, vl);
+  return vfredusum_vs_f32m8_f32m1_m(mask, dst, vector, scalar, vl);
 }
 
 //
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m1_f64m1_m(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m1_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv1f64.nxv1f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv1f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vfloat64m1_t test_vfredsum_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
+vfloat64m1_t test_vfredusum_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
                                             vfloat64m1_t vector,
                                             vfloat64m1_t scalar, size_t vl) {
-  return vfredsum_vs_f64m1_f64m1_m(mask, dst, vector, scalar, vl);
+  return vfredusum_vs_f64m1_f64m1_m(mask, dst, vector, scalar, vl);
 }
 
 //
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m2_f64m1_m(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m2_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv1f64.nxv2f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv2f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vfloat64m1_t test_vfredsum_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t dst,
+vfloat64m1_t test_vfredusum_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t dst,
                                             vfloat64m2_t vector,
                                             vfloat64m1_t scalar, size_t vl) {
-  return vfredsum_vs_f64m2_f64m1_m(mask, dst, vector, scalar, vl);
+  return vfredusum_vs_f64m2_f64m1_m(mask, dst, vector, scalar, vl);
 }
 
 //
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m4_f64m1_m(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m4_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv1f64.nxv4f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv4f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vfloat64m1_t test_vfredsum_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t dst,
+vfloat64m1_t test_vfredusum_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t dst,
                                             vfloat64m4_t vector,
                                             vfloat64m1_t scalar, size_t vl) {
-  return vfredsum_vs_f64m4_f64m1_m(mask, dst, vector, scalar, vl);
+  return vfredusum_vs_f64m4_f64m1_m(mask, dst, vector, scalar, vl);
 }
 
 //
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m8_f64m1_m(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m8_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv1f64.nxv8f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv8f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vfloat64m1_t test_vfredsum_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t dst,
+vfloat64m1_t test_vfredusum_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t dst,
                                             vfloat64m8_t vector,
                                             vfloat64m1_t scalar, size_t vl) {
-  return vfredsum_vs_f64m8_f64m1_m(mask, dst, vector, scalar, vl);
+  return vfredusum_vs_f64m8_f64m1_m(mask, dst, vector, scalar, vl);
 }
 
 //
@@ -428,3 +428,15 @@
                                              vfloat64m1_t scalar, size_t vl) {
   return vfredosum_vs_f64m8_f64m1_m(mask, dst, vector, scalar, vl);
 }
+
+//
+// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m8_f64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv8f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vfloat64m1_t test_vfredsum_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t dst,
+                                            vfloat64m8_t vector,
+                                            vfloat64m1_t scalar, size_t vl) {
+  return vfredsum_vs_f64m8_f64m1_m(mask, dst, vector, scalar, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwredsum.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwredsum.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwredsum.c
@@ -6,117 +6,129 @@
 #include <riscv_vector.h>
 
 //
-// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32mf2_f64m1(
+// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32mf2_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.nxv1f64.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv1f64.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vfloat64m1_t test_vfwredsum_vs_f32mf2_f64m1(vfloat64m1_t dst,
+vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1(vfloat64m1_t dst,
                                             vfloat32mf2_t vector,
                                             vfloat64m1_t scalar, size_t vl) {
-  return vfwredsum_vs_f32mf2_f64m1(dst, vector, scalar, vl);
+  return vfwredusum_vs_f32mf2_f64m1(dst, vector, scalar, vl);
 }
 
 //
-// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m1_f64m1(
+// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m1_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.nxv1f64.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv1f64.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vfloat64m1_t test_vfwredsum_vs_f32m1_f64m1(vfloat64m1_t dst,
+vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1(vfloat64m1_t dst,
                                            vfloat32m1_t vector,
                                            vfloat64m1_t scalar, size_t vl) {
-  return vfwredsum_vs_f32m1_f64m1(dst, vector, scalar, vl);
+  return vfwredusum_vs_f32m1_f64m1(dst, vector, scalar, vl);
 }
 
 //
-// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m2_f64m1(
+// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m2_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.nxv1f64.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv1f64.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vfloat64m1_t test_vfwredsum_vs_f32m2_f64m1(vfloat64m1_t dst,
+vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1(vfloat64m1_t dst,
                                            vfloat32m2_t vector,
                                            vfloat64m1_t scalar, size_t vl) {
-  return vfwredsum_vs_f32m2_f64m1(dst, vector, scalar, vl);
+  return vfwredusum_vs_f32m2_f64m1(dst, vector, scalar, vl);
 }
 
 //
-// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m4_f64m1(
+// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m4_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.nxv1f64.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv1f64.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vfloat64m1_t test_vfwredsum_vs_f32m4_f64m1(vfloat64m1_t dst,
+vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1(vfloat64m1_t dst,
                                            vfloat32m4_t vector,
                                            vfloat64m1_t scalar, size_t vl) {
-  return vfwredsum_vs_f32m4_f64m1(dst, vector, scalar, vl);
+  return vfwredusum_vs_f32m4_f64m1(dst, vector, scalar, vl);
 }
 
 //
-// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m8_f64m1(
+// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m8_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.nxv1f64.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.nxv1f64.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vfloat64m1_t test_vfwredsum_vs_f32m8_f64m1(vfloat64m1_t dst,
+vfloat64m1_t test_vfwredusum_vs_f32m8_f64m1(vfloat64m1_t dst,
                                            vfloat32m8_t vector,
                                            vfloat64m1_t scalar, size_t vl) {
-  return vfwredsum_vs_f32m8_f64m1(dst, vector, scalar, vl);
+  return vfwredusum_vs_f32m8_f64m1(dst, vector, scalar, vl);
 }
 
 //
-// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32mf2_f64m1_m(
+// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32mf2_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.mask.nxv1f64.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vfloat64m1_t test_vfwredsum_vs_f32mf2_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
+vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
                                               vfloat32mf2_t vector,
                                               vfloat64m1_t scalar, size_t vl) {
-  return vfwredsum_vs_f32mf2_f64m1_m(mask, dst, vector, scalar, vl);
+  return vfwredusum_vs_f32mf2_f64m1_m(mask, dst, vector, scalar, vl);
 }
 
 //
-// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m1_f64m1_m(
+// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m1_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.mask.nxv1f64.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vfloat64m1_t test_vfwredsum_vs_f32m1_f64m1_m(vbool32_t mask, vfloat64m1_t dst,
+vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1_m(vbool32_t mask, vfloat64m1_t dst,
                                              vfloat32m1_t vector,
                                              vfloat64m1_t scalar, size_t vl) {
-  return vfwredsum_vs_f32m1_f64m1_m(mask, dst, vector, scalar, vl);
+  return vfwredusum_vs_f32m1_f64m1_m(mask, dst, vector, scalar, vl);
 }
 
 //
-// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m2_f64m1_m(
+// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m2_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.mask.nxv1f64.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vfloat64m1_t test_vfwredsum_vs_f32m2_f64m1_m(vbool16_t mask, vfloat64m1_t dst,
+vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1_m(vbool16_t mask, vfloat64m1_t dst,
                                              vfloat32m2_t vector,
                                              vfloat64m1_t scalar, size_t vl) {
-  return vfwredsum_vs_f32m2_f64m1_m(mask, dst, vector, scalar, vl);
+  return vfwredusum_vs_f32m2_f64m1_m(mask, dst, vector, scalar, vl);
 }
 
 //
-// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m4_f64m1_m(
+// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m4_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.mask.nxv1f64.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vfloat64m1_t test_vfwredsum_vs_f32m4_f64m1_m(vbool8_t mask, vfloat64m1_t dst,
+vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1_m(vbool8_t mask, vfloat64m1_t dst,
                                              vfloat32m4_t vector,
                                              vfloat64m1_t scalar, size_t vl) {
-  return vfwredsum_vs_f32m4_f64m1_m(mask, dst, vector, scalar, vl);
+  return vfwredusum_vs_f32m4_f64m1_m(mask, dst, vector, scalar, vl);
+}
+
+//
+// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m8_f64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vfloat64m1_t test_vfwredusum_vs_f32m8_f64m1_m(vbool4_t mask, vfloat64m1_t dst,
+                                              vfloat32m8_t vector,
+                                              vfloat64m1_t scalar, size_t vl) {
+  return vfwredusum_vs_f32m8_f64m1_m(mask, dst, vector, scalar, vl);
 }
 
 //
 // CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m8_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.mask.nxv1f64.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwredusum.mask.nxv1f64.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat64m1_t test_vfwredsum_vs_f32m8_f64m1_m(vbool4_t mask, vfloat64m1_t dst,
diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -1159,11 +1159,11 @@
   defm vwredsum : RISCVReduction;
 
   defm vfredosum : RISCVReduction;
-  defm vfredsum : RISCVReduction;
+  defm vfredusum : RISCVReduction;
   defm vfredmin : RISCVReduction;
   defm vfredmax : RISCVReduction;
 
-  defm vfwredsum : RISCVReduction;
+  defm vfwredusum : RISCVReduction;
   defm vfwredosum : RISCVReduction;
 
   def int_riscv_vmand: RISCVBinaryAAANoMask;
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoV.td b/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
@@ -921,10 +921,12 @@
 // Vector Single-Width Floating-Point Reduction Instructions
 let RVVConstraint = NoConstraint in {
 defm VFREDOSUM : VALU_FV_V<"vfredosum", 0b000011>;
-defm VFREDSUM : VALU_FV_V<"vfredsum", 0b000001>;
+defm VFREDUSUM : VALU_FV_V<"vfredusum", 0b000001>;
 defm VFREDMAX : VALU_FV_V<"vfredmax", 0b000111>;
 defm VFREDMIN : VALU_FV_V<"vfredmin", 0b000101>;
 } // RVVConstraint = NoConstraint
+def : InstAlias<"vfredsum.vs $vd, $vs2, $vs1$vm",
+                (VFREDUSUM_VS VR:$vd, VR:$vs2, VR:$vs1, VMaskOp:$vm), 0>;
 
 // Vector Widening Floating-Point Reduction Instructions
 let Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint in {
@@ -933,8 +935,10 @@
 // will impose unnecessary restrictions by not allowing the destination to
 // overlap with the first (wide) operand.
 defm VFWREDOSUM : VALU_FV_V<"vfwredosum", 0b110011>;
-defm VFWREDSUM : VALU_FV_V<"vfwredsum", 0b110001>;
+defm VFWREDUSUM : VALU_FV_V<"vfwredusum", 0b110001>;
 } // Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint
+def : InstAlias<"vfwredsum.vs $vd, $vs2, $vs1$vm",
+                (VFWREDUSUM_VS VR:$vd, VR:$vs2, VR:$vs1, VMaskOp:$vm), 0>;
 } // Predicates = [HasStdExtV, HasStdExtF]
 
 let Predicates = [HasStdExtV] in {
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -3776,14 +3776,14 @@
 // 15.3. Vector Single-Width Floating-Point Reduction Instructions
 //===----------------------------------------------------------------------===//
 defm PseudoVFREDOSUM : VPseudoReductionV_VS;
-defm PseudoVFREDSUM : VPseudoReductionV_VS;
+defm PseudoVFREDUSUM : VPseudoReductionV_VS;
 defm PseudoVFREDMIN : VPseudoReductionV_VS;
 defm PseudoVFREDMAX : VPseudoReductionV_VS;
 
 //===----------------------------------------------------------------------===//
 // 15.4. Vector Widening Floating-Point Reduction Instructions
 //===----------------------------------------------------------------------===//
-defm PseudoVFWREDSUM : VPseudoReductionV_VS;
+defm PseudoVFWREDUSUM : VPseudoReductionV_VS;
 defm PseudoVFWREDOSUM : VPseudoReductionV_VS;
 } // Predicates = [HasStdExtV, HasStdExtF]
 
@@ -4441,14 +4441,14 @@
 // 15.3. Vector Single-Width Floating-Point Reduction Instructions
 //===----------------------------------------------------------------------===//
 defm : VPatReductionV_VS<"int_riscv_vfredosum", "PseudoVFREDOSUM", /*IsFloat=*/1>;
-defm : VPatReductionV_VS<"int_riscv_vfredsum", "PseudoVFREDSUM", /*IsFloat=*/1>;
+defm : VPatReductionV_VS<"int_riscv_vfredusum", "PseudoVFREDUSUM", /*IsFloat=*/1>;
 defm : VPatReductionV_VS<"int_riscv_vfredmin", "PseudoVFREDMIN", /*IsFloat=*/1>;
 defm : VPatReductionV_VS<"int_riscv_vfredmax", "PseudoVFREDMAX", /*IsFloat=*/1>;
 
 //===----------------------------------------------------------------------===//
 // 15.4. Vector Widening Floating-Point Reduction Instructions
 //===----------------------------------------------------------------------===//
-defm : VPatReductionW_VS<"int_riscv_vfwredsum", "PseudoVFWREDSUM", /*IsFloat=*/1>;
+defm : VPatReductionW_VS<"int_riscv_vfwredusum", "PseudoVFWREDUSUM", /*IsFloat=*/1>;
 defm : VPatReductionW_VS<"int_riscv_vfwredosum", "PseudoVFWREDOSUM", /*IsFloat=*/1>;
 } // Predicates = [HasStdExtV, HasStdExtF]
 
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
@@ -920,7 +920,7 @@
 // 15.3. Vector Single-Width Floating-Point Reduction Instructions
 let Predicates = [HasStdExtV, HasStdExtF] in {
 defm : VPatReductionVL<rvv_vecreduce_SEQ_FADD_vl, "PseudoVFREDOSUM", /*is_float*/1>;
-defm : VPatReductionVL<rvv_vecreduce_FADD_vl, "PseudoVFREDSUM", /*is_float*/1>;
+defm : VPatReductionVL<rvv_vecreduce_FADD_vl, "PseudoVFREDUSUM", /*is_float*/1>;
 defm : VPatReductionVL<rvv_vecreduce_FMIN_vl, "PseudoVFREDMIN", /*is_float*/1>;
 defm : VPatReductionVL<rvv_vecreduce_FMAX_vl, "PseudoVFREDMAX", /*is_float*/1>;
 } // Predicates = [HasStdExtV, HasStdExtF]
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll
@@ -45,7 +45,7 @@
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v26, ft0
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
-; CHECK-NEXT:    vfredsum.vs v25, v25, v26
+; CHECK-NEXT:    vfredusum.vs v25, v25, v26
 ; CHECK-NEXT:    vfmv.f.s ft0, v25
 ; CHECK-NEXT:    fadd.h fa0, fa0, ft0
 ; CHECK-NEXT:    ret
@@ -82,7 +82,7 @@
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v26, ft0
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
-; CHECK-NEXT:    vfredsum.vs v25, v25, v26
+; CHECK-NEXT:    vfredusum.vs v25, v25, v26
 ; CHECK-NEXT:    vfmv.f.s ft0, v25
 ; CHECK-NEXT:    fadd.h fa0, fa0, ft0
 ; CHECK-NEXT:    ret
@@ -119,7 +119,7 @@
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v26, ft0
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
-; CHECK-NEXT:    vfredsum.vs v25, v25, v26
+; CHECK-NEXT:    vfredusum.vs v25, v25, v26
 ; CHECK-NEXT:    vfmv.f.s ft0, v25
 ; CHECK-NEXT:    fadd.h fa0, fa0, ft0
 ; CHECK-NEXT:    ret
@@ -156,7 +156,7 @@
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v25, ft0
 ; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, mu
-; CHECK-NEXT:    vfredsum.vs v25, v26, v25
+; CHECK-NEXT:    vfredusum.vs v25, v26, v25
 ; CHECK-NEXT:    vfmv.f.s ft0, v25
 ; CHECK-NEXT:    fadd.h fa0, fa0, ft0
 ; CHECK-NEXT:    ret
@@ -194,7 +194,7 @@
 ; RV32-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
 ; RV32-NEXT:    vfmv.v.f v25, ft0
 ; RV32-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; RV32-NEXT:    vfredsum.vs v25, v28, v25
+; RV32-NEXT:    vfredusum.vs v25, v28, v25
 ; RV32-NEXT:    vfmv.f.s ft0, v25
 ; RV32-NEXT:    fadd.h fa0, fa0, ft0
 ; RV32-NEXT:    ret
@@ -209,7 +209,7 @@
 ; RV64-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
 ; RV64-NEXT:    vfmv.v.f v25, ft0
 ; RV64-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; RV64-NEXT:    vfredsum.vs v25, v28, v25
+; RV64-NEXT:    vfredusum.vs v25, v28, v25
 ; RV64-NEXT:    vfmv.f.s ft0, v25
 ; RV64-NEXT:    fadd.h fa0, fa0, ft0
 ; RV64-NEXT:    ret
@@ -248,7 +248,7 @@
 ; RV32-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
 ; RV32-NEXT:    vfmv.v.f v25, ft0
 ; RV32-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
-; RV32-NEXT:    vfredsum.vs v25, v8, v25
+; RV32-NEXT:    vfredusum.vs v25, v8, v25
 ; RV32-NEXT:    vfmv.f.s ft0, v25
 ; RV32-NEXT:    fadd.h fa0, fa0, ft0
 ; RV32-NEXT:    ret
@@ -263,7 +263,7 @@
 ; RV64-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
 ; RV64-NEXT:    vfmv.v.f v25, ft0
 ; RV64-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
-; RV64-NEXT:    vfredsum.vs v25, v8, v25
+; RV64-NEXT:    vfredusum.vs v25, v8, v25
 ; RV64-NEXT:    vfmv.f.s ft0, v25
 ; RV64-NEXT:    fadd.h fa0, fa0, ft0
 ; RV64-NEXT:    ret
@@ -305,7 +305,7 @@
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v25, ft0
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT:    vfredsum.vs v25, v8, v25
+; CHECK-NEXT:    vfredusum.vs v25, v8, v25
 ; CHECK-NEXT:    vfmv.f.s ft0, v25
 ; CHECK-NEXT:    fadd.h fa0, fa0, ft0
 ; CHECK-NEXT:    ret
@@ -381,7 +381,7 @@
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v26, ft0
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; CHECK-NEXT:    vfredsum.vs v25, v25, v26
+; CHECK-NEXT:    vfredusum.vs v25, v25, v26
 ; CHECK-NEXT:    vfmv.f.s ft0, v25
 ; CHECK-NEXT:    fadd.s fa0, fa0, ft0
 ; CHECK-NEXT:    ret
@@ -418,7 +418,7 @@
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v26, ft0
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
-; CHECK-NEXT:    vfredsum.vs v25, v25, v26
+; CHECK-NEXT:    vfredusum.vs v25, v25, v26
 ; CHECK-NEXT:    vfmv.f.s ft0, v25
 ; CHECK-NEXT:    fadd.s fa0, fa0, ft0
 ; CHECK-NEXT:    ret
@@ -455,7 +455,7 @@
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v25, ft0
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
-; CHECK-NEXT:    vfredsum.vs v25, v26, v25
+; CHECK-NEXT:    vfredusum.vs v25, v26, v25
 ; CHECK-NEXT:    vfmv.f.s ft0, v25
 ; CHECK-NEXT:    fadd.s fa0, fa0, ft0
 ; CHECK-NEXT:    ret
@@ -492,7 +492,7 @@
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v25, ft0
 ; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, mu
-; CHECK-NEXT:    vfredsum.vs v25, v28, v25
+; CHECK-NEXT:    vfredusum.vs v25, v28, v25
 ; CHECK-NEXT:    vfmv.f.s ft0, v25
 ; CHECK-NEXT:    fadd.s fa0, fa0, ft0
 ; CHECK-NEXT:    ret
@@ -530,7 +530,7 @@
 ; RV32-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
 ; RV32-NEXT:    vfmv.v.f v25, ft0
 ; RV32-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; RV32-NEXT:    vfredsum.vs v25, v8, v25
+; RV32-NEXT:    vfredusum.vs v25, v8, v25
 ; RV32-NEXT:    vfmv.f.s ft0, v25
 ; RV32-NEXT:    fadd.s fa0, fa0, ft0
 ; RV32-NEXT:    ret
@@ -545,7 +545,7 @@
 ; RV64-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
 ; RV64-NEXT:    vfmv.v.f v25, ft0
 ; RV64-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; RV64-NEXT:    vfredsum.vs v25, v8, v25
+; RV64-NEXT:    vfredusum.vs v25, v8, v25
 ; RV64-NEXT:    vfmv.f.s ft0, v25
 ; RV64-NEXT:    fadd.s fa0, fa0, ft0
 ; RV64-NEXT:    ret
@@ -587,7 +587,7 @@
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v25, ft0
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vfredsum.vs v25, v8, v25
+; CHECK-NEXT:    vfredusum.vs v25, v8, v25
 ; CHECK-NEXT:    vfmv.f.s ft0, v25
 ; CHECK-NEXT:    fadd.s fa0, fa0, ft0
 ; CHECK-NEXT:    ret
@@ -663,7 +663,7 @@
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v26, ft0
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
-; CHECK-NEXT:    vfredsum.vs v25, v25, v26
+; CHECK-NEXT:    vfredusum.vs v25, v25, v26
 ; CHECK-NEXT:    vfmv.f.s ft0, v25
 ; CHECK-NEXT:    fadd.d fa0, fa0, ft0
 ; CHECK-NEXT:    ret
@@ -700,7 +700,7 @@
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v25, ft0
 ; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
-; CHECK-NEXT:    vfredsum.vs v25, v26, v25
+; CHECK-NEXT:    vfredusum.vs v25, v26, v25
 ; CHECK-NEXT:    vfmv.f.s ft0, v25
 ; CHECK-NEXT:    fadd.d fa0, fa0, ft0
 ; CHECK-NEXT:    ret
@@ -737,7 +737,7 @@
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v25, ft0
 ; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
-; CHECK-NEXT:    vfredsum.vs v25, v28, v25
+; CHECK-NEXT:    vfredusum.vs v25, v28, v25
 ; CHECK-NEXT:    vfmv.f.s ft0, v25
 ; CHECK-NEXT:    fadd.d fa0, fa0, ft0
 ; CHECK-NEXT:    ret
@@ -774,7 +774,7 @@
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v25, ft0
 ; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
-; CHECK-NEXT:    vfredsum.vs v25, v8, v25
+; CHECK-NEXT:    vfredusum.vs v25, v8, v25
 ; CHECK-NEXT:    vfmv.f.s ft0, v25
 ; CHECK-NEXT:    fadd.d fa0, fa0, ft0
 ; CHECK-NEXT:    ret
@@ -814,7 +814,7 @@
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v25, ft0
 ; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
-; CHECK-NEXT:    vfredsum.vs v25, v8, v25
+; CHECK-NEXT:    vfredusum.vs v25, v8, v25
 ; CHECK-NEXT:    vfmv.f.s ft0, v25
 ; CHECK-NEXT:    fadd.d fa0, fa0, ft0
 ; CHECK-NEXT:    ret
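Taken together, the pieces above implement the RVV 1.0 rename of the unordered floating-point reductions (vfredsum -> vfredusum, vfwredsum -> vfwredusum) without breaking existing users: the old builtins are re-pointed at the new IR intrinsics, and InstAlias records keep the old assembler mnemonics accepted. As a minimal sketch of the user-visible effect (assuming a compiler built with this patch; the function names below are illustrative and not part of the patch), both spellings compile to the same @llvm.riscv.vfredusum.* intrinsic and ultimately to vfredusum.vs:

    #include <riscv_vector.h>

    // result[0] = scalar[0] + (unordered) sum of vec[0..vl-1]
    vfloat32m1_t reduce_new(vfloat32m1_t dst, vfloat32m8_t vec,
                            vfloat32m1_t scalar, size_t vl) {
      return vfredusum(dst, vec, scalar, vl);  // new RVV 1.0 spelling
    }

    vfloat32m1_t reduce_old(vfloat32m1_t dst, vfloat32m8_t vec,
                            vfloat32m1_t scalar, size_t vl) {
      return vfredsum(dst, vec, scalar, vl);   // legacy spelling, kept working
    }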
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredsum-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfredsum-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfredsum-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfredsum-rv32.ll @@ -1,20 +1,20 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \ ; RUN: < %s | FileCheck %s -declare @llvm.riscv.vfredsum.nxv4f16.nxv1f16( +declare @llvm.riscv.vfredusum.nxv4f16.nxv1f16( , , , i32); -define @intrinsic_vfredsum_vs_nxv4f16_nxv1f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredsum_vs_nxv4f16_nxv1f16_nxv4f16: +define @intrinsic_vfredusum_vs_nxv4f16_nxv1f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv1f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfredsum.vs v8, v9, v10 +; CHECK-NEXT: vfredusum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfredsum.nxv4f16.nxv1f16( + %a = call @llvm.riscv.vfredusum.nxv4f16.nxv1f16( %0, %1, %2, @@ -23,21 +23,21 @@ ret %a } -declare @llvm.riscv.vfredsum.mask.nxv4f16.nxv1f16.nxv1i1( +declare @llvm.riscv.vfredusum.mask.nxv4f16.nxv1f16.nxv1i1( , , , , i32); -define @intrinsic_vfredsum_mask_vs_nxv4f16_nxv1f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv1f16_nxv4f16: +define @intrinsic_vfredusum_mask_vs_nxv4f16_nxv1f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv4f16_nxv1f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfredsum.vs v8, v9, v10, v0.t +; CHECK-NEXT: vfredusum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfredsum.mask.nxv4f16.nxv1f16.nxv1i1( + %a = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv1f16.nxv1i1( %0, %1, %2, @@ -47,20 +47,20 @@ ret %a } -declare @llvm.riscv.vfredsum.nxv4f16.nxv2f16( +declare @llvm.riscv.vfredusum.nxv4f16.nxv2f16( , , , i32); -define @intrinsic_vfredsum_vs_nxv4f16_nxv2f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredsum_vs_nxv4f16_nxv2f16_nxv4f16: +define @intrinsic_vfredusum_vs_nxv4f16_nxv2f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv2f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfredsum.vs v8, v9, v10 +; CHECK-NEXT: vfredusum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfredsum.nxv4f16.nxv2f16( + %a = call @llvm.riscv.vfredusum.nxv4f16.nxv2f16( %0, %1, %2, @@ -69,21 +69,21 @@ ret %a } -declare @llvm.riscv.vfredsum.mask.nxv4f16.nxv2f16.nxv2i1( +declare @llvm.riscv.vfredusum.mask.nxv4f16.nxv2f16.nxv2i1( , , , , i32); -define @intrinsic_vfredsum_mask_vs_nxv4f16_nxv2f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv2f16_nxv4f16: +define @intrinsic_vfredusum_mask_vs_nxv4f16_nxv2f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv4f16_nxv2f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfredsum.vs v8, v9, v10, v0.t +; CHECK-NEXT: vfredusum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfredsum.mask.nxv4f16.nxv2f16.nxv2i1( + %a = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv2f16.nxv2i1( %0, %1, %2, @@ -93,20 +93,20 @@ ret %a } -declare 
@llvm.riscv.vfredsum.nxv4f16.nxv4f16( +declare @llvm.riscv.vfredusum.nxv4f16.nxv4f16( , , , i32); -define @intrinsic_vfredsum_vs_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredsum_vs_nxv4f16_nxv4f16_nxv4f16: +define @intrinsic_vfredusum_vs_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfredsum.vs v8, v9, v10 +; CHECK-NEXT: vfredusum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfredsum.nxv4f16.nxv4f16( + %a = call @llvm.riscv.vfredusum.nxv4f16.nxv4f16( %0, %1, %2, @@ -115,21 +115,21 @@ ret %a } -declare @llvm.riscv.vfredsum.mask.nxv4f16.nxv4f16.nxv4i1( +declare @llvm.riscv.vfredusum.mask.nxv4f16.nxv4f16.nxv4i1( , , , , i32); -define @intrinsic_vfredsum_mask_vs_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv4f16_nxv4f16: +define @intrinsic_vfredusum_mask_vs_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfredsum.vs v8, v9, v10, v0.t +; CHECK-NEXT: vfredusum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfredsum.mask.nxv4f16.nxv4f16.nxv4i1( + %a = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv4f16.nxv4i1( %0, %1, %2, @@ -139,20 +139,20 @@ ret %a } -declare @llvm.riscv.vfredsum.nxv4f16.nxv8f16( +declare @llvm.riscv.vfredusum.nxv4f16.nxv8f16( , , , i32); -define @intrinsic_vfredsum_vs_nxv4f16_nxv8f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredsum_vs_nxv4f16_nxv8f16_nxv4f16: +define @intrinsic_vfredusum_vs_nxv4f16_nxv8f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv8f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfredsum.vs v8, v10, v9 +; CHECK-NEXT: vfredusum.vs v8, v10, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfredsum.nxv4f16.nxv8f16( + %a = call @llvm.riscv.vfredusum.nxv4f16.nxv8f16( %0, %1, %2, @@ -161,21 +161,21 @@ ret %a } -declare @llvm.riscv.vfredsum.mask.nxv4f16.nxv8f16.nxv8i1( +declare @llvm.riscv.vfredusum.mask.nxv4f16.nxv8f16.nxv8i1( , , , , i32); -define @intrinsic_vfredsum_mask_vs_nxv4f16_nxv8f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv8f16_nxv4f16: +define @intrinsic_vfredusum_mask_vs_nxv4f16_nxv8f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv4f16_nxv8f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfredsum.vs v8, v10, v9, v0.t +; CHECK-NEXT: vfredusum.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfredsum.mask.nxv4f16.nxv8f16.nxv8i1( + %a = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv8f16.nxv8i1( %0, %1, %2, @@ -185,20 +185,20 @@ ret %a } -declare @llvm.riscv.vfredsum.nxv4f16.nxv16f16( +declare @llvm.riscv.vfredusum.nxv4f16.nxv16f16( , , , i32); -define @intrinsic_vfredsum_vs_nxv4f16_nxv16f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredsum_vs_nxv4f16_nxv16f16_nxv4f16: +define @intrinsic_vfredusum_vs_nxv4f16_nxv16f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv16f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: 
vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfredsum.vs v8, v12, v9 +; CHECK-NEXT: vfredusum.vs v8, v12, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfredsum.nxv4f16.nxv16f16( + %a = call @llvm.riscv.vfredusum.nxv4f16.nxv16f16( %0, %1, %2, @@ -207,21 +207,21 @@ ret %a } -declare @llvm.riscv.vfredsum.mask.nxv4f16.nxv16f16.nxv16i1( +declare @llvm.riscv.vfredusum.mask.nxv4f16.nxv16f16.nxv16i1( , , , , i32); -define @intrinsic_vfredsum_mask_vs_nxv4f16_nxv16f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv16f16_nxv4f16: +define @intrinsic_vfredusum_mask_vs_nxv4f16_nxv16f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv4f16_nxv16f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfredsum.vs v8, v12, v9, v0.t +; CHECK-NEXT: vfredusum.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfredsum.mask.nxv4f16.nxv16f16.nxv16i1( + %a = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv16f16.nxv16i1( %0, %1, %2, @@ -231,20 +231,20 @@ ret %a } -declare @llvm.riscv.vfredsum.nxv4f16.nxv32f16( +declare @llvm.riscv.vfredusum.nxv4f16.nxv32f16( , , , i32); -define @intrinsic_vfredsum_vs_nxv4f16_nxv32f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredsum_vs_nxv4f16_nxv32f16_nxv4f16: +define @intrinsic_vfredusum_vs_nxv4f16_nxv32f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv32f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu -; CHECK-NEXT: vfredsum.vs v8, v16, v9 +; CHECK-NEXT: vfredusum.vs v8, v16, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfredsum.nxv4f16.nxv32f16( + %a = call @llvm.riscv.vfredusum.nxv4f16.nxv32f16( %0, %1, %2, @@ -253,21 +253,21 @@ ret %a } -declare @llvm.riscv.vfredsum.mask.nxv4f16.nxv32f16.nxv32i1( +declare @llvm.riscv.vfredusum.mask.nxv4f16.nxv32f16.nxv32i1( , , , , i32); -define @intrinsic_vfredsum_mask_vs_nxv4f16_nxv32f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv32f16_nxv4f16: +define @intrinsic_vfredusum_mask_vs_nxv4f16_nxv32f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv4f16_nxv32f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu -; CHECK-NEXT: vfredsum.vs v8, v16, v9, v0.t +; CHECK-NEXT: vfredusum.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfredsum.mask.nxv4f16.nxv32f16.nxv32i1( + %a = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv32f16.nxv32i1( %0, %1, %2, @@ -277,20 +277,20 @@ ret %a } -declare @llvm.riscv.vfredsum.nxv2f32.nxv1f32( +declare @llvm.riscv.vfredusum.nxv2f32.nxv1f32( , , , i32); -define @intrinsic_vfredsum_vs_nxv2f32_nxv1f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredsum_vs_nxv2f32_nxv1f32_nxv2f32: +define @intrinsic_vfredusum_vs_nxv2f32_nxv1f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfredusum_vs_nxv2f32_nxv1f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfredsum.vs v8, v9, v10 +; CHECK-NEXT: vfredusum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfredsum.nxv2f32.nxv1f32( + %a = call @llvm.riscv.vfredusum.nxv2f32.nxv1f32( %0, %1, %2, @@ -299,21 +299,21 @@ ret %a } -declare @llvm.riscv.vfredsum.mask.nxv2f32.nxv1f32.nxv1i1( +declare @llvm.riscv.vfredusum.mask.nxv2f32.nxv1f32.nxv1i1( , , , , 
i32); -define @intrinsic_vfredsum_mask_vs_nxv2f32_nxv1f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv2f32_nxv1f32_nxv2f32: +define @intrinsic_vfredusum_mask_vs_nxv2f32_nxv1f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv2f32_nxv1f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfredsum.vs v8, v9, v10, v0.t +; CHECK-NEXT: vfredusum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfredsum.mask.nxv2f32.nxv1f32.nxv1i1( + %a = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv1f32.nxv1i1( %0, %1, %2, @@ -323,20 +323,20 @@ ret %a } -declare @llvm.riscv.vfredsum.nxv2f32.nxv2f32( +declare @llvm.riscv.vfredusum.nxv2f32.nxv2f32( , , , i32); -define @intrinsic_vfredsum_vs_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredsum_vs_nxv2f32_nxv2f32_nxv2f32: +define @intrinsic_vfredusum_vs_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfredusum_vs_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfredsum.vs v8, v9, v10 +; CHECK-NEXT: vfredusum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfredsum.nxv2f32.nxv2f32( + %a = call @llvm.riscv.vfredusum.nxv2f32.nxv2f32( %0, %1, %2, @@ -345,21 +345,21 @@ ret %a } -declare @llvm.riscv.vfredsum.mask.nxv2f32.nxv2f32.nxv2i1( +declare @llvm.riscv.vfredusum.mask.nxv2f32.nxv2f32.nxv2i1( , , , , i32); -define @intrinsic_vfredsum_mask_vs_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv2f32_nxv2f32_nxv2f32: +define @intrinsic_vfredusum_mask_vs_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfredsum.vs v8, v9, v10, v0.t +; CHECK-NEXT: vfredusum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfredsum.mask.nxv2f32.nxv2f32.nxv2i1( + %a = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv2f32.nxv2i1( %0, %1, %2, @@ -369,20 +369,20 @@ ret %a } -declare @llvm.riscv.vfredsum.nxv2f32.nxv4f32( +declare @llvm.riscv.vfredusum.nxv2f32.nxv4f32( , , , i32); -define @intrinsic_vfredsum_vs_nxv2f32_nxv4f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredsum_vs_nxv2f32_nxv4f32_nxv2f32: +define @intrinsic_vfredusum_vs_nxv2f32_nxv4f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfredusum_vs_nxv2f32_nxv4f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfredsum.vs v8, v10, v9 +; CHECK-NEXT: vfredusum.vs v8, v10, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfredsum.nxv2f32.nxv4f32( + %a = call @llvm.riscv.vfredusum.nxv2f32.nxv4f32( %0, %1, %2, @@ -391,21 +391,21 @@ ret %a } -declare @llvm.riscv.vfredsum.mask.nxv2f32.nxv4f32.nxv4i1( +declare @llvm.riscv.vfredusum.mask.nxv2f32.nxv4f32.nxv4i1( , , , , i32); -define @intrinsic_vfredsum_mask_vs_nxv2f32_nxv4f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv2f32_nxv4f32_nxv2f32: +define @intrinsic_vfredusum_mask_vs_nxv2f32_nxv4f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv2f32_nxv4f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfredsum.vs v8, 
v10, v9, v0.t +; CHECK-NEXT: vfredusum.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfredsum.mask.nxv2f32.nxv4f32.nxv4i1( + %a = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv4f32.nxv4i1( %0, %1, %2, @@ -415,20 +415,20 @@ ret %a } -declare @llvm.riscv.vfredsum.nxv2f32.nxv8f32( +declare @llvm.riscv.vfredusum.nxv2f32.nxv8f32( , , , i32); -define @intrinsic_vfredsum_vs_nxv2f32_nxv8f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredsum_vs_nxv2f32_nxv8f32_nxv2f32: +define @intrinsic_vfredusum_vs_nxv2f32_nxv8f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfredusum_vs_nxv2f32_nxv8f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfredsum.vs v8, v12, v9 +; CHECK-NEXT: vfredusum.vs v8, v12, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfredsum.nxv2f32.nxv8f32( + %a = call @llvm.riscv.vfredusum.nxv2f32.nxv8f32( %0, %1, %2, @@ -437,21 +437,21 @@ ret %a } -declare @llvm.riscv.vfredsum.mask.nxv2f32.nxv8f32.nxv8i1( +declare @llvm.riscv.vfredusum.mask.nxv2f32.nxv8f32.nxv8i1( , , , , i32); -define @intrinsic_vfredsum_mask_vs_nxv2f32_nxv8f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv2f32_nxv8f32_nxv2f32: +define @intrinsic_vfredusum_mask_vs_nxv2f32_nxv8f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv2f32_nxv8f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfredsum.vs v8, v12, v9, v0.t +; CHECK-NEXT: vfredusum.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfredsum.mask.nxv2f32.nxv8f32.nxv8i1( + %a = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv8f32.nxv8i1( %0, %1, %2, @@ -461,20 +461,20 @@ ret %a } -declare @llvm.riscv.vfredsum.nxv2f32.nxv16f32( +declare @llvm.riscv.vfredusum.nxv2f32.nxv16f32( , , , i32); -define @intrinsic_vfredsum_vs_nxv2f32_nxv16f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredsum_vs_nxv2f32_nxv16f32_nxv2f32: +define @intrinsic_vfredusum_vs_nxv2f32_nxv16f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfredusum_vs_nxv2f32_nxv16f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu -; CHECK-NEXT: vfredsum.vs v8, v16, v9 +; CHECK-NEXT: vfredusum.vs v8, v16, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfredsum.nxv2f32.nxv16f32( + %a = call @llvm.riscv.vfredusum.nxv2f32.nxv16f32( %0, %1, %2, @@ -483,21 +483,21 @@ ret %a } -declare @llvm.riscv.vfredsum.mask.nxv2f32.nxv16f32.nxv16i1( +declare @llvm.riscv.vfredusum.mask.nxv2f32.nxv16f32.nxv16i1( , , , , i32); -define @intrinsic_vfredsum_mask_vs_nxv2f32_nxv16f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv2f32_nxv16f32_nxv2f32: +define @intrinsic_vfredusum_mask_vs_nxv2f32_nxv16f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv2f32_nxv16f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu -; CHECK-NEXT: vfredsum.vs v8, v16, v9, v0.t +; CHECK-NEXT: vfredusum.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfredsum.mask.nxv2f32.nxv16f32.nxv16i1( + %a = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv16f32.nxv16i1( %0, %1, %2, @@ -507,20 +507,20 @@ ret %a } -declare @llvm.riscv.vfredsum.nxv1f64.nxv1f64( +declare @llvm.riscv.vfredusum.nxv1f64.nxv1f64( , , , i32); -define @intrinsic_vfredsum_vs_nxv1f64_nxv1f64_nxv1f64( %0, %1, 
%2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredsum_vs_nxv1f64_nxv1f64_nxv1f64: +define @intrinsic_vfredusum_vs_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfredusum_vs_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu -; CHECK-NEXT: vfredsum.vs v8, v9, v10 +; CHECK-NEXT: vfredusum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfredsum.nxv1f64.nxv1f64( + %a = call @llvm.riscv.vfredusum.nxv1f64.nxv1f64( %0, %1, %2, @@ -529,21 +529,21 @@ ret %a } -declare @llvm.riscv.vfredsum.mask.nxv1f64.nxv1f64.nxv1i1( +declare @llvm.riscv.vfredusum.mask.nxv1f64.nxv1f64.nxv1i1( , , , , i32); -define @intrinsic_vfredsum_mask_vs_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv1f64_nxv1f64_nxv1f64: +define @intrinsic_vfredusum_mask_vs_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu -; CHECK-NEXT: vfredsum.vs v8, v9, v10, v0.t +; CHECK-NEXT: vfredusum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfredsum.mask.nxv1f64.nxv1f64.nxv1i1( + %a = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv1f64.nxv1i1( %0, %1, %2, @@ -553,20 +553,20 @@ ret %a } -declare @llvm.riscv.vfredsum.nxv1f64.nxv2f64( +declare @llvm.riscv.vfredusum.nxv1f64.nxv2f64( , , , i32); -define @intrinsic_vfredsum_vs_nxv1f64_nxv2f64_nxv1f64( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredsum_vs_nxv1f64_nxv2f64_nxv1f64: +define @intrinsic_vfredusum_vs_nxv1f64_nxv2f64_nxv1f64( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfredusum_vs_nxv1f64_nxv2f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu -; CHECK-NEXT: vfredsum.vs v8, v10, v9 +; CHECK-NEXT: vfredusum.vs v8, v10, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfredsum.nxv1f64.nxv2f64( + %a = call @llvm.riscv.vfredusum.nxv1f64.nxv2f64( %0, %1, %2, @@ -575,21 +575,21 @@ ret %a } -declare @llvm.riscv.vfredsum.mask.nxv1f64.nxv2f64.nxv2i1( +declare @llvm.riscv.vfredusum.mask.nxv1f64.nxv2f64.nxv2i1( , , , , i32); -define @intrinsic_vfredsum_mask_vs_nxv1f64_nxv2f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv1f64_nxv2f64_nxv1f64: +define @intrinsic_vfredusum_mask_vs_nxv1f64_nxv2f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv1f64_nxv2f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu -; CHECK-NEXT: vfredsum.vs v8, v10, v9, v0.t +; CHECK-NEXT: vfredusum.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfredsum.mask.nxv1f64.nxv2f64.nxv2i1( + %a = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv2f64.nxv2i1( %0, %1, %2, @@ -599,20 +599,20 @@ ret %a } -declare @llvm.riscv.vfredsum.nxv1f64.nxv4f64( +declare @llvm.riscv.vfredusum.nxv1f64.nxv4f64( , , , i32); -define @intrinsic_vfredsum_vs_nxv1f64_nxv4f64_nxv1f64( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredsum_vs_nxv1f64_nxv4f64_nxv1f64: +define @intrinsic_vfredusum_vs_nxv1f64_nxv4f64_nxv1f64( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfredusum_vs_nxv1f64_nxv4f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu -; CHECK-NEXT: vfredsum.vs v8, v12, v9 +; CHECK-NEXT: vfredusum.vs v8, v12, v9 ; CHECK-NEXT: ret entry: - %a = call 
@llvm.riscv.vfredsum.nxv1f64.nxv4f64( + %a = call @llvm.riscv.vfredusum.nxv1f64.nxv4f64( %0, %1, %2, @@ -621,21 +621,21 @@ ret %a } -declare @llvm.riscv.vfredsum.mask.nxv1f64.nxv4f64.nxv4i1( +declare @llvm.riscv.vfredusum.mask.nxv1f64.nxv4f64.nxv4i1( , , , , i32); -define @intrinsic_vfredsum_mask_vs_nxv1f64_nxv4f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv1f64_nxv4f64_nxv1f64: +define @intrinsic_vfredusum_mask_vs_nxv1f64_nxv4f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv1f64_nxv4f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu -; CHECK-NEXT: vfredsum.vs v8, v12, v9, v0.t +; CHECK-NEXT: vfredusum.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfredsum.mask.nxv1f64.nxv4f64.nxv4i1( + %a = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv4f64.nxv4i1( %0, %1, %2, @@ -645,20 +645,20 @@ ret %a } -declare @llvm.riscv.vfredsum.nxv1f64.nxv8f64( +declare @llvm.riscv.vfredusum.nxv1f64.nxv8f64( , , , i32); -define @intrinsic_vfredsum_vs_nxv1f64_nxv8f64_nxv1f64( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredsum_vs_nxv1f64_nxv8f64_nxv1f64: +define @intrinsic_vfredusum_vs_nxv1f64_nxv8f64_nxv1f64( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfredusum_vs_nxv1f64_nxv8f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu -; CHECK-NEXT: vfredsum.vs v8, v16, v9 +; CHECK-NEXT: vfredusum.vs v8, v16, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfredsum.nxv1f64.nxv8f64( + %a = call @llvm.riscv.vfredusum.nxv1f64.nxv8f64( %0, %1, %2, @@ -667,21 +667,21 @@ ret %a } -declare @llvm.riscv.vfredsum.mask.nxv1f64.nxv8f64.nxv8i1( +declare @llvm.riscv.vfredusum.mask.nxv1f64.nxv8f64.nxv8i1( , , , , i32); -define @intrinsic_vfredsum_mask_vs_nxv1f64_nxv8f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv1f64_nxv8f64_nxv1f64: +define @intrinsic_vfredusum_mask_vs_nxv1f64_nxv8f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv1f64_nxv8f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu -; CHECK-NEXT: vfredsum.vs v8, v16, v9, v0.t +; CHECK-NEXT: vfredusum.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfredsum.mask.nxv1f64.nxv8f64.nxv8i1( + %a = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv8f64.nxv8i1( %0, %1, %2, diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredsum-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfredsum-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfredsum-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfredsum-rv64.ll @@ -1,20 +1,20 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \ ; RUN: < %s | FileCheck %s -declare @llvm.riscv.vfredsum.nxv4f16.nxv1f16( +declare @llvm.riscv.vfredusum.nxv4f16.nxv1f16( , , , i64); -define @intrinsic_vfredsum_vs_nxv4f16_nxv1f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredsum_vs_nxv4f16_nxv1f16_nxv4f16: +define @intrinsic_vfredusum_vs_nxv4f16_nxv1f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv1f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfredsum.vs v8, v9, v10 +; CHECK-NEXT: vfredusum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfredsum.nxv4f16.nxv1f16( + %a = call 
@llvm.riscv.vfredusum.nxv4f16.nxv1f16( %0, %1, %2, @@ -23,21 +23,21 @@ ret %a } -declare @llvm.riscv.vfredsum.mask.nxv4f16.nxv1f16( +declare @llvm.riscv.vfredusum.mask.nxv4f16.nxv1f16( , , , , i64); -define @intrinsic_vfredsum_mask_vs_nxv4f16_nxv1f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv1f16_nxv4f16: +define @intrinsic_vfredusum_mask_vs_nxv4f16_nxv1f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv4f16_nxv1f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfredsum.vs v8, v9, v10, v0.t +; CHECK-NEXT: vfredusum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfredsum.mask.nxv4f16.nxv1f16( + %a = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv1f16( %0, %1, %2, @@ -47,20 +47,20 @@ ret %a } -declare @llvm.riscv.vfredsum.nxv4f16.nxv2f16( +declare @llvm.riscv.vfredusum.nxv4f16.nxv2f16( , , , i64); -define @intrinsic_vfredsum_vs_nxv4f16_nxv2f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredsum_vs_nxv4f16_nxv2f16_nxv4f16: +define @intrinsic_vfredusum_vs_nxv4f16_nxv2f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv2f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfredsum.vs v8, v9, v10 +; CHECK-NEXT: vfredusum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfredsum.nxv4f16.nxv2f16( + %a = call @llvm.riscv.vfredusum.nxv4f16.nxv2f16( %0, %1, %2, @@ -69,21 +69,21 @@ ret %a } -declare @llvm.riscv.vfredsum.mask.nxv4f16.nxv2f16( +declare @llvm.riscv.vfredusum.mask.nxv4f16.nxv2f16( , , , , i64); -define @intrinsic_vfredsum_mask_vs_nxv4f16_nxv2f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv2f16_nxv4f16: +define @intrinsic_vfredusum_mask_vs_nxv4f16_nxv2f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv4f16_nxv2f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfredsum.vs v8, v9, v10, v0.t +; CHECK-NEXT: vfredusum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfredsum.mask.nxv4f16.nxv2f16( + %a = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv2f16( %0, %1, %2, @@ -93,20 +93,20 @@ ret %a } -declare @llvm.riscv.vfredsum.nxv4f16.nxv4f16( +declare @llvm.riscv.vfredusum.nxv4f16.nxv4f16( , , , i64); -define @intrinsic_vfredsum_vs_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredsum_vs_nxv4f16_nxv4f16_nxv4f16: +define @intrinsic_vfredusum_vs_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfredsum.vs v8, v9, v10 +; CHECK-NEXT: vfredusum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfredsum.nxv4f16.nxv4f16( + %a = call @llvm.riscv.vfredusum.nxv4f16.nxv4f16( %0, %1, %2, @@ -115,21 +115,21 @@ ret %a } -declare @llvm.riscv.vfredsum.mask.nxv4f16.nxv4f16( +declare @llvm.riscv.vfredusum.mask.nxv4f16.nxv4f16( , , , , i64); -define @intrinsic_vfredsum_mask_vs_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv4f16_nxv4f16: +define @intrinsic_vfredusum_mask_vs_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: 
intrinsic_vfredusum_mask_vs_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfredsum.vs v8, v9, v10, v0.t +; CHECK-NEXT: vfredusum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfredsum.mask.nxv4f16.nxv4f16( + %a = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv4f16( %0, %1, %2, @@ -139,20 +139,20 @@ ret %a } -declare @llvm.riscv.vfredsum.nxv4f16.nxv8f16( +declare @llvm.riscv.vfredusum.nxv4f16.nxv8f16( , , , i64); -define @intrinsic_vfredsum_vs_nxv4f16_nxv8f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredsum_vs_nxv4f16_nxv8f16_nxv4f16: +define @intrinsic_vfredusum_vs_nxv4f16_nxv8f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv8f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfredsum.vs v8, v10, v9 +; CHECK-NEXT: vfredusum.vs v8, v10, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfredsum.nxv4f16.nxv8f16( + %a = call @llvm.riscv.vfredusum.nxv4f16.nxv8f16( %0, %1, %2, @@ -161,21 +161,21 @@ ret %a } -declare @llvm.riscv.vfredsum.mask.nxv4f16.nxv8f16( +declare @llvm.riscv.vfredusum.mask.nxv4f16.nxv8f16( , , , , i64); -define @intrinsic_vfredsum_mask_vs_nxv4f16_nxv8f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv8f16_nxv4f16: +define @intrinsic_vfredusum_mask_vs_nxv4f16_nxv8f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv4f16_nxv8f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfredsum.vs v8, v10, v9, v0.t +; CHECK-NEXT: vfredusum.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfredsum.mask.nxv4f16.nxv8f16( + %a = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv8f16( %0, %1, %2, @@ -185,20 +185,20 @@ ret %a } -declare @llvm.riscv.vfredsum.nxv4f16.nxv16f16( +declare @llvm.riscv.vfredusum.nxv4f16.nxv16f16( , , , i64); -define @intrinsic_vfredsum_vs_nxv4f16_nxv16f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredsum_vs_nxv4f16_nxv16f16_nxv4f16: +define @intrinsic_vfredusum_vs_nxv4f16_nxv16f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv16f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfredsum.vs v8, v12, v9 +; CHECK-NEXT: vfredusum.vs v8, v12, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfredsum.nxv4f16.nxv16f16( + %a = call @llvm.riscv.vfredusum.nxv4f16.nxv16f16( %0, %1, %2, @@ -207,21 +207,21 @@ ret %a } -declare @llvm.riscv.vfredsum.mask.nxv4f16.nxv16f16( +declare @llvm.riscv.vfredusum.mask.nxv4f16.nxv16f16( , , , , i64); -define @intrinsic_vfredsum_mask_vs_nxv4f16_nxv16f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv16f16_nxv4f16: +define @intrinsic_vfredusum_mask_vs_nxv4f16_nxv16f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv4f16_nxv16f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu -; CHECK-NEXT: vfredsum.vs v8, v12, v9, v0.t +; CHECK-NEXT: vfredusum.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfredsum.mask.nxv4f16.nxv16f16( + %a = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv16f16( %0, %1, %2, @@ -231,20 +231,20 @@ ret %a } -declare @llvm.riscv.vfredsum.nxv4f16.nxv32f16( +declare 
@llvm.riscv.vfredusum.nxv4f16.nxv32f16( , , , i64); -define @intrinsic_vfredsum_vs_nxv4f16_nxv32f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredsum_vs_nxv4f16_nxv32f16_nxv4f16: +define @intrinsic_vfredusum_vs_nxv4f16_nxv32f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv32f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu -; CHECK-NEXT: vfredsum.vs v8, v16, v9 +; CHECK-NEXT: vfredusum.vs v8, v16, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfredsum.nxv4f16.nxv32f16( + %a = call @llvm.riscv.vfredusum.nxv4f16.nxv32f16( %0, %1, %2, @@ -253,21 +253,21 @@ ret %a } -declare @llvm.riscv.vfredsum.mask.nxv4f16.nxv32f16( +declare @llvm.riscv.vfredusum.mask.nxv4f16.nxv32f16( , , , , i64); -define @intrinsic_vfredsum_mask_vs_nxv4f16_nxv32f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv32f16_nxv4f16: +define @intrinsic_vfredusum_mask_vs_nxv4f16_nxv32f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv4f16_nxv32f16_nxv4f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu -; CHECK-NEXT: vfredsum.vs v8, v16, v9, v0.t +; CHECK-NEXT: vfredusum.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfredsum.mask.nxv4f16.nxv32f16( + %a = call @llvm.riscv.vfredusum.mask.nxv4f16.nxv32f16( %0, %1, %2, @@ -277,20 +277,20 @@ ret %a } -declare @llvm.riscv.vfredsum.nxv2f32.nxv1f32( +declare @llvm.riscv.vfredusum.nxv2f32.nxv1f32( , , , i64); -define @intrinsic_vfredsum_vs_nxv2f32_nxv1f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredsum_vs_nxv2f32_nxv1f32_nxv2f32: +define @intrinsic_vfredusum_vs_nxv2f32_nxv1f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfredusum_vs_nxv2f32_nxv1f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfredsum.vs v8, v9, v10 +; CHECK-NEXT: vfredusum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfredsum.nxv2f32.nxv1f32( + %a = call @llvm.riscv.vfredusum.nxv2f32.nxv1f32( %0, %1, %2, @@ -299,21 +299,21 @@ ret %a } -declare @llvm.riscv.vfredsum.mask.nxv2f32.nxv1f32( +declare @llvm.riscv.vfredusum.mask.nxv2f32.nxv1f32( , , , , i64); -define @intrinsic_vfredsum_mask_vs_nxv2f32_nxv1f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv2f32_nxv1f32_nxv2f32: +define @intrinsic_vfredusum_mask_vs_nxv2f32_nxv1f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv2f32_nxv1f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu -; CHECK-NEXT: vfredsum.vs v8, v9, v10, v0.t +; CHECK-NEXT: vfredusum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfredsum.mask.nxv2f32.nxv1f32( + %a = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv1f32( %0, %1, %2, @@ -323,20 +323,20 @@ ret %a } -declare @llvm.riscv.vfredsum.nxv2f32.nxv2f32( +declare @llvm.riscv.vfredusum.nxv2f32.nxv2f32( , , , i64); -define @intrinsic_vfredsum_vs_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredsum_vs_nxv2f32_nxv2f32_nxv2f32: +define @intrinsic_vfredusum_vs_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfredusum_vs_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfredsum.vs v8, v9, v10 +; CHECK-NEXT: 
vfredusum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfredsum.nxv2f32.nxv2f32( + %a = call @llvm.riscv.vfredusum.nxv2f32.nxv2f32( %0, %1, %2, @@ -345,21 +345,21 @@ ret %a } -declare @llvm.riscv.vfredsum.mask.nxv2f32.nxv2f32( +declare @llvm.riscv.vfredusum.mask.nxv2f32.nxv2f32( , , , , i64); -define @intrinsic_vfredsum_mask_vs_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv2f32_nxv2f32_nxv2f32: +define @intrinsic_vfredusum_mask_vs_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu -; CHECK-NEXT: vfredsum.vs v8, v9, v10, v0.t +; CHECK-NEXT: vfredusum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfredsum.mask.nxv2f32.nxv2f32( + %a = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv2f32( %0, %1, %2, @@ -369,20 +369,20 @@ ret %a } -declare @llvm.riscv.vfredsum.nxv2f32.nxv4f32( +declare @llvm.riscv.vfredusum.nxv2f32.nxv4f32( , , , i64); -define @intrinsic_vfredsum_vs_nxv2f32_nxv4f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredsum_vs_nxv2f32_nxv4f32_nxv2f32: +define @intrinsic_vfredusum_vs_nxv2f32_nxv4f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfredusum_vs_nxv2f32_nxv4f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfredsum.vs v8, v10, v9 +; CHECK-NEXT: vfredusum.vs v8, v10, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfredsum.nxv2f32.nxv4f32( + %a = call @llvm.riscv.vfredusum.nxv2f32.nxv4f32( %0, %1, %2, @@ -391,21 +391,21 @@ ret %a } -declare @llvm.riscv.vfredsum.mask.nxv2f32.nxv4f32( +declare @llvm.riscv.vfredusum.mask.nxv2f32.nxv4f32( , , , , i64); -define @intrinsic_vfredsum_mask_vs_nxv2f32_nxv4f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv2f32_nxv4f32_nxv2f32: +define @intrinsic_vfredusum_mask_vs_nxv2f32_nxv4f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv2f32_nxv4f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu -; CHECK-NEXT: vfredsum.vs v8, v10, v9, v0.t +; CHECK-NEXT: vfredusum.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfredsum.mask.nxv2f32.nxv4f32( + %a = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv4f32( %0, %1, %2, @@ -415,20 +415,20 @@ ret %a } -declare @llvm.riscv.vfredsum.nxv2f32.nxv8f32( +declare @llvm.riscv.vfredusum.nxv2f32.nxv8f32( , , , i64); -define @intrinsic_vfredsum_vs_nxv2f32_nxv8f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredsum_vs_nxv2f32_nxv8f32_nxv2f32: +define @intrinsic_vfredusum_vs_nxv2f32_nxv8f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfredusum_vs_nxv2f32_nxv8f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfredsum.vs v8, v12, v9 +; CHECK-NEXT: vfredusum.vs v8, v12, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfredsum.nxv2f32.nxv8f32( + %a = call @llvm.riscv.vfredusum.nxv2f32.nxv8f32( %0, %1, %2, @@ -437,21 +437,21 @@ ret %a } -declare @llvm.riscv.vfredsum.mask.nxv2f32.nxv8f32( +declare @llvm.riscv.vfredusum.mask.nxv2f32.nxv8f32( , , , , i64); -define @intrinsic_vfredsum_mask_vs_nxv2f32_nxv8f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv2f32_nxv8f32_nxv2f32: +define 
@intrinsic_vfredusum_mask_vs_nxv2f32_nxv8f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv2f32_nxv8f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu -; CHECK-NEXT: vfredsum.vs v8, v12, v9, v0.t +; CHECK-NEXT: vfredusum.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfredsum.mask.nxv2f32.nxv8f32( + %a = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv8f32( %0, %1, %2, @@ -461,20 +461,20 @@ ret %a } -declare @llvm.riscv.vfredsum.nxv2f32.nxv16f32( +declare @llvm.riscv.vfredusum.nxv2f32.nxv16f32( , , , i64); -define @intrinsic_vfredsum_vs_nxv2f32_nxv16f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredsum_vs_nxv2f32_nxv16f32_nxv2f32: +define @intrinsic_vfredusum_vs_nxv2f32_nxv16f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfredusum_vs_nxv2f32_nxv16f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu -; CHECK-NEXT: vfredsum.vs v8, v16, v9 +; CHECK-NEXT: vfredusum.vs v8, v16, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfredsum.nxv2f32.nxv16f32( + %a = call @llvm.riscv.vfredusum.nxv2f32.nxv16f32( %0, %1, %2, @@ -483,21 +483,21 @@ ret %a } -declare @llvm.riscv.vfredsum.mask.nxv2f32.nxv16f32( +declare @llvm.riscv.vfredusum.mask.nxv2f32.nxv16f32( , , , , i64); -define @intrinsic_vfredsum_mask_vs_nxv2f32_nxv16f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv2f32_nxv16f32_nxv2f32: +define @intrinsic_vfredusum_mask_vs_nxv2f32_nxv16f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv2f32_nxv16f32_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu -; CHECK-NEXT: vfredsum.vs v8, v16, v9, v0.t +; CHECK-NEXT: vfredusum.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfredsum.mask.nxv2f32.nxv16f32( + %a = call @llvm.riscv.vfredusum.mask.nxv2f32.nxv16f32( %0, %1, %2, @@ -507,20 +507,20 @@ ret %a } -declare @llvm.riscv.vfredsum.nxv1f64.nxv1f64( +declare @llvm.riscv.vfredusum.nxv1f64.nxv1f64( , , , i64); -define @intrinsic_vfredsum_vs_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredsum_vs_nxv1f64_nxv1f64_nxv1f64: +define @intrinsic_vfredusum_vs_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfredusum_vs_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu -; CHECK-NEXT: vfredsum.vs v8, v9, v10 +; CHECK-NEXT: vfredusum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfredsum.nxv1f64.nxv1f64( + %a = call @llvm.riscv.vfredusum.nxv1f64.nxv1f64( %0, %1, %2, @@ -529,21 +529,21 @@ ret %a } -declare @llvm.riscv.vfredsum.mask.nxv1f64.nxv1f64( +declare @llvm.riscv.vfredusum.mask.nxv1f64.nxv1f64( , , , , i64); -define @intrinsic_vfredsum_mask_vs_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv1f64_nxv1f64_nxv1f64: +define @intrinsic_vfredusum_mask_vs_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu -; CHECK-NEXT: vfredsum.vs v8, v9, v10, v0.t +; CHECK-NEXT: vfredusum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfredsum.mask.nxv1f64.nxv1f64( + %a = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv1f64( %0, %1, %2, @@ -553,20 
+553,20 @@ ret %a } -declare @llvm.riscv.vfredsum.nxv1f64.nxv2f64( +declare @llvm.riscv.vfredusum.nxv1f64.nxv2f64( , , , i64); -define @intrinsic_vfredsum_vs_nxv1f64_nxv2f64_nxv1f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredsum_vs_nxv1f64_nxv2f64_nxv1f64: +define @intrinsic_vfredusum_vs_nxv1f64_nxv2f64_nxv1f64( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfredusum_vs_nxv1f64_nxv2f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu -; CHECK-NEXT: vfredsum.vs v8, v10, v9 +; CHECK-NEXT: vfredusum.vs v8, v10, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfredsum.nxv1f64.nxv2f64( + %a = call @llvm.riscv.vfredusum.nxv1f64.nxv2f64( %0, %1, %2, @@ -575,21 +575,21 @@ ret %a } -declare @llvm.riscv.vfredsum.mask.nxv1f64.nxv2f64( +declare @llvm.riscv.vfredusum.mask.nxv1f64.nxv2f64( , , , , i64); -define @intrinsic_vfredsum_mask_vs_nxv1f64_nxv2f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv1f64_nxv2f64_nxv1f64: +define @intrinsic_vfredusum_mask_vs_nxv1f64_nxv2f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv1f64_nxv2f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu -; CHECK-NEXT: vfredsum.vs v8, v10, v9, v0.t +; CHECK-NEXT: vfredusum.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfredsum.mask.nxv1f64.nxv2f64( + %a = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv2f64( %0, %1, %2, @@ -599,20 +599,20 @@ ret %a } -declare @llvm.riscv.vfredsum.nxv1f64.nxv4f64( +declare @llvm.riscv.vfredusum.nxv1f64.nxv4f64( , , , i64); -define @intrinsic_vfredsum_vs_nxv1f64_nxv4f64_nxv1f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredsum_vs_nxv1f64_nxv4f64_nxv1f64: +define @intrinsic_vfredusum_vs_nxv1f64_nxv4f64_nxv1f64( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfredusum_vs_nxv1f64_nxv4f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu -; CHECK-NEXT: vfredsum.vs v8, v12, v9 +; CHECK-NEXT: vfredusum.vs v8, v12, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfredsum.nxv1f64.nxv4f64( + %a = call @llvm.riscv.vfredusum.nxv1f64.nxv4f64( %0, %1, %2, @@ -621,21 +621,21 @@ ret %a } -declare @llvm.riscv.vfredsum.mask.nxv1f64.nxv4f64( +declare @llvm.riscv.vfredusum.mask.nxv1f64.nxv4f64( , , , , i64); -define @intrinsic_vfredsum_mask_vs_nxv1f64_nxv4f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv1f64_nxv4f64_nxv1f64: +define @intrinsic_vfredusum_mask_vs_nxv1f64_nxv4f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv1f64_nxv4f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu -; CHECK-NEXT: vfredsum.vs v8, v12, v9, v0.t +; CHECK-NEXT: vfredusum.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfredsum.mask.nxv1f64.nxv4f64( + %a = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv4f64( %0, %1, %2, @@ -645,20 +645,20 @@ ret %a } -declare @llvm.riscv.vfredsum.nxv1f64.nxv8f64( +declare @llvm.riscv.vfredusum.nxv1f64.nxv8f64( , , , i64); -define @intrinsic_vfredsum_vs_nxv1f64_nxv8f64_nxv1f64( %0, %1, %2, i64 %3) nounwind { -; CHECK-LABEL: intrinsic_vfredsum_vs_nxv1f64_nxv8f64_nxv1f64: +define @intrinsic_vfredusum_vs_nxv1f64_nxv8f64_nxv1f64( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfredusum_vs_nxv1f64_nxv8f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, 
mu -; CHECK-NEXT: vfredsum.vs v8, v16, v9 +; CHECK-NEXT: vfredusum.vs v8, v16, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfredsum.nxv1f64.nxv8f64( + %a = call @llvm.riscv.vfredusum.nxv1f64.nxv8f64( %0, %1, %2, @@ -667,21 +667,21 @@ ret %a } -declare @llvm.riscv.vfredsum.mask.nxv1f64.nxv8f64( +declare @llvm.riscv.vfredusum.mask.nxv1f64.nxv8f64( , , , , i64); -define @intrinsic_vfredsum_mask_vs_nxv1f64_nxv8f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { -; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv1f64_nxv8f64_nxv1f64: +define @intrinsic_vfredusum_mask_vs_nxv1f64_nxv8f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv1f64_nxv8f64_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu -; CHECK-NEXT: vfredsum.vs v8, v16, v9, v0.t +; CHECK-NEXT: vfredusum.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfredsum.mask.nxv1f64.nxv8f64( + %a = call @llvm.riscv.vfredusum.mask.nxv1f64.nxv8f64( %0, %1, %2, diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwredsum-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwredsum-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwredsum-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwredsum-rv32.ll @@ -1,20 +1,20 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \ ; RUN: < %s | FileCheck %s -declare @llvm.riscv.vfwredsum.nxv2f32.nxv1f16( +declare @llvm.riscv.vfwredusum.nxv2f32.nxv1f16( , , , i32); -define @intrinsic_vfwredsum_vs_nxv2f32_nxv1f16_nxv2f32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwredsum_vs_nxv2f32_nxv1f16_nxv2f32: +define @intrinsic_vfwredusum_vs_nxv2f32_nxv1f16_nxv2f32( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv1f16_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfwredsum.vs v8, v9, v10 +; CHECK-NEXT: vfwredusum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfwredsum.nxv2f32.nxv1f16( + %a = call @llvm.riscv.vfwredusum.nxv2f32.nxv1f16( %0, %1, %2, @@ -23,21 +23,21 @@ ret %a } -declare @llvm.riscv.vfwredsum.mask.nxv2f32.nxv1f16.nxv2f32( +declare @llvm.riscv.vfwredusum.mask.nxv2f32.nxv1f16.nxv2f32( , , , , i32); -define @intrinsic_vfwredsum_mask_vs_nxv2f32_nxv1f16_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwredsum_mask_vs_nxv2f32_nxv1f16_nxv2f32: +define @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv1f16_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv2f32_nxv1f16_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu -; CHECK-NEXT: vfwredsum.vs v8, v9, v10, v0.t +; CHECK-NEXT: vfwredusum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfwredsum.mask.nxv2f32.nxv1f16.nxv2f32( + %a = call @llvm.riscv.vfwredusum.mask.nxv2f32.nxv1f16.nxv2f32( %0, %1, %2, @@ -47,20 +47,20 @@ ret %a } -declare @llvm.riscv.vfwredsum.nxv2f32.nxv2f16( +declare @llvm.riscv.vfwredusum.nxv2f32.nxv2f16( , , , i32); -define @intrinsic_vfwredsum_vs_nxv2f32_nxv2f16_nxv2f32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwredsum_vs_nxv2f32_nxv2f16_nxv2f32: +define @intrinsic_vfwredusum_vs_nxv2f32_nxv2f16_nxv2f32( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv2f16_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfwredsum.vs v8, v9, v10 +; 
CHECK-NEXT: vfwredusum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfwredsum.nxv2f32.nxv2f16( + %a = call @llvm.riscv.vfwredusum.nxv2f32.nxv2f16( %0, %1, %2, @@ -69,21 +69,21 @@ ret %a } -declare @llvm.riscv.vfwredsum.mask.nxv2f32.nxv2f16.nxv2f32( +declare @llvm.riscv.vfwredusum.mask.nxv2f32.nxv2f16.nxv2f32( , , , , i32); -define @intrinsic_vfwredsum_mask_vs_nxv2f32_nxv2f16_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwredsum_mask_vs_nxv2f32_nxv2f16_nxv2f32: +define @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv2f16_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv2f32_nxv2f16_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu -; CHECK-NEXT: vfwredsum.vs v8, v9, v10, v0.t +; CHECK-NEXT: vfwredusum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfwredsum.mask.nxv2f32.nxv2f16.nxv2f32( + %a = call @llvm.riscv.vfwredusum.mask.nxv2f32.nxv2f16.nxv2f32( %0, %1, %2, @@ -93,20 +93,20 @@ ret %a } -declare @llvm.riscv.vfwredsum.nxv2f32.nxv4f16( +declare @llvm.riscv.vfwredusum.nxv2f32.nxv4f16( , , , i32); -define @intrinsic_vfwredsum_vs_nxv2f32_nxv4f16_nxv2f32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwredsum_vs_nxv2f32_nxv4f16_nxv2f32: +define @intrinsic_vfwredusum_vs_nxv2f32_nxv4f16_nxv2f32( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv4f16_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfwredsum.vs v8, v9, v10 +; CHECK-NEXT: vfwredusum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfwredsum.nxv2f32.nxv4f16( + %a = call @llvm.riscv.vfwredusum.nxv2f32.nxv4f16( %0, %1, %2, @@ -115,21 +115,21 @@ ret %a } -declare @llvm.riscv.vfwredsum.mask.nxv2f32.nxv4f16.nxv2f32( +declare @llvm.riscv.vfwredusum.mask.nxv2f32.nxv4f16.nxv2f32( , , , , i32); -define @intrinsic_vfwredsum_mask_vs_nxv2f32_nxv4f16_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vfwredsum_mask_vs_nxv2f32_nxv4f16_nxv2f32: +define @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv4f16_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv2f32_nxv4f16_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu -; CHECK-NEXT: vfwredsum.vs v8, v9, v10, v0.t +; CHECK-NEXT: vfwredusum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfwredsum.mask.nxv2f32.nxv4f16.nxv2f32( + %a = call @llvm.riscv.vfwredusum.mask.nxv2f32.nxv4f16.nxv2f32( %0, %1, %2, @@ -139,20 +139,20 @@ ret %a } -declare @llvm.riscv.vfwredsum.nxv2f32.nxv8f16( +declare @llvm.riscv.vfwredusum.nxv2f32.nxv8f16( , , , i32); -define @intrinsic_vfwredsum_vs_nxv2f32_nxv8f16_nxv2f32( %0, %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vfwredsum_vs_nxv2f32_nxv8f16_nxv2f32: +define @intrinsic_vfwredusum_vs_nxv2f32_nxv8f16_nxv2f32( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv8f16_nxv2f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu -; CHECK-NEXT: vfwredsum.vs v8, v10, v9 +; CHECK-NEXT: vfwredusum.vs v8, v10, v9 ; CHECK-NEXT: ret entry: - %a = call @llvm.riscv.vfwredsum.nxv2f32.nxv8f16( + %a = call @llvm.riscv.vfwredusum.nxv2f32.nxv8f16( %0, %1, %2, @@ -161,21 +161,21 @@ ret %a } -declare @llvm.riscv.vfwredsum.mask.nxv2f32.nxv8f16.nxv2f32( +declare @llvm.riscv.vfwredusum.mask.nxv2f32.nxv8f16.nxv2f32( , , , , i32); -define 
@intrinsic_vfwredsum_mask_vs_nxv2f32_nxv8f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x half> %1, <vscale x 2 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwredsum_mask_vs_nxv2f32_nxv8f16_nxv2f32:
+define <vscale x 2 x float> @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv8f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x half> %1, <vscale x 2 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv2f32_nxv8f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
-; CHECK-NEXT:    vfwredsum.vs v8, v10, v9, v0.t
+; CHECK-NEXT:    vfwredusum.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv8f16.nxv2f32(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv8f16.nxv2f32(
     <vscale x 2 x float> %0,
     <vscale x 8 x half> %1,
     <vscale x 2 x float> %2,
@@ -185,20 +185,20 @@
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv16f16(
+declare <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv16f16(
   <vscale x 2 x float>,
   <vscale x 16 x half>,
   <vscale x 2 x float>,
   i32);
 
-define <vscale x 2 x float> @intrinsic_vfwredsum_vs_nxv2f32_nxv16f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x half> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwredsum_vs_nxv2f32_nxv16f16_nxv2f32:
+define <vscale x 2 x float> @intrinsic_vfwredusum_vs_nxv2f32_nxv16f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x half> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv16f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
-; CHECK-NEXT:    vfwredsum.vs v8, v12, v9
+; CHECK-NEXT:    vfwredusum.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv16f16(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv16f16(
     <vscale x 2 x float> %0,
     <vscale x 16 x half> %1,
     <vscale x 2 x float> %2,
@@ -207,21 +207,21 @@
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv16f16.nxv2f32(
+declare <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv16f16.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 16 x half>,
   <vscale x 2 x float>,
   <vscale x 16 x i1>,
   i32);
 
-define <vscale x 2 x float> @intrinsic_vfwredsum_mask_vs_nxv2f32_nxv16f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x half> %1, <vscale x 2 x float> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwredsum_mask_vs_nxv2f32_nxv16f16_nxv2f32:
+define <vscale x 2 x float> @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv16f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x half> %1, <vscale x 2 x float> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv2f32_nxv16f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
-; CHECK-NEXT:    vfwredsum.vs v8, v12, v9, v0.t
+; CHECK-NEXT:    vfwredusum.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv16f16.nxv2f32(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv16f16.nxv2f32(
     <vscale x 2 x float> %0,
     <vscale x 16 x half> %1,
     <vscale x 2 x float> %2,
@@ -231,20 +231,20 @@
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv32f16(
+declare <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv32f16(
   <vscale x 2 x float>,
   <vscale x 32 x half>,
   <vscale x 2 x float>,
   i32);
 
-define <vscale x 2 x float> @intrinsic_vfwredsum_vs_nxv2f32_nxv32f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 32 x half> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwredsum_vs_nxv2f32_nxv32f16_nxv2f32:
+define <vscale x 2 x float> @intrinsic_vfwredusum_vs_nxv2f32_nxv32f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 32 x half> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv32f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, mu
-; CHECK-NEXT:    vfwredsum.vs v8, v16, v9
+; CHECK-NEXT:    vfwredusum.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv32f16(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv32f16(
     <vscale x 2 x float> %0,
     <vscale x 32 x half> %1,
     <vscale x 2 x float> %2,
@@ -253,21 +253,21 @@
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv32f16(
+declare <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv32f16(
   <vscale x 2 x float>,
   <vscale x 32 x half>,
   <vscale x 2 x float>,
   <vscale x 32 x i1>,
   i32);
 
-define <vscale x 2 x float> @intrinsic_vfwredsum_mask_vs_nxv2f32_nxv32f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 32 x half> %1, <vscale x 2 x float> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwredsum_mask_vs_nxv2f32_nxv32f16_nxv2f32:
+define <vscale x 2 x float> @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv32f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 32 x half> %1, <vscale x 2 x float> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv2f32_nxv32f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, mu
-; CHECK-NEXT:    vfwredsum.vs v8, v16, v9, v0.t
+; CHECK-NEXT:    vfwredusum.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv32f16(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv32f16(
     <vscale x 2 x float> %0,
     <vscale x 32 x half> %1,
     <vscale x 2 x float> %2,
@@ -277,20 +277,20 @@
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv1f32(
+declare <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv1f32(
   <vscale x 1 x double>,
   <vscale x 1 x float>,
   <vscale x 1 x double>,
   i32);
 
-define <vscale x 1 x double> @intrinsic_vfwredsum_vs_nxv1f64_nxv1f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwredsum_vs_nxv1f64_nxv1f32_nxv1f64:
+define <vscale x 1 x double> @intrinsic_vfwredusum_vs_nxv1f64_nxv1f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv1f64_nxv1f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT:    vfwredsum.vs v8, v9, v10
+; CHECK-NEXT:    vfwredusum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv1f32(
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv1f32(
     <vscale x 1 x double> %0,
     <vscale x 1 x float> %1,
     <vscale x 1 x double> %2,
@@ -299,21 +299,21 @@
   ret <vscale x 1 x double> %a
 }
 
-declare <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv1f32.nxv1f64(
+declare <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv1f32.nxv1f64(
   <vscale x 1 x double>,
   <vscale x 1 x float>,
   <vscale x 1 x double>,
   <vscale x 1 x i1>,
   i32);
 
-define <vscale x 1 x double> @intrinsic_vfwredsum_mask_vs_nxv1f64_nxv1f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwredsum_mask_vs_nxv1f64_nxv1f32_nxv1f64:
+define <vscale x 1 x double> @intrinsic_vfwredusum_mask_vs_nxv1f64_nxv1f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv1f64_nxv1f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT:    vfwredsum.vs v8, v9, v10, v0.t
+; CHECK-NEXT:    vfwredusum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv1f32.nxv1f64(
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv1f32.nxv1f64(
     <vscale x 1 x double> %0,
     <vscale x 1 x float> %1,
     <vscale x 1 x double> %2,
@@ -323,20 +323,20 @@
   ret <vscale x 1 x double> %a
 }
 
-declare <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv2f32(
+declare <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv2f32(
   <vscale x 1 x double>,
   <vscale x 2 x float>,
   <vscale x 1 x double>,
   i32);
 
-define <vscale x 1 x double> @intrinsic_vfwredsum_vs_nxv1f64_nxv2f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x float> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwredsum_vs_nxv1f64_nxv2f32_nxv1f64:
+define <vscale x 1 x double> @intrinsic_vfwredusum_vs_nxv1f64_nxv2f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x float> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv1f64_nxv2f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT:    vfwredsum.vs v8, v9, v10
+; CHECK-NEXT:    vfwredusum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv2f32(
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv2f32(
     <vscale x 1 x double> %0,
     <vscale x 2 x float> %1,
     <vscale x 1 x double> %2,
@@ -345,21 +345,21 @@
   ret <vscale x 1 x double> %a
 }
 
-declare <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv2f32.nxv1f64(
+declare <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv2f32.nxv1f64(
   <vscale x 1 x double>,
   <vscale x 2 x float>,
   <vscale x 1 x double>,
   <vscale x 2 x i1>,
   i32);
 
-define <vscale x 1 x double> @intrinsic_vfwredsum_mask_vs_nxv1f64_nxv2f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x float> %1, <vscale x 1 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwredsum_mask_vs_nxv1f64_nxv2f32_nxv1f64:
+define <vscale x 1 x double> @intrinsic_vfwredusum_mask_vs_nxv1f64_nxv2f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x float> %1, <vscale x 1 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv1f64_nxv2f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT:    vfwredsum.vs v8, v9, v10, v0.t
+; CHECK-NEXT:    vfwredusum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv2f32.nxv1f64(
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv2f32.nxv1f64(
     <vscale x 1 x double> %0,
     <vscale x 2 x float> %1,
     <vscale x 1 x double> %2,
@@ -369,20 +369,20 @@
   ret <vscale x 1 x double> %a
 }
 
-declare <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv4f32(
+declare <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv4f32(
   <vscale x 1 x double>,
   <vscale x 4 x float>,
   <vscale x 1 x double>,
   i32);
 
-define <vscale x 1 x double> @intrinsic_vfwredsum_vs_nxv1f64_nxv4f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x float> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwredsum_vs_nxv1f64_nxv4f32_nxv1f64:
+define <vscale x 1 x double> @intrinsic_vfwredusum_vs_nxv1f64_nxv4f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x float> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv1f64_nxv4f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
-; CHECK-NEXT:    vfwredsum.vs v8, v10, v9
+; CHECK-NEXT:    vfwredusum.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv4f32(
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv4f32(
     <vscale x 1 x double> %0,
     <vscale x 4 x float> %1,
     <vscale x 1 x double> %2,
@@ -391,21 +391,21 @@
   ret <vscale x 1 x double> %a
 }
 
-declare <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv4f32.nxv1f64(
+declare <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv4f32.nxv1f64(
   <vscale x 1 x double>,
   <vscale x 4 x float>,
   <vscale x 1 x double>,
   <vscale x 4 x i1>,
   i32);
 
-define <vscale x 1 x double> @intrinsic_vfwredsum_mask_vs_nxv1f64_nxv4f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x float> %1, <vscale x 1 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwredsum_mask_vs_nxv1f64_nxv4f32_nxv1f64:
+define <vscale x 1 x double> @intrinsic_vfwredusum_mask_vs_nxv1f64_nxv4f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x float> %1, <vscale x 1 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv1f64_nxv4f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
-; CHECK-NEXT:    vfwredsum.vs v8, v10, v9, v0.t
+; CHECK-NEXT:    vfwredusum.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv4f32.nxv1f64(
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv4f32.nxv1f64(
     <vscale x 1 x double> %0,
     <vscale x 4 x float> %1,
     <vscale x 1 x double> %2,
@@ -415,20 +415,20 @@
   ret <vscale x 1 x double> %a
 }
 
-declare <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv8f32(
+declare <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv8f32(
   <vscale x 1 x double>,
   <vscale x 8 x float>,
   <vscale x 1 x double>,
   i32);
 
-define <vscale x 1 x double> @intrinsic_vfwredsum_vs_nxv1f64_nxv8f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x float> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwredsum_vs_nxv1f64_nxv8f32_nxv1f64:
+define <vscale x 1 x double> @intrinsic_vfwredusum_vs_nxv1f64_nxv8f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x float> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv1f64_nxv8f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
-; CHECK-NEXT:    vfwredsum.vs v8, v12, v9
+; CHECK-NEXT:    vfwredusum.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv8f32(
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv8f32(
     <vscale x 1 x double> %0,
     <vscale x 8 x float> %1,
     <vscale x 1 x double> %2,
@@ -437,21 +437,21 @@
   ret <vscale x 1 x double> %a
 }
 
-declare <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv8f32.nxv1f64(
+declare <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv8f32.nxv1f64(
   <vscale x 1 x double>,
   <vscale x 8 x float>,
   <vscale x 1 x double>,
   <vscale x 8 x i1>,
   i32);
 
-define <vscale x 1 x double> @intrinsic_vfwredsum_mask_vs_nxv1f64_nxv8f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x float> %1, <vscale x 1 x double> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwredsum_mask_vs_nxv1f64_nxv8f32_nxv1f64:
+define <vscale x 1 x double> @intrinsic_vfwredusum_mask_vs_nxv1f64_nxv8f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x float> %1, <vscale x 1 x double> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv1f64_nxv8f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
-; CHECK-NEXT:    vfwredsum.vs v8, v12, v9, v0.t
+; CHECK-NEXT:    vfwredusum.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv8f32.nxv1f64(
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv8f32.nxv1f64(
     <vscale x 1 x double> %0,
     <vscale x 8 x float> %1,
     <vscale x 1 x double> %2,
@@ -461,20 +461,20 @@
   ret <vscale x 1 x double> %a
 }
 
-declare <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv16f32(
+declare <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv16f32(
   <vscale x 1 x double>,
   <vscale x 16 x float>,
   <vscale x 1 x double>,
   i32);
 
-define <vscale x 1 x double> @intrinsic_vfwredsum_vs_nxv1f64_nxv16f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 16 x float> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwredsum_vs_nxv1f64_nxv16f32_nxv1f64:
+define <vscale x 1 x double> @intrinsic_vfwredusum_vs_nxv1f64_nxv16f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 16 x float> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv1f64_nxv16f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, mu
-; CHECK-NEXT:    vfwredsum.vs v8, v16, v9
+; CHECK-NEXT:    vfwredusum.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv16f32(
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv16f32(
     <vscale x 1 x double> %0,
     <vscale x 16 x float> %1,
     <vscale x 1 x double> %2,
@@ -483,21 +483,21 @@
   ret <vscale x 1 x double> %a
 }
 
-declare <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv16f32.nxv1f64(
+declare <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv16f32.nxv1f64(
   <vscale x 1 x double>,
   <vscale x 16 x float>,
   <vscale x 1 x double>,
   <vscale x 16 x i1>,
   i32);
 
-define <vscale x 1 x double> @intrinsic_vfwredsum_mask_vs_nxv1f64_nxv16f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 16 x float> %1, <vscale x 1 x double> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwredsum_mask_vs_nxv1f64_nxv16f32_nxv1f64:
+define <vscale x 1 x double> @intrinsic_vfwredusum_mask_vs_nxv1f64_nxv16f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 16 x float> %1, <vscale x 1 x double> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv1f64_nxv16f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, mu
-; CHECK-NEXT:    vfwredsum.vs v8, v16, v9, v0.t
+; CHECK-NEXT:    vfwredusum.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv16f32.nxv1f64(
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv16f32.nxv1f64(
     <vscale x 1 x double> %0,
     <vscale x 16 x float> %1,
     <vscale x 1 x double> %2,
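The RV64 copy of this test follows with the identical mechanical rename; only the VL argument type (i64 instead of i32) differs. For orientation between the hunks: vfwredusum is the RVV v1.0 spelling of the unordered widening floating-point sum reduction, and only the spelling changes here, not the semantics or the selected encoding. A minimal standalone sketch in the same shape as the tests above (the function name @vfwredusum_sketch is illustrative, not part of the patch):

declare <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv16f16(
  <vscale x 2 x float>,
  <vscale x 16 x half>,
  <vscale x 2 x float>,
  i32);

; Widening unordered reduction: f16 source elements are accumulated into an
; f32 result element; with this patch llc should select vfwredusum.vs here.
define <vscale x 2 x float> @vfwredusum_sketch(<vscale x 2 x float> %dst, <vscale x 16 x half> %vec, <vscale x 2 x float> %scalar, i32 %vl) nounwind {
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv16f16(
    <vscale x 2 x float> %dst,
    <vscale x 16 x half> %vec,
    <vscale x 2 x float> %scalar,
    i32 %vl)
  ret <vscale x 2 x float> %a
}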
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwredsum-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwredsum-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vfwredsum-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwredsum-rv64.ll
@@ -1,20 +1,20 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
-declare <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv1f16(
+declare <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv1f16(
   <vscale x 2 x float>,
   <vscale x 1 x half>,
   <vscale x 2 x float>,
   i64);
 
-define <vscale x 2 x float> @intrinsic_vfwredsum_vs_nxv2f32_nxv1f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x half> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwredsum_vs_nxv2f32_nxv1f16_nxv2f32:
+define <vscale x 2 x float> @intrinsic_vfwredusum_vs_nxv2f32_nxv1f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x half> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv1f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT:    vfwredsum.vs v8, v9, v10
+; CHECK-NEXT:    vfwredusum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv1f16(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv1f16(
     <vscale x 2 x float> %0,
     <vscale x 1 x half> %1,
     <vscale x 2 x float> %2,
@@ -23,21 +23,21 @@
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv1f16.nxv2f32(
+declare <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv1f16.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 1 x half>,
   <vscale x 2 x float>,
   <vscale x 1 x i1>,
   i64);
 
-define <vscale x 2 x float> @intrinsic_vfwredsum_mask_vs_nxv2f32_nxv1f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x half> %1, <vscale x 2 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwredsum_mask_vs_nxv2f32_nxv1f16_nxv2f32:
+define <vscale x 2 x float> @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv1f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x half> %1, <vscale x 2 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv2f32_nxv1f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT:    vfwredsum.vs v8, v9, v10, v0.t
+; CHECK-NEXT:    vfwredusum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv1f16.nxv2f32(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv1f16.nxv2f32(
     <vscale x 2 x float> %0,
     <vscale x 1 x half> %1,
     <vscale x 2 x float> %2,
@@ -47,20 +47,20 @@
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv2f16(
+declare <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv2f16(
   <vscale x 2 x float>,
   <vscale x 2 x half>,
   <vscale x 2 x float>,
   i64);
 
-define <vscale x 2 x float> @intrinsic_vfwredsum_vs_nxv2f32_nxv2f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwredsum_vs_nxv2f32_nxv2f16_nxv2f32:
+define <vscale x 2 x float> @intrinsic_vfwredusum_vs_nxv2f32_nxv2f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv2f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT:    vfwredsum.vs v8, v9, v10
+; CHECK-NEXT:    vfwredusum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv2f16(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv2f16(
     <vscale x 2 x float> %0,
     <vscale x 2 x half> %1,
     <vscale x 2 x float> %2,
@@ -69,21 +69,21 @@
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv2f16.nxv2f32(
+declare <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv2f16.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x half>,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
   i64);
 
-define <vscale x 2 x float> @intrinsic_vfwredsum_mask_vs_nxv2f32_nxv2f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwredsum_mask_vs_nxv2f32_nxv2f16_nxv2f32:
+define <vscale x 2 x float> @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv2f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv2f32_nxv2f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT:    vfwredsum.vs v8, v9, v10, v0.t
+; CHECK-NEXT:    vfwredusum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv2f16.nxv2f32(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv2f16.nxv2f32(
     <vscale x 2 x float> %0,
     <vscale x 2 x half> %1,
     <vscale x 2 x float> %2,
@@ -93,20 +93,20 @@
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv4f16(
+declare <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv4f16(
   <vscale x 2 x float>,
   <vscale x 4 x half>,
   <vscale x 2 x float>,
   i64);
 
-define <vscale x 2 x float> @intrinsic_vfwredsum_vs_nxv2f32_nxv4f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x half> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwredsum_vs_nxv2f32_nxv4f16_nxv2f32:
+define <vscale x 2 x float> @intrinsic_vfwredusum_vs_nxv2f32_nxv4f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x half> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv4f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
-; CHECK-NEXT:    vfwredsum.vs v8, v9, v10
+; CHECK-NEXT:    vfwredusum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv4f16(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv4f16(
     <vscale x 2 x float> %0,
     <vscale x 4 x half> %1,
     <vscale x 2 x float> %2,
@@ -115,21 +115,21 @@
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv4f16.nxv2f32(
+declare <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv4f16.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 4 x half>,
   <vscale x 2 x float>,
   <vscale x 4 x i1>,
   i64);
 
-define <vscale x 2 x float> @intrinsic_vfwredsum_mask_vs_nxv2f32_nxv4f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x half> %1, <vscale x 2 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwredsum_mask_vs_nxv2f32_nxv4f16_nxv2f32:
+define <vscale x 2 x float> @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv4f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x half> %1, <vscale x 2 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv2f32_nxv4f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
-; CHECK-NEXT:    vfwredsum.vs v8, v9, v10, v0.t
+; CHECK-NEXT:    vfwredusum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv4f16.nxv2f32(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv4f16.nxv2f32(
     <vscale x 2 x float> %0,
     <vscale x 4 x half> %1,
     <vscale x 2 x float> %2,
@@ -139,20 +139,20 @@
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv8f16(
+declare <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv8f16(
   <vscale x 2 x float>,
   <vscale x 8 x half>,
   <vscale x 2 x float>,
   i64);
 
-define <vscale x 2 x float> @intrinsic_vfwredsum_vs_nxv2f32_nxv8f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x half> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwredsum_vs_nxv2f32_nxv8f16_nxv2f32:
+define <vscale x 2 x float> @intrinsic_vfwredusum_vs_nxv2f32_nxv8f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x half> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv8f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
-; CHECK-NEXT:    vfwredsum.vs v8, v10, v9
+; CHECK-NEXT:    vfwredusum.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv8f16(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv8f16(
     <vscale x 2 x float> %0,
     <vscale x 8 x half> %1,
     <vscale x 2 x float> %2,
@@ -161,21 +161,21 @@
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv8f16.nxv2f32(
+declare <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv8f16.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 8 x half>,
   <vscale x 2 x float>,
   <vscale x 8 x i1>,
   i64);
 
-define <vscale x 2 x float> @intrinsic_vfwredsum_mask_vs_nxv2f32_nxv8f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x half> %1, <vscale x 2 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwredsum_mask_vs_nxv2f32_nxv8f16_nxv2f32:
+define <vscale x 2 x float> @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv8f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x half> %1, <vscale x 2 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv2f32_nxv8f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
-; CHECK-NEXT:    vfwredsum.vs v8, v10, v9, v0.t
+; CHECK-NEXT:    vfwredusum.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv8f16.nxv2f32(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv8f16.nxv2f32(
     <vscale x 2 x float> %0,
     <vscale x 8 x half> %1,
     <vscale x 2 x float> %2,
@@ -185,20 +185,20 @@
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv16f16(
+declare <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv16f16(
   <vscale x 2 x float>,
   <vscale x 16 x half>,
   <vscale x 2 x float>,
   i64);
 
-define <vscale x 2 x float> @intrinsic_vfwredsum_vs_nxv2f32_nxv16f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x half> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwredsum_vs_nxv2f32_nxv16f16_nxv2f32:
+define <vscale x 2 x float> @intrinsic_vfwredusum_vs_nxv2f32_nxv16f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x half> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv16f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
-; CHECK-NEXT:    vfwredsum.vs v8, v12, v9
+; CHECK-NEXT:    vfwredusum.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv16f16(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv16f16(
     <vscale x 2 x float> %0,
     <vscale x 16 x half> %1,
     <vscale x 2 x float> %2,
@@ -207,21 +207,21 @@
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv16f16.nxv2f32(
+declare <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv16f16.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 16 x half>,
   <vscale x 2 x float>,
   <vscale x 16 x i1>,
   i64);
 
-define <vscale x 2 x float> @intrinsic_vfwredsum_mask_vs_nxv2f32_nxv16f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x half> %1, <vscale x 2 x float> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwredsum_mask_vs_nxv2f32_nxv16f16_nxv2f32:
+define <vscale x 2 x float> @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv16f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x half> %1, <vscale x 2 x float> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv2f32_nxv16f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
-; CHECK-NEXT:    vfwredsum.vs v8, v12, v9, v0.t
+; CHECK-NEXT:    vfwredusum.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv16f16.nxv2f32(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv16f16.nxv2f32(
     <vscale x 2 x float> %0,
     <vscale x 16 x half> %1,
     <vscale x 2 x float> %2,
@@ -231,20 +231,20 @@
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv32f16(
+declare <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv32f16(
   <vscale x 2 x float>,
   <vscale x 32 x half>,
   <vscale x 2 x float>,
   i64);
 
-define <vscale x 2 x float> @intrinsic_vfwredsum_vs_nxv2f32_nxv32f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 32 x half> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwredsum_vs_nxv2f32_nxv32f16_nxv2f32:
+define <vscale x 2 x float> @intrinsic_vfwredusum_vs_nxv2f32_nxv32f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 32 x half> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv32f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, mu
-; CHECK-NEXT:    vfwredsum.vs v8, v16, v9
+; CHECK-NEXT:    vfwredusum.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv32f16(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv32f16(
     <vscale x 2 x float> %0,
     <vscale x 32 x half> %1,
     <vscale x 2 x float> %2,
@@ -253,21 +253,21 @@
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv32f16(
+declare <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv32f16(
   <vscale x 2 x float>,
   <vscale x 32 x half>,
   <vscale x 2 x float>,
   <vscale x 32 x i1>,
   i64);
 
-define <vscale x 2 x float> @intrinsic_vfwredsum_mask_vs_nxv2f32_nxv32f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 32 x half> %1, <vscale x 2 x float> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwredsum_mask_vs_nxv2f32_nxv32f16_nxv2f32:
+define <vscale x 2 x float> @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv32f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 32 x half> %1, <vscale x 2 x float> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv2f32_nxv32f16_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, mu
-; CHECK-NEXT:    vfwredsum.vs v8, v16, v9, v0.t
+; CHECK-NEXT:    vfwredusum.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv32f16(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv32f16(
     <vscale x 2 x float> %0,
     <vscale x 32 x half> %1,
     <vscale x 2 x float> %2,
@@ -277,20 +277,20 @@
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv1f32(
+declare <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv1f32(
   <vscale x 1 x double>,
   <vscale x 1 x float>,
   <vscale x 1 x double>,
   i64);
 
-define <vscale x 1 x double> @intrinsic_vfwredsum_vs_nxv1f64_nxv1f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwredsum_vs_nxv1f64_nxv1f32_nxv1f64:
+define <vscale x 1 x double> @intrinsic_vfwredusum_vs_nxv1f64_nxv1f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv1f64_nxv1f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT:    vfwredsum.vs v8, v9, v10
+; CHECK-NEXT:    vfwredusum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv1f32(
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv1f32(
     <vscale x 1 x double> %0,
     <vscale x 1 x float> %1,
     <vscale x 1 x double> %2,
@@ -299,21 +299,21 @@
   ret <vscale x 1 x double> %a
 }
 
-declare <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv1f32.nxv1f64(
+declare <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv1f32.nxv1f64(
   <vscale x 1 x double>,
   <vscale x 1 x float>,
   <vscale x 1 x double>,
   <vscale x 1 x i1>,
   i64);
 
-define <vscale x 1 x double> @intrinsic_vfwredsum_mask_vs_nxv1f64_nxv1f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwredsum_mask_vs_nxv1f64_nxv1f32_nxv1f64:
+define <vscale x 1 x double> @intrinsic_vfwredusum_mask_vs_nxv1f64_nxv1f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv1f64_nxv1f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT:    vfwredsum.vs v8, v9, v10, v0.t
+; CHECK-NEXT:    vfwredusum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv1f32.nxv1f64(
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv1f32.nxv1f64(
     <vscale x 1 x double> %0,
     <vscale x 1 x float> %1,
     <vscale x 1 x double> %2,
@@ -323,20 +323,20 @@
   ret <vscale x 1 x double> %a
 }
 
-declare <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv2f32(
+declare <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv2f32(
   <vscale x 1 x double>,
   <vscale x 2 x float>,
   <vscale x 1 x double>,
   i64);
 
-define <vscale x 1 x double> @intrinsic_vfwredsum_vs_nxv1f64_nxv2f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x float> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwredsum_vs_nxv1f64_nxv2f32_nxv1f64:
+define <vscale x 1 x double> @intrinsic_vfwredusum_vs_nxv1f64_nxv2f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x float> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv1f64_nxv2f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT:    vfwredsum.vs v8, v9, v10
+; CHECK-NEXT:    vfwredusum.vs v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv2f32(
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv2f32(
     <vscale x 1 x double> %0,
     <vscale x 2 x float> %1,
     <vscale x 1 x double> %2,
@@ -345,21 +345,21 @@
   ret <vscale x 1 x double> %a
 }
 
-declare <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv2f32.nxv1f64(
+declare <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv2f32.nxv1f64(
   <vscale x 1 x double>,
   <vscale x 2 x float>,
   <vscale x 1 x double>,
   <vscale x 2 x i1>,
   i64);
 
-define <vscale x 1 x double> @intrinsic_vfwredsum_mask_vs_nxv1f64_nxv2f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x float> %1, <vscale x 1 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwredsum_mask_vs_nxv1f64_nxv2f32_nxv1f64:
+define <vscale x 1 x double> @intrinsic_vfwredusum_mask_vs_nxv1f64_nxv2f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x float> %1, <vscale x 1 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv1f64_nxv2f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT:    vfwredsum.vs v8, v9, v10, v0.t
+; CHECK-NEXT:    vfwredusum.vs v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv2f32.nxv1f64(
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv2f32.nxv1f64(
     <vscale x 1 x double> %0,
     <vscale x 2 x float> %1,
     <vscale x 1 x double> %2,
@@ -369,20 +369,20 @@
   ret <vscale x 1 x double> %a
 }
 
-declare <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv4f32(
+declare <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv4f32(
   <vscale x 1 x double>,
   <vscale x 4 x float>,
   <vscale x 1 x double>,
   i64);
 
-define <vscale x 1 x double> @intrinsic_vfwredsum_vs_nxv1f64_nxv4f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x float> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwredsum_vs_nxv1f64_nxv4f32_nxv1f64:
+define <vscale x 1 x double> @intrinsic_vfwredusum_vs_nxv1f64_nxv4f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x float> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv1f64_nxv4f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
-; CHECK-NEXT:    vfwredsum.vs v8, v10, v9
+; CHECK-NEXT:    vfwredusum.vs v8, v10, v9
 ; CHECK-NEXT:    ret
 entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv4f32(
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv4f32(
     <vscale x 1 x double> %0,
     <vscale x 4 x float> %1,
     <vscale x 1 x double> %2,
@@ -391,21 +391,21 @@
   ret <vscale x 1 x double> %a
 }
 
-declare <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv4f32.nxv1f64(
+declare <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv4f32.nxv1f64(
   <vscale x 1 x double>,
   <vscale x 4 x float>,
   <vscale x 1 x double>,
   <vscale x 4 x i1>,
   i64);
 
-define <vscale x 1 x double> @intrinsic_vfwredsum_mask_vs_nxv1f64_nxv4f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x float> %1, <vscale x 1 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwredsum_mask_vs_nxv1f64_nxv4f32_nxv1f64:
+define <vscale x 1 x double> @intrinsic_vfwredusum_mask_vs_nxv1f64_nxv4f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x float> %1, <vscale x 1 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv1f64_nxv4f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
-; CHECK-NEXT:    vfwredsum.vs v8, v10, v9, v0.t
+; CHECK-NEXT:    vfwredusum.vs v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv4f32.nxv1f64(
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv4f32.nxv1f64(
     <vscale x 1 x double> %0,
     <vscale x 4 x float> %1,
     <vscale x 1 x double> %2,
@@ -415,20 +415,20 @@
   ret <vscale x 1 x double> %a
 }
 
-declare <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv8f32(
+declare <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv8f32(
   <vscale x 1 x double>,
   <vscale x 8 x float>,
   <vscale x 1 x double>,
   i64);
 
-define <vscale x 1 x double> @intrinsic_vfwredsum_vs_nxv1f64_nxv8f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x float> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwredsum_vs_nxv1f64_nxv8f32_nxv1f64:
+define <vscale x 1 x double> @intrinsic_vfwredusum_vs_nxv1f64_nxv8f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x float> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv1f64_nxv8f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
-; CHECK-NEXT:    vfwredsum.vs v8, v12, v9
+; CHECK-NEXT:    vfwredusum.vs v8, v12, v9
 ; CHECK-NEXT:    ret
 entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv8f32(
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv8f32(
     <vscale x 1 x double> %0,
     <vscale x 8 x float> %1,
     <vscale x 1 x double> %2,
@@ -437,21 +437,21 @@
   ret <vscale x 1 x double> %a
 }
 
-declare <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv8f32.nxv1f64(
+declare <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv8f32.nxv1f64(
   <vscale x 1 x double>,
   <vscale x 8 x float>,
   <vscale x 1 x double>,
   <vscale x 8 x i1>,
   i64);
 
-define <vscale x 1 x double> @intrinsic_vfwredsum_mask_vs_nxv1f64_nxv8f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x float> %1, <vscale x 1 x double> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwredsum_mask_vs_nxv1f64_nxv8f32_nxv1f64:
+define <vscale x 1 x double> @intrinsic_vfwredusum_mask_vs_nxv1f64_nxv8f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x float> %1, <vscale x 1 x double> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv1f64_nxv8f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
-; CHECK-NEXT:    vfwredsum.vs v8, v12, v9, v0.t
+; CHECK-NEXT:    vfwredusum.vs v8, v12, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv8f32.nxv1f64(
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv8f32.nxv1f64(
     <vscale x 1 x double> %0,
     <vscale x 8 x float> %1,
     <vscale x 1 x double> %2,
@@ -461,20 +461,20 @@
   ret <vscale x 1 x double> %a
 }
 
-declare <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv16f32(
+declare <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv16f32(
   <vscale x 1 x double>,
   <vscale x 16 x float>,
   <vscale x 1 x double>,
   i64);
 
-define <vscale x 1 x double> @intrinsic_vfwredsum_vs_nxv1f64_nxv16f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 16 x float> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwredsum_vs_nxv1f64_nxv16f32_nxv1f64:
+define <vscale x 1 x double> @intrinsic_vfwredusum_vs_nxv1f64_nxv16f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 16 x float> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv1f64_nxv16f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, mu
-; CHECK-NEXT:    vfwredsum.vs v8, v16, v9
+; CHECK-NEXT:    vfwredusum.vs v8, v16, v9
 ; CHECK-NEXT:    ret
 entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv16f32(
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv16f32(
     <vscale x 1 x double> %0,
     <vscale x 16 x float> %1,
     <vscale x 1 x double> %2,
@@ -483,21 +483,21 @@
   ret <vscale x 1 x double> %a
 }
 
-declare <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv16f32(
+declare <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv16f32(
   <vscale x 1 x double>,
   <vscale x 16 x float>,
   <vscale x 1 x double>,
   <vscale x 16 x i1>,
   i64);
 
-define <vscale x 1 x double> @intrinsic_vfwredsum_mask_vs_nxv1f64_nxv16f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 16 x float> %1, <vscale x 1 x double> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwredsum_mask_vs_nxv1f64_nxv16f32_nxv1f64:
+define <vscale x 1 x double> @intrinsic_vfwredusum_mask_vs_nxv1f64_nxv16f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 16 x float> %1, <vscale x 1 x double> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv1f64_nxv16f32_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, mu
-; CHECK-NEXT:    vfwredsum.vs v8, v16, v9, v0.t
+; CHECK-NEXT:    vfwredusum.vs v8, v16, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv16f32(
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv16f32(
     <vscale x 1 x double> %0,
     <vscale x 16 x float> %1,
     <vscale x 1 x double> %2,
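The vreductions-fp-sdnode.ll hunks below exercise the generic SelectionDAG path rather than the RISC-V intrinsics: a reassociable llvm.vector.reduce.fadd is expanded into a splat of the start/identity value, an unordered reduction, and a scalar add, so again only the printed mnemonic changes. A rough sketch of the kind of IR behind those CHECK lines (the function name and element type are illustrative; the actual test bodies sit in the elided context of the hunks):

declare half @llvm.vector.reduce.fadd.nxv1f16(half, <vscale x 1 x half>)

define half @reduce_fadd_sketch(half %start, <vscale x 1 x half> %v) {
  ; 'reassoc' permits the unordered vfredusum.vs lowering shown below;
  ; without it the ordered vfredosum.vs form would have to be used instead.
  %red = call reassoc half @llvm.vector.reduce.fadd.nxv1f16(half %start, <vscale x 1 x half> %v)
  ret half %red
}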
diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode.ll
@@ -14,7 +14,7 @@
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v25, ft0
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vfredsum.vs v25, v8, v25
+; CHECK-NEXT:    vfredusum.vs v25, v8, v25
 ; CHECK-NEXT:    vfmv.f.s ft0, v25
 ; CHECK-NEXT:    fadd.h fa0, fa0, ft0
 ; CHECK-NEXT:    ret
@@ -45,7 +45,7 @@
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v25, ft0
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
-; CHECK-NEXT:    vfredsum.vs v25, v8, v25
+; CHECK-NEXT:    vfredusum.vs v25, v8, v25
 ; CHECK-NEXT:    vfmv.f.s ft0, v25
 ; CHECK-NEXT:    fadd.h fa0, fa0, ft0
 ; CHECK-NEXT:    ret
@@ -75,7 +75,7 @@
 ; CHECK-NEXT:    flh ft0, %lo(.LCPI4_0)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v25, ft0
-; CHECK-NEXT:    vfredsum.vs v25, v8, v25
+; CHECK-NEXT:    vfredusum.vs v25, v8, v25
 ; CHECK-NEXT:    vfmv.f.s ft0, v25
 ; CHECK-NEXT:    fadd.h fa0, fa0, ft0
 ; CHECK-NEXT:    ret
@@ -105,7 +105,7 @@
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v25, ft0
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
-; CHECK-NEXT:    vfredsum.vs v25, v8, v25
+; CHECK-NEXT:    vfredusum.vs v25, v8, v25
 ; CHECK-NEXT:    vfmv.f.s ft0, v25
 ; CHECK-NEXT:    fadd.s fa0, fa0, ft0
 ; CHECK-NEXT:    ret
@@ -135,7 +135,7 @@
 ; CHECK-NEXT:    flw ft0, %lo(.LCPI8_0)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v25, ft0
-; CHECK-NEXT:    vfredsum.vs v25, v8, v25
+; CHECK-NEXT:    vfredusum.vs v25, v8, v25
 ; CHECK-NEXT:    vfmv.f.s ft0, v25
 ; CHECK-NEXT:    fadd.s fa0, fa0, ft0
 ; CHECK-NEXT:    ret
@@ -165,7 +165,7 @@
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v25, ft0
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
-; CHECK-NEXT:    vfredsum.vs v25, v8, v25
+; CHECK-NEXT:    vfredusum.vs v25, v8, v25
 ; CHECK-NEXT:    vfmv.f.s ft0, v25
 ; CHECK-NEXT:    fadd.s fa0, fa0, ft0
 ; CHECK-NEXT:    ret
@@ -195,7 +195,7 @@
 ; CHECK-NEXT:    fld ft0, %lo(.LCPI12_0)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v25, ft0
-; CHECK-NEXT:    vfredsum.vs v25, v8, v25
+; CHECK-NEXT:    vfredusum.vs v25, v8, v25
 ; CHECK-NEXT:    vfmv.f.s ft0, v25
 ; CHECK-NEXT:    fadd.d fa0, fa0, ft0
 ; CHECK-NEXT:    ret
@@ -225,7 +225,7 @@
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v25, ft0
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
-; CHECK-NEXT:    vfredsum.vs v25, v8, v25
+; CHECK-NEXT:    vfredusum.vs v25, v8, v25
 ; CHECK-NEXT:    vfmv.f.s ft0, v25
 ; CHECK-NEXT:    fadd.d fa0, fa0, ft0
 ; CHECK-NEXT:    ret
@@ -256,7 +256,7 @@
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v25, ft0
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
-; CHECK-NEXT:    vfredsum.vs v25, v8, v25
+; CHECK-NEXT:    vfredusum.vs v25, v8, v25
 ; CHECK-NEXT:    vfmv.f.s ft0, v25
 ; CHECK-NEXT:    fadd.d fa0, fa0, ft0
 ; CHECK-NEXT:    ret
diff --git a/llvm/test/MC/RISCV/rvv/aliases.s b/llvm/test/MC/RISCV/rvv/aliases.s
--- a/llvm/test/MC/RISCV/rvv/aliases.s
+++ b/llvm/test/MC/RISCV/rvv/aliases.s
@@ -87,3 +87,7 @@
 # ALIAS:    vse1.v v8, (a0) # encoding: [0x27,0x04,0xb5,0x02]
 # NO-ALIAS: vsm.v v8, (a0) # encoding: [0x27,0x04,0xb5,0x02]
 vse1.v v8, (a0)
+# ALIAS:    vfredusum.vs v8, v4, v20, v0.t # encoding: [0x57,0x14,0x4a,0x04]
+vfredsum.vs v8, v4, v20, v0.t
+# ALIAS:    vfwredusum.vs v8, v4, v20, v0.t # encoding: [0x57,0x14,0x4a,0xc4]
+vfwredsum.vs v8, v4, v20, v0.t
diff --git a/llvm/test/MC/RISCV/rvv/freduction.s b/llvm/test/MC/RISCV/rvv/freduction.s
--- a/llvm/test/MC/RISCV/rvv/freduction.s
+++ b/llvm/test/MC/RISCV/rvv/freduction.s
@@ -23,14 +23,14 @@
 # CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V'
 # CHECK-UNKNOWN: 57 14 4a 0e
 
-vfredsum.vs v8, v4, v20, v0.t
-# CHECK-INST: vfredsum.vs v8, v4, v20, v0.t
+vfredusum.vs v8, v4, v20, v0.t
+# CHECK-INST: vfredusum.vs v8, v4, v20, v0.t
 # CHECK-ENCODING: [0x57,0x14,0x4a,0x04]
 # CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V'
 # CHECK-UNKNOWN: 57 14 4a 04
 
-vfredsum.vs v8, v4, v20
-# CHECK-INST: vfredsum.vs v8, v4, v20
+vfredusum.vs v8, v4, v20
+# CHECK-INST: vfredusum.vs v8, v4, v20
 # CHECK-ENCODING: [0x57,0x14,0x4a,0x06]
 # CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V'
 # CHECK-UNKNOWN: 57 14 4a 06
@@ -71,14 +71,14 @@
 # CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V'
 # CHECK-UNKNOWN: 57 14 4a ce
 
-vfwredsum.vs v8, v4, v20, v0.t
-# CHECK-INST: vfwredsum.vs v8, v4, v20, v0.t
+vfwredusum.vs v8, v4, v20, v0.t
+# CHECK-INST: vfwredusum.vs v8, v4, v20, v0.t
 # CHECK-ENCODING: [0x57,0x14,0x4a,0xc4]
 # CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V'
 # CHECK-UNKNOWN: 57 14 4a c4
 
-vfwredsum.vs v8, v4, v20
-# CHECK-INST: vfwredsum.vs v8, v4, v20
+vfwredusum.vs v8, v4, v20
+# CHECK-INST: vfwredusum.vs v8, v4, v20
 # CHECK-ENCODING: [0x57,0x14,0x4a,0xc6]
 # CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V'
 # CHECK-UNKNOWN: 57 14 4a c6
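Taken together, the MC changes keep the old spellings usable: vfredsum.vs and vfwredsum.vs remain accepted as aliases, but they parse and print as vfredusum.vs and vfwredusum.vs with unchanged encodings. A quick manual check, assuming an llvm-mc built from this tree (the invocation is illustrative; the encodings are the ones asserted in the tests above):

$ echo 'vfredsum.vs v8, v4, v20, v0.t' | llvm-mc -triple=riscv64 --mattr=+f,+experimental-v -show-encoding
        vfredusum.vs    v8, v4, v20, v0.t       # encoding: [0x57,0x14,0x4a,0x04]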