diff --git a/clang/include/clang/Basic/Builtins.def b/clang/include/clang/Basic/Builtins.def
--- a/clang/include/clang/Basic/Builtins.def
+++ b/clang/include/clang/Basic/Builtins.def
@@ -24,7 +24,8 @@
 // c -> char
 // s -> short
 // i -> int
-// h -> half
+// h -> half (__fp16, OpenCL)
+// x -> half (_Float16)
 // f -> float
 // d -> double
 // z -> size_t
diff --git a/clang/include/clang/Basic/IdentifierTable.h b/clang/include/clang/Basic/IdentifierTable.h
--- a/clang/include/clang/Basic/IdentifierTable.h
+++ b/clang/include/clang/Basic/IdentifierTable.h
@@ -56,7 +56,7 @@
 /// of a pointer to one of these classes.
 enum { IdentifierInfoAlignment = 8 };
-static constexpr int ObjCOrBuiltinIDBits = 15;
+static constexpr int ObjCOrBuiltinIDBits = 16;
 /// One of these records is kept for each identifier that
 /// is lexed. This contains information about whether the token was \#define'd,
diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -40,7 +40,7 @@
 // s: int16_t (i16)
 // i: int32_t (i32)
 // l: int64_t (i64)
-// h: float16_t (half)
+// x: float16_t (half)
 // f: float32_t (float)
 // d: float64_t (double)
 //
@@ -361,31 +361,31 @@
       ["vx", "Uv", "UvUvUeUv"]]>;
 }
 multiclass RVVFloatingTerBuiltinSet {
-  defm "" : RVVOutOp1BuiltinSet;
 }
}
-let HasMaskedOffOperand = false, Log2LMUL = [-1, 0, 1, 2] in {
+let HasMaskedOffOperand = false, Log2LMUL = [-2, -1, 0, 1, 2] in {
 multiclass RVVFloatingWidenTerBuiltinSet {
-  defm "" : RVVOutOp1Op2BuiltinSet;
 }
}
multiclass RVVFloatingBinBuiltinSet
-    : RVVOutOp1BuiltinSet;
multiclass RVVFloatingBinVFBuiltinSet
-    : RVVOutOp1BuiltinSet;
multiclass RVVFloatingMaskOutBuiltinSet
-    : RVVOp0Op1BuiltinSet;
@@ -415,7 +415,7 @@
 let HasMaskedOffOperand = false in {
 multiclass RVVSlideBuiltinSet {
-  defm "" : RVVOutBuiltinSet;
   defm "" : RVVOutBuiltinSet;
@@ -424,7 +424,7 @@
 class RVVFloatingUnaryBuiltin
-    : RVVOutBuiltin {
+    : RVVOutBuiltin {
   let Name = NAME # "_" # builtin_suffix;
 }
@@ -438,22 +438,22 @@
 }
 class RVVConvToSignedBuiltin
-    : RVVConvBuiltin<"Iv", "Ivv", "fd", mangled_name>;
+    : RVVConvBuiltin<"Iv", "Ivv", "xfd", mangled_name>;
 class RVVConvToUnsignedBuiltin
-    : RVVConvBuiltin<"Uv", "Uvv", "fd", mangled_name>;
+    : RVVConvBuiltin<"Uv", "Uvv", "xfd", mangled_name>;
 class RVVConvToWidenSignedBuiltin
-    : RVVConvBuiltin<"Iw", "Iwv", "f", mangled_name>;
+    : RVVConvBuiltin<"Iw", "Iwv", "xf", mangled_name>;
 class RVVConvToWidenUnsignedBuiltin
-    : RVVConvBuiltin<"Uw", "Uwv", "f", mangled_name>;
+    : RVVConvBuiltin<"Uw", "Uwv", "xf", mangled_name>;
 class RVVConvToNarrowingSignedBuiltin
-    : RVVConvBuiltin<"Iv", "IvFw", "si", mangled_name>;
+    : RVVConvBuiltin<"Iv", "IvFw", "csi", mangled_name>;
 class RVVConvToNarrowingUnsignedBuiltin
-    : RVVConvBuiltin<"Uv", "UvFw", "si", mangled_name>;
+    : RVVConvBuiltin<"Uv", "UvFw", "csi", mangled_name>;
 let HasMaskedOffOperand = false in {
 multiclass RVVSignedReductionBuiltin {
@@ -465,11 +465,11 @@
      [["vs", "UvUSv", "USvUSvUvUSv"]]>;
 }
 multiclass RVVFloatingReductionBuiltin {
-  defm "" : RVVOutOp1BuiltinSet;
 }
 multiclass RVVFloatingWidenReductionBuiltin {
-  defm "" : RVVOutOp1BuiltinSet;
 }
}
@@ -531,23 +531,23 @@
      ["wx", "Uw", "UwUwUe"]]>;
 multiclass RVVFloatingWidenBinBuiltinSet
-    : RVVWidenBuiltinSet;
 multiclass RVVFloatingWidenOp0BinBuiltinSet
-    : RVVWidenWOp0BuiltinSet;
-defvar TypeList = ["c","s","i","l","f","d"];
+defvar TypeList = ["c","s","i","l","x","f","d"];
 defvar EEWList = [["8",
"(Log2EEW:3)"], ["16", "(Log2EEW:4)"], ["32", "(Log2EEW:5)"], ["64", "(Log2EEW:6)"]]; class IsFloat { - bit val = !or(!eq(type, "h"), !eq(type, "f"), !eq(type, "d")); + bit val = !or(!eq(type, "x"), !eq(type, "f"), !eq(type, "d")); } let HasNoMaskedOverloaded = false, @@ -1061,12 +1061,12 @@ // 7.5. Vector Strided Instructions defm vlse8: RVVVLSEBuiltin<["c"]>; -defm vlse16: RVVVLSEBuiltin<["s"]>; +defm vlse16: RVVVLSEBuiltin<["s","x"]>; defm vlse32: RVVVLSEBuiltin<["i","f"]>; defm vlse64: RVVVLSEBuiltin<["l","d"]>; defm vsse8 : RVVVSSEBuiltin<["c"]>; -defm vsse16: RVVVSSEBuiltin<["s"]>; +defm vsse16: RVVVSSEBuiltin<["s","x"]>; defm vsse32: RVVVSSEBuiltin<["i","f"]>; defm vsse64: RVVVSSEBuiltin<["l","d"]>; @@ -1079,7 +1079,7 @@ // 7.7. Unit-stride Fault-Only-First Loads defm vle8ff: RVVVLEFFBuiltin<["c"]>; -defm vle16ff: RVVVLEFFBuiltin<["s"]>; +defm vle16ff: RVVVLEFFBuiltin<["s","x"]>; defm vle32ff: RVVVLEFFBuiltin<["i", "f"]>; defm vle64ff: RVVVLEFFBuiltin<["l", "d"]>; @@ -1251,7 +1251,7 @@ let MangledName = "vmv_v" in { defm vmv_v : RVVOutBuiltinSet<"vmv_v_v", "csil", [["v", "Uv", "UvUv"]]>; - defm vmv_v : RVVOutBuiltinSet<"vmv_v_v", "csilfd", + defm vmv_v : RVVOutBuiltinSet<"vmv_v_v", "csilxfd", [["v", "v", "vv"]]>; } let HasNoMaskedOverloaded = false in @@ -1304,8 +1304,8 @@ defm vfrdiv : RVVFloatingBinVFBuiltinSet; // 14.5. Vector Widening Floating-Point Multiply -let Log2LMUL = [-1, 0, 1, 2] in { - defm vfwmul : RVVOutOp0Op1BuiltinSet<"vfwmul", "f", +let Log2LMUL = [-2, -1, 0, 1, 2] in { + defm vfwmul : RVVOutOp0Op1BuiltinSet<"vfwmul", "xf", [["vv", "w", "wvv"], ["vf", "w", "wve"]]>; } @@ -1343,8 +1343,8 @@ defm vfsgnj : RVVFloatingBinBuiltinSet; defm vfsgnjn : RVVFloatingBinBuiltinSet; defm vfsgnjx : RVVFloatingBinBuiltinSet; -defm vfneg_v : RVVPseudoVFUnaryBuiltin<"vfsgnjn", "fd">; -defm vfabs_v : RVVPseudoVFUnaryBuiltin<"vfsgnjx", "fd">; +defm vfneg_v : RVVPseudoVFUnaryBuiltin<"vfsgnjn", "xfd">; +defm vfabs_v : RVVPseudoVFUnaryBuiltin<"vfsgnjx", "xfd">; // 14.13. Vector Floating-Point Compare Instructions defm vmfeq : RVVFloatingMaskOutBuiltinSet; @@ -1356,7 +1356,7 @@ // 14.14. Vector Floating-Point Classify Instruction let Name = "vfclass_v" in - def vfclass : RVVOp0Builtin<"Uv", "Uvv", "fd">; + def vfclass : RVVOp0Builtin<"Uv", "Uvv", "xfd">; // 14.15. Vector Floating-Point Merge Instructio // C/C++ Operand: (mask, op1, op2, vl), Builtin: (op1, op2, mask, vl) @@ -1365,15 +1365,15 @@ std::rotate(Ops.begin(), Ops.begin() + 1, Ops.begin() + 3); IntrinsicTypes = {ResultType, Ops[1]->getType(), Ops[3]->getType()}; }] in { - defm vmerge : RVVOutOp1BuiltinSet<"vfmerge", "fd", + defm vmerge : RVVOutOp1BuiltinSet<"vfmerge", "xfd", [["vvm", "v", "vmvv"]]>; - defm vfmerge : RVVOutOp1BuiltinSet<"vfmerge", "fd", + defm vfmerge : RVVOutOp1BuiltinSet<"vfmerge", "xfd", [["vfm", "v", "vmve"]]>; } // 14.16. Vector Floating-Point Move Instruction let HasMask = false, HasNoMaskedOverloaded = false in - defm vfmv_v : RVVOutBuiltinSet<"vfmv_v_f", "fd", + defm vfmv_v : RVVOutBuiltinSet<"vfmv_v_f", "xfd", [["f", "v", "ve"]]>; // 14.17. Single-Width Floating-Point/Integer Type-Convert Instructions @@ -1392,7 +1392,7 @@ def vfwcvt_rtz_x_f_v : RVVConvToWidenSignedBuiltin<"vfwcvt_rtz_x">; def vfwcvt_f_xu_v : RVVConvBuiltin<"Fw", "FwUv", "csi", "vfwcvt_f">; def vfwcvt_f_x_v : RVVConvBuiltin<"Fw", "Fwv", "csi", "vfwcvt_f">; - def vfwcvt_f_f_v : RVVConvBuiltin<"w", "wv", "hf", "vfwcvt_f">; + def vfwcvt_f_f_v : RVVConvBuiltin<"w", "wv", "xf", "vfwcvt_f">; } // 14.19. 
Narrowing Floating-Point/Integer Type-Convert Instructions
@@ -1401,10 +1401,10 @@
   def vfncvt_x_f_w : RVVConvToNarrowingSignedBuiltin<"vfncvt_x">;
   def vfncvt_rtz_xu_f_w : RVVConvToNarrowingUnsignedBuiltin<"vfncvt_rtz_xu">;
   def vfncvt_rtz_x_f_w : RVVConvToNarrowingSignedBuiltin<"vfncvt_rtz_x">;
-  def vfncvt_f_xu_w : RVVConvBuiltin<"Fv", "FvUw", "si", "vfncvt_f">;
-  def vfncvt_f_x_w : RVVConvBuiltin<"Fv", "Fvw", "si", "vfncvt_f">;
-  def vfncvt_f_f_w : RVVConvBuiltin<"v", "vw", "f", "vfncvt_f">;
-  def vfncvt_rod_f_f_w : RVVConvBuiltin<"v", "vw", "f", "vfncvt_rod_f">;
+  def vfncvt_f_xu_w : RVVConvBuiltin<"Fv", "FvUw", "csi", "vfncvt_f">;
+  def vfncvt_f_x_w : RVVConvBuiltin<"Fv", "Fvw", "csi", "vfncvt_f">;
+  def vfncvt_f_f_w : RVVConvBuiltin<"v", "vw", "xf", "vfncvt_f">;
+  def vfncvt_rod_f_f_w : RVVConvBuiltin<"v", "vw", "xf", "vfncvt_rod_f">;
 }
 // 15. Vector Reduction Operations
@@ -1493,10 +1493,10 @@
 // 17.2. Floating-Point Scalar Move Instructions
 let HasMask = false in {
   let HasVL = false, MangledName = "vfmv_f" in
-    defm vfmv_f : RVVOp0BuiltinSet<"vfmv_f_s", "fd",
+    defm vfmv_f : RVVOp0BuiltinSet<"vfmv_f_s", "xfd",
                                    [["s", "ve", "ev"]]>;
   let MangledName = "vfmv_s" in
-    defm vfmv_s : RVVOutBuiltinSet<"vfmv_s_f", "fd",
+    defm vfmv_s : RVVOutBuiltinSet<"vfmv_s_f", "xfd",
                                    [["f", "v", "vve"],
                                     ["x", "Uv", "UvUvUe"]]>;
 }
@@ -1517,11 +1517,11 @@
 // 17.4. Vector Register Gather Instructions
 // signed and floating type
-defm vrgather : RVVOutBuiltinSet<"vrgather_vv", "csilfd",
+defm vrgather : RVVOutBuiltinSet<"vrgather_vv", "csilxfd",
                                  [["vv", "v", "vvUv"]]>;
-defm vrgather : RVVOutBuiltinSet<"vrgather_vx", "csilfd",
+defm vrgather : RVVOutBuiltinSet<"vrgather_vx", "csilxfd",
                                  [["vx", "v", "vvz"]]>;
-defm vrgatherei16 : RVVOutBuiltinSet<"vrgatherei16_vv", "csilfd",
+defm vrgatherei16 : RVVOutBuiltinSet<"vrgatherei16_vv", "csilxfd",
                                      [["vv", "v", "vv(Log2EEW:4)Uv"]]>;
 // unsigned type
 defm vrgather : RVVOutBuiltinSet<"vrgather_vv", "csil",
@@ -1538,7 +1538,7 @@
   IntrinsicTypes = {ResultType, Ops[3]->getType()};
 }] in {
   // signed and floating type
-  defm vcompress : RVVOutBuiltinSet<"vcompress", "csilfd",
+  defm vcompress : RVVOutBuiltinSet<"vcompress", "csilxfd",
                                     [["vm", "v", "vmvv"]]>;
   // unsigned type
   defm vcompress : RVVOutBuiltinSet<"vcompress", "csil",
@@ -1554,11 +1554,11 @@
 }] in {
   // Reinterpret between different type under the same SEW and LMUL
   def vreinterpret_i_u : RVVBuiltin<"Uvv", "vUv", "csil">;
-  def vreinterpret_i_f : RVVBuiltin<"Fvv", "vFv", "il">;
+  def vreinterpret_i_f : RVVBuiltin<"Fvv", "vFv", "sil">;
   def vreinterpret_u_i : RVVBuiltin<"vUv", "Uvv", "csil">;
-  def vreinterpret_u_f : RVVBuiltin<"FvUv", "UvFv", "il">;
-  def vreinterpret_f_i : RVVBuiltin<"vFv", "Fvv", "il">;
-  def vreinterpret_f_u : RVVBuiltin<"UvFv", "FvUv", "il">;
+  def vreinterpret_u_f : RVVBuiltin<"FvUv", "UvFv", "sil">;
+  def vreinterpret_f_i : RVVBuiltin<"vFv", "Fvv", "sil">;
+  def vreinterpret_f_u : RVVBuiltin<"UvFv", "FvUv", "sil">;
   // Reinterpret between different SEW under the same LMUL
   foreach dst_sew = ["(FixedSEW:8)", "(FixedSEW:16)", "(FixedSEW:32)",
@@ -1572,7 +1572,7 @@
   ManualCodegen = [{
     return llvm::UndefValue::get(ResultType);
   }] in {
-  def vundefined : RVVBuiltin<"v", "v", "csilfd">;
+  def vundefined : RVVBuiltin<"v", "v", "csilxfd">;
   def vundefined_u : RVVBuiltin<"Uv", "Uv", "csil">;
 }
@@ -1587,7 +1587,7 @@
   }
 }] in {
   foreach dst_lmul = ["(SFixedLog2LMUL:-3)", "(SFixedLog2LMUL:-2)", "(SFixedLog2LMUL:-1)", "(SFixedLog2LMUL:0)", "(SFixedLog2LMUL:1)", "(SFixedLog2LMUL:2)"] in {
-    def vlmul_trunc #
dst_lmul : RVVBuiltin<"v" # dst_lmul # "v", dst_lmul # "vv", "csilfd">;
+    def vlmul_trunc # dst_lmul : RVVBuiltin<"v" # dst_lmul # "v", dst_lmul # "vv", "csilxfd">;
     def vlmul_trunc_u # dst_lmul : RVVBuiltin<"Uv" # dst_lmul # "Uv", dst_lmul # "UvUv", "csil">;
   }
 }
@@ -1605,7 +1605,7 @@
 }] in {
   foreach dst_lmul = ["(LFixedLog2LMUL:-2)", "(LFixedLog2LMUL:-1)", "(LFixedLog2LMUL:-0)", "(LFixedLog2LMUL:1)", "(LFixedLog2LMUL:2)", "(LFixedLog2LMUL:3)"] in {
-    def vlmul_ext # dst_lmul : RVVBuiltin<"v" # dst_lmul # "v", dst_lmul # "vv", "csilfd">;
+    def vlmul_ext # dst_lmul : RVVBuiltin<"v" # dst_lmul # "v", dst_lmul # "vv", "csilxfd">;
     def vlmul_ext_u # dst_lmul : RVVBuiltin<"Uv" # dst_lmul # "Uv", dst_lmul # "UvUv", "csil">;
   }
 }
diff --git a/clang/lib/AST/ASTContext.cpp b/clang/lib/AST/ASTContext.cpp
--- a/clang/lib/AST/ASTContext.cpp
+++ b/clang/lib/AST/ASTContext.cpp
@@ -3869,7 +3869,7 @@
             llvm::ElementCount::getScalable(NumEls), NF};
 #define RVV_VECTOR_TYPE_FLOAT(Name, Id, SingletonId, NumEls, ElBits, NF) \
   case BuiltinType::Id: \
-    return {ElBits == 16 ? HalfTy : (ElBits == 32 ? FloatTy : DoubleTy), \
+    return {ElBits == 16 ? Float16Ty : (ElBits == 32 ? FloatTy : DoubleTy), \
            llvm::ElementCount::getScalable(NumEls), NF};
 #define RVV_PREDICATE_TYPE(Name, Id, SingletonId, NumEls) \
   case BuiltinType::Id: \
@@ -10370,6 +10370,11 @@
   // Read the base type.
   switch (*Str++) {
   default: llvm_unreachable("Unknown builtin type letter!");
+  case 'x':
+    assert(HowLong == 0 && !Signed && !Unsigned &&
+           "Bad modifiers used with 'x'!");
+    Type = Context.Float16Ty;
+    break;
   case 'y':
     assert(HowLong == 0 && !Signed && !Unsigned &&
            "Bad modifiers used with 'y'!");
diff --git a/clang/lib/Basic/Targets/RISCV.h b/clang/lib/Basic/Targets/RISCV.h
--- a/clang/lib/Basic/Targets/RISCV.h
+++ b/clang/lib/Basic/Targets/RISCV.h
@@ -60,6 +60,7 @@
     WIntType = UnsignedInt;
     HasRISCVVTypes = true;
     MCountName = "_mcount";
+    HasFloat16 = true;
   }
   bool setCPU(const std::string &Name) override {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfadd.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfadd.c
@@ -1,367 +1,487 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
-// RUN: -target-feature +experimental-zfh -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: -target-feature +experimental-zfh -disable-O0-optnone -emit-llvm %s -o - \
+// RUN: | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
 #include <riscv_vector.h>
-//
+// CHECK-RV64-LABEL: @test_vfadd_vv_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+vfloat16mf4_t test_vfadd_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
+  return vfadd(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfadd_vf_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.f16.i64(<vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+vfloat16mf4_t test_vfadd_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) {
+  return vfadd(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfadd_vv_f16mf2(
+//
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f16.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +vfloat16mf2_t test_vfadd_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vfadd(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfadd_vf_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +vfloat16mf2_t test_vfadd_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vfadd(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfadd_vv_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f16.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +vfloat16m1_t test_vfadd_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vfadd(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfadd_vf_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +vfloat16m1_t test_vfadd_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vfadd(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfadd_vv_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f16.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +vfloat16m2_t test_vfadd_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vfadd(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfadd_vf_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +vfloat16m2_t test_vfadd_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vfadd(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfadd_vv_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f16.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +vfloat16m4_t test_vfadd_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vfadd(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfadd_vf_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +vfloat16m4_t test_vfadd_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vfadd(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfadd_vv_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv32f16.nxv32f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +vfloat16m8_t test_vfadd_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return vfadd(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfadd_vf_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv32f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +vfloat16m8_t test_vfadd_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) { + return vfadd(op1, op2, vl); +} + // CHECK-RV64-LABEL: @test_vfadd_vv_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfadd.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat32mf2_t test_vfadd_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { +vfloat32mf2_t test_vfadd_vv_f32mf2 (vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { return vfadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vfadd_vf_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat32mf2_t test_vfadd_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { +vfloat32mf2_t test_vfadd_vf_f32mf2 (vfloat32mf2_t op1, float op2, size_t vl) { return vfadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vfadd_vv_f32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat32m1_t test_vfadd_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { +vfloat32m1_t test_vfadd_vv_f32m1 (vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { return vfadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vfadd_vf_f32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat32m1_t test_vfadd_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { +vfloat32m1_t test_vfadd_vf_f32m1 (vfloat32m1_t op1, float op2, size_t vl) { return vfadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vfadd_vv_f32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat32m2_t test_vfadd_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { +vfloat32m2_t test_vfadd_vv_f32m2 (vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { return vfadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vfadd_vf_f32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat32m2_t test_vfadd_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { +vfloat32m2_t test_vfadd_vf_f32m2 (vfloat32m2_t op1, float op2, size_t vl) { return vfadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vfadd_vv_f32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat32m4_t test_vfadd_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { +vfloat32m4_t test_vfadd_vv_f32m4 (vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { return vfadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vfadd_vf_f32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat32m4_t test_vfadd_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { +vfloat32m4_t test_vfadd_vf_f32m4 (vfloat32m4_t op1, float op2, size_t vl) { return vfadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vfadd_vv_f32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat32m8_t 
test_vfadd_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { +vfloat32m8_t test_vfadd_vv_f32m8 (vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { return vfadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vfadd_vf_f32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat32m8_t test_vfadd_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { +vfloat32m8_t test_vfadd_vf_f32m8 (vfloat32m8_t op1, float op2, size_t vl) { return vfadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vfadd_vv_f64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat64m1_t test_vfadd_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { +vfloat64m1_t test_vfadd_vv_f64m1 (vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { return vfadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vfadd_vf_f64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat64m1_t test_vfadd_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { +vfloat64m1_t test_vfadd_vf_f64m1 (vfloat64m1_t op1, double op2, size_t vl) { return vfadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vfadd_vv_f64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat64m2_t test_vfadd_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { +vfloat64m2_t test_vfadd_vv_f64m2 (vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { return vfadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vfadd_vf_f64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat64m2_t test_vfadd_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { +vfloat64m2_t test_vfadd_vf_f64m2 (vfloat64m2_t op1, double op2, size_t vl) { return vfadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vfadd_vv_f64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat64m4_t test_vfadd_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { +vfloat64m4_t test_vfadd_vv_f64m4 (vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { return vfadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vfadd_vf_f64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat64m4_t test_vfadd_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { +vfloat64m4_t test_vfadd_vf_f64m4 (vfloat64m4_t op1, double op2, size_t vl) { return vfadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vfadd_vv_f64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat64m8_t test_vfadd_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { +vfloat64m8_t test_vfadd_vv_f64m8 (vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { return 
vfadd(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vfadd_vf_f64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat64m8_t test_vfadd_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { +vfloat64m8_t test_vfadd_vf_f64m8 (vfloat64m8_t op1, double op2, size_t vl) { return vfadd(op1, op2, vl); } -// +// CHECK-RV64-LABEL: @test_vfadd_vv_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +vfloat16mf4_t test_vfadd_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return vfadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfadd_vf_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +vfloat16mf4_t test_vfadd_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vfadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfadd_vv_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +vfloat16mf2_t test_vfadd_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vfadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfadd_vf_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +vfloat16mf2_t test_vfadd_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vfadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfadd_vv_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +vfloat16m1_t test_vfadd_vv_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vfadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfadd_vf_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +vfloat16m1_t test_vfadd_vf_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vfadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfadd_vv_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +vfloat16m2_t test_vfadd_vv_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vfadd(mask, 
maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfadd_vf_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +vfloat16m2_t test_vfadd_vf_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vfadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfadd_vv_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +vfloat16m4_t test_vfadd_vv_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vfadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfadd_vf_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +vfloat16m4_t test_vfadd_vf_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vfadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfadd_vv_f16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +vfloat16m8_t test_vfadd_vv_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return vfadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfadd_vf_f16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +vfloat16m8_t test_vfadd_vf_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return vfadd(mask, maskedoff, op1, op2, vl); +} + // CHECK-RV64-LABEL: @test_vfadd_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat32mf2_t test_vfadd_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { +vfloat32mf2_t test_vfadd_vv_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { return vfadd(mask, maskedoff, op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vfadd_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat32mf2_t test_vfadd_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { +vfloat32mf2_t test_vfadd_vf_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { return vfadd(mask, maskedoff, op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vfadd_vv_f32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat32m1_t test_vfadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { +vfloat32m1_t test_vfadd_vv_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { return vfadd(mask, maskedoff, op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vfadd_vf_f32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat32m1_t test_vfadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { +vfloat32m1_t test_vfadd_vf_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { return vfadd(mask, maskedoff, op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vfadd_vv_f32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat32m2_t test_vfadd_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { +vfloat32m2_t test_vfadd_vv_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { return vfadd(mask, maskedoff, op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vfadd_vf_f32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat32m2_t test_vfadd_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { +vfloat32m2_t test_vfadd_vf_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { return vfadd(mask, maskedoff, op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vfadd_vv_f32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat32m4_t test_vfadd_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { +vfloat32m4_t test_vfadd_vv_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { return vfadd(mask, maskedoff, op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vfadd_vf_f32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat32m4_t test_vfadd_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { +vfloat32m4_t test_vfadd_vf_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { return vfadd(mask, maskedoff, op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vfadd_vv_f32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat32m8_t 
test_vfadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { +vfloat32m8_t test_vfadd_vv_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { return vfadd(mask, maskedoff, op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vfadd_vf_f32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat32m8_t test_vfadd_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { +vfloat32m8_t test_vfadd_vf_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { return vfadd(mask, maskedoff, op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vfadd_vv_f64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat64m1_t test_vfadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { +vfloat64m1_t test_vfadd_vv_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { return vfadd(mask, maskedoff, op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vfadd_vf_f64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat64m1_t test_vfadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { +vfloat64m1_t test_vfadd_vf_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { return vfadd(mask, maskedoff, op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vfadd_vv_f64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat64m2_t test_vfadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { +vfloat64m2_t test_vfadd_vv_f64m2_m (vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { return vfadd(mask, maskedoff, op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vfadd_vf_f64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat64m2_t test_vfadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { +vfloat64m2_t test_vfadd_vf_f64m2_m (vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { return vfadd(mask, maskedoff, op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vfadd_vv_f64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat64m4_t test_vfadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { +vfloat64m4_t test_vfadd_vv_f64m4_m (vbool16_t mask, 
vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { return vfadd(mask, maskedoff, op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vfadd_vf_f64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat64m4_t test_vfadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { +vfloat64m4_t test_vfadd_vf_f64m4_m (vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { return vfadd(mask, maskedoff, op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vfadd_vv_f64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat64m8_t test_vfadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { +vfloat64m8_t test_vfadd_vv_f64m8_m (vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { return vfadd(mask, maskedoff, op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vfadd_vf_f64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat64m8_t test_vfadd_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { +vfloat64m8_t test_vfadd_vf_f64m8_m (vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { return vfadd(mask, maskedoff, op1, op2, vl); } - diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfadd.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfadd.c @@ -1,367 +1,487 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ -// RUN: -target-feature +experimental-zfh -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: -target-feature +experimental-zfh -disable-O0-optnone -emit-llvm %s -o - \ +// RUN: | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s #include -// +// CHECK-RV64-LABEL: @test_vfadd_vv_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f16.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +vfloat16mf4_t test_vfadd_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return vfadd_vv_f16mf4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfadd_vf_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +vfloat16mf4_t test_vfadd_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vfadd_vf_f16mf4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfadd_vv_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f16.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +vfloat16mf2_t 
test_vfadd_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vfadd_vv_f16mf2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfadd_vf_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +vfloat16mf2_t test_vfadd_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vfadd_vf_f16mf2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfadd_vv_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f16.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +vfloat16m1_t test_vfadd_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vfadd_vv_f16m1(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfadd_vf_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +vfloat16m1_t test_vfadd_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vfadd_vf_f16m1(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfadd_vv_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f16.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +vfloat16m2_t test_vfadd_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vfadd_vv_f16m2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfadd_vf_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +vfloat16m2_t test_vfadd_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vfadd_vf_f16m2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfadd_vv_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f16.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +vfloat16m4_t test_vfadd_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vfadd_vv_f16m4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfadd_vf_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +vfloat16m4_t test_vfadd_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vfadd_vf_f16m4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfadd_vv_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv32f16.nxv32f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +vfloat16m8_t test_vfadd_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return vfadd_vv_f16m8(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfadd_vf_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv32f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +vfloat16m8_t test_vfadd_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) { + return vfadd_vf_f16m8(op1, op2, vl); +} + // CHECK-RV64-LABEL: @test_vfadd_vv_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] -// 
-vfloat32mf2_t test_vfadd_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { +vfloat32mf2_t test_vfadd_vv_f32mf2 (vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { return vfadd_vv_f32mf2(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vfadd_vf_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat32mf2_t test_vfadd_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { +vfloat32mf2_t test_vfadd_vf_f32mf2 (vfloat32mf2_t op1, float op2, size_t vl) { return vfadd_vf_f32mf2(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vfadd_vv_f32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat32m1_t test_vfadd_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { +vfloat32m1_t test_vfadd_vv_f32m1 (vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { return vfadd_vv_f32m1(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vfadd_vf_f32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat32m1_t test_vfadd_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { +vfloat32m1_t test_vfadd_vf_f32m1 (vfloat32m1_t op1, float op2, size_t vl) { return vfadd_vf_f32m1(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vfadd_vv_f32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat32m2_t test_vfadd_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { +vfloat32m2_t test_vfadd_vv_f32m2 (vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { return vfadd_vv_f32m2(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vfadd_vf_f32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat32m2_t test_vfadd_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { +vfloat32m2_t test_vfadd_vf_f32m2 (vfloat32m2_t op1, float op2, size_t vl) { return vfadd_vf_f32m2(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vfadd_vv_f32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat32m4_t test_vfadd_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { +vfloat32m4_t test_vfadd_vv_f32m4 (vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { return vfadd_vv_f32m4(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vfadd_vf_f32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat32m4_t test_vfadd_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { +vfloat32m4_t test_vfadd_vf_f32m4 (vfloat32m4_t op1, float op2, size_t vl) { return vfadd_vf_f32m4(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vfadd_vv_f32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat32m8_t test_vfadd_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, size_t 
vl) { +vfloat32m8_t test_vfadd_vv_f32m8 (vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { return vfadd_vv_f32m8(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vfadd_vf_f32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat32m8_t test_vfadd_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { +vfloat32m8_t test_vfadd_vf_f32m8 (vfloat32m8_t op1, float op2, size_t vl) { return vfadd_vf_f32m8(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vfadd_vv_f64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat64m1_t test_vfadd_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { +vfloat64m1_t test_vfadd_vv_f64m1 (vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { return vfadd_vv_f64m1(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vfadd_vf_f64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat64m1_t test_vfadd_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { +vfloat64m1_t test_vfadd_vf_f64m1 (vfloat64m1_t op1, double op2, size_t vl) { return vfadd_vf_f64m1(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vfadd_vv_f64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat64m2_t test_vfadd_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { +vfloat64m2_t test_vfadd_vv_f64m2 (vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { return vfadd_vv_f64m2(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vfadd_vf_f64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat64m2_t test_vfadd_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { +vfloat64m2_t test_vfadd_vf_f64m2 (vfloat64m2_t op1, double op2, size_t vl) { return vfadd_vf_f64m2(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vfadd_vv_f64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat64m4_t test_vfadd_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { +vfloat64m4_t test_vfadd_vv_f64m4 (vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { return vfadd_vv_f64m4(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vfadd_vf_f64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat64m4_t test_vfadd_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { +vfloat64m4_t test_vfadd_vf_f64m4 (vfloat64m4_t op1, double op2, size_t vl) { return vfadd_vf_f64m4(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vfadd_vv_f64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat64m8_t test_vfadd_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { +vfloat64m8_t test_vfadd_vv_f64m8 (vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) 
{
  return vfadd_vv_f64m8(op1, op2, vl);
}

-//
// CHECK-RV64-LABEL: @test_vfadd_vf_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
-//
-vfloat64m8_t test_vfadd_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
+vfloat64m8_t test_vfadd_vf_f64m8 (vfloat64m8_t op1, double op2, size_t vl) {
  return vfadd_vf_f64m8(op1, op2, vl);
}

-//
+// CHECK-RV64-LABEL: @test_vfadd_vv_f16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
+vfloat16mf4_t test_vfadd_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
+  return vfadd_vv_f16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfadd_vf_f16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.f16.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
+vfloat16mf4_t test_vfadd_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
+  return vfadd_vf_f16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfadd_vv_f16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
+vfloat16mf2_t test_vfadd_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
+  return vfadd_vv_f16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfadd_vf_f16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.f16.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
+vfloat16mf2_t test_vfadd_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
+  return vfadd_vf_f16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfadd_vv_f16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
+vfloat16m1_t test_vfadd_vv_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
+  return vfadd_vv_f16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfadd_vf_f16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
+vfloat16m1_t test_vfadd_vf_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
+  return vfadd_vf_f16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfadd_vv_f16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
+vfloat16m2_t test_vfadd_vv_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
+  return vfadd_vv_f16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfadd_vf_f16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.f16.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
+vfloat16m2_t test_vfadd_vf_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
+  return vfadd_vf_f16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfadd_vv_f16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
+vfloat16m4_t test_vfadd_vv_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
+  return vfadd_vv_f16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfadd_vf_f16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.f16.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
+vfloat16m4_t test_vfadd_vf_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
+  return vfadd_vf_f16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfadd_vv_f16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
+vfloat16m8_t test_vfadd_vv_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
+  return vfadd_vv_f16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfadd_vf_f16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.f16.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
+vfloat16m8_t test_vfadd_vf_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
+  return vfadd_vf_f16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
// CHECK-RV64-LABEL: @test_vfadd_vv_f32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
-//
-vfloat32mf2_t test_vfadd_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
+vfloat32mf2_t test_vfadd_vv_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
  return vfadd_vv_f32mf2_m(mask, maskedoff, op1, op2, vl);
}

-//
// CHECK-RV64-LABEL: @test_vfadd_vf_f32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
-//
-vfloat32mf2_t test_vfadd_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
+vfloat32mf2_t test_vfadd_vf_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
  return vfadd_vf_f32mf2_m(mask, maskedoff, op1, op2, vl);
}

-//
// CHECK-RV64-LABEL: @test_vfadd_vv_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
-//
-vfloat32m1_t test_vfadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
+vfloat32m1_t test_vfadd_vv_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
  return vfadd_vv_f32m1_m(mask, maskedoff, op1, op2, vl);
}

-//
// CHECK-RV64-LABEL: @test_vfadd_vf_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
-//
-vfloat32m1_t test_vfadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
+vfloat32m1_t test_vfadd_vf_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
  return vfadd_vf_f32m1_m(mask, maskedoff, op1, op2, vl);
}

-//
// CHECK-RV64-LABEL: @test_vfadd_vv_f32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
-//
-vfloat32m2_t test_vfadd_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
+vfloat32m2_t test_vfadd_vv_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
  return vfadd_vv_f32m2_m(mask, maskedoff, op1, op2, vl);
}

-//
// CHECK-RV64-LABEL: @test_vfadd_vf_f32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
-//
-vfloat32m2_t test_vfadd_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
+vfloat32m2_t test_vfadd_vf_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
  return vfadd_vf_f32m2_m(mask, maskedoff, op1, op2, vl);
}

-//
// CHECK-RV64-LABEL: @test_vfadd_vv_f32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
-//
-vfloat32m4_t test_vfadd_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
+vfloat32m4_t test_vfadd_vv_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
  return vfadd_vv_f32m4_m(mask, maskedoff, op1, op2, vl);
}

-//
// CHECK-RV64-LABEL: @test_vfadd_vf_f32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
-//
-vfloat32m4_t test_vfadd_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
+vfloat32m4_t test_vfadd_vf_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
  return vfadd_vf_f32m4_m(mask, maskedoff, op1, op2, vl);
}

-//
// CHECK-RV64-LABEL: @test_vfadd_vv_f32m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
-//
-vfloat32m8_t test_vfadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
+vfloat32m8_t test_vfadd_vv_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
  return vfadd_vv_f32m8_m(mask, maskedoff, op1, op2, vl);
}

-//
// CHECK-RV64-LABEL: @test_vfadd_vf_f32m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
-//
-vfloat32m8_t test_vfadd_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
+vfloat32m8_t test_vfadd_vf_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
  return vfadd_vf_f32m8_m(mask, maskedoff, op1, op2, vl);
}

-//
// CHECK-RV64-LABEL: @test_vfadd_vv_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
-//
-vfloat64m1_t test_vfadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
+vfloat64m1_t test_vfadd_vv_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
  return vfadd_vv_f64m1_m(mask, maskedoff, op1, op2, vl);
}

-//
// CHECK-RV64-LABEL: @test_vfadd_vf_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
-//
-vfloat64m1_t test_vfadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
+vfloat64m1_t test_vfadd_vf_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
  return vfadd_vf_f64m1_m(mask, maskedoff, op1, op2, vl);
}

-//
// CHECK-RV64-LABEL: @test_vfadd_vv_f64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
-//
-vfloat64m2_t test_vfadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
+vfloat64m2_t test_vfadd_vv_f64m2_m (vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
  return vfadd_vv_f64m2_m(mask, maskedoff, op1, op2, vl);
}

-//
// CHECK-RV64-LABEL: @test_vfadd_vf_f64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
-//
-vfloat64m2_t test_vfadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
+vfloat64m2_t test_vfadd_vf_f64m2_m (vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
  return vfadd_vf_f64m2_m(mask, maskedoff, op1, op2, vl);
}

-//
// CHECK-RV64-LABEL: @test_vfadd_vv_f64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
-//
-vfloat64m4_t test_vfadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
+vfloat64m4_t test_vfadd_vv_f64m4_m (vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
  return vfadd_vv_f64m4_m(mask, maskedoff, op1, op2, vl);
}

-//
// CHECK-RV64-LABEL: @test_vfadd_vf_f64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
-//
-vfloat64m4_t test_vfadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
+vfloat64m4_t test_vfadd_vf_f64m4_m (vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
  return vfadd_vf_f64m4_m(mask, maskedoff, op1, op2, vl);
}

-//
// CHECK-RV64-LABEL: @test_vfadd_vv_f64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
-//
-vfloat64m8_t test_vfadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
+vfloat64m8_t test_vfadd_vv_f64m8_m (vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
  return vfadd_vv_f64m8_m(mask, maskedoff, op1, op2, vl);
}

-//
// CHECK-RV64-LABEL: @test_vfadd_vf_f64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
-//
-vfloat64m8_t test_vfadd_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
+vfloat64m8_t test_vfadd_vf_f64m8_m (vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
  return vfadd_vf_f64m8_m(mask, maskedoff, op1, op2, vl);
}
-
diff --git a/clang/utils/TableGen/RISCVVEmitter.cpp b/clang/utils/TableGen/RISCVVEmitter.cpp
--- a/clang/utils/TableGen/RISCVVEmitter.cpp
+++ b/clang/utils/TableGen/RISCVVEmitter.cpp
@@ -411,7 +411,7 @@
   case ScalarTypeKind::Float:
     switch (ElementBitwidth) {
     case 16:
-      BuiltinStr += "h";
+      BuiltinStr += "x";
      break;
    case 32:
      BuiltinStr += "f";
@@ -507,8 +507,10 @@
       Str += "double";
     else if (ElementBitwidth == 32)
       Str += "float";
-    assert((ElementBitwidth == 32 || ElementBitwidth == 64) &&
-           "Unhandled floating type");
+    else if (ElementBitwidth == 16)
+      Str += "_Float16";
+    else
+      llvm_unreachable("Unhandled floating type.");
   } else
     Str += getTypeString("float");
   break;
@@ -565,7 +567,7 @@
     ElementBitwidth = 64;
     ScalarType = ScalarTypeKind::SignedInteger;
     break;
-  case 'h':
+  case 'x':
     ElementBitwidth = 16;
     ScalarType = ScalarTypeKind::Float;
     break;
@@ -931,7 +933,7 @@
   }
   OS << "#if defined(__riscv_zfh)\n";
   for (int Log2LMUL : Log2LMULs) {
-    auto T = computeType('h', Log2LMUL, "v");
+    auto T = computeType('x', Log2LMUL, "v");
    if (T.hasValue())
      printType(T.getValue());
  }
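
For illustration only, and not part of the patch: a minimal sketch of how the newly enabled _Float16 intrinsics could be exercised from user code, assuming a toolchain with the V and Zfh extensions enabled. The helpers vsetvl_e16m1, vle16_v_f16m1, and vse16_v_f16m1 plus the unmasked vfadd_vv_f16m1 are assumed to be available from riscv_vector.h alongside the masked vfadd_* builtins shown in the tests above; the function name vfadd_f16_array is hypothetical.

#include <riscv_vector.h>
#include <stddef.h>

// dst[i] = a[i] + b[i] for n half-precision elements, strip-mined by the
// vector length vl that vsetvl_e16m1 grants on each pass.
void vfadd_f16_array(_Float16 *dst, const _Float16 *a, const _Float16 *b,
                     size_t n) {
  for (size_t vl; n > 0; n -= vl, a += vl, b += vl, dst += vl) {
    vl = vsetvl_e16m1(n);                           // elements this iteration
    vfloat16m1_t va = vle16_v_f16m1(a, vl);         // assumed f16 unit-stride load
    vfloat16m1_t vb = vle16_v_f16m1(b, vl);
    vfloat16m1_t vc = vfadd_vv_f16m1(va, vb, vl);   // f16 add enabled by this patch
    vse16_v_f16m1(dst, vc, vl);                     // assumed f16 unit-stride store
  }
}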