diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -480,7 +480,9 @@
   foreach eew_list = EEWList[0-2] in {
     defvar eew = eew_list[0];
     defvar eew_type = eew_list[1];
-    let Name = op # eew # "_v", IRName = op, MaskedIRName = op # "_mask" in {
+    let Name = op # eew # "_v", IRName = op, MaskedIRName = op # "_mask",
+        RequiredFeatures = !if(!eq(type, "x"), ["ZvfhminOrZvfh"],
+                               []) in {
       def: RVVOutOp1Builtin<"v", "vPCe" # eew_type # "Uv", type>;
       if !not(IsFloat<type>.val) then {
         def: RVVOutOp1Builtin<"Uv", "UvPCUe" # eew_type # "Uv", type>;
@@ -490,7 +492,8 @@
   defvar eew64 = "64";
   defvar eew64_type = "(Log2EEW:6)";
   let Name = op # eew64 # "_v", IRName = op, MaskedIRName = op # "_mask",
-      RequiredFeatures = ["RV64"] in {
+      RequiredFeatures = !if(!eq(type, "x"), ["ZvfhminOrZvfh", "RV64"],
+                             ["RV64"]) in {
     def: RVVOutOp1Builtin<"v", "vPCe" # eew64_type # "Uv", type>;
     if !not(IsFloat<type>.val) then {
       def: RVVOutOp1Builtin<"Uv", "UvPCUe" # eew64_type # "Uv", type>;
@@ -585,7 +588,9 @@
   foreach eew_list = EEWList[0-2] in {
     defvar eew = eew_list[0];
     defvar eew_type = eew_list[1];
-    let Name = op # eew # "_v", IRName = op, MaskedIRName = op # "_mask" in {
+    let Name = op # eew # "_v", IRName = op, MaskedIRName = op # "_mask",
+        RequiredFeatures = !if(!eq(type, "x"), ["ZvfhminOrZvfh"],
+                               []) in {
       def : RVVBuiltin<"v", "0Pe" # eew_type # "Uvv", type>;
       if !not(IsFloat<type>.val) then {
         def : RVVBuiltin<"Uv", "0PUe" # eew_type # "UvUv", type>;
@@ -595,7 +600,8 @@
   defvar eew64 = "64";
   defvar eew64_type = "(Log2EEW:6)";
   let Name = op # eew64 # "_v", IRName = op, MaskedIRName = op # "_mask",
-      RequiredFeatures = ["RV64"] in {
+      RequiredFeatures = !if(!eq(type, "x"), ["ZvfhminOrZvfh", "RV64"],
+                             ["RV64"]) in {
     def : RVVBuiltin<"v", "0Pe" # eew64_type # "Uvv", type>;
     if !not(IsFloat<type>.val) then {
       def : RVVBuiltin<"Uv", "0PUe" # eew64_type # "UvUv", type>;
@@ -706,6 +712,8 @@
         IRName = op # nf,
         MaskedIRName = op # nf # "_mask",
         NF = nf,
+        RequiredFeatures = !if(!eq(type, "x"), ["ZvfhminOrZvfh"],
+                               []),
         ManualCodegen = [{
     {
       ResultType = ConvertType(E->getArg(0)->getType()->getPointeeType());
@@ -771,6 +779,8 @@
         IRName = op # nf # "ff",
         MaskedIRName = op # nf # "ff_mask",
         NF = nf,
+        RequiredFeatures = !if(!eq(type, "x"), ["ZvfhminOrZvfh"],
+                               []),
         ManualCodegen = [{
     {
       ResultType = ConvertType(E->getArg(0)->getType()->getPointeeType());
@@ -838,6 +848,8 @@
         IRName = op # nf,
         MaskedIRName = op # nf # "_mask",
         NF = nf,
+        RequiredFeatures = !if(!eq(type, "x"), ["ZvfhminOrZvfh"],
+                               []),
         ManualCodegen = [{
     {
      ResultType = ConvertType(E->getArg(0)->getType()->getPointeeType());
@@ -901,6 +913,8 @@
         IRName = op # nf,
         MaskedIRName = op # nf # "_mask",
         NF = nf,
+        RequiredFeatures = !if(!eq(type, "x"), ["ZvfhminOrZvfh"],
+                               []),
         ManualCodegen = [{
     {
       ResultType = ConvertType(E->getArg(0)->getType()->getPointeeType());
@@ -981,6 +995,8 @@
         NF = nf,
         HasMaskedOffOperand = false,
         MaskedPolicyScheme = NonePolicy,
+        RequiredFeatures = !if(!eq(type, "x"), ["ZvfhminOrZvfh"],
+                               []),
         ManualCodegen = [{
     {
       if (IsMasked) {
@@ -1026,6 +1042,8 @@
         NF = nf,
         HasMaskedOffOperand = false,
         MaskedPolicyScheme = NonePolicy,
+        RequiredFeatures = !if(!eq(type, "x"), ["ZvfhminOrZvfh"],
+                               []),
         ManualCodegen = [{
     {
       if (IsMasked) {
@@ -1066,6 +1084,8 @@
         NF = nf,
         HasMaskedOffOperand = false,
         MaskedPolicyScheme = NonePolicy,
+        RequiredFeatures = !if(!eq(type, "x"), ["ZvfhminOrZvfh"],
+                               []),
         ManualCodegen = [{
     {
       if (IsMasked) {
@@ -1461,24 +1481,32 @@
 // 7.4. Vector Unit-Stride Instructions
 def vlm: RVVVLEMaskBuiltin;
 defm vle8: RVVVLEBuiltin<["c"]>;
-defm vle16: RVVVLEBuiltin<["s","x"]>;
+defm vle16: RVVVLEBuiltin<["s"]>;
+let Name = "vle16_v", RequiredFeatures = ["ZvfhminOrZvfh"] in
+  defm vle16_h: RVVVLEBuiltin<["x"]>;
 defm vle32: RVVVLEBuiltin<["i","f"]>;
 defm vle64: RVVVLEBuiltin<["l","d"]>;

 def vsm : RVVVSEMaskBuiltin;
 defm vse8 : RVVVSEBuiltin<["c"]>;
-defm vse16: RVVVSEBuiltin<["s","x"]>;
+defm vse16: RVVVSEBuiltin<["s"]>;
+let Name = "vse16_v", RequiredFeatures = ["ZvfhminOrZvfh"] in
+  defm vse16_h: RVVVSEBuiltin<["x"]>;
 defm vse32: RVVVSEBuiltin<["i","f"]>;
 defm vse64: RVVVSEBuiltin<["l","d"]>;

 // 7.5. Vector Strided Instructions
 defm vlse8: RVVVLSEBuiltin<["c"]>;
-defm vlse16: RVVVLSEBuiltin<["s","x"]>;
+defm vlse16: RVVVLSEBuiltin<["s"]>;
+let Name = "vlse16_v", RequiredFeatures = ["ZvfhminOrZvfh"] in
+  defm vlse16_h: RVVVLSEBuiltin<["x"]>;
 defm vlse32: RVVVLSEBuiltin<["i","f"]>;
 defm vlse64: RVVVLSEBuiltin<["l","d"]>;

 defm vsse8 : RVVVSSEBuiltin<["c"]>;
-defm vsse16: RVVVSSEBuiltin<["s","x"]>;
+defm vsse16: RVVVSSEBuiltin<["s"]>;
+let Name = "vsse16_v", RequiredFeatures = ["ZvfhminOrZvfh"] in
+  defm vsse16_h: RVVVSSEBuiltin<["x"]>;
 defm vsse32: RVVVSSEBuiltin<["i","f"]>;
 defm vsse64: RVVVSSEBuiltin<["l","d"]>;

@@ -1491,7 +1519,9 @@

 // 7.7. Unit-stride Fault-Only-First Loads
 defm vle8ff: RVVVLEFFBuiltin<["c"]>;
-defm vle16ff: RVVVLEFFBuiltin<["s","x"]>;
+defm vle16ff: RVVVLEFFBuiltin<["s"]>;
+let Name = "vle16ff_v", RequiredFeatures = ["ZvfhminOrZvfh"] in
+  defm vle16ff_h: RVVVLEFFBuiltin<["x"]>;
 defm vle32ff: RVVVLEFFBuiltin<["i", "f"]>;
 defm vle64ff: RVVVLEFFBuiltin<["l", "d"]>;

@@ -2055,8 +2085,11 @@
     OverloadedName = "vmv_v" in {
   defm vmv_v : RVVOutBuiltinSet<"vmv_v_v", "csil",
                                 [["v", "Uv", "UvUv"]]>;
-  defm vmv_v : RVVOutBuiltinSet<"vmv_v_v", "csilxfd",
+  defm vmv_v : RVVOutBuiltinSet<"vmv_v_v", "csilfd",
                                 [["v", "v", "vv"]]>;
+  let RequiredFeatures = ["ZvfhminOrZvfh"] in
+    defm vmv_v : RVVOutBuiltinSet<"vmv_v_v", "x",
+                                  [["v", "v", "vv"]]>;
   let SupportOverloading = false in
     defm vmv_v : RVVOutBuiltinSet<"vmv_v_x", "csil",
                                   [["x", "v", "ve"],
@@ -2183,8 +2216,11 @@
   Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
   IntrinsicTypes = {ResultType, Ops[2]->getType(), Ops.back()->getType()};
 }] in {
-  defm vmerge : RVVOutOp1BuiltinSet<"vmerge", "xfd",
+  defm vmerge : RVVOutOp1BuiltinSet<"vmerge", "fd",
                                     [["vvm", "v", "vvvm"]]>;
+  let RequiredFeatures = ["ZvfhminOrZvfh"] in
+    defm vmerge : RVVOutOp1BuiltinSet<"vmerge", "x",
+                                      [["vvm", "v", "vvvm"]]>;
   defm vfmerge : RVVOutOp1BuiltinSet<"vfmerge", "xfd",
                                      [["vfm", "v", "vvem"]]>;
 }
@@ -2438,11 +2474,17 @@
 }] in {
   // Reinterpret between different type under the same SEW and LMUL
   def vreinterpret_i_u : RVVBuiltin<"Uvv", "vUv", "csil", "v">;
-  def vreinterpret_i_f : RVVBuiltin<"Fvv", "vFv", "sil", "v">;
+  def vreinterpret_i_f : RVVBuiltin<"Fvv", "vFv", "il", "v">;
   def vreinterpret_u_i : RVVBuiltin<"vUv", "Uvv", "csil", "Uv">;
-  def vreinterpret_u_f : RVVBuiltin<"FvUv", "UvFv", "sil", "Uv">;
-  def vreinterpret_f_i : RVVBuiltin<"vFv", "Fvv", "sil", "Fv">;
-  def vreinterpret_f_u : RVVBuiltin<"UvFv", "FvUv", "sil", "Fv">;
+  def vreinterpret_u_f : RVVBuiltin<"FvUv", "UvFv", "il", "Uv">;
+  def vreinterpret_f_i : RVVBuiltin<"vFv", "Fvv", "il", "Fv">;
+  def vreinterpret_f_u : RVVBuiltin<"UvFv", "FvUv", "il", "Fv">;
+  let RequiredFeatures = ["ZvfhminOrZvfh"] in {
+    def vreinterpret_i_h : RVVBuiltin<"Fvv", "vFv", "s", "v">;
+    def vreinterpret_u_h : RVVBuiltin<"FvUv", "UvFv", "s", "Uv">;
"s", "Uv">; + def vreinterpret_h_i : RVVBuiltin<"vFv", "Fvv", "s", "Fv">; + def vreinterpret_h_u : RVVBuiltin<"UvFv", "FvUv", "s", "Fv">; + } // Reinterpret between different SEW under the same LMUL foreach dst_sew = ["(FixedSEW:8)", "(FixedSEW:16)", "(FixedSEW:32)", diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/zvfhmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/zvfhmin.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/zvfhmin.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/zvfhmin.c @@ -25,3 +25,208 @@ vfloat32m2_t test_vfwcvt_f_f_v_f16m1(vfloat16m1_t src, size_t vl) { return __riscv_vfwcvt_f(src, vl); } + +// CHECK-ZVFHMIN-LABEL: @test_vle16_v_f16m1( +// CHECK-ZVFHMIN-NEXT: entry: +// CHECK-ZVFHMIN-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vle.nxv4f16.i64( poison, ptr [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-ZVFHMIN-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vle16_v_f16m1(const _Float16 *base, size_t vl) { + return __riscv_vle16_v_f16m1(base, vl); +} + +// CHECK-ZVFHMIN-LABEL: @test_vse16_v_f16m1( +// CHECK-ZVFHMIN-NEXT: entry: +// CHECK-ZVFHMIN-NEXT: call void @llvm.riscv.vse.nxv4f16.i64( [[VALUE:%.*]], ptr [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-ZVFHMIN-NEXT: ret void +// +void test_vse16_v_f16m1(_Float16 *base, vfloat16m1_t value, size_t vl) { + return __riscv_vse16_v_f16m1(base, value, vl); +} + +// CHECK-ZVFHMIN-LABEL: @test_vlse16_v_f16m1( +// CHECK-ZVFHMIN-NEXT: entry: +// CHECK-ZVFHMIN-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vlse.nxv4f16.i64( poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-ZVFHMIN-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vlse16_v_f16m1(const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlse16_v_f16m1(base, bstride, vl); +} + +// CHECK-ZVFHMIN-LABEL: @test_vsse16_v_f16m1( +// CHECK-ZVFHMIN-NEXT: entry: +// CHECK-ZVFHMIN-NEXT: call void @llvm.riscv.vsse.nxv4f16.i64( [[VALUE:%.*]], ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-ZVFHMIN-NEXT: ret void +// +void test_vsse16_v_f16m1(_Float16 *base, ptrdiff_t bstride, vfloat16m1_t value, size_t vl) { + return __riscv_vsse16_v_f16m1(base, bstride, value, vl); +} + +// CHECK-ZVFHMIN-LABEL: @test_vluxei32_v_f16m1( +// CHECK-ZVFHMIN-NEXT: entry: +// CHECK-ZVFHMIN-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vluxei.nxv4f16.nxv4i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-ZVFHMIN-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vluxei32_v_f16m1(const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return __riscv_vluxei32_v_f16m1(base, bindex, vl); +} + +// CHECK-ZVFHMIN-LABEL: @test_vsuxei32_v_f16m1( +// CHECK-ZVFHMIN-NEXT: entry: +// CHECK-ZVFHMIN-NEXT: call void @llvm.riscv.vsuxei.nxv4f16.nxv4i32.i64( [[VALUE:%.*]], ptr [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-ZVFHMIN-NEXT: ret void +// +void test_vsuxei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t value, size_t vl) { + return __riscv_vsuxei32_v_f16m1(base, bindex, value, vl); +} + +// CHECK-ZVFHMIN-LABEL: @test_vloxei32_v_f16m1( +// CHECK-ZVFHMIN-NEXT: entry: +// CHECK-ZVFHMIN-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vloxei.nxv4f16.nxv4i32.i64( poison, ptr [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-ZVFHMIN-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vloxei32_v_f16m1(const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return __riscv_vloxei32_v_f16m1(base, bindex, vl); +} + +// CHECK-ZVFHMIN-LABEL: @test_vsoxei32_v_f16m1( +// CHECK-ZVFHMIN-NEXT: entry: +// CHECK-ZVFHMIN-NEXT: call void 
+// CHECK-ZVFHMIN-NEXT: ret void
+//
+void test_vsoxei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t value, size_t vl) {
+  return __riscv_vsoxei32_v_f16m1(base, bindex, value, vl);
+}
+
+// CHECK-ZVFHMIN-LABEL: @test_vle16ff_v_f16m1(
+// CHECK-ZVFHMIN-NEXT: entry:
+// CHECK-ZVFHMIN-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, i64 } @llvm.riscv.vleff.nxv4f16.i64(<vscale x 4 x half> poison, ptr [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-ZVFHMIN-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, i64 } [[TMP0]], 0
+// CHECK-ZVFHMIN-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, i64 } [[TMP0]], 1
+// CHECK-ZVFHMIN-NEXT: store i64 [[TMP2]], ptr [[NEW_VL:%.*]], align 8
+// CHECK-ZVFHMIN-NEXT: ret <vscale x 4 x half> [[TMP1]]
+//
+vfloat16m1_t test_vle16ff_v_f16m1(const _Float16 *base, size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_v_f16m1(base, new_vl, vl);
+}
+
+// CHECK-ZVFHMIN-LABEL: @test_vlseg2e16_v_f16m1(
+// CHECK-ZVFHMIN-NEXT: entry:
+// CHECK-ZVFHMIN-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlseg2.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> poison, ptr [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-ZVFHMIN-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-ZVFHMIN-NEXT: store <vscale x 4 x half> [[TMP1]], ptr [[V0:%.*]], align 2
+// CHECK-ZVFHMIN-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-ZVFHMIN-NEXT: store <vscale x 4 x half> [[TMP2]], ptr [[V1:%.*]], align 2
+// CHECK-ZVFHMIN-NEXT: ret void
+//
+void test_vlseg2e16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, size_t vl) {
+  return __riscv_vlseg2e16_v_f16m1(v0, v1, base, vl);
+}
+
+// CHECK-ZVFHMIN-LABEL: @test_vlseg2e16ff_v_f16m1(
+// CHECK-ZVFHMIN-NEXT: entry:
+// CHECK-ZVFHMIN-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, i64 } @llvm.riscv.vlseg2ff.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> poison, ptr [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-ZVFHMIN-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 0
+// CHECK-ZVFHMIN-NEXT: store <vscale x 4 x half> [[TMP1]], ptr [[V0:%.*]], align 2
+// CHECK-ZVFHMIN-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 1
+// CHECK-ZVFHMIN-NEXT: store <vscale x 4 x half> [[TMP2]], ptr [[V1:%.*]], align 2
+// CHECK-ZVFHMIN-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 2
+// CHECK-ZVFHMIN-NEXT: store i64 [[TMP3]], ptr [[NEW_VL:%.*]], align 8
+// CHECK-ZVFHMIN-NEXT: ret void
+//
+void test_vlseg2e16ff_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e16ff_v_f16m1(v0, v1, base, new_vl, vl);
+}
+
+// CHECK-ZVFHMIN-LABEL: @test_vlsseg2e16_v_f16m1(
+// CHECK-ZVFHMIN-NEXT: entry:
+// CHECK-ZVFHMIN-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlsseg2.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> poison, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-ZVFHMIN-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-ZVFHMIN-NEXT: store <vscale x 4 x half> [[TMP1]], ptr [[V0:%.*]], align 2
+// CHECK-ZVFHMIN-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-ZVFHMIN-NEXT: store <vscale x 4 x half> [[TMP2]], ptr [[V1:%.*]], align 2
+// CHECK-ZVFHMIN-NEXT: ret void
+//
+void test_vlsseg2e16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e16_v_f16m1(v0, v1, base, bstride, vl);
+}
+
+// CHECK-ZVFHMIN-LABEL: @test_vluxseg2ei32_v_f16m1(
+// CHECK-ZVFHMIN-NEXT: entry:
+// CHECK-ZVFHMIN-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg2.nxv4f16.nxv4i32.i64(<vscale x 4 x half> poison, <vscale x 4 x half> poison, ptr [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-ZVFHMIN-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-ZVFHMIN-NEXT: store <vscale x 4 x half> [[TMP1]], ptr [[V0:%.*]], align 2
+// CHECK-ZVFHMIN-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-ZVFHMIN-NEXT: store <vscale x 4 x half> [[TMP2]], ptr [[V1:%.*]], align 2
+// CHECK-ZVFHMIN-NEXT: ret void
+//
+void test_vluxseg2ei32_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
+  return __riscv_vluxseg2ei32_v_f16m1(v0, v1, base, bindex, vl);
+}
+
+// CHECK-ZVFHMIN-LABEL: @test_vloxseg2ei32_v_f16m1(
+// CHECK-ZVFHMIN-NEXT: entry:
+// CHECK-ZVFHMIN-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg2.nxv4f16.nxv4i32.i64(<vscale x 4 x half> poison, <vscale x 4 x half> poison, ptr [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-ZVFHMIN-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-ZVFHMIN-NEXT: store <vscale x 4 x half> [[TMP1]], ptr [[V0:%.*]], align 2
+// CHECK-ZVFHMIN-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-ZVFHMIN-NEXT: store <vscale x 4 x half> [[TMP2]], ptr [[V1:%.*]], align 2
+// CHECK-ZVFHMIN-NEXT: ret void
+//
+void test_vloxseg2ei32_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
+  return __riscv_vloxseg2ei32_v_f16m1(v0, v1, base, bindex, vl);
+}
+
+// CHECK-ZVFHMIN-LABEL: @test_vmerge_vvm_f16m1(
+// CHECK-ZVFHMIN-NEXT: entry:
+// CHECK-ZVFHMIN-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vmerge.nxv4f16.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-ZVFHMIN-NEXT: ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vmerge_vvm_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, vbool16_t mask, size_t vl) {
+  return __riscv_vmerge(op1, op2, mask, vl);
+}
+
+// CHECK-ZVFHMIN-LABEL: @test_vmv_v_v_f16m1(
+// CHECK-ZVFHMIN-NEXT: entry:
+// CHECK-ZVFHMIN-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vmv.v.v.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-ZVFHMIN-NEXT: ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vmv_v_v_f16m1(vfloat16m1_t src, size_t vl) {
+  return __riscv_vmv_v(src, vl);
+}
+
+// CHECK-ZVFHMIN-LABEL: @test_vreinterpret_v_f16m1_i16m1(
+// CHECK-ZVFHMIN-NEXT: entry:
+// CHECK-ZVFHMIN-NEXT: [[TMP0:%.*]] = bitcast <vscale x 4 x half> [[SRC:%.*]] to <vscale x 4 x i16>
+// CHECK-ZVFHMIN-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vreinterpret_v_f16m1_i16m1(vfloat16m1_t src) {
+  return __riscv_vreinterpret_v_f16m1_i16m1(src);
+}
+
+// CHECK-ZVFHMIN-LABEL: @test_vreinterpret_v_f16m1_u16m1(
+// CHECK-ZVFHMIN-NEXT: entry:
+// CHECK-ZVFHMIN-NEXT: [[TMP0:%.*]] = bitcast <vscale x 4 x half> [[SRC:%.*]] to <vscale x 4 x i16>
+// CHECK-ZVFHMIN-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vreinterpret_v_f16m1_u16m1(vfloat16m1_t src) {
+  return __riscv_vreinterpret_v_f16m1_u16m1(src);
+}
+
+// CHECK-ZVFHMIN-LABEL: @test_vreinterpret_v_i16m1_f16m1(
+// CHECK-ZVFHMIN-NEXT: entry:
+// CHECK-ZVFHMIN-NEXT: [[TMP0:%.*]] = bitcast <vscale x 4 x i16> [[SRC:%.*]] to <vscale x 4 x half>
+// CHECK-ZVFHMIN-NEXT: ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vreinterpret_v_i16m1_f16m1(vint16m1_t src) {
+  return __riscv_vreinterpret_v_i16m1_f16m1(src);
+}
+
+// CHECK-ZVFHMIN-LABEL: @test_vreinterpret_v_u16m1_f16m1(
+// CHECK-ZVFHMIN-NEXT: entry:
+// CHECK-ZVFHMIN-NEXT: [[TMP0:%.*]] = bitcast <vscale x 4 x i16> [[SRC:%.*]] to <vscale x 4 x half>
+// CHECK-ZVFHMIN-NEXT: ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vreinterpret_v_u16m1_f16m1(vuint16m1_t src) {
+  return __riscv_vreinterpret_v_u16m1_f16m1(src);
+}
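
Usage sketch (illustrative, not part of the patch): with this change applied, compiling with a Zvfhmin-only target such as -march=rv64gcv_zvfhmin should accept the _Float16 load/store/move/reinterpret intrinsics gated above, while f16 arithmetic intrinsics still require full Zvfh. The file name, march string, and loop structure below are assumptions for illustration, not taken from the patch.

// zvfhmin_copy.c -- hypothetical example.
// Compile with: clang -march=rv64gcv_zvfhmin -O2 -c zvfhmin_copy.c
#include <riscv_vector.h>
#include <stddef.h>

void copy_f16(const _Float16 *src, _Float16 *dst, size_t n) {
  for (size_t i = 0; i < n;) {
    // vsetvl only needs the base V extension.
    size_t vl = __riscv_vsetvl_e16m1(n - i);
    // Load/store of f16 vectors: gated on ZvfhminOrZvfh by this patch.
    vfloat16m1_t v = __riscv_vle16_v_f16m1(src + i, vl);
    __riscv_vse16_v_f16m1(dst + i, v, vl);
    // v = __riscv_vfadd_vv_f16m1(v, v, vl);  // still rejected: needs full Zvfh
    i += vl;
  }
}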