diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -137,6 +137,8 @@
 def NonePolicy : PolicyScheme<0>;
 def HasPassthruOperand : PolicyScheme<1>;
 def HasPolicyOperand : PolicyScheme<2>;
+// Special case for passthru operand which is not the first operand.
+def HasPassthruOperandAtIdx1 : PolicyScheme<3>;
 
 class RVVBuiltin {
@@ -1819,13 +1821,16 @@
 }
 
 // 12.15. Vector Integer Merge Instructions
-// C/C++ Operand: (mask, op1, op2, vl), Intrinsic: (op1, op2, mask, vl)
-let HasMasked = false, MaskedPolicyScheme = NonePolicy,
+// C/C++ Operand: (mask, op1, op2, vl), Intrinsic: (passthru, op1, op2, mask, vl)
+let HasMasked = false,
+    UnMaskedPolicyScheme = HasPassthruOperandAtIdx1,
+    MaskedPolicyScheme = NonePolicy,
     ManualCodegen = [{
-      std::rotate(Ops.begin(), Ops.begin() + 1, Ops.begin() + 3);
-      IntrinsicTypes = {ResultType, Ops[1]->getType(), Ops[3]->getType()};
+      std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
+      // insert undef passthru
-      Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType));
+      if (DefaultPolicy == TAIL_AGNOSTIC)
+        Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType));
+      IntrinsicTypes = {ResultType, Ops[2]->getType(), Ops.back()->getType()};
 }] in {
   defm vmerge : RVVOutOp1BuiltinSet<"vmerge", "csil",
                                     [["vvm", "v", "vmvv"],
@@ -1960,12 +1965,15 @@
 // 14.15. Vector Floating-Point Merge Instruction
 // C/C++ Operand: (mask, op1, op2, vl), Builtin: (op1, op2, mask, vl)
-let HasMasked = false, MaskedPolicyScheme = NonePolicy,
+let HasMasked = false,
+    UnMaskedPolicyScheme = HasPassthruOperandAtIdx1,
+    MaskedPolicyScheme = NonePolicy,
     ManualCodegen = [{
-      std::rotate(Ops.begin(), Ops.begin() + 1, Ops.begin() + 3);
-      IntrinsicTypes = {ResultType, Ops[1]->getType(), Ops[3]->getType()};
+      std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
+      // insert undef passthru
-      Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType));
+      if (DefaultPolicy == TAIL_AGNOSTIC)
+        Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType));
+      IntrinsicTypes = {ResultType, Ops[2]->getType(), Ops.back()->getType()};
 }] in {
   defm vmerge : RVVOutOp1BuiltinSet<"vmerge", "xfd",
                                     [["vvm", "v", "vmvv"]]>;
@@ -2152,10 +2160,16 @@
 }
 
 // 17.5. Vector Compress Instruction
-let HasMasked = false, MaskedPolicyScheme = NonePolicy,
+let IsPrototypeDefaultTU = true,
+    HasMasked = false,
+    UnMaskedPolicyScheme = HasPassthruOperandAtIdx1,
+    MaskedPolicyScheme = NonePolicy,
     ManualCodegen = [{
-      std::rotate(Ops.begin(), Ops.begin() + 1, Ops.begin() + 3);
-      IntrinsicTypes = {ResultType, Ops[3]->getType()};
+      std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
+      // insert undef passthru
+      if (DefaultPolicy == TAIL_AGNOSTIC)
+        Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType));
+      IntrinsicTypes = {ResultType, Ops.back()->getType()};
 }] in {
   // signed and floating type
   defm vcompress : RVVOutBuiltinSet<"vcompress", "csilxfd",
diff --git a/clang/include/clang/Support/RISCVVIntrinsicUtils.h b/clang/include/clang/Support/RISCVVIntrinsicUtils.h
--- a/clang/include/clang/Support/RISCVVIntrinsicUtils.h
+++ b/clang/include/clang/Support/RISCVVIntrinsicUtils.h
@@ -290,8 +290,12 @@
 enum PolicyScheme : uint8_t {
   SchemeNone,
+  // Passthru operand is the first parameter in C builtin.
   HasPassthruOperand,
   HasPolicyOperand,
+  // Special case for vmerge, where the passthru operand is the second
+  // parameter in C builtin.
+  HasPassthruOperandAtIdx1,
 };
 
 // TODO refactor RVVIntrinsic class design after support all intrinsic
diff --git a/clang/lib/Support/RISCVVIntrinsicUtils.cpp b/clang/lib/Support/RISCVVIntrinsicUtils.cpp
--- a/clang/lib/Support/RISCVVIntrinsicUtils.cpp
+++ b/clang/lib/Support/RISCVVIntrinsicUtils.cpp
@@ -973,6 +973,15 @@
   else if (DefaultPolicy == Policy::TA && HasPassthruOp &&
            IsPrototypeDefaultTU)
     NewPrototype.erase(NewPrototype.begin() + 1);
+  if (DefaultScheme == PolicyScheme::HasPassthruOperandAtIdx1) {
+    if (DefaultPolicy == Policy::TU && !IsPrototypeDefaultTU) {
+      // Insert undisturbed output at index 1
+      NewPrototype.insert(NewPrototype.begin() + 2, NewPrototype[0]);
+    } else if (DefaultPolicy == Policy::TA && IsPrototypeDefaultTU) {
+      // Erase passthru for TA policy
+      NewPrototype.erase(NewPrototype.begin() + 2);
+    }
+  }
 }
 
 // If HasVL, append PrototypeDescriptor:VL to last operand
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vcompress.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vcompress.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vcompress.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vcompress.c
@@ -482,4 +482,56 @@
   return vcompress(mask, dest, src, vl);
 }
 
+// CHECK-RV64-LABEL: @test_vcompress_vm_i32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vcompress.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vcompress_vm_i32mf2_tu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t src, size_t vl) {
+  return vcompress_tu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_u32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vcompress.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vcompress_vm_u32mf2_tu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t src, size_t vl) {
+  return vcompress_tu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_f32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vcompress.nxv1f32.i64(<vscale x 1 x float> [[MERGE:%.*]], <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vcompress_vm_f32mf2_tu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t src, size_t vl) {
+  return vcompress_tu(mask, merge, src, vl);
+}
+// CHECK-RV64-LABEL: @test_vcompress_vm_i32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vcompress.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vcompress_vm_i32mf2_ta(vbool64_t mask, vint32mf2_t src, size_t vl) {
+  return vcompress_ta(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_u32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vcompress.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vcompress_vm_u32mf2_ta(vbool64_t mask, vuint32mf2_t src, size_t vl) {
+  return vcompress_ta(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_f32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vcompress.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vcompress_vm_f32mf2_ta(vbool64_t mask, vfloat32mf2_t src, size_t vl) {
+  return vcompress_ta(mask, src, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmerge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmerge.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmerge.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmerge.c
@@ -94,3 +94,21 @@
                    size_t vl) {
   return vfmerge(mask, op1, op2, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vfmerge_vfm_f32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmerge.nxv1f32.f32.i64(<vscale x 1 x float> [[MERGE:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfmerge_vfm_f32mf2_tu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, float op2, size_t vl) {
+  return vfmerge_tu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmerge_vfm_f32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmerge.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfmerge_vfm_f32mf2_ta(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {
+  return vfmerge_ta(mask, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmerge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmerge.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmerge.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmerge.c
@@ -974,3 +974,93 @@
                          vfloat64m8_t op2, size_t vl) {
   return vmerge(mask, op1, op2, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmerge_vvm_i32mf2_tu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+  return vmerge_tu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmerge_vxm_i32mf2_tu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) {
+  return vmerge_tu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmerge_vvm_u32mf2_tu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+  return vmerge_tu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmerge_vxm_u32mf2_tu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+  return vmerge_tu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmerge_vvm_i32mf2_ta(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+  return vmerge_ta(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmerge_vxm_i32mf2_ta(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
+  return vmerge_ta(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmerge_vvm_u32mf2_ta(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+  return vmerge_ta(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmerge_vxm_u32mf2_ta(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+  return vmerge_ta(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_f32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vmerge.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MERGE:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vmerge_vvm_f32mf2_tu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
+  return vmerge_tu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_f32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vmerge.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vmerge_vvm_f32mf2_ta(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
+  return vmerge_ta(mask, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vcompress.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vcompress.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vcompress.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vcompress.c
@@ -536,3 +536,57 @@
 vfloat16m8_t test_vcompress_vm_f16m8 (vbool2_t mask, vfloat16m8_t dest, vfloat16m8_t src, size_t vl) {
   return vcompress_vm_f16m8(mask, dest, src, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_i32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vcompress.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vcompress_vm_i32mf2_tu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t src, size_t vl) {
+  return vcompress_vm_i32mf2_tu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_u32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vcompress.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vcompress_vm_u32mf2_tu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t src, size_t vl) {
+  return vcompress_vm_u32mf2_tu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_f32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vcompress.nxv1f32.i64(<vscale x 1 x float> [[MERGE:%.*]], <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vcompress_vm_f32mf2_tu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t src, size_t vl) {
+  return vcompress_vm_f32mf2_tu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_i32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vcompress.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vcompress_vm_i32mf2_ta(vbool64_t mask, vint32mf2_t src, size_t vl) {
+  return vcompress_vm_i32mf2_ta(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_u32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vcompress.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vcompress_vm_u32mf2_ta(vbool64_t mask, vuint32mf2_t src, size_t vl) {
+  return vcompress_vm_u32mf2_ta(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcompress_vm_f32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vcompress.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vcompress_vm_f32mf2_ta(vbool64_t mask, vfloat32mf2_t src, size_t vl) {
+  return vcompress_vm_f32mf2_ta(mask, src, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmerge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmerge.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmerge.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmerge.c
@@ -101,7 +101,7 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmerge.nxv1f16.f16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
-vfloat16mf4_t test_vfmerge_vfm_f16mf4 (vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
+vfloat16mf4_t test_vfmerge_vfm_f16mf4(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
   return vfmerge_vfm_f16mf4(mask, op1, op2, vl);
 }
 
@@ -110,7 +110,7 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmerge.nxv2f16.f16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
-vfloat16mf2_t test_vfmerge_vfm_f16mf2 (vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
+vfloat16mf2_t test_vfmerge_vfm_f16mf2(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
   return vfmerge_vfm_f16mf2(mask, op1, op2, vl);
 }
 
@@ -119,7 +119,7 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmerge.nxv4f16.f16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfmerge_vfm_f16m1 (vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) {
+vfloat16m1_t test_vfmerge_vfm_f16m1(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) {
   return vfmerge_vfm_f16m1(mask, op1, op2, vl);
 }
 
@@ -128,7 +128,7 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmerge.nxv8f16.f16.i64(<vscale x 8 x half> undef, <vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
-vfloat16m2_t test_vfmerge_vfm_f16m2 (vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) {
+vfloat16m2_t test_vfmerge_vfm_f16m2(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) {
   return vfmerge_vfm_f16m2(mask, op1, op2, vl);
 }
 
@@ -137,7 +137,7 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmerge.nxv16f16.f16.i64(<vscale x 16 x half> undef, <vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
-vfloat16m4_t test_vfmerge_vfm_f16m4 (vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) {
+vfloat16m4_t test_vfmerge_vfm_f16m4(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) {
   return vfmerge_vfm_f16m4(mask, op1, op2, vl);
 }
 
@@ -146,6 +146,24 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmerge.nxv32f16.f16.i64(<vscale x 32 x half> undef, <vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
-vfloat16m8_t test_vfmerge_vfm_f16m8 (vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) {
+vfloat16m8_t test_vfmerge_vfm_f16m8(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) {
   return vfmerge_vfm_f16m8(mask, op1, op2, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vfmerge_vfm_f32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmerge.nxv1f32.f32.i64(<vscale x 1 x float> [[MERGE:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfmerge_vfm_f32mf2_tu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, float op2, size_t vl) {
+  return vfmerge_vfm_f32mf2_tu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmerge_vfm_f32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmerge.nxv1f32.f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfmerge_vfm_f32mf2_ta(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {
+  return vfmerge_vfm_f32mf2_ta(mask, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmerge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmerge.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmerge.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmerge.c
@@ -981,7 +981,7 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vmerge.nxv1f16.nxv1f16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
-vfloat16mf4_t test_vmerge_vvm_f16mf4 (vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
+vfloat16mf4_t test_vmerge_vvm_f16mf4(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
   return vmerge_vvm_f16mf4(mask, op1, op2, vl);
 }
 
@@ -990,7 +990,7 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vmerge.nxv2f16.nxv2f16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
-vfloat16mf2_t test_vmerge_vvm_f16mf2 (vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
+vfloat16mf2_t test_vmerge_vvm_f16mf2(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
   return vmerge_vvm_f16mf2(mask, op1, op2, vl);
 }
 
@@ -999,7 +999,7 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vmerge.nxv4f16.nxv4f16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vmerge_vvm_f16m1 (vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
+vfloat16m1_t test_vmerge_vvm_f16m1(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
   return vmerge_vvm_f16m1(mask, op1, op2, vl);
 }
 
@@ -1008,7 +1008,7 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vmerge.nxv8f16.nxv8f16.i64(<vscale x 8 x half> undef, <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
-vfloat16m2_t test_vmerge_vvm_f16m2 (vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
+vfloat16m2_t test_vmerge_vvm_f16m2(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
   return vmerge_vvm_f16m2(mask, op1, op2, vl);
 }
 
@@ -1017,7 +1017,7 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vmerge.nxv16f16.nxv16f16.i64(<vscale x 16 x half> undef, <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
-vfloat16m4_t test_vmerge_vvm_f16m4 (vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
+vfloat16m4_t test_vmerge_vvm_f16m4(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
   return vmerge_vvm_f16m4(mask, op1, op2, vl);
 }
 
@@ -1026,6 +1026,96 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vmerge.nxv32f16.nxv32f16.i64(<vscale x 32 x half> undef, <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
-vfloat16m8_t test_vmerge_vvm_f16m8 (vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
+vfloat16m8_t test_vmerge_vvm_f16m8(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
   return vmerge_vvm_f16m8(mask, op1, op2, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmerge_vvm_i32mf2_tu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+  return vmerge_vvm_i32mf2_tu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmerge_vxm_i32mf2_tu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) {
+  return vmerge_vxm_i32mf2_tu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmerge_vvm_u32mf2_tu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+  return vmerge_vvm_u32mf2_tu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmerge_vxm_u32mf2_tu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+  return vmerge_vxm_u32mf2_tu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmerge_vvm_i32mf2_ta(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+  return vmerge_vvm_i32mf2_ta(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmerge_vxm_i32mf2_ta(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
+  return vmerge_vxm_i32mf2_ta(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmerge_vvm_u32mf2_ta(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+  return vmerge_vvm_u32mf2_ta(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmerge_vxm_u32mf2_ta(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+  return vmerge_vxm_u32mf2_ta(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_f32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vmerge.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MERGE:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vmerge_vvm_f32mf2_tu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
+  return vmerge_vvm_f32mf2_tu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmerge_vvm_f32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vmerge.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vmerge_vvm_f32mf2_ta(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
+  return vmerge_vvm_f32mf2_ta(mask, op1, op2, vl);
+}
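
Usage sketch (not part of the patch): the snippet below calls the policy variants this change introduces, with intrinsic names and signatures taken from the tests above; the wrapper function names are illustrative only.

#include <riscv_vector.h>

// Tail-undisturbed vcompress: tail elements of the result keep the values
// of the extra merge operand, which codegen forwards to the intrinsic's
// new passthru position.
vint32mf2_t compress_keep_tail(vbool64_t mask, vint32mf2_t merge,
                               vint32mf2_t src, size_t vl) {
  return vcompress_vm_i32mf2_tu(mask, merge, src, vl);
}

// Tail-agnostic vcompress: no merge operand in C; codegen supplies an
// undef passthru, as the CHECK lines above verify.
vint32mf2_t compress_any_tail(vbool64_t mask, vint32mf2_t src, size_t vl) {
  return vcompress_vm_i32mf2_ta(mask, src, vl);
}

// The same pattern applies to vmerge: the _tu variant takes the merge
// value as the second C argument, matching HasPassthruOperandAtIdx1.
vint32mf2_t merge_keep_tail(vbool64_t mask, vint32mf2_t merge,
                            vint32mf2_t op1, int32_t op2, size_t vl) {
  return vmerge_vxm_i32mf2_tu(mask, merge, op1, op2, vl);
}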