diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -2091,7 +2091,10 @@
 
 // 16.3. vfirst find-first-set mask bit
 def vfirst : RVVMaskOp0Builtin<"lm">;
+}
+
+let MaskedPolicyScheme = HasPassthruOperand,
+    HasTailPolicy = false in {
 
 // 16.4. vmsbf.m set-before-first mask bit
 def vmsbf : RVVMaskUnaryBuiltin;
 
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vid.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vid.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vid.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vid.c
@@ -219,3 +219,39 @@
                                size_t vl) {
   return vid(mask, maskedoff, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vid_v_u32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vid.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vid_v_u32mf2_tu(vuint32mf2_t merge, size_t vl) {
+  return vid_tu(merge, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vid_v_u32mf2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vid.mask.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vid_v_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, size_t vl) {
+  return vid_tuma(mask, merge, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vid_v_u32mf2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vid.mask.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vid_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, size_t vl) {
+  return vid_tumu(mask, merge, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vid_v_u32mf2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vid.mask.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vid_v_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, size_t vl) {
+  return vid_tamu(mask, merge, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/viota.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/viota.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/viota.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/viota.c
@@ -6,7 +6,7 @@
 
 // CHECK-RV64-LABEL: @test_viota_m_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.viota.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.viota.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_viota_m_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
@@ -16,7 +16,7 @@
 
 // CHECK-RV64-LABEL: @test_viota_m_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.viota.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.viota.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_viota_m_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
@@ -26,7 +26,7 @@
 
 // CHECK-RV64-LABEL: @test_viota_m_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.viota.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.viota.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_viota_m_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
@@ -36,7 +36,7 @@
 
 // CHECK-RV64-LABEL: @test_viota_m_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.viota.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.viota.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_viota_m_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
@@ -46,7 +46,7 @@
 
 // CHECK-RV64-LABEL: @test_viota_m_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.viota.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.viota.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_viota_m_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
@@ -56,7 +56,7 @@
 
 // CHECK-RV64-LABEL: @test_viota_m_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.viota.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.viota.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_viota_m_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
@@ -66,7 +66,7 @@
 
 // CHECK-RV64-LABEL: @test_viota_m_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.viota.mask.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.viota.mask.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_viota_m_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff,
@@ -76,7 +76,7 @@
 
 // CHECK-RV64-LABEL: @test_viota_m_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.viota.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.viota.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_viota_m_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
@@ -86,7 +86,7 @@
 
 // CHECK-RV64-LABEL: @test_viota_m_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.viota.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.viota.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_viota_m_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
@@ -96,7 +96,7 @@
 
 // CHECK-RV64-LABEL: @test_viota_m_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.viota.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.viota.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_viota_m_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
@@ -106,7 +106,7 @@
 
 // CHECK-RV64-LABEL: @test_viota_m_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.viota.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.viota.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_viota_m_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
@@ -116,7 +116,7 @@
 
 // CHECK-RV64-LABEL: @test_viota_m_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.viota.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.viota.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_viota_m_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
@@ -126,7 +126,7 @@
 
 // CHECK-RV64-LABEL: @test_viota_m_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.viota.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.viota.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_viota_m_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
@@ -136,7 +136,7 @@
 
 // CHECK-RV64-LABEL: @test_viota_m_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.viota.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.viota.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_viota_m_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
@@ -146,7 +146,7 @@
 
 // CHECK-RV64-LABEL: @test_viota_m_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.viota.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.viota.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_viota_m_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
@@ -156,7 +156,7 @@
 
 // CHECK-RV64-LABEL: @test_viota_m_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.viota.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.viota.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_viota_m_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
@@ -166,7 +166,7 @@
 
 // CHECK-RV64-LABEL: @test_viota_m_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.viota.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.viota.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_viota_m_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
@@ -176,7 +176,7 @@
 
 // CHECK-RV64-LABEL: @test_viota_m_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.viota.mask.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.viota.mask.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_viota_m_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
@@ -186,7 +186,7 @@
 
 // CHECK-RV64-LABEL: @test_viota_m_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.viota.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.viota.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_viota_m_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
@@ -196,7 +196,7 @@
 
 // CHECK-RV64-LABEL: @test_viota_m_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.viota.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.viota.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_viota_m_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
@@ -206,7 +206,7 @@
 
 // CHECK-RV64-LABEL: @test_viota_m_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.viota.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.viota.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_viota_m_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
@@ -216,10 +216,46 @@
 
 // CHECK-RV64-LABEL: @test_viota_m_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.viota.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.viota.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_viota_m_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
                                  vbool8_t op1, size_t vl) {
   return viota(mask, maskedoff, op1, vl);
 }
+
+// CHECK-RV64-LABEL: @test_viota_m_u32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.viota.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_viota_m_u32mf2_tu(vuint32mf2_t merge, vbool64_t op1, size_t vl) {
+  return viota_tu(merge, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_viota_m_u32mf2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.viota.mask.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_viota_m_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vbool64_t op1, size_t vl) {
+  return viota_tuma(mask, merge, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_viota_m_u32mf2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.viota.mask.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_viota_m_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vbool64_t op1, size_t vl) {
+  return viota_tumu(mask, merge, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_viota_m_u32mf2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.viota.mask.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_viota_m_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vbool64_t op1, size_t vl) {
+  return viota_tamu(mask, merge, op1, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsbf.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsbf.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsbf.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsbf.c
@@ -122,3 +122,21 @@
                            size_t vl) {
   return vmsbf(mask, maskedoff, op1, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vmsbf_m_b4_ma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbf.mask.nxv16i1.i64(<vscale x 16 x i1> undef, <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsbf_m_b4_ma(vbool4_t mask, vbool4_t op1, size_t vl) {
+  return vmsbf_ma(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmsbf_m_b4_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbf.mask.nxv16i1.i64(<vscale x 16 x i1> [[MERGE:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsbf_m_b4_mu(vbool4_t mask, vbool4_t merge, vbool4_t op1, size_t vl) {
+  return vmsbf_mu(mask, merge, op1, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsif.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsif.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsif.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsif.c
@@ -122,3 +122,21 @@
                            size_t vl) {
   return vmsif(mask, maskedoff, op1, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vmsif_m_b4_ma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsif.mask.nxv16i1.i64(<vscale x 16 x i1> undef, <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsif_m_b4_ma(vbool4_t mask, vbool4_t op1, size_t vl) {
+  return vmsif_ma(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmsif_m_b4_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsif.mask.nxv16i1.i64(<vscale x 16 x i1> [[MERGE:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsif_m_b4_mu(vbool4_t mask, vbool4_t merge, vbool4_t op1, size_t vl) {
+  return vmsif_mu(mask, merge, op1, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsof.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsof.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsof.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsof.c
@@ -122,3 +122,21 @@
                            size_t vl) {
   return vmsof(mask, maskedoff, op1, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vmsof_m_b4_ma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsof.mask.nxv16i1.i64(<vscale x 16 x i1> undef, <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsof_m_b4_ma(vbool4_t mask, vbool4_t op1, size_t vl) {
+  return vmsof_ma(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmsof_m_b4_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsof.mask.nxv16i1.i64(<vscale x 16 x i1> [[MERGE:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsof_m_b4_mu(vbool4_t mask, vbool4_t merge, vbool4_t op1, size_t vl) {
+  return vmsof_mu(mask, merge, op1, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vid.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vid.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vid.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vid.c
@@ -373,3 +373,57 @@
                                size_t vl) {
   return vid_v_u64m8_m(mask, maskedoff, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vid_v_u32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vid.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vid_v_u32mf2_tu(vuint32mf2_t merge, size_t vl) {
+  return vid_v_u32mf2_tu(merge, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vid_v_u32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vid.nxv1i32.i64(<vscale x 1 x i32> undef, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vid_v_u32mf2_ta(size_t vl) {
+  return vid_v_u32mf2_ta(vl);
+}
+
+// CHECK-RV64-LABEL: @test_vid_v_u32mf2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vid.mask.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vid_v_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, size_t vl) {
+  return vid_v_u32mf2_tuma(mask, merge, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vid_v_u32mf2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vid.mask.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vid_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, size_t vl) {
+  return vid_v_u32mf2_tumu(mask, merge, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vid_v_u32mf2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vid.mask.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vid_v_u32mf2_tama(vbool64_t mask, size_t vl) {
+  return vid_v_u32mf2_tama(mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vid_v_u32mf2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vid.mask.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vid_v_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, size_t vl) {
+  return vid_v_u32mf2_tamu(mask, merge, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/viota.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/viota.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/viota.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/viota.c
@@ -204,7 +204,7 @@
 
 // CHECK-RV64-LABEL: @test_viota_m_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.viota.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.viota.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_viota_m_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
@@ -214,7 +214,7 @@
 
 // CHECK-RV64-LABEL: @test_viota_m_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.viota.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.viota.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_viota_m_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
@@ -224,7 +224,7 @@
 
 // CHECK-RV64-LABEL: @test_viota_m_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.viota.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.viota.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_viota_m_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
@@ -234,7 +234,7 @@
 
 // CHECK-RV64-LABEL: @test_viota_m_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.viota.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.viota.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_viota_m_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
@@ -244,7 +244,7 @@
 
 // CHECK-RV64-LABEL: @test_viota_m_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.viota.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.viota.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_viota_m_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
@@ -254,7 +254,7 @@
 
 // CHECK-RV64-LABEL: @test_viota_m_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.viota.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.viota.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_viota_m_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
@@ -264,7 +264,7 @@
 
 // CHECK-RV64-LABEL: @test_viota_m_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.viota.mask.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.viota.mask.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_viota_m_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff,
@@ -274,7 +274,7 @@
 
 // CHECK-RV64-LABEL: @test_viota_m_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.viota.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.viota.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_viota_m_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
@@ -284,7 +284,7 @@
 
 // CHECK-RV64-LABEL: @test_viota_m_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.viota.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.viota.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_viota_m_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
@@ -294,7 +294,7 @@
 
 // CHECK-RV64-LABEL: @test_viota_m_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.viota.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.viota.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_viota_m_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
@@ -304,7 +304,7 @@
 
 // CHECK-RV64-LABEL: @test_viota_m_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.viota.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.viota.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_viota_m_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
@@ -314,7 +314,7 @@
 
 // CHECK-RV64-LABEL: @test_viota_m_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.viota.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.viota.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_viota_m_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
@@ -324,7 +324,7 @@
 
 // CHECK-RV64-LABEL: @test_viota_m_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.viota.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.viota.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_viota_m_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
@@ -334,7 +334,7 @@
 
 // CHECK-RV64-LABEL: @test_viota_m_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.viota.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.viota.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_viota_m_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
@@ -344,7 +344,7 @@
 
 // CHECK-RV64-LABEL: @test_viota_m_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.viota.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.viota.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_viota_m_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
@@ -354,7 +354,7 @@
 
 // CHECK-RV64-LABEL: @test_viota_m_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.viota.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.viota.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_viota_m_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
@@ -364,7 +364,7 @@
 
 // CHECK-RV64-LABEL: @test_viota_m_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.viota.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.viota.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_viota_m_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
@@ -374,7 +374,7 @@
 
 // CHECK-RV64-LABEL: @test_viota_m_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.viota.mask.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.viota.mask.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_viota_m_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
@@ -384,7 +384,7 @@
 
 // CHECK-RV64-LABEL: @test_viota_m_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.viota.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.viota.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_viota_m_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
@@ -394,7 +394,7 @@
 
 // CHECK-RV64-LABEL: @test_viota_m_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.viota.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.viota.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_viota_m_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
@@ -404,7 +404,7 @@
 
 // CHECK-RV64-LABEL: @test_viota_m_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.viota.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.viota.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_viota_m_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
@@ -414,10 +414,64 @@
 
 // CHECK-RV64-LABEL: @test_viota_m_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.viota.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.viota.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_viota_m_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
                                  vbool8_t op1, size_t vl) {
   return viota_m_u64m8_m(mask, maskedoff, op1, vl);
 }
+
+// CHECK-RV64-LABEL: @test_viota_m_u32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.viota.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_viota_m_u32mf2_tu(vuint32mf2_t merge, vbool64_t op1, size_t vl) {
+  return viota_m_u32mf2_tu(merge, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_viota_m_u32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.viota.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_viota_m_u32mf2_ta(vbool64_t op1, size_t vl) {
+  return viota_m_u32mf2_ta(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_viota_m_u32mf2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.viota.mask.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_viota_m_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vbool64_t op1, size_t vl) {
+  return viota_m_u32mf2_tuma(mask, merge, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_viota_m_u32mf2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.viota.mask.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_viota_m_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vbool64_t op1, size_t vl) {
+  return viota_m_u32mf2_tumu(mask, merge, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_viota_m_u32mf2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.viota.mask.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_viota_m_u32mf2_tama(vbool64_t mask, vbool64_t op1, size_t vl) {
+  return viota_m_u32mf2_tama(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_viota_m_u32mf2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.viota.mask.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_viota_m_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vbool64_t op1, size_t vl) {
+  return viota_m_u32mf2_tamu(mask, merge, op1, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsbf.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsbf.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsbf.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsbf.c
@@ -136,3 +136,21 @@
                            size_t vl) {
   return vmsbf_m_b64_m(mask, maskedoff, op1, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vmsbf_m_b4_ma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbf.mask.nxv16i1.i64(<vscale x 16 x i1> undef, <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsbf_m_b4_ma(vbool4_t mask, vbool4_t op1, size_t vl) {
+  return vmsbf_m_b4_ma(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmsbf_m_b4_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbf.mask.nxv16i1.i64(<vscale x 16 x i1> [[MERGE:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsbf_m_b4_mu(vbool4_t mask, vbool4_t merge, vbool4_t op1, size_t vl) {
+  return vmsbf_m_b4_mu(mask, merge, op1, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsif.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsif.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsif.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsif.c
@@ -136,3 +136,21 @@
                            size_t vl) {
   return vmsif_m_b64_m(mask, maskedoff, op1, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vmsif_m_b4_ma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsif.mask.nxv16i1.i64(<vscale x 16 x i1> undef, <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsif_m_b4_ma(vbool4_t mask, vbool4_t op1, size_t vl) {
+  return vmsif_m_b4_ma(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmsif_m_b4_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsif.mask.nxv16i1.i64(<vscale x 16 x i1> [[MERGE:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsif_m_b4_mu(vbool4_t mask, vbool4_t merge, vbool4_t op1, size_t vl) {
+  return vmsif_m_b4_mu(mask, merge, op1, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsof.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsof.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsof.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsof.c
@@ -136,3 +136,21 @@
                            size_t vl) {
   return vmsof_m_b64_m(mask, maskedoff, op1, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vmsof_m_b4_ma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsof.mask.nxv16i1.i64(<vscale x 16 x i1> undef, <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsof_m_b4_ma(vbool4_t mask, vbool4_t op1, size_t vl) {
+  return vmsof_m_b4_ma(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmsof_m_b4_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsof.mask.nxv16i1.i64(<vscale x 16 x i1> [[MERGE:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsof_m_b4_mu(vbool4_t mask, vbool4_t merge, vbool4_t op1, size_t vl) {
+  return vmsof_m_b4_mu(mask, merge, op1, vl);
+}